column          dtype           values
--------------  --------------  ----------------------------------------------
repo_name       stringlengths   5 to 100
path            stringlengths   4 to 299
copies          stringclasses   990 values
size            stringlengths   4 to 7
content         stringlengths   666 to 1.03M
license         stringclasses   15 values
hash            int64           -9,223,351,895,964,839,000 to 9,223,297,778B
line_mean       float64         3.17 to 100
line_max        int64           7 to 1k
alpha_frac      float64         0.25 to 0.98
autogenerated   bool            1 class
repo_name: sriki18/scipy
path: scipy/sparse/linalg/_onenormest.py
copies: 96
size: 15138
"""Sparse block 1-norm estimator. """ from __future__ import division, print_function, absolute_import import numpy as np from scipy.sparse.linalg import aslinearoperator __all__ = ['onenormest'] def onenormest(A, t=2, itmax=5, compute_v=False, compute_w=False): """ Compute a lower bound of the 1-norm of a sparse matrix. Parameters ---------- A : ndarray or other linear operator A linear operator that can be transposed and that can produce matrix products. t : int, optional A positive parameter controlling the tradeoff between accuracy versus time and memory usage. Larger values take longer and use more memory but give more accurate output. itmax : int, optional Use at most this many iterations. compute_v : bool, optional Request a norm-maximizing linear operator input vector if True. compute_w : bool, optional Request a norm-maximizing linear operator output vector if True. Returns ------- est : float An underestimate of the 1-norm of the sparse matrix. v : ndarray, optional The vector such that ||Av||_1 == est*||v||_1. It can be thought of as an input to the linear operator that gives an output with particularly large norm. w : ndarray, optional The vector Av which has relatively large 1-norm. It can be thought of as an output of the linear operator that is relatively large in norm compared to the input. Notes ----- This is algorithm 2.4 of [1]. In [2] it is described as follows. "This algorithm typically requires the evaluation of about 4t matrix-vector products and almost invariably produces a norm estimate (which is, in fact, a lower bound on the norm) correct to within a factor 3." .. versionadded:: 0.13.0 References ---------- .. [1] Nicholas J. Higham and Francoise Tisseur (2000), "A Block Algorithm for Matrix 1-Norm Estimation, with an Application to 1-Norm Pseudospectra." SIAM J. Matrix Anal. Appl. Vol. 21, No. 4, pp. 1185-1201. .. [2] Awad H. Al-Mohy and Nicholas J. Higham (2009), "A new scaling and squaring algorithm for the matrix exponential." SIAM J. Matrix Anal. Appl. Vol. 31, No. 3, pp. 970-989. """ # Check the input. A = aslinearoperator(A) if A.shape[0] != A.shape[1]: raise ValueError('expected the operator to act like a square matrix') # If the operator size is small compared to t, # then it is easier to compute the exact norm. # Otherwise estimate the norm. n = A.shape[1] if t >= n: A_explicit = np.asarray(aslinearoperator(A).matmat(np.identity(n))) if A_explicit.shape != (n, n): raise Exception('internal error: ', 'unexpected shape ' + str(A_explicit.shape)) col_abs_sums = abs(A_explicit).sum(axis=0) if col_abs_sums.shape != (n, ): raise Exception('internal error: ', 'unexpected shape ' + str(col_abs_sums.shape)) argmax_j = np.argmax(col_abs_sums) v = elementary_vector(n, argmax_j) w = A_explicit[:, argmax_j] est = col_abs_sums[argmax_j] else: est, v, w, nmults, nresamples = _onenormest_core(A, A.H, t, itmax) # Report the norm estimate along with some certificates of the estimate. if compute_v or compute_w: result = (est,) if compute_v: result += (v,) if compute_w: result += (w,) return result else: return est def _blocked_elementwise(func): """ Decorator for an elementwise function, to apply it blockwise along first dimension, to avoid excessive memory usage in temporaries. 
""" block_size = 2**20 def wrapper(x): if x.shape[0] < block_size: return func(x) else: y0 = func(x[:block_size]) y = np.zeros((x.shape[0],) + y0.shape[1:], dtype=y0.dtype) y[:block_size] = y0 del y0 for j in range(block_size, x.shape[0], block_size): y[j:j+block_size] = func(x[j:j+block_size]) return y return wrapper @_blocked_elementwise def sign_round_up(X): """ This should do the right thing for both real and complex matrices. From Higham and Tisseur: "Everything in this section remains valid for complex matrices provided that sign(A) is redefined as the matrix (aij / |aij|) (and sign(0) = 1) transposes are replaced by conjugate transposes." """ Y = X.copy() Y[Y == 0] = 1 Y /= np.abs(Y) return Y @_blocked_elementwise def _max_abs_axis1(X): return np.max(np.abs(X), axis=1) def _sum_abs_axis0(X): block_size = 2**20 r = None for j in range(0, X.shape[0], block_size): y = np.sum(np.abs(X[j:j+block_size]), axis=0) if r is None: r = y else: r += y return r def elementary_vector(n, i): v = np.zeros(n, dtype=float) v[i] = 1 return v def vectors_are_parallel(v, w): # Columns are considered parallel when they are equal or negative. # Entries are required to be in {-1, 1}, # which guarantees that the magnitudes of the vectors are identical. if v.ndim != 1 or v.shape != w.shape: raise ValueError('expected conformant vectors with entries in {-1,1}') n = v.shape[0] return np.dot(v, w) == n def every_col_of_X_is_parallel_to_a_col_of_Y(X, Y): for v in X.T: if not any(vectors_are_parallel(v, w) for w in Y.T): return False return True def column_needs_resampling(i, X, Y=None): # column i of X needs resampling if either # it is parallel to a previous column of X or # it is parallel to a column of Y n, t = X.shape v = X[:, i] if any(vectors_are_parallel(v, X[:, j]) for j in range(i)): return True if Y is not None: if any(vectors_are_parallel(v, w) for w in Y.T): return True return False def resample_column(i, X): X[:, i] = np.random.randint(0, 2, size=X.shape[0])*2 - 1 def less_than_or_close(a, b): return np.allclose(a, b) or (a < b) def _algorithm_2_2(A, AT, t): """ This is Algorithm 2.2. Parameters ---------- A : ndarray or other linear operator A linear operator that can produce matrix products. AT : ndarray or other linear operator The transpose of A. t : int, optional A positive parameter controlling the tradeoff between accuracy versus time and memory usage. Returns ------- g : sequence A non-negative decreasing vector such that g[j] is a lower bound for the 1-norm of the column of A of jth largest 1-norm. The first entry of this vector is therefore a lower bound on the 1-norm of the linear operator A. This sequence has length t. ind : sequence The ith entry of ind is the index of the column A whose 1-norm is given by g[i]. This sequence of indices has length t, and its entries are chosen from range(n), possibly with repetition, where n is the order of the operator A. Notes ----- This algorithm is mainly for testing. It uses the 'ind' array in a way that is similar to its usage in algorithm 2.4. This algorithm 2.2 may be easier to test, so it gives a chance of uncovering bugs related to indexing which could have propagated less noticeably to algorithm 2.4. """ A_linear_operator = aslinearoperator(A) AT_linear_operator = aslinearoperator(AT) n = A_linear_operator.shape[0] # Initialize the X block with columns of unit 1-norm. X = np.ones((n, t)) if t > 1: X[:, 1:] = np.random.randint(0, 2, size=(n, t-1))*2 - 1 X /= float(n) # Iteratively improve the lower bounds. 
# Track extra things, to assert invariants for debugging. g_prev = None h_prev = None k = 1 ind = range(t) while True: Y = np.asarray(A_linear_operator.matmat(X)) g = _sum_abs_axis0(Y) best_j = np.argmax(g) g.sort() g = g[::-1] S = sign_round_up(Y) Z = np.asarray(AT_linear_operator.matmat(S)) h = _max_abs_axis1(Z) # If this algorithm runs for fewer than two iterations, # then its return values do not have the properties indicated # in the description of the algorithm. # In particular, the entries of g are not 1-norms of any # column of A until the second iteration. # Therefore we will require the algorithm to run for at least # two iterations, even though this requirement is not stated # in the description of the algorithm. if k >= 2: if less_than_or_close(max(h), np.dot(Z[:, best_j], X[:, best_j])): break ind = np.argsort(h)[::-1][:t] h = h[ind] for j in range(t): X[:, j] = elementary_vector(n, ind[j]) # Check invariant (2.2). if k >= 2: if not less_than_or_close(g_prev[0], h_prev[0]): raise Exception('invariant (2.2) is violated') if not less_than_or_close(h_prev[0], g[0]): raise Exception('invariant (2.2) is violated') # Check invariant (2.3). if k >= 3: for j in range(t): if not less_than_or_close(g[j], g_prev[j]): raise Exception('invariant (2.3) is violated') # Update for the next iteration. g_prev = g h_prev = h k += 1 # Return the lower bounds and the corresponding column indices. return g, ind def _onenormest_core(A, AT, t, itmax): """ Compute a lower bound of the 1-norm of a sparse matrix. Parameters ---------- A : ndarray or other linear operator A linear operator that can produce matrix products. AT : ndarray or other linear operator The transpose of A. t : int, optional A positive parameter controlling the tradeoff between accuracy versus time and memory usage. itmax : int, optional Use at most this many iterations. Returns ------- est : float An underestimate of the 1-norm of the sparse matrix. v : ndarray, optional The vector such that ||Av||_1 == est*||v||_1. It can be thought of as an input to the linear operator that gives an output with particularly large norm. w : ndarray, optional The vector Av which has relatively large 1-norm. It can be thought of as an output of the linear operator that is relatively large in norm compared to the input. nmults : int, optional The number of matrix products that were computed. nresamples : int, optional The number of times a parallel column was observed, necessitating a re-randomization of the column. Notes ----- This is algorithm 2.4. """ # This function is a more or less direct translation # of Algorithm 2.4 from the Higham and Tisseur (2000) paper. A_linear_operator = aslinearoperator(A) AT_linear_operator = aslinearoperator(AT) if itmax < 2: raise ValueError('at least two iterations are required') if t < 1: raise ValueError('at least one column is required') n = A.shape[0] if t >= n: raise ValueError('t should be smaller than the order of A') # Track the number of big*small matrix multiplications # and the number of resamplings. nmults = 0 nresamples = 0 # "We now explain our choice of starting matrix. We take the first # column of X to be the vector of 1s [...] 
This has the advantage that # for a matrix with nonnegative elements the algorithm converges # with an exact estimate on the second iteration, and such matrices # arise in applications [...]" X = np.ones((n, t), dtype=float) # "The remaining columns are chosen as rand{-1,1}, # with a check for and correction of parallel columns, # exactly as for S in the body of the algorithm." if t > 1: for i in range(1, t): # These are technically initial samples, not resamples, # so the resampling count is not incremented. resample_column(i, X) for i in range(t): while column_needs_resampling(i, X): resample_column(i, X) nresamples += 1 # "Choose starting matrix X with columns of unit 1-norm." X /= float(n) # "indices of used unit vectors e_j" ind_hist = np.zeros(0, dtype=np.intp) est_old = 0 S = np.zeros((n, t), dtype=float) k = 1 ind = None while True: Y = np.asarray(A_linear_operator.matmat(X)) nmults += 1 mags = _sum_abs_axis0(Y) est = np.max(mags) best_j = np.argmax(mags) if est > est_old or k == 2: if k >= 2: ind_best = ind[best_j] w = Y[:, best_j] # (1) if k >= 2 and est <= est_old: est = est_old break est_old = est S_old = S if k > itmax: break S = sign_round_up(Y) del Y # (2) if every_col_of_X_is_parallel_to_a_col_of_Y(S, S_old): break if t > 1: # "Ensure that no column of S is parallel to another column of S # or to a column of S_old by replacing columns of S by rand{-1,1}." for i in range(t): while column_needs_resampling(i, S, S_old): resample_column(i, S) nresamples += 1 del S_old # (3) Z = np.asarray(AT_linear_operator.matmat(S)) nmults += 1 h = _max_abs_axis1(Z) del Z # (4) if k >= 2 and max(h) == h[ind_best]: break # "Sort h so that h_first >= ... >= h_last # and re-order ind correspondingly." # # Later on, we will need at most t+len(ind_hist) largest # entries, so drop the rest ind = np.argsort(h)[::-1][:t+len(ind_hist)].copy() del h if t > 1: # (5) # Break if the most promising t vectors have been visited already. if np.in1d(ind[:t], ind_hist).all(): break # Put the most promising unvisited vectors at the front of the list # and put the visited vectors at the end of the list. # Preserve the order of the indices induced by the ordering of h. seen = np.in1d(ind, ind_hist) ind = np.concatenate((ind[~seen], ind[seen])) for j in range(t): X[:, j] = elementary_vector(n, ind[j]) new_ind = ind[:t][~np.in1d(ind[:t], ind_hist)] ind_hist = np.concatenate((ind_hist, new_ind)) k += 1 v = elementary_vector(n, ind_best) return est, v, w, nmults, nresamples
license: bsd-3-clause
hash: -1,855,589,016,059,843,300
line_mean: 32.27033
line_max: 79
alpha_frac: 0.586009
autogenerated: false
repo_name: fighterCui/L4ReFiascoOC
path: l4/pkg/python/contrib/Demo/tkinter/matt/dialog-box.py
copies: 47
size: 2440
from Tkinter import *
from Dialog import Dialog

# this shows how to create a new window with a button in it
# that can create new windows

class Test(Frame):
    def printit(self):
        print "hi"

    def makeWindow(self):
        """Create a top-level dialog with some buttons.

        This uses the Dialog class, which is a wrapper around the Tcl/Tk
        tk_dialog script.  The function returns 0 if the user clicks 'yes'
        or 1 if the user clicks 'no'.
        """
        # the parameters to this call are as follows:
        d = Dialog(
            self,                       ## name of a toplevel window
            title="fred the dialog box",## title on the window
            text="click on a choice",   ## message to appear in window
            bitmap="info",              ## bitmap (if any) to appear;
                                        ## if none, use ""
            # legal values here are:
            # string        what it looks like
            # ----------------------------------------------
            # error         a circle with a slash through it
            # grey25        grey square
            # grey50        darker grey square
            # hourglass     use for "wait.."
            # info          a large, lower case "i"
            # questhead     a human head with a "?" in it
            # question      a large "?"
            # warning       a large "!"
            # @fname        X bitmap where fname is the path to the file
            #
            default=0,                  # the index of the default button choice.
                                        # hitting return selects this
            strings=("yes", "no"))
            # values of the 'strings' key are the labels for the
            # buttons that appear left to right in the dialog box
        return d.num

    def createWidgets(self):
        self.QUIT = Button(self, text='QUIT', foreground='red',
                           command=self.quit)
        self.QUIT.pack(side=LEFT, fill=BOTH)

        # a hello button
        self.hi_there = Button(self, text='Make a New Window',
                               command=self.makeWindow)
        self.hi_there.pack(side=LEFT)

    def __init__(self, master=None):
        Frame.__init__(self, master)
        Pack.config(self)
        self.windownum = 0
        self.createWidgets()

test = Test()
test.mainloop()
license: gpl-2.0
hash: 9,126,449,565,774,107,000
line_mean: 37.125
line_max: 79
alpha_frac: 0.496311
autogenerated: false
repo_name: JacobJacob/pyew
path: pymsasid/inst.py
copies: 16
size: 5413
from operand import O_NONE, P_none import syn_intel as intel #hack MK from syn_intel import intel_operand_syntax #from syn_att import * operator_list_invalid = [ 'invalid'] operator_list_call = ['syscall', 'call', 'vmcall', 'vmmcall'] operator_list_ret = ['sysret', 'iretw', 'iretd', 'iretq', 'ret', 'retf'] operator_list_jmp = ['jmp'] operator_list_jcc = ['jo', 'jno', 'jb', 'jae', 'jz', 'jnz', 'jbe', 'ja', 'js', 'jns', 'jp', 'jnp', 'jl', 'jge', 'jle', 'jg', 'jcxz', 'jecxz', 'jrcxz', 'loopnz', 'loope', 'loop'] operator_list_hlt = ['hlt'] class itab_entry: def __init__(self, operator = None, op1 = O_NONE, op2 = O_NONE, op3 = O_NONE, pfx = 0): self.operator = operator self.operand = [op1, op2, op3] self.prefix = pfx ie_invalid = itab_entry('invalid', O_NONE, O_NONE, O_NONE, P_none) ie_pause = itab_entry('pause', O_NONE, O_NONE, O_NONE, P_none) ie_nop = itab_entry('nop', O_NONE, O_NONE, O_NONE, P_none) class Prefix: def __init__(self): self.rex = 0 self.seg = '' self.opr = 0 self.adr = 0 self.lock = 0 self.rep = 0 self.repe = 0 self.repne = 0 self.insn = 0 def clear(self): self.seg = '' self.opr = 0 self.adr = 0 self.lock = 0 self.repne = 0 self.rep = 0 self.repe = 0 self.rex = 0 self.insn = 0 class Ptr: def __init__(self, off = 0, seg = 0): self.off = off self.seg = seg class Operand: def __init__(self): self.seg = None self.type = None self.size = 0 self.lval = 0 self.base = None self.index = None self.offset = 0 self.scale = 0 self.cast = 0 self.pc = 0 self.value = None self.ref = None def clear(self): self.__init__() def __str__(self): return intel_operand_syntax (self) def __repr__(self): return self.__str__() class Inst: def __init__(self, myInput, add = 0, mode = 16, syntax = intel.intel_syntax): self.input = myInput self.dis_mode = mode self.size = 0 self.add = add self.pc = 0 self.syntax = syntax self.my_syntax = None self.itab_entry = ie_invalid self.operator = 'invalid' self.operand = [] self.pfx = Prefix() self.opr_mode = 0 self.adr_mode = 0 self.branch_dist = None def clear(self): self.pfx.clear() self.itab_entry = ie_invalid self.operator = self.itab_entry.operator for op in self.operand: op.clear() def __str__(self): if(self.my_syntax == None): self.my_syntax = self.syntax(self) # wtf ? 
return self.my_syntax def __repr__(self): return str(self) def set_pc(self, pc): self.pc = pc for op in self.operand: op.pc = pc def branch(self): if(self.operator in operator_list_invalid or self.operator in operator_list_ret or self.operator in operator_list_hlt): return [] elif self.operator in operator_list_jmp: return [self.target_add()] elif self.operator in operator_list_call or self.operator in operator_list_jcc: return [self.next_add(), self.target_add()] return [self.next_add()] def next_add(self): return long(self.pc) def target_add(self): if(self.operand[0].type == 'OP_JIMM' or self.operand[0].type == 'OP_IMM'): ret = self.add + self.size + self.operand[0].lval elif self.operand[0].type == 'OP_PTR': ret = ((self.operand[0].lval.seg << 4) + self.operand[0].lval.off) elif self.operand[0].type == 'OP_MEM': self.input.seek(self.operand[0].lval) ret = long (self.input.hook.base_address + self.input.read(self.operand[0].size)) else: ret = str(self.operand[0]) if(type(ret) == str): return ret return long(ret) def flow_label(self): if self.operator in operator_list_invalid: return 'invd' elif self.operator in operator_list_call: return 'call' elif self.operator in operator_list_jmp: return 'jmp' elif self.operator in operator_list_jcc: return 'jcc' elif self.operator in operator_list_ret: return 'ret' elif self.operator in operator_list_hlt: return 'hlt' else: return 'seq'
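Inst.flow_label() and Inst.branch() above reduce the operator lists at the top of the module to a small control-flow classification. A sketch of that classification (assumes the pymsasid package is importable with this repository's layout; the operator values are set by hand rather than produced by the disassembler):

    from pymsasid.inst import Inst

    inst = Inst(None)   # no input stream is needed just to classify operators

    labels = {}
    for op in ('jz', 'jmp', 'call', 'ret', 'hlt', 'mov'):
        inst.operator = op
        labels[op] = inst.flow_label()
    # labels == {'jz': 'jcc', 'jmp': 'jmp', 'call': 'call',
    #            'ret': 'ret', 'hlt': 'hlt', 'mov': 'seq'}

    inst.operator = 'ret'
    assert inst.branch() == []   # ret/hlt/invalid instructions end a control-flow walk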
license: gpl-2.0
hash: -1,828,253,973,830,055,000
line_mean: 26.758974
line_max: 93
alpha_frac: 0.457233
autogenerated: false
repo_name: arpitparmar5739/youtube-dl
path: youtube_dl/extractor/pornovoisines.py
copies: 113
size: 3388
# coding: utf-8 from __future__ import unicode_literals import re import random from .common import InfoExtractor from ..utils import ( int_or_none, float_or_none, unified_strdate, ) class PornoVoisinesIE(InfoExtractor): _VALID_URL = r'http://(?:www\.)?pornovoisines\.com/showvideo/(?P<id>\d+)/(?P<display_id>[^/]+)' _VIDEO_URL_TEMPLATE = 'http://stream%d.pornovoisines.com' \ '/static/media/video/transcoded/%s-640x360-1000-trscded.mp4' _SERVER_NUMBERS = (1, 2) _TEST = { 'url': 'http://www.pornovoisines.com/showvideo/1285/recherche-appartement/', 'md5': '5ac670803bc12e9e7f9f662ce64cf1d1', 'info_dict': { 'id': '1285', 'display_id': 'recherche-appartement', 'ext': 'mp4', 'title': 'Recherche appartement', 'description': 'md5:819ea0b785e2a04667a1a01cdc89594e', 'thumbnail': 're:^https?://.*\.jpg$', 'upload_date': '20140925', 'duration': 120, 'view_count': int, 'average_rating': float, 'categories': ['Débutantes', 'Scénario', 'Sodomie'], 'age_limit': 18, } } @classmethod def build_video_url(cls, num): return cls._VIDEO_URL_TEMPLATE % (random.choice(cls._SERVER_NUMBERS), num) def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) video_id = mobj.group('id') display_id = mobj.group('display_id') webpage = self._download_webpage(url, video_id) video_url = self.build_video_url(video_id) title = self._html_search_regex( r'<h1>(.+?)</h1>', webpage, 'title', flags=re.DOTALL) description = self._html_search_regex( r'<article id="descriptif">(.+?)</article>', webpage, "description", fatal=False, flags=re.DOTALL) thumbnail = self._search_regex( r'<div id="mediaspace%s">\s*<img src="/?([^"]+)"' % video_id, webpage, 'thumbnail', fatal=False) if thumbnail: thumbnail = 'http://www.pornovoisines.com/%s' % thumbnail upload_date = unified_strdate(self._search_regex( r'Publié le ([\d-]+)', webpage, 'upload date', fatal=False)) duration = int_or_none(self._search_regex( 'Durée (\d+)', webpage, 'duration', fatal=False)) view_count = int_or_none(self._search_regex( r'(\d+) vues', webpage, 'view count', fatal=False)) average_rating = self._search_regex( r'Note\s*:\s*(\d+(?:,\d+)?)', webpage, 'average rating', fatal=False) if average_rating: average_rating = float_or_none(average_rating.replace(',', '.')) categories = self._html_search_meta( 'keywords', webpage, 'categories', fatal=False) if categories: categories = [category.strip() for category in categories.split(',')] return { 'id': video_id, 'display_id': display_id, 'url': video_url, 'title': title, 'description': description, 'thumbnail': thumbnail, 'upload_date': upload_date, 'duration': duration, 'view_count': view_count, 'average_rating': average_rating, 'categories': categories, 'age_limit': 18, }
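_real_extract() above fills the standard youtube-dl info dict for this site. A hedged sketch of driving the extractor through youtube-dl's embedding API rather than the CLI, using the URL from the extractor's own _TEST block:

    import youtube_dl

    url = 'http://www.pornovoisines.com/showvideo/1285/recherche-appartement/'

    with youtube_dl.YoutubeDL({'quiet': True}) as ydl:
        # download=False: run PornoVoisinesIE._real_extract() and return the info dict
        info = ydl.extract_info(url, download=False)

    print(info['id'], info['title'], info['upload_date'], info['age_limit'])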
license: unlicense
hash: 482,959,195,497,267,260
line_mean: 34.25
line_max: 99
alpha_frac: 0.552305
autogenerated: false
repo_name: ansible/ansible
path: lib/ansible/modules/import_role.py
copies: 15
size: 3445
#!/usr/bin/python # -*- coding: utf-8 -*- # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function __metaclass__ = type DOCUMENTATION = r''' --- author: Ansible Core Team (@ansible) module: import_role short_description: Import a role into a play description: - Much like the C(roles:) keyword, this task loads a role, but it allows you to control when the role tasks run in between other tasks of the play. - Most keywords, loops and conditionals will only be applied to the imported tasks, not to this statement itself. If you want the opposite behavior, use M(ansible.builtin.include_role) instead. - Does not work in handlers. version_added: '2.4' options: name: description: - The name of the role to be executed. type: str required: true tasks_from: description: - File to load from a role's C(tasks/) directory. type: str default: main vars_from: description: - File to load from a role's C(vars/) directory. type: str default: main defaults_from: description: - File to load from a role's C(defaults/) directory. type: str default: main allow_duplicates: description: - Overrides the role's metadata setting to allow using a role more than once with the same parameters. type: bool default: yes handlers_from: description: - File to load from a role's C(handlers/) directory. type: str default: main version_added: '2.8' rolespec_validate: description: - Perform role argument spec validation if an argument spec is defined. type: bool default: yes version_added: '2.11' extends_documentation_fragment: - action_common_attributes attributes: async: support: none become: support: none bypass_host_loop: support: partial conditional: support: none connection: support: none delegation: support: none loops: support: none tags: support: none until: support: none notes: - Handlers are made available to the whole play. - Since Ansible 2.7 variables defined in C(vars) and C(defaults) for the role are exposed to the play at playbook parsing time. Due to this, these variables will be accessible to roles and tasks executed before the location of the M(ansible.builtin.import_role) task. - Unlike M(ansible.builtin.include_role) variable exposure is not configurable, and will always be exposed. seealso: - module: ansible.builtin.import_playbook - module: ansible.builtin.import_tasks - module: ansible.builtin.include_role - module: ansible.builtin.include_tasks - ref: playbooks_reuse_includes description: More information related to including and importing playbooks, roles and tasks. ''' EXAMPLES = r''' - hosts: all tasks: - import_role: name: myrole - name: Run tasks/other.yaml instead of 'main' import_role: name: myrole tasks_from: other - name: Pass variables to role import_role: name: myrole vars: rolevar1: value from task - name: Apply condition to each task in role import_role: name: myrole when: not idontwanttorun ''' RETURN = r''' # This module does not return anything except tasks to execute. '''
license: gpl-3.0
hash: -3,256,956,347,243,720,000
line_mean: 27.471074
line_max: 129
alpha_frac: 0.678084
autogenerated: false
repo_name: anmolonruby/mongo-connector
path: mongo_connector/connector.py
copies: 1
size: 41715
# Copyright 2013-2014 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0# # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Discovers the mongo cluster and starts the connector. """ import json import logging import logging.handlers import os import pymongo import re import shutil import ssl import sys import threading import time from mongo_connector import config, constants, errors, util from mongo_connector.locking_dict import LockingDict from mongo_connector.oplog_manager import OplogThread from mongo_connector.doc_managers import doc_manager_simulator as simulator from mongo_connector.doc_managers.doc_manager_base import DocManagerBase from mongo_connector.command_helper import CommandHelper from mongo_connector.util import log_fatal_exceptions from pymongo import MongoClient LOG = logging.getLogger(__name__) _SSL_POLICY_MAP = { 'ignored': ssl.CERT_NONE, 'optional': ssl.CERT_OPTIONAL, 'required': ssl.CERT_REQUIRED } class Connector(threading.Thread): """Thread that monitors a replica set or sharded cluster. Creates, runs, and monitors an OplogThread for each replica set found. """ def __init__(self, mongo_address, doc_managers=None, **kwargs): super(Connector, self).__init__() # can_run is set to false when we join the thread self.can_run = True # main address - either mongos for sharded setups or a primary otherwise self.address = mongo_address # List of DocManager instances if doc_managers: self.doc_managers = doc_managers else: LOG.warning('No doc managers specified, using simulator.') self.doc_managers = (simulator.DocManager(),) # Password for authentication self.auth_key = kwargs.pop('auth_key', None) # Username for authentication self.auth_username = kwargs.pop('auth_username', None) # The name of the file that stores the progress of the OplogThreads self.oplog_checkpoint = kwargs.pop('oplog_checkpoint', 'oplog.timestamp') # The set of OplogThreads created self.shard_set = {} # Dict of OplogThread/timestamp pairs to record progress self.oplog_progress = LockingDict() # Timezone awareness self.tz_aware = kwargs.get('tz_aware', False) # SSL keyword arguments to MongoClient. ssl_certfile = kwargs.pop('ssl_certfile', None) ssl_ca_certs = kwargs.pop('ssl_ca_certs', None) ssl_keyfile = kwargs.pop('ssl_keyfile', None) ssl_cert_reqs = kwargs.pop('ssl_cert_reqs', None) self.ssl_kwargs = {} if ssl_certfile: self.ssl_kwargs['ssl_certfile'] = ssl_certfile if ssl_ca_certs: self.ssl_kwargs['ssl_ca_certs'] = ssl_ca_certs if ssl_keyfile: self.ssl_kwargs['ssl_keyfile'] = ssl_keyfile if ssl_cert_reqs: self.ssl_kwargs['ssl_cert_reqs'] = ssl_cert_reqs # Save the rest of kwargs. 
self.kwargs = kwargs # Initialize and set the command helper command_helper = CommandHelper(kwargs.get('ns_set', []), kwargs.get('dest_mapping', {})) for dm in self.doc_managers: dm.command_helper = command_helper if self.oplog_checkpoint is not None: if not os.path.exists(self.oplog_checkpoint): info_str = ("MongoConnector: Can't find %s, " "attempting to create an empty progress log" % self.oplog_checkpoint) LOG.warning(info_str) try: # Create oplog progress file open(self.oplog_checkpoint, "w").close() except IOError as e: LOG.critical("MongoConnector: Could not " "create a progress log: %s" % str(e)) sys.exit(2) else: if (not os.access(self.oplog_checkpoint, os.W_OK) and not os.access(self.oplog_checkpoint, os.R_OK)): LOG.critical("Invalid permissions on %s! Exiting" % (self.oplog_checkpoint)) sys.exit(2) @classmethod def from_config(cls, config): """Create a new Connector instance from a Config object.""" auth_key = None password_file = config['authentication.passwordFile'] if password_file is not None: try: auth_key = open(config['authentication.passwordFile']).read() auth_key = re.sub(r'\s', '', auth_key) except IOError: LOG.error('Could not load password file!') sys.exit(1) password = config['authentication.password'] if password is not None: auth_key = password connector = Connector( mongo_address=config['mainAddress'], doc_managers=config['docManagers'], oplog_checkpoint=config['oplogFile'], collection_dump=(not config['noDump']), batch_size=config['batchSize'], continue_on_error=config['continueOnError'], auth_username=config['authentication.adminUsername'], auth_key=auth_key, fields=config['fields'], ns_set=config['namespaces.include'], dest_mapping=config['namespaces.mapping'], gridfs_set=config['namespaces.gridfs'], ssl_certfile=config['ssl.sslCertfile'], ssl_keyfile=config['ssl.sslKeyfile'], ssl_ca_certs=config['ssl.sslCACerts'], ssl_cert_reqs=config['ssl.sslCertificatePolicy'], tz_aware=config['timezoneAware'] ) return connector def join(self): """ Joins thread, stops it from running """ self.can_run = False for dm in self.doc_managers: dm.stop() threading.Thread.join(self) def write_oplog_progress(self): """ Writes oplog progress to file provided by user """ if self.oplog_checkpoint is None: return None with self.oplog_progress as oplog_prog: oplog_dict = oplog_prog.get_dict() items = [[name, util.bson_ts_to_long(oplog_dict[name])] for name in oplog_dict] if not items: return # write to temp file backup_file = self.oplog_checkpoint + '.backup' os.rename(self.oplog_checkpoint, backup_file) # for each of the threads write to file with open(self.oplog_checkpoint, 'w') as dest: if len(items) == 1: # Write 1-dimensional array, as in previous versions. json_str = json.dumps(items[0]) else: # Write a 2d array to support sharded clusters. json_str = json.dumps(items) try: dest.write(json_str) except IOError: # Basically wipe the file, copy from backup dest.truncate() with open(backup_file, 'r') as backup: shutil.copyfile(backup, dest) os.remove(backup_file) def read_oplog_progress(self): """Reads oplog progress from file provided by user. This method is only called once before any threads are spanwed. """ if self.oplog_checkpoint is None: return None # Check for empty file try: if os.stat(self.oplog_checkpoint).st_size == 0: LOG.info("MongoConnector: Empty oplog progress file.") return None except OSError: return None with open(self.oplog_checkpoint, 'r') as progress_file: try: data = json.load(progress_file) except ValueError: LOG.exception( 'Cannot read oplog progress file "%s". 
' 'It may be corrupt after Mongo Connector was shut down' 'uncleanly. You can try to recover from a backup file ' '(may be called "%s.backup") or create a new progress file ' 'starting at the current moment in time by running ' 'mongo-connector --no-dump <other options>. ' 'You may also be trying to read an oplog progress file ' 'created with the old format for sharded clusters. ' 'See https://github.com/10gen-labs/mongo-connector/wiki' '/Oplog-Progress-File for complete documentation.' % (self.oplog_checkpoint, self.oplog_checkpoint)) return # data format: # [name, timestamp] = replica set # [[name, timestamp], [name, timestamp], ...] = sharded cluster if not isinstance(data[0], list): data = [data] with self.oplog_progress: self.oplog_progress.dict = dict( (name, util.long_to_bson_ts(timestamp)) for name, timestamp in data) @log_fatal_exceptions def run(self): """Discovers the mongo cluster and creates a thread for each primary. """ main_conn = MongoClient( self.address, tz_aware=self.tz_aware, **self.ssl_kwargs) if self.auth_key is not None: main_conn['admin'].authenticate(self.auth_username, self.auth_key) self.read_oplog_progress() conn_type = None try: main_conn.admin.command("isdbgrid") except pymongo.errors.OperationFailure: conn_type = "REPLSET" if conn_type == "REPLSET": # Make sure we are connected to a replica set is_master = main_conn.admin.command("isMaster") if "setName" not in is_master: LOG.error( 'No replica set at "%s"! A replica set is required ' 'to run mongo-connector. Shutting down...' % self.address ) return # Establish a connection to the replica set as a whole main_conn.disconnect() main_conn = MongoClient( self.address, replicaSet=is_master['setName'], tz_aware=self.tz_aware, **self.ssl_kwargs) if self.auth_key is not None: main_conn.admin.authenticate(self.auth_username, self.auth_key) # non sharded configuration oplog = OplogThread( main_conn, self.doc_managers, self.oplog_progress, **self.kwargs) self.shard_set[0] = oplog LOG.info('MongoConnector: Starting connection thread %s' % main_conn) oplog.start() while self.can_run: if not self.shard_set[0].running: LOG.error("MongoConnector: OplogThread" " %s unexpectedly stopped! Shutting down" % (str(self.shard_set[0]))) self.oplog_thread_join() for dm in self.doc_managers: dm.stop() return self.write_oplog_progress() time.sleep(1) else: # sharded cluster while self.can_run is True: for shard_doc in main_conn['config']['shards'].find(): shard_id = shard_doc['_id'] if shard_id in self.shard_set: if not self.shard_set[shard_id].running: LOG.error("MongoConnector: OplogThread " "%s unexpectedly stopped! Shutting " "down" % (str(self.shard_set[shard_id]))) self.oplog_thread_join() for dm in self.doc_managers: dm.stop() return self.write_oplog_progress() time.sleep(1) continue try: repl_set, hosts = shard_doc['host'].split('/') except ValueError: cause = "The system only uses replica sets!" 
LOG.exception("MongoConnector: %s", cause) self.oplog_thread_join() for dm in self.doc_managers: dm.stop() return shard_conn = MongoClient( hosts, replicaSet=repl_set, tz_aware=self.tz_aware, **self.ssl_kwargs) if self.auth_key is not None: shard_conn['admin'].authenticate(self.auth_username, self.auth_key) oplog = OplogThread( shard_conn, self.doc_managers, self.oplog_progress, **self.kwargs) self.shard_set[shard_id] = oplog msg = "Starting connection thread" LOG.info("MongoConnector: %s %s" % (msg, shard_conn)) oplog.start() self.oplog_thread_join() self.write_oplog_progress() def oplog_thread_join(self): """Stops all the OplogThreads """ LOG.info('MongoConnector: Stopping all OplogThreads') for thread in self.shard_set.values(): thread.join() def get_config_options(): result = [] def add_option(*args, **kwargs): opt = config.Option(*args, **kwargs) result.append(opt) return opt main_address = add_option( config_key="mainAddress", default="localhost:27017", type=str) # -m is for the main address, which is a host:port pair, ideally of the # mongos. For non sharded clusters, it can be the primary. main_address.add_cli( "-m", "--main", dest="main_address", help= "Specify the main address, which is a" " host:port pair. For sharded clusters, this" " should be the mongos address. For individual" " replica sets, supply the address of the" " primary. For example, `-m localhost:27217`" " would be a valid argument to `-m`. Don't use" " quotes around the address.") oplog_file = add_option( config_key="oplogFile", default="oplog.timestamp", type=str) # -o is to specify the oplog-config file. This file is used by the system # to store the last timestamp read on a specific oplog. This allows for # quick recovery from failure. oplog_file.add_cli( "-o", "--oplog-ts", dest="oplog_file", help= "Specify the name of the file that stores the " "oplog progress timestamps. " "This file is used by the system to store the last " "timestamp read on a specific oplog. This allows " "for quick recovery from failure. By default this " "is `config.txt`, which starts off empty. An empty " "file causes the system to go through all the mongo " "oplog and sync all the documents. Whenever the " "cluster is restarted, it is essential that the " "oplog-timestamp config file be emptied - otherwise " "the connector will miss some documents and behave " "incorrectly.") no_dump = add_option( config_key="noDump", default=False, type=bool) # --no-dump specifies whether we should read an entire collection from # scratch if no timestamp is found in the oplog_config. no_dump.add_cli( "--no-dump", action="store_true", dest="no_dump", help= "If specified, this flag will ensure that " "mongo_connector won't read the entire contents of a " "namespace iff --oplog-ts points to an empty file.") batch_size = add_option( config_key="batchSize", default=constants.DEFAULT_BATCH_SIZE, type=int) # --batch-size specifies num docs to read from oplog before updating the # --oplog-ts config file with current oplog position batch_size.add_cli( "--batch-size", type="int", dest="batch_size", help= "Specify an int to update the --oplog-ts " "config file with latest position of oplog every " "N documents. By default, the oplog config isn't " "updated until we've read through the entire oplog. 
" "You may want more frequent updates if you are at risk " "of falling behind the earliest timestamp in the oplog") def apply_verbosity(option, cli_values): if cli_values['verbose']: option.value = 3 if option.value < 0 or option.value > 3: raise errors.InvalidConfiguration( "verbosity must be in the range [0, 3].") verbosity = add_option( config_key="verbosity", default=0, type=int, apply_function=apply_verbosity) # -v enables verbose logging verbosity.add_cli( "-v", "--verbose", action="store_true", dest="verbose", help="Enables verbose logging.") def apply_logging(option, cli_values): if cli_values['logfile'] and cli_values['enable_syslog']: raise errors.InvalidConfiguration( "You cannot specify syslog and a logfile simultaneously," " please choose the logging method you would prefer.") if cli_values['logfile']: when = cli_values['logfile_when'] interval = cli_values['logfile_interval'] if (when and when.startswith('W') and interval != constants.DEFAULT_LOGFILE_INTERVAL): raise errors.InvalidConfiguration( "You cannot specify a log rotation interval when rotating " "based on a weekday (W0 - W6).") option.value['type'] = 'file' option.value['filename'] = cli_values['logfile'] if when: option.value['rotationWhen'] = when if interval: option.value['rotationInterval'] = interval if cli_values['logfile_backups']: option.value['rotationBackups'] = cli_values['logfile_backups'] if cli_values['enable_syslog']: option.value['type'] = 'syslog' if cli_values['syslog_host']: option.value['host'] = cli_values['syslog_host'] if cli_values['syslog_facility']: option.value['facility'] = cli_values['syslog_facility'] default_logging = { 'type': 'file', 'filename': 'mongo-connector.log', 'rotationInterval': constants.DEFAULT_LOGFILE_INTERVAL, 'rotationBackups': constants.DEFAULT_LOGFILE_BACKUPCOUNT, 'rotationWhen': constants.DEFAULT_LOGFILE_WHEN, 'host': constants.DEFAULT_SYSLOG_HOST, 'facility': constants.DEFAULT_SYSLOG_FACILITY } logging = add_option( config_key="logging", default=default_logging, type=dict, apply_function=apply_logging) # -w enables logging to a file logging.add_cli( "-w", "--logfile", dest="logfile", help= "Log all output to a file rather than stream to " "stderr. Omit to stream to stderr.") # -s is to enable syslog logging. logging.add_cli( "-s", "--enable-syslog", action="store_true", dest="enable_syslog", help= "The syslog host, which may be an address like 'localhost:514' or, " "on Unix/Linux, the path to a Unix domain socket such as '/dev/log'.") # --syslog-host is to specify the syslog host. logging.add_cli( "--syslog-host", dest="syslog_host", help= "Used to specify the syslog host." " The default is 'localhost:514'") # --syslog-facility is to specify the syslog facility. logging.add_cli( "--syslog-facility", dest="syslog_facility", help= "Used to specify the syslog facility." " The default is 'user'") # --logfile-when specifies the type of interval of the rotating file # (seconds, minutes, hours) logging.add_cli("--logfile-when", action="store", dest="logfile_when", type="string", help="The type of interval for rotating the log file. " "Should be one of " "'S' (seconds), 'M' (minutes), 'H' (hours), " "'D' (days), 'W0' - 'W6' (days of the week 0 - 6), " "or 'midnight' (the default). 
See the Python documentation " "for 'logging.handlers.TimedRotatingFileHandler' for more " "details.") # --logfile-interval specifies when to create a new log file logging.add_cli("--logfile-interval", action="store", dest="logfile_interval", type="int", help="How frequently to rotate the log file, " "specifically, how many units of the rotation interval " "should pass before the rotation occurs. For example, " "to create a new file each hour: " " '--logfile-when=H --logfile-interval=1'. " "Defaults to 1. You may not use this option if " "--logfile-when is set to a weekday (W0 - W6). " "See the Python documentation for " "'logging.handlers.TimedRotatingFileHandler' for more " "details. ") # --logfile-backups specifies how many log files will be kept. logging.add_cli("--logfile-backups", action="store", dest="logfile_backups", type="int", help="How many log files will be kept after rotation. " "If set to zero, then no log files will be deleted. " "Defaults to 7.") def apply_authentication(option, cli_values): if cli_values['admin_username']: option.value['adminUsername'] = cli_values['admin_username'] if cli_values['password']: option.value['password'] = cli_values['password'] if cli_values['password_file']: option.value['passwordFile'] = cli_values['password_file'] if option.value.get("adminUsername"): password = option.value.get("password") passwordFile = option.value.get("passwordFile") if not password and not passwordFile: raise errors.InvalidConfiguration( "Admin username specified without password.") if password and passwordFile: raise errors.InvalidConfiguration( "Can't specify both password and password file.") default_authentication = { 'adminUsername': None, 'password': None, 'passwordFile': None } authentication = add_option( config_key="authentication", default=default_authentication, type=dict, apply_function=apply_authentication) # -a is to specify the username for authentication. authentication.add_cli( "-a", "--admin-username", dest="admin_username", help= "Used to specify the username of an admin user to " "authenticate with. To use authentication, the user " "must specify both an admin username and a keyFile.") # -p is to specify the password used for authentication. authentication.add_cli( "-p", "--password", dest="password", help= "Used to specify the password." " This is used by mongos to authenticate" " connections to the shards, and in the" " oplog threads. If authentication is not used, then" " this field can be left empty as the default ") # -f is to specify the authentication key file. This file is used by mongos # to authenticate connections to the shards, and we'll use it in the oplog # threads. authentication.add_cli( "-f", "--password-file", dest="password_file", help= "Used to store the password for authentication." " Use this option if you wish to specify a" " username and password but don't want to" " type in the password. The contents of this" " file should be the password for the admin user.") def apply_fields(option, cli_values): if cli_values['fields']: option.value = cli_values['fields'].split(",") for field in option.value: if '.' in field: print( "WARNING: mongo-connector can only successfully filter " "sub-document fields for inserts and updates, " "not replacements. To catch all changes on " "a sub-document field, specify the name of the " "sub-document instead. 
You are seeing this " "message because you passed the name of a nested field " "to the 'fields' option: %s" % field) break fields = add_option( config_key="fields", default=[], type=list, apply_function=apply_fields) # -i to specify the list of fields to export fields.add_cli( "-i", "--fields", dest="fields", help= "Used to specify the list of fields to export. " "Specify a field or fields to include in the export. " "Use a comma separated list of fields to specify multiple " "fields. The '_id', 'ns' and '_ts' fields are always " "exported.") def apply_namespaces(option, cli_values): if cli_values['ns_set']: option.value['include'] = cli_values['ns_set'].split(',') if cli_values['gridfs_set']: option.value['gridfs'] = cli_values['gridfs_set'].split(',') if cli_values['dest_ns_set']: ns_set = option.value['include'] dest_ns_set = cli_values['dest_ns_set'].split(',') if len(ns_set) != len(dest_ns_set): raise errors.InvalidConfiguration( "Destination namespace set should be the" " same length as the origin namespace set.") option.value['mapping'] = dict(zip(ns_set, dest_ns_set)) ns_set = option.value['include'] if len(ns_set) != len(set(ns_set)): raise errors.InvalidConfiguration( "Namespace set should not contain any duplicates.") dest_mapping = option.value['mapping'] if len(dest_mapping) != len(set(dest_mapping.values())): raise errors.InvalidConfiguration( "Destination namespaces set should not" " contain any duplicates.") gridfs_set = option.value['gridfs'] if len(gridfs_set) != len(set(gridfs_set)): raise errors.InvalidConfiguration( "GridFS set should not contain any duplicates.") default_namespaces = { "include": [], "mapping": {}, "gridfs": [] } namespaces = add_option( config_key="namespaces", default=default_namespaces, type=dict, apply_function=apply_namespaces) # -n is to specify the namespaces we want to consider. The default # considers all the namespaces namespaces.add_cli( "-n", "--namespace-set", dest="ns_set", help= "Used to specify the namespaces we want to " "consider. For example, if we wished to store all " "documents from the test.test and alpha.foo " "namespaces, we could use `-n test.test,alpha.foo`. " "The default is to consider all the namespaces, " "excluding the system and config databases, and " "also ignoring the \"system.indexes\" collection in " "any database.") # -g is the destination namespace namespaces.add_cli( "-g", "--dest-namespace-set", dest="dest_ns_set", help= "Specify a destination namespace mapping. Each " "namespace provided in the --namespace-set option " "will be mapped respectively according to this " "comma-separated list. These lists must have " "equal length. The default is to use the identity " "mapping. This is currently only implemented " "for mongo-to-mongo connections.") # --gridfs-set is the set of GridFS namespaces to consider namespaces.add_cli( "--gridfs-set", dest="gridfs_set", help= "Used to specify the GridFS namespaces we want to " "consider. 
For example, if your metadata is stored in " "test.fs.files and chunks are stored in test.fs.chunks, " "you can use `--gridfs-set test.fs`.") def apply_doc_managers(option, cli_values): if cli_values['doc_manager'] is None: if cli_values['target_url']: raise errors.InvalidConfiguration( "Cannot create a Connector with a target URL" " but no doc manager.") else: if option.value is not None: bulk_size = option.value[0].get( 'bulkSize', constants.DEFAULT_MAX_BULK) else: bulk_size = constants.DEFAULT_MAX_BULK option.value = [{ 'docManager': cli_values['doc_manager'], 'targetURL': cli_values['target_url'], 'uniqueKey': cli_values['unique_key'], 'autoCommitInterval': cli_values['auto_commit_interval'], 'bulkSize': bulk_size }] if not option.value: return # validate doc managers and fill in default values for dm in option.value: if not isinstance(dm, dict): raise errors.InvalidConfiguration( "Elements of docManagers must be a dict.") if 'docManager' not in dm: raise errors.InvalidConfiguration( "Every element of docManagers" " must contain 'docManager' property.") if not dm.get('targetURL'): dm['targetURL'] = None if not dm.get('uniqueKey'): dm['uniqueKey'] = constants.DEFAULT_UNIQUE_KEY if dm.get('autoCommitInterval') is None: dm['autoCommitInterval'] = constants.DEFAULT_COMMIT_INTERVAL if not dm.get('args'): dm['args'] = {} if not dm.get('bulkSize'): dm['bulkSize'] = constants.DEFAULT_MAX_BULK aci = dm['autoCommitInterval'] if aci is not None and aci < 0: raise errors.InvalidConfiguration( "autoCommitInterval must be non-negative.") def import_dm_by_name(name): try: full_name = "mongo_connector.doc_managers.%s" % name # importlib doesn't exist in 2.6, but __import__ is everywhere module = __import__(full_name, fromlist=(name,)) dm_impl = module.DocManager if not issubclass(dm_impl, DocManagerBase): raise TypeError("DocManager must inherit DocManagerBase.") return module except ImportError: raise errors.InvalidConfiguration( "Could not import %s." % full_name) sys.exit(1) except (AttributeError, TypeError): raise errors.InvalidConfiguration( "No definition for DocManager found in %s." % full_name) sys.exit(1) # instantiate the doc manager objects dm_instances = [] for dm in option.value: module = import_dm_by_name(dm['docManager']) kwargs = { 'unique_key': dm['uniqueKey'], 'auto_commit_interval': dm['autoCommitInterval'], 'chunk_size': dm['bulkSize'] } for k in dm['args']: if k not in kwargs: kwargs[k] = dm['args'][k] target_url = dm['targetURL'] if target_url: dm_instances.append(module.DocManager(target_url, **kwargs)) else: dm_instances.append(module.DocManager(**kwargs)) option.value = dm_instances doc_managers = add_option( config_key="docManagers", default=None, type=list, apply_function=apply_doc_managers) # -d is to specify the doc manager file. doc_managers.add_cli( "-d", "--doc-manager", dest="doc_manager", help= "Used to specify the path to each doc manager " "file that will be used. DocManagers should be " "specified in the same order as their respective " "target addresses in the --target-urls option. " "URLs are assigned to doc managers " "respectively. Additional doc managers are " "implied to have no target URL. Additional URLs " "are implied to have the same doc manager type as " "the last doc manager for which a URL was " "specified. By default, Mongo Connector will use " "'doc_manager_simulator.py'. It is recommended " "that all doc manager files be kept in the " "doc_managers folder in mongo-connector. 
For " "more information about making your own doc " "manager, see 'Writing Your Own DocManager' " "section of the wiki") # -d is to specify the doc manager file. doc_managers.add_cli( "-t", "--target-url", dest="target_url", help= "Specify the URL to each target system being " "used. For example, if you were using Solr out of " "the box, you could use '-t " "http://localhost:8080/solr' with the " "SolrDocManager to establish a proper connection. " "URLs should be specified in the same order as " "their respective doc managers in the " "--doc-managers option. URLs are assigned to doc " "managers respectively. Additional doc managers " "are implied to have no target URL. Additional " "URLs are implied to have the same doc manager " "type as the last doc manager for which a URL was " "specified. " "Don't use quotes around addresses. ") # -u is to specify the mongoDB field that will serve as the unique key # for the target system, doc_managers.add_cli( "-u", "--unique-key", dest="unique_key", help= "The name of the MongoDB field that will serve " "as the unique key for the target system. " "Note that this option does not apply " "when targeting another MongoDB cluster. " "Defaults to \"_id\".") # --auto-commit-interval to specify auto commit time interval doc_managers.add_cli( "--auto-commit-interval", type="int", dest="auto_commit_interval", help= "Seconds in-between calls for the Doc Manager" " to commit changes to the target system. A value of" " 0 means to commit after every write operation." " When left unset, Mongo Connector will not make" " explicit commits. Some systems have" " their own mechanism for adjusting a commit" " interval, which should be preferred to this" " option.") continue_on_error = add_option( config_key="continueOnError", default=False, type=bool) def apply_ssl(option, cli_values): option.value = option.value or {} ssl_certfile = cli_values.pop('ssl_certfile') ssl_keyfile = cli_values.pop('ssl_keyfile') ssl_cert_reqs = cli_values.pop('ssl_cert_reqs') ssl_ca_certs = ( cli_values.pop('ssl_ca_certs') or option.value.get('sslCACerts')) if ssl_cert_reqs and ssl_cert_reqs != 'ignored' and not ssl_ca_certs: raise errors.InvalidConfiguration( '--ssl-ca-certs must be provided if the ' '--ssl-certificate-policy is not "ignored".') option.value.setdefault('sslCertfile', ssl_certfile) option.value.setdefault('sslCACerts', ssl_ca_certs) option.value.setdefault('sslKeyfile', ssl_keyfile) option.value['sslCertificatePolicy'] = _SSL_POLICY_MAP.get( ssl_cert_reqs) ssl = add_option( config_key="ssl", default={}, type=dict, apply_function=apply_ssl) ssl.add_cli( '--ssl-certfile', dest='ssl_certfile', help=('Path to a certificate identifying the local connection ' 'to MongoDB.') ) ssl.add_cli( '--ssl-keyfile', dest='ssl_keyfile', help=('Path to the private key for --ssl-certfile. ' 'Not necessary if already included in --ssl-certfile.') ) ssl.add_cli( '--ssl-certificate-policy', dest='ssl_cert_reqs', choices=('required', 'optional', 'ignored'), help=('Policy for validating SSL certificates provided from the other ' 'end of the connection. There are three possible values: ' 'required = Require and validate the remote certificate. ' 'optional = Validate the remote certificate only if one ' 'is provided. ' 'ignored = Remote SSL certificates are ignored completely.') ) ssl.add_cli( '--ssl-ca-certs', dest='ssl_ca_certs', help=('Path to a concatenated set of certificate authority ' 'certificates to validate the other side of the connection. 
') ) # --continue-on-error to continue to upsert documents during a collection # dump, even if the documents cannot be inserted for some reason continue_on_error.add_cli( "--continue-on-error", action="store_true", dest="continue_on_error", help= "By default, if any document fails to upsert" " during a collection dump, the entire operation fails." " When this flag is enabled, normally fatal errors" " will be caught and logged, allowing the collection" " dump to continue.\n" "Note: Applying oplog operations to an incomplete" " set of documents due to errors may cause undefined" " behavior. Use this flag to dump only.") config_file = add_option() config_file.add_cli( "-c", "--config-file", dest="config_file", help= "Specify a JSON file to load configurations from. You can find" " an example config file at mongo-connector/config.json") tz_aware = add_option( config_key="timezoneAware", default=False, type=bool) tz_aware.add_cli( "--tz-aware", dest="tz_aware", action="store_true", help="Make all dates and times timezone-aware.") return result def setup_logging(conf): root_logger = logging.getLogger() formatter = logging.Formatter( "%(asctime)s [%(levelname)s] %(name)s:%(lineno)d - %(message)s") log_levels = [ logging.ERROR, logging.WARNING, logging.INFO, logging.DEBUG ] loglevel = log_levels[conf['verbosity']] root_logger.setLevel(loglevel) if conf['logging.type'] == 'file': log_out = logging.handlers.TimedRotatingFileHandler( conf['logging.filename'], when=conf['logging.rotationWhen'], interval=conf['logging.rotationInterval'], backupCount=conf['logging.rotationBackups'] ) print("Logging to %s." % conf['logging.filename']) elif conf['logging.type'] == 'syslog': syslog_info = conf['logging.host'] if ':' in syslog_info: log_host, log_port = syslog_info.split(':') syslog_info = (log_host, int(log_port)) log_out = logging.handlers.SysLogHandler( address=syslog_info, facility=conf['logging.facility'] ) print("Logging to system log at %s" % conf['logging.host']) elif conf['logging.type'] == 'stream': log_out = logging.StreamHandler() else: print("Logging type must be one of 'stream', 'syslog', or 'file', not " "'%s'." % conf['logging.type']) sys.exit(1) log_out.setLevel(loglevel) log_out.setFormatter(formatter) root_logger.addHandler(log_out) return root_logger @log_fatal_exceptions def main(): """ Starts the mongo connector (assuming CLI) """ conf = config.Config(get_config_options()) conf.parse_args() setup_logging(conf) LOG.info('Beginning Mongo Connector') connector = Connector.from_config(conf) connector.start() while True: try: time.sleep(3) if not connector.is_alive(): break except KeyboardInterrupt: LOG.info("Caught keyboard interrupt, exiting!") connector.join() break if __name__ == '__main__': main()
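write_oplog_progress() and read_oplog_progress() above imply a small JSON on-disk format for the checkpoint file. A sketch of that format with made-up shard names and timestamp longs; the file name is the default from the -o/--oplog-ts option:

    import json

    # Replica set: a single [name, timestamp] pair.
    replica_progress = ["rs0", 6041404313240862721]

    # Sharded cluster: a 2-d array with one [name, timestamp] pair per shard.
    sharded_progress = [["shard0000", 6041404313240862721],
                        ["shard0001", 6041404317535830017]]

    with open("oplog.timestamp", "w") as dest:
        dest.write(json.dumps(sharded_progress))

    with open("oplog.timestamp", "r") as progress_file:
        data = json.load(progress_file)
    if not isinstance(data[0], list):   # normalize the replica-set form
        data = [data]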
license: apache-2.0
hash: -1,058,628,492,018,720,400
line_mean: 39.110577
line_max: 91
alpha_frac: 0.571425
autogenerated: false
repo_name: maohongyuan/kbengine
path: kbe/res/scripts/common/Lib/asyncio/queues.py
copies: 63
size: 9019
"""Queues""" __all__ = ['Queue', 'PriorityQueue', 'LifoQueue', 'JoinableQueue', 'QueueFull', 'QueueEmpty'] import collections import heapq from . import events from . import futures from . import locks from .tasks import coroutine class QueueEmpty(Exception): 'Exception raised by Queue.get(block=0)/get_nowait().' pass class QueueFull(Exception): 'Exception raised by Queue.put(block=0)/put_nowait().' pass class Queue: """A queue, useful for coordinating producer and consumer coroutines. If maxsize is less than or equal to zero, the queue size is infinite. If it is an integer greater than 0, then "yield from put()" will block when the queue reaches maxsize, until an item is removed by get(). Unlike the standard library Queue, you can reliably know this Queue's size with qsize(), since your single-threaded asyncio application won't be interrupted between calling qsize() and doing an operation on the Queue. """ def __init__(self, maxsize=0, *, loop=None): if loop is None: self._loop = events.get_event_loop() else: self._loop = loop self._maxsize = maxsize # Futures. self._getters = collections.deque() # Pairs of (item, Future). self._putters = collections.deque() self._init(maxsize) def _init(self, maxsize): self._queue = collections.deque() def _get(self): return self._queue.popleft() def _put(self, item): self._queue.append(item) def __repr__(self): return '<{} at {:#x} {}>'.format( type(self).__name__, id(self), self._format()) def __str__(self): return '<{} {}>'.format(type(self).__name__, self._format()) def _format(self): result = 'maxsize={!r}'.format(self._maxsize) if getattr(self, '_queue', None): result += ' _queue={!r}'.format(list(self._queue)) if self._getters: result += ' _getters[{}]'.format(len(self._getters)) if self._putters: result += ' _putters[{}]'.format(len(self._putters)) return result def _consume_done_getters(self): # Delete waiters at the head of the get() queue who've timed out. while self._getters and self._getters[0].done(): self._getters.popleft() def _consume_done_putters(self): # Delete waiters at the head of the put() queue who've timed out. while self._putters and self._putters[0][1].done(): self._putters.popleft() def qsize(self): """Number of items in the queue.""" return len(self._queue) @property def maxsize(self): """Number of items allowed in the queue.""" return self._maxsize def empty(self): """Return True if the queue is empty, False otherwise.""" return not self._queue def full(self): """Return True if there are maxsize items in the queue. Note: if the Queue was initialized with maxsize=0 (the default), then full() is never True. """ if self._maxsize <= 0: return False else: return self.qsize() >= self._maxsize @coroutine def put(self, item): """Put an item into the queue. If you yield from put(), wait until a free slot is available before adding item. """ self._consume_done_getters() if self._getters: assert not self._queue, ( 'queue non-empty, why are getters waiting?') getter = self._getters.popleft() # Use _put and _get instead of passing item straight to getter, in # case a subclass has logic that must run (e.g. JoinableQueue). self._put(item) getter.set_result(self._get()) elif self._maxsize > 0 and self._maxsize <= self.qsize(): waiter = futures.Future(loop=self._loop) self._putters.append((item, waiter)) yield from waiter else: self._put(item) def put_nowait(self, item): """Put an item into the queue without blocking. If no free slot is immediately available, raise QueueFull. 
""" self._consume_done_getters() if self._getters: assert not self._queue, ( 'queue non-empty, why are getters waiting?') getter = self._getters.popleft() # Use _put and _get instead of passing item straight to getter, in # case a subclass has logic that must run (e.g. JoinableQueue). self._put(item) getter.set_result(self._get()) elif self._maxsize > 0 and self._maxsize <= self.qsize(): raise QueueFull else: self._put(item) @coroutine def get(self): """Remove and return an item from the queue. If you yield from get(), wait until a item is available. """ self._consume_done_putters() if self._putters: assert self.full(), 'queue not full, why are putters waiting?' item, putter = self._putters.popleft() self._put(item) # When a getter runs and frees up a slot so this putter can # run, we need to defer the put for a tick to ensure that # getters and putters alternate perfectly. See # ChannelTest.test_wait. self._loop.call_soon(putter._set_result_unless_cancelled, None) return self._get() elif self.qsize(): return self._get() else: waiter = futures.Future(loop=self._loop) self._getters.append(waiter) return (yield from waiter) def get_nowait(self): """Remove and return an item from the queue. Return an item if one is immediately available, else raise QueueEmpty. """ self._consume_done_putters() if self._putters: assert self.full(), 'queue not full, why are putters waiting?' item, putter = self._putters.popleft() self._put(item) # Wake putter on next tick. putter.set_result(None) return self._get() elif self.qsize(): return self._get() else: raise QueueEmpty class PriorityQueue(Queue): """A subclass of Queue; retrieves entries in priority order (lowest first). Entries are typically tuples of the form: (priority number, data). """ def _init(self, maxsize): self._queue = [] def _put(self, item, heappush=heapq.heappush): heappush(self._queue, item) def _get(self, heappop=heapq.heappop): return heappop(self._queue) class LifoQueue(Queue): """A subclass of Queue that retrieves most recently added entries first.""" def _init(self, maxsize): self._queue = [] def _put(self, item): self._queue.append(item) def _get(self): return self._queue.pop() class JoinableQueue(Queue): """A subclass of Queue with task_done() and join() methods.""" def __init__(self, maxsize=0, *, loop=None): super().__init__(maxsize=maxsize, loop=loop) self._unfinished_tasks = 0 self._finished = locks.Event(loop=self._loop) self._finished.set() def _format(self): result = Queue._format(self) if self._unfinished_tasks: result += ' tasks={}'.format(self._unfinished_tasks) return result def _put(self, item): super()._put(item) self._unfinished_tasks += 1 self._finished.clear() def task_done(self): """Indicate that a formerly enqueued task is complete. Used by queue consumers. For each get() used to fetch a task, a subsequent call to task_done() tells the queue that the processing on the task is complete. If a join() is currently blocking, it will resume when all items have been processed (meaning that a task_done() call was received for every item that had been put() into the queue). Raises ValueError if called more times than there were items placed in the queue. """ if self._unfinished_tasks <= 0: raise ValueError('task_done() called too many times') self._unfinished_tasks -= 1 if self._unfinished_tasks == 0: self._finished.set() @coroutine def join(self): """Block until all items in the queue have been gotten and processed. The count of unfinished tasks goes up whenever an item is added to the queue. 
The count goes down whenever a consumer thread calls task_done() to indicate that the item was retrieved and all work on it is complete. When the count of unfinished tasks drops to zero, join() unblocks. """ if self._unfinished_tasks > 0: yield from self._finished.wait()
lgpl-3.0
6,359,050,319,699,617,000
30.315972
79
0.587427
false
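The queue module in the record above predates async/await; it uses @coroutine and yield from. Below is a minimal producer/consumer sketch of the same put()/get()/task_done()/join() flow using the modern asyncio API (Python 3.7+ assumed; task_done() and join() now live on asyncio.Queue itself, so no separate JoinableQueue is needed).

import asyncio

async def producer(queue):
    for item in range(3):
        await queue.put(item)      # blocks when the queue is at maxsize

async def consumer(queue):
    while True:
        item = await queue.get()   # blocks until an item is available
        print('consumed', item)
        queue.task_done()          # lets queue.join() below make progress

async def main():
    queue = asyncio.Queue(maxsize=2)
    worker = asyncio.create_task(consumer(queue))
    await producer(queue)
    await queue.join()             # wait until every item has been task_done()
    worker.cancel()

asyncio.run(main())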
priyankadeswal/network-address-translator
src/fd-net-device/bindings/modulegen_customizations.py
128
1118
import os def post_register_types(root_module): enabled_features = os.environ['NS3_ENABLED_FEATURES'].split(',') if 'EmuFdNetDevice' not in enabled_features: if 'ns3::EmuFdNetDeviceHelper'in root_module: root_module.classes.remove(root_module['ns3::EmuFdNetDeviceHelper']) if 'TapFdNetDevice' not in enabled_features: if 'ns3::TapFdNetDeviceHelper'in root_module: root_module.classes.remove(root_module['ns3::TapFdNetDeviceHelper']) if 'PlanetLabFdNetDevice' not in enabled_features: if 'ns3::PlanetLabFdNetDeviceHelper'in root_module: root_module.classes.remove(root_module['ns3::PlanetLabFdNetDeviceHelper']) if 'FdNetDevice' not in enabled_features: for clsname in ['FdNetDevice', 'FdNetDeviceHelper', 'FdNetDeviceFdReader']: if 'ns3::%s' % clsname in root_module: root_module.classes.remove(root_module['ns3::%s' % clsname]) if 'ns3::FdNetDeviceHelper::EncapsulationMode' in root_module: root_module.enums.remove(root_module['ns3::FdNetDeviceHelper::EncapsulationMode'])
gpl-2.0
3,677,463,507,767,869,400
45.583333
94
0.68873
false
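The binding hook in the record above prunes generated wrapper classes according to the NS3_ENABLED_FEATURES environment variable. A self-contained sketch of that feature-gating pattern follows; the classes set is a hypothetical stand-in for pybindgen's root_module, and the default passed to os.environ.get is mine so the sketch also runs outside an ns-3 build.

import os

# Hypothetical stand-in for the registry of wrapped classes in root_module.
classes = {'ns3::EmuFdNetDeviceHelper', 'ns3::TapFdNetDeviceHelper', 'ns3::FdNetDeviceHelper'}

# The record reads os.environ['NS3_ENABLED_FEATURES'] directly; a default is
# supplied here so the sketch runs without an ns-3 build environment.
enabled_features = os.environ.get('NS3_ENABLED_FEATURES', 'FdNetDevice').split(',')

if 'EmuFdNetDevice' not in enabled_features:
    classes.discard('ns3::EmuFdNetDeviceHelper')
if 'TapFdNetDevice' not in enabled_features:
    classes.discard('ns3::TapFdNetDeviceHelper')

print(sorted(classes))   # ['ns3::FdNetDeviceHelper']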
tomchristie/django
tests/forms_tests/widget_tests/test_splithiddendatetimewidget.py
45
2390
from datetime import datetime from django.forms import SplitHiddenDateTimeWidget from django.test import override_settings from django.utils import translation from .base import WidgetTest class SplitHiddenDateTimeWidgetTest(WidgetTest): widget = SplitHiddenDateTimeWidget() def test_render_empty(self): self.check_html(self.widget, 'date', '', html=( '<input type="hidden" name="date_0" /><input type="hidden" name="date_1" />' )) def test_render_value(self): d = datetime(2007, 9, 17, 12, 51, 34, 482548) self.check_html(self.widget, 'date', d, html=( '<input type="hidden" name="date_0" value="2007-09-17" />' '<input type="hidden" name="date_1" value="12:51:34" />' )) self.check_html(self.widget, 'date', datetime(2007, 9, 17, 12, 51, 34), html=( '<input type="hidden" name="date_0" value="2007-09-17" />' '<input type="hidden" name="date_1" value="12:51:34" />' )) self.check_html(self.widget, 'date', datetime(2007, 9, 17, 12, 51), html=( '<input type="hidden" name="date_0" value="2007-09-17" />' '<input type="hidden" name="date_1" value="12:51:00" />' )) @override_settings(USE_L10N=True) @translation.override('de-at') def test_l10n(self): d = datetime(2007, 9, 17, 12, 51) self.check_html(self.widget, 'date', d, html=( """ <input type="hidden" name="date_0" value="17.09.2007" /> <input type="hidden" name="date_1" value="12:51:00" /> """ )) def test_constructor_different_attrs(self): html = ( '<input type="hidden" class="foo" value="2006-01-10" name="date_0" />' '<input type="hidden" class="bar" value="07:30:00" name="date_1" />' ) widget = SplitHiddenDateTimeWidget(date_attrs={'class': 'foo'}, time_attrs={'class': 'bar'}) self.check_html(widget, 'date', datetime(2006, 1, 10, 7, 30), html=html) widget = SplitHiddenDateTimeWidget(date_attrs={'class': 'foo'}, attrs={'class': 'bar'}) self.check_html(widget, 'date', datetime(2006, 1, 10, 7, 30), html=html) widget = SplitHiddenDateTimeWidget(time_attrs={'class': 'bar'}, attrs={'class': 'foo'}) self.check_html(widget, 'date', datetime(2006, 1, 10, 7, 30), html=html)
bsd-3-clause
-7,063,624,504,827,261,000
43.259259
100
0.582008
false
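The widget test in the record above expects a datetime to be split into one hidden date input (date_0) and one hidden time input (date_1). Below is a small sketch of the value split those expected HTML strings encode; the strftime formats are my approximation of the unlocalized rendering, whereas the real widget delegates formatting to Django's localization machinery.

from datetime import datetime

d = datetime(2007, 9, 17, 12, 51, 34)
date_value = d.strftime('%Y-%m-%d')   # rendered into <input type="hidden" name="date_0">
time_value = d.strftime('%H:%M:%S')   # rendered into <input type="hidden" name="date_1">
print(date_value, time_value)         # 2007-09-17 12:51:34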
mezz64/home-assistant
homeassistant/components/deconz/cover.py
5
3276
"""Support for deCONZ covers.""" from homeassistant.components.cover import ( ATTR_POSITION, DEVICE_CLASS_WINDOW, DOMAIN, SUPPORT_CLOSE, SUPPORT_OPEN, SUPPORT_SET_POSITION, SUPPORT_STOP, CoverEntity, ) from homeassistant.core import callback from homeassistant.helpers.dispatcher import async_dispatcher_connect from .const import COVER_TYPES, DAMPERS, NEW_LIGHT, WINDOW_COVERS from .deconz_device import DeconzDevice from .gateway import get_gateway_from_config_entry async def async_setup_entry(hass, config_entry, async_add_entities): """Set up covers for deCONZ component. Covers are based on the same device class as lights in deCONZ. """ gateway = get_gateway_from_config_entry(hass, config_entry) gateway.entities[DOMAIN] = set() @callback def async_add_cover(lights): """Add cover from deCONZ.""" entities = [] for light in lights: if ( light.type in COVER_TYPES and light.uniqueid not in gateway.entities[DOMAIN] ): entities.append(DeconzCover(light, gateway)) if entities: async_add_entities(entities) gateway.listeners.append( async_dispatcher_connect( hass, gateway.async_signal_new_device(NEW_LIGHT), async_add_cover ) ) async_add_cover(gateway.api.lights.values()) class DeconzCover(DeconzDevice, CoverEntity): """Representation of a deCONZ cover.""" TYPE = DOMAIN def __init__(self, device, gateway): """Set up cover device.""" super().__init__(device, gateway) self._features = SUPPORT_OPEN self._features |= SUPPORT_CLOSE self._features |= SUPPORT_STOP self._features |= SUPPORT_SET_POSITION @property def current_cover_position(self): """Return the current position of the cover.""" return 100 - int(self._device.brightness / 254 * 100) @property def is_closed(self): """Return if the cover is closed.""" return self._device.state @property def device_class(self): """Return the class of the cover.""" if self._device.type in DAMPERS: return "damper" if self._device.type in WINDOW_COVERS: return DEVICE_CLASS_WINDOW @property def supported_features(self): """Flag supported features.""" return self._features async def async_set_cover_position(self, **kwargs): """Move the cover to a specific position.""" position = kwargs[ATTR_POSITION] data = {"on": False} if position < 100: data["on"] = True data["bri"] = 254 - int(position / 100 * 254) await self._device.async_set_state(data) async def async_open_cover(self, **kwargs): """Open cover.""" data = {ATTR_POSITION: 100} await self.async_set_cover_position(**data) async def async_close_cover(self, **kwargs): """Close cover.""" data = {ATTR_POSITION: 0} await self.async_set_cover_position(**data) async def async_stop_cover(self, **kwargs): """Stop cover.""" data = {"bri_inc": 0} await self._device.async_set_state(data)
apache-2.0
-6,904,845,156,815,118,000
27.99115
77
0.614164
false
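The deCONZ cover entity in the record above maps device brightness (0-254) to a cover position (0-100) and back. A standalone sketch of that arithmetic, mirroring current_cover_position and async_set_cover_position:

def brightness_to_position(bri):
    """deCONZ brightness 0..254 -> cover position 0..100 (cf. current_cover_position)."""
    return 100 - int(bri / 254 * 100)

def position_to_brightness(position):
    """Inverse mapping used when setting a position (cf. async_set_cover_position)."""
    return 254 - int(position / 100 * 254)

print(brightness_to_position(254))   # 0   (fully closed)
print(brightness_to_position(0))     # 100 (fully open)
print(position_to_brightness(100))   # 0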
p0nce/tofbot
plugins/lis.py
2
4512
# Lispy: Scheme Interpreter in Python # (c) Peter Norvig, 2010-16; See http://norvig.com/lispy.html from __future__ import division import math import operator as op # Types Symbol = str # A Lisp Symbol is implemented as a Python str List = list # A Lisp List is implemented as a Python list Number = (int, float) # A Lisp Number is implemented as a Python int or float # Parsing: parse, tokenize, and read_from_tokens def parse(program): "Read a Scheme expression from a string." return read_from_tokens(tokenize(program)) def tokenize(s): "Convert a string into a list of tokens." return s.replace('(', ' ( ').replace(')', ' ) ').split() def read_from_tokens(tokens): "Read an expression from a sequence of tokens." if len(tokens) == 0: raise SyntaxError('unexpected EOF while reading') token = tokens.pop(0) if '(' == token: L = [] while tokens[0] != ')': L.append(read_from_tokens(tokens)) tokens.pop(0) # pop off ')' return L elif ')' == token: raise SyntaxError('unexpected )') else: return atom(token) def atom(token): "Numbers become numbers; every other token is a symbol." try: return int(token) except ValueError: try: return float(token) except ValueError: return Symbol(token) # Environments def standard_env(): "An environment with some Scheme standard procedures." env = Env() env.update(vars(math)) # sin, cos, sqrt, pi, ... env.update({ '+': op.add, '-': op.sub, '*': op.mul, '/': op.truediv, '>': op.gt, '<': op.lt, '>=': op.ge, '<=': op.le, '=': op.eq, 'abs': abs, 'append': op.add, 'apply': apply, 'begin': lambda *x: x[-1], 'car': lambda x: x[0], 'cdr': lambda x: x[1:], 'cons': lambda x, y: [x] + y, 'eq?': op.is_, 'equal?': op.eq, 'length': len, 'list': lambda *x: list(x), 'list?': lambda x: isinstance(x, list), 'map': map, 'max': max, 'min': min, 'not': op.not_, 'null?': lambda x: x == [], 'number?': lambda x: isinstance(x, Number), 'procedure?': callable, 'round': round, 'symbol?': lambda x: isinstance(x, Symbol), }) return env class Env(dict): "An environment: a dict of {'var':val} pairs, with an outer Env." def __init__(self, parms=(), args=(), outer=None): self.update(zip(parms, args)) self.outer = outer def find(self, var): "Find the innermost Env where var appears." return self if (var in self) else self.outer.find(var) global_env = standard_env() # Interaction: A REPL def repl(prompt='lis.py> '): "A prompt-read-eval-print loop." while True: val = eval(parse(raw_input(prompt))) if val is not None: print(lispstr(val)) def lispstr(exp): "Convert a Python object back into a Lisp-readable string." if isinstance(exp, list): return '(' + ' '.join(map(lispstr, exp)) + ')' else: return str(exp) # Procedures class Procedure(object): "A user-defined Scheme procedure." def __init__(self, parms, body, env): self.parms, self.body, self.env = parms, body, env def __call__(self, *args): return eval(self.body, Env(self.parms, args, self.env)) # eval def eval(x, env=global_env): "Evaluate an expression in an environment." if isinstance(x, Symbol): # variable reference return env.find(x)[x] elif not isinstance(x, List): # constant literal return x elif x[0] == 'quote': # (quote exp) (_, exp) = x return exp elif x[0] == 'if': # (if test conseq alt) (_, test, conseq, alt) = x exp = (conseq if eval(test, env) else alt) return eval(exp, env) elif x[0] == 'define': # (define var exp) (_, var, exp) = x env[var] = eval(exp, env) elif x[0] == 'set!': # (set! var exp) (_, var, exp) = x env.find(var)[var] = eval(exp, env) elif x[0] == 'lambda': # (lambda (var...) 
body) (_, parms, body) = x return Procedure(parms, body, env) else: # (proc arg...) proc = eval(x[0], env) args = [eval(exp, env) for exp in x[1:]] return proc(*args)
bsd-2-clause
-2,001,820,791,099,102,000
27.024845
78
0.537234
false
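A short usage sketch for the interpreter in the record above. It assumes the file is saved as lis.py somewhere on the import path and is run under Python 2, since standard_env() references the builtins apply and raw_input that were removed in Python 3.

# Assumes lis.py (the record above) is importable and Python 2 is used.
from lis import parse, eval

program = "(begin (define r 10) (* pi (* r r)))"
print(eval(parse(program)))   # ~314.159...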
EDUlib/edx-platform
openedx/core/djangoapps/content/block_structure/tests/helpers.py
4
10731
""" Common utilities for tests in block_structure module """ from contextlib import contextmanager from uuid import uuid4 from unittest.mock import patch from opaque_keys.edx.locator import BlockUsageLocator, CourseLocator from xmodule.modulestore.exceptions import ItemNotFoundError from ..api import get_cache from ..block_structure import BlockStructureBlockData from ..exceptions import BlockStructureNotFound from ..models import BlockStructureModel from ..store import BlockStructureStore from ..transformer import BlockStructureTransformer, FilteringTransformerMixin from ..transformer_registry import TransformerRegistry def is_course_in_block_structure_cache(course_key, store): """ Returns whether the given course is in the Block Structure cache. """ course_usage_key = store.make_course_usage_key(course_key) try: BlockStructureStore(get_cache()).get(course_usage_key) return True except BlockStructureNotFound: return False def is_course_in_block_structure_storage(course_key, store): """ Returns whether the given course is in Block Structure storage. """ course_usage_key = store.make_course_usage_key(course_key) try: BlockStructureModel.get(course_usage_key) return True except BlockStructureNotFound: return False class MockXBlock: """ A mock XBlock to be used in unit tests, thereby decoupling the implementation of the block cache framework from the xBlock implementation. This class provides only the minimum xBlock capabilities needed by the block cache framework. """ def __init__(self, location, field_map=None, children=None, modulestore=None): self.location = location self.field_map = field_map or {} self.children = children or [] self.modulestore = modulestore def __getattr__(self, attr): try: return self.field_map[attr] except KeyError: raise AttributeError # lint-amnesty, pylint: disable=raise-missing-from def get_children(self): """ Returns the children of the mock XBlock. """ return [self.modulestore.get_item(child) for child in self.children] class MockModulestore: """ A mock Modulestore to be used in unit tests, providing only the minimum methods needed by the block cache framework. """ def __init__(self): self.get_items_call_count = 0 self.blocks = None def set_blocks(self, blocks): """ Updates the mock modulestore with a dictionary of blocks. Arguments: blocks ({block key, MockXBlock}) - A map of block_key to its mock xBlock. """ self.blocks = blocks def get_item(self, block_key, depth=None, lazy=False): # pylint: disable=unused-argument """ Returns the mock XBlock (MockXBlock) associated with the given block_key. Raises ItemNotFoundError if the item is not found. """ self.get_items_call_count += 1 item = self.blocks.get(block_key) if not item: raise ItemNotFoundError return item @contextmanager def bulk_operations(self, ignore): # pylint: disable=unused-argument """ A context manager for notifying the store of bulk operations. """ yield class MockCache: """ A mock Cache object, providing only the minimum features needed by the block cache framework. """ def __init__(self): # An in-memory map of cache keys to cache values. self.map = {} self.set_call_count = 0 self.timeout_from_last_call = 0 def set(self, key, val, timeout): """ Associates the given key with the given value in the cache. """ self.set_call_count += 1 self.map[key] = val self.timeout_from_last_call = timeout def get(self, key, default=None): """ Returns the value associated with the given key in the cache; returns default if not found. 
""" return self.map.get(key, default) def delete(self, key): """ Deletes the given key from the cache. """ del self.map[key] class MockModulestoreFactory: """ A factory for creating MockModulestore objects. """ @classmethod def create(cls, children_map, block_key_factory): """ Creates and returns a MockModulestore from the given children_map. Arguments: children_map ({block_key: [block_key]}) - A dictionary mapping a block key to a list of block keys of the block's corresponding children. """ modulestore = MockModulestore() modulestore.set_blocks({ block_key_factory(block_key): MockXBlock( block_key_factory(block_key), children=[block_key_factory(child) for child in children], modulestore=modulestore, ) for block_key, children in enumerate(children_map) }) return modulestore class MockTransformer(BlockStructureTransformer): """ A mock BlockStructureTransformer class. """ WRITE_VERSION = 1 READ_VERSION = 1 @classmethod def name(cls): # Use the class' name for Mock transformers. return cls.__name__ def transform(self, usage_info, block_structure): pass def __repr__(self): return self.name() class MockFilteringTransformer(FilteringTransformerMixin, BlockStructureTransformer): """ A mock FilteringTransformerMixin class. """ WRITE_VERSION = 1 READ_VERSION = 1 @classmethod def name(cls): # Use the class' name for Mock transformers. return cls.__name__ def transform_block_filters(self, usage_info, block_structure): return [block_structure.create_universal_filter()] def clear_registered_transformers_cache(): """ Test helper to clear out any cached values of registered transformers. """ TransformerRegistry.get_write_version_hash.cache.clear() # lint-amnesty, pylint: disable=no-member @contextmanager def mock_registered_transformers(transformers): """ Context manager for mocking the transformer registry to return the given transformers. """ clear_registered_transformers_cache() with patch( 'openedx.core.djangoapps.content.block_structure.transformer_registry.' 'TransformerRegistry.get_registered_transformers' ) as mock_available_transforms: mock_available_transforms.return_value = {transformer for transformer in transformers} # lint-amnesty, pylint: disable=unnecessary-comprehension yield class ChildrenMapTestMixin: """ A Test Mixin with utility methods for testing with block structures created and manipulated using children_map and parents_map. """ # 0 # / \ # 1 2 # / \ # 3 4 SIMPLE_CHILDREN_MAP = [[1, 2], [3, 4], [], [], []] # 0 # / # 1 # / # 2 # / # 3 LINEAR_CHILDREN_MAP = [[1], [2], [3], []] # 0 # / \ # 1 2 # \ / \ # 3 4 # / \ # 5 6 DAG_CHILDREN_MAP = [[1, 2], [3], [3, 4], [5, 6], [], [], []] def block_key_factory(self, block_id): """ Returns a block key object for the given block_id. Override this method if the block_key should be anything different from the index integer values in the Children Maps. """ return block_id def create_block_structure(self, children_map, block_structure_cls=BlockStructureBlockData): """ Factory method for creating and returning a block structure for the given children_map. """ # create empty block structure block_structure = block_structure_cls(root_block_usage_key=self.block_key_factory(0)) # _add_relation for parent, children in enumerate(children_map): for child in children: block_structure._add_relation(self.block_key_factory(parent), self.block_key_factory(child)) # pylint: disable=protected-access return block_structure def get_parents_map(self, children_map): """ Converts and returns the given children_map to a parents_map. 
""" parent_map = [[] for _ in children_map] for parent, children in enumerate(children_map): for child in children: parent_map[child].append(parent) return parent_map def assert_block_structure(self, block_structure, children_map, missing_blocks=None): """ Verifies that the relations in the given block structure equate the relations described in the children_map. Use the missing_blocks parameter to pass in any blocks that were removed from the block structure but still have a positional entry in the children_map. """ if not missing_blocks: missing_blocks = [] for block_key, children in enumerate(children_map): # Verify presence assert (self.block_key_factory(block_key) in block_structure) == (block_key not in missing_blocks),\ 'Expected presence in block_structure for block_key {} to match absence in missing_blocks.'\ .format(str(block_key)) # Verify children if block_key not in missing_blocks: assert set(block_structure.get_children(self.block_key_factory(block_key))) ==\ {self.block_key_factory(child) for child in children} # Verify parents parents_map = self.get_parents_map(children_map) for block_key, parents in enumerate(parents_map): if block_key not in missing_blocks: assert set(block_structure.get_parents(self.block_key_factory(block_key))) ==\ {self.block_key_factory(parent) for parent in parents} class UsageKeyFactoryMixin: """ Test Mixin that provides a block_key_factory to create OpaqueKey objects for block_ids rather than simple integers. By default, the children maps in ChildrenMapTestMixin use integers for block_ids. """ def setUp(self): super().setUp() self.course_key = CourseLocator('org', 'course', str(uuid4())) def block_key_factory(self, block_id): """ Returns a block key object for the given block_id. """ return BlockUsageLocator(course_key=self.course_key, block_type='course', block_id=str(block_id))
agpl-3.0
5,292,480,777,940,191,000
30.84273
153
0.630789
false
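ChildrenMapTestMixin in the record above represents a block tree as a children_map (a list indexed by parent, holding child indices) and inverts it with get_parents_map. A self-contained sketch of that inversion using the record's SIMPLE_CHILDREN_MAP:

def get_parents_map(children_map):
    """Invert a children_map: for each block, collect the parents that list it as a child."""
    parents_map = [[] for _ in children_map]
    for parent, children in enumerate(children_map):
        for child in children:
            parents_map[child].append(parent)
    return parents_map

SIMPLE_CHILDREN_MAP = [[1, 2], [3, 4], [], [], []]   # block 0 -> (1, 2), block 1 -> (3, 4)
print(get_parents_map(SIMPLE_CHILDREN_MAP))          # [[], [0], [0], [1], [1]]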
rdelval/gorealis
vendor/git.apache.org/thrift.git/test/py/RunClientServer.py
14
12502
#!/usr/bin/env python # # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # from __future__ import division from __future__ import print_function import platform import copy import os import signal import socket import subprocess import sys import time from optparse import OptionParser from util import local_libpath SCRIPT_DIR = os.path.abspath(os.path.dirname(__file__)) SCRIPTS = [ 'FastbinaryTest.py', 'TestFrozen.py', 'TSimpleJSONProtocolTest.py', 'SerializationTest.py', 'TestEof.py', 'TestSyntax.py', 'TestSocket.py', ] FRAMED = ["TNonblockingServer"] SKIP_ZLIB = ['TNonblockingServer', 'THttpServer'] SKIP_SSL = ['TNonblockingServer', 'THttpServer'] EXTRA_DELAY = dict(TProcessPoolServer=5.5) PROTOS = [ 'accel', 'accelc', 'binary', 'compact', 'json', ] def default_servers(): servers = [ 'TSimpleServer', 'TThreadedServer', 'TThreadPoolServer', 'TNonblockingServer', 'THttpServer', ] if platform.system() != 'Windows': servers.append('TProcessPoolServer') servers.append('TForkingServer') return servers def relfile(fname): return os.path.join(SCRIPT_DIR, fname) def setup_pypath(libdir, gendir): dirs = [libdir, gendir] env = copy.deepcopy(os.environ) pypath = env.get('PYTHONPATH', None) if pypath: dirs.append(pypath) env['PYTHONPATH'] = os.pathsep.join(dirs) if gendir.endswith('gen-py-no_utf8strings'): env['THRIFT_TEST_PY_NO_UTF8STRINGS'] = '1' return env def runScriptTest(libdir, genbase, genpydir, script): env = setup_pypath(libdir, os.path.join(genbase, genpydir)) script_args = [sys.executable, relfile(script)] print('\nTesting script: %s\n----' % (' '.join(script_args))) ret = subprocess.call(script_args, env=env) if ret != 0: print('*** FAILED ***', file=sys.stderr) print('LIBDIR: %s' % libdir, file=sys.stderr) print('PY_GEN: %s' % genpydir, file=sys.stderr) print('SCRIPT: %s' % script, file=sys.stderr) raise Exception("Script subprocess failed, retcode=%d, args: %s" % (ret, ' '.join(script_args))) def runServiceTest(libdir, genbase, genpydir, server_class, proto, port, use_zlib, use_ssl, verbose): env = setup_pypath(libdir, os.path.join(genbase, genpydir)) # Build command line arguments server_args = [sys.executable, relfile('TestServer.py')] cli_args = [sys.executable, relfile('TestClient.py')] for which in (server_args, cli_args): which.append('--protocol=%s' % proto) # accel, binary, compact or json which.append('--port=%d' % port) # default to 9090 if use_zlib: which.append('--zlib') if use_ssl: which.append('--ssl') if verbose == 0: which.append('-q') if verbose == 2: which.append('-v') # server-specific option to select server class server_args.append(server_class) # client-specific cmdline options if server_class in FRAMED: cli_args.append('--transport=framed') else: cli_args.append('--transport=buffered') if server_class == 'THttpServer': cli_args.append('--http=/') 
if verbose > 0: print('Testing server %s: %s' % (server_class, ' '.join(server_args))) serverproc = subprocess.Popen(server_args, env=env) def ensureServerAlive(): if serverproc.poll() is not None: print(('FAIL: Server process (%s) failed with retcode %d') % (' '.join(server_args), serverproc.returncode)) raise Exception('Server subprocess %s died, args: %s' % (server_class, ' '.join(server_args))) # Wait for the server to start accepting connections on the given port. sleep_time = 0.1 # Seconds max_attempts = 100 attempt = 0 while True: sock4 = socket.socket() sock6 = socket.socket(socket.AF_INET6) try: if sock4.connect_ex(('127.0.0.1', port)) == 0 \ or sock6.connect_ex(('::1', port)) == 0: break attempt += 1 if attempt >= max_attempts: raise Exception("TestServer not ready on port %d after %.2f seconds" % (port, sleep_time * attempt)) ensureServerAlive() time.sleep(sleep_time) finally: sock4.close() sock6.close() try: if verbose > 0: print('Testing client: %s' % (' '.join(cli_args))) ret = subprocess.call(cli_args, env=env) if ret != 0: print('*** FAILED ***', file=sys.stderr) print('LIBDIR: %s' % libdir, file=sys.stderr) print('PY_GEN: %s' % genpydir, file=sys.stderr) raise Exception("Client subprocess failed, retcode=%d, args: %s" % (ret, ' '.join(cli_args))) finally: # check that server didn't die ensureServerAlive() extra_sleep = EXTRA_DELAY.get(server_class, 0) if extra_sleep > 0 and verbose > 0: print('Giving %s (proto=%s,zlib=%s,ssl=%s) an extra %d seconds for child' 'processes to terminate via alarm' % (server_class, proto, use_zlib, use_ssl, extra_sleep)) time.sleep(extra_sleep) sig = signal.SIGKILL if platform.system() != 'Windows' else signal.SIGABRT os.kill(serverproc.pid, sig) serverproc.wait() class TestCases(object): def __init__(self, genbase, libdir, port, gendirs, servers, verbose): self.genbase = genbase self.libdir = libdir self.port = port self.verbose = verbose self.gendirs = gendirs self.servers = servers def default_conf(self): return { 'gendir': self.gendirs[0], 'server': self.servers[0], 'proto': PROTOS[0], 'zlib': False, 'ssl': False, } def run(self, conf, test_count): with_zlib = conf['zlib'] with_ssl = conf['ssl'] try_server = conf['server'] try_proto = conf['proto'] genpydir = conf['gendir'] # skip any servers that don't work with the Zlib transport if with_zlib and try_server in SKIP_ZLIB: return False # skip any servers that don't work with SSL if with_ssl and try_server in SKIP_SSL: return False if self.verbose > 0: print('\nTest run #%d: (includes %s) Server=%s, Proto=%s, zlib=%s, SSL=%s' % (test_count, genpydir, try_server, try_proto, with_zlib, with_ssl)) runServiceTest(self.libdir, self.genbase, genpydir, try_server, try_proto, self.port, with_zlib, with_ssl, self.verbose) if self.verbose > 0: print('OK: Finished (includes %s) %s / %s proto / zlib=%s / SSL=%s. %d combinations tested.' 
% (genpydir, try_server, try_proto, with_zlib, with_ssl, test_count)) return True def test_feature(self, name, values): test_count = 0 conf = self.default_conf() for try_server in values: conf[name] = try_server if self.run(conf, test_count): test_count += 1 return test_count def run_all_tests(self): test_count = 0 for try_server in self.servers: for genpydir in self.gendirs: for try_proto in PROTOS: for with_zlib in (False, True): # skip any servers that don't work with the Zlib transport if with_zlib and try_server in SKIP_ZLIB: continue for with_ssl in (False, True): # skip any servers that don't work with SSL if with_ssl and try_server in SKIP_SSL: continue test_count += 1 if self.verbose > 0: print('\nTest run #%d: (includes %s) Server=%s, Proto=%s, zlib=%s, SSL=%s' % (test_count, genpydir, try_server, try_proto, with_zlib, with_ssl)) runServiceTest(self.libdir, self.genbase, genpydir, try_server, try_proto, self.port, with_zlib, with_ssl) if self.verbose > 0: print('OK: Finished (includes %s) %s / %s proto / zlib=%s / SSL=%s. %d combinations tested.' % (genpydir, try_server, try_proto, with_zlib, with_ssl, test_count)) return test_count def main(): parser = OptionParser() parser.add_option('--all', action="store_true", dest='all') parser.add_option('--genpydirs', type='string', dest='genpydirs', default='default,slots,oldstyle,no_utf8strings,dynamic,dynamicslots', help='directory extensions for generated code, used as suffixes for \"gen-py-*\" added sys.path for individual tests') parser.add_option("--port", type="int", dest="port", default=9090, help="port number for server to listen on") parser.add_option('-v', '--verbose', action="store_const", dest="verbose", const=2, help="verbose output") parser.add_option('-q', '--quiet', action="store_const", dest="verbose", const=0, help="minimal output") parser.add_option('-L', '--libdir', dest="libdir", default=local_libpath(), help="directory path that contains Thrift Python library") parser.add_option('--gen-base', dest="gen_base", default=SCRIPT_DIR, help="directory path that contains Thrift Python library") parser.set_defaults(verbose=1) options, args = parser.parse_args() generated_dirs = [] for gp_dir in options.genpydirs.split(','): generated_dirs.append('gen-py-%s' % (gp_dir)) # commandline permits a single class name to be specified to override SERVERS=[...] 
servers = default_servers() if len(args) == 1: if args[0] in servers: servers = args else: print('Unavailable server type "%s", please choose one of: %s' % (args[0], servers)) sys.exit(0) tests = TestCases(options.gen_base, options.libdir, options.port, generated_dirs, servers, options.verbose) # run tests without a client/server first print('----------------') print(' Executing individual test scripts with various generated code directories') print(' Directories to be tested: ' + ', '.join(generated_dirs)) print(' Scripts to be tested: ' + ', '.join(SCRIPTS)) print('----------------') for genpydir in generated_dirs: for script in SCRIPTS: runScriptTest(options.libdir, options.gen_base, genpydir, script) print('----------------') print(' Executing Client/Server tests with various generated code directories') print(' Servers to be tested: ' + ', '.join(servers)) print(' Directories to be tested: ' + ', '.join(generated_dirs)) print(' Protocols to be tested: ' + ', '.join(PROTOS)) print(' Options to be tested: ZLIB(yes/no), SSL(yes/no)') print('----------------') if options.all: tests.run_all_tests() else: tests.test_feature('gendir', generated_dirs) tests.test_feature('server', servers) tests.test_feature('proto', PROTOS) tests.test_feature('zlib', [False, True]) tests.test_feature('ssl', [False, True]) if __name__ == '__main__': sys.exit(main())
apache-2.0
3,355,525,761,540,314,000
37.94704
140
0.581507
false
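The test runner in the record above launches each client/server subprocess with a PYTHONPATH assembled from the Thrift library directory and a generated-code directory. A minimal standalone sketch of that environment-building step; the directory names are placeholders, and a plain dict copy of os.environ is used here instead of the record's copy.deepcopy.

import os
import subprocess
import sys

def setup_pypath(extra_dirs):
    """Return an environment copy with extra_dirs prepended to PYTHONPATH."""
    env = dict(os.environ)               # side-effect-free copy of the current environment
    dirs = list(extra_dirs)
    if env.get('PYTHONPATH'):
        dirs.append(env['PYTHONPATH'])   # keep any existing entries at the end
    env['PYTHONPATH'] = os.pathsep.join(dirs)
    return env

# Placeholder paths; the real script passes the Thrift lib dir and a gen-py-* dir.
env = setup_pypath(['/path/to/thrift/lib/py', '/path/to/gen-py'])
subprocess.call([sys.executable, '-c', 'import sys; print(sys.path[:3])'], env=env)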
ahuarte47/QGIS
python/plugins/processing/preconfigured/NewPreconfiguredAlgorithmAction.py
15
1871
# -*- coding: utf-8 -*- """ *************************************************************************** NewPreconfiguredAlgorithmAction.py --------------------- Date : April 2016 Copyright : (C) 2016 by Victor Olaya Email : volayaf at gmail dot com *************************************************************************** * * * This program is free software; you can redistribute it and/or modify * * it under the terms of the GNU General Public License as published by * * the Free Software Foundation; either version 2 of the License, or * * (at your option) any later version. * * * *************************************************************************** """ __author__ = 'Victor Olaya' __date__ = 'April 2016' __copyright__ = '(C) 2016, Victor Olaya' from qgis.PyQt.QtCore import QCoreApplication from processing.core.GeoAlgorithm import GeoAlgorithm from processing.gui.ContextAction import ContextAction from processing.preconfigured.PreconfiguredAlgorithmDialog import PreconfiguredAlgorithmDialog from processing.preconfigured.PreconfiguredAlgorithm import PreconfiguredAlgorithm class NewPreconfiguredAlgorithmAction(ContextAction): def __init__(self): super().__init__() self.name = QCoreApplication.translate('NewPreconfiguredAlgorithmAction', 'Create Preconfigured Algorithm…') def isEnabled(self): return (isinstance(self.itemData, GeoAlgorithm) and not isinstance(self.itemData, PreconfiguredAlgorithm)) def execute(self): alg = self.itemData dlg = PreconfiguredAlgorithmDialog(alg, self.toolbox) dlg.exec_()
gpl-2.0
8,637,583,106,124,704,000
40.533333
116
0.538256
false
Ffreasy/crazyflie-clients-python
lib/cflib/crazyflie/toccache.py
31
4215
#!/usr/bin/env python # -*- coding: utf-8 -*- # # || ____ _ __ # +------+ / __ )(_) /_______________ _____ ___ # | 0xBC | / __ / / __/ ___/ ___/ __ `/_ / / _ \ # +------+ / /_/ / / /_/ /__/ / / /_/ / / /_/ __/ # || || /_____/_/\__/\___/_/ \__,_/ /___/\___/ # # Copyright (C) 2013 Bitcraze AB # # Crazyflie Nano Quadcopter Client # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; either version 2 # of the License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, # MA 02110-1301, USA. """ Access the TOC cache for reading/writing. It supports both user cache and dist cache. """ __author__ = 'Bitcraze AB' __all__ = ['TocCache'] import os import json from glob import glob import logging logger = logging.getLogger(__name__) from .log import LogTocElement # pylint: disable=W0611 from .param import ParamTocElement # pylint: disable=W0611 class TocCache(): """ Access to TOC cache. To turn of the cache functionality don't supply any directories. """ def __init__(self, ro_cache=None, rw_cache=None): self._cache_files = [] if (ro_cache): self._cache_files += glob(ro_cache + "/*.json") if (rw_cache): self._cache_files += glob(rw_cache + "/*.json") if not os.path.exists(rw_cache): os.makedirs(rw_cache) self._rw_cache = rw_cache def fetch(self, crc): """ Try to get a hit in the cache, return None otherwise """ cache_data = None pattern = "%08X.json" % crc hit = None for name in self._cache_files: if (name.endswith(pattern)): hit = name if (hit): try: cache = open(hit) cache_data = json.load(cache, object_hook=self._decoder) cache.close() except Exception as exp: logger.warning("Error while parsing cache file [%s]:%s", hit, str(exp)) return cache_data def insert(self, crc, toc): """ Save a new cache to file """ if self._rw_cache: try: filename = "%s/%08X.json" % (self._rw_cache, crc) cache = open(filename, 'w') cache.write(json.dumps(toc, indent=2, default=self._encoder)) cache.close() logger.info("Saved cache to [%s]", filename) self._cache_files += [filename] except Exception as exp: logger.warning("Could not save cache to file [%s]: %s", filename, str(exp)) else: logger.warning("Could not save cache, no writable directory") def _encoder(self, obj): """ Encode a toc element leaf-node """ return {'__class__': obj.__class__.__name__, 'ident': obj.ident, 'group': obj.group, 'name': obj.name, 'ctype': obj.ctype, 'pytype': obj.pytype, 'access': obj.access} raise TypeError(repr(obj) + ' is not JSON serializable') def _decoder(self, obj): """ Decode a toc element leaf-node """ if '__class__' in obj: elem = eval(obj['__class__'])() elem.ident = obj['ident'] elem.group = str(obj['group']) elem.name = str(obj['name']) elem.ctype = str(obj['ctype']) elem.pytype = str(obj['pytype']) elem.access = obj['access'] return elem return obj
gpl-2.0
-5,876,656,982,296,195,000
32.452381
73
0.517912
false
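TocCache in the record above persists TOC elements as JSON via a default= encoder that emits a '__class__' marker and an object_hook= decoder that rebuilds objects from it. A self-contained sketch of that round-trip pattern with a hypothetical stand-in class (the real cache additionally writes files named '%08X.json' keyed by CRC):

import json

class Element:
    """Hypothetical stand-in for a TOC element leaf node."""
    def __init__(self, ident=0, name=''):
        self.ident = ident
        self.name = name

def encoder(obj):
    """Encode a leaf node with a '__class__' marker (cf. TocCache._encoder)."""
    if isinstance(obj, Element):
        return {'__class__': obj.__class__.__name__,
                'ident': obj.ident, 'name': obj.name}
    raise TypeError(repr(obj) + ' is not JSON serializable')

def decoder(obj):
    """Rebuild a leaf node from its '__class__' marker (cf. TocCache._decoder)."""
    if obj.get('__class__') == 'Element':
        elem = Element()
        elem.ident = obj['ident']
        elem.name = str(obj['name'])
        return elem
    return obj

blob = json.dumps({'5': Element(5, 'pm.vbat')}, indent=2, default=encoder)
restored = json.loads(blob, object_hook=decoder)
print(restored['5'].ident, restored['5'].name)   # 5 pm.vbat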
eecsu/BET
bet/postProcess/postTools.py
2
12305
# Copyright (C) 2014-2016 The BET Development Team """ This module provides methods for postprocessing probabilities and data. """ import logging import numpy as np import bet.sample as sample class dim_not_matching(Exception): """ Exception for when the dimension is inconsistent. """ class bad_object(Exception): """ Exception for when the wrong type of object is used. """ def sort_by_rho(sample_set): """ This sorts the samples within the sample_set by probability density. If a discretization object is given, then the QoI data is also sorted to maintain the correspondence. Any volumes present in the input space (or just the sample object) are also sorted. :param sample_set: Object containing samples and probabilities :type sample_set: :class:`~bet.sample.sample_set_base` or :class:`~bet.sample.discretization` :param indices: sorting indices :type indices: :class:`numpy.ndarray` of shape (num_samples,) :param sample_set_out: Object containing sorted samples and probabilities :type sample_set_out: :class:`~bet.sample.sample_set` or :class:`~bet.sample.discretization` :rtype: tuple :returns: (sample_set_out, indicices) """ if isinstance(sample_set, sample.discretization): samples = sample_set._input_sample_set.get_values() P_samples = sample_set._input_sample_set.get_probabilities() lam_vol = sample_set._input_sample_set.get_volumes() data = sample_set._output_sample_set.get_values() elif isinstance(sample_set, sample.sample_set_base): samples = sample_set.get_values() P_samples = sample_set.get_probabilities() lam_vol = sample_set.get_volumes() data = None else: raise bad_object("Improper sample object") nnz = np.sum(P_samples > 0) if lam_vol is None: indices = np.argsort(P_samples)[::-1][0:nnz] else: indices = np.argsort(P_samples/lam_vol)[::-1][0:nnz] P_samples = P_samples[indices] samples = samples[indices, :] if lam_vol is not None: lam_vol = lam_vol[indices] if data is not None: data = data[indices, :] if isinstance(sample_set, sample.discretization): samples_out = sample.sample_set(sample_set._input_sample_set.get_dim()) data_out = sample.sample_set(sample_set._output_sample_set.get_dim()) sample_set_out = sample.discretization(samples_out, data_out) sample_set_out._input_sample_set.set_values(samples) sample_set_out._input_sample_set.set_probabilities(P_samples) sample_set_out._input_sample_set.set_volumes(lam_vol) sample_set_out._output_sample_set.set_values(data) else: sample_set_out = sample.sample_set(sample_set.get_dim()) sample_set_out.set_values(samples) sample_set_out.set_probabilities(P_samples) sample_set_out.set_volumes(lam_vol) return (sample_set_out, indices) def sample_prob(percentile, sample_set, sort=True, descending=False): """ This calculates the highest/lowest probability samples whose probability sum to a given value. A new sample_set with the samples corresponding to these highest/lowest probability samples is returned along with the number of samples and the indices. This uses :meth:`~bet.postProcess.sort_by_rho`. The ``descending`` flag determines whether or not to calcuate the highest/lowest. 
:param percentile: ratio of highest probability samples to select :type percentile: float :param sample_set: Object containing samples and probabilities :type sample_set: :class:`~bet.sample.sample_set_base` or :class:`~bet.sample.discretization` :type indices: :class:`numpy.ndarray` of shape (num_samples,) :param indices: sorting indices :param bool sort: Flag whether or not to sort :param bool descending: Flag order of sorting :param sample_set_out: Object containing sorted samples and probabilities :type sample_set_out: :class:`~bet.sample.sample_set` or :class:`~bet.sample.discretization` :rtype: tuple :returns: ( num_samples, sample_set_out, data) """ if isinstance(sample_set, sample.discretization): samples = sample_set._input_sample_set.get_values() P_samples = sample_set._input_sample_set.get_probabilities() lam_vol = sample_set._input_sample_set.get_volumes() data = sample_set._output_sample_set.get_values() elif isinstance(sample_set, sample.sample_set_base): samples = sample_set.get_values() P_samples = sample_set.get_probabilities() lam_vol = sample_set.get_volumes() data = None else: raise bad_object("Improper sample object") if sort: (sample_set, indices) = sort_by_rho(sample_set) if isinstance(sample_set, sample.discretization): samples = sample_set._input_sample_set.get_values() P_samples = sample_set._input_sample_set.get_probabilities() lam_vol = sample_set._input_sample_set.get_volumes() data = sample_set._output_sample_set.get_values() elif isinstance(sample_set, sample.sample_set_base): samples = sample_set.get_values() P_samples = sample_set.get_probabilities() lam_vol = sample_set.get_volumes() data = None if descending: P_samples = P_samples[::-1] samples = samples[::-1] if lam_vol is not None: lam_vol = lam_vol[::-1] if data is not None: data = data[::-1] indices = indices[::-1] P_sum = np.cumsum(P_samples) num_samples = np.sum(np.logical_and(0.0 < P_sum, P_sum <= percentile)) P_samples = P_samples[0:num_samples] samples = samples[0:num_samples, :] if lam_vol is not None: lam_vol = lam_vol[0:num_samples] if data is not None: if len(data.shape) == 1: data = np.expand_dims(data, axis=1) data = data[0:num_samples, :] if isinstance(sample_set, sample.discretization): samples_out = sample.sample_set(sample_set._input_sample_set.get_dim()) data_out = sample.sample_set(sample_set._output_sample_set.get_dim()) sample_set_out = sample.discretization(samples_out, data_out) sample_set_out._input_sample_set.set_values(samples) sample_set_out._input_sample_set.set_probabilities(P_samples) sample_set_out._input_sample_set.set_volumes(lam_vol) sample_set_out._output_sample_set.set_values(data) else: sample_set_out = sample.sample_set(sample_set.get_dim()) sample_set_out.set_values(samples) sample_set_out.set_probabilities(P_samples) sample_set_out.set_volumes(lam_vol) return (num_samples, sample_set_out, indices[0:num_samples]) def sample_highest_prob(top_percentile, sample_set, sort=True): """ This calculates the highest probability samples whose probability sum to a given value. The number of high probability samples that sum to the value, a new sample_set, and the indices are returned. This uses :meth:`~bet.postProcess.sort_by_rho`. 
:param top_percentile: ratio of highest probability samples to select :type top_percentile: float :param sample_set: Object containing samples and probabilities :type sample_set: :class:`~bet.sample.sample_set_base` or :class:`~bet.sample.discretization` :type indices: :class:`numpy.ndarray` of shape (num_samples,) :param indices: sorting indices :param bool sort: Flag whether or not to sort :param sample_set_out: Object containing sorted samples and probabilities :type sample_set_out: :class:`~bet.sample.sample_set` or :class:`~bet.sample.discretization` :rtype: tuple :returns: ( num_samples, sample_set_out, indices) """ return sample_prob(top_percentile, sample_set, sort) def sample_lowest_prob(bottom_percentile, sample_set, sort=True): """ This calculates the lowest probability samples whose probability sum to a given value. The number of low probability samples that sum to the value, a new sample_set, and the indices are returned. This uses :meth:`~bet.postProcess.sort_by_rho`. :param top_percentile: ratio of highest probability samples to select :type top_percentile: float :param sample_set: Object containing samples and probabilities :type sample_set: :class:`~bet.sample.sample_set_base` or :class:`~bet.sample.discretization` :type indices: :class:`numpy.ndarray` of shape (num_samples,) :param indices: sorting indices of unsorted ``P_samples`` :param bool sort: Flag whether or not to sort :param sample_set_out: Object containing sorted samples and probabilities :type sample_set_out: :class:`~bet.sample.sample_set` or :class:`~bet.sample.discretization` :rtype: tuple :returns: ( num_samples, sample_set_out, indices) """ return sample_prob(bottom_percentile, sample_set, sort, descending=True) def compare_yield(sort_ind, sample_quality, run_param, column_headings=None): """ .. todo:: Revisit to deprecate later. Compare the quality of samples where ``sample_quality`` is the measure of quality by which the sets of samples have been indexed and ``sort_ind`` is an array of the sorted indices. :param list sort_ind: indices that index ``sample_quality`` in sorted order :param list sample_quality: a measure of quality by which the sets of samples are sorted :param list run_param: zipped list of :class:`~numpy.ndarray` containing information used to generate the sets of samples to be displayed :param list column_headings: Column headings to print to screen """ raise PendingDeprecationWarning if column_headings is None: column_headings = "Run parameters" logging.info("Sample Set No., Quality, "+ str(column_headings)) for i in reversed(sort_ind): logging.info(i, sample_quality[i], np.round(run_param[i], 3)) def in_high_prob(data, rho_D, maximum, sample_nos=None): """ .. todo:: Revisit to deprecate later. Estimates the number of samples in high probability regions of D. :param data: Data associated with ``samples`` :type data: :class:`np.ndarray` :param rho_D: probability density on D :type rho_D: callable function that takes a :class:`np.array` and returns a :class:`np.ndarray` :param float maximum: maximum (or average) value of ``rho_D`` :param list sample_nos: sample numbers to plot :rtype: int :returns: Estimate of number of samples in the high probability area. 
""" raise PendingDeprecationWarning if sample_nos is None: sample_nos = range(data.shape[0]) if len(data.shape) == 1: rD = rho_D(data[sample_nos]) else: rD = rho_D(data[sample_nos, :]) adjusted_total_prob = int(sum(rD)/maximum) logging.info("Samples in box "+str(adjusted_total_prob)) return adjusted_total_prob def in_high_prob_multi(results_list, rho_D, maximum, sample_nos_list=None): """ .. todo:: Revisit to deprecate later. Estimates the number of samples in high probability regions of D for a list of results. :param list results_list: list of (results, data) tuples :param rho_D: probability density on D :type rho_D: callable function that takes a :class:`np.array` and returns a :class:`np.ndarray` :param float maximum: maximum (or average) value of ``rho_D`` :param list sample_nos_list: list of sample numbers to plot (list of lists) :rtype: list of int :returns: Estimate of number of samples in the high probability area. """ raise PendingDeprecationWarning adjusted_total_prob = list() if sample_nos_list: for result, sample_nos in zip(results_list, sample_nos_list): adjusted_total_prob.append(in_high_prob(result[1], rho_D, maximum, sample_nos)) else: for result in results_list: adjusted_total_prob.append(in_high_prob(result[1], rho_D, maximum)) return adjusted_total_prob
gpl-3.0
-573,817,645,803,026,240
38.187898
79
0.666802
false
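sort_by_rho in the record above orders samples by probability density (probability divided by cell volume), and sample_prob then keeps the highest-density samples whose cumulative probability stays within a percentile. A numpy-only sketch of that selection logic on made-up numbers:

import numpy as np

P = np.array([0.05, 0.40, 0.10, 0.25, 0.20])   # sample probabilities
lam_vol = np.ones_like(P)                      # uniform cell volumes

# Sort by density, highest first (cf. sort_by_rho).
indices = np.argsort(P / lam_vol)[::-1]
P_sorted = P[indices]

# Keep the highest-probability samples whose cumulative sum stays within 70% (cf. sample_prob).
percentile = 0.7
P_sum = np.cumsum(P_sorted)
num_samples = np.sum(np.logical_and(0.0 < P_sum, P_sum <= percentile))
print(indices[:num_samples], P_sorted[:num_samples])   # [1 3] [0.4  0.25]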
stackforge/monasca-api
monasca_api/common/policy/policy_engine.py
1
8717
# Copyright 2017 OP5 AB # Copyright 2017 FUJITSU LIMITED # Copyright (c) 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy import re import sys import logging from oslo_config import cfg from oslo_policy import policy from monasca_api.common.policy.i18n import _LW CONF = cfg.CONF LOG = logging.getLogger(__name__) POLICIES = None USER_BASED_RESOURCES = ['os-keypairs'] KEY_EXPR = re.compile(r'%\((\w+)\)s') _ENFORCER = None # oslo_policy will read the policy configuration file again when the file # is changed in runtime so the old policy rules will be saved to # saved_file_rules and used to compare with new rules to determine # whether the rules were updated. saved_file_rules = [] def reset(): """Reset Enforcer class.""" global _ENFORCER if _ENFORCER: _ENFORCER.clear() _ENFORCER = None def init(policy_file=None, rules=None, default_rule=None, use_conf=True): """Init an Enforcer class. :param policy_file: Custom policy file to use, if none is specified, `CONF.policy_file` will be used. :param rules: Default dictionary / Rules to use. It will be considered just in the first instantiation. :param default_rule: Default rule to use, CONF.default_rule will be used if none is specified. :param use_conf: Whether to load rules from config file. """ global _ENFORCER global saved_file_rules if not _ENFORCER: _ENFORCER = policy.Enforcer(CONF, policy_file=policy_file, rules=rules, default_rule=default_rule, use_conf=use_conf ) register_rules(_ENFORCER) _ENFORCER.load_rules() # Only the rules which are loaded from file may be changed current_file_rules = _ENFORCER.file_rules current_file_rules = _serialize_rules(current_file_rules) if saved_file_rules != current_file_rules: _warning_for_deprecated_user_based_rules(current_file_rules) saved_file_rules = copy.deepcopy(current_file_rules) def _serialize_rules(rules): """Serialize all the Rule object as string. New string is used to compare the rules list. """ result = [(rule_name, str(rule)) for rule_name, rule in rules.items()] return sorted(result, key=lambda rule: rule[0]) def _warning_for_deprecated_user_based_rules(rules): """Warning user based policy enforcement used in the rule but the rule doesn't support it. """ for rule in rules: # We will skip the warning for the resources which support user based # policy enforcement. if [resource for resource in USER_BASED_RESOURCES if resource in rule[0]]: continue if 'user_id' in KEY_EXPR.findall(rule[1]): LOG.warning(_LW("The user_id attribute isn't supported in the " "rule '%s'. All the user_id based policy " "enforcement will be removed in the " "future."), rule[0]) def register_rules(enforcer): """Register default policy rules.""" rules = POLICIES.list_rules() enforcer.register_defaults(rules) def authorize(context, action, target, do_raise=True): """Verify that the action is valid on the target in this context. :param context: monasca project context :param action: String representing the action to be checked. This should be colon separated for clarity. 
:param target: Dictionary representing the object of the action for object creation. This should be a dictionary representing the location of the object e.g. ``{'project_id': 'context.project_id'}`` :param do_raise: if True (the default), raises PolicyNotAuthorized, if False returns False :type context: object :type action: str :type target: dict :type do_raise: bool :return: returns a non-False value (not necessarily True) if authorized, and the False if not authorized and do_raise if False :raises oslo_policy.policy.PolicyNotAuthorized: if verification fails """ init() credentials = context.to_policy_values() try: result = _ENFORCER.authorize(action, target, credentials, do_raise=do_raise, action=action) return result except policy.PolicyNotRegistered: LOG.exception('Policy not registered') raise except Exception: LOG.debug('Policy check for %(action)s failed with credentials ' '%(credentials)s', {'action': action, 'credentials': credentials}) raise def check_is_admin(context): """Check if roles contains 'admin' role according to policy settings.""" init() credentials = context.to_policy_values() target = credentials return _ENFORCER.authorize('admin_required', target, credentials) def set_rules(rules, overwrite=True, use_conf=False): # pragma: no cover """Set rules based on the provided dict of rules. Note: Used in tests only. :param rules: New rules to use. It should be an instance of dict :param overwrite: Whether to overwrite current rules or update them with the new rules. :param use_conf: Whether to reload rules from config file. """ init(use_conf=False) _ENFORCER.set_rules(rules, overwrite, use_conf) def verify_deprecated_policy(old_policy, new_policy, default_rule, context): """Check the rule of the deprecated policy action If the current rule of the deprecated policy action is set to a non-default value, then a warning message is logged stating that the new policy action should be used to dictate permissions as the old policy action is being deprecated. :param old_policy: policy action that is being deprecated :param new_policy: policy action that is replacing old_policy :param default_rule: the old_policy action default rule value :param context: the monasca context """ if _ENFORCER: current_rule = str(_ENFORCER.rules[old_policy]) else: current_rule = None if current_rule != default_rule: LOG.warning("Start using the new action '{0}'. The existing " "action '{1}' is being deprecated and will be " "removed in future release.".format(new_policy, old_policy)) target = {'project_id': context.project_id, 'user_id': context.user_id} return authorize(context=context, action=old_policy, target=target) else: return False def get_rules(): if _ENFORCER: return _ENFORCER.rules def get_enforcer(): # This method is for use by oslopolicy CLI scripts. Those scripts need the # 'output-file' and 'namespace' options, but having those in sys.argv means # loading the project config options will fail as those are not expected to # be present. So we pass in an arg list with those stripped out. 
conf_args = [] # Start at 1 because cfg.CONF expects the equivalent of sys.argv[1:] i = 1 while i < len(sys.argv): if sys.argv[i].strip('-') in ['namespace', 'output-file']: i += 2 continue conf_args.append(sys.argv[i]) i += 1 cfg.CONF(conf_args, project='monasca') init() return _ENFORCER @policy.register('is_admin') class IsAdminCheck(policy.Check): """An explicit check for is_admin.""" def __init__(self, kind, match): """Initialize the check.""" self.expected = (match.lower() == 'true') super(IsAdminCheck, self).__init__(kind, str(self.expected)) def __call__(self, target, creds, enforcer): """Determine whether is_admin matches the requested value.""" return creds['is_admin'] == self.expected
apache-2.0
4,959,844,794,876,556,000
34.149194
79
0.63336
false
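get_enforcer in the record above strips the oslopolicy script options --namespace and --output-file (with their values) from sys.argv before handing the remainder to oslo.config. A standalone sketch of that filtering step; the function name strip_script_args is mine, not part of the module:

def strip_script_args(argv, to_skip=('namespace', 'output-file')):
    """Drop '--namespace X' / '--output-file X' pairs, keep every other argument."""
    conf_args = []
    i = 1                     # cfg.CONF expects the equivalent of sys.argv[1:]
    while i < len(argv):
        if argv[i].strip('-') in to_skip:
            i += 2            # skip the option and its value
            continue
        conf_args.append(argv[i])
        i += 1
    return conf_args

argv = ['oslopolicy-sample-generator', '--namespace', 'monasca',
        '--output-file', 'policy.yaml', '--config-file', 'api.conf']
print(strip_script_args(argv))   # ['--config-file', 'api.conf']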
jstoxrocky/statsmodels
statsmodels/regression/tests/test_regression.py
6
37622
""" Test functions for models.regression """ # TODO: Test for LM from statsmodels.compat.python import long, lrange import warnings import pandas import numpy as np from numpy.testing import (assert_almost_equal, assert_approx_equal, assert_raises, assert_equal, assert_allclose) from scipy.linalg import toeplitz from statsmodels.tools.tools import add_constant, categorical from statsmodels.compat.numpy import np_matrix_rank from statsmodels.regression.linear_model import OLS, WLS, GLS, yule_walker from statsmodels.datasets import longley from scipy.stats import t as student_t DECIMAL_4 = 4 DECIMAL_3 = 3 DECIMAL_2 = 2 DECIMAL_1 = 1 DECIMAL_7 = 7 DECIMAL_0 = 0 class CheckRegressionResults(object): """ res2 contains results from Rmodelwrap or were obtained from a statistical packages such as R, Stata, or SAS and were written to model_results """ decimal_params = DECIMAL_4 def test_params(self): assert_almost_equal(self.res1.params, self.res2.params, self.decimal_params) decimal_standarderrors = DECIMAL_4 def test_standarderrors(self): assert_almost_equal(self.res1.bse,self.res2.bse, self.decimal_standarderrors) decimal_confidenceintervals = DECIMAL_4 def test_confidenceintervals(self): #NOTE: stata rounds residuals (at least) to sig digits so approx_equal conf1 = self.res1.conf_int() conf2 = self.res2.conf_int() for i in range(len(conf1)): assert_approx_equal(conf1[i][0], conf2[i][0], self.decimal_confidenceintervals) assert_approx_equal(conf1[i][1], conf2[i][1], self.decimal_confidenceintervals) decimal_conf_int_subset = DECIMAL_4 def test_conf_int_subset(self): if len(self.res1.params) > 1: ci1 = self.res1.conf_int(cols=(1,2)) ci2 = self.res1.conf_int()[1:3] assert_almost_equal(ci1, ci2, self.decimal_conf_int_subset) else: pass decimal_scale = DECIMAL_4 def test_scale(self): assert_almost_equal(self.res1.scale, self.res2.scale, self.decimal_scale) decimal_rsquared = DECIMAL_4 def test_rsquared(self): assert_almost_equal(self.res1.rsquared, self.res2.rsquared, self.decimal_rsquared) decimal_rsquared_adj = DECIMAL_4 def test_rsquared_adj(self): assert_almost_equal(self.res1.rsquared_adj, self.res2.rsquared_adj, self.decimal_rsquared_adj) def test_degrees(self): assert_equal(self.res1.model.df_model, self.res2.df_model) assert_equal(self.res1.model.df_resid, self.res2.df_resid) decimal_ess = DECIMAL_4 def test_ess(self): #Explained Sum of Squares assert_almost_equal(self.res1.ess, self.res2.ess, self.decimal_ess) decimal_ssr = DECIMAL_4 def test_sumof_squaredresids(self): assert_almost_equal(self.res1.ssr, self.res2.ssr, self.decimal_ssr) decimal_mse_resid = DECIMAL_4 def test_mse_resid(self): #Mean squared error of residuals assert_almost_equal(self.res1.mse_model, self.res2.mse_model, self.decimal_mse_resid) decimal_mse_model = DECIMAL_4 def test_mse_model(self): assert_almost_equal(self.res1.mse_resid, self.res2.mse_resid, self.decimal_mse_model) decimal_mse_total = DECIMAL_4 def test_mse_total(self): assert_almost_equal(self.res1.mse_total, self.res2.mse_total, self.decimal_mse_total, err_msg="Test class %s" % self) decimal_fvalue = DECIMAL_4 def test_fvalue(self): #didn't change this, not sure it should complain -inf not equal -inf #if not (np.isinf(self.res1.fvalue) and np.isinf(self.res2.fvalue)): assert_almost_equal(self.res1.fvalue, self.res2.fvalue, self.decimal_fvalue) decimal_loglike = DECIMAL_4 def test_loglike(self): assert_almost_equal(self.res1.llf, self.res2.llf, self.decimal_loglike) decimal_aic = DECIMAL_4 def test_aic(self): assert_almost_equal(self.res1.aic, self.res2.aic, 
self.decimal_aic) decimal_bic = DECIMAL_4 def test_bic(self): assert_almost_equal(self.res1.bic, self.res2.bic, self.decimal_bic) decimal_pvalues = DECIMAL_4 def test_pvalues(self): assert_almost_equal(self.res1.pvalues, self.res2.pvalues, self.decimal_pvalues) decimal_wresid = DECIMAL_4 def test_wresid(self): assert_almost_equal(self.res1.wresid, self.res2.wresid, self.decimal_wresid) decimal_resids = DECIMAL_4 def test_resids(self): assert_almost_equal(self.res1.resid, self.res2.resid, self.decimal_resids) decimal_norm_resids = DECIMAL_4 def test_norm_resids(self): assert_almost_equal(self.res1.resid_pearson, self.res2.resid_pearson, self.decimal_norm_resids) #TODO: test fittedvalues and what else? class TestOLS(CheckRegressionResults): @classmethod def setupClass(cls): from .results.results_regression import Longley data = longley.load() data.exog = add_constant(data.exog, prepend=False) res1 = OLS(data.endog, data.exog).fit() res2 = Longley() res2.wresid = res1.wresid # workaround hack cls.res1 = res1 cls.res2 = res2 res_qr = OLS(data.endog, data.exog).fit(method="qr") model_qr = OLS(data.endog, data.exog) Q, R = np.linalg.qr(data.exog) model_qr.exog_Q, model_qr.exog_R = Q, R model_qr.normalized_cov_params = np.linalg.inv(np.dot(R.T, R)) model_qr.rank = np_matrix_rank(R) res_qr2 = model_qr.fit(method="qr") cls.res_qr = res_qr cls.res_qr_manual = res_qr2 def test_eigenvalues(self): eigenval_perc_diff = (self.res_qr.eigenvals - self.res_qr_manual.eigenvals) eigenval_perc_diff /= self.res_qr.eigenvals zeros = np.zeros_like(eigenval_perc_diff) assert_almost_equal(eigenval_perc_diff, zeros, DECIMAL_7) # Robust error tests. Compare values computed with SAS def test_HC0_errors(self): #They are split up because the copied results do not have any DECIMAL_4 #places for the last place. assert_almost_equal(self.res1.HC0_se[:-1], self.res2.HC0_se[:-1], DECIMAL_4) assert_approx_equal(np.round(self.res1.HC0_se[-1]), self.res2.HC0_se[-1]) def test_HC1_errors(self): assert_almost_equal(self.res1.HC1_se[:-1], self.res2.HC1_se[:-1], DECIMAL_4) assert_approx_equal(self.res1.HC1_se[-1], self.res2.HC1_se[-1]) def test_HC2_errors(self): assert_almost_equal(self.res1.HC2_se[:-1], self.res2.HC2_se[:-1], DECIMAL_4) assert_approx_equal(self.res1.HC2_se[-1], self.res2.HC2_se[-1]) def test_HC3_errors(self): assert_almost_equal(self.res1.HC3_se[:-1], self.res2.HC3_se[:-1], DECIMAL_4) assert_approx_equal(self.res1.HC3_se[-1], self.res2.HC3_se[-1]) def test_qr_params(self): assert_almost_equal(self.res1.params, self.res_qr.params, 6) def test_qr_normalized_cov_params(self): #todo: need assert_close assert_almost_equal(np.ones_like(self.res1.normalized_cov_params), self.res1.normalized_cov_params / self.res_qr.normalized_cov_params, 5) def test_missing(self): data = longley.load() data.exog = add_constant(data.exog, prepend=False) data.endog[[3, 7, 14]] = np.nan mod = OLS(data.endog, data.exog, missing='drop') assert_equal(mod.endog.shape[0], 13) assert_equal(mod.exog.shape[0], 13) def test_rsquared_adj_overfit(self): # Test that if df_resid = 0, rsquared_adj = 0. 
# This is a regression test for user issue: # https://github.com/statsmodels/statsmodels/issues/868 with warnings.catch_warnings(record=True): x = np.random.randn(5) y = np.random.randn(5, 6) results = OLS(x, y).fit() rsquared_adj = results.rsquared_adj assert_equal(rsquared_adj, np.nan) def test_qr_alternatives(self): assert_allclose(self.res_qr.params, self.res_qr_manual.params, rtol=5e-12) def test_norm_resid(self): resid = self.res1.wresid norm_resid = resid / np.sqrt(np.sum(resid**2.0) / self.res1.df_resid) model_norm_resid = self.res1.resid_pearson assert_almost_equal(model_norm_resid, norm_resid, DECIMAL_7) def test_norm_resid_zero_variance(self): with warnings.catch_warnings(record=True): y = self.res1.model.endog res = OLS(y,y).fit() assert_allclose(res.scale, 0, atol=1e-20) assert_allclose(res.wresid, res.resid_pearson, atol=5e-11) class TestRTO(CheckRegressionResults): @classmethod def setupClass(cls): from .results.results_regression import LongleyRTO data = longley.load() res1 = OLS(data.endog, data.exog).fit() res2 = LongleyRTO() res2.wresid = res1.wresid # workaround hack cls.res1 = res1 cls.res2 = res2 res_qr = OLS(data.endog, data.exog).fit(method="qr") cls.res_qr = res_qr class TestFtest(object): """ Tests f_test vs. RegressionResults """ @classmethod def setupClass(cls): data = longley.load() data.exog = add_constant(data.exog, prepend=False) cls.res1 = OLS(data.endog, data.exog).fit() R = np.identity(7)[:-1,:] cls.Ftest = cls.res1.f_test(R) def test_F(self): assert_almost_equal(self.Ftest.fvalue, self.res1.fvalue, DECIMAL_4) def test_p(self): assert_almost_equal(self.Ftest.pvalue, self.res1.f_pvalue, DECIMAL_4) def test_Df_denom(self): assert_equal(self.Ftest.df_denom, self.res1.model.df_resid) def test_Df_num(self): assert_equal(self.Ftest.df_num, 6) class TestFTest2(object): """ A joint test that the coefficient on GNP = the coefficient on UNEMP and that the coefficient on POP = the coefficient on YEAR for the Longley dataset. Ftest1 is from statsmodels. Results are from Rpy using R's car library. """ @classmethod def setupClass(cls): data = longley.load() data.exog = add_constant(data.exog, prepend=False) res1 = OLS(data.endog, data.exog).fit() R2 = [[0,1,-1,0,0,0,0],[0, 0, 0, 0, 1, -1, 0]] cls.Ftest1 = res1.f_test(R2) hyp = 'x2 = x3, x5 = x6' cls.NewFtest1 = res1.f_test(hyp) def test_new_ftest(self): assert_equal(self.NewFtest1.fvalue, self.Ftest1.fvalue) def test_fvalue(self): assert_almost_equal(self.Ftest1.fvalue, 9.7404618732968196, DECIMAL_4) def test_pvalue(self): assert_almost_equal(self.Ftest1.pvalue, 0.0056052885317493459, DECIMAL_4) def test_df_denom(self): assert_equal(self.Ftest1.df_denom, 9) def test_df_num(self): assert_equal(self.Ftest1.df_num, 2) class TestFtestQ(object): """ A joint hypothesis test that Rb = q. Coefficient tests are essentially made up. Test values taken from Stata. """ @classmethod def setupClass(cls): data = longley.load() data.exog = add_constant(data.exog, prepend=False) res1 = OLS(data.endog, data.exog).fit() R = np.array([[0,1,1,0,0,0,0], [0,1,0,1,0,0,0], [0,1,0,0,0,0,0], [0,0,0,0,1,0,0], [0,0,0,0,0,1,0]]) q = np.array([0,0,0,1,0]) cls.Ftest1 = res1.f_test((R,q)) def test_fvalue(self): assert_almost_equal(self.Ftest1.fvalue, 70.115557, 5) def test_pvalue(self): assert_almost_equal(self.Ftest1.pvalue, 6.229e-07, 10) def test_df_denom(self): assert_equal(self.Ftest1.df_denom, 9) def test_df_num(self): assert_equal(self.Ftest1.df_num, 5) class TestTtest(object): """ Test individual t-tests. 
Ie., are the coefficients significantly different than zero. """ @classmethod def setupClass(cls): data = longley.load() data.exog = add_constant(data.exog, prepend=False) cls.res1 = OLS(data.endog, data.exog).fit() R = np.identity(7) cls.Ttest = cls.res1.t_test(R) hyp = 'x1 = 0, x2 = 0, x3 = 0, x4 = 0, x5 = 0, x6 = 0, const = 0' cls.NewTTest = cls.res1.t_test(hyp) def test_new_tvalue(self): assert_equal(self.NewTTest.tvalue, self.Ttest.tvalue) def test_tvalue(self): assert_almost_equal(self.Ttest.tvalue, self.res1.tvalues, DECIMAL_4) def test_sd(self): assert_almost_equal(self.Ttest.sd, self.res1.bse, DECIMAL_4) def test_pvalue(self): assert_almost_equal(self.Ttest.pvalue, student_t.sf( np.abs(self.res1.tvalues), self.res1.model.df_resid)*2, DECIMAL_4) def test_df_denom(self): assert_equal(self.Ttest.df_denom, self.res1.model.df_resid) def test_effect(self): assert_almost_equal(self.Ttest.effect, self.res1.params) class TestTtest2(object): """ Tests the hypothesis that the coefficients on POP and YEAR are equal. Results from RPy using 'car' package. """ @classmethod def setupClass(cls): R = np.zeros(7) R[4:6] = [1,-1] data = longley.load() data.exog = add_constant(data.exog, prepend=False) res1 = OLS(data.endog, data.exog).fit() cls.Ttest1 = res1.t_test(R) def test_tvalue(self): assert_almost_equal(self.Ttest1.tvalue, -4.0167754636397284, DECIMAL_4) def test_sd(self): assert_almost_equal(self.Ttest1.sd, 455.39079425195314, DECIMAL_4) def test_pvalue(self): assert_almost_equal(self.Ttest1.pvalue, 2*0.0015163772380932246, DECIMAL_4) def test_df_denom(self): assert_equal(self.Ttest1.df_denom, 9) def test_effect(self): assert_almost_equal(self.Ttest1.effect, -1829.2025687186533, DECIMAL_4) class TestGLS(object): """ These test results were obtained by replication with R. 
""" @classmethod def setupClass(cls): from .results.results_regression import LongleyGls data = longley.load() exog = add_constant(np.column_stack((data.exog[:,1], data.exog[:,4])), prepend=False) tmp_results = OLS(data.endog, exog).fit() rho = np.corrcoef(tmp_results.resid[1:], tmp_results.resid[:-1])[0][1] # by assumption order = toeplitz(np.arange(16)) sigma = rho**order GLS_results = GLS(data.endog, exog, sigma=sigma).fit() cls.res1 = GLS_results cls.res2 = LongleyGls() # attach for test_missing cls.sigma = sigma cls.exog = exog cls.endog = data.endog def test_aic(self): assert_approx_equal(self.res1.aic+2, self.res2.aic, 3) def test_bic(self): assert_approx_equal(self.res1.bic, self.res2.bic, 2) def test_loglike(self): assert_almost_equal(self.res1.llf, self.res2.llf, DECIMAL_0) def test_params(self): assert_almost_equal(self.res1.params, self.res2.params, DECIMAL_1) def test_resid(self): assert_almost_equal(self.res1.resid, self.res2.resid, DECIMAL_4) def test_scale(self): assert_almost_equal(self.res1.scale, self.res2.scale, DECIMAL_4) def test_tvalues(self): assert_almost_equal(self.res1.tvalues, self.res2.tvalues, DECIMAL_4) def test_standarderrors(self): assert_almost_equal(self.res1.bse, self.res2.bse, DECIMAL_4) def test_fittedvalues(self): assert_almost_equal(self.res1.fittedvalues, self.res2.fittedvalues, DECIMAL_4) def test_pvalues(self): assert_almost_equal(self.res1.pvalues, self.res2.pvalues, DECIMAL_4) def test_missing(self): endog = self.endog.copy() # copy or changes endog for other methods endog[[4,7,14]] = np.nan mod = GLS(endog, self.exog, sigma=self.sigma, missing='drop') assert_equal(mod.endog.shape[0], 13) assert_equal(mod.exog.shape[0], 13) assert_equal(mod.sigma.shape, (13,13)) class TestGLS_alt_sigma(CheckRegressionResults): """ Test that GLS with no argument is equivalent to OLS. """ @classmethod def setupClass(cls): data = longley.load() data.exog = add_constant(data.exog, prepend=False) ols_res = OLS(data.endog, data.exog).fit() gls_res = GLS(data.endog, data.exog).fit() gls_res_scalar = GLS(data.endog, data.exog, sigma=1) cls.endog = data.endog cls.exog = data.exog cls.res1 = gls_res cls.res2 = ols_res cls.res3 = gls_res_scalar # self.res2.conf_int = self.res2.conf_int() def test_wrong_size_sigma_1d(self): n = len(self.endog) assert_raises(ValueError, GLS, self.endog, self.exog, sigma=np.ones(n-1)) def test_wrong_size_sigma_2d(self): n = len(self.endog) assert_raises(ValueError, GLS, self.endog, self.exog, sigma=np.ones((n-1,n-1))) # def check_confidenceintervals(self, conf1, conf2): # assert_almost_equal(conf1, conf2, DECIMAL_4) class TestLM(object): @classmethod def setupClass(cls): # TODO: Test HAC method X = np.random.randn(100,3) b = np.ones((3,1)) e = np.random.randn(100,1) y = np.dot(X,b) + e # Cases? 
# Homoskedastic # HC0 cls.res1_full = OLS(y,X).fit() cls.res1_restricted = OLS(y,X[:,0]).fit() cls.res2_full = cls.res1_full.get_robustcov_results('HC0') cls.res2_restricted = cls.res1_restricted.get_robustcov_results('HC0') cls.X = X cls.Y = y def test_LM_homoskedastic(self): resid = self.res1_restricted.wresid n = resid.shape[0] X = self.X S = np.dot(resid,resid) / n * np.dot(X.T,X) / n Sinv = np.linalg.inv(S) s = np.mean(X * resid[:,None], 0) LMstat = n * np.dot(np.dot(s,Sinv),s.T) LMstat_OLS = self.res1_full.compare_lm_test(self.res1_restricted) LMstat2 = LMstat_OLS[0] assert_almost_equal(LMstat, LMstat2, DECIMAL_7) def test_LM_heteroskedastic_nodemean(self): resid = self.res1_restricted.wresid n = resid.shape[0] X = self.X scores = X * resid[:,None] S = np.dot(scores.T,scores) / n Sinv = np.linalg.inv(S) s = np.mean(scores, 0) LMstat = n * np.dot(np.dot(s,Sinv),s.T) LMstat_OLS = self.res2_full.compare_lm_test(self.res2_restricted, demean=False) LMstat2 = LMstat_OLS[0] assert_almost_equal(LMstat, LMstat2, DECIMAL_7) def test_LM_heteroskedastic_demean(self): resid = self.res1_restricted.wresid n = resid.shape[0] X = self.X scores = X * resid[:,None] scores_demean = scores - scores.mean(0) S = np.dot(scores_demean.T,scores_demean) / n Sinv = np.linalg.inv(S) s = np.mean(scores, 0) LMstat = n * np.dot(np.dot(s,Sinv),s.T) LMstat_OLS = self.res2_full.compare_lm_test(self.res2_restricted) LMstat2 = LMstat_OLS[0] assert_almost_equal(LMstat, LMstat2, DECIMAL_7) def test_LM_heteroskedastic_LRversion(self): resid = self.res1_restricted.wresid resid_full = self.res1_full.wresid n = resid.shape[0] X = self.X scores = X * resid[:,None] s = np.mean(scores, 0) scores = X * resid_full[:,None] S = np.dot(scores.T,scores) / n Sinv = np.linalg.inv(S) LMstat = n * np.dot(np.dot(s,Sinv),s.T) LMstat_OLS = self.res2_full.compare_lm_test(self.res2_restricted, use_lr = True) LMstat2 = LMstat_OLS[0] assert_almost_equal(LMstat, LMstat2, DECIMAL_7) def test_LM_nonnested(self): assert_raises(ValueError, self.res2_restricted.compare_lm_test, self.res2_full) class TestOLS_GLS_WLS_equivalence(object): @classmethod def setupClass(cls): data = longley.load() data.exog = add_constant(data.exog, prepend=False) y = data.endog X = data.exog n = y.shape[0] w = np.ones(n) cls.results = [] cls.results.append(OLS(y, X).fit()) cls.results.append(WLS(y, X, w).fit()) cls.results.append(GLS(y, X, 100*w).fit()) cls.results.append(GLS(y, X, np.diag(0.1*w)).fit()) def test_ll(self): llf = np.array([r.llf for r in self.results]) llf_1 = np.ones_like(llf) * self.results[0].llf assert_almost_equal(llf, llf_1, DECIMAL_7) ic = np.array([r.aic for r in self.results]) ic_1 = np.ones_like(ic) * self.results[0].aic assert_almost_equal(ic, ic_1, DECIMAL_7) ic = np.array([r.bic for r in self.results]) ic_1 = np.ones_like(ic) * self.results[0].bic assert_almost_equal(ic, ic_1, DECIMAL_7) def test_params(self): params = np.array([r.params for r in self.results]) params_1 = np.array([self.results[0].params] * len(self.results)) assert_allclose(params, params_1) def test_ss(self): bse = np.array([r.bse for r in self.results]) bse_1 = np.array([self.results[0].bse] * len(self.results)) assert_allclose(bse, bse_1) def test_rsquared(self): rsquared = np.array([r.rsquared for r in self.results]) rsquared_1 = np.array([self.results[0].rsquared] * len(self.results)) assert_almost_equal(rsquared, rsquared_1, DECIMAL_7) class TestGLS_WLS_equivalence(TestOLS_GLS_WLS_equivalence): # reuse test methods @classmethod def setupClass(cls): data = longley.load() 
data.exog = add_constant(data.exog, prepend=False) y = data.endog X = data.exog n = y.shape[0] np.random.seed(5) w = np.random.uniform(0.5, 1, n) w_inv = 1. / w cls.results = [] cls.results.append(WLS(y, X, w).fit()) cls.results.append(WLS(y, X, 0.01 * w).fit()) cls.results.append(GLS(y, X, 100 * w_inv).fit()) cls.results.append(GLS(y, X, np.diag(0.1 * w_inv)).fit()) def test_rsquared(self): # TODO: WLS rsquared is ok, GLS might have wrong centered_tss # We only check that WLS and GLS rsquared is invariant to scaling # WLS and GLS have different rsquared assert_almost_equal(self.results[1].rsquared, self.results[0].rsquared, DECIMAL_7) assert_almost_equal(self.results[3].rsquared, self.results[2].rsquared, DECIMAL_7) class TestNonFit(object): @classmethod def setupClass(cls): data = longley.load() data.exog = add_constant(data.exog, prepend=False) cls.endog = data.endog cls.exog = data.exog cls.ols_model = OLS(data.endog, data.exog) def test_df_resid(self): df_resid = self.endog.shape[0] - self.exog.shape[1] assert_equal(self.ols_model.df_resid, long(9)) class TestWLS_CornerCases(object): @classmethod def setupClass(cls): cls.exog = np.ones((1,)) cls.endog = np.ones((1,)) weights = 1 cls.wls_res = WLS(cls.endog, cls.exog, weights=weights).fit() def test_wrong_size_weights(self): weights = np.ones((10,10)) assert_raises(ValueError, WLS, self.endog, self.exog, weights=weights) class TestWLSExogWeights(CheckRegressionResults): #Test WLS with Greene's credit card data #reg avgexp age income incomesq ownrent [aw=1/incomesq] def __init__(self): from .results.results_regression import CCardWLS from statsmodels.datasets.ccard import load dta = load() dta.exog = add_constant(dta.exog, prepend=False) nobs = 72. weights = 1/dta.exog[:,2] # for comparison with stata analytic weights scaled_weights = ((weights * nobs)/weights.sum()) self.res1 = WLS(dta.endog, dta.exog, weights=scaled_weights).fit() self.res2 = CCardWLS() self.res2.wresid = scaled_weights ** .5 * self.res2.resid # correction because we use different definition for loglike/llf corr_ic = 2 * (self.res1.llf - self.res2.llf) self.res2.aic -= corr_ic self.res2.bic -= corr_ic self.res2.llf += 0.5 * np.sum(np.log(self.res1.model.weights)) def test_wls_example(): #example from the docstring, there was a note about a bug, should #be fixed now Y = [1,3,4,5,2,3,4] X = lrange(1,8) X = add_constant(X, prepend=False) wls_model = WLS(Y,X, weights=lrange(1,8)).fit() #taken from R lm.summary assert_almost_equal(wls_model.fvalue, 0.127337843215, 6) assert_almost_equal(wls_model.scale, 2.44608530786**2, 6) def test_wls_tss(): y = np.array([22, 22, 22, 23, 23, 23]) X = [[1, 0], [1, 0], [1, 1], [0, 1], [0, 1], [0, 1]] ols_mod = OLS(y, add_constant(X, prepend=False)).fit() yw = np.array([22, 22, 23.]) Xw = [[1,0],[1,1],[0,1]] w = np.array([2, 1, 3.]) wls_mod = WLS(yw, add_constant(Xw, prepend=False), weights=w).fit() assert_equal(ols_mod.centered_tss, wls_mod.centered_tss) class TestWLSScalarVsArray(CheckRegressionResults): @classmethod def setupClass(cls): from statsmodels.datasets.longley import load dta = load() dta.exog = add_constant(dta.exog, prepend=True) wls_scalar = WLS(dta.endog, dta.exog, weights=1./3).fit() weights = [1/3.] 
* len(dta.endog) wls_array = WLS(dta.endog, dta.exog, weights=weights).fit() cls.res1 = wls_scalar cls.res2 = wls_array #class TestWLS_GLS(CheckRegressionResults): # @classmethod # def setupClass(cls): # from statsmodels.datasets.ccard import load # data = load() # cls.res1 = WLS(data.endog, data.exog, weights = 1/data.exog[:,2]).fit() # cls.res2 = GLS(data.endog, data.exog, sigma = data.exog[:,2]).fit() # # def check_confidenceintervals(self, conf1, conf2): # assert_almost_equal(conf1, conf2(), DECIMAL_4) def test_wls_missing(): from statsmodels.datasets.ccard import load data = load() endog = data.endog endog[[10, 25]] = np.nan mod = WLS(data.endog, data.exog, weights = 1/data.exog[:,2], missing='drop') assert_equal(mod.endog.shape[0], 70) assert_equal(mod.exog.shape[0], 70) assert_equal(mod.weights.shape[0], 70) class TestWLS_OLS(CheckRegressionResults): @classmethod def setupClass(cls): data = longley.load() data.exog = add_constant(data.exog, prepend=False) cls.res1 = OLS(data.endog, data.exog).fit() cls.res2 = WLS(data.endog, data.exog).fit() def check_confidenceintervals(self, conf1, conf2): assert_almost_equal(conf1, conf2(), DECIMAL_4) class TestGLS_OLS(CheckRegressionResults): @classmethod def setupClass(cls): data = longley.load() data.exog = add_constant(data.exog, prepend=False) cls.res1 = GLS(data.endog, data.exog).fit() cls.res2 = OLS(data.endog, data.exog).fit() def check_confidenceintervals(self, conf1, conf2): assert_almost_equal(conf1, conf2(), DECIMAL_4) #TODO: test AR # why the two-stage in AR? #class test_ar(object): # from statsmodels.datasets.sunspots import load # data = load() # model = AR(data.endog, rho=4).fit() # R_res = RModel(data.endog, aic="FALSE", order_max=4) # def test_params(self): # assert_almost_equal(self.model.rho, # pass # def test_order(self): # In R this can be defined or chosen by minimizing the AIC if aic=True # pass class TestYuleWalker(object): @classmethod def setupClass(cls): from statsmodels.datasets.sunspots import load data = load() cls.rho, cls.sigma = yule_walker(data.endog, order=4, method="mle") cls.R_params = [1.2831003105694765, -0.45240924374091945, -0.20770298557575195, 0.047943648089542337] def test_params(self): assert_almost_equal(self.rho, self.R_params, DECIMAL_4) class TestDataDimensions(CheckRegressionResults): @classmethod def setupClass(cls): np.random.seed(54321) cls.endog_n_ = np.random.uniform(0,20,size=30) cls.endog_n_one = cls.endog_n_[:,None] cls.exog_n_ = np.random.uniform(0,20,size=30) cls.exog_n_one = cls.exog_n_[:,None] cls.degen_exog = cls.exog_n_one[:-1] cls.mod1 = OLS(cls.endog_n_one, cls.exog_n_one) cls.mod1.df_model += 1 cls.res1 = cls.mod1.fit() # Note that these are created for every subclass.. 
# A little extra overhead probably cls.mod2 = OLS(cls.endog_n_one, cls.exog_n_one) cls.mod2.df_model += 1 cls.res2 = cls.mod2.fit() def check_confidenceintervals(self, conf1, conf2): assert_almost_equal(conf1, conf2(), DECIMAL_4) class TestGLS_large_data(TestDataDimensions): @classmethod def setupClass(cls): nobs = 1000 y = np.random.randn(nobs,1) X = np.random.randn(nobs,20) sigma = np.ones_like(y) cls.gls_res = GLS(y, X, sigma=sigma).fit() cls.gls_res_scalar = GLS(y, X, sigma=1).fit() cls.gls_res_none= GLS(y, X).fit() cls.ols_res = OLS(y, X).fit() def test_large_equal_params(self): assert_almost_equal(self.ols_res.params, self.gls_res.params, DECIMAL_7) def test_large_equal_loglike(self): assert_almost_equal(self.ols_res.llf, self.gls_res.llf, DECIMAL_7) def test_large_equal_params_none(self): assert_almost_equal(self.gls_res.params, self.gls_res_none.params, DECIMAL_7) class TestNxNx(TestDataDimensions): @classmethod def setupClass(cls): super(TestNxNx, cls).setupClass() cls.mod2 = OLS(cls.endog_n_, cls.exog_n_) cls.mod2.df_model += 1 cls.res2 = cls.mod2.fit() class TestNxOneNx(TestDataDimensions): @classmethod def setupClass(cls): super(TestNxOneNx, cls).setupClass() cls.mod2 = OLS(cls.endog_n_one, cls.exog_n_) cls.mod2.df_model += 1 cls.res2 = cls.mod2.fit() class TestNxNxOne(TestDataDimensions): @classmethod def setupClass(cls): super(TestNxNxOne, cls).setupClass() cls.mod2 = OLS(cls.endog_n_, cls.exog_n_one) cls.mod2.df_model += 1 cls.res2 = cls.mod2.fit() def test_bad_size(): np.random.seed(54321) data = np.random.uniform(0,20,31) assert_raises(ValueError, OLS, data, data[1:]) def test_const_indicator(): np.random.seed(12345) X = np.random.randint(0, 3, size=30) X = categorical(X, drop=True) y = np.dot(X, [1., 2., 3.]) + np.random.normal(size=30) modc = OLS(y, add_constant(X[:,1:], prepend=True)).fit() mod = OLS(y, X, hasconst=True).fit() assert_almost_equal(modc.rsquared, mod.rsquared, 12) def test_706(): # make sure one regressor pandas Series gets passed to DataFrame # for conf_int. y = pandas.Series(np.random.randn(10)) x = pandas.Series(np.ones(10)) res = OLS(y,x).fit() conf_int = res.conf_int() np.testing.assert_equal(conf_int.shape, (1, 2)) np.testing.assert_(isinstance(conf_int, pandas.DataFrame)) def test_summary(): # test 734 import re dta = longley.load_pandas() X = dta.exog X["constant"] = 1 y = dta.endog with warnings.catch_warnings(record=True): res = OLS(y, X).fit() table = res.summary().as_latex() # replace the date and time table = re.sub("(?<=\n\\\\textbf\{Date:\} &).+?&", " Sun, 07 Apr 2013 &", table) table = re.sub("(?<=\n\\\\textbf\{Time:\} &).+?&", " 13:46:07 &", table) expected = """\\begin{center} \\begin{tabular}{lclc} \\toprule \\textbf{Dep. Variable:} & TOTEMP & \\textbf{ R-squared: } & 0.995 \\\\ \\textbf{Model:} & OLS & \\textbf{ Adj. R-squared: } & 0.992 \\\\ \\textbf{Method:} & Least Squares & \\textbf{ F-statistic: } & 330.3 \\\\ \\textbf{Date:} & Sun, 07 Apr 2013 & \\textbf{ Prob (F-statistic):} & 4.98e-10 \\\\ \\textbf{Time:} & 13:46:07 & \\textbf{ Log-Likelihood: } & -109.62 \\\\ \\textbf{No. Observations:} & 16 & \\textbf{ AIC: } & 233.2 \\\\ \\textbf{Df Residuals:} & 9 & \\textbf{ BIC: } & 238.6 \\\\ \\textbf{Df Model:} & 6 & \\textbf{ } & \\\\ \\bottomrule \\end{tabular} \\begin{tabular}{lccccc} & \\textbf{coef} & \\textbf{std err} & \\textbf{t} & \\textbf{P$>$$|$t$|$} & \\textbf{[95.0\\% Conf. 
Int.]} \\\\ \\midrule \\textbf{GNPDEFL} & 15.0619 & 84.915 & 0.177 & 0.863 & -177.029 207.153 \\\\ \\textbf{GNP} & -0.0358 & 0.033 & -1.070 & 0.313 & -0.112 0.040 \\\\ \\textbf{UNEMP} & -2.0202 & 0.488 & -4.136 & 0.003 & -3.125 -0.915 \\\\ \\textbf{ARMED} & -1.0332 & 0.214 & -4.822 & 0.001 & -1.518 -0.549 \\\\ \\textbf{POP} & -0.0511 & 0.226 & -0.226 & 0.826 & -0.563 0.460 \\\\ \\textbf{YEAR} & 1829.1515 & 455.478 & 4.016 & 0.003 & 798.788 2859.515 \\\\ \\textbf{constant} & -3.482e+06 & 8.9e+05 & -3.911 & 0.004 & -5.5e+06 -1.47e+06 \\\\ \\bottomrule \\end{tabular} \\begin{tabular}{lclc} \\textbf{Omnibus:} & 0.749 & \\textbf{ Durbin-Watson: } & 2.559 \\\\ \\textbf{Prob(Omnibus):} & 0.688 & \\textbf{ Jarque-Bera (JB): } & 0.684 \\\\ \\textbf{Skew:} & 0.420 & \\textbf{ Prob(JB): } & 0.710 \\\\ \\textbf{Kurtosis:} & 2.434 & \\textbf{ Cond. No. } & 4.86e+09 \\\\ \\bottomrule \\end{tabular} %\\caption{OLS Regression Results} \\end{center}""" assert_equal(table, expected) class TestRegularizedFit(object): # Make sure there are no issues when there are no selected # variables. def test_empty_model(self): np.random.seed(742) n = 100 endog = np.random.normal(size=n) exog = np.random.normal(size=(n, 3)) model = OLS(endog, exog) result = model.fit_regularized(alpha=1000) assert_equal(result.params, 0.) assert_equal(result.bse, 0.) def test_regularized(self): import os from . import glmnet_r_results cur_dir = os.path.dirname(os.path.abspath(__file__)) data = np.loadtxt(os.path.join(cur_dir, "results", "lasso_data.csv"), delimiter=",") tests = [x for x in dir(glmnet_r_results) if x.startswith("rslt_")] for test in tests: vec = getattr(glmnet_r_results, test) n = vec[0] p = vec[1] L1_wt = float(vec[2]) lam = float(vec[3]) params = vec[4:].astype(np.float64) endog = data[0:n, 0] exog = data[0:n, 1:(p+1)] endog = endog - endog.mean() endog /= endog.std(ddof=1) exog = exog - exog.mean(0) exog /= exog.std(0, ddof=1) mod = OLS(endog, exog) rslt = mod.fit_regularized(L1_wt=L1_wt, alpha=lam) assert_almost_equal(rslt.params, params, decimal=3) # Smoke test for summary smry = rslt.summary() def test_formula_missing_cat(): # gh-805 import statsmodels.api as sm from statsmodels.formula.api import ols from patsy import PatsyError dta = sm.datasets.grunfeld.load_pandas().data dta.ix[0, 'firm'] = np.nan mod = ols(formula='value ~ invest + capital + firm + year', data=dta.dropna()) res = mod.fit() mod2 = ols(formula='value ~ invest + capital + firm + year', data=dta) res2 = mod2.fit() assert_almost_equal(res.params.values, res2.params.values) assert_raises(PatsyError, ols, 'value ~ invest + capital + firm + year', data=dta, missing='raise') def test_missing_formula_predict(): # see 2171 nsample = 30 data = pandas.DataFrame({'x': np.linspace(0, 10, nsample)}) null = pandas.DataFrame({'x': np.array([np.nan])}) data = pandas.concat([data, null]) beta = np.array([1, 0.1]) e = np.random.normal(size=nsample+1) data['y'] = beta[0] + beta[1] * data['x'] + e model = OLS.from_formula('y ~ x', data=data) fit = model.fit() pred = fit.predict(exog=data[:-1]) if __name__=="__main__": import nose # run_module_suite() nose.runmodule(argv=[__file__,'-vvs','-x','--pdb', '--pdb-failure'], exit=False) # nose.runmodule(argv=[__file__,'-vvs','-x'], exit=False) #, '--pdb'
bsd-3-clause
4,867,643,556,276,461,000
33.80296
131
0.590452
false
splav/servo
tests/wpt/web-platform-tests/common/security-features/subresource/font.py
16
4580
import os, sys, base64 sys.path.insert(0, os.path.dirname(os.path.abspath(__file__))) import subresource def generate_payload(request, server_data): data = ('{"headers": %(headers)s}') % server_data if "id" in request.GET: request.server.stash.put(request.GET["id"], data) # Simple base64 encoded .tff font return base64.decodestring("AAEAAAANAIAAAwBQRkZUTU6u6MkAAAXcAAAAHE9TLzJWYW" "QKAAABWAAAAFZjbWFwAA8D7wAAAcAAAAFCY3Z0IAAhAnkA" "AAMEAAAABGdhc3D//wADAAAF1AAAAAhnbHlmCC6aTwAAAx" "QAAACMaGVhZO8ooBcAAADcAAAANmhoZWEIkAV9AAABFAAA" "ACRobXR4EZQAhQAAAbAAAAAQbG9jYQBwAFQAAAMIAAAACm" "1heHAASQA9AAABOAAAACBuYW1lehAVOgAAA6AAAAIHcG9z" "dP+uADUAAAWoAAAAKgABAAAAAQAAMhPyuV8PPPUACwPoAA" "AAAMU4Lm0AAAAAxTgubQAh/5wFeAK8AAAACAACAAAAAAAA" "AAEAAAK8/5wAWgXcAAAAAAV4AAEAAAAAAAAAAAAAAAAAAA" "AEAAEAAAAEAAwAAwAAAAAAAgAAAAEAAQAAAEAALgAAAAAA" "AQXcAfQABQAAAooCvAAAAIwCigK8AAAB4AAxAQIAAAIABg" "kAAAAAAAAAAAABAAAAAAAAAAAAAAAAUGZFZABAAEEAQQMg" "/zgAWgK8AGQAAAABAAAAAAAABdwAIQAAAAAF3AAABdwAZA" "AAAAMAAAADAAAAHAABAAAAAAA8AAMAAQAAABwABAAgAAAA" "BAAEAAEAAABB//8AAABB////wgABAAAAAAAAAQYAAAEAAA" "AAAAAAAQIAAAACAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAA" "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAwAAAAAAAA" "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA" "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA" "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA" "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA" "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA" "AAAAAAAAAAAAAAAAAAAhAnkAAAAqACoAKgBGAAAAAgAhAA" "ABKgKaAAMABwAusQEALzyyBwQA7TKxBgXcPLIDAgDtMgCx" "AwAvPLIFBADtMrIHBgH8PLIBAgDtMjMRIREnMxEjIQEJ6M" "fHApr9ZiECWAAAAwBk/5wFeAK8AAMABwALAAABNSEVATUh" "FQE1IRUB9AH0/UQDhPu0BRQB9MjI/tTIyP7UyMgAAAAAAA" "4ArgABAAAAAAAAACYATgABAAAAAAABAAUAgQABAAAAAAAC" "AAYAlQABAAAAAAADACEA4AABAAAAAAAEAAUBDgABAAAAAA" "AFABABNgABAAAAAAAGAAUBUwADAAEECQAAAEwAAAADAAEE" "CQABAAoAdQADAAEECQACAAwAhwADAAEECQADAEIAnAADAA" "EECQAEAAoBAgADAAEECQAFACABFAADAAEECQAGAAoBRwBD" "AG8AcAB5AHIAaQBnAGgAdAAgACgAYwApACAAMgAwADAAOA" "AgAE0AbwB6AGkAbABsAGEAIABDAG8AcgBwAG8AcgBhAHQA" "aQBvAG4AAENvcHlyaWdodCAoYykgMjAwOCBNb3ppbGxhIE" "NvcnBvcmF0aW9uAABNAGEAcgBrAEEAAE1hcmtBAABNAGUA" "ZABpAHUAbQAATWVkaXVtAABGAG8AbgB0AEYAbwByAGcAZQ" "AgADIALgAwACAAOgAgAE0AYQByAGsAQQAgADoAIAA1AC0A" "MQAxAC0AMgAwADAAOAAARm9udEZvcmdlIDIuMCA6IE1hcm" "tBIDogNS0xMS0yMDA4AABNAGEAcgBrAEEAAE1hcmtBAABW" "AGUAcgBzAGkAbwBuACAAMAAwADEALgAwADAAMAAgAABWZX" "JzaW9uIDAwMS4wMDAgAABNAGEAcgBrAEEAAE1hcmtBAAAA" "AgAAAAAAAP+DADIAAAABAAAAAAAAAAAAAAAAAAAAAAAEAA" "AAAQACACQAAAAAAAH//wACAAAAAQAAAADEPovuAAAAAMU4" "Lm0AAAAAxTgubQ=="); def generate_report_headers_payload(request, server_data): stashed_data = request.server.stash.take(request.GET["id"]) return stashed_data def main(request, response): handler = lambda data: generate_payload(request, data) content_type = 'application/x-font-truetype' if "report-headers" in request.GET: handler = lambda data: generate_report_headers_payload(request, data) content_type = 'application/json' subresource.respond(request, response, payload_generator = handler, content_type = content_type, access_control_allow_origin = "*")
mpl-2.0
-2,519,275,568,950,266,400
62.611111
79
0.586463
false
ryfeus/lambda-packs
Keras_tensorflow_nightly/source2.7/tensorflow/contrib/factorization/python/ops/gen_factorization_ops.py
1
11829
"""Python wrappers around TensorFlow ops. This file is MACHINE GENERATED! Do not edit. Original C++ source file: gen_factorization_ops.cc """ import collections as _collections import six as _six from tensorflow.python import pywrap_tensorflow as _pywrap_tensorflow from tensorflow.python.eager import context as _context from tensorflow.python.eager import core as _core from tensorflow.python.eager import execute as _execute from tensorflow.python.framework import dtypes as _dtypes from tensorflow.python.framework import errors as _errors from tensorflow.python.framework import tensor_shape as _tensor_shape from tensorflow.core.framework import op_def_pb2 as _op_def_pb2 # Needed to trigger the call to _set_call_cpp_shape_fn. from tensorflow.python.framework import common_shapes as _common_shapes from tensorflow.python.framework import op_def_registry as _op_def_registry from tensorflow.python.framework import ops as _ops from tensorflow.python.framework import op_def_library as _op_def_library from tensorflow.python.util.tf_export import tf_export @tf_export('masked_matmul') def masked_matmul(a, b, mask_indices, transpose_a, transpose_b, name=None): r"""Computes the product a * b, but only for indices (i, j) in mask_indices. The result is stored in prod_values, a rank 1 tensor, such that for all i, prod_values[i] = (a * b)[mask_indices[i, 0], mask_indices[i, 1]]. Note that the shapes of the input matrices a, b should be compatible (after transposing as specified by the arguments transpose_a and transpose_b). Input arguments: Args: a: A `Tensor` of type `float32`. A rank 2 tensor of shape [m, n]. b: A `Tensor` of type `float32`. A rank 2 tensor of shape [s, t]. The inner dimensions of a and b should match after transposition. mask_indices: A `Tensor` of type `int64`. A rank 2 tensor, of shape [nnz, 2] where nnz is the number of non-zero elements in the output. The indices are not assumed to be in lexicographic, or any particular order. For all i, mask_indices[i, :] should represent a valid index of the product matrix (a * b) (after transposition). That is: mask_indices[i, 0] should be in [0, m) if !transpose_a, and in [0, n) otherwise. mask_indices[i, 1] should be in [0, t) if !transpose_b, and in [0, s) otherwise. transpose_a: A `Tensor` of type `bool`. A boolean, specifies whether to transpose the matrix a. transpose_b: A `Tensor` of type `bool`. A boolean, specifies whether to transpose the matrix b. Output arguments: name: A name for the operation (optional). Returns: A `Tensor` of type `float32`. A rank 1 tensor of shape [nnz], representing the values of the non-zero elements in the product, such that for all i, prod_values[i] = (a * b)[mask_indices[i, 0], mask_indices[i, 1]]. 
""" _ctx = _context._context if _ctx is None or not _ctx._eager_context.is_eager: _, _, _op = _op_def_lib._apply_op_helper( "MaskedMatmul", a=a, b=b, mask_indices=mask_indices, transpose_a=transpose_a, transpose_b=transpose_b, name=name) _result = _op.outputs[:] _inputs_flat = _op.inputs _attrs = None _execute.record_gradient( "MaskedMatmul", _inputs_flat, _attrs, _result, name) _result, = _result return _result else: try: _result = _pywrap_tensorflow.TFE_Py_FastPathExecute( _ctx._context_handle, _ctx._eager_context.device_name, "MaskedMatmul", name, _ctx._post_execution_callbacks, a, b, mask_indices, transpose_a, transpose_b) return _result except _core._FallbackException: return masked_matmul_eager_fallback( a, b, mask_indices, transpose_a, transpose_b, name=name, ctx=_ctx) except _core._NotOkStatusException as e: if name is not None: message = e.message + " name: " + name else: message = e.message _six.raise_from(_core._status_to_exception(e.code, message), None) def masked_matmul_eager_fallback(a, b, mask_indices, transpose_a, transpose_b, name=None, ctx=None): r"""This is the slowpath function for Eager mode. This is for function masked_matmul """ _ctx = ctx if ctx else _context.context() a = _ops.convert_to_tensor(a, _dtypes.float32) b = _ops.convert_to_tensor(b, _dtypes.float32) mask_indices = _ops.convert_to_tensor(mask_indices, _dtypes.int64) transpose_a = _ops.convert_to_tensor(transpose_a, _dtypes.bool) transpose_b = _ops.convert_to_tensor(transpose_b, _dtypes.bool) _inputs_flat = [a, b, mask_indices, transpose_a, transpose_b] _attrs = None _result = _execute.execute(b"MaskedMatmul", 1, inputs=_inputs_flat, attrs=_attrs, ctx=_ctx, name=name) _execute.record_gradient( "MaskedMatmul", _inputs_flat, _attrs, _result, name) _result, = _result return _result _ops.RegisterShape("MaskedMatmul")(None) _wals_compute_partial_lhs_and_rhs_outputs = ["partial_lhs", "partial_rhs"] _WALSComputePartialLhsAndRhsOutput = _collections.namedtuple( "WALSComputePartialLhsAndRhs", _wals_compute_partial_lhs_and_rhs_outputs) @tf_export('wals_compute_partial_lhs_and_rhs') def wals_compute_partial_lhs_and_rhs(factors, factor_weights, unobserved_weights, input_weights, input_indices, input_values, input_block_size, input_is_transpose, name=None): r"""Computes the partial left-hand side and right-hand side of WALS update. Args: factors: A `Tensor` of type `float32`. Matrix of size m * k. factor_weights: A `Tensor` of type `float32`. Vector of size m. Corresponds to column weights unobserved_weights: A `Tensor` of type `float32`. Scalar. Weight for unobserved input entries. input_weights: A `Tensor` of type `float32`. Vector of size n. Corresponds to row weights. input_indices: A `Tensor` of type `int64`. Indices for the input SparseTensor. input_values: A `Tensor` of type `float32`. Values for the input SparseTensor. input_block_size: A `Tensor` of type `int64`. Scalar. Number of rows spanned by input. input_is_transpose: A `Tensor` of type `bool`. If true, logically transposes the input for processing. name: A name for the operation (optional). Returns: A tuple of `Tensor` objects (partial_lhs, partial_rhs). partial_lhs: A `Tensor` of type `float32`. 3-D tensor with size input_block_size x k x k. partial_rhs: A `Tensor` of type `float32`. Matrix with size input_block_size x k. 
""" _ctx = _context._context if _ctx is None or not _ctx._eager_context.is_eager: _, _, _op = _op_def_lib._apply_op_helper( "WALSComputePartialLhsAndRhs", factors=factors, factor_weights=factor_weights, unobserved_weights=unobserved_weights, input_weights=input_weights, input_indices=input_indices, input_values=input_values, input_block_size=input_block_size, input_is_transpose=input_is_transpose, name=name) _result = _op.outputs[:] _inputs_flat = _op.inputs _attrs = None _execute.record_gradient( "WALSComputePartialLhsAndRhs", _inputs_flat, _attrs, _result, name) _result = _WALSComputePartialLhsAndRhsOutput._make(_result) return _result else: try: _result = _pywrap_tensorflow.TFE_Py_FastPathExecute( _ctx._context_handle, _ctx._eager_context.device_name, "WALSComputePartialLhsAndRhs", name, _ctx._post_execution_callbacks, factors, factor_weights, unobserved_weights, input_weights, input_indices, input_values, input_block_size, input_is_transpose) _result = _WALSComputePartialLhsAndRhsOutput._make(_result) return _result except _core._FallbackException: return wals_compute_partial_lhs_and_rhs_eager_fallback( factors, factor_weights, unobserved_weights, input_weights, input_indices, input_values, input_block_size, input_is_transpose, name=name, ctx=_ctx) except _core._NotOkStatusException as e: if name is not None: message = e.message + " name: " + name else: message = e.message _six.raise_from(_core._status_to_exception(e.code, message), None) def wals_compute_partial_lhs_and_rhs_eager_fallback(factors, factor_weights, unobserved_weights, input_weights, input_indices, input_values, input_block_size, input_is_transpose, name=None, ctx=None): r"""This is the slowpath function for Eager mode. This is for function wals_compute_partial_lhs_and_rhs """ _ctx = ctx if ctx else _context.context() factors = _ops.convert_to_tensor(factors, _dtypes.float32) factor_weights = _ops.convert_to_tensor(factor_weights, _dtypes.float32) unobserved_weights = _ops.convert_to_tensor(unobserved_weights, _dtypes.float32) input_weights = _ops.convert_to_tensor(input_weights, _dtypes.float32) input_indices = _ops.convert_to_tensor(input_indices, _dtypes.int64) input_values = _ops.convert_to_tensor(input_values, _dtypes.float32) input_block_size = _ops.convert_to_tensor(input_block_size, _dtypes.int64) input_is_transpose = _ops.convert_to_tensor(input_is_transpose, _dtypes.bool) _inputs_flat = [factors, factor_weights, unobserved_weights, input_weights, input_indices, input_values, input_block_size, input_is_transpose] _attrs = None _result = _execute.execute(b"WALSComputePartialLhsAndRhs", 2, inputs=_inputs_flat, attrs=_attrs, ctx=_ctx, name=name) _execute.record_gradient( "WALSComputePartialLhsAndRhs", _inputs_flat, _attrs, _result, name) _result = _WALSComputePartialLhsAndRhsOutput._make(_result) return _result _ops.RegisterShape("WALSComputePartialLhsAndRhs")(None) def _InitOpDefLibrary(op_list_proto_bytes): op_list = _op_def_pb2.OpList() op_list.ParseFromString(op_list_proto_bytes) _op_def_registry.register_op_list(op_list) op_def_lib = _op_def_library.OpDefLibrary() op_def_lib.add_op_list(op_list) return op_def_lib # op { # name: "MaskedMatmul" # input_arg { # name: "a" # type: DT_FLOAT # } # input_arg { # name: "b" # type: DT_FLOAT # } # input_arg { # name: "mask_indices" # type: DT_INT64 # } # input_arg { # name: "transpose_a" # type: DT_BOOL # } # input_arg { # name: "transpose_b" # type: DT_BOOL # } # output_arg { # name: "prod_values" # type: DT_FLOAT # } # } # op { # name: "WALSComputePartialLhsAndRhs" # 
input_arg { # name: "factors" # type: DT_FLOAT # } # input_arg { # name: "factor_weights" # type: DT_FLOAT # } # input_arg { # name: "unobserved_weights" # type: DT_FLOAT # } # input_arg { # name: "input_weights" # type: DT_FLOAT # } # input_arg { # name: "input_indices" # type: DT_INT64 # } # input_arg { # name: "input_values" # type: DT_FLOAT # } # input_arg { # name: "input_block_size" # type: DT_INT64 # } # input_arg { # name: "input_is_transpose" # type: DT_BOOL # } # output_arg { # name: "partial_lhs" # type: DT_FLOAT # } # output_arg { # name: "partial_rhs" # type: DT_FLOAT # } # } _op_def_lib = _InitOpDefLibrary(b"\na\n\014MaskedMatmul\022\005\n\001a\030\001\022\005\n\001b\030\001\022\020\n\014mask_indices\030\t\022\017\n\013transpose_a\030\n\022\017\n\013transpose_b\030\n\032\017\n\013prod_values\030\001\n\336\001\n\033WALSComputePartialLhsAndRhs\022\013\n\007factors\030\001\022\022\n\016factor_weights\030\001\022\026\n\022unobserved_weights\030\001\022\021\n\rinput_weights\030\001\022\021\n\rinput_indices\030\t\022\020\n\014input_values\030\001\022\024\n\020input_block_size\030\t\022\026\n\022input_is_transpose\030\n\032\017\n\013partial_lhs\030\001\032\017\n\013partial_rhs\030\001")
mit
2,590,565,299,051,894,300
39.372014
616
0.679263
false
rmboggs/django
tests/forms_tests/widget_tests/test_splitdatetimewidget.py
202
1943
from datetime import date, datetime, time from django.forms import SplitDateTimeWidget from .base import WidgetTest class SplitDateTimeWidgetTest(WidgetTest): widget = SplitDateTimeWidget() def test_render_empty(self): self.check_html(self.widget, 'date', '', html=( '<input type="text" name="date_0" /><input type="text" name="date_1" />' )) def test_render_none(self): self.check_html(self.widget, 'date', None, html=( '<input type="text" name="date_0" /><input type="text" name="date_1" />' )) def test_render_datetime(self): self.check_html(self.widget, 'date', datetime(2006, 1, 10, 7, 30), html=( '<input type="text" name="date_0" value="2006-01-10" />' '<input type="text" name="date_1" value="07:30:00" />' )) def test_render_date_and_time(self): self.check_html(self.widget, 'date', [date(2006, 1, 10), time(7, 30)], html=( '<input type="text" name="date_0" value="2006-01-10" />' '<input type="text" name="date_1" value="07:30:00" />' )) def test_constructor_attrs(self): widget = SplitDateTimeWidget(attrs={'class': 'pretty'}) self.check_html(widget, 'date', datetime(2006, 1, 10, 7, 30), html=( '<input type="text" class="pretty" value="2006-01-10" name="date_0" />' '<input type="text" class="pretty" value="07:30:00" name="date_1" />' )) def test_formatting(self): """ Use 'date_format' and 'time_format' to change the way a value is displayed. """ widget = SplitDateTimeWidget( date_format='%d/%m/%Y', time_format='%H:%M', ) self.check_html(widget, 'date', datetime(2006, 1, 10, 7, 30), html=( '<input type="text" name="date_0" value="10/01/2006" />' '<input type="text" name="date_1" value="07:30" />' ))
bsd-3-clause
-1,318,895,577,394,594,000
37.098039
85
0.559444
false
DIYgod/python
DIYgod/0006/important_word.py
1
1368
# -*- coding: utf-8 -*- # Problem 0006: You have a directory holding a month's worth of diary entries, all txt files. To avoid word-segmentation issues, assume the content is all English; report the word you consider most important in each diary entry. import re import os # Get all files in designated path def get_files(path): filepath = os.listdir(path) files = [] for fp in filepath: fppath = path + '/' + fp if(os.path.isfile(fppath)): files.append(fppath) elif(os.path.isdir(fppath)): files += get_files(fppath) return files # Get the most popular word in designated files def get_important_word(files): worddict = {} for filename in files: f = open(filename, 'rb') s = f.read() words = re.findall(r'[a-zA-Z0-9]+', s) for word in words: worddict[word] = worddict[word] + 1 if word in worddict else 1 f.close() wordsort = sorted(worddict.items(), key=lambda e:e[1], reverse=True) return wordsort if __name__ == '__main__': files = get_files('.') print files wordsort = get_important_word(files) # avoid missing the case where several words tie for the maximum count maxnum = 1 for i in range(len(wordsort) - 1): if wordsort[i][1] == wordsort[i + 1][1]: maxnum += 1 else: break for i in range(maxnum): print wordsort[i]
mit
9,018,017,784,309,201,000
26.818182
74
0.575163
false
ropable/resource_tracking
tracking/migrations/0006_auto_20200213_1716.py
1
1161
# Generated by Django 2.1.11 on 2020-02-13 09:16 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('tracking', '0005_device_hidden'), ] operations = [ migrations.AlterField( model_name='device', name='hidden', field=models.BooleanField(default=True, verbose_name='Hidden/private use'), ), migrations.AlterField( model_name='device', name='source_device_type', field=models.CharField(choices=[('tracplus', 'TracPlus'), ('iriditrak', 'Iriditrak'), ('dplus', 'DPlus'), ('spot', 'Spot'), ('dfes', 'DFES'), ('mp70', 'MP70'), ('fleetcare', 'fleetcare'), ('other', 'Other')], default='other', max_length=32), ), migrations.AlterField( model_name='loggedpoint', name='source_device_type', field=models.CharField(choices=[('tracplus', 'TracPlus'), ('iriditrak', 'Iriditrak'), ('dplus', 'DPlus'), ('spot', 'Spot'), ('dfes', 'DFES'), ('mp70', 'MP70'), ('fleetcare', 'fleetcare'), ('other', 'Other')], default='other', max_length=32), ), ]
bsd-3-clause
-1,708,325,020,738,498,800
40.464286
253
0.569337
false
chiamingyen/pygroup
wsgi/static/Brython2.1.4-20140810-083054/Lib/functools.py
730
13596
"""functools.py - Tools for working with functions and callable objects """ # Python module wrapper for _functools C module # to allow utilities written in Python to be added # to the functools module. # Written by Nick Coghlan <ncoghlan at gmail.com> # and Raymond Hettinger <python at rcn.com> # Copyright (C) 2006-2010 Python Software Foundation. # See C source code for _functools credits/copyright __all__ = ['update_wrapper', 'wraps', 'WRAPPER_ASSIGNMENTS', 'WRAPPER_UPDATES', 'total_ordering', 'cmp_to_key', 'lru_cache', 'reduce', 'partial'] from _functools import partial, reduce from collections import namedtuple try: from _thread import RLock except: class RLock: 'Dummy reentrant lock for builds without threads' def __enter__(self): pass def __exit__(self, exctype, excinst, exctb): pass ################################################################################ ### update_wrapper() and wraps() decorator ################################################################################ # update_wrapper() and wraps() are tools to help write # wrapper functions that can handle naive introspection WRAPPER_ASSIGNMENTS = ('__module__', '__name__', '__qualname__', '__doc__', '__annotations__') WRAPPER_UPDATES = ('__dict__',) def update_wrapper(wrapper, wrapped, assigned = WRAPPER_ASSIGNMENTS, updated = WRAPPER_UPDATES): """Update a wrapper function to look like the wrapped function wrapper is the function to be updated wrapped is the original function assigned is a tuple naming the attributes assigned directly from the wrapped function to the wrapper function (defaults to functools.WRAPPER_ASSIGNMENTS) updated is a tuple naming the attributes of the wrapper that are updated with the corresponding attribute from the wrapped function (defaults to functools.WRAPPER_UPDATES) """ wrapper.__wrapped__ = wrapped for attr in assigned: try: value = getattr(wrapped, attr) except AttributeError: pass else: setattr(wrapper, attr, value) for attr in updated: getattr(wrapper, attr).update(getattr(wrapped, attr, {})) # Return the wrapper so this can be used as a decorator via partial() return wrapper def wraps(wrapped, assigned = WRAPPER_ASSIGNMENTS, updated = WRAPPER_UPDATES): """Decorator factory to apply update_wrapper() to a wrapper function Returns a decorator that invokes update_wrapper() with the decorated function as the wrapper argument and the arguments to wraps() as the remaining arguments. Default arguments are as for update_wrapper(). This is a convenience function to simplify applying partial() to update_wrapper(). 
""" return partial(update_wrapper, wrapped=wrapped, assigned=assigned, updated=updated) ################################################################################ ### total_ordering class decorator ################################################################################ def total_ordering(cls): """Class decorator that fills in missing ordering methods""" convert = { '__lt__': [('__gt__', lambda self, other: not (self < other or self == other)), ('__le__', lambda self, other: self < other or self == other), ('__ge__', lambda self, other: not self < other)], '__le__': [('__ge__', lambda self, other: not self <= other or self == other), ('__lt__', lambda self, other: self <= other and not self == other), ('__gt__', lambda self, other: not self <= other)], '__gt__': [('__lt__', lambda self, other: not (self > other or self == other)), ('__ge__', lambda self, other: self > other or self == other), ('__le__', lambda self, other: not self > other)], '__ge__': [('__le__', lambda self, other: (not self >= other) or self == other), ('__gt__', lambda self, other: self >= other and not self == other), ('__lt__', lambda self, other: not self >= other)] } # Find user-defined comparisons (not those inherited from object). roots = [op for op in convert if getattr(cls, op, None) is not getattr(object, op, None)] if not roots: raise ValueError('must define at least one ordering operation: < > <= >=') root = max(roots) # prefer __lt__ to __le__ to __gt__ to __ge__ for opname, opfunc in convert[root]: if opname not in roots: opfunc.__name__ = opname opfunc.__doc__ = getattr(int, opname).__doc__ setattr(cls, opname, opfunc) return cls ################################################################################ ### cmp_to_key() function converter ################################################################################ def cmp_to_key(mycmp): """Convert a cmp= function into a key= function""" class K(object): __slots__ = ['obj'] def __init__(self, obj): self.obj = obj def __lt__(self, other): return mycmp(self.obj, other.obj) < 0 def __gt__(self, other): return mycmp(self.obj, other.obj) > 0 def __eq__(self, other): return mycmp(self.obj, other.obj) == 0 def __le__(self, other): return mycmp(self.obj, other.obj) <= 0 def __ge__(self, other): return mycmp(self.obj, other.obj) >= 0 def __ne__(self, other): return mycmp(self.obj, other.obj) != 0 __hash__ = None return K try: from _functools import cmp_to_key except ImportError: pass ################################################################################ ### LRU Cache function decorator ################################################################################ _CacheInfo = namedtuple("CacheInfo", ["hits", "misses", "maxsize", "currsize"]) class _HashedSeq(list): """ This class guarantees that hash() will be called no more than once per element. This is important because the lru_cache() will hash the key multiple times on a cache miss. """ __slots__ = 'hashvalue' def __init__(self, tup, hash=hash): self[:] = tup self.hashvalue = hash(tup) def __hash__(self): return self.hashvalue def _make_key(args, kwds, typed, kwd_mark = (object(),), fasttypes = {int, str, frozenset, type(None)}, sorted=sorted, tuple=tuple, type=type, len=len): """Make a cache key from optionally typed positional and keyword arguments The key is constructed in a way that is flat as possible rather than as a nested structure that would take more memory. 
If there is only a single argument and its data type is known to cache its hash value, then that argument is returned without a wrapper. This saves space and improves lookup speed. """ key = args if kwds: sorted_items = sorted(kwds.items()) key += kwd_mark for item in sorted_items: key += item if typed: key += tuple(type(v) for v in args) if kwds: key += tuple(type(v) for k, v in sorted_items) elif len(key) == 1 and type(key[0]) in fasttypes: return key[0] return _HashedSeq(key) def lru_cache(maxsize=128, typed=False): """Least-recently-used cache decorator. If *maxsize* is set to None, the LRU features are disabled and the cache can grow without bound. If *typed* is True, arguments of different types will be cached separately. For example, f(3.0) and f(3) will be treated as distinct calls with distinct results. Arguments to the cached function must be hashable. View the cache statistics named tuple (hits, misses, maxsize, currsize) with f.cache_info(). Clear the cache and statistics with f.cache_clear(). Access the underlying function with f.__wrapped__. See: http://en.wikipedia.org/wiki/Cache_algorithms#Least_Recently_Used """ # Users should only access the lru_cache through its public API: # cache_info, cache_clear, and f.__wrapped__ # The internals of the lru_cache are encapsulated for thread safety and # to allow the implementation to change (including a possible C version). # Constants shared by all lru cache instances: sentinel = object() # unique object used to signal cache misses make_key = _make_key # build a key from the function arguments PREV, NEXT, KEY, RESULT = 0, 1, 2, 3 # names for the link fields def decorating_function(user_function): cache = {} hits = misses = 0 full = False cache_get = cache.get # bound method to lookup a key or return None lock = RLock() # because linkedlist updates aren't threadsafe root = [] # root of the circular doubly linked list root[:] = [root, root, None, None] # initialize by pointing to self if maxsize == 0: def wrapper(*args, **kwds): # No caching -- just a statistics update after a successful call nonlocal misses result = user_function(*args, **kwds) misses += 1 return result elif maxsize is None: def wrapper(*args, **kwds): # Simple caching without ordering or size limit nonlocal hits, misses key = make_key(args, kwds, typed) result = cache_get(key, sentinel) if result is not sentinel: hits += 1 return result result = user_function(*args, **kwds) cache[key] = result misses += 1 return result else: def wrapper(*args, **kwds): # Size limited caching that tracks accesses by recency nonlocal root, hits, misses, full key = make_key(args, kwds, typed) with lock: link = cache_get(key) if link is not None: # Move the link to the front of the circular queue link_prev, link_next, _key, result = link link_prev[NEXT] = link_next link_next[PREV] = link_prev last = root[PREV] last[NEXT] = root[PREV] = link link[PREV] = last link[NEXT] = root hits += 1 return result result = user_function(*args, **kwds) with lock: if key in cache: # Getting here means that this same key was added to the # cache while the lock was released. Since the link # update is already done, we need only return the # computed result and update the count of misses. pass elif full: # Use the old root to store the new key and result. oldroot = root oldroot[KEY] = key oldroot[RESULT] = result # Empty the oldest link and make it the new root. # Keep a reference to the old key and old result to # prevent their ref counts from going to zero during the # update. 
That will prevent potentially arbitrary object # clean-up code (i.e. __del__) from running while we're # still adjusting the links. root = oldroot[NEXT] oldkey = root[KEY] oldresult = root[RESULT] root[KEY] = root[RESULT] = None # Now update the cache dictionary. del cache[oldkey] # Save the potentially reentrant cache[key] assignment # for last, after the root and links have been put in # a consistent state. cache[key] = oldroot else: # Put result in a new link at the front of the queue. last = root[PREV] link = [last, root, key, result] last[NEXT] = root[PREV] = cache[key] = link full = (len(cache) >= maxsize) misses += 1 return result def cache_info(): """Report cache statistics""" with lock: return _CacheInfo(hits, misses, maxsize, len(cache)) def cache_clear(): """Clear the cache and cache statistics""" nonlocal hits, misses, full with lock: cache.clear() root[:] = [root, root, None, None] hits = misses = 0 full = False wrapper.cache_info = cache_info wrapper.cache_clear = cache_clear return update_wrapper(wrapper, user_function) return decorating_function
gpl-2.0
-8,797,689,499,681,836,000
40.075529
93
0.528391
false
fitermay/intellij-community
python/helpers/py2only/docutils/io.py
104
17048
# $Id: io.py 7596 2013-01-25 13:42:17Z milde $ # Author: David Goodger <[email protected]> # Copyright: This module has been placed in the public domain. """ I/O classes provide a uniform API for low-level input and output. Subclasses exist for a variety of input/output mechanisms. """ __docformat__ = 'reStructuredText' import sys import os import re import codecs from docutils import TransformSpec from docutils._compat import b from docutils.utils.error_reporting import locale_encoding, ErrorString, ErrorOutput class InputError(IOError): pass class OutputError(IOError): pass def check_encoding(stream, encoding): """Test, whether the encoding of `stream` matches `encoding`. Returns :None: if `encoding` or `stream.encoding` are not a valid encoding argument (e.g. ``None``) or `stream.encoding is missing. :True: if the encoding argument resolves to the same value as `encoding`, :False: if the encodings differ. """ try: return codecs.lookup(stream.encoding) == codecs.lookup(encoding) except (LookupError, AttributeError, TypeError): return None class Input(TransformSpec): """ Abstract base class for input wrappers. """ component_type = 'input' default_source_path = None def __init__(self, source=None, source_path=None, encoding=None, error_handler='strict'): self.encoding = encoding """Text encoding for the input source.""" self.error_handler = error_handler """Text decoding error handler.""" self.source = source """The source of input data.""" self.source_path = source_path """A text reference to the source.""" if not source_path: self.source_path = self.default_source_path self.successful_encoding = None """The encoding that successfully decoded the source data.""" def __repr__(self): return '%s: source=%r, source_path=%r' % (self.__class__, self.source, self.source_path) def read(self): raise NotImplementedError def decode(self, data): """ Decode a string, `data`, heuristically. Raise UnicodeError if unsuccessful. The client application should call ``locale.setlocale`` at the beginning of processing:: locale.setlocale(locale.LC_ALL, '') """ if self.encoding and self.encoding.lower() == 'unicode': assert isinstance(data, unicode), ( 'input encoding is "unicode" ' 'but input is not a unicode object') if isinstance(data, unicode): # Accept unicode even if self.encoding != 'unicode'. return data if self.encoding: # We believe the user/application when the encoding is # explicitly given. encodings = [self.encoding] else: data_encoding = self.determine_encoding_from_data(data) if data_encoding: # If the data declares its encoding (explicitly or via a BOM), # we believe it. encodings = [data_encoding] else: # Apply heuristics only if no encoding is explicitly given and # no BOM found. Start with UTF-8, because that only matches # data that *IS* UTF-8: encodings = ['utf-8', 'latin-1'] if locale_encoding: encodings.insert(1, locale_encoding) for enc in encodings: try: decoded = unicode(data, enc, self.error_handler) self.successful_encoding = enc # Return decoded, removing BOMs. return decoded.replace(u'\ufeff', u'') except (UnicodeError, LookupError), err: error = err # in Python 3, the <exception instance> is # local to the except clause raise UnicodeError( 'Unable to decode input data. 
Tried the following encodings: ' '%s.\n(%s)' % (', '.join([repr(enc) for enc in encodings]), ErrorString(error))) coding_slug = re.compile(b("coding[:=]\s*([-\w.]+)")) """Encoding declaration pattern.""" byte_order_marks = ((codecs.BOM_UTF8, 'utf-8'), # 'utf-8-sig' new in v2.5 (codecs.BOM_UTF16_BE, 'utf-16-be'), (codecs.BOM_UTF16_LE, 'utf-16-le'),) """Sequence of (start_bytes, encoding) tuples for encoding detection. The first bytes of input data are checked against the start_bytes strings. A match indicates the given encoding.""" def determine_encoding_from_data(self, data): """ Try to determine the encoding of `data` by looking *in* `data`. Check for a byte order mark (BOM) or an encoding declaration. """ # check for a byte order mark: for start_bytes, encoding in self.byte_order_marks: if data.startswith(start_bytes): return encoding # check for an encoding declaration pattern in first 2 lines of file: for line in data.splitlines()[:2]: match = self.coding_slug.search(line) if match: return match.group(1).decode('ascii') return None class Output(TransformSpec): """ Abstract base class for output wrappers. """ component_type = 'output' default_destination_path = None def __init__(self, destination=None, destination_path=None, encoding=None, error_handler='strict'): self.encoding = encoding """Text encoding for the output destination.""" self.error_handler = error_handler or 'strict' """Text encoding error handler.""" self.destination = destination """The destination for output data.""" self.destination_path = destination_path """A text reference to the destination.""" if not destination_path: self.destination_path = self.default_destination_path def __repr__(self): return ('%s: destination=%r, destination_path=%r' % (self.__class__, self.destination, self.destination_path)) def write(self, data): """`data` is a Unicode string, to be encoded by `self.encode`.""" raise NotImplementedError def encode(self, data): if self.encoding and self.encoding.lower() == 'unicode': assert isinstance(data, unicode), ( 'the encoding given is "unicode" but the output is not ' 'a Unicode string') return data if not isinstance(data, unicode): # Non-unicode (e.g. bytes) output. return data else: return data.encode(self.encoding, self.error_handler) class FileInput(Input): """ Input for single, simple file-like objects. """ def __init__(self, source=None, source_path=None, encoding=None, error_handler='strict', autoclose=True, handle_io_errors=None, mode='rU'): """ :Parameters: - `source`: either a file-like object (which is read directly), or `None` (which implies `sys.stdin` if no `source_path` given). - `source_path`: a path to a file, which is opened and then read. - `encoding`: the expected text encoding of the input file. - `error_handler`: the encoding error handler to use. - `autoclose`: close automatically after read (except when `sys.stdin` is the source). - `handle_io_errors`: ignored, deprecated, will be removed. - `mode`: how the file is to be opened (see standard function `open`). The default 'rU' provides universal newline support for text files. 
""" Input.__init__(self, source, source_path, encoding, error_handler) self.autoclose = autoclose self._stderr = ErrorOutput() if source is None: if source_path: # Specify encoding in Python 3 if sys.version_info >= (3,0): kwargs = {'encoding': self.encoding, 'errors': self.error_handler} else: kwargs = {} try: self.source = open(source_path, mode, **kwargs) except IOError, error: raise InputError(error.errno, error.strerror, source_path) else: self.source = sys.stdin elif (sys.version_info >= (3,0) and check_encoding(self.source, self.encoding) is False): # TODO: re-open, warn or raise error? raise UnicodeError('Encoding clash: encoding given is "%s" ' 'but source is opened with encoding "%s".' % (self.encoding, self.source.encoding)) if not source_path: try: self.source_path = self.source.name except AttributeError: pass def read(self): """ Read and decode a single file and return the data (Unicode string). """ try: # In Python < 2.5, try...except has to be nested in try...finally. try: if self.source is sys.stdin and sys.version_info >= (3,0): # read as binary data to circumvent auto-decoding data = self.source.buffer.read() # normalize newlines data = b('\n').join(data.splitlines()) + b('\n') else: data = self.source.read() except (UnicodeError, LookupError), err: # (in Py3k read() decodes) if not self.encoding and self.source_path: # re-read in binary mode and decode with heuristics b_source = open(self.source_path, 'rb') data = b_source.read() b_source.close() # normalize newlines data = b('\n').join(data.splitlines()) + b('\n') else: raise finally: if self.autoclose: self.close() return self.decode(data) def readlines(self): """ Return lines of a single file as list of Unicode strings. """ return self.read().splitlines(True) def close(self): if self.source is not sys.stdin: self.source.close() class FileOutput(Output): """ Output for single, simple file-like objects. """ mode = 'w' """The mode argument for `open()`.""" # 'wb' for binary (e.g. OpenOffice) files (see also `BinaryFileOutput`). # (Do not use binary mode ('wb') for text files, as this prevents the # conversion of newlines to the system specific default.) def __init__(self, destination=None, destination_path=None, encoding=None, error_handler='strict', autoclose=True, handle_io_errors=None, mode=None): """ :Parameters: - `destination`: either a file-like object (which is written directly) or `None` (which implies `sys.stdout` if no `destination_path` given). - `destination_path`: a path to a file, which is opened and then written. - `encoding`: the text encoding of the output file. - `error_handler`: the encoding error handler to use. - `autoclose`: close automatically after write (except when `sys.stdout` or `sys.stderr` is the destination). - `handle_io_errors`: ignored, deprecated, will be removed. - `mode`: how the file is to be opened (see standard function `open`). The default is 'w', providing universal newline support for text files. 
""" Output.__init__(self, destination, destination_path, encoding, error_handler) self.opened = True self.autoclose = autoclose if mode is not None: self.mode = mode self._stderr = ErrorOutput() if destination is None: if destination_path: self.opened = False else: self.destination = sys.stdout elif (# destination is file-type object -> check mode: mode and hasattr(self.destination, 'mode') and mode != self.destination.mode): print >>self._stderr, ('Warning: Destination mode "%s" ' 'differs from specified mode "%s"' % (self.destination.mode, mode)) if not destination_path: try: self.destination_path = self.destination.name except AttributeError: pass def open(self): # Specify encoding in Python 3. if sys.version_info >= (3,0) and 'b' not in self.mode: kwargs = {'encoding': self.encoding, 'errors': self.error_handler} else: kwargs = {} try: self.destination = open(self.destination_path, self.mode, **kwargs) except IOError, error: raise OutputError(error.errno, error.strerror, self.destination_path) self.opened = True def write(self, data): """Encode `data`, write it to a single file, and return it. With Python 3 or binary output mode, `data` is returned unchanged, except when specified encoding and output encoding differ. """ if not self.opened: self.open() if ('b' not in self.mode and sys.version_info < (3,0) or check_encoding(self.destination, self.encoding) is False ): if sys.version_info >= (3,0) and os.linesep != '\n': data = data.replace('\n', os.linesep) # fix endings data = self.encode(data) try: # In Python < 2.5, try...except has to be nested in try...finally. try: self.destination.write(data) except TypeError, e: if sys.version_info >= (3,0) and isinstance(data, bytes): try: self.destination.buffer.write(data) except AttributeError: if check_encoding(self.destination, self.encoding) is False: raise ValueError('Encoding of %s (%s) differs \n' ' from specified encoding (%s)' % (self.destination_path or 'destination', self.destination.encoding, self.encoding)) else: raise e except (UnicodeError, LookupError), err: raise UnicodeError( 'Unable to encode output data. output-encoding is: ' '%s.\n(%s)' % (self.encoding, ErrorString(err))) finally: if self.autoclose: self.close() return data def close(self): if self.destination not in (sys.stdout, sys.stderr): self.destination.close() self.opened = False class BinaryFileOutput(FileOutput): """ A version of docutils.io.FileOutput which writes to a binary file. """ # Used by core.publish_cmdline_to_binary() which in turn is used by # rst2odt (OpenOffice writer) mode = 'wb' class StringInput(Input): """ Direct string input. """ default_source_path = '<string>' def read(self): """Decode and return the source string.""" return self.decode(self.source) class StringOutput(Output): """ Direct string output. """ default_destination_path = '<string>' def write(self, data): """Encode `data`, store it in `self.destination`, and return it.""" self.destination = self.encode(data) return self.destination class NullInput(Input): """ Degenerate input: read nothing. """ default_source_path = 'null input' def read(self): """Return a null string.""" return u'' class NullOutput(Output): """ Degenerate output: write nothing. """ default_destination_path = 'null output' def write(self, data): """Do nothing ([don't even] send data to the bit bucket).""" pass class DocTreeInput(Input): """ Adapter for document tree input. The document tree must be passed in the ``source`` parameter. 
""" default_source_path = 'doctree input' def read(self): """Return the document tree.""" return self.source
apache-2.0
8,932,914,518,473,063,000
34.442827
84
0.554435
false
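docutils.io hides every concrete source and destination behind the Input/Output API above, so readers and writers never touch encodings directly. A minimal round trip through the string-based classes, assuming a docutils installation whose StringInput/StringOutput behave as in the module shown (the special 'unicode' encoding short-circuits both decoding and encoding):

from docutils.io import StringInput, StringOutput

src = StringInput(source='Hello, docutils!', encoding='unicode')
text = src.read()                    # decode() passes the text through unchanged

dest = StringOutput(encoding='unicode')
dest.write(text)                     # encode() likewise returns it untouched
print(dest.destination)              # 'Hello, docutils!'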
zubron/servo
tests/wpt/harness/wptrunner/wptrunner.py
50
9660
# This Source Code Form is subject to the terms of the Mozilla Public # License, v. 2.0. If a copy of the MPL was not distributed with this # file, You can obtain one at http://mozilla.org/MPL/2.0/. from __future__ import unicode_literals import json import os import sys import environment as env import products import testloader import wptcommandline import wptlogging import wpttest from testrunner import ManagerGroup here = os.path.split(__file__)[0] logger = None """Runner for web-platform-tests The runner has several design goals: * Tests should run with no modification from upstream. * Tests should be regarded as "untrusted" so that errors, timeouts and even crashes in the tests can be handled without failing the entire test run. * For performance tests can be run in multiple browsers in parallel. The upstream repository has the facility for creating a test manifest in JSON format. This manifest is used directly to determine which tests exist. Local metadata files are used to store the expected test results. """ def setup_logging(*args, **kwargs): global logger logger = wptlogging.setup(*args, **kwargs) def get_loader(test_paths, product, ssl_env, debug=None, run_info_extras=None, **kwargs): if run_info_extras is None: run_info_extras = {} run_info = wpttest.get_run_info(kwargs["run_info"], product, debug=debug, extras=run_info_extras) test_manifests = testloader.ManifestLoader(test_paths, force_manifest_update=kwargs["manifest_update"]).load() manifest_filters = [] meta_filters = [] if kwargs["include"] or kwargs["exclude"] or kwargs["include_manifest"]: manifest_filters.append(testloader.TestFilter(include=kwargs["include"], exclude=kwargs["exclude"], manifest_path=kwargs["include_manifest"], test_manifests=test_manifests)) if kwargs["tags"]: meta_filters.append(testloader.TagFilter(tags=kwargs["tags"])) test_loader = testloader.TestLoader(test_manifests, kwargs["test_types"], run_info, manifest_filters=manifest_filters, meta_filters=meta_filters, chunk_type=kwargs["chunk_type"], total_chunks=kwargs["total_chunks"], chunk_number=kwargs["this_chunk"], include_https=ssl_env.ssl_enabled) return run_info, test_loader def list_test_groups(test_paths, product, **kwargs): env.do_delayed_imports(logger, test_paths) ssl_env = env.ssl_env(logger, **kwargs) run_info, test_loader = get_loader(test_paths, product, ssl_env, **kwargs) for item in sorted(test_loader.groups(kwargs["test_types"])): print item def list_disabled(test_paths, product, **kwargs): env.do_delayed_imports(logger, test_paths) rv = [] ssl_env = env.ssl_env(logger, **kwargs) run_info, test_loader = get_loader(test_paths, product, ssl_env, **kwargs) for test_type, tests in test_loader.disabled_tests.iteritems(): for test in tests: rv.append({"test": test.id, "reason": test.disabled()}) print json.dumps(rv, indent=2) def get_pause_after_test(test_loader, **kwargs): total_tests = sum(len(item) for item in test_loader.tests.itervalues()) if kwargs["pause_after_test"] is None: if kwargs["repeat_until_unexpected"]: return False if kwargs["repeat"] == 1 and total_tests == 1: return True return False return kwargs["pause_after_test"] def run_tests(config, test_paths, product, **kwargs): with wptlogging.CaptureIO(logger, not kwargs["no_capture_stdio"]): env.do_delayed_imports(logger, test_paths) (check_args, browser_cls, get_browser_kwargs, executor_classes, get_executor_kwargs, env_options, run_info_extras) = products.load_product(config, product) ssl_env = env.ssl_env(logger, **kwargs) check_args(**kwargs) if "test_loader" in kwargs: 
run_info = wpttest.get_run_info(kwargs["run_info"], product, debug=None, extras=run_info_extras(**kwargs)) test_loader = kwargs["test_loader"] else: run_info, test_loader = get_loader(test_paths, product, ssl_env, run_info_extras=run_info_extras(**kwargs), **kwargs) if kwargs["run_by_dir"] is False: test_source_cls = testloader.SingleTestSource test_source_kwargs = {} else: # A value of None indicates infinite depth test_source_cls = testloader.PathGroupedSource test_source_kwargs = {"depth": kwargs["run_by_dir"]} logger.info("Using %i client processes" % kwargs["processes"]) unexpected_total = 0 kwargs["pause_after_test"] = get_pause_after_test(test_loader, **kwargs) with env.TestEnvironment(test_paths, ssl_env, kwargs["pause_after_test"], kwargs["debug_info"], env_options) as test_environment: try: test_environment.ensure_started() except env.TestEnvironmentError as e: logger.critical("Error starting test environment: %s" % e.message) raise browser_kwargs = get_browser_kwargs(ssl_env=ssl_env, **kwargs) repeat = kwargs["repeat"] repeat_count = 0 repeat_until_unexpected = kwargs["repeat_until_unexpected"] while repeat_count < repeat or repeat_until_unexpected: repeat_count += 1 if repeat_until_unexpected: logger.info("Repetition %i" % (repeat_count)) elif repeat > 1: logger.info("Repetition %i / %i" % (repeat_count, repeat)) unexpected_count = 0 logger.suite_start(test_loader.test_ids, run_info) for test_type in kwargs["test_types"]: logger.info("Running %s tests" % test_type) for test in test_loader.disabled_tests[test_type]: logger.test_start(test.id) logger.test_end(test.id, status="SKIP") executor_cls = executor_classes.get(test_type) executor_kwargs = get_executor_kwargs(test_type, test_environment.external_config, test_environment.cache_manager, run_info, **kwargs) if executor_cls is None: logger.error("Unsupported test type %s for product %s" % (test_type, product)) continue with ManagerGroup("web-platform-tests", kwargs["processes"], test_source_cls, test_source_kwargs, browser_cls, browser_kwargs, executor_cls, executor_kwargs, kwargs["pause_after_test"], kwargs["pause_on_unexpected"], kwargs["debug_info"]) as manager_group: try: manager_group.run(test_type, test_loader.tests) except KeyboardInterrupt: logger.critical("Main thread got signal") manager_group.stop() raise unexpected_count += manager_group.unexpected_count() unexpected_total += unexpected_count logger.info("Got %i unexpected results" % unexpected_count) if repeat_until_unexpected and unexpected_total > 0: break logger.suite_end() return unexpected_total == 0 def main(): """Main entry point when calling from the command line""" kwargs = wptcommandline.parse_args() try: if kwargs["prefs_root"] is None: kwargs["prefs_root"] = os.path.abspath(os.path.join(here, "prefs")) setup_logging(kwargs, {"raw": sys.stdout}) if kwargs["list_test_groups"]: list_test_groups(**kwargs) elif kwargs["list_disabled"]: list_disabled(**kwargs) else: return not run_tests(**kwargs) except Exception: if kwargs["pdb"]: import pdb, traceback print traceback.format_exc() pdb.post_mortem() else: raise
mpl-2.0
-3,684,081,465,269,544,000
38.109312
114
0.521222
false
hschovanec-usgs/magpy
magpy/lib/format_autodif_fread.py
2
3452
""" MagPy Auxiliary input filter - Write AUTODIF read-in data for F (also read) Written by Rachel Bailey - contains test and read function, toDo: write function """ from magpy.stream import * def isAUTODIF_FREAD(filename): """ Checks whether a file is text POS-1 file format. """ try: line = open(filename, 'r').readline() except: return False try: temp = line.split() if len(temp) == 5: if len(temp[0]) == 8 and len(temp[1]) == 5 and len(temp[2]) == 2: logging.debug("lib - format_autodif: Found Autodif Text file %s" % filename) return True else: return False else: return False except: return False def readAUTODIF_FREAD(filename, headonly=False, **kwargs): ''' Reading AUTODIF format data. Looks like: 48486304 00126 80 01.01.13 00:00:00,00 48486309 00036 80 01.01.13 00:00:05,00 48486314 00027 80 01.01.13 00:00:10,00 ''' # Reading AUTODIF text format data. starttime = kwargs.get('starttime') endtime = kwargs.get('endtime') getfile = True fh = open(filename, 'rb') # read file and split text into channels stream = DataStream() # Check whether header infromation is already present if stream.header is None: headers = {} else: headers = stream.header data = [] key = None theday = extractDateFromString(filename) try: day = datetime.strftime(theday,"%Y-%m-%d") # Select only files within eventually defined time range if starttime: if not datetime.strptime(day,'%Y-%m-%d') >= datetime.strptime(datetime.strftime(stream._testtime(starttime),'%Y-%m-%d'),'%Y-%m-%d'): getfile = False if endtime: if not datetime.strptime(day,'%Y-%m-%d') <= datetime.strptime(datetime.strftime(stream._testtime(endtime),'%Y-%m-%d'),'%Y-%m-%d'): getfile = False except: logging.warning("Could not identify date in %s. Reading all ..." % daystring) getfile = True if getfile: line = fh.readline() while line != "": data = line.split() row = LineStruct() timestring = data[3] + ' ' + data[4] time = datetime.strptime(timestring, "%m.%d.%y %H:%M:%S,%f") row.time = date2num(time) row.f = float(data[0])/1000. row.df = float(data[1])/1000. row.var1 = float(data[2]) stream.add(row) line = fh.readline() #print "Finished file reading of %s" % filename fh.close() return DataStream(stream, headers) def writeAUTODIF_FREAD(datastream, filename, **kwargs): """ Function to write AUTODIF-format data """ headdict = datastream.header myFile= open( filename, 'wb' ) try: for elem in datastream: time = datetime.strftime(num2date(elem.time).replace(tzinfo=None), "%m.%d.%y %H:%M:%S,%f") if elem.var1 > 9 and elem.var1 < 90: line = '%08d %05d %02d %s\n' % (elem.f*1000., elem.df*1000., elem.var1, time[:20]) else: line = '%08d %05d 80 %s\n' % (elem.f*1000., elem.df*1000., time[:20]) myFile.write(line) except: logging.warning('lib - format_autodif write: Data missing/wrong data format.') myFile.close()
gpl-3.0
-7,470,526,449,428,472,000
28.254237
144
0.565469
false
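Each data line that isAUTODIF_FREAD accepts carries five fields: F and dF as integers in thousandths of nT, a flag, and a date/time pair, e.g. "48486304 00126 80 01.01.13 00:00:00,00". A standalone parsing sketch that mirrors the scaling and the timestamp format used in readAUTODIF_FREAD (no MagPy dependency; the returned keys follow the stream columns above):

from datetime import datetime

def parse_autodif_line(line):
    # F and dF are stored as integers in 1/1000 nT, as in readAUTODIF_FREAD.
    f_raw, df_raw, flag, day, clock = line.split()
    t = datetime.strptime(day + ' ' + clock, "%m.%d.%y %H:%M:%S,%f")
    return {'time': t, 'f': float(f_raw) / 1000.0,
            'df': float(df_raw) / 1000.0, 'var1': float(flag)}

row = parse_autodif_line("48486304 00126 80 01.01.13 00:00:00,00")
print(row['f'], row['df'], row['time'])   # 48486.304 0.126 2013-01-01 00:00:00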
neteler/QGIS
python/plugins/processing/algs/qgis/DeleteDuplicateGeometries.py
6
2947
# -*- coding: utf-8 -*- """ *************************************************************************** DeleteDuplicateGeometries.py --------------------- Date : May 2010 Copyright : (C) 2010 by Michael Minn Email : pyqgis at michaelminn dot com *************************************************************************** * * * This program is free software; you can redistribute it and/or modify * * it under the terms of the GNU General Public License as published by * * the Free Software Foundation; either version 2 of the License, or * * (at your option) any later version. * * * *************************************************************************** """ __author__ = 'Michael Minn' __date__ = 'May 2010' __copyright__ = '(C) 2010, Michael Minn' # This will get replaced with a git SHA1 when you do a git archive __revision__ = '$Format:%H$' from qgis.core import QgsGeometry, QgsFeatureRequest from processing.core.GeoAlgorithm import GeoAlgorithm from processing.core.parameters import ParameterVector from processing.core.outputs import OutputVector from processing.tools import dataobjects, vector class DeleteDuplicateGeometries(GeoAlgorithm): INPUT = 'INPUT' OUTPUT = 'OUTPUT' def defineCharacteristics(self): self.name = 'Delete duplicate geometries' self.group = 'Vector general tools' self.addParameter(ParameterVector(self.INPUT, self.tr('Input layer'), [ParameterVector.VECTOR_TYPE_ANY])) self.addOutput(OutputVector(self.OUTPUT, self.tr('Output'))) def processAlgorithm(self, progress): layer = dataobjects.getObjectFromUri( self.getParameterValue(self.INPUT)) fields = layer.pendingFields() writer = self.getOutputFromName(self.OUTPUT).getVectorWriter(fields, layer.wkbType(), layer.crs()) features = vector.features(layer) count = len(features) total = 100.0 / float(count) geoms = dict() for count, f in enumerate(features): geoms[f.id()] = QgsGeometry(f.geometry()) progress.setPercentage(int(count * total)) cleaned = dict(geoms) for i, g in geoms.iteritems(): for j in cleaned.keys(): if i == j or i not in cleaned: continue if g.isGeosEqual(cleaned[j]): del cleaned[j] count = len(cleaned) total = 100.0 / float(count) request = QgsFeatureRequest().setFilterFids(cleaned.keys()) for count, f in enumerate(layer.getFeatures(request)): writer.addFeature(f) progress.setPercentage(int(count * total)) del writer
gpl-2.0
1,474,510,051,428,170,000
35.382716
76
0.536817
false
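The de-duplication itself is a quadratic pairwise sweep: copy the id-to-geometry map, then drop every later entry that compares GEOS-equal to one already kept. A framework-free sketch of the same pattern, with a plain equality callback standing in for QgsGeometry.isGeosEqual:

def drop_duplicates(items, equal):
    """Keep one representative of each group of mutually 'equal' values (O(n^2))."""
    kept = dict(items)                       # id -> value, as in the algorithm above
    for i, value in items.items():
        for j in list(kept.keys()):
            if i == j or i not in kept:
                continue
            if equal(value, kept[j]):
                del kept[j]
    return kept

geoms = {1: (0, 0), 2: (1, 1), 3: (0, 0)}    # stand-ins for geometries
print(drop_duplicates(geoms, lambda a, b: a == b))   # {1: (0, 0), 2: (1, 1)}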
felixonmars/babel
tests/test_plural.py
11
8301
# -*- coding: utf-8 -*- # # Copyright (C) 2008-2011 Edgewall Software # All rights reserved. # # This software is licensed as described in the file COPYING, which # you should have received as part of this distribution. The terms # are also available at http://babel.edgewall.org/wiki/License. # # This software consists of voluntary contributions made by many # individuals. For the exact contribution history, see the revision # history and logs, available at http://babel.edgewall.org/log/. import decimal import unittest import pytest from babel import plural def test_plural_rule(): rule = plural.PluralRule({'one': 'n is 1'}) assert rule(1) == 'one' assert rule(2) == 'other' rule = plural.PluralRule({'one': 'n is 1'}) assert rule.rules == {'one': 'n is 1'} def test_plural_rule_operands_i(): rule = plural.PluralRule({'one': 'i is 1'}) assert rule(1.2) == 'one' assert rule(2) == 'other' def test_plural_rule_operands_v(): rule = plural.PluralRule({'one': 'v is 2'}) assert rule(decimal.Decimal('1.20')) == 'one' assert rule(decimal.Decimal('1.2')) == 'other' assert rule(2) == 'other' def test_plural_rule_operands_w(): rule = plural.PluralRule({'one': 'w is 2'}) assert rule(decimal.Decimal('1.23')) == 'one' assert rule(decimal.Decimal('1.20')) == 'other' assert rule(1.2) == 'other' def test_plural_rule_operands_f(): rule = plural.PluralRule({'one': 'f is 20'}) assert rule(decimal.Decimal('1.23')) == 'other' assert rule(decimal.Decimal('1.20')) == 'one' assert rule(1.2) == 'other' def test_plural_rule_operands_t(): rule = plural.PluralRule({'one': 't = 5'}) assert rule(decimal.Decimal('1.53')) == 'other' assert rule(decimal.Decimal('1.50')) == 'one' assert rule(1.5) == 'one' def test_plural_other_is_ignored(): rule = plural.PluralRule({'one': 'n is 1', 'other': '@integer 2'}) assert rule(1) == 'one' def test_to_javascript(): assert (plural.to_javascript({'one': 'n is 1'}) == "(function(n) { return (n == 1) ? 'one' : 'other'; })") def test_to_python(): func = plural.to_python({'one': 'n is 1', 'few': 'n in 2..4'}) assert func(1) == 'one' assert func(3) == 'few' func = plural.to_python({'one': 'n in 1,11', 'few': 'n in 3..10,13..19'}) assert func(11) == 'one' assert func(15) == 'few' def test_to_gettext(): assert (plural.to_gettext({'one': 'n is 1', 'two': 'n is 2'}) == 'nplurals=3; plural=((n == 1) ? 0 : (n == 2) ? 1 : 2)') def test_in_range_list(): assert plural.in_range_list(1, [(1, 3)]) assert plural.in_range_list(3, [(1, 3)]) assert plural.in_range_list(3, [(1, 3), (5, 8)]) assert not plural.in_range_list(1.2, [(1, 4)]) assert not plural.in_range_list(10, [(1, 4)]) assert not plural.in_range_list(10, [(1, 4), (6, 8)]) def test_within_range_list(): assert plural.within_range_list(1, [(1, 3)]) assert plural.within_range_list(1.0, [(1, 3)]) assert plural.within_range_list(1.2, [(1, 4)]) assert plural.within_range_list(8.8, [(1, 4), (7, 15)]) assert not plural.within_range_list(10, [(1, 4)]) assert not plural.within_range_list(10.5, [(1, 4), (20, 30)]) def test_cldr_modulo(): assert plural.cldr_modulo(-3, 5) == -3 assert plural.cldr_modulo(-3, -5) == -3 assert plural.cldr_modulo(3, 5) == 3 def test_plural_within_rules(): p = plural.PluralRule({'one': 'n is 1', 'few': 'n within 2,4,7..9'}) assert repr(p) == "<PluralRule 'one: n is 1, few: n within 2,4,7..9'>" assert plural.to_javascript(p) == ( "(function(n) { " "return ((n == 2) || (n == 4) || (n >= 7 && n <= 9))" " ? 'few' : (n == 1) ? 
'one' : 'other'; })") assert plural.to_gettext(p) == ( 'nplurals=3; plural=(((n == 2) || (n == 4) || (n >= 7 && n <= 9))' ' ? 1 : (n == 1) ? 0 : 2)') assert p(0) == 'other' assert p(1) == 'one' assert p(2) == 'few' assert p(3) == 'other' assert p(4) == 'few' assert p(5) == 'other' assert p(6) == 'other' assert p(7) == 'few' assert p(8) == 'few' assert p(9) == 'few' def test_locales_with_no_plural_rules_have_default(): from babel import Locale aa_plural = Locale.parse('aa').plural_form assert aa_plural(1) == 'other' assert aa_plural(2) == 'other' assert aa_plural(15) == 'other' WELL_FORMED_TOKEN_TESTS = ( ('', []), ('n = 1', [('value', '1'), ('symbol', '='), ('word', 'n'), ]), ('n = 1 @integer 1', [('value', '1'), ('symbol', '='), ('word', 'n'), ]), ('n is 1', [('value', '1'), ('word', 'is'), ('word', 'n'), ]), ('n % 100 = 3..10', [('value', '10'), ('ellipsis', '..'), ('value', '3'), ('symbol', '='), ('value', '100'), ('symbol', '%'), ('word', 'n'), ]), ) @pytest.mark.parametrize('rule_text,tokens', WELL_FORMED_TOKEN_TESTS) def test_tokenize_well_formed(rule_text, tokens): assert plural.tokenize_rule(rule_text) == tokens MALFORMED_TOKEN_TESTS = ( 'a = 1', 'n ! 2', ) @pytest.mark.parametrize('rule_text', MALFORMED_TOKEN_TESTS) def test_tokenize_malformed(rule_text): with pytest.raises(plural.RuleError): plural.tokenize_rule(rule_text) class TestNextTokenTestCase(unittest.TestCase): def test_empty(self): assert not plural.test_next_token([], '') def test_type_ok_and_no_value(self): assert plural.test_next_token([('word', 'and')], 'word') def test_type_ok_and_not_value(self): assert not plural.test_next_token([('word', 'and')], 'word', 'or') def test_type_ok_and_value_ok(self): assert plural.test_next_token([('word', 'and')], 'word', 'and') def test_type_not_ok_and_value_ok(self): assert not plural.test_next_token([('abc', 'and')], 'word', 'and') def make_range_list(*values): ranges = [] for v in values: if isinstance(v, int): val_node = plural.value_node(v) ranges.append((val_node, val_node)) else: assert isinstance(v, tuple) ranges.append((plural.value_node(v[0]), plural.value_node(v[1]))) return plural.range_list_node(ranges) class PluralRuleParserTestCase(unittest.TestCase): def setUp(self): self.n = plural.ident_node('n') def n_eq(self, v): return 'relation', ('in', self.n, make_range_list(v)) def test_error_when_unexpected_end(self): with pytest.raises(plural.RuleError): plural._Parser('n =') def test_eq_relation(self): assert plural._Parser('n = 1').ast == self.n_eq(1) def test_in_range_relation(self): assert plural._Parser('n = 2..4').ast == \ ('relation', ('in', self.n, make_range_list((2, 4)))) def test_negate(self): assert plural._Parser('n != 1').ast == plural.negate(self.n_eq(1)) def test_or(self): assert plural._Parser('n = 1 or n = 2').ast ==\ ('or', (self.n_eq(1), self.n_eq(2))) def test_and(self): assert plural._Parser('n = 1 and n = 2').ast ==\ ('and', (self.n_eq(1), self.n_eq(2))) def test_or_and(self): assert plural._Parser('n = 0 or n != 1 and n % 100 = 1..19').ast == \ ('or', (self.n_eq(0), ('and', (plural.negate(self.n_eq(1)), ('relation', ('in', ('mod', (self.n, plural.value_node(100))), (make_range_list((1, 19))))))) )) EXTRACT_OPERANDS_TESTS = ( (1, 1, 1, 0, 0, 0, 0), ('1.0', '1.0', 1, 1, 0, 0, 0), ('1.00', '1.00', 1, 2, 0, 0, 0), ('1.3', '1.3', 1, 1, 1, 3, 3), ('1.30', '1.30', 1, 2, 1, 30, 3), ('1.03', '1.03', 1, 2, 2, 3, 3), ('1.230', '1.230', 1, 3, 2, 230, 23), (-1, 1, 1, 0, 0, 0, 0), (1.3, '1.3', 1, 1, 1, 3, 3), ) @pytest.mark.parametrize('source,n,i,v,w,f,t', 
EXTRACT_OPERANDS_TESTS) def test_extract_operands(source, n, i, v, w, f, t): source = decimal.Decimal(source) if isinstance(source, str) else source assert (plural.extract_operands(source) == decimal.Decimal(n), i, v, w, f, t)
bsd-3-clause
8,445,123,276,048,686,000
31.552941
77
0.540055
false
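The cases above all go through babel.plural.PluralRule, which compiles CLDR-style rule strings into a callable that maps a number to a plural tag. A minimal usage sketch built from the same rules the tests exercise:

from babel import plural

rule = plural.PluralRule({'one': 'n is 1', 'few': 'n within 2,4,7..9'})
print(rule(1))   # 'one'
print(rule(4))   # 'few'
print(rule(5))   # 'other'

# The same rule set can be exported for other runtimes:
print(plural.to_javascript(rule))
print(plural.to_gettext(rule))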
bxlab/bx-python
lib/bx/cookbook/doc_optparse.py
1
2840
""" :Author: M. Simionato :Date: April 2004 :Title: A much simplified interface to optparse. You should use optionparse in your scripts as follows. First, write a module level docstring containing something like this (this is just an example):: '''usage: %prog files [options] -d, --delete: delete all files -e, --erase = ERASE: erase the given file''' Then write a main program of this kind: # sketch of a script to delete files:: if __name__=='__main__': import optionparse option,args=optionparse.parse(__doc__) if not args and not option: optionparse.exit() elif option.delete: print "Delete all files" elif option.erase: print "Delete the given file" Notice that ``optionparse`` parses the docstring by looking at the characters ",", ":", "=", "\\n", so be careful in using them. If the docstring is not correctly formatted you will get a SyntaxError or worse, the script will not work as expected. """ import optparse import re import sys import traceback USAGE = re.compile(r'(?s)\s*usage: (.*?)(\n[ \t]*\n|$)') def nonzero(self): # will become the nonzero method of optparse.Values "True if options were given" for v in self.__dict__.values(): if v is not None: return True return False optparse.Values.__nonzero__ = nonzero # dynamically fix optparse.Values class ParsingError(Exception): pass optionstring = "" def exception(msg=""): print("Exception while parsing command line:", file=sys.stderr) print(traceback.format_exc(), file=sys.stderr) exit(msg) def exit(msg=""): raise SystemExit(msg or optionstring.replace("%prog", sys.argv[0])) def parse(docstring, arglist=None): global optionstring optionstring = docstring match = USAGE.search(optionstring) if not match: raise ParsingError("Cannot find the option string") optlines = match.group(1).splitlines() try: p = optparse.OptionParser(optlines[0], conflict_handler="resolve") for line in optlines[1:]: opt, help = line.split(':')[:2] # Make both short and long optional (but at least one) # Old: short,long=opt.split(',')[:2] opt_strings = [] action = "store_true" for k in opt.split(', '): k = k.strip() if k.startswith("--") and "=" in k: action = "store" k = k.split("=")[0] opt_strings.append(k) p.add_option(*opt_strings, **dict(action=action, help=help.strip())) except (IndexError, ValueError): raise ParsingError("Cannot parse the option string correctly") return p.parse_args(arglist) def help_callback(option, opt, value, parser, help): print(help, file=sys.stderr) sys.exit(1)
mit
-8,315,270,348,796,834,000
28.583333
80
0.627817
false
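As the module docstring explains, doc_optparse builds an optparse parser from a specially formatted usage docstring (comma-separated option strings, colon before the help text). A hedged sketch of the intended call pattern, assuming bx-python is installed and only boolean flags are used (script name and options are illustrative):

"""usage: %prog files [options]
   -d, --delete: delete all files
   -v, --verbose: print progress messages
"""
from bx.cookbook import doc_optparse

options, args = doc_optparse.parse(__doc__)
if options.delete:
    print("Deleting:", args)
elif options.verbose:
    print("Files to process:", args)
else:
    doc_optparse.exit()          # raises SystemExit with the usage text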
suryaambrose/code_bits
decompose_number_on_base.py
1
1690
# Standard library import unittest def decompose_number_on_base(number, base): """ Returns a number's decomposition on a defined base :param number: The number to decompose :param base: list representing the base. It must be sorted and each element must be a multiple of its predecessor. First element must be 1. :raises TypeError: if base is invalid """ _base = list(base) try: assert(_base[0] == 1) for i in range(1, len(_base)): assert(_base[i] > _base[i-1]) ratio = float(_base[i]) / float(_base[i-1]) assert(ratio == int(ratio)) except AssertionError: raise TypeError("Base (%s) is invalid"%_base) _base.reverse() output = [0]*len(_base) for base_index in range(len(_base)): r = number % _base[base_index] output[base_index] = int((number-r)/_base[base_index]) number = r output.reverse() return output class TestDecomposition(unittest.TestCase): def test_decomposition(self): with self.assertRaises(TypeError): # Does not start with 1 decompose_number_on_base(10, [2,3]) with self.assertRaises(TypeError): # Not sorted decompose_number_on_base(10, [1,4,2]) with self.assertRaises(TypeError): # Not all elements are multiple of the previous element decompose_number_on_base(10, [1,2,3]) assert(decompose_number_on_base(10, [1,2,4,8]) == [0,1,0,1]) # binary base assert(decompose_number_on_base(10, [1,10,100]) == [0,1,0]) # decimal base assert(decompose_number_on_base(10, [1,16,256]) == [10,0,0]) # hexadecimal base assert(decompose_number_on_base(10, [1,8,64]) == [2,1,0]) # octal base assert(decompose_number_on_base(100, [1,2,6,18,36]) == [0,2,1,1,2]) if __name__ == "__main__": unittest.main()
mit
-830,798,061,533,962,200
27.183333
81
0.670414
false
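decompose_number_on_base expresses a number over a mixed-radix base (each element a multiple of its predecessor, starting at 1) and returns the digits least-significant first. Example calls taken from the unit tests, assuming the module above is importable under its file name:

from decompose_number_on_base import decompose_number_on_base

print(decompose_number_on_base(10, [1, 2, 4, 8]))        # [0, 1, 0, 1] -> 2 + 8
print(decompose_number_on_base(10, [1, 10, 100]))        # [0, 1, 0]
print(decompose_number_on_base(100, [1, 2, 6, 18, 36]))  # [0, 2, 1, 1, 2] -> 2*2 + 6 + 18 + 2*36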
SOKP/kernel_cyanogen_msm8916
scripts/build-all.py
704
14699
#! /usr/bin/env python # Copyright (c) 2009-2014, The Linux Foundation. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # * Neither the name of The Linux Foundation nor # the names of its contributors may be used to endorse or promote # products derived from this software without specific prior written # permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NON-INFRINGEMENT ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR # CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, # EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; # OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, # WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR # OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF # ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # Build the kernel for all targets using the Android build environment. from collections import namedtuple import glob from optparse import OptionParser import os import re import shutil import subprocess import sys import threading import Queue version = 'build-all.py, version 1.99' build_dir = '../all-kernels' make_command = ["vmlinux", "modules", "dtbs"] all_options = {} compile64 = os.environ.get('CROSS_COMPILE64') def error(msg): sys.stderr.write("error: %s\n" % msg) def fail(msg): """Fail with a user-printed message""" error(msg) sys.exit(1) if not os.environ.get('CROSS_COMPILE'): fail("CROSS_COMPILE must be set in the environment") def check_kernel(): """Ensure that PWD is a kernel directory""" if (not os.path.isfile('MAINTAINERS') or not os.path.isfile('arch/arm/mach-msm/Kconfig')): fail("This doesn't seem to be an MSM kernel dir") def check_build(): """Ensure that the build directory is present.""" if not os.path.isdir(build_dir): try: os.makedirs(build_dir) except OSError as exc: if exc.errno == errno.EEXIST: pass else: raise def build_threads(): """Determine the number of build threads requested by the user""" if all_options.load_average: return all_options.load_average return all_options.jobs or 1 failed_targets = [] BuildResult = namedtuple('BuildResult', ['status', 'messages']) class BuildSequence(namedtuple('BuildSequence', ['log_name', 'short_name', 'steps'])): def set_width(self, width): self.width = width def __enter__(self): self.log = open(self.log_name, 'w') def __exit__(self, type, value, traceback): self.log.close() def run(self): self.status = None messages = ["Building: " + self.short_name] def printer(line): text = "[%-*s] %s" % (self.width, self.short_name, line) messages.append(text) self.log.write(text) self.log.write('\n') for step in self.steps: st = step.run(printer) if st: self.status = BuildResult(self.short_name, messages) break if not self.status: self.status = BuildResult(None, messages) class 
BuildTracker: """Manages all of the steps necessary to perform a build. The build consists of one or more sequences of steps. The different sequences can be processed independently, while the steps within a sequence must be done in order.""" def __init__(self): self.sequence = [] self.lock = threading.Lock() def add_sequence(self, log_name, short_name, steps): self.sequence.append(BuildSequence(log_name, short_name, steps)) def longest_name(self): longest = 0 for seq in self.sequence: longest = max(longest, len(seq.short_name)) return longest def __repr__(self): return "BuildTracker(%s)" % self.sequence def run_child(self, seq): seq.set_width(self.longest) tok = self.build_tokens.get() with self.lock: print "Building:", seq.short_name with seq: seq.run() self.results.put(seq.status) self.build_tokens.put(tok) def run(self): self.longest = self.longest_name() self.results = Queue.Queue() children = [] errors = [] self.build_tokens = Queue.Queue() nthreads = build_threads() print "Building with", nthreads, "threads" for i in range(nthreads): self.build_tokens.put(True) for seq in self.sequence: child = threading.Thread(target=self.run_child, args=[seq]) children.append(child) child.start() for child in children: stats = self.results.get() if all_options.verbose: with self.lock: for line in stats.messages: print line sys.stdout.flush() if stats.status: errors.append(stats.status) for child in children: child.join() if errors: fail("\n ".join(["Failed targets:"] + errors)) class PrintStep: """A step that just prints a message""" def __init__(self, message): self.message = message def run(self, outp): outp(self.message) class MkdirStep: """A step that makes a directory""" def __init__(self, direc): self.direc = direc def run(self, outp): outp("mkdir %s" % self.direc) os.mkdir(self.direc) class RmtreeStep: def __init__(self, direc): self.direc = direc def run(self, outp): outp("rmtree %s" % self.direc) shutil.rmtree(self.direc, ignore_errors=True) class CopyfileStep: def __init__(self, src, dest): self.src = src self.dest = dest def run(self, outp): outp("cp %s %s" % (self.src, self.dest)) shutil.copyfile(self.src, self.dest) class ExecStep: def __init__(self, cmd, **kwargs): self.cmd = cmd self.kwargs = kwargs def run(self, outp): outp("exec: %s" % (" ".join(self.cmd),)) with open('/dev/null', 'r') as devnull: proc = subprocess.Popen(self.cmd, stdin=devnull, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, **self.kwargs) stdout = proc.stdout while True: line = stdout.readline() if not line: break line = line.rstrip('\n') outp(line) result = proc.wait() if result != 0: return ('error', result) else: return None class Builder(): def __init__(self, name, defconfig): self.name = name self.defconfig = defconfig self.confname = self.defconfig.split('/')[-1] # Determine if this is a 64-bit target based on the location # of the defconfig. 
self.make_env = os.environ.copy() if "/arm64/" in defconfig: if compile64: self.make_env['CROSS_COMPILE'] = compile64 else: fail("Attempting to build 64-bit, without setting CROSS_COMPILE64") self.make_env['ARCH'] = 'arm64' else: self.make_env['ARCH'] = 'arm' self.make_env['KCONFIG_NOTIMESTAMP'] = 'true' self.log_name = "%s/log-%s.log" % (build_dir, self.name) def build(self): steps = [] dest_dir = os.path.join(build_dir, self.name) log_name = "%s/log-%s.log" % (build_dir, self.name) steps.append(PrintStep('Building %s in %s log %s' % (self.name, dest_dir, log_name))) if not os.path.isdir(dest_dir): steps.append(MkdirStep(dest_dir)) defconfig = self.defconfig dotconfig = '%s/.config' % dest_dir savedefconfig = '%s/defconfig' % dest_dir staging_dir = 'install_staging' modi_dir = '%s' % staging_dir hdri_dir = '%s/usr' % staging_dir steps.append(RmtreeStep(os.path.join(dest_dir, staging_dir))) steps.append(ExecStep(['make', 'O=%s' % dest_dir, self.confname], env=self.make_env)) if not all_options.updateconfigs: # Build targets can be dependent upon the completion of # previous build targets, so build them one at a time. cmd_line = ['make', 'INSTALL_HDR_PATH=%s' % hdri_dir, 'INSTALL_MOD_PATH=%s' % modi_dir, 'O=%s' % dest_dir] build_targets = [] for c in make_command: if re.match(r'^-{1,2}\w', c): cmd_line.append(c) else: build_targets.append(c) for t in build_targets: steps.append(ExecStep(cmd_line + [t], env=self.make_env)) # Copy the defconfig back. if all_options.configs or all_options.updateconfigs: steps.append(ExecStep(['make', 'O=%s' % dest_dir, 'savedefconfig'], env=self.make_env)) steps.append(CopyfileStep(savedefconfig, defconfig)) return steps def update_config(file, str): print 'Updating %s with \'%s\'\n' % (file, str) with open(file, 'a') as defconfig: defconfig.write(str + '\n') def scan_configs(): """Get the full list of defconfigs appropriate for this tree.""" names = [] arch_pats = ( r'[fm]sm[0-9]*_defconfig', r'apq*_defconfig', r'qsd*_defconfig', r'mdm*_defconfig', r'mpq*_defconfig', ) arch64_pats = ( r'msm*_defconfig', ) for p in arch_pats: for n in glob.glob('arch/arm/configs/' + p): name = os.path.basename(n)[:-10] names.append(Builder(name, n)) if 'CROSS_COMPILE64' in os.environ: for p in arch64_pats: for n in glob.glob('arch/arm64/configs/' + p): name = os.path.basename(n)[:-10] + "-64" names.append(Builder(name, n)) return names def build_many(targets): print "Building %d target(s)" % len(targets) # If we are requesting multiple builds, divide down the job number # to construct the make_command, giving it a floor of 2, so there # is still some parallelism. if all_options.jobs and all_options.jobs > 1: j = max(all_options.jobs / len(targets), 2) make_command.append("-j" + str(j)) tracker = BuildTracker() for target in targets: if all_options.updateconfigs: update_config(target.defconfig, all_options.updateconfigs) steps = target.build() tracker.add_sequence(target.log_name, target.name, steps) tracker.run() def main(): global make_command check_kernel() check_build() configs = scan_configs() usage = (""" %prog [options] all -- Build all targets %prog [options] target target ... 
-- List specific targets %prog [options] perf -- Build all perf targets %prog [options] noperf -- Build all non-perf targets""") parser = OptionParser(usage=usage, version=version) parser.add_option('--configs', action='store_true', dest='configs', help="Copy configs back into tree") parser.add_option('--list', action='store_true', dest='list', help='List available targets') parser.add_option('-v', '--verbose', action='store_true', dest='verbose', help='Output to stdout in addition to log file') parser.add_option('--oldconfig', action='store_true', dest='oldconfig', help='Only process "make oldconfig"') parser.add_option('--updateconfigs', dest='updateconfigs', help="Update defconfigs with provided option setting, " "e.g. --updateconfigs=\'CONFIG_USE_THING=y\'") parser.add_option('-j', '--jobs', type='int', dest="jobs", help="Number of simultaneous jobs") parser.add_option('-l', '--load-average', type='int', dest='load_average', help="Don't start multiple jobs unless load is below LOAD_AVERAGE") parser.add_option('-k', '--keep-going', action='store_true', dest='keep_going', default=False, help="Keep building other targets if a target fails") parser.add_option('-m', '--make-target', action='append', help='Build the indicated make target (default: %s)' % ' '.join(make_command)) (options, args) = parser.parse_args() global all_options all_options = options if options.list: print "Available targets:" for target in configs: print " %s" % target.name sys.exit(0) if options.oldconfig: make_command = ["oldconfig"] elif options.make_target: make_command = options.make_target if args == ['all']: build_many(configs) elif args == ['perf']: targets = [] for t in configs: if "perf" in t.name: targets.append(t) build_many(targets) elif args == ['noperf']: targets = [] for t in configs: if "perf" not in t.name: targets.append(t) build_many(targets) elif len(args) > 0: all_configs = {} for t in configs: all_configs[t.name] = t targets = [] for t in args: if t not in all_configs: parser.error("Target '%s' not one of %s" % (t, all_configs.keys())) targets.append(all_configs[t]) build_many(targets) else: parser.error("Must specify a target to build, or 'all'") if __name__ == "__main__": main()
gpl-2.0
6,500,356,498,737,040,000
33.183721
86
0.580312
false
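build-all.py composes each target out of small step objects (MkdirStep, ExecStep, ...) that share one contract: run(outp) logs through the supplied printer and returns None on success or an ('error', code) tuple on failure. A self-contained Python 3 sketch of that contract (the step class and commands here are illustrative, not part of the script):

import subprocess
import sys

class EchoStep(object):
    """Minimal step following the same run(outp) contract as ExecStep above."""
    def __init__(self, cmd):
        self.cmd = cmd
    def run(self, outp):
        outp("exec: %s" % " ".join(self.cmd))
        result = subprocess.call(self.cmd)
        return ('error', result) if result != 0 else None

steps = [EchoStep([sys.executable, "-c", "print('configure')"]),
         EchoStep([sys.executable, "-c", "print('build')"])]
for step in steps:
    status = step.run(print)     # the real script routes outp into a per-target log file
    if status:
        print("step failed:", status)
        break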
ivaano/zato
code/zato-server/src/zato/server/service/internal/outgoing/odoo.py
6
2708
# -*- coding: utf-8 -*- """ Copyright (C) 2014 Dariusz Suchojad <dsuch at zato.io> Licensed under LGPLv3, see LICENSE.txt for terms and conditions. """ from __future__ import absolute_import, division, print_function, unicode_literals # stdlib from contextlib import closing from time import time from uuid import uuid4 # Zato from zato.common.broker_message import OUTGOING from zato.common.odb.model import OutgoingOdoo from zato.common.odb.query import out_odoo_list from zato.common.util import ping_odoo from zato.server.service.internal import AdminService, AdminSIO, ChangePasswordBase from zato.server.service.meta import CreateEditMeta, DeleteMeta, GetListMeta elem = 'email_imap' model = OutgoingOdoo label = 'an Odoo connection' broker_message = OUTGOING broker_message_prefix = 'ODOO_' list_func = out_odoo_list skip_input_params = ['password'] def instance_hook(service, input, instance, attrs): if 'create' in service.get_name().lower(): instance.password = uuid4().hex def broker_message_hook(service, input, instance, attrs, service_type): if service_type == 'create_edit': input.password = instance.password class GetList(AdminService): __metaclass__ = GetListMeta class Create(AdminService): __metaclass__ = CreateEditMeta class Edit(AdminService): __metaclass__ = CreateEditMeta class Delete(AdminService): __metaclass__ = DeleteMeta class ChangePassword(ChangePasswordBase): """ Changes the password of an Odoo connection """ password_required = False class SimpleIO(ChangePasswordBase.SimpleIO): request_elem = 'zato_outgoing_odoo_change_password_request' response_elem = 'zato_outgoing_odoo_change_password_response' def handle(self): def _auth(instance, password): instance.password = password return self._handle(OutgoingOdoo, _auth, OUTGOING.ODOO_CHANGE_PASSWORD.value, publish_instance_attrs=['host', 'protocol', 'port', 'database', 'user', 'password', 'pool_size']) class Ping(AdminService): class SimpleIO(AdminSIO): request_elem = 'zato_outgoing_odoo_ping_request' response_elem = 'zato_outgoing_odoo_ping_response' input_required = ('id',) output_required = ('info',) def handle(self): with closing(self.odb.session()) as session: item = session.query(OutgoingOdoo).filter_by(id=self.request.input.id).one() with self.outgoing.odoo[item.name].conn.client() as client: start_time = time() ping_odoo(client) response_time = time() - start_time self.response.payload.info = 'Ping OK, took:`{0:03.4f} s`'.format(response_time)
gpl-3.0
-791,674,962,149,542,800
30.126437
109
0.694978
false
WadeYuChen/django-oscar
src/oscar/apps/dashboard/partners/views.py
8
10728
from django.contrib import messages from django.contrib.auth.models import Permission from django.core.urlresolvers import reverse_lazy, reverse from django.shortcuts import get_object_or_404, redirect from django.utils.translation import ugettext_lazy as _ from django.template.loader import render_to_string from django.views import generic from oscar.apps.customer.utils import normalise_email from oscar.core.loading import get_classes, get_model from oscar.core.compat import get_user_model from oscar.views import sort_queryset User = get_user_model() Partner = get_model('partner', 'Partner') ( PartnerSearchForm, PartnerCreateForm, PartnerAddressForm, NewUserForm, UserEmailForm, ExistingUserForm ) = get_classes( 'dashboard.partners.forms', ['PartnerSearchForm', 'PartnerCreateForm', 'PartnerAddressForm', 'NewUserForm', 'UserEmailForm', 'ExistingUserForm']) class PartnerListView(generic.ListView): model = Partner context_object_name = 'partners' template_name = 'dashboard/partners/partner_list.html' form_class = PartnerSearchForm def get_queryset(self): qs = self.model._default_manager.all() qs = sort_queryset(qs, self.request, ['name']) self.description = _("All partners") # We track whether the queryset is filtered to determine whether we # show the search form 'reset' button. self.is_filtered = False self.form = self.form_class(self.request.GET) if not self.form.is_valid(): return qs data = self.form.cleaned_data if data['name']: qs = qs.filter(name__icontains=data['name']) self.description = _("Partners matching '%s'") % data['name'] self.is_filtered = True return qs def get_context_data(self, **kwargs): ctx = super(PartnerListView, self).get_context_data(**kwargs) ctx['queryset_description'] = self.description ctx['form'] = self.form ctx['is_filtered'] = self.is_filtered return ctx class PartnerCreateView(generic.CreateView): model = Partner template_name = 'dashboard/partners/partner_form.html' form_class = PartnerCreateForm success_url = reverse_lazy('dashboard:partner-list') def get_context_data(self, **kwargs): ctx = super(PartnerCreateView, self).get_context_data(**kwargs) ctx['title'] = _('Create new partner') return ctx def get_success_url(self): messages.success(self.request, _("Partner '%s' was created successfully.") % self.object.name) return reverse('dashboard:partner-list') class PartnerManageView(generic.UpdateView): """ This multi-purpose view renders out a form to edit the partner's details, the associated address and a list of all associated users. 
""" template_name = 'dashboard/partners/partner_manage.html' form_class = PartnerAddressForm success_url = reverse_lazy('dashboard:partner-list') def get_object(self, queryset=None): self.partner = get_object_or_404(Partner, pk=self.kwargs['pk']) address = self.partner.primary_address if address is None: address = self.partner.addresses.model(partner=self.partner) return address def get_initial(self): return {'name': self.partner.name} def get_context_data(self, **kwargs): ctx = super(PartnerManageView, self).get_context_data(**kwargs) ctx['partner'] = self.partner ctx['title'] = self.partner.name ctx['users'] = self.partner.users.all() return ctx def form_valid(self, form): messages.success( self.request, _("Partner '%s' was updated successfully.") % self.partner.name) self.partner.name = form.cleaned_data['name'] self.partner.save() return super(PartnerManageView, self).form_valid(form) class PartnerDeleteView(generic.DeleteView): model = Partner template_name = 'dashboard/partners/partner_delete.html' def get_success_url(self): messages.success(self.request, _("Partner '%s' was deleted successfully.") % self.object.name) return reverse('dashboard:partner-list') # ============= # Partner users # ============= class PartnerUserCreateView(generic.CreateView): model = User template_name = 'dashboard/partners/partner_user_form.html' form_class = NewUserForm def dispatch(self, request, *args, **kwargs): self.partner = get_object_or_404( Partner, pk=kwargs.get('partner_pk', None)) return super(PartnerUserCreateView, self).dispatch( request, *args, **kwargs) def get_context_data(self, **kwargs): ctx = super(PartnerUserCreateView, self).get_context_data(**kwargs) ctx['partner'] = self.partner ctx['title'] = _('Create user') return ctx def get_form_kwargs(self): kwargs = super(PartnerUserCreateView, self).get_form_kwargs() kwargs['partner'] = self.partner return kwargs def get_success_url(self): name = self.object.get_full_name() or self.object.email messages.success(self.request, _("User '%s' was created successfully.") % name) return reverse('dashboard:partner-list') class PartnerUserSelectView(generic.ListView): template_name = 'dashboard/partners/partner_user_select.html' form_class = UserEmailForm context_object_name = 'users' def dispatch(self, request, *args, **kwargs): self.partner = get_object_or_404( Partner, pk=kwargs.get('partner_pk', None)) return super(PartnerUserSelectView, self).dispatch( request, *args, **kwargs) def get(self, request, *args, **kwargs): data = None if 'email' in request.GET: data = request.GET self.form = self.form_class(data) return super(PartnerUserSelectView, self).get(request, *args, **kwargs) def get_context_data(self, **kwargs): ctx = super(PartnerUserSelectView, self).get_context_data(**kwargs) ctx['partner'] = self.partner ctx['form'] = self.form return ctx def get_queryset(self): if self.form.is_valid(): email = normalise_email(self.form.cleaned_data['email']) return User.objects.filter(email__icontains=email) else: return User.objects.none() class PartnerUserLinkView(generic.View): def get(self, request, user_pk, partner_pk): # need to allow GET to make Undo link in PartnerUserUnlinkView work return self.post(request, user_pk, partner_pk) def post(self, request, user_pk, partner_pk): user = get_object_or_404(User, pk=user_pk) name = user.get_full_name() or user.email partner = get_object_or_404(Partner, pk=partner_pk) if self.link_user(user, partner): messages.success( request, _("User '%(name)s' was linked to '%(partner_name)s'") % {'name': name, 
'partner_name': partner.name}) else: messages.info( request, _("User '%(name)s' is already linked to '%(partner_name)s'") % {'name': name, 'partner_name': partner.name}) return redirect('dashboard:partner-manage', pk=partner_pk) def link_user(self, user, partner): """ Links a user to a partner, and adds the dashboard permission if needed. Returns False if the user was linked already; True otherwise. """ if partner.users.filter(pk=user.pk).exists(): return False partner.users.add(user) if not user.is_staff: dashboard_access_perm = Permission.objects.get( codename='dashboard_access', content_type__app_label='partner') user.user_permissions.add(dashboard_access_perm) return True class PartnerUserUnlinkView(generic.View): def unlink_user(self, user, partner): """ Unlinks a user from a partner, and removes the dashboard permission if they are not linked to any other partners. Returns False if the user was not linked to the partner; True otherwise. """ if not partner.users.filter(pk=user.pk).exists(): return False partner.users.remove(user) if not user.is_staff and not user.partners.exists(): dashboard_access_perm = Permission.objects.get( codename='dashboard_access', content_type__app_label='partner') user.user_permissions.remove(dashboard_access_perm) return True def post(self, request, user_pk, partner_pk): user = get_object_or_404(User, pk=user_pk) name = user.get_full_name() or user.email partner = get_object_or_404(Partner, pk=partner_pk) if self.unlink_user(user, partner): msg = render_to_string( 'dashboard/partners/messages/user_unlinked.html', {'user_name': name, 'partner_name': partner.name, 'user_pk': user_pk, 'partner_pk': partner_pk}) messages.success(self.request, msg, extra_tags='safe noicon') else: messages.error( request, _("User '%(name)s' is not linked to '%(partner_name)s'") % {'name': name, 'partner_name': partner.name}) return redirect('dashboard:partner-manage', pk=partner_pk) # ===== # Users # ===== class PartnerUserUpdateView(generic.UpdateView): template_name = 'dashboard/partners/partner_user_form.html' form_class = ExistingUserForm def get_object(self, queryset=None): self.partner = get_object_or_404(Partner, pk=self.kwargs['partner_pk']) return get_object_or_404(User, pk=self.kwargs['user_pk'], partners__pk=self.kwargs['partner_pk']) def get_context_data(self, **kwargs): ctx = super(PartnerUserUpdateView, self).get_context_data(**kwargs) name = self.object.get_full_name() or self.object.email ctx['partner'] = self.partner ctx['title'] = _("Edit user '%s'") % name return ctx def get_success_url(self): name = self.object.get_full_name() or self.object.email messages.success(self.request, _("User '%s' was updated successfully.") % name) return reverse('dashboard:partner-list')
bsd-3-clause
-8,535,434,178,053,574,000
35.243243
79
0.618848
false
hmoco/osf.io
scripts/migration/migrate_meetings_preprints_to_preprintservices.py
9
58159
from datetime import timedelta import json import logging import re import sys from modularodm import Q from modularodm.storage.base import KeyExistsException from modularodm.exceptions import NoResultsFound from framework.mongo import database from framework.transactions.context import TokuTransaction from scripts import utils as script_utils from website.app import init_app from website import models from website import settings logger = logging.getLogger(__name__) # Target set. Loaded from --targets flag target_data = [] POSSIBLE_PREPRINT_PROVIDER_KEYS = None SOC_SUBJ_ID = None ENG_SUBJ_ID = None PSY_SUBJ_ID = None def set_globals(): # Must be run after backends are set with init_app global POSSIBLE_PREPRINT_PROVIDER_KEYS global SOC_SUBJ_ID global ENG_SUBJ_ID global PSY_SUBJ_ID POSSIBLE_PREPRINT_PROVIDER_KEYS = set([t._id for t in models.Tag.find(Q('lower', 'in', ['psyarxiv','engrxiv','socarxiv']))]) try: # PLOS SOC_SUBJ_ID = models.Subject.find_one(Q('text', 'eq', 'Social and behavioral sciences'))._id ENG_SUBJ_ID = models.Subject.find_one(Q('text', 'eq', 'Engineering and technology'))._id PSY_SUBJ_ID = models.Subject.find_one(Q('text', 'eq', 'Social psychology'))._id except NoResultsFound: try: # BePress SOC_SUBJ_ID = models.Subject.find_one(Q('text', 'eq', 'Social and Behavioral Sciences'))._id ENG_SUBJ_ID = models.Subject.find_one(Q('text', 'eq', 'Engineering'))._id PSY_SUBJ_ID = models.Subject.find_one(Q('text', 'eq', 'Social Psychology'))._id except: raise RuntimeError('Unable to find default subjects. Please ensure the existence of:\n\t' + \ '\'Engineering and technology\' (BePress: \'Engineering\'),\n\t' + \ '\'Social and behavioral sciences\' (BePress: \'Social and Behavioral Sciences\'),\n\t' + \ '\'Social psychology\' (BePress: \'Social Psychology\')' ) # Multiple updates to any <node>.child_node_subscriptions causes only the last one to succeed. # Cache the intended value instead, updating it here before writing. 
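# Editor's note, not part of the original migration script: the cache described
# above is a read-modify-write accumulator. Each guid swap rewrites
# old_id -> new_id inside the last *intended* value for a parent node, so
# several swaps touching the same node are not lost to Mongo's last-write-wins
# behaviour. Illustrative sketch only; the helper name and signature below are
# hypothetical (json and re are imported at the top of this module).
def _apply_cns_swap(doc_id, current_value, old_id, new_id, cache):
    base = cache.get(doc_id, current_value)  # start from the cached intent if this doc was already touched
    updated = json.loads(re.sub(r'\b{}\b'.format(old_id), new_id, json.dumps(base)))
    cache[doc_id] = updated  # remember the new intent for any later swap
    return updated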
cns_dict_to_update = {} # Dictionary containing {<preprint._id>: <node._id>} mapping for pairs that swapped guids preprint_node_swapped_ids_map = {} successes = [] failures = [] created_preprints = [] external_preprints = [] preprint_node_mapping = {} def create_indices(): logger.info('Creating database indices...') database.nodelog.ensure_index([('params.auth.callback_url', 1)]) database.nodelog.ensure_index([('params.node', 1)]) database.nodelog.ensure_index([('params.parent', 1)]) database.nodelog.ensure_index([('params.project', 1)]) database.nodelog.ensure_index([('params.parent_node', 1)]) database.nodelog.ensure_index([('params.destination.nid', 1)]) database.nodelog.ensure_index([('params.destination.resource', 1)]) database.nodelog.ensure_index([('params.destination.node._id', 1)]) database.nodelog.ensure_index([('params.pointer.id', 1)]) database.nodelog.ensure_index([('params.source.nid', 1)]) database.nodelog.ensure_index([('params.source.node._id', 1)]) database.nodelog.ensure_index([('params.source.resource', 1)]) database.nodelog.ensure_index([('params.template_node.id', 1)]) database.nodelog.ensure_index([('params.registration', 1)]) database.nodelog.ensure_index([('params.fork', 1)]) database.nodelog.ensure_index([('params.source.node._id', 1)]) database.nodewikipage.ensure_index([('node', 1)]) database.identifier.ensure_index([('referent', 1)]) database.session.ensure_index([('data', 1)]) def drop_indices(): logger.info('Cleaning up indices...') database.nodelog.drop_index([('params.auth.callback_url', 1)]) database.nodelog.drop_index([('params.node', 1)]) database.nodelog.drop_index([('params.parent', 1)]) database.nodelog.drop_index([('params.project', 1)]) database.nodelog.drop_index([('params.parent_node', 1)]) database.nodelog.drop_index([('params.destination.nid', 1)]) database.nodelog.drop_index([('params.destination.resource', 1)]) database.nodelog.drop_index([('params.destination.node._id', 1)]) database.nodelog.drop_index([('params.pointer.id', 1)]) database.nodelog.drop_index([('params.source.nid', 1)]) database.nodelog.drop_index([('params.source.node._id', 1)]) database.nodelog.drop_index([('params.source.resource', 1)]) database.nodelog.drop_index([('params.template_node.id', 1)]) database.nodelog.drop_index([('params.registration', 1)]) database.nodelog.drop_index([('params.fork', 1)]) database.nodelog.drop_index([('params.source.node._id', 1)]) database.nodewikipage.drop_index([('node', 1)]) database.identifier.drop_index([('referent', 1)]) database.session.drop_index([('data', 1)]) def validate_target(target): logger.info('* Validating and updating node {}'.format(target['node_id'])) updates = {} node = database['node'].find_one(target['node_id']) file = database['storedfilenode'].find_one(target['file_id']) assert node, 'Unable to find Node with _id {}'.format(target['node_id']) assert file, 'Unable to find File with _id {}'.format(target['file_id']) # 1 node to be migrated has the socarxiv tag on the parent_node # assert target['provider_id'] in set([tag.lower() for tag in node.get('tags', [])]) & POSSIBLE_PREPRINT_PROVIDER_KEYS, 'Unable to infer PreprintProvider for node {} with tags {}'.format(node['_id'], node['tags']) assert file['node'] == node['_id'], 'File {} with `node` {} not attached to Node {}'.format(file_id, file['node'], node['_id']) assert not database['preprintservice'].find({'node': target['node_id']}, {'_id': 1}).count(), 'Cannot migrate a node that already has a preprint' if target.get('subjects'): 
validate_subjects(target['subjects']) if not node.get('preprint_file', None): updates.update({'preprint_file': file['_id']}) if not node.get('preprint_created', None): updates.update({'preprint_created': infer_preprint_created(target['node_id'], target['provider_id'])}) if updates: logger.debug('{} has no preprint_file, setting'.format(node['_id'])) database['node'].find_and_modify( {'_id': node['_id']}, {'$set': updates} ) def validate_subjects(subj_hierarchy): for subject_list in subj_hierarchy: for subject_id in subject_list: subject = models.Subject.load(subject_id) if not subject: logger.error('Found nonexistant subject {}.'.format(subject_id)) raise Exception('Found nonexistant subject {}.'.format(subject_id)) if subject.parents and not set([c._id for c in subject.parents]) & set(subject_list): logger.error('Found subject {} without parents.'.format(subject_id)) raise Exception('Found subject {} without parents.'.format(subject_id)) def infer_preprint_created(node_id, provider_id): logs = models.NodeLog.find(Q('node', 'eq', node_id) & Q('action', 'eq', 'tag_added') & Q('params.tag', 'in', list(POSSIBLE_PREPRINT_PROVIDER_KEYS))) if not logs: parent_node_id = database['node'].find_one({'_id': node_id})['parent_node'] if parent_node_id: return infer_preprint_created(parent_node_id, provider_id) else: raise AssertionError('Cannot infer created from tag_added log') return min([l.date for l in logs if re.match(provider_id, l.params['tag'], re.I)]) def add_preprint_log(preprint): logs = models.NodeLog.find(Q('node', 'eq', preprint.node._id) & Q('action', 'eq', 'tag_added') & Q('params.tag', 'in', [preprint.provider._id])) date_preprint_created = min([l.date for l in logs]) user = logs[0].user new_log = models.NodeLog( action='preprint_initiated', user=user, params={ 'node': preprint.node._id, 'preprint': preprint._id }, node=preprint.node._id, original_node=preprint.node._id, date=date_preprint_created, ) new_log.save() def create_preprint_service_from_target(target, swap_cutoff): created = {} node_doc = database['node'].find_one(target['node_id']) provider_id = target['provider_id'] non_osf_provider = provider_id != 'osf' node = models.Node.load(node_doc['_id']) provider = models.PreprintProvider.load(provider_id) # primary_file already set correctly* on node if not provider: raise Exception('Unable to find provider {} for node {}, erroring'.format(provider_id, node_doc['_id'])) if not node: raise Exception('Unable to find node {}, erroring.'.format(node_doc['_id'])) subjects = target.get('subjects', {'socarxiv': [[SOC_SUBJ_ID]], 'engrxiv': [[ENG_SUBJ_ID]], 'psyarxiv': [[SOC_SUBJ_ID, PSY_SUBJ_ID]]}[provider_id]) try: logger.info('* Creating preprint for node {}'.format(node._id)) preprint = models.PreprintService(node=node, provider=provider) preprint.save() database['preprintservice'].find_and_modify( {'_id': preprint._id}, {'$set': { 'date_created': node_doc['preprint_created'], 'date_published': node_doc['preprint_created'], 'subjects': subjects, 'is_published': True }} ) except KeyExistsException: logger.warn('Duplicate PreprintService found for provider {} on node {}, skipping'.format(provider._id, node._id)) else: if node_doc.get('preprint_doi'): database['node'].find_and_modify( {'_id': node._id}, {'$set': { 'preprint_article_doi': node_doc['preprint_doi'] }} ) database['node'].find_and_modify( {'_id': node._id}, {'$unset': { 'preprint_doi': '', 'preprint_created': '' }} ) node.reload() preprint.reload() if should_swap_guids(node, preprint, swap_cutoff): swap_guids(node, preprint) 
node.reload() preprint.reload() preprint.node.reload() # add_preprint_log(preprint) # Don't log this action database['preprintservice'].find_and_modify( {'_id': preprint._id}, {'$set': { 'date_modified': node_doc['preprint_created'], }} ) created.update({preprint._id: (node._id, non_osf_provider)}) node.system_tags.append('migrated_from_osf4m') if not node.description: wiki_home = node.get_wiki_page('home') if wiki_home and wiki_home.content: node.description = wiki_home.content node.save() return created def should_swap_guids(node, preprint, swap_cutoff): logger.info('Preprint {} - Node {} timedelta = {}'.format(preprint._id, node._id, preprint.date_created - node.date_created)) not_too_old = preprint.date_created - node.date_created < swap_cutoff not_previously_swapped = not database['preprintservice'].find({'node': node._id}).count() > 1 if not_too_old and not_previously_swapped: return True if not not_too_old: logger.info('* Not swapping guids for preprint {} and preexisting node {}'.format(preprint._id, node._id)) if not not_previously_swapped: logger.info('* Not swapping guids for preprint {} and already-swapped node {}'.format(preprint._id, node._id)) return False def swap_guids(node, preprint): if node._backrefs.get('addons', {}).get('addonfilesnodesettings'): database['node'].find_and_modify( {'_id': node._id}, {'$unset': { '__backrefs.addons.addonfilesnodesettings': '' }} ) node.reload() if node._backrefs.get('uploads', {}).get('nodefile'): database['node'].find_and_modify( {'_id': node._id}, {'$unset': { '__backrefs.uploads.nodefile': '' }} ) node.reload() logger.info('* Swapping guids for preprint {} and node {}'.format(preprint._id, node._id)) preprint_node_swapped_ids_map[node._id] = preprint._id # node._id is about to become preprint._id, reverse here old_guid = models.Guid.load(node._id) new_guid = models.Guid.load(preprint._id) node._id = new_guid._id node.save() preprint._id = old_guid._id preprint.node = node preprint.save() old_guid.referent = preprint new_guid.referent = node old_guid.save() new_guid.save() update_foreign_fields(old_guid._id, node) def update_foreign_fields(old_id, node): dry_run = '--dry' in sys.argv logger.info('* Updating ForeignFields for node {}->{}'.format(old_id, node)) bns_owner = list(database['boxnodesettings'].find({'owner': old_id})) if bns_owner: logger.info('** Updating {} BoxNodeSettings (owner) {}'.format(old_id, [d['_id'] for d in bns_owner])) for doc in bns_owner: database['boxnodesettings'].find_and_modify( {'_id': doc['_id']}, {'$set':{ 'owner': node._id }} ) bus_og = list(database['boxusersettings'].find({'oauth_grants.{}'.format(old_id): {'$ne': None}})) if bus_og: logger.info('** Updating {} BoxUserSettings (oauth_grants) {}'.format(old_id, [d['_id'] for d in bus_og])) for doc in bus_og: og = doc['oauth_grants'] og[node._id] = og.pop(old_id) database['boxusersettings'].find_and_modify( {'_id': doc['_id']}, {'$set':{ 'oauth_grants': og }} ) advns_o = list(database['addondataversenodesettings'].find({'owner': old_id})) if advns_o: logger.info('** Updating {} AddonDataverseNodeSettings (owner) {}'.format(old_id, [d['_id'] for d in advns_o])) for doc in advns_o: database['addondataversenodesettings'].find_and_modify( {'_id': doc['_id']}, {'$set':{ 'owner': node._id }} ) advus_og = list(database['addondataverseusersettings'].find({'oauth_grants.{}'.format(old_id): {'$ne': None}})) if advus_og: logger.info('** Updating {} AddonDataverseUserSettings (oauth_grants) {}'.format(old_id, [d['_id'] for d in advus_og])) for doc in advus_og: og 
= doc['oauth_grants'] og[node._id] = og.pop(old_id) database['addondataverseusersettings'].find_and_modify( {'_id': doc['_id']}, {'$set':{ 'oauth_grants': og }} ) dbns_o = list(database['dropboxnodesettings'].find({'owner': old_id})) if dbns_o: logger.info('** Updating {} DropboxNodeSettings (owner) {}'.format(old_id, [d['_id'] for d in dbns_o])) for doc in dbns_o: database['dropboxnodesettings'].find_and_modify( {'_id': doc['_id']}, {'$set':{ 'owner': node._id }} ) dbus_og = list(database['dropboxusersettings'].find({'oauth_grants.{}'.format(old_id): {'$ne': None}})) if dbus_og: logger.info('** Updating {} DropboxUserSettings (oauth_grants) {}'.format(old_id, [d['_id'] for d in dbus_og])) for doc in dbus_og: og = doc['oauth_grants'] og[node._id] = og.pop(old_id) database['dropboxusersettings'].find_and_modify( {'_id': doc['_id']}, {'$set':{ 'oauth_grants': og }} ) afsns_o = list(database['addonfigsharenodesettings'].find({'owner': old_id})) if afsns_o: logger.info('** Updating {} AddonFigShareNodeSettings (owner) {}'.format(old_id, [d['_id'] for d in afsns_o])) for doc in afsns_o: database['addonfigsharenodesettings'].find_and_modify( {'_id': doc['_id']}, {'$set':{ 'owner': node._id }} ) ## Figshare has no oauth_grants fwns_o = list(database['forwardnodesettings'].find({'owner': old_id})) if fwns_o: logger.info('** Updating {} ForwardNodeSettings (owner) {}'.format(old_id, [d['_id'] for d in fwns_o])) for doc in fwns_o: database['forwardnodesettings'].find_and_modify( {'_id': doc['_id']}, {'$set':{ 'owner': node._id }} ) ghns_o = list(database['githubnodesettings'].find({'owner': old_id})) if ghns_o: logger.info('** Updating {} GithubNodeSettings (owner) {}'.format(old_id, [d['_id'] for d in ghns_o])) for doc in ghns_o: database['githubnodesettings'].find_and_modify( {'_id': doc['_id']}, {'$set':{ 'owner': node._id }} ) ghus_og = list(database['githubusersettings'].find({'oauth_grants.{}'.format(old_id): {'$ne': None}})) if ghus_og: logger.info('** Updating {} GithubUserSettings (oauth_grants) {}'.format(old_id, [d['_id'] for d in ghus_og])) for doc in ghus_og: og = doc['oauth_grants'] og[node._id] = og.pop(old_id) database['githubusersettings'].find_and_modify( {'_id': doc['_id']}, {'$set':{ 'oauth_grants': og }} ) gdns_o = list(database['googledrivenodesettings'].find({'owner': old_id})) if gdns_o: logger.info('** Updating {} GoogleDriveNodeSettings (owner) {}'.format(old_id, [d['_id'] for d in gdns_o])) for doc in gdns_o: database['googledrivenodesettings'].find_and_modify( {'_id': doc['_id']}, {'$set':{ 'owner': node._id }} ) gdus_og = list(database['googledriveusersettings'].find({'oauth_grants.{}'.format(old_id): {'$ne': None}})) if gdus_og: logger.info('** Updating {} GoogleDriveUserSettings (oauth_grants) {}'.format(old_id, [d['_id'] for d in gdus_og])) for doc in gdus_og: og = doc['oauth_grants'] og[node._id] = og.pop(old_id) database['googledriveusersettings'].find_and_modify( {'_id': doc['_id']}, {'$set':{ 'oauth_grants': og }} ) mns_o = list(database['mendeleynodesettings'].find({'owner': old_id})) if mns_o: logger.info('** Updating {} MendeleyNodeSettings (owner) {}'.format(old_id, [d['_id'] for d in mns_o])) for doc in mns_o: database['mendeleynodesettings'].find_and_modify( {'_id': doc['_id']}, {'$set':{ 'owner': node._id }} ) mus_og = list(database['mendeleyusersettings'].find({'oauth_grants.{}'.format(old_id): {'$ne': None}})) if mus_og: logger.info('** Updating {} MendeleyUserSettings (oauth_grants) {}'.format(old_id, [d['_id'] for d in mus_og])) for doc in mus_og: og = 
doc['oauth_grants'] og[node._id] = og.pop(old_id) database['mendeleyusersettings'].find_and_modify( {'_id': doc['_id']}, {'$set':{ 'oauth_grants': og }} ) osfsns_o = list(database['osfstoragenodesettings'].find({'owner': old_id})) if osfsns_o: logger.info('** Updating {} OsfStorageNodeSettings (owner) {}'.format(old_id, [d['_id'] for d in osfsns_o])) for doc in osfsns_o: database['osfstoragenodesettings'].find_and_modify( {'_id': doc['_id']}, {'$set':{ 'owner': node._id }} ) ocns_o = list(database['addonowncloudnodesettings'].find({'owner': old_id})) if ocns_o: logger.info('** Updating {} AddonOwnCloudNodeSettings (owner) {}'.format(old_id, [d['_id'] for d in ocns_o])) for doc in ocns_o: database['addonowncloudnodesettings'].find_and_modify( {'_id': doc['_id']}, {'$set':{ 'owner': node._id }} ) ocus_og = list(database['addonowncloudusersettings'].find({'oauth_grants.{}'.format(old_id): {'$ne': None}})) if ocus_og: logger.info('** Updating {} AddonOwnCloudUserSettings (oauth_grants) {}'.format(old_id, [d['_id'] for d in ocus_og])) for doc in ocus_og: og = doc['oauth_grants'] og[node._id] = og.pop(old_id) database['addonowncloudusersettings'].find_and_modify( {'_id': doc['_id']}, {'$set':{ 'oauth_grants': og }} ) s3ns_o = list(database['s3nodesettings'].find({'owner': old_id})) if s3ns_o: logger.info('** Updating {} s3NodeSettings (owner) {}'.format(old_id, [d['_id'] for d in s3ns_o])) for doc in s3ns_o: database['s3nodesettings'].find_and_modify( {'_id': doc['_id']}, {'$set':{ 'owner': node._id }} ) s3us_og = list(database['s3usersettings'].find({'oauth_grants.{}'.format(old_id): {'$ne': None}})) if s3us_og: logger.info('** Updating {} S3UserSettings (oauth_grants) {}'.format(old_id, [d['_id'] for d in s3us_og])) for doc in s3us_og: og = doc['oauth_grants'] og[node._id] = og.pop(old_id) database['s3usersettings'].find_and_modify( {'_id': doc['_id']}, {'$set':{ 'oauth_grants': og }} ) awns_o = list(database['addonwikinodesettings'].find({'owner': old_id})) if awns_o: logger.info('** Updating {} AddonWikiNodeSettings (owner) {}'.format(old_id, [d['_id'] for d in awns_o])) for doc in awns_o: database['addonwikinodesettings'].find_and_modify( {'_id': doc['_id']}, {'$set':{ 'owner': node._id }} ) nwp_n = list(database['nodewikipage'].find({'node': old_id})) if nwp_n: logger.info('** Updating {} NodeWikiPage (node) {}'.format(old_id, [d['_id'] for d in nwp_n])) for doc in nwp_n: database['nodewikipage'].find_and_modify( {'_id': doc['_id']}, {'$set':{ 'node': node._id }} ) zns_o = list(database['zoteronodesettings'].find({'owner': old_id})) if zns_o: logger.info('** Updating {} ZoteroNodeSettings (owner) {}'.format(old_id, [d['_id'] for d in zns_o])) for doc in zns_o: database['zoteronodesettings'].find_and_modify( {'_id': doc['_id']}, {'$set':{ 'owner': node._id }} ) zus_og = list(database['zoterousersettings'].find({'oauth_grants.{}'.format(old_id): {'$ne': None}})) if zus_og: logger.info('** Updating {} ZoteroUserSettings (oauth_grants) {}'.format(old_id, [d['_id'] for d in zus_og])) for doc in zus_og: og = doc['oauth_grants'] og[node._id] = og.pop(old_id) database['zoterousersettings'].find_and_modify( {'_id': doc['_id']}, {'$set':{ 'oauth_grants': og }} ) aj_sn = list(database['archivejob'].find({'src_node': old_id})) if aj_sn: logger.info('** Updating {} ArchiveJobs (src_node) {}'.format(old_id, [d['_id'] for d in aj_sn])) for doc in aj_sn: database['archivejob'].find_and_modify( {'_id': doc['_id']}, {'$set':{ 'src_node': node._id }} ) tfn_n = list(database['trashedfilenode'].find({'node': 
old_id})) if tfn_n: logger.info('** Updating {} TrashedFileNodes (node) {}'.format(old_id, [d['_id'] for d in tfn_n])) for doc in tfn_n: del_on = doc.pop('deleted_on') # Remove non-JSON-serializable datetime fields last_touch = doc.pop('last_touched') hist_mods = [doc['history'][doc['history'].index(h)].pop('modified') for h in doc['history']] replacement = json.loads(re.sub(r'\b{}\b'.format(old_id), node._id, json.dumps(doc))) for i, mod in enumerate(hist_mods): replacement['history'][i]['modified'] = mod database['trashedfilenode'].find_and_modify( {'_id': doc['_id']}, {'$set':{ 'node': replacement['node'], 'history': replacement['history'] }} ) sfn_n = list(database['storedfilenode'].find({'node': old_id})) if sfn_n: logger.info('** Updating {} StoredFileNodes (node) {}'.format(old_id, [d['_id'] for d in sfn_n])) for doc in sfn_n: doc.pop('last_touched') # Remove non-JSON-serializable datetime fields hist_mods = [doc['history'][doc['history'].index(h)].pop('modified') for h in doc['history']] replacement = json.loads(re.sub(r'\b{}\b'.format(old_id), node._id, json.dumps(doc))) for i, mod in enumerate(hist_mods): replacement['history'][i]['modified'] = mod database['storedfilenode'].find_and_modify( {'_id': doc['_id']}, {'$set':{ 'node': replacement['node'], 'history': replacement['history'] }} ) com_n = list(database['comment'].find({'node': old_id})) if com_n: logger.info('** Updating {} Comments (node) {}'.format(old_id, [d['_id'] for d in com_n])) for doc in com_n: database['comment'].find_and_modify( {'_id': doc['_id']}, {'$set':{ 'node': node._id }} ) com_t = list(database['comment'].find({'target': {'$in': [old_id]}})) if com_t: logger.info('** Updating {} Comments (target) {}'.format(old_id, [d['_id'] for d in com_t])) for doc in com_t: targ = doc['target'] targ.insert(targ.index(old_id), node._id) targ.remove(old_id) database['comment'].find_and_modify( {'_id': doc['_id']}, {'$set':{ 'target': targ }} ) com_t = list(database['comment'].find({'root_target': {'$in': [old_id]}})) if com_t: logger.info('** Updating {} Comments (root_target) {}'.format(old_id, [d['_id'] for d in com_t])) for doc in com_t: rtarg = doc['root_target'] rtarg.insert(rtarg.index(old_id), node._id) rtarg.remove(old_id) database['comment'].find_and_modify( {'_id': doc['_id']}, {'$set':{ 'root_target': rtarg }} ) nl_on = list(database['nodelog'].find({'original_node': old_id})) if nl_on: logger.info('** Updating {} NodeLogs (original_node) {}'.format(old_id, [d['_id'] for d in nl_on])) for doc in nl_on: database['nodelog'].find_and_modify( {'_id': doc['_id']}, {'$set':{ 'original_node': node._id }} ) nl_n = list(database['nodelog'].find({'node': old_id})) if nl_n: logger.info('** Updating {} NodeLogs (node) {}'.format(old_id, [d['_id'] for d in nl_n])) for doc in nl_n: database['nodelog'].find_and_modify( {'_id': doc['_id']}, {'$set':{ 'node': node._id }} ) nl_pac = list(database['nodelog'].find({'params.auth.callback_url': {'$regex': '/{}/'.format(old_id)}})) if nl_pac: logger.info('** Updating {} NodeLogs (params.auth.callback_url) {}'.format(old_id, [d['_id'] for d in nl_pac])) for doc in nl_pac: params = doc['params'] params['auth']['callback_url'] = params['auth']['callback_url'].replace('{}/'.format(old_id), '{}/'.format(node._id)) database['nodelog'].find_and_modify( {'_id': doc['_id']}, {'$set':{ 'params': params }} ) nl_pn = list(database['nodelog'].find({'params.node': old_id})) if nl_pn: logger.info('** Updating {} NodeLogs (params.node) {}'.format(old_id, [d['_id'] for d in nl_pn])) for doc in 
nl_pn: params = doc['params'] params['node'] = node._id database['nodelog'].find_and_modify( {'_id': doc['_id']}, {'$set':{ 'params': params }} ) nl_ppar = list(database['nodelog'].find({'params.parent': old_id})) if nl_ppar: logger.info('** Updating {} NodeLogs (params.parent) {}'.format(old_id, [d['_id'] for d in nl_ppar])) for doc in nl_ppar: params = doc['params'] params['parent'] = node._id database['nodelog'].find_and_modify( {'_id': doc['_id']}, {'$set':{ 'params': params }} ) nl_ppro = list(database['nodelog'].find({'params.project': old_id})) if nl_ppro: logger.info('** Updating {} NodeLogs (params.project) {}'.format(old_id, [d['_id'] for d in nl_ppro])) for doc in nl_ppro: params = doc['params'] params['project'] = node._id database['nodelog'].find_and_modify( {'_id': doc['_id']}, {'$set':{ 'params': params }} ) nl_ppn = list(database['nodelog'].find({'params.parent_node': old_id})) if nl_ppn: logger.info('** Updating {} NodeLogs (params.parent_node) {}'.format(old_id, [d['_id'] for d in nl_ppn])) for doc in nl_ppn: params = doc['params'] params['parent_node'] = node._id database['nodelog'].find_and_modify( {'_id': doc['_id']}, {'$set':{ 'params': params }} ) nl_pdn = list(database['nodelog'].find({'params.destination.nid': old_id})) if nl_pdn: logger.info('** Updating {} NodeLogs (params.destination.nid) {}'.format(old_id, [d['_id'] for d in nl_pdn])) for doc in nl_pdn: params = doc['params'] params['destination']['nid'] = node._id if params['destination'].get('url', None): params['destination']['url'] = params['destination']['url'].replace('{}/'.format(old_id), '{}/'.format(node._id)) database['nodelog'].find_and_modify( {'_id': doc['_id']}, {'$set':{ 'params': params }} ) nl_pdr = list(database['nodelog'].find({'params.destination.resource': old_id})) if nl_pdr: logger.info('** Updating {} NodeLogs (params.destination.resource) {}'.format(old_id, [d['_id'] for d in nl_pdr])) for doc in nl_pdr: params = doc['params'] params['destination']['resource'] = node._id database['nodelog'].find_and_modify( {'_id': doc['_id']}, {'$set':{ 'params': params }} ) nl_pdni = list(database['nodelog'].find({'params.destination.node._id': old_id})) if nl_pdni: logger.info('** Updating {} NodeLogs (params.destination.node._id) {}'.format(old_id, [d['_id'] for d in nl_pdni])) for doc in nl_pdni: params = doc['params'] params['destination']['node']['_id'] = node._id if params['destination']['node'].get('url', None): params['destination']['node']['url'] = params['destination']['node']['url'].replace('{}/'.format(old_id), '{}/'.format(node._id)) database['nodelog'].find_and_modify( {'_id': doc['_id']}, {'$set':{ 'params': params }} ) nl_ppi = list(database['nodelog'].find({'params.pointer.id': old_id})) if nl_ppi: logger.info('** Updating {} NodeLogs (params.pointer.id) {}'.format(old_id, [d['_id'] for d in nl_ppi])) for doc in nl_ppi: params = doc['params'] params['pointer']['id'] = node._id if params['pointer'].get('url', None): params['pointer']['url'] = params['pointer']['url'].replace('{}/'.format(old_id), '{}/'.format(node._id)) database['nodelog'].find_and_modify( {'_id': doc['_id']}, {'$set':{ 'params': params }} ) nl_psn = list(database['nodelog'].find({'params.source.nid': old_id})) if nl_psn: logger.info('** Updating {} NodeLogs (params.source.nid) {}'.format(old_id, [d['_id'] for d in nl_psn])) for doc in nl_psn: params = doc['params'] params['source']['nid'] = node._id if params['source'].get('url', None): params['source']['url'] = params['source']['url'].replace('{}/'.format(old_id), 
'{}/'.format(node._id)) database['nodelog'].find_and_modify( {'_id': doc['_id']}, {'$set':{ 'params': params }} ) nl_psni = list(database['nodelog'].find({'params.source.node._id': old_id})) if nl_psni: logger.info('** Updating {} NodeLogs (params.source.node._id) {}'.format(old_id, [d['_id'] for d in nl_psni])) for doc in nl_psni: params = doc['params'] params['source']['node']['_id'] = node._id if params['source']['node'].get('url', None): params['source']['node']['url'] = params['source']['node']['url'].replace('{}/'.format(old_id), '{}/'.format(node._id)) database['nodelog'].find_and_modify( {'_id': doc['_id']}, {'$set':{ 'params': params }} ) nl_psr = list(database['nodelog'].find({'params.source.resource': old_id})) if nl_psr: logger.info('** Updating {} NodeLogs (params.source.resource) {}'.format(old_id, [d['_id'] for d in nl_psr])) for doc in nl_psr: params = doc['params'] params['source']['resource'] = node._id database['nodelog'].find_and_modify( {'_id': doc['_id']}, {'$set':{ 'params': params }} ) nl_ptni = list(database['nodelog'].find({'params.template_node.id': old_id})) if nl_ptni: logger.info('** Updating {} NodeLogs (params.template_node.id) {}'.format(old_id, [d['_id'] for d in nl_ptni])) for doc in nl_ptni: params = doc['params'] params['template_node']['id'] = node._id if params['template_node'].get('url', None): params['template_node']['url'] = params['template_node']['url'].replace('{}/'.format(old_id), '{}/'.format(node._id)) database['nodelog'].find_and_modify( {'_id': doc['_id']}, {'$set':{ 'params': params }} ) nl_pr = list(database['nodelog'].find({'params.registration': old_id})) if nl_pr: logger.info('** Updating {} NodeLogs (params.registration) {}'.format(old_id, [d['_id'] for d in nl_pr])) for doc in nl_pr: params = doc['params'] params['registration'] = node._id database['nodelog'].find_and_modify( {'_id': doc['_id']}, {'$set':{ 'params': params }} ) nl_pf = list(database['nodelog'].find({'params.fork': old_id})) if nl_pf: logger.info('** Updating {} NodeLogs (params.fork) {}'.format(old_id, [d['_id'] for d in nl_pf])) for doc in nl_pf: params = doc['params'] params['fork'] = node._id database['nodelog'].find_and_modify( {'_id': doc['_id']}, {'$set':{ 'params': params }} ) nl_pud = list(database['nodelog'].find({'params.urls.download': {'$regex': '/{}/'.format(old_id)}})) if nl_pud: logger.info('** Updating {} NodeLogs (params.source.node._id) {}'.format(old_id, [d['_id'] for d in nl_pud])) for doc in nl_pud: params = doc['params'] params['urls']['download'] = params['urls']['download'].replace('{}/'.format(old_id), '{}/'.format(node._id)) if params['urls'].get('view', None): params['urls']['view'] = params['urls']['view'].replace('{}/'.format(old_id), '{}/'.format(node._id)) database['nodelog'].find_and_modify( {'_id': doc['_id']}, {'$set':{ 'params': params }} ) ptr_n = list(database['pointer'].find({'node': old_id})) if ptr_n: logger.info('** Updating {} Pointers (node) {}'.format(old_id, [d['_id'] for d in ptr_n])) for doc in ptr_n: database['pointer'].find_and_modify( {'_id': doc['_id']}, {'$set':{ 'node': node._id }} ) n_tn = list(database['node'].find({'template_node': old_id})) if n_tn: logger.info('** Updating {} Nodes (template_node) {}'.format(old_id, [d['_id'] for d in n_tn])) for doc in n_tn: database['node'].find_and_modify( {'_id': doc['_id']}, {'$set':{ 'template_node': node._id }} ) n_ff = list(database['node'].find({'forked_from': old_id})) if n_ff: logger.info('** Updating {} Nodes (forked_from) {}'.format(old_id, [d['_id'] for d in 
n_ff])) for doc in n_ff: database['node'].find_and_modify( {'_id': doc['_id']}, {'$set':{ 'forked_from': node._id }} ) n_rf = list(database['node'].find({'registered_from': old_id})) if n_rf: logger.info('** Updating {} Nodes (registered_from) {}'.format(old_id, [d['_id'] for d in n_rf])) for doc in n_rf: database['node'].find_and_modify( {'_id': doc['_id']}, {'$set':{ 'registered_from': node._id }} ) n_root = list(database['node'].find({'root': old_id})) if n_root: logger.info('** Updating {} Nodes (root) {}'.format(old_id, [d['_id'] for d in n_root])) for doc in n_root: database['node'].find_and_modify( {'_id': doc['_id']}, {'$set':{ 'root': node._id }} ) n_par = list(database['node'].find({'parent': old_id})) if n_par: logger.info('** Updating {} Nodes (parent) {}'.format(old_id, [d['_id'] for d in n_par])) for doc in n_par: database['node'].find_and_modify( {'_id': doc['_id']}, {'$set':{ 'parent': node._id }} ) n_cns = list(database['node'].find({'$where': 'if (this.child_node_subscriptions!==undefined){{var keys=Object.keys(this.child_node_subscriptions);for(var i=0;i<keys.length;i+=1){{if(this.child_node_subscriptions[keys[i]].indexOf("{}")!==-1){{return true}}}}}}return false;'.format(old_id)})) if n_cns: docs = list(n_cns) logger.info('** Updating {} Nodes (child_node_subscriptions) {}'.format(old_id, [d['_id'] for d in docs])) for doc in docs: if doc['_id'] in cns_dict_to_update: cns = cns_dict_to_update[doc['_id']] else: cns = doc['child_node_subscriptions'] replacement = json.loads(re.sub(r'\b{}\b'.format(old_id), node._id, json.dumps(cns))) cns_dict_to_update[doc['_id']] = replacement database['node'].find_and_modify( {'_id': doc['_id']}, {'$set':{ 'child_node_subscriptions': replacement }} ) nd_nl = list(database['notificationdigest'].find({'node_lineage': {'$in': [old_id]}})) if nd_nl: logger.info('** Updating {} NotificationDigest (node_lineage) {}'.format(old_id, [d['_id'] for d in nd_nl])) for doc in nd_nl: nl = doc['node_lineage'] nl.insert(nl.index(old_id), node._id) nl.remove(old_id) if doc['message'].find('/{}/'.format(old_id)) != -1: # avoid html regexes message = doc['message'].replace('/{}/'.format(old_id), '/{}/'.format(node._id)) database['notificationdigest'].find_and_modify( {'_id': doc['_id']}, {'$set':{ 'message': message, 'node_lineage': nl }} ) else: database['notificationdigest'].find_and_modify( {'_id': doc['_id']}, {'$set':{ 'node_lineage': nl }} ) ns_i = list(database['notificationsubscription'].find({'_id': {'$regex': old_id}})) if ns_i: logger.info('** Updating {} NotificationSubscription (_id, owner) {}'.format(old_id, [d['_id'] for d in ns_i])) for doc in ns_i: replacement = json.loads(re.sub(r'\b{}\b'.format(old_id), node._id, json.dumps(doc))) new_id = replacement.pop('_id') database['notificationsubscription'].find_and_modify( {'_id': new_id}, {'$set':replacement}, upsert=True ) database['notificationsubscription'].remove({'_id': doc['_id']}) u_uc = list(database['user'].find({'unclaimed_records.{}'.format(old_id): {'$ne': None}})) if u_uc: logger.info('** Updating {} Users (unclaimed_records) {}'.format(old_id, [d['_id'] for d in u_uc])) for doc in u_uc: ucr = doc['unclaimed_records'] ucr[node._id] = ucr.pop(old_id) database['user'].find_and_modify( {'_id': doc['_id']}, {'$set':{ 'unclaimed_records': ucr }} ) u_caer = list(database['user'].find({'contributor_added_email_records.{}'.format(old_id): {'$ne': None}})) if u_caer: logger.info('** Updating {} Users (contributor_added_email_records) {}'.format(old_id, [d['_id'] for d in u_caer])) for doc 
in u_caer: caer = doc['contributor_added_email_records'] caer[node._id] = caer.pop(old_id) database['user'].find_and_modify( {'_id': doc['_id']}, {'$set':{ 'contributor_added_email_records': caer }} ) u_nc = list(database['user'].find({'notifications_configured.{}'.format(old_id): {'$ne': None}})) if u_nc: logger.info('** Updating {} Users (notifications_configured) {}'.format(old_id, [d['_id'] for d in u_nc])) for doc in u_nc: nc = doc['notifications_configured'] nc[node._id] = nc.pop(old_id) database['user'].find_and_modify( {'_id': doc['_id']}, {'$set':{ 'notifications_configured': nc }} ) u_cvt = list(database['user'].find({'comments_viewed_timestamp.{}'.format(old_id): {'$ne': None}})) if u_cvt: logger.info('** Updating {} Users (comments_viewed_timestamp) {}'.format(old_id, [d['_id'] for d in u_cvt])) for doc in u_cvt: nc = doc['comments_viewed_timestamp'] nc[node._id] = nc.pop(old_id) database['user'].find_and_modify( {'_id': doc['_id']}, {'$set':{ 'comments_viewed_timestamp': nc }} ) pc_i = list(database['pagecounters'].find({'_id': {'$regex': ':{}:'.format(old_id)}})) if pc_i: logger.info('** Updating {} PageCounters (_id) {}'.format(old_id, [d['_id'] for d in pc_i])) for doc in pc_i: replacement = json.loads(re.sub(r'\b{}\b'.format(old_id), node._id, json.dumps(doc))) new_id = replacement.pop('_id') database['pagecounters'].find_and_modify( {'_id': new_id}, {'$set':replacement}, upsert=True ) database['pagecounters'].remove({'_id': doc['_id']}) ss_dv = list(database['session'].find({'data.visited': {'$regex': ':{}:'.format(old_id)}})) if ss_dv: logger.info('** Updating {} Session (data) {}'.format(old_id, [d['_id'] for d in ss_dv])) for doc in ss_dv: repl_data = json.loads(re.sub(r'\b{}\b'.format(old_id), node._id, json.dumps(doc['data']))) database['session'].find_and_modify( {'_id': doc['_id']}, {'$set':{ 'data': repl_data }} ) wc_n = list(database['watchconfig'].find({'node': old_id})) if wc_n: logger.info('** Updating {} WatchConfigs (node) {}'.format(old_id, [d['_id'] for d in wc_n])) for doc in wc_n: database['watchconfig'].find_and_modify( {'_id': doc['_id']}, {'$set':{ 'node': node._id }} ) pl_n = list(database['privatelink'].find({'nodes': old_id})) if pl_n: logger.info('** Updating {} PrivateLinks (nodes) {}'.format(old_id, [d['_id'] for d in pl_n])) for d in pl_n: new_nodes = d['nodes'] new_nodes.remove(old_id) new_nodes.append(node._id) database['privatelink'].find_and_modify( {'_id': d['_id']}, {'$set':{ 'nodes': new_nodes }} ) dr_bf = list(database['draftregistration'].find({'branched_from': old_id})) if dr_bf: logger.info('** Updating {} DraftRegistrations (branched_from) {}'.format(old_id, [d['_id'] for d in dr_bf])) for doc in dr_bf: database['draftregistration'].find_and_modify( {'_id': doc['_id']}, {'$set':{ 'branched_from': node._id }} ) dr_rn = list(database['draftregistration'].find({'registered_node': old_id})) if dr_rn: logger.info('** Updating {} DraftRegistrations (registered_node) {}'.format(old_id, [d['_id'] for d in dr_rn])) for doc in dr_rn: database['draftregistration'].find_and_modify( {'_id': doc['_id']}, {'$set':{ 'registered_node': node._id }} ) eta_er = list(database['embargoterminationapproval'].find({'embargoed_registration': old_id})) if eta_er: logger.info('** Updating {} EmbargoTerminationApprovals (embargoed_registration) {}'.format(old_id, [d['_id'] for d in eta_er])) for doc in eta_er: database['embargoterminationapproval'].find_and_modify( {'_id': doc['_id']}, {'$set':{ 'embargoed_registration': node._id }} ) ra_su = 
list(database['registrationapproval'].find({'$where': 'var keys=Object.keys(this.stashed_urls);for(var i=0;i<keys.length;i+=1){{if(this.stashed_urls[keys[i]].view.indexOf("{}")!==-1){{return true}}if(this.stashed_urls[keys[i]].approve.indexOf("{}")!==-1){{return true}}if(this.stashed_urls[keys[i]].reject.indexOf("{}")!==-1){{return true}}}}return false;'.format(old_id, old_id, old_id)})) if ra_su: logger.info('** Updating {} RegistrationApprovals (stashed_urls) {}'.format(old_id, [d['_id'] for d in ra_su])) for doc in ra_su: updated_stash = json.loads(re.sub(r'\b{}\b'.format(old_id), node._id, json.dumps(doc['stashed_urls']))) database['registrationapproval'].find_and_modify( {'_id': doc['_id']}, {'$set':{ 'stashed_urls': updated_stash }} ) idf_r = list(database['identifier'].find({'referent': old_id})) if idf_r: logger.info('** Updating {} Identifiers (referent) {}'.format(old_id, [d['_id'] for d in idf_r])) for doc in idf_r: ref = doc['referent'] ref[1] = 'preprintservice' database['identifier'].find_and_modify( {'_id': doc['_id']}, {'$set':{ 'referent': ref }} ) qm_dn = list(database['queuedmail'].find({'data.nid': old_id})) if qm_dn: logger.info('** Updating {} QueuedMails (data.nid) {}'.format(old_id, [d['_id'] for d in qm_dn])) for doc in qm_dn: repl_data = json.loads(re.sub(r'\b{}\b'.format(old_id), node._id, json.dumps(doc['data']))) database['queuedmail'].find_and_modify( {'_id': doc['_id']}, {'$set':{ 'data': repl_data }} ) mr_r = list(database['mailrecord'].find({'records': [old_id, 'node']})) if mr_r: logger.info('** Updating {} MailRecords (records) {}'.format(old_id, [d['_id'] for d in mr_r])) for doc in mr_r: records = doc['records'] records[records.index([old_id, 'node'])][0] = node._id database['mailrecord'].find_and_modify( {'_id': doc['_id']}, {'$set':{ 'records': records }} ) ps_n = list(database['preprintservice'].find({'node': old_id})) if ps_n: logger.info('** Updating {} PreprintServices (node) {}'.format(old_id, [d['_id'] for d in ps_n])) for doc in ps_n: database['preprintservice'].find_and_modify( {'_id': doc['_id']}, {'$set':{ 'node': node._id }} ) def enumerate_and_set_subject_hierarchies(preprint): logger.info('* Migrating subjects for node {}'.format(preprint.node._id)) hierarchical_subjects, flat_subjects = [], set(preprint.node.preprint_subjects) for subject_id in preprint.node.preprint_subjects: subject = models.Subject.load(subject_id) if set([c._id for c in subject.children]) & flat_subjects: continue trees = [(subject, )] while trees: tree = trees.pop(0) if not tree[0].parents: hierarchical_subjects.append([s._id for s in tree]) else: trees.extend([(p, ) + tree for p in tree[0].parents if p._id in flat_subjects]) assert set(flat_subjects) == set(sum(hierarchical_subjects, [])), \ 'Flat subject set `{}` not equal to hierarchical subject set `{}`'.format(flat_subjects, hierarchical_subjects) preprint.subjects = hierarchical_subjects preprint.save() def migrate_target(target, swap_cutoff, target_count): validate_target(target) preprints = create_preprint_service_from_target(target, swap_cutoff) if not preprints: failures.append(target['node_id']) logger.error('({}-{}/{}) Failed to create any PreprintServices for node {}'.format( len(successes), len(failures), target_count, target['node_id']) ) else: for preprint_id in preprints: created_preprints.append(preprint_id) if preprints[preprint_id][1]: external_preprints.append(preprint_id) preprint_node_mapping.update(preprints) successes.append(target['node_id']) logger.info('({}-{}/{}) Successfully migrated 
{}'.format( len(successes), len(failures), target_count, target['node_id'] ) ) def migrate(swap_cutoff): dry_run = '--dry' in sys.argv target_data = parse_input() target_ids = [d['node_id'] for d in target_data] target_count = len(target_data) def log_results(): new_osf_preprints = list(set(created_preprints)-set(target_ids + external_preprints)) logger.info('OSF Preprints with new _ids (older than {} minutes): {}'.format( swap_cutoff.seconds/60, # timedeltas have .days, .seconds, and .microseonds but not .minutes new_osf_preprints)) logger.info('OSF Preprint-Node map: {}'.format( ''.join(['{}-{}, '.format(preprint_id, preprint_node_mapping[preprint_id][0]) for preprint_id in new_osf_preprints]))) logger.info('External Preprints with new _ids: {}'.format(list(external_preprints))) logger.info('External Preprint-Node map: {}'.format( ''.join(['{}-{}, '.format(preprint_id, preprint_node_mapping[preprint_id][0]) for preprint_id in external_preprints]))) logger.info('Swapped Preprint-Node map: {}'.format( ''.join(['{}-{}, '.format(preprint_id, preprint_node_swapped_ids_map[preprint_id]) for preprint_id in preprint_node_swapped_ids_map]))) logger.info('Successes: {}'.format(successes)) logger.info('Failures: {}'.format(failures)) logger.info('Missed nodes: {}'.format(list(set(target_ids)-set(successes + failures)))) logger.info('Created {} preprints from {} nodes'.format(len(created_preprints), target_count)) logger.info('Preparing to migrate {} preprint nodes.'.format(target_count)) logger.info('Cutoff delta for swapping guids is {} seconds'.format(swap_cutoff.total_seconds())) for target in target_data: try: if not dry_run: with TokuTransaction(): migrate_target(target, swap_cutoff, target_count) else: migrate_target(target, swap_cutoff, target_count) except Exception as e: if not isinstance(e, RuntimeError): logger.error('MIGRATION FAILED: {}'.format(target)) log_results() raise log_results() def parse_input(): logger.info('Acquiring targets...') if '--targets' not in sys.argv and '--auto' not in sys.argv: raise RuntimeError('Must either request `--auto` for target selection or manually specify input set with `--targets`.\n\nThis is expected to be a JSON-formatted list of sets of `node_id`, `file_id` and `provider_id`, e.g.\ \'{"data": [{"node_id": "asdfg", "file_id": "notarealfileid", "provider_id": "notarealproviderid"}]}\'') if '--targets' in sys.argv and '--auto' in sys.argv: raise RuntimeError('May not automatically get targets and receive specified targets.') if '--auto' in sys.argv: count = None try: count = int(sys.argv[1 + sys.argv.index('--auto')]) except (IndexError, ValueError): pass targets = [ { 'file_id': database['storedfilenode'].find({'node': n._id, 'is_file': True, 'provider': 'osfstorage'}, {'_id': 1})[0]['_id'], 'node_id': n._id, 'provider_id': list(set([t.lower for t in n.tags]) & set(['socarxiv', 'engrxiv', 'psyarxiv']))[0] } for n in models.Node.find(Q('tags', 'in', list(POSSIBLE_PREPRINT_PROVIDER_KEYS)) & Q('system_tags', 'ne', 'migrated_from_osf4m') & Q('is_deleted', 'ne', True)) if database['storedfilenode'].find({'node': n._id, 'is_file': True, 'provider': 'osfstorage'}).count() == 1 and len(list(set([t.lower for t in n.tags]) & set(['socarxiv', 'engrxiv', 'psyarxiv']))) == 1 and not database['preprintservice'].find({'node': n._id}, {'_id': 1}).count() ] if count and count < len(targets): return targets[:count] return targets input_string = sys.argv[1 + sys.argv.index('--targets')] return json.loads(input_string)['data'] def main(): dry_run = '--dry' in sys.argv 
td = timedelta() if '--minutes' in sys.argv: td += timedelta(minutes=int(sys.argv[1 + sys.argv.index('--minutes')])) if '--hours' in sys.argv: td += timedelta(hours=int(sys.argv[1 + sys.argv.index('--hours')])) if td.total_seconds() == 0: td += timedelta(hours=1) if not dry_run: script_utils.add_file_logger(logger, __file__) init_app(set_backends=True, routes=False) settings.SHARE_URL = None set_globals() assert all([ENG_SUBJ_ID, SOC_SUBJ_ID, PSY_SUBJ_ID]), 'Default subjects not set.' if '--no-addindex' not in sys.argv: create_indices() if dry_run: with TokuTransaction(): migrate(swap_cutoff=td) raise RuntimeError('Dry run, transaction rolled back.') else: migrate(swap_cutoff=td) if '--no-dropindex' not in sys.argv: drop_indices() if __name__ == "__main__": main()
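Most of the per-collection rewrites in update_foreign_fields above come in two shapes: a direct find_and_modify that swaps a single foreign key, and a whole-document rewrite that round-trips the record through JSON and substitutes the old guid with a word-boundary regex. A stand-alone sketch of the second shape, with hypothetical names and no dependency on the OSF models:

import json
import re

def replace_id_everywhere(doc, old_id, new_id):
    # Return a copy of a JSON-serializable dict with every standalone
    # occurrence of old_id replaced by new_id; the word boundary keeps ids
    # that happen to be substrings of longer tokens untouched.
    return json.loads(re.sub(r'\b{}\b'.format(re.escape(old_id)), new_id, json.dumps(doc)))

record = {'node': 'abcde', 'urls': {'view': '/abcde/', 'files': '/abcde/files/'}}
print(replace_id_everywhere(record, 'abcde', 'vwxyz'))
# -> 'node' and both URL paths now reference 'vwxyz'

The catch, visible in the TrashedFileNode and StoredFileNode branches above, is that the document must be JSON-serializable first: the script pops non-serializable datetime fields such as last_touched before the substitution and splices the per-history modified timestamps back in afterwards.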
apache-2.0
-5,792,235,606,305,090,000
41.826951
402
0.51722
false
fly19890211/edx-platform
common/lib/xmodule/xmodule/modulestore/inheritance.py
52
13259
""" Support for inheritance of fields down an XBlock hierarchy. """ from __future__ import absolute_import from datetime import datetime from pytz import UTC from xmodule.partitions.partitions import UserPartition from xblock.fields import Scope, Boolean, String, Float, XBlockMixin, Dict, Integer, List from xblock.runtime import KeyValueStore, KvsFieldData from xmodule.fields import Date, Timedelta from django.conf import settings # Make '_' a no-op so we can scrape strings _ = lambda text: text class UserPartitionList(List): """Special List class for listing UserPartitions""" def from_json(self, values): return [UserPartition.from_json(v) for v in values] def to_json(self, values): return [user_partition.to_json() for user_partition in values] class InheritanceMixin(XBlockMixin): """Field definitions for inheritable fields.""" graded = Boolean( help="Whether this module contributes to the final course grade", scope=Scope.settings, default=False, ) start = Date( help="Start time when this module is visible", default=datetime(2030, 1, 1, tzinfo=UTC), scope=Scope.settings ) due = Date( display_name=_("Due Date"), help=_("Enter the default date by which problems are due."), scope=Scope.settings, ) visible_to_staff_only = Boolean( help=_("If true, can be seen only by course staff, regardless of start date."), default=False, scope=Scope.settings, ) course_edit_method = String( display_name=_("Course Editor"), help=_("Enter the method by which this course is edited (\"XML\" or \"Studio\")."), default="Studio", scope=Scope.settings, deprecated=True # Deprecated because user would not change away from Studio within Studio. ) giturl = String( display_name=_("GIT URL"), help=_("Enter the URL for the course data GIT repository."), scope=Scope.settings ) xqa_key = String( display_name=_("XQA Key"), help=_("This setting is not currently supported."), scope=Scope.settings, deprecated=True ) annotation_storage_url = String( help=_("Enter the location of the annotation storage server. The textannotation, videoannotation, and imageannotation advanced modules require this setting."), scope=Scope.settings, default="http://your_annotation_storage.com", display_name=_("URL for Annotation Storage") ) annotation_token_secret = String( help=_("Enter the secret string for annotation storage. The textannotation, videoannotation, and imageannotation advanced modules require this string."), scope=Scope.settings, default="xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx", display_name=_("Secret Token String for Annotation") ) graceperiod = Timedelta( help="Amount of time after the due date that submissions will be accepted", scope=Scope.settings, ) group_access = Dict( help=_("Enter the ids for the content groups this problem belongs to."), scope=Scope.settings, ) showanswer = String( display_name=_("Show Answer"), help=_( 'Specify when the Show Answer button appears for each problem. ' 'Valid values are "always", "answered", "attempted", "closed", ' '"finished", "past_due", "correct_or_past_due", and "never".' ), scope=Scope.settings, default="finished", ) rerandomize = String( display_name=_("Randomization"), help=_( 'Specify the default for how often variable values in a problem are randomized. ' 'This setting should be set to \"never\" unless you plan to provide a Python ' 'script to identify and randomize values in most of the problems in your course. ' 'Valid values are \"always\", \"onreset\", \"never\", and \"per_student\".' 
), scope=Scope.settings, default="never", ) days_early_for_beta = Float( display_name=_("Days Early for Beta Users"), help=_("Enter the number of days before the start date that beta users can access the course."), scope=Scope.settings, default=None, ) static_asset_path = String( display_name=_("Static Asset Path"), help=_("Enter the path to use for files on the Files & Uploads page. This value overrides the Studio default, c4x://."), scope=Scope.settings, default='', ) text_customization = Dict( display_name=_("Text Customization"), help=_("Enter string customization substitutions for particular locations."), scope=Scope.settings, ) use_latex_compiler = Boolean( display_name=_("Enable LaTeX Compiler"), help=_("Enter true or false. If true, you can use the LaTeX templates for HTML components and advanced Problem components."), default=False, scope=Scope.settings ) max_attempts = Integer( display_name=_("Maximum Attempts"), help=_("Enter the maximum number of times a student can try to answer problems. By default, Maximum Attempts is set to null, meaning that students have an unlimited number of attempts for problems. You can override this course-wide setting for individual problems. However, if the course-wide setting is a specific number, you cannot set the Maximum Attempts for individual problems to unlimited."), values={"min": 0}, scope=Scope.settings ) matlab_api_key = String( display_name=_("Matlab API key"), help=_("Enter the API key provided by MathWorks for accessing the MATLAB Hosted Service. " "This key is granted for exclusive use in this course for the specified duration. " "Do not share the API key with other courses. Notify MathWorks immediately " "if you believe the key is exposed or compromised. To obtain a key for your course, " "or to report an issue, please contact [email protected]"), scope=Scope.settings ) # This is should be scoped to content, but since it's defined in the policy # file, it is currently scoped to settings. user_partitions = UserPartitionList( display_name=_("Group Configurations"), help=_("Enter the configurations that govern how students are grouped together."), default=[], scope=Scope.settings ) video_speed_optimizations = Boolean( display_name=_("Enable video caching system"), help=_("Enter true or false. If true, video caching will be used for HTML5 videos."), default=True, scope=Scope.settings ) video_bumper = Dict( display_name=_("Video Pre-Roll"), help=_( """Identify a video, 5-10 seconds in length, to play before course videos. Enter the video ID from""" """ the Video Uploads page and one or more transcript files in the following format:""" """ {"video_id": "ID", "transcripts": {"language": "/static/filename.srt"}}.""" """ For example, an entry for a video with two transcripts looks like this:""" """ {"video_id": "77cef264-d6f5-4cf2-ad9d-0178ab8c77be",""" """ "transcripts": {"en": "/static/DemoX-D01_1.srt", "uk": "/static/DemoX-D01_1_uk.srt"}}""" ), scope=Scope.settings ) reset_key = "DEFAULT_SHOW_RESET_BUTTON" default_reset_button = getattr(settings, reset_key) if hasattr(settings, reset_key) else False show_reset_button = Boolean( display_name=_("Show Reset Button for Problems"), help=_("Enter true or false. If true, problems in the course default to always displaying a 'Reset' button. You can " "override this in each problem's settings. 
All existing problems are affected when this course-wide setting is changed."), scope=Scope.settings, default=default_reset_button ) edxnotes = Boolean( display_name=_("Enable Student Notes"), help=_("Enter true or false. If true, students can use the Student Notes feature."), default=False, scope=Scope.settings ) edxnotes_visibility = Boolean( display_name="Student Notes Visibility", help=_("Indicates whether Student Notes are visible in the course. " "Students can also show or hide their notes in the courseware."), default=True, scope=Scope.user_info ) in_entrance_exam = Boolean( display_name=_("Tag this module as part of an Entrance Exam section"), help=_("Enter true or false. If true, answer submissions for problem modules will be " "considered in the Entrance Exam scoring/gating algorithm."), scope=Scope.settings, default=False ) def compute_inherited_metadata(descriptor): """Given a descriptor, traverse all of its descendants and do metadata inheritance. Should be called on a CourseDescriptor after importing a course. NOTE: This means that there is no such thing as lazy loading at the moment--this accesses all the children.""" if descriptor.has_children: parent_metadata = descriptor.xblock_kvs.inherited_settings.copy() # add any of descriptor's explicitly set fields to the inheriting list for field in InheritanceMixin.fields.values(): if field.is_set_on(descriptor): # inherited_settings values are json repr parent_metadata[field.name] = field.read_json(descriptor) for child in descriptor.get_children(): inherit_metadata(child, parent_metadata) compute_inherited_metadata(child) def inherit_metadata(descriptor, inherited_data): """ Updates this module with metadata inherited from a containing module. Only metadata specified in self.inheritable_metadata will be inherited `inherited_data`: A dictionary mapping field names to the values that they should inherit """ try: descriptor.xblock_kvs.inherited_settings = inherited_data except AttributeError: # the kvs doesn't have inherited_settings probably b/c it's an error module pass def own_metadata(module): """ Return a JSON-friendly dictionary that contains only non-inherited field keys, mapped to their serialized values """ return module.get_explicitly_set_fields_by_scope(Scope.settings) class InheritingFieldData(KvsFieldData): """A `FieldData` implementation that can inherit value from parents to children.""" def __init__(self, inheritable_names, **kwargs): """ `inheritable_names` is a list of names that can be inherited from parents. """ super(InheritingFieldData, self).__init__(**kwargs) self.inheritable_names = set(inheritable_names) def default(self, block, name): """ The default for an inheritable name is found on a parent. """ if name in self.inheritable_names: # Walk up the content tree to find the first ancestor # that this field is set on. Use the field from the current # block so that if it has a different default than the root # node of the tree, the block's default will be used. 
field = block.fields[name] ancestor = block.get_parent() while ancestor is not None: if field.is_set_on(ancestor): return field.read_json(ancestor) else: ancestor = ancestor.get_parent() return super(InheritingFieldData, self).default(block, name) def inheriting_field_data(kvs): """Create an InheritanceFieldData that inherits the names in InheritanceMixin.""" return InheritingFieldData( inheritable_names=InheritanceMixin.fields.keys(), kvs=kvs, ) class InheritanceKeyValueStore(KeyValueStore): """ Common superclass for kvs's which know about inheritance of settings. Offers simple dict-based storage of fields and lookup of inherited values. Note: inherited_settings is a dict of key to json values (internal xblock field repr) """ def __init__(self, initial_values=None, inherited_settings=None): super(InheritanceKeyValueStore, self).__init__() self.inherited_settings = inherited_settings or {} self._fields = initial_values or {} def get(self, key): return self._fields[key.field_name] def set(self, key, value): # xml backed courses are read-only, but they do have some computed fields self._fields[key.field_name] = value def delete(self, key): del self._fields[key.field_name] def has(self, key): return key.field_name in self._fields def default(self, key): """ Check to see if the default should be from inheritance. If not inheriting, this will raise KeyError which will cause the caller to use the field's global default. """ return self.inherited_settings[key.field_name]
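The lookup in InheritingFieldData.default above is an ancestor walk: return the value from the nearest ancestor on which the field is explicitly set, otherwise fall through to the ordinary field default. The same idea stripped of the XBlock machinery, with an illustrative Node class that is not part of the module above:

class Node(object):
    def __init__(self, parent=None, **explicit):
        self.parent = parent
        self.explicit = explicit  # fields set directly on this node

    def setting(self, name, global_default=None):
        # Nearest node (including self) with the field explicitly set wins.
        node = self
        while node is not None:
            if name in node.explicit:
                return node.explicit[name]
            node = node.parent
        return global_default

course = Node(graded=False, showanswer='finished')
section = Node(parent=course, graded=True)
unit = Node(parent=section)

print(unit.setting('graded'))      # True, inherited from section
print(unit.setting('showanswer'))  # 'finished', inherited from course
print(unit.setting('due'))         # None, falls back to the global default

One difference worth noting: the real implementation starts the walk at block.get_parent() rather than at the block itself, because default() is only consulted when the field is not already set on the block.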
agpl-3.0
208,249,426,258,677,340
40.564263
407
0.648993
false
tellybug/dynamodb-mock
tests/functional/boto/test_query.py
2
8037
# -*- coding: utf-8 -*- import unittest import boto TABLE_NAME = 'Table-HR' TABLE_NAME_404 = 'Waldo' TABLE_RT = 45 TABLE_WT = 123 TABLE_RT2 = 10 TABLE_WT2 = 10 TABLE_HK_NAME = u'hash_key' TABLE_HK_TYPE = u'N' TABLE_RK_NAME = u'range_key' TABLE_RK_TYPE = u'S' HK_VALUE = u'123' HK_VALUE_404 = u'404' RK_VALUE1 = u'Waldo-1' RK_VALUE2 = u'Waldo-2' RK_VALUE3 = u'Waldo-3' RK_VALUE4 = u'Waldo-4' RK_VALUE5 = u'Waldo-5' ITEM1 = { TABLE_HK_NAME: {TABLE_HK_TYPE: HK_VALUE}, TABLE_RK_NAME: {TABLE_RK_TYPE: RK_VALUE1}, u'relevant_data': {u'S': u'tata'}, } ITEM2 = { TABLE_HK_NAME: {TABLE_HK_TYPE: HK_VALUE}, TABLE_RK_NAME: {TABLE_RK_TYPE: RK_VALUE2}, u'relevant_data': {u'S': u'tete'}, } ITEM3 = { TABLE_HK_NAME: {TABLE_HK_TYPE: HK_VALUE}, TABLE_RK_NAME: {TABLE_RK_TYPE: RK_VALUE3}, u'relevant_data': {u'S': u'titi'}, } ITEM4 = { TABLE_HK_NAME: {TABLE_HK_TYPE: HK_VALUE}, TABLE_RK_NAME: {TABLE_RK_TYPE: RK_VALUE4}, u'relevant_data': {u'S': u'toto'}, } ITEM5 = { TABLE_HK_NAME: {TABLE_HK_TYPE: HK_VALUE}, TABLE_RK_NAME: {TABLE_RK_TYPE: RK_VALUE5}, u'relevant_data': {u'S': u'tutu'}, } # Please note that most query features are not yet implemented hence not tested class TestQuery(unittest.TestCase): def setUp(self): from ddbmock.database.db import dynamodb from ddbmock.database.table import Table from ddbmock.database.key import PrimaryKey dynamodb.hard_reset() hash_key = PrimaryKey(TABLE_HK_NAME, TABLE_HK_TYPE) range_key = PrimaryKey(TABLE_RK_NAME, TABLE_RK_TYPE) self.t1 = Table(TABLE_NAME, TABLE_RT, TABLE_WT, hash_key, range_key) dynamodb.data[TABLE_NAME] = self.t1 self.t1.put(ITEM1, {}) self.t1.put(ITEM2, {}) self.t1.put(ITEM3, {}) self.t1.put(ITEM4, {}) self.t1.put(ITEM5, {}) def tearDown(self): from ddbmock.database.db import dynamodb from ddbmock import clean_boto_patch dynamodb.hard_reset() clean_boto_patch() def test_query_all(self): from ddbmock import connect_boto_patch from ddbmock.database.db import dynamodb expected = { u"Count": 5, u"Items": [ITEM1, ITEM2, ITEM3, ITEM4, ITEM5], u"ConsumedCapacityUnits": 0.5, } db = connect_boto_patch() ret = db.layer1.query(TABLE_NAME, {TABLE_HK_TYPE: HK_VALUE}) self.assertEqual(expected, ret) # Regression test for #9 def test_query_all_404(self): from ddbmock import connect_boto_patch from ddbmock.database.db import dynamodb expected = { u"Count": 0, u'Items': [], u"ConsumedCapacityUnits": 0.5, } db = connect_boto_patch() ret = db.layer1.query(TABLE_NAME, {TABLE_HK_TYPE: HK_VALUE_404}) self.assertEqual(expected, ret) def test_query_2_first(self): from ddbmock import connect_boto_patch from ddbmock.database.db import dynamodb expected = { u"Count": 2, u"Items": [ITEM1, ITEM2], u"ConsumedCapacityUnits": 0.5, u'LastEvaluatedKey': { u'HashKeyElement': {u'N': u'123'}, u'RangeKeyElement': {u'S': u'Waldo-2'}, }, } db = connect_boto_patch() ret = db.layer1.query(TABLE_NAME, {TABLE_HK_TYPE: HK_VALUE}, limit=2) self.assertEqual(expected, ret) def test_query_paged(self): from ddbmock import connect_boto_patch from ddbmock.database.db import dynamodb from boto.dynamodb.exceptions import DynamoDBValidationError esk = { u'HashKeyElement': {u'N': u'123'}, u'RangeKeyElement': {u'S': u'Waldo-3'}, } bad_esk = { u'HashKeyElement': {u'N': u'123.43'}, u'RangeKeyElement': {u'S': u'Waldo-3'}, } expected1 = { u"Count": 3, u"Items": [ITEM1, ITEM2, ITEM3], u"ConsumedCapacityUnits": 0.5, u'LastEvaluatedKey': esk, } expected2 = { u"Count": 2, u"Items": [ITEM4, ITEM5], u"ConsumedCapacityUnits": 0.5, } db = connect_boto_patch() ret = db.layer1.query(TABLE_NAME, {TABLE_HK_TYPE: HK_VALUE}, limit=3) 
self.assertEqual(expected1, ret) ret = db.layer1.query(TABLE_NAME, {TABLE_HK_TYPE: HK_VALUE}, limit=3, exclusive_start_key=esk) self.assertEqual(expected2, ret) self.assertRaises(DynamoDBValidationError, db.layer1.query, TABLE_NAME, {TABLE_HK_TYPE: HK_VALUE}, limit=3, exclusive_start_key=bad_esk) def test_query_2_last(self): from ddbmock import connect_boto_patch from ddbmock.database.db import dynamodb expected = { u"Count": 2, u"Items": [ITEM5, ITEM4], u"ConsumedCapacityUnits": 0.5, u'LastEvaluatedKey': { u'HashKeyElement': {u'N': u'123'}, u'RangeKeyElement': {u'S': u'Waldo-4'}, } } db = connect_boto_patch() ret = db.layer1.query(TABLE_NAME, {TABLE_HK_TYPE: HK_VALUE}, limit=2, scan_index_forward=False) self.assertEqual(expected, ret) def test_query_all_filter_fields(self): from ddbmock import connect_boto_patch from ddbmock.database.db import dynamodb expected = { u"Count": 5, u"Items": [ {u"relevant_data": {u"S": "tata"}}, {u"relevant_data": {u"S": "tete"}}, {u"relevant_data": {u"S": "titi"}}, {u"relevant_data": {u"S": "toto"}}, {u"relevant_data": {u"S": "tutu"}}, ], u"ConsumedCapacityUnits": 0.5, } fields = [u'relevant_data'] db = connect_boto_patch() ret = db.layer1.query(TABLE_NAME, {TABLE_HK_TYPE: HK_VALUE}, None, fields) self.assertEqual(expected, ret) # No need to test all conditions/type mismatch as they are unit tested def test_query_condition_filter_fields(self): from ddbmock import connect_boto_patch from ddbmock.database.db import dynamodb expected = { u"Count": 3, u"Items": [ {u"relevant_data": {u"S": u"titi"}}, {u"relevant_data": {u"S": u"toto"}}, {u"relevant_data": {u"S": u"tutu"}}, ], u"ConsumedCapacityUnits": 0.5, } condition = {"AttributeValueList":[{"S":"Waldo-2"}],"ComparisonOperator":"GT"} fields = [u'relevant_data'] db = connect_boto_patch() ret = db.layer1.query(TABLE_NAME, {TABLE_HK_TYPE: HK_VALUE}, condition, fields) self.assertEqual(expected, ret) def test_query_all_consistent(self): from ddbmock import connect_boto_patch from ddbmock.database.db import dynamodb expected = { u"Count": 5, u"Items": [ITEM1, ITEM2, ITEM3, ITEM4, ITEM5], u"ConsumedCapacityUnits": 1, } db = connect_boto_patch() ret = db.layer1.query(TABLE_NAME, {TABLE_HK_TYPE: HK_VALUE}, consistent_read=True) self.assertEqual(expected, ret) def test_query_invalid_condition_multiple_data_in_field(self): from ddbmock import connect_boto_patch from ddbmock.database.db import dynamodb from boto.dynamodb.exceptions import DynamoDBValidationError condition = { "AttributeValueList":[ {"S":"Waldo-2"}, {"S":"Waldo-3"}, ], "ComparisonOperator":"GT" } fields = [u'relevant_data'] db = connect_boto_patch() self.assertRaises(DynamoDBValidationError, db.layer1.query, TABLE_NAME, {TABLE_HK_TYPE: HK_VALUE}, condition, fields)
lgpl-3.0
-509,470,251,985,386,500
29.328302
103
0.55792
false
cezary12/blaze
blaze/io/scidb/query.py
13
3519
""" SciDB query generation and execution. The query building themselves are fairly low-level, since their only concern is whether to generate temporary arrays or not. """ from __future__ import absolute_import, division, print_function import uuid from itertools import chain #------------------------------------------------------------------------ # Query Interface #------------------------------------------------------------------------ def temp_name(): return 'arr_' + str(uuid.uuid4()).replace("-", "_") class Query(object): """ Holds an intermediate SciDB query. This builds up a little query graph to deal with expression reuse. For instance, consider the code: b = a * 2 eval(b + b) This would generate the query "(a * 2) + (a * 2)". In this case the blaze expression graph itself knows about the duplication of the expression. However, scidb kernels may themselves reuse expressions multiple times, which can lead to exponential code generation. E.g. consider function `f(a) = a * a`. Now f(f(f(a))) has `a` 8 times. """ temp_name = None def __init__(self, pattern, args, kwds, interpolate=str.format): self.pattern = pattern self.args = args self.kwds = kwds self.interpolate = interpolate self.uses = [] for arg in chain(self.args, self.kwds.values()): if isinstance(arg, Query): arg.uses.append(self) def _result(self): """ Format the expression. """ return self.interpolate(self.pattern, *self.args, **self.kwds) def generate_code(self, code, cleanup, seen): """ Generate a query to produce a temporary array for the expression. The temporary array can be referenced multiple times. """ if self in seen: return seen.add(self) for arg in chain(self.args, self.kwds.values()): if isinstance(arg, Query): arg.generate_code(code, cleanup, seen) if len(self.uses) > 1: self.temp_name = temp_name() code.append("store({expr}, {temp})".format(expr=self._result(), temp=self.temp_name)) cleanup.append("remove({temp})".format(temp=self.temp_name)) def result(self): """ The result in the AFL expression we are building. """ if len(self.uses) > 1: return self.temp_name return self._result() def __str__(self): if self.temp_name: return self.temp_name return self.result() def qformat(s, *args, **kwds): return Query(s, args, kwds) #------------------------------------------------------------------------ # Query Execution #------------------------------------------------------------------------ def execute_query(conn, query, persist=False): return conn.query(query, persist=persist) #------------------------------------------------------------------------ # Query Generation #------------------------------------------------------------------------ def apply(name, *args): arglist = ["{%d}" % (i,) for i in range(len(args))] pattern = "{name}({arglist})".format(name=name, arglist=", ".join(arglist)) return qformat(pattern, *args) def expr(e): return qformat("({0})", expr) def iff(expr, a, b): return apply("iff", expr, a, b) def build(arr, expr): return apply("build", arr, expr)
bsd-3-clause
5,839,656,666,500,585,000
29.608696
79
0.521171
false
jumpojoy/neutron
neutron/common/utils.py
2
17935
# Copyright 2011, VMware, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # # Borrowed from nova code base, more utilities will be added/borrowed as and # when needed. """Utilities and helper functions.""" import collections import datetime import decimal import errno import functools import hashlib import multiprocessing import netaddr import os import random import signal import socket import sys import tempfile import uuid import debtcollector from eventlet.green import subprocess from oslo_concurrency import lockutils from oslo_config import cfg from oslo_log import log as logging from oslo_utils import excutils from oslo_utils import importutils import six from stevedore import driver from neutron.common import constants as n_const from neutron.i18n import _LE TIME_FORMAT = "%Y-%m-%dT%H:%M:%SZ" LOG = logging.getLogger(__name__) SYNCHRONIZED_PREFIX = 'neutron-' synchronized = lockutils.synchronized_with_prefix(SYNCHRONIZED_PREFIX) class cache_method_results(object): """This decorator is intended for object methods only.""" def __init__(self, func): self.func = func functools.update_wrapper(self, func) self._first_call = True self._not_cached = object() def _get_from_cache(self, target_self, *args, **kwargs): func_name = "%(module)s.%(class)s.%(func_name)s" % { 'module': target_self.__module__, 'class': target_self.__class__.__name__, 'func_name': self.func.__name__, } key = (func_name,) + args if kwargs: key += dict2tuple(kwargs) try: item = target_self._cache.get(key, self._not_cached) except TypeError: LOG.debug("Method %(func_name)s cannot be cached due to " "unhashable parameters: args: %(args)s, kwargs: " "%(kwargs)s", {'func_name': func_name, 'args': args, 'kwargs': kwargs}) return self.func(target_self, *args, **kwargs) if item is self._not_cached: item = self.func(target_self, *args, **kwargs) target_self._cache.set(key, item, None) return item def __call__(self, target_self, *args, **kwargs): if not hasattr(target_self, '_cache'): raise NotImplementedError( "Instance of class %(module)s.%(class)s must contain _cache " "attribute" % { 'module': target_self.__module__, 'class': target_self.__class__.__name__}) if not target_self._cache: if self._first_call: LOG.debug("Instance of class %(module)s.%(class)s doesn't " "contain attribute _cache therefore results " "cannot be cached for %(func_name)s.", {'module': target_self.__module__, 'class': target_self.__class__.__name__, 'func_name': self.func.__name__}) self._first_call = False return self.func(target_self, *args, **kwargs) return self._get_from_cache(target_self, *args, **kwargs) def __get__(self, obj, objtype): return functools.partial(self.__call__, obj) @debtcollector.removals.remove(message="This will removed in the N cycle.") def read_cached_file(filename, cache_info, reload_func=None): """Read from a file if it has been modified. :param cache_info: dictionary to hold opaque cache. :param reload_func: optional function to be called with data when file is reloaded due to a modification. 
:returns: data from file """ mtime = os.path.getmtime(filename) if not cache_info or mtime != cache_info.get('mtime'): LOG.debug("Reloading cached file %s", filename) with open(filename) as fap: cache_info['data'] = fap.read() cache_info['mtime'] = mtime if reload_func: reload_func(cache_info['data']) return cache_info['data'] @debtcollector.removals.remove(message="This will removed in the N cycle.") def find_config_file(options, config_file): """Return the first config file found. We search for the paste config file in the following order: * If --config-file option is used, use that * Search for the configuration files via common cfg directories :retval Full path to config file, or None if no config file found """ fix_path = lambda p: os.path.abspath(os.path.expanduser(p)) if options.get('config_file'): if os.path.exists(options['config_file']): return fix_path(options['config_file']) dir_to_common = os.path.dirname(os.path.abspath(__file__)) root = os.path.join(dir_to_common, '..', '..', '..', '..') # Handle standard directory search for the config file config_file_dirs = [fix_path(os.path.join(os.getcwd(), 'etc')), fix_path(os.path.join('~', '.neutron-venv', 'etc', 'neutron')), fix_path('~'), os.path.join(cfg.CONF.state_path, 'etc'), os.path.join(cfg.CONF.state_path, 'etc', 'neutron'), fix_path(os.path.join('~', '.local', 'etc', 'neutron')), '/usr/etc/neutron', '/usr/local/etc/neutron', '/etc/neutron/', '/etc'] if 'plugin' in options: config_file_dirs = [ os.path.join(x, 'neutron', 'plugins', options['plugin']) for x in config_file_dirs ] if os.path.exists(os.path.join(root, 'plugins')): plugins = [fix_path(os.path.join(root, 'plugins', p, 'etc')) for p in os.listdir(os.path.join(root, 'plugins'))] plugins = [p for p in plugins if os.path.isdir(p)] config_file_dirs.extend(plugins) for cfg_dir in config_file_dirs: cfg_file = os.path.join(cfg_dir, config_file) if os.path.exists(cfg_file): return cfg_file def ensure_dir(dir_path): """Ensure a directory with 755 permissions mode.""" try: os.makedirs(dir_path, 0o755) except OSError as e: # If the directory already existed, don't raise the error. if e.errno != errno.EEXIST: raise def _subprocess_setup(): # Python installs a SIGPIPE handler by default. This is usually not what # non-Python subprocesses expect. signal.signal(signal.SIGPIPE, signal.SIG_DFL) def subprocess_popen(args, stdin=None, stdout=None, stderr=None, shell=False, env=None, preexec_fn=_subprocess_setup, close_fds=True): return subprocess.Popen(args, shell=shell, stdin=stdin, stdout=stdout, stderr=stderr, preexec_fn=preexec_fn, close_fds=close_fds, env=env) def parse_mappings(mapping_list, unique_values=True): """Parse a list of mapping strings into a dictionary. 
:param mapping_list: a list of strings of the form '<key>:<value>' :param unique_values: values must be unique if True :returns: a dict mapping keys to values """ mappings = {} for mapping in mapping_list: mapping = mapping.strip() if not mapping: continue split_result = mapping.split(':') if len(split_result) != 2: raise ValueError(_("Invalid mapping: '%s'") % mapping) key = split_result[0].strip() if not key: raise ValueError(_("Missing key in mapping: '%s'") % mapping) value = split_result[1].strip() if not value: raise ValueError(_("Missing value in mapping: '%s'") % mapping) if key in mappings: raise ValueError(_("Key %(key)s in mapping: '%(mapping)s' not " "unique") % {'key': key, 'mapping': mapping}) if unique_values and value in mappings.values(): raise ValueError(_("Value %(value)s in mapping: '%(mapping)s' " "not unique") % {'value': value, 'mapping': mapping}) mappings[key] = value return mappings def get_hostname(): return socket.gethostname() def get_first_host_ip(net, ip_version): return str(netaddr.IPAddress(net.first + 1, ip_version)) def compare_elements(a, b): """Compare elements if a and b have same elements. This method doesn't consider ordering """ if a is None: a = [] if b is None: b = [] return set(a) == set(b) def safe_sort_key(value): """Return value hash or build one for dictionaries.""" if isinstance(value, collections.Mapping): return sorted(value.items()) return value def dict2str(dic): return ','.join("%s=%s" % (key, val) for key, val in sorted(six.iteritems(dic))) def str2dict(string): res_dict = {} for keyvalue in string.split(','): (key, value) = keyvalue.split('=', 1) res_dict[key] = value return res_dict def dict2tuple(d): items = list(d.items()) items.sort() return tuple(items) def diff_list_of_dict(old_list, new_list): new_set = set([dict2str(l) for l in new_list]) old_set = set([dict2str(l) for l in old_list]) added = new_set - old_set removed = old_set - new_set return [str2dict(a) for a in added], [str2dict(r) for r in removed] def is_extension_supported(plugin, ext_alias): return ext_alias in getattr( plugin, "supported_extension_aliases", []) def log_opt_values(log): cfg.CONF.log_opt_values(log, logging.DEBUG) def get_random_mac(base_mac): mac = [int(base_mac[0], 16), int(base_mac[1], 16), int(base_mac[2], 16), random.randint(0x00, 0xff), random.randint(0x00, 0xff), random.randint(0x00, 0xff)] if base_mac[3] != '00': mac[3] = int(base_mac[3], 16) return ':'.join(["%02x" % x for x in mac]) def get_random_string(length): """Get a random hex string of the specified length. based on Cinder library cinder/transfer/api.py """ rndstr = "" random.seed(datetime.datetime.now().microsecond) while len(rndstr) < length: base_str = str(random.random()).encode('utf-8') rndstr += hashlib.sha224(base_str).hexdigest() return rndstr[0:length] def get_dhcp_agent_device_id(network_id, host): # Split host so as to always use only the hostname and # not the domain name. This will guarantee consistency # whether a local hostname or an fqdn is passed in. 
local_hostname = host.split('.')[0] host_uuid = uuid.uuid5(uuid.NAMESPACE_DNS, str(local_hostname)) return 'dhcp%s-%s' % (host_uuid, network_id) def cpu_count(): try: return multiprocessing.cpu_count() except NotImplementedError: return 1 class exception_logger(object): """Wrap a function and log raised exception :param logger: the logger to log the exception default is LOG.exception :returns: origin value if no exception raised; re-raise the exception if any occurred """ def __init__(self, logger=None): self.logger = logger def __call__(self, func): if self.logger is None: LOG = logging.getLogger(func.__module__) self.logger = LOG.exception def call(*args, **kwargs): try: return func(*args, **kwargs) except Exception as e: with excutils.save_and_reraise_exception(): self.logger(e) return call def is_dvr_serviced(device_owner): """Check if the port need to be serviced by DVR Helper function to check the device owners of the ports in the compute and service node to make sure if they are required for DVR or any service directly or indirectly associated with DVR. """ dvr_serviced_device_owners = (n_const.DEVICE_OWNER_LOADBALANCER, n_const.DEVICE_OWNER_LOADBALANCERV2, n_const.DEVICE_OWNER_DHCP) return (device_owner.startswith('compute:') or device_owner in dvr_serviced_device_owners) @debtcollector.removals.remove(message="This will removed in the N cycle.") def get_keystone_url(conf): if conf.auth_uri: auth_uri = conf.auth_uri.rstrip('/') else: auth_uri = ('%(protocol)s://%(host)s:%(port)s' % {'protocol': conf.auth_protocol, 'host': conf.auth_host, 'port': conf.auth_port}) # NOTE(ihrachys): all existing consumers assume version 2.0 return '%s/v2.0/' % auth_uri def ip_to_cidr(ip, prefix=None): """Convert an ip with no prefix to cidr notation :param ip: An ipv4 or ipv6 address. Convertable to netaddr.IPNetwork. :param prefix: Optional prefix. If None, the default 32 will be used for ipv4 and 128 for ipv6. """ net = netaddr.IPNetwork(ip) if prefix is not None: # Can't pass ip and prefix separately. Must concatenate strings. net = netaddr.IPNetwork(str(net.ip) + '/' + str(prefix)) return str(net) def fixed_ip_cidrs(fixed_ips): """Create a list of a port's fixed IPs in cidr notation. :param fixed_ips: A neutron port's fixed_ips dictionary """ return [ip_to_cidr(fixed_ip['ip_address'], fixed_ip.get('prefixlen')) for fixed_ip in fixed_ips] def is_cidr_host(cidr): """Determines if the cidr passed in represents a single host network :param cidr: Either an ipv4 or ipv6 cidr. :returns: True if the cidr is /32 for ipv4 or /128 for ipv6. :raises ValueError: raises if cidr does not contain a '/'. This disallows plain IP addresses specifically to avoid ambiguity. """ if '/' not in str(cidr): raise ValueError("cidr doesn't contain a '/'") net = netaddr.IPNetwork(cidr) if net.version == 4: return net.prefixlen == n_const.IPv4_BITS return net.prefixlen == n_const.IPv6_BITS def ip_version_from_int(ip_version_int): if ip_version_int == 4: return n_const.IPv4 if ip_version_int == 6: return n_const.IPv6 raise ValueError(_('Illegal IP version number')) def is_port_trusted(port): """Used to determine if port can be trusted not to attack network. Trust is currently based on the device_owner field starting with 'network:' since we restrict who can use that in the default policy.json file. """ return port['device_owner'].startswith('network:') class DelayedStringRenderer(object): """Takes a callable and its args and calls when __str__ is called Useful for when an argument to a logging statement is expensive to create. 
This will prevent the callable from being called if it's never converted to a string. """ def __init__(self, function, *args, **kwargs): self.function = function self.args = args self.kwargs = kwargs def __str__(self): return str(self.function(*self.args, **self.kwargs)) def camelize(s): return ''.join(s.replace('_', ' ').title().split()) def round_val(val): # we rely on decimal module since it behaves consistently across Python # versions (2.x vs. 3.x) return int(decimal.Decimal(val).quantize(decimal.Decimal('1'), rounding=decimal.ROUND_HALF_UP)) def replace_file(file_name, data): """Replaces the contents of file_name with data in a safe manner. First write to a temp file and then rename. Since POSIX renames are atomic, the file is unlikely to be corrupted by competing writes. We create the tempfile on the same device to ensure that it can be renamed. """ base_dir = os.path.dirname(os.path.abspath(file_name)) with tempfile.NamedTemporaryFile('w+', dir=base_dir, delete=False) as tmp_file: tmp_file.write(data) os.chmod(tmp_file.name, 0o644) os.rename(tmp_file.name, file_name) def load_class_by_alias_or_classname(namespace, name): """Load class using stevedore alias or the class name :param namespace: namespace where the alias is defined :param name: alias or class name of the class to be loaded :returns class if calls can be loaded :raises ImportError if class cannot be loaded """ if not name: LOG.error(_LE("Alias or class name is not set")) raise ImportError(_("Class not found.")) try: # Try to resolve class by alias mgr = driver.DriverManager(namespace, name) class_to_load = mgr.driver except RuntimeError: e1_info = sys.exc_info() # Fallback to class name try: class_to_load = importutils.import_class(name) except (ImportError, ValueError): LOG.error(_LE("Error loading class by alias"), exc_info=e1_info) LOG.error(_LE("Error loading class by class name"), exc_info=True) raise ImportError(_("Class not found.")) return class_to_load
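# Illustrative usage sketch (not part of the original module; the values are made up):
#
#     parse_mappings(['physnet1:br-eth1', 'physnet2:br-eth2'])
#     # -> {'physnet1': 'br-eth1', 'physnet2': 'br-eth2'}
#
#     ip_to_cidr('192.0.2.10')            # -> '192.0.2.10/32'
#     ip_to_cidr('2001:db8::1', 64)       # -> '2001:db8::1/64'
#
#     str2dict(dict2str({'a': '1', 'b': '2'}))   # round-trips to {'a': '1', 'b': '2'}
#
#     is_cidr_host('192.0.2.10/32')       # -> True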
apache-2.0
-6,411,567,067,371,682,000
33.096958
79
0.603067
false
sajeeshcs/nested_quota
doc/source/conf.py
18
9425
# -*- coding: utf-8 -*- # # nova documentation build configuration file, created by # sphinx-quickstart on Sat May 1 15:17:47 2010. # # This file is execfile()d with the current directory set to # its containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. import sys import os # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. sys.path.insert(0, os.path.abspath('../../')) sys.path.insert(0, os.path.abspath('../')) sys.path.insert(0, os.path.abspath('./')) # -- General configuration ---------------------------------------------------- # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones. extensions = ['sphinx.ext.autodoc', 'ext.nova_todo', 'sphinx.ext.coverage', 'sphinx.ext.pngmath', 'sphinx.ext.ifconfig', 'sphinx.ext.graphviz', 'oslosphinx', ] todo_include_todos = True # Add any paths that contain templates here, relative to this directory. # Changing the path so that the Hudson build output contains GA code # and the source docs do not contain the code so local, offline sphinx builds # are "clean." templates_path = [] if os.getenv('HUDSON_PUBLISH_DOCS'): templates_path = ['_ga', '_templates'] else: templates_path = ['_templates'] # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. #source_encoding = 'utf-8' # The master toctree document. master_doc = 'index' # General information about the project. project = u'nova' copyright = u'2010-present, OpenStack Foundation' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # from nova.version import version_info # The full version, including alpha/beta/rc tags. release = version_info.release_string() # The short X.Y version. version = version_info.version_string() # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. #language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: #today = '' # Else, today_fmt is used as the format for a strftime call. #today_fmt = '%B %d, %Y' # List of documents that shouldn't be included in the build. unused_docs = [ 'api_ext/rst_extension_template', 'vmwareapi_readme', 'installer', ] # List of directories, relative to source directory, that shouldn't be searched # for source files. exclude_trees = [] # The reST default role (used for this markup: `text`) to use # for all documents. #default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. #add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). add_module_names = False # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. 
modindex_common_prefix = ['nova.'] # -- Options for man page output ---------------------------------------------- # Grouping the document tree for man pages. # List of tuples 'sourcefile', 'target', u'title', u'Authors name', 'manual' man_pages = [ ('man/nova-all', 'nova-all', u'Cloud controller fabric', [u'OpenStack'], 1), ('man/nova-api-ec2', 'nova-api-ec2', u'Cloud controller fabric', [u'OpenStack'], 1), ('man/nova-api-metadata', 'nova-api-metadata', u'Cloud controller fabric', [u'OpenStack'], 1), ('man/nova-api-os-compute', 'nova-api-os-compute', u'Cloud controller fabric', [u'OpenStack'], 1), ('man/nova-api', 'nova-api', u'Cloud controller fabric', [u'OpenStack'], 1), ('man/nova-cert', 'nova-cert', u'Cloud controller fabric', [u'OpenStack'], 1), ('man/nova-compute', 'nova-compute', u'Cloud controller fabric', [u'OpenStack'], 1), ('man/nova-console', 'nova-console', u'Cloud controller fabric', [u'OpenStack'], 1), ('man/nova-consoleauth', 'nova-consoleauth', u'Cloud controller fabric', [u'OpenStack'], 1), ('man/nova-dhcpbridge', 'nova-dhcpbridge', u'Cloud controller fabric', [u'OpenStack'], 1), ('man/nova-manage', 'nova-manage', u'Cloud controller fabric', [u'OpenStack'], 1), ('man/nova-network', 'nova-network', u'Cloud controller fabric', [u'OpenStack'], 1), ('man/nova-novncproxy', 'nova-novncproxy', u'Cloud controller fabric', [u'OpenStack'], 1), ('man/nova-spicehtml5proxy', 'nova-spicehtml5proxy', u'Cloud controller fabric', [u'OpenStack'], 1), ('man/nova-serialproxy', 'nova-serialproxy', u'Cloud controller fabric', [u'OpenStack'], 1), ('man/nova-objectstore', 'nova-objectstore', u'Cloud controller fabric', [u'OpenStack'], 1), ('man/nova-rootwrap', 'nova-rootwrap', u'Cloud controller fabric', [u'OpenStack'], 1), ('man/nova-scheduler', 'nova-scheduler', u'Cloud controller fabric', [u'OpenStack'], 1), ('man/nova-xvpvncproxy', 'nova-xvpvncproxy', u'Cloud controller fabric', [u'OpenStack'], 1), ('man/nova-conductor', 'nova-conductor', u'Cloud controller fabric', [u'OpenStack'], 1), ] # -- Options for HTML output -------------------------------------------------- # The theme to use for HTML and HTML Help pages. Major themes that come with # Sphinx are currently 'default' and 'sphinxdoc'. # html_theme_path = ["."] # html_theme = '_theme' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. #html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. #html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # "<project> v<release> documentation". #html_title = None # A shorter title for the navigation bar. Default is the same as html_title. #html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. #html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. #html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". #html_static_path = ['_static'] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. 
#html_last_updated_fmt = '%b %d, %Y' git_cmd = "git log --pretty=format:'%ad, commit %h' --date=local -n1" html_last_updated_fmt = os.popen(git_cmd).read() # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. #html_use_smartypants = True # Custom sidebar templates, maps document names to template names. #html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. #html_additional_pages = {} # If false, no module index is generated. #html_use_modindex = True # If false, no index is generated. #html_use_index = True # If true, the index is split into individual pages for each letter. #html_split_index = False # If true, links to the reST sources are added to the pages. #html_show_sourcelink = True # If true, an OpenSearch description file will be output, and all pages will # contain a <link> tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. #html_use_opensearch = '' # If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml"). #html_file_suffix = '' # Output file base name for HTML help builder. htmlhelp_basename = 'novadoc' # -- Options for LaTeX output ------------------------------------------------- # The paper size ('letter' or 'a4'). #latex_paper_size = 'letter' # The font size ('10pt', '11pt' or '12pt'). #latex_font_size = '10pt' # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, documentclass # [howto/manual]). latex_documents = [ ('index', 'Nova.tex', u'Nova Documentation', u'OpenStack Foundation', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. #latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. #latex_use_parts = False # Additional stuff for the LaTeX preamble. #latex_preamble = '' # Documents to append as an appendix to all manuals. #latex_appendices = [] # If false, no module index is generated. #latex_use_modindex = True
apache-2.0
-9,079,635,192,354,095,000
33.650735
84
0.681167
false
cherusk/ansible
lib/ansible/plugins/lookup/inventory_hostnames.py
117
1942
# (c) 2012, Michael DeHaan <[email protected]> # (c) 2013, Steven Dossett <[email protected]> # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. from __future__ import (absolute_import, division, print_function) __metaclass__ = type from ansible.plugins.lookup import LookupBase from ansible.inventory import Inventory class LookupModule(LookupBase): def get_hosts(self, variables, pattern): hosts = [] if pattern[0] in ('!','&'): obj = pattern[1:] else: obj = pattern if obj in variables['groups']: hosts = variables['groups'][obj] elif obj in variables['groups']['all']: hosts = [obj] return hosts def run(self, terms, variables=None, **kwargs): host_list = [] for term in terms: patterns = Inventory.order_patterns(Inventory.split_host_pattern(term)) for p in patterns: that = self.get_hosts(variables, p) if p.startswith("!"): host_list = [ h for h in host_list if h not in that] elif p.startswith("&"): host_list = [ h for h in host_list if h in that ] else: host_list.extend(that) # return unique list return list(set(host_list))
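# Illustrative usage sketch (not part of the original plugin): from a playbook or
# template this lookup expands an inventory host pattern, e.g.
#
#     - debug:
#         msg: "{{ lookup('inventory_hostnames', 'webservers:&staging:!maintenance') }}"
#
# which yields the unique hosts that are in 'webservers' and 'staging' but not in
# 'maintenance' (ordering is not preserved because the result passes through a set).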
gpl-3.0
2,472,174,354,347,261,000
33.070175
83
0.623069
false
luiseduardohdbackup/odoo
addons/website_gengo/controllers/main.py
350
1799
# -*- coding: utf-8 -*- import openerp from openerp import http, SUPERUSER_ID from openerp.http import request import time GENGO_DEFAULT_LIMIT = 20 class website_gengo(http.Controller): @http.route('/website/get_translated_length', type='json', auth='user', website=True) def get_translated_length(self, translated_ids, lang): ir_translation_obj = request.registry['ir.translation'] result={"done":0} gengo_translation_ids = ir_translation_obj.search(request.cr, request.uid, [('id','in',translated_ids),('gengo_translation','!=', False)]) for trans in ir_translation_obj.browse(request.cr, request.uid, gengo_translation_ids): result['done'] += len(trans.source.split()) return result @http.route('/website/check_gengo_set', type='json', auth='user', website=True) def check_gengo_set(self): user = request.registry['res.users'].browse(request.cr, SUPERUSER_ID, request.uid) company_flag = 0 if not user.company_id.gengo_public_key or not user.company_id.gengo_private_key: company_flag = user.company_id.id return company_flag @http.route('/website/set_gengo_config', type='json', auth='user', website=True) def set_gengo_config(self,config): user = request.registry['res.users'].browse(request.cr, request.uid, request.uid) if user.company_id: request.registry['res.company'].write(request.cr, request.uid, user.company_id.id, config) return True @http.route('/website/post_gengo_jobs', type='json', auth='user', website=True) def post_gengo_jobs(self): request.registry['base.gengo.translations']._sync_request(request.cr, request.uid, limit=GENGO_DEFAULT_LIMIT, context=request.context) return True
agpl-3.0
-3,846,516,236,858,520,600
45.128205
146
0.670928
false
fcolamar/AliPhysics
PWGJE/EMCALJetTasks/Tracks/analysis/ext/__init__.py
369
1063
#************************************************************************** #* Copyright(c) 1998-2014, ALICE Experiment at CERN, All rights reserved. * #* * #* Author: The ALICE Off-line Project. * #* Contributors are mentioned in the code where appropriate. * #* * #* Permission to use, copy, modify and distribute this software and its * #* documentation strictly for non-commercial purposes is hereby granted * #* without fee, provided that the above copyright notice appears in all * #* copies and that both the copyright notice and this permission notice * #* appear in the supporting documentation. The authors make no claims * #* about the suitability of this software for any purpose. It is * #* provided "as is" without express or implied warranty. * #**************************************************************************
bsd-3-clause
-2,974,482,915,423,181,000
75
75
0.482596
false
HPNetworking/HP-Intelligent-Management-Center
build/lib/pyhpimc/plat/groups.py
3
4490
'''Copyright 2015 Chris Young Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.''' # IMC Server Build Project 1.0 # Chris Young a.k.a Darth # # Hewlett Packard Company Revision 1.0 # # Change History.... 3/19/15 # # This series of functions is intended to help automate the build of an IMC server using # the eAPI function. The eAPI is available natively on the IMC enterprise edition # and can be added to the standard edition through the purchase of the # eAPI addon license. # This section imports required libraries import requests import json HEADERS = {'Accept': 'application/json', 'Content-Type': 'application/json', 'Accept-encoding': 'application/json'} """ This section deals with HPE IMC Custom View functions """ def get_custom_views(auth, url,name=None,headers=HEADERS): """ function requires no input and returns a list of dictionaries of custom views from an HPE IMC. Optional name argument will return only the specified view. :param name: str containing the name of the desired custom view :return: list of dictionaties containing attributes of the custom views """ if name is None: get_custom_view_url = '/imcrs/plat/res/view/custom?resPrivilegeFilter=false&desc=false&total=false' elif name is not None: get_custom_view_url = '/imcrs/plat/res/view/custom?resPrivilegeFilter=false&name='+name+'&desc=false&total=false' f_url = url + get_custom_view_url r = requests.get(f_url, auth=auth, headers=headers) try: if r.status_code == 200: custom_view_list = (json.loads(r.text))["customView"] if type(custom_view_list) == dict: custom_view_list = [custom_view_list] return custom_view_list else: return custom_view_list except requests.exceptions.RequestException as e: return "Error:\n" + str(e) + ' get_custom_views: An Error has occured' def create_custom_views(auth, url,name=None, upperview=None): """ function takes no input and issues a RESTFUL call to get a list of custom views from HPE IMC. Optioanl Name input will return only the specified view. :param name: string containg the name of the desired custom view :return: list of dictionaries containing attributes of the custom views. """ create_custom_views_url = '/imcrs/plat/res/view/custom?resPrivilegeFilter=false&desc=false&total=false' f_url = url + create_custom_views_url if upperview is None: payload = '''{ "name": "''' + name + '''", "upLevelSymbolId" : ""}''' print (payload) else: parentviewid = get_custom_views(auth, url, upperview)[0]['symbolId'] payload = '''{ "name": "'''+name+ '''", "upLevelSymbolId" : "'''+str(parentviewid)+'''"}''' print (payload) r = requests.post(f_url, data = payload, auth=auth, headers=HEADERS) # creates the URL using the payload variable as the contents try: if r.status_code == 201: return 'View ' + name +' created successfully' except requests.exceptions.RequestException as e: return "Error:\n" + str(e) + ' get_custom_views: An Error has occured' def delete_custom_view(auth, url, name): """ function takes input of auth, url, and name and issues a RESTFUL call to delete a specific of custom views from HPE IMC. 
:param name: string containing the name of the desired custom view

    :return:

    """
    view_id = get_custom_views(auth, url, name)[0]['symbolId']
    delete_custom_view_url = '/imcrs/plat/res/view/custom/' + str(view_id)
    f_url = url + delete_custom_view_url
    r = requests.delete(f_url, auth=auth, headers=HEADERS)
    # issues the DELETE against the custom view's REST resource
    try:
        if r.status_code == 204:
            return 'View ' + name + ' deleted successfully'
    except requests.exceptions.RequestException as e:
        return "Error:\n" + str(e) + ' delete_custom_view: An Error has occurred'
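# Illustrative usage sketch (not part of the original module; the host, port and
# credentials are placeholders):
#
#     auth = requests.auth.HTTPDigestAuth('admin', 'password')
#     url = 'http://imc.example.com:8080'
#
#     get_custom_views(auth, url)                            # list every custom view
#     create_custom_views(auth, url, name='Branch Offices')  # create a top-level view
#     delete_custom_view(auth, url, name='Branch Offices')   # remove it again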
apache-2.0
-8,054,688,239,667,306,000
40.583333
134
0.678619
false
ArcherCraftStore/ArcherVMPeridot
Python/Lib/encodings/big5hkscs.py
816
1039
# # big5hkscs.py: Python Unicode Codec for BIG5HKSCS # # Written by Hye-Shik Chang <[email protected]> # import _codecs_hk, codecs import _multibytecodec as mbc codec = _codecs_hk.getcodec('big5hkscs') class Codec(codecs.Codec): encode = codec.encode decode = codec.decode class IncrementalEncoder(mbc.MultibyteIncrementalEncoder, codecs.IncrementalEncoder): codec = codec class IncrementalDecoder(mbc.MultibyteIncrementalDecoder, codecs.IncrementalDecoder): codec = codec class StreamReader(Codec, mbc.MultibyteStreamReader, codecs.StreamReader): codec = codec class StreamWriter(Codec, mbc.MultibyteStreamWriter, codecs.StreamWriter): codec = codec def getregentry(): return codecs.CodecInfo( name='big5hkscs', encode=Codec().encode, decode=Codec().decode, incrementalencoder=IncrementalEncoder, incrementaldecoder=IncrementalDecoder, streamreader=StreamReader, streamwriter=StreamWriter, )
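# Illustrative usage sketch (not part of the original codec module): the codec
# defined above is normally reached through the standard codecs machinery.
if __name__ == "__main__":
    info = codecs.lookup('big5hkscs')           # CodecInfo produced via getregentry()
    encoded, _ = info.encode('\u4e2d\u6587')    # encode two CJK characters
    decoded, _ = info.decode(encoded)           # decode back to text
    assert decoded == '\u4e2d\u6587'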
apache-2.0
8,997,595,608,263,025,000
25.641026
74
0.703561
false
Cheppers/zulip
zerver/management/commands/add_users_to_streams.py
113
2394
from __future__ import absolute_import from optparse import make_option from django.core.management.base import BaseCommand from zerver.lib.actions import create_stream_if_needed, do_add_subscription from zerver.models import Realm, UserProfile, get_user_profile_by_email class Command(BaseCommand): help = """Add some or all users in a realm to a set of streams.""" option_list = BaseCommand.option_list + ( make_option('-d', '--domain', dest='domain', type='str', help='The name of the realm in which you are adding people to streams.'), make_option('-s', '--streams', dest='streams', type='str', help='A comma-separated list of stream names.'), make_option('-u', '--users', dest='users', type='str', help='A comma-separated list of email addresses.'), make_option('-a', '--all-users', dest='all_users', action="store_true", default=False, help='Add all users in this realm to these streams.'), ) def handle(self, **options): if options["domain"] is None or options["streams"] is None or \ (options["users"] is None and options["all_users"] is None): self.print_help("python manage.py", "add_users_to_streams") exit(1) stream_names = set([stream.strip() for stream in options["streams"].split(",")]) realm = Realm.objects.get(domain=options["domain"]) if options["all_users"]: user_profiles = UserProfile.objects.filter(realm=realm) else: emails = set([email.strip() for email in options["users"].split(",")]) user_profiles = [] for email in emails: user_profiles.append(get_user_profile_by_email(email)) for stream_name in set(stream_names): for user_profile in user_profiles: stream, _ = create_stream_if_needed(user_profile.realm, stream_name) did_subscribe = do_add_subscription(user_profile, stream) print "%s %s to %s" % ( "Subscribed" if did_subscribe else "Already subscribed", user_profile.email, stream_name)
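# Illustrative invocation sketch (not part of the original command; the realm
# domain, stream names and addresses are placeholders):
#
#     python manage.py add_users_to_streams -d example.com \
#         -s engineering,design -u [email protected],[email protected]
#
#     # or subscribe every user in the realm:
#     python manage.py add_users_to_streams -d example.com -s engineering -a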
apache-2.0
6,632,769,799,165,786,000
41.75
93
0.553049
false
c0defreak/python-for-android
python3-alpha/python3-src/Lib/idlelib/UndoDelegator.py
67
10305
import string from tkinter import * from idlelib.Delegator import Delegator #$ event <<redo>> #$ win <Control-y> #$ unix <Alt-z> #$ event <<undo>> #$ win <Control-z> #$ unix <Control-z> #$ event <<dump-undo-state>> #$ win <Control-backslash> #$ unix <Control-backslash> class UndoDelegator(Delegator): max_undo = 1000 def __init__(self): Delegator.__init__(self) self.reset_undo() def setdelegate(self, delegate): if self.delegate is not None: self.unbind("<<undo>>") self.unbind("<<redo>>") self.unbind("<<dump-undo-state>>") Delegator.setdelegate(self, delegate) if delegate is not None: self.bind("<<undo>>", self.undo_event) self.bind("<<redo>>", self.redo_event) self.bind("<<dump-undo-state>>", self.dump_event) def dump_event(self, event): from pprint import pprint pprint(self.undolist[:self.pointer]) print("pointer:", self.pointer, end=' ') print("saved:", self.saved, end=' ') print("can_merge:", self.can_merge, end=' ') print("get_saved():", self.get_saved()) pprint(self.undolist[self.pointer:]) return "break" def reset_undo(self): self.was_saved = -1 self.pointer = 0 self.undolist = [] self.undoblock = 0 # or a CommandSequence instance self.set_saved(1) def set_saved(self, flag): if flag: self.saved = self.pointer else: self.saved = -1 self.can_merge = False self.check_saved() def get_saved(self): return self.saved == self.pointer saved_change_hook = None def set_saved_change_hook(self, hook): self.saved_change_hook = hook was_saved = -1 def check_saved(self): is_saved = self.get_saved() if is_saved != self.was_saved: self.was_saved = is_saved if self.saved_change_hook: self.saved_change_hook() def insert(self, index, chars, tags=None): self.addcmd(InsertCommand(index, chars, tags)) def delete(self, index1, index2=None): self.addcmd(DeleteCommand(index1, index2)) # Clients should call undo_block_start() and undo_block_stop() # around a sequence of editing cmds to be treated as a unit by # undo & redo. Nested matching calls are OK, and the inner calls # then act like nops. OK too if no editing cmds, or only one # editing cmd, is issued in between: if no cmds, the whole # sequence has no effect; and if only one cmd, that cmd is entered # directly into the undo list, as if undo_block_xxx hadn't been # called. The intent of all that is to make this scheme easy # to use: all the client has to worry about is making sure each # _start() call is matched by a _stop() call. 
def undo_block_start(self): if self.undoblock == 0: self.undoblock = CommandSequence() self.undoblock.bump_depth() def undo_block_stop(self): if self.undoblock.bump_depth(-1) == 0: cmd = self.undoblock self.undoblock = 0 if len(cmd) > 0: if len(cmd) == 1: # no need to wrap a single cmd cmd = cmd.getcmd(0) # this blk of cmds, or single cmd, has already # been done, so don't execute it again self.addcmd(cmd, 0) def addcmd(self, cmd, execute=True): if execute: cmd.do(self.delegate) if self.undoblock != 0: self.undoblock.append(cmd) return if self.can_merge and self.pointer > 0: lastcmd = self.undolist[self.pointer-1] if lastcmd.merge(cmd): return self.undolist[self.pointer:] = [cmd] if self.saved > self.pointer: self.saved = -1 self.pointer = self.pointer + 1 if len(self.undolist) > self.max_undo: ##print "truncating undo list" del self.undolist[0] self.pointer = self.pointer - 1 if self.saved >= 0: self.saved = self.saved - 1 self.can_merge = True self.check_saved() def undo_event(self, event): if self.pointer == 0: self.bell() return "break" cmd = self.undolist[self.pointer - 1] cmd.undo(self.delegate) self.pointer = self.pointer - 1 self.can_merge = False self.check_saved() return "break" def redo_event(self, event): if self.pointer >= len(self.undolist): self.bell() return "break" cmd = self.undolist[self.pointer] cmd.redo(self.delegate) self.pointer = self.pointer + 1 self.can_merge = False self.check_saved() return "break" class Command: # Base class for Undoable commands tags = None def __init__(self, index1, index2, chars, tags=None): self.marks_before = {} self.marks_after = {} self.index1 = index1 self.index2 = index2 self.chars = chars if tags: self.tags = tags def __repr__(self): s = self.__class__.__name__ t = (self.index1, self.index2, self.chars, self.tags) if self.tags is None: t = t[:-1] return s + repr(t) def do(self, text): pass def redo(self, text): pass def undo(self, text): pass def merge(self, cmd): return 0 def save_marks(self, text): marks = {} for name in text.mark_names(): if name != "insert" and name != "current": marks[name] = text.index(name) return marks def set_marks(self, text, marks): for name, index in marks.items(): text.mark_set(name, index) class InsertCommand(Command): # Undoable insert command def __init__(self, index1, chars, tags=None): Command.__init__(self, index1, None, chars, tags) def do(self, text): self.marks_before = self.save_marks(text) self.index1 = text.index(self.index1) if text.compare(self.index1, ">", "end-1c"): # Insert before the final newline self.index1 = text.index("end-1c") text.insert(self.index1, self.chars, self.tags) self.index2 = text.index("%s+%dc" % (self.index1, len(self.chars))) self.marks_after = self.save_marks(text) ##sys.__stderr__.write("do: %s\n" % self) def redo(self, text): text.mark_set('insert', self.index1) text.insert(self.index1, self.chars, self.tags) self.set_marks(text, self.marks_after) text.see('insert') ##sys.__stderr__.write("redo: %s\n" % self) def undo(self, text): text.mark_set('insert', self.index1) text.delete(self.index1, self.index2) self.set_marks(text, self.marks_before) text.see('insert') ##sys.__stderr__.write("undo: %s\n" % self) def merge(self, cmd): if self.__class__ is not cmd.__class__: return False if self.index2 != cmd.index1: return False if self.tags != cmd.tags: return False if len(cmd.chars) != 1: return False if self.chars and \ self.classify(self.chars[-1]) != self.classify(cmd.chars): return False self.index2 = cmd.index2 self.chars = self.chars + cmd.chars return True 
alphanumeric = string.ascii_letters + string.digits + "_" def classify(self, c): if c in self.alphanumeric: return "alphanumeric" if c == "\n": return "newline" return "punctuation" class DeleteCommand(Command): # Undoable delete command def __init__(self, index1, index2=None): Command.__init__(self, index1, index2, None, None) def do(self, text): self.marks_before = self.save_marks(text) self.index1 = text.index(self.index1) if self.index2: self.index2 = text.index(self.index2) else: self.index2 = text.index(self.index1 + " +1c") if text.compare(self.index2, ">", "end-1c"): # Don't delete the final newline self.index2 = text.index("end-1c") self.chars = text.get(self.index1, self.index2) text.delete(self.index1, self.index2) self.marks_after = self.save_marks(text) ##sys.__stderr__.write("do: %s\n" % self) def redo(self, text): text.mark_set('insert', self.index1) text.delete(self.index1, self.index2) self.set_marks(text, self.marks_after) text.see('insert') ##sys.__stderr__.write("redo: %s\n" % self) def undo(self, text): text.mark_set('insert', self.index1) text.insert(self.index1, self.chars) self.set_marks(text, self.marks_before) text.see('insert') ##sys.__stderr__.write("undo: %s\n" % self) class CommandSequence(Command): # Wrapper for a sequence of undoable cmds to be undone/redone # as a unit def __init__(self): self.cmds = [] self.depth = 0 def __repr__(self): s = self.__class__.__name__ strs = [] for cmd in self.cmds: strs.append(" %r" % (cmd,)) return s + "(\n" + ",\n".join(strs) + "\n)" def __len__(self): return len(self.cmds) def append(self, cmd): self.cmds.append(cmd) def getcmd(self, i): return self.cmds[i] def redo(self, text): for cmd in self.cmds: cmd.redo(text) def undo(self, text): cmds = self.cmds[:] cmds.reverse() for cmd in cmds: cmd.undo(text) def bump_depth(self, incr=1): self.depth = self.depth + incr return self.depth def main(): from idlelib.Percolator import Percolator root = Tk() root.wm_protocol("WM_DELETE_WINDOW", root.quit) text = Text() text.pack() text.focus_set() p = Percolator(text) d = UndoDelegator() p.insertfilter(d) root.mainloop() if __name__ == "__main__": main()
apache-2.0
8,614,393,566,248,164,000
28.275568
75
0.554876
false
mahmoud/hematite
hematite/tests/test_url.py
1
4540
# -*- coding: utf-8 -*- #from compat import unicode, bytes # TODO: round-tripping tests import pytest from hematite.url import URL, _URL_RE, parse_authority TEST_URLS = [ '*', # e.g., OPTIONS * 'http://googlewebsite.com/e-shops.aspx', 'http://example.com:8080/search?q=123&business=Nothing%20Special', 'http://hatnote.com:9000?arg=1&arg=2&arg=3', 'https://xn--bcher-kva.ch', 'http://xn--ggbla1c4e.xn--ngbc5azd/', 'http://tools.ietf.org/html/rfc3986#section-3.4', 'http://wiki:[email protected]', 'ftp://ftp.rfc-editor.org/in-notes/tar/RFCs0001-0500.tar.gz', 'http://[1080:0:0:0:8:800:200C:417A]/index.html', 'ssh://192.0.2.16:22/', 'https://[::101.45.75.219]:80/?hi=bye', 'ldap://[::192.9.5.5]/dc=example,dc=com??sub?(sn=Jensen)', 'mailto:[email protected][email protected]&body=hi%20http://wikipedia.org', 'news:alt.rec.motorcycle', 'tel:+1-800-867-5309', 'urn:oasis:member:A00024:x', ('magnet:?xt=urn:btih:1a42b9e04e122b97a5254e3df77ab3c4b7da725f&dn=Puppy%' '20Linux%20precise-5.7.1.iso&tr=udp://tracker.openbittorrent.com:80&' 'tr=udp://tracker.publicbt.com:80&tr=udp://tracker.istole.it:6969&' 'tr=udp://tracker.ccc.de:80&tr=udp://open.demonii.com:1337')] UNICODE_URLS = [ # 'http://مثال.آزمایشی' ('\xd9\x85\xd8\xab\xd8\xa7\xd9\x84' '.\xd8\xa2\xd8\xb2\xd9\x85\xd8\xa7' '\xdb\x8c\xd8\xb4\xdb\x8c')] @pytest.fixture(scope="module", params=TEST_URLS) def test_url(request): param = request.param #request.addfinalizer(lambda: None) return param @pytest.fixture(scope="module", params=TEST_URLS) def test_authority(request): match = _URL_RE.match(request.param) return match.groupdict()['authority'] def test_regex(test_url): match = _URL_RE.match(test_url) assert match.groupdict() def test_parse_authorities(test_authority): if not test_authority: return True else: _, _, family, host, port = parse_authority(test_authority) assert bool(host) # TODO def test_basic(): u1 = URL('http://googlewebsite.com/e-shops.aspx') assert isinstance(u1.to_text(), unicode) assert u1.host == 'googlewebsite.com' def test_idna(): u1 = URL('http://bücher.ch') assert u1.host == u'bücher.ch' assert u1.to_text(display=False) == 'http://xn--bcher-kva.ch' assert u1.to_text(display=True) == u'http://bücher.ch' u2 = URL('https://xn--bcher-kva.ch') assert u2.host == u'bücher.ch' assert u2.to_text(display=False) == 'https://xn--bcher-kva.ch' assert u2.to_text(display=True) == u'https://bücher.ch' def test_urlparse_equiv(test_url): from urlparse import urlparse, urlunparse url_obj = URL(test_url) assert urlunparse(urlparse(test_url)) == urlunparse(url_obj) def test_query_params(test_url): url_obj = URL(test_url) if not url_obj.query_params: return True assert test_url.endswith(url_obj.get_query_string()) def test_iri_query(): url = URL(u'http://minerals.rocks.ore/?mountain=\N{MOUNTAIN}') assert url.query_params['mountain'] == u'\N{MOUNTAIN}' assert url.query_params.to_bytes().endswith('%E2%9B%B0') assert url.query_params.to_text().endswith(u'\N{MOUNTAIN}') # fails because urlparse assumes query strings are encoded with latin1 url2 = URL(url.to_bytes()) assert url2.query_params['mountain'] == u'\N{MOUNTAIN}' def test_iri_path(): url = URL(u'http://minerals.rocks.ore/mountain/\N{MOUNTAIN}/') assert url.path == u'/mountain/\N{MOUNTAIN}/' assert url.to_bytes().endswith('%E2%9B%B0/') #def test_urlparse_obj_input(): # TODO # with pytest.raises(TypeError): # URL(object()) def test_url_copy(): url = URL('http://example.com/foo?bar=baz') url_copy = URL(url) assert url == url_copy def test_invalid_url(): pass #with pytest.raises(ValueError): # URL('this is 
pretty much the furthest thing from a url') # TODO # URL('???????????????????') # TODOx2 def test_invalid_port(): with pytest.raises(ValueError): URL('http://reader.googlewebsite.com:neverforget') def test_invalid_ipv6(): invalid_ipv6_ips = ['2001::0234:C1ab::A0:aabc:003F', '2001::1::3F'] for ip in invalid_ipv6_ips: with pytest.raises(ValueError): URL('http://[' + ip + ']') def test_is_absolute(): url = URL('/hi/hello?yes=no') assert not url.is_absolute url = URL('http://googlewebsite.biz/hi') assert url.is_absolute
bsd-3-clause
7,513,489,347,499,159,000
28.763158
77
0.635279
false
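The hematite tests above lean on IDNA host handling (u'bücher.ch' round-tripping with 'xn--bcher-kva.ch'). A minimal stdlib-only sketch of that round trip, using Python's built-in idna codec rather than hematite's own code:

# Minimal sketch of the IDNA round trip exercised by the hematite tests above,
# using only the Python standard library (not hematite's implementation).
def to_ascii_host(host):
    # u'bücher.ch' -> 'xn--bcher-kva.ch'
    return host.encode('idna').decode('ascii')

def to_display_host(ascii_host):
    # 'xn--bcher-kva.ch' -> u'bücher.ch'
    return ascii_host.encode('ascii').decode('idna')

if __name__ == '__main__':
    assert to_ascii_host(u'bücher.ch') == 'xn--bcher-kva.ch'
    assert to_display_host('xn--bcher-kva.ch') == u'bücher.ch'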
matmutant/sl4a
python/src/Lib/test/test_threaded_import.py
77
2535
# This is a variant of the very old (early 90's) file
# Demo/threads/bug.py.  It simply provokes a number of threads into
# trying to import the same module "at the same time".
# There are no pleasant failure modes -- most likely is that Python
# complains several times about module random having no attribute
# randrange, and then Python hangs.

import thread
from test.test_support import verbose, TestSkipped, TestFailed

critical_section = thread.allocate_lock()
done = thread.allocate_lock()

def task():
    global N, critical_section, done
    import random
    x = random.randrange(1, 3)
    critical_section.acquire()
    N -= 1
    # Must release critical_section before releasing done, else the main
    # thread can exit and set critical_section to None as part of global
    # teardown; then critical_section.release() raises AttributeError.
    finished = N == 0
    critical_section.release()
    if finished:
        done.release()

def test_import_hangers():
    import sys
    if verbose:
        print "testing import hangers ...",
    import test.threaded_import_hangers
    try:
        if test.threaded_import_hangers.errors:
            raise TestFailed(test.threaded_import_hangers.errors)
        elif verbose:
            print "OK."
    finally:
        # In case this test is run again, make sure the helper module
        # gets loaded from scratch again.
        del sys.modules['test.threaded_import_hangers']

# Tricky:  When regrtest imports this module, the thread running regrtest
# grabs the import lock and won't let go of it until this module returns.
# All other threads attempting an import hang for the duration.  Since
# this test spawns threads that do little *but* import, we can't do that
# successfully until after this module finishes importing and regrtest
# regains control.  To make this work, a special case was added to
# regrtest to invoke a module's "test_main" function (if any) after
# importing it.

def test_main():        # magic name!  see above
    global N, done

    import imp
    if imp.lock_held():
        # This triggers on, e.g., from test import autotest.
        raise TestSkipped("can't run when import lock is held")

    done.acquire()
    for N in (20, 50) * 3:
        if verbose:
            print "Trying", N, "threads ...",
        for i in range(N):
            thread.start_new_thread(task, ())
        done.acquire()
        if verbose:
            print "OK."
    done.release()

    test_import_hangers()

if __name__ == "__main__":
    test_main()
apache-2.0
-7,247,927,586,235,504,000
32.8
73
0.669428
false
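The test above provokes many threads into importing the same module at essentially the same time. A small sketch of the same pattern written against Python 3's threading module (an illustration of the idea, not the regrtest harness above):

# Sketch of the "many threads import the same module at once" pattern from
# the test above, written for Python 3's threading module.
import threading

def task(done, counter):
    import random              # concurrent import of the same module
    random.randrange(1, 3)
    with counter['lock']:
        counter['n'] -= 1
        if counter['n'] == 0:
            done.set()

def run(num_threads=20):
    done = threading.Event()
    counter = {'lock': threading.Lock(), 'n': num_threads}
    for _ in range(num_threads):
        threading.Thread(target=task, args=(done, counter)).start()
    done.wait()

if __name__ == '__main__':
    run()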
foundit/Piped
doc/tutorials/twitter/1_basic/twitter_tutorial/provider.py
2
1500
import tweepy
from zope import interface

from twisted.application import service
from twisted.internet import defer, threads, reactor

from piped import exceptions, log, resource, util


class MyTwitterProvider(object):
    # state that we are a resource provider, so that the piped plugin system finds us
    interface.classProvides(resource.IResourceProvider)

    def __init__(self):
        # we store the apis by the account name since we might have
        # multiple consumers of the same api.
        self._api_by_name = dict()

    def configure(self, runtime_environment):
        # look up the twitter account configurations:
        self.twitter_configs = runtime_environment.get_configuration_value('twitter', dict())

        for account_name, account_config in self.twitter_configs.items():
            auth = tweepy.BasicAuthHandler(**account_config['auth'])
            self._api_by_name[account_name] = tweepy.API(auth)

            # tell the resource manager that we can provide the named twitter accounts
            runtime_environment.resource_manager.register('twitter.%s' % account_name, provider=self)

    def add_consumer(self, resource_dependency):
        # since we registered for 'twitter.<account_name>', we can find the account_name requested by splitting:
        twitter, account_name = resource_dependency.provider.split('.')

        # give the tweepy API instance to the resource:
        resource_dependency.on_resource_ready(self._api_by_name[account_name])
mit
-8,478,760,929,149,595,000
41.857143
112
0.706667
false
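The provider above builds one tweepy API object per configured account and hands it to whichever consumer asks for 'twitter.<account_name>'. A framework-free sketch of that keyed-registry pattern; the class name, config shape, and factory argument here are illustrative, not Piped's or tweepy's API:

# Illustrative registry of per-account clients, mirroring the provider above
# without depending on piped or tweepy. Names and config shape are assumptions.
class ClientRegistry(object):
    def __init__(self, config, client_factory):
        # one client per configured account name
        self._client_by_name = {
            name: client_factory(**account_config)
            for name, account_config in config.items()
        }

    def get(self, resource_name):
        # resource names look like 'twitter.<account_name>'
        _, account_name = resource_name.split('.', 1)
        return self._client_by_name[account_name]

if __name__ == '__main__':
    registry = ClientRegistry({'my_account': {'token': 'secret'}}, dict)
    assert registry.get('twitter.my_account') == {'token': 'secret'}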
jiemakel/omorfi
src/python/omorfi/apertium_formatter.py
1
14261
#!/usr/bin/env python3 # -*- coding: utf-8 -*- """Functions to format apertium style analyses from omorfi data.""" # Author: Omorfi contributors <[email protected]> 2015 # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # # # utils to format apertium style data from omorfi database values from .error_logging import fail_formatting_missing_for, just_fail from .formatter import Formatter from .settings import optional_hyphen, weak_boundary, word_boundary from .string_manglers import lexc_escape class ApertiumFormatter(Formatter): apertium_multichars = { "-", "", "+", "adj", "abbr", "abe", "abl", "acc", "acr", "actv", "ade", "adv", "agent", "all", "ant", "card", "cnjcoo", "cnjcoo><vblex", "cnjsub", "cnjadv", "cog", "com", "cond", "conneg", "def", "det", "dem", "ela", "enc", "ess", "f", "gen", "ij", "ill", "imp", "impers", "ind", "ine", "infa", "infe", "infma", "infminen", "ins", "itg", "lat", "loc", "m", "mf", "n", "neg", "nom", "np", "num", "ord", "p1", "p2", "p3", "p1><pl", "p1><sg", "p2><pl", "p2><sg", "p3><pl", "p3><sg", "par", "part", "past", "pasv", "pers", "pl", "pneg", "pos", "post", "pot", "pp", "pprs", "pri", "prn", "punct", "pxpl1", "pxpl2", "pxsg1", "pxsg2", "pxsp3", "qst", "qu", "rec", "reflex", "rel", "sg", "sup", "sym", "top", "tra", "use_dialect", "use_archaic", "use_nonstd", "use_foreign", "vaux", "vblex" } stuff2apertium = { "Aiden": "", "Aien": "", "Aiin": "", "Ain": "", "Ayn": "", "Aän": "", "Aön": "", "Aisiin": "", "Aseen": "", "Aä": "", "Ajä": "", "Atä": "", "Ajen": "", "Aten": "", "Ahin": "", "Ahen": "", "Ahyn": "", "Aihin": "", "Aiä": "", "Ana": "", "Asa": "", "Aitten": "", "Aan": "", "Aen": "", "Ahan": "", "Ahon": "", "Ahun": "", "Aon": "", "Aun": "", "Aa": "", "Aia": "", "Aita": "", "Aja": "", "Ahän": "", "Ahön": "", "Aitä": "", "Ata": "", "ABBREVIATION": "abbr", "ACRONYM": "acr", "ADJ": "adj", "ADP": "post", "ADPOSITION": "post", "ADV": "adv", "ADVERB": "adv", "SCONJ": "cnjsub", "ADVERBIAL": "cnjadv", "AINF_arg": "vaux", "ARTWORK": "", "ARROW": "", "AUX": "vaux", "B-": "-", "B←": "-", "B→": "-", "Bc": "+", "CARDINAL": "card", "Ccmp": "com", "CLAUSE-BOUNDARY": "", "Cma": "agent", "Cmaton": "pneg", "Cnut": "pp", "COMMA": "", "COMPARATIVE": "cnjsub", "COMP": "com", "CONJUNCTION": "", "CONJUNCTIONVERB": "cnjcoo><vblex", "CONJ": "cnjcoo", "COORDINATING": "cnjcoo", "Cpos": "pos", "Csup": "sup", "CULTGRP": "", "Cva": "pprs", "DASH": "", "DECIMAL": "", "DERSTI": "", "DERTTAIN": "", "DET": "det", "DEMONSTRATIVE": "dem", "DIGIT": "", "Din": "+in<n", "Din²": "", "Ds": "+s<n", "Dhko": "+hko<adj", "Disa": "+isa<adj", "Dllinen": "+llinen<n", "Dlainen": "+lainen<n", "Dla": "+la<n", "Dnen": "+nen<n", "Dtar": "+tar<n", "Dton": "+ton<adj", "Dmainen": "+mainen<adj", "Du": "+u<n", "Dtava": "+tava<adj", "Dma": "+ma<n", "Dinen": "+inen<n", "Dja": "+ja<n", "Dmpi": "", "Dmaisilla": "+maisilla<adv", "Dminen": "+minen<n", "Dnut": "+nut<adj", "Dtu": "+tu<adj", "Duus": "+uus<adj", "Dva": "+va<adj", "Dmaton": "+maton<adj", 
"Dttain": "+ttain<adv", "Dttaa": "+ttaa<vblex", "Dtattaa": "+tattaa<vblex", "Dtatuttaa": "+tatuttaa<vblex", "Dtuttaa": "+tuttaa<vblex", "Dsti": "+sti<adv", "EVENT": "", "FEMALE": "f", "FINAL-BRACKET": "", "FINAL-QUOTE": "", "FIRST": "ant", "FRACTION": "", "GEO": "top", "Ia": "infa", "Ie": "infe", "Ima": "infma", "Iminen": "infminen", "INDEFINITE": "ind", "INITIAL-BRACKET": "", "INITIAL-QUOTE": "", "INTRANSITIVE_arg": "vblex", "INTJ": "ij", "INTERROGATIVE": "itg", "LAST": "cog", "LEMMA-START": "", "LEMMA-END": "", "MALE": "m", "MAINF_arg": "vaux", "MEDIA": "", "MISC": "", "MULTIPLICATIVE": "", "Ncon": "conneg", "N??": "", "Nneg": "neg", "NOUN": "n", "Npl": "pl", "Nsg": "sg", "NUM": "num", "NUMERAL": "num", "O3": "pxsp3", "Opl1": "pxpl1", "Opl2": "pxpl2", "ORDINAL": "ord", "ORG": "", "Osg1": "pxsg1", "Osg2": "pxsg2", "PARTICLE": "part", "PERSONAL": "pers", "PL1": "p1", "PL2": "p2", "PL3": "p3", "Ppe4": "impers", "Ppl1": "p1><pl", "Ppl2": "p2><pl", "Ppl3": "p3><pl", "PRON": "prn", "PRODUCT": "", "PROPN": "np", "PROPER": "np", "Psg1": "p1><sg", "Psg2": "p2><sg", "Psg3": "p3><sg", "PUNCT": "punct", "Qhan": "+han<enc", "Qkaan": "+kaan<enc", "Qka": "+ka<enc", "Qkin": "+kin<enc", "Qko": "+ko<qst", "Qpa": "+pa<enc", "Qs": "+s<enc", "QUALIFIER": "adj", "QUANTOR": "qu", "RECIPROCAL": "rec", "REFLEXIVE": "reflex", "RELATIVE": "rel", "ROMAN": "", ".sent": "", "SG1": "p1", "SG2": "p2", "SG3": "p3", "SENTENCE-BOUNDARY": "", "SPACE": "", "SUFFIX": "", "SUPERL": "sup", "SYM": "sym", "Tcond": "cond", "Timp": "imp", "Topt": "", "Tpast": "past", "Tpot": "pot", "Tpres": "pri", "Uarch": "use_archaic", "Udial": "use_nonstd", "Unonstd": "use_nonstd", "UNSPECIFIED": "part", "Urare": "", "Vact": "actv", "VERB": "vblex", "Vpss": "pasv", "X???": "", "Xabe": "abe", "Xabl": "abl", "Xacc": "acc", "Xade": "ade", "Xall": "all", "Xcom": "com", "Xela": "ela", "Xess": "ess", "Xgen": "gen", "Xill": "ill", "Xine": "ine", "Xins": "ins", "Xlat": "lat", "Xnom": "nom", "Xpar": "par", "Xtra": "tra", "ADESSIVE": "ade", "ABLATIVE": "abl", "ALLATIVE": "all", "INESSIVE": "ine", "ILLATIVE": "ill", "LOCATIVE": "loc", "FTB3MAN": "", "FTB3man": "", ".": "", "XForeign": "use_foreign", "X": "", "": "" } def __init__(self, verbose=True): self.verbose = verbose for stuff, ape in self.stuff2apertium.items(): if len(ape) < 2: continue elif ape.startswith('+'): if not ape[ape.find('+'):]: just_fail("There are conflicting formattings in here! " + ape[ape.find('+'):] + " is not a valid apertium multichar_symbol!") elif ape not in self.apertium_multichars: just_fail("There are conflicting formattings in here! 
" + ape + " is not a valid apertium multichar_symbol!") def stuff2lexc(self, stuff): if len(stuff) == 0: return "" elif stuff in self.stuff2apertium: if self.stuff2apertium[stuff] in ['+', '-', '#', '0', '']: return self.stuff2apertium[stuff] elif self.stuff2apertium[stuff].startswith('+'): return (lexc_escape(self.stuff2apertium[stuff]) + '%>') else: return ('%<' + lexc_escape(self.stuff2apertium[stuff]) + '%>') else: fail_formatting_missing_for(stuff, "apertium") return "" def analyses2lexc(self, anals, surf): apestring = '' for i in anals.split('|'): if i == '@@COPY-STEM@@': apestring += lexc_escape(surf) elif i.startswith('@@LITERAL:') and i.endswith('@@'): apestring += lexc_escape(i[len('@@LITERAL:'):-len('@@')]) else: apestring += self.stuff2lexc(i) return apestring def continuation2lexc(self, anals, surf, cont): analstring = self.analyses2lexc(anals, surf) surf = lexc_escape(surf) return "%s:%s\t%s ;\n" % (analstring, surf, cont) def wordmap2lexc(self, wordmap): if wordmap['lemma'] == ' ': # apertium fails when surf == ' ' return '' wordmap['analysis'] = lexc_escape(wordmap['lemma']) wordmap['analysis'] = wordmap['analysis'].replace( word_boundary, '+').replace(weak_boundary, '') if wordmap['is_suffix']: wordmap['analysis'] = "+" + wordmap['analysis'] elif wordmap['is_prefix']: wordmap['analysis'] += "+" elif wordmap['upos'] == 'PROPN': wordmap['analysis'] += self.stuff2lexc(wordmap['upos']) if wordmap['proper_noun_class']: wordmap['analysis'] +=\ self.stuff2lexc(wordmap['proper_noun_class']) if wordmap['sem'] in ['MALE', 'FEMALE']: wordmap['analysis'] += self.stuff2lexc(wordmap['sem']) elif wordmap['upos'] == 'VERB': if wordmap['argument']: wordmap[ 'analysis'] += self.stuff2lexc(wordmap['argument'] + '_arg') else: wordmap['analysis'] += self.stuff2lexc(wordmap['upos']) elif wordmap['upos'] == 'CONJ|VERB': if wordmap['lemma'] == 'eikä': wordmap['lemma'] = 'ei' wordmap['analysis'] = 'ja' + \ self.stuff2lexc('COORDINATING') + \ '+ei' + \ self.stuff2lexc('Nneg') else: wordmap['analysis'] = wordmap['lemma'][:-2] +\ self.stuff2lexc('ADVERBIAL') + \ '+' + wordmap['lemma'][-2:] + \ self.stuff2lexc('Nneg') elif wordmap['particle']: for pclass in wordmap['particle'].split('|'): wordmap['analysis'] += self.stuff2lexc(pclass) else: wordmap['analysis'] += self.stuff2lexc(wordmap['upos']) if wordmap['pronoun']: for stuff in wordmap['pronoun'].split("|"): wordmap['analysis'] += self.stuff2lexc(stuff) if wordmap['lex']: for stuff in wordmap['lex'].split("|"): wordmap['analysis'] += self.stuff2lexc(stuff) if wordmap['abbr']: for stuff in wordmap['abbr'].split("|"): wordmap['analysis'] += self.stuff2lexc(stuff) if wordmap['numtype']: for stuff in wordmap['numtype'].split("|"): wordmap['analysis'] += self.stuff2lexc(stuff) if wordmap['symbol']: for subcat in wordmap['symbol'].split('|'): wordmap['analysis'] += self.stuff2lexc(subcat) if wordmap['stub'] in ";:": wordmap['analysis'] += self.stuff2lexc("SENTENCE-BOUNDARY") # XXX: for now if wordmap['lemma'] in "¹²³½¼=≥µ#/%": wordmap['analysis'] += self.stuff2lexc("NOUN") wordmap['stub'] = wordmap['stub'].replace( word_boundary, optional_hyphen) wordmap['stub'] = lexc_escape(wordmap['stub']) if 'BLACKLIST' in wordmap['new_para']: return "!%s:%s\t%s\t;" % (wordmap['analysis'], wordmap['stub'], wordmap['new_para']) else: return "%s:%s\t%s\t;" % (wordmap['analysis'], wordmap['stub'], wordmap['new_para']) def multichars_lexc(self): multichars = "Multichar_Symbols\n!! 
Apertium standard tags:\n" for mcs in sorted(self.apertium_multichars): if '><' not in mcs and mcs not in ['', '+', '-', '#', '0']: multichars += '%<' + lexc_escape(mcs) + "%>\n" multichars += Formatter.multichars_lexc(self) return multichars def root_lexicon_lexc(self): root = Formatter.root_lexicon_lexc(self) return root # self test if __name__ == '__main__': formatter = ApertiumFormatter() exit(0)
gpl-3.0
-961,318,677,182,429,800
27.763636
80
0.421478
false
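The formatter above maps omorfi analysis tags to apertium multichar symbols and wraps them in lexc markup: ordinary tags become '%<tag%>' while clitic-style entries that begin with '+' keep their prefix and only get the closing '%>'. A reduced sketch of just that wrapping rule, with a two-entry toy table standing in for stuff2apertium and lexc escaping of special characters left out:

# Reduced sketch of the tag-to-lexc wrapping used by ApertiumFormatter above;
# the toy table stands in for the full stuff2apertium mapping, and escaping of
# lexc special characters is omitted for brevity.
TOY_TAG_TABLE = {'NOUN': 'n', 'Qko': '+ko<qst'}

def tag2lexc(tag):
    ape = TOY_TAG_TABLE.get(tag, '')
    if not ape:
        return ''
    if ape.startswith('+'):
        # clitic-style tags keep their '+' and only close the bracket
        return ape + '%>'
    return '%<' + ape + '%>'

if __name__ == '__main__':
    assert tag2lexc('NOUN') == '%<n%>'
    assert tag2lexc('Qko') == '+ko<qst%>'
    assert tag2lexc('UNKNOWN') == ''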
ojii/django-cms
cms/tests/navextender.py
27
3475
# -*- coding: utf-8 -*- from __future__ import with_statement from cms.models import Page from cms.test_utils.fixtures.navextenders import NavextendersFixture from cms.test_utils.testcases import SettingsOverrideTestCase from cms.test_utils.util.menu_extender import TestMenu from django.conf import settings from django.template import Template from menus.menu_pool import menu_pool class NavExtenderTestCase(NavextendersFixture, SettingsOverrideTestCase): """ Tree from fixture: page1 page2 page3 page4 page5 """ def setUp(self): if not menu_pool.discovered: menu_pool.discover_menus() self.old_menu = menu_pool.menus menu_pool.menus = {'CMSMenu':self.old_menu['CMSMenu'], 'TestMenu':TestMenu()} def tearDown(self): menu_pool.menus = self.old_menu def _get_page(self, num): return Page.objects.get(title_set__title='page%s' % num) def _update_page(self, num, **stuff): Page.objects.filter(title_set__title='page%s' % num).update(**stuff) def test_menu_registration(self): self.assertEqual(len(menu_pool.menus), 2) self.assertEqual(len(menu_pool.modifiers) >=4, True) def test_extenders_on_root(self): self._update_page(1, navigation_extenders="TestMenu") menu_pool.clear(settings.SITE_ID) context = self.get_context() tpl = Template("{% load menu_tags %}{% show_menu 0 100 100 100 %}") tpl.render(context) nodes = context['children'] self.assertEqual(len(nodes), 2) self.assertEqual(len(nodes[0].children), 4) self.assertEqual(len(nodes[0].children[3].children), 1) self._update_page(1, in_navigation=False) menu_pool.clear(settings.SITE_ID) tpl = Template("{% load menu_tags %}{% show_menu %}") tpl.render(context) nodes = context['children'] self.assertEqual(len(nodes), 5) def test_extenders_on_root_child(self): self._update_page(4, navigation_extenders="TestMenu") menu_pool.clear(settings.SITE_ID) context = self.get_context() tpl = Template("{% load menu_tags %}{% show_menu 0 100 100 100 %}") tpl.render(context) nodes = context['children'] self.assertEqual(len(nodes), 2) self.assertEqual(len(nodes[1].children), 4) def test_extenders_on_child(self): """ TestMenu has 4 flat nodes """ self._update_page(1, in_navigation=False) self._update_page(2, navigation_extenders="TestMenu") menu_pool.clear(settings.SITE_ID) menu_pool.clear(settings.SITE_ID) context = self.get_context() tpl = Template("{% load menu_tags %}{% show_menu 0 100 100 100 %}") tpl.render(context) nodes = context['children'] self.assertEqual(len(nodes), 2) self.assertEqual(len(nodes[0].children), 4) self.assertEqual(nodes[0].children[1].get_absolute_url(), "/" ) def test_incorrect_nav_extender_in_db(self): self._update_page(2, navigation_extenders="SomethingWrong") menu_pool.clear(settings.SITE_ID) context = self.get_context() tpl = Template("{% load menu_tags %}{% show_menu %}") tpl.render(context) nodes = context['children'] self.assertEqual(len(nodes), 2)
bsd-3-clause
546,447,794,242,377,400
36.365591
85
0.610072
false
kow3ns/contrib
rescheduler/vendor/github.com/ugorji/go/codec/test.py
1516
4019
#!/usr/bin/env python # This will create golden files in a directory passed to it. # A Test calls this internally to create the golden files # So it can process them (so we don't have to checkin the files). # Ensure msgpack-python and cbor are installed first, using: # sudo apt-get install python-dev # sudo apt-get install python-pip # pip install --user msgpack-python msgpack-rpc-python cbor # Ensure all "string" keys are utf strings (else encoded as bytes) import cbor, msgpack, msgpackrpc, sys, os, threading def get_test_data_list(): # get list with all primitive types, and a combo type l0 = [ -8, -1616, -32323232, -6464646464646464, 192, 1616, 32323232, 6464646464646464, 192, -3232.0, -6464646464.0, 3232.0, 6464.0, 6464646464.0, False, True, u"null", None, u"someday", 1328176922000002000, u"", -2206187877999998000, u"bytestring", 270, u"none", -2013855847999995777, #-6795364578871345152, ] l1 = [ { "true": True, "false": False }, { "true": u"True", "false": False, "uint16(1616)": 1616 }, { "list": [1616, 32323232, True, -3232.0, {"TRUE":True, "FALSE":False}, [True, False] ], "int32":32323232, "bool": True, "LONG STRING": u"123456789012345678901234567890123456789012345678901234567890", "SHORT STRING": u"1234567890" }, { True: "true", 138: False, "false": 200 } ] l = [] l.extend(l0) l.append(l0) l.append(1) l.extend(l1) return l def build_test_data(destdir): l = get_test_data_list() for i in range(len(l)): # packer = msgpack.Packer() serialized = msgpack.dumps(l[i]) f = open(os.path.join(destdir, str(i) + '.msgpack.golden'), 'wb') f.write(serialized) f.close() serialized = cbor.dumps(l[i]) f = open(os.path.join(destdir, str(i) + '.cbor.golden'), 'wb') f.write(serialized) f.close() def doRpcServer(port, stopTimeSec): class EchoHandler(object): def Echo123(self, msg1, msg2, msg3): return ("1:%s 2:%s 3:%s" % (msg1, msg2, msg3)) def EchoStruct(self, msg): return ("%s" % msg) addr = msgpackrpc.Address('localhost', port) server = msgpackrpc.Server(EchoHandler()) server.listen(addr) # run thread to stop it after stopTimeSec seconds if > 0 if stopTimeSec > 0: def myStopRpcServer(): server.stop() t = threading.Timer(stopTimeSec, myStopRpcServer) t.start() server.start() def doRpcClientToPythonSvc(port): address = msgpackrpc.Address('localhost', port) client = msgpackrpc.Client(address, unpack_encoding='utf-8') print client.call("Echo123", "A1", "B2", "C3") print client.call("EchoStruct", {"A" :"Aa", "B":"Bb", "C":"Cc"}) def doRpcClientToGoSvc(port): # print ">>>> port: ", port, " <<<<<" address = msgpackrpc.Address('localhost', port) client = msgpackrpc.Client(address, unpack_encoding='utf-8') print client.call("TestRpcInt.Echo123", ["A1", "B2", "C3"]) print client.call("TestRpcInt.EchoStruct", {"A" :"Aa", "B":"Bb", "C":"Cc"}) def doMain(args): if len(args) == 2 and args[0] == "testdata": build_test_data(args[1]) elif len(args) == 3 and args[0] == "rpc-server": doRpcServer(int(args[1]), int(args[2])) elif len(args) == 2 and args[0] == "rpc-client-python-service": doRpcClientToPythonSvc(int(args[1])) elif len(args) == 2 and args[0] == "rpc-client-go-service": doRpcClientToGoSvc(int(args[1])) else: print("Usage: test.py " + "[testdata|rpc-server|rpc-client-python-service|rpc-client-go-service] ...") if __name__ == "__main__": doMain(sys.argv[1:])
apache-2.0
8,253,453,196,340,918,000
30.896825
96
0.569545
false
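The script above serializes one shared list of test values with msgpack and cbor into numbered .golden files for the Go codec tests to read back. A dependency-free sketch of the same golden-file pattern, with the stdlib json module standing in for the binary codecs:

# Golden-file generation pattern from the script above, with the stdlib json
# module standing in for msgpack/cbor so the sketch has no dependencies.
import json
import os

def build_golden_files(values, destdir, suffix='.json.golden'):
    os.makedirs(destdir, exist_ok=True)
    for i, value in enumerate(values):
        path = os.path.join(destdir, '%d%s' % (i, suffix))
        with open(path, 'w') as f:
            json.dump(value, f)

if __name__ == '__main__':
    import tempfile
    build_golden_files([-8, 1616, u"someday", {"true": True}], tempfile.mkdtemp())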
CSC301H-Fall2013/JuakStore
site-packages/tests/regressiontests/managers_regress/tests.py
46
7920
from __future__ import absolute_import import copy from django.conf import settings from django.db import models from django.db.models.loading import cache from django.test import TestCase from django.test.utils import override_settings from .models import ( Child1, Child2, Child3, Child4, Child5, Child6, Child7, AbstractBase1, AbstractBase2, AbstractBase3, ) class ManagersRegressionTests(TestCase): def test_managers(self): Child1.objects.create(name='fred', data='a1') Child1.objects.create(name='barney', data='a2') Child2.objects.create(name='fred', data='b1', value=1) Child2.objects.create(name='barney', data='b2', value=42) Child3.objects.create(name='fred', data='c1', comment='yes') Child3.objects.create(name='barney', data='c2', comment='no') Child4.objects.create(name='fred', data='d1') Child4.objects.create(name='barney', data='d2') Child5.objects.create(name='fred', comment='yes') Child5.objects.create(name='barney', comment='no') Child6.objects.create(name='fred', data='f1', value=42) Child6.objects.create(name='barney', data='f2', value=42) Child7.objects.create(name='fred') Child7.objects.create(name='barney') self.assertQuerysetEqual(Child1.manager1.all(), ["<Child1: a1>"]) self.assertQuerysetEqual(Child1.manager2.all(), ["<Child1: a2>"]) self.assertQuerysetEqual(Child1._default_manager.all(), ["<Child1: a1>"]) self.assertQuerysetEqual(Child2._default_manager.all(), ["<Child2: b1>"]) self.assertQuerysetEqual(Child2.restricted.all(), ["<Child2: b2>"]) self.assertQuerysetEqual(Child3._default_manager.all(), ["<Child3: c1>"]) self.assertQuerysetEqual(Child3.manager1.all(), ["<Child3: c1>"]) self.assertQuerysetEqual(Child3.manager2.all(), ["<Child3: c2>"]) # Since Child6 inherits from Child4, the corresponding rows from f1 and # f2 also appear here. This is the expected result. self.assertQuerysetEqual(Child4._default_manager.order_by('data'), [ "<Child4: d1>", "<Child4: d2>", "<Child4: f1>", "<Child4: f2>" ] ) self.assertQuerysetEqual(Child4.manager1.all(), [ "<Child4: d1>", "<Child4: f1>" ] ) self.assertQuerysetEqual(Child5._default_manager.all(), ["<Child5: fred>"]) self.assertQuerysetEqual(Child6._default_manager.all(), ["<Child6: f1>"]) self.assertQuerysetEqual(Child7._default_manager.order_by('name'), [ "<Child7: barney>", "<Child7: fred>" ] ) def test_abstract_manager(self): # Accessing the manager on an abstract model should # raise an attribute error with an appropriate message. try: AbstractBase3.objects.all() self.fail('Should raise an AttributeError') except AttributeError as e: # This error message isn't ideal, but if the model is abstract and # a lot of the class instantiation logic isn't invoked; if the # manager is implied, then we don't get a hook to install the # error-raising manager. self.assertEqual(str(e), "type object 'AbstractBase3' has no attribute 'objects'") def test_custom_abstract_manager(self): # Accessing the manager on an abstract model with an custom # manager should raise an attribute error with an appropriate # message. try: AbstractBase2.restricted.all() self.fail('Should raise an AttributeError') except AttributeError as e: self.assertEqual(str(e), "Manager isn't available; AbstractBase2 is abstract") def test_explicit_abstract_manager(self): # Accessing the manager on an abstract model with an explicit # manager should raise an attribute error with an appropriate # message. 
try: AbstractBase1.objects.all() self.fail('Should raise an AttributeError') except AttributeError as e: self.assertEqual(str(e), "Manager isn't available; AbstractBase1 is abstract") def test_swappable_manager(self): try: # This test adds dummy models to the app cache. These # need to be removed in order to prevent bad interactions # with the flush operation in other tests. old_app_models = copy.deepcopy(cache.app_models) old_app_store = copy.deepcopy(cache.app_store) settings.TEST_SWAPPABLE_MODEL = 'managers_regress.Parent' class SwappableModel(models.Model): class Meta: swappable = 'TEST_SWAPPABLE_MODEL' # Accessing the manager on a swappable model should # raise an attribute error with a helpful message try: SwappableModel.objects.all() self.fail('Should raise an AttributeError') except AttributeError as e: self.assertEqual(str(e), "Manager isn't available; SwappableModel has been swapped for 'managers_regress.Parent'") finally: del settings.TEST_SWAPPABLE_MODEL cache.app_models = old_app_models cache.app_store = old_app_store def test_custom_swappable_manager(self): try: # This test adds dummy models to the app cache. These # need to be removed in order to prevent bad interactions # with the flush operation in other tests. old_app_models = copy.deepcopy(cache.app_models) old_app_store = copy.deepcopy(cache.app_store) settings.TEST_SWAPPABLE_MODEL = 'managers_regress.Parent' class SwappableModel(models.Model): stuff = models.Manager() class Meta: swappable = 'TEST_SWAPPABLE_MODEL' # Accessing the manager on a swappable model with an # explicit manager should raise an attribute error with a # helpful message try: SwappableModel.stuff.all() self.fail('Should raise an AttributeError') except AttributeError as e: self.assertEqual(str(e), "Manager isn't available; SwappableModel has been swapped for 'managers_regress.Parent'") finally: del settings.TEST_SWAPPABLE_MODEL cache.app_models = old_app_models cache.app_store = old_app_store def test_explicit_swappable_manager(self): try: # This test adds dummy models to the app cache. These # need to be removed in order to prevent bad interactions # with the flush operation in other tests. old_app_models = copy.deepcopy(cache.app_models) old_app_store = copy.deepcopy(cache.app_store) settings.TEST_SWAPPABLE_MODEL = 'managers_regress.Parent' class SwappableModel(models.Model): objects = models.Manager() class Meta: swappable = 'TEST_SWAPPABLE_MODEL' # Accessing the manager on a swappable model with an # explicit manager should raise an attribute error with a # helpful message try: SwappableModel.objects.all() self.fail('Should raise an AttributeError') except AttributeError as e: self.assertEqual(str(e), "Manager isn't available; SwappableModel has been swapped for 'managers_regress.Parent'") finally: del settings.TEST_SWAPPABLE_MODEL cache.app_models = old_app_models cache.app_store = old_app_store
mit
1,195,267,344,263,632,000
39.824742
130
0.609722
false
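The tests above exercise default and custom managers, including the rule that accessing a manager on an abstract or swapped model raises AttributeError. A minimal sketch of the custom-manager pattern those tests assume; the model and field names are illustrative, and it presumes a configured Django project with this app installed:

# Minimal custom-manager sketch of the kind the tests above exercise.
# Model, field, and app names are illustrative; assumes Django is configured.
from django.db import models

class RestrictedManager(models.Manager):
    def get_queryset(self):
        # only expose rows explicitly marked as public
        return super(RestrictedManager, self).get_queryset().filter(is_public=True)

class Child(models.Model):
    name = models.CharField(max_length=50)
    is_public = models.BooleanField(default=False)

    objects = models.Manager()        # default manager: everything
    restricted = RestrictedManager()  # custom manager: filtered

    class Meta:
        app_label = 'example'         # hypothetical app label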
Epirex/android_external_chromium_org
tools/perf/metrics/media.py
23
2791
# Copyright 2013 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. import logging import os from metrics import Metric class MediaMetric(Metric): """MediaMetric class injects and calls JS responsible for recording metrics. Default media metrics are collected for every media element in the page, such as decoded_frame_count, dropped_frame_count, decoded_video_bytes, and decoded_audio_bytes. """ def __init__(self, tab): super(MediaMetric, self).__init__() with open(os.path.join(os.path.dirname(__file__), 'media.js')) as f: js = f.read() tab.ExecuteJavaScript(js) self._results = None self._skip_basic_metrics = False def Start(self, page, tab): """Create the media metrics for all media elements in the document.""" if hasattr(page, 'skip_basic_metrics'): self._skip_basic_metrics = page.skip_basic_metrics tab.ExecuteJavaScript('window.__createMediaMetricsForDocument()') def Stop(self, page, tab): self._results = tab.EvaluateJavaScript('window.__getAllMetrics()') def AddResults(self, tab, results): """Reports all recorded metrics as Telemetry perf results.""" trace_names = [] for media_metric in self._results: trace_names.append(self._AddResultsForMediaElement(media_metric, results)) return '_'.join(trace_names) or tab.url def _AddResultsForMediaElement(self, media_metric, results): """Reports metrics for one media element. Media metrics contain an ID identifying the media element and values: media_metric = { 'id': 'video_1', 'metrics': { 'time_to_play': 120, 'decoded_bytes': 13233, ... } } """ def AddOneResult(metric, unit): metrics = media_metric['metrics'] for m in metrics: if m.startswith(metric): special_label = m[len(metric):] if isinstance(metrics[m], list): values = [float(v) for v in metrics[m]] else: values = float(metrics[m]) results.Add(trace + special_label, unit, values, chart_name=metric, data_type='default') trace = media_metric['id'] if not trace: logging.error('Metrics ID is missing in results.') return if not self._skip_basic_metrics: AddOneResult('buffering_time', 'ms') AddOneResult('decoded_audio_bytes', 'bytes') AddOneResult('decoded_video_bytes', 'bytes') AddOneResult('decoded_frame_count', 'frames') AddOneResult('dropped_frame_count', 'frames') AddOneResult('time_to_play', 'ms') AddOneResult('avg_loop_time', 'ms') AddOneResult('seek', 'ms') AddOneResult('mse', 'ms') return trace
bsd-3-clause
3,301,479,785,618,899,500
32.22619
80
0.647796
false
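The metric class above collects per-element values such as decoded_frame_count and dropped_frame_count from injected JavaScript and reports them per trace. A small sketch of deriving a number from one such result dict, using the media_metric shape shown in the docstring above:

# Sketch of post-processing one media_metric result dict of the shape shown
# in the docstring above (an id plus a metrics mapping).
def dropped_frame_ratio(media_metric):
    metrics = media_metric['metrics']
    decoded = float(metrics.get('decoded_frame_count', 0))
    dropped = float(metrics.get('dropped_frame_count', 0))
    return dropped / decoded if decoded else 0.0

if __name__ == '__main__':
    sample = {'id': 'video_1',
              'metrics': {'decoded_frame_count': 1200, 'dropped_frame_count': 6}}
    assert abs(dropped_frame_ratio(sample) - 0.005) < 1e-9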
bbrezillon/linux-sunxi
tools/perf/scripts/python/export-to-postgresql.py
293
26298
# export-to-postgresql.py: export perf data to a postgresql database # Copyright (c) 2014, Intel Corporation. # # This program is free software; you can redistribute it and/or modify it # under the terms and conditions of the GNU General Public License, # version 2, as published by the Free Software Foundation. # # This program is distributed in the hope it will be useful, but WITHOUT # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or # FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for # more details. import os import sys import struct import datetime # To use this script you will need to have installed package python-pyside which # provides LGPL-licensed Python bindings for Qt. You will also need the package # libqt4-sql-psql for Qt postgresql support. # # The script assumes postgresql is running on the local machine and that the # user has postgresql permissions to create databases. Examples of installing # postgresql and adding such a user are: # # fedora: # # $ sudo yum install postgresql postgresql-server python-pyside qt-postgresql # $ sudo su - postgres -c initdb # $ sudo service postgresql start # $ sudo su - postgres # $ createuser <your user id here> # Shall the new role be a superuser? (y/n) y # # ubuntu: # # $ sudo apt-get install postgresql python-pyside.qtsql libqt4-sql-psql # $ sudo su - postgres # $ createuser -s <your user id here> # # An example of using this script with Intel PT: # # $ perf record -e intel_pt//u ls # $ perf script -s ~/libexec/perf-core/scripts/python/export-to-postgresql.py pt_example branches calls # 2015-05-29 12:49:23.464364 Creating database... # 2015-05-29 12:49:26.281717 Writing to intermediate files... # 2015-05-29 12:49:27.190383 Copying to database... # 2015-05-29 12:49:28.140451 Removing intermediate files... # 2015-05-29 12:49:28.147451 Adding primary keys # 2015-05-29 12:49:28.655683 Adding foreign keys # 2015-05-29 12:49:29.365350 Done # # To browse the database, psql can be used e.g. # # $ psql pt_example # pt_example=# select * from samples_view where id < 100; # pt_example=# \d+ # pt_example=# \d+ samples_view # pt_example=# \q # # An example of using the database is provided by the script # call-graph-from-postgresql.py. Refer to that script for details. # # Tables: # # The tables largely correspond to perf tools' data structures. They are largely self-explanatory. # # samples # # 'samples' is the main table. It represents what instruction was executing at a point in time # when something (a selected event) happened. The memory address is the instruction pointer or 'ip'. # # calls # # 'calls' represents function calls and is related to 'samples' by 'call_id' and 'return_id'. # 'calls' is only created when the 'calls' option to this script is specified. # # call_paths # # 'call_paths' represents all the call stacks. Each 'call' has an associated record in 'call_paths'. # 'calls_paths' is only created when the 'calls' option to this script is specified. # # branch_types # # 'branch_types' provides descriptions for each type of branch. # # comm_threads # # 'comm_threads' shows how 'comms' relates to 'threads'. # # comms # # 'comms' contains a record for each 'comm' - the name given to the executable that is running. # # dsos # # 'dsos' contains a record for each executable file or library. # # machines # # 'machines' can be used to distinguish virtual machines if virtualization is supported. # # selected_events # # 'selected_events' contains a record for each kind of event that has been sampled. 
# # symbols # # 'symbols' contains a record for each symbol. Only symbols that have samples are present. # # threads # # 'threads' contains a record for each thread. # # Views: # # Most of the tables have views for more friendly display. The views are: # # calls_view # call_paths_view # comm_threads_view # dsos_view # machines_view # samples_view # symbols_view # threads_view # # More examples of browsing the database with psql: # Note that some of the examples are not the most optimal SQL query. # Note that call information is only available if the script's 'calls' option has been used. # # Top 10 function calls (not aggregated by symbol): # # SELECT * FROM calls_view ORDER BY elapsed_time DESC LIMIT 10; # # Top 10 function calls (aggregated by symbol): # # SELECT symbol_id,(SELECT name FROM symbols WHERE id = symbol_id) AS symbol, # SUM(elapsed_time) AS tot_elapsed_time,SUM(branch_count) AS tot_branch_count # FROM calls_view GROUP BY symbol_id ORDER BY tot_elapsed_time DESC LIMIT 10; # # Note that the branch count gives a rough estimation of cpu usage, so functions # that took a long time but have a relatively low branch count must have spent time # waiting. # # Find symbols by pattern matching on part of the name (e.g. names containing 'alloc'): # # SELECT * FROM symbols_view WHERE name LIKE '%alloc%'; # # Top 10 function calls for a specific symbol (e.g. whose symbol_id is 187): # # SELECT * FROM calls_view WHERE symbol_id = 187 ORDER BY elapsed_time DESC LIMIT 10; # # Show function calls made by function in the same context (i.e. same call path) (e.g. one with call_path_id 254): # # SELECT * FROM calls_view WHERE parent_call_path_id = 254; # # Show branches made during a function call (e.g. where call_id is 29357 and return_id is 29370 and tid is 29670) # # SELECT * FROM samples_view WHERE id >= 29357 AND id <= 29370 AND tid = 29670 AND event LIKE 'branches%'; # # Show transactions: # # SELECT * FROM samples_view WHERE event = 'transactions'; # # Note transaction start has 'in_tx' true whereas, transaction end has 'in_tx' false. # Transaction aborts have branch_type_name 'transaction abort' # # Show transaction aborts: # # SELECT * FROM samples_view WHERE event = 'transactions' AND branch_type_name = 'transaction abort'; # # To print a call stack requires walking the call_paths table. 
For example this python script: # #!/usr/bin/python2 # # import sys # from PySide.QtSql import * # # if __name__ == '__main__': # if (len(sys.argv) < 3): # print >> sys.stderr, "Usage is: printcallstack.py <database name> <call_path_id>" # raise Exception("Too few arguments") # dbname = sys.argv[1] # call_path_id = sys.argv[2] # db = QSqlDatabase.addDatabase('QPSQL') # db.setDatabaseName(dbname) # if not db.open(): # raise Exception("Failed to open database " + dbname + " error: " + db.lastError().text()) # query = QSqlQuery(db) # print " id ip symbol_id symbol dso_id dso_short_name" # while call_path_id != 0 and call_path_id != 1: # ret = query.exec_('SELECT * FROM call_paths_view WHERE id = ' + str(call_path_id)) # if not ret: # raise Exception("Query failed: " + query.lastError().text()) # if not query.next(): # raise Exception("Query failed") # print "{0:>6} {1:>10} {2:>9} {3:<30} {4:>6} {5:<30}".format(query.value(0), query.value(1), query.value(2), query.value(3), query.value(4), query.value(5)) # call_path_id = query.value(6) from PySide.QtSql import * # Need to access PostgreSQL C library directly to use COPY FROM STDIN from ctypes import * libpq = CDLL("libpq.so.5") PQconnectdb = libpq.PQconnectdb PQconnectdb.restype = c_void_p PQfinish = libpq.PQfinish PQstatus = libpq.PQstatus PQexec = libpq.PQexec PQexec.restype = c_void_p PQresultStatus = libpq.PQresultStatus PQputCopyData = libpq.PQputCopyData PQputCopyData.argtypes = [ c_void_p, c_void_p, c_int ] PQputCopyEnd = libpq.PQputCopyEnd PQputCopyEnd.argtypes = [ c_void_p, c_void_p ] sys.path.append(os.environ['PERF_EXEC_PATH'] + \ '/scripts/python/Perf-Trace-Util/lib/Perf/Trace') # These perf imports are not used at present #from perf_trace_context import * #from Core import * perf_db_export_mode = True perf_db_export_calls = False perf_db_export_callchains = False def usage(): print >> sys.stderr, "Usage is: export-to-postgresql.py <database name> [<columns>] [<calls>] [<callchains>]" print >> sys.stderr, "where: columns 'all' or 'branches'" print >> sys.stderr, " calls 'calls' => create calls and call_paths table" print >> sys.stderr, " callchains 'callchains' => create call_paths table" raise Exception("Too few arguments") if (len(sys.argv) < 2): usage() dbname = sys.argv[1] if (len(sys.argv) >= 3): columns = sys.argv[2] else: columns = "all" if columns not in ("all", "branches"): usage() branches = (columns == "branches") for i in range(3,len(sys.argv)): if (sys.argv[i] == "calls"): perf_db_export_calls = True elif (sys.argv[i] == "callchains"): perf_db_export_callchains = True else: usage() output_dir_name = os.getcwd() + "/" + dbname + "-perf-data" os.mkdir(output_dir_name) def do_query(q, s): if (q.exec_(s)): return raise Exception("Query failed: " + q.lastError().text()) print datetime.datetime.today(), "Creating database..." 
db = QSqlDatabase.addDatabase('QPSQL') query = QSqlQuery(db) db.setDatabaseName('postgres') db.open() try: do_query(query, 'CREATE DATABASE ' + dbname) except: os.rmdir(output_dir_name) raise query.finish() query.clear() db.close() db.setDatabaseName(dbname) db.open() query = QSqlQuery(db) do_query(query, 'SET client_min_messages TO WARNING') do_query(query, 'CREATE TABLE selected_events (' 'id bigint NOT NULL,' 'name varchar(80))') do_query(query, 'CREATE TABLE machines (' 'id bigint NOT NULL,' 'pid integer,' 'root_dir varchar(4096))') do_query(query, 'CREATE TABLE threads (' 'id bigint NOT NULL,' 'machine_id bigint,' 'process_id bigint,' 'pid integer,' 'tid integer)') do_query(query, 'CREATE TABLE comms (' 'id bigint NOT NULL,' 'comm varchar(16))') do_query(query, 'CREATE TABLE comm_threads (' 'id bigint NOT NULL,' 'comm_id bigint,' 'thread_id bigint)') do_query(query, 'CREATE TABLE dsos (' 'id bigint NOT NULL,' 'machine_id bigint,' 'short_name varchar(256),' 'long_name varchar(4096),' 'build_id varchar(64))') do_query(query, 'CREATE TABLE symbols (' 'id bigint NOT NULL,' 'dso_id bigint,' 'sym_start bigint,' 'sym_end bigint,' 'binding integer,' 'name varchar(2048))') do_query(query, 'CREATE TABLE branch_types (' 'id integer NOT NULL,' 'name varchar(80))') if branches: do_query(query, 'CREATE TABLE samples (' 'id bigint NOT NULL,' 'evsel_id bigint,' 'machine_id bigint,' 'thread_id bigint,' 'comm_id bigint,' 'dso_id bigint,' 'symbol_id bigint,' 'sym_offset bigint,' 'ip bigint,' 'time bigint,' 'cpu integer,' 'to_dso_id bigint,' 'to_symbol_id bigint,' 'to_sym_offset bigint,' 'to_ip bigint,' 'branch_type integer,' 'in_tx boolean)') else: do_query(query, 'CREATE TABLE samples (' 'id bigint NOT NULL,' 'evsel_id bigint,' 'machine_id bigint,' 'thread_id bigint,' 'comm_id bigint,' 'dso_id bigint,' 'symbol_id bigint,' 'sym_offset bigint,' 'ip bigint,' 'time bigint,' 'cpu integer,' 'to_dso_id bigint,' 'to_symbol_id bigint,' 'to_sym_offset bigint,' 'to_ip bigint,' 'period bigint,' 'weight bigint,' 'transaction bigint,' 'data_src bigint,' 'branch_type integer,' 'in_tx boolean,' 'call_path_id bigint)') if perf_db_export_calls or perf_db_export_callchains: do_query(query, 'CREATE TABLE call_paths (' 'id bigint NOT NULL,' 'parent_id bigint,' 'symbol_id bigint,' 'ip bigint)') if perf_db_export_calls: do_query(query, 'CREATE TABLE calls (' 'id bigint NOT NULL,' 'thread_id bigint,' 'comm_id bigint,' 'call_path_id bigint,' 'call_time bigint,' 'return_time bigint,' 'branch_count bigint,' 'call_id bigint,' 'return_id bigint,' 'parent_call_path_id bigint,' 'flags integer)') do_query(query, 'CREATE VIEW machines_view AS ' 'SELECT ' 'id,' 'pid,' 'root_dir,' 'CASE WHEN id=0 THEN \'unknown\' WHEN pid=-1 THEN \'host\' ELSE \'guest\' END AS host_or_guest' ' FROM machines') do_query(query, 'CREATE VIEW dsos_view AS ' 'SELECT ' 'id,' 'machine_id,' '(SELECT host_or_guest FROM machines_view WHERE id = machine_id) AS host_or_guest,' 'short_name,' 'long_name,' 'build_id' ' FROM dsos') do_query(query, 'CREATE VIEW symbols_view AS ' 'SELECT ' 'id,' 'name,' '(SELECT short_name FROM dsos WHERE id=dso_id) AS dso,' 'dso_id,' 'sym_start,' 'sym_end,' 'CASE WHEN binding=0 THEN \'local\' WHEN binding=1 THEN \'global\' ELSE \'weak\' END AS binding' ' FROM symbols') do_query(query, 'CREATE VIEW threads_view AS ' 'SELECT ' 'id,' 'machine_id,' '(SELECT host_or_guest FROM machines_view WHERE id = machine_id) AS host_or_guest,' 'process_id,' 'pid,' 'tid' ' FROM threads') do_query(query, 'CREATE VIEW comm_threads_view AS ' 'SELECT ' 
'comm_id,' '(SELECT comm FROM comms WHERE id = comm_id) AS command,' 'thread_id,' '(SELECT pid FROM threads WHERE id = thread_id) AS pid,' '(SELECT tid FROM threads WHERE id = thread_id) AS tid' ' FROM comm_threads') if perf_db_export_calls or perf_db_export_callchains: do_query(query, 'CREATE VIEW call_paths_view AS ' 'SELECT ' 'c.id,' 'to_hex(c.ip) AS ip,' 'c.symbol_id,' '(SELECT name FROM symbols WHERE id = c.symbol_id) AS symbol,' '(SELECT dso_id FROM symbols WHERE id = c.symbol_id) AS dso_id,' '(SELECT dso FROM symbols_view WHERE id = c.symbol_id) AS dso_short_name,' 'c.parent_id,' 'to_hex(p.ip) AS parent_ip,' 'p.symbol_id AS parent_symbol_id,' '(SELECT name FROM symbols WHERE id = p.symbol_id) AS parent_symbol,' '(SELECT dso_id FROM symbols WHERE id = p.symbol_id) AS parent_dso_id,' '(SELECT dso FROM symbols_view WHERE id = p.symbol_id) AS parent_dso_short_name' ' FROM call_paths c INNER JOIN call_paths p ON p.id = c.parent_id') if perf_db_export_calls: do_query(query, 'CREATE VIEW calls_view AS ' 'SELECT ' 'calls.id,' 'thread_id,' '(SELECT pid FROM threads WHERE id = thread_id) AS pid,' '(SELECT tid FROM threads WHERE id = thread_id) AS tid,' '(SELECT comm FROM comms WHERE id = comm_id) AS command,' 'call_path_id,' 'to_hex(ip) AS ip,' 'symbol_id,' '(SELECT name FROM symbols WHERE id = symbol_id) AS symbol,' 'call_time,' 'return_time,' 'return_time - call_time AS elapsed_time,' 'branch_count,' 'call_id,' 'return_id,' 'CASE WHEN flags=1 THEN \'no call\' WHEN flags=2 THEN \'no return\' WHEN flags=3 THEN \'no call/return\' ELSE \'\' END AS flags,' 'parent_call_path_id' ' FROM calls INNER JOIN call_paths ON call_paths.id = call_path_id') do_query(query, 'CREATE VIEW samples_view AS ' 'SELECT ' 'id,' 'time,' 'cpu,' '(SELECT pid FROM threads WHERE id = thread_id) AS pid,' '(SELECT tid FROM threads WHERE id = thread_id) AS tid,' '(SELECT comm FROM comms WHERE id = comm_id) AS command,' '(SELECT name FROM selected_events WHERE id = evsel_id) AS event,' 'to_hex(ip) AS ip_hex,' '(SELECT name FROM symbols WHERE id = symbol_id) AS symbol,' 'sym_offset,' '(SELECT short_name FROM dsos WHERE id = dso_id) AS dso_short_name,' 'to_hex(to_ip) AS to_ip_hex,' '(SELECT name FROM symbols WHERE id = to_symbol_id) AS to_symbol,' 'to_sym_offset,' '(SELECT short_name FROM dsos WHERE id = to_dso_id) AS to_dso_short_name,' '(SELECT name FROM branch_types WHERE id = branch_type) AS branch_type_name,' 'in_tx' ' FROM samples') file_header = struct.pack("!11sii", "PGCOPY\n\377\r\n\0", 0, 0) file_trailer = "\377\377" def open_output_file(file_name): path_name = output_dir_name + "/" + file_name file = open(path_name, "w+") file.write(file_header) return file def close_output_file(file): file.write(file_trailer) file.close() def copy_output_file_direct(file, table_name): close_output_file(file) sql = "COPY " + table_name + " FROM '" + file.name + "' (FORMAT 'binary')" do_query(query, sql) # Use COPY FROM STDIN because security may prevent postgres from accessing the files directly def copy_output_file(file, table_name): conn = PQconnectdb("dbname = " + dbname) if (PQstatus(conn)): raise Exception("COPY FROM STDIN PQconnectdb failed") file.write(file_trailer) file.seek(0) sql = "COPY " + table_name + " FROM STDIN (FORMAT 'binary')" res = PQexec(conn, sql) if (PQresultStatus(res) != 4): raise Exception("COPY FROM STDIN PQexec failed") data = file.read(65536) while (len(data)): ret = PQputCopyData(conn, data, len(data)) if (ret != 1): raise Exception("COPY FROM STDIN PQputCopyData failed, error " + str(ret)) data = 
file.read(65536) ret = PQputCopyEnd(conn, None) if (ret != 1): raise Exception("COPY FROM STDIN PQputCopyEnd failed, error " + str(ret)) PQfinish(conn) def remove_output_file(file): name = file.name file.close() os.unlink(name) evsel_file = open_output_file("evsel_table.bin") machine_file = open_output_file("machine_table.bin") thread_file = open_output_file("thread_table.bin") comm_file = open_output_file("comm_table.bin") comm_thread_file = open_output_file("comm_thread_table.bin") dso_file = open_output_file("dso_table.bin") symbol_file = open_output_file("symbol_table.bin") branch_type_file = open_output_file("branch_type_table.bin") sample_file = open_output_file("sample_table.bin") if perf_db_export_calls or perf_db_export_callchains: call_path_file = open_output_file("call_path_table.bin") if perf_db_export_calls: call_file = open_output_file("call_table.bin") def trace_begin(): print datetime.datetime.today(), "Writing to intermediate files..." # id == 0 means unknown. It is easier to create records for them than replace the zeroes with NULLs evsel_table(0, "unknown") machine_table(0, 0, "unknown") thread_table(0, 0, 0, -1, -1) comm_table(0, "unknown") dso_table(0, 0, "unknown", "unknown", "") symbol_table(0, 0, 0, 0, 0, "unknown") sample_table(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) if perf_db_export_calls or perf_db_export_callchains: call_path_table(0, 0, 0, 0) unhandled_count = 0 def trace_end(): print datetime.datetime.today(), "Copying to database..." copy_output_file(evsel_file, "selected_events") copy_output_file(machine_file, "machines") copy_output_file(thread_file, "threads") copy_output_file(comm_file, "comms") copy_output_file(comm_thread_file, "comm_threads") copy_output_file(dso_file, "dsos") copy_output_file(symbol_file, "symbols") copy_output_file(branch_type_file, "branch_types") copy_output_file(sample_file, "samples") if perf_db_export_calls or perf_db_export_callchains: copy_output_file(call_path_file, "call_paths") if perf_db_export_calls: copy_output_file(call_file, "calls") print datetime.datetime.today(), "Removing intermediate files..." 
remove_output_file(evsel_file) remove_output_file(machine_file) remove_output_file(thread_file) remove_output_file(comm_file) remove_output_file(comm_thread_file) remove_output_file(dso_file) remove_output_file(symbol_file) remove_output_file(branch_type_file) remove_output_file(sample_file) if perf_db_export_calls or perf_db_export_callchains: remove_output_file(call_path_file) if perf_db_export_calls: remove_output_file(call_file) os.rmdir(output_dir_name) print datetime.datetime.today(), "Adding primary keys" do_query(query, 'ALTER TABLE selected_events ADD PRIMARY KEY (id)') do_query(query, 'ALTER TABLE machines ADD PRIMARY KEY (id)') do_query(query, 'ALTER TABLE threads ADD PRIMARY KEY (id)') do_query(query, 'ALTER TABLE comms ADD PRIMARY KEY (id)') do_query(query, 'ALTER TABLE comm_threads ADD PRIMARY KEY (id)') do_query(query, 'ALTER TABLE dsos ADD PRIMARY KEY (id)') do_query(query, 'ALTER TABLE symbols ADD PRIMARY KEY (id)') do_query(query, 'ALTER TABLE branch_types ADD PRIMARY KEY (id)') do_query(query, 'ALTER TABLE samples ADD PRIMARY KEY (id)') if perf_db_export_calls or perf_db_export_callchains: do_query(query, 'ALTER TABLE call_paths ADD PRIMARY KEY (id)') if perf_db_export_calls: do_query(query, 'ALTER TABLE calls ADD PRIMARY KEY (id)') print datetime.datetime.today(), "Adding foreign keys" do_query(query, 'ALTER TABLE threads ' 'ADD CONSTRAINT machinefk FOREIGN KEY (machine_id) REFERENCES machines (id),' 'ADD CONSTRAINT processfk FOREIGN KEY (process_id) REFERENCES threads (id)') do_query(query, 'ALTER TABLE comm_threads ' 'ADD CONSTRAINT commfk FOREIGN KEY (comm_id) REFERENCES comms (id),' 'ADD CONSTRAINT threadfk FOREIGN KEY (thread_id) REFERENCES threads (id)') do_query(query, 'ALTER TABLE dsos ' 'ADD CONSTRAINT machinefk FOREIGN KEY (machine_id) REFERENCES machines (id)') do_query(query, 'ALTER TABLE symbols ' 'ADD CONSTRAINT dsofk FOREIGN KEY (dso_id) REFERENCES dsos (id)') do_query(query, 'ALTER TABLE samples ' 'ADD CONSTRAINT evselfk FOREIGN KEY (evsel_id) REFERENCES selected_events (id),' 'ADD CONSTRAINT machinefk FOREIGN KEY (machine_id) REFERENCES machines (id),' 'ADD CONSTRAINT threadfk FOREIGN KEY (thread_id) REFERENCES threads (id),' 'ADD CONSTRAINT commfk FOREIGN KEY (comm_id) REFERENCES comms (id),' 'ADD CONSTRAINT dsofk FOREIGN KEY (dso_id) REFERENCES dsos (id),' 'ADD CONSTRAINT symbolfk FOREIGN KEY (symbol_id) REFERENCES symbols (id),' 'ADD CONSTRAINT todsofk FOREIGN KEY (to_dso_id) REFERENCES dsos (id),' 'ADD CONSTRAINT tosymbolfk FOREIGN KEY (to_symbol_id) REFERENCES symbols (id)') if perf_db_export_calls or perf_db_export_callchains: do_query(query, 'ALTER TABLE call_paths ' 'ADD CONSTRAINT parentfk FOREIGN KEY (parent_id) REFERENCES call_paths (id),' 'ADD CONSTRAINT symbolfk FOREIGN KEY (symbol_id) REFERENCES symbols (id)') if perf_db_export_calls: do_query(query, 'ALTER TABLE calls ' 'ADD CONSTRAINT threadfk FOREIGN KEY (thread_id) REFERENCES threads (id),' 'ADD CONSTRAINT commfk FOREIGN KEY (comm_id) REFERENCES comms (id),' 'ADD CONSTRAINT call_pathfk FOREIGN KEY (call_path_id) REFERENCES call_paths (id),' 'ADD CONSTRAINT callfk FOREIGN KEY (call_id) REFERENCES samples (id),' 'ADD CONSTRAINT returnfk FOREIGN KEY (return_id) REFERENCES samples (id),' 'ADD CONSTRAINT parent_call_pathfk FOREIGN KEY (parent_call_path_id) REFERENCES call_paths (id)') do_query(query, 'CREATE INDEX pcpid_idx ON calls (parent_call_path_id)') if (unhandled_count): print datetime.datetime.today(), "Warning: ", unhandled_count, " unhandled events" print 
datetime.datetime.today(), "Done" def trace_unhandled(event_name, context, event_fields_dict): global unhandled_count unhandled_count += 1 def sched__sched_switch(*x): pass def evsel_table(evsel_id, evsel_name, *x): n = len(evsel_name) fmt = "!hiqi" + str(n) + "s" value = struct.pack(fmt, 2, 8, evsel_id, n, evsel_name) evsel_file.write(value) def machine_table(machine_id, pid, root_dir, *x): n = len(root_dir) fmt = "!hiqiii" + str(n) + "s" value = struct.pack(fmt, 3, 8, machine_id, 4, pid, n, root_dir) machine_file.write(value) def thread_table(thread_id, machine_id, process_id, pid, tid, *x): value = struct.pack("!hiqiqiqiiii", 5, 8, thread_id, 8, machine_id, 8, process_id, 4, pid, 4, tid) thread_file.write(value) def comm_table(comm_id, comm_str, *x): n = len(comm_str) fmt = "!hiqi" + str(n) + "s" value = struct.pack(fmt, 2, 8, comm_id, n, comm_str) comm_file.write(value) def comm_thread_table(comm_thread_id, comm_id, thread_id, *x): fmt = "!hiqiqiq" value = struct.pack(fmt, 3, 8, comm_thread_id, 8, comm_id, 8, thread_id) comm_thread_file.write(value) def dso_table(dso_id, machine_id, short_name, long_name, build_id, *x): n1 = len(short_name) n2 = len(long_name) n3 = len(build_id) fmt = "!hiqiqi" + str(n1) + "si" + str(n2) + "si" + str(n3) + "s" value = struct.pack(fmt, 5, 8, dso_id, 8, machine_id, n1, short_name, n2, long_name, n3, build_id) dso_file.write(value) def symbol_table(symbol_id, dso_id, sym_start, sym_end, binding, symbol_name, *x): n = len(symbol_name) fmt = "!hiqiqiqiqiii" + str(n) + "s" value = struct.pack(fmt, 6, 8, symbol_id, 8, dso_id, 8, sym_start, 8, sym_end, 4, binding, n, symbol_name) symbol_file.write(value) def branch_type_table(branch_type, name, *x): n = len(name) fmt = "!hiii" + str(n) + "s" value = struct.pack(fmt, 2, 4, branch_type, n, name) branch_type_file.write(value) def sample_table(sample_id, evsel_id, machine_id, thread_id, comm_id, dso_id, symbol_id, sym_offset, ip, time, cpu, to_dso_id, to_symbol_id, to_sym_offset, to_ip, period, weight, transaction, data_src, branch_type, in_tx, call_path_id, *x): if branches: value = struct.pack("!hiqiqiqiqiqiqiqiqiqiqiiiqiqiqiqiiiBiq", 18, 8, sample_id, 8, evsel_id, 8, machine_id, 8, thread_id, 8, comm_id, 8, dso_id, 8, symbol_id, 8, sym_offset, 8, ip, 8, time, 4, cpu, 8, to_dso_id, 8, to_symbol_id, 8, to_sym_offset, 8, to_ip, 4, branch_type, 1, in_tx, 8, call_path_id) else: value = struct.pack("!hiqiqiqiqiqiqiqiqiqiqiiiqiqiqiqiqiqiqiqiiiBiq", 22, 8, sample_id, 8, evsel_id, 8, machine_id, 8, thread_id, 8, comm_id, 8, dso_id, 8, symbol_id, 8, sym_offset, 8, ip, 8, time, 4, cpu, 8, to_dso_id, 8, to_symbol_id, 8, to_sym_offset, 8, to_ip, 8, period, 8, weight, 8, transaction, 8, data_src, 4, branch_type, 1, in_tx, 8, call_path_id) sample_file.write(value) def call_path_table(cp_id, parent_id, symbol_id, ip, *x): fmt = "!hiqiqiqiq" value = struct.pack(fmt, 4, 8, cp_id, 8, parent_id, 8, symbol_id, 8, ip) call_path_file.write(value) def call_return_table(cr_id, thread_id, comm_id, call_path_id, call_time, return_time, branch_count, call_id, return_id, parent_call_path_id, flags, *x): fmt = "!hiqiqiqiqiqiqiqiqiqiqii" value = struct.pack(fmt, 11, 8, cr_id, 8, thread_id, 8, comm_id, 8, call_path_id, 8, call_time, 8, return_time, 8, branch_count, 8, call_id, 8, return_id, 8, parent_call_path_id, 4, flags) call_file.write(value)
gpl-2.0
1,330,984,015,306,651,000
35.323204
360
0.674462
false
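The exporter above writes PostgreSQL binary COPY data: each intermediate file starts with the PGCOPY signature header, ends with a 0xffff trailer, and packs every row as a field count followed by (length, value) pairs via struct. A small Python 3 sketch of just that framing; the two-column row layout is a toy example, not one of the script's real tables:

# Sketch of the PGCOPY binary framing used by the exporter above, written for
# Python 3 (bytes instead of str). The two-column row layout is a toy example.
import struct

PGCOPY_HEADER = struct.pack('!11sii', b'PGCOPY\n\377\r\n\0', 0, 0)
PGCOPY_TRAILER = struct.pack('!h', -1)  # 0xffff end-of-data marker

def pack_row(row_id, name):
    name_bytes = name.encode('utf-8')
    # field count, then (byte length, value) for each field
    return struct.pack('!hiqi%ds' % len(name_bytes),
                       2, 8, row_id, len(name_bytes), name_bytes)

if __name__ == '__main__':
    payload = PGCOPY_HEADER + pack_row(1, 'unknown') + PGCOPY_TRAILER
    assert payload.startswith(b'PGCOPY')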
provaleks/o8
addons/website_certification/controllers/main.py
373
2149
# -*- encoding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2004-TODAY OpenERP S.A. <http://www.openerp.com> # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## from openerp.addons.web import http from openerp.addons.web.http import request class WebsiteCertifiedPartners(http.Controller): @http.route(['/certifications', '/certifications/<model("certification.type"):cert_type>'], type='http', auth='public', website=True) def certified_partners(self, cert_type=None, **post): cr, uid, context = request.cr, request.uid, request.context certification_obj = request.registry['certification.certification'] cert_type_obj = request.registry['certification.type'] domain = [] if cert_type: domain.append(('type_id', '=', cert_type.id)) certifications_ids = certification_obj.search(cr, uid, domain, context=context) certifications = certification_obj.browse(cr, uid, certifications_ids, context=context) types = cert_type_obj.browse(cr, uid, cert_type_obj.search(cr, uid, [], context=context), context=context) data = { 'certifications': certifications, 'types': types } return request.website.render("website_certification.certified_partners", data)
agpl-3.0
-1,575,957,588,092,809,500
42.857143
114
0.629595
false
apagac/cfme_tests
cfme/tests/cloud_infra_common/test_retirement_manual.py
2
3580
# -*- coding: utf-8 -*-
# pylint: skip-file
"""Manual tests"""
import pytest

from cfme import test_requirements

pytestmark = [
    pytest.mark.ignore_stream('upstream'),
    pytest.mark.manual,
    test_requirements.retirement
]


@pytest.mark.tier(2)
def test_retire_infra_vms_folder():
    """
    test the retire function of vm on infra providers, at least two vms,
    retire now button vms page

    Polarion:
        assignee: tpapaioa
        casecomponent: Provisioning
        caseimportance: medium
        initialEstimate: 1/2h
    """
    pass


def test_retirement_date_uses_correct_time_zone():
    """
    Bug 1565128 - Wrong timezone when selecting retirement time

    Bugzilla:
        1565128

    After saving VM retirement date/time (using both "Specific Date and
    Time" and "Time Delay from Now" options), the displayed Retirement
    Date has the correct date and time-zone appropriate time.

    Polarion:
        assignee: tpapaioa
        casecomponent: Infra
        caseimportance: medium
        initialEstimate: 1/15h
        startsin: 5.9
        title: Retirement date uses correct time zone
    """
    pass


@pytest.mark.tier(2)
def test_retire_cloud_vms_date_folder():
    """
    test the retire function of vm on cloud providers, at least two vms,
    set retirement date button from vms page (without notification)

    Polarion:
        assignee: tpapaioa
        casecomponent: Provisioning
        caseimportance: medium
        initialEstimate: 1/2h
    """
    pass


@pytest.mark.tier(2)
def test_retire_infra_vms_notification_folder():
    """
    test the retire function of vm on infra providers, select at least two
    vms and press retirement date button from vms main page and specify
    retirement warning period (1 week, 2 weeks, 1 month).

    Polarion:
        assignee: tpapaioa
        casecomponent: Provisioning
        caseimportance: medium
        initialEstimate: 1/2h
    """
    pass


@pytest.mark.tier(2)
def test_retire_infra_vms_date_folder():
    """
    test the retire function of vm on infra providers, at least two vms,
    set retirement date button from vms page (without notification)

    Polarion:
        assignee: tpapaioa
        casecomponent: Provisioning
        caseimportance: medium
        initialEstimate: 1/2h
    """
    pass


def test_vms_retirement_state_field_is_capitalized_correctly():
    """
    Bug 1518926 - Inconsistent capitalization for Retirement State field

    Bugzilla:
        1518926

    When a VM is retiring or retired, the VM should show a "Retirement
    State" field, not "Retirement state".

    Polarion:
        assignee: tpapaioa
        casecomponent: WebUI
        caseimportance: medium
        initialEstimate: 1/15h
        title: VM's Retirement State field is capitalized correctly
    """
    pass


@pytest.mark.tier(2)
def test_retire_cloud_vms_folder():
    """
    test the retire function of vm on cloud providers, at least two vms,
    retire now button vms page

    Polarion:
        assignee: tpapaioa
        casecomponent: Provisioning
        caseimportance: medium
        initialEstimate: 1/2h
    """
    pass


@pytest.mark.tier(2)
def test_retire_cloud_vms_notification_folder():
    """
    test the retire function of vm on cloud providers, one vm, set
    retirement date button from vm summary page with notification for two
    vms for one of the periods (1 week, 2 weeks, 1 month)

    Polarion:
        assignee: tpapaioa
        casecomponent: Provisioning
        caseimportance: medium
        initialEstimate: 1/2h
    """
    pass
gpl-2.0
-8,732,525,682,461,570,000
23.520548
74
0.663687
false
svirusxxx/cjdns
node_build/dependencies/libuv/build/gyp/test/gyp-defines/gyptest-multiple-values.py
47
1314
#!/usr/bin/env python # Copyright (c) 2012 Google Inc. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """ Verifies that when multiple values are supplied for a gyp define, the last one is used. """ import os import TestGyp test = TestGyp.TestGyp() os.environ['GYP_DEFINES'] = 'key=value1 key=value2 key=value3' test.run_gyp('defines.gyp') test.build('defines.gyp') test.must_contain('action.txt', 'value3') # The last occurrence of a repeated set should take precedence over other # values. os.environ['GYP_DEFINES'] = 'key=repeated_value key=value1 key=repeated_value' test.run_gyp('defines.gyp') if test.format == 'msvs' and not test.uses_msbuild: # msvs versions before 2010 don't detect build rule changes not reflected # in file system timestamps. Rebuild to see differences. test.build('defines.gyp', rebuild=True) elif test.format == 'android': # The Android build system doesn't currently have a way to get files whose # build rules have changed (but whose timestamps haven't) to be rebuilt. # See bug http://code.google.com/p/gyp/issues/detail?id=308 test.unlink('action.txt') test.build('defines.gyp') else: test.build('defines.gyp') test.must_contain('action.txt', 'repeated_value') test.pass_test()
gpl-3.0
464,753,058,703,018,200
30.285714
78
0.733638
false
yf2009017/huhamhire-hosts
_build.py
24
11102
#!/usr/bin/env python # -*- coding: utf-8 -*- # # _build.py : Tools to make packages for different platforms # # Copyleft (C) 2014 - huhamhire hosts team <[email protected]> # ===================================================================== # Licensed under the GNU General Public License, version 3. You should # have received a copy of the GNU General Public License along with # this program. If not, see <http://www.gnu.org/licenses/>. # # This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING # THE WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR # PURPOSE. # ===================================================================== __author__ = "huhamhire <[email protected]>" import os import sys import shutil from __version__ import __version__ SCRIPT = "hoststool.py" SCRIPT_DIR = os.getcwd() + '/' RELEASE_DIR = "../release/" # Shared package settings and metadata NAME = "HostsUtl" VERSION = __version__ DESCRIPTION = "HostsUtl - Hosts Setup Utility" AUTHOR = "Hamhire Hu" AUTHOR_EMAIL = "[email protected]", LICENSE = "Public Domain, Python, BSD, GPLv3 (see LICENSE)", URL = "https://hosts.huhamhire.com", CLASSIFIERS = [ "Development Status :: 4 - Beta", "Environment :: MacOS X", "Environment :: Win32 (MS Windows)", "Environment :: X11 Applications :: Qt", "Intended Audience :: Developers", "Intended Audience :: End Users/Desktop", "Intended Audience :: Other Audience", "Intended Audience :: System Administrators", "License :: OSI Approved :: Python Software Foundation License", "License :: OSI Approved :: BSD License", "License :: OSI Approved :: GNU General Public License v3", "License :: Public Domain", "Natural Language :: English", "Natural Language :: Chinese (Simplified)", "Operating System :: OS Independent", "Programming Language :: Python :: 2.7", "Topic :: Communications", "Topic :: Database", "Topic :: Desktop Environment", "Topic :: Documentation", "Topic :: Internet :: Name Service (DNS)", "Topic :: System :: Networking", "Topic :: Software Development :: Documentation", "Topic :: Text Processing", "Topic :: CommonUtil", ] DATA_FILES = [ ("gui/lang", [ "gui/lang/en_US.qm", "gui/lang/zh_CN.qm", "gui/lang/zh_TW.qm", ]), ("gui/theme", [ "gui/theme/default.qss", ]), (".", [ "LICENSE", "README.rst", "network.conf", ]), ] if sys.argv > 1: tar_flag = 0 includes = [] excludes = [] file_path = lambda rel_path: SCRIPT_DIR + rel_path if sys.argv[1] == "py2tar": # Pack up script package for Linux users includes = [ "*.py", "gui/lang/*.qm", "gui/theme/*.qss", "*/*.py", "LICENSE", "README.rst", "network.conf", ] excludes = [ "_build.py", "_pylupdate4.py", "_pyuic4.py", ".gitattributes", ".gitignore", ] ex_files = [] prefix = "HostsTool-x11-gpl-" tar_flag = 1 elif sys.argv[1] == "py2source": # Pack up source package for Linux users includes = ["*"] excludes = [ ".gitattributes", ".gitignore", "hostslist.data", ] ex_files = [] prefix = "HostsTool-source-gpl-" tar_flag = 1 else: prefix = "Error" ex_files = [] if tar_flag: import glob import tarfile TAR_NAME = prefix + VERSION + ".tar.gz" RELEASE_PATH = RELEASE_DIR + TAR_NAME if not os.path.exists(RELEASE_DIR): os.mkdir(RELEASE_DIR) if os.path.isfile(RELEASE_PATH): os.remove(RELEASE_PATH) rel_len = len(SCRIPT_DIR) tar = tarfile.open(RELEASE_PATH, "w|gz") for name_format in excludes: ex_files.extend(glob.glob(file_path(name_format))) for name_format in includes: files = glob.glob(file_path(name_format)) for src_file in files: if src_file not in ex_files: tar_path = os.path.join(prefix + VERSION, src_file[rel_len:]) 
tar.add(src_file, tar_path) print "compressing: %s" % src_file tar.close() exit(1) from util import CommonUtil system = CommonUtil.check_platform()[0] if system == "Windows": # Build binary executables for Windows import struct import zipfile from distutils.core import setup import py2exe # Set working directories WORK_DIR = SCRIPT_DIR + "work/" DIR_NAME = "HostsTool" DIST_DIR = WORK_DIR + DIR_NAME + '/' WIN_OPTIONS = { "includes": ["sip"], "excludes": ["_scproxy", "_sysconfigdata"], "dll_excludes": ["MSVCP90.dll"], "dist_dir": DIST_DIR, "compressed": 1, "optimize": 2, } # Clean work space before build if os.path.exists(DIST_DIR): shutil.rmtree(DIST_DIR) # Build Executable print " Building Executable ".center(78, '=') EXE_NAME = SCRIPT.split(".")[0] setup( name=NAME, version=VERSION, options={"py2exe": WIN_OPTIONS}, console=[ {"script": SCRIPT, "dest_base": "hoststool_tui", "uac_info": "highestAvailable", }, ], windows=[ {"script": SCRIPT, "icon_resources": [(1, "res/img/icons/hosts_utl.ico")], "dest_base": EXE_NAME, "uac_info": "highestAvailable", }, ], description=DESCRIPTION, author=AUTHOR, author_email=AUTHOR_EMAIL, license=LICENSE, url=URL, zipfile="lib/shared.lib", data_files=DATA_FILES, classifiers=CLASSIFIERS, ) # Clean work directory after build shutil.rmtree(SCRIPT_DIR + "build/") # Pack up executable to ZIP file print " Compressing to ZIP ".center(78, '=') if struct.calcsize("P") * 8 == 64: PLAT = "x64" elif struct.calcsize("P") * 8 == 32: PLAT = "x86" else: PLAT = "unknown" DIR_NAME = DIR_NAME + '-win-gpl-' + VERSION + '-' + PLAT ZIP_NAME = DIR_NAME + ".zip" ZIP_FILE = WORK_DIR + ZIP_NAME compressed = zipfile.ZipFile(ZIP_FILE, 'w', zipfile.ZIP_DEFLATED) for root, dirs, files in os.walk(DIST_DIR): rel_path = os.path.relpath(root, os.path.dirname(DIST_DIR)) for name in files: print "compressing: %s" % os.path.join(root, name) compressed.write( os.path.join(root, name), os.path.join(DIR_NAME, rel_path, name)) compressed.close() # Move ZIP file to release directory RELEASE_PATH = RELEASE_DIR + ZIP_NAME if not os.path.exists(RELEASE_DIR): os.mkdir(RELEASE_DIR) if os.path.isfile(RELEASE_PATH): os.remove(RELEASE_PATH) shutil.move(ZIP_FILE, RELEASE_PATH) shutil.rmtree(WORK_DIR) print "Done!" 
elif system == "OS X": # Build binary executables for Mac OS X from setuptools import setup # Set working directories WORK_DIR = SCRIPT_DIR + "work/" RES_DIR = SCRIPT_DIR + "res/mac/" APP_NAME = "HostsTool.app" APP_PATH = WORK_DIR + APP_NAME DIST_DIR = APP_PATH + "/Contents/" # Set build configuration MAC_OPTIONS = { "iconfile": "res/img/icons/hosts_utl.icns", "includes": ["sip", "PyQt4.QtCore", "PyQt4.QtGui"], "excludes": [ "PyQt4.QtDBus", "PyQt4.QtDeclarative", "PyQt4.QtDesigner", "PyQt4.QtHelp", "PyQt4.QtMultimedia", "PyQt4.QtNetwork", "PyQt4.QtOpenGL", "PyQt4.QtScript", "PyQt4.QtScriptTools", "PyQt4.QtSql", "PyQt4.QtSvg", "PyQt4.QtTest", "PyQt4.QtWebKit", "PyQt4.QtXml", "PyQt4.QtXmlPatterns", "PyQt4.phonon"], "compressed": 1, "dist_dir": DIST_DIR, "optimize": 2, "plist": { "CFBundleAllowMixedLocalizations": True, "CFBundleSignature": "hamh", "CFBundleIdentifier": "org.pythonmac.huhamhire.HostsTool", "NSHumanReadableCopyright": "(C) 2014, huhamhire hosts Team"} } # Clean work space before build if os.path.exists(APP_PATH): shutil.rmtree(APP_PATH) if not os.path.exists(WORK_DIR): os.mkdir(WORK_DIR) # Make daemon APP OSAC_CMD = "osacompile -o %s %sHostsUtl.scpt" % (APP_PATH, RES_DIR) os.system(OSAC_CMD) # Build APP print " Building Application ".center(78, '=') setup( app=[SCRIPT], name=NAME, version=VERSION, options={"py2app": MAC_OPTIONS}, setup_requires=["py2app"], description=DESCRIPTION, author=AUTHOR, author_email=AUTHOR_EMAIL, license=LICENSE, url=URL, data_files=DATA_FILES, classifiers=CLASSIFIERS, ) # Clean work directory after build os.remove(DIST_DIR + "Resources/applet.icns") shutil.copy2( SCRIPT_DIR + "res/img/icons/hosts_utl.icns", DIST_DIR + "Resources/applet.icns") shutil.copy2(RES_DIR + "Info.plist", DIST_DIR + "Info.plist") shutil.rmtree(SCRIPT_DIR + "build/") # Pack APP to DMG file VDMG_DIR = WORK_DIR + "package_vdmg/" DMG_TMP = WORK_DIR + "pack_tmp.dmg" DMG_RES_DIR = RES_DIR + "dmg/" VOL_NAME = "HostsTool" DMG_NAME = VOL_NAME + "-mac-gpl-" + VERSION + ".dmg" DMG_PATH = WORK_DIR + DMG_NAME # Clean work space before pack up if os.path.exists(VDMG_DIR): shutil.rmtree(VDMG_DIR) if os.path.isfile(DMG_TMP): os.remove(DMG_TMP) if os.path.isfile(DMG_PATH): os.remove(DMG_PATH) # Prepare files in DMG package os.mkdir(VDMG_DIR) shutil.move(APP_PATH, VDMG_DIR) os.symlink("/Applications", VDMG_DIR + " ") shutil.copy2(DMG_RES_DIR + "background.png", VDMG_DIR + ".background.png") shutil.copy2(DMG_RES_DIR + "DS_Store_dmg", VDMG_DIR + ".DS_Store") # Make DMG file print " Making DMG Package ".center(78, '=') MK_CMD = ( "hdiutil makehybrid -hfs -hfs-volume-name %s " "-hfs-openfolder %s %s -o %s" % ( VOL_NAME, VDMG_DIR, VDMG_DIR, DMG_TMP)) PACK_CMD = "hdiutil convert -format UDZO %s -o %s" % (DMG_TMP, DMG_PATH) os.system(MK_CMD) os.system(PACK_CMD) # Clean work directory after make DMG package shutil.rmtree(VDMG_DIR) os.remove(DMG_TMP) # Move DMG file to release directory RELEASE_PATH = RELEASE_DIR + DMG_NAME if not os.path.exists(RELEASE_DIR): os.mkdir(RELEASE_DIR) if os.path.isfile(RELEASE_PATH): os.remove(RELEASE_PATH) print "moving DMG file to: %s" % RELEASE_PATH shutil.move(DMG_PATH, RELEASE_PATH) shutil.rmtree(WORK_DIR) print "Done!"
gpl-3.0
-4,614,772,202,932,472,000
31.273256
78
0.558998
false
KellyChan/Python
javascript/backbone/backbone-templates/backbone-fileupload/venvs/lib/python2.7/site-packages/django/contrib/admin/templatetags/admin_modify.py
98
2293
from django import template register = template.Library() @register.inclusion_tag('admin/prepopulated_fields_js.html', takes_context=True) def prepopulated_fields_js(context): """ Creates a list of prepopulated_fields that should render Javascript for the prepopulated fields for both the admin form and inlines. """ prepopulated_fields = [] if context['add'] and 'adminform' in context: prepopulated_fields.extend(context['adminform'].prepopulated_fields) if 'inline_admin_formsets' in context: for inline_admin_formset in context['inline_admin_formsets']: for inline_admin_form in inline_admin_formset: if inline_admin_form.original is None: prepopulated_fields.extend(inline_admin_form.prepopulated_fields) context.update({'prepopulated_fields': prepopulated_fields}) return context @register.inclusion_tag('admin/submit_line.html', takes_context=True) def submit_row(context): """ Displays the row of buttons for delete and save. """ opts = context['opts'] change = context['change'] is_popup = context['is_popup'] save_as = context['save_as'] return { 'onclick_attrib': (opts.get_ordered_objects() and change and 'onclick="submitOrderForm();"' or ''), 'show_delete_link': (not is_popup and context['has_delete_permission'] and (change or context['show_delete'])), 'show_save_as_new': not is_popup and change and save_as, 'show_save_and_add_another': context['has_add_permission'] and not is_popup and (not save_as or context['add']), 'show_save_and_continue': not is_popup and context['has_change_permission'], 'is_popup': is_popup, 'show_save': True } @register.filter def cell_count(inline_admin_form): """Returns the number of cells used in a tabular inline""" count = 1 # Hidden cell with hidden 'id' field for fieldset in inline_admin_form: # Loop through all the fields (one per cell) for line in fieldset: for field in line: count += 1 if inline_admin_form.formset.can_delete: # Delete checkbox count += 1 return count
mit
1,659,611,317,826,097,400
39.946429
85
0.635848
false
ndp-systemes/odoo-addons
stock_account_improved/stock_account_improved.py
1
1355
# -*- coding: utf8 -*- # # Copyright (C) 2017 NDP Systèmes (<http://www.ndp-systemes.fr>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # from openerp import models, api class StockAccountImprovedStockMove(models.Model): _inherit = 'stock.move' @api.model def default_get(self, fields_list): result = super(StockAccountImprovedStockMove, self).default_get(fields_list) picking_id = result.get('default_picking_id') or self.env.context.get('default_picking_id') if picking_id: picking = self.env['stock.picking'].browse(picking_id) if picking.invoice_state: result['invoice_state'] = picking.invoice_state return result
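
# Illustrative use of the override above (hypothetical 'picking' record): a move
# created with default_picking_id in its context gets its invoice_state
# defaulted from that picking, e.g.:
#   self.env['stock.move'].with_context(default_picking_id=picking.id).default_get(['invoice_state'])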
agpl-3.0
2,104,406,411,903,891,000
38.823529
99
0.691285
false
rghe/ansible
lib/ansible/modules/cloud/amazon/aws_sgw_facts.py
20
11531
#!/usr/bin/python # Copyright (c) 2018 Loic BLOT <[email protected]> # This module is sponsored by E.T.A.I. (www.etai.fr) # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: aws_sgw_facts short_description: Fetch AWS Storage Gateway facts description: - Fetch AWS Storage Gateway facts version_added: "2.6" requirements: [ boto3 ] author: "Loic Blot <[email protected]>" options: gather_local_disks: description: - Gather local disks attached to the storage gateway. type: bool required: false default: true gather_tapes: description: - Gather tape information for storage gateways in tape mode. type: bool required: false default: true gather_file_shares: description: - Gather file share information for storage gateways in s3 mode. type: bool required: false default: true gather_volumes: description: - Gather volume information for storage gateways in iSCSI (cached & stored) modes. type: bool required: false default: true extends_documentation_fragment: - aws - ec2 ''' RETURN = ''' gateways: description: list of gateway objects returned: always type: complex contains: gateway_arn: description: "Storage Gateway ARN" returned: always type: string sample: "arn:aws:storagegateway:eu-west-1:367709993819:gateway/sgw-9999F888" gateway_id: description: "Storage Gateway ID" returned: always type: string sample: "sgw-9999F888" gateway_name: description: "Storage Gateway friendly name" returned: always type: string sample: "my-sgw-01" gateway_operational_state: description: "Storage Gateway operational state" returned: always type: string sample: "ACTIVE" gateway_type: description: "Storage Gateway type" returned: always type: string sample: "FILE_S3" file_shares: description: "Storage gateway file shares" returned: when gateway_type == "FILE_S3" type: complex contains: file_share_arn: description: "File share ARN" returned: always type: string sample: "arn:aws:storagegateway:eu-west-1:399805793479:share/share-AF999C88" file_share_id: description: "File share ID" returned: always type: string sample: "share-AF999C88" file_share_status: description: "File share status" returned: always type: string sample: "AVAILABLE" tapes: description: "Storage Gateway tapes" returned: when gateway_type == "VTL" type: complex contains: tape_arn: description: "Tape ARN" returned: always type: string sample: "arn:aws:storagegateway:eu-west-1:399805793479:tape/tape-AF999C88" tape_barcode: description: "Tape ARN" returned: always type: string sample: "tape-AF999C88" tape_size_in_bytes: description: "Tape ARN" returned: always type: integer sample: 555887569 tape_status: description: "Tape ARN" returned: always type: string sample: "AVAILABLE" local_disks: description: "Storage gateway local disks" returned: always type: complex contains: disk_allocation_type: description: "Disk allocation type" returned: always type: string sample: "CACHE STORAGE" disk_id: description: "Disk ID on the system" returned: always type: string sample: "pci-0000:00:1f.0" disk_node: description: "Disk parent block device" returned: always type: string sample: "/dev/sdb" disk_path: description: "Disk path used for the cache" returned: always type: string sample: "/dev/nvme1n1" disk_size_in_bytes: description: "Disk size in bytes" returned: always type: integer sample: 107374182400 disk_status: description: "Disk status" returned: always type: string sample: "present" ''' EXAMPLES = 
''' # Note: These examples do not set authentication details, see the AWS Guide for details. - name: "Get AWS storage gateway facts" aws_sgw_facts: - name: "Get AWS storage gateway facts for region eu-west-3" aws_sgw_facts: region: eu-west-3 ''' from ansible.module_utils.aws.core import AnsibleAWSModule from ansible.module_utils.ec2 import camel_dict_to_snake_dict try: from botocore.exceptions import BotoCoreError, ClientError except ImportError: pass # caught by imported HAS_BOTO3 class SGWFactsManager(object): def __init__(self, client, module): self.client = client self.module = module self.name = self.module.params.get('name') def fetch(self): gateways = self.list_gateways() for gateway in gateways: if self.module.params.get('gather_local_disks'): self.list_local_disks(gateway) # File share gateway if gateway["gateway_type"] == "FILE_S3" and self.module.params.get('gather_file_shares'): self.list_gateway_file_shares(gateway) # Volume tape gateway elif gateway["gateway_type"] == "VTL" and self.module.params.get('gather_tapes'): self.list_gateway_vtl(gateway) # iSCSI gateway elif gateway["gateway_type"] in ["CACHED", "STORED"] and self.module.params.get('gather_volumes'): self.list_gateway_volumes(gateway) self.module.exit_json(gateways=gateways) """ List all storage gateways for the AWS endpoint. """ def list_gateways(self): try: paginator = self.client.get_paginator('list_gateways') response = paginator.paginate( PaginationConfig={ 'PageSize': 100, } ).build_full_result() gateways = [] for gw in response["Gateways"]: gateways.append(camel_dict_to_snake_dict(gw)) return gateways except (BotoCoreError, ClientError) as e: self.module.fail_json_aws(e, msg="Couldn't list storage gateways") """ Read file share objects from AWS API response. Drop the gateway_arn attribute from response, as it will be duplicate with parent object. """ @staticmethod def _read_gateway_fileshare_response(fileshares, aws_reponse): for share in aws_reponse["FileShareInfoList"]: share_obj = camel_dict_to_snake_dict(share) if "gateway_arn" in share_obj: del share_obj["gateway_arn"] fileshares.append(share_obj) return aws_reponse["NextMarker"] if "NextMarker" in aws_reponse else None """ List file shares attached to AWS storage gateway when in S3 mode. """ def list_gateway_file_shares(self, gateway): try: response = self.client.list_file_shares( GatewayARN=gateway["gateway_arn"], Limit=100 ) gateway["file_shares"] = [] marker = self._read_gateway_fileshare_response(gateway["file_shares"], response) while marker is not None: response = self.client.list_file_shares( GatewayARN=gateway["gateway_arn"], Marker=marker, Limit=100 ) marker = self._read_gateway_fileshare_response(gateway["file_shares"], response) except (BotoCoreError, ClientError) as e: self.module.fail_json_aws(e, msg="Couldn't list gateway file shares") """ List storage gateway local disks """ def list_local_disks(self, gateway): try: gateway['local_disks'] = [camel_dict_to_snake_dict(disk) for disk in self.client.list_local_disks(GatewayARN=gateway["gateway_arn"])['Disks']] except (BotoCoreError, ClientError) as e: self.module.fail_json_aws(e, msg="Couldn't list storage gateway local disks") """ Read tape objects from AWS API response. Drop the gateway_arn attribute from response, as it will be duplicate with parent object. 
""" @staticmethod def _read_gateway_tape_response(tapes, aws_response): for tape in aws_response["TapeInfos"]: tape_obj = camel_dict_to_snake_dict(tape) if "gateway_arn" in tape_obj: del tape_obj["gateway_arn"] tapes.append(tape_obj) return aws_response["Marker"] if "Marker" in aws_response else None """ List VTL & VTS attached to AWS storage gateway in VTL mode """ def list_gateway_vtl(self, gateway): try: response = self.client.list_tapes( Limit=100 ) gateway["tapes"] = [] marker = self._read_gateway_tape_response(gateway["tapes"], response) while marker is not None: response = self.client.list_tapes( Marker=marker, Limit=100 ) marker = self._read_gateway_tape_response(gateway["tapes"], response) except (BotoCoreError, ClientError) as e: self.module.fail_json_aws(e, msg="Couldn't list storage gateway tapes") """ List volumes attached to AWS storage gateway in CACHED or STORAGE mode """ def list_gateway_volumes(self, gateway): try: paginator = self.client.get_paginator('list_volumes') response = paginator.paginate( GatewayARN=gateway["gateway_arn"], PaginationConfig={ 'PageSize': 100, } ).build_full_result() gateway["volumes"] = [] for volume in response["VolumeInfos"]: volume_obj = camel_dict_to_snake_dict(volume) if "gateway_arn" in volume_obj: del volume_obj["gateway_arn"] if "gateway_id" in volume_obj: del volume_obj["gateway_id"] gateway["volumes"].append(volume_obj) except (BotoCoreError, ClientError) as e: self.module.fail_json_aws(e, msg="Couldn't list storage gateway volumes") def main(): argument_spec = dict( gather_local_disks=dict(type='bool', default=True), gather_tapes=dict(type='bool', default=True), gather_file_shares=dict(type='bool', default=True), gather_volumes=dict(type='bool', default=True) ) module = AnsibleAWSModule(argument_spec=argument_spec) client = module.client('storagegateway') if client is None: # this should never happen module.fail_json(msg='Unknown error, failed to create storagegateway client, no information from boto.') SGWFactsManager(client, module).fetch() if __name__ == '__main__': main()
gpl-3.0
3,598,023,190,552,389,600
31.573446
112
0.591709
false
yakky/django
django/contrib/gis/gdal/raster/source.py
17
13572
import json import os from ctypes import addressof, byref, c_double, c_void_p from django.contrib.gis.gdal.base import GDALBase from django.contrib.gis.gdal.driver import Driver from django.contrib.gis.gdal.error import GDALException from django.contrib.gis.gdal.prototypes import raster as capi from django.contrib.gis.gdal.raster.band import BandList from django.contrib.gis.gdal.raster.const import GDAL_RESAMPLE_ALGORITHMS from django.contrib.gis.gdal.srs import SpatialReference, SRSException from django.contrib.gis.geometry.regex import json_regex from django.utils import six from django.utils.encoding import ( force_bytes, force_text, python_2_unicode_compatible, ) from django.utils.functional import cached_property class TransformPoint(list): indices = { 'origin': (0, 3), 'scale': (1, 5), 'skew': (2, 4), } def __init__(self, raster, prop): x = raster.geotransform[self.indices[prop][0]] y = raster.geotransform[self.indices[prop][1]] list.__init__(self, [x, y]) self._raster = raster self._prop = prop @property def x(self): return self[0] @x.setter def x(self, value): gtf = self._raster.geotransform gtf[self.indices[self._prop][0]] = value self._raster.geotransform = gtf @property def y(self): return self[1] @y.setter def y(self, value): gtf = self._raster.geotransform gtf[self.indices[self._prop][1]] = value self._raster.geotransform = gtf @python_2_unicode_compatible class GDALRaster(GDALBase): """ Wraps a raster GDAL Data Source object. """ def __init__(self, ds_input, write=False): self._write = 1 if write else 0 Driver.ensure_registered() # Preprocess json inputs. This converts json strings to dictionaries, # which are parsed below the same way as direct dictionary inputs. if isinstance(ds_input, six.string_types) and json_regex.match(ds_input): ds_input = json.loads(ds_input) # If input is a valid file path, try setting file as source. if isinstance(ds_input, six.string_types): if not os.path.exists(ds_input): raise GDALException('Unable to read raster source input "{}"'.format(ds_input)) try: # GDALOpen will auto-detect the data source type. 
self._ptr = capi.open_ds(force_bytes(ds_input), self._write) except GDALException as err: raise GDALException('Could not open the datasource at "{}" ({}).'.format(ds_input, err)) elif isinstance(ds_input, dict): # A new raster needs to be created in write mode self._write = 1 # Create driver (in memory by default) driver = Driver(ds_input.get('driver', 'MEM')) # For out of memory drivers, check filename argument if driver.name != 'MEM' and 'name' not in ds_input: raise GDALException('Specify name for creation of raster with driver "{}".'.format(driver.name)) # Check if width and height where specified if 'width' not in ds_input or 'height' not in ds_input: raise GDALException('Specify width and height attributes for JSON or dict input.') # Check if srid was specified if 'srid' not in ds_input: raise GDALException('Specify srid for JSON or dict input.') # Create GDAL Raster self._ptr = capi.create_ds( driver._ptr, force_bytes(ds_input.get('name', '')), ds_input['width'], ds_input['height'], ds_input.get('nr_of_bands', len(ds_input.get('bands', []))), ds_input.get('datatype', 6), None ) # Set band data if provided for i, band_input in enumerate(ds_input.get('bands', [])): band = self.bands[i] band.data(band_input['data']) if 'nodata_value' in band_input: band.nodata_value = band_input['nodata_value'] # Set SRID self.srs = ds_input.get('srid') # Set additional properties if provided if 'origin' in ds_input: self.origin.x, self.origin.y = ds_input['origin'] if 'scale' in ds_input: self.scale.x, self.scale.y = ds_input['scale'] if 'skew' in ds_input: self.skew.x, self.skew.y = ds_input['skew'] elif isinstance(ds_input, c_void_p): # Instantiate the object using an existing pointer to a gdal raster. self._ptr = ds_input else: raise GDALException('Invalid data source input type: "{}".'.format(type(ds_input))) def __del__(self): if self._ptr and capi: capi.close_ds(self._ptr) def __str__(self): return self.name def __repr__(self): """ Short-hand representation because WKB may be very large. """ return '<Raster object at %s>' % hex(addressof(self._ptr)) def _flush(self): """ Flush all data from memory into the source file if it exists. The data that needs flushing are geotransforms, coordinate systems, nodata_values and pixel values. This function will be called automatically wherever it is needed. """ # Raise an Exception if the value is being changed in read mode. if not self._write: raise GDALException('Raster needs to be opened in write mode to change values.') capi.flush_ds(self._ptr) @property def name(self): """ Returns the name of this raster. Corresponds to filename for file-based rasters. """ return force_text(capi.get_ds_description(self._ptr)) @cached_property def driver(self): """ Returns the GDAL Driver used for this raster. """ ds_driver = capi.get_ds_driver(self._ptr) return Driver(ds_driver) @property def width(self): """ Width (X axis) in pixels. """ return capi.get_ds_xsize(self._ptr) @property def height(self): """ Height (Y axis) in pixels. """ return capi.get_ds_ysize(self._ptr) @property def srs(self): """ Returns the SpatialReference used in this GDALRaster. """ try: wkt = capi.get_ds_projection_ref(self._ptr) if not wkt: return None return SpatialReference(wkt, srs_type='wkt') except SRSException: return None @srs.setter def srs(self, value): """ Sets the spatial reference used in this GDALRaster. The input can be a SpatialReference or any parameter accepted by the SpatialReference constructor. 
""" if isinstance(value, SpatialReference): srs = value elif isinstance(value, six.integer_types + six.string_types): srs = SpatialReference(value) else: raise ValueError('Could not create a SpatialReference from input.') capi.set_ds_projection_ref(self._ptr, srs.wkt.encode()) self._flush() @property def srid(self): """ Shortcut to access the srid of this GDALRaster. """ return self.srs.srid @srid.setter def srid(self, value): """ Shortcut to set this GDALRaster's srs from an srid. """ self.srs = value @property def geotransform(self): """ Returns the geotransform of the data source. Returns the default geotransform if it does not exist or has not been set previously. The default is [0.0, 1.0, 0.0, 0.0, 0.0, -1.0]. """ # Create empty ctypes double array for data gtf = (c_double * 6)() capi.get_ds_geotransform(self._ptr, byref(gtf)) return list(gtf) @geotransform.setter def geotransform(self, values): "Sets the geotransform for the data source." if sum([isinstance(x, (int, float)) for x in values]) != 6: raise ValueError('Geotransform must consist of 6 numeric values.') # Create ctypes double array with input and write data values = (c_double * 6)(*values) capi.set_ds_geotransform(self._ptr, byref(values)) self._flush() @property def origin(self): """ Coordinates of the raster origin. """ return TransformPoint(self, 'origin') @property def scale(self): """ Pixel scale in units of the raster projection. """ return TransformPoint(self, 'scale') @property def skew(self): """ Skew of pixels (rotation parameters). """ return TransformPoint(self, 'skew') @property def extent(self): """ Returns the extent as a 4-tuple (xmin, ymin, xmax, ymax). """ # Calculate boundary values based on scale and size xval = self.origin.x + self.scale.x * self.width yval = self.origin.y + self.scale.y * self.height # Calculate min and max values xmin = min(xval, self.origin.x) xmax = max(xval, self.origin.x) ymin = min(yval, self.origin.y) ymax = max(yval, self.origin.y) return xmin, ymin, xmax, ymax @property def bands(self): return BandList(self) def warp(self, ds_input, resampling='NearestNeighbour', max_error=0.0): """ Returns a warped GDALRaster with the given input characteristics. The input is expected to be a dictionary containing the parameters of the target raster. Allowed values are width, height, SRID, origin, scale, skew, datatype, driver, and name (filename). By default, the warp functions keeps all parameters equal to the values of the original source raster. For the name of the target raster, the name of the source raster will be used and appended with _copy. + source_driver_name. In addition, the resampling algorithm can be specified with the "resampling" input parameter. The default is NearestNeighbor. For a list of all options consult the GDAL_RESAMPLE_ALGORITHMS constant. """ # Get the parameters defining the geotransform, srid, and size of the raster if 'width' not in ds_input: ds_input['width'] = self.width if 'height' not in ds_input: ds_input['height'] = self.height if 'srid' not in ds_input: ds_input['srid'] = self.srs.srid if 'origin' not in ds_input: ds_input['origin'] = self.origin if 'scale' not in ds_input: ds_input['scale'] = self.scale if 'skew' not in ds_input: ds_input['skew'] = self.skew # Get the driver, name, and datatype of the target raster if 'driver' not in ds_input: ds_input['driver'] = self.driver.name if 'name' not in ds_input: ds_input['name'] = self.name + '_copy.' 
+ self.driver.name if 'datatype' not in ds_input: ds_input['datatype'] = self.bands[0].datatype() # Set the number of bands ds_input['nr_of_bands'] = len(self.bands) # Create target raster target = GDALRaster(ds_input, write=True) # Copy nodata values to warped raster for index, band in enumerate(self.bands): target.bands[index].nodata_value = band.nodata_value # Select resampling algorithm algorithm = GDAL_RESAMPLE_ALGORITHMS[resampling] # Reproject image capi.reproject_image( self._ptr, self.srs.wkt.encode(), target._ptr, target.srs.wkt.encode(), algorithm, 0.0, max_error, c_void_p(), c_void_p(), c_void_p() ) # Make sure all data is written to file target._flush() return target def transform(self, srid, driver=None, name=None, resampling='NearestNeighbour', max_error=0.0): """ Returns a copy of this raster reprojected into the given SRID. """ # Convert the resampling algorithm name into an algorithm id algorithm = GDAL_RESAMPLE_ALGORITHMS[resampling] # Instantiate target spatial reference system target_srs = SpatialReference(srid) # Create warped virtual dataset in the target reference system target = capi.auto_create_warped_vrt( self._ptr, self.srs.wkt.encode(), target_srs.wkt.encode(), algorithm, max_error, c_void_p() ) target = GDALRaster(target) # Construct the target warp dictionary from the virtual raster data = { 'srid': srid, 'width': target.width, 'height': target.height, 'origin': [target.origin.x, target.origin.y], 'scale': [target.scale.x, target.scale.y], 'skew': [target.skew.x, target.skew.y], } # Set the driver and filepath if provided if driver: data['driver'] = driver if name: data['name'] = name # Warp the raster into new srid return self.warp(data, resampling=resampling, max_error=max_error)
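
# Illustrative construction from a dict, as handled in __init__ above (values
# are examples only, assuming an in-memory raster with the default MEM driver):
#   rst = GDALRaster({'width': 4, 'height': 4, 'srid': 4326,
#                     'origin': [0, 0], 'scale': [1, -1],
#                     'bands': [{'data': range(16)}]})
#   warped = rst.warp({'scale': [2, -2]})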
bsd-3-clause
-3,694,522,684,417,132,000
32.761194
112
0.585396
false
AOSPU/external_chromium_org
build/android/pylib/utils/flakiness_dashboard_results_uploader.py
11
7863
# Copyright (c) 2012 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """Uploads the results to the flakiness dashboard server.""" # pylint: disable=E1002,R0201 import logging import os import shutil import sys import tempfile import xml # Include path when ran from a Chromium checkout. sys.path.append( os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir, os.pardir, os.pardir, os.pardir, 'third_party', 'WebKit', 'Tools', 'Scripts'))) # Include path when ran from a WebKit checkout. sys.path.append( os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir, os.pardir, os.pardir, os.pardir, os.pardir, os.pardir, os.pardir, 'Tools', 'Scripts'))) # pylint: disable=F0401 from webkitpy.common.system import executive, filesystem from webkitpy.layout_tests.layout_package import json_results_generator # pylint: enable=F0401 #TODO(craigdh): pylib/utils/ should not depend on pylib/. from pylib import cmd_helper from pylib import constants from pylib.utils import repo_utils # The JSONResultsGenerator gets the filesystem.join operation from the Port # object. Creating a Port object requires specifying information that only # makes sense for running WebKit layout tests, so we provide a dummy object # that contains the fields required by the generator. class PortDummy(object): def __init__(self): self._executive = executive.Executive() self._filesystem = filesystem.FileSystem() class JSONResultsGenerator(json_results_generator.JSONResultsGeneratorBase): """Writes test results to a JSON file and handles uploading that file to the test results server. """ def __init__(self, port, builder_name, build_name, build_number, tmp_folder, test_results_map, test_results_server, test_type, master_name): super(JSONResultsGenerator, self).__init__( port=port, builder_name=builder_name, build_name=build_name, build_number=build_number, results_file_base_path=tmp_folder, builder_base_url=None, test_results_map=test_results_map, svn_repositories=(('webkit', 'third_party/WebKit'), ('chrome', '.')), test_results_server=test_results_server, test_type=test_type, master_name=master_name) #override def _get_modifier_char(self, test_name): if test_name not in self._test_results_map: return self.__class__.NO_DATA_RESULT return self._test_results_map[test_name].modifier #override def _get_svn_revision(self, in_directory): """Returns the git/svn revision for the given directory. Args: in_directory: The directory relative to src. """ def _is_git_directory(in_directory): """Returns true if the given directory is in a git repository. Args: in_directory: The directory path to be tested. 
""" if os.path.exists(os.path.join(in_directory, '.git')): return True parent = os.path.dirname(in_directory) if parent == constants.DIR_SOURCE_ROOT or parent == in_directory: return False return _is_git_directory(parent) in_directory = os.path.join(constants.DIR_SOURCE_ROOT, in_directory) if not os.path.exists(os.path.join(in_directory, '.svn')): if _is_git_directory(in_directory): return repo_utils.GetGitHeadSHA1(in_directory) else: return '' output = cmd_helper.GetCmdOutput(['svn', 'info', '--xml'], cwd=in_directory) try: dom = xml.dom.minidom.parseString(output) return dom.getElementsByTagName('entry')[0].getAttribute('revision') except xml.parsers.expat.ExpatError: return '' return '' class ResultsUploader(object): """Handles uploading buildbot tests results to the flakiness dashboard.""" def __init__(self, tests_type): self._build_number = os.environ.get('BUILDBOT_BUILDNUMBER') self._builder_name = os.environ.get('BUILDBOT_BUILDERNAME') self._tests_type = tests_type if not self._build_number or not self._builder_name: raise Exception('You should not be uploading tests results to the server' 'from your local machine.') upstream = (tests_type != 'Chromium_Android_Instrumentation') if upstream: # TODO(frankf): Use factory properties (see buildbot/bb_device_steps.py) # This requires passing the actual master name (e.g. 'ChromiumFYI' not # 'chromium.fyi'). from slave import slave_utils # pylint: disable=F0401 self._build_name = slave_utils.SlaveBuildName(constants.DIR_SOURCE_ROOT) self._master_name = slave_utils.GetActiveMaster() else: self._build_name = 'chromium-android' buildbot_branch = os.environ.get('BUILDBOT_BRANCH') if not buildbot_branch: buildbot_branch = 'master' self._master_name = '%s-%s' % (self._build_name, buildbot_branch) self._test_results_map = {} def AddResults(self, test_results): # TODO(frankf): Differentiate between fail/crash/timeouts. conversion_map = [ (test_results.GetPass(), False, json_results_generator.JSONResultsGeneratorBase.PASS_RESULT), (test_results.GetFail(), True, json_results_generator.JSONResultsGeneratorBase.FAIL_RESULT), (test_results.GetCrash(), True, json_results_generator.JSONResultsGeneratorBase.FAIL_RESULT), (test_results.GetTimeout(), True, json_results_generator.JSONResultsGeneratorBase.FAIL_RESULT), (test_results.GetUnknown(), True, json_results_generator.JSONResultsGeneratorBase.NO_DATA_RESULT), ] for results_list, failed, modifier in conversion_map: for single_test_result in results_list: test_result = json_results_generator.TestResult( test=single_test_result.GetName(), failed=failed, elapsed_time=single_test_result.GetDur() / 1000) # The WebKit TestResult object sets the modifier it based on test name. # Since we don't use the same test naming convention as WebKit the # modifier will be wrong, so we need to overwrite it. 
test_result.modifier = modifier self._test_results_map[single_test_result.GetName()] = test_result def Upload(self, test_results_server): if not self._test_results_map: return tmp_folder = tempfile.mkdtemp() try: results_generator = JSONResultsGenerator( port=PortDummy(), builder_name=self._builder_name, build_name=self._build_name, build_number=self._build_number, tmp_folder=tmp_folder, test_results_map=self._test_results_map, test_results_server=test_results_server, test_type=self._tests_type, master_name=self._master_name) json_files = ["incremental_results.json", "times_ms.json"] results_generator.generate_json_output() results_generator.generate_times_ms_file() results_generator.upload_json_files(json_files) except Exception as e: logging.error("Uploading results to test server failed: %s." % e) finally: shutil.rmtree(tmp_folder) def Upload(results, flakiness_dashboard_server, test_type): """Reports test results to the flakiness dashboard for Chrome for Android. Args: results: test results. flakiness_dashboard_server: the server to upload the results to. test_type: the type of the tests (as displayed by the flakiness dashboard). """ uploader = ResultsUploader(test_type) uploader.AddResults(results) uploader.Upload(flakiness_dashboard_server)
bsd-3-clause
3,862,580,606,380,291,600
36.802885
80
0.667048
false
AltarBeastiful/qt-creator
tests/system/suite_SCOM/tst_SCOM04/test.py
4
3506
############################################################################# ## ## Copyright (C) 2014 Digia Plc and/or its subsidiary(-ies). ## Contact: http://www.qt-project.org/legal ## ## This file is part of Qt Creator. ## ## Commercial License Usage ## Licensees holding valid commercial Qt licenses may use this file in ## accordance with the commercial license agreement provided with the ## Software or, alternatively, in accordance with the terms contained in ## a written agreement between you and Digia. For licensing terms and ## conditions see http://www.qt.io/licensing. For further information ## use the contact form at http://www.qt.io/contact-us. ## ## GNU Lesser General Public License Usage ## Alternatively, this file may be used under the terms of the GNU Lesser ## General Public License version 2.1 or version 3 as published by the Free ## Software Foundation and appearing in the file LICENSE.LGPLv21 and ## LICENSE.LGPLv3 included in the packaging of this file. Please review the ## following information to ensure the GNU Lesser General Public License ## requirements will be met: https://www.gnu.org/licenses/lgpl.html and # http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html. ## ## In addition, as a special exception, Digia gives you certain additional ## rights. These rights are described in the Digia Qt LGPL Exception ## version 1.1, included in the file LGPL_EXCEPTION.txt in this package. ## ############################################################################# source("../../shared/qtcreator.py") source("../../shared/suites_qtta.py") # entry of test def main(): # expected error texts - for different compilers expectedErrorAlternatives = ["'SyntaxError' was not declared in this scope", "'SyntaxError' : undeclared identifier", "use of undeclared identifier 'SyntaxError'"] startApplication("qtcreator" + SettingsPath) if not startedWithoutPluginError(): return # create qt quick application checkedTargets, projectName = createNewQtQuickApplication(tempDir(), "SampleApp") # create syntax error in cpp file openDocument("SampleApp.Sources.main\\.cpp") if not appendToLine(waitForObject(":Qt Creator_CppEditor::Internal::CPPEditorWidget"), "viewer.showExpanded();", "SyntaxError"): invokeMenuItem("File", "Exit") return # save all invokeMenuItem("File", "Save All") # build it - on all build configurations availableConfigs = iterateBuildConfigs(len(checkedTargets)) if not availableConfigs: test.fatal("Haven't found a suitable Qt version - leaving without building.") for kit, config in availableConfigs: selectBuildConfig(len(checkedTargets), kit, config) # try to compile test.log("Testing build configuration: " + config) clickButton(waitForObject(":*Qt Creator.Build Project_Core::Internal::FancyToolButton")) # wait until build finished waitForCompile() # open issues list view ensureChecked(waitForObject(":Qt Creator_Issues_Core::Internal::OutputPaneToggleButton")) issuesView = waitForObject(":Qt Creator.Issues_QListView") # verify that error is properly reported test.verify(checkSyntaxError(issuesView, expectedErrorAlternatives, False), "Verifying cpp syntax error while building simple qt quick application.") # exit qt creator invokeMenuItem("File", "Exit")
lgpl-2.1
-5,114,908,739,355,210,000
49.085714
132
0.678551
false
astrorafael/ema
ema/mqttclient.py
1
13699
# ----------------------------------------------------------------------
# Copyright (c) 2015 Rafael Gonzalez.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# ----------------------------------------------------------------------

# ========================== DESIGN NOTES ==============================
# An MQTT class implementing an MQTT client with a pure publishing-only
# behaviour. No disconnection requests are ever made.
#
# This class inherits from Lazy to periodically execute a work() procedure
# which is responsible for most of the work, including keeping the connection alive.
# The work() procedure executes twice as fast as the keepalive timeout specified to
# the client MQTT library.
#
# This version publishes a 24h bulk dump to the MQTT broker
# by using an object of class Command and implementing the necessary callbacks.
#
# ======================================================================

import logging
import paho.mqtt.client as mqtt
import socket
import datetime

from server import Lazy, Server
from emaproto import SPSB, STATLEN
from command import Command, COMMAND
from dev.todtimer import Timer

# FLASH Pages where History data are stored
FLASH_START = 300
FLASH_END = 300

# log info every NPUBLISH times (ticks)
NPUBLISH = 60

# MQTT Connection Status
NOT_CONNECTED = 0
CONNECTING = 1
CONNECTED = 2
FAILED = 3
DISCONNECTING = 4

log = logging.getLogger('mqtt')

# The callback for when the client receives a CONNACK response from the server.
def on_connect(client, userdata, flags, rc):
    userdata.on_connect(flags, rc)

def on_disconnect(client, userdata, rc):
    userdata.on_disconnect(rc)

# The callback for when a PUBLISH message is received from the server.
# Not Needed. This is a pure 'publish type' client.
def on_message(client, userdata, msg): userdata.on_message(msg) # Utility function def transform(message): '''Transform EMA status message into a pure ASCII string''' return "%s%03d%s" % (message[:SPSB], ord(message[SPSB]), message[SPSB+1:]) class BulkDumpCommand(Command): ''' Commad subclass to handle bulk dump request and responses via callbacks ''' def __init__(self, ema, retries, **kargs): Command.__init__(self,ema,retries,**kargs) # delegate to MQTT client object as it has all the needed context def onPartialCommand(self, message, userdata): ''' Partial bulk dump handler ''' self.ema.mqttclient.onPartialCommand(message,userdata) # delegate to MQTT client object as it has all he needed context def onCommandComplete(self, message, userdata): ''' Bulk dump Command complete handler ''' self.ema.mqttclient.onCommandComplete(message,userdata) class MQTTClient(Lazy): # TOPIC Default vaules TOPIC_EVENTS = "EMA/events" TOPIC_TOPICS = "EMA/topics" TOPIC_HISTORY_MINMAX = "EMA/history/minmax" TOPIC_CURRENT_STATUS = "EMA/current/status" def __init__(self, ema, parser, **kargs): lvl = parser.get("MQTT", "mqtt_log") log.setLevel(lvl) id = parser.get("MQTT", "mqtt_id") host = parser.get("MQTT", "mqtt_host") port = parser.getint("MQTT", "mqtt_port") period = parser.getint("MQTT", "mqtt_period") histflag = parser.getboolean("MQTT", "mqtt_publish_history") publish_status = parser.getboolean("MQTT", "mqtt_publish_status") Lazy.__init__(self, period / 2.0 ) MQTTClient.TOPIC_EVENTS = "EMA/%s/events" % id MQTTClient.TOPIC_TOPICS = "EMA/%s/topics" % id MQTTClient.TOPIC_HISTORY_MINMAX = "EMA/%s/history/minmax" % id MQTTClient.TOPIC_CURRENT_STATUS = "EMA/%s/current/status" % id self.ema = ema self.__id = id self.__topics = False self.__stats = 0 self.__count = 0 self.__histflag = histflag self.__state = NOT_CONNECTED self.__host = host self.__port = port self.__period = period self.__pubstat = publish_status self.__emastat = "()" self.__mqtt = mqtt.Client(client_id=id+'@'+socket.gethostname(), userdata=self) self.__mqtt.on_connect = on_connect self.__mqtt.on_disconnect = on_disconnect ema.addLazy(self) ema.todtimer.addSubscriber(self) if publish_status: ema.subscribeStatus(self) log.info("MQTT client created") # ---------------------------------------- # MQTT Callbacks # ----------------------------------------- def on_connect(self, flags, rc): '''Send the initial event and set last will on unexpected diconnection''' if rc == 0: self.__state = CONNECTED self.__mqtt.publish(MQTTClient.TOPIC_EVENTS, payload="EMA Server connected", qos=2, retain=True) self.__mqtt.will_set(MQTTClient.TOPIC_EVENTS, payload="EMA Server disconnected", qos=2, retain=True) self.__mqtt.will_set(MQTTClient.TOPIC_TOPICS, payload=MQTTClient.TOPIC_EVENTS, qos=2, retain=True) log.info("Conected successfully") else: self.__state = FAILED log.error("Connection failed, rc =%d" % rc) def on_disconnect(self, rc): log.warning("Unexpected disconnection, rc =%d" % rc) self.__state = NOT_CONNECTED self.__topics = False try: self.ema.delReadable(self) except ValueError as e: log.warning("Recovered from mqtt library 'double disconnection' bug") # ---------------------------------------- # Implement the EMA Status Message calback # ----------------------------------------- def onStatus(self, message): '''Pick up status message and transform it into pure ASCII string''' tstamp = (datetime.datetime.utcnow() + \ datetime.timedelta(seconds=0.5)).strftime("\n(%H:%M:%S %d/%m/%Y)") self.__emastat = transform(message) self.__emastat += tstamp # 
--------------------------------- # Implement the Event I/O Interface # --------------------------------- def onInput(self): ''' Read from message buffer and notify handlers if message complete. Called from Server object ''' log.debug("onInput will use mqtt lib for reading") self.__mqtt.loop_read() def fileno(self): '''Implement this interface to be added in select() system call''' return self.__mqtt.socket().fileno() # ----------------------------------------------- # Implement the TOD Timer onNewInterval interface # ----------------------------------------------- def onNewInterval(self, where, i): if self.__state == CONNECTED: if self.__histflag: self.publishBulkDump() else: log.warn("Not connected to broker: can't publish minmax history") # ---------------------------------------- # Implement The Lazy interface # ----------------------------------------- def work(self): ''' Writes data to serial port configured at init. Called periodically from a Server object. Write blocking behaviour. ''' log.debug("mqttclient.work()") if not self.ema.isSyncDone(): return if self.__state == NOT_CONNECTED: self.connect() return # Do this only once in server lifetime if self.__state == CONNECTED and not self.__topics: self.__topics = True self.publishTopics() if self.__histflag: self.publishBulkDump() self.__count = (self.__count + 1) % 2 if self.__state == CONNECTED and self.__count == 0: self.publish() self.__mqtt.loop_misc() # ---------------------------------------- # Implement Command callbacks # ----------------------------------------- def onPartialCommand(self, message, userdata): ''' Partial bulk dump request command handler ''' if len(message) == STATLEN: self.bulkDump.append(transform(message)) else: self.bulkDump.append(message) def onCommandComplete(self, message, userdata): ''' Bulk dump request command complete handler ''' log.debug("onCommandComplete => %s", message) self.bulkDump.append(message) if self.page < FLASH_END : self.page += 1 self.requestPage(self.page) else: date = message[10:20] log.info("Uploading (%s) hourly minmax history to %s", date, MQTTClient.TOPIC_HISTORY_MINMAX) self.__mqtt.publish(topic=MQTTClient.TOPIC_HISTORY_MINMAX, payload='\n'.join(self.bulkDump), qos=2, retain=True) log.info("Upload complete, processed %d lines", len(self.bulkDump)) # -------------- # Helper methods # -------------- def connect(self): ''' Connect to MQTT Broker with parameters passed at creation time. Add MQTT library to the (external) EMA I/O event loop. 
''' try: log.info("Connecting to MQTT Broker %s:%s", self.__host, self.__port) self.__state = CONNECTING self.__mqtt.connect(self.__host, self.__port, self.__period) self.ema.addReadable(self) except IOError, e: log.error("%s",e) if e.errno == 101: log.warning("Trying to connect on the next cycle") self.__state = NOT_CONNECTED else: self.__state = FAILED raise def publish(self): ''' Publish real time individual readings to MQTT Broker ''' # publish raw status line if self.__pubstat: self.__mqtt.publish(topic=MQTTClient.TOPIC_CURRENT_STATUS, payload=self.__emastat) self.__emastat = "()" # publish last current values for device in self.ema.currentList: if ('mqtt','current') in device.publishable: try: for key, value in device.current.iteritems(): log.debug("%s publishing current %s => %s %s", device.name, key, value[0], value[1]) topic = "%s/current/%s/%s" % (self.__id, device.name, key) payload = "%s %s" % value self.__mqtt.publish(topic=topic, payload=payload) except IndexError as e: log.error("publish(current) Exception: %s reading device=%s", e, device.name) # Publish averages for device in self.ema.averageList: if ('mqtt','average') in device.publishable: try: for key, value in device.average.iteritems(): log.debug("%s publishing average %s => %s %s", device.name, key, value[0], value[1]) topic = "%s/average/%s/%s" % (self.__id, device.name, key) payload = "%s %s" % value self.__mqtt.publish(topic=topic, payload=payload) except IndexError as e: log.error("publish(average) Exception: %s reading device=%s", e, device.name) if self.__stats % NPUBLISH == 0: log.info("Published %d measurements" % self.__stats) self.__stats += 1 def publishTopics(self): ''' Publish active topics ''' topics = [MQTTClient.TOPIC_EVENTS, MQTTClient.TOPIC_HISTORY_MINMAX] if self.__pubstat: topics.append(MQTTClient.TOPIC_CURRENT_STATUS) for device in self.ema.currentList: if ('mqtt','current') in device.publishable: try: for key in device.current.iterkeys(): topics.append('%s/current/%s/%s' % (self.__id, device.name, key)) except IndexError as e: log.error("Exception: %s listing device key=%s", e, device.name) continue for device in self.ema.averageList: if ('mqtt','average') in device.publishable: try: for key in device.average.iterkeys(): topics.append('%s/average/%s/%s' % (self.__id, device.name, key)) except IndexError as e: log.error("Exception: %s listing device key=%s", e, device.name) continue self.__mqtt.publish(topic=MQTTClient.TOPIC_TOPICS, payload='\n'.join(topics), qos=2, retain=True) log.info("Sent active topics to %s", MQTTClient.TOPIC_TOPICS) def requestPage(self, page): ''' Request current flash page to EMA ''' log.debug("requesting page %d", page) cmd = BulkDumpCommand(self.ema, retries=0, **COMMAND[-1]) cmd.request("(@H%04d)" % page, page) def publishBulkDump(self): ''' Publish last 24h bulk dump ''' self.bulkDump = [] self.page = FLASH_START self.requestPage(self.page) log.debug("Request to publish 24h Bulk data") if __name__ == "__main__": pass
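
# Configuration keys read by MQTTClient.__init__ above, all taken from the
# [MQTT] section of the config file parsed by the caller: mqtt_log, mqtt_id,
# mqtt_host, mqtt_port, mqtt_period, mqtt_publish_history and
# mqtt_publish_status.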
mit
1,719,672,702,868,759,800
33.681013
120
0.591795
false
Jorl17/jar2app
jar2app.py
1
25831
#!/usr/bin/env python # -*- coding: utf-8 -*- ## ## Copyright (C) 2015-2018 João Ricardo Lourenço <[email protected]> ## ## Github: https://github.com/Jorl17 ## ## Project main repository: https://github.com/Jorl17/jar2app ## ## This file is part of jar2app. ## ## jar2app is free software: you can redistribute it and/or modify ## it under the terms of the GNU General Public License as published by ## the Free Software Foundation, either version 2 of the License, or ## (at your option) any later version. ## ## jar2app is distributed in the hope that it will be useful, ## but WITHOUT ANY WARRANTY; without even the implied warranty of ## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ## GNU General Public License for more details. ## ## You should have received a copy of the GNU General Public License ## along with jar2app. If not, see <http://www.gnu.org/licenses/>. ## from optparse import OptionParser import os.path import shutil import tempfile from zipfile import ZipFile import sys import shlex __author__ = 'jorl17' VERSION = '1.0.1' # Python 2 compatibility is_python2 = sys.version_info[0] == 2 if is_python2: FileExistsError = OSError #------------------------------------------------------------------------------ # Defaults #------------------------------------------------------------------------------ DEFAULT_VERSION='1.0.1' DEFAULT_BUNDLE_IDENTIFIER_PREFIX='com.jar2app.example.' DEFAULT_SIGNATURE='????' DEFAULT_EXECUTABLE_NAME='JavaAppLauncher' #------------------------------------------------------------------------------ # The info.plist file with placeholders #------------------------------------------------------------------------------ info_plist = """<?xml version="1.0" ?> <!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd"> <plist version="1.0"> <!-- This file was autogenerated by jar2app {version} (https://github.com/Jorl17/jar2app) --> <dict> <key>CFBundleDevelopmentRegion</key> <string>English</string> <key>CFBundleExecutable</key> <string>{executable}</string> <key>CFBundleIconFile</key> <string>{icon}</string> <key>CFBundleIdentifier</key> <string>{bundle_identifier}</string> <key>CFBundleDisplayName</key> <string>{bundle_displayname}</string> <key>CFBundleInfoDictionaryVersion</key> <string>6.0</string> <key>CFBundleName</key> <string>{bundle_name}</string> <key>CFBundlePackageType</key> <string>APPL</string> {retina_support} <key>CFBundleShortVersionString</key> <string>{short_version_string}</string> <key>CFBundleSignature</key> <string>{unique_signature}</string> <key>CFBundleVersion</key> <string>{bundle_version}</string> <key>NSHumanReadableCopyright</key> <string>{copyright}</string> {jdk} <key>JVMMainClassName</key> <string>{main_class_name}</string> <key>JVMOptions</key> <array> {jvm_options} </array> <key>JVMArguments</key> <array> {jvm_arguments} </array> </dict> </plist> """ retina_support_string = """<key>NSPrincipalClass</key> <string>NSApplication</string> <key>NSHighResolutionCapable</key> <string>True</string>""" #------------------------------------------------------------------------------ # Create a directory and ignore the "File already exists" error #------------------------------------------------------------------------------ def mkdir_ignore_exists(p): try: os.mkdir(p) return True except FileExistsError: return False #------------------------------------------------------------------------------ # Make a file executable 
#------------------------------------------------------------------------------ def make_executable(path): mode = os.stat(path).st_mode mode |= (mode & 0o444) >> 2 os.chmod(path, mode) #------------------------------------------------------------------------------ # Just strip the extension from a name #------------------------------------------------------------------------------ def strip_extension_from_name(name): return os.path.splitext(name)[0] #------------------------------------------------------------------------------ # Determine the main class in a JAR file. This basically involves searching # through the JAR (it's just a zip file), locating the MANIFEST.MF file, # decompressing it and then finding the main-class line. #------------------------------------------------------------------------------ def find_jar_mainclass(jar_file): f = ZipFile(jar_file, 'r') for file in f.infolist(): orig_fn = file.filename lower_fn = orig_fn.lower() if lower_fn.startswith('meta-inf') and lower_fn.endswith('manifest.mf'): manifest_mf = f.read(orig_fn) for line in manifest_mf.decode().split('\n'): if line.strip().lower().startswith('main-class'): return line.split(':')[1].strip() #------------------------------------------------------------------------------ # Build the main directory structure of the .App. It should look like # <appname>.App/ # Contents/ # Java/ # MacOS/ # Resources/ # en.lproj/ #------------------------------------------------------------------------------ def build_directory_structure(app_full_path): mkdir_ignore_exists(os.path.dirname(app_full_path)) #Base output directory where the app is placed. Create it. mkdir_ignore_exists(app_full_path) mkdir_ignore_exists(os.path.join(app_full_path, 'Contents')) mkdir_ignore_exists(os.path.join(app_full_path, 'Contents', 'Java')) mkdir_ignore_exists(os.path.join(app_full_path, 'Contents', 'MacOS')) mkdir_ignore_exists(os.path.join(app_full_path, 'Contents', 'PlugIns')) mkdir_ignore_exists(os.path.join(app_full_path, 'Contents', 'Resources')) mkdir_ignore_exists(os.path.join(app_full_path, 'Contents', 'Resources', 'en.lproj')) #------------------------------------------------------------------------------ # Write the plist file in the desired output folder. Note that these arguments # are passed directly to the info_plist string, so some of them (jdk, # jvm_arguments...) should have a bit of XML. # # The destination folder is typically <appname>.App/Contents #------------------------------------------------------------------------------ def create_plist_file(destination_folder, icon, bundle_identifier, bundle_displayname, bundle_name,bundle_version, short_version_string,copyright_str, main_class_name, jvm_arguments, jvm_options, jdk, unique_signature, retina_support, executable): filled_info_plist=info_plist.format(icon=icon, bundle_identifier=bundle_identifier, bundle_displayname=bundle_displayname, bundle_name=bundle_name, bundle_version=bundle_version, short_version_string=short_version_string, copyright=copyright_str, main_class_name=main_class_name, jvm_arguments=jvm_arguments, jvm_options=jvm_options, jdk=jdk, unique_signature=unique_signature, retina_support=retina_support, executable=executable, version=VERSION) with open(os.path.join(destination_folder, 'Info.plist'), 'w') as f: f.write(filled_info_plist) #------------------------------------------------------------------------------ # Convert a sequence of strings, separated by spaces, into a sequence of # <string></string> strings. 
# E.g., "a b c" becomes "<string>a</string>b<string></string><string>c</string> # # This is to be used for the JVMArguments and JVMOptions in the plist.xml file. # Also note that there is some whitespace added. This is to comply with the # indent of the xml file. #------------------------------------------------------------------------------ def string_to_plist_xmlarray_values(s): if not s: return '' return ' <string>' + '</string>\n <string>'.join( [i.strip() for i in shlex.split(s) ] ) + '</string>' #------------------------------------------------------------------------------ # Check if JDK/JRE is valid. It can be a zip file or it can be a directory. # Returns: # * The xml string to use # * The JDK/JRE folder name (not its full path; e.g. for zip files, it strips # the zip extension) # * Whether the JDK/JRE is a file or a directory #------------------------------------------------------------------------------ def determine_jdk(jdk): if not jdk: return '','',True isfile = os.path.isfile(jdk) if isfile: if not jdk.lower().endswith('.zip'): exit('JDK/JRE file is not a zip file.') jdk = strip_extension_from_name(os.path.basename(jdk)) dir, name = os.path.split(jdk) return '<key>JVMRuntime</key>\n<string>' + name + '</string>',jdk,isfile #------------------------------------------------------------------------------ # Copy a JDK to the bundled .app. The app_full_path should be the root of # the app (e.g. Test.app/). JDK should be the path to the JDK/JRE and # jdk_isfile comes from determine_jdk and indicates if the this JDK is a zip # file or a directory. # # In case it's a directory, we just copy it over. If it's a zip file, we must # first decompress it. # # In general, the JVM should go to <appname>.App/Contents/PlugIns, e.g. the # structure might become # <appname>.App/Contents/PlugIns/jdk1.8.0_40.jdk # # This JDK should be in the format expected by AppBundler (check if the first # directory is just a Contents folder) #------------------------------------------------------------------------------ def copy_jdk(app_full_path, jdk, jdk_isfile): if jdk: if jdk_isfile: tmpdir = tempfile.mkdtemp() f = ZipFile(jdk, 'r') f.extractall(tmpdir) jdk_dir = tmpdir try: destination_path = os.path.join(app_full_path, 'Contents', 'PlugIns') os.rmdir(destination_path) shutil.copytree(jdk_dir, destination_path) except FileExistsError as e: raise # FIXME try: base_path = os.path.join(app_full_path, 'Contents', 'PlugIns') dir = os.listdir(base_path)[0] final_dir = os.path.join(base_path, strip_extension_from_name(os.path.basename(jdk))) shutil.rmtree(final_dir, ignore_errors=True) # Delete old folder (if it exists) os.rename(os.path.join(base_path, dir), final_dir) shutil.rmtree(tmpdir) except: raise #FIXME else: destination = os.path.join(app_full_path, 'Contents', 'PlugIns', os.path.basename(jdk)) shutil.rmtree(destination, ignore_errors=True) # Delete old folder (if it exists) shutil.copytree(jdk, destination, symlinks=True) # ------------------------------------------------------------------------------ # Copy all files while also preserving status information. If status cannot be # copied, drop it and copy mode instead. # ------------------------------------------------------------------------------ def copy_preserve_status(src, dst): try: shutil.copy2(src, dst) except OSError: shutil.copy(src, dst) #------------------------------------------------------------------------------ # Copy all files to the previously created directory. 
This involes copying # the Localizable.strings file, the JavaAppLauncher executable and, finally, # the JDK/JRE and application icon if they were provided #------------------------------------------------------------------------------ def copy_base_files(app_full_path, icon, jar_file, jdk, jdk_isfile, executable, executable_file): if icon: copy_preserve_status(icon,os.path.join(app_full_path, 'Contents', 'Resources')) copy_preserve_status(os.path.join(os.path.dirname(sys.argv[0]), 'jar2app_basefiles', 'Localizable.strings'), os.path.join(app_full_path, 'Contents', 'Resources', 'en.lproj', 'Localizable.strings')) if executable_file: copy_preserve_status(executable_file, os.path.join(app_full_path, 'Contents', 'MacOS', executable)) else: copy_preserve_status(os.path.join(os.path.dirname(sys.argv[0]), 'jar2app_basefiles', 'JavaAppLauncher'), os.path.join(app_full_path, 'Contents', 'MacOS', executable)) make_executable(os.path.join(app_full_path, 'Contents', 'MacOS', executable)) copy_preserve_status(jar_file, os.path.join(app_full_path, 'Contents', 'Java', os.path.basename(jar_file))) copy_jdk(app_full_path, jdk, jdk_isfile) #------------------------------------------------------------------------------ # Determine the destination Appname (and full path) taking into account the # parameters. Note that: # 1. If output is provided and it is a destination file, its name is used as # the appname # 2. If output is provided but it is a destination folder, then name must be # figured out with the next steps (as if output wasn't provided). # 3. Prefer the bundle name # 4. Prefer the bundle displayname # 5. Prefer the jar name (without extension, of course) # We also assume that by default the output should go to the current directory. #------------------------------------------------------------------------------ def determine_app_name(jar_name, output, bundle_displayname, bundle_name, auto_append_app): if output: dir,name = os.path.split(output) if not dir: #All that was given was a filename dir = '.' else: #Assume default directory dir = '.' name = '' if not name: # If no .app name is provided, prefer: # 1. The bundle name, if it was provided # 2. the bundle_displayname, if it was provided # 3. The jar name if bundle_name: return os.path.join(dir,bundle_name + '.app') elif bundle_displayname: return os.path.join(dir,bundle_displayname + '.app') elif jar_name: return os.path.join(dir,strip_extension_from_name(jar_name) + '.app') else: # Ensure the name ends with .app, unless we were told not to do so if auto_append_app: if name.lower().endswith('.app'): return os.path.join(dir,name) else: return os.path.join(dir,name + '.app') else: return os.path.join(dir,name) #------------------------------------------------------------------------------ # Print summary info on the fields used, if they are used. 
Used when the # process is done #------------------------------------------------------------------------------ def print_final_file_info(icon, bundle_identifier, bundle_displayname, bundle_name, short_version_string, unique_signature, bundle_version, copyright_str, orig_jvm_options, main_class_name, jdk, retina_support, use_screen_menu_bar, working_directory, executable): def print_field_if_not_null(name, field): if field: print('{}: {}'.format(name, field)) print_field_if_not_null('CFBundleIconFile', icon) print_field_if_not_null('CFBundleIdentifier', bundle_identifier) print_field_if_not_null('CFBundleDisplayName', bundle_displayname) print_field_if_not_null('CFBundleName', bundle_name) print_field_if_not_null('CFBundleShortVersionString', short_version_string) print_field_if_not_null('CFBundleSignature', unique_signature) print_field_if_not_null('CFBundleVersion', bundle_version) print_field_if_not_null('NSHumanReadableCopyright', copyright_str) if retina_support: print('Retina support enabled.') if use_screen_menu_bar: print('macOS menubar support enabled (might not always work).') print('---') print_field_if_not_null('JVMOptions', orig_jvm_options) print_field_if_not_null('JVMMainClassName', main_class_name) print_field_if_not_null('JVMRuntime', jdk) print_field_if_not_null('Executable Name (CFBundleExecutable)', executable) print_field_if_not_null('JAR Working directory', working_directory) #------------------------------------------------------------------------------ # This is the main application logic. It receives the arguments straight from # the user, gives appropriate defaults, builds the directory structure, # copies files (packing the JDK/JRE) and creates the plist file. In the end, # if all went well, it displays summary info. #------------------------------------------------------------------------------ def make_app(jar_file, output='.', icon=None, bundle_identifier=None, bundle_displayname=None, bundle_name=None, bundle_version=None, short_version_string=None, copyright_str=None, main_class_name=None, jvm_arguments=None, jvm_options=None, jdk=None, unique_signature=None, auto_append_app=True, retina_screen=True, use_screen_menu_bar=False, working_directory=None, executable=None, executable_file=None): def default_value(d, default): return d if d else default orig_jvm_options = jvm_options if not jvm_options: jvm_options = '' jar_name = os.path.basename(jar_file) app_full_path = determine_app_name(jar_name, output, bundle_displayname, bundle_name, auto_append_app) app_name = strip_extension_from_name(os.path.basename(app_full_path)) icon = default_value(icon, '') bundle_identifier = default_value(bundle_identifier, DEFAULT_BUNDLE_IDENTIFIER_PREFIX + app_name) if jdk: # Remove any trailing forward and backslashes which might screw up os.path.basename when copying the JDK. jdk = jdk.rstrip('/').rstrip('\\') if use_screen_menu_bar: jvm_options += ' -Dapple.laf.useScreenMenuBar=true' if working_directory: jvm_options += ' -Duser.dir="%s"' % working_directory if not bundle_displayname: # If no bundle_displayname is provided: # 1. Use the bundle_name # 2. use the app_name (note that the app_name was already determined based on what the user gave us. # For instance, if no app_name was given, and no displayname was given, and no app name was given, the # first choice is the bundle_name. 
if bundle_name: bundle_displayname = bundle_name else: bundle_displayname = app_name # Set the app name in the macOS menu bar (About and Quit menu items) jvm_options += ' -Xdock:name="%s"' % bundle_displayname # When we get here, we always have a displayname. So if there's no bundlename, go with that. It may itself have # come from the app name bundle_name = default_value(bundle_name, bundle_displayname) if not bundle_version: bundle_version = short_version_string if short_version_string else DEFAULT_VERSION # When we get here, we always have bundle_version, even if it is the default short_version_string = default_value(short_version_string, bundle_version) copyright_str = default_value(copyright_str, '') main_class_name = default_value(main_class_name, find_jar_mainclass(jar_file)) unique_signature = default_value(unique_signature, '????') jvm_arguments = string_to_plist_xmlarray_values(jvm_arguments) jvm_options = string_to_plist_xmlarray_values(jvm_options) jdk_xml,jdk_name,jdk_isfile = determine_jdk(jdk) if retina_screen: retina_screen = retina_support_string else: retina_screen = '' print('Packing {} into {}'.format(jar_file, os.path.abspath(app_full_path))) build_directory_structure(app_full_path) create_plist_file(os.path.join(app_full_path, 'Contents'), os.path.basename(icon), bundle_identifier, bundle_displayname, bundle_name,bundle_version,short_version_string,copyright_str, main_class_name, jvm_arguments, jvm_options, jdk_xml, unique_signature, retina_screen, executable) copy_base_files(app_full_path, icon, jar_file, jdk, jdk_isfile, executable, executable_file) print_final_file_info(icon, bundle_identifier, bundle_displayname, bundle_name, short_version_string, unique_signature, bundle_version, copyright_str, orig_jvm_options, main_class_name, jdk_name, retina_screen, use_screen_menu_bar, working_directory, executable) print("\n{} packaged to {}.".format(jar_file, os.path.abspath(app_full_path))) def parse_input(): parser = OptionParser() parser.add_option('-n', '--name', help='Package/Bundle name.', dest='bundle_name', type='string', default=None) parser.add_option('-d', '--display-name', help='Package/Bundle display name.', dest='bundle_displayname', type='string',default=None) parser.add_option('-i', '--icon',help='Icon (in .icns format). (Default: None)', dest='icon', type='string', default=None) parser.add_option('-b', '--bundle-identifier', help='Package/Bundle identifier (e.g. com.example.test) (Default is application name prefix by {}.'.format(DEFAULT_BUNDLE_IDENTIFIER_PREFIX), dest='bundle_identifier',type='string', default=None) parser.add_option('-v', '--version', help='Package/Bundle version (e.g. 1.0.0) (Default: {}).'.format(DEFAULT_VERSION),dest='bundle_version', type='string', default=DEFAULT_VERSION) parser.add_option('-s', '--short-version', help='Package/Bundle short version (see Apple\'s documentation on CFBundleShortVersionString) (Default: {}).'.format(DEFAULT_VERSION), dest='short_version_string',type='string', default=DEFAULT_VERSION) parser.add_option('-c', '--copyright',help='Package/Bundle copyright string (e.g. (c) 2015 Awesome Person) (Default: empty)',dest='copyright_str', type='string', default=None) parser.add_option('-u', '--unique-signature', help='4 Byte unique signature of your application (Default: {})'.format(DEFAULT_SIGNATURE),dest='signature', type='string', default=DEFAULT_SIGNATURE) parser.add_option('-m', '--main-class', help='Jar main class. 
Blank for auto-detection (usually right).',dest='main_class_name', type='string', default=None) parser.add_option('-r', '--runtime', help='JRE/JDK runtime to bundle. Can be a folder or a zip file. If none is given, the default on the system is used (default: None)',dest='jdk', type='string', default=None) parser.add_option('-j', '--jvm-options',help='Extra JVM options. Place one by one, separated by spaces, inside single quotes (e.g. -o \'-Xmx1024M -Xms256M\'). (Default: None)',dest='jvm_options', type='string', default=None) parser.add_option('-a', '--no-append-app-to-name', help='Do not try to append .app to the output file by default.', dest='auto_append_name', action='store_false') parser.add_option('-l', '--low-res-mode', help='Do not try to report retina-screen capabilities (use low resolution mode; by default high resolution mode is used).',dest='retina_screen', action='store_false') parser.add_option('-o', '--use-osx-menubar', help='Use OSX menu bar instead of Java menu bar (Default: False).', dest='use_screen_menu_bar', action='store_true') parser.add_option('-x', '--executable-file', help='Internal executable to launch. By default, JavaAppLauncher provided by jar2app is used.', dest='executable_file', type='string', default=None) parser.add_option('-e', '--executable-name', help='Name of the internal executable to launch (Default: %s).' % DEFAULT_EXECUTABLE_NAME, dest='executable', default='JavaAppLauncher') parser.add_option('-w','--working-directory', help='Set current working directory (user.dir) on launch (Default: $APP_ROOT/Contents).', dest='working_directory', type='string', default='$APP_ROOT/Contents') (options, args) = parser.parse_args() if len(args) == 2: input_file = args[0] output = args[1] elif len(args) > 2: parser.error('Extra arguments provided!') elif len(args) == 1: input_file = args[0] output = None else: parser.error('An input file is needed. Optionally, you can also provide an output file or directory. E.g.\n{} in.jar\n{} in.jar out.app\n{} in.jar out/'.format(sys.argv[0], sys.argv[0], sys.argv[0]) ) if options.auto_append_name == None: options.auto_append_name = True if options.retina_screen == None: options.retina_screen = True jvm_arguments = '' return input_file, output, options.icon, options.bundle_identifier, options.bundle_displayname, options.bundle_name,\ options.bundle_version, options.short_version_string, options.copyright_str, options.main_class_name,\ jvm_arguments, options.jvm_options, options.jdk, options.signature, options.auto_append_name,\ options.retina_screen, options.use_screen_menu_bar, options.working_directory, options.executable,\ options.executable_file def main(): print('jar2app %s, João Ricardo Lourenço, 2015-2017 <[email protected]>.' % VERSION) print('Github page: https://github.com/Jorl17/jar2app/') make_app(*parse_input()) main()
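# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original script): a minimal
# programmatic use of make_app(). Every file name below ("myapp.jar",
# "myapp.icns", "MyApp.app") is a hypothetical placeholder. Kept inside a
# function so it is never executed when this script runs.
# ---------------------------------------------------------------------------
def _example_programmatic_use():
    # Mirrors the command line: jar2app.py myapp.jar MyApp.app -j '-Xmx1024M -Xms256M'
    make_app('myapp.jar', output='MyApp.app',
             bundle_name='MyApp',
             icon='myapp.icns',
             jvm_options='-Xmx1024M -Xms256M',
             executable=DEFAULT_EXECUTABLE_NAME)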
gpl-2.0
722,115,594,481,827,200
48.382409
249
0.589306
false
rahuldhote/odoo
addons/website_quote/controllers/main.py
83
8996
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2013-Today OpenERP SA (<http://www.openerp.com>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## from openerp import SUPERUSER_ID from openerp.addons.web import http from openerp.addons.web.http import request import werkzeug import datetime import time from openerp.tools.translate import _ class sale_quote(http.Controller): @http.route([ "/quote/<int:order_id>", "/quote/<int:order_id>/<token>" ], type='http', auth="public", website=True) def view(self, order_id, token=None, message=False, **post): # use SUPERUSER_ID allow to access/view order for public user # only if he knows the private token order = request.registry.get('sale.order').browse(request.cr, token and SUPERUSER_ID or request.uid, order_id, request.context) now = time.strftime('%Y-%m-%d') if token: if token != order.access_token: return request.website.render('website.404') # Log only once a day if request.session.get('view_quote',False)!=now: request.session['view_quote'] = now body=_('Quotation viewed by customer') self.__message_post(body, order_id, type='comment') days = 0 if order.validity_date: days = (datetime.datetime.strptime(order.validity_date, '%Y-%m-%d') - datetime.datetime.now()).days + 1 values = { 'quotation': order, 'message': message and int(message) or False, 'option': bool(filter(lambda x: not x.line_id, order.options)), 'order_valid': (not order.validity_date) or (now <= order.validity_date), 'days_valid': days, } return request.website.render('website_quote.so_quotation', values) @http.route(['/quote/accept'], type='json', auth="public", website=True) def accept(self, order_id, token=None, signer=None, sign=None, **post): order_obj = request.registry.get('sale.order') order = order_obj.browse(request.cr, SUPERUSER_ID, order_id) if token != order.access_token: return request.website.render('website.404') attachments=sign and [('signature.png', sign.decode('base64'))] or [] order_obj.signal_workflow(request.cr, SUPERUSER_ID, [order_id], 'order_confirm', context=request.context) message = _('Order signed by %s') % (signer,) self.__message_post(message, order_id, type='comment', subtype='mt_comment', attachments=attachments) return True @http.route(['/quote/<int:order_id>/<token>/decline'], type='http', auth="public", website=True) def decline(self, order_id, token, **post): order_obj = request.registry.get('sale.order') order = order_obj.browse(request.cr, SUPERUSER_ID, order_id) if token != order.access_token: return request.website.render('website.404') request.registry.get('sale.order').action_cancel(request.cr, SUPERUSER_ID, [order_id]) message = post.get('decline_message') if message: self.__message_post(message, order_id, type='comment', subtype='mt_comment') 
return werkzeug.utils.redirect("/quote/%s/%s?message=2" % (order_id, token)) @http.route(['/quote/<int:order_id>/<token>/post'], type='http', auth="public", website=True) def post(self, order_id, token, **post): # use SUPERUSER_ID allow to access/view order for public user order_obj = request.registry.get('sale.order') order = order_obj.browse(request.cr, SUPERUSER_ID, order_id) message = post.get('comment') if token != order.access_token: return request.website.render('website.404') if message: self.__message_post(message, order_id, type='comment', subtype='mt_comment') return werkzeug.utils.redirect("/quote/%s/%s?message=1" % (order_id, token)) def __message_post(self, message, order_id, type='comment', subtype=False, attachments=[]): request.session.body = message cr, uid, context = request.cr, request.uid, request.context user = request.registry['res.users'].browse(cr, SUPERUSER_ID, uid, context=context) if 'body' in request.session and request.session.body: request.registry.get('sale.order').message_post(cr, SUPERUSER_ID, order_id, body=request.session.body, type=type, subtype=subtype, author_id=user.partner_id.id, context=context, attachments=attachments ) request.session.body = False return True @http.route(['/quote/update_line'], type='json', auth="public", website=True) def update(self, line_id, remove=False, unlink=False, order_id=None, token=None, **post): order = request.registry.get('sale.order').browse(request.cr, SUPERUSER_ID, int(order_id)) if token != order.access_token: return request.website.render('website.404') if order.state not in ('draft','sent'): return False line_id=int(line_id) if unlink: request.registry.get('sale.order.line').unlink(request.cr, SUPERUSER_ID, [line_id], context=request.context) return False number=(remove and -1 or 1) order_line_obj = request.registry.get('sale.order.line') order_line_val = order_line_obj.read(request.cr, SUPERUSER_ID, [line_id], [], context=request.context)[0] quantity = order_line_val['product_uom_qty'] + number order_line_obj.write(request.cr, SUPERUSER_ID, [line_id], {'product_uom_qty': (quantity)}, context=request.context) return [str(quantity), str(order.amount_total)] @http.route(["/quote/template/<model('sale.quote.template'):quote>"], type='http', auth="user", website=True) def template_view(self, quote, **post): values = { 'template': quote } return request.website.render('website_quote.so_template', values) @http.route(["/quote/add_line/<int:option_id>/<int:order_id>/<token>"], type='http', auth="public", website=True) def add(self, option_id, order_id, token, **post): vals = {} order = request.registry.get('sale.order').browse(request.cr, SUPERUSER_ID, order_id) if token != order.access_token: return request.website.render('website.404') if order.state not in ['draft', 'sent']: return request.website.render('website.http_error', {'status_code': 'Forbidden', 'status_message': _('You cannot add options to a confirmed order.')}) option_obj = request.registry.get('sale.order.option') option = option_obj.browse(request.cr, SUPERUSER_ID, option_id) res = request.registry.get('sale.order.line').product_id_change(request.cr, SUPERUSER_ID, order_id, False, option.product_id.id, option.quantity, option.uom_id.id, option.quantity, option.uom_id.id, option.name, order.partner_id.id, False, True, time.strftime('%Y-%m-%d'), False, order.fiscal_position.id, True, dict(request.context or {}, company_id=order.company_id.id)) vals = res.get('value', {}) if 'tax_id' in vals: vals['tax_id'] = [(6, 0, vals['tax_id'])] vals.update({ 
'price_unit': option.price_unit, 'website_description': option.website_description, 'name': option.name, 'order_id': order.id, 'product_id' : option.product_id.id, 'product_uos_qty': option.quantity, 'product_uos': option.uom_id.id, 'product_uom_qty': option.quantity, 'product_uom': option.uom_id.id, 'discount': option.discount, }) line = request.registry.get('sale.order.line').create(request.cr, SUPERUSER_ID, vals, context=request.context) option_obj.write(request.cr, SUPERUSER_ID, [option.id], {'line_id': line}, context=request.context) return werkzeug.utils.redirect("/quote/%s/%s#pricing" % (order.id, token))
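# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original controller): the access-token
# guard that every public route above repeats inline, written out on its own
# for clarity. This helper is hypothetical and is not wired into sale_quote.
# ---------------------------------------------------------------------------
def _quote_access_denied(order, token):
    """Return the rendered 404 page when the token does not match, else None."""
    if token != order.access_token:
        return request.website.render('website.404')
    return None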
agpl-3.0
-4,916,520,339,369,498,000
51.302326
162
0.615385
false
francof2a/APC
sources/dataset.py
1
4426
# Module related to the dataset

import os
import urllib
import zipfile
import numpy as np


def download(dataset_name = 'UCI HAR'):
    """Download database to local folder."""

    # Init
    dataset_valid = False
    dataset_folder = "./"
    download_folder = os.path.abspath("../dataset/donwload/") + "/"

    # Process dataset identifier
    if dataset_name == 'UCI HAR':
        dataset_folder = os.path.abspath("../dataset/UCI HAR/") + "/"
        dataset_name = "UCI HAR Dataset"
        dataset_url = "https://archive.ics.uci.edu/ml/machine-learning-databases/00240/UCI%20HAR%20Dataset.zip"
        dataset_file = "UCI HAR Dataset.zip"
        dataset_names_url = "https://archive.ics.uci.edu/ml/machine-learning-databases/00240/UCI%20HAR%20Dataset.names"
        dataset_names_file = "UCI HAR Dataset.names"
        dataset_valid = True
    else:
        print "Dataset's identifier not valid!"

    # Check if database option is valid
    if dataset_valid == True:
        # Check folder for downloading
        if not os.path.exists(download_folder):
            os.makedirs(download_folder)

        # Check if dataset was downloaded previously
        if not os.path.exists(download_folder + dataset_file):
            # Downloading
            try:
                print "Downloading dataset from {}...".format(dataset_url)
                ds_file = urllib.URLopener()
                ds_file.retrieve(dataset_url, download_folder + dataset_file)
                ds_file = urllib.URLopener()
                ds_file.retrieve(dataset_names_url, download_folder + dataset_names_file)
                print "Dataset downloaded in {}".format(os.path.abspath(download_folder))
            except ValueError:
                print "Dataset couldn't be downloaded."
        else:
            print "Dataset is already downloaded."

        # Check dataset folder in project structure
        if not os.path.exists(dataset_folder):
            os.makedirs(dataset_folder)

            # unzip files
            try:
                print "Unzipping files..."
                z = zipfile.ZipFile(download_folder + dataset_file, 'r')
                z.extractall(dataset_folder)
                z.close()
                print "Files unzipped in {}".format(dataset_folder)
            except:
                print "Error unzipping files."
        else:
            print "Dataset folder {} exists. File is not unzipped".format(dataset_folder)


def get_data(filenames, dataset_name = 'UCI HAR', print_on = False):
    """ Extract the data from database files """
    # DataTrain [variable, #sample, samples]
    data = []
    for filename in filenames:
        if print_on:
            print filename
        with open(filename) as f:
            sensorData = []
            for row in f:
                if dataset_name == 'UCI HAR':
                    row_formated = row.replace('  ', ' ').strip()
                    str_samples = row_formated.split(' ')
                    samples = np.fromstring(row_formated, dtype=np.float32, sep=' ')
                    sensorData.append(samples)
            sensorData = np.array(sensorData)
        data.append(sensorData)

    data = np.array(data)
    return data


def get_labels(filename, dataset_name = 'UCI HAR', print_on = False):
    """ Extract the labels from database files """
    dataLabel = []
    with open(filename) as f:
        if print_on:
            print filename
        for row in f:
            dataLabel.append(np.float(row.strip()))

    dataLabel = np.array(dataLabel)
    return dataLabel


def encode_onehot(labels):
    """ Encode to one-hot the labels' array """
    LabelMax = int(np.max(labels))
    labels_coded = np.zeros([len(labels), LabelMax + 1])

    for idx, label in enumerate(labels):
        labels_coded[idx, int(labels[idx])] = 1

    return labels_coded


def make_labels_dictionary(descFilename):
    # labelNum labelDescription
    with open(descFilename) as f:
        lines = []
        label_dict = {}
        for row in f:
            line = row.strip().split(' ')
            label_dict[float(line[0])] = line[1]
    return label_dict
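# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): one plausible way the
# helpers above fit together for the UCI HAR dataset. The paths assume the
# layout produced by download(), where the archive expands under
# "../dataset/UCI HAR/UCI HAR Dataset/"; adjust them if your layout differs.
# ---------------------------------------------------------------------------
def _example_uci_har_pipeline():
    base = os.path.abspath("../dataset/UCI HAR/UCI HAR Dataset/")
    download('UCI HAR')                                     # fetch and unzip if needed
    data = get_data([os.path.join(base, 'train', 'X_train.txt')])
    labels = get_labels(os.path.join(base, 'train', 'y_train.txt'))
    onehot = encode_onehot(labels)                          # shape: (len(labels), max label + 1)
    names = make_labels_dictionary(os.path.join(base, 'activity_labels.txt'))
    return data, onehot, names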
gpl-3.0
-8,843,835,220,582,871,000
28.317881
119
0.548577
false
kamcpp/tensorflow
tensorflow/python/framework/contrib_test.py
23
1521
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test that the contrib module shows up properly."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import inspect

from tensorflow.python.platform import googletest


class ContribTest(googletest.TestCase):

  def testContrib(self):
    # pylint: disable=g-import-not-at-top
    import tensorflow as tf
    _ = tf.contrib.layers  # `tf.contrib` is loaded lazily on first use.
    assert inspect.ismodule(tf.contrib)

  def testLayers(self):
    # pylint: disable=g-import-not-at-top
    import tensorflow as tf
    assert inspect.ismodule(tf.contrib.layers)

  def testLinearOptimizer(self):
    # pylint: disable=g-import-not-at-top
    import tensorflow as tf
    assert inspect.ismodule(tf.contrib.linear_optimizer)


if __name__ == '__main__':
  googletest.main()
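# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original test): the same lazy-import
# check applied to another contrib submodule. 'losses' is only an example
# name and may not be present in every TensorFlow build, so the test is left
# commented out.
# ---------------------------------------------------------------------------
#   def testLosses(self):
#     # pylint: disable=g-import-not-at-top
#     import tensorflow as tf
#     assert inspect.ismodule(tf.contrib.losses)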
apache-2.0
-652,221,414,502,769,800
33.568182
80
0.701512
false
google/rysim
python/results_analyzer/Main.py
1
119456
# Copyright 2014 The RySim Authors. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from abc import ABCMeta, abstractmethod from array import * import collections import gflags import numpy import os import pprint import re import scipy.integrate import scipy.interpolate import sqlite3 import sys from matplotlib import pylab import pandas as pd import statsmodels.formula.api as sm from mpl_toolkits.mplot3d import Axes3D import matplotlib.pyplot as pyplot # Global state experiment_db = None event_count_buckets = [5000, 10000, 20000, 40000, 50000] bucketing_factor = 0.001 kernel_results_table = None kernel_machine_results_table = None kernel_machine_type_results_table = None fit_comparison_table = dict() # gflag defn's and registration FLAGS = gflags.FLAGS gflags.DEFINE_string('root_dir', '.', 'Root directory to start searching and where to store the database. Defaults to the current ' 'directory') gflags.DEFINE_string('output_db', 'experiment.db', 'Name of the database file that should be created. If the file already exists it will be ' 'overwritten. Defaults to "experiment.db"') gflags.DEFINE_bool('read_inputs', False, 'Controls if the application should re-read the inputs. If so the output DB will be clobbered ' 'entirely. If not only the analysis tables will be removed') class DBWrapper(object): def __init__(self, db_filename): self.db = sqlite3.connect(db_filename, check_same_thread=False) def commit(self): self.db.commit() def execute_safe(self, cmd): self.execute(cmd) self.commit() def execute(self, cmd): self.db.execute(cmd) def select(self, cmd): return self.db.execute(cmd) def cleanup(self): self.db.commit() self.db.close() self.db = None class ResultsTable(object): filtered_table_entry = collections.namedtuple('FilteredTableEntry', ['event_count', 'event_count_std', 'agents', 'agents_std', 'connections', 'connections_std', 'cpu', 'cpu_std', 'maxmem', 'maxmem_std']) filtered_entry = collections.namedtuple('FilteredEntry', ['mean', 'std']) def __init__(self): self.raw_table = dict() self.filtered_table = dict() def get_keys(self): return self.filtered_table.keys() def add_entry(self, key, bucket, model, event_count, agents, connections, cpu, maxmem): if key not in self.raw_table.keys(): self.raw_table[key] = dict() if model not in self.raw_table[key].keys(): self.raw_table[key][model] = dict() if agents not in self.raw_table[key][model].keys(): self.raw_table[key][model][agents] = dict() if connections not in self.raw_table[key][model][agents].keys(): self.raw_table[key][model][agents][connections] = dict() if bucket not in self.raw_table[key][model][agents][connections].keys(): self.raw_table[key][model][agents][connections][bucket] = dict() self.raw_table[key][model][agents][connections][bucket]["cpu"] = list() self.raw_table[key][model][agents][connections][bucket]["maxmem"] = list() self.raw_table[key][model][agents][connections][bucket]["event_count"] = list() self.raw_table[key][model][agents][connections][bucket]["cpu"].append(cpu) 
self.raw_table[key][model][agents][connections][bucket]["maxmem"].append(maxmem) self.raw_table[key][model][agents][connections][bucket]["event_count"].append(event_count) def create_filtered_table(self): self.filtered_table = dict() for key in self.raw_table.keys(): self.filtered_table[key] = list() for model in self.raw_table[key].keys(): for agents in self.raw_table[key][model].keys(): for connections in self.raw_table[key][model][agents].keys(): for bucket in self.raw_table[key][model][agents][connections].keys(): if len(self.raw_table[key][model][agents][connections][bucket]["event_count"]) is 0: continue event_count = ResultsTable.filter_bucket_entry( self.raw_table[key][model][agents][connections][bucket]["event_count"]) cpu = ResultsTable.filter_bucket_entry( self.raw_table[key][model][agents][connections][bucket]["cpu"]) maxmem = ResultsTable.filter_bucket_entry( self.raw_table[key][model][agents][connections][bucket]["maxmem"]) self.filtered_table[key].append(ResultsTable.filtered_table_entry( event_count=event_count.mean, event_count_std=event_count.std, agents=agents, agents_std=0, connections=connections, connections_std=0, cpu=cpu.mean, cpu_std=cpu.std, maxmem=maxmem.mean, maxmem_std=maxmem.std)) @staticmethod def filter_bucket_entry(entry): return ResultsTable.filtered_entry(mean=numpy.mean(entry), std=numpy.std(entry)) def get_entries_for_key(self, key): return self.filtered_table[key] def get_event_count_lists_for_key(self, key): key_data = self.get_entries_for_key(key) return ResultsTable.filtered_entry(mean=[row[0] for row in key_data], std=[row[1] for row in key_data]) def get_agents_lists_for_key(self, key): key_data = self.get_entries_for_key(key) return ResultsTable.filtered_entry(mean=[row[2] for row in key_data], std=[row[3] for row in key_data]) def get_connections_lists_for_key(self, key): key_data = self.get_entries_for_key(key) return ResultsTable.filtered_entry(mean=[row[4] for row in key_data], std=[row[5] for row in key_data]) def get_cpu_lists_for_key(self, key): key_data = self.get_entries_for_key(key) return ResultsTable.filtered_entry(mean=[row[6] for row in key_data], std=[row[7] for row in key_data]) def get_maxmem_lists_for_key(self, key): key_data = self.get_entries_for_key(key) return ResultsTable.filtered_entry(mean=[row[8] for row in key_data], std=[row[9] for row in key_data]) class ScoreTable(object): def __init__(self, kernels, tag): global fit_comparison_table fit_comparison_table[tag] = 0.0 self.tag = tag self.r2_values = list() self.kernels = kernels self.table = dict() self.total_count = 0 self.total_score_idx = len(kernels) for kernel in kernels: self.table[kernel] = array('I', [0] * (1 + len(kernels))) def get_table(self): return self.table def get_total_count(self): return self.total_count def add_1d_fit_score(self, fits): self.total_count += 1 f_list = list() for kernel in fits.keys(): slope = fits[kernel][0][0] intercept = fits[kernel][1] self.r2_values.append(float(fits[kernel][2])) x_min = float(fits[kernel][3][0]) x_max = float(fits[kernel][4][0]) f_list.append((scipy.integrate.quad(lambda x: slope * x + intercept, x_min, x_max)[0], kernel[0])) f_list.sort() for i in range(0, len(f_list)): self.table[f_list[i][1]][i] += 1 self.table[f_list[i][1]][self.total_score_idx] += len(f_list) - i global fit_comparison_table fit_comparison_table[self.tag] = numpy.mean(self.r2_values) def add_2d_fit_score(self, fits): self.total_count += 1 f_list = list() for kernel in fits.keys(): slope_x = fits[kernel][0][0] slope_y = fits[kernel][0][1] 
intercept = fits[kernel][1] self.r2_values.append(float(fits[kernel][2])) x_min = float(fits[kernel][3][0]) x_max = float(fits[kernel][4][0]) y_min = float(fits[kernel][3][1]) y_max = float(fits[kernel][4][1]) f_list.append((scipy.integrate.dblquad(lambda x, y: slope_x * x + slope_y * y + intercept, x_min, x_max, lambda x: y_min, lambda x: y_max)[0], kernel[0])) f_list.sort() for i in range(0, len(f_list)): self.table[f_list[i][1]][i] += 1 self.table[f_list[i][1]][self.total_score_idx] += len(f_list) - i global fit_comparison_table fit_comparison_table[self.tag] = numpy.mean(self.r2_values) def add_3d_fit_score(self, fits): self.total_count += 1 f_list = list() for kernel in fits.keys(): slope_x = fits[kernel][0][0] slope_y = fits[kernel][0][1] slope_z = fits[kernel][0][2] intercept = fits[kernel][1] self.r2_values.append(float(fits[kernel][2])) x_min = float(fits[kernel][3][0]) x_max = float(fits[kernel][4][0]) y_min = float(fits[kernel][3][1]) y_max = float(fits[kernel][4][1]) z_min = float(fits[kernel][3][2]) z_max = float(fits[kernel][4][2]) f_list.append((scipy.integrate.tplquad(lambda x, y, z: slope_x * x + slope_y * y + slope_z * z + intercept, x_min, x_max, lambda x: y_min, lambda x: y_max, lambda x, y: z_min, lambda x, y: z_max)[0], kernel[0])) f_list.sort() for i in range(0, len(f_list)): self.table[f_list[i][1]][i] += 1 self.table[f_list[i][1]][self.total_score_idx] += len(f_list) - i global fit_comparison_table fit_comparison_table[self.tag] = numpy.mean(self.r2_values) class MachineComparisonTable(object): machine_core_counts = {'m3.large': 2, 'm3.2xlarge': 8, 'm3.medium': 1, 'm3.xlarge': 4} def __init__(self, kernels): self.per_kernel_means = dict() self.per_kernel_data = dict() self.kernels = kernels self.table = dict() self.per_kernel_splines = dict() for kernel in self.kernels: self.per_kernel_means[kernel] = dict() self.per_kernel_data[kernel] = dict() self.box_props = dict(linewidth=0.5, color='DimGray', markeredgecolor='DimGray') def add_1d_fit_score(self, fits, machine): machine_entry = self.get_machine_entry(machine) for kernel in fits.keys(): slope = fits[kernel][0][0] intercept = fits[kernel][1] x_min = float(fits[kernel][3][0]) x_max = float(fits[kernel][4][0]) machine_entry[kernel[0]].append(scipy.integrate.quad(lambda x: slope * x + intercept, x_min, x_max)[0]) def add_2d_fit_score(self, fits, machine): machine_entry = self.get_machine_entry(machine) for kernel in fits.keys(): slope_x = fits[kernel][0][0] slope_y = fits[kernel][0][1] intercept = fits[kernel][1] x_min = float(fits[kernel][3][0]) x_max = float(fits[kernel][4][0]) y_min = float(fits[kernel][3][1]) y_max = float(fits[kernel][4][1]) machine_entry[kernel[0]].append(scipy.integrate.dblquad(lambda x, y: slope_x * x + slope_y * y + intercept, x_min, x_max, lambda x: y_min, lambda x: y_max)[0]) def add_3d_fit_score(self, fits, machine): machine_entry = self.get_machine_entry(machine) for kernel in fits.keys(): slope_x = fits[kernel][0][0] slope_y = fits[kernel][0][1] slope_z = fits[kernel][0][2] intercept = fits[kernel][1] x_min = float(fits[kernel][3][0]) x_max = float(fits[kernel][4][0]) y_min = float(fits[kernel][3][1]) y_max = float(fits[kernel][4][1]) z_min = float(fits[kernel][3][2]) z_max = float(fits[kernel][4][2]) machine_entry[kernel[0]].append(scipy.integrate.tplquad( lambda x, y, z: slope_x * x + slope_y * y + slope_z * z + intercept, x_min, x_max, lambda x: y_min, lambda x: y_max, lambda x, y: z_min, lambda x, y: z_max)[0]) def get_machine_entry(self, machine): if machine in self.table: return 
self.table[machine] else: self.table[machine] = dict() for kernel in self.kernels: self.table[machine][kernel] = list() return self.table[machine] def generate_per_kernel_means(self): for machine in self.table.keys(): for kernel in self.kernels: self.per_kernel_means[kernel][MachineComparisonTable.machine_core_counts[machine]] = \ numpy.mean(self.table[machine][kernel]) def generate_per_kernel_data(self): for machine in self.table.keys(): for kernel in self.kernels: self.per_kernel_data[kernel][MachineComparisonTable.machine_core_counts[machine]] = \ self.table[machine][kernel] def generate_mean_list(self, kernel): mean_list = list() for cores, value in self.per_kernel_means[kernel].iteritems(): mean_list.append((cores, value)) mean_list.sort() return mean_list def generate_1d_plot(self, dependent_caption, dependent_filename, independent_caption, independent_filename, kernel, key_label_filename): data_list = self.generate_mean_list(kernel) x_list = list() y_list = list() for entry in data_list: x_list.append(entry[0]) y_list.append(entry[1]) x_data = numpy.array(x_list) y_data = numpy.array(y_list) x_new = numpy.linspace(x_data.min(), x_data.max(), 300) y_new = scipy.interpolate.spline(x_data, y_data, x_new) self.per_kernel_splines[kernel] = dict() self.per_kernel_splines[kernel]['x_data'] = x_data self.per_kernel_splines[kernel]['y_data'] = y_data self.per_kernel_splines[kernel]['x_new'] = x_new self.per_kernel_splines[kernel]['y_new'] = y_new GenericArtifacts.set_figure_params() filename_base = "machine_comparison_{}_vs_{}_{}_{}".format(independent_filename, dependent_filename, str(kernel).lower(), key_label_filename) plot_filename = os.path.join(FLAGS.root_dir, "{}_plot.eps".format(filename_base)) print "\tGenerating {}".format(plot_filename) pylab.figure(1) pylab.clf() pylab.plot(x_data, y_data, linestyle='-', color='k') pylab.scatter(x_data, y_data, marker='s', color='k', label=kernel) pylab.autoscale() pylab.xlabel("Number of Cores") pylab.ylabel(dependent_caption) pylab.legend(loc='upper center', bbox_to_anchor=(0.5, -0.10), ncol=4) pylab.savefig(plot_filename, bbox_inches='tight', orientation='portrait') caption = "Plot of Machine Comparison of {} for {} vs {}".format(kernel, independent_caption, dependent_caption) tex_filename = os.path.join(FLAGS.root_dir, "{}_plot.tex".format(filename_base)) print "\tGenerating {}".format(tex_filename) tex_figure_path = os.path.join("figures", "auto", "{}_plot.eps".format(filename_base)) output_latex = r"""\begin{figure} \centering """ output_latex += "\\includegraphics{%s}\n" % tex_figure_path output_latex += "\\caption{%s}\n" % caption output_latex += "\\label{fig:%s}\n" % filename_base output_latex += r"""\end{figure}""" with open(tex_filename, 'w') as f: f.write(output_latex) def generate_box_whisker_plot(self, dependent_caption, dependent_filename, independent_caption, independent_filename, kernel, key_label_filename): positions = self.per_kernel_data[kernel].keys() positions.sort() box_data = list() for position in positions: box_data.append(self.per_kernel_data[kernel][position]) x_data = self.per_kernel_splines[kernel]['x_data'] y_data = self.per_kernel_splines[kernel]['y_data'] GenericArtifacts.set_figure_params() filename_base = "machine_comparison_box_{}_vs_{}_{}_{}".format(independent_filename, dependent_filename, str(kernel).lower(), key_label_filename) plot_filename = os.path.join(FLAGS.root_dir, "{}_bwplot.eps".format(filename_base)) print "\tGenerating {}".format(plot_filename) pylab.figure(1) pylab.clf() flier_props = 
self.box_props.copy() flier_props['marker'] = 's' pylab.boxplot(x=box_data, positions=positions, boxprops=self.box_props, whiskerprops=self.box_props, capprops=self.box_props, flierprops=flier_props, medianprops=self.box_props, meanprops=self.box_props) pylab.plot(x_data, y_data, linestyle='-', color='k') pylab.scatter(x_data, y_data, marker='s', color='k', label=kernel) pylab.autoscale() pylab.xlabel("Number of Cores") pylab.ylabel(dependent_caption) pylab.legend(loc='upper center', bbox_to_anchor=(0.5, -0.10), ncol=4) pylab.savefig(plot_filename, bbox_inches='tight', orientation='portrait') caption = "Box \& Whisker Plot of Machine Comparison of {} for {} vs {}".format(kernel, independent_caption, dependent_caption) tex_filename = os.path.join(FLAGS.root_dir, "{}_bwplot.tex".format(filename_base)) print "\tGenerating {}".format(tex_filename) tex_figure_path = os.path.join("figures", "auto", "{}_bwplot.eps".format(filename_base)) output_latex = r"""\begin{figure} \centering """ output_latex += "\\includegraphics{%s}\n" % tex_figure_path output_latex += "\\caption{%s}\n" % caption output_latex += "\\label{fig:%s}\n" % filename_base output_latex += r"""\end{figure}""" with open(tex_filename, 'w') as f: f.write(output_latex) def generate_table(self, dependent_caption, dependent_filename, independent_caption, independent_filename, kernel, key_label_caption, key_label_filename): filename_base = "machine_comparison_{}_vs_{}_{}_{}".format(independent_filename, dependent_filename, str(kernel).lower(), key_label_filename) tex_filename = os.path.join(FLAGS.root_dir, "{}_table.tex".format(filename_base)) print "\tGenerating {}".format(tex_filename) output_latex = r"""\begin{table}[h] \centering """ output_latex += r"""\begin{tabular}{|c|c|} \hline """ output_latex += r"""Cores & Score \\ \hline """ for entry in self.generate_mean_list(kernel): cores = entry[0] score = entry[1] output_latex += "%d & %.4e \\\\ \n" % (cores, score) output_latex += r"""\hline \end{tabular} """ output_latex += "\\caption{Machine Comparison of %s for %s vs %s in %s}\n" % (kernel, independent_caption, dependent_caption, key_label_caption) output_latex += "\\label{tab:%s}\n" % filename_base output_latex += r"""\end{table}""" with open(tex_filename, 'w') as f: f.write(output_latex) def generate_multiline_plot(self, dependent_caption, dependent_filename, independent_caption, independent_filename, key_label_filename): filename_base = "machine_comparison_{}_vs_{}_{}".format(independent_filename, dependent_filename, key_label_filename) plot_filename = os.path.join(FLAGS.root_dir, "{}_plot.eps".format(filename_base)) print "\tGenerating {}".format(plot_filename) pylab.figure(1) pylab.clf() markers = ['v', '^', 's', 'D', 'x', '*', 'h'] markers_count = 0 for kernel in self.kernels: x_data = self.per_kernel_splines[kernel]['x_data'] y_data = self.per_kernel_splines[kernel]['y_data'] pylab.plot(x_data, y_data, linestyle='-', color='k') pylab.scatter(x_data, y_data, marker=markers[markers_count], color='k', label=kernel) markers_count += 1 pylab.autoscale() pylab.xlabel("Number of Cores") pylab.ylabel(dependent_caption) pylab.legend(loc='upper center', bbox_to_anchor=(0.5, -0.10), ncol=4) pylab.savefig(plot_filename, bbox_inches='tight', orientation='portrait') caption = "Multi-line Plot of Machine Comparison for {} vs {}".format(independent_caption, dependent_caption) tex_filename = os.path.join(FLAGS.root_dir, "{}_plot.tex".format(filename_base)) print "\tGenerating {}".format(tex_filename) tex_figure_path = os.path.join("figures", 
"auto", "{}_plot.eps".format(filename_base)) output_latex = r"""\begin{figure} \centering """ output_latex += "\\includegraphics{%s}\n" % tex_figure_path output_latex += "\\caption{%s}\n" % caption output_latex += "\\label{fig:%s}\n" % filename_base output_latex += r"""\end{figure}""" with open(tex_filename, 'w') as f: f.write(output_latex) def generate_multiline_box_whisker_plot(self, dependent_caption, dependent_filename, independent_caption, independent_filename, key_label_filename): filename_base = "machine_comparison_box_{}_vs_{}_{}".format(independent_filename, dependent_filename, key_label_filename) plot_filename = os.path.join(FLAGS.root_dir, "{}_bwplot.eps".format(filename_base)) print "\tGenerating {}".format(plot_filename) pylab.figure(1) pylab.clf() markers = ['v', '^', 's', 'D', 'x', '*', 'h'] markers_count = 0 for kernel in self.kernels: x_data = self.per_kernel_splines[kernel]['x_data'] y_data = self.per_kernel_splines[kernel]['y_data'] positions = self.per_kernel_data[kernel].keys() positions.sort() box_data = list() for position in positions: box_data.append(self.per_kernel_data[kernel][position]) flier_props = self.box_props.copy() flier_props['marker'] = markers[markers_count] width = 0.1 * float(markers_count + 1) whisker_props = self.box_props.copy() whisker_props['linestyle'] = 'none' pylab.boxplot(x=box_data, positions=positions, widths=width, boxprops=self.box_props, whiskerprops=whisker_props, showcaps=False, showfliers=False, medianprops=self.box_props, meanprops=self.box_props) pylab.plot(x_data, y_data, linestyle='-', color='k') pylab.scatter(x_data, y_data, marker=markers[markers_count], color='k', label=kernel) markers_count += 1 pylab.autoscale() pylab.xlabel("Number of Cores") pylab.ylabel(dependent_caption) pylab.legend(loc='upper center', bbox_to_anchor=(0.5, -0.10), ncol=4) pylab.savefig(plot_filename, bbox_inches='tight', orientation='portrait') caption = "Multi-line Box \& Whisker Plot of Machine Comparison for {} vs {}".format(independent_caption, dependent_caption) tex_filename = os.path.join(FLAGS.root_dir, "{}_bwplot.tex".format(filename_base)) print "\tGenerating {}".format(tex_filename) tex_figure_path = os.path.join("figures", "auto", "{}_bwplot.eps".format(filename_base)) output_latex = r"""\begin{figure} \centering """ output_latex += "\\includegraphics{%s}\n" % tex_figure_path output_latex += "\\caption{%s}\n" % caption output_latex += "\\label{fig:%s}\n" % filename_base output_latex += r"""\end{figure}""" with open(tex_filename, 'w') as f: f.write(output_latex) def generate_artifacts(self, key_label_caption, key_label_filename, independent_caption, independent_filename, dependent_caption, dependent_filename): self.generate_per_kernel_means() self.generate_per_kernel_data() for kernel in self.kernels: self.generate_1d_plot(dependent_caption, dependent_filename, independent_caption, independent_filename, kernel, key_label_filename) self.generate_box_whisker_plot(dependent_caption, dependent_filename, independent_caption, independent_filename, kernel, key_label_filename) self.generate_table(dependent_caption, dependent_filename, independent_caption, independent_filename, kernel, key_label_caption, key_label_filename) self.generate_multiline_plot(dependent_caption, dependent_filename, independent_caption, independent_filename, key_label_filename) self.generate_multiline_box_whisker_plot(dependent_caption, dependent_filename, independent_caption, independent_filename, key_label_filename) class GenericArtifacts: __metaclass__ = ABCMeta 
linear_regression = collections.namedtuple('LinearRegression', ['slope', 'intercept', 'r_squared', 'min', 'max']) def __init__(self, results_table, key_label_tuple): self.results_table = results_table self.key_label_tuple = key_label_tuple self.keys = self.results_table.get_keys() self.sub_key_label_tuple = None if len(self.key_label_tuple) is 1 else self.key_label_tuple[1:] self.sub_keys = None if not self.sub_key_label_tuple else set() self.kernels = set() self.cpu_ranges = dict() self.maxmem_ranges = dict() self.event_count_ranges = dict() self.agents_ranges = dict() self.connections_ranges = dict() self.event_count_vs_cpu_fits = dict() self.event_count_vs_maxmem_fits = dict() self.agents_vs_cpu_fits = dict() self.agents_vs_maxmem_fits = dict() self.connections_vs_cpu_fits = dict() self.connections_vs_maxmem_fits = dict() self.event_count_and_agents_vs_cpu_fits = dict() self.event_count_and_agents_vs_maxmem_fits = dict() self.event_count_and_connections_vs_cpu_fits = dict() self.event_count_and_connections_vs_maxmem_fits = dict() self.agents_and_connections_vs_cpu_fits = dict() self.agents_and_connections_vs_maxmem_fits = dict() self.event_count_and_agents_and_connections_vs_cpu_fits = dict() self.event_count_and_agents_and_connections_vs_maxmem_fits = dict() for key in self.keys: self.calculate_fits_for_key(key) self.kernels.add(key[0]) if self.sub_keys is not None: self.sub_keys.add(key[1:]) def calculate_fits_for_key(self, key): self.cpu_ranges[key] = self.results_table.get_cpu_lists_for_key(key).mean self.maxmem_ranges[key] = self.results_table.get_maxmem_lists_for_key(key).mean self.event_count_ranges[key] = self.results_table.get_event_count_lists_for_key(key).mean self.agents_ranges[key] = self.results_table.get_event_count_lists_for_key(key).mean self.connections_ranges[key] = self.results_table.get_event_count_lists_for_key(key).mean self.event_count_vs_cpu_fits[key] = GenericArtifacts.calculate_linear_regression_1d( self.event_count_ranges[key], self.cpu_ranges[key]) self.event_count_vs_maxmem_fits[key] = GenericArtifacts.calculate_linear_regression_1d( self.event_count_ranges[key], self.maxmem_ranges[key]) self.agents_vs_cpu_fits[key] = GenericArtifacts.calculate_linear_regression_1d( self.agents_ranges[key], self.cpu_ranges[key]) self.agents_vs_maxmem_fits[key] = GenericArtifacts.calculate_linear_regression_1d( self.agents_ranges[key], self.maxmem_ranges[key]) self.connections_vs_cpu_fits[key] = GenericArtifacts.calculate_linear_regression_1d( self.connections_ranges[key], self.cpu_ranges[key]) self.connections_vs_maxmem_fits[key] = GenericArtifacts.calculate_linear_regression_1d( self.connections_ranges[key], self.maxmem_ranges[key]) self.event_count_and_agents_vs_cpu_fits[key] = GenericArtifacts.calculate_linear_regression_2d( self.event_count_ranges[key], self.agents_ranges[key], self.cpu_ranges[key]) self.event_count_and_agents_vs_maxmem_fits[key] = GenericArtifacts.calculate_linear_regression_2d( self.event_count_ranges[key], self.agents_ranges[key], self.maxmem_ranges[key]) self.event_count_and_connections_vs_cpu_fits[key] = GenericArtifacts.calculate_linear_regression_2d( self.event_count_ranges[key], self.connections_ranges[key], self.cpu_ranges[key]) self.event_count_and_connections_vs_maxmem_fits[key] = GenericArtifacts.calculate_linear_regression_2d( self.event_count_ranges[key], self.connections_ranges[key], self.maxmem_ranges[key]) self.agents_and_connections_vs_cpu_fits[key] = GenericArtifacts.calculate_linear_regression_2d( self.agents_ranges[key], 
self.connections_ranges[key], self.cpu_ranges[key]) self.agents_and_connections_vs_maxmem_fits[key] = GenericArtifacts.calculate_linear_regression_2d( self.agents_ranges[key], self.connections_ranges[key], self.maxmem_ranges[key]) self.event_count_and_agents_and_connections_vs_cpu_fits[key] = \ GenericArtifacts.calculate_linear_regression_3d( self.event_count_ranges[key], self.agents_ranges[key], self.connections_ranges[key], self.cpu_ranges[key]) self.event_count_and_agents_and_connections_vs_maxmem_fits[key] = \ GenericArtifacts.calculate_linear_regression_3d( self.event_count_ranges[key], self.agents_ranges[key], self.connections_ranges[key], self.maxmem_ranges[key]) def filter_dict_for_sub_key(self, raw_dict, sub_key): return_dict = dict() for kernel in iter(self.kernels): return_dict[(kernel,)] = raw_dict[(kernel,) + sub_key] return return_dict @staticmethod def key_tuple_to_caption_string(key_tuple, capitialize=False): return_string = "" for entry in key_tuple: if not capitialize: return_string += "{} and ".format(entry) else: return_string += "{} and ".format(str(entry).capitalize()) return return_string[:-5] @staticmethod def key_tuple_to_filename_string(key_tuple, lowercase=False): return_string = "" for entry in key_tuple: if not lowercase: return_string += "{}_".format(entry) else: return_string += "{}_".format(str(entry).lower()) return return_string[:-1] @abstractmethod def generate_multiline_plots(self): pass @abstractmethod def generate_fit_tables(self): pass @abstractmethod def generate_score_tables(self): pass @abstractmethod def generate_machine_comparison_tables(self): pass @staticmethod def set_figure_params(): fig_width = 7.5 # width in inches fig_height = 3.75 # height in inches fig_size = [fig_width, fig_height] fig_params = {'backend': 'ps', 'axes.labelsize': 8, 'text.fontsize': 8, 'legend.fontsize': 8, 'xtick.labelsize': 6, 'ytick.labelsize': 6, 'text.usetex': True, 'figure.figsize': fig_size} pylab.rcParams.update(fig_params) @staticmethod def calculate_linear_regression_1d(x_list, f_list): results = sm.ols(formula="F ~ X", data=({'F': f_list, 'X': x_list})).fit() slope = list() slope.append(results.params['X']) min_value = list() min_value.append(min(x_list)) max_value = list() max_value.append(max(x_list)) intercept = results.params['Intercept'] r_squared = results.rsquared return GenericArtifacts.linear_regression(slope=slope, intercept=intercept, r_squared=r_squared, min=min_value, max=max_value) @staticmethod def calculate_linear_regression_2d(x_list, y_list, f_list): results = sm.ols(formula="F ~ X + Y", data=({'F': f_list, 'X': x_list, 'Y': y_list})).fit() slope = list() slope.append(results.params['X']) slope.append(results.params['Y']) min_value = list() min_value.append(min(x_list)) min_value.append(min(y_list)) max_value = list() max_value.append(max(x_list)) max_value.append(max(y_list)) intercept = results.params['Intercept'] r_squared = results.rsquared return GenericArtifacts.linear_regression(slope=slope, intercept=intercept, r_squared=r_squared, min=min_value, max=max_value) @staticmethod def calculate_linear_regression_3d(x_list, y_list, z_list, f_list): results = sm.ols(formula="F ~ X + Y + Z", data=({'F': f_list, 'X': x_list, 'Y': y_list, 'Z': z_list})).fit() slope = list() slope.append(results.params['X']) slope.append(results.params['Y']) slope.append(results.params['Z']) min_value = list() min_value.append(min(x_list)) min_value.append(min(y_list)) min_value.append(min(z_list)) max_value = list() max_value.append(max(x_list)) 
max_value.append(max(y_list)) max_value.append(max(z_list)) intercept = results.params['Intercept'] r_squared = results.rsquared return GenericArtifacts.linear_regression(slope=slope, intercept=intercept, r_squared=r_squared, min=min_value, max=max_value) @staticmethod def generate_1d_multiline_plot(fits, x_ranges, x_label, f_label, caption, filename_base): markers = ['v', '^', 's', 'D', 'x', '*', 'h'] GenericArtifacts.set_figure_params() filename_base = filename_base.replace('.', '_') plot_filename = os.path.join(FLAGS.root_dir, "{}.eps".format(filename_base)) print "\tGenerating {}".format(plot_filename) pylab.figure(1) pylab.clf() marker_count = 0 for kernel in fits.keys(): x_list = [0] x_max = max(x_ranges[kernel]) x_list.append(x_max / 2) x_list.append(x_max) f_fit = lambda x: x * fits[kernel][0][0] + fits[kernel][1] y_list = [f_fit(entry) for entry in x_list] pylab.plot(x_list, y_list, marker=markers[marker_count], linestyle='-', color='k', label=kernel[0]) marker_count += 1 pylab.autoscale() pylab.xlabel(x_label) pylab.ylabel(f_label) pylab.legend(loc='upper center', bbox_to_anchor=(0.5, -0.10), ncol=4, mode="expand", borderaxespad=0.) pylab.savefig(plot_filename, bbox_inches='tight', orientation='portrait') tex_filename = os.path.join(FLAGS.root_dir, "{}.tex".format(filename_base)) print "\tGenerating {}".format(tex_filename) tex_figure_path = os.path.join("figures", "auto", filename_base) output_latex = r"""\begin{figure} \centering """ output_latex += "\\includegraphics{%s}\n" % tex_figure_path output_latex += "\\caption{%s}\n" % caption output_latex += "\\label{fig:%s}\n" % filename_base output_latex += r"""\end{figure}""" with open(tex_filename, 'w') as f: f.write(output_latex) @staticmethod def generate_1d_fit_table(key_labels, fits, caption, filename_base): filename_base = filename_base.replace('.', '_') table_filename = os.path.join(FLAGS.root_dir, "{}_table.tex".format(filename_base)) print "\tGenerating {}".format(table_filename) output_latex = r"""\begin{table}[h] \centering """ output_latex += "\\begin{tabular}{|" for _ in key_labels: output_latex += "l|" output_latex += "|c|c|c|}\n" output_latex += "\\hline\n" for label in key_labels: output_latex += "{} & ".format(label) output_latex += "Slope & Intercept & $R^2$ \\\\\n\\hline\n" for key in fits.keys(): for entry in key: output_latex += "%s & " % entry output_latex += " %.4g & %.4g & %.4g \\\\\n" % (fits[key][0][0], fits[key][1], fits[key][2]) output_latex += r"""\hline \end{tabular} """ output_latex += "\\caption{%s}\n" % caption output_latex += "\\label{tab:%s}\n" % filename_base output_latex += r"""\end{table}""" with open(table_filename, 'w') as f: f.write(output_latex) @staticmethod def generate_2d_fit_table(key_labels, fits, x_label, y_label, caption, filename_base): filename_base = filename_base.replace('.', '_') table_filename = os.path.join(FLAGS.root_dir, "{}_table.tex".format(filename_base)) print "\tGenerating {}".format(table_filename) output_latex = r"""\begin{table}[h] \centering """ output_latex += "\\begin{tabular}{|" for _ in key_labels: output_latex += "l|" output_latex += "|c|c|c|c|}\n" output_latex += "\\hline\n" for label in key_labels: output_latex += "{} & ".format(label) output_latex += "{} Slope & {} Slope & Intercept & $R^2$ \\\\\n\\hline\n".format(x_label, y_label) for key in fits.keys(): for entry in key: output_latex += "%s & " % entry output_latex += "%.4g & %.4g & %.4g & %.4g \\\\\n" % (fits[key][0][0], fits[key][0][1], fits[key][1], fits[key][2]) output_latex += r"""\hline \end{tabular} 
""" output_latex += "\\caption{%s}\n" % caption output_latex += "\\label{tab:%s}\n" % filename_base output_latex += r"""\end{table}""" with open(table_filename, 'w') as f: f.write(output_latex) @staticmethod def generate_3d_fit_table(key_labels, fits, x_label, y_label, z_label, caption, filename_base): filename_base = filename_base.replace('.', '_') table_filename = os.path.join(FLAGS.root_dir, "{}_table.tex".format(filename_base)) print "\tGenerating {}".format(table_filename) output_latex = r"""\begin{table}[h] \centering """ output_latex += "\\begin{tabular}{|" for _ in key_labels: output_latex += "l|" output_latex += "|c|c|c|c|c|}\n" output_latex += "\\hline\n" for label in key_labels: output_latex += "{} & ".format(label) output_latex += "{} Slope & {} Slope & {} Slope & Intercept & $R^2$ \\\\\n\\hline\n".format(x_label, y_label, z_label) for key in fits.keys(): for entry in key: output_latex += "%s & " % entry output_latex += "%.4g & %.4g & %.4g & %.4g & %.4g \\\\\n" % (fits[key][0][0], fits[key][0][1], fits[key][0][2], fits[key][1], fits[key][2]) output_latex += r"""\hline \end{tabular} """ output_latex += "\\caption{%s}\n" % caption output_latex += "\\label{tab:%s}\n" % filename_base output_latex += r"""\end{table}""" with open(table_filename, 'w') as f: f.write(output_latex) @staticmethod def generate_score_table(score_table, caption, filename_base): ordinal_ranks = ["1st", "2nd", "3rd", "4th", "5th", "6th", "7th", "8th", "9th", "10th"] filename_base = filename_base.replace('.', '_') table_filename = os.path.join(FLAGS.root_dir, "{}_table.tex".format(filename_base)) print "\tGenerating {}".format(table_filename) output_latex = r"""\begin{table}[h] \centering """ output_latex += "\\begin{tabular}{|l|" for _ in score_table.get_table().keys(): output_latex += "|c" output_latex += "||c" output_latex += "|}\\hline\n" output_latex += "Kernel " for i in range(0, len(score_table.get_table().keys())): output_latex += "& %s " % ordinal_ranks[i] output_latex += "& Total Score " output_latex += "\\\\\n" output_latex += r"""\hline """ total_count = score_table.get_total_count() assert(total_count > 0) for kernel in score_table.get_table().keys(): output_latex += "%s " % kernel for entry in score_table.get_table()[kernel]: if entry > 0: output_latex += "& %d " % entry else: output_latex += "& \\textemdash " output_latex += "\\\\\n" output_latex += r"""\hline \end{tabular} """ output_latex += "\\caption{%s}\n" % caption output_latex += "\\label{tab:%s}\n" % filename_base output_latex += r"""\end{table}""" with open(table_filename, 'w') as f: f.write(output_latex) @staticmethod def generate_score_percentage_table(score_table, caption, filename_base): ordinal_ranks = ["1st", "2nd", "3rd", "4th", "5th", "6th", "7th", "8th", "9th", "10th"] filename_base = filename_base.replace('.', '_') table_filename = os.path.join(FLAGS.root_dir, "{}_table.tex".format(filename_base)) print "\tGenerating {}".format(table_filename) output_latex = r"""\begin{table}[h] \centering """ output_latex += "\\begin{tabular}{|l|" for _ in score_table.get_table().keys(): output_latex += "|c" output_latex += "|}\\hline\n" output_latex += "Kernel " for i in range(0, len(score_table.get_table().keys())): output_latex += "& %s " % ordinal_ranks[i] output_latex += "\\\\\n" output_latex += r"""\hline """ total_count = float(score_table.get_total_count()) assert(total_count > 0.0) for kernel in score_table.get_table().keys(): output_latex += "%s " % kernel for i in range(0, len(score_table.get_table().keys())): entry = 
score_table.get_table()[kernel][i] if entry > 0: output_latex += "& %5.4f " % (float(entry) / total_count) else: output_latex += "& \\textemdash " output_latex += "\\\\\n" output_latex += r"""\hline \end{tabular} """ output_latex += "\\caption{%s}\n" % caption output_latex += "\\label{tab:%s}\n" % filename_base output_latex += r"""\end{table}""" with open(table_filename, 'w') as f: f.write(output_latex) class KernelArtifacts(GenericArtifacts): def __init__(self, results_table): super(KernelArtifacts, self).__init__(results_table, ("Kernel",)) def generate_multiline_plots(self): key_label_caption = GenericArtifacts.key_tuple_to_caption_string(self.key_label_tuple) key_label_filename = GenericArtifacts.key_tuple_to_filename_string(self.key_label_tuple, lowercase=True) GenericArtifacts.generate_1d_multiline_plot(self.event_count_vs_cpu_fits, self.event_count_ranges, "Event Count", "CPU Time (mS)", "Trend lines for Event Count vs CPU Time for per {} fits".format( key_label_caption), "event_count_vs_cpu_per_{}_multiline_plot".format( key_label_filename)) GenericArtifacts.generate_1d_multiline_plot(self.event_count_vs_maxmem_fits, self.event_count_ranges, "Event Count", "Max Memory (kB)", "Trend lines for Event Count vs Max Memory for per {} fits".format( key_label_caption), "event_count_vs_maxmem_per_{}_multiline_plot".format( key_label_filename)) GenericArtifacts.generate_1d_multiline_plot(self.agents_vs_cpu_fits, self.agents_ranges, "Agents", "CPU Time (mS)", "Trend lines for Agents vs CPU Time for per {} fits".format( key_label_caption), "agents_vs_cpu_per_{}_multiline_plot".format(key_label_filename)) GenericArtifacts.generate_1d_multiline_plot(self.agents_vs_maxmem_fits, self.agents_ranges, "Agents", "Max Memory (kB)", "Trend lines for Agents vs Max Memory for per {} fits".format( key_label_caption), "agents_vs_maxmem_per_{}_multiline_plot".format(key_label_filename)) GenericArtifacts.generate_1d_multiline_plot(self.connections_vs_cpu_fits, self.connections_ranges, "Connections", "CPU Time (mS)", "Trend lines for Connections vs CPU Time for per {} fits".format( key_label_caption), "connections_vs_cpu_per_{}_multiline_plot".format( key_label_filename)) GenericArtifacts.generate_1d_multiline_plot(self.connections_vs_maxmem_fits, self.connections_ranges, "Connections", "Max Memory (kB)", "Trend lines for Connections vs Max Memory for per {} fits".format( key_label_caption), "connections_vs_maxmem_per_{}_multiline_plot".format( key_label_filename)) def generate_fit_tables(self): key_label_caption = GenericArtifacts.key_tuple_to_caption_string(self.key_label_tuple) key_label_filename = GenericArtifacts.key_tuple_to_filename_string(self.key_label_tuple, lowercase=True) GenericArtifacts.generate_1d_fit_table(self.key_label_tuple, self.event_count_vs_cpu_fits, "Event Count vs CPU Time (mS) for per {} fits".format(key_label_caption), "event_count_vs_cpu_per_{}_fit".format(key_label_filename)) GenericArtifacts.generate_1d_fit_table(self.key_label_tuple, self.event_count_vs_maxmem_fits, "Event Count vs Max Memory (kB) for per {} fits".format( key_label_caption), "event_count_vs_maxmem_per_{}_fit".format(key_label_filename)) GenericArtifacts.generate_1d_fit_table(self.key_label_tuple, self.agents_vs_cpu_fits, "Agents vs CPU Time (mS) for per {} fits".format(key_label_caption), "agents_vs_cpu_per_{}_fit".format(key_label_filename)) GenericArtifacts.generate_1d_fit_table(self.key_label_tuple, self.agents_vs_maxmem_fits, "Agents vs Max Memory (kB) for per {} fits".format(key_label_caption), 
"agents_vs_maxmem_per_{}_fit".format(key_label_filename)) GenericArtifacts.generate_1d_fit_table(self.key_label_tuple, self.connections_vs_cpu_fits, "Connections vs CPU Time (mS) for per {} fits".format(key_label_caption), "connections_vs_cpu_per_{}_fit".format(key_label_filename)) GenericArtifacts.generate_1d_fit_table(self.key_label_tuple, self.connections_vs_maxmem_fits, "Connections vs Max Memory (kB) for per {} fits".format( key_label_caption), "connections_vs_maxmem_per_{}_fit".format(key_label_filename)) GenericArtifacts.generate_2d_fit_table(self.key_label_tuple, self.event_count_and_agents_vs_cpu_fits, "Event Count", "Agents", "Event Count and Agents vs CPU Time (mS) for per {} fits".format( key_label_caption), "event_count_and_agents_vs_cpu_per_{}_fit".format(key_label_filename)) GenericArtifacts.generate_2d_fit_table(self.key_label_tuple, self.event_count_and_agents_vs_maxmem_fits, "Event Count", "Agents", "Event Count and Agents vs Max Memory (kB) for per {} fits".format( key_label_caption), "event_count_and_agents_vs_maxmem_per_{}_fit".format(key_label_filename)) GenericArtifacts.generate_2d_fit_table(self.key_label_tuple, self.event_count_and_connections_vs_cpu_fits, "Event Count", "Connections", "Event Count and Connections vs CPU Time (mS) for per {} fits".format( key_label_caption), "event_count_and_connections_vs_cpu_per_{}_fit".format( key_label_filename)) GenericArtifacts.generate_2d_fit_table(self.key_label_tuple, self.event_count_and_connections_vs_maxmem_fits, "Event Count", "Connections", "Event Count and Connections vs Max Memory (kB) for per {} fits".format( key_label_caption), "event_count_and_connections_vs_maxmem_per_{}_fit".format( key_label_filename)) GenericArtifacts.generate_2d_fit_table(self.key_label_tuple, self.agents_and_connections_vs_cpu_fits, "Agents", "Connections", "Agents and Connections vs CPU Time (mS) for per {} fits".format( key_label_caption), "agents_and_connections_vs_cpu_per_{}_fit".format(key_label_filename)) GenericArtifacts.generate_2d_fit_table(self.key_label_tuple, self.agents_and_connections_vs_maxmem_fits, "Agents", "Connections", "Agents and Connections vs Max Memory (kB) for per {} fits".format( key_label_caption), "agents_and_connections_vs_maxmem_per_{}_fit".format(key_label_filename)) GenericArtifacts.generate_3d_fit_table(self.key_label_tuple, self.event_count_and_agents_and_connections_vs_cpu_fits, "Event Count", "Agents", "Connections", "Event Count and Agents and Connections vs CPU Time (mS) for per {} " "fits".format(key_label_caption), "event_count_and_agents_and_connections_vs_cpu_per_{}_fit".format( key_label_filename)) GenericArtifacts.generate_3d_fit_table(self.key_label_tuple, self.event_count_and_agents_and_connections_vs_maxmem_fits, "Event Count", "Agents", "Connections", "Event Count and Agents and Connections vs Max Memory (kB) for per " "{} fits".format(key_label_caption), "event_count_and_agents_and_connections_vs_maxmem_per_{}_fit".format( key_label_filename)) def generate_score_tables(self): key_label_caption = GenericArtifacts.key_tuple_to_caption_string(self.key_label_tuple) key_label_filename = GenericArtifacts.key_tuple_to_filename_string(self.key_label_tuple, lowercase=True) score_tables = dict() selection = (("Kernel", ), ("Agents", ), "CPU") score_tables[selection] = ScoreTable(self.kernels, selection) score_tables[selection].add_1d_fit_score(self.agents_vs_cpu_fits) selection = (("Kernel", ), ("Connections", ), "CPU") score_tables[selection] = ScoreTable(self.kernels, selection) 
score_tables[selection].add_1d_fit_score(self.connections_vs_cpu_fits) selection = (("Kernel", ), ("Events", ), "CPU") score_tables[selection] = ScoreTable(self.kernels, selection) score_tables[selection].add_1d_fit_score(self.event_count_vs_cpu_fits) selection = (("Kernel", ), ("Agents", "Connections", ), "CPU") score_tables[selection] = ScoreTable(self.kernels, selection) score_tables[selection].add_2d_fit_score(self.agents_and_connections_vs_cpu_fits) selection = (("Kernel", ), ("Events", "Agents", ), "CPU") score_tables[selection] = ScoreTable(self.kernels, selection) score_tables[selection].add_2d_fit_score(self.event_count_and_agents_vs_cpu_fits) selection = (("Kernel", ), ("Events", "Connections", ), "CPU") score_tables[selection] = ScoreTable(self.kernels, selection) score_tables[selection].add_2d_fit_score(self.event_count_and_connections_vs_cpu_fits) selection = (("Kernel", ), ("Events", "Agents", "Connections", ), "CPU") score_tables[selection] = ScoreTable(self.kernels, selection) score_tables[selection].add_3d_fit_score(self.event_count_and_agents_and_connections_vs_cpu_fits) selection = (("Kernel", ), ("Agents", ), "Max Memory") score_tables[selection] = ScoreTable(self.kernels, selection) score_tables[selection].add_1d_fit_score(self.agents_vs_maxmem_fits) selection = (("Kernel", ), ("Connections", ), "Max Memory") score_tables[selection] = ScoreTable(self.kernels, selection) score_tables[selection].add_1d_fit_score(self.connections_vs_maxmem_fits) selection = (("Kernel", ), ("Events", ), "Max Memory") score_tables[selection] = ScoreTable(self.kernels, selection) score_tables[selection].add_1d_fit_score(self.event_count_vs_maxmem_fits) selection = (("Kernel", ), ("Agents", "Connections", ), "Max Memory") score_tables[selection] = ScoreTable(self.kernels, selection) score_tables[selection].add_2d_fit_score(self.agents_and_connections_vs_maxmem_fits) selection = (("Kernel", ), ("Events", "Agents", ), "Max Memory") score_tables[selection] = ScoreTable(self.kernels, selection) score_tables[selection].add_2d_fit_score(self.event_count_and_agents_vs_maxmem_fits) selection = (("Kernel", ), ("Events", "Connections", ), "Max Memory") score_tables[selection] = ScoreTable(self.kernels, selection) score_tables[selection].add_2d_fit_score(self.event_count_and_connections_vs_maxmem_fits) selection = (("Kernel", ), ("Events", "Agents", "Connections", ), "Max Memory") score_tables[selection] = ScoreTable(self.kernels, selection) score_tables[selection].add_3d_fit_score(self.event_count_and_agents_and_connections_vs_maxmem_fits) for selection, table in score_tables.iteritems(): independent_vars = selection[1] independent_caption = "" independent_filename = "" for var in independent_vars: independent_caption += "{} and ".format(var) independent_filename += "{}_".format(str(var).lower()) independent_caption = independent_caption[:-5] independent_filename = independent_filename[:-1] dependent_caption = selection[2] if dependent_caption == "CPU": dependent_filename = "cpu" else: dependent_filename = "maxmem" GenericArtifacts.generate_score_table(table, "Scores based on {} vs {} for {} fits".format(independent_caption, dependent_caption, key_label_caption), "{}_vs_{}_per_{}_fits_scores".format(independent_filename, dependent_filename, key_label_filename)) GenericArtifacts.generate_score_percentage_table(table, "Score percentages based on {} vs {} for {} fits".format( independent_caption, dependent_caption, key_label_caption), "{}_vs_{}_per_{}_fits_score_percentage".format( independent_filename, 
dependent_filename, key_label_filename)) def generate_machine_comparison_tables(self): pass class KernelMachineArtifacts(GenericArtifacts): def __init__(self, results_table): super(KernelMachineArtifacts, self).__init__(results_table, ("Kernel", "Machine")) def generate_multiline_plots(self): key_label_caption = GenericArtifacts.key_tuple_to_caption_string(self.key_label_tuple) key_label_filename = GenericArtifacts.key_tuple_to_filename_string(self.key_label_tuple, lowercase=True) for sub_key in iter(self.sub_keys): sub_key_caption = GenericArtifacts.key_tuple_to_caption_string(sub_key) sub_key_filename = GenericArtifacts.key_tuple_to_filename_string(sub_key) event_count_ranges = self.filter_dict_for_sub_key(self.event_count_ranges, sub_key) agents_ranges = self.filter_dict_for_sub_key(self.agents_ranges, sub_key) connections_ranges = self.filter_dict_for_sub_key(self.connections_ranges, sub_key) event_count_vs_cpu_fits = self.filter_dict_for_sub_key(self.event_count_vs_cpu_fits, sub_key) event_count_vs_maxmem_fits = self.filter_dict_for_sub_key(self.event_count_vs_maxmem_fits, sub_key) agents_vs_cpu_fits = self.filter_dict_for_sub_key(self.agents_vs_cpu_fits, sub_key) agents_vs_maxmem_fits = self.filter_dict_for_sub_key(self.agents_vs_maxmem_fits, sub_key) connections_vs_cpu_fits = self.filter_dict_for_sub_key(self.connections_vs_cpu_fits, sub_key) connections_vs_maxmem_fits = self.filter_dict_for_sub_key(self.connections_vs_maxmem_fits, sub_key) GenericArtifacts.generate_1d_multiline_plot(event_count_vs_cpu_fits, event_count_ranges, "Event Count", "CPU Time (mS)", "Trend lines for Event Count vs CPU Time for per {} fits for {}" .format(key_label_caption, sub_key_caption), "event_count_vs_cpu_per_{}_multiline_plot_for_{}".format( key_label_filename, sub_key_filename)) GenericArtifacts.generate_1d_multiline_plot(event_count_vs_maxmem_fits, event_count_ranges, "Event Count", "Max Memory (kB)", "Trend lines for Event Count vs Max Memory for per {} fits " "for {}".format(key_label_caption, sub_key_caption), "event_count_vs_maxmem_per_{}_multiline_plot_for_{}".format( key_label_filename, sub_key_filename)) GenericArtifacts.generate_1d_multiline_plot(agents_vs_cpu_fits, agents_ranges, "Agents", "CPU Time (mS)", "Trend lines for Agents vs CPU Time for per {} fits for " "{}".format(key_label_caption, sub_key_caption), "agents_vs_cpu_per_{}_multiline_plot_for_{}".format( key_label_filename, sub_key_filename)) GenericArtifacts.generate_1d_multiline_plot(agents_vs_maxmem_fits, agents_ranges, "Agents", "Max Memory (kB)", "Trend lines for Agents vs Max Memory for per {} fits for " "{}".format(key_label_caption, sub_key_caption), "agents_vs_maxmem_per_{}_multiline_plot_for_{}".format( key_label_filename, sub_key_filename)) GenericArtifacts.generate_1d_multiline_plot(connections_vs_cpu_fits, connections_ranges, "Connections", "CPU Time (mS)", "Trend lines for Connections vs CPU Time for per {} fits for " "{}".format(key_label_caption, sub_key_caption), "connections_vs_cpu_per_{}_multiline_plot_for_{}".format( key_label_filename, sub_key_filename)) GenericArtifacts.generate_1d_multiline_plot(connections_vs_maxmem_fits, connections_ranges, "Connections", "Max Memory (kB)", "Trend lines for Connections vs Max Memory for per {} fits for " "{}".format(key_label_caption, sub_key_caption), "connections_vs_maxmem_per_{}_multiline_plot_for_{}".format( key_label_filename, sub_key_filename)) def generate_fit_tables(self): key_label_caption = GenericArtifacts.key_tuple_to_caption_string(self.key_label_tuple) 
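# --- Illustrative note (based on the key_tuple_to_* helpers used here) -------
# key_tuple_to_caption_string joins a key tuple with " and " for LaTeX
# captions, while key_tuple_to_filename_string joins it with "_" for file
# names; lowercase=True lower-cases each part. For example:
#
#   GenericArtifacts.key_tuple_to_caption_string(("Kernel", "Machine"))
#       -> "Kernel and Machine"
#   GenericArtifacts.key_tuple_to_filename_string(("Kernel", "Machine"), lowercase=True)
#       -> "kernel_machine"
# -----------------------------------------------------------------------------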
key_label_filename = GenericArtifacts.key_tuple_to_filename_string(self.key_label_tuple, lowercase=True) for sub_key in iter(self.sub_keys): sub_key_caption = GenericArtifacts.key_tuple_to_caption_string(sub_key) sub_key_filename = GenericArtifacts.key_tuple_to_filename_string(sub_key) event_count_vs_cpu_fits = self.filter_dict_for_sub_key(self.event_count_vs_cpu_fits, sub_key) event_count_vs_maxmem_fits = self.filter_dict_for_sub_key(self.event_count_vs_maxmem_fits, sub_key) agents_vs_cpu_fits = self.filter_dict_for_sub_key(self.agents_vs_cpu_fits, sub_key) agents_vs_maxmem_fits = self.filter_dict_for_sub_key(self.agents_vs_maxmem_fits, sub_key) connections_vs_cpu_fits = self.filter_dict_for_sub_key(self.connections_vs_cpu_fits, sub_key) connections_vs_maxmem_fits = self.filter_dict_for_sub_key(self.connections_vs_maxmem_fits, sub_key) event_count_and_agents_vs_cpu_fits = self.filter_dict_for_sub_key( self.event_count_and_agents_vs_cpu_fits, sub_key) event_count_and_agents_vs_maxmem_fits = self.filter_dict_for_sub_key( self.event_count_and_agents_vs_maxmem_fits, sub_key) event_count_and_connections_vs_cpu_fits = self.filter_dict_for_sub_key( self.event_count_and_connections_vs_cpu_fits, sub_key) event_count_and_connections_vs_maxmem_fits = self.filter_dict_for_sub_key( self.event_count_and_connections_vs_maxmem_fits, sub_key) agents_and_connections_vs_cpu_fits = self.filter_dict_for_sub_key( self.agents_and_connections_vs_cpu_fits, sub_key) agents_and_connections_vs_maxmem_fits = self.filter_dict_for_sub_key( self.agents_and_connections_vs_maxmem_fits, sub_key) event_count_and_agents_and_connections_vs_cpu_fits = self.filter_dict_for_sub_key( self.event_count_and_agents_and_connections_vs_cpu_fits, sub_key) event_count_and_agents_and_connections_vs_maxmem_fits = self.filter_dict_for_sub_key( self.event_count_and_agents_and_connections_vs_maxmem_fits, sub_key) GenericArtifacts.generate_1d_fit_table(("Kernel",), event_count_vs_cpu_fits, "Event Count vs CPU Time (mS) for per {} fits for " "{}".format(key_label_caption, sub_key_caption), "event_count_vs_cpu_per_{}_fit_for_" "{}".format(key_label_filename, sub_key_filename)) GenericArtifacts.generate_1d_fit_table(("Kernel",), event_count_vs_maxmem_fits, "Event Count vs Max Memory (kB) for per {} fits for {}".format( key_label_caption, sub_key_caption), "event_count_vs_maxmem_per_{}_fit_for_{}".format(key_label_filename, sub_key_filename)) GenericArtifacts.generate_1d_fit_table(("Kernel",), agents_vs_cpu_fits, "Agents vs CPU Time (mS) for per {} fits for {}".format( key_label_caption, sub_key_caption), "agents_vs_cpu_per_{}_fit_for_{}".format(key_label_filename, sub_key_filename)) GenericArtifacts.generate_1d_fit_table(("Kernel",), agents_vs_maxmem_fits, "Agents vs Max Memory (kB) for per {} fits for {}".format( key_label_caption, sub_key_caption), "agents_vs_maxmem_per_{}_fit_for_{}".format(key_label_filename, sub_key_filename)) GenericArtifacts.generate_1d_fit_table(("Kernel",), connections_vs_cpu_fits, "Connections vs CPU Time (mS) for per {} fits for {}".format( key_label_caption, sub_key_caption), "connections_vs_cpu_per_{}_fit_for_{}".format(key_label_filename, sub_key_filename)) GenericArtifacts.generate_1d_fit_table(("Kernel",), connections_vs_maxmem_fits, "Connections vs Max Memory (kB) for per {} fits for {}".format( key_label_caption, sub_key_caption), "connections_vs_maxmem_per_{}_fit_for_{}".format(key_label_filename, sub_key_filename)) GenericArtifacts.generate_2d_fit_table(("Kernel",), event_count_and_agents_vs_cpu_fits, "Event 
Count", "Agents", "Event Count and Agents vs CPU Time (mS) for per {} fits for " "{}".format(key_label_caption, sub_key_caption), "event_count_and_agents_vs_cpu_per_{}_fit_for_{}".format( key_label_filename, sub_key_filename)) GenericArtifacts.generate_2d_fit_table(("Kernel",), event_count_and_agents_vs_maxmem_fits, "Event Count", "Agents", "Event Count and Agents vs Max Memory (kB) for per {} fits for " "{}".format(key_label_caption, sub_key_caption), "event_count_and_agents_vs_maxmem_per_{}_fit_for_{}".format( key_label_filename, sub_key_filename)) GenericArtifacts.generate_2d_fit_table(("Kernel",), event_count_and_connections_vs_cpu_fits, "Event Count", "Connections", "Event Count and Connections vs CPU Time (mS) for per {} fits for " "{}".format(key_label_caption, sub_key_caption), "event_count_and_connections_vs_cpu_per_{}_fit_for_{}".format( key_label_filename, sub_key_filename)) GenericArtifacts.generate_2d_fit_table(("Kernel",), event_count_and_connections_vs_maxmem_fits, "Event Count", "Connections", "Event Count and Connections vs Max Memory (kB) for per {} fits for " "{}".format(key_label_caption, sub_key_caption), "event_count_and_connections_vs_maxmem_per_{}_fit_for_{}".format( key_label_filename, sub_key_filename)) GenericArtifacts.generate_2d_fit_table(("Kernel",), agents_and_connections_vs_cpu_fits, "Agents", "Connections", "Agents and Connections vs CPU Time (mS) for per {} fits for " "{}".format(key_label_caption, sub_key_caption), "agents_and_connections_vs_cpu_per_{}_fit_for_{}".format( key_label_filename, sub_key_filename)) GenericArtifacts.generate_2d_fit_table(("Kernel",), agents_and_connections_vs_maxmem_fits, "Agents", "Connections", "Agents and Connections vs Max Memory (kB) for per {} fits for " "{}".format(key_label_caption, sub_key_caption), "agents_and_connections_vs_maxmem_per_{}_fit_for_{}".format( key_label_filename, sub_key_filename)) GenericArtifacts.generate_3d_fit_table(("Kernel",), event_count_and_agents_and_connections_vs_cpu_fits, "Event Count", "Agents", "Connections", "Event Count and Agents and Connections vs CPU Time (mS) for per {} " "fits".format(key_label_caption, sub_key_caption), "event_count_and_agents_and_connections_vs_cpu_per_{}_fit_for_" "{}".format(key_label_filename, sub_key_filename)) GenericArtifacts.generate_3d_fit_table(("Kernel",), event_count_and_agents_and_connections_vs_maxmem_fits, "Event Count", "Agents", "Connections", "Event Count and Agents and Connections vs Max Memory (kB) for per " "{} fits for {}".format(key_label_caption, sub_key_caption), "event_count_and_agents_and_connections_vs_maxmem_per_{}_fit_for_" "{}".format(key_label_filename, sub_key_filename)) def generate_score_tables(self): key_label_caption = GenericArtifacts.key_tuple_to_caption_string(self.key_label_tuple) key_label_filename = GenericArtifacts.key_tuple_to_filename_string(self.key_label_tuple, lowercase=True) score_tables = dict() selection = (("Kernel", "Machine", ), ("Agents", ), "CPU") score_tables[selection] = ScoreTable(self.kernels, selection) selection = (("Kernel", "Machine", ), ("Connections", ), "CPU") score_tables[selection] = ScoreTable(self.kernels, selection) selection = (("Kernel", "Machine", ), ("Events", ), "CPU") score_tables[selection] = ScoreTable(self.kernels, selection) selection = (("Kernel", "Machine", ), ("Agents", "Connections", ), "CPU") score_tables[selection] = ScoreTable(self.kernels, selection) selection = (("Kernel", "Machine", ), ("Events", "Agents", ), "CPU") score_tables[selection] = ScoreTable(self.kernels, 
selection) selection = (("Kernel", "Machine", ), ("Events", "Connections", ), "CPU") score_tables[selection] = ScoreTable(self.kernels, selection) selection = (("Kernel", "Machine", ), ("Events", "Agents", "Connections", ), "CPU") score_tables[selection] = ScoreTable(self.kernels, selection) selection = (("Kernel", "Machine", ), ("Agents", ), "Max Memory") score_tables[selection] = ScoreTable(self.kernels, selection) selection = (("Kernel", "Machine", ), ("Connections", ), "Max Memory") score_tables[selection] = ScoreTable(self.kernels, selection) selection = (("Kernel", "Machine", ), ("Events", ), "Max Memory") score_tables[selection] = ScoreTable(self.kernels, selection) selection = (("Kernel", "Machine", ), ("Agents", "Connections", ), "Max Memory") score_tables[selection] = ScoreTable(self.kernels, selection) selection = (("Kernel", "Machine", ), ("Events", "Agents", ), "Max Memory") score_tables[selection] = ScoreTable(self.kernels, selection) selection = (("Kernel", "Machine", ), ("Events", "Connections", ), "Max Memory") score_tables[selection] = ScoreTable(self.kernels, selection) selection = (("Kernel", "Machine", ), ("Events", "Agents", "Connections", ), "Max Memory") score_tables[selection] = ScoreTable(self.kernels, selection) for sub_key in iter(self.sub_keys): event_count_vs_cpu_fits = self.filter_dict_for_sub_key(self.event_count_vs_cpu_fits, sub_key) event_count_vs_maxmem_fits = self.filter_dict_for_sub_key(self.event_count_vs_maxmem_fits, sub_key) agents_vs_cpu_fits = self.filter_dict_for_sub_key(self.agents_vs_cpu_fits, sub_key) agents_vs_maxmem_fits = self.filter_dict_for_sub_key(self.agents_vs_maxmem_fits, sub_key) connections_vs_cpu_fits = self.filter_dict_for_sub_key(self.connections_vs_cpu_fits, sub_key) connections_vs_maxmem_fits = self.filter_dict_for_sub_key(self.connections_vs_maxmem_fits, sub_key) event_count_and_agents_vs_cpu_fits = self.filter_dict_for_sub_key( self.event_count_and_agents_vs_cpu_fits, sub_key) event_count_and_agents_vs_maxmem_fits = self.filter_dict_for_sub_key( self.event_count_and_agents_vs_maxmem_fits, sub_key) event_count_and_connections_vs_cpu_fits = self.filter_dict_for_sub_key( self.event_count_and_connections_vs_cpu_fits, sub_key) event_count_and_connections_vs_maxmem_fits = self.filter_dict_for_sub_key( self.event_count_and_connections_vs_maxmem_fits, sub_key) agents_and_connections_vs_cpu_fits = self.filter_dict_for_sub_key( self.agents_and_connections_vs_cpu_fits, sub_key) agents_and_connections_vs_maxmem_fits = self.filter_dict_for_sub_key( self.agents_and_connections_vs_maxmem_fits, sub_key) event_count_and_agents_and_connections_vs_cpu_fits = self.filter_dict_for_sub_key( self.event_count_and_agents_and_connections_vs_cpu_fits, sub_key) event_count_and_agents_and_connections_vs_maxmem_fits = self.filter_dict_for_sub_key( self.event_count_and_agents_and_connections_vs_maxmem_fits, sub_key) score_tables[(("Kernel", "Machine", ), ("Agents", ), "CPU")].add_1d_fit_score(agents_vs_cpu_fits) score_tables[(("Kernel", "Machine", ), ("Agents", ), "Max Memory")].add_1d_fit_score(agents_vs_maxmem_fits) score_tables[(("Kernel", "Machine", ), ("Connections", ), "CPU")].add_1d_fit_score(connections_vs_cpu_fits) score_tables[(("Kernel", "Machine", ), ("Connections", ), "Max Memory")].add_1d_fit_score( connections_vs_maxmem_fits) score_tables[(("Kernel", "Machine", ), ("Events", ), "CPU")].add_1d_fit_score(event_count_vs_cpu_fits) score_tables[(("Kernel", "Machine", ), ("Events", ), "Max Memory")].add_1d_fit_score( event_count_vs_maxmem_fits) 
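# --- Illustrative sketch (based on filter_dict_for_sub_key defined above) ----
# The fits filtered in this loop collapse composite keys such as
# (kernel, machine) down to (kernel,) for one fixed machine, so the per-kernel
# plotting and scoring helpers can be reused unchanged. Equivalent standalone
# sketch; the kernel and machine names in the example are hypothetical:
def _sketch_filter_for_machine(raw_fits, kernels, machine):
    """{(kernel, machine): fit} -> {(kernel,): fit} for the given machine."""
    return dict(((kernel,), raw_fits[(kernel, machine)]) for kernel in kernels)
# Example: _sketch_filter_for_machine(fits, {"sequential", "optimistic"}, "hostA")
# -----------------------------------------------------------------------------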
score_tables[(("Kernel", "Machine", ), ("Agents", "Connections",), "CPU")].add_2d_fit_score( agents_and_connections_vs_cpu_fits) score_tables[(("Kernel", "Machine", ), ("Agents", "Connections",), "Max Memory")].add_2d_fit_score( agents_and_connections_vs_maxmem_fits) score_tables[(("Kernel", "Machine", ), ("Events", "Agents",), "CPU")].add_2d_fit_score( event_count_and_agents_vs_cpu_fits) score_tables[(("Kernel", "Machine", ), ("Events", "Agents",), "Max Memory")].add_2d_fit_score( event_count_and_agents_vs_maxmem_fits) score_tables[(("Kernel", "Machine", ), ("Events", "Connections",), "CPU")].add_2d_fit_score( event_count_and_connections_vs_cpu_fits) score_tables[(("Kernel", "Machine", ), ("Events", "Connections",), "Max Memory")].add_2d_fit_score( event_count_and_connections_vs_maxmem_fits) score_tables[(("Kernel", "Machine", ), ("Events", "Agents", "Connections",), "CPU")].add_2d_fit_score( event_count_and_agents_and_connections_vs_cpu_fits) score_tables[(("Kernel", "Machine", ), ("Events", "Agents", "Connections",), "Max Memory")].\ add_3d_fit_score(event_count_and_agents_and_connections_vs_maxmem_fits) for selection, table in score_tables.iteritems(): independent_vars = selection[1] independent_caption = "" independent_filename = "" for var in independent_vars: independent_caption += "{} and ".format(var) independent_filename += "{}_".format(str(var).lower()) independent_caption = independent_caption[:-5] independent_filename = independent_filename[:-1] dependent_caption = selection[2] if dependent_caption == "CPU": dependent_filename = "cpu" else: dependent_filename = "maxmem" GenericArtifacts.generate_score_table(table, "Scores based on {} vs {} for {} fits".format(independent_caption, dependent_caption, key_label_caption), "{}_vs_{}_per_{}_fits_scores".format(independent_filename, dependent_filename, key_label_filename)) GenericArtifacts.generate_score_percentage_table(table, "Score percentages based on {} vs {} for {} fits".format( independent_caption, dependent_caption, key_label_caption), "{}_vs_{}_per_{}_fits_score_percentage".format( independent_filename, dependent_filename, key_label_filename)) def generate_machine_comparison_tables(self): pass class KernelMachineTypeArtifacts(GenericArtifacts): def __init__(self, results_table): super(KernelMachineTypeArtifacts, self).__init__(results_table, ("Kernel", "Machine", "Type")) def generate_multiline_plots(self): pass def generate_fit_tables(self): key_label_caption = GenericArtifacts.key_tuple_to_caption_string(self.key_label_tuple) key_label_filename = GenericArtifacts.key_tuple_to_filename_string(self.key_label_tuple, lowercase=True) for sub_key in iter(self.sub_keys): sub_key_caption = GenericArtifacts.key_tuple_to_caption_string(sub_key) sub_key_filename = GenericArtifacts.key_tuple_to_filename_string(sub_key) event_count_vs_cpu_fits = self.filter_dict_for_sub_key(self.event_count_vs_cpu_fits, sub_key) event_count_vs_maxmem_fits = self.filter_dict_for_sub_key(self.event_count_vs_maxmem_fits, sub_key) agents_vs_cpu_fits = self.filter_dict_for_sub_key(self.agents_vs_cpu_fits, sub_key) agents_vs_maxmem_fits = self.filter_dict_for_sub_key(self.agents_vs_maxmem_fits, sub_key) connections_vs_cpu_fits = self.filter_dict_for_sub_key(self.connections_vs_cpu_fits, sub_key) connections_vs_maxmem_fits = self.filter_dict_for_sub_key(self.connections_vs_maxmem_fits, sub_key) event_count_and_agents_vs_cpu_fits = self.filter_dict_for_sub_key( self.event_count_and_agents_vs_cpu_fits, sub_key) event_count_and_agents_vs_maxmem_fits = 
self.filter_dict_for_sub_key( self.event_count_and_agents_vs_maxmem_fits, sub_key) event_count_and_connections_vs_cpu_fits = self.filter_dict_for_sub_key( self.event_count_and_connections_vs_cpu_fits, sub_key) event_count_and_connections_vs_maxmem_fits = self.filter_dict_for_sub_key( self.event_count_and_connections_vs_maxmem_fits, sub_key) agents_and_connections_vs_cpu_fits = self.filter_dict_for_sub_key( self.agents_and_connections_vs_cpu_fits, sub_key) agents_and_connections_vs_maxmem_fits = self.filter_dict_for_sub_key( self.agents_and_connections_vs_maxmem_fits, sub_key) event_count_and_agents_and_connections_vs_cpu_fits = self.filter_dict_for_sub_key( self.event_count_and_agents_and_connections_vs_cpu_fits, sub_key) event_count_and_agents_and_connections_vs_maxmem_fits = self.filter_dict_for_sub_key( self.event_count_and_agents_and_connections_vs_maxmem_fits, sub_key) GenericArtifacts.generate_1d_fit_table(("Kernel",), event_count_vs_cpu_fits, "Event Count vs CPU Time (mS) for per {} fits for " "{}".format(key_label_caption, sub_key_caption), "event_count_vs_cpu_per_{}_fit_for_" "{}".format(key_label_filename, sub_key_filename)) GenericArtifacts.generate_1d_fit_table(("Kernel",), event_count_vs_maxmem_fits, "Event Count vs Max Memory (kB) for per {} fits for {}".format( key_label_caption, sub_key_caption), "event_count_vs_maxmem_per_{}_fit_for_{}".format(key_label_filename, sub_key_filename)) GenericArtifacts.generate_1d_fit_table(("Kernel",), agents_vs_cpu_fits, "Agents vs CPU Time (mS) for per {} fits for {}".format( key_label_caption, sub_key_caption), "agents_vs_cpu_per_{}_fit_for_{}".format(key_label_filename, sub_key_filename)) GenericArtifacts.generate_1d_fit_table(("Kernel",), agents_vs_maxmem_fits, "Agents vs Max Memory (kB) for per {} fits for {}".format( key_label_caption, sub_key_caption), "agents_vs_maxmem_per_{}_fit_for_{}".format(key_label_filename, sub_key_filename)) GenericArtifacts.generate_1d_fit_table(("Kernel",), connections_vs_cpu_fits, "Connections vs CPU Time (mS) for per {} fits for {}".format( key_label_caption, sub_key_caption), "connections_vs_cpu_per_{}_fit_for_{}".format(key_label_filename, sub_key_filename)) GenericArtifacts.generate_1d_fit_table(("Kernel",), connections_vs_maxmem_fits, "Connections vs Max Memory (kB) for per {} fits for {}".format( key_label_caption, sub_key_caption), "connections_vs_maxmem_per_{}_fit_for_{}".format(key_label_filename, sub_key_filename)) GenericArtifacts.generate_2d_fit_table(("Kernel",), event_count_and_agents_vs_cpu_fits, "Event Count", "Agents", "Event Count and Agents vs CPU Time (mS) for per {} fits for " "{}".format(key_label_caption, sub_key_caption), "event_count_and_agents_vs_cpu_per_{}_fit_for_{}".format( key_label_filename, sub_key_filename)) GenericArtifacts.generate_2d_fit_table(("Kernel",), event_count_and_agents_vs_maxmem_fits, "Event Count", "Agents", "Event Count and Agents vs Max Memory (kB) for per {} fits for " "{}".format(key_label_caption, sub_key_caption), "event_count_and_agents_vs_maxmem_per_{}_fit_for_{}".format( key_label_filename, sub_key_filename)) GenericArtifacts.generate_2d_fit_table(("Kernel",), event_count_and_connections_vs_cpu_fits, "Event Count", "Connections", "Event Count and Connections vs CPU Time (mS) for per {} fits for " "{}".format(key_label_caption, sub_key_caption), "event_count_and_connections_vs_cpu_per_{}_fit_for_{}".format( key_label_filename, sub_key_filename)) GenericArtifacts.generate_2d_fit_table(("Kernel",), event_count_and_connections_vs_maxmem_fits, "Event 
Count", "Connections", "Event Count and Connections vs Max Memory (kB) for per {} fits for " "{}".format(key_label_caption, sub_key_caption), "event_count_and_connections_vs_maxmem_per_{}_fit_for_{}".format( key_label_filename, sub_key_filename)) GenericArtifacts.generate_2d_fit_table(("Kernel",), agents_and_connections_vs_cpu_fits, "Agents", "Connections", "Agents and Connections vs CPU Time (mS) for per {} fits for " "{}".format(key_label_caption, sub_key_caption), "agents_and_connections_vs_cpu_per_{}_fit_for_{}".format( key_label_filename, sub_key_filename)) GenericArtifacts.generate_2d_fit_table(("Kernel",), agents_and_connections_vs_maxmem_fits, "Agents", "Connections", "Agents and Connections vs Max Memory (kB) for per {} fits for " "{}".format(key_label_caption, sub_key_caption), "agents_and_connections_vs_maxmem_per_{}_fit_for_{}".format( key_label_filename, sub_key_filename)) GenericArtifacts.generate_3d_fit_table(("Kernel",), event_count_and_agents_and_connections_vs_cpu_fits, "Event Count", "Agents", "Connections", "Event Count and Agents and Connections vs CPU Time (mS) for per {} " "fits".format(key_label_caption, sub_key_caption), "event_count_and_agents_and_connections_vs_cpu_per_{}_fit_for_" "{}".format(key_label_filename, sub_key_filename)) GenericArtifacts.generate_3d_fit_table(("Kernel",), event_count_and_agents_and_connections_vs_maxmem_fits, "Event Count", "Agents", "Connections", "Event Count and Agents and Connections vs Max Memory (kB) for per " "{} fits for {}".format(key_label_caption, sub_key_caption), "event_count_and_agents_and_connections_vs_maxmem_per_{}_fit_for_" "{}".format(key_label_filename, sub_key_filename)) def generate_score_tables(self): key_label_caption = GenericArtifacts.key_tuple_to_caption_string(self.key_label_tuple) key_label_filename = GenericArtifacts.key_tuple_to_filename_string(self.key_label_tuple, lowercase=True) score_tables = dict() selection = (("Kernel", "Machine", "Type", ), ("Agents", ), "CPU") score_tables[selection] = ScoreTable(self.kernels, selection) selection = (("Kernel", "Machine", "Type", ), ("Connections", ), "CPU") score_tables[selection] = ScoreTable(self.kernels, selection) selection = (("Kernel", "Machine", "Type", ), ("Events", ), "CPU") score_tables[selection] = ScoreTable(self.kernels, selection) selection = (("Kernel", "Machine", "Type", ), ("Agents", "Connections", ), "CPU") score_tables[selection] = ScoreTable(self.kernels, selection) selection = (("Kernel", "Machine", "Type", ), ("Events", "Agents", ), "CPU") score_tables[selection] = ScoreTable(self.kernels, selection) selection = (("Kernel", "Machine", "Type", ), ("Events", "Connections", ), "CPU") score_tables[selection] = ScoreTable(self.kernels, selection) selection = (("Kernel", "Machine", "Type", ), ("Events", "Agents", "Connections", ), "CPU") score_tables[selection] = ScoreTable(self.kernels, selection) selection = (("Kernel", "Machine", "Type", ), ("Agents", ), "Max Memory") score_tables[selection] = ScoreTable(self.kernels, selection) selection = (("Kernel", "Machine", "Type", ), ("Connections", ), "Max Memory") score_tables[selection] = ScoreTable(self.kernels, selection) selection = (("Kernel", "Machine", "Type", ), ("Events", ), "Max Memory") score_tables[selection] = ScoreTable(self.kernels, selection) selection = (("Kernel", "Machine", "Type", ), ("Agents", "Connections", ), "Max Memory") score_tables[selection] = ScoreTable(self.kernels, selection) selection = (("Kernel", "Machine", "Type", ), ("Events", "Agents", ), "Max Memory") 
score_tables[selection] = ScoreTable(self.kernels, selection) selection = (("Kernel", "Machine", "Type", ), ("Events", "Connections", ), "Max Memory") score_tables[selection] = ScoreTable(self.kernels, selection) selection = (("Kernel", "Machine", "Type", ), ("Events", "Agents", "Connections", ), "Max Memory") score_tables[selection] = ScoreTable(self.kernels, selection) for sub_key in iter(self.sub_keys): event_count_vs_cpu_fits = self.filter_dict_for_sub_key(self.event_count_vs_cpu_fits, sub_key) event_count_vs_maxmem_fits = self.filter_dict_for_sub_key(self.event_count_vs_maxmem_fits, sub_key) agents_vs_cpu_fits = self.filter_dict_for_sub_key(self.agents_vs_cpu_fits, sub_key) agents_vs_maxmem_fits = self.filter_dict_for_sub_key(self.agents_vs_maxmem_fits, sub_key) connections_vs_cpu_fits = self.filter_dict_for_sub_key(self.connections_vs_cpu_fits, sub_key) connections_vs_maxmem_fits = self.filter_dict_for_sub_key(self.connections_vs_maxmem_fits, sub_key) event_count_and_agents_vs_cpu_fits = self.filter_dict_for_sub_key( self.event_count_and_agents_vs_cpu_fits, sub_key) event_count_and_agents_vs_maxmem_fits = self.filter_dict_for_sub_key( self.event_count_and_agents_vs_maxmem_fits, sub_key) event_count_and_connections_vs_cpu_fits = self.filter_dict_for_sub_key( self.event_count_and_connections_vs_cpu_fits, sub_key) event_count_and_connections_vs_maxmem_fits = self.filter_dict_for_sub_key( self.event_count_and_connections_vs_maxmem_fits, sub_key) agents_and_connections_vs_cpu_fits = self.filter_dict_for_sub_key( self.agents_and_connections_vs_cpu_fits, sub_key) agents_and_connections_vs_maxmem_fits = self.filter_dict_for_sub_key( self.agents_and_connections_vs_maxmem_fits, sub_key) event_count_and_agents_and_connections_vs_cpu_fits = self.filter_dict_for_sub_key( self.event_count_and_agents_and_connections_vs_cpu_fits, sub_key) event_count_and_agents_and_connections_vs_maxmem_fits = self.filter_dict_for_sub_key( self.event_count_and_agents_and_connections_vs_maxmem_fits, sub_key) score_tables[(("Kernel", "Machine", "Type", ), ("Agents", ), "CPU")].add_1d_fit_score(agents_vs_cpu_fits) score_tables[(("Kernel", "Machine", "Type", ), ("Agents", ), "Max Memory")].add_1d_fit_score( agents_vs_maxmem_fits) score_tables[(("Kernel", "Machine", "Type", ), ("Connections", ), "CPU")].add_1d_fit_score( connections_vs_cpu_fits) score_tables[(("Kernel", "Machine", "Type", ), ("Connections", ), "Max Memory")].add_1d_fit_score( connections_vs_maxmem_fits) score_tables[(("Kernel", "Machine", "Type", ), ("Events", ), "CPU")].add_1d_fit_score( event_count_vs_cpu_fits) score_tables[(("Kernel", "Machine", "Type", ), ("Events", ), "Max Memory")].add_1d_fit_score( event_count_vs_maxmem_fits) score_tables[(("Kernel", "Machine", "Type", ), ("Agents", "Connections",), "CPU")].add_2d_fit_score( agents_and_connections_vs_cpu_fits) score_tables[(("Kernel", "Machine", "Type", ), ("Agents", "Connections",), "Max Memory")].add_2d_fit_score( agents_and_connections_vs_maxmem_fits) score_tables[(("Kernel", "Machine", "Type", ), ("Events", "Agents",), "CPU")].add_2d_fit_score( event_count_and_agents_vs_cpu_fits) score_tables[(("Kernel", "Machine", "Type", ), ("Events", "Agents",), "Max Memory")].add_2d_fit_score( event_count_and_agents_vs_maxmem_fits) score_tables[(("Kernel", "Machine", "Type", ), ("Events", "Connections",), "CPU")].add_2d_fit_score( event_count_and_connections_vs_cpu_fits) score_tables[(("Kernel", "Machine", "Type", ), ("Events", "Connections",), "Max Memory")].add_2d_fit_score( 
event_count_and_connections_vs_maxmem_fits) score_tables[(("Kernel", "Machine", "Type", ), ("Events", "Agents", "Connections",), "CPU")].\ add_2d_fit_score(event_count_and_agents_and_connections_vs_cpu_fits) score_tables[(("Kernel", "Machine", "Type", ), ("Events", "Agents", "Connections",), "Max Memory")].\ add_3d_fit_score(event_count_and_agents_and_connections_vs_maxmem_fits) for selection, table in score_tables.iteritems(): independent_vars = selection[1] independent_caption = "" independent_filename = "" for var in independent_vars: independent_caption += "{} and ".format(var) independent_filename += "{}_".format(str(var).lower()) independent_caption = independent_caption[:-5] independent_filename = independent_filename[:-1] dependent_caption = selection[2] if dependent_caption == "CPU": dependent_filename = "cpu" else: dependent_filename = "maxmem" GenericArtifacts.generate_score_table(table, "Scores based on {} vs {} for {} fits".format(independent_caption, dependent_caption, key_label_caption), "{}_vs_{}_per_{}_fits_scores".format(independent_filename, dependent_filename, key_label_filename)) GenericArtifacts.generate_score_percentage_table(table, "Score percentages based on {} vs {} for {} fits".format( independent_caption, dependent_caption, key_label_caption), "{}_vs_{}_per_{}_fits_score_percentage".format( independent_filename, dependent_filename, key_label_filename)) def generate_machine_comparison_tables(self): key_label_caption = GenericArtifacts.key_tuple_to_caption_string(self.key_label_tuple) key_label_filename = GenericArtifacts.key_tuple_to_filename_string(self.key_label_tuple, lowercase=True) machine_comparison_tables = dict() machine_comparison_tables[(("Events", "Connections", ), "CPU")] = MachineComparisonTable(self.kernels) machine_comparison_tables[(("Events", "Agents", "Connections", ), "Max Memory")] = MachineComparisonTable( self.kernels) for sub_key in self.sub_keys: machine = sub_key[0] event_count_and_connections_vs_cpu_fits = self.filter_dict_for_sub_key( self.event_count_and_connections_vs_cpu_fits, sub_key) event_count_and_agents_and_connections_vs_maxmem_fits = self.filter_dict_for_sub_key( self.event_count_and_agents_and_connections_vs_maxmem_fits, sub_key) machine_comparison_tables[(("Events", "Connections",), "CPU")].add_2d_fit_score( event_count_and_connections_vs_cpu_fits, machine) machine_comparison_tables[(("Events", "Agents", "Connections",), "Max Memory")].add_3d_fit_score( event_count_and_agents_and_connections_vs_maxmem_fits, machine) selection = (("Events", "Connections",), "CPU") independent_caption = "Events \& Connections" independent_filename = "events_connections" dependent_caption = "CPU" dependent_filename = "cpu" print "Printing results for {}".format(selection) machine_comparison_tables[selection].generate_artifacts(key_label_caption, key_label_filename, independent_caption, independent_filename, dependent_caption, dependent_filename) selection = (("Events", "Agents", "Connections",), "Max Memory") independent_caption = "Events \& Agents \& Connections" independent_filename = "events_agents_connections" dependent_caption = "Maximum Memory" dependent_filename = "maxmem" print "Printing results for {}".format(selection) machine_comparison_tables[selection].generate_artifacts(key_label_caption, key_label_filename, independent_caption, independent_filename, dependent_caption, dependent_filename) def read_raw_inputs(): print "Reading in raw results" create_str = "CREATE TABLE IF NOT EXISTS raw_results (machine text, kernel text, type 
text, model text, " \ "iteration long, event_count long, final_time long, cpu long, maxmem long, agents long, " \ "connections long, bucket long)" experiment_db.execute(create_str) for input_file in os.listdir(FLAGS.root_dir): if re.search(r'run_result.*\.db', input_file): result_file = os.path.join(FLAGS.root_dir, input_file) print 'Reading results from {}'.format(result_file) input_db = DBWrapper(result_file) read_results(input_db) input_db.cleanup() def get_correct_type(row): row = list(row) model = row[3] if re.match("CompleteBi.*", model): row[2] = "complete-bipartite" elif re.match("SmallModel.*", model): row[2] = "Watts-Strogatz" elif re.match("Cycle.*", model): row[2] = "cycle" elif re.match("Hyper.*", model): row[2] = "hypercube" elif re.match("Star.*", model): row[2] = "star" elif re.match("Complete.*", model): row[2] = "complete" elif re.match("Erdos.*", model): row[2] = "erdos-reyni" elif re.match("Wheel.*", model): row[2] = "wheel" elif re.match("Circular.*", model): row[2] = "circular-ladder" elif re.match("Periodic.*", model): row[2] = "periodic-2grid" elif re.match("NonPeriodic.*", model): row[2] = "nonperiodic-2grid" else: print "Unknown model {}".format(model) assert False return row def get_bucket_event_count(event_count): global event_count_buckets global bucketing_factor for bucket in event_count_buckets: if (1.0 + bucketing_factor) * bucket >= event_count >= (1.0 - bucketing_factor) * bucket: return bucket return None def read_results(input_db): global experiment_db cmd_str = "SELECT machine, kernel, type, model, iteration, event_count, final_time, cpu, maxmem, agents, " \ "connections FROM 'raw_results'" for row in input_db.select(cmd_str): if row[2] == "None": row = get_correct_type(row) bucket = get_bucket_event_count(row[5]) if bucket is None: continue cmd_str = "INSERT INTO raw_results " \ "(machine, kernel, type, model, iteration, event_count, final_time, cpu, maxmem, agents, " \ "connections, bucket) " \ "VALUES ('{}', '{}', '{}', '{}', '{}', '{}', '{}', '{}', '{}', '{}', '{}', '{}')" \ .format(row[0], row[1], row[2], row[3], row[4], row[5], row[6], row[7], row[8], row[9], row[10], bucket) experiment_db.execute(cmd_str) experiment_db.commit() def generate_per_kernel_results_table(): global experiment_db global kernel_results_table global event_count_buckets kernel_results_table = ResultsTable() select_cmd = "SELECT kernel, bucket, model, event_count, agents, connections, cpu, maxmem FROM raw_results" for row in experiment_db.select(select_cmd): kernel_results_table.add_entry((row[0],), row[1], row[2], row[3], row[4], row[5], row[6], row[7]) kernel_results_table.create_filtered_table() def generate_per_kernel_results_artifacts(): print "Generating per kernel artifacts" global kernel_results_table kernel_artifacts = KernelArtifacts(kernel_results_table) kernel_artifacts.generate_multiline_plots() kernel_artifacts.generate_fit_tables() kernel_artifacts.generate_score_tables() kernel_artifacts.generate_machine_comparison_tables() print "Finished per kernel artifacts" def generate_per_kernel_and_machine_results_table(): global experiment_db global kernel_machine_results_table global event_count_buckets kernel_machine_results_table = ResultsTable() select_cmd = "SELECT kernel, machine, bucket, model, event_count, agents, connections, cpu, maxmem FROM raw_results" for row in experiment_db.select(select_cmd): kernel_machine_results_table.add_entry((row[0], row[1]), row[2], row[3], row[4], row[5], row[6], row[7], row[8]) 
kernel_machine_results_table.create_filtered_table() def generate_per_kernel_and_machine_results_artifacts(): print "Generating per kernel and machine artifacts" global kernel_machine_results_table kernel_and_machine_artifacts = KernelMachineArtifacts(kernel_machine_results_table) kernel_and_machine_artifacts.generate_multiline_plots() kernel_and_machine_artifacts.generate_fit_tables() kernel_and_machine_artifacts.generate_score_tables() kernel_and_machine_artifacts.generate_machine_comparison_tables() print "Finished per kernel and machine artifacts" def generate_per_kernel_and_machine_and_type_results_table(): global experiment_db global kernel_machine_type_results_table global event_count_buckets kernel_machine_type_results_table = ResultsTable() select_cmd = "SELECT kernel, machine, type, bucket, model, event_count, agents, connections, cpu, maxmem FROM " \ "raw_results" for row in experiment_db.select(select_cmd): kernel_machine_type_results_table.add_entry((row[0], row[1], row[2]), row[3], row[4], row[5], row[6], row[7], row[8], row[9]) kernel_machine_type_results_table.create_filtered_table() def generate_per_kernel_and_machine_and_type_results_artifacts(): print "Generating per kernel and machine and type artifacts" global kernel_machine_type_results_table kernel_and_machine_and_type_artifacts = KernelMachineTypeArtifacts(kernel_machine_type_results_table) kernel_and_machine_and_type_artifacts.generate_fit_tables() kernel_and_machine_and_type_artifacts.generate_score_tables() kernel_and_machine_and_type_artifacts.generate_machine_comparison_tables() print "Finished per kernel and machine and type artifacts" def generate_fit_comparison_artifacts(): global fit_comparison_table cpu_fit_list = list() memory_fit_list = list() for key, value in fit_comparison_table.iteritems(): if key[2] == "CPU": cpu_fit_list.append((value, key,)) else: memory_fit_list.append((value, key,)) cpu_fit_list.sort() cpu_fit_list.reverse() cpu_comparison_filename = os.path.join(FLAGS.root_dir, "cpu_fit_comparison_table.tex") print "\tGenerating {}".format(cpu_comparison_filename) output_latex = r"""\begin{table}[h] \centering """ output_latex += r"""\begin{tabular}{|l|l|c|} \hline """ output_latex += r"""Selection Keys & Independent Variables & $R^2$ \\ \hline """ for cpu_fit in cpu_fit_list: score = cpu_fit[0] entry = cpu_fit[1] for key in entry[0]: output_latex += "{} \& ".format(key) output_latex = output_latex[:-4] output_latex += " & " for var in entry[1]: output_latex += "{} \& ".format(var) output_latex = output_latex[:-4] output_latex += " & %5.4f" % float(score) output_latex += r""" \\ """ output_latex += r"""\hline \end{tabular} """ output_latex += "\\caption{Comparisons for CPU fits}\n" output_latex += "\\label{tab:cpu_fit_comparison}\n" output_latex += r"""\end{table}""" with open(cpu_comparison_filename, 'w') as f: f.write(output_latex) memory_fit_list.sort() memory_fit_list.reverse() memory_comparison_filename = os.path.join(FLAGS.root_dir, "memory_fit_comparison_table.tex") print "\tGenerating {}".format(memory_comparison_filename) output_latex = r"""\begin{table}[h] \centering """ output_latex += r"""\begin{tabular}{|l|l|c|} \hline """ output_latex += r"""Selection Keys & Independent Variables & $R^2$ \\ \hline """ for memory_fit in memory_fit_list: score = memory_fit[0] entry = memory_fit[1] for key in entry[0]: output_latex += "{} \& ".format(key) output_latex = output_latex[:-4] output_latex += " & " for var in entry[1]: output_latex += "{} \& ".format(var) output_latex = output_latex[:-4] 
output_latex += " & %5.4f" % float(score) output_latex += r""" \\ """ output_latex += r"""\hline \end{tabular} """ output_latex += "\\caption{Comparisons for Memory fits}\n" output_latex += "\\label{tab:memory_fit_comparison}\n" output_latex += r"""\end{table}""" with open(memory_comparison_filename, 'w') as f: f.write(output_latex) def process_raw_results(): generate_per_kernel_results_table() generate_per_kernel_results_artifacts() generate_per_kernel_and_machine_results_table() generate_per_kernel_and_machine_results_artifacts() generate_per_kernel_and_machine_and_type_results_table() generate_per_kernel_and_machine_and_type_results_artifacts() generate_fit_comparison_artifacts() def main(argv): global experiment_db try: FLAGS(argv) # parse flags except gflags.FlagsError, e: print '%s\nUsage: %s ARGS\n%s' % (e, sys.argv[0], FLAGS) sys.exit(1) full_path = os.path.join(FLAGS.root_dir, FLAGS.output_db) if FLAGS.read_inputs: print "Unlinking {}".format(full_path) try: os.unlink(full_path) except OSError, e: print "Unable able to unlink {} due to {}".format(full_path, e) else: print "Reusing {}".format(full_path) experiment_db = DBWrapper(full_path) if FLAGS.read_inputs: read_raw_inputs() process_raw_results() experiment_db.cleanup() if __name__ == '__main__': main(sys.argv)
apache-2.0
-456,720,360,468,172,540
55.938036
120
0.525164
false
Peddle/hue
desktop/core/src/desktop/migrations/0012_auto__chg_field_documentpermission_perms.py
30
8715
# -*- coding: utf-8 -*- from south.utils import datetime_utils as datetime from south.db import db from south.v2 import SchemaMigration from django.db import models class Migration(SchemaMigration): def forwards(self, orm): # Changing field 'DocumentPermission.perms' db.alter_column(u'desktop_documentpermission', 'perms', self.gf('django.db.models.fields.CharField')(max_length=10)) def backwards(self, orm): # Changing field 'DocumentPermission.perms' db.alter_column(u'desktop_documentpermission', 'perms', self.gf('django.db.models.fields.TextField')()) models = { u'auth.group': { 'Meta': {'object_name': 'Group'}, u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}), 'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}) }, u'auth.permission': { 'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'}, 'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}) }, u'auth.user': { 'Meta': {'object_name': 'User'}, 'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}), 'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}), 'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}) }, u'contenttypes.contenttype': { 'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"}, 'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}) }, u'desktop.document': { 'Meta': {'object_name': 'Document'}, 'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}), 'description': 
('django.db.models.fields.TextField', [], {'default': "''"}), 'extra': ('django.db.models.fields.TextField', [], {'default': "''"}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'last_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'db_index': 'True', 'blank': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255'}), 'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}), 'owner': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'doc_owner'", 'to': u"orm['auth.User']"}), 'tags': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['desktop.DocumentTag']", 'db_index': 'True', 'symmetrical': 'False'}), 'version': ('django.db.models.fields.SmallIntegerField', [], {'default': '1'}) }, u'desktop.document2': { 'Meta': {'object_name': 'Document2'}, 'data': ('django.db.models.fields.TextField', [], {'default': "'{}'"}), 'dependencies': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'dependencies_rel_+'", 'db_index': 'True', 'to': u"orm['desktop.Document2']"}), 'description': ('django.db.models.fields.TextField', [], {'default': "''"}), 'extra': ('django.db.models.fields.TextField', [], {'default': "''"}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_history': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}), 'last_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'db_index': 'True', 'blank': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255'}), 'owner': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'doc2_owner'", 'to': u"orm['auth.User']"}), 'tags': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'tags_rel_+'", 'db_index': 'True', 'to': u"orm['desktop.Document2']"}), 'type': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '32', 'db_index': 'True'}), 'uuid': ('django.db.models.fields.CharField', [], {'default': "'21bfcc51-ff98-46c5-a2c0-c23f7ea44256'", 'max_length': '36', 'db_index': 'True'}), 'version': ('django.db.models.fields.SmallIntegerField', [], {'default': '1', 'db_index': 'True'}) }, u'desktop.documentpermission': { 'Meta': {'object_name': 'DocumentPermission'}, 'doc': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['desktop.Document']"}), 'groups': ('django.db.models.fields.related.ManyToManyField', [], {'db_index': 'True', 'to': u"orm['auth.Group']", 'db_table': "'documentpermission_groups'", 'symmetrical': 'False'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'perms': ('django.db.models.fields.CharField', [], {'default': "'read'", 'max_length': '10'}), 'users': ('django.db.models.fields.related.ManyToManyField', [], {'db_index': 'True', 'to': u"orm['auth.User']", 'db_table': "'documentpermission_users'", 'symmetrical': 'False'}) }, u'desktop.documenttag': { 'Meta': {'object_name': 'DocumentTag'}, u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}), 'tag': ('django.db.models.fields.SlugField', [], {'max_length': '50'}) }, u'desktop.settings': { 'Meta': {'object_name': 'Settings'}, 'collect_usage': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}), u'id': ('django.db.models.fields.AutoField', [], 
{'primary_key': 'True'}), 'tours_and_tutorials': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}) }, u'desktop.userpreferences': { 'Meta': {'object_name': 'UserPreferences'}, u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'key': ('django.db.models.fields.CharField', [], {'max_length': '20'}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}), 'value': ('django.db.models.fields.TextField', [], {'max_length': '4096'}) } } complete_apps = ['desktop']
apache-2.0
7,916,051,459,008,315,000
74.791304
195
0.559266
false
MonicaHsu/truvaluation
venv/lib/python2.7/site-packages/flask/templating.py
783
4707
# -*- coding: utf-8 -*- """ flask.templating ~~~~~~~~~~~~~~~~ Implements the bridge to Jinja2. :copyright: (c) 2011 by Armin Ronacher. :license: BSD, see LICENSE for more details. """ import posixpath from jinja2 import BaseLoader, Environment as BaseEnvironment, \ TemplateNotFound from .globals import _request_ctx_stack, _app_ctx_stack from .signals import template_rendered from .module import blueprint_is_module from ._compat import itervalues, iteritems def _default_template_ctx_processor(): """Default template context processor. Injects `request`, `session` and `g`. """ reqctx = _request_ctx_stack.top appctx = _app_ctx_stack.top rv = {} if appctx is not None: rv['g'] = appctx.g if reqctx is not None: rv['request'] = reqctx.request rv['session'] = reqctx.session return rv class Environment(BaseEnvironment): """Works like a regular Jinja2 environment but has some additional knowledge of how Flask's blueprint works so that it can prepend the name of the blueprint to referenced templates if necessary. """ def __init__(self, app, **options): if 'loader' not in options: options['loader'] = app.create_global_jinja_loader() BaseEnvironment.__init__(self, **options) self.app = app class DispatchingJinjaLoader(BaseLoader): """A loader that looks for templates in the application and all the blueprint folders. """ def __init__(self, app): self.app = app def get_source(self, environment, template): for loader, local_name in self._iter_loaders(template): try: return loader.get_source(environment, local_name) except TemplateNotFound: pass raise TemplateNotFound(template) def _iter_loaders(self, template): loader = self.app.jinja_loader if loader is not None: yield loader, template # old style module based loaders in case we are dealing with a # blueprint that is an old style module try: module, local_name = posixpath.normpath(template).split('/', 1) blueprint = self.app.blueprints[module] if blueprint_is_module(blueprint): loader = blueprint.jinja_loader if loader is not None: yield loader, local_name except (ValueError, KeyError): pass for blueprint in itervalues(self.app.blueprints): if blueprint_is_module(blueprint): continue loader = blueprint.jinja_loader if loader is not None: yield loader, template def list_templates(self): result = set() loader = self.app.jinja_loader if loader is not None: result.update(loader.list_templates()) for name, blueprint in iteritems(self.app.blueprints): loader = blueprint.jinja_loader if loader is not None: for template in loader.list_templates(): prefix = '' if blueprint_is_module(blueprint): prefix = name + '/' result.add(prefix + template) return list(result) def _render(template, context, app): """Renders the template and fires the signal""" rv = template.render(context) template_rendered.send(app, template=template, context=context) return rv def render_template(template_name_or_list, **context): """Renders a template from the template folder with the given context. :param template_name_or_list: the name of the template to be rendered, or an iterable with template names the first one existing will be rendered :param context: the variables that should be available in the context of the template. """ ctx = _app_ctx_stack.top ctx.app.update_template_context(context) return _render(ctx.app.jinja_env.get_or_select_template(template_name_or_list), context, ctx.app) def render_template_string(source, **context): """Renders a template from the given template source string with the given context. 
:param source: the sourcecode of the template to be rendered :param context: the variables that should be available in the context of the template. """ ctx = _app_ctx_stack.top ctx.app.update_template_context(context) return _render(ctx.app.jinja_env.from_string(source), context, ctx.app)
mit
2,457,897,741,085,516,000
31.916084
83
0.612492
false
mmottahedi/neuralnilm_prototype
scripts/e349.py
2
6140
from __future__ import print_function, division import matplotlib import logging from sys import stdout matplotlib.use('Agg') # Must be before importing matplotlib.pyplot or pylab! from neuralnilm import (Net, RealApplianceSource, BLSTMLayer, DimshuffleLayer, BidirectionalRecurrentLayer) from neuralnilm.source import standardise, discretize, fdiff, power_and_fdiff from neuralnilm.experiment import run_experiment, init_experiment from neuralnilm.net import TrainingError from neuralnilm.layers import MixtureDensityLayer from neuralnilm.objectives import (scaled_cost, mdn_nll, scaled_cost_ignore_inactive, ignore_inactive, scaled_cost3) from neuralnilm.plot import MDNPlotter, CentralOutputPlotter from lasagne.nonlinearities import sigmoid, rectify, tanh from lasagne.objectives import mse from lasagne.init import Uniform, Normal from lasagne.layers import (LSTMLayer, DenseLayer, Conv1DLayer, ReshapeLayer, FeaturePoolLayer, RecurrentLayer) from lasagne.updates import nesterov_momentum, momentum from functools import partial import os import __main__ from copy import deepcopy from math import sqrt import numpy as np import theano.tensor as T NAME = os.path.splitext(os.path.split(__main__.__file__)[1])[0] PATH = "/homes/dk3810/workspace/python/neuralnilm/figures" SAVE_PLOT_INTERVAL = 5000 GRADIENT_STEPS = 100 source_dict = dict( filename='/data/dk3810/ukdale.h5', appliances=[ ['fridge freezer', 'fridge', 'freezer'], 'hair straighteners', 'television', 'dish washer', ['washer dryer', 'washing machine'] ], max_appliance_powers=[300, 500, 200, 2500, 2400], on_power_thresholds=[5] * 5, max_input_power=5900, min_on_durations=[60, 60, 60, 1800, 1800], min_off_durations=[12, 12, 12, 1800, 600], window=("2013-06-01", "2014-07-01"), seq_length=512, output_one_appliance=False, boolean_targets=False, train_buildings=[1], validation_buildings=[1], skip_probability=0.7, one_target_per_seq=False, n_seq_per_batch=16, subsample_target=2, include_diff=False, clip_appliance_power=True, target_is_prediction=False, # independently_center_inputs = True, standardise_input=True, unit_variance_targets=True, # input_padding=8, lag=0, output_central_value=True # reshape_target_to_2D=True # input_stats={'mean': np.array([ 0.05526326], dtype=np.float32), # 'std': np.array([ 0.12636775], dtype=np.float32)}, # target_stats={ # 'mean': np.array([ 0.04066789, 0.01881946, # 0.24639061, 0.17608672, 0.10273963], # dtype=np.float32), # 'std': np.array([ 0.11449792, 0.07338708, # 0.26608968, 0.33463112, 0.21250485], # dtype=np.float32)} ) N = 50 net_dict = dict( save_plot_interval=SAVE_PLOT_INTERVAL, # loss_function=partial(ignore_inactive, loss_func=mdn_nll, seq_length=SEQ_LENGTH), # loss_function=lambda x, t: mdn_nll(x, t).mean(), loss_function=lambda x, t: mse(x, t).mean(), # loss_function=partial(scaled_cost, loss_func=mse), # loss_function=ignore_inactive, # loss_function=partial(scaled_cost3, ignore_inactive=False), updates_func=momentum, learning_rate=1e-3, learning_rate_changes_by_iteration={ # 200: 1e-2, # 400: 1e-3, # 800: 1e-4 # 500: 1e-3 # 4000: 1e-03, # 6000: 5e-06, # 7000: 1e-06 # 2000: 5e-06 # 3000: 1e-05 # 7000: 5e-06, # 10000: 1e-06, # 15000: 5e-07, # 50000: 1e-07 }, do_save_activations=True, auto_reshape=False, plotter=CentralOutputPlotter # plotter=MDNPlotter ) """ |||||||||| |||||||||| |||||||||| |||||||||| |||||||||| |||||||||| 12345678901234567890 """ def exp_a(name): global source # source_dict_copy = deepcopy(source_dict) # source = RealApplianceSource(**source_dict_copy) net_dict_copy = deepcopy(net_dict) 
net_dict_copy.update(dict( experiment_name=name, source=source )) N = 512 output_shape = source.output_shape_after_processing() net_dict_copy['layers_config'] = [ { 'type': DimshuffleLayer, 'pattern': (0, 2, 1) # (batch, features, time) }, { 'type': Conv1DLayer, # convolve over the time axis 'num_filters': 16, 'filter_length': 4, 'stride': 1, 'nonlinearity': rectify, 'border': 'same' }, { 'type': Conv1DLayer, # convolve over the time axis 'num_filters': 16, 'filter_length': 4, 'stride': 1, 'nonlinearity': rectify, 'border': 'same' }, { 'type': DimshuffleLayer, 'pattern': (0, 2, 1) # back to (batch, time, features) }, { 'type': DenseLayer, 'num_units': N // 2, 'nonlinearity': rectify }, { 'type': DenseLayer, 'num_units': output_shape[1] * output_shape[2], 'nonlinearity': T.nnet.softplus } ] net = Net(**net_dict_copy) return net def main(): # EXPERIMENTS = list('abcdefghijklmnopqrstuvwxyz') EXPERIMENTS = list('a') for experiment in EXPERIMENTS: full_exp_name = NAME + experiment func_call = init_experiment(PATH, experiment, full_exp_name) logger = logging.getLogger(full_exp_name) try: net = eval(func_call) run_experiment(net, epochs=None) except KeyboardInterrupt: logger.info("KeyboardInterrupt") break except Exception as exception: logger.exception("Exception") raise finally: logging.shutdown() if __name__ == "__main__": main()
mit
4,608,381,862,903,523,000
30.167513
86
0.580619
false
lionloveqin/gstreamill
test/m3u8client.py
3
1581
""" pip install m3u8 first, please """ import sys import m3u8 import time import urllib2 import os url = "http://localhost:20119/cctv21/encoder/0/playlist.m3u8" media_sequence = 0 playlist = m3u8.load(url) while playlist.is_variant: url = playlist.base_uri + "/" + playlist.playlists[0].uri playlist = m3u8.load(url) current_sequence = media_sequence = playlist.media_sequence for segment in playlist.segments: print current_sequence, segment.uri current_sequence += 1 while True: playlist = m3u8.load(url) if playlist.media_sequence == media_sequence: time.sleep(1) continue media_sequence = playlist.media_sequence target_duration = playlist.target_duration if current_sequence < media_sequence: print "ERROR : missing segment" current_sequence = media_sequence index = -1 for segment in playlist.segments: index += 1 if media_sequence + index < current_sequence: continue seg_url = "%s/%s" % (playlist.base_uri, segment.uri) try: response = urllib2.urlopen(seg_url) buf = response.read() if not os.path.isdir(os.path.dirname(segment.uri)): os.mkdir(os.path.dirname(segment.uri)) f = open(segment.uri, "w") f.write(buf) f.close print "index: ", media_sequence + index, ", uri: ", segment.uri, ", size ", len(buf) current_sequence += 1 except: print "url: ", seg_url, sys.exc_info() time.sleep(target_duration - 1)
gpl-3.0
5,587,781,003,607,756,000
27.232143
96
0.616698
false
hgonzale/hssim
polyped/util.py
1
1885
import copy class Lambda(object): """ Lambda lambda-like class Acts like a lambda function, but its string representation is Python code that yields the object when executed. >>> f = Lambda('x : x**2') >>> f(1.41) 1.9880999999999998 >>> g = eval(str(f)) >>> g(2.82) 7.952399999999999 >>> f = Lambda('lambda x,y : x + y') >>> f(2,-0.1) 1.8999999999999999 """ def __init__(self, lam): if not lam.strip().startswith('lambda'): lam = 'lambda '+lam self.lam = lam self.op = eval(lam) def __call__(self, *args): return self.op(*args) def __repr__(self): return 'Lambda("'+self.lam+'")' class Struct(object): """ Struct struct-like class Acts like a dict, but keys are members of the object. >>> a = Struct(foo='bar', goo=7) >>> b = a.copy() >>> b.hoo = [1,2,3] >>> print a {'goo': 7, 'foo': 'bar'} >>> print b {'hoo': [1, 2, 3], 'goo': 7, 'foo': 'bar'} >>> c = eval(str(a)) >>> a.ioo = [] >>> print a {'ioo': [], 'goo': 7, 'foo': 'bar'} >>> print c {'goo': 7, 'foo': 'bar'} """ def __init__( self, file=None, **kwds ): if file: self.__dict__.update( eval( open(file).read() ) ) self.__dict__.update( kwds ) def copy( self ): return Struct(**copy.deepcopy(self.__dict__)) def read( self, file, globals={"__builtins__":None}, locals={} ): #s = open(file).read() #for k in subs.keys(): # s = s.replace( k, subs[k] ) self.__dict__.update( eval( open(file).read(), globals, locals ) ) def write( self, file ): open(file,'w').write(str(self)) def __repr__( self ): return str( self.__dict__ ) def __getstate__(self): return self.__dict__ def __setstate__( self, kwds ): self.__dict__.update( kwds ) if __name__ == "__main__": import doctest doctest.testmod()
bsd-2-clause
1,351,704,746,592,097,300
25.180556
70
0.522546
false
mfeurer/liac-arff
tests/test_data.py
1
9741
import types import unittest import arff class ConversorStub(object): def __init__(self, r_value): self.r_value = r_value def __call__(self, value): return self.r_value(value) class COOStub(object): def __init__(self, data, row, col): self.data = data self.row = row self.col = col class TestData(unittest.TestCase): def setUp(self): self.attributes = [('a1', 'INTEGER'), ('a2', 'INTEGER'), ('a3', 'STRING'), ('a4', 'STRING')] self.data = arff.Data() # -------------------------------------------------------------------------- # Tests for the decoding part def test_conversor(self): '''Basic data instances.''' conversors = [ConversorStub(str if arff.PY3 else unicode), ConversorStub(float), ConversorStub(int), ConversorStub(str if arff.PY3 else unicode)] fixture = u'Iris,3.4,2,Setosa' self.data.decode_data(fixture, conversors) result = self.data.data[0] expected = [u'Iris', 3.4, 2, u'Setosa'] self.assertEqual(len(result), 4) self.assertEqual(result[0], expected[0]) self.assertEqual(result[1], expected[1]) self.assertEqual(result[2], expected[2]) self.assertEqual(result[3], expected[3]) def test_sparse(self): '''Basic data instances.''' conversors = [ConversorStub(str if arff.PY3 else unicode), ConversorStub(float), ConversorStub(int), ConversorStub(str if arff.PY3 else unicode), ConversorStub(float), ConversorStub(int)] fixture = u'{0 Iris,1 3.4, 2 2}' self.data.decode_data(fixture, conversors) result = self.data.data[0] expected = [u'Iris', 3.4, 2, u'0', 0.0, 0] self.assertEqual(len(result), len(expected)) for i in range(len(expected)): self.assertEqual(result[i], expected[i]) # -------------------------------------------------------------------------- # Tests for the encoding part def test_simple(self): fixture = [[1, 3, 'Renato', 'Name with spaces']] result = self.data.encode_data(fixture, self.attributes) expected = u"1,3,Renato,'Name with spaces'" self.assertTrue(isinstance(result, types.GeneratorType)) self.assertEqual(next(result), expected) def test_null_value(self): fixture = [[1, None, 'Renato', '']] result = self.data.encode_data(fixture, self.attributes) expected = u"1,?,Renato,?" 
self.assertTrue(isinstance(result, types.GeneratorType)) self.assertEqual(next(result), expected) def test_too_short(self): fixture = [[1, None]] generator = self.data.encode_data(fixture, self.attributes) self.assertRaises(arff.BadObject, next, generator) class TestCOOData(unittest.TestCase): def setUp(self): self.attributes = [('a1', 'INTEGER'), ('a2', 'INTEGER'), ('a3', 'STRING'), ('a4', 'STRING')] self.data = arff.COOData() # -------------------------------------------------------------------------- # Tests for the decoding part def test_conversor(self): '''Basic data instances.''' conversors = [ConversorStub(str if arff.PY3 else unicode), ConversorStub(float), ConversorStub(int), ConversorStub(str if arff.PY3 else unicode)] fixture = u'{0 Iris,1 3.4,2 2,3 Setosa}' self.data.decode_data(fixture, conversors) result = self.data.data[0] row = self.data.data[1] col = self.data.data[2] expected = [u'Iris', 3.4, 2, u'Setosa'] self.assertEqual(len(result), 4) self.assertEqual(result[0], expected[0]) self.assertEqual(result[1], expected[1]) self.assertEqual(result[2], expected[2]) self.assertEqual(result[3], expected[3]) self.assertEqual(row, [0, 0, 0, 0]) self.assertEqual(col, [0, 1, 2, 3]) def test_sparse(self): '''Basic data instances.''' conversors = [ConversorStub(str if arff.PY3 else unicode), ConversorStub(float), ConversorStub(int), ConversorStub(str if arff.PY3 else unicode), ConversorStub(float), ConversorStub(int)] fixture = u'{0 Iris,1 3.4, 2 2}' self.data.decode_data(fixture, conversors) result = self.data.data[0] expected = {0: u'Iris', 1: 3.4, 2: 2} self.assertEqual(len(result), len(expected)) for i in range(len(expected)): self.assertEqual(result[i], expected[i]) # -------------------------------------------------------------------------- # Tests for the encoding part def test_simple(self): fixture = COOStub([1, None, 'Renato', 'Name with spaces'], [0, 0, 0, 0], [0, 1, 2, 3]) result = self.data.encode_data(fixture, self.attributes) self.assertTrue(isinstance(result, types.GeneratorType)) self.assertEqual(next(result), u'{ 0 1,1 ?,2 Renato,3 \'Name with spaces\' }') def test_null_value(self): fixture = COOStub([1, None, 'Renato', ''], [0, 0, 0, 0], [0, 1, 2, 3]) result = self.data.encode_data(fixture, self.attributes) self.assertTrue(isinstance(result, types.GeneratorType)) self.assertEqual(next(result), u'{ 0 1,1 ?,2 Renato,3 ? }') def test_sparse_matrix(self): fixture = COOStub([1, None, 'Renato', ''], [0, 5, 17, 55], [0, 1, 2, 3]) result = self.data.encode_data(fixture, self.attributes) self.assertTrue(isinstance(result, types.GeneratorType)) lines = [line for line in result] self.assertEqual(lines[0], '{ 0 1 }') self.assertEqual(lines[1], '{ }') self.assertEqual(lines[55], '{ 3 ? 
}') self.assertEqual(len(lines), 56) def test_encode_scipy_coo_example(self): attributes = (('', ''), ('', ''), ('', ''), ('', '')) fixture = COOStub([1, 1, 1, 1, 1, 1, 1], [0, 0, 1, 3, 1, 0, 0], [0, 2, 1, 3, 1, 0, 0]) generator = self.data.encode_data(fixture, attributes) self.assertRaises(ValueError, next, generator) class TestLODData(unittest.TestCase): def setUp(self): self.attributes = [('a1', 'INTEGER'), ('a2', 'INTEGER'), ('a3', 'STRING'), ('a4', 'STRING')] self.data = arff.LODData() # -------------------------------------------------------------------------- # Tests for the decoding part def test_conversor(self): '''Basic data instances.''' conversors = [ConversorStub(str if arff.PY3 else unicode), ConversorStub(float), ConversorStub(int), ConversorStub(str if arff.PY3 else unicode)] fixture = u'{0 Iris,1 3.4,2 2,3 Setosa}' self.data.decode_data(fixture, conversors) result = self.data.data[0] expected = {0: u'Iris', 1: 3.4, 2: 2, 3: u'Setosa'} self.assertEqual(len(result), 4) self.assertEqual(result[0], expected[0]) self.assertEqual(result[1], expected[1]) self.assertEqual(result[2], expected[2]) self.assertEqual(result[3], expected[3]) def test_sparse(self): '''Basic data instances.''' conversors = [ConversorStub(str if arff.PY3 else unicode), ConversorStub(float), ConversorStub(int), ConversorStub(str if arff.PY3 else unicode), ConversorStub(float), ConversorStub(int)] fixture = u'{0 Iris,1 3.4, 2 2}' self.data.decode_data(fixture, conversors) result = self.data.data[0] expected = [u'Iris', 3.4, 2] self.assertEqual(len(result), len(expected)) for i in range(len(expected)): self.assertEqual(result[i], expected[i]) # -------------------------------------------------------------------------- # Tests for the encoding part def test_simple(self): fixture = [{0: 1, 1: None, 2: 'Renato', 3: 'Name with spaces'}] result = self.data.encode_data(fixture, self.attributes) self.assertTrue(isinstance(result, types.GeneratorType)) self.assertEqual(next(result), u'{ 0 1,1 ?,2 Renato,3 \'Name with spaces\' }') def test_null_value(self): fixture = [{0: 1, 1: None, 2: 'Renato', 3: ''}] result = self.data.encode_data(fixture, self.attributes) self.assertTrue(isinstance(result, types.GeneratorType)) self.assertEqual(next(result), u'{ 0 1,1 ?,2 Renato,3 ? }') def test_sparse_matrix(self): fixture = [{0: 1}] fixture.extend([{}] * 4) fixture.append({1: None}) fixture.extend([{}] * 11) fixture.append({2: 'Renato'}) fixture.extend([{}] * 37) fixture.append({3: ''}) result = self.data.encode_data(fixture, self.attributes) self.assertTrue(isinstance(result, types.GeneratorType)) lines = [line for line in result] self.assertEqual(lines[0], '{ 0 1 }') self.assertEqual(lines[1], '{ }') self.assertEqual(lines[55], '{ 3 ? }') self.assertEqual(len(lines), 56)
mit
-1,370,606,490,509,260,000
36.183206
80
0.520994
false
clips-tk/gradefisl
sample-settings.py
1
5008
# Django settings for minha_grade_fisl project. import os PROJECT_ROOT = os.path.abspath(os.path.dirname(__file__)) DEBUG = True TEMPLATE_DEBUG = DEBUG ADMINS = ( # ('Your Name', '[email protected]'), ) MANAGERS = ADMINS DATABASES = { 'default': { 'ENGINE': 'django.db.backends.sqlite3', 'NAME': os.path.join(PROJECT_ROOT, 'minha_grade.db'), 'USER': '', 'PASSWORD': '', 'HOST': '', 'PORT': '', } } # Local time zone for this installation. Choices can be found here: # http://en.wikipedia.org/wiki/List_of_tz_zones_by_name # although not all choices may be available on all operating systems. # On Unix systems, a value of None will cause Django to use the same # timezone as the operating system. # If running in a Windows environment this must be set to the same as your # system time zone. TIME_ZONE = 'America/Sao_Paulo' # Language code for this installation. All choices can be found here: # http://www.i18nguy.com/unicode/language-identifiers.html LANGUAGE_CODE = 'pt-br' SITE_ID = 1 # If you set this to False, Django will make some optimizations so as not # to load the internationalization machinery. USE_I18N = True # If you set this to False, Django will not format dates, numbers and # calendars according to the current locale USE_L10N = True # Absolute filesystem path to the directory that will hold user-uploaded files. # Example: "/home/media/media.lawrence.com/media/" MEDIA_ROOT = os.path.join(PROJECT_ROOT, 'media') # URL that handles the media served from MEDIA_ROOT. Make sure to use a # trailing slash. # Examples: "http://media.lawrence.com/media/", "http://example.com/media/" MEDIA_URL = '/media/' # Absolute path to the directory static files should be collected to. # Don't put anything in this directory yourself; store your static files # in apps' "static/" subdirectories and in STATICFILES_DIRS. # Example: "/home/media/media.lawrence.com/static/" STATIC_ROOT = os.path.join(PROJECT_ROOT, 'public') # URL prefix for static files. # Example: "http://media.lawrence.com/static/" STATIC_URL = '/static/' # URL prefix for admin static files -- CSS, JavaScript and images. # Make sure to use a trailing slash. # Examples: "http://foo.com/static/admin/", "/static/admin/". ADMIN_MEDIA_PREFIX = STATIC_URL + 'admin/' # Additional locations of static files STATICFILES_DIRS = ( # Put strings here, like "/home/html/static" or "C:/www/django/static". # Always use forward slashes, even on Windows. # Don't forget to use absolute paths, not relative paths. os.path.join(PROJECT_ROOT, 'static'), ) # List of finder classes that know how to find static files in # various locations. STATICFILES_FINDERS = ( 'django.contrib.staticfiles.finders.FileSystemFinder', 'django.contrib.staticfiles.finders.AppDirectoriesFinder', # 'django.contrib.staticfiles.finders.DefaultStorageFinder', ) # Make this unique, and don't share it with anybody. SECRET_KEY = 'rg@3j66)+0)hypnb2ufr3gxg+00($in-wd=5+0%tbem0dk)n9^' # List of callables that know how to import templates from various sources. 
TEMPLATE_LOADERS = ( 'django.template.loaders.filesystem.Loader', 'django.template.loaders.app_directories.Loader', # 'django.template.loaders.eggs.Loader', ) MIDDLEWARE_CLASSES = ( 'django.middleware.common.CommonMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', ) ROOT_URLCONF = 'urls' TEMPLATE_DIRS = ( os.path.join(PROJECT_ROOT, "templates"), ) INSTALLED_APPS = ( 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.sites', 'django.contrib.messages', 'django.contrib.staticfiles', 'grade', 'twitterauth', # Uncomment the next line to enable the admin: 'django.contrib.admin', # Uncomment the next line to enable admin documentation: # 'django.contrib.admindocs', ) LOGIN_URL = "/login/" AUTH_PROFILE_MODULE = 'twitterauth.Profile' # A sample logging configuration. The only tangible logging # performed by this configuration is to send an email to # the site admins on every HTTP 500 error. # See http://docs.djangoproject.com/en/dev/topics/logging for # more details on how to customize your logging configuration. LOGGING = { 'version': 1, 'disable_existing_loggers': False, 'handlers': { 'mail_admins': { 'level': 'ERROR', 'class': 'django.utils.log.AdminEmailHandler' } }, 'loggers': { 'django.request': { 'handlers': ['mail_admins'], 'level': 'ERROR', 'propagate': True, }, } } # Arquivo para ambiente de testes (desenvolvimento) local_settings = os.path.join(PROJECT_ROOT, 'local_settings.py') if os.path.isfile(local_settings): execfile(local_settings)
mit
-5,491,289,979,042,328,000
30.898089
79
0.699081
false
liu602348184/django
tests/utils_tests/test_dateformat.py
265
6177
from __future__ import unicode_literals from datetime import date, datetime from django.test import SimpleTestCase, override_settings from django.test.utils import TZ_SUPPORT, requires_tz_support from django.utils import dateformat, translation from django.utils.dateformat import format from django.utils.timezone import ( get_default_timezone, get_fixed_timezone, make_aware, utc, ) @override_settings(TIME_ZONE='Europe/Copenhagen') class DateFormatTests(SimpleTestCase): def setUp(self): self._orig_lang = translation.get_language() translation.activate('en-us') def tearDown(self): translation.activate(self._orig_lang) def test_date(self): d = date(2009, 5, 16) self.assertEqual(date.fromtimestamp(int(format(d, 'U'))), d) def test_naive_datetime(self): dt = datetime(2009, 5, 16, 5, 30, 30) self.assertEqual(datetime.fromtimestamp(int(format(dt, 'U'))), dt) @requires_tz_support def test_datetime_with_local_tzinfo(self): ltz = get_default_timezone() dt = make_aware(datetime(2009, 5, 16, 5, 30, 30), ltz) self.assertEqual(datetime.fromtimestamp(int(format(dt, 'U')), ltz), dt) self.assertEqual(datetime.fromtimestamp(int(format(dt, 'U'))), dt.replace(tzinfo=None)) @requires_tz_support def test_datetime_with_tzinfo(self): tz = get_fixed_timezone(-510) ltz = get_default_timezone() dt = make_aware(datetime(2009, 5, 16, 5, 30, 30), ltz) self.assertEqual(datetime.fromtimestamp(int(format(dt, 'U')), tz), dt) self.assertEqual(datetime.fromtimestamp(int(format(dt, 'U')), ltz), dt) # astimezone() is safe here because the target timezone doesn't have DST self.assertEqual(datetime.fromtimestamp(int(format(dt, 'U'))), dt.astimezone(ltz).replace(tzinfo=None)) self.assertEqual(datetime.fromtimestamp(int(format(dt, 'U')), tz).utctimetuple(), dt.utctimetuple()) self.assertEqual(datetime.fromtimestamp(int(format(dt, 'U')), ltz).utctimetuple(), dt.utctimetuple()) def test_epoch(self): udt = datetime(1970, 1, 1, tzinfo=utc) self.assertEqual(format(udt, 'U'), '0') def test_empty_format(self): my_birthday = datetime(1979, 7, 8, 22, 00) self.assertEqual(dateformat.format(my_birthday, ''), '') def test_am_pm(self): my_birthday = datetime(1979, 7, 8, 22, 00) self.assertEqual(dateformat.format(my_birthday, 'a'), 'p.m.') def test_microsecond(self): # Regression test for #18951 dt = datetime(2009, 5, 16, microsecond=123) self.assertEqual(dateformat.format(dt, 'u'), '000123') def test_date_formats(self): my_birthday = datetime(1979, 7, 8, 22, 00) timestamp = datetime(2008, 5, 19, 11, 45, 23, 123456) self.assertEqual(dateformat.format(my_birthday, 'A'), 'PM') self.assertEqual(dateformat.format(timestamp, 'c'), '2008-05-19T11:45:23.123456') self.assertEqual(dateformat.format(my_birthday, 'd'), '08') self.assertEqual(dateformat.format(my_birthday, 'j'), '8') self.assertEqual(dateformat.format(my_birthday, 'l'), 'Sunday') self.assertEqual(dateformat.format(my_birthday, 'L'), 'False') self.assertEqual(dateformat.format(my_birthday, 'm'), '07') self.assertEqual(dateformat.format(my_birthday, 'M'), 'Jul') self.assertEqual(dateformat.format(my_birthday, 'b'), 'jul') self.assertEqual(dateformat.format(my_birthday, 'n'), '7') self.assertEqual(dateformat.format(my_birthday, 'N'), 'July') def test_time_formats(self): my_birthday = datetime(1979, 7, 8, 22, 00) self.assertEqual(dateformat.format(my_birthday, 'P'), '10 p.m.') self.assertEqual(dateformat.format(my_birthday, 's'), '00') self.assertEqual(dateformat.format(my_birthday, 'S'), 'th') self.assertEqual(dateformat.format(my_birthday, 't'), '31') 
self.assertEqual(dateformat.format(my_birthday, 'w'), '0') self.assertEqual(dateformat.format(my_birthday, 'W'), '27') self.assertEqual(dateformat.format(my_birthday, 'y'), '79') self.assertEqual(dateformat.format(my_birthday, 'Y'), '1979') self.assertEqual(dateformat.format(my_birthday, 'z'), '189') def test_dateformat(self): my_birthday = datetime(1979, 7, 8, 22, 00) self.assertEqual(dateformat.format(my_birthday, r'Y z \C\E\T'), '1979 189 CET') self.assertEqual(dateformat.format(my_birthday, r'jS \o\f F'), '8th of July') def test_futuredates(self): the_future = datetime(2100, 10, 25, 0, 00) self.assertEqual(dateformat.format(the_future, r'Y'), '2100') def test_timezones(self): my_birthday = datetime(1979, 7, 8, 22, 00) summertime = datetime(2005, 10, 30, 1, 00) wintertime = datetime(2005, 10, 30, 4, 00) timestamp = datetime(2008, 5, 19, 11, 45, 23, 123456) # 3h30m to the west of UTC tz = get_fixed_timezone(-210) aware_dt = datetime(2009, 5, 16, 5, 30, 30, tzinfo=tz) if TZ_SUPPORT: self.assertEqual(dateformat.format(my_birthday, 'O'), '+0100') self.assertEqual(dateformat.format(my_birthday, 'r'), 'Sun, 8 Jul 1979 22:00:00 +0100') self.assertEqual(dateformat.format(my_birthday, 'T'), 'CET') self.assertEqual(dateformat.format(my_birthday, 'e'), '') self.assertEqual(dateformat.format(aware_dt, 'e'), '-0330') self.assertEqual(dateformat.format(my_birthday, 'U'), '300315600') self.assertEqual(dateformat.format(timestamp, 'u'), '123456') self.assertEqual(dateformat.format(my_birthday, 'Z'), '3600') self.assertEqual(dateformat.format(summertime, 'I'), '1') self.assertEqual(dateformat.format(summertime, 'O'), '+0200') self.assertEqual(dateformat.format(wintertime, 'I'), '0') self.assertEqual(dateformat.format(wintertime, 'O'), '+0100') # Ticket #16924 -- We don't need timezone support to test this self.assertEqual(dateformat.format(aware_dt, 'O'), '-0330')
bsd-3-clause
1,270,093,093,130,593,500
44.755556
111
0.640926
false
zjuwangg/scrapy
scrapy/spidermiddlewares/urllength.py
152
1059
""" Url Length Spider Middleware See documentation in docs/topics/spider-middleware.rst """ import logging from scrapy.http import Request from scrapy.exceptions import NotConfigured logger = logging.getLogger(__name__) class UrlLengthMiddleware(object): def __init__(self, maxlength): self.maxlength = maxlength @classmethod def from_settings(cls, settings): maxlength = settings.getint('URLLENGTH_LIMIT') if not maxlength: raise NotConfigured return cls(maxlength) def process_spider_output(self, response, result, spider): def _filter(request): if isinstance(request, Request) and len(request.url) > self.maxlength: logger.debug("Ignoring link (url length > %(maxlength)d): %(url)s ", {'maxlength': self.maxlength, 'url': request.url}, extra={'spider': spider}) return False else: return True return (r for r in result or () if _filter(r))
bsd-3-clause
-127,814,537,147,781,490
27.621622
84
0.607177
false
jerrylei98/Dailydos
venv/lib/python2.7/site-packages/jinja2/runtime.py
335
22530
# -*- coding: utf-8 -*- """ jinja2.runtime ~~~~~~~~~~~~~~ Runtime helpers. :copyright: (c) 2010 by the Jinja Team. :license: BSD. """ import sys from itertools import chain from jinja2.nodes import EvalContext, _context_function_types from jinja2.utils import Markup, soft_unicode, escape, missing, concat, \ internalcode, object_type_repr from jinja2.exceptions import UndefinedError, TemplateRuntimeError, \ TemplateNotFound from jinja2._compat import imap, text_type, iteritems, \ implements_iterator, implements_to_string, string_types, PY2 # these variables are exported to the template runtime __all__ = ['LoopContext', 'TemplateReference', 'Macro', 'Markup', 'TemplateRuntimeError', 'missing', 'concat', 'escape', 'markup_join', 'unicode_join', 'to_string', 'identity', 'TemplateNotFound', 'make_logging_undefined'] #: the name of the function that is used to convert something into #: a string. We can just use the text type here. to_string = text_type #: the identity function. Useful for certain things in the environment identity = lambda x: x _last_iteration = object() def markup_join(seq): """Concatenation that escapes if necessary and converts to unicode.""" buf = [] iterator = imap(soft_unicode, seq) for arg in iterator: buf.append(arg) if hasattr(arg, '__html__'): return Markup(u'').join(chain(buf, iterator)) return concat(buf) def unicode_join(seq): """Simple args to unicode conversion and concatenation.""" return concat(imap(text_type, seq)) def new_context(environment, template_name, blocks, vars=None, shared=None, globals=None, locals=None): """Internal helper to for context creation.""" if vars is None: vars = {} if shared: parent = vars else: parent = dict(globals or (), **vars) if locals: # if the parent is shared a copy should be created because # we don't want to modify the dict passed if shared: parent = dict(parent) for key, value in iteritems(locals): if key[:2] == 'l_' and value is not missing: parent[key[2:]] = value return environment.context_class(environment, parent, template_name, blocks) class TemplateReference(object): """The `self` in templates.""" def __init__(self, context): self.__context = context def __getitem__(self, name): blocks = self.__context.blocks[name] return BlockReference(name, self.__context, blocks, 0) def __repr__(self): return '<%s %r>' % ( self.__class__.__name__, self.__context.name ) class Context(object): """The template context holds the variables of a template. It stores the values passed to the template and also the names the template exports. Creating instances is neither supported nor useful as it's created automatically at various stages of the template evaluation and should not be created by hand. The context is immutable. Modifications on :attr:`parent` **must not** happen and modifications on :attr:`vars` are allowed from generated template code only. Template filters and global functions marked as :func:`contextfunction`\s get the active context passed as first argument and are allowed to access the context read-only. The template context supports read only dict operations (`get`, `keys`, `values`, `items`, `iterkeys`, `itervalues`, `iteritems`, `__getitem__`, `__contains__`). Additionally there is a :meth:`resolve` method that doesn't fail with a `KeyError` but returns an :class:`Undefined` object for missing variables. 
""" __slots__ = ('parent', 'vars', 'environment', 'eval_ctx', 'exported_vars', 'name', 'blocks', '__weakref__') def __init__(self, environment, parent, name, blocks): self.parent = parent self.vars = {} self.environment = environment self.eval_ctx = EvalContext(self.environment, name) self.exported_vars = set() self.name = name # create the initial mapping of blocks. Whenever template inheritance # takes place the runtime will update this mapping with the new blocks # from the template. self.blocks = dict((k, [v]) for k, v in iteritems(blocks)) def super(self, name, current): """Render a parent block.""" try: blocks = self.blocks[name] index = blocks.index(current) + 1 blocks[index] except LookupError: return self.environment.undefined('there is no parent block ' 'called %r.' % name, name='super') return BlockReference(name, self, blocks, index) def get(self, key, default=None): """Returns an item from the template context, if it doesn't exist `default` is returned. """ try: return self[key] except KeyError: return default def resolve(self, key): """Looks up a variable like `__getitem__` or `get` but returns an :class:`Undefined` object with the name of the name looked up. """ if key in self.vars: return self.vars[key] if key in self.parent: return self.parent[key] return self.environment.undefined(name=key) def get_exported(self): """Get a new dict with the exported variables.""" return dict((k, self.vars[k]) for k in self.exported_vars) def get_all(self): """Return a copy of the complete context as dict including the exported variables. """ return dict(self.parent, **self.vars) @internalcode def call(__self, __obj, *args, **kwargs): """Call the callable with the arguments and keyword arguments provided but inject the active context or environment as first argument if the callable is a :func:`contextfunction` or :func:`environmentfunction`. """ if __debug__: __traceback_hide__ = True # noqa # Allow callable classes to take a context fn = __obj.__call__ for fn_type in ('contextfunction', 'evalcontextfunction', 'environmentfunction'): if hasattr(fn, fn_type): __obj = fn break if isinstance(__obj, _context_function_types): if getattr(__obj, 'contextfunction', 0): args = (__self,) + args elif getattr(__obj, 'evalcontextfunction', 0): args = (__self.eval_ctx,) + args elif getattr(__obj, 'environmentfunction', 0): args = (__self.environment,) + args try: return __obj(*args, **kwargs) except StopIteration: return __self.environment.undefined('value was undefined because ' 'a callable raised a ' 'StopIteration exception') def derived(self, locals=None): """Internal helper function to create a derived context.""" context = new_context(self.environment, self.name, {}, self.parent, True, None, locals) context.vars.update(self.vars) context.eval_ctx = self.eval_ctx context.blocks.update((k, list(v)) for k, v in iteritems(self.blocks)) return context def _all(meth): proxy = lambda self: getattr(self.get_all(), meth)() proxy.__doc__ = getattr(dict, meth).__doc__ proxy.__name__ = meth return proxy keys = _all('keys') values = _all('values') items = _all('items') # not available on python 3 if PY2: iterkeys = _all('iterkeys') itervalues = _all('itervalues') iteritems = _all('iteritems') del _all def __contains__(self, name): return name in self.vars or name in self.parent def __getitem__(self, key): """Lookup a variable or raise `KeyError` if the variable is undefined. 
""" item = self.resolve(key) if isinstance(item, Undefined): raise KeyError(key) return item def __repr__(self): return '<%s %s of %r>' % ( self.__class__.__name__, repr(self.get_all()), self.name ) # register the context as mapping if possible try: from collections import Mapping Mapping.register(Context) except ImportError: pass class BlockReference(object): """One block on a template reference.""" def __init__(self, name, context, stack, depth): self.name = name self._context = context self._stack = stack self._depth = depth @property def super(self): """Super the block.""" if self._depth + 1 >= len(self._stack): return self._context.environment. \ undefined('there is no parent block called %r.' % self.name, name='super') return BlockReference(self.name, self._context, self._stack, self._depth + 1) @internalcode def __call__(self): rv = concat(self._stack[self._depth](self._context)) if self._context.eval_ctx.autoescape: rv = Markup(rv) return rv class LoopContext(object): """A loop context for dynamic iteration.""" def __init__(self, iterable, recurse=None, depth0=0): self._iterator = iter(iterable) self._recurse = recurse self._after = self._safe_next() self.index0 = -1 self.depth0 = depth0 # try to get the length of the iterable early. This must be done # here because there are some broken iterators around where there # __len__ is the number of iterations left (i'm looking at your # listreverseiterator!). try: self._length = len(iterable) except (TypeError, AttributeError): self._length = None def cycle(self, *args): """Cycles among the arguments with the current loop index.""" if not args: raise TypeError('no items for cycling given') return args[self.index0 % len(args)] first = property(lambda x: x.index0 == 0) last = property(lambda x: x._after is _last_iteration) index = property(lambda x: x.index0 + 1) revindex = property(lambda x: x.length - x.index0) revindex0 = property(lambda x: x.length - x.index) depth = property(lambda x: x.depth0 + 1) def __len__(self): return self.length def __iter__(self): return LoopContextIterator(self) def _safe_next(self): try: return next(self._iterator) except StopIteration: return _last_iteration @internalcode def loop(self, iterable): if self._recurse is None: raise TypeError('Tried to call non recursive loop. Maybe you ' "forgot the 'recursive' modifier.") return self._recurse(iterable, self._recurse, self.depth0 + 1) # a nifty trick to enhance the error message if someone tried to call # the the loop without or with too many arguments. __call__ = loop del loop @property def length(self): if self._length is None: # if was not possible to get the length of the iterator when # the loop context was created (ie: iterating over a generator) # we have to convert the iterable into a sequence and use the # length of that + the number of iterations so far. 
iterable = tuple(self._iterator) self._iterator = iter(iterable) iterations_done = self.index0 + 2 self._length = len(iterable) + iterations_done return self._length def __repr__(self): return '<%s %r/%r>' % ( self.__class__.__name__, self.index, self.length ) @implements_iterator class LoopContextIterator(object): """The iterator for a loop context.""" __slots__ = ('context',) def __init__(self, context): self.context = context def __iter__(self): return self def __next__(self): ctx = self.context ctx.index0 += 1 if ctx._after is _last_iteration: raise StopIteration() next_elem = ctx._after ctx._after = ctx._safe_next() return next_elem, ctx class Macro(object): """Wraps a macro function.""" def __init__(self, environment, func, name, arguments, defaults, catch_kwargs, catch_varargs, caller): self._environment = environment self._func = func self._argument_count = len(arguments) self.name = name self.arguments = arguments self.defaults = defaults self.catch_kwargs = catch_kwargs self.catch_varargs = catch_varargs self.caller = caller @internalcode def __call__(self, *args, **kwargs): # try to consume the positional arguments arguments = list(args[:self._argument_count]) off = len(arguments) # if the number of arguments consumed is not the number of # arguments expected we start filling in keyword arguments # and defaults. if off != self._argument_count: for idx, name in enumerate(self.arguments[len(arguments):]): try: value = kwargs.pop(name) except KeyError: try: value = self.defaults[idx - self._argument_count + off] except IndexError: value = self._environment.undefined( 'parameter %r was not provided' % name, name=name) arguments.append(value) # it's important that the order of these arguments does not change # if not also changed in the compiler's `function_scoping` method. # the order is caller, keyword arguments, positional arguments! if self.caller: caller = kwargs.pop('caller', None) if caller is None: caller = self._environment.undefined('No caller defined', name='caller') arguments.append(caller) if self.catch_kwargs: arguments.append(kwargs) elif kwargs: raise TypeError('macro %r takes no keyword argument %r' % (self.name, next(iter(kwargs)))) if self.catch_varargs: arguments.append(args[self._argument_count:]) elif len(args) > self._argument_count: raise TypeError('macro %r takes not more than %d argument(s)' % (self.name, len(self.arguments))) return self._func(*arguments) def __repr__(self): return '<%s %s>' % ( self.__class__.__name__, self.name is None and 'anonymous' or repr(self.name) ) @implements_to_string class Undefined(object): """The default undefined type. This undefined type can be printed and iterated over, but every other access will raise an :exc:`jinja2.exceptions.UndefinedError`: >>> foo = Undefined(name='foo') >>> str(foo) '' >>> not foo True >>> foo + 42 Traceback (most recent call last): ... jinja2.exceptions.UndefinedError: 'foo' is undefined """ __slots__ = ('_undefined_hint', '_undefined_obj', '_undefined_name', '_undefined_exception') def __init__(self, hint=None, obj=missing, name=None, exc=UndefinedError): self._undefined_hint = hint self._undefined_obj = obj self._undefined_name = name self._undefined_exception = exc @internalcode def _fail_with_undefined_error(self, *args, **kwargs): """Regular callback function for undefined objects that raises an `jinja2.exceptions.UndefinedError` on call. 
""" if self._undefined_hint is None: if self._undefined_obj is missing: hint = '%r is undefined' % self._undefined_name elif not isinstance(self._undefined_name, string_types): hint = '%s has no element %r' % ( object_type_repr(self._undefined_obj), self._undefined_name ) else: hint = '%r has no attribute %r' % ( object_type_repr(self._undefined_obj), self._undefined_name ) else: hint = self._undefined_hint raise self._undefined_exception(hint) @internalcode def __getattr__(self, name): if name[:2] == '__': raise AttributeError(name) return self._fail_with_undefined_error() __add__ = __radd__ = __mul__ = __rmul__ = __div__ = __rdiv__ = \ __truediv__ = __rtruediv__ = __floordiv__ = __rfloordiv__ = \ __mod__ = __rmod__ = __pos__ = __neg__ = __call__ = \ __getitem__ = __lt__ = __le__ = __gt__ = __ge__ = __int__ = \ __float__ = __complex__ = __pow__ = __rpow__ = \ _fail_with_undefined_error def __eq__(self, other): return type(self) is type(other) def __ne__(self, other): return not self.__eq__(other) def __hash__(self): return id(type(self)) def __str__(self): return u'' def __len__(self): return 0 def __iter__(self): if 0: yield None def __nonzero__(self): return False __bool__ = __nonzero__ def __repr__(self): return 'Undefined' def make_logging_undefined(logger=None, base=None): """Given a logger object this returns a new undefined class that will log certain failures. It will log iterations and printing. If no logger is given a default logger is created. Example:: logger = logging.getLogger(__name__) LoggingUndefined = make_logging_undefined( logger=logger, base=Undefined ) .. versionadded:: 2.8 :param logger: the logger to use. If not provided, a default logger is created. :param base: the base class to add logging functionality to. This defaults to :class:`Undefined`. """ if logger is None: import logging logger = logging.getLogger(__name__) logger.addHandler(logging.StreamHandler(sys.stderr)) if base is None: base = Undefined def _log_message(undef): if undef._undefined_hint is None: if undef._undefined_obj is missing: hint = '%s is undefined' % undef._undefined_name elif not isinstance(undef._undefined_name, string_types): hint = '%s has no element %s' % ( object_type_repr(undef._undefined_obj), undef._undefined_name) else: hint = '%s has no attribute %s' % ( object_type_repr(undef._undefined_obj), undef._undefined_name) else: hint = undef._undefined_hint logger.warning('Template variable warning: %s', hint) class LoggingUndefined(base): def _fail_with_undefined_error(self, *args, **kwargs): try: return base._fail_with_undefined_error(self, *args, **kwargs) except self._undefined_exception as e: logger.error('Template variable error: %s', str(e)) raise e def __str__(self): rv = base.__str__(self) _log_message(self) return rv def __iter__(self): rv = base.__iter__(self) _log_message(self) return rv if PY2: def __nonzero__(self): rv = base.__nonzero__(self) _log_message(self) return rv def __unicode__(self): rv = base.__unicode__(self) _log_message(self) return rv else: def __bool__(self): rv = base.__bool__(self) _log_message(self) return rv return LoggingUndefined @implements_to_string class DebugUndefined(Undefined): """An undefined that returns the debug info when printed. >>> foo = DebugUndefined(name='foo') >>> str(foo) '{{ foo }}' >>> not foo True >>> foo + 42 Traceback (most recent call last): ... 
jinja2.exceptions.UndefinedError: 'foo' is undefined """ __slots__ = () def __str__(self): if self._undefined_hint is None: if self._undefined_obj is missing: return u'{{ %s }}' % self._undefined_name return '{{ no such element: %s[%r] }}' % ( object_type_repr(self._undefined_obj), self._undefined_name ) return u'{{ undefined value printed: %s }}' % self._undefined_hint @implements_to_string class StrictUndefined(Undefined): """An undefined that barks on print and iteration as well as boolean tests and all kinds of comparisons. In other words: you can do nothing with it except checking if it's defined using the `defined` test. >>> foo = StrictUndefined(name='foo') >>> str(foo) Traceback (most recent call last): ... jinja2.exceptions.UndefinedError: 'foo' is undefined >>> not foo Traceback (most recent call last): ... jinja2.exceptions.UndefinedError: 'foo' is undefined >>> foo + 42 Traceback (most recent call last): ... jinja2.exceptions.UndefinedError: 'foo' is undefined """ __slots__ = () __iter__ = __str__ = __len__ = __nonzero__ = __eq__ = \ __ne__ = __bool__ = __hash__ = \ Undefined._fail_with_undefined_error # remove remaining slots attributes, after the metaclass did the magic they # are unneeded and irritating as they contain wrong data for the subclasses. del Undefined.__slots__, DebugUndefined.__slots__, StrictUndefined.__slots__
mit
8,344,343,672,676,308,000
32.778111
96
0.566001
false
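A short usage sketch for the Undefined variants defined in the jinja2 runtime module above, driven through the public jinja2 API rather than by instantiating the classes directly; it assumes the jinja2 package is installed and importable.

from jinja2 import Environment, DebugUndefined, StrictUndefined
from jinja2.exceptions import UndefinedError

# Default Undefined: renders as an empty string, raises only on operations.
print(Environment().from_string('x={{ missing }}').render())        # "x="

# DebugUndefined: prints the original expression back, handy for debugging.
env = Environment(undefined=DebugUndefined)
print(env.from_string('x={{ missing }}').render())                  # "x={{ missing }}"

# StrictUndefined: any use, including printing, raises UndefinedError.
try:
    Environment(undefined=StrictUndefined).from_string('{{ missing }}').render()
except UndefinedError as exc:
    print(exc)                                                       # 'missing' is undefined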
i386x/doit
doit/support/app/errors.py
1
2027
# -*- coding: utf-8 -*- #! \file ./doit/support/app/errors.py #! \author Jiří Kučera, <[email protected]> #! \stamp 2016-09-03 14:38:52 (UTC+01:00, DST+01:00) #! \project DoIt!: Tools and Libraries for Building DSLs #! \license MIT #! \version 0.0.0 #! \fdesc @pyfile.docstr # """\ Application errors.\ """ __license__ = """\ Copyright (c) 2014 - 2017 Jiří Kučera. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\ """ from doit.support.errors import DoItError ERROR_APPLICATION = DoItError.alloc_codes(1) class ApplicationError(DoItError): """ """ __slots__ = [] def __init__(self, emsg): """ """ DoItError.__init__(self, ERROR_APPLICATION, emsg) #-def #-class class ApplicationExit(ApplicationError): """ """ __slots__ = [] def __init__(self): """ """ ApplicationError.__init__(self, "I am a signal rather then error. Please, handle me properly" ) #-def #-class
mit
3,798,925,105,162,348,500
29.621212
79
0.670955
false
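A hypothetical handling sketch for the error classes in doit/support/app/errors.py above; the import path follows that file's location and assumes the i386x/doit project (not the unrelated pydoit package) is on sys.path. ApplicationExit subclasses ApplicationError, so it has to be caught first.

from doit.support.app.errors import ApplicationError, ApplicationExit

def run_app():
    # ... application body; raises ApplicationExit to request a clean shutdown
    raise ApplicationExit()

try:
    run_app()
except ApplicationExit:
    pass                                   # a signal, not a failure: exit normally
except ApplicationError as exc:
    print('application error: %s' % exc)   # genuine application failure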
Tilo15/PhotoFiddle2
PF2/Tools/Denoise.py
1
1956
import cv2 import Tool from scipy import ndimage class Denoise(Tool.Tool): def on_init(self): self.id = "denoise" self.name = "Denoise" self.icon_path = "ui/PF2_Icons/Denoise.png" self.properties = [ # Detailer Tool.Property("enabled", "Denoise", "Header", False, has_toggle=True, has_button=False), Tool.Property("strength", "Strength", "Slider", 0, max=100, min=0), Tool.Property("w_strength", "White Strength", "Slider", 20, max=100, min=0), Tool.Property("b_strength", "Black Strength", "Slider", 70, max=100, min=0), Tool.Property("method", "Method", "Combo", 0, options=[ "Mean", "Gaussian", ]), ] def on_update(self, image): im = image if(self.props["enabled"].get_value()): bpp = int(str(im.dtype).replace("uint", "").replace("float", "")) np = float(2 ** bpp - 1) method = self.props["method"].get_value() strength = self.props["strength"].get_value() b_strength = self.props["b_strength"].get_value() w_strength = self.props["w_strength"].get_value() filtered = None if(method == 0): filtered = ndimage.median_filter(im, 3) elif(method == 1): filtered = ndimage.gaussian_filter(im, 2) w_filter = im w_filter[im > (float(np) / 10) * 9] = filtered[im > (float(np) / 10) * 9] b_filter = im b_filter[im < (float(np) / 10)] = filtered[im < (float(np) / 10)] # Blend im = cv2.addWeighted(filtered, (strength / 100), im, 1 - (strength / 100), 0) im = cv2.addWeighted(w_filter, (w_strength / 100), im, 1 - (w_strength / 100), 0) im = cv2.addWeighted(b_filter, (b_strength / 100), im, 1 - (b_strength / 100), 0) return im
gpl-3.0
-9,079,458,874,268,996,000
37.352941
100
0.514826
false
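An isolated sketch of the filter-and-blend step used by the Denoise tool above, with made-up input and slider values; the PhotoFiddle2 Tool/property plumbing is left out and only the median-filter branch (method == 0) is shown.

import numpy as np
import cv2
from scipy import ndimage

im = (np.random.rand(128, 128) * 255).astype(np.uint8)   # stand-in image
strength = 40.0                                           # "Strength" slider, 0-100

filtered = ndimage.median_filter(im, 3)                   # method == 0 branch above
out = cv2.addWeighted(filtered, strength / 100.0, im, 1 - strength / 100.0, 0)
print(out.dtype, out.shape)                               # uint8 (128, 128)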
dahlstrom-g/intellij-community
python/helpers/py3only/docutils/readers/__init__.py
170
3465
# $Id: __init__.py 7648 2013-04-18 07:36:22Z milde $ # Authors: David Goodger <[email protected]>; Ueli Schlaepfer # Copyright: This module has been placed in the public domain. """ This package contains Docutils Reader modules. """ __docformat__ = 'reStructuredText' import sys from docutils import utils, parsers, Component from docutils.transforms import universal if sys.version_info < (2,5): from docutils._compat import __import__ class Reader(Component): """ Abstract base class for docutils Readers. Each reader module or package must export a subclass also called 'Reader'. The two steps of a Reader's responsibility are `scan()` and `parse()`. Call `read()` to process a document. """ component_type = 'reader' config_section = 'readers' def get_transforms(self): return Component.get_transforms(self) + [ universal.Decorations, universal.ExposeInternals, universal.StripComments,] def __init__(self, parser=None, parser_name=None): """ Initialize the Reader instance. Several instance attributes are defined with dummy initial values. Subclasses may use these attributes as they wish. """ self.parser = parser """A `parsers.Parser` instance shared by all doctrees. May be left unspecified if the document source determines the parser.""" if parser is None and parser_name: self.set_parser(parser_name) self.source = None """`docutils.io` IO object, source of input data.""" self.input = None """Raw text input; either a single string or, for more complex cases, a collection of strings.""" def set_parser(self, parser_name): """Set `self.parser` by name.""" parser_class = parsers.get_parser_class(parser_name) self.parser = parser_class() def read(self, source, parser, settings): self.source = source if not self.parser: self.parser = parser self.settings = settings self.input = self.source.read() self.parse() return self.document def parse(self): """Parse `self.input` into a document tree.""" self.document = document = self.new_document() self.parser.parse(self.input, document) document.current_source = document.current_line = None def new_document(self): """Create and return a new empty document tree (root node).""" document = utils.new_document(self.source.source_path, self.settings) return document class ReReader(Reader): """ A reader which rereads an existing document tree (e.g. a deserializer). Often used in conjunction with `writers.UnfilteredWriter`. """ def get_transforms(self): # Do not add any transforms. They have already been applied # by the reader which originally created the document. return Component.get_transforms(self) _reader_aliases = {} def get_reader_class(reader_name): """Return the Reader class from the `reader_name` module.""" reader_name = reader_name.lower() if reader_name in _reader_aliases: reader_name = _reader_aliases[reader_name] try: module = __import__(reader_name, globals(), locals(), level=1) except ImportError: module = __import__(reader_name, globals(), locals(), level=0) return module.Reader
apache-2.0
-3,942,269,144,584,981,000
29.663717
78
0.644156
false
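A small sketch of exercising a Reader from this package indirectly through docutils' publisher API: 'standalone' is resolved to a Reader subclass by get_reader_class(), the same lookup the publisher uses internally. Assumes the docutils package is installed.

from docutils.core import publish_string
from docutils.readers import get_reader_class

print(get_reader_class('standalone'))     # <class 'docutils.readers.standalone.Reader'>

html = publish_string(source='Hello *docutils*!',
                      reader_name='standalone',
                      writer_name='html')
print(html[:40])                          # serialized HTML (bytes)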
peterlauri/django
django/templatetags/static.py
91
4391
from django import template from django.apps import apps from django.utils.encoding import iri_to_uri from django.utils.six.moves.urllib.parse import urljoin register = template.Library() class PrefixNode(template.Node): def __repr__(self): return "<PrefixNode for %r>" % self.name def __init__(self, varname=None, name=None): if name is None: raise template.TemplateSyntaxError( "Prefix nodes must be given a name to return.") self.varname = varname self.name = name @classmethod def handle_token(cls, parser, token, name): """ Class method to parse prefix node and return a Node. """ # token.split_contents() isn't useful here because tags using this method don't accept variable as arguments tokens = token.contents.split() if len(tokens) > 1 and tokens[1] != 'as': raise template.TemplateSyntaxError( "First argument in '%s' must be 'as'" % tokens[0]) if len(tokens) > 1: varname = tokens[2] else: varname = None return cls(varname, name) @classmethod def handle_simple(cls, name): try: from django.conf import settings except ImportError: prefix = '' else: prefix = iri_to_uri(getattr(settings, name, '')) return prefix def render(self, context): prefix = self.handle_simple(self.name) if self.varname is None: return prefix context[self.varname] = prefix return '' @register.tag def get_static_prefix(parser, token): """ Populates a template variable with the static prefix, ``settings.STATIC_URL``. Usage:: {% get_static_prefix [as varname] %} Examples:: {% get_static_prefix %} {% get_static_prefix as static_prefix %} """ return PrefixNode.handle_token(parser, token, "STATIC_URL") @register.tag def get_media_prefix(parser, token): """ Populates a template variable with the media prefix, ``settings.MEDIA_URL``. Usage:: {% get_media_prefix [as varname] %} Examples:: {% get_media_prefix %} {% get_media_prefix as media_prefix %} """ return PrefixNode.handle_token(parser, token, "MEDIA_URL") class StaticNode(template.Node): def __init__(self, varname=None, path=None): if path is None: raise template.TemplateSyntaxError( "Static template nodes must be given a path to return.") self.path = path self.varname = varname def url(self, context): path = self.path.resolve(context) return self.handle_simple(path) def render(self, context): url = self.url(context) if self.varname is None: return url context[self.varname] = url return '' @classmethod def handle_simple(cls, path): if apps.is_installed('django.contrib.staticfiles'): from django.contrib.staticfiles.storage import staticfiles_storage return staticfiles_storage.url(path) else: return urljoin(PrefixNode.handle_simple("STATIC_URL"), path) @classmethod def handle_token(cls, parser, token): """ Class method to parse prefix node and return a Node. """ bits = token.split_contents() if len(bits) < 2: raise template.TemplateSyntaxError( "'%s' takes at least one argument (path to file)" % bits[0]) path = parser.compile_filter(bits[1]) if len(bits) >= 2 and bits[-2] == 'as': varname = bits[3] else: varname = None return cls(varname, path) @register.tag('static') def do_static(parser, token): """ Joins the given path with the STATIC_URL setting. Usage:: {% static path [as varname] %} Examples:: {% static "myapp/css/base.css" %} {% static variable_with_path %} {% static "myapp/css/base.css" as admin_base_css %} {% static variable_with_path as varname %} """ return StaticNode.handle_token(parser, token) def static(path): """ Given a relative path to a static asset, return the absolute path to the asset. """ return StaticNode.handle_simple(path)
bsd-3-clause
5,384,387,752,229,967,000
25.93865
116
0.591893
false
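A minimal sketch of the static() helper and the StaticNode fallback behaviour from the template tag module above: with django.contrib.staticfiles not installed it reduces to urljoin(STATIC_URL, path). The bare settings.configure() call is only for illustration.

import django
from django.conf import settings

settings.configure(STATIC_URL='/static/', INSTALLED_APPS=[])
django.setup()

from django.templatetags.static import static
print(static('myapp/css/base.css'))       # /static/myapp/css/base.css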
yencarnacion/jaikuengine
.google_appengine/lib/django-1.5/django/utils/tzinfo.py
110
3247
"Implementation of tzinfo classes for use with datetime.datetime." from __future__ import unicode_literals import time from datetime import timedelta, tzinfo from django.utils.encoding import force_str, force_text, DEFAULT_LOCALE_ENCODING # Python's doc say: "A tzinfo subclass must have an __init__() method that can # be called with no arguments". FixedOffset and LocalTimezone don't honor this # requirement. Defining __getinitargs__ is sufficient to fix copy/deepcopy as # well as pickling/unpickling. class FixedOffset(tzinfo): "Fixed offset in minutes east from UTC." def __init__(self, offset): if isinstance(offset, timedelta): self.__offset = offset offset = self.__offset.seconds // 60 else: self.__offset = timedelta(minutes=offset) sign = '-' if offset < 0 else '+' self.__name = "%s%02d%02d" % (sign, abs(offset) / 60., abs(offset) % 60) def __repr__(self): return self.__name def __getinitargs__(self): return self.__offset, def utcoffset(self, dt): return self.__offset def tzname(self, dt): return self.__name def dst(self, dt): return timedelta(0) # This implementation is used for display purposes. It uses an approximation # for DST computations on dates >= 2038. # A similar implementation exists in django.utils.timezone. It's used for # timezone support (when USE_TZ = True) and focuses on correctness. class LocalTimezone(tzinfo): "Proxy timezone information from time module." def __init__(self, dt): tzinfo.__init__(self) self.__dt = dt self._tzname = self.tzname(dt) def __repr__(self): return force_str(self._tzname) def __getinitargs__(self): return self.__dt, def utcoffset(self, dt): if self._isdst(dt): return timedelta(seconds=-time.altzone) else: return timedelta(seconds=-time.timezone) def dst(self, dt): if self._isdst(dt): return timedelta(seconds=-time.altzone) - timedelta(seconds=-time.timezone) else: return timedelta(0) def tzname(self, dt): try: return force_text(time.tzname[self._isdst(dt)], DEFAULT_LOCALE_ENCODING) except UnicodeDecodeError: return None def _isdst(self, dt): tt = (dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second, dt.weekday(), 0, 0) try: stamp = time.mktime(tt) except (OverflowError, ValueError): # 32 bit systems can't handle dates after Jan 2038, and certain # systems can't handle dates before ~1901-12-01: # # >>> time.mktime((1900, 1, 13, 0, 0, 0, 0, 0, 0)) # OverflowError: mktime argument out of range # >>> time.mktime((1850, 1, 13, 0, 0, 0, 0, 0, 0)) # ValueError: year out of range # # In this case, we fake the date, because we only care about the # DST flag. tt = (2037,) + tt[1:] stamp = time.mktime(tt) tt = time.localtime(stamp) return tt.tm_isdst > 0
apache-2.0
2,230,284,132,824,383,000
31.47
87
0.587927
false
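A short sketch of FixedOffset from the tzinfo module above; offsets are given in minutes east of UTC. The module belongs to the Django 1.5 sources captured here and was dropped from later Django releases, so treat the import as version-specific.

from datetime import datetime
from django.utils.tzinfo import FixedOffset

ist = FixedOffset(330)                          # UTC+05:30
dt = datetime(2013, 1, 1, 12, 0, tzinfo=ist)
print(repr(ist))                                # +0530
print(dt.utcoffset())                           # 5:30:00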
eg-zhang/scikit-learn
benchmarks/bench_mnist.py
76
6136
""" ======================= MNIST dataset benchmark ======================= Benchmark on the MNIST dataset. The dataset comprises 70,000 samples and 784 features. Here, we consider the task of predicting 10 classes - digits from 0 to 9 from their raw images. By contrast to the covertype dataset, the feature space is homogenous. Example of output : [..] Classification performance: =========================== Classifier train-time test-time error-rat ------------------------------------------------------------ Nystroem-SVM 105.07s 0.91s 0.0227 ExtraTrees 48.20s 1.22s 0.0288 RandomForest 47.17s 1.21s 0.0304 SampledRBF-SVM 140.45s 0.84s 0.0486 CART 22.84s 0.16s 0.1214 dummy 0.01s 0.02s 0.8973 """ from __future__ import division, print_function # Author: Issam H. Laradji # Arnaud Joly <[email protected]> # License: BSD 3 clause import os from time import time import argparse import numpy as np from sklearn.datasets import fetch_mldata from sklearn.datasets import get_data_home from sklearn.ensemble import ExtraTreesClassifier from sklearn.ensemble import RandomForestClassifier from sklearn.dummy import DummyClassifier from sklearn.externals.joblib import Memory from sklearn.kernel_approximation import Nystroem from sklearn.kernel_approximation import RBFSampler from sklearn.metrics import zero_one_loss from sklearn.pipeline import make_pipeline from sklearn.svm import LinearSVC from sklearn.tree import DecisionTreeClassifier from sklearn.utils import check_array from sklearn.linear_model import LogisticRegression # Memoize the data extraction and memory map the resulting # train / test splits in readonly mode memory = Memory(os.path.join(get_data_home(), 'mnist_benchmark_data'), mmap_mode='r') @memory.cache def load_data(dtype=np.float32, order='F'): """Load the data, then cache and memmap the train/test split""" ###################################################################### ## Load dataset print("Loading dataset...") data = fetch_mldata('MNIST original') X = check_array(data['data'], dtype=dtype, order=order) y = data["target"] # Normalize features X = X / 255 ## Create train-test split (as [Joachims, 2006]) print("Creating train-test split...") n_train = 60000 X_train = X[:n_train] y_train = y[:n_train] X_test = X[n_train:] y_test = y[n_train:] return X_train, X_test, y_train, y_test ESTIMATORS = { "dummy": DummyClassifier(), 'CART': DecisionTreeClassifier(), 'ExtraTrees': ExtraTreesClassifier(n_estimators=100), 'RandomForest': RandomForestClassifier(n_estimators=100), 'Nystroem-SVM': make_pipeline(Nystroem(gamma=0.015, n_components=1000), LinearSVC(C=100)), 'SampledRBF-SVM': make_pipeline(RBFSampler(gamma=0.015, n_components=1000), LinearSVC(C=100)), 'LinearRegression-SAG': LogisticRegression(solver='sag', tol=1e-1, C=1e4) } if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument('--classifiers', nargs="+", choices=ESTIMATORS, type=str, default=['ExtraTrees', 'Nystroem-SVM'], help="list of classifiers to benchmark.") parser.add_argument('--n-jobs', nargs="?", default=1, type=int, help="Number of concurrently running workers for " "models that support parallelism.") parser.add_argument('--order', nargs="?", default="C", type=str, choices=["F", "C"], help="Allow to choose between fortran and C ordered " "data") parser.add_argument('--random-seed', nargs="?", default=0, type=int, help="Common seed used by random number generator.") args = vars(parser.parse_args()) print(__doc__) X_train, X_test, y_train, y_test = load_data(order=args["order"]) print("") 
print("Dataset statistics:") print("===================") print("%s %d" % ("number of features:".ljust(25), X_train.shape[1])) print("%s %d" % ("number of classes:".ljust(25), np.unique(y_train).size)) print("%s %s" % ("data type:".ljust(25), X_train.dtype)) print("%s %d (size=%dMB)" % ("number of train samples:".ljust(25), X_train.shape[0], int(X_train.nbytes / 1e6))) print("%s %d (size=%dMB)" % ("number of test samples:".ljust(25), X_test.shape[0], int(X_test.nbytes / 1e6))) print() print("Training Classifiers") print("====================") error, train_time, test_time = {}, {}, {} for name in sorted(args["classifiers"]): print("Training %s ... " % name, end="") estimator = ESTIMATORS[name] estimator_params = estimator.get_params() estimator.set_params(**{p: args["random_seed"] for p in estimator_params if p.endswith("random_state")}) if "n_jobs" in estimator_params: estimator.set_params(n_jobs=args["n_jobs"]) time_start = time() estimator.fit(X_train, y_train) train_time[name] = time() - time_start time_start = time() y_pred = estimator.predict(X_test) test_time[name] = time() - time_start error[name] = zero_one_loss(y_test, y_pred) print("done") print() print("Classification performance:") print("===========================") print("{0: <24} {1: >10} {2: >11} {3: >12}" "".format("Classifier ", "train-time", "test-time", "error-rate")) print("-" * 60) for name in sorted(args["classifiers"], key=error.get): print("{0: <23} {1: >10.2f}s {2: >10.2f}s {3: >12.4f}" "".format(name, train_time[name], test_time[name], error[name])) print()
bsd-3-clause
662,011,068,475,212,700
35.963855
80
0.569915
false
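A scaled-down sketch of the benchmark's timing loop, using synthetic data so it runs without downloading MNIST (the script above relies on fetch_mldata and the external mldata.org service). The classifier choice and sample sizes here are illustrative only.

from time import time
from sklearn.datasets import make_classification
from sklearn.dummy import DummyClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import zero_one_loss

X, y = make_classification(n_samples=2000, n_features=64, n_informative=16,
                           n_classes=10, random_state=0)
X_train, X_test, y_train, y_test = X[:1500], X[1500:], y[:1500], y[1500:]

for name, clf in [('dummy', DummyClassifier()), ('CART', DecisionTreeClassifier())]:
    t0 = time()
    clf.fit(X_train, y_train)
    train_time = time() - t0
    t0 = time()
    error = zero_one_loss(y_test, clf.predict(X_test))
    test_time = time() - t0
    print('%-8s train=%.2fs test=%.2fs error=%.4f' % (name, train_time, test_time, error))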
andris210296/andris-projeto
backend/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/trie/py.py
1323
1775
from __future__ import absolute_import, division, unicode_literals from pip._vendor.six import text_type from bisect import bisect_left from ._base import Trie as ABCTrie class Trie(ABCTrie): def __init__(self, data): if not all(isinstance(x, text_type) for x in data.keys()): raise TypeError("All keys must be strings") self._data = data self._keys = sorted(data.keys()) self._cachestr = "" self._cachepoints = (0, len(data)) def __contains__(self, key): return key in self._data def __len__(self): return len(self._data) def __iter__(self): return iter(self._data) def __getitem__(self, key): return self._data[key] def keys(self, prefix=None): if prefix is None or prefix == "" or not self._keys: return set(self._keys) if prefix.startswith(self._cachestr): lo, hi = self._cachepoints start = i = bisect_left(self._keys, prefix, lo, hi) else: start = i = bisect_left(self._keys, prefix) keys = set() if start == len(self._keys): return keys while self._keys[i].startswith(prefix): keys.add(self._keys[i]) i += 1 self._cachestr = prefix self._cachepoints = (start, i) return keys def has_keys_with_prefix(self, prefix): if prefix in self._data: return True if prefix.startswith(self._cachestr): lo, hi = self._cachepoints i = bisect_left(self._keys, prefix, lo, hi) else: i = bisect_left(self._keys, prefix) if i == len(self._keys): return False return self._keys[i].startswith(prefix)
mit
-3,805,703,339,996,145,700
25.492537
66
0.552676
false
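A usage sketch for the pure-Python Trie above; the import path mirrors the vendored location recorded here (pip/_vendor/html5lib/trie/py.py) and is an assumption — newer pip and html5lib releases have moved or renamed this module, so adjust the import to wherever the class lives in your tree.

from pip._vendor.html5lib.trie.py import Trie   # vendored path from this record; may differ

t = Trie({u'apple': 1, u'apply': 2, u'banana': 3})
print(t.keys(u'app'))                  # {'apple', 'apply'}
print(t.has_keys_with_prefix(u'ban'))  # True
print(t[u'apple'])                     # 1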
ciarams87/PyU4V
docs/source/programmers_guide_src/code/performance-diagnostic_calls.py
2
2738
# Copyright (c) 2020 Dell Inc. or its subsidiaries. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """docs/source/programmers_guide_src/code/performance-diagnostic_calls.py""" import PyU4V # Initialise PyU4V Unisphere connection conn = PyU4V.U4VConn() # Get a list of performance categories category_list = conn.performance.get_performance_categories_list() # Get a list of supported metrics for the category 'FEDirector' fe_dir_metrics = conn.performance.get_performance_metrics_list( category='FEDirector') # Get a list of KPI only metrics for the category 'StorageGroup' storage_group_metrics = conn.performance.get_performance_metrics_list( category='StorageGroup', kpi_only=True) # Get array KPI performance metrics for the most recent timestamp only, set # recency so timestamp has to be less than 5 minutes old array_performance_data = conn.performance.get_array_stats(metrics='KPI', recency=5) # Get ResponseTime for each SRP for the last 4 hours # Firstly get the most recent performance timestamp for your array recent_timestamp = conn.performance.get_last_available_timestamp() # Set the performance recency value to 10 minutes and check if the most # recent timestamp meets that recency value conn.performance.recency = 10 is_recent_ten = conn.performance.is_timestamp_current(recent_timestamp) # Recency can also be passed to is_timestamp_current is_recent_five = conn.performance.is_timestamp_current(recent_timestamp, minutes=5) # Get the start and end times by providing the most recent timestamp and # specifying a 4 hour difference start_time, end_time = conn.performance.get_timestamp_by_hour( end_time=recent_timestamp, hours_difference=4) # Get the list of SRPs srp_keys = conn.performance.get_storage_resource_pool_keys() srp_list = list() for key in srp_keys: srp_list.append(key.get('srpId')) # Get the performance data for each of the SRPs in the list for srp in srp_list: srp_data = conn.performance.get_storage_resource_pool_stats( srp_id=srp, metrics='ResponseTime', start_time=start_time, end_time=end_time) # Close the session conn.close_session()
mit
-3,690,300,471,233,520,000
41.78125
76
0.735939
false
ESS-LLP/erpnext-healthcare
erpnext/hr/report/department_analytics/department_analytics.py
11
1903
# Copyright (c) 2013, Frappe Technologies Pvt. Ltd. and contributors # For license information, please see license.txt from __future__ import unicode_literals import frappe from frappe import _ def execute(filters=None): if not filters: filters = {} columns = get_columns() employees = get_employees(filters) departments_result = get_department(filters) departments = [] if departments_result: for department in departments_result: departments.append(department) chart = get_chart_data(departments,employees) return columns, employees, None, chart def get_columns(): return [ _("Employee") + ":Link/Employee:120", _("Name") + ":Data:200", _("Date of Birth")+ ":Date:100", _("Branch") + ":Link/Branch:120", _("Department") + ":Link/Department:120", _("Designation") + ":Link/Designation:120", _("Gender") + "::60", _("Company") + ":Link/Company:120" ] def get_conditions(filters): conditions = "" if filters.get("department"): conditions += " and department = '%s'" % \ filters["department"].replace("'", "\\'") return conditions def get_employees(filters): conditions = get_conditions(filters) return frappe.db.sql("""select name, employee_name, date_of_birth, branch, department, designation, gender, company from `tabEmployee` where status = 'Active' %s""" % conditions, as_list=1) def get_department(filters): return frappe.db.sql("""select name from `tabDepartment`""" , as_list=1) def get_chart_data(departments,employees): if not departments: departments = [] datasets = [] for department in departments: if department: total_employee = frappe.db.sql("""select count(*) from \ `tabEmployee` where \ department = %s""" ,(department[0]), as_list=1) datasets.append(total_employee[0][0]) chart = { "data": { 'labels': departments, 'datasets': [{'name': 'Employees','values': datasets}] } } chart["type"] = "bar" return chart
gpl-3.0
8,818,555,275,118,910,000
30.716667
102
0.68103
false
ujjwalsharma045/Angular2-Creative-Tim-Admin-Theme-With-Code-
node_modules/node-gyp/gyp/pylib/gyp/MSVSUtil.py
1812
9537
# Copyright (c) 2013 Google Inc. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """Utility functions shared amongst the Windows generators.""" import copy import os # A dictionary mapping supported target types to extensions. TARGET_TYPE_EXT = { 'executable': 'exe', 'loadable_module': 'dll', 'shared_library': 'dll', 'static_library': 'lib', } def _GetLargePdbShimCcPath(): """Returns the path of the large_pdb_shim.cc file.""" this_dir = os.path.abspath(os.path.dirname(__file__)) src_dir = os.path.abspath(os.path.join(this_dir, '..', '..')) win_data_dir = os.path.join(src_dir, 'data', 'win') large_pdb_shim_cc = os.path.join(win_data_dir, 'large-pdb-shim.cc') return large_pdb_shim_cc def _DeepCopySomeKeys(in_dict, keys): """Performs a partial deep-copy on |in_dict|, only copying the keys in |keys|. Arguments: in_dict: The dictionary to copy. keys: The keys to be copied. If a key is in this list and doesn't exist in |in_dict| this is not an error. Returns: The partially deep-copied dictionary. """ d = {} for key in keys: if key not in in_dict: continue d[key] = copy.deepcopy(in_dict[key]) return d def _SuffixName(name, suffix): """Add a suffix to the end of a target. Arguments: name: name of the target (foo#target) suffix: the suffix to be added Returns: Target name with suffix added (foo_suffix#target) """ parts = name.rsplit('#', 1) parts[0] = '%s_%s' % (parts[0], suffix) return '#'.join(parts) def _ShardName(name, number): """Add a shard number to the end of a target. Arguments: name: name of the target (foo#target) number: shard number Returns: Target name with shard added (foo_1#target) """ return _SuffixName(name, str(number)) def ShardTargets(target_list, target_dicts): """Shard some targets apart to work around the linkers limits. Arguments: target_list: List of target pairs: 'base/base.gyp:base'. target_dicts: Dict of target properties keyed on target pair. Returns: Tuple of the new sharded versions of the inputs. """ # Gather the targets to shard, and how many pieces. targets_to_shard = {} for t in target_dicts: shards = int(target_dicts[t].get('msvs_shard', 0)) if shards: targets_to_shard[t] = shards # Shard target_list. new_target_list = [] for t in target_list: if t in targets_to_shard: for i in range(targets_to_shard[t]): new_target_list.append(_ShardName(t, i)) else: new_target_list.append(t) # Shard target_dict. new_target_dicts = {} for t in target_dicts: if t in targets_to_shard: for i in range(targets_to_shard[t]): name = _ShardName(t, i) new_target_dicts[name] = copy.copy(target_dicts[t]) new_target_dicts[name]['target_name'] = _ShardName( new_target_dicts[name]['target_name'], i) sources = new_target_dicts[name].get('sources', []) new_sources = [] for pos in range(i, len(sources), targets_to_shard[t]): new_sources.append(sources[pos]) new_target_dicts[name]['sources'] = new_sources else: new_target_dicts[t] = target_dicts[t] # Shard dependencies. for t in new_target_dicts: for deptype in ('dependencies', 'dependencies_original'): dependencies = copy.copy(new_target_dicts[t].get(deptype, [])) new_dependencies = [] for d in dependencies: if d in targets_to_shard: for i in range(targets_to_shard[d]): new_dependencies.append(_ShardName(d, i)) else: new_dependencies.append(d) new_target_dicts[t][deptype] = new_dependencies return (new_target_list, new_target_dicts) def _GetPdbPath(target_dict, config_name, vars): """Returns the path to the PDB file that will be generated by a given configuration. 
The lookup proceeds as follows: - Look for an explicit path in the VCLinkerTool configuration block. - Look for an 'msvs_large_pdb_path' variable. - Use '<(PRODUCT_DIR)/<(product_name).(exe|dll).pdb' if 'product_name' is specified. - Use '<(PRODUCT_DIR)/<(target_name).(exe|dll).pdb'. Arguments: target_dict: The target dictionary to be searched. config_name: The name of the configuration of interest. vars: A dictionary of common GYP variables with generator-specific values. Returns: The path of the corresponding PDB file. """ config = target_dict['configurations'][config_name] msvs = config.setdefault('msvs_settings', {}) linker = msvs.get('VCLinkerTool', {}) pdb_path = linker.get('ProgramDatabaseFile') if pdb_path: return pdb_path variables = target_dict.get('variables', {}) pdb_path = variables.get('msvs_large_pdb_path', None) if pdb_path: return pdb_path pdb_base = target_dict.get('product_name', target_dict['target_name']) pdb_base = '%s.%s.pdb' % (pdb_base, TARGET_TYPE_EXT[target_dict['type']]) pdb_path = vars['PRODUCT_DIR'] + '/' + pdb_base return pdb_path def InsertLargePdbShims(target_list, target_dicts, vars): """Insert a shim target that forces the linker to use 4KB pagesize PDBs. This is a workaround for targets with PDBs greater than 1GB in size, the limit for the 1KB pagesize PDBs created by the linker by default. Arguments: target_list: List of target pairs: 'base/base.gyp:base'. target_dicts: Dict of target properties keyed on target pair. vars: A dictionary of common GYP variables with generator-specific values. Returns: Tuple of the shimmed version of the inputs. """ # Determine which targets need shimming. targets_to_shim = [] for t in target_dicts: target_dict = target_dicts[t] # We only want to shim targets that have msvs_large_pdb enabled. if not int(target_dict.get('msvs_large_pdb', 0)): continue # This is intended for executable, shared_library and loadable_module # targets where every configuration is set up to produce a PDB output. # If any of these conditions is not true then the shim logic will fail # below. targets_to_shim.append(t) large_pdb_shim_cc = _GetLargePdbShimCcPath() for t in targets_to_shim: target_dict = target_dicts[t] target_name = target_dict.get('target_name') base_dict = _DeepCopySomeKeys(target_dict, ['configurations', 'default_configuration', 'toolset']) # This is the dict for copying the source file (part of the GYP tree) # to the intermediate directory of the project. This is necessary because # we can't always build a relative path to the shim source file (on Windows # GYP and the project may be on different drives), and Ninja hates absolute # paths (it ends up generating the .obj and .obj.d alongside the source # file, polluting GYPs tree). copy_suffix = 'large_pdb_copy' copy_target_name = target_name + '_' + copy_suffix full_copy_target_name = _SuffixName(t, copy_suffix) shim_cc_basename = os.path.basename(large_pdb_shim_cc) shim_cc_dir = vars['SHARED_INTERMEDIATE_DIR'] + '/' + copy_target_name shim_cc_path = shim_cc_dir + '/' + shim_cc_basename copy_dict = copy.deepcopy(base_dict) copy_dict['target_name'] = copy_target_name copy_dict['type'] = 'none' copy_dict['sources'] = [ large_pdb_shim_cc ] copy_dict['copies'] = [{ 'destination': shim_cc_dir, 'files': [ large_pdb_shim_cc ] }] # This is the dict for the PDB generating shim target. It depends on the # copy target. 
shim_suffix = 'large_pdb_shim' shim_target_name = target_name + '_' + shim_suffix full_shim_target_name = _SuffixName(t, shim_suffix) shim_dict = copy.deepcopy(base_dict) shim_dict['target_name'] = shim_target_name shim_dict['type'] = 'static_library' shim_dict['sources'] = [ shim_cc_path ] shim_dict['dependencies'] = [ full_copy_target_name ] # Set up the shim to output its PDB to the same location as the final linker # target. for config_name, config in shim_dict.get('configurations').iteritems(): pdb_path = _GetPdbPath(target_dict, config_name, vars) # A few keys that we don't want to propagate. for key in ['msvs_precompiled_header', 'msvs_precompiled_source', 'test']: config.pop(key, None) msvs = config.setdefault('msvs_settings', {}) # Update the compiler directives in the shim target. compiler = msvs.setdefault('VCCLCompilerTool', {}) compiler['DebugInformationFormat'] = '3' compiler['ProgramDataBaseFileName'] = pdb_path # Set the explicit PDB path in the appropriate configuration of the # original target. config = target_dict['configurations'][config_name] msvs = config.setdefault('msvs_settings', {}) linker = msvs.setdefault('VCLinkerTool', {}) linker['GenerateDebugInformation'] = 'true' linker['ProgramDatabaseFile'] = pdb_path # Add the new targets. They must go to the beginning of the list so that # the dependency generation works as expected in ninja. target_list.insert(0, full_copy_target_name) target_list.insert(0, full_shim_target_name) target_dicts[full_copy_target_name] = copy_dict target_dicts[full_shim_target_name] = shim_dict # Update the original target to depend on the shim target. target_dict.setdefault('dependencies', []).append(full_shim_target_name) return (target_list, target_dicts)
mit
-9,221,370,984,675,481,000
34.322222
80
0.66761
false
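A hypothetical sketch of ShardTargets from the MSVSUtil module above, called with a hand-built two-shard target; the sys.path entry is a placeholder for wherever the gyp pylib directory lives in your checkout.

import sys
sys.path.insert(0, 'node_modules/node-gyp/gyp/pylib')   # placeholder checkout path
from gyp import MSVSUtil

target_list = ['base/base.gyp:base#target']
target_dicts = {
    'base/base.gyp:base#target': {
        'target_name': 'base',
        'msvs_shard': 2,
        'sources': ['a.cc', 'b.cc', 'c.cc', 'd.cc'],
    },
}
new_list, new_dicts = MSVSUtil.ShardTargets(target_list, target_dicts)
print(new_list)
# ['base/base.gyp:base_0#target', 'base/base.gyp:base_1#target']
print(new_dicts['base/base.gyp:base_0#target']['sources'])   # ['a.cc', 'c.cc']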
swift-lang/swift-e-lab
parsl/addresses.py
1
1381
import logging import os import platform import requests import socket import fcntl import struct logger = logging.getLogger(__name__) def address_by_route(): logger.debug("Finding address by querying local routing table") addr = os.popen("/sbin/ip route get 8.8.8.8 | awk '{print $NF;exit}'").read().strip() logger.debug("Address found: {}".format(addr)) return addr def address_by_query(): logger.debug("Finding address by querying remote service") addr = requests.get('https://api.ipify.org').text logger.debug("Address found: {}".format(addr)) return addr def address_by_hostname(): logger.debug("Finding address by using local hostname") addr = platform.node() logger.debug("Address found: {}".format(addr)) return addr def address_by_interface(ifname): """Returns the IP address of the given interface name, e.g. 'eth0' Parameters ---------- ifname : str Name of the interface whose address is to be returned. Required. Taken from this Stack Overflow answer: https://stackoverflow.com/questions/24196932/how-can-i-get-the-ip-address-of-eth0-in-python#24196955 """ s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) return socket.inet_ntoa(fcntl.ioctl( s.fileno(), 0x8915, # SIOCGIFADDR struct.pack('256s', bytes(ifname[:15], 'utf-8')) )[20:24])
apache-2.0
-4,019,134,570,393,182,700
27.770833
143
0.673425
false
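A quick sketch of the helpers in parsl/addresses.py above. address_by_hostname() and address_by_query() work anywhere (the latter needs outbound internet access); address_by_interface() and address_by_route() assume a Linux host, and 'eth0' is only an example interface name.

from parsl.addresses import address_by_hostname, address_by_query, address_by_interface

print(address_by_hostname())              # local hostname, e.g. 'login01.cluster.example'
print(address_by_query())                 # public IP as reported by api.ipify.org
# print(address_by_interface('eth0'))     # Linux only; use a real interface name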
Dobatymo/livestreamer
src/livestreamer/plugins/livestation.py
34
2666
import re from livestreamer.plugin import Plugin, PluginError, PluginOptions from livestreamer.plugin.api import http, validate from livestreamer.stream import HLSStream LOGIN_PAGE_URL = "http://www.livestation.com/en/users/new" LOGIN_POST_URL = "http://www.livestation.com/en/sessions.json" _csrf_token_re = re.compile("<meta content=\"([^\"]+)\" name=\"csrf-token\"") _hls_playlist_re = re.compile("<meta content=\"([^\"]+.m3u8)\" property=\"og:video\" />") _url_re = re.compile("http(s)?://(\w+\.)?livestation.com") _csrf_token_schema = validate.Schema( validate.transform(_csrf_token_re.search), validate.any(None, validate.get(1)) ) _hls_playlist_schema = validate.Schema( validate.transform(_hls_playlist_re.search), validate.any( None, validate.all( validate.get(1), validate.url(scheme="http", path=validate.endswith(".m3u8")) ) ) ) _login_schema = validate.Schema({ "email": validate.text, validate.optional("errors"): validate.all( { "base": [validate.text] }, validate.get("base"), ) }) class Livestation(Plugin): options = PluginOptions({ "email": "", "password": "" }) @classmethod def can_handle_url(self, url): return _url_re.match(url) def _authenticate(self, email, password): csrf_token = http.get(LOGIN_PAGE_URL, schema=_csrf_token_schema) if not csrf_token: raise PluginError("Unable to find CSRF token") data = { "authenticity_token": csrf_token, "channel_id": "", "commit": "Login", "plan_id": "", "session[email]": email, "session[password]": password, "utf8": "\xE2\x9C\x93", # Check Mark Character } res = http.post(LOGIN_POST_URL, data=data, acceptable_status=(200, 422)) result = http.json(res, schema=_login_schema) errors = result.get("errors") if errors: errors = ", ".join(errors) raise PluginError("Unable to authenticate: {0}".format(errors)) self.logger.info("Successfully logged in as {0}", result["email"]) def _get_streams(self): login_email = self.options.get("email") login_password = self.options.get("password") if login_email and login_password: self._authenticate(login_email, login_password) hls_playlist = http.get(self.url, schema=_hls_playlist_schema) if not hls_playlist: return return HLSStream.parse_variant_playlist(self.session, hls_playlist) __plugin__ = Livestation
bsd-2-clause
-4,288,592,926,004,190,000
29.295455
89
0.599775
false
SnoringFrog/retain
retain/__init__.py
1
9569
#!/usr/bin/env python # -*- coding: iso-8859-1 -*- ''' retain - Command-line utility that removes all files except the ones specified on the command line. Usage: ------ retain [-fnrsv] [-d directory] filename [...] Options: -------- --directory <dir> The directory to operate on. Defaults to the current -d <dir> directory --force, -f Do not ask for confirmation if file to retain --no-exec, -n Show what would be done, but do not actually do it (implies -v) --recursive, -r Delete directories, too (recursively) "--safe, -s If a file is not found, program exits, nothing is deleted (overrides any previous -f option)", --verbose, -v Enable verbose messages Description: ------------ retain is the opposite of "rm" or "del": It takes a series of file names on the command line, and it deletes all files in the specified directory except the specified files. Copyright and License: ---------------------- Copyright © 2008 Brian M. Clapper This is free software, released under the following BSD-like license: Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. The end-user documentation included with the redistribution, if any, must include the following acknowlegement: This product includes software developed by Brian M. Clapper ([email protected], http://www.clapper.org/bmc/). That software is copyright © 2008 Brian M. Clapper. Alternately, this acknowlegement may appear in the software itself, if and wherever such third-party acknowlegements normally appear. THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL BRIAN M. CLAPPER BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ''' # $Id$ # Info about the module __version__ = '2.0.1' __author__ = 'Brian Clapper' __email__ = '[email protected]' __url__ = 'http://github.com/bmc/retain' __copyright__ = '© 2003-2011 Brian M. Clapper' __license__ = 'BSD-style license' # Package stuff __all__ = ["retain"] # Use the built-in 'set' type if using Python 2.4 or better. Otherwise, use # the old sets module. 
try: set except NameError: from sets import Set as set, ImmutableSet as frozenset # --------------------------------------------------------------------------- # Imports # --------------------------------------------------------------------------- from getopt import getopt, GetoptError import string import sys import os from sets import Set import stat import shutil # --------------------------------------------------------------------------- # Classes # --------------------------------------------------------------------------- class RetainException(Exception): def __init__(self, value): self.__value = value def __str__(self): return `self.__value` def get_value(self): return self.__value value = property(get_value, doc="Get the string for the exception") class RetainUsageException(RetainException): def __init__(self, value): RetainException.__init__(self, value) class Verbose: def __init__(self, verbose=0): self.__verbose = verbose def println(self, msg): if self.__verbose: sys.stderr.write(msg + "\n") def __call__(self, msg): self.println(msg) class FileRetainer: def __init__(self, argv): self.__parseParams(argv) self.__verbose = Verbose(self.__verbose) def retain(self): verbose = self.__verbose verbose("Changing directory to " + self.__dir) try: os.chdir(self.__dir) except OSError, ex: raise RetainException(str(ex)) for dirFile in os.listdir("."): self.__process_file(dirFile) # ----------------------------------------------------------------------- # Private Methods # ----------------------------------------------------------------------- def __process_file(self, dirFile): verbose = self.__verbose if dirFile in self.__files: verbose("Retaining " + dirFile) return try: mode = os.stat(dirFile)[stat.ST_MODE] if stat.S_ISDIR(mode): if not self.__recursive: sys.stderr.write("Skipping directory \"" + dirFile + "\" because -r (--recursive) " + "was not specified.\n"); else: verbose("Deleting " + dirFile) if not self.__no_exec: shutil.rmtree(dirFile) else: verbose("Deleting " + dirFile) if not self.__no_exec: os.unlink(dirFile) except OSError, ex: sys.stderr.write("Warning: Can't unlink \"" + dirFile + "\": " + str (ex) + "\n") def __parseParams(self, argv): # Parse the command-line parameters try: opts, args = getopt(argv[1:], "nvrfsd:", ["directory=", "no-exec", "recursive", "verbose", "force", "safe"]) except GetoptError, ex: self.__usage(argv[0], str (ex)) # throws an exception self.__no_exec = 0 self.__verbose = 0 self.__recursive = 0 self.__force = 0 self.__safe = 0 self.__dir = "." for o, a in opts: if o in ("--no-exec", "-n"): self.__no_exec = 1 self.__verbose = 1 continue if o in ("--verbose", "-v"): self.__verbose = 1 continue if o in ("--directory", "-d"): self.__dir = a continue if o in ("--recursive", "-r"): self.__recursive = 1 continue if o in ("--force", "-f"): self.__force = 1 self.__safe = 0 continue if o in ("--safe", "-s"): self.__safe = 1 self.__force = 0 continue files = [] if len(args) > 0: self.__files = set(args[0:]) if not self.__force: self.__checkFiles(self.__files) else: self.__usage(argv[0], "Missing file(s) to retain.") def __checkFiles(self, files): for fname in files: if self.__dir != ".": fpath = self.__dir + os.path.sep + fname else: fpath = fname if not (os.path.isfile(fpath)) and not (self.__recursive and os.path.isdir(fpath)): if self.__safe: raise RetainException, fpath + " not found, canceling execution." else: answer = raw_input(fpath + " was not found, continue? 
(y/n)\n") if answer.lower() != "y": raise RetainException, "User canceled execution" def __usage(self, prog, msg): u = [ "", "retain, version %s" % __version__, "", "Usage: %s [-fnrsv] [-d directory] filename [...]" % os.path.basename(prog), "", "Retain all the specified files, removing anything else.", "", "OPTIONS", "", "--directory <dir>", "-d <dir> Directory to operate on. Defaults to current directory", "--force, -f Do not ask for confirmation if file is not found (overrides any previous -s option)", "--no-exec, -n Show what would be done, but don't really do it (implies -v)", "--recursive, -r Delete directories, too (recursively)", "--safe, -s If a file is not found, program exits, nothing is deleted (overrides any previous -f option)", "--verbose, -v Enable verbose messages" ] result = [] if msg != None: result.append(msg) for i in range (len (u)): result.append(u[i]) raise RetainUsageException, result # --------------------------------------------------------------------------- # Main Program # --------------------------------------------------------------------------- def main(): try: retainer = FileRetainer(sys.argv) retainer.retain() except RetainUsageException, ex: for i in ex.value: sys.stderr.write(i + "\n") sys.exit(1) except RetainException, ex: sys.stderr.write(str (ex) + "\n") sys.exit(1) sys.exit(0) if __name__ == "__main__": main()
bsd-3-clause
5,046,276,541,689,229,000
29.571885
116
0.514474
false
byterom/android_external_chromium_org
third_party/cython/src/Cython/Plex/Traditional.py
102
3502
#======================================================================= # # Python Lexical Analyser # # Traditional Regular Expression Syntax # #======================================================================= from Regexps import Alt, Seq, Rep, Rep1, Opt, Any, AnyBut, Bol, Eol, Char from Errors import PlexError class RegexpSyntaxError(PlexError): pass def re(s): """ Convert traditional string representation of regular expression |s| into Plex representation. """ return REParser(s).parse_re() class REParser(object): def __init__(self, s): self.s = s self.i = -1 self.end = 0 self.next() def parse_re(self): re = self.parse_alt() if not self.end: self.error("Unexpected %s" % repr(self.c)) return re def parse_alt(self): """Parse a set of alternative regexps.""" re = self.parse_seq() if self.c == '|': re_list = [re] while self.c == '|': self.next() re_list.append(self.parse_seq()) re = Alt(*re_list) return re def parse_seq(self): """Parse a sequence of regexps.""" re_list = [] while not self.end and not self.c in "|)": re_list.append(self.parse_mod()) return Seq(*re_list) def parse_mod(self): """Parse a primitive regexp followed by *, +, ? modifiers.""" re = self.parse_prim() while not self.end and self.c in "*+?": if self.c == '*': re = Rep(re) elif self.c == '+': re = Rep1(re) else: # self.c == '?' re = Opt(re) self.next() return re def parse_prim(self): """Parse a primitive regexp.""" c = self.get() if c == '.': re = AnyBut("\n") elif c == '^': re = Bol elif c == '$': re = Eol elif c == '(': re = self.parse_alt() self.expect(')') elif c == '[': re = self.parse_charset() self.expect(']') else: if c == '\\': c = self.get() re = Char(c) return re def parse_charset(self): """Parse a charset. Does not include the surrounding [].""" char_list = [] invert = 0 if self.c == '^': invert = 1 self.next() if self.c == ']': char_list.append(']') self.next() while not self.end and self.c != ']': c1 = self.get() if self.c == '-' and self.lookahead(1) != ']': self.next() c2 = self.get() for a in xrange(ord(c1), ord(c2) + 1): char_list.append(chr(a)) else: char_list.append(c1) chars = ''.join(char_list) if invert: return AnyBut(chars) else: return Any(chars) def next(self): """Advance to the next char.""" s = self.s i = self.i = self.i + 1 if i < len(s): self.c = s[i] else: self.c = '' self.end = 1 def get(self): if self.end: self.error("Premature end of string") c = self.c self.next() return c def lookahead(self, n): """Look ahead n chars.""" j = self.i + n if j < len(self.s): return self.s[j] else: return '' def expect(self, c): """ Expect to find character |c| at current position. Raises an exception otherwise. """ if self.c == c: self.next() else: self.error("Missing %s" % repr(c)) def error(self, mess): """Raise exception to signal syntax error in regexp.""" raise RegexpSyntaxError("Syntax error in regexp %s at position %d: %s" % ( repr(self.s), self.i, mess))
bsd-3-clause
-2,021,016,205,671,642,400
21.74026
78
0.506568
false
Spiderlover/Toontown
toontown/toon/DistributedNPCBankerAI.py
3
3381
from otp.ai.AIBaseGlobal import * from direct.task.Task import Task from pandac.PandaModules import * from DistributedNPCToonBaseAI import * from toontown.estate import BankGlobals class DistributedNPCBankerAI(DistributedNPCToonBaseAI): FourthGagVelvetRopeBan = config.GetBool('want-ban-fourth-gag-velvet-rope', 0) def __init__(self, air, npcId, questCallback = None, hq = 0): DistributedNPCToonBaseAI.__init__(self, air, npcId, questCallback) self.hq = hq self.tutorial = 0 self.pendingAvId = None self.task = '' def avatarEnter(self): avId = self.air.getAvatarIdFromSender() if self.busy: self.sendClearMovie(av=avId) return self.busy = avId self.sendGUIMovie() self.acceptOnce(self.air.getAvatarExitEvent(avId), self.__handleUnexpectedExit, extraArgs=[avId]) DistributedNPCToonBaseAI.avatarEnter(self) def transferMoney(self, transactionAmount): av = self.air.doId2do.get(self.busy) if not av: return money = min(av.getMoney() - transactionAmount, 10000) av.b_setMoney(money) if transactionAmount != 0: self.air.bankManager.setMoney(self.busy, av.getBankMoney() + transactionAmount) self.clearTasks() self.sendDoneMovie() def sendGUIMovie(self): if self.task: taskMgr.remove(self.task) self.task = self.uniqueName('timeoutMovie') taskMgr.doMethodLater(60, self.sendTimeoutMovie, self.task) self.sendUpdate('setMovie', [BankGlobals.BANK_MOVIE_GUI, self.busy, ClockDelta.globalClockDelta.getRealNetworkTime()]) def sendTimeoutMovie(self, task=None): self.pendingAvId = None self.sendUpdate('setMovie', [BankGlobals.BANK_MOVIE_TIMEOUT, self.busy, ClockDelta.globalClockDelta.getRealNetworkTime()]) self.busy = 0 if self.task: taskMgr.remove(self.task) self.task = self.uniqueName('clearMovie') taskMgr.doMethodLater(5.5, self.sendClearMovie, self.task) if task is not None: return task.done def sendClearMovie(self, task=None, av=0): self.sendUpdate('setMovie', [BankGlobals.BANK_MOVIE_CLEAR, av, ClockDelta.globalClockDelta.getRealNetworkTime()]) if task is not None: return task.done def sendDoneMovie(self): self.sendUpdate('setMovie', [BankGlobals.BANK_MOVIE_DEPOSIT, self.busy, ClockDelta.globalClockDelta.getRealNetworkTime()]) self.busy = 0 if self.task: taskMgr.remove(self.task) self.task = self.uniqueName('clearMovie') taskMgr.doMethodLater(5.5, self.sendClearMovie, self.task) def rejectAvatar(self, avId): self.busy = avId self.sendUpdate('setMovie', [BankGlobals.BANK_MOVIE_REJECT, avId, ClockDelta.globalClockDelta.getRealNetworkTime()]) def __handleUnexpectedExit(self, avId): self.notify.warning('avatar:' + str(avId) + ' has exited unexpectedly') self.clearTasks() self.sendTimeoutMovie() def clearTasks(self): if self.task: taskMgr.remove(self.task) self.task = None
mit
-847,188,060,848,581,900
30.598131
81
0.628217
false
Azure/azure-sdk-for-python
sdk/eventhub/azure-mgmt-eventhub/azure/mgmt/eventhub/_configuration.py
1
3355
# coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for # license information. # # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is # regenerated. # -------------------------------------------------------------------------- from typing import TYPE_CHECKING from azure.core.configuration import Configuration from azure.core.pipeline import policies from azure.mgmt.core.policies import ARMHttpLoggingPolicy from ._version import VERSION if TYPE_CHECKING: # pylint: disable=unused-import,ungrouped-imports from typing import Any from azure.core.credentials import TokenCredential class EventHubManagementClientConfiguration(Configuration): """Configuration for EventHubManagementClient. Note that all parameters used to create this instance are saved as instance attributes. :param credential: Credential needed for the client to connect to Azure. :type credential: ~azure.core.credentials.TokenCredential :param subscription_id: Subscription credentials that uniquely identify a Microsoft Azure subscription. The subscription ID forms part of the URI for every service call. :type subscription_id: str """ def __init__( self, credential, # type: "TokenCredential" subscription_id, # type: str **kwargs # type: Any ): # type: (...) -> None if credential is None: raise ValueError("Parameter 'credential' must not be None.") if subscription_id is None: raise ValueError("Parameter 'subscription_id' must not be None.") super(EventHubManagementClientConfiguration, self).__init__(**kwargs) self.credential = credential self.subscription_id = subscription_id self.credential_scopes = kwargs.pop('credential_scopes', ['https://management.azure.com/.default']) kwargs.setdefault('sdk_moniker', 'azure-mgmt-eventhub/{}'.format(VERSION)) self._configure(**kwargs) def _configure( self, **kwargs # type: Any ): # type: (...) -> None self.user_agent_policy = kwargs.get('user_agent_policy') or policies.UserAgentPolicy(**kwargs) self.headers_policy = kwargs.get('headers_policy') or policies.HeadersPolicy(**kwargs) self.proxy_policy = kwargs.get('proxy_policy') or policies.ProxyPolicy(**kwargs) self.logging_policy = kwargs.get('logging_policy') or policies.NetworkTraceLoggingPolicy(**kwargs) self.http_logging_policy = kwargs.get('http_logging_policy') or ARMHttpLoggingPolicy(**kwargs) self.retry_policy = kwargs.get('retry_policy') or policies.RetryPolicy(**kwargs) self.custom_hook_policy = kwargs.get('custom_hook_policy') or policies.CustomHookPolicy(**kwargs) self.redirect_policy = kwargs.get('redirect_policy') or policies.RedirectPolicy(**kwargs) self.authentication_policy = kwargs.get('authentication_policy') if self.credential and not self.authentication_policy: self.authentication_policy = policies.BearerTokenCredentialPolicy(self.credential, *self.credential_scopes, **kwargs)
mit
-802,870,887,900,001,000
46.253521
173
0.677496
false
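The generated configuration above is normally built for you by EventHubManagementClient, but it can also be constructed directly, which makes the policy wiring done in _configure easier to see. A minimal sketch, assuming the separate azure-identity package is installed for the credential; the subscription id is a placeholder.

# Sketch only: build the generated configuration by hand.
# DefaultAzureCredential is assumed to come from the azure-identity
# package; it is not part of the module shown above.
from azure.identity import DefaultAzureCredential
from azure.mgmt.eventhub._configuration import EventHubManagementClientConfiguration

config = EventHubManagementClientConfiguration(
    credential=DefaultAzureCredential(),
    subscription_id="00000000-0000-0000-0000-000000000000",  # placeholder
)

# _configure() has already populated the pipeline policies and defaults.
print(type(config.retry_policy).__name__)   # RetryPolicy
print(config.credential_scopes)             # ['https://management.azure.com/.default']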
anaruse/chainer
tests/chainer_tests/functions_tests/math_tests/test_trigonometric.py
2
6481
import unittest import numpy import chainer from chainer.backends import cuda import chainer.functions as F from chainer import gradient_check from chainer import testing from chainer.testing import attr @testing.parameterize(*testing.product({ 'func_name': ['cos', 'sin', 'tan'], 'shape': [(3, 2), ()], 'dtype': [numpy.float16, numpy.float32, numpy.float64], })) class TrigonometricFunctionsTest(unittest.TestCase): def setUp(self): self.x = numpy.random.uniform(.5, 1, self.shape).astype(self.dtype) self.gy = numpy.random.uniform(-1, 1, self.shape).astype(self.dtype) self.ggx = numpy.random.uniform(.5, 1, self.shape).astype(self.dtype) self.func = getattr(F, self.func_name) camel_name = self.func_name[0].upper() + self.func_name[1:] self.func_class = getattr(F, camel_name) self.np_func = getattr(numpy, self.func_name) if self.dtype == numpy.float16: self.backward_options = {'eps': 1e-3, 'atol': 1e-2, 'rtol': 1e-2} self.double_backward_options = { 'eps': 1e-3, 'atol': 1e-2, 'rtol': 1e-2} else: self.backward_options = {'atol': 1e-4, 'rtol': 1e-3} self.double_backward_options = {'atol': 1e-4, 'rtol': 1e-3} def check_forward(self, x_data): x = chainer.Variable(x_data) y = self.func(x) testing.assert_allclose( self.np_func(self.x), y.data, atol=1e-4, rtol=1e-4) def test_forward_cpu(self): self.check_forward(self.x) @attr.gpu def test_forward_gpu(self): self.check_forward(cuda.to_gpu(self.x)) def check_backward(self, x_data, y_grad): gradient_check.check_backward( self.func, x_data, y_grad, dtype='d', **self.backward_options) def test_backward_cpu(self): self.check_backward(self.x, self.gy) @attr.gpu def test_backward_gpu(self): self.check_backward(cuda.to_gpu(self.x), cuda.to_gpu(self.gy)) def check_double_backward(self, x_data, y_grad, x_grad_grad): gradient_check.check_double_backward( self.func, x_data, y_grad, x_grad_grad, dtype='d', **self.double_backward_options) def test_double_backward_cpu(self): self.check_double_backward(self.x, self.gy, self.ggx) @attr.gpu def test_double_backward_gpu(self): self.check_double_backward(cuda.to_gpu( self.x), cuda.to_gpu(self.gy), cuda.to_gpu(self.ggx)) def test_label(self): self.assertEqual(self.func_class().label, self.func_name) def make_data(shape, dtype): x = numpy.random.uniform(-.9, .9, shape).astype(dtype) gy = numpy.random.uniform(-1, 1, shape).astype(dtype) ggx = numpy.random.uniform(-.9, .9, shape).astype(dtype) return x, gy, ggx @testing.unary_math_function_unittest( F.arcsin, make_data=make_data, forward_options={'atol': 1e-3, 'rtol': 1e-3}, double_backward_options={'eps': 1e-3}, ) class TestArcsin(unittest.TestCase): pass @testing.unary_math_function_unittest( F.arccos, make_data=make_data, forward_options={'atol': 1e-3, 'rtol': 1e-3}, double_backward_options={'eps': 1e-3}, ) class TestArccos(unittest.TestCase): pass @testing.unary_math_function_unittest(F.arctan, make_data=make_data) class TestArctan(unittest.TestCase): pass @testing.parameterize(*testing.product({ 'shape': [(3, 2), ()], 'dtype': [numpy.float16, numpy.float32, numpy.float64], })) class TestArctan2(unittest.TestCase): def setUp(self): self.x1 = numpy.random.uniform( -10.0, 10.0, self.shape).astype(self.dtype) self.x2 = numpy.random.uniform( -10.0, 10.0, self.shape).astype(self.dtype) self.gy = numpy.random.uniform(-1, 1, self.shape).astype(self.dtype) self.ggx1 = numpy.random.uniform( -10.0, 10.0, self.shape).astype(self.dtype) self.ggx2 = numpy.random.uniform( -10.0, 10.0, self.shape).astype(self.dtype) if self.dtype == numpy.float16: self.backward_options = { 'eps': 1e-3, 
'atol': 2 ** -4, 'rtol': 2 ** -4} self.double_backward_options = { 'eps': 1e-3, 'atol': 2 ** -4, 'rtol': 2 ** -4} else: self.backward_options = { 'atol': 1e-3, 'rtol': 1e-3} self.double_backward_options = { 'atol': 1e-3, 'rtol': 1e-3} # Avoid non-differentiable point self.x1[(abs(self.x1) < 1e-2) & (self.x2 < 0)] = 1 self.ggx1[(abs(self.ggx1) < 1e-2) & (self.ggx2 < 0)] = 1 def check_forward(self, x1_data, x2_data): y = F.arctan2(x1_data, x2_data) numpy.testing.assert_array_less( cuda.to_cpu(y.data), numpy.full(y.shape, numpy.pi)) numpy.testing.assert_array_less( numpy.full(y.shape, -numpy.pi), cuda.to_cpu(y.data)) testing.assert_allclose( numpy.arctan2(self.x1, self.x2), y.data, atol=1e-4, rtol=1e-4) def test_forward_cpu(self): self.check_forward(self.x1, self.x2) @attr.gpu def test_forward_gpu(self): self.check_forward(cuda.to_gpu(self.x1), cuda.to_gpu(self.x2)) def check_backward(self, x1_data, x2_data, y_grad): gradient_check.check_backward( F.arctan2, (x1_data, x2_data), y_grad, dtype='d', **self.backward_options) def test_backward_cpu(self): self.check_backward(self.x1, self.x2, self.gy) @attr.gpu def test_backward_gpu(self): self.check_backward(cuda.to_gpu(self.x1), cuda.to_gpu(self.x2), cuda.to_gpu(self.gy)) def check_double_backward( self, x1_data, x2_data, y_grad, x1_grad_grad, x2_grad_grad): gradient_check.check_double_backward( F.arctan2, (x1_data, x2_data), y_grad, (x1_grad_grad, x2_grad_grad), dtype='d', **self.double_backward_options) def test_double_backward_cpu(self): self.check_double_backward( self.x1, self.x2, self.gy, self.ggx1, self.ggx2) @attr.gpu def test_double_backward_gpu(self): self.check_double_backward( cuda.to_gpu(self.x1), cuda.to_gpu(self.x2), cuda.to_gpu(self.gy), cuda.to_gpu(self.ggx1), cuda.to_gpu(self.ggx2)) testing.run_module(__name__, __file__)
mit
-6,865,400,044,098,279,000
32.407216
77
0.590495
false
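The arctan2 test above routes everything through gradient_check.check_backward; the same numerical check can be run on its own, outside the test harness. A minimal sketch assuming chainer and numpy are available; the shape, seed, and tolerances are illustrative choices, not values taken from the test file.

import numpy
import chainer.functions as F
from chainer import gradient_check

# Stand-alone version of the backward check the test class performs:
# compare F.arctan2's analytic gradient with numerical differentiation.
rng = numpy.random.RandomState(0)
x1 = rng.uniform(-10.0, 10.0, (3, 2)).astype(numpy.float32)
x2 = rng.uniform(-10.0, 10.0, (3, 2)).astype(numpy.float32)
gy = rng.uniform(-1.0, 1.0, (3, 2)).astype(numpy.float32)

# Keep inputs away from the non-differentiable region (x1 near 0, x2 < 0),
# just as the setUp method does.
x1[(abs(x1) < 1e-2) & (x2 < 0)] = 1

gradient_check.check_backward(
    F.arctan2, (x1, x2), gy, dtype='d', atol=1e-3, rtol=1e-3)
print('arctan2 backward check passed')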
nburn42/tensorflow
tensorflow/python/pywrap_tensorflow.py
33
3219
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================= """A wrapper for TensorFlow SWIG-generated bindings.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import ctypes import sys import traceback from tensorflow.python.platform import self_check # Perform pre-load sanity checks in order to produce a more actionable error # than we get from an error during SWIG import. self_check.preload_check() # pylint: disable=wildcard-import,g-import-not-at-top,unused-import,line-too-long try: # This import is expected to fail if there is an explicit shared object # dependency (with_framework_lib=true), since we do not need RTLD_GLOBAL. from tensorflow.python import pywrap_dlopen_global_flags _use_dlopen_global_flags = True except ImportError: _use_dlopen_global_flags = False # On UNIX-based platforms, pywrap_tensorflow is a SWIG-generated # python library that dynamically loads _pywrap_tensorflow.so. _can_set_rtld_local = (hasattr(sys, 'getdlopenflags') and hasattr(sys, 'setdlopenflags')) if _can_set_rtld_local: _default_dlopen_flags = sys.getdlopenflags() try: if _use_dlopen_global_flags: pywrap_dlopen_global_flags.set_dlopen_flags() elif _can_set_rtld_local: # Ensure RTLD_LOCAL behavior for platforms where it isn't the default # (macOS). On Linux RTLD_LOCAL is 0, so this does nothing (and would not # override an RTLD_GLOBAL in _default_dlopen_flags). sys.setdlopenflags(_default_dlopen_flags | ctypes.RTLD_LOCAL) from tensorflow.python.pywrap_tensorflow_internal import * from tensorflow.python.pywrap_tensorflow_internal import __version__ from tensorflow.python.pywrap_tensorflow_internal import __git_version__ from tensorflow.python.pywrap_tensorflow_internal import __compiler_version__ from tensorflow.python.pywrap_tensorflow_internal import __cxx11_abi_flag__ from tensorflow.python.pywrap_tensorflow_internal import __monolithic_build__ if _use_dlopen_global_flags: pywrap_dlopen_global_flags.reset_dlopen_flags() elif _can_set_rtld_local: sys.setdlopenflags(_default_dlopen_flags) except ImportError: msg = """%s\n\nFailed to load the native TensorFlow runtime.\n See https://www.tensorflow.org/install/install_sources#common_installation_problems\n for some common reasons and solutions. Include the entire stack trace above this error message when asking for help.""" % traceback.format_exc() raise ImportError(msg) # pylint: enable=wildcard-import,g-import-not-at-top,unused-import,line-too-long
apache-2.0
4,763,410,037,395,336,000
41.355263
85
0.742467
false
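The wrapper above protects the SWIG import with a save/modify/restore dance around the interpreter's dlopen flags. A stripped-down sketch of that same pattern, independent of TensorFlow; the module imported here is only a stand-in for a real C extension.

import ctypes
import sys

# Illustration of the RTLD handling in pywrap_tensorflow.py: remember the
# current dlopen flags, request RTLD_LOCAL while importing a native module,
# then restore whatever was set before. The hasattr guard mirrors the
# _can_set_rtld_local check above, since these functions are not available
# on every platform.
_can_set = hasattr(sys, 'getdlopenflags') and hasattr(sys, 'setdlopenflags')
_saved = sys.getdlopenflags() if _can_set else None

try:
    if _can_set:
        sys.setdlopenflags(_saved | ctypes.RTLD_LOCAL)
    import math  # stand-in for a native module such as _pywrap_tensorflow_internal
finally:
    if _can_set:
        sys.setdlopenflags(_saved)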