commit (string, 40 chars) | subject (string, 1–3.25k chars) | old_file (string, 4–311 chars) | new_file (string, 4–311 chars) | old_contents (string, 0–26.3k chars) | lang (string, 3 classes) | proba (float64, 0–1) | diff (string, 0–7.82k chars)
---|---|---|---|---|---|---|---|
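(Legend: the diff and new-file payload cells below are stored URL-encoded, as in the source dataset: %0A is a newline, %0D a carriage return, %09 a tab, %22 a double quote, %5B/%5D square brackets, %7B/%7D curly braces, and %25 a literal percent sign.)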
0ee1150e1f8f0c8cae7b906c4d349b8867bbe0b4
|
Add dmrg example
|
examples/dmrg/30-dmrg_casscf_nevpt2_for_Cr2.py
|
examples/dmrg/30-dmrg_casscf_nevpt2_for_Cr2.py
|
Python
| 0 |
@@ -0,0 +1,994 @@
+#!/usr/bin/env python%0Aimport numpy%0Afrom pyscf import gto%0Afrom pyscf import scf%0Afrom pyscf import mcscf%0Afrom pyscf.mrpt.nevpt2 import sc_nevpt%0Afrom pyscf.dmrgscf.dmrgci import DMRGSCF%0A%0A#%0A# This calculation requires about 10 GB memory per processor.%0A#%0A%0Ab = 1.5%0Amol = gto.Mole()%0Amol.verbose = 5%0Amol.output = 'cr2-%253.2f.out' %25 b%0Amol.max_memory = 70000%0Amol.atom = %5B%0A %5B'Cr',( 0.000000, 0.000000, -b/2)%5D,%0A %5B'Cr',( 0.000000, 0.000000, b/2)%5D,%0A%5D%0Amol.basis = %7B'Cr': 'ccpvdz-dk'%7D%0Amol.symmetry = True%0Amol.build()%0A%0Am = scf.sfx2c1e(scf.RHF(mol))%0Am.conv_tol = 1e-9%0Am.chkfile = 'hf_chk-%25s'%25b%0Am.level_shift = 0.5%0Am.kernel()%0A%0Adm = m.make_rdm1()%0Am.level_shift = 0%0Am.scf(dm)%0A%0Amc = DMRGSCF(m, 20, 28) # 20o, 28e%0Amc.fcisolver.maxM = 1000%0Amc.fcisolver.tol = 1e-6%0A%0Amc.chkfile = 'mc_chk_18o-%25s'%25b%0Acas_occ = %7B'A1g':4, 'A1u':4,%0A 'E1ux':2, 'E1uy':2, 'E1gx':2, 'E1gy':2,%0A 'E2ux':1, 'E2uy':1, 'E2gx':1, 'E2gy':1%7D%0Amo = mc.sort_mo_by_irrep(cas_occ)%0Amc.kernel(mo)%0A%0A#%0A# DMRG-NEVPT2%0A#%0Asc_nevpt(mc)%0A
|
|
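Decoded for readability, the payload above is a PySCF input that runs a scalar-relativistic RHF on Cr2, a DMRG-CASSCF(28e, 20o) with a DMRG FCI solver (maxM = 1000), and strongly contracted NEVPT2 on top. The core call sequence, excerpted from the decoded payload with comments added, is:

    m = scf.sfx2c1e(scf.RHF(mol))      # SFX2C-1e scalar-relativistic RHF
    m.kernel()

    mc = DMRGSCF(m, 20, 28)            # 20 orbitals, 28 electrons in the active space
    mc.fcisolver.maxM = 1000           # DMRG bond dimension
    mo = mc.sort_mo_by_irrep(cas_occ)  # choose active orbitals per irrep
    mc.kernel(mo)

    sc_nevpt(mc)                       # DMRG-NEVPT2 on the converged CASSCF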
61516c8a98bb9f0286bec2e8cfeb93cc96d5e74e
|
Print the initial metrics in relativity.py
|
examples/relativity.py
|
examples/relativity.py
|
#!/usr/bin/env python
import iam_sympy_example

"""
This example calculates the Ricci tensor from the metric and does this
on the example of Schwarzschild solution.
"""

from sympy import exp, Symbol, sin, Rational, Derivative, dsolve, Function, \
        Matrix, Eq, pprint, Pow

def grad(f,X):
    a=[]
    for x in X:
        a.append( f.diff(x) )
    return a

def d(m,x):
    return grad(m[0,0],x)

class MT(object):
    def __init__(self,m):
        self.gdd=m
        self.guu=m.inv()

    def __str__(self):
        return "g_dd =\n" + str(self.gdd)

    def dd(self,i,j):
        return self.gdd[i,j]

    def uu(self,i,j):
        return self.guu[i,j]

class G(object):
    def __init__(self,g,x):
        self.g = g
        self.x = x

    def udd(self,i,k,l):
        g=self.g
        x=self.x
        r=0
        for m in [0,1,2,3]:
            r+=g.uu(i,m)/2 * (g.dd(m,k).diff(x[l])+g.dd(m,l).diff(x[k]) \
                    - g.dd(k,l).diff(x[m]))
        return r

class Riemann(object):
    def __init__(self,G,x):
        self.G = G
        self.x = x

    def uddd(self,rho,sigma,mu,nu):
        G=self.G
        x=self.x
        r=G.udd(rho,nu,sigma).diff(x[mu])-G.udd(rho,mu,sigma).diff(x[nu])
        for lam in [0,1,2,3]:
            r+=G.udd(rho,mu,lam)*G.udd(lam,nu,sigma) \
                    -G.udd(rho,nu,lam)*G.udd(lam,mu,sigma)
        return r

class Ricci(object):
    def __init__(self,R,x):
        self.R = R
        self.x = x
        self.g = R.G.g

    def dd(self,mu,nu):
        R=self.R
        x=self.x
        r=0
        for lam in [0,1,2,3]:
            r+=R.uddd(lam,mu,lam,nu)
        return r

    def ud(self,mu,nu):
        r=0
        for lam in [0,1,2,3]:
            r+=self.g.uu(mu,lam)*self.dd(lam,nu)
        return r.expand()

def curvature(Rmn):
    return Rmn.ud(0,0)+Rmn.ud(1,1)+Rmn.ud(2,2)+Rmn.ud(3,3)

#class nu(Function):
#    def getname(self):
#        return r"\nu"
#        return r"nu"

#class lam(Function):
#    def getname(self):
#        return r"\lambda"
#        return r"lambda"

nu = Function("nu")
lam = Function("lamda")

t=Symbol("t")
r=Symbol("r")
theta=Symbol(r"theta")
phi=Symbol(r"phi")

#general, spherically symmetric metric
gdd=Matrix((
    (-exp(nu(r)),0,0,0),
    (0, exp(lam(r)), 0, 0),
    (0, 0, r**2, 0),
    (0, 0, 0, r**2*sin(theta)**2)
    ))
#spherical - flat
#gdd=Matrix((
#    (-1, 0, 0, 0),
#    (0, 1, 0, 0),
#    (0, 0, r**2, 0),
#    (0, 0, 0, r**2*sin(theta)**2)
#    ))
#polar - flat
#gdd=Matrix((
#    (-1, 0, 0, 0),
#    (0, 1, 0, 0),
#    (0, 0, 1, 0),
#    (0, 0, 0, r**2)
#    ))
#polar - on the sphere, on the north pole
#gdd=Matrix((
#    (-1, 0, 0, 0),
#    (0, 1, 0, 0),
#    (0, 0, r**2*sin(theta)**2, 0),
#    (0, 0, 0, r**2)
#    ))

g=MT(gdd)
X=(t,r,theta,phi)
Gamma=G(g,X)
Rmn=Ricci(Riemann(Gamma,X),X)

def pprint_Gamma_udd(i,k,l):
    pprint( Eq(Symbol('Gamma^%i_%i%i' % (i,k,l)), Gamma.udd(i,k,l)) )

def pprint_Rmn_dd(i,j):
    pprint( Eq(Symbol('R_%i%i' % (i,j)), Rmn.dd(i,j)) )

def main():
    #print g
    print "-"*40
    print "Christoffel symbols:"
    pprint_Gamma_udd(0,1,0)
    pprint_Gamma_udd(0,0,1)
    print
    pprint_Gamma_udd(1,0,0)
    pprint_Gamma_udd(1,1,1)
    pprint_Gamma_udd(1,2,2)
    pprint_Gamma_udd(1,3,3)
    print
    pprint_Gamma_udd(2,2,1)
    pprint_Gamma_udd(2,1,2)
    pprint_Gamma_udd(2,3,3)
    print
    pprint_Gamma_udd(3,2,3)
    pprint_Gamma_udd(3,3,2)
    pprint_Gamma_udd(3,1,3)
    pprint_Gamma_udd(3,3,1)
    print "-"*40
    print "Ricci tensor:"
    pprint_Rmn_dd(0,0)
    e = Rmn.dd(1,1)
    pprint_Rmn_dd(1,1)
    pprint_Rmn_dd(2,2)
    pprint_Rmn_dd(3,3)
    #print
    #print "scalar curvature:"
    #print curvature(Rmn)
    print "-"*40
    print "solve the Einstein's equations:"
    e = e.subs(nu(r), -lam(r))
    l = dsolve(e, [lam(r)])
    pprint( Eq(lam(r), l) )
    metric = gdd.subs(lam(r), l).subs(nu(r),-l)#.combine()
    print "metric:"
    pprint( metric )

if __name__ == "__main__":
    main()
|
Python
| 0.001055 |
@@ -3029,16 +3029,47 @@
-#
print
-g
+%22Initial metric:%22%0A pprint(gdd)
%0A
|
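Decoded, the single hunk above replaces the commented-out `#print g` at the top of main() with two statements that print the starting metric, so main() now begins:

    def main():
        print "Initial metric:"
        pprint(gdd)
        print "-"*40
        ...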
1830c24988fccd7069bb4f9d4c66940ce623425f
|
add execute apcupsd cgi sample
|
execute_apcupsd_cgi.py
|
execute_apcupsd_cgi.py
|
Python
| 0 |
@@ -0,0 +1,409 @@
+from http.server import CGIHTTPRequestHandler, test%0D%0Aimport os%0D%0A%0D%0Adef main():%0D%0A # http://stackoverflow.com/questions/11419572/how-to-set-the-documentroot-while-using-pythons-httpserver%0D%0A os.chdir(r%22C:%5Capcupsd%22)%0D%0A%0D%0A # %E3%83%87%E3%82%A3%E3%83%AC%E3%82%AF%E3%83%88%E3%83%AA%E5%90%8D%E3%81%AE%E5%89%8D%E3%81%AE%60/%60%E3%82%92%E4%BB%98%E3%81%91%E5%BF%98%E3%82%8C%E3%82%8B%E3%81%A8%E6%AD%A3%E5%B8%B8%E3%81%AB%E5%8B%95%E4%BD%9C%E3%81%97%E3%81%AA%E3%81%84%0D%0A CGIHTTPRequestHandler.cgi_directories = %5B%22/cgi%22%5D%0D%0A%0D%0A test(HandlerClass=CGIHTTPRequestHandler, port=8080)%0D%0A%0D%0Aif __name__ == %22__main__%22:%0D%0A main()
|
|
c3afc6c28530c3dfc3bd57d9a1841a60bf92ba4f
|
Fix bug which caused page cyclers to always clear cache before load.
|
tools/perf/benchmarks/netsim_top25.py
|
tools/perf/benchmarks/netsim_top25.py
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

from telemetry import test

from perf_tools import page_cycler


class NetsimTop25(test.Test):
  """Measures load time of the top 25 sites under simulated cable network."""
  test = page_cycler.PageCycler
  test.clear_cache_before_each_run = True
  page_set = 'tools/perf/page_sets/top_25.json'
  options = {
      'extra_wpr_args': [
          '--shaping_type=proxy',
          '--net=cable'
      ],
      'pageset_repeat': '5',
  }
|
Python
| 0.000008 |
@@ -364,50 +364,8 @@
ler%0A
- test.clear_cache_before_each_run = True%0A
pa
@@ -538,8 +538,120 @@
,%0A %7D%0A
+%0A def __init__(self):%0A super(NetsimTop25, self).__init__()%0A self.test.clear_cache_before_each_run = True%0A
|
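The bug is worth spelling out: in the old contents, `test = page_cycler.PageCycler` aliases the shared class object, so the class-body statement `test.clear_cache_before_each_run = True` mutates PageCycler itself for every benchmark that imports it. The decoded hunks delete that line and set the flag inside a new `__init__` instead. A minimal, self-contained illustration of the leak (hypothetical names):

    class PageCyclerStub(object):
        clear_cache_before_each_run = False

    class NetsimBenchmark(object):
        test = PageCyclerStub
        test.clear_cache_before_each_run = True  # mutates PageCyclerStub itself

    class OtherBenchmark(object):
        test = PageCyclerStub

    print(OtherBenchmark.test.clear_cache_before_each_run)  # True: the flag leaked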
b5c21a5eeb8894ae93290c4c78fa23e5207bc0b3
|
Create Flaskapp.wsgi
|
Flaskapp.wsgi
|
Flaskapp.wsgi
|
Python
| 0.000206 |
@@ -0,0 +1,211 @@
+#!/usr/bin/python%0Aimport sys%0Aimport logging%0Alogging.basicConfig(stream=sys.stderr)%0Asys.path.insert(0,%22/var/www/FlaskApp/%22)%0A%0Afrom FlaskApp import app as application%0Aapplication.secret_key = 'Add your secret key'%0A
|
|
3c2f3baa1a76d386d5604c0c1dc8d4f3a33b11ad
|
Create Helloworld.py
|
Helloworld.py
|
Helloworld.py
|
Python
| 0.000446 |
@@ -0,0 +1,22 @@
+print('Hello World!')%0A
|
|
ee614036b45e9f10f680cef56a5eaa2d86c424fb
|
Create cybercrimeatmtracker.py
|
plugins/feeds/public/cybercrimeatmtracker.py
|
plugins/feeds/public/cybercrimeatmtracker.py
|
Python
| 0.000156 |
@@ -0,0 +1,1441 @@
+import re%0Aimport logging%0Afrom dateutil import parser%0Afrom datetime import datetime, timedelta%0Afrom core.observables import Hash%0Afrom core.feed import Feed%0Afrom core.errors import ObservableValidationError%0A%0A%0Aclass CybercrimeAtmTracker(Feed):%0A%0A default_values = %7B%0A 'frequency': timedelta(hours=1),%0A 'name': 'CybercrimeAtmTracker',%0A 'source': 'http://atm.cybercrime-tracker.net/rss.php',%0A 'description': 'CyberCrime ATM Tracker - Latest 40 CnC URLS',%0A %7D%0A%0A def update(self):%0A for item in self.update_xml(%0A 'item', %5B'title', 'link', 'pubDate', 'description'%5D):%0A self.analyze(item)%0A%0A def analyze(self, item):%0A observable_sample = item%5B'title'%5D%0A context_sample = %7B%7D%0A context_sample%5B'description'%5D = 'ATM sample'%0A context_sample%5B'date_added'%5D = parser.parse(item%5B'pubDate'%5D)%0A context_sample%5B'source'%5D = self.name%0A family = False%0A if ' - ' in observable_sample:%0A family, observable_sample = observable_sample.split(' - ')%0A%0A try:%0A sample = Hash.get_or_create(value=observable_sample)%0A sample.add_context(context_sample)%0A sample.add_source('feed')%0A sample_tags = %5B'atm'%5D%0A if family:%0A sample_tags.append(family)%0A sample.tag(sample_tags)%0A except ObservableValidationError as e:%0A logging.error(e)%0A return%0A
|
|
ece838042acd75ba7edde833856ac02e4efe9977
|
Create PPTconnect.py
|
PPTconnect.py
|
PPTconnect.py
|
Python
| 0 |
@@ -0,0 +1,2303 @@
+from TwitterAPI import TwitterAPI%0Aimport win32com.client%0Afrom MSO import *%0A%0A # Open PowerPoint%0AApplication = win32com.client.Dispatch(%22PowerPoint.Application%22)%0A%0A# Add a presentation%0APresentation = Application.Presentations.Add()%0A%0A# Go to http://dev.twitter.com and create an app.%0A# The consumer key and secret will be generated for you after%0Aconsumer_key = %22e1WhbINIG0betPfLmm16g%22%0Aconsumer_secret = %22JVU8Rhrq9QANJX8rybNhWhEKhqMrU4yqC7yvU2Gxh0%22%0A %0A# After the step above, you will be redirected to your app's page.%0A# Create an access token under the the %22Your access token%22 section%0Aaccess_token_key = %2214888261-5JLox5DCiHe7iQRPdJaTb93syK9W8DqZotMy8V5OF%22%0Aaccess_token_secret =%22Ws1dUSp5eApbtPggPtOn276t5fM1LgnHiFyVWaylbKsKP%22%0A %0A %0A# Create a Twitter client %0Atwitter = TwitterAPI(consumer_key, consumer_secret, access_token_key, access_token_secret)%0A %0A%0A##for tweet in results.get_iterator():%0A##%09print (tweet%5B'id'%5D, tweet%5B'text'%5D)%0Adef draw_tweet(Base, item, pos):%0A y = 40 + (pos %25 4) * 120%0A %0A image = Base.Shapes.AddPicture(%0A # To get the larger resolution image, just remove _normal from the URL%0A item%5B'user'%5D%5B'profile_image_url'%5D.replace('_normal', ''),%0A LinkToFile=True,%0A SaveWithDocument=False,%0A Left=20, Top=y,%0A Width=100, Height=100)%0A %0A try:%0A status = item%5B'text'%5D.encode('cp1252')%0A except UnicodeEncodeError:%0A status = item%5B'text'%5D%0A text = Base.Shapes.AddShape(1, 130, y, 460, 100)%0A text.Fill.ForeColor.ObjectThemeColor = 2%0A text.Fill.ForeColor.Brightness = +0.95%0A text.Line.Visible = False%0A text.TextFrame.TextRange.Text = status%0A text.TextFrame.TextRange.Font.Color.ObjectThemeColor = 3%0A text.TextFrame.TextRange.ParagraphFormat.Alignment = 1%0A %0A user = Base.Shapes.AddShape(9, 600, y, 100, 100)%0A user.Fill.ForeColor.ObjectThemeColor = 4%0A user.Line.Visible = False%0A user.TextFrame.TextRange.Text = '@' + item%5B'user'%5D%5B'screen_name'%5D%0A%0A%0ABase = Presentation.Slides.Add(1, 12)%0A %0A%0A#query = %7B'q' : 'Top Chef', 'lang' : 'es', 'count': 100%7D%0Aresults = twitter.request('statuses/filter', %7B'track': 'blue'%7D)%0A##for tweet in results.get_iterator():%0A##%09print (tweet%5B'id'%5D, tweet%5B'text'%5D)%0A%09%0Afor pos, item in enumerate(results.get_iterator()):%0A draw_tweet(Base, item, pos)%0A if pos %3E 20:%0A break%0A
|
|
65029a09af9dcafc156a5a0632a63e3cf4b6c50d
|
add benchmark to compare to lasagne
|
benchmarks/lag_task_lasgne.py
|
benchmarks/lag_task_lasgne.py
|
Python
| 0 |
@@ -0,0 +1,2387 @@
+from __future__ import division, absolute_import%0Afrom __future__ import print_function, unicode_literals%0A%0Aimport numpy as np%0Aimport theano%0Aimport theano.tensor as T%0Aimport lasagne%0A%0AfX = theano.config.floatX%0A%0A# ################################## config ##################################%0A%0AN_TRAIN = 1000%0ALAG = 10%0ALENGTH = 50%0AHIDDEN_STATE_SIZE = 10%0ABATCH_SIZE = 64%0A%0A# ############################### prepare data ###############################%0A%0A%0Adef binary_toy_data(lag=1, length=20):%0A inputs = np.random.randint(0, 2, length).astype(fX)%0A outputs = np.array(lag * %5B0%5D + list(inputs), dtype=fX)%5B:length%5D%0A return inputs, outputs%0A%0A%0Adef minibatch(lag, length, batch_size):%0A inputs = %5B%5D%0A outputs = %5B%5D%0A for _ in range(batch_size):%0A i, o = binary_toy_data(lag, length)%0A inputs.append(i)%0A outputs.append(o)%0A return np.array(inputs)%5B..., np.newaxis%5D, np.array(outputs)%5B..., np.newaxis%5D%0A%0A%0A# ############################## prepare model ##############################%0A%0Al = lasagne.layers.InputLayer(shape=(None, None, 1))%0Al = lasagne.layers.LSTMLayer(l,%0A num_units=HIDDEN_STATE_SIZE,%0A grad_clipping=1,%0A learn_init=True)%0Al = lasagne.layers.ReshapeLayer(l, shape=(-1, HIDDEN_STATE_SIZE))%0Al = lasagne.layers.DenseLayer(l,%0A num_units=1,%0A nonlinearity=lasagne.nonlinearities.sigmoid)%0A%0Ain_var = T.tensor3()%0Atargets = T.tensor3()%0Aoutputs = lasagne.layers.get_output(l, in_var).reshape(in_var.shape)%0Aloss = T.mean((targets - outputs) ** 2)%0Aall_params = lasagne.layers.get_all_params(l)%0Aupdates = lasagne.updates.adam(loss, all_params)%0A%0Atrain_fn = theano.function(%5Bin_var, targets%5D, %5Bloss%5D, updates=updates)%0Avalid_fn = theano.function(%5Bin_var%5D, %5Boutputs%5D)%0A%0A%0A# ################################# training #################################%0A%0Aprint(%22Starting training...%22)%0A%0Aimport time%0Ast = time.time()%0Afor i in range(N_TRAIN):%0A inputs, outputs = minibatch(lag=LAG, length=LENGTH, batch_size=BATCH_SIZE)%0A loss = train_fn(inputs, outputs)%5B0%5D%0A print(loss)%0Aprint(%22total_time: %25s%22 %25 (time.time() - st))%0A%0Ainputs, outputs = minibatch(lag=LAG, length=LENGTH, batch_size=BATCH_SIZE)%0Apred = valid_fn(inputs)%5B0%5D%0Apred_accuracies = (np.round(pred) == outputs).mean(axis=0)%5BLAG:%5D%0Aprint(pred_accuracies)%0Aprint(pred_accuracies.mean())%0A
|
|
985087efdcd80c4896f5ea215dddab1d98662f1d
|
set unit to B by default in doc string
|
src/collectors/processresources/processresources.py
|
src/collectors/processresources/processresources.py
|
# coding=utf-8

"""
A Diamond collector that collects memory usage of each process defined in it's
config file by matching them with their executable filepath or the process name.

This collector can also be used to collect memory usage for the Diamond process.

Example config file ProcessResourcesCollector.conf

```
enabled=True
unit=kB
cpu_interval=0.1
[process]
[[postgres]]
exe=^\/usr\/lib\/postgresql\/+d.+d\/bin\/postgres$
name=^postgres,^pg
[[diamond]]
selfmon=True
```

exe and name are both lists of comma-separated regexps.

cpu_interval is the interval in seconds used to calculate cpu usage percentage.
From psutil's docs:

'''get_cpu_percent(interval=0.1)'''
Return a float representing the process CPU utilization as a percentage.
* When interval is > 0.0 compares process times to system CPU times elapsed
before and after the interval (blocking).
* When interval is 0.0 compares process times to system CPU times
elapsed since last call, returning immediately. In this case is recommended
for accuracy that this function be called with at least 0.1 seconds between
calls.
"""

import os
import re

import diamond.collector
import diamond.convertor

try:
    import psutil
    psutil
except ImportError:
    psutil = None


def process_filter(proc, cfg):
    """
    Decides whether a process matches with a given process descriptor

    :param proc: a psutil.Process instance
    :param cfg: the dictionary from processes that describes with the
        process group we're testing for
    :return: True if it matches
    :rtype: bool
    """
    if cfg['selfmon'] and proc.pid == os.getpid():
        return True
    for exe in cfg['exe']:
        try:
            if exe.search(proc.exe):
                return True
        except psutil.AccessDenied:
            break
    for name in cfg['name']:
        if name.search(proc.name):
            return True
    for cmdline in cfg['cmdline']:
        if cmdline.search(' '.join(proc.cmdline)):
            return True
    return False


class ProcessResourcesCollector(diamond.collector.Collector):

    def get_default_config_help(self):
        config_help = super(ProcessResourcesCollector,
                            self).get_default_config_help()
        config_help.update({
            'unit': 'The unit in which memory data is collected.',
            'process': ("A subcategory of settings inside of which each "
                        "collected process has it's configuration"),
            'cpu_interval': (
                """The time interval used to calculate cpu percentage
* When interval is > 0.0 compares process times to system CPU times elapsed
before and after the interval (blocking).
* When interval is 0.0 compares process times to system CPU times
elapsed since last call, returning immediately. In this case is recommended
for accuracy that this function be called with at least 0.1 seconds between
calls."""),
        })
        return config_help

    def get_default_config(self):
        """
        Default settings are:
            path: 'memory.process'
            unit: 'B'
        """
        config = super(ProcessResourcesCollector, self).get_default_config()
        config.update({
            'path': 'memory.process',
            'unit': 'B',
            'process': '',
            'cpu_interval': 0.1
        })
        return config

    def setup_config(self):
        """
        prepare self.processes, which is a descriptor dictionary in
        processgroup --> {
            exe: [regex],
            name: [regex],
            cmdline: [regex],
            selfmon: [boolean],
            procs: [psutil.Process]
        }
        """
        self.processes = {}
        for process, cfg in self.config['process'].items():
            # first we build a dictionary with the process aliases and the
            # matching regexps
            proc = {'procs': []}
            for key in ('exe', 'name', 'cmdline'):
                proc[key] = cfg.get(key, [])
                if not isinstance(proc[key], list):
                    proc[key] = [proc[key]]
                proc[key] = [re.compile(e) for e in proc[key]]
            proc['selfmon'] = cfg.get('selfmon', '').lower() == 'true'
            self.processes[process] = proc

    def filter_processes(self):
        """
        Populates self.processes[processname]['procs'] with the corresponding
        list of psutil.Process instances
        """
        class ProcessResources(object):
            def __init__(self, **kwargs):
                for name, val in kwargs.items():
                    setattr(self, name, val)

        # requires setup_config to be run before this
        interval = float(self.config['cpu_interval'])
        for proc in psutil.process_iter():
            # get process data
            loaded = None
            try:
                proc_dummy = ProcessResources(
                    rss=proc.get_memory_info().rss,
                    vms=proc.get_memory_info().vms,
                    cpu_percent=proc.get_cpu_percent(interval=interval)
                )
                loaded = True
            except psutil.NoSuchProcess:
                loaded = False

            if loaded:
                # filter and divide the system processes amongst the different
                # process groups defined in the config file
                for procname, cfg in self.processes.items():
                    if process_filter(proc, cfg):
                        cfg['procs'].append(proc_dummy)
                        break

    def collect(self):
        """
        Collects the RSS memory usage of each process defined under the
        `process` subsection of the config file
        """
        self.setup_config()
        self.filter_processes()
        unit = self.config['unit']
        for process, cfg in self.processes.items():
            # finally publish the results for each process group
            metric_name = "%s.rss" % process
            metric_value = diamond.convertor.binary.convert(
                sum(p.rss for p in cfg['procs']),
                oldUnit='byte', newUnit=unit)
            # Publish Metric
            self.publish(metric_name, metric_value)

            metric_name = "%s.vms" % process
            metric_value = diamond.convertor.binary.convert(
                sum(p.vms for p in cfg['procs']),
                oldUnit='byte', newUnit=unit)
            # Publish Metric
            self.publish(metric_name, metric_value)

            # CPU percent
            metric_name = "%s.cpu_percent" % process
            metric_value = sum(p.cpu_percent for p in cfg['procs'])
            # Publish Metric
            self.publish(metric_name, metric_value)
|
Python
| 0.000002 |
@@ -329,17 +329,16 @@
ue%0Aunit=
-k
B%0Acpu_in
|
33abec38e82e132a6e192d5ae0535b84d8aa47f4
|
add import script for Poole
|
polling_stations/apps/data_collection/management/commands/import_poole.py
|
polling_stations/apps/data_collection/management/commands/import_poole.py
|
Python
| 0 |
@@ -0,0 +1,359 @@
+from data_collection.management.commands import BaseXpressDemocracyClubCsvImporter%0A%0Aclass Command(BaseXpressDemocracyClubCsvImporter):%0A council_id = 'E06000029'%0A addresses_name = 'parl.2017-06-08/Version 1/Democracy_Club__08June2017.CSV'%0A stations_name = 'parl.2017-06-08/Version 1/Democracy_Club__08June2017.CSV'%0A elections = %5B'parl.2017-06-08'%5D%0A
|
|
b6b92e278202c27b124909aa5352726799d8d162
|
add stack with max python solution
|
08-stack-n-queue/8.1-stack-with-max/python/stackMax.py
|
08-stack-n-queue/8.1-stack-with-max/python/stackMax.py
|
Python
| 0 |
@@ -0,0 +1,2027 @@
+#!/usr/bin/env python2%0A# -*- coding: utf-8 -*-%0A%22%22%22%0ACreated on Fri Aug 18 10:08:25 2017%0A%0A@author: LiuQianKevin%0A%22%22%22%0Aclass Stack:%0A class cache:%0A def __init__(self, _max = -float('inf'), count = 0):%0A self.max = _max;%0A self.count = count;%0A %0A def __init__(self):%0A self._element = %5B%5D;%0A self._maxCache =%5B%5D;%0A %0A def push(self, x):%0A %0A #update elemetn%0A self._element.append(x);%0A %0A #update cache%0A #if x larger than maxchache%5B-1%5D, or maxcheche empty, add %0A if(not self._maxCache or x %3E self._maxCache%5B-1%5D.max):%0A self._maxCache.append(self.cache(x, 1));%0A #if x equal to maxcache%5B-1%5D.max, cout += 1%0A elif(x == self._maxCache%5B-1%5D.max):%0A self._maxCache%5B-1%5D.count += 1;%0A #if x larger than maxchache%5B-1%5D.max, do nothing%0A %0A %0A def pop(self):%0A #update element%0A result = self._element.pop();%0A %0A #update cache%0A #if result %3C maxCache%5B-1%5D.max, no update%0A #if result == ---------------, cout -= 1, if cout == 0, pop it%0A if(result == self.max()):%0A self._maxCache%5B-1%5D.count -= 1;%0A if(self._maxCache%5B-1%5D.count == 0):%0A self._maxCache.pop();%0A %0A return result;%0A %0A %0A def empty(self):%0A return not self._element;%0A %0A %0A def max(self):%0A return self._maxCache%5B-1%5D.max;%0A %0A%0Adef main():%0A s = Stack()%0A s.push(1)%0A s.push(2)%0A assert s.max() == 2%0A print(s.max()) # 2%0A print(s.pop()) # 2%0A assert s.max() == 1%0A print(s.max()) # 1%0A s.push(3)%0A s.push(2)%0A assert s.max() == 3%0A print(s.max()) # 3%0A s.pop()%0A assert s.max() == 3%0A print(s.max()) # 3%0A s.pop()%0A assert s.max() == 1%0A print(s.max()) # 1%0A s.pop()%0A try:%0A s.max()%0A s.pop()%0A s.pop()%0A s.pop()%0A s.pop()%0A except IndexError as e:%0A print(e)%0A%0A%0Aif __name__ == '__main__':%0A main()
|
|
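Decoded, the payload implements the classic auxiliary-stack technique: alongside the element stack it keeps (max, count) pairs so that push, pop, and max are all O(1). A simplified sketch of the same idea (tuples instead of the payload's inner cache class):

    class MaxStack(object):
        def __init__(self):
            self._items = []
            self._maxes = []  # (value, count) pairs; values are non-decreasing

        def push(self, x):
            self._items.append(x)
            if not self._maxes or x > self._maxes[-1][0]:
                self._maxes.append((x, 1))          # new running maximum
            elif x == self._maxes[-1][0]:
                v, c = self._maxes[-1]
                self._maxes[-1] = (v, c + 1)        # duplicate of current maximum

        def pop(self):
            x = self._items.pop()
            if self._maxes and x == self._maxes[-1][0]:
                v, c = self._maxes[-1]
                if c == 1:
                    self._maxes.pop()               # last copy of the maximum leaves
                else:
                    self._maxes[-1] = (v, c - 1)
            return x

        def max(self):
            return self._maxes[-1][0]               # IndexError on empty, like the payload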
f78f74d836d2eca1cafe3b6401b5c8d13e6d139b
|
Fix type1/type2
|
geotrek/tourism/migrations/0004_auto_20190328_1339.py
|
geotrek/tourism/migrations/0004_auto_20190328_1339.py
|
Python
| 0.999983 |
@@ -0,0 +1,885 @@
+# -*- coding: utf-8 -*-%0A# Generated by Django 1.11.14 on 2019-03-28 12:39%0Afrom __future__ import unicode_literals%0A%0Afrom django.db import migrations, models%0A%0A%0Aclass Migration(migrations.Migration):%0A%0A dependencies = %5B%0A ('tourism', '0003_auto_20190306_1417'),%0A %5D%0A%0A operations = %5B%0A migrations.AlterField(%0A model_name='touristiccontent',%0A name='type1',%0A field=models.ManyToManyField(blank=True, db_table=b't_r_contenu_touristique_type1', related_name='contents1', to='tourism.TouristicContentType1', verbose_name='Type 1'),%0A ),%0A migrations.AlterField(%0A model_name='touristiccontent',%0A name='type2',%0A field=models.ManyToManyField(blank=True, db_table=b't_r_contenu_touristique_type2', related_name='contents2', to='tourism.TouristicContentType2', verbose_name='Type 2'),%0A ),%0A %5D%0A
|
|
f29a0845bc0983e18ce6484543b206dfb3091818
|
Add easier way to import cv2
|
vision/opencv.py
|
vision/opencv.py
|
Python
| 0.000003 |
@@ -0,0 +1,63 @@
+import sys%0Asys.path.append('lib/opencv/build/lib')%0A%0Aimport cv2%0A
|
|
2848955e59b5106ffe48c4ebfa05095a6be460e5
|
Add visual script
|
visual/visual.py
|
visual/visual.py
|
Python
| 0.000001 |
@@ -0,0 +1,1347 @@
+#!/usr/bin/env python3%0A%0Aimport re%0Aimport numpy as np%0Aimport matplotlib.pyplot as plt%0Aimport unittest%0A%0Aclass Parser:%0A%09'''Wta log parser'''%0A%0A%09def __init__(self):%0A%09%09'''Open log file'''%0A%09%09self._pattern = re.compile(%0A%09%09%09r'%5Era=((?:%5B0-9a-fA-F%5D%7B2%7D:)%7B5%7D(?:%5B0-9a-fA-F%5D%7B2%7D))%5B %5Ct%5D+' +%0A%09%09%09r'ta=((?:%5B0-9a-fA-F%5D%7B2%7D:)%7B5%7D(?:%5B0-9a-fA-F%5D%7B2%7D))%5B %5Ct%5D+' +%0A%09%09%09r'tsf=(%5B0-9%5D+)%5B %5Ct%5D+' + %0A%09%09%09r'seq=(%5B0-9%5D+)%5B %5Ct%5D+' +%0A%09%09%09r'rssi=(-%5B0-9%5D+)$')%0A%0A%09def _match(self, line, ra, ta):%0A%09%09match = self._pattern.match(line)%0A%09%09if not match:%0A%09%09%09return None%0A%09%09if ra == match.group(1) and ta == match.group(2):%0A%09%09%09return (match.group(1), match.group(2), int(match.group(3)),%0A%09%09%09%09int(match.group(5)))%0A%0A%09def getRecords(self, path, ra, ta):%0A%09%09f = open(path)%0A%09%09records = %5B%5D%0A%09%09for line in self.f.lines():%0A%09%09%09r = _match(line, ra, ta)%0A%09%09%09if r:%0A%09%09%09%09records.append(r)%0A%09%09return records%0A%0Aclass ParserTest(unittest.TestCase):%0A%09'''Parser's unit test class'''%0A%0A%09def test_match(self):%0A%09%09line = %22ra=00:4b:69:6e:73:30 ta=c8:93:46:a3:8e:74 tsf=1473507516 seq=28769 rssi=-60%22%0A%09%09ra = %2200:4b:69:6e:73:30%22%0A%09%09ta = %22c8:93:46:a3:8e:74%22%0A%09%09tsf = 1473507516%0A%09%09rssi = -60%0A%09%09p = Parser()%0A%09%09r = p._match(line, ra, ta)%0A%09%09self.assertTrue(r is not None)%0A%09%09self.assertEqual(r%5B0%5D, ra)%0A%09%09self.assertEqual(r%5B1%5D, ta)%0A%09%09self.assertEqual(r%5B2%5D, tsf)%0A%09%09self.assertEqual(r%5B3%5D, rssi)%0A%0Adef main():%0A%09pass%0A%0Aif __name__ == %22__main__%22:%0A%09main()%0A
|
|
b5cc83a705eaa22872d304b92c7b6e57b5581604
|
Add unit-test for "readbytes_multiple"
|
puresnmp/test/test/test_helpers.py
|
puresnmp/test/test/test_helpers.py
|
Python
| 0.000002 |
@@ -0,0 +1,877 @@
+'''%0ATests for unit-test helpers%0A'''%0A%0Afrom textwrap import dedent%0Afrom binascii import hexlify%0Aimport puresnmp.test as th%0Afrom io import StringIO%0A%0A%0Adef test_readbytes_multiple():%0A data = StringIO(dedent(%0A '''%5C%0A #%0A # This is a comment%0A #%0A%0A 30 2d 02 01 01 04 07 70 72 69 76 61 74 65 a2 1f 0-.....private..%0A%0A ----%0A%0A 30 2d 02 01 01 04 07 70 72 69 76 61 74 65 a2 1f 0-.....private..%0A%0A ----%0A%0A 30 2e 02 01 01 04 07 70 72 69 76 61 74 65 a2 20 0......private.%0A '''%0A ))%0A expected = %5B%0A b'%5Cx30%5Cx2d%5Cx02%5Cx01%5Cx01%5Cx04%5Cx07%5Cx70%5Cx72%5Cx69%5Cx76%5Cx61%5Cx74%5Cx65%5Cxa2%5Cx1f',%0A b'%5Cx30%5Cx2d%5Cx02%5Cx01%5Cx01%5Cx04%5Cx07%5Cx70%5Cx72%5Cx69%5Cx76%5Cx61%5Cx74%5Cx65%5Cxa2%5Cx1f',%0A b'%5Cx30%5Cx2e%5Cx02%5Cx01%5Cx01%5Cx04%5Cx07%5Cx70%5Cx72%5Cx69%5Cx76%5Cx61%5Cx74%5Cx65%5Cxa2%5Cx20',%0A %5D%0A%0A result = list(th.readbytes_multiple(data))%0A%0A assert result == expected%0A
|
|
86075483e28000066f3d8298cbd80d12aefc5908
|
Support default_value for boolean type handler
|
pybindgen/typehandlers/booltype.py
|
pybindgen/typehandlers/booltype.py
|
# docstrings not neede here (the type handler interfaces are fully
# documented in base.py) pylint: disable-msg=C0111

from base import ReturnValue, Parameter, \
    ReverseWrapperBase, ForwardWrapperBase


class BoolParam(Parameter):

    DIRECTIONS = [Parameter.DIRECTION_IN]
    CTYPES = ['bool']

    def convert_c_to_python(self, wrapper):
        assert isinstance(wrapper, ReverseWrapperBase)
        wrapper.build_params.add_parameter('N', ["PyBool_FromLong(%s)" % (self.value,)])

    def convert_python_to_c(self, wrapper):
        assert isinstance(wrapper, ForwardWrapperBase)
        name = wrapper.declarations.declare_variable(self.ctype_no_const, self.name)
        py_name = wrapper.declarations.declare_variable('PyObject *', 'py_'+self.name)
        wrapper.parse_params.add_parameter('O', ['&'+py_name], self.value)
        wrapper.before_call.write_code("%s = (bool) PyObject_IsTrue(%s);" % (name, py_name))
        wrapper.call_params.append(name)


class BoolReturn(ReturnValue):

    CTYPES = ['bool']

    def get_c_error_return(self):
        return "return false;"

    def convert_python_to_c(self, wrapper):
        py_name = wrapper.declarations.declare_variable('PyObject *', 'py_boolretval')
        wrapper.parse_params.add_parameter("O", ["&"+py_name], prepend=True)
        wrapper.after_call.write_code(
            "%s = PyObject_IsTrue(%s);" % (self.value, py_name))

    def convert_c_to_python(self, wrapper):
        wrapper.build_params.add_parameter(
            "N", ["PyBool_FromLong(%s)" % self.value], prepend=True)


class BoolPtrParam(Parameter):

    DIRECTIONS = [Parameter.DIRECTION_IN, Parameter.DIRECTION_OUT,
                  Parameter.DIRECTION_IN|Parameter.DIRECTION_OUT]
    CTYPES = ['bool*']

    def convert_c_to_python(self, wrapper):
        if self.direction & self.DIRECTION_IN:
            wrapper.build_params.add_parameter(
                'N', ["PyBool_FromLong(*%s)" % (self.value,)])
        if self.direction & self.DIRECTION_OUT:
            py_name = wrapper.declarations.declare_variable(
                'PyObject *', 'py_'+self.name)
            wrapper.parse_params.add_parameter("O", ["&"+py_name], self.value)
            wrapper.after_call.write_code(
                "*%s = PyObject_IsTrue(%s);" % (self.value, py_name,))

    def convert_python_to_c(self, wrapper):
        #assert self.ctype == 'bool*'
        name = wrapper.declarations.declare_variable(self.ctype_no_const[:-1], self.name)
        wrapper.call_params.append('&'+name)
        if self.direction & self.DIRECTION_IN:
            py_name = wrapper.declarations.declare_variable("PyObject*", 'py_'+self.name)
            wrapper.parse_params.add_parameter("O", ["&"+py_name], self.value)
            wrapper.before_call.write_code(
                "%s = PyObject_IsTrue(%s);" % (name, py_name,))
        if self.direction & self.DIRECTION_OUT:
            wrapper.build_params.add_parameter(
                'N', ["PyBool_FromLong(%s)" % name])


class BoolRefParam(Parameter):

    DIRECTIONS = [Parameter.DIRECTION_IN, Parameter.DIRECTION_OUT,
                  Parameter.DIRECTION_IN|Parameter.DIRECTION_OUT]
    CTYPES = ['bool&']

    def convert_c_to_python(self, wrapper):
        if self.direction & self.DIRECTION_IN:
            wrapper.build_params.add_parameter(
                'N', ["PyBool_FromLong(%s)" % (self.value,)])
        if self.direction & self.DIRECTION_OUT:
            py_name = wrapper.declarations.declare_variable(
                'PyObject *', 'py_'+self.name)
            wrapper.parse_params.add_parameter("O", ["&"+py_name], self.name)
            wrapper.after_call.write_code(
                "%s = PyObject_IsTrue(%s);" % (self.value, py_name,))

    def convert_python_to_c(self, wrapper):
        #assert self.ctype == 'bool&'
        name = wrapper.declarations.declare_variable(self.ctype_no_const[:-1], self.name)
        wrapper.call_params.append(name)
        if self.direction & self.DIRECTION_IN:
            py_name = wrapper.declarations.declare_variable("PyObject*", 'py_'+self.name)
            wrapper.parse_params.add_parameter("O", ["&"+py_name], self.value)
            wrapper.before_call.write_code(
                "%s = PyObject_IsTrue(%s);" % (name, py_name,))
        if self.direction & self.DIRECTION_OUT:
            wrapper.build_params.add_parameter(
                'N', ["PyBool_FromLong(%s)" % (name,)])
|
Python
| 0 |
@@ -660,32 +660,67 @@
nst, self.name)%0A
+ if self.default_value:%0A
py_name
@@ -802,75 +802,415 @@
-wrapper.parse_params.add_parameter('O', %5B'&'+py_name%5D, self.value)%0A
+else:%0A py_name = wrapper.declarations.declare_variable('PyObject *', 'py_'+self.name, 'NULL')%0A wrapper.parse_params.add_parameter('O', %5B'&'+py_name%5D, self.value, optional=(self.default_value is not None))%0A if self.default_value:%0A wrapper.before_call.write_code(%22%25s = %25s? (bool) PyObject_IsTrue(%25s) : %25s;%22 %25 (name, py_name, py_name, self.default_value))%0A else:%0A
|
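Decoded, the hunks teach BoolParam.convert_python_to_c to honor self.default_value: the PyObject* is pre-initialized to NULL, the parse parameter is marked optional, and the before-call code falls back to the default when no argument was passed. The key added lines, taken from the decoded payload and lightly reflowed (the surrounding if/else scaffolding is omitted here because the character-level hunks don't preserve its exact indentation):

    py_name = wrapper.declarations.declare_variable('PyObject *', 'py_' + self.name, 'NULL')
    wrapper.parse_params.add_parameter('O', ['&' + py_name], self.value,
                                       optional=(self.default_value is not None))
    wrapper.before_call.write_code("%s = %s? (bool) PyObject_IsTrue(%s) : %s;"
                                   % (name, py_name, py_name, self.default_value))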
f434e45b58bfa7001d21d1920a65903f941df833
|
Add __main__.py so that the package can be executed by `python -m jiebarpc` [ciskip]
|
jiebarpc/__main__.py
|
jiebarpc/__main__.py
|
Python
| 0.000001 |
@@ -0,0 +1,890 @@
+#!/usr/bin/env python%0A# -*- coding: utf-8 -*-%0Afrom __future__ import absolute_import, unicode_literals%0Aimport sys%0Aimport argparse%0A%0Afrom jiebarpc import JiebaRPCServer, JiebaRPCDispatcher%0A%0A%0Adef main(host, port, processnum=1):%0A server = JiebaRPCServer(JiebaRPCDispatcher(processnum))%0A server.listen(host, port)%0A server.start()%0A return 0%0A%0A%0Aif __name__ == '__main__':%0A parser = argparse.ArgumentParser(%0A 'python -m jiebarpc',%0A description='Run jiebarpc server'%0A )%0A parser.add_argument('-n', '--processnum', type=int, default=1,%0A help='How many processes to use.')%0A parser.add_argument('address',%0A help='Server listen address like localhost:8888',)%0A ns = parser.parse_args()%0A%0A address = ns.address.split(':')%0A host = address%5B0%5D%0A port = int(address%5B1%5D)%0A%0A sys.exit(main(host, port, ns.processnum))%0A
|
|
5d297710416ebaea3a79e1ded0604d53178c493a
|
add python solution for Project Euler problem 1
|
python_challenges/project_euler/problem_1.py
|
python_challenges/project_euler/problem_1.py
|
Python
| 0.998958 |
@@ -0,0 +1,651 @@
+__author__ = 'tilmannbruckhaus'%0A%0A%0Adef divisible_by_3_or_5(i):%0A divisible = i %25 3 == 0 or i %25 5 == 0%0A # print(%22natural number:%22, i, %22is divisible:%22, divisible)%0A return divisible%0A%0A%0Adef sum_of_multiples_of_3_or_5(limit):%0A # If we list all the natural numbers below 10 that are multiples of 3 or 5,%0A # we get 3, 5, 6 and 9. The sum of these multiples is 23.%0A # Find the sum of all the multiples of 3 or 5 below 1000.%0A multi_sum = 0%0A for limit in range(limit):%0A if divisible_by_3_or_5(limit):%0A multi_sum += limit%0A return multi_sum%0A%0Afor test_limit in %5B10, 1000%5D:%0A print sum_of_multiples_of_3_or_5(test_limit)%0A
|
|
1db14473edff479f97703fb68cb1aa8d65c25023
|
Add Python benchmark
|
lib/node_modules/@stdlib/math/base/special/exp/benchmark/python/benchmark.py
|
lib/node_modules/@stdlib/math/base/special/exp/benchmark/python/benchmark.py
|
Python
| 0.000138 |
@@ -0,0 +1,1511 @@
+#!/usr/bin/env python%0A%22%22%22Benchmark exp.%22%22%22%0A%0Aimport timeit%0A%0Aname = %22exp%22%0Arepeats = 3%0Aiterations = 1000000%0A%0A%0Adef print_version():%0A %22%22%22Print the TAP version.%22%22%22%0A%0A print(%22TAP version 13%22)%0A%0A%0Adef print_summary(total, passing):%0A %22%22%22Print the benchmark summary.%0A%0A # Arguments%0A%0A * %60total%60: total number of tests%0A * %60passing%60: number of passing tests%0A%0A %22%22%22%0A%0A print(%22#%22)%0A print(%221..%22 + str(total)) # TAP plan%0A print(%22# total %22 + str(total))%0A print(%22# pass %22 + str(passing))%0A print(%22#%22)%0A print(%22# ok%22)%0A%0A%0Adef print_results(elapsed):%0A %22%22%22Print benchmark results.%0A%0A # Arguments%0A%0A * %60elapsed%60: elapsed time (in seconds)%0A%0A # Examples%0A%0A %60%60%60 python%0A python%3E print_results(0.131009101868)%0A %60%60%60%0A %22%22%22%0A%0A rate = iterations / elapsed%0A%0A print(%22 ---%22)%0A print(%22 iterations: %22 + str(iterations))%0A print(%22 elapsed: %22 + str(elapsed))%0A print(%22 rate: %22 + str(rate))%0A print(%22 ...%22)%0A%0A%0Adef benchmark():%0A %22%22%22Run the benchmark and print benchmark results.%22%22%22%0A%0A setup = %22from math import exp; from random import random;%22%0A stmt = %22y = exp(100.0*random() - 50.0)%22%0A%0A t = timeit.Timer(stmt, setup=setup)%0A%0A print_version()%0A%0A for i in xrange(3):%0A print(%22# python::%22 + name)%0A elapsed = t.timeit(number=iterations)%0A print_results(elapsed)%0A print(%22ok %22 + str(i+1) + %22 benchmark finished%22)%0A%0A print_summary(repeats, repeats)%0A%0A%0Adef main():%0A %22%22%22Run the benchmark.%22%22%22%0A benchmark()%0A%0A%0Aif __name__ == %22__main__%22:%0A main()%0A
|
|
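The decoded benchmark is Python 2 code (bare print statements, xrange), and its loop hard-codes `xrange(3)` where the `repeats` constant was presumably meant. For anyone rerunning it, the timing core in Python 3 form would be (a sketch, not part of the record):

    import timeit

    iterations = 1000000
    t = timeit.Timer("y = exp(100.0*random() - 50.0)",
                     setup="from math import exp; from random import random")
    elapsed = t.timeit(number=iterations)  # seconds for one million evaluations
    print("rate: %s calls/sec" % (iterations / elapsed))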
d1f4e257b449b6993e0cdc87055113018b6efabb
|
Create promoter_bin.py
|
code_collection/promoter_bin.py
|
code_collection/promoter_bin.py
|
Python
| 0.000002 |
@@ -0,0 +1,391 @@
+import sys%0A%0Apeak=%5B%5D%0Awith open(sys.argv%5B1%5D,'r') as f:%0A%09for line in f:%0A%09%09line=line.strip('%5Cn').split('%5Ct')%0A%09%09peak.append(int(line%5B3%5D))%0Af.close()%0A%0Anum=int(len(peak)/100.0)%0Abin=%5B%5D%0Afor i in range(99):%0A%09bin.append(str(i+1)+'%5Ct'+str(sum(peak%5Bnum*i:num*(i+1)%5D)/(num*1.0))+'%5Cn')%0Abin.append('100'+'%5Ct'+str(sum(peak%5Bnum*99:%5D)/(num*1.0))+'%5Cn')%0A%0Awith open('bin.txt','w') as f:%0A%09f.writelines(bin)%0Af.close%0A
|
|
93a3b7d61877e9350ea2b32ade918755fc874bb8
|
Create run_test.py
|
recipes/django-environ/run_test.py
|
recipes/django-environ/run_test.py
|
Python
| 0.000004 |
@@ -0,0 +1,189 @@
+import django%0Afrom django.conf import settings%0Asettings.configure(INSTALLED_APPS=%5B'environ', 'django.contrib.contenttypes', 'django.contrib.auth'%5D) %0Adjango.setup() %0A %0Aimport environ%0A
|
|
19186f44b1ed4c4b60ffc1ef796fa0894b25da68
|
Add garage.partdefs.sockets
|
py/garage/garage/partdefs/sockets.py
|
py/garage/garage/partdefs/sockets.py
|
Python
| 0.000051 |
@@ -0,0 +1,482 @@
+from garage import parameters%0Afrom garage import parts%0Afrom garage import sockets%0A%0A%0APARTS = parts.Parts(sockets.__name__)%0APARTS.patch_getaddrinfo = parts.AUTO%0A%0A%0APARAMS = parameters.define_namespace(sockets.__name__, 'socket utils')%0APARAMS.patch_getaddrinfo = parameters.create(%0A False, 'enable patching getaddrinfo for caching query results')%0A%0A%[email protected]_maker%0Adef make() -%3E PARTS.patch_getaddrinfo:%0A if PARAMS.patch_getaddrinfo.get():%0A sockets.patch_getaddrinfo()%0A
|
|
b7d23a337ad121a032a8aa2c395c3705bad12b28
|
add migration to grandfather in all existing plans to have Case Sharing via Groups and Child Cases privileges
|
corehq/apps/accounting/migrations/0043_grandfather_case_privs.py
|
corehq/apps/accounting/migrations/0043_grandfather_case_privs.py
|
Python
| 0 |
@@ -0,0 +1,883 @@
+# -*- coding: utf-8 -*-%0A# Generated by Django 1.11.21 on 2019-07-23 16:43%0Afrom __future__ import unicode_literals%0A%0Afrom __future__ import absolute_import%0Afrom django.core.management import call_command%0Afrom django.db import migrations%0A%0Afrom corehq.apps.hqadmin.management.commands.cchq_prbac_bootstrap import (%0A cchq_prbac_bootstrap,%0A)%0Afrom corehq.privileges import (%0A CASE_SHARING_GROUPS,%0A CHILD_CASES,%0A)%0A%0A%0Adef _grandfather_case_privs(apps, schema_editor):%0A call_command(%0A 'cchq_prbac_grandfather_privs',%0A CASE_SHARING_GROUPS,%0A CHILD_CASES,%0A noinput=True,%0A )%0A%0A%0Aclass Migration(migrations.Migration):%0A%0A dependencies = %5B%0A ('accounting', '0042_domain_user_history__unique__and__nonnullable'),%0A %5D%0A%0A operations = %5B%0A migrations.RunPython(cchq_prbac_bootstrap),%0A migrations.RunPython(_grandfather_case_privs),%0A %5D%0A
|
|
304826205804e3972968b16fbf9bb9021eaf9acd
|
add FieldOfStudyHierarchy class
|
scholarly_citation_finder/apps/core/migrations/0015_fieldofstudyhierarchy.py
|
scholarly_citation_finder/apps/core/migrations/0015_fieldofstudyhierarchy.py
|
Python
| 0.000108 |
@@ -0,0 +1,1068 @@
+# -*- coding: utf-8 -*-%0A# Generated by Django 1.9.4 on 2016-03-11 13:19%0Afrom __future__ import unicode_literals%0A%0Afrom django.db import migrations, models%0Aimport django.db.models.deletion%0A%0A%0Aclass Migration(migrations.Migration):%0A%0A dependencies = %5B%0A ('core', '0014_publicationreference_source'),%0A %5D%0A%0A operations = %5B%0A migrations.CreateModel(%0A name='FieldOfStudyHierarchy',%0A fields=%5B%0A ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),%0A ('child_level', models.SmallIntegerField()),%0A ('parent_level', models.SmallIntegerField()),%0A ('confidence', models.FloatField()),%0A ('child', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='fieldofstudyhierarchy_child', to='core.FieldOfStudy')),%0A ('parent', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='fieldofstudyhierarchy_parent', to='core.FieldOfStudy')),%0A %5D,%0A ),%0A %5D%0A
|
|
ce4bcc19e61518273e054553494288364ab4f677
|
Add lc085_maximal_rectangle.py
|
lc085_maximal_rectangle.py
|
lc085_maximal_rectangle.py
|
Python
| 0.002349 |
@@ -0,0 +1,570 @@
+%22%22%22Leetcode 85. Maximal Rectangle%0AHard%0A%0AURL: https://leetcode.com/problems/maximal-rectangle/%0A%0AGiven a 2D binary matrix filled with 0's and 1's, find the largest rectangle%0Acontaining only 1's and return its area.%0A%0AExample:%0AInput:%0A%5B%0A %5B%221%22,%220%22,%221%22,%220%22,%220%22%5D,%0A %5B%221%22,%220%22,%221%22,%221%22,%221%22%5D,%0A %5B%221%22,%221%22,%221%22,%221%22,%221%22%5D,%0A %5B%221%22,%220%22,%220%22,%221%22,%220%22%5D%0A%5D%0AOutput: 6%0A%22%22%22%0A%0Aclass Solution(object):%0A def maximalRectangle(self, matrix):%0A %22%22%22%0A :type matrix: List%5BList%5Bstr%5D%5D%0A :rtype: int%0A %22%22%22%0A pass%0A%0A%0Adef main():%0A pass%0A%0A%0Aif __name__ == '__main__':%0A main()%0A
|
|
5d8af7dec1806e7f897a89d1a54ff5f2dc5bfec0
|
Add 'merge-json.py' script to make the final annotations file.
|
bin/merge-json.py
|
bin/merge-json.py
|
Python
| 0 |
@@ -0,0 +1,401 @@
+#!/usr/bin/env python%0Afrom collections import Mapping%0Aimport json%0Aimport sys%0A%0Afilename1 = sys.argv%5B1%5D%0Afilename2 = sys.argv%5B2%5D%0A%0Ajson_data1=open(filename1).read()%0AdictA = json.loads(json_data1)%0Ajson_data2=open(filename2).read()%0AdictB = json.loads(json_data2)%0A%0Amerged_dict = %7Bkey: value for (key, value) in (dictA.items() + dictB.items())%7D%0A%0A# string dump of the merged dict%0Aprint json.dumps(merged_dict)%0A
|
|
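The decoded script merges two JSON objects with `{key: value for (key, value) in (dictA.items() + dictB.items())}` and a bare `print`, both of which only work on Python 2 (Python 3 dict views don't support +). A Python 3 equivalent would be:

    merged_dict = {**dictA, **dictB}  # on duplicate keys, dictB wins, as in the original
    print(json.dumps(merged_dict))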
eda01dc886cde85ee9ee84d54fa0d5c5a11a776e
|
Disable failing android tests on cros.
|
tools/telemetry/telemetry/core/platform/android_platform_backend_unittest.py
|
tools/telemetry/telemetry/core/platform/android_platform_backend_unittest.py
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

import logging
import os
import unittest

from telemetry import test
from telemetry.core import bitmap
from telemetry.core import util
from telemetry.core.platform import android_platform_backend
from telemetry.unittest import system_stub


class MockAdbCommands(object):
  def __init__(self, mock_content):
    self.mock_content = mock_content

  def CanAccessProtectedFileContents(self):
    return True

  # pylint: disable=W0613
  def GetProtectedFileContents(self, file_name, log_result):
    return self.mock_content

  def PushIfNeeded(self, host_binary, device_path):
    pass

  def RunShellCommand(self, command):
    return []


class AndroidPlatformBackendTest(unittest.TestCase):
  def setUp(self):
    self._stubs = system_stub.Override(android_platform_backend,
                                       ['perf_control', 'thermal_throttle'])

  def tearDown(self):
    self._stubs.Restore()

  def testGetCpuStats(self):
    proc_stat_content = [
        '7702 (.android.chrome) S 167 167 0 0 -1 1077936448 '
        '3247 0 0 0 4 1 0 0 20 0 9 0 5603962 337379328 5867 '
        '4294967295 1074458624 1074463824 3197495984 3197494152 '
        '1074767676 0 4612 0 38136 4294967295 0 0 17 0 0 0 0 0 0 '
        '1074470376 1074470912 1102155776']
    adb_valid_proc_content = MockAdbCommands(proc_stat_content)
    backend = android_platform_backend.AndroidPlatformBackend(
        adb_valid_proc_content, False)
    cpu_stats = backend.GetCpuStats('7702')
    self.assertEquals(cpu_stats, {'CpuProcessTime': 5.0})

  def testGetCpuStatsInvalidPID(self):
    # Mock an empty /proc/pid/stat.
    adb_empty_proc_stat = MockAdbCommands([])
    backend = android_platform_backend.AndroidPlatformBackend(
        adb_empty_proc_stat, False)
    cpu_stats = backend.GetCpuStats('7702')
    self.assertEquals(cpu_stats, {})

  @test.Disabled
  def testFramesFromMp4(self):
    mock_adb = MockAdbCommands([])
    backend = android_platform_backend.AndroidPlatformBackend(mock_adb, False)

    try:
      backend.InstallApplication('avconv')
    finally:
      if not backend.CanLaunchApplication('avconv'):
        logging.warning('Test not supported on this platform')
        return  # pylint: disable=W0150

    vid = os.path.join(util.GetUnittestDataDir(), 'vid.mp4')
    expected_timestamps = [
        0,
        763,
        783,
        940,
        1715,
        1732,
        1842,
        1926,
    ]
    # pylint: disable=W0212
    for i, timestamp_bitmap in enumerate(backend._FramesFromMp4(vid)):
      timestamp, bmp = timestamp_bitmap
      self.assertEquals(timestamp, expected_timestamps[i])
      expected_bitmap = bitmap.Bitmap.FromPngFile(os.path.join(
          util.GetUnittestDataDir(), 'frame%d.png' % i))
      self.assertTrue(expected_bitmap.IsEqual(bmp))
|
Python
| 0.990071 |
@@ -1060,16 +1060,45 @@
tore()%0A%0A
+ @test.Disabled('chromeos')%0A
def te
@@ -1714,16 +1714,45 @@
5.0%7D)%0A%0A
+ @test.Disabled('chromeos')%0A
def te
|
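Decoded, both hunks insert the same decorator line, once before testGetCpuStats and once before testGetCpuStatsInvalidPID, so the two CPU-stat tests are skipped on Chrome OS while testFramesFromMp4 stays disabled everywhere:

    @test.Disabled('chromeos')
    def testGetCpuStats(self):
        ...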
7f319b9f84e441cbe893fd2cc68ecd77cfcfd987
|
create perl-file-which package (#6800)
|
var/spack/repos/builtin/packages/perl-file-which/package.py
|
var/spack/repos/builtin/packages/perl-file-which/package.py
|
Python
| 0 |
@@ -0,0 +1,1594 @@
+##############################################################################%0A# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.%0A# Produced at the Lawrence Livermore National Laboratory.%0A#%0A# This file is part of Spack.%0A# Created by Todd Gamblin, [email protected], All rights reserved.%0A# LLNL-CODE-647188%0A#%0A# For details, see https://github.com/spack/spack%0A# Please also see the NOTICE and LICENSE files for our notice and the LGPL.%0A#%0A# This program is free software; you can redistribute it and/or modify%0A# it under the terms of the GNU Lesser General Public License (as%0A# published by the Free Software Foundation) version 2.1, February 1999.%0A#%0A# This program is distributed in the hope that it will be useful, but%0A# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF%0A# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and%0A# conditions of the GNU Lesser General Public License for more details.%0A#%0A# You should have received a copy of the GNU Lesser General Public%0A# License along with this program; if not, write to the Free Software%0A# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA%0A##############################################################################%0Afrom spack import *%0A%0A%0Aclass PerlFileWhich(PerlPackage):%0A %22%22%22Perl implementation of the which utility as an API%22%22%22%0A%0A homepage = %22http://cpansearch.perl.org/src/PLICEASE/File-Which-1.22/lib/File/Which.pm%22%0A url = %22http://search.cpan.org/CPAN/authors/id/P/PL/PLICEASE/File-Which-1.22.tar.gz%22%0A%0A version('1.22', 'face60fafd220dc83fa581ef6f96d480')%0A
|
|
a2b4389db17759086c4cd804b6cbfb1b658d547e
|
Create equal_sides_of_an_array.py
|
equal_sides_of_an_array.py
|
equal_sides_of_an_array.py
|
Python
| 0.998288 |
@@ -0,0 +1,333 @@
+#Kunal Gautam%0A#Codewars : @Kunalpod%0A#Problem name: Equal Sides Of An Array%0A#Problem level: 6 kyu%0A%0Adef find_even_index(arr):%0A if not sum(arr%5B1:%5D): return 0%0A if not sum(arr%5B:len(arr)-1%5D): return len(arr)-1%0A for i in range(1, len(arr)-1):%0A if sum(arr%5B:i%5D)==sum(arr%5Bi+1:%5D):%0A return i %0A return -1 %0A
|
|
d2978eae5b502cc5bc4b020044b88f02522f90cd
|
Add jobs.utils module
|
virtool/jobs/utils.py
|
virtool/jobs/utils.py
|
Python
| 0.000001 |
@@ -0,0 +1,155 @@
+def is_running_or_waiting(document):%0A latest_state = document%5B%22status%22%5D%5B-1%5D%5B%22state%22%5D%0A return latest_state != %22waiting%22 and latest_state != %22running%22%0A
|
|
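Decoded, the new helper reads:

    def is_running_or_waiting(document):
        latest_state = document["status"][-1]["state"]
        return latest_state != "waiting" and latest_state != "running"

Note that this returns True exactly when the latest state is *not* waiting or running, the opposite of what the name suggests; whether that inversion is intentional can't be told from the record alone.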
3bae93629c81cc33e565912e4b9bafeff536ec22
|
Create hostgroup_info.py
|
examples/hostgroup_info.py
|
examples/hostgroup_info.py
|
Python
| 0.000001 |
@@ -0,0 +1,627 @@
+def queryHostGroupInfo():%0A %22%22%22%0A %22query host group info%22%0A %22%22%22%0A if lsf.lsb_init(%22queryHostGroupInfo%22) %3E 0:%0A return -1;%0A%0A strArr = lsf.new_stringArray(2);%0A lsf.stringArray_setitem(strArr, 0, %22hg1%22);%0A lsf.stringArray_setitem(strArr, 1, %22hg2%22);%0A for hgroupInfo in lsf.get_hostgroup_info_by_name(strArr,2):%0A if hgroupInfo != None:%0A print 'hgroup name = %25s' %25 hgroupInfo.group;%0A print 'hgroup list = %25s' %25 hgroupInfo.memberList;%0A else:%0A print 'hgroupInfo is null'%0A return -1;%0A%0A return 0;%0A%0Aif __name__ == '__main__':%0A queryHostGroupInfo();%0A
|
|
986b20363cc84be1822588dd7cc935fca7ef7f48
|
add test for get_genofile_samplelist in marker_regression/run_mapping.py
|
wqflask/tests/wqflask/marker_regression/test_run_mapping.py
|
wqflask/tests/wqflask/marker_regression/test_run_mapping.py
|
Python
| 0 |
@@ -0,0 +1,977 @@
+import unittest%0Afrom unittest import mock%0Afrom wqflask.marker_regression.run_mapping import get_genofile_samplelist%0A%0A%0Aclass AttributeSetter:%0A%09def __init__(self,obj):%0A%09%09for k,v in obj.items():%0A%09%09%09setattr(self,k,v)%0A%0A%0Aclass MockDataSetGroup(AttributeSetter):%0A%09%0A%09def get_genofiles(self):%0A%09%09return %5B%7B%22location%22:%22~/genofiles/g1_file%22,%22sample_list%22:%5B%22S1%22,%22S2%22,%22S3%22,%22S4%22%5D%7D%5D%0Aclass TestRunMapping(unittest.TestCase):%0A%09def setUp(self):%0A%09%09self.group=MockDataSetGroup(%7B%22genofile%22:%22~/genofiles/g1_file%22%7D)%0A%09%09self.dataset=AttributeSetter(%7B%22group%22:self.group%7D)%0A%0A%09def tearDown(self):%0A%09%09self.dataset=AttributeSetter(%7B%22group%22:%7B%22location%22:%22~/genofiles/g1_file%22%7D%7D)%0A%0A%0A%09def test_get_genofile_samplelist(self):%0A%09%09#location true and sample list true%0A%0A%09%09results_1=get_genofile_samplelist(self.dataset)%0A%09%09self.assertEqual(results_1,%5B%22S1%22,%22S2%22,%22S3%22,%22S4%22%5D)%0A%09%09#return empty array%0A%09%09self.group.genofile=%22~/genofiles/g2_file%22%0A%09%09result_2=get_genofile_samplelist(self.dataset)%0A%09%09self.assertEqual(result_2,%5B%5D)%0A%0A%0A%0A
|
|
fe145fd87db777d9eeb361688d502b1b3ec4b2e1
|
Add a new Model-View-Projection matrix tool.
|
Transformation.py
|
Transformation.py
|
Python
| 0 |
@@ -0,0 +1,1095 @@
+# -*- coding:utf-8 -*- %0A%0A# ***************************************************************************%0A# Transformation.py%0A# -------------------%0A# update : 2013-11-13%0A# copyright : (C) 2013 by Micha%C3%ABl Roy%0A# email : [email protected]%0A# ***************************************************************************%0A%0A# ***************************************************************************%0A# * *%0A# * This program is free software; you can redistribute it and/or modify *%0A# * it under the terms of the GNU General Public License as published by *%0A# * the Free Software Foundation; either version 2 of the License, or *%0A# * (at your option) any later version. *%0A# * *%0A# ***************************************************************************%0A%0A%0A#%0A# External dependencies%0A#%0Afrom numpy import *%0A%0A%0A
|
|
c2089b3ed549d89942f57075d0b6d573d980bc30
|
make app load in worker in uwsgi.ini, pass db configuration dynamically to docker image as env variable
|
app/config.py
|
app/config.py
|
Python
| 0 |
@@ -0,0 +1,732 @@
+from datetime import timedelta%0A%0A%0Aclass Config(object):%0A DEBUG = False%0A TESTING = False%0A SQLALCHEMY_DATABASE_URI = 'postgresql+psycopg2://%7B%7D:%7B%7D@%7B%7D/%7B%7D'%0A APP_NAME = '%7B%7D Server'%0A SECRET_KEY = '%7B%7D'%0A JWT_EXPIRATION_DELTA = timedelta(days=30)%0A JWT_AUTH_URL_RULE = '/api/v1/auth'%0A SECURITY_REGISTERABLE = True%0A SECURITY_RECOVERABLE = True%0A SECURITY_TRACKABLE = True%0A SECURITY_PASSWORD_HASH = 'sha512_crypt'%0A SECURITY_PASSWORD_SALT = '%7B%7D'%0A SQLALCHEMY_TRACK_MODIFICATIONS = False%0A%0A%0Aclass ProductionConfig(Config):%0A APP_NAME = '%7B%7D Production Server'%0A DEBUG = False%0A%0A%0Aclass DevelopmentConfig(Config):%0A DEBUG = True%0A MAIL_SUPPRESS_SEND = False%0A%0A%0Aclass TestingConfig(Config):%0A TESTING = True%0A
|
|
2f0700093141643bd66e99d271f9e74087e148e6
|
Add Message model migration file.
|
core/migrations/0002_message.py
|
core/migrations/0002_message.py
|
Python
| 0 |
@@ -0,0 +1,2633 @@
+# -*- coding: utf-8 -*-%0A# Generated by Django 1.9.7 on 2016-08-05 19:19%0Afrom __future__ import unicode_literals%0A%0Aimport django.contrib.postgres.fields%0Afrom django.db import migrations, models%0Aimport django.db.models.deletion%0A%0A%0Aclass Migration(migrations.Migration):%0A%0A dependencies = %5B%0A ('core', '0001_initial'),%0A %5D%0A%0A operations = %5B%0A migrations.CreateModel(%0A name='Message',%0A fields=%5B%0A ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),%0A ('created', models.DateTimeField(auto_now_add=True)),%0A ('updated', models.DateTimeField(auto_now=True)),%0A ('goes_id', models.CharField(max_length=8)),%0A ('goes_channel', models.PositiveSmallIntegerField()),%0A ('goes_spacecraft', models.CharField(choices=%5B('E', 'East'), ('W', 'West')%5D, default='E', max_length=1)),%0A ('arrival_time', models.DateTimeField()),%0A ('failure_code', models.CharField(max_length=1)),%0A ('signal_strength', models.PositiveSmallIntegerField()),%0A ('frequency_offset', models.CharField(max_length=2)),%0A ('modulation_index', models.CharField(choices=%5B('N', 'Normal (60 degrees +/- 5)'), ('L', 'Low (50 degrees)'), ('H', 'High (70 degrees)')%5D, default='N', max_length=1)),%0A ('data_quality', models.CharField(choices=%5B('N', 'Normal (error rate %3C 10%5E-6)'), ('F', 'Fair (10%5E-6 %3C error rate %3C 10%5E-4)'), ('P', 'Poor (error rate %3E 10%5E-4)')%5D, default='N', max_length=1)),%0A ('data_source', models.CharField(choices=%5B('LE', 'Cincinnati East; USACE LRD Cincinnati'), ('d1', 'NIFC West Boise ID - Unit 1; NIFC Boise'), ('d2', 'NIFC West Boise ID - Unit 2; NIFC Boise'), ('OW', 'Omaha West; USACE NWO'), ('RE', 'Rock Island East; USACE MVR'), ('RW', 'Rock Island West; USACE MVR'), ('SF', 'West Palm Beach East; SFWMD'), ('UB', 'Ucom Backup @ WCDA; NOAA Wallops CDA'), ('UP', 'Ucom Primary @ WCDA; NOAA Wallops CDA'), ('XE', 'Sioux Falls, East; USGS EROS'), ('XW', 'Sioux Falls, West; USGS EROS'), ('XL', 'Sioux Falls, LRIT; USGS EROS'), ('RL', 'Reston, LRIT; Reston, Virginia')%5D, max_length=2)),%0A ('recorded_message_length', models.PositiveSmallIntegerField()),%0A ('values', django.contrib.postgres.fields.ArrayField(base_field=models.IntegerField(), size=None)),%0A ('message_text', models.TextField()),%0A ('station', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='core.Station')),%0A %5D,%0A ),%0A %5D%0A
|
|
30c5b785863df0269c7abbbc5000d83df4f815c2
|
Predict some data similar to problem on assignment
|
outlier_detection/svm_classification_with_synthetic_data.py
|
outlier_detection/svm_classification_with_synthetic_data.py
|
Python
| 0.999856 |
@@ -0,0 +1,1017 @@
+import numpy as np%0Afrom matplotlib import pyplot as plt%0Aimport matplotlib.font_manager%0Afrom sklearn import svm%0A%0A%0Adef main():%0A tests = 20%0A%0A # Generate train data%0A X = (np.random.randn(120, 2) * %0A np.array(%5B0.08, 0.02%5D) + %0A np.array(%5B0.3, 0.6%5D))%0A%0A X_train = X%5B:-tests%5D%0A X_test = X%5B-tests:%5D%0A X_outliers = np.copy(X_test)%0A X_outliers = (X_outliers + %0A np.random.uniform(low=-0.1, high=0.1, size=(tests, 2)))%0A%0A # fit the model%0A clf = svm.OneClassSVM(nu=0.1, kernel='rbf', gamma=0.1)%0A clf.fit(X_train)%0A%0A y_pred_train = clf.predict(X_train)%0A y_pred_test = clf.predict(X_test)%0A y_pred_outliers = clf.predict(X_outliers)%0A %0A print(y_pred_test)%0A print(y_pred_outliers)%0A%0A s = 40%0A plt.scatter(X_train%5B:, 0%5D, X_train%5B:, 1%5D, c='white', s=s)%0A plt.scatter(X_test%5B:, 0%5D, X_test%5B:, 1%5D, c='blueviolet', s=s)%0A plt.scatter(X_outliers%5B:, 0%5D, X_outliers%5B:, 1%5D, c='gold', s=s)%0A%0A plt.axis('equal')%0A plt.show()%0A%0A%0Aif __name__ == '__main__':%0A main()%0A
|
|
7a400bc652e465d90f0212143836999a83f32eed
|
Make assert statement more specific (#4248)
|
tests/sentry/web/frontend/test_2fa.py
|
tests/sentry/web/frontend/test_2fa.py
|
from __future__ import absolute_import

from django.core.urlresolvers import reverse

from sentry.testutils import TestCase
from sentry.models import TotpInterface


class TwoFactorAuthTest(TestCase):
    def test_security_renders_without_2fa(self):
        user = self.create_user('[email protected]')
        self.login_as(user)
        path = reverse('sentry-account-security')
        resp = self.client.get(path)
        assert resp.status_code == 200
        self.assertTemplateUsed('sentry/account/security.html')
        assert 'has_2fa' in resp.context
        assert resp.context['has_2fa'] is False
        self.assertContains(resp, 'Enable')

    def test_security_renders_with_2fa(self):
        user = self.create_user('[email protected]')
        self.login_as(user)
        TotpInterface().enroll(user)
        path = reverse('sentry-account-security')
        resp = self.client.get(path)
        self.assertTemplateUsed('sentry/account/security.html')
        assert 'has_2fa' in resp.context
        assert resp.context['has_2fa'] is True
        self.assertContains(resp, 'Manage')

    def test_2fa_settings_render_without_2fa(self):
        user = self.create_user('[email protected]')
        path = reverse('sentry-account-settings-2fa')
        self.login_as(user)
        resp = self.client.get(path)
        assert resp.status_code == 200
        self.assertTemplateUsed('sentry/account/twofactor.html')
        assert 'has_2fa' in resp.context
        assert resp.context['has_2fa'] is False
        self.assertContains(resp, 'Add</button>')
        self.assertContains(resp, 'this can only be managed if 2FA is enabled')
        self.assertNotContains(resp, '<span class="icon-trash">')

    def test_2fa_settings_render_with_2fa(self):
        user = self.create_user('[email protected]')
        path = reverse('sentry-account-settings-2fa')
        self.login_as(user)
        TotpInterface().enroll(user)
        resp = self.client.get(path)
        assert resp.status_code == 200
        self.assertTemplateUsed('sentry/account/twofactor.html')
        assert 'has_2fa' in resp.context
        assert resp.context['has_2fa'] is True
        self.assertNotContains(resp, 'this can only be managed if 2FA is enabled')
        self.assertContains(resp, '<span class="icon-trash">')

    def test_add_2fa_SSO(self):
        user = self.create_user('[email protected]')
        user.set_unusable_password()
        user.save()
        path = reverse('sentry-account-settings-2fa-totp')
        self.login_as(user)
        resp = self.client.post(path, data={'enroll': ''})
        assert resp.status_code == 200
        self.assertTemplateUsed('sentry/account/twofactor/enroll_totp.html')
        assert 'otp_form' in resp.context
        self.assertContains(resp, 'One-time password')
        self.assertContains(resp, 'Authenticator App')
        self.assertNotContains(resp, 'Sentry account password')

    def test_add_2fa_password(self):
        user = self.create_user('[email protected]')
        path = reverse('sentry-account-settings-2fa-totp')
        self.login_as(user)
        resp = self.client.post(path, data={'enroll': ''})
        self.assertContains(resp, 'QR')
        self.assertContains(resp, 'Sentry account password')
        self.assertNotContains(resp, 'Method is currently not enabled')

    def test_totp_get_path_render(self):
        user = self.create_user('[email protected]')
        path = reverse('sentry-account-settings-2fa-totp')
        self.login_as(user)
        resp = self.client.get(path)
        self.assertNotContains(resp, 'QR')
        self.assertNotContains(resp, 'Sentry account password')
        self.assertContains(resp, 'Method is currently not enabled')

    def test_remove_2fa_SSO(self):
        user = self.create_user('[email protected]')
        user.set_unusable_password()
        user.save()
        TotpInterface().enroll(user)
        path = reverse('sentry-account-settings-2fa-totp')
        self.login_as(user)
        resp = self.client.post(path, data={'remove': ''})
        assert resp.status_code == 200
        self.assertTemplateUsed('sentry/account/twofactor/remove.html')
        self.assertContains(resp, 'Do you want to remove the method?')
        self.assertNotContains(resp, 'Sentry account password')

    def test_remove_2fa_password(self):
        user = self.create_user('[email protected]')
        TotpInterface().enroll(user)
        path = reverse('sentry-account-settings-2fa-totp')
        self.login_as(user)
        resp = self.client.post(path, data={'remove': ''})
        assert resp.status_code == 200
        self.assertTemplateUsed('sentry/account/twofactor/remove.html')
        self.assertContains(resp, 'Do you want to remove the method?')
        self.assertContains(resp, 'Sentry account password')
|
Python
| 0.000456 |
@@ -3180,34 +3180,54 @@
Contains(resp, '
-QR
+Scan the below QR code
')%0A self.
@@ -3601,18 +3601,38 @@
(resp, '
-QR
+Scan the below QR code
')%0A
|
68dbfedf90fb9e6c922971deaeccad148a258a70
|
Add tests for PyEcore extension (EClass/EModelElement tests)
|
tests/test_dynamic_ecore_extension.py
|
tests/test_dynamic_ecore_extension.py
|
Python
| 0 |
@@ -0,0 +1,1042 @@
+import pytest%0Afrom pyecore.ecore import *%0Aimport pyecore.ecore as ecore%0Afrom ordered_set import OrderedSet%0A%0A%0Adef test__EModelElement_extension():%0A A = EClass('A', superclass=(EModelElement.eClass))%0A a = A()%0A assert a.eAnnotations == OrderedSet()%0A%0A annotation = EAnnotation(source='testAnnot')%0A annotation.details%5B'test'%5D = 'value'%0A a.eAnnotations.append(annotation)%0A assert len(a.eAnnotations) == 1%0A assert a.getEAnnotation('testAnnot') is annotation%0A assert a.getEAnnotation('testAnnot').details%5B'test'%5D == 'value'%0A%0A%0Adef test__EClass_extension():%0A SuperEClass = EClass('SuperEClass', superclass=(EClass.eClass,))%0A A = SuperEClass(name='A')%0A assert isinstance(A, EClass)%0A%0A a = A()%0A assert isinstance(a, EObject)%0A assert a.eClass is A%0A%0A%0Adef test__EClass_modification():%0A EClass.new_feature = EAttribute('new_feature', EInt)%0A A = EClass('A')%0A assert A.new_feature == 0%0A%0A A.new_feature = 5%0A assert A.new_feature == 5%0A%0A with pytest.raises(BadValueError):%0A A.new_feature = 'a'%0A
|
|
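For context, a minimal sketch of PyEcore's dynamic-metamodel API that the tests above exercise; the feature added here is illustrative and not taken from the test file:

from pyecore.ecore import EClass, EAttribute, EString

# Build a dynamic metaclass and give it one structural feature.
A = EClass('A')
A.eStructuralFeatures.append(EAttribute('name', EString))

a = A()           # an EClass is instantiated by calling it
a.name = 'hello'  # typed access; assigning a non-string raises BadValueError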
40431228c8535f325b005bb52485cae87a8be714
|
Add test module for napalm_acl
|
tests/unit/modules/test_napalm_acl.py
|
tests/unit/modules/test_napalm_acl.py
|
Python
| 0 |
@@ -0,0 +1,1380 @@
+# -*- coding: utf-8 -*-%0A'''%0A :codeauthor: :email:%60Anthony Shaw %[email protected]%3E%60%0A'''%0A%0A# Import Python Libs%0Afrom __future__ import absolute_import%0A%0A# Import Salt Testing Libs%0Afrom tests.support.mixins import LoaderModuleMockMixin%0Afrom tests.support.unit import TestCase, skipIf%0Afrom tests.support.mock import (%0A MagicMock,%0A NO_MOCK,%0A NO_MOCK_REASON%0A)%0A%0Aimport tests.support.napalm as napalm_test_support%0Aimport salt.modules.napalm_acl as napalm_acl # NOQA%0A%0A%0A@skipIf(NO_MOCK, NO_MOCK_REASON)%0Aclass NapalmAclModuleTestCase(TestCase, LoaderModuleMockMixin):%0A%0A def setup_loader_modules(self):%0A module_globals = %7B%0A '__salt__': %7B%0A 'config.option': MagicMock(return_value=%7B%0A 'test': %7B%0A 'driver': 'test',%0A 'key': '2orgk34kgk34g'%0A %7D%0A %7D),%0A 'file.file_exists': napalm_test_support.true,%0A 'file.join': napalm_test_support.join,%0A 'file.get_managed': napalm_test_support.get_managed_file,%0A 'random.hash': napalm_test_support.random_hash%0A %7D%0A %7D%0A%0A return %7Bnapalm_acl: module_globals%7D%0A%0A def test_load_term_config(self):%0A ret = napalm_acl.load_term_config(%22test_filter%22, %22test_term%22)%0A assert ret%5B'out'%5D is napalm_test_support.TEST_TERM_CONFIG%0A
|
|
f987b39bb43301c735f30169010832665953efe6
|
Add a sample permission plugin illustrating the check on realm resources, related to #6211.
|
sample-plugins/public_wiki_policy.py
|
sample-plugins/public_wiki_policy.py
|
Python
| 0 |
@@ -0,0 +1,2047 @@
+from fnmatch import fnmatchcase%0A%0Afrom trac.config import Option%0Afrom trac.core import *%0Afrom trac.perm import IPermissionPolicy%0A%0Aclass PublicWikiPolicy(Component):%0A %22%22%22Sample permission policy plugin illustrating how to check %0A permission on realms.%0A%0A Don't forget to integrate that plugin in the appropriate place in the%0A list of permission policies:%0A %7B%7B%7B%0A %5Btrac%5D%0A permission_policies = PublicWikiPolicy, DefaultPermissionPolicy%0A %7D%7D%7D%0A%0A Then you can configure which pages you want to make public:%0A %7B%7B%7B%0A %5Bpublic_wiki%5D%0A view = Public*%0A modify = PublicSandbox/*%0A %7D%7D%7D%0A%0A %22%22%22%0A%0A implements(IPermissionPolicy)%0A%0A view = Option('public_wiki', 'view', 'Public*',%0A %22%22%22Case-sensitive glob pattern used for granting view permission on%0A all Wiki pages matching it.%22%22%22)%0A%0A modify = Option('public_wiki', 'modify', 'Public*',%0A %22%22%22Case-sensitive glob pattern used for granting modify permissions%0A on all Wiki pages matching it.%22%22%22)%0A%0A def check_permission(self, action, username, resource, perm):%0A if resource: # fine-grained permission check%0A if resource.realm == 'wiki': # wiki realm or resource%0A if resource.id: # ... it's a resource%0A if action == 'WIKI_VIEW': # (think 'VIEW' here)%0A pattern = self.view%0A else:%0A pattern = self.modify%0A if fnmatchcase(resource.id, pattern):%0A return True%0A else: # ... it's a realm%0A return True %0A # this policy ''may'' grant permissions on some wiki pages%0A else: # coarse-grained permission check%0A # %0A # support for the legacy permission checks: no resource specified%0A # and realm information in the action name itself.%0A #%0A if action.startswith('WIKI_'):%0A return True%0A # this policy ''may'' grant permissions on some wiki pages%0A%0A
|
|
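The matching in the plugin above relies on fnmatchcase, which is case-sensitive glob matching, consistent with the option docstrings. For illustration:

from fnmatch import fnmatchcase

fnmatchcase('PublicSandbox/Page1', 'PublicSandbox/*')  # True: pattern matches
fnmatchcase('publicsandbox/Page1', 'PublicSandbox/*')  # False: case matters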
784cd71fe24b1f5ce57a1982186dabc768892883
|
Fix discount calculation logic
|
saleor/product/models/discounts.py
|
saleor/product/models/discounts.py
|
from __future__ import unicode_literals
from django.conf import settings
from django.db import models
from django.utils.translation import pgettext_lazy
from django.utils.encoding import python_2_unicode_compatible
from django_prices.models import PriceField
from prices import FixedDiscount
class NotApplicable(ValueError):
pass
@python_2_unicode_compatible
class FixedProductDiscount(models.Model):
name = models.CharField(max_length=255)
products = models.ManyToManyField('Product', blank=True)
discount = PriceField(pgettext_lazy('Discount field', 'discount value'),
currency=settings.DEFAULT_CURRENCY,
max_digits=12, decimal_places=2)
class Meta:
app_label = 'product'
def __repr__(self):
return 'FixedProductDiscount(name=%r, discount=%r)' % (
str(self.discount), self.name)
def __str__(self):
return self.name
def modifier_for_product(self, variant):
if not self.products.filter(pk=variant.product.pk).exists():
raise NotApplicable('Discount not applicable for this product')
if self.discount > variant.get_price(discounted=False):
raise NotApplicable('Discount too high for this product')
return FixedDiscount(self.discount, name=self.name)
def get_product_discounts(variant, discounts, **kwargs):
for discount in discounts:
try:
yield discount.modifier_for_product(variant, **kwargs)
except NotApplicable:
pass
|
Python
| 0.000106 |
@@ -988,16 +988,312 @@
+from ...product.models import ProductVariant%0A if isinstance(variant, ProductVariant):%0A pk = variant.product.pk%0A check_price = variant.get_price_per_item()%0A else:%0A pk = variant.pk%0A check_price = variant.get_price_per_item(variant)%0A
if not s
@@ -1315,32 +1315,16 @@
lter(pk=
-variant.product.
pk).exis
@@ -1436,43 +1436,19 @@
t %3E
-variant.get_price(discounted=False)
+check_price
:%0A
|
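For readability, this is roughly what the patched modifier_for_product looks like once the diff above is applied (condensed; class context as in old_contents):

def modifier_for_product(self, variant):
    from ...product.models import ProductVariant
    if isinstance(variant, ProductVariant):
        pk = variant.product.pk
        check_price = variant.get_price_per_item()
    else:
        pk = variant.pk
        check_price = variant.get_price_per_item(variant)
    if not self.products.filter(pk=pk).exists():
        raise NotApplicable('Discount not applicable for this product')
    if self.discount > check_price:
        raise NotApplicable('Discount too high for this product')
    return FixedDiscount(self.discount, name=self.name)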
964d01fd9a730d02aac85740bce0ef9dace6517b
|
add migrations
|
molo/core/migrations/0054_merged_cms_models.py
|
molo/core/migrations/0054_merged_cms_models.py
|
Python
| 0.000001 |
@@ -0,0 +1,1615 @@
+# -*- coding: utf-8 -*-%0A# Generated by Django 1.9.12 on 2017-02-21 12:13%0Afrom __future__ import unicode_literals%0A%0Afrom django.db import migrations, models%0Aimport django.db.models.deletion%0Aimport modelcluster.fields%0A%0A%0Aclass Migration(migrations.Migration):%0A%0A dependencies = %5B%0A ('wagtailcore', '0032_add_bulk_delete_page_permission'),%0A ('core', '0053_add_next_and_recommended_functionality'),%0A %5D%0A%0A operations = %5B%0A migrations.CreateModel(%0A name='Languages',%0A fields=%5B%0A ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),%0A ('site', models.OneToOneField(editable=False, on_delete=django.db.models.deletion.CASCADE, to='wagtailcore.Site')),%0A %5D,%0A options=%7B%0A 'abstract': False,%0A %7D,%0A ),%0A migrations.CreateModel(%0A name='SiteLanguageRelation',%0A fields=%5B%0A ('sitelanguage_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='core.SiteLanguage')),%0A ('sort_order', models.IntegerField(blank=True, editable=False, null=True)),%0A ('language_setting', modelcluster.fields.ParentalKey(on_delete=django.db.models.deletion.CASCADE, related_name='languages', to='core.Languages')),%0A %5D,%0A options=%7B%0A 'ordering': %5B'sort_order'%5D,%0A 'abstract': False,%0A %7D,%0A bases=('core.sitelanguage', models.Model),%0A ),%0A %5D%0A
|
|
17b4efb401d36060f51e07da5ace83c008d421c5
|
Create table charge_observation.
|
problem/charge_state/alembic/versions/2154afa58ba0_create_table_charge_observation.py
|
problem/charge_state/alembic/versions/2154afa58ba0_create_table_charge_observation.py
|
Python
| 0 |
@@ -0,0 +1,759 @@
+%22%22%22Create table charge_observation.%0A%0ARevision ID: 2154afa58ba0%0ARevises: %0ACreate Date: 2020-01-05 12:18:25.331846%0A%0A%22%22%22%0Afrom alembic import op%0Aimport sqlalchemy as sa%0A%0A%0A# revision identifiers, used by Alembic.%0Arevision = '2154afa58ba0'%0Adown_revision = None%0Abranch_labels = None%0Adepends_on = None%0A%0A%0Adef upgrade():%0A # ### commands auto generated by Alembic - please adjust! ###%0A op.create_table('charge_observation',%0A sa.Column('id', sa.Integer(), nullable=False),%0A sa.Column('stamp', sa.DateTime(), nullable=True),%0A sa.PrimaryKeyConstraint('id')%0A )%0A # ### end Alembic commands ###%0A%0A%0Adef downgrade():%0A # ### commands auto generated by Alembic - please adjust! ###%0A op.drop_table('charge_observation')%0A # ### end Alembic commands ###%0A
|
|
841fb156fff3d257d39afdc9d3d4e587427fe2cf
|
Add new file missed in earlier commit: a placeholder for projects that do not load for some reason
|
Source/Scm/wb_scm_project_place_holder.py
|
Source/Scm/wb_scm_project_place_holder.py
|
Python
| 0 |
@@ -0,0 +1,1980 @@
+'''%0A ====================================================================%0A Copyright (c) 2016 Barry A Scott. All rights reserved.%0A%0A This software is licensed as described in the file LICENSE.txt,%0A which you should have received as part of this distribution.%0A%0A ====================================================================%0A%0A wb_scm_project_place_holder.py%0A%0A'''%0Aimport pathlib%0A%0A#%0A# ScmProjectPlaceholder is used when the project cannot be loaded%0A#%0Aclass ScmProjectPlaceholder:%0A def __init__( self, app, prefs_project ):%0A self.app = app%0A self.prefs_project = prefs_project%0A%0A self.tree = ScmProjectPlaceholderTreeNode( self, prefs_project.name, pathlib.Path( '.' ) )%0A%0A def scmType( self ):%0A return self.prefs_project.scm_type%0A%0A def isNotEqual( self, other ):%0A return self.projectName() != other.projectName()%0A%0A def getBranchName( self ):%0A return ''%0A%0A def projectName( self ):%0A return self.prefs_project.name%0A%0A def projectPath( self ):%0A return pathlib.Path( self.prefs_project.path )%0A%0A def updateState( self ):%0A pass%0A%0Aclass ScmProjectPlaceholderTreeNode:%0A def __init__( self, project, name, path ):%0A self.project = project%0A self.name = name%0A self.__path = path%0A%0A def __repr__( self ):%0A return '%3CScmProjectPlaceholderTreeNode: project %25r, path %25s%3E' %25 (self.project, self.__path)%0A%0A def isNotEqual( self, other ):%0A return (self.relativePath() != other.relativePath()%0A or self.project.isNotEqual( other.project ))%0A%0A def __lt__( self, other ):%0A return self.name %3C other.name%0A%0A def relativePath( self ):%0A return self.__path%0A%0A def absolutePath( self ):%0A return self.project.projectPath() / self.__path%0A%0A def getAllFolderNodes( self ):%0A return %5B%5D%0A%0A def getAllFolderNames( self ):%0A return %5B%5D%0A%0A def getAllFileNames( self ):%0A return %5B%5D%0A%0A def isByPath( self ):%0A return False%0A
|
|
f1ba45809e6682235c07ab89e4bc32e56b2fa84f
|
Create i_love_lance_janice.py
|
i_love_lance_janice.py
|
i_love_lance_janice.py
|
Python
| 0.000014 |
@@ -0,0 +1,1978 @@
+%22%22%22%0AI Love Lance & Janice%0A=====================%0AYou've caught two of your fellow minions passing coded notes back and forth - while they're on duty, no less! Worse, you're pretty sure it's not job-related - they're both huge fans of the space soap opera %22Lance & Janice%22. You know how much Commander Lambda hates waste, so if you can prove that these minions are wasting her time passing non-job-related notes, it'll put you that much closer to a promotion.%0AFortunately for you, the minions aren't exactly advanced cryptographers. In their code, every lowercase letter %5Ba..z%5D is replaced with the corresponding one in %5Bz..a%5D, while every other character (including uppercase letters and punctuation) is left untouched. That is, 'a' becomes 'z', 'b' becomes 'y', 'c' becomes 'x', etc. For instance, the word %22vmxibkgrlm%22, when decoded, would become %22encryption%22.%0AWrite a function called answer(s) which takes in a string and returns the deciphered string so you can show the commander proof that these minions are talking about %22Lance & Janice%22 instead of doing their jobs.%0ALanguages%0A=========%0ATo provide a Python solution, edit solution.py%0ATo provide a Java solution, edit solution.java%0ATest cases%0A==========%0AInputs:%0A (string) s = %22wrw blf hvv ozhg mrtsg'h vkrhlwv?%22%0AOutput:%0A (string) %22did you see last night's episode?%22%0AInputs:%0A (string) s = %22Yvzs! I xzm'g yvorvev Lzmxv olhg srh qly zg gsv xlolmb!!%22%0AOutput:%0A (string) %22Yeah! I can't believe Lance lost his job at the colony!!%22%0A%22%22%22%0A%0Adef strSlice(s):%0A str_lst = %5B%5D%0A%0A for i in range(len(s)):%0A sliced_str = s%5B0:i+1%5D%0A str_lst.append(sliced_str)%0A%0A return str_lst%0A%0A%0Adef answer(s):%0A%0A str_lst = strSlice(s)%0A str_len_lst = %5B%5D%0A%0A for elmt in str_lst:%0A cnt_elmt = s.count(elmt)%0A quotient = len(s)/len(elmt)%0A if (elmt * quotient) == s:%0A str_len_lst.append(cnt_elmt)%0A%0A return max(str_len_lst)%0A%0A# s = %22abccbaabccba%22%0A# 2%0A%0As = %22abcabcabcabc%22%0A# 4%0A%0Aprint answer(s)%0A
|
|
a08a7da41300721e07c1bff8e36e3c3d69af06fb
|
Add py-asdf package (#12817)
|
var/spack/repos/builtin/packages/py-asdf/package.py
|
var/spack/repos/builtin/packages/py-asdf/package.py
|
Python
| 0 |
@@ -0,0 +1,1124 @@
+# Copyright 2013-2019 Lawrence Livermore National Security, LLC and other%0A# Spack Project Developers. See the top-level COPYRIGHT file for details.%0A#%0A# SPDX-License-Identifier: (Apache-2.0 OR MIT)%0A%0Afrom spack import *%0A%0A%0Aclass PyAsdf(PythonPackage):%0A %22%22%22The Advanced Scientific Data Format (ASDF) is a next-generation%0A interchange format for scientific data. This package contains the Python%0A implementation of the ASDF Standard.%22%22%22%0A%0A homepage = %22https://github.com/spacetelescope/asdf%22%0A url = %22https://pypi.io/packages/source/a/asdf/asdf-2.4.2.tar.gz%22%0A%0A version('2.4.2', sha256='6ff3557190c6a33781dae3fd635a8edf0fa0c24c6aca27d8679af36408ea8ff2')%0A%0A depends_on('[email protected]:', type=('build', 'run'))%0A depends_on('py-setuptools', type='build')%0A depends_on('py-setuptools-scm', type='build')%0A depends_on('[email protected]:2.6.0', type=('build', 'run'))%0A depends_on('[email protected]:', type=('build', 'run'))%0A depends_on('[email protected]:3.999', type=('build', 'run'))%0A depends_on('[email protected]:', type=('build', 'run'))%0A depends_on('[email protected]:', type=('build', 'run'))%0A
|
|
6e0be0636d53e2a78c677441fbf4174042110541
|
Normalize tool_name case for timing logs.
|
Allura/allura/lib/custom_middleware.py
|
Allura/allura/lib/custom_middleware.py
|
import os
import re
import logging
import tg
import pkg_resources
from paste import fileapp
from pylons import c
from pylons.util import call_wsgi_application
from timermiddleware import Timer, TimerMiddleware
from webob import exc, Request
import pysolr
from allura.lib import helpers as h
log = logging.getLogger(__name__)
class StaticFilesMiddleware(object):
'''Custom static file middleware
Map everything in allura/public/nf/* to <script_name>/*
For each plugin, map everything <module>/nf/<ep_name>/* to <script_name>/<ep_name>/*
'''
CACHE_MAX_AGE=60*60*24*365
def __init__(self, app, script_name=''):
self.app = app
self.script_name = script_name
self.directories = [
(self.script_name + ep.name.lower() + '/', ep)
for ep in pkg_resources.iter_entry_points('allura') ]
def __call__(self, environ, start_response):
environ['static.script_name'] = self.script_name
if not environ['PATH_INFO'].startswith(self.script_name):
return self.app(environ, start_response)
try:
app = self.get_app(environ)
app.cache_control(public=True, max_age=self.CACHE_MAX_AGE)
return app(environ, start_response)
except OSError:
return exc.HTTPNotFound()(environ, start_response)
def get_app(self, environ):
for prefix, ep in self.directories:
if environ['PATH_INFO'].startswith(prefix):
filename = environ['PATH_INFO'][len(prefix):]
file_path = pkg_resources.resource_filename(
ep.module_name, os.path.join(
'nf',
ep.name.lower(),
filename))
return fileapp.FileApp(file_path, [
('Access-Control-Allow-Origin', '*')])
filename = environ['PATH_INFO'][len(self.script_name):]
file_path = pkg_resources.resource_filename(
'allura', os.path.join(
'public', 'nf',
filename))
return fileapp.FileApp(file_path, [
('Access-Control-Allow-Origin', '*')])
class LoginRedirectMiddleware(object):
'''Actually converts a 401 into a 302 so we can do a redirect to a different
app for login. (StatusCodeRedirect does a WSGI-only redirect which cannot
go to a URL not managed by the WSGI stack).'''
def __init__(self, app):
self.app = app
def __call__(self, environ, start_response):
status, headers, app_iter, exc_info = call_wsgi_application(
self.app, environ, catch_exc_info=True)
if status[:3] == '401':
login_url = tg.config.get('auth.login_url', '/auth/')
if environ['REQUEST_METHOD'] == 'GET':
return_to = environ['PATH_INFO']
if environ.get('QUERY_STRING'):
return_to += '?' + environ['QUERY_STRING']
location = tg.url(login_url, dict(return_to=return_to))
else:
# Don't try to re-post; the body has been lost.
location = tg.url(login_url)
r = exc.HTTPFound(location=location)
return r(environ, start_response)
start_response(status, headers, exc_info)
return app_iter
class CSRFMiddleware(object):
'''On POSTs, looks for a special field name that matches the value of a given
cookie. If this field is missing, the cookies are cleared to anonymize the
request.'''
def __init__(self, app, cookie_name, param_name=None):
if param_name is None: param_name = cookie_name
self._app = app
self._param_name = param_name
self._cookie_name = cookie_name
def __call__(self, environ, start_response):
req = Request(environ)
cookie = req.cookies.get(self._cookie_name, None)
if cookie is None:
cookie = h.cryptographic_nonce()
if req.method == 'POST':
param = req.str_POST.pop(self._param_name, None)
if cookie != param:
log.warning('CSRF attempt detected, %r != %r', cookie, param)
environ.pop('HTTP_COOKIE', None)
def session_start_response(status, headers, exc_info = None):
headers.append(
('Set-cookie',
str('%s=%s; Path=/' % (self._cookie_name, cookie))))
return start_response(status, headers, exc_info)
return self._app(environ, session_start_response)
class SSLMiddleware(object):
'Verify the https/http schema is correct'
def __init__(self, app, no_redirect_pattern=None):
self.app = app
if no_redirect_pattern:
self._no_redirect_re = re.compile(no_redirect_pattern)
else:
self._no_redirect_re = re.compile('$$$')
def __call__(self, environ, start_response):
req = Request(environ)
if self._no_redirect_re.match(environ['PATH_INFO']):
return req.get_response(self.app)(environ, start_response)
resp = None
try:
request_uri = req.url
request_uri.decode('ascii')
except UnicodeError:
resp = exc.HTTPNotFound()
secure = req.environ.get('HTTP_X_SFINC_SSL', 'false') == 'true'
srv_path = req.url.split('://', 1)[-1]
if req.cookies.get('SFUSER'):
if not secure:
resp = exc.HTTPFound(location='https://' + srv_path)
elif secure:
resp = exc.HTTPFound(location='http://' + srv_path)
if resp is None:
resp = req.get_response(self.app)
return resp(environ, start_response)
class AlluraTimerMiddleware(TimerMiddleware):
def timers(self):
import genshi
import jinja2
import markdown
import ming
import pymongo
import socket
import urllib2
return [
Timer('markdown', markdown.Markdown, 'convert'),
Timer('ming', ming.odm.odmsession.ODMCursor, 'next'),
Timer('ming', ming.odm.odmsession.ODMSession, 'flush', 'find',
'get'),
Timer('ming', ming.schema.Document, 'validate',
debug_each_call=False),
Timer('ming', ming.schema.FancySchemaItem, '_validate_required',
'_validate_fast_missing', '_validate_optional',
debug_each_call=False),
Timer('mongo', pymongo.collection.Collection, 'count', 'find',
'find_one'),
Timer('mongo', pymongo.cursor.Cursor, 'count', 'distinct',
'explain', 'hint', 'limit', 'next', 'rewind', 'skip',
'sort', 'where'),
Timer('jinja', jinja2.Template, 'render', 'stream', 'generate'),
# urlopen and socket io may or may not overlap partially
Timer('urlopen', urllib2, 'urlopen'),
Timer('render', genshi.Stream, 'render'),
Timer('socket_read', socket._fileobject, 'read', 'readline',
'readlines', debug_each_call=False),
Timer('socket_write', socket._fileobject, 'write', 'writelines',
'flush', debug_each_call=False),
Timer('template', genshi.template.Template, '_prepare', '_parse',
'generate'),
Timer('solr', pysolr.Solr, 'add', 'delete', 'search', 'commit'),
]
def before_logging(self, stat_record):
if c.app and c.app.config:
stat_record.add('request_category', c.app.config.tool_name)
return stat_record
|
Python
| 0.00001 |
@@ -7568,16 +7568,24 @@
ool_name
+.lower()
)%0A
|
7f4642fc2e0edba668482f2ebbb64ab8870e709a
|
Initialize P01_basics
|
books/AutomateTheBoringStuffWithPython/Chapter01/P01_basics.py
|
books/AutomateTheBoringStuffWithPython/Chapter01/P01_basics.py
|
Python
| 0.000002 |
@@ -0,0 +1,1466 @@
+# This program performs basic Python instructions%0A%0A# Expressions%0Aprint(2 + 2)%0Aprint(2 + 3 * 6)%0Aprint((2 + 3) * 6)%0Aprint(48565878 * 578453)%0Aprint(2 ** 8)%0Aprint(23 / 7)%0Aprint(23 // 7)%0Aprint(23 %25 7)%0Aprint(2 + 2)%0Aprint((5 - 1) * ((7 + 1) / (3 - 1)))%0A%0A# Uncomment to see what happens%0A#print(5 + )%0A#print(42 + 5 + * 2)%0A%0A# The Integer, Floating-Point, and String Data Types%0A#print(%22Hello world!) # Uncomment to see what happens%0Aprint(%22Alice%22 + %22Bob%22)%0A#print(%22Alice%22 + 42) # Uncomment to see what happens%0Aprint(%22Alice%22 * 5)%0A%0A# Uncomment to see what happens%0A#print(%22Alice%22 * %22Bob%22)%0A#print(%22Alice%22 * 5.0)%0A%0A# Storing Values in Variables%0Aspam = 40%0Aprint(spam)%0Aeggs = 2%0Aprint(spam + eggs)%0Aprint(spam + eggs + spam)%0Aspam = spam + 2%0Aprint(spam)%0A%0Aspam = %22Hello%22%0Aprint(spam)%0Aspam = %22Goodbye%22%0Aprint(spam)%0A%0A# The len() Function%0Aprint(len(%22hello%22))%0Aprint(len(%22My very energetic monster just scarfed nachos.%22))%0Aprint(len(''))%0A%0A#print(%22I am%22 + 29 + %22 years old.%22) # Uncomment to see what happens%0A%0A# The str(), int(), and float() Functions%0Aprint(str(29))%0Aprint(%22I am %22 + str(29) + %22 years old.%22)%0A%0Aprint(str(0))%0Aprint(str(-3.14))%0Aprint(int(%2242%22))%0Aprint(int(%22-99%22))%0Aprint(int(1.25))%0Aprint(int(1.99))%0Aprint(float(%223.14%22))%0Aprint(float(10))%0A%0Aspam = input(%22Type 101 here: %22) # Type 101 when prompted%0Aprint(spam)%0A%0Aspam = int(spam)%0Aprint(spam)%0Aprint(spam * 10 / 5)%0A%0A# Uncomment to see what happens%0A#print(int(%2299.99%22))%0A#print(int(%22twelve%22))%0A%0Aprint(int(7.7))%0Aprint(int(7.7) + 1)%0A
|
|
ea6d73ac2b9274eae0a866acd1e729854c59fb17
|
Add update.py to drive the update loop.
|
kettle/update.py
|
kettle/update.py
|
Python
| 0 |
@@ -0,0 +1,2065 @@
+#!/usr/bin/env python%0A%0A# Copyright 2017 The Kubernetes Authors.%0A#%0A# Licensed under the Apache License, Version 2.0 (the %22License%22);%0A# you may not use this file except in compliance with the License.%0A# You may obtain a copy of the License at%0A#%0A# http://www.apache.org/licenses/LICENSE-2.0%0A#%0A# Unless required by applicable law or agreed to in writing, software%0A# distributed under the License is distributed on an %22AS IS%22 BASIS,%0A# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.%0A# See the License for the specific language governing permissions and%0A# limitations under the License.%0A%0A%0Aimport os%0Aimport time%0A%0A%0Adef modified_today(fname):%0A now = time.time()%0A try:%0A return os.stat(fname).st_mtime %3E (now - now %25 (24 * 60 * 60))%0A except OSError:%0A return False%0A%0A%0Adef call(cmd):%0A print '+', cmd%0A status = os.system(cmd)%0A if status:%0A raise Exception('invocation failed')%0A%0A%0Adef main():%0A call('time python make_db.py --buckets ../buckets.yaml --junit --threads 128')%0A%0A bq_cmd = 'bq load --source_format=NEWLINE_DELIMITED_JSON --max_bad_records=1000'%0A mj_cmd = 'pypy make_json.py'%0A%0A mj_ext = ''%0A bq_ext = ''%0A if not modified_today('build_day.json.gz'):%0A # cycle daily/weekly tables%0A bq_ext = ' --replace'%0A mj_ext = ' --reset-emitted'%0A%0A call(mj_cmd + mj_ext + ' --days 1 %7C pv %7C gzip %3E build_day.json.gz')%0A call(bq_cmd + bq_ext + ' k8s-gubernator:build.day build_day.json.gz schema.json')%0A%0A call(mj_cmd + mj_ext + ' --days 7 %7C pv %7C gzip %3E build_week.json.gz')%0A call(bq_cmd + bq_ext + ' k8s-gubernator:build.week build_week.json.gz schema.json')%0A%0A call(mj_cmd + ' %7C pv %7C gzip %3E build_all.json.gz')%0A call(bq_cmd + ' k8s-gubernator:build.all build_all.json.gz schema.json')%0A%0A call('python stream.py --poll kubernetes-jenkins/gcs-changes/kettle '%0A ' --dataset k8s-gubernator:build --tables all:0 day:1 week:7 --stop_at=1')%0A%0A%0Aif __name__ == '__main__':%0A os.chdir(os.path.dirname(__file__))%0A os.environ%5B'TZ'%5D = 'America/Los_Angeles'%0A main()%0A
|
|
5114f177741b105f33819b98415702e53b52eb01
|
Add script to update site setup, which is used in places like the password reset email [skip ci]
|
corehq/apps/hqadmin/management/commands/update_site_setup.py
|
corehq/apps/hqadmin/management/commands/update_site_setup.py
|
Python
| 0 |
@@ -0,0 +1,2117 @@
+from django.core.management.base import BaseCommand, CommandError%0Afrom django.contrib.sites.models import Site%0Afrom django.conf import settings%0A%0A%0Aclass Command(BaseCommand):%0A def add_arguments(self, parser):%0A parser.add_argument(%0A 'site_address',%0A help=%22the new site address that should be used. This would get set in the site objects name %22%0A %22and domain.%22%0A )%0A parser.add_argument(%0A '--skip-checks',%0A action='store_true',%0A default=False,%0A help=%22If you are sure of what you are doing and want to skip checks to ensure safe update.%22%0A )%0A%0A def handle(self, site_address, *args, **options):%0A if not options%5B'skip_checks'%5D:%0A if settings.SITE_ID != 1:%0A raise CommandError(%22SITE ID under settings expected to have value 1 since only one object is expected%22)%0A sites_count = Site.objects.count()%0A if sites_count != 1:%0A raise CommandError(%22Expected to have only one object added by Site during setup but currently its %25s %22%25%0A Site.objects.count())%0A site_object = Site.objects.first()%0A if site_object.name != %22example.com%22 and site_object.domain != %22example.com%22:%0A raise CommandError(%0A %22%22%22%0A Expected the present site object to have dummy example values.%0A They were probably modified and needs to be rechecked.%0A Current Values, name -%3E %7Bname%7D, domain -%3E %7Bdomain%7D%0A %22%22%22.format(name=site_object.name, domain=site_object.domain%0A ))%0A%0A site_object = Site.objects.first()%0A site_object.name = site_address%0A site_object.domain = site_address%0A site_object.save()%0A%0A Site.objects.clear_cache()%0A%0A site_object = Site.objects.first()%0A print('Updated!')%0A print('Site object now is name -%3E %7Bname%7D, domain -%3E %7Bdomain%7D'.format(%0A name=site_object.name,%0A domain=site_object.domain%0A ))%0A%0A%0A
|
|
49d0dd94c3925c3721d059ad3ee2db51d176248c
|
Put 80col suppression check in the right place
|
hooks/pre_commit_checks.py
|
hooks/pre_commit_checks.py
|
# Copyright (c) 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import re
import sys
import time
def _FormatError(msg, files):
return ('%s in these files:\n' % msg +
'\n'.join([' ' + x for x in files])
)
def _ReportErrorFileAndLine(filename, line_num, dummy_line):
"""Default error formatter for _FindNewViolationsOfRule."""
return '%s:%s' % (filename, line_num)
def _FindNewViolationsOfRule(callable_rule, input_api,
error_formatter=_ReportErrorFileAndLine):
"""Find all newly introduced violations of a per-line rule (a callable).
Arguments:
callable_rule: a callable taking a file extension and line of input and
returning True if the rule is satisfied and False if there was a problem.
input_api: object to enumerate the affected files.
source_file_filter: a filter to be passed to the input api.
error_formatter: a callable taking (filename, line_number, line) and
returning a formatted error string.
Returns:
A list of the newly-introduced violations reported by the rule.
"""
errors = []
for f in input_api.AffectedFiles(include_deletes=False):
# For speed, we do two passes, checking first the full file. Shelling out
# to the SCM to determine the changed region can be quite expensive on
# Win32. Assuming that most files will be kept problem-free, we can
# skip the SCM operations most of the time.
extension = str(f.filename).rsplit('.', 1)[-1]
if all(callable_rule(extension, line) for line in f.contents_as_lines):
continue # No violation found in full text: can skip considering diff.
for line_num, line in f.changed_lines:
if not callable_rule(extension, line):
errors.append(error_formatter(f.filename, line_num, line))
return errors
def CheckCopyright(input_api):
sources = input_api.AffectedFiles(include_deletes=False)
project_name = 'Chromium'
current_year = int(time.strftime('%Y'))
allow_old_years=False
if allow_old_years:
allowed_years = (str(s) for s in reversed(xrange(2006, current_year + 1)))
else:
allowed_years = [str(current_year)]
years_re = '(' + '|'.join(allowed_years) + ')'
# The (c) is deprecated, but tolerate it until it's removed from all files.
license_header = (
r'.*? Copyright (\(c\) )?%(year)s The %(project)s Authors\. '
r'All rights reserved\.\n'
r'.*? Use of this source code is governed by a BSD-style license that '
r'can be\n'
r'.*? found in the LICENSE file\.(?: \*/)?\n'
) % {
'year': years_re,
'project': project_name,
}
license_re = re.compile(license_header, re.MULTILINE)
bad_files = []
for f in sources:
contents = f.contents
if not license_re.search(contents):
bad_files.append(f.filename)
if bad_files:
return [_FormatError(
'License must match:\n%s\n' % license_re.pattern +
'Found a bad license header',
bad_files)]
return []
def CheckLongLines(input_api, maxlen=80):
"""Checks that there aren't any lines longer than maxlen characters in any of
the text files to be submitted.
"""
maxlens = {
'': maxlen,
}
# Language specific exceptions to max line length.
# '.h' is considered an obj-c file extension, since OBJC_EXCEPTIONS are a
# superset of CPP_EXCEPTIONS.
CPP_FILE_EXTS = ('c', 'cc')
CPP_EXCEPTIONS = ('#define', '#endif', '#if', '#include', '#pragma')
JAVA_FILE_EXTS = ('java',)
JAVA_EXCEPTIONS = ('import ', 'package ')
OBJC_FILE_EXTS = ('h', 'm', 'mm')
OBJC_EXCEPTIONS = ('#define', '#endif', '#if', '#import', '#include',
'#pragma')
LANGUAGE_EXCEPTIONS = [
(CPP_FILE_EXTS, CPP_EXCEPTIONS),
(JAVA_FILE_EXTS, JAVA_EXCEPTIONS),
(OBJC_FILE_EXTS, OBJC_EXCEPTIONS),
]
def no_long_lines(file_extension, line):
# Check for language specific exceptions.
if any(file_extension in exts and line.startswith(exceptions)
for exts, exceptions in LANGUAGE_EXCEPTIONS):
return True
file_maxlen = maxlens.get(file_extension, maxlens[''])
# Stupidly long symbols that needs to be worked around if takes 66% of line.
long_symbol = file_maxlen * 2 / 3
# Hard line length limit at 50% more.
extra_maxlen = file_maxlen * 3 / 2
line_len = len(line)
if line_len <= file_maxlen:
return True
if line_len > extra_maxlen:
return False
if any((url in line) for url in ('file://', 'http://', 'https://')):
return True
if 'url(' in line and file_extension == 'css':
return True
if '<include' in line and file_extension in ('css', 'html', 'js'):
return True
if '@suppress longLineCheck' in line:
return True
return re.match(
r'.*[A-Za-z][A-Za-z_0-9]{%d,}.*' % long_symbol, line)
def format_error(filename, line_num, line):
return '%s, line %s, %s chars' % (filename, line_num, len(line))
errors = _FindNewViolationsOfRule(no_long_lines, input_api,
error_formatter=format_error)
if errors:
return [_FormatError(
'Found lines longer than %s characters' % maxlen,
errors)]
else:
return []
def RunChecks(input_api):
results = []
results += CheckCopyright(input_api)
results += CheckLongLines(input_api)
return results
|
Python
| 0 |
@@ -4462,24 +4462,85 @@
eturn True%0A%0A
+ if '@suppress longLineCheck' in line:%0A return True%0A%0A
if line_
@@ -4835,69 +4835,8 @@
ue%0A%0A
- if '@suppress longLineCheck' in line:%0A return True%0A%0A
@@ -5432,9 +5432,8 @@
results%0A
-%0A
|
6fd0cee9bca0449aa6aab6a62e470ba8ff909cbb
|
Print all Caesar rotations for some string
|
language/rotN.py
|
language/rotN.py
|
Python
| 0.031516 |
@@ -0,0 +1,503 @@
+#! /usr/bin/env python%0Aimport string%0A%0Aciphered = %22LVFU XAN YIJ UVXRB RKOYOFB%22%0A%0Adef make_rot_n(n):%0A # http://stackoverflow.com/questions/3269686/short-rot13-function%0A lc = string.ascii_lowercase%0A uc = string.ascii_uppercase%0A trans = string.maketrans(lc + uc,%0A lc%5Bn:%5D + lc%5B:n%5D + uc%5Bn:%5D + uc%5B:n%5D)%0A return lambda s: string.translate(s, trans)%0A%0A%0Afor i in range(26):%0A rotator = make_rot_n(i)%0A deciphered = rotator(ciphered)%0A print str(i) + ' ' + deciphered%0A
|
|
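string.maketrans as used in the record above exists only in Python 2; under Python 3 the equivalent goes through str.maketrans and the string's own translate method. A sketch of the same rotator, assuming Python 3:

import string

def make_rot_n(n):
    lc, uc = string.ascii_lowercase, string.ascii_uppercase
    trans = str.maketrans(lc + uc, lc[n:] + lc[:n] + uc[n:] + uc[:n])
    return lambda s: s.translate(trans)

rot13 = make_rot_n(13)
rot13('Hello')  # 'Uryyb'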
bce076cd383d7349a397a9716feb3f6e84281078
|
Make lasagne.utils.floatX support scalars and avoid copies when possible
|
lasagne/utils.py
|
lasagne/utils.py
|
import numpy as np
import theano
import theano.tensor as T
def floatX(arr):
"""Converts numpy array to one with the correct dtype.
Parameters
----------
arr : numpy array
The array to be converted.
Returns
-------
numpy array
The input array in the ``floatX`` dtype configured for Theano.
"""
return arr.astype(theano.config.floatX)
def shared_empty(dim=2, dtype=None):
"""Creates empty Theano shared variable.
Shortcut to create an empty Theano shared variable with
the specified number of dimensions.
Parameters
----------
dim : int, optional
The number of dimensions for the empty variable, defaults to 2.
dtype : a numpy data-type, optional
The desired dtype for the variable. Defaults to the Theano
``floatX`` dtype.
Returns
-------
Theano shared variable
An empty Theano shared variable of dtype ``dtype`` with
`dim` dimensions.
"""
if dtype is None:
dtype = theano.config.floatX
shp = tuple([1] * dim)
return theano.shared(np.zeros(shp, dtype=dtype))
def as_theano_expression(input):
"""Wrap as Theano expression.
Wraps the given input as a Theano constant if it is not
a valid Theano expression already. Useful to transparently
handle numpy arrays and Python scalars, for example.
Parameters
----------
input : number, numpy array or Theano expression
Expression to be converted to a Theano constant.
Returns
-------
Theano symbolic constant
Theano constant version of `input`.
"""
if isinstance(input, theano.gof.Variable):
return input
else:
try:
return theano.tensor.constant(input)
except Exception as e:
raise TypeError("Input of type %s is not a Theano expression and "
"cannot be wrapped as a Theano constant (original "
"exception: %s)" % (type(input), e))
def one_hot(x, m=None):
"""One-hot representation of integer vector.
Given a vector of integers from 0 to m-1, returns a matrix
with a one-hot representation, where each row corresponds
to an element of x.
Parameters
----------
x : integer vector
The integer vector to convert to a one-hot representation.
m : int, optional
The number of different columns for the one-hot representation. This
needs to be strictly greater than the maximum value of `x`.
Defaults to ``max(x) + 1``.
Returns
-------
Theano tensor variable
A Theano tensor variable of shape (``n``, `m`), where ``n`` is the
length of `x`, with the one-hot representation of `x`.
"""
if m is None:
m = T.cast(T.max(x) + 1, 'int32')
return T.eye(m)[T.cast(x, 'int32')]
def unique(l):
"""Filters duplicates of iterable.
Create a new list from l with duplicate entries removed,
while preserving the original order.
Parameters
----------
l : iterable
Input iterable to filter of duplicates.
Returns
-------
list
A list of elements of `l` without duplicates and in the same order.
"""
new_list = []
for el in l:
if el not in new_list:
new_list.append(el)
return new_list
def as_tuple(x, N):
"""
Coerce a value to a tuple of length N.
Parameters:
-----------
x : value or iterable
N : integer
length of the desired tuple
Returns:
--------
tuple
``tuple(x)`` if `x` is iterable, ``(x,) * N`` otherwise.
"""
try:
X = tuple(x)
except TypeError:
X = (x,) * N
if len(X) != N:
raise ValueError("input must be a single value "
"or an iterable with length {0}".format(N))
return X
def compute_norms(array, norm_axes=None):
""" Compute incoming weight vector norms.
Parameters
----------
array : ndarray
Weight array.
norm_axes : sequence (list or tuple)
The axes over which to compute the norm. This overrides the
default norm axes defined for the number of dimensions
in `array`. When this is not specified and `array` is a 2D array,
this is set to `(0,)`. If `array` is a 3D, 4D or 5D array, it is
set to a tuple listing all axes but axis 0. The former default is
useful for working with dense layers, the latter is useful for 1D,
2D and 3D convolutional layers.
(Optional)
Returns
-------
norms : 1D array
1D array of incoming weight vector norms.
Examples
--------
>>> array = np.random.randn(100, 200)
>>> norms = compute_norms(array)
>>> norms.shape
(200,)
>>> norms = compute_norms(array, norm_axes=(1,))
>>> norms.shape
(100,)
"""
ndim = array.ndim
if norm_axes is not None:
sum_over = tuple(norm_axes)
elif ndim == 2: # DenseLayer
sum_over = (0,)
elif ndim in [3, 4, 5]: # Conv{1,2,3}DLayer
sum_over = tuple(range(1, ndim))
else:
raise ValueError(
"Unsupported tensor dimensionality {}."
"Must specify `norm_axes`".format(array.ndim)
)
norms = np.sqrt(np.sum(array**2, axis=sum_over))
return norms
|
Python
| 0 |
@@ -88,16 +88,26 @@
onverts
+data to a
numpy ar
@@ -114,37 +114,41 @@
ray
-to one with the correct dtype
+of dtype %60%60theano.config.floatX%60%60
.%0A%0A
@@ -186,27 +186,26 @@
arr :
-numpy
array
+_like
%0A
@@ -209,21 +209,20 @@
The
-array
+data
to be c
@@ -262,24 +262,26 @@
-%0A numpy
+nd
array%0A
@@ -353,34 +353,116 @@
-%22%22%22%0A return arr.as
+ If %60arr%60 is an ndarray of correct dtype, it is returned as is.%0A %22%22%22%0A return np.asarray(arr, d
type
-(
+=
thea
|
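Applying the diff above yields roughly the following function; np.asarray avoids a copy when the input already has the right dtype and also accepts Python scalars, which is the point of the change:

import numpy as np
import theano

def floatX(arr):
    """Converts data to a numpy array of dtype theano.config.floatX.

    If arr is an ndarray of correct dtype, it is returned as is.
    """
    return np.asarray(arr, dtype=theano.config.floatX)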
b7c3bd6c6ab5bc606b17d7bbbc3038d4b347c425
|
Send down indicator names in English and Hindi
|
custom/bihar/reports/indicators/fixtures.py
|
custom/bihar/reports/indicators/fixtures.py
|
from xml.etree import ElementTree
from corehq.apps.groups.models import Group
from corehq.apps.users.models import CommCareUser
from custom.bihar.reports.indicators.indicators import IndicatorDataProvider, IndicatorConfig, INDICATOR_SETS
# meh
hard_coded_domains = ('care-bihar', 'bihar')
hard_coded_indicators = 'homevisit'
hard_coded_group_filter = lambda group: bool(group.metadata.get('awc-code', False))
hard_coded_fixture_id = 'indicators:bihar-supervisor'
def generator(user, *args, **kwargs):
# todo: this appears in the beginning of all fixture generators. should fix
if isinstance(user, CommCareUser):
pass
elif hasattr(user, "_hq_user") and user._hq_user is not None:
user = user._hq_user
else:
return []
if user.domain in hard_coded_domains:
groups = filter(hard_coded_group_filter, Group.by_user(user))
if len(groups) == 1:
data_provider = IndicatorDataProvider(
domain=user.domain,
indicator_set=IndicatorConfig(INDICATOR_SETS).get_indicator_set(hard_coded_indicators),
groups=groups,
)
fixture_provider = IndicatorFixtureProvider(
hard_coded_fixture_id, user, data_provider
)
return [fixture_provider.to_fixture()]
return []
class IndicatorFixtureProvider(object):
def __init__(self, id, user, data_provider):
self.id = id
self.user = user
self.data_provider = data_provider
def to_fixture(self):
"""
Generate a fixture representation of the indicator set. Something like the following:
<fixture id="indicators:bihar-supervisor" user_id="3ce8b1611c38e956d3b3b84dd3a7ac18">
<group id="1012aef098ab0c0" team="Samda Team 1">
<indicators>
<indicator id="bp">
<name>BP Visits last 30 days</name>
<done>25</done>
<due>22</due>
<clients>
<client id="a1029b09c090s9d173" status="done"></client>
<client id="bad7a1029b09c090s9" status="due"></client>
</clients>
</indicator>
</indicators>
</group>
</fixture>
"""
def _el(tag, text):
el = ElementTree.Element(tag)
el.text = unicode(text)
return el
def _indicator_to_fixture(indicator):
ind_el = ElementTree.Element('indicator',
attrib={
'id': indicator.slug,
},
)
done, due = self.data_provider.get_indicator_data(indicator)
ind_el.append(_el('name', indicator.name))
ind_el.append(_el('done', done))
ind_el.append(_el('due', due))
clients = ElementTree.Element('clients')
for case_id, data in self.data_provider.get_case_data(indicator).items():
client = ElementTree.Element('client',
attrib={
'id': case_id,
'status': 'done' if data['num'] else 'due',
}
)
clients.append(client)
ind_el.append(clients)
return ind_el
root = ElementTree.Element('fixture',
attrib={'id': self.id, 'user_id': self.user._id},
)
group = ElementTree.Element('group',
attrib={
'id': self.data_provider.groups[0]._id,
'team': self.data_provider.groups[0].name
},
)
root.append(group)
indicators = ElementTree.Element('indicators')
for indicator in self.data_provider.summary_indicators:
indicators.append(_indicator_to_fixture(indicator))
group.append(indicators)
return root
def to_string(self):
return ElementTree.tostring(self.to_fixture(), encoding="utf-8")
|
Python
| 0 |
@@ -27,16 +27,104 @@
entTree%0A
+from django.utils import translation%0Afrom django.utils.translation import ugettext as _%0A
from cor
@@ -2466,19 +2466,66 @@
ag, text
+, attrib=None
):%0A
+ attrib = attrib or %7B%7D%0A
@@ -2556,16 +2556,31 @@
ment(tag
+, attrib=attrib
)%0A
@@ -2957,16 +2957,121 @@
tor.name
+, attrib=%7B'lang': 'en'%7D))%0A ind_el.append(_el('name', _(indicator.name), attrib=%7B'lang': 'hin'%7D
))%0A
@@ -3621,24 +3621,196 @@
urn ind_el%0A%0A
+ # switch to hindi so we can use our builtin translations%0A current_language = translation.get_language()%0A translation.activate('hin')%0A try:%0A
root
@@ -3839,24 +3839,28 @@
('fixture',%0A
+
@@ -3909,34 +3909,42 @@
r._id%7D,%0A
+
-)%0A
+ )%0A
group =
@@ -3976,32 +3976,36 @@
p',%0A
+
attrib=%7B%0A
@@ -4009,24 +4009,28 @@
+
+
'id': self.d
@@ -4053,24 +4053,28 @@
ups%5B0%5D._id,%0A
+
@@ -4123,32 +4123,36 @@
ame%0A
+
%7D,%0A )%0A
@@ -4138,34 +4138,42 @@
%7D,%0A
+
-)%0A
+ )%0A
root.app
@@ -4183,16 +4183,20 @@
(group)%0A
+
@@ -4246,24 +4246,28 @@
s')%0A
+
+
for indicato
@@ -4314,32 +4314,36 @@
rs:%0A
+
indicators.appen
@@ -4382,24 +4382,28 @@
r))%0A
+
+
group.append
@@ -4415,16 +4415,20 @@
cators)%0A
+
@@ -4438,16 +4438,192 @@
urn root
+%0A finally:%0A # i don't think this level of paranoia is actually necessary%0A # but it doesn't hurt.%0A translation.activate(current_language)
%0A%0A de
|
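The diff above wraps fixture generation in a temporary switch of Django's active translation. The pattern, condensed from the patch, looks like this:

from django.utils import translation

current_language = translation.get_language()
translation.activate('hin')  # switch to Hindi for the builtin translations
try:
    pass  # build the fixture XML here
finally:
    # restore whatever language was active before
    translation.activate(current_language)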
d3dbb797575221d574fdda9c3d087d8696f6091a
|
Add netstring lib
|
lib/netstring.py
|
lib/netstring.py
|
Python
| 0.000002 |
@@ -0,0 +1,943 @@
+def encode_netstring(s):%0A return str(len(s)).encode('ascii') + b':' + s + b','%0A%0Adef consume_netstring(s):%0A %22%22%22If s is a bytestring beginning with a netstring, returns (value, rest)%0A where value is the contents of the netstring, and rest is the part of s%0A after the netstring.%0A %0A Raises ValueError if s does not begin with a netstring.%0A %0A %22%22%22%0A (length, sep, rest) = s.partition(b':')%0A if sep != b':':%0A raise ValueError(%22No colon found in s%22)%0A if not length.isdigit():%0A raise ValueError(%22Length is not numeric%22)%0A length = int(length)%0A if len(rest) %3C= length:%0A raise ValueError(%22String not long enough%22)%0A if rest%5Blength%5D != 0x2c:%0A raise ValueError(%22String not terminated with comma%22)%0A return (rest%5B:length%5D, rest%5Blength+1:%5D)%0A%0Adef is_netstring(s):%0A try:%0A (val, rest) = consume_netstring(s)%0A return len(rest) == 0%0A except ValueError:%0A return False%0A
|
|
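A quick usage example for the helpers above (byte literals assumed throughout, matching the b':' and b',' delimiters in the code):

encoded = encode_netstring(b'hello')  # b'5:hello,'
value, rest = consume_netstring(encoded + b'tail')
assert value == b'hello' and rest == b'tail'
assert is_netstring(encoded) and not is_netstring(b'hello')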
9f5c3715f4b3cd5bf451bdc504cded6459e8ee79
|
add one test file and add content to it
|
test/unit_test/test_similarity2.py
|
test/unit_test/test_similarity2.py
|
Python
| 0 |
@@ -0,0 +1,509 @@
+from lexos.helpers.error_messages import MATRIX_DIMENSION_UNEQUAL_MESSAGE%0A%0Acount_matrix = %5B%5B'', 'The', 'all', 'bobcat', 'cat', 'caterpillar',%0A 'day.', 'slept'%5D,%0A %5B'catBobcat', 9.0, 9.0, 5.0, 4.0, 0.0, 9.0, 9.0%5D,%0A %5B'catCaterpillar', 9.0, 9.0, 0.0, 4.0, 5.0, 9.0, 9.0%5D,%0A %5B'test', 9.0, 9.0, 5.0, 4.0, 0.0, 9.0, 9.0%5D%5D%0Aassert all(len(line) == len(count_matrix%5B1%5D)%0A for line in count_matrix%5B1:%5D), MATRIX_DIMENSION_UNEQUAL_MESSAGE%0A%0Aprint(%22pass%22)%0A
|
|
44863ff1f7064f1d9a9bb897822834eb6755ed59
|
Add SMTP auth server
|
authserver.py
|
authserver.py
|
Python
| 0.000001 |
@@ -0,0 +1,529 @@
+import bcrypt%0Aimport asyncore%0Afrom secure_smtpd import SMTPServer, FakeCredentialValidator%0Afrom srht.objects import User%0A%0Aclass UserValidator(object):%0A def validate(self, username, password):%0A user = User.query.filter(User.username == username).first()%0A if not user:%0A return False%0A return bcrypt.checkpw(password, user.password)%0A%0A%0ASMTPServer(%0A ('0.0.0.0', 4650),%0A None,%0A require_authentication=True,%0A ssl=False,%0A credential_validator=FakeCredentialValidator(),%0A)%0Aasyncore.loop()%0A
|
|
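Note that the record above defines UserValidator but passes FakeCredentialValidator to the server; presumably the intended wiring uses the custom validator. A sketch with the same constructor arguments:

SMTPServer(
    ('0.0.0.0', 4650),
    None,
    require_authentication=True,
    ssl=False,
    credential_validator=UserValidator(),  # instead of FakeCredentialValidator()
)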
f56e390be0e2cea8e08080029aad756a6ab3c91f
|
Add lc0253_meeting_rooms_ii.py from Copenhagen :)
|
lc0253_meeting_rooms_ii.py
|
lc0253_meeting_rooms_ii.py
|
Python
| 0 |
@@ -0,0 +1,475 @@
+%22%22%22Leetcode 253. Meeting Rooms II (Premium)%0AMedium%0A%0AURL: https://leetcode.com/problems/meeting-rooms-ii%0A%0AGiven an array of meeting time intervals consisting of start and end times%0A%5B%5Bs1,e1%5D,%5Bs2,e2%5D,...%5D (si %3C ei),%0Afind the minimum number of conference rooms required.%0A%22%22%22%0A%0Aclass Solution2(object):%0A # @param %7BInterval%5B%5D%7D intervals%0A # @return %7Binteger%7D%0A def minMeetingRooms(self, intervals):%0A pass%0A%0A%0Adef main():%0A pass%0A%0A%0Aif __name__ == '__main__':%0A main()%0A
|
|
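The committed Solution2 above is only a stub; a common approach keeps a min-heap of room end times. A minimal sketch, assuming plain (start, end) pairs rather than the premium problem's Interval type:

import heapq

def min_meeting_rooms(intervals):
    """intervals: list of (start, end) pairs with start < end."""
    if not intervals:
        return 0
    heap = []  # end times of rooms currently in use
    for start, end in sorted(intervals):
        if heap and heap[0] <= start:
            heapq.heapreplace(heap, end)  # reuse the room that frees up earliest
        else:
            heapq.heappush(heap, end)     # all rooms busy: open a new one
    return len(heap)

min_meeting_rooms([(0, 30), (5, 10), (15, 20)])  # 2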
8b4c34e84d306b5f9021de47bc3ae9050e2fc2b3
|
Fix loading of PLY files exported by MeshLab
|
compare_clouds.py
|
compare_clouds.py
|
Python
| 0 |
@@ -0,0 +1,429 @@
+#!/usr/bin/env python3%0A%0Afrom pathlib import Path%0A%0A%22%22%22Code for comparing point clouds%22%22%22%0A%0Acloud1Path = Path(%22./data/reconstructions/2016_10_24__17_43_17/reference.ply%22)%0Acloud2Path = Path(%22./data/reconstructions/2016_10_24__17_43_17/high_quality.ply%22)%0A%0Afrom load_ply import load_ply%0A%0Acloud1PointData = load_ply(cloud1Path)%5B0%5D%5B:,:3%5D.copy()%0Acloud2PointData = load_ply(cloud2Path)%5B0%5D%5B:,:3%5D.copy()%0A%0A#if __name__=='__main__':%0A #pass%0A
|
|
4f70773bb9041c44b0f83ef61a46d5fa974b366e
|
Create conwaytesting.py
|
conwaytesting.py
|
conwaytesting.py
|
Python
| 0 |
@@ -0,0 +1 @@
+%0A
|
|
0bf7d9fb20a3d2588ffc0e8341ec2af3df5fe300
|
Add test for depot index page
|
depot/tests/test_depot_index.py
|
depot/tests/test_depot_index.py
|
Python
| 0 |
@@ -0,0 +1,1962 @@
+from django.test import TestCase, Client%0Afrom depot.models import Depot%0A%0A%0Adef create_depot(name, state):%0A return Depot.objects.create(name=name, active=state)%0A%0A%0Aclass DepotIndexTestCase(TestCase):%0A%0A def test_depot_index_template(self):%0A response = self.client.get('/depots/')%0A self.assertTemplateUsed(%0A response,%0A template_name='depot/index.html'%0A )%0A%0A def test_depot_index_with_no_depots(self):%0A response = self.client.get('/depots/')%0A self.assertEqual(response.status_code, 200)%0A self.assertQuerysetEqual(response.context%5B'depot_list'%5D, %5B%5D)%0A self.assertContains(response, 'No depots available :(')%0A%0A def test_depot_index_with_active_depot(self):%0A depot = create_depot('active depot', True)%0A response = self.client.get('/depots/')%0A self.assertEqual(response.status_code, 200)%0A self.assertQuerysetEqual(%0A response.context%5B'depot_list'%5D, %5B'%3CDepot: Depot active depot%3E'%5D%0A )%0A self.assertContains(response, depot.name)%0A%0A def test_depot_index_with_archived_depot(self):%0A depot = create_depot('archived depot', False)%0A response = self.client.get('/depots/')%0A self.assertEqual(response.status_code, 200)%0A self.assertQuerysetEqual(response.context%5B'depot_list'%5D, %5B%5D)%0A self.assertContains(response, 'No depots available')%0A self.assertNotContains(response, depot.name)%0A%0A def test_depot_index_with_active_and_archived_depot(self):%0A active_depot = create_depot('active depot', True)%0A archived_depot = create_depot('archived depot', False)%0A response = self.client.get('/depots/')%0A self.assertEqual(response.status_code, 200)%0A self.assertQuerysetEqual(%0A response.context%5B'depot_list'%5D, %5B'%3CDepot: Depot active depot%3E'%5D%0A )%0A self.assertContains(response, active_depot.name)%0A self.assertNotContains(response, archived_depot.name)%0A
|
|
f77f9775277a100c7809698c75cb0855b07b884d
|
Fix accidentally added import
|
git/test/test_util.py
|
git/test/test_util.py
|
# test_utils.py
# Copyright (C) 2008, 2009 Michael Trier ([email protected]) and contributors
#
# This module is part of GitPython and is released under
# the BSD License: http://www.opensource.org/licenses/bsd-license.php
import tempfile
from git.test.lib import (
TestBase,
assert_equal
)
from git.util import (
LockFile,
BlockingLockFile,
get_user_id,
Actor,
IterableList
)
from git.objects.util import (
altz_to_utctz_str,
utctz_to_altz,
verify_utctz,
parse_date,
tzoffset,
)
from git.cmd import dashify
from git.compat import string_types
import time
class TestIterableMember(object):
"""A member of an iterable list"""
__slots__ = ("name", "prefix_name")
def __init__(self, name):
self.name = name
self.prefix_name = name
class TestUtils(TestBase):
def setup(self):
self.testdict = {
"string": "42",
"int": 42,
"array": [42],
}
def test_it_should_dashify(self):
assert_equal('this-is-my-argument', dashify('this_is_my_argument'))
assert_equal('foo', dashify('foo'))
def test_lock_file(self):
my_file = tempfile.mktemp()
lock_file = LockFile(my_file)
assert not lock_file._has_lock()
# release lock we don't have - fine
lock_file._release_lock()
# get lock
lock_file._obtain_lock_or_raise()
assert lock_file._has_lock()
# concurrent access
other_lock_file = LockFile(my_file)
assert not other_lock_file._has_lock()
self.failUnlessRaises(IOError, other_lock_file._obtain_lock_or_raise)
lock_file._release_lock()
assert not lock_file._has_lock()
other_lock_file._obtain_lock_or_raise()
self.failUnlessRaises(IOError, lock_file._obtain_lock_or_raise)
# auto-release on destruction
del(other_lock_file)
lock_file._obtain_lock_or_raise()
lock_file._release_lock()
def test_blocking_lock_file(self):
my_file = tempfile.mktemp()
lock_file = BlockingLockFile(my_file)
lock_file._obtain_lock()
# next one waits for the lock
start = time.time()
wait_time = 0.1
wait_lock = BlockingLockFile(my_file, 0.05, wait_time)
self.failUnlessRaises(IOError, wait_lock._obtain_lock)
elapsed = time.time() - start
assert elapsed <= wait_time + 0.02 # some extra time it may cost
def test_user_id(self):
assert '@' in get_user_id()
def test_parse_date(self):
# test all supported formats
def assert_rval(rval, veri_time, offset=0):
assert len(rval) == 2
assert isinstance(rval[0], int) and isinstance(rval[1], int)
assert rval[0] == veri_time
assert rval[1] == offset
# now that we are here, test our conversion functions as well
utctz = altz_to_utctz_str(offset)
assert isinstance(utctz, string_types)
assert utctz_to_altz(verify_utctz(utctz)) == offset
# END assert rval utility
rfc = ("Thu, 07 Apr 2005 22:13:11 +0000", 0)
iso = ("2005-04-07T22:13:11 -0200", 7200)
iso2 = ("2005-04-07 22:13:11 +0400", -14400)
iso3 = ("2005.04.07 22:13:11 -0000", 0)
alt = ("04/07/2005 22:13:11", 0)
alt2 = ("07.04.2005 22:13:11", 0)
veri_time_utc = 1112911991 # the time this represents, in time since epoch, UTC
for date, offset in (rfc, iso, iso2, iso3, alt, alt2):
assert_rval(parse_date(date), veri_time_utc, offset)
# END for each date type
# and failure
self.failUnlessRaises(ValueError, parse_date, 'invalid format')
self.failUnlessRaises(ValueError, parse_date, '123456789 -02000')
self.failUnlessRaises(ValueError, parse_date, ' 123456789 -0200')
def test_actor(self):
for cr in (None, self.rorepo.config_reader()):
assert isinstance(Actor.committer(cr), Actor)
assert isinstance(Actor.author(cr), Actor)
# END assure config reader is handled
def test_iterable_list(self):
for args in (('name',), ('name', 'prefix_')):
l = IterableList('name')
m1 = TestIterableMember('one')
m2 = TestIterableMember('two')
l.extend((m1, m2))
assert len(l) == 2
# contains works with name and identity
assert m1.name in l
assert m2.name in l
assert m2 in l
assert m2 in l
assert 'invalid' not in l
# with string index
assert l[m1.name] is m1
assert l[m2.name] is m2
# with int index
assert l[0] is m1
assert l[1] is m2
# with getattr
assert l.one is m1
assert l.two is m2
# test exceptions
self.failUnlessRaises(AttributeError, getattr, l, 'something')
self.failUnlessRaises(IndexError, l.__getitem__, 'something')
# delete by name and index
self.failUnlessRaises(IndexError, l.__delitem__, 'something')
del(l[m2.name])
assert len(l) == 1
assert m2.name not in l and m1.name in l
del(l[0])
assert m1.name not in l
assert len(l) == 0
self.failUnlessRaises(IndexError, l.__delitem__, 0)
self.failUnlessRaises(IndexError, l.__delitem__, 'something')
# END for each possible mode
|
Python
| 0 |
@@ -510,22 +510,8 @@
te,%0A
- tzoffset,%0A
)%0Afr
|
2a903d721c44f9c6b53c8516b28b9dd6c1faa5e0
|
Create crawler_utils.py
|
crawler_utils.py
|
crawler_utils.py
|
Python
| 0.000017 |
@@ -0,0 +1,815 @@
+import json%0Aimport os.path%0A%0A%0Adef comments_to_json(comments):%0A result = %5B%5D%0A for comment in comments:%0A result.append(%7B%22score%22: comment.score,%0A %22url%22: comment.permalink,%0A %22body%22: comment.body,%0A %22id%22: comment.id,%0A %22replies%22: comments_to_json(comment.replies)%7D)%0A%0A return result%0A%0A%0Adef save_submission(submission, storage_dir):%0A with open(os.path.join(storage_dir, submission.id), %22w%22) as f:%0A f.write(json.dumps(%7B%22url%22: submission.permalink,%0A %22text%22: submission.selftext,%0A %22title%22: submission.title,%0A %22score%22: submission.score,%0A %22comments%22: comments_to_json(submission.comments)%7D))%0A f.close()%0A
|
|
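A small usage sketch for the crawler helpers above; FakeComment is a hypothetical stand-in for the PRAW objects the real code receives:

class FakeComment(object):
    # duck-typed stand-in exposing the attributes comments_to_json reads
    def __init__(self, score, permalink, body, id, replies=()):
        self.score, self.permalink, self.body, self.id = score, permalink, body, id
        self.replies = list(replies)

reply = FakeComment(3, "/r/example/1/a", "a reply", "a")
top = FakeComment(10, "/r/example/1", "top-level comment", "t", [reply])
print(comments_to_json([top]))  # nested list of dicts; replies are recursed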
d81a1f3ef63aef7f003a018f26ea636cf47cfc5d
|
Add init file for installation
|
jswatchr/__init__.py
|
jswatchr/__init__.py
|
Python
| 0 |
@@ -0,0 +1,23 @@
+from jswatchr import *%0A
|
|
7850371982cc50dc2a5a59c7b01d5a1bec80cf3f
|
Add FairFuzz tool spec
|
benchexec/tools/fairfuzz.py
|
benchexec/tools/fairfuzz.py
|
Python
| 0 |
@@ -0,0 +1,2082 @@
+%22%22%22%0ABenchExec is a framework for reliable benchmarking.%0AThis file is part of BenchExec.%0ACopyright (C) 2007-2015 Dirk Beyer%0AAll rights reserved.%0ALicensed under the Apache License, Version 2.0 (the %22License%22);%0Ayou may not use this file except in compliance with the License.%0AYou may obtain a copy of the License at%0A http://www.apache.org/licenses/LICENSE-2.0%0AUnless required by applicable law or agreed to in writing, software%0Adistributed under the License is distributed on an %22AS IS%22 BASIS,%0AWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.%0ASee the License for the specific language governing permissions and%0Alimitations under the License.%0A%22%22%22%0Aimport benchexec.result as result%0Aimport benchexec.util as util%0Aimport benchexec.tools.template%0Aimport benchexec.model%0A%0Aclass Tool(benchexec.tools.template.BaseTool):%0A %22%22%22%0A Tool info for FairFuzz (https://https://github.com/carolemieux/afl-rb/tree/testcomp).%0A %22%22%22%0A REQUIRED_PATHS = %5B%0A %22bin%22%0A %5D%0A%0A def executable(self):%0A return util.find_executable('fairfuzz-svtestcomp')%0A%0A%0A def version(self, executable):%0A return %22FairFuzz, built on AFL 2.52b%22%0A%0A%0A def name(self):%0A return 'FairFuzz'%0A%0A%0A def determine_result(self, returncode, returnsignal, output, isTimeout):%0A %22%22%22%0A Parse the output of the tool and extract the verification result.%0A This method always needs to be overridden.%0A If the tool gave a result, this method needs to return one of the%0A benchexec.result.RESULT_* strings.%0A Otherwise an arbitrary string can be returned that will be shown to the user%0A and should give some indication of the failure reason%0A (e.g., %22CRASH%22, %22OUT_OF_MEMORY%22, etc.).%0A %22%22%22%0A for line in output:%0A if %22ERROR: couldn't run FairFuzz%22 in line:%0A return %22Couldn't run FairFuzz%22%0A if %22CRASHES FOUND%22 in line:%0A return result.RESULT_FALSE_REACH%0A if %22DONE RUNNING%22 in line:%0A return result.RESULT_DONE%0A return result.RESULT_UNKNOWN%0A%0A
|
|
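A quick exercise of the output parsing above (hedged: assumes a benchexec install providing the result constants used in the tool info):

tool = Tool()
sample_output = ["running FairFuzz...", "CRASHES FOUND", "DONE RUNNING"]
# lines are scanned in order; "CRASHES FOUND" matches first here
print(tool.determine_result(0, 0, sample_output, False))  # == result.RESULT_FALSE_REACH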
37db687b4167aee0e88036c5d85995de891453ed
|
Create cbalusek_01.py
|
Week01/Problem01/cbalusek_01.py
|
Week01/Problem01/cbalusek_01.py
|
Python
| 0.000048 |
@@ -0,0 +1,397 @@
+#This project defines a function that takes any two numbers and sums their multiples to some cutoff value%0A%0Adef sum(val1, val2, test):%0A i = 1%0A j = 1%0A cum = 0%0A while i*val1 %3C test:%0A cum += i*val1%0A i += 1%0A while j*val2 %3C test:%0A if j*val2%25val1 != 0:%0A cum += j*val2%0A j += 1%0A else:%0A j += 1%0A return cum%0A%0Aprint(sum(3,5,1000))%0A
|
|
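For comparison, the same sum has a closed form via inclusion-exclusion over arithmetic series, avoiding the loops entirely; a quick sketch:

def sum_multiples(k, limit):
    # k + 2k + ... below limit = k * n * (n + 1) / 2, with n = (limit - 1) // k
    n = (limit - 1) // k
    return k * n * (n + 1) // 2

# count multiples of 3 and of 5 once each; multiples of 15 were counted twice
print(sum_multiples(3, 1000) + sum_multiples(5, 1000) - sum_multiples(15, 1000))  # 233168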
cc79ee252e09ade17961d03265c61a87e270bd88
|
Make color emoji use character sequences instead of PUA.
|
nototools/map_pua_emoji.py
|
nototools/map_pua_emoji.py
|
Python
| 0 |
@@ -0,0 +1,2256 @@
+#!/usr/bin/python%0A#%0A# Copyright 2014 Google Inc. All rights reserved.%0A#%0A# Licensed under the Apache License, Version 2.0 (the %22License%22);%0A# you may not use this file except in compliance with the License.%0A# You may obtain a copy of the License at%0A#%0A# http://www.apache.org/licenses/LICENSE-2.0%0A#%0A# Unless required by applicable law or agreed to in writing, software%0A# distributed under the License is distributed on an %22AS IS%22 BASIS,%0A# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.%0A# See the License for the specific language governing permissions and%0A# limitations under the License.%0A%0A%22%22%22Modify an emoji font to map legacy PUA characters to standard ligatures.%22%22%22%0A%0A__author__ = '[email protected] (Roozbeh Pournader)'%0A%0Aimport sys%0A%0Afrom fontTools import ttLib%0A%0Afrom nototools import add_emoji_gsub%0Afrom nototools import font_data%0A%0A%0Adef get_glyph_name_from_gsub(char_seq, font):%0A %22%22%22Find the glyph name for ligature of a given character sequence from GSUB.%0A %22%22%22%0A cmap = font_data.get_cmap(font)%0A # FIXME: So many assumptions are made here.%0A try:%0A first_glyph = cmap%5Bchar_seq%5B0%5D%5D%0A rest_of_glyphs = %5Bcmap%5Bch%5D for ch in char_seq%5B1:%5D%5D%0A except KeyError:%0A return None%0A%0A for lookup in font%5B'GSUB'%5D.table.LookupList.Lookup:%0A ligatures = lookup.SubTable%5B0%5D.ligatures%0A try:%0A for ligature in ligatures%5Bfirst_glyph%5D:%0A if ligature.Component == rest_of_glyphs:%0A return ligature.LigGlyph%0A except KeyError:%0A continue%0A return None%0A%0A%0Adef add_pua_cmap(source_file, target_file):%0A %22%22%22Add PUA characters to the cmap of the first font and save as second.%22%22%22%0A font = ttLib.TTFont(source_file)%0A cmap = font_data.get_cmap(font)%0A for pua, (ch1, ch2) in (add_emoji_gsub.EMOJI_KEYCAPS.items()%0A + add_emoji_gsub.EMOJI_FLAGS.items()):%0A if pua not in cmap:%0A glyph_name = get_glyph_name_from_gsub(%5Bch1, ch2%5D, font)%0A if glyph_name is not None:%0A cmap%5Bpua%5D = glyph_name%0A font.save(target_file)%0A%0A%0Adef main(argv):%0A %22%22%22Save the first font given to the second font.%22%22%22%0A add_pua_cmap(argv%5B1%5D, argv%5B2%5D)%0A%0A%0Aif __name__ == '__main__':%0A main(sys.argv)%0A%0A
|
|
4b0b0361ed8d231844344d014412f7b647baae0b
|
Remove more stray import IPy
|
nova/tests/network/base.py
|
nova/tests/network/base.py
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Base class of Unit Tests for all network models
"""
import IPy
import os
from nova import context
from nova import db
from nova import exception
from nova import flags
from nova import ipv6
from nova import log as logging
from nova import test
from nova import utils
from nova.auth import manager
FLAGS = flags.FLAGS
LOG = logging.getLogger('nova.tests.network')
class NetworkTestCase(test.TestCase):
"""Test cases for network code"""
def setUp(self):
super(NetworkTestCase, self).setUp()
# NOTE(vish): if you change these flags, make sure to change the
# flags in the corresponding section in nova-dhcpbridge
self.flags(connection_type='fake',
fake_call=True,
fake_network=True)
self.manager = manager.AuthManager()
self.user = self.manager.create_user('netuser', 'netuser', 'netuser')
self.projects = []
self.network = utils.import_object(FLAGS.network_manager)
self.context = context.RequestContext(project=None, user=self.user)
for i in range(FLAGS.num_networks):
name = 'project%s' % i
project = self.manager.create_project(name, 'netuser', name)
self.projects.append(project)
# create the necessary network data for the project
user_context = context.RequestContext(project=self.projects[i],
user=self.user)
host = self.network.get_network_host(user_context.elevated())
instance_ref = self._create_instance(0)
self.instance_id = instance_ref['id']
instance_ref = self._create_instance(1)
self.instance2_id = instance_ref['id']
def tearDown(self):
# TODO(termie): this should really be instantiating clean datastores
# in between runs, one failure kills all the tests
db.instance_destroy(context.get_admin_context(), self.instance_id)
db.instance_destroy(context.get_admin_context(), self.instance2_id)
for project in self.projects:
self.manager.delete_project(project)
self.manager.delete_user(self.user)
super(NetworkTestCase, self).tearDown()
def _create_instance(self, project_num, mac=None):
if not mac:
mac = utils.generate_mac()
project = self.projects[project_num]
self.context._project = project
self.context.project_id = project.id
return db.instance_create(self.context,
{'project_id': project.id,
'mac_address': mac})
def _create_address(self, project_num, instance_id=None):
"""Create an address in given project num"""
if instance_id is None:
instance_id = self.instance_id
self.context._project = self.projects[project_num]
self.context.project_id = self.projects[project_num].id
return self.network.allocate_fixed_ip(self.context, instance_id)
def _deallocate_address(self, project_num, address):
self.context._project = self.projects[project_num]
self.context.project_id = self.projects[project_num].id
self.network.deallocate_fixed_ip(self.context, address)
def _is_allocated_in_project(self, address, project_id):
"""Returns true if address is in specified project"""
project_net = db.network_get_by_bridge(context.get_admin_context(),
FLAGS.flat_network_bridge)
network = db.fixed_ip_get_network(context.get_admin_context(),
address)
instance = db.fixed_ip_get_instance(context.get_admin_context(),
address)
# instance exists until release
return instance is not None and network['id'] == project_net['id']
def test_private_ipv6(self):
"""Make sure ipv6 is OK"""
if FLAGS.use_ipv6:
instance_ref = self._create_instance(0)
address = self._create_address(0, instance_ref['id'])
network_ref = db.project_get_network(
context.get_admin_context(),
self.context.project_id)
address_v6 = db.instance_get_fixed_address_v6(
context.get_admin_context(),
instance_ref['id'])
self.assertEqual(instance_ref['mac_address'],
ipv6.to_mac(address_v6))
instance_ref2 = db.fixed_ip_get_instance_v6(
context.get_admin_context(),
address_v6)
self.assertEqual(instance_ref['id'], instance_ref2['id'])
self.assertEqual(address_v6,
ipv6.to_global(network_ref['cidr_v6'],
instance_ref['mac_address'],
'test'))
self._deallocate_address(0, address)
db.instance_destroy(context.get_admin_context(),
instance_ref['id'])
def test_available_ips(self):
"""Make sure the number of available ips for the network is correct
The number of available IP addresses depends on the test
environment's setup.
Network size is set in test fixture's setUp method.
There are ips reserved at the bottom and top of the range.
services (network, gateway, CloudPipe, broadcast)
"""
network = db.project_get_network(context.get_admin_context(),
self.projects[0].id)
net_size = flags.FLAGS.network_size
admin_context = context.get_admin_context()
total_ips = (db.network_count_available_ips(admin_context,
network['id']) +
db.network_count_reserved_ips(admin_context,
network['id']) +
db.network_count_allocated_ips(admin_context,
network['id']))
self.assertEqual(total_ips, net_size)
|
Python
| 0 |
@@ -836,11 +836,15 @@
ort
-IPy
+netaddr
%0Aimp
|
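The diff above swaps IPy for netaddr; roughly equivalent calls in the netaddr API (a sketch, not the full migration):

import netaddr

net = netaddr.IPNetwork('192.168.0.0/24')
print(net.size)  # 256 addresses in the block
print(netaddr.IPAddress('192.168.0.5') in net)  # True: containment test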
2a5b7773af3e9516d8a4a3df25c0b829598ebb1c
|
Remove redundant str typecasting
|
nova/tests/uuidsentinel.py
|
nova/tests/uuidsentinel.py
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sys
class UUIDSentinels(object):
def __init__(self):
from oslo_utils import uuidutils
self._uuid_module = uuidutils
self._sentinels = {}
def __getattr__(self, name):
if name.startswith('_'):
raise ValueError('Sentinels must not start with _')
if name not in self._sentinels:
self._sentinels[name] = str(self._uuid_module.generate_uuid())
return self._sentinels[name]
sys.modules[__name__] = UUIDSentinels()
|
Python
| 0.000009 |
@@ -924,12 +924,8 @@
%5D =
-str(
self
@@ -953,17 +953,16 @@
e_uuid()
-)
%0A
|
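Usage sketch for the sentinel module above: because it replaces itself in sys.modules, plain attribute access hands out stable per-name UUIDs:

from nova.tests import uuidsentinel as uuids

assert uuids.instance == uuids.instance  # same attribute, same UUID
assert uuids.instance != uuids.volume    # distinct names get distinct UUIDs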
ac673650673f7d6b9785d577499037bf9db4435a
|
refactor prompt abstraction
|
lib/smashlib/prompt.py
|
lib/smashlib/prompt.py
|
Python
| 0.000003 |
@@ -0,0 +1,1124 @@
+%22%22%22 smash.prompt %22%22%22%0Afrom smashlib.data import PROMPT_DEFAULT as DEFAULT%0A%0Aclass Prompt(dict):%0A%0A def __setitem__(self, k, v, update=True):%0A if k in self:%0A raise Exception,'prompt component is already present: ' + str(k)%0A super(Prompt, self).__setitem__(k, v)%0A if update:%0A self.update_prompt()%0A%0A def update_prompt(self):%0A parts = self.values()%0A parts.sort()%0A parts = %5Bpart%5B1%5D for part in parts%5D%0A self.template = ' '.join(parts)%0A%0A def _get_template(self):%0A %22%22%22 get the current prompt template %22%22%22%0A opc = getattr(__IPYTHON__.shell, 'outputcache', None)%0A if opc:%0A return opc.prompt1.p_template%0A else:%0A return 'error-getting-output-prompt'%0A def _set_template(self, t):%0A %22%22%22 set the current prompt template %22%22%22%0A opc = getattr(__IPYTHON__.shell, 'outputcache', None)%0A if opc:%0A opc.prompt1.p_template = t%0A template = property(_get_template, _set_template)%0A%0Aprompt = Prompt()%0Aprompt.__setitem__('working_dir', %5B100, DEFAULT%5D, update=False)%0Aprompt.template = DEFAULT%0A
|
|
2c178c5ea05d2454ef6896aaf9c58b6536f5a15f
|
Create bubblesort.py
|
bubblesort.py
|
bubblesort.py
|
Python
| 0.000003 |
@@ -0,0 +1,451 @@
+def bubblesort(lst):%0A #from last index to second%0A for passes in range(len(lst) - 1, 0, -1):%0A #from %5B0,passes%5B keep swapping to put the largest%0A #number at index passes%0A for i in range(passes):%0A if lst%5Bi%5D %3E lst%5Bi+1%5D:%0A swap(lst, i, i+1)%0A return lst%0A %0Adef swap(lst, i, j):%0A temp = lst%5Bi%5D%0A lst%5Bi%5D = lst%5Bj%5D%0A lst%5Bj%5D = temp%0A %0Aprint %22%7B0%7D%22.format(bubblesort(%5B23,57,75,33,6,8,56%5D))%0A
|
|
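The swap helper above can be folded into Python's tuple assignment; a compact Python 3 sketch of the same passes:

def bubblesort(lst):
    for passes in range(len(lst) - 1, 0, -1):
        for i in range(passes):
            if lst[i] > lst[i + 1]:
                lst[i], lst[i + 1] = lst[i + 1], lst[i]  # tuple swap, no temp
    return lst

print(bubblesort([23, 57, 75, 33, 6, 8, 56]))  # [6, 8, 23, 33, 56, 57, 75]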
9e954d5181d36762a8c34e69516c7f5510bae5a7
|
add exception class to use for mtconvert errors
|
oldowan/mtconvert/error.py
|
oldowan/mtconvert/error.py
|
Python
| 0 |
@@ -0,0 +1,345 @@
+%0Aclass MtconvertError(Exception):%0A %22%22%22Exception raised for errors in the mtconvert module.%0A%0A Attributes:%0A expression -- input expression in which the error occurred%0A message -- explanation of the error%0A %22%22%22%0A%0A def __init__(self, expression, message):%0A self.expression = expression%0A self.message = message%0A%0A
|
|
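A minimal usage sketch for the exception above (the expression value is illustrative):

try:
    raise MtconvertError('16189C', 'unrecognized variant format')
except MtconvertError as e:
    print('%s: %s' % (e.expression, e.message))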
3adcefcad4fc3ecb85aa4a22e8b3c4bf5ca4e6f5
|
Add tests for revision updates via import
|
test/integration/ggrc/converters/test_import_update.py
|
test/integration/ggrc/converters/test_import_update.py
|
# Copyright (C) 2016 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
from integration.ggrc.converters import TestCase
from ggrc import models
class TestImportUpdates(TestCase):
""" Test importing of already existing objects """
def setUp(self):
TestCase.setUp(self)
self.client.get("/login")
def test_policy_basic_update(self):
""" Test simple policy title update """
filename = "policy_basic_import.csv"
response = self.import_file(filename)
self._check_response(response, {})
policy = models.Policy.query.filter_by(slug="p1").first()
self.assertEqual(policy.title, "some weird policy")
filename = "policy_basic_import_update.csv"
response = self.import_file(filename)
self._check_response(response, {})
policy = models.Policy.query.filter_by(slug="p1").first()
self.assertEqual(policy.title, "Edited policy")
|
Python
| 0 |
@@ -106,16 +106,63 @@
file%3E%0A%0A
+%22%22%22Tests for bulk updates with CSV import.%22%22%22%0A%0A
from int
@@ -718,16 +718,221 @@
policy%22)
+%0A revision_count = models.Revision.query.filter(%0A models.Revision.resource_type == %22Policy%22,%0A models.Revision.resource_id == policy.id%0A ).count()%0A self.assertEqual(revision_count, 1)
%0A%0A fi
@@ -1161,16 +1161,221 @@
Edited policy%22)%0A
+ revision_count = models.Revision.query.filter(%0A models.Revision.resource_type == %22Policy%22,%0A models.Revision.resource_id == policy.id%0A ).count()%0A self.assertEqual(revision_count, 2)%0A
|
e48caa4bb61cce466ad5eb9bffbfba8e33312474
|
Add Python EC2 TerminateInstances example
|
python/example_code/ec2/terminate_instances.py
|
python/example_code/ec2/terminate_instances.py
|
Python
| 0 |
@@ -0,0 +1,2520 @@
+# snippet-comment:%5BThese are tags for the AWS doc team's sample catalog. Do not remove.%5D%0A# snippet-sourcedescription:%5Bterminate_instances.py demonstrates how to terminate an Amazon EC2 instance.%5D%0A# snippet-service:%5Bec2%5D%0A# snippet-keyword:%5BAmazon EC2%5D%0A# snippet-keyword:%5BPython%5D%0A# snippet-keyword:%5BAWS SDK for Python (Boto3)%5D%0A# snippet-keyword:%5BCode Sample%5D%0A# snippet-sourcetype:%5Bfull-example%5D%0A# snippet-sourcedate:%5B2019-2-11%5D%0A# snippet-sourceauthor:%5BAWS%5D%0A%0A# Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.%0A#%0A# This file is licensed under the Apache License, Version 2.0 (the %22License%22).%0A# You may not use this file except in compliance with the License. A copy of the%0A# License is located at%0A#%0A# http://aws.amazon.com/apache2.0/%0A#%0A# This file is distributed on an %22AS IS%22 BASIS, WITHOUT WARRANTIES OR CONDITIONS%0A# OF ANY KIND, either express or implied. See the License for the specific%0A# language governing permissions and limitations under the License.%0A%0Aimport logging%0Aimport boto3%0Afrom botocore.exceptions import ClientError%0A%0A%0Adef terminate_instances(instance_ids):%0A %22%22%22Terminate one or more Amazon EC2 instances%0A%0A :param instance_ids: List of string IDs of EC2 instances to terminate%0A :return: List of state information for each instance specified in instance_ids.%0A If error, return None.%0A %22%22%22%0A%0A%0A # Terminate each instance in the argument list%0A ec2 = boto3.client('ec2')%0A try:%0A states = ec2.terminate_instances(InstanceIds=instance_ids)%0A except ClientError as e:%0A logging.error(e)%0A return None%0A return states%5B'TerminatingInstances'%5D%0A%0A%0Adef main():%0A %22%22%22Exercise terminate_instances()%22%22%22%0A%0A # Assign these values before running the program%0A ec2_instance_ids = %5B'EC2_INSTANCE_ID'%5D%0A%0A # Set up logging%0A logging.basicConfig(level=logging.DEBUG,%0A format='%25(levelname)s: %25(asctime)s: %25(message)s')%0A%0A # Terminate the EC2 instance(s)%0A states = terminate_instances(ec2_instance_ids)%0A if states is not None:%0A logging.debug('Terminating the following EC2 instances')%0A for state in states:%0A logging.debug(f'ID: %7Bstate%5B%22InstanceId%22%5D%7D')%0A logging.debug(f' Current state: Code %7Bstate%5B%22CurrentState%22%5D%5B%22Code%22%5D%7D, '%0A f'%7Bstate%5B%22CurrentState%22%5D%5B%22Name%22%5D%7D')%0A logging.debug(f' Previous state: Code %7Bstate%5B%22PreviousState%22%5D%5B%22Code%22%5D%7D, '%0A f'%7Bstate%5B%22PreviousState%22%5D%5B%22Name%22%5D%7D')%0A%0A%0Aif __name__ == '__main__':%0A main()%0A
|
|
34c0ca7ba0f8d2ac51583dfab4ea2f4cee7a62d5
|
add script to read csv files to list
|
python/read_formated_txt_file/read_csv2list.py
|
python/read_formated_txt_file/read_csv2list.py
|
Python
| 0 |
@@ -0,0 +1,426 @@
+import csv%0A%0Adef csv_to_list(csv_file, delimiter=','):%0A %22%22%22 %0A Reads in a CSV file and returns the contents as list,%0A where every row is stored as a sublist, and each element%0A in the sublist represents 1 cell in the table.%0A %0A %22%22%22%0A with open(csv_file, 'r') as csv_con:%0A reader = csv.reader(csv_con, delimiter=delimiter)%0A return list(reader)%0A %0Adata = csv_to_list('./astro.csv')%0Aprint data
|
|
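Using csv.reader rather than a naive split(',') matters as soon as fields are quoted; a quick check:

import csv, io

sample = io.StringIO('name,comment\nalpha,"hello, world"\n')
rows = list(csv.reader(sample))
print(rows[1])  # ['alpha', 'hello, world'] -- the quoted comma is not a delimiter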
d402c25a6b257778e08e6db2890ae575432daed0
|
Add new linkedlist file for intersection
|
linkedlist/intersection.py
|
linkedlist/intersection.py
|
Python
| 0 |
@@ -0,0 +1,2192 @@
+def intersection(h1, h2):%0A %22%22%22%0A This function takes two lists and returns the node they have in common, if any.%0A In this example:%0A 1 -%3E 3 -%3E 5%0A %5C%0A 7 -%3E 9 -%3E 11%0A /%0A 2 -%3E 4 -%3E 6%0A ...we would return 7.%0A Note that the node itself is the unique identifier, not the value of the node.%0A %22%22%22%0A count = 0%0A flag = None%0A h1_orig = h1%0A h2_orig = h2%0A while h1 or h2:%0A count += 1%0A if not flag and (h1.next is None or h2.next is None):%0A # We hit the end of one of the lists, set a flag for this%0A flag = (count, h1.next, h2.next)%0A if h1:%0A h1 = h1.next%0A if h2:%0A h2 = h2.next%0A long_len = count # Mark the length of the longer of the two lists%0A short_len = flag%5B0%5D%0A if flag%5B1%5D is None:%0A shorter = h1_orig%0A longer = h2_orig%0A elif flag%5B2%5D is None:%0A shorter = h2_orig%0A longer = h1_orig%0A while longer and shorter:%0A while long_len %3E short_len:%0A # force the longer of the two lists to %22catch up%22%0A longer = longer.next%0A long_len -= 1%0A if longer == shorter:%0A # The nodes match, return the node%0A return longer%0A else:%0A longer = longer.next%0A shorter = shorter.next%0A return None%0A%0A%0Aclass Node(object):%0A def __init__(self, val=None):%0A self.val = val%0A self.next = None%0A%0A%0Adef test():%0A def printLinkedList(head):%0A string = %22%22%0A while head.next:%0A string += head.val + %22 -%3E %22%0A head = head.next%0A string += head.val%0A print(string)%0A%0A # 1 -%3E 3 -%3E 5%0A # %5C%0A # 7 -%3E 9 -%3E 11%0A # /%0A # 2 -%3E 4 -%3E 6%0A%0A a1 = Node(%221%22)%0A b1 = Node(%223%22)%0A c1 = Node(%225%22)%0A d = Node(%227%22)%0A a2 = Node(%222%22)%0A b2 = Node(%224%22)%0A c2 = Node(%226%22)%0A e = Node(%229%22)%0A f = Node(%2211%22)%0A%0A a1.next = b1%0A b1.next = c1%0A c1.next = d%0A a2.next = b2%0A b2.next = c2%0A c2.next = d%0A d.next = e%0A e.next = f%0A%0A printLinkedList(a1)%0A printLinkedList(a2)%0A print(intersection(a1, a2))%0A assert intersection(a1, a2).val == d.val%0A%0Atest()%0A
|
|
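A common alternative to the length bookkeeping above is the two-pointer trick: advance both cursors and switch heads when one falls off the end, so each travels len(a) + len(b) steps and they meet at the shared node (or at None together). A sketch reusing the same Node shape:

def intersection_two_pointer(h1, h2):
    p, q = h1, h2
    while p is not q:
        # restarting each cursor on the other list equalizes path lengths
        p = p.next if p else h2
        q = q.next if q else h1
    return p  # the shared node, or None if the lists never meet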
8f1cf446a0b602e6e64ccebaa794e7ec6a2f840d
|
add support routines for oversampling
|
compressible_fv4/initialization_support.py
|
compressible_fv4/initialization_support.py
|
Python
| 0 |
@@ -0,0 +1,534 @@
+%22%22%22Routines to help initialize cell-average values by oversampling the%0Ainitial conditions on a finer mesh and averaging down to the requested%0Amesh%22%22%22%0A%0Aimport mesh.fv as fv%0A%0Adef get_finer(myd):%0A%0A mgf = myd.grid.fine_like(4)%0A fd = fv.FV2d(mgf)%0A%0A for v in myd.names:%0A fd.register_var(v, myd.BCs%5Bv%5D)%0A%0A fd.create()%0A%0A return fd%0A%0Adef average_down(myd, fd):%0A %22%22%22average the fine data from fd into the coarser object, myd%22%22%22%0A%0A for v in myd.names:%0A var = myd.get_var(v)%0A var%5B:,:%5D = fd.restrict(v, N=4)%0A
|
|
4227cef6567023717c8d66f99ce776d9d8aa0929
|
Add OS::Contrail::PhysicalRouter
|
contrail_heat/resources/physical_router.py
|
contrail_heat/resources/physical_router.py
|
Python
| 0.000001 |
@@ -0,0 +1,1781 @@
+from heat.engine import properties%0Afrom vnc_api import vnc_api%0Afrom contrail_heat.resources import contrail%0Aimport uuid%0A%0A%0Aclass HeatPhysicalRouter(contrail.ContrailResource):%0A PROPERTIES = (%0A NAME,%0A ) = (%0A 'name',%0A )%0A%0A properties_schema = %7B%0A NAME: properties.Schema(%0A properties.Schema.STRING,%0A _('Physical Router name'),%0A update_allowed=True,%0A ),%0A %7D%0A%0A attributes_schema = %7B%0A %22name%22: _(%22The name of the Physical Router.%22),%0A %22fq_name%22: _(%22The FQ name of the Physical Router.%22),%0A %22physical_interfaces%22: _(%22List of Physical Interfaces attached.%22),%0A %22show%22: _(%22All attributes.%22),%0A %7D%0A%0A def handle_create(self):%0A config_obj = self.vnc_lib().global_system_config_read(%0A fq_name=%5B%22default-global-system-config%22%5D)%0A pr_obj = vnc_api.PhysicalRouter(name=self.properties%5Bself.NAME%5D,%0A parent_obj=config_obj)%0A pr_uuid = self.vnc_lib().physical_router_create(pr_obj)%0A self.resource_id_set(pr_uuid)%0A%0A def _show_resource(self):%0A pr_obj = self.vnc_lib().physical_router_read(id=self.resource_id)%0A dic = %7B%7D%0A dic%5B'name'%5D = pr_obj.get_display_name()%0A dic%5B'fq_name'%5D = pr_obj.get_fq_name_str()%0A dic%5B'physical_interfaces'%5D = (%0A %5Bpi%5B'to'%5D for pi in pr_obj.get_physical_interfaces() or %5B%5D%5D)%0A return dic%0A%0A def handle_delete(self):%0A try:%0A self.vnc_lib().physical_router_delete(id=self.resource_id)%0A except Exception:%0A pass%0A%0A def handle_update(self, json_snippet, tmpl_diff, prop_diff):%0A # TODO%0A pass%0A%0A%0Adef resource_mapping():%0A return %7B%0A 'OS::Contrail::PhysicalRouter': HeatPhysicalRouter,%0A %7D%0A
|
|
5cebd0b56f81dfc02feb5511dade82ebf6db99ff
|
add presence.py
|
litecord/presence.py
|
litecord/presence.py
|
Python
| 0.000002 |
@@ -0,0 +1,1065 @@
+'''%0Apresence.py - presence management%0A%0ASends PRESENCE_UPDATE to clients when needed%0A'''%0A%0Aclass PresenceManager:%0A def __init__(self, server):%0A self.server = server%0A%0A async def update_presence(self, user_id, status):%0A '''%0A PresenceManager.update_presence(user_id, status)%0A%0A Updates the presence of a user.%0A Sends a PRESENCE_UPDATE event to relevant clients.%0A '''%0A%0A '''%0A ????dummy code????%0A%0A current_presence = self.presences.get(user_id)%0A new_presence = self.make_presence(status)%0A%0A # something like this lol%0A user = await self.user.get_user(user_id)%0A for guild_id in user.guilds:%0A guild = await self.guilds.get_guild(guild_id)%0A for member in guild:%0A member = await self.guilds.get_member(guild_id, member_id)%0A c = await self.server.get_connection(member_id)%0A if c is not None:%0A await c.dispatch('PRESENCE_UPDATE', self.diff(current_presence, new_presence))%0A '''%0A%0A pass%0A
|
|
02c59aa1d2eec43442f4bcf1d6662535e094bffd
|
add move pics by modified date
|
media/pic_date_move.py
|
media/pic_date_move.py
|
Python
| 0 |
@@ -0,0 +1,1074 @@
+'''%0AFile: pic_date_move.py%0ACreated: 2021-04-01 10:46:38%0AModified: 2021-04-01 10:46:43%0AAuthor: mcxiaoke ([email protected])%0ALicense: Apache License 2.0%0A'''%0A%0Aimport os%0Aimport sys%0Aimport shutil%0Afrom datetime import date, datetime%0Aimport pathlib%0A%0A%0Adef move_one_file(src_file):%0A old_file = pathlib.Path(src_file)%0A old_dir = pathlib.Path(old_file).parent%0A name = old_file.name%0A # old_file = pathlib.Path(old_dir, name)%0A fd = datetime.fromtimestamp(old_file.stat().st_mtime)%0A new_dir = pathlib.Path(old_dir.parent, fd.strftime('%25Y%25m%25d'))%0A new_file = pathlib.Path(new_dir, name)%0A if not (new_dir.exists() and new_dir.samefile(old_dir)):%0A if not new_dir.exists():%0A new_dir.mkdir(parents=True, exist_ok=True)%0A print('Move to', new_file)%0A # old_file.rename(new_file)%0A%0A%0Adef move_by_date(src_dir):%0A '''%0A move image files by file modified date %0A '''%0A for root, _, files in os.walk(src_dir):%0A print(root)%0A for name in files:%0A move_one_file(pathlib.Path(root, name))%0A%0A%0Amove_by_date(sys.argv%5B1%5D)%0A
|
|
3ae5dc9a4325251033d3db9cae0d80eb4812815d
|
Add lazy iterator
|
minio/lazy_iterator.py
|
minio/lazy_iterator.py
|
Python
| 0.00005 |
@@ -0,0 +1,1136 @@
+# Minimal Object Storage Library, (C) 2015 Minio, Inc.%0A#%0A# Licensed under the Apache License, Version 2.0 (the %22License%22);%0A# you may not use this file except in compliance with the License.%0A# You may obtain a copy of the License at%0A#%0A# http://www.apache.org/licenses/LICENSE-2.0%0A#%0A# Unless required by applicable law or agreed to in writing, software%0A# distributed under the License is distributed on an %22AS IS%22 BASIS,%0A# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.%0A# See the License for the specific language governing permissions and%0A# limitations under the License.%0A__author__ = 'minio'%0A%0Aclass LazyIterator(object):%0A def __init__(self, populator):%0A self.populator = populator%0A self.values = %5B%5D%0A%0A def __iter__(self):%0A return self%0A%0A def next(self):%0A if self.populator is None:%0A # should never see this, but we'll be defensive%0A raise StopIteration()%0A if len(self.values) == 0:%0A self.values, self.populator = self.populator()%0A if len(self.values) %3E 0:%0A return self.values.pop(0)%0A raise StopIteration()%0A%0A
|
|
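The next method above is the Python 2 iterator protocol; under Python 3 the same idea collapses to a generator, as a sketch:

def lazy_values(populator):
    # ask the populator for (values, next_populator) batches until a
    # batch comes back empty, mirroring LazyIterator's stop condition
    while populator is not None:
        values, populator = populator()
        if not values:
            return
        for value in values:
            yield value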
860f93d2bb4c08b63c64fe9e5b7b620b824d8490
|
test ++/--/+
|
pychecker/pychecker2/utest/ops.py
|
pychecker/pychecker2/utest/ops.py
|
Python
| 0.000069 |
@@ -0,0 +1,466 @@
+from pychecker2 import TestSupport%0Afrom pychecker2 import OpChecks%0A%0Aclass OpTests(TestSupport.WarningTester):%0A def testOperator(self):%0A for op in %5B'--', '++'%5D:%0A self.warning('def f(x):%5Cn'%0A ' return %25sx' %25 op,%0A 2, OpChecks.OpCheck.operator, op)%0A %0A def testOperatorPlus(self):%0A self.warning('def f(x):%5Cn'%0A ' return +x', 2, OpChecks.OpCheck.operatorPlus)%0A
|
|
1ff7708fba64f289a957214dd089f224b74f2467
|
Make exclude_from_indexes a set, and public API. (#3756)
|
datastore/google/cloud/datastore/entity.py
|
datastore/google/cloud/datastore/entity.py
|
# Copyright 2014 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Class for representing a single entity in the Cloud Datastore."""
from google.cloud._helpers import _ensure_tuple_or_list
class Entity(dict):
"""Entities are akin to rows in a relational database
An entity storing the actual instance of data.
Each entity is officially represented with a
:class:`~google.cloud.datastore.key.Key`, however it is possible that
you might create an entity with only a partial key (that is, a key
with a kind, and possibly a parent, but without an ID). In such a
case, the datastore service will automatically assign an ID to the
partial key.
Entities in this API act like dictionaries with extras built in that
allow you to delete or persist the data stored on the entity.
Entities are mutable and act like a subclass of a dictionary.
This means you could take an existing entity and change the key
to duplicate the object.
Use :meth:`~google.cloud.datastore.client.Client.get` to retrieve an
existing entity:
.. testsetup:: entity-ctor
from google.cloud import datastore
from tests.system.test_system import Config # system tests
client = datastore.Client()
key = client.key('EntityKind', 1234, namespace='_Doctest')
entity = datastore.Entity(key=key)
entity['property'] = 'value'
Config.TO_DELETE.append(entity)
client.put(entity)
.. doctest:: entity-ctor
>>> client.get(key)
<Entity('EntityKind', 1234) {'property': 'value'}>
You can then set values on the entity just like you would on any
other dictionary.
.. doctest:: entity-ctor
>>> entity['age'] = 20
>>> entity['name'] = 'JJ'
However, not all types are allowed as a value for a Google Cloud Datastore
entity. The following basic types are supported by the API:
* :class:`datetime.datetime`
* :class:`~google.cloud.datastore.key.Key`
* :class:`bool`
* :class:`float`
* :class:`int` (as well as :class:`long` in Python 2)
* ``unicode`` (called ``str`` in Python 3)
* ``bytes`` (called ``str`` in Python 2)
* :class:`~google.cloud.datastore.helpers.GeoPoint`
* :data:`None`
In addition, two container types are supported:
* :class:`list`
* :class:`~google.cloud.datastore.entity.Entity`
Each entry in a list must be one of the value types (basic or
container) and each value in an
:class:`~google.cloud.datastore.entity.Entity` must as well. In
this case an :class:`~google.cloud.datastore.entity.Entity` **as a
container** acts as a :class:`dict`, but also has the special annotations
of ``key`` and ``exclude_from_indexes``.
And you can treat an entity like a regular Python dictionary:
.. testsetup:: entity-dict
from google.cloud import datastore
entity = datastore.Entity()
entity['age'] = 20
entity['name'] = 'JJ'
.. doctest:: entity-dict
>>> sorted(entity.keys())
['age', 'name']
>>> sorted(entity.items())
[('age', 20), ('name', 'JJ')]
.. note::
When saving an entity to the backend, values which are "text"
(``unicode`` in Python2, ``str`` in Python3) will be saved using
the 'text_value' field, after being encoded to UTF-8. When
retrieved from the back-end, such values will be decoded to "text"
again. Values which are "bytes" (``str`` in Python2, ``bytes`` in
Python3), will be saved using the 'blob_value' field, without
any decoding / encoding step.
:type key: :class:`google.cloud.datastore.key.Key`
:param key: Optional key to be set on entity.
:type exclude_from_indexes: tuple of string
:param exclude_from_indexes: Names of fields whose values are not to be
indexed for this entity.
"""
def __init__(self, key=None, exclude_from_indexes=()):
super(Entity, self).__init__()
self.key = key
self._exclude_from_indexes = set(_ensure_tuple_or_list(
'exclude_from_indexes', exclude_from_indexes))
# NOTE: This will be populated when parsing a protobuf in
# google.cloud.datastore.helpers.entity_from_protobuf.
self._meanings = {}
def __eq__(self, other):
"""Compare two entities for equality.
Entities compare equal if their keys compare equal and their
properties compare equal.
:rtype: bool
:returns: True if the entities compare equal, else False.
"""
if not isinstance(other, Entity):
return False
return (self.key == other.key and
self._exclude_from_indexes == other._exclude_from_indexes and
self._meanings == other._meanings and
super(Entity, self).__eq__(other))
def __ne__(self, other):
"""Compare two entities for inequality.
Entities compare equal if their keys compare equal and their
properties compare equal.
:rtype: bool
:returns: False if the entities compare equal, else True.
"""
return not self.__eq__(other)
@property
def kind(self):
"""Get the kind of the current entity.
.. note::
This relies entirely on the :class:`google.cloud.datastore.key.Key`
set on the entity. That means that we're not storing the kind
of the entity at all, just the properties and a pointer to a
Key which knows its Kind.
"""
if self.key:
return self.key.kind
@property
def exclude_from_indexes(self):
"""Names of fields which are *not* to be indexed for this entity.
:rtype: sequence of field names
:returns: The set of fields excluded from indexes.
"""
return frozenset(self._exclude_from_indexes)
def __repr__(self):
if self.key:
return '<Entity%s %s>' % (self.key._flat_path,
super(Entity, self).__repr__())
else:
return '<Entity %s>' % (super(Entity, self).__repr__(),)
|
Python
| 0.000001 |
@@ -4546,33 +4546,32 @@
ey%0A self.
-_
exclude_from_ind
@@ -4604,16 +4604,16 @@
r_list(%0A
+
@@ -4663,16 +4663,93 @@
dexes))%0A
+ %22%22%22Names of fields which are *not* to be indexed for this entity.%22%22%22%0A
@@ -5310,25 +5310,24 @@
self.
-_
exclude_from
@@ -5344,17 +5344,16 @@
= other.
-_
exclude_
@@ -6178,32 +6178,32 @@
if self.key:%0A
+
retu
@@ -6224,298 +6224,8 @@
nd%0A%0A
- @property%0A def exclude_from_indexes(self):%0A %22%22%22Names of fields which are *not* to be indexed for this entity.%0A%0A :rtype: sequence of field names%0A :returns: The set of fields excluded from indexes.%0A %22%22%22%0A return frozenset(self._exclude_from_indexes)%0A%0A
|
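After the change above, the exclusion set is public API; a hedged usage sketch (key construction elided):

entity = Entity(key=None, exclude_from_indexes=('description',))
entity['description'] = 'long, unindexed text'
print(entity.exclude_from_indexes)  # {'description'} -- now a plain public set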
cfb77bbe0c77a67c536614bf9fece1e9fcde4eb0
|
make find_configs filter name more descriptive
|
crosscat/utils/experiment_utils.py
|
crosscat/utils/experiment_utils.py
|
import os
#
import crosscat.utils.file_utils as file_utils
import crosscat.utils.geweke_utils as geweke_utils
import crosscat.utils.general_utils as general_utils
result_filename = geweke_utils.summary_filename
writer = geweke_utils.write_result
reader = geweke_utils.read_result
runner = geweke_utils.run_geweke
def find_configs(dirname, filename=result_filename):
root_has_filename = lambda (root, ds, filenames): filenames.count(filename)
get_filepath = lambda (root, ds, fs): os.path.join(root, filename)
def is_one_deep(filepath):
_dir, _file = os.path.split(filepath)
return os.path.split(_dir)[0] == dirname
tuples = filter(root_has_filename, os.walk(dirname))
filepaths = map(get_filepath, tuples)
filepaths = filter(is_one_deep, filepaths)
return filepaths
def read_all_configs(dirname='.'):
def read_config(filepath):
result = file_utils.unpickle(filepath, dir=dirname)
config = result['config']
return config
filepaths = find_configs(dirname)
config_list = map(read_config, filepaths)
return config_list
def read_results(config_list, *args, **kwargs):
_read_result = lambda config: reader(config, *args, **kwargs)
config_list = general_utils.ensure_listlike(config_list)
results = map(_read_result, config_list)
return results
def write_results(results, *args, **kwargs):
_write_result = lambda result: writer(result, *args, **kwargs)
map(_write_result, results)
return
def do_experiments(runner, writer, config_list, *args, **kwargs):
def do_experiment(config):
result = runner(config)
writer(result, *args, **kwargs)
return
config_list = general_utils.ensure_listlike(config_list)
map(do_experiment, config_list)
return
def args_to_config(args):
parser = geweke_utils.generate_parser()
args = parser.parse_args(args)
args = geweke_utils.arbitrate_args(args)
return args.__dict__
if __name__ == '__main__':
args_list = [
['--num_rows', '10', '--num_cols', '2', '--num_iters', '300', ],
['--num_rows', '10', '--num_cols', '3', '--num_iters', '300', ],
['--num_rows', '20', '--num_cols', '2', '--num_iters', '300', ],
['--num_rows', '20', '--num_cols', '3', '--num_iters', '300', ],
]
configs_list = map(args_to_config, args_list)
do_experiments(runner, writer, configs_list)
configs_list = read_all_configs()
has_three_cols = lambda config: config['num_cols'] == 3
configs_list = filter(has_three_cols, configs_list)
results = read_results(configs_list)
|
Python
| 0.000001 |
@@ -525,24 +525,28 @@
def is_
-one_deep
+this_dirname
(filepat
@@ -773,16 +773,20 @@
(is_
-one_deep
+this_dirname
, fi
|
58fee826ab5298f7de036bf320bbc109b853eec8
|
Add null check for sds sync thread which can be optional
|
tendrl/commons/manager/__init__.py
|
tendrl/commons/manager/__init__.py
|
import abc
import logging
import six
from tendrl.commons import jobs
LOG = logging.getLogger(__name__)
@six.add_metaclass(abc.ABCMeta)
class Manager(object):
def __init__(
self,
sds_sync_thread,
central_store_thread,
):
self._central_store_thread = central_store_thread
self._sds_sync_thread = sds_sync_thread
self._job_consumer_thread = jobs.JobConsumerThread()
def stop(self):
LOG.info("%s stopping" % self.__class__.__name__)
self._job_consumer_thread.stop()
self._sds_sync_thread.stop()
self._central_store_thread.stop()
def start(self):
LOG.info("%s starting" % self.__class__.__name__)
self._central_store_thread.start()
self._sds_sync_thread.start()
self._job_consumer_thread.start()
def join(self):
LOG.info("%s joining" % self.__class__.__name__)
self._job_consumer_thread.join()
self._sds_sync_thread.join()
self._central_store_thread.join()
|
Python
| 0 |
@@ -542,32 +542,70 @@
r_thread.stop()%0A
+ if self._sds_sync_thread:%0A
self._sd
@@ -617,32 +617,32 @@
c_thread.stop()%0A
-
self._ce
@@ -782,32 +782,70 @@
_thread.start()%0A
+ if self._sds_sync_thread:%0A
self._sd
@@ -858,32 +858,32 @@
_thread.start()%0A
-
self._jo
@@ -1020,32 +1020,70 @@
r_thread.join()%0A
+ if self._sds_sync_thread:%0A
self._sd
@@ -1145,9 +1145,8 @@
.join()%0A
-%0A
|
82ccb8522f82337c9602742a62c954492784d7fc
|
Update forward compatibility horizon to 2019-06-13
|
tensorflow/python/compat/compat.py
|
tensorflow/python/compat/compat.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for API compatibility between TensorFlow release versions.
See [Version
Compatibility](https://tensorflow.org/guide/version_compat#backward_forward)
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import datetime
from tensorflow.python.util import tf_contextlib
from tensorflow.python.util.tf_export import tf_export
_FORWARD_COMPATIBILITY_HORIZON = datetime.date(2019, 6, 12)
@tf_export("compat.forward_compatible")
def forward_compatible(year, month, day):
"""Return true if the forward compatibility window has expired.
See [Version
compatibility](https://tensorflow.org/guide/version_compat#backward_forward).
Forward-compatibility refers to scenarios where the producer of a TensorFlow
model (a GraphDef or SavedModel) is compiled against a version of the
TensorFlow library newer than what the consumer was compiled against. The
"producer" is typically a Python program that constructs and trains a model
while the "consumer" is typically another program that loads and serves the
model.
TensorFlow has been supporting a 3 week forward-compatibility window for
programs compiled from source at HEAD.
For example, consider the case where a new operation `MyNewAwesomeAdd` is
created with the intent of replacing the implementation of an existing Python
wrapper - `tf.add`. The Python wrapper implementation should change from
something like:
```python
def add(inputs, name=None):
return gen_math_ops.add(inputs, name)
```
to:
```python
from tensorflow.python.compat import compat
def add(inputs, name=None):
if compat.forward_compatible(year, month, day):
# Can use the awesome new implementation.
return gen_math_ops.my_new_awesome_add(inputs, name)
# To maintain forward compatibility, use the old implementation.
return gen_math_ops.add(inputs, name)
```
Where `year`, `month`, and `day` specify the date beyond which binaries
that consume a model are expected to have been updated to include the
new operations. This date is typically at least 3 weeks beyond the date
the code that adds the new operation is committed.
Args:
year: A year (e.g., 2018). Must be an `int`.
month: A month (1 <= month <= 12) in year. Must be an `int`.
day: A day (1 <= day <= 31, or 30, or 29, or 28) in month. Must be an
`int`.
Returns:
True if the caller can expect that serialized TensorFlow graphs produced
can be consumed by programs that are compiled with the TensorFlow library
source code after (year, month, day).
"""
return _FORWARD_COMPATIBILITY_HORIZON > datetime.date(year, month, day)
@tf_export("compat.forward_compatibility_horizon")
@tf_contextlib.contextmanager
def forward_compatibility_horizon(year, month, day):
"""Context manager for testing forward compatibility of generated graphs.
See [Version
compatibility](https://tensorflow.org/guide/version_compat#backward_forward).
To ensure forward compatibility of generated graphs (see `forward_compatible`)
with older binaries, new features can be gated with:
```python
if compat.forward_compatible(year=2018, month=8, day=1):
generate_graph_with_new_features()
else:
generate_graph_so_older_binaries_can_consume_it()
```
However, when adding new features, one may want to unittest it before
the forward compatibility window expires. This context manager enables
such tests. For example:
```python
from tensorflow.python.compat import compat
def testMyNewFeature(self):
with compat.forward_compatibility_horizon(2018, 8, 2):
# Test that generate_graph_with_new_features() has an effect
```
Args:
year: A year (e.g., 2018). Must be an `int`.
month: A month (1 <= month <= 12) in year. Must be an `int`.
day: A day (1 <= day <= 31, or 30, or 29, or 28) in month. Must be an
`int`.
Yields:
Nothing.
"""
global _FORWARD_COMPATIBILITY_HORIZON
try:
old_compat_date = _FORWARD_COMPATIBILITY_HORIZON
_FORWARD_COMPATIBILITY_HORIZON = datetime.date(year, month, day)
yield
finally:
_FORWARD_COMPATIBILITY_HORIZON = old_compat_date
|
Python
| 0 |
@@ -1139,17 +1139,17 @@
19, 6, 1
-2
+3
)%0A%0A%0A@tf_
|
20bd1d719f238de4da303b9e71a0618b64eedcf1
|
Update forward compatibility horizon to 2022-03-28
|
tensorflow/python/compat/compat.py
|
tensorflow/python/compat/compat.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for API compatibility between TensorFlow release versions.
See [Version
Compatibility](https://tensorflow.org/guide/version_compat#backward_forward)
"""
import datetime
import os
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import tf_contextlib
from tensorflow.python.util.tf_export import tf_export
# This value changes every day with an automatic CL. It can be modified in code
# via `forward_compatibility_horizon()` or with the environment variable
# TF_FORWARD_COMPATIBILITY_DELTA_DAYS, which is added to the compatibility date.
_FORWARD_COMPATIBILITY_HORIZON = datetime.date(2022, 3, 27)
_FORWARD_COMPATIBILITY_DELTA_DAYS_VAR_NAME = "TF_FORWARD_COMPATIBILITY_DELTA_DAYS"
_FORWARD_COMPATIBILITY_DATE_NUMBER = None
def _date_to_date_number(year, month, day):
return (year << 9) | (month << 5) | day
def _update_forward_compatibility_date_number(date_to_override=None):
"""Update the base date to compare in forward_compatible function."""
global _FORWARD_COMPATIBILITY_DATE_NUMBER
if date_to_override:
date = date_to_override
else:
date = _FORWARD_COMPATIBILITY_HORIZON
delta_days = os.getenv(_FORWARD_COMPATIBILITY_DELTA_DAYS_VAR_NAME)
if delta_days:
date += datetime.timedelta(days=int(delta_days))
if date < _FORWARD_COMPATIBILITY_HORIZON:
logging.warning("Trying to set the forward compatibility date to the past"
" date %s. This will be ignored by TensorFlow." % (date))
return
_FORWARD_COMPATIBILITY_DATE_NUMBER = _date_to_date_number(
date.year, date.month, date.day)
_update_forward_compatibility_date_number()
@tf_export("compat.forward_compatible")
def forward_compatible(year, month, day):
"""Return true if the forward compatibility window has expired.
See [Version
compatibility](https://tensorflow.org/guide/version_compat#backward_forward).
Forward-compatibility refers to scenarios where the producer of a TensorFlow
model (a GraphDef or SavedModel) is compiled against a version of the
TensorFlow library newer than what the consumer was compiled against. The
"producer" is typically a Python program that constructs and trains a model
while the "consumer" is typically another program that loads and serves the
model.
TensorFlow has been supporting a 3 week forward-compatibility window for
programs compiled from source at HEAD.
For example, consider the case where a new operation `MyNewAwesomeAdd` is
created with the intent of replacing the implementation of an existing Python
wrapper - `tf.add`. The Python wrapper implementation should change from
something like:
```python
def add(inputs, name=None):
return gen_math_ops.add(inputs, name)
```
to:
```python
from tensorflow.python.compat import compat
def add(inputs, name=None):
if compat.forward_compatible(year, month, day):
# Can use the awesome new implementation.
return gen_math_ops.my_new_awesome_add(inputs, name)
# To maintain forward compatibility, use the old implementation.
return gen_math_ops.add(inputs, name)
```
Where `year`, `month`, and `day` specify the date beyond which binaries
that consume a model are expected to have been updated to include the
new operations. This date is typically at least 3 weeks beyond the date
the code that adds the new operation is committed.
Args:
year: A year (e.g., 2018). Must be an `int`.
month: A month (1 <= month <= 12) in year. Must be an `int`.
day: A day (1 <= day <= 31, or 30, or 29, or 28) in month. Must be an
`int`.
Returns:
True if the caller can expect that serialized TensorFlow graphs produced
can be consumed by programs that are compiled with the TensorFlow library
source code after (year, month, day).
"""
return _FORWARD_COMPATIBILITY_DATE_NUMBER > _date_to_date_number(
year, month, day)
@tf_export("compat.forward_compatibility_horizon")
@tf_contextlib.contextmanager
def forward_compatibility_horizon(year, month, day):
"""Context manager for testing forward compatibility of generated graphs.
See [Version
compatibility](https://tensorflow.org/guide/version_compat#backward_forward).
To ensure forward compatibility of generated graphs (see `forward_compatible`)
with older binaries, new features can be gated with:
```python
if compat.forward_compatible(year=2018, month=8, day=1):
generate_graph_with_new_features()
else:
generate_graph_so_older_binaries_can_consume_it()
```
However, when adding new features, one may want to unittest it before
the forward compatibility window expires. This context manager enables
such tests. For example:
```python
from tensorflow.python.compat import compat
def testMyNewFeature(self):
with compat.forward_compatibility_horizon(2018, 8, 2):
# Test that generate_graph_with_new_features() has an effect
```
Args:
year: A year (e.g., 2018). Must be an `int`.
month: A month (1 <= month <= 12) in year. Must be an `int`.
day: A day (1 <= day <= 31, or 30, or 29, or 28) in month. Must be an
`int`.
Yields:
Nothing.
"""
try:
_update_forward_compatibility_date_number(datetime.date(year, month, day))
yield
finally:
_update_forward_compatibility_date_number()
|
Python
| 0 |
@@ -1339,9 +1339,9 @@
3, 2
-7
+8
)%0A_F
|
7601684473a5fd980e6abcd9bd1eac338b9be547
|
Update forward compatibility horizon to 2022-10-07
|
tensorflow/python/compat/compat.py
|
tensorflow/python/compat/compat.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for API compatibility between TensorFlow release versions.
See [Version
Compatibility](https://tensorflow.org/guide/version_compat#backward_forward)
"""
import datetime
import os
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import tf_contextlib
from tensorflow.python.util.tf_export import tf_export
# This value changes every day with an automatic CL. It can be modified in code
# via `forward_compatibility_horizon()` or with the environment variable
# TF_FORWARD_COMPATIBILITY_DELTA_DAYS, which is added to the compatibility date.
_FORWARD_COMPATIBILITY_HORIZON = datetime.date(2022, 10, 6)
_FORWARD_COMPATIBILITY_DELTA_DAYS_VAR_NAME = "TF_FORWARD_COMPATIBILITY_DELTA_DAYS"
_FORWARD_COMPATIBILITY_DATE_NUMBER = None
def _date_to_date_number(year, month, day):
return (year << 9) | (month << 5) | day
def _update_forward_compatibility_date_number(date_to_override=None):
"""Update the base date to compare in forward_compatible function."""
global _FORWARD_COMPATIBILITY_DATE_NUMBER
if date_to_override:
date = date_to_override
else:
date = _FORWARD_COMPATIBILITY_HORIZON
delta_days = os.getenv(_FORWARD_COMPATIBILITY_DELTA_DAYS_VAR_NAME)
if delta_days:
date += datetime.timedelta(days=int(delta_days))
if date < _FORWARD_COMPATIBILITY_HORIZON:
logging.warning("Trying to set the forward compatibility date to the past"
" date %s. This will be ignored by TensorFlow." % (date))
return
_FORWARD_COMPATIBILITY_DATE_NUMBER = _date_to_date_number(
date.year, date.month, date.day)
_update_forward_compatibility_date_number()
@tf_export("compat.forward_compatible")
def forward_compatible(year, month, day):
"""Return true if the forward compatibility window has expired.
See [Version
compatibility](https://tensorflow.org/guide/version_compat#backward_forward).
Forward-compatibility refers to scenarios where the producer of a TensorFlow
model (a GraphDef or SavedModel) is compiled against a version of the
TensorFlow library newer than what the consumer was compiled against. The
"producer" is typically a Python program that constructs and trains a model
while the "consumer" is typically another program that loads and serves the
model.
TensorFlow has been supporting a 3 week forward-compatibility window for
programs compiled from source at HEAD.
For example, consider the case where a new operation `MyNewAwesomeAdd` is
created with the intent of replacing the implementation of an existing Python
wrapper - `tf.add`. The Python wrapper implementation should change from
something like:
```python
def add(inputs, name=None):
return gen_math_ops.add(inputs, name)
```
to:
```python
from tensorflow.python.compat import compat
def add(inputs, name=None):
if compat.forward_compatible(year, month, day):
# Can use the awesome new implementation.
return gen_math_ops.my_new_awesome_add(inputs, name)
# To maintain forward compatibility, use the old implementation.
return gen_math_ops.add(inputs, name)
```
Where `year`, `month`, and `day` specify the date beyond which binaries
that consume a model are expected to have been updated to include the
new operations. This date is typically at least 3 weeks beyond the date
the code that adds the new operation is committed.
Args:
year: A year (e.g., 2018). Must be an `int`.
month: A month (1 <= month <= 12) in year. Must be an `int`.
day: A day (1 <= day <= 31, or 30, or 29, or 28) in month. Must be an
`int`.
Returns:
True if the caller can expect that serialized TensorFlow graphs produced
can be consumed by programs that are compiled with the TensorFlow library
source code after (year, month, day).
"""
return _FORWARD_COMPATIBILITY_DATE_NUMBER > _date_to_date_number(
year, month, day)
@tf_export("compat.forward_compatibility_horizon")
@tf_contextlib.contextmanager
def forward_compatibility_horizon(year, month, day):
"""Context manager for testing forward compatibility of generated graphs.
See [Version
compatibility](https://www.tensorflow.org/guide/versions#backward_and_partial_forward_compatibility).
To ensure forward compatibility of generated graphs (see `forward_compatible`)
with older binaries, new features can be gated with:
```python
if compat.forward_compatible(year=2018, month=8, day=1):
generate_graph_with_new_features()
else:
generate_graph_so_older_binaries_can_consume_it()
```
However, when adding new features, one may want to unittest it before
the forward compatibility window expires. This context manager enables
such tests. For example:
```python
from tensorflow.python.compat import compat
def testMyNewFeature(self):
with compat.forward_compatibility_horizon(2018, 8, 2):
# Test that generate_graph_with_new_features() has an effect
```
Args:
year: A year (e.g., 2018). Must be an `int`.
month: A month (1 <= month <= 12) in year. Must be an `int`.
day: A day (1 <= day <= 31, or 30, or 29, or 28) in month. Must be an
`int`.
Yields:
Nothing.
"""
try:
_update_forward_compatibility_date_number(datetime.date(year, month, day))
yield
finally:
_update_forward_compatibility_date_number()
|
Python
| 0 |
@@ -1339,9 +1339,9 @@
10,
-6
+7
)%0A_F
|
3437fba39d5bca77fd7627aad15ba76fb75f5731
|
Update forward compatibility horizon to 2018-08-15
|
tensorflow/python/compat/compat.py
|
tensorflow/python/compat/compat.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for API compatibility between TensorFlow release versions.
See
@{$guide/version_compat#backward_and_partial_forward_compatibility}
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import datetime
from tensorflow.python.util import tf_contextlib
from tensorflow.python.util.tf_export import tf_export
_FORWARD_COMPATIBILITY_HORIZON = datetime.date(2018, 8, 14)
@tf_export("compat.forward_compatible")
def forward_compatible(year, month, day):
"""Return true if the forward compatibility window has expired.
See @{$guide/version_compat#backward_and_partial_forward_compatibility}.
Forward-compatibility refers to scenarios where the producer of a TensorFlow
model (a GraphDef or SavedModel) is compiled against a version of the
TensorFlow library newer than what the consumer was compiled against. The
"producer" is typically a Python program that constructs and trains a model
while the "consumer" is typically another program that loads and serves the
model.
TensorFlow has been supporting a 3 week forward-compatibility window for
programs compiled from source at HEAD.
For example, consider the case where a new operation `MyNewAwesomeAdd` is
created with the intent of replacing the implementation of an existing Python
wrapper - `tf.add`. The Python wrapper implementation should change from
something like:
```python
def add(inputs, name=None):
return gen_math_ops.add(inputs, name)
```
to:
```python
from tensorflow.python.compat import compat
def add(inputs, name=None):
if compat.forward_compatible(year, month, day):
# Can use the awesome new implementation.
return gen_math_ops.my_new_awesome_add(inputs, name)
# To maintain forward compatibility, use the old implementation.
return gen_math_ops.add(inputs, name)
```
Where `year`, `month`, and `day` specify the date beyond which binaries
that consume a model are expected to have been updated to include the
new operations. This date is typically at least 3 weeks beyond the date
the code that adds the new operation is committed.
Args:
year: A year (e.g., 2018).
month: A month (1 <= month <= 12) in year.
day: A day (1 <= day <= 31, or 30, or 29, or 28) in month.
Returns:
True if the caller can expect that serialized TensorFlow graphs produced
can be consumed by programs that are compiled with the TensorFlow library
source code after (year, month, day).
"""
return _FORWARD_COMPATIBILITY_HORIZON > datetime.date(year, month, day)
@tf_export("compat.forward_compatibility_horizon")
@tf_contextlib.contextmanager
def forward_compatibility_horizon(year, month, day):
"""Context manager for testing forward compatibility of generated graphs.
See @{$guide/version_compat#backward_and_partial_forward_compatibility}.
To ensure forward compatibility of generated graphs (see `forward_compatible`)
with older binaries, new features can be gated with:
```python
if compat.forward_compatible(year=2018, month=8, day=1):
generate_graph_with_new_features()
else:
generate_graph_so_older_binaries_can_consume_it()
```
However, when adding new features, one may want to unittest it before
the forward compatibility window expires. This context manager enables
such tests. For example:
```python
from tensorflow.python.compat import compat
def testMyNewFeature(self):
with compat.forward_compatibility_horizon(2018, 8, 2):
# Test that generate_graph_with_new_features() has an effect
```
Args:
year: A year (e.g., 2018).
month: A month (1 <= month <= 12) in year.
day: A day (1 <= day <= 31, or 30, or 29, or 28) in month.
Yields:
Nothing.
"""
global _FORWARD_COMPATIBILITY_HORIZON
try:
old_compat_date = _FORWARD_COMPATIBILITY_HORIZON
_FORWARD_COMPATIBILITY_HORIZON = datetime.date(year, month, day)
yield
finally:
_FORWARD_COMPATIBILITY_HORIZON = old_compat_date
|
Python
| 0 |
@@ -1124,9 +1124,9 @@
8, 1
-4
+5
)%0A%0A%0A
|
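The context manager in this record swaps a module-level global; a small standalone sketch of the same pattern (the horizon date is a placeholder):
```python
import datetime
from contextlib import contextmanager

_HORIZON = datetime.date(2018, 8, 14)  # placeholder for the module global


@contextmanager
def forward_compatibility_horizon(year, month, day):
    # Temporarily move the horizon, restoring it even if the body raises.
    global _HORIZON
    old_compat_date = _HORIZON
    try:
        _HORIZON = datetime.date(year, month, day)
        yield
    finally:
        _HORIZON = old_compat_date


with forward_compatibility_horizon(2018, 9, 1):
    assert _HORIZON == datetime.date(2018, 9, 1)
assert _HORIZON == datetime.date(2018, 8, 14)
```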
677f5a50251ef707bef10fb1f0e3c5111e4a7297
|
Update forward compatibility horizon to 2019-10-04
|
tensorflow/python/compat/compat.py
|
tensorflow/python/compat/compat.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for API compatibility between TensorFlow release versions.
See [Version
Compatibility](https://tensorflow.org/guide/version_compat#backward_forward)
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import datetime
import os
from tensorflow.python.util import tf_contextlib
from tensorflow.python.util.tf_export import tf_export
# This value changes every day with an automatic CL. It can be modified in code
# via `forward_compatibility_horizon()` or with the environment variable
# TF_FORWARD_COMPATIBILITY_DELTA_DAYS, which is added to the compatibility date.
_FORWARD_COMPATIBILITY_HORIZON = datetime.date(2019, 10, 3)
_FORWARD_COMPATIBILITY_DELTA_DAYS_VAR_NAME = "TF_FORWARD_COMPATIBILITY_DELTA_DAYS"
_FORWARD_COMPATIBILITY_DATE_NUMBER = None
def _date_to_date_number(year, month, day):
return (year << 9) | (month << 5) | day
def _update_forward_compatibility_date_number(date_to_override=None):
"""Update the base date to compare in forward_compatible function."""
global _FORWARD_COMPATIBILITY_DATE_NUMBER
if date_to_override:
date = date_to_override
else:
date = _FORWARD_COMPATIBILITY_HORIZON
delta_days = os.getenv(_FORWARD_COMPATIBILITY_DELTA_DAYS_VAR_NAME)
if delta_days:
date += datetime.timedelta(days=int(delta_days))
_FORWARD_COMPATIBILITY_DATE_NUMBER = _date_to_date_number(
date.year, date.month, date.day)
_update_forward_compatibility_date_number()
@tf_export("compat.forward_compatible")
def forward_compatible(year, month, day):
"""Return true if the forward compatibility window has expired.
See [Version
compatibility](https://tensorflow.org/guide/version_compat#backward_forward).
Forward-compatibility refers to scenarios where the producer of a TensorFlow
model (a GraphDef or SavedModel) is compiled against a version of the
TensorFlow library newer than what the consumer was compiled against. The
"producer" is typically a Python program that constructs and trains a model
while the "consumer" is typically another program that loads and serves the
model.
TensorFlow has been supporting a 3 week forward-compatibility window for
programs compiled from source at HEAD.
For example, consider the case where a new operation `MyNewAwesomeAdd` is
created with the intent of replacing the implementation of an existing Python
wrapper - `tf.add`. The Python wrapper implementation should change from
something like:
```python
def add(inputs, name=None):
return gen_math_ops.add(inputs, name)
```
to:
```python
from tensorflow.python.compat import compat
def add(inputs, name=None):
if compat.forward_compatible(year, month, day):
# Can use the awesome new implementation.
return gen_math_ops.my_new_awesome_add(inputs, name)
# To maintain forward compatibility, use the old implementation.
return gen_math_ops.add(inputs, name)
```
Where `year`, `month`, and `day` specify the date beyond which binaries
that consume a model are expected to have been updated to include the
new operations. This date is typically at least 3 weeks beyond the date
the code that adds the new operation is committed.
Args:
year: A year (e.g., 2018). Must be an `int`.
month: A month (1 <= month <= 12) in year. Must be an `int`.
day: A day (1 <= day <= 31, or 30, or 29, or 28) in month. Must be an
`int`.
Returns:
True if the caller can expect that serialized TensorFlow graphs produced
can be consumed by programs that are compiled with the TensorFlow library
source code after (year, month, day).
"""
return _FORWARD_COMPATIBILITY_DATE_NUMBER > _date_to_date_number(
year, month, day)
@tf_export("compat.forward_compatibility_horizon")
@tf_contextlib.contextmanager
def forward_compatibility_horizon(year, month, day):
"""Context manager for testing forward compatibility of generated graphs.
See [Version
compatibility](https://tensorflow.org/guide/version_compat#backward_forward).
To ensure forward compatibility of generated graphs (see `forward_compatible`)
with older binaries, new features can be gated with:
```python
if compat.forward_compatible(year=2018, month=8, day=1):
generate_graph_with_new_features()
else:
generate_graph_so_older_binaries_can_consume_it()
```
However, when adding new features, one may want to unittest it before
the forward compatibility window expires. This context manager enables
such tests. For example:
```python
from tensorflow.python.compat import compat
def testMyNewFeature(self):
with compat.forward_compatibility_horizon(2018, 8, 2):
# Test that generate_graph_with_new_features() has an effect
```
Args:
year: A year (e.g., 2018). Must be an `int`.
month: A month (1 <= month <= 12) in year. Must be an `int`.
day: A day (1 <= day <= 31, or 30, or 29, or 28) in month. Must be an
`int`.
Yields:
Nothing.
"""
try:
_update_forward_compatibility_date_number(datetime.date(year, month, day))
yield
finally:
_update_forward_compatibility_date_number()
|
Python
| 0 |
@@ -1383,17 +1383,17 @@
19, 10,
-3
+4
)%0A_FORWA
|
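This revision replaces direct `datetime.date` comparison with a packed integer; a quick check that the `(year << 9) | (month << 5) | day` encoding from the record preserves chronological ordering:
```python
def date_to_number(year, month, day):
    # 5 bits for day (<= 31), 4 bits for month (<= 12), the rest for year,
    # so integer order matches chronological order.
    return (year << 9) | (month << 5) | day


assert date_to_number(2019, 10, 3) < date_to_number(2019, 10, 4)
assert date_to_number(2019, 12, 31) < date_to_number(2020, 1, 1)
print(hex(date_to_number(2019, 10, 4)))  # 0xfc744 -> year, month, day fields
```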
27cce3b6708a17f813f0a82871c988fec3a36517
|
Add quart to contrib (#300)
|
rollbar/contrib/quart/__init__.py
|
rollbar/contrib/quart/__init__.py
|
Python
| 0 |
@@ -0,0 +1,315 @@
+%22%22%22%0AIntegration with Quart%0A%22%22%22%0A%0Afrom quart import request%0Aimport rollbar%0A%0A%0Adef report_exception(app, exception):%0A rollbar.report_exc_info(request=request)%0A%0A%0Adef _hook(request, data):%0A data%5B'framework'%5D = 'quart'%0A%0A if request:%0A data%5B'context'%5D = str(request.url_rule)%0A%0Arollbar.BASE_DATA_HOOK = _hook%0A
|
|
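The record defines `report_exception` and the data hook but not the app wiring; one plausible registration, assuming Quart exposes Flask-style blinker signals (`quart.signals.got_request_exception`):
```python
import rollbar
from quart import Quart
from quart.signals import got_request_exception  # assumed Flask-style signal

from rollbar.contrib.quart import report_exception  # handler from this record

app = Quart(__name__)
rollbar.init('ACCESS_TOKEN', environment='development')  # token is a placeholder

# Fan request-time exceptions out to Rollbar via the record's handler.
got_request_exception.connect(report_exception, app)
```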
e5a39d4e17a0555cb242731b34f0ee480367b4fe
|
Add task that sends out notifications
|
foireminder/foireminder/reminders/tasks.py
|
foireminder/foireminder/reminders/tasks.py
|
Python
| 0.999047 |
@@ -0,0 +1,450 @@
+from django.utils import timezone%0A%0Afrom .models import ReminderRequest, EmailReminder%0A%0A%0Adef send_todays_notifications():%0A    today = timezone.now()%0A    reminders = ReminderRequest.objects.filter(%0A        start__year=today.year,%0A        start__month=today.month,%0A        start__day=today.day%0A    )%0A    for reminder in reminders:%0A        for subscriber in EmailReminder.objects.filter(rule=reminder.rule):%0A            subscriber.send_notification()%0A
|
|
9efda5a5a2b7aa16423e68fb10e1a0cb94c1f33e
|
Create rectangles_into_squares.py
|
rectangles_into_squares.py
|
rectangles_into_squares.py
|
Python
| 0.999056 |
@@ -0,0 +1,335 @@
+#Kunal Gautam%0A#Codewars : @Kunalpod%0A#Problem name: Rectangles into Squares%0A#Problem level: 6 kyu%0A%0Adef sqInRect(lng, wdth):%0A if lng==wdth: return None%0A li=%5B%5D%0A while lng and wdth:%0A if lng%3Ewdth:%0A lng-=wdth%0A li.append(wdth)%0A else:%0A wdth-=lng%0A li.append(lng)%0A return li%0A
|
|
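The decomposition above is the subtraction form of Euclid's algorithm, emitting one square side per step; a self-contained rendering with a worked check (`sq_in_rect` is a renamed copy for illustration):
```python
def sq_in_rect(lng, wdth):
    # Same algorithm as the record's sqInRect: repeatedly carve the largest
    # square off the rectangle until nothing is left.
    if lng == wdth:
        return None
    sides = []
    while lng and wdth:
        if lng > wdth:
            lng -= wdth
            sides.append(wdth)
        else:
            wdth -= lng
            sides.append(lng)
    return sides


assert sq_in_rect(5, 3) == [3, 2, 1, 1]  # 9 + 4 + 1 + 1 == 15 == 5 * 3
assert sq_in_rect(5, 5) is None          # already a square
```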
c5fc38749dcf966787f6c6a201e23c310a22358c
|
Add script to update UniProt protein names
|
src/main/resources/org/clulab/reach/update_uniprot.py
|
src/main/resources/org/clulab/reach/update_uniprot.py
|
Python
| 0 |
@@ -0,0 +1,3386 @@
+import os%0Aimport re%0Aimport csv%0Aimport requests%0Aimport itertools%0Afrom gilda.generate_terms import parse_uniprot_synonyms%0A%0A%0A# Base URL for UniProt%0Auniprot_url = 'http://www.uniprot.org/uniprot'%0A# Get protein names, gene names and the organism%0Acolumns = %5B'id', 'protein%2520names', 'genes', 'organism'%5D%0A# Only get reviewed entries and use TSV format%0Aparams = %7B%0A 'sort': 'id',%0A 'desc': 'no',%0A 'compress': 'no',%0A 'query': 'reviewed:yes',%0A 'format': 'tab',%0A 'columns': ','.join(columns)%0A%7D%0A%0A%0Adef process_row(row):%0A entry, protein_names, genes, organisms = row%0A # Gene names are space separated%0A gene_synonyms = genes.split(' ') if genes else %5B%5D%0A # We use a more complex function to parse protein synonyms which appear%0A # as %22first synonym (second synonym) (third synonym) ...%22.%0A protein_synonyms = parse_uniprot_synonyms(protein_names) %5C%0A if protein_names else %5B%5D%0A # We remove EC codes as synonyms because they always refer to higher-level%0A # enzyme categories shared across species%0A protein_synonyms = %5Bp for p in protein_synonyms%0A if not p.startswith('EC ')%5D%0A # Organisms and their synonyms also appear in the format that protein%0A # synonyms do%0A organism_synonyms = parse_uniprot_synonyms(organisms)%0A # ... except we need to deal with a special case in which the first%0A # organism name has a strain name in parantheses after it, and make sure%0A # that the strain name becomes part of the first synonym.%0A if len(organism_synonyms) %3E= 2 and %5C%0A organism_synonyms%5B1%5D.startswith('strain'):%0A organism_synonyms%5B0%5D = '%25s (%25s)' %25 (organism_synonyms%5B0%5D,%0A organism_synonyms%5B1%5D)%0A organism_synonyms = %5Borganism_synonyms%5B0%5D%5D + organism_synonyms%5B2:%5D%0A # We now take each gene synonym and each organism synonym and create all%0A # combinations of these as entries.%0A entries = %5B%5D%0A for gene, organism in itertools.product(gene_synonyms + protein_synonyms,%0A organism_synonyms):%0A # We skip synonyms that are more than 5 words in length (consistent%0A # with original KB construction).%0A if len(gene.split(' ')) %3E 5:%0A continue%0A entries.append((gene, entry, organism))%0A return entries%0A%0A%0Aif __name__ == '__main__':%0A if not os.path.exists('uniprot_entries.tsv'):%0A res = requests.get(uniprot_url, params=params)%0A res.raise_for_status()%0A with open('uniprot_entries.tsv', 'w') as fh:%0A fh.write(res.text)%0A processed_entries = %5B%5D%0A with open('uniprot_entries.tsv', 'r') as fh:%0A reader = csv.reader(fh, delimiter='%5Ct')%0A next(reader)%0A for row in reader:%0A processed_entries += process_row(row)%0A # We sort the entries first by the synonym but in a way that special%0A # characters and capitalization is ignored, then sort by ID and then%0A # by organism.%0A processed_entries = sorted(processed_entries,%0A key=lambda x: (re.sub('%5B%5EA-Za-z0-9%5D', '',%0A x%5B0%5D).lower(), x%5B1%5D,%0A x%5B2%5D))%0A with open('kb/uniprot-proteins.tsv.update', 'w') as fh:%0A writer = csv.writer(fh, delimiter='%5Ct')%0A for entry in processed_entries:%0A writer.writerow(entry)%0A
|
|
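The expansion step at the heart of `process_row` is an `itertools.product` over synonym lists; a tiny demonstration (the synonym lists are illustrative; P38398 is UniProt's reviewed human BRCA1 entry):
```python
import itertools

gene_synonyms = ['BRCA1', 'Breast cancer type 1 susceptibility protein']
organism_synonyms = ['Homo sapiens (Human)']

# Every (synonym, organism) pair becomes one KB entry for the same accession.
entries = [(synonym, 'P38398', organism)
           for synonym, organism in itertools.product(gene_synonyms,
                                                      organism_synonyms)]
for entry in entries:
    print(entry)
```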
aeadfbd4ae1f915291328f040cda54f309743024
|
Add main application code
|
oline-gangnam-style.py
|
oline-gangnam-style.py
|
Python
| 0.000001 |
@@ -0,0 +1,480 @@
+from jinja2 import Environment, FileSystemLoader%0A%0Aimport json%0Aimport os%0Aimport sys%0A%0Aenv = Environment(loader=FileSystemLoader(%22.%22))%0Atemplate = env.get_template('ircd.conf.jinja')%0A%0Aconfig = %7B%7D%0A%0Awith open(sys.argv%5B1%5D if len(sys.argv) %3E 1 else %22config.json%22, %22r%22) as fin:%0A config = json.loads(fin.read())%0A%0Anetwork = config%5B%22network%22%5D%0A%0Afor server in config%5B%22servers%22%5D:%0A with open(%22confs/%22 + server%5B%22name%22%5D+%22.conf%22, %22w%22) as fout:%0A fout.write(template.render(**locals()))%0A%0A
|
|
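The generator reads `config.json`, but the record does not include one; a hypothetical minimal config, limited to the keys the script actually touches (`network`, `servers`, `name`):
```python
import json

# Hypothetical config for the generator above; anything beyond these keys
# is up to the ircd.conf.jinja template, which is not shown in this record.
config = {
    "network": "ExampleNet",
    "servers": [
        {"name": "hub"},
        {"name": "leaf1"},
    ],
}

with open("config.json", "w") as fout:
    fout.write(json.dumps(config, indent=2))
```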
7a6b5396ce760eaa206bfb9b556a374c9c17f397
|
Add DecisionTree estimator.
|
bike-sharing/2-decision-tree.py
|
bike-sharing/2-decision-tree.py
|
Python
| 0 |
@@ -0,0 +1,2879 @@
+import math%0Aimport argparse%0Afrom datetime import datetime%0Aimport numpy as np%0Afrom sklearn import cross_validation%0Afrom sklearn import tree%0Afrom sklearn import metrics%0A%0Adef load_data(path, **kwargs):%0A return np.loadtxt(path, **kwargs)%0A%0Adef save_data(path, data, **kwargs):%0A np.savetxt(path, data, **kwargs)%0A%0Adef hour_from_dt_string(dt_string):%0A return datetime.strptime(dt_string, '%25Y-%25m-%25d %25H:%25M:%25S').hour%0A%0Adef preprocessing(X, y):%0A is_seasons = np.empty((X.shape%5B0%5D, 4))%0A return X, y%0A%0Adef cv(estimator, X, y):%0A k_fold = cross_validation.KFold(n=len(train_dataset), n_folds=10,%0A indices=True)%0A a = 0.0%0A for train_idx, test_idx in k_fold:%0A r = estimator.fit(X%5Btrain_idx%5D, y%5Btrain_idx%5D).predict(X%5Btest_idx%5D)%0A r = np.where(r %3E 0, r, 0).astype(np.int)%0A s = math.sqrt(metrics.mean_squared_error(np.log(y%5Btest_idx%5D + 1),%0A np.log(r + 1.0)))%0A a += s%0A print 'Score: %7B:.4f%7D'.format(s)%0A print 'Average score: %7B:.4f%7D'.format(a/len(k_fold))%0A%0Adef loss_func(y_real, y_predicted):%0A return math.sqrt(metrics.mean_squared_error(np.log(y_real + 1), np.log(y_predicted + 1)))%0A%0Aif __name__ == '__main__':%0A # Command arguments%0A parser = argparse.ArgumentParser(description='bike-sharing estimator')%0A parser.add_argument('--cv', dest='cv', action='store_const', const=True,%0A default=False, help='Do cross validation')%0A parser.add_argument('--no-test', dest='out', action='store_const',%0A const=False, default=True, help='No test dataset')%0A args = parser.parse_args()%0A%0A # Input%0A common_input_options = %7B'delimiter': ',', 'skiprows': 1,%0A 'converters': %7B0: hour_from_dt_string%7D %7D%0A train_dataset = load_data('data/train.csv', usecols=(0,1,2,3,4,5,6,7,8,11),%0A **common_input_options)%0A test_dataset = load_data('data/test.csv', usecols=(0,1,2,3,4,5,6,7,8),%0A **common_input_options)%0A common_input_options%5B'converters'%5D = %7B%7D%0A out_column = load_data('data/test.csv', usecols=(0,), dtype=str,%0A **common_input_options)%0A%0A # Data preprocessing%0A X_train, y_train = preprocessing(train_dataset%5B:,:-1%5D, train_dataset%5B:,-1%5D)%0A X_test, y_test = preprocessing(test_dataset, None)%0A%0A # The interesting part%0A estimator = tree.DecisionTreeRegressor(max_depth=12)%0A if args.cv:%0A cv(estimator, X_train, y_train)%0A if args.out:%0A results = estimator.fit(X_train, y_train).predict(X_test)%0A results = np.where(results %3E 0, results, 0.01).astype(np.int)%0A%0A # Output%0A save_data('data/out.csv', np.column_stack((out_column.T, results.T)),%0A delimiter=',', header='datetime,count', fmt=('%25s', '%25s'),%0A comments='')%0A
|
|
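The score computed in `cv` and `loss_func` is RMSLE; a standalone check of the same formula (the example values are made up):
```python
import math

import numpy as np
from sklearn import metrics

y_real = np.array([10, 100, 1000])
y_predicted = np.array([12, 90, 1100])

# Root mean squared logarithmic error, matching the record's loss_func.
rmsle = math.sqrt(metrics.mean_squared_error(np.log(y_real + 1),
                                             np.log(y_predicted + 1)))
print(round(rmsle, 4))
```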
c58c58d5bf1394e04e30f5eeb298818558be027f
|
Add directory for tests of rules removin
|
tests/rules_tests/clearAfterNonTermRemove/__init__.py
|
tests/rules_tests/clearAfterNonTermRemove/__init__.py
|
Python
| 0 |
@@ -0,0 +1,111 @@
+#!/usr/bin/env python%0A%22%22%22%0A:Author Patrik Valkovic%0A:Created 17.08.2017 22:06%0A:Licence GNUv3%0APart of grammpy%0A%0A%22%22%22
|
|
9a7091c1502b9758c1492a1c99ace7d4ad74026c
|
move integer_divions to syntax
|
tests/pyccel/parser/scripts/syntax/integer_division.py
|
tests/pyccel/parser/scripts/syntax/integer_division.py
|
Python
| 0.000022 |
@@ -0,0 +1,59 @@
+5 // 3%0Aa // 3%0A5 // b%0Aa // b%0A%0A5.// 3.%0Aa // 3.%0A5.// b%0Aa // b%0A
|
|
17e0b81463e3c4c9b62f95f40912b270652a8e63
|
Create new package (#6376)
|
var/spack/repos/builtin/packages/r-ggridges/package.py
|
var/spack/repos/builtin/packages/r-ggridges/package.py
|
Python
| 0 |
@@ -0,0 +1,1825 @@
+##############################################################################%0A# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.%0A# Produced at the Lawrence Livermore National Laboratory.%0A#%0A# This file is part of Spack.%0A# Created by Todd Gamblin, [email protected], All rights reserved.%0A# LLNL-CODE-647188%0A#%0A# For details, see https://github.com/spack/spack%0A# Please also see the NOTICE and LICENSE files for our notice and the LGPL.%0A#%0A# This program is free software; you can redistribute it and/or modify%0A# it under the terms of the GNU Lesser General Public License (as%0A# published by the Free Software Foundation) version 2.1, February 1999.%0A#%0A# This program is distributed in the hope that it will be useful, but%0A# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF%0A# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and%0A# conditions of the GNU Lesser General Public License for more details.%0A#%0A# You should have received a copy of the GNU Lesser General Public%0A# License along with this program; if not, write to the Free Software%0A# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA%0A##############################################################################%0Afrom spack import *%0A%0A%0Aclass RGgridges(RPackage):%0A %22%22%22Ridgeline plots provide a convenient way of visualizing changes in%0A distributions over time or space.%22%22%22%0A%0A homepage = %22https://cran.r-project.org/web/packages/ggridges/index.html%22%0A url = %22https://cran.r-project.org/src/contrib/ggridges_0.4.1.tar.gz%22%0A list_url = %22https://cran.rstudio.com/src/contrib/Archive/ggridges%22%0A%0A version('0.4.1', '21d53b3f7263beb17f629f0ebfb7b67a')%0A version('0.4.0', 'da94ed1ee856a7fa5fb87712c84ec4c9')%0A%0A depends_on('[email protected]:3.4.9')%0A depends_on('r-ggplot2', type=('build', 'run'))%0A
|