blob_id (string, 40 chars) | directory_id (string, 40 chars) | path (string, 3–288 chars) | content_id (string, 40 chars) | detected_licenses (list, 0–112 items) | license_type (string, 2 classes) | repo_name (string, 5–115 chars) | snapshot_id (string, 40 chars) | revision_id (string, 40 chars) | branch_name (string, 684 classes) | visit_date (timestamp[us], 2015-08-06 10:31:46 to 2023-09-06 10:44:38) | revision_date (timestamp[us], 1970-01-01 02:38:32 to 2037-05-03 13:00:00) | committer_date (timestamp[us], 1970-01-01 02:38:32 to 2023-09-06 01:08:06) | github_id (int64, 4.92k to 681M, nullable) | star_events_count (int64, 0 to 209k) | fork_events_count (int64, 0 to 110k) | gha_license_id (string, 22 classes) | gha_event_created_at (timestamp[us], 2012-06-04 01:52:49 to 2023-09-14 21:59:50, nullable) | gha_created_at (timestamp[us], 2008-05-22 07:58:19 to 2023-08-21 12:35:19, nullable) | gha_language (string, 147 classes) | src_encoding (string, 25 classes) | language (string, 1 class) | is_vendor (bool) | is_generated (bool) | length_bytes (int64, 128 to 12.7k) | extension (string, 142 classes) | content (string, 128–8.19k chars) | authors (list, 1 item) | author_id (string, 1–132 chars) |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
70001b56d298f5befbbcdf00e94f61e060b46a96
|
21b0483666d8e5cbdc4a911bda93e1a3392c40ec
|
/lib/initialConditions.py
|
1f3e0b6022939d9fa360915afc8482217719f223
|
[] |
no_license
|
Christopher-Bradshaw/fluids_final
|
0541111323c640b40ee86f970acb896689bbb867
|
2e33b2ef04fdbd40760c1804a02c86c93c5fd926
|
refs/heads/master
| 2021-08-24T03:13:43.132099 | 2017-12-07T20:33:31 | 2017-12-07T20:33:31 | 113,245,782 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,193 |
py
|
import numpy as np
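# ideal-gas closure: P = (gamma - 1) * e / V, with e the zone's internal
# energy and V its specific volume (1/rho)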
def getPressure(energy, volume, gamma):
return energy * (gamma - 1) / volume
def getFlatConfig():
dx = 1
width = 5
gamma = 5/3
# Densities
initialRho = np.ones(width) # this will never change
summedInitialRho = np.array([
initialRho[i] + initialRho[i+1] for i in range(len(initialRho)-1)
])
# The grid
grid = np.zeros(width + 1, dtype=[
("position", "float64"),
("velocity", "float64"),
])
grid["position"] = np.arange(0, width + 1, dx)
grid["velocity"] = np.zeros_like(grid["position"])
grid["velocity"][0] = 0
grid["velocity"][-1] = 0
# Things defined in the gaps
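# (staggered mesh: positions and velocities live on the grid nodes above;
# volume, viscosity, energy and pressure live in the cells between nodes)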
gaps = np.zeros(width, dtype=[
("volume", "float64"),
("viscocity", "float64"),
("energy", "float64"),
("pressure", "float64"),
])
gaps["volume"] = 1/initialRho
gaps["viscocity"] = np.zeros(width)
gaps["energy"] = np.ones(width)
gaps["pressure"] = getPressure(gaps["energy"], gaps["volume"], gamma)
return {
"grid": grid,
"gaps": gaps,
"initialRho": initialRho,
"summedInitialRho": summedInitialRho,
"dx": dx,
"width": width,
"gamma": gamma,
}
def getVelocityConfig():
config = getFlatConfig()
config["grid"]["velocity"][1:-1] += 0.01
return config
def getShockTubeConfig():
dx = 1
width = 100
gamma = 5/3
# Densities
initialRho = np.ones(width) # this will never change
# initialRho[3] = 1.1
initialRho[:50] = 1.1
summedInitialRho = np.array([
initialRho[i] + initialRho[i+1] for i in range(len(initialRho)-1)
])
# The grid
grid = np.zeros(width + 1, dtype=[
("position", "float64"),
("velocity", "float64"),
])
grid["position"] = np.arange(0, width + 1, dx)
grid["velocity"] = np.zeros_like(grid["position"])
grid["velocity"][0] = 0
grid["velocity"][-1] = 0
# Things defined in the gaps
gaps = np.zeros(width, dtype=[
("volume", "float64"),
("viscocity", "float64"),
("energy", "float64"),
("pressure", "float64"),
])
gaps["volume"] = 1/initialRho
gaps["viscocity"] = np.zeros(width) # should we / can we give initial viscocity?
gaps["energy"] = 1 * initialRho
gaps["pressure"] = getPressure(gaps["energy"], gaps["volume"], gamma)
return {
"grid": grid,
"gaps": gaps,
"initialRho": initialRho,
"summedInitialRho": summedInitialRho,
"dx": dx,
"width": width,
"gamma": gamma,
}
def getExpansionConfig():
dx = 1
width = 100
gamma = 5/3
# Densities
initialRho = np.ones(width) # this will never change
# initialRho[3] = 1.1
initialRho[50:] = 0.1
summedInitialRho = np.array([
initialRho[i] + initialRho[i+1] for i in range(len(initialRho)-1)
])
# The grid
grid = np.zeros(width + 1, dtype=[
("position", "float64"),
("velocity", "float64"),
])
grid["position"] = np.arange(0, width + 1, dx)
grid["velocity"] = np.zeros_like(grid["position"])
grid["velocity"][0] = 0
grid["velocity"][-1] = 0
# Things defined in the gaps
gaps = np.zeros(width, dtype=[
("volume", "float64"),
("viscocity", "float64"),
("energy", "float64"),
("pressure", "float64"),
])
gaps["volume"] = 1/initialRho
gaps["viscocity"] = np.zeros(width) # should we / can we give initial viscocity?
gaps["energy"] = 1 * initialRho
gaps["pressure"] = getPressure(gaps["energy"], gaps["volume"], gamma)
return {
"grid": grid,
"gaps": gaps,
"initialRho": initialRho,
"summedInitialRho": summedInitialRho,
"dx": dx,
"width": width,
"gamma": gamma,
}
def getSedovConfig():
dx = 1
width = 100
gamma = 5/3
# Densities
initialRho = np.ones(width) # this will never change
summedInitialRho = np.array([
initialRho[i] + initialRho[i+1] for i in range(len(initialRho)-1)
])
# The grid
grid = np.zeros(width + 1, dtype=[
("position", "float64"),
("velocity", "float64"),
])
grid["position"] = np.arange(0, width + 1, dx)
grid["velocity"] = np.zeros_like(grid["position"])
grid["velocity"][0] = 0
grid["velocity"][-1] = 0
# Things defined in the gaps
gaps = np.zeros(width, dtype=[
("volume", "float64"),
("viscocity", "float64"),
("energy", "float64"),
("pressure", "float64"),
])
gaps["volume"] = 1/initialRho
gaps["viscocity"] = np.zeros(width) # should we / can we give initial viscocity?
gaps["energy"] = 1 * initialRho
gaps["energy"][0] *= 2
gaps["pressure"] = getPressure(gaps["energy"], gaps["volume"], gamma)
return {
"grid": grid,
"gaps": gaps,
"initialRho": initialRho,
"summedInitialRho": summedInitialRho,
"dx": dx,
"width": width,
"gamma": gamma,
}
|
[
"[email protected]"
] | |
40b4fc7442a3dca396d30cd384a4df70fbca793d
|
a6d8465aed280c36fb7129e1fa762535bae19941
|
/embroidery365/builder/migrations/0015_auto_20171107_1318.py
|
e8fb24e2d785b3b21a4799b1ab238de547240bcb
|
[] |
no_license
|
rahuezo/365digitizing_and_embroidery
|
c61c53f567e73163a67d3fd568a20551a3681ccd
|
41a22b6ff8bd83238219f2d34ce13b5a8ef9bb57
|
refs/heads/master
| 2020-09-02T11:59:07.702947 | 2017-11-11T02:40:01 | 2017-11-11T02:40:01 | 98,377,801 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 493 |
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-11-07 21:18
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('builder', '0014_order_extra_details'),
]
operations = [
migrations.AlterField(
model_name='order',
name='extra_details',
field=models.TextField(blank=True, default='No Specifications Included'),
),
]
|
[
"[email protected]"
] | |
2ef93f787a9d83908066ad2e141bcdc977dc2348
|
09e57dd1374713f06b70d7b37a580130d9bbab0d
|
/data/cirq_new/cirq_program/startCirq_pragma99.py
|
e33f306588051a905793954fdd141d45e8a365b0
|
[
"BSD-3-Clause"
] |
permissive
|
UCLA-SEAL/QDiff
|
ad53650034897abb5941e74539e3aee8edb600ab
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
refs/heads/main
| 2023-08-05T04:52:24.961998 | 2021-09-19T02:56:16 | 2021-09-19T02:56:16 | 405,159,939 | 2 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,468 |
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 5/15/20 4:49 PM
# @File : grover.py
# qubit number=4
# total number=11
import cirq
import cirq.google as cg
from typing import Optional
import sys
from math import log2
import numpy as np
class Opty(cirq.PointOptimizer):
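# a PointOptimizer pass: each CZPowGate op is rewritten as a plain CZ followed
# by two X layers on the same qubits (the two X layers cancel each other)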
def optimization_at(
self,
circuit: 'cirq.Circuit',
index: int,
op: 'cirq.Operation'
) -> Optional[cirq.PointOptimizationSummary]:
if (isinstance(op, cirq.ops.GateOperation) and isinstance(op.gate, cirq.CZPowGate)):
return cirq.PointOptimizationSummary(
clear_span=1,
clear_qubits=op.qubits,
new_operations=[
cirq.CZ(*op.qubits),
cirq.X.on_each(*op.qubits),
cirq.X.on_each(*op.qubits),
]
)
#thatsNoCode
def make_circuit(n: int, input_qubit):
c = cirq.Circuit() # circuit begin
c.append(cirq.H.on(input_qubit[0])) # number=1
c.append(cirq.H.on(input_qubit[1])) # number=2
c.append(cirq.H.on(input_qubit[2])) # number=3
c.append(cirq.H.on(input_qubit[3])) # number=4
c.append(cirq.SWAP.on(input_qubit[1],input_qubit[0])) # number=5
c.append(cirq.SWAP.on(input_qubit[1],input_qubit[0])) # number=6
c.append(cirq.CNOT.on(input_qubit[3],input_qubit[0])) # number=7
c.append(cirq.CNOT.on(input_qubit[3],input_qubit[0])) # number=8
c.append(cirq.SWAP.on(input_qubit[1],input_qubit[0])) # number=9
c.append(cirq.SWAP.on(input_qubit[1],input_qubit[0])) # number=10
# circuit end
c.append(cirq.measure(*input_qubit, key='result'))
return c
def bitstring(bits):
return ''.join(str(int(b)) for b in bits)
if __name__ == '__main__':
qubit_count = 4
input_qubits = [cirq.GridQubit(i, 0) for i in range(qubit_count)]
circuit = make_circuit(qubit_count,input_qubits)
circuit = cg.optimized_for_sycamore(circuit, optimizer_type='sqrt_iswap')
circuit_sample_count =2000
simulator = cirq.Simulator()
result = simulator.run(circuit, repetitions=circuit_sample_count)
frequencies = result.histogram(key='result', fold_func=bitstring)
writefile = open("../data/startCirq_pragma99.csv","w+")
print(format(frequencies),file=writefile)
print("results end", file=writefile)
print(circuit.__len__(), file=writefile)
print(circuit,file=writefile)
writefile.close()
|
[
"[email protected]"
] | |
b89e6024ba7fcd2978bed43342381eaea6996fb3
|
5ebfced62f59052560c6adf89bfd2f249877cc75
|
/webcomics/series/urls.py
|
46b8c581e3ef21673277aa776913f4bad5bfbd5c
|
[] |
no_license
|
lumenwrites/webcomics
|
537c9bd0337ebd087dacdee7b72797b658481f8c
|
34200eaf19021147c561bf140a685e398156589e
|
refs/heads/master
| 2021-06-10T17:12:50.317113 | 2017-02-19T09:28:57 | 2017-02-19T09:28:57 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 882 |
py
|
from django.conf.urls import url
from . import views
from posts.views import SeriesFeed
urlpatterns = [
# url(r'^$', views.BrowseView.as_view(), name='post-list'),
url(r'^create-series/$', views.SeriesCreate.as_view(), name='series-create'),
url(r'^series/(?P<slug>[^\.]+)/edit$', views.SeriesEdit.as_view()),
url(r'^series/(?P<slug>[^\.]+)/delete$', views.series_delete),
# url(r'^browse/$', views.BrowseView.as_view(), name='post-list'),
url(r'^series/(?P<slug>[^\.]+)/subscribe', views.subscribe),
url(r'^series/(?P<slug>[^\.]+)/unsubscribe', views.unsubscribe),
url(r'^series/(?P<slug>[^\.]+)/feed/atom/$', SeriesFeed()),
url(r'^series/(?P<slug>[^\.]+)$', views.SeriesView.as_view(), name='series-detail'),
url(r'^orangemind$', views.SeriesView.as_view(), {'slug': 'orangemind'}, name='series-detail'),
]
|
[
"[email protected]"
] | |
daae7ab1b7ac6d998eca5a559c61ec45f2d7095e
|
25985aeeee54373d26a164e4cc6a014770e3ebf3
|
/windows/w3af/w3af/core/data/nltk_wrapper/.svn/text-base/nltk_wrapper.py.svn-base
|
3f652ee04b353c653d75d4761f71621362d73520
|
[] |
no_license
|
sui84/tools
|
4b750dae90940fbe3a226cba72dc071d8fb88b7c
|
651cc08eb50199ce1044c684dbf714ea26df6432
|
refs/heads/master
| 2021-01-22T19:22:26.964580 | 2017-08-20T15:23:38 | 2017-08-20T15:23:38 | 100,774,276 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,047 |
'''
nltk_wrapper.py
Copyright 2011 Andres Riancho
This file is part of w3af, w3af.sourceforge.net .
w3af is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation version 2 of the License.
w3af is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with w3af; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
'''
from nltk.corpus.util import LazyCorpusLoader
from nltk.data import ZipFilePathPointer
from nltk.corpus.reader.wordnet import WordNetCorpusReader
import os
class wordnet_loader(LazyCorpusLoader):
def __init__(self, name, reader_cls, *args, **kwargs):
from nltk.corpus.reader.api import CorpusReader
assert issubclass(reader_cls, CorpusReader)
self.__name = self.__name__ = name
self.__reader_cls = reader_cls
self.__args = args
self.__kwargs = kwargs
def __load(self):
# Find the corpus root directory.
zip_location = os.path.join('plugins', 'discovery', 'wordnet','wordnet.zip')
root = ZipFilePathPointer(zip_location, 'wordnet/')
# Load the corpus.
corpus = self.__reader_cls(root, *self.__args, **self.__kwargs)
# This is where the magic happens! Transform ourselves into
# the corpus by modifying our own __dict__ and __class__ to
# match that of the corpus.
self.__dict__ = corpus.__dict__
self.__class__ = corpus.__class__
def __getattr__(self, attr):
self.__load()
# This looks circular, but it's not, since __load() changes our
# __class__ to something new:
return getattr(self, attr)
wn = wordnet_loader('wordnet', WordNetCorpusReader)
|
[
"[email protected]"
] | ||
214374daa226d99e5073ab7b542cbb0a073ca027
|
fce6762c17fc81009af226f71ca32d2dc8227beb
|
/Section 4 Matrix multiplications.py
|
952e60e048ad9f035f59866a9b471ae7989ef640
|
[] |
no_license
|
kuangzijian/Linear-Algebra
|
3f9599ef282283dfc6bd49c0c97327a8fa31e671
|
94a872502ff570f04d61cb7bf1db653681f403c3
|
refs/heads/master
| 2022-11-04T07:05:55.272865 | 2019-07-31T05:32:42 | 2019-07-31T05:32:42 | 196,622,918 | 0 | 1 | null | 2022-10-29T19:00:25 | 2019-07-12T17:57:11 |
Python
|
UTF-8
|
Python
| false | false | 7,048 |
py
|
import numpy as np
import matplotlib.pyplot as plt
import math
from sympy import *
#Standard matrix multiplication, parts 1 & 2
## rules for multiplication validity
m = 4
n = 3
k = 6
# make some matrices
A = np.random.randn(m,n)
B = np.random.randn(n,k)
C = np.random.randn(m,k)
# test which multiplications are valid.
np.matmul(A,B)
#np.matmul(A,A)
np.matmul(np.matrix.transpose(A),C)
np.matmul(B,np.matrix.transpose(B))
np.matmul(np.matrix.transpose(B),B)
#np.matmul(B,C)
#np.matmul(C,B)
#np.matmul(np.matrix.transpose(C),B)
np.matmul(C,np.matrix.transpose(B))
#Code challenge: matrix multiplication by layering
A = np.abs(np.round(5*np.random.randn(4,2)))
B = np.abs(np.round(5*np.random.randn(2,3)))
print(A)
print(B)
r1 = 0
for i in range(0, len(B)):
r1 = r1 + np.outer(A[:,i], B[i])
print(A[:,i])
print(B[i])
print(r1)
print(np.matmul(A, B))
#Order-of-operations on matrices
n = 2
L = np.random.randn(n,n)
I = np.random.randn(n,n)
V = np.random.randn(n,n)
E = np.random.randn(n,n)
# result of "forward" multiplication and then transpose
res1 = np.matrix.transpose( L @ I @ V @ E )
# result of "flipped" multiplication of transposed matrices
res2 = np.matrix.transpose(E) @ np.matrix.transpose(V) @ np.matrix.transpose(I) @ np.matrix.transpose(L)
# test equality by subtracting (ignore possible computer rounding errors)
res1-res2
#Matrix-vector multiplication
# number of elements
m = 4
# create matrices
N = np.round( 10*np.random.randn(m,m) )
S = np.round( np.matrix.transpose(N)*N/m**2 ) # scaled symmetric
# and vector
w = np.array([-1, 0, 1, 2])
print(S)
print(w)
print(N)
print("with symmetric matrix")
# NOTE: The @ symbol for matrix multiplication is relatively new to Python, a@b is the same as numpy.dot or a.dot(b)
print(S@w) # 1
print(np.matrix.transpose(S@w)) # 2
print(w@S) # 3
print(np.matrix.transpose(w)@np.matrix.transpose(S)) # 4
print(np.matrix.transpose(w)@S) # 5
print("with nonsymmetric matrix")
print(N@w) # 1
print(np.matrix.transpose(N@w)) # 2
print(w@N) # 3
print(np.matrix.transpose(w)@np.matrix.transpose(N)) # 4
print(np.matrix.transpose(w)@N) # 5
#2D transformation matrices
# 2D input vector
v = np.array([ 3, -2 ])
# 2x2 transformation matrix
A = np.array([ [1,-1], [2,1] ])
# output vector is Av (convert v to column)
w = [email protected](v)
# plot them
plt.plot([0,v[0]],[0,v[1]],label='v')
plt.plot([0,w[0]],[0,w[1]],label='Av')
plt.grid()
plt.axis((-6, 6, -6, 6))
plt.legend()
plt.title('Rotation + stretching')
plt.show()
## pure rotation
# 2D input vector
v = np.array([ 3, -2 ])
# 2x2 rotation matrix
th = np.pi/30
A = np.array([ [math.cos(th),-math.sin(th)], [math.sin(th),math.cos(th)] ])
# output vector is Av (convert v to column)
w = [email protected](v)
# plot them
plt.plot([0,v[0]],[0,v[1]],label='v')
plt.plot([0,w[0]],[0,w[1]],label='Av')
plt.grid()
plt.axis((-4, 4, -4, 4))
plt.legend()
plt.title('Pure rotation')
plt.show()
#code challenge: Pure and impure rotation matrices
v = np.array([ 3, -2 ])
# 2x2 rotation matrix
ths = np.linspace(0, 2*np.pi,100)
vecmags = np.zeros([len(ths),2])
for i in range(0, len(ths)):
th = ths[i]
#impure transformation matrix
A1 = np.array([ [2*math.cos(th),-math.sin(th)], [math.sin(th),math.cos(th)] ])
#pure transformation matrix
A2 = np.array([ [math.cos(th),-math.sin(th)], [math.sin(th),math.cos(th)] ])
# output vector is Av (convert v to column)
vecmags[i, 0] = np.linalg.norm(A1 @ v)
vecmags[i, 1] = np.linalg.norm(A2 @ v)
# plot them
plt.plot(ths,vecmags)
plt.grid()
plt.legend(["inpure transformation","pure transformation matrix"])
plt.title('Pure and impure rotation matrices')
plt.show()
#Additive and multiplicative matrix identities
# size of matrices
n = 4
A = np.round( 10*np.random.randn(n,n) )
I = np.eye(n,n)
Z = np.zeros((n,n))
# test both identities
np.array_equal( A@I , A )
np.array_equal( A , A@I )
np.array_equal( A , A+I )
np.array_equal( A , A+I )
np.array_equal( A+Z , A@I )
#Additive and multiplicative symmetric matrices
## the additive method
# specify sizes
m = 5
n = 5
# create matrices
A = np.random.randn(m,n)
S = ( A + np.matrix.transpose(A) )/2
# A symmetric matrix minus its transpose should be all zeros
print( S-np.matrix.transpose(S) )
## the multiplicative method
# specify sizes
m = 5
n = 3
# create matrices
A = np.random.randn(m,n)
AtA = np.matrix.transpose(A)@A
AAt = [email protected](A)
# first, show that they are square
print( AtA.shape )
print( AAt.shape )
# next, show that they are symmetric
print( AtA - np.matrix.transpose(AtA) )
print( AAt - np.matrix.transpose(AAt) )
#Element-wise (Hadamard) multiplication
# any matrix sizes
m = 13
n = 2
# ...but the two matrices must be the same size
A = np.random.randn(m,n)
B = np.random.randn(m,n)
# note the different syntax compared to @ for matrix multiplication
C = np.multiply( A,B )
print(C)
#code challenge: Symmetry of combined symmetric matrices
print("Create two symmetric matrices")
S = np.round( 2*np.random.randn(3,2) )
S1 = S.dot(np.transpose(S))
print(S1)
S = np.round( 2*np.random.randn(3,2) )
S2 = S.dot(np.transpose(S))
print(S2)
print("compute sum, multiplication, and Hadamard multiplication of the two matrices")
#determine whether the result is still symmetric
print(S1+S2)
print(S1.dot(S2))
print(S1*S2)
#Multiplication of two symmetric matrices
a,b,c,d,e,f,g,h,k,l,m,n,o,p,q,r,s,t,u = symbols('a b c d e f g h k l m n o p q r s t u', real=True)
# symmetric and constant-diagonal matrices
A = Matrix([ [a,b,c,d],
[b,a,e,f],
[c,e,a,h],
[d,f,h,a] ])
B = Matrix([ [l,m,n,o],
[m,l,q,r],
[n,q,l,t],
[o,r,t,l] ])
# confirmation that A and B are symmetric
print( A - A.transpose() )
print( B - B.transpose() )
# ... and constant diagonal
for i in range(0,np.size(A,0)):
print( A[i,i] )
for i in range(0,np.size(B,0)):
print( B[i,i] )
# but AB neq (AB)'
A@B - (A@B).T
# maybe for a submatrix?
n = 3
A1 = A[ 0:n,0:n ]
B1 = B[ 0:n,0:n ]
A1@B1 - (A1*B1).T
#Frobenius dot-product
# any matrix sizes
m = 9
n = 4
# but the two matrices must be the same size
A = np.random.randn(m,n)
B = np.random.randn(m,n)
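# identity demonstrated below: <A,B>_F = sum_ij A_ij*B_ij = vec(A).vec(B) = trace(A^T @ B)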
# first vectorize, then vector-dot-product
Av = np.reshape( A,m*n, order='F' ) # order='F' reshapes by columns instead of by rows
Bv = np.reshape( B,m*n, order='F' )
frob_dp = np.dot( Av,Bv )
# trace method
frob_dp2 = np.trace( np.matrix.transpose(A)@B )
print(frob_dp2)
print(frob_dp)
# matrix norm
Anorm = np.linalg.norm(A,'fro')
Anorm2 = np.sqrt( np.trace( np.matrix.transpose(A)@A ) )
print(Anorm)
print(Anorm2)
#Code challenge: standard and Hadamard multiplication for diagonal matrices
#Create two matrices 4x4 full and diagonal
D1 = np.random.randn(4,4)
D2 = np.diag([4,5,6,7])
#multiply each matrix by itself (A*A): standard and Hadamard multiplications
RS1 = D1.dot(D1)
RS2 = D2.dot(D2)
RH1 = D1*D1
RH2 = D2*D2
print(D1)
print(RS1)
print(RH1)
print(D2)
print(RS2)
print(RH2)
|
[
"[email protected]"
] | |
3482c862a6405f9d46af7e9c72673545f05201a1
|
eb8b5cde971573668800146b3632e43ed6e493d2
|
/python/oneflow/test/modules/test_instruction_replay.py
|
e9fbd188d1ecc88127be665d92a6ea691ab0065a
|
[
"Apache-2.0"
] |
permissive
|
big-data-ai/oneflow
|
16f167f7fb7fca2ce527d6e3383c577a90829e8a
|
b1c67df42fb9c5ab1335008441b0273272d7128d
|
refs/heads/master
| 2023-07-08T21:21:41.136387 | 2021-08-21T11:31:14 | 2021-08-21T11:31:14 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,832 |
py
|
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
from collections import OrderedDict
import numpy as np
from test_util import GenArgList
import oneflow
import oneflow as flow
import oneflow.unittest
def _test_instruction_replay_impl(test_case, device, shape):
x = flow.Tensor(np.random.rand(*shape), device=flow.device(device))
y = flow.Tensor(np.random.rand(*shape), device=flow.device(device))
oneflow._oneflow_internal.debug.start_recording_instructions()
z = x + y
oneflow._oneflow_internal.debug.end_recording_instructions()
test_case.assertTrue(np.allclose(z.numpy(), x.numpy() + y.numpy(), 0.0001, 0.0001))
z.zeros_()
oneflow._oneflow_internal.debug.replay_instructions()
test_case.assertTrue(np.allclose(z.numpy(), x.numpy() + y.numpy(), 0.0001, 0.0001))
oneflow._oneflow_internal.debug.clear_recorded_instructions()
@flow.unittest.skip_unless_1n1d()
class TestIntructionReplay(flow.unittest.TestCase):
def test_instruction_replay(test_case):
arg_dict = OrderedDict()
arg_dict["device"] = ["cpu", "cuda"]
arg_dict["shape"] = [[2, 3], [1, 10]]
for arg in GenArgList(arg_dict):
_test_instruction_replay_impl(test_case, *arg)
if __name__ == "__main__":
unittest.main()
|
[
"[email protected]"
] | |
30b31dbb48ee318100dfe52ceb8b3bf19ac84ee9
|
9aab01a48d1af5c4f1889ae9d27940f8bc738d37
|
/Mindshare/project_management/cvr/tables.py
|
4143730b11b7ed81bf26920c54a9c284e43bd1ea
|
[] |
no_license
|
raveena17/workout-ex
|
274998170a3cfbf42bffe61d49fce8531eddc3f5
|
a9c652535f33d05199b3c5d26b72c721a822a2b7
|
refs/heads/master
| 2021-09-05T10:06:46.399468 | 2018-01-26T08:36:58 | 2018-01-26T08:36:58 | 119,025,925 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 521 |
py
|
# import django_tables2 as tables
# from .models import Cvr
# #class django_tables2.columns.LinkColumn(, urlconf=None, , kwargs=None, current_app=None, attrs=None, **extra)
# class CVRTable(tables.Table):
# id = tables.LinkColumn(viewname='edit_cvr', args=[tables.A('pk')])
# class Meta:
# model = Cvr
# exclude = ('comments', 'reason_for_visit', 'actions_taken_during_the_visit', 'next_plan_of_action',)
# # add class="paleblue" to <table> tag
# attrs = {'class': 'paleblue'}
|
[
"[email protected]"
] | |
6c79ae8cc7aed21c5f2b9410bcf90b219dedfe16
|
07af444dafa5bde373b0730e92d67e455d4ff4df
|
/SFData/StackOverflow/s36972087_ground_truth.py
|
79f82ae3f49c2bb32dc969c91d323ecc4f7a516f
|
[] |
no_license
|
tensfa/tensfa
|
9114595b58a2e989780af0c348afb89a2abb04b4
|
415dcfaec589b0b14c5b9864872c912f3851b383
|
refs/heads/main
| 2023-06-30T14:27:38.217089 | 2021-08-03T01:33:30 | 2021-08-03T01:33:30 | 368,465,614 | 2 | 3 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,008 |
py
|
import tensorflow as tf
import numpy as np
train_images = np.array(np.random.random((10, 19)), dtype=np.float32)
train_labels = np.random.randint(0, 2, 10, dtype=np.int32)
train_labels = np.eye(2)[train_labels]
sess = tf.InteractiveSession()
x = tf.placeholder(tf.float32, shape=[None, 19])
y_ = tf.placeholder(tf.float32, shape=[None, 2])
W = tf.Variable(tf.zeros([19,2]))
b = tf.Variable(tf.zeros([2]))
sess.run(tf.global_variables_initializer())
y = tf.nn.softmax(tf.matmul(x,W) + b)
cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y), reduction_indices=[1]))
train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)
start = 0
batch_1 = 50
end = 100
for i in range(1):
# batch = mnist.train.next_batch(50)
x1 = train_images[start:end]
y1 = train_labels[start:end]
start = start + batch_1
end = end + batch_1
x1 = np.reshape(x1, (-1, 19))
y1 = np.reshape(y1, (-1, 2))
train_step.run(feed_dict={x: np.expand_dims(x1[0], 0), y_: np.expand_dims(y1[0], 0)})
|
[
"[email protected]"
] | |
62a13abd4c0147da29cd785233f04f06aca6a23a
|
2a8abd5d6acdc260aff3639bce35ca1e688869e9
|
/telestream_cloud_qc_sdk/test/test_container_essence_consistency_test.py
|
a53e951acde1e1e1d545fa4c1388c5f5ecb32225
|
[
"MIT"
] |
permissive
|
Telestream/telestream-cloud-python-sdk
|
57dd2f0422c83531e213f48d87bc0c71f58b5872
|
ce0ad503299661a0f622661359367173c06889fc
|
refs/heads/master
| 2021-01-18T02:17:44.258254 | 2020-04-09T11:36:07 | 2020-04-09T11:36:07 | 49,494,916 | 0 | 0 |
MIT
| 2018-01-22T10:07:49 | 2016-01-12T11:10:56 |
Python
|
UTF-8
|
Python
| false | false | 1,600 |
py
|
# coding: utf-8
"""
Qc API
Qc API # noqa: E501
The version of the OpenAPI document: 3.0.0
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import datetime
import telestream_cloud_qc
from telestream_cloud_qc.models.container_essence_consistency_test import ContainerEssenceConsistencyTest # noqa: E501
from telestream_cloud_qc.rest import ApiException
class TestContainerEssenceConsistencyTest(unittest.TestCase):
"""ContainerEssenceConsistencyTest unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def make_instance(self, include_optional):
"""Test ContainerEssenceConsistencyTest
include_optional is a boolean: when False only required
params are included, when True both required and
optional params are included """
# model = telestream_cloud_qc.models.container_essence_consistency_test.ContainerEssenceConsistencyTest() # noqa: E501
if include_optional :
return ContainerEssenceConsistencyTest(
reject_on_error = True,
checked = True
)
else :
return ContainerEssenceConsistencyTest(
)
def testContainerEssenceConsistencyTest(self):
"""Test ContainerEssenceConsistencyTest"""
inst_req_only = self.make_instance(include_optional=False)
inst_req_and_optional = self.make_instance(include_optional=True)
if __name__ == '__main__':
unittest.main()
|
[
"[email protected]"
] | |
5c90209a2a85242d66565dc74c3d13c88a2f10b7
|
e7b7505c084e2c2608cbda472bc193d4a0153248
|
/DailyChallenge/LC_126.py
|
6a71e599fd82c9936054243d450e4e182fae01a5
|
[] |
no_license
|
Taoge123/OptimizedLeetcode
|
8e5c1cd07904dfce1248bc3e3f960d2f48057a5d
|
3e50f6a936b98ad75c47d7c1719e69163c648235
|
refs/heads/master
| 2023-02-27T21:13:40.450089 | 2023-02-07T04:11:09 | 2023-02-07T04:11:09 | 170,044,224 | 9 | 3 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,181 |
py
|
import collections
import string
class Solution:
def findLadders(self, beginWord: str, endWord: str, wordList):
#we don't need a visited set: words reached in a layer are removed from wordList once the layer is processed
wordList = set(wordList)
res = []
lowercase = string.ascii_lowercase
#layer is similar to queue in 127
layer = collections.defaultdict(list)
layer[beginWord] = [[beginWord]]
while layer:
newLayer = collections.defaultdict(list)
for word in layer:
if word == endWord:
for i in layer[word]:
res.append(i)
else:
for i in range(len(word)):
for char in lowercase:
newWord = word[:i] + char + word[i+1:]
if newWord in wordList:
for valList in layer[word]:
# print(newWord, valList + [newWord])
newLayer[newWord].append(valList + [newWord])
wordList -= set(newLayer.keys())
layer = newLayer
return res
|
[
"[email protected]"
] | |
4e7eb91fe1d09211b9bd1a08ad237e37699b1484
|
ac549e553263801bdc6962a10ebbe784dc2631df
|
/Python/graphs/traversal.py
|
e3e6b65ebfcfc36492062561afd6ccc02a61bcd2
|
[] |
no_license
|
Bishal44/DataStructure
|
e595890d18bde39e65f02a7ca3a6904c6070c3c8
|
939c47de6dcfe3b2578aaa0610d3cdc5726572c7
|
refs/heads/master
| 2020-09-10T22:40:46.368607 | 2020-03-28T12:15:08 | 2020-03-28T12:15:08 | 221,854,694 | 0 | 0 | null | 2019-12-10T15:47:45 | 2019-11-15T05:59:40 |
Python
|
UTF-8
|
Python
| false | false | 1,863 |
py
|
'''
Created on Sat Jan 11 2020
'''
graph = {'A': set(['B', 'C', 'F']),
'B': set(['A', 'D', 'E']),
'C': set(['A', 'F']),
'D': set(['B']),
'E': set(['B', 'F']),
'F': set(['A', 'C', 'E'])}
# dfs and bfs are ultimately the same except that they visit nodes in a
# different order. To simulate this ordering we use a stack for dfs and a
# queue for bfs.
#
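# For example (illustration only, not in the original file): starting from 'A',
# dfs follows one branch to the end before backtracking, while bfs expands
# level by level (A, then B/C/F, then D/E); the exact order varies because the
# adjacency lists here are sets.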
def dfs_traverse(graph, start):
visited, stack = set(), [start]
while stack:
node = stack.pop()
if node not in visited:
visited.add(node)
for nextNode in graph[node]:
if nextNode not in visited:
stack.append(nextNode)
return visited
# print(dfs_traverse(graph, 'A'))
def bfs_traverse(graph, start):
visited, queue = set(), [start]
while queue:
node = queue.pop(0)
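# note: list.pop(0) is O(n); collections.deque.popleft() would be O(1)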
if node not in visited:
visited.add(node)
for nextNode in graph[node]:
if nextNode not in visited:
queue.append(nextNode)
return visited
# print(bfs_traverse(graph, 'A'))
def dfs_traverse_recursive(graph, start, visited=None):
if visited is None:
visited = set()
visited.add(start)
for nextNode in graph[start]:
if nextNode not in visited:
dfs_traverse_recursive(graph, nextNode, visited)
return visited
# print(dfs_traverse_recursive(graph, 'A'))
# def find_path(graph, start, end, visited=[]):
# # basecase
# visitied = visited + [start]
# if start == end:
# return visited
# if start not in graph:
# return None
# for node in graph[start]:
# if node not in visited:
# new_visited = find_path(graph, node, end, visited)
# return new_visited
# return None
# print(find_path(graph, 'A', 'F'))
|
[
"[email protected]"
] | |
b08ad2fefef80365d87004cef4629d3c62aa60b3
|
a46d135ba8fd7bd40f0b7d7a96c72be446025719
|
/packages/python/plotly/plotly/validators/layout/legend/_traceorder.py
|
d5fe177e6cf14ddf521d4e55b0eef9d2d0fa8d2e
|
[
"MIT"
] |
permissive
|
hugovk/plotly.py
|
5e763fe96f225d964c4fcd1dea79dbefa50b4692
|
cfad7862594b35965c0e000813bd7805e8494a5b
|
refs/heads/master
| 2022-05-10T12:17:38.797994 | 2021-12-21T03:49:19 | 2021-12-21T03:49:19 | 234,146,634 | 0 | 0 |
MIT
| 2020-01-15T18:33:43 | 2020-01-15T18:33:41 | null |
UTF-8
|
Python
| false | false | 532 |
py
|
import _plotly_utils.basevalidators
class TraceorderValidator(_plotly_utils.basevalidators.FlaglistValidator):
def __init__(self, plotly_name="traceorder", parent_name="layout.legend", **kwargs):
super(TraceorderValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "legend"),
extras=kwargs.pop("extras", ["normal"]),
flags=kwargs.pop("flags", ["reversed", "grouped"]),
**kwargs
)
|
[
"[email protected]"
] | |
fb2dc56539cdf51cd1d14fa04f375e98d0178ecc
|
ea16c6da19fce9a4dff085aaeff3ac12baa21d59
|
/tests/test_obvs.py
|
5febd213e3768347232d28f1e8c604c5c017648c
|
[] |
no_license
|
changhoonhahn/specmulator
|
a31b17aeab1ba1a29118e431fd7558dd8bbc7e5b
|
9453e7fcc30d74b732594bfb78f7e4f5d20bc95f
|
refs/heads/master
| 2021-09-10T18:57:21.361837 | 2018-03-31T05:52:33 | 2018-03-31T05:52:33 | 106,511,656 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,710 |
py
|
import numpy as np
import env
import util as UT
import obvs as Obvs
import matplotlib as mpl
import matplotlib.pyplot as plt
mpl.rcParams['text.usetex'] = True
mpl.rcParams['font.family'] = 'serif'
mpl.rcParams['axes.linewidth'] = 1.5
mpl.rcParams['axes.xmargin'] = 1
mpl.rcParams['xtick.labelsize'] = 'x-large'
mpl.rcParams['xtick.major.size'] = 5
mpl.rcParams['xtick.major.width'] = 1.5
mpl.rcParams['ytick.labelsize'] = 'x-large'
mpl.rcParams['ytick.major.size'] = 5
mpl.rcParams['ytick.major.width'] = 1.5
mpl.rcParams['legend.frameon'] = False
def Plk_halo_mneut_ratio(nzbin=4, zspace=False):
''' Plot the ratio of P_l^mneut(k)/P_l^0.0eV
for different neutrino masses
'''
mneuts = [0.0, 0.06, 0.10, 0.15, 0.6] # eV
p0ks_mneut, p2ks_mneut, p4ks_mneut = [], [], []
for mneut in mneuts:
p0ks, p2ks, p4ks = [], [], []
for ireal in range(1, 101):
# read all 100 realizations
plk_i = Obvs.Plk_halo(mneut, ireal, nzbin, zspace=zspace)
if ireal == 1: k = plk_i['k']
p0ks.append(plk_i['p0k'])
p2ks.append(plk_i['p2k'])
p4ks.append(plk_i['p4k'])
# plot the average
p0ks_mneut.append(np.average(np.array(p0ks), axis=0))
p2ks_mneut.append(np.average(np.array(p2ks), axis=0))
p4ks_mneut.append(np.average(np.array(p4ks), axis=0))
plks_mneut = [p0ks_mneut, p2ks_mneut, p4ks_mneut]
fig = plt.figure(figsize=(15, 5))
for i, ell in enumerate([0,2,4]):
sub = fig.add_subplot(1,3,i+1)
for ii in range(len(mneuts)):
sub.plot(k, plks_mneut[i][ii]/plks_mneut[i][0], lw=2, label=r'$\sum m_\nu = $ '+str(mneuts[ii])+'eV')
if i == 0:
sub.legend(loc='lower right', prop={'size': 12})
else:
sub.set_yticks([])
sub.set_xscale('log')
sub.set_xlim([0.01, 0.5])
sub.set_xlabel('k', fontsize=20)
sub.set_ylim([0.9, 1.15])
sub.set_ylabel('$P_{'+str(ell)+'}(k)/P_{'+str(ell)+'}^{0.0\mathrm{eV}}(k)$', fontsize=20)
if zspace: str_space = 'z'
else: str_space = 'r'
fig.savefig(''.join([UT.fig_dir(), 'tests/plk_halo.mneuts_ratio.nzbin', str(nzbin),
'.', str_space, 'space.png']), bbox_inches='tight')
return None
def Plk_halo_mneut(nzbin=4, zspace=False):
''' Plot P_l(k) for different neutrino masses
'''
mneuts = [0.0, 0.06, 0.10, 0.15, 0.6] # eV
p0ks_mneut, p2ks_mneut, p4ks_mneut = [], [], []
for mneut in mneuts:
p0ks, p2ks, p4ks = [], [], []
for ireal in range(1, 101):
# read all 100 realizations
plk_i = Obvs.Plk_halo(mneut, ireal, nzbin, zspace=zspace)
if ireal == 1: k = plk_i['k']
p0ks.append(plk_i['p0k'])
p2ks.append(plk_i['p2k'])
p4ks.append(plk_i['p4k'])
# plot the average
p0ks_mneut.append(np.average(np.array(p0ks), axis=0))
p2ks_mneut.append(np.average(np.array(p2ks), axis=0))
p4ks_mneut.append(np.average(np.array(p4ks), axis=0))
plks_mneut = [p0ks_mneut, p2ks_mneut, p4ks_mneut]
fig = plt.figure(figsize=(15, 5))
for i, ell in enumerate([0,2,4]):
sub = fig.add_subplot(1,3,i+1)
for mneut, plk in zip(mneuts, plks_mneut[i]):
sub.plot(k, plk, lw=2, label=r'$\sum m_\nu = $ '+str(mneut)+'eV')
if i == 0:
sub.legend(loc='lower right', prop={'size': 12})
else:
sub.set_yticks([])
sub.set_xscale('log')
sub.set_xlim([0.01, 0.15])
sub.set_xlabel('k', fontsize=20)
sub.set_ylim([1e3, 1e5])
sub.set_yscale('log')
sub.set_ylabel('$k P_{'+str(ell)+'}(k)$', fontsize=20)
if zspace: str_space = 'z'
else: str_space = 'r'
fig.savefig(''.join([UT.fig_dir(), 'tests/plk_halo.mneuts.nzbin', str(nzbin),
'.', str_space, 'space.png']), bbox_inches='tight')
return None
def Plk_halo(mneut=0.0, nzbin=4, zspace=False):
''' **TESTED --- Nov 7, 2017 **
Test the Plk_halo
'''
p0ks, p2ks, p4ks = [], [], []
for ireal in range(1, 101):
# read all 100 realizations
plk_i = Obvs.Plk_halo(mneut, ireal, nzbin, zspace=zspace)
if ireal == 1: k = plk_i['k']
p0ks.append(plk_i['p0k'])
p2ks.append(plk_i['p2k'])
p4ks.append(plk_i['p4k'])
fig = plt.figure()
sub = fig.add_subplot(111)
for p0k, p2k, p4k in zip(p0ks, p2ks, p4ks):
sub.plot(k, k * p0k, c='k', lw=0.1)
sub.plot(k, k * p2k, c='b', lw=0.1)
sub.plot(k, k * p4k, c='r', lw=0.1)
# plot the average
sub.plot(k, k * np.average(np.array(p0ks), axis=0), c='k', lw=2, ls='--', label='$\ell=0$')
sub.plot(k, k * np.average(np.array(p2ks), axis=0), c='b', lw=2, ls='--', label='$\ell=2$')
sub.plot(k, k * np.average(np.array(p4ks), axis=0), c='r', lw=2, ls='--', label='$\ell=4$')
sub.set_xlim([0.01, 0.15])
sub.set_xlabel('k', fontsize=20)
sub.set_ylim([-2000., 2500.])
sub.set_ylabel('$k P(k)$', fontsize=20)
sub.legend(loc='lower right', prop={'size': 15})
if zspace: str_space = 'z'
else: str_space = 'r'
fig.savefig(''.join([UT.fig_dir(), 'tests/plk_halo.', str(mneut), 'eV.nzbin', str(nzbin),
'.', str_space, 'space.png']), bbox_inches='tight')
return None
if __name__=="__main__":
Plk_halo_mneut_ratio(nzbin=4, zspace=False)
Plk_halo_mneut_ratio(nzbin=4, zspace=True)
#Plk_halo_mneut(nzbin=4, zspace=False)
#Plk_halo_mneut(nzbin=4, zspace=True)
#Plk_halo(mneut=0.6, zspace=False)
#Plk_halo(mneut=0.6, zspace=True)
|
[
"[email protected]"
] | |
d7f53e22fde0ca53ee451f3ff3b5e007a16c8a41
|
9c61ec2a55e897e4a3bb9145296081c648d812c4
|
/docs/cd/06443007程式碼/ch01/1-8.py
|
d27b4e821287c1d67dba80b1f5b27da4d527b6e6
|
[] |
no_license
|
wildboy2arthur/ML-Class
|
47899246251d12972a6d3875160c1cc8d1052202
|
345c86e3f8890919d59a63a79674acbdcd4577c4
|
refs/heads/main
| 2023-07-16T11:32:07.683652 | 2021-08-24T08:25:04 | 2021-08-24T08:25:04 | 399,388,026 | 0 | 0 | null | 2021-08-24T08:18:36 | 2021-08-24T08:18:35 | null |
UTF-8
|
Python
| false | false | 240 |
py
|
def cal_price_dict(k_cost):
rate = 0.03
nt_cost = k_cost * rate
inc = 0.2
nt_price = nt_cost * (1 + inc)
data = {
'k_cost': k_cost,
'nt_cost': nt_cost,
'nt_price': nt_price
}
return data
|
[
"[email protected]"
] | |
4da999cb489a900fa165b6cd924ab3776644bd18
|
9973dd9a35333f1b24e4c1e3cd2098391d17e193
|
/clones/migrations/0002_auto_20200216_2103.py
|
d210099b236272054745ccd1c53767889b1d5bc6
|
[] |
no_license
|
smilepogz/FinalTrelloClone
|
5140f804ceeb02e6969cb5693daa3cad7e296961
|
9affade23a0b911baa5fa11d9d2ce83e3db669e7
|
refs/heads/master
| 2021-01-04T11:20:08.893932 | 2020-02-17T13:44:11 | 2020-02-17T13:44:11 | 240,524,496 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 699 |
py
|
# Generated by Django 3.0.3 on 2020-02-16 13:03
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('clones', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='boardlist',
name='title',
field=models.CharField(max_length=200),
),
migrations.AlterField(
model_name='card',
name='Attachment',
field=models.FileField(upload_to=''),
),
migrations.AlterField(
model_name='card',
name='description',
field=models.TextField(blank=True, max_length=10),
),
]
|
[
"[email protected]"
] | |
020942a036c94976bc69092a9f4d19b9c8c7ad90
|
8f455679fdb8e05c4c78141a8065250696d68d89
|
/MultiNetV1.py
|
f46219308f3cf2135c2153d96f56870b3514b6ff
|
[
"MIT"
] |
permissive
|
x5g/dogs_vs_cats
|
63a17ac914ded5850d6d4e745408d50e4d242f74
|
8a6b992fe9abc6b20b31729eaec79ca8d6ec12e0
|
refs/heads/master
| 2022-10-20T02:25:51.097115 | 2020-06-09T17:21:52 | 2020-06-09T17:21:52 | 271,065,547 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 7,326 |
py
|
import plaidml.keras
plaidml.keras.install_backend()
import os
os.environ["KERAS_BACKEND"] = "plaidml.keras.backend"
import keras
import matplotlib.pyplot as plt
import numpy as np
import ssl
ssl._create_default_https_context = ssl._create_unverified_context
ROWS = 299
COLS = 299
CHANNELS = 3
batch_size = 32
epochs = 10
train_dir = './train2'
validation_dir = './validation'
test_dir = './test1'
Inp = keras.layers.Input((ROWS, COLS, CHANNELS))
InceptionV3_model = keras.applications.InceptionV3(weights='imagenet', include_top=False, input_shape=(ROWS, COLS, CHANNELS))
Xception_model = keras.applications.Xception(weights='imagenet', include_top=False, input_shape=(ROWS, COLS, CHANNELS))
InceptionV3_layers = InceptionV3_model(Inp)
InceptionV3_layers = keras.layers.GlobalAveragePooling2D()(InceptionV3_layers)
Xception_layers = Xception_model(Inp)
Xception_layers = keras.layers.GlobalAveragePooling2D()(Xception_layers)
x = keras.layers.Concatenate()([InceptionV3_layers, Xception_layers])
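# two pretrained ImageNet backbones share one input; their pooled feature
# vectors are concatenated before the final softmax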
output = keras.layers.Dense(2, activation='softmax')(x)
model = keras.Model(inputs=Inp, outputs=output)
for layer in InceptionV3_model.layers:
layer.trainable = False
for layer in Xception_model.layers:
layer.trainable = False
keras.utils.plot_model(model, show_shapes=True, show_layer_names=True, to_file='MultiNetV1_model.pdf')
train_datagen = keras.preprocessing.image.ImageDataGenerator(
rotation_range = 40, # random rotation, degrees
width_shift_range = 0.2, # random horizontal shift
height_shift_range = 0.2,# random vertical shift
rescale = 1/255, # rescale data to [0, 1]
shear_range = 20, # random shear
zoom_range = 0.2, # random zoom
horizontal_flip = True, # horizontal flip
fill_mode = 'nearest', # fill mode
)
test_datagen = keras.preprocessing.image.ImageDataGenerator(
rescale = 1/255, # rescale data to [0, 1]
)
# generate training data
train_generator = train_datagen.flow_from_directory(
train_dir,
target_size=(ROWS,COLS),
batch_size=batch_size,
)
# validation data
validation_generator = test_datagen.flow_from_directory(
validation_dir,
target_size=(ROWS,COLS),
batch_size=batch_size,
)
model.summary()
# define the optimizer and cost function; compute accuracy during training
model.compile(optimizer=keras.optimizers.SGD(lr=1e-4, momentum=0.9), loss=keras.losses.binary_crossentropy, metrics=['accuracy'])
## Callback for loss logging per epoch
class LossHistory(keras.callbacks.Callback):
def on_train_begin(self, logs={}):
self.losses = []
self.val_losses = []
def on_epoch_end(self, batch, logs={}):
self.losses.append(logs.get('loss'))
self.val_losses.append(logs.get('val_loss'))
early_stopping = keras.callbacks.EarlyStopping(monitor='val_loss', patience=3, verbose=1, mode='auto')
lossHistory = LossHistory()
history = model.fit_generator(
generator = train_generator,
steps_per_epoch=len(train_generator),
epochs = epochs,
validation_data=validation_generator,
validation_steps=len(validation_generator),
callbacks = [lossHistory, early_stopping])
model.save('MultiNetV1.h5')
acc = history.history['acc']
val_acc = history.history['val_acc']
loss = history.history['loss']
val_loss = history.history['val_loss']
# acc = [
# 0.9014070402083021,
# 0.9552851634870563,
# 0.9575885033298283,
# 0.9616944569640881,
# 0.9623454008312052,
# 0.9634469981488059,
# 0.963747433781964,
# 0.9642982324370337,
# 0.9672024435431376,
# 0.9662009914375845]
# val_acc = [
# 0.9805572257894484,
# 0.9821607535505228,
# 0.98296251743106,
# 0.9831629585087192,
# 0.9825616355983163,
# 0.9841651633593906,
# 0.984365604222,
# 0.9845660452996593,
# 0.9851673683414814,
# 0.9851673681025372]
# loss = [
# 0.34548001789042687,
# 0.1829768680474425,
# 0.15205100328394244,
# 0.1336793582993715,
# 0.12181056393720338,
# 0.11529702214687088,
# 0.1095373861976298,
# 0.10428516739372867,
# 0.10034206073545955,
# 0.09901416560581902]
# val_loss = [
# 0.16728722282750116,
# 0.11115399416999794,
# 0.0901722999804482,
# 0.07770438194887197,
# 0.07115493825619816,
# 0.06525685261254752,
# 0.0611271229343917,
# 0.058128020974982354,
# 0.05485415271406638,
# 0.05218703313500113]
from matplotlib.backends.backend_pdf import PdfPages
pdf = PdfPages('MultiNetV1_result.pdf')
from matplotlib.ticker import MultipleLocator
# plot training & validation accuracy
fig = plt.figure()
ax = fig.add_subplot(111)
lns1 = ax.plot(acc, color='blue', linestyle='-', label='Train accuracy')
lns2 = ax.plot(val_acc, color='orange', linestyle='-', label='Validation accuracy')
ax2 = ax.twinx()
lns3 = ax2.plot(loss, color='red', linestyle='-', label='Train loss')
lns4 = ax2.plot(val_loss, color='green', linestyle='-', label='Validation loss')
lns = lns1 + lns2 + lns3 + lns4
labs = [l.get_label() for l in lns]
ax.legend(lns, labs, loc='right')
# ax.legend(lns, labs, loc=0)
ax.grid()
ax.set_xlabel("Epoch")
ax.set_ylabel("Accuracy")
x_major_locator = MultipleLocator(1)
y_major_locator = MultipleLocator(0.01)
ax.xaxis.set_major_locator(x_major_locator)
ax.set_xlim(0, 9)
ax.set_ylim(0.90, 0.99)
ax.yaxis.set_major_locator(y_major_locator)
ax2.yaxis.set_major_locator(MultipleLocator(0.05))
ax2.set_ylabel("Loss")
ax2.set_ylim(0.05, 0.35)
# ax2.legend(loc=0)
plt.title('Training and validation accuracy and loss')
# plt.show()
# plt.savefig('MultiNetV1_result.png')
plt.tight_layout()
print('savefig...')
pdf.savefig()
plt.close()
pdf.close()
with open("MultiNetV1.txt", 'a+') as f:
f.write('acc\n')
for item in acc:
f.write("{}\n".format(item))
f.write('val_acc\n')
for item in val_acc:
f.write("{}\n".format(item))
f.write('loss\n')
for item in loss:
f.write("{}\n".format(item))
f.write('val_loss\n')
for item in val_loss:
f.write("{}\n".format(item))
def read_image(file_path):
from PIL import Image
img = Image.open(file_path)
if img.mode != 'RGB':
img = img.convert('RGB')
return img.resize((ROWS, COLS), Image.NEAREST)
def predict():
result = []
model = keras.models.load_model('MultiNetV1.h5')
test_images = [test_dir + '/' + str(i) + '.jpg' for i in range(1, 12501)]
count = len(test_images)
data = np.ndarray((count, ROWS, COLS, CHANNELS), dtype=np.float32)
for i, image_file in enumerate(test_images):
image = read_image(image_file)
data[i] = np.asarray(image) / 255.0
if i % 250 == 0: print('processing {} of {}'.format(i, count))
test = data
predictions = model.predict(test, verbose=1)
print(predictions)
for i in range(len(predictions)):
dog_pre = predictions[i, 1]
if dog_pre <= 0.005:
result.append(0.005)
elif dog_pre >=0.995:
result.append(0.995)
else:
result.append(dog_pre)
# if predictions[i, 0] >= 0.5:
# result.append(0.005)
# else:
# result.append(0.995)
return result
result = predict()
print(result)
import pandas as pd
# the dict keys become the CSV column names
dataframe = pd.DataFrame({'id': [i for i in range(1, 12501)], 'label': result})
# write the DataFrame to CSV; index controls whether row labels are written (default=True)
dataframe.to_csv("MultiNetV1_result.csv", index=False, sep=',')
|
[
"[email protected]"
] | |
8e3f054d598f85623ae2376aac935bda04e154d6
|
afbae26b958b5ef20548402a65002dcc8e55b66a
|
/ironstubs/process_stubs.py
|
570dd3fd93b8d1d96302c69f2f0d497a9dd5adf3
|
[
"MIT"
] |
permissive
|
gtalarico/ironpython-stubs
|
d875cb8932c7644f807dc6fde9dd513d159e4f5c
|
c7f6a6cb197e3949e40a4880a0b2a44e72d0a940
|
refs/heads/master
| 2023-07-12T01:43:47.295560 | 2022-05-23T18:12:06 | 2022-05-23T18:12:06 | 95,340,553 | 235 | 88 |
NOASSERTION
| 2023-07-05T06:36:28 | 2017-06-25T05:30:46 |
Python
|
UTF-8
|
Python
| false | false | 6,253 |
py
|
""" Stub Generator for IronPython
Extended script based on script developed by Gary Edwards at:
gitlab.com/reje/revit-python-stubs
This uses a slightly modified version of generator3,
github.com/JetBrains/intellij-community/blob/master/python/helpers/generator3.py
Iterates through a list of targeted assemblies and generates stub directories
for the namespaces using pycharm's generator3.
Note:
Some files ended up too large for Jedi to handle and would cause
memory errors and crashes - 1mb+ in a single file was enough to
cause problems. To fix this, there is a separate module that creates
a compressed version of the stubs, but it also splits large files
into separate parts to deal with jedi.
These directories will show up in the stubs as (X_parts)
MIT LICENSE
https://github.com/gtalarico/ironpython-stubs
Gui Talarico
--------------------------------------------------------------------------
Large files, such as `System/__init__.py` or `Revit/DB/__init__.py`
can exceed memory limits and crash the system.
These files need to be optimized so Jedi won't misbehave and crash your system
when parsing these files to index autocomplete options.
The primary strategies are:
1. Remove unnecessary characters (empty lines, extra spaces, etc)
2. Split large files into parts to improve Jedi performance and avoid crashes
#1 is very straight forward. Use a few regexes.
#2 is more complex. Some of the stubs created by generator3 such as DB/__init__.py
had nearly 2mb. Doesn't seem like much, but for a raw .py file, that's more than
120K lines. System.Windows.Forms had over 7mb.
The strategy here was simple. Take all the classes inside these monster files,
create separate files for each one, and import them back into the original file.
For an example, compare:
`\stubs\Autodesk\Revit\DB\__init__.py`
and
``\stubs.min\Autodesk\Revit\DB\__init__.py`
"""
import re
import os
import sys
import subprocess
from collections import defaultdict
import json
from pprint import pprint
#############################################################################
#TODO: Integrate with CLI
#TODO: FIX Vars
#TODO: FIX Character Replacement + Optimize
#############################################################################
##########
# CONFIG #
##########
join = os.path.join
project_dir = os.getcwd() # Must execute from project dir
SAVE_PATH = os.path.join(project_dir, 'release', 'stubs')
LIMIT_IN_KB = 200
FILESIZE_LIMITE = LIMIT_IN_KB * 1024
def file_is_too_damn_big(filepath):
return os.path.getsize(filepath) > FILESIZE_LIMITE
def read_source(filepath):
with open(filepath) as fp:
source = fp.read()
return source
def write_source(filepath, source):
folderpath = os.path.dirname(filepath)
if not os.path.exists(folderpath):
os.makedirs(folderpath)
with open(filepath, 'w') as fp:
source = fp.write(source)
print('File Written: {}'.format(filepath))
target_files = []
TESTING = False
# TESTING = True
print('Starting...')
print(SAVE_PATH)
for root, subfolders, files in os.walk(SAVE_PATH):
py_files = [f for f in files if f.endswith('.py')]
for filename in py_files:
filepath = join(root, filename)
filesize = os.path.getsize(filepath)
filedir = os.path.dirname(filepath)
new_filedir = filedir.replace(r'\stubs', r'\stubs.min')
new_filepath = os.path.join(new_filedir, filename)
source = read_source(filepath)
print("Processing File detected: {}".format(filepath))
if TESTING:
if not filepath.endswith('DB\\__init__.py'):
continue
# SOME OF THESE WORK IN TESTS BUT ARE NOT WORKING ON BATCH REPLACEMENT
replacements = [
(r' {4}', ' '), # Convert 4 spaces into single
(r':\r\n( )+pass', r':pass'), # Put pass in one line
(r'"""\r\n( )+pass', r'"""'), # If has doc string, not need to keep pass
(r'pass\n', r'pass'), # Remove Extra Line after pass
(r' = ', '='),
(r', ', ','),
(r' # known case of __new__', ''), # Pycharm Note
(r' #cannot find CLR method', ''), # Pycharm Note
(r' # default', ''), # Pycharm Note
]
new_source = source
for old, new in replacements:
new_source = re.sub(old, new, new_source)
write_source(new_filepath, new_source)
print('='*30)
#####################################
# SEPARATE FILE INTO SEPARATE FILES #
#####################################
if file_is_too_damn_big(new_filepath):
print('='*30)
print('WARNING: file above breaking max: {}'.format(new_filepath))
module_name = os.path.basename(filepath).replace('.py', '_parts')
chunks_dir = join(new_filedir, module_name)
# Create Blank Init File
write_source(join(chunks_dir, '__init__.py'), '')
# Split File into Classes
chunks = re.split(r'(?:\n)class ', new_source)
header = chunks.pop(0)
clean_source = header
write_source(new_filepath, clean_source)
for chunk in chunks:
# Find Class Name and body
class_source = 'class ' + chunk
re_class_name = re.search('(class )(\w+)', class_source)
class_name = re_class_name.group(2)
if not os.path.exists(chunks_dir):
os.mkdir(chunks_dir)
# Write individual class files
with open(join(chunks_dir, class_name + '.py'), 'w') as fp:
fp.write(class_source)
# New class file import to __init__
with open(new_filepath, 'a') as fp:
fp.write('from {0}.{1} import {1}\n'.format(module_name, class_name))
|
[
"[email protected]"
] | |
9fabaf664d6dbaf4dd42fc7eb23fb3b411cfd395
|
845d8e6816e91474e673b6cda452254d40c65e5c
|
/django_mailbox/transports/mmdf.py
|
ad462849609331fa0f5cdc9bf69e107179dd2cb7
|
[] |
no_license
|
redtoad/django-mailbox
|
d0847f7f29f4e4459045e8d9d3d5d1406968175b
|
6da17053d495bee58ea78d4fb394d7618aeaab1a
|
refs/heads/master
| 2021-01-01T15:36:55.409316 | 2013-06-12T06:50:25 | 2013-06-12T06:50:25 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 152 |
py
|
from mailbox import MMDF
from django_mailbox.transports.generic import GenericFileMailbox
class MMDFTransport(GenericFileMailbox):
_variant = MMDF
|
[
"[email protected]"
] | |
6204778bccce5acd82eee6997003e783a16005fd
|
a939e018333a9ecd26ddc618f99835b7eb381686
|
/.svn/tmp/tempfile.2.tmp
|
509885ba67010786fd018501957f1787d480a5c8
|
[] |
no_license
|
cash2one/crawl_youtube
|
bff5ba254001c2f31f770e55a4aca39bc54e45ee
|
0dc40186a1d89da2b00f29d4f4edfdc5470eb4fc
|
refs/heads/master
| 2021-01-16T22:30:17.800282 | 2016-02-18T11:50:09 | 2016-02-18T11:50:09 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,117 |
tmp
|
#!/usr/bin/python
# coding=utf8
# Copyright 2015 LeTV Inc. All Rights Reserved.
# author: [email protected] (Qiang Gao)
import os
import signal
from le_crawler.common.logutil import Log
thrift_logger = Log('thrift.server.TServer', 'log/thrift_filter.error').log
from optparse import OptionParser
from thrift.transport import TSocket
from thrift.transport import TTransport
from thrift.protocol import TCompactProtocol
from thrift.server import TServer
from pybloom import ScalableBloomFilter
from le_crawler.proto.filter import UrlFilterService
class FilterHandler(object):
def __init__(self, logger):
self.logger_ = logger
self._load_from_file()
def url_seen(self, url):
if self.deduper_.add(url):
self.logger_.info('url duplicated: %s', url)
return True
return False
def _load_from_file(self):
self.logger_.info('loading data from cache file...')
if not os.path.isfile('data/bloom.data'):
self.logger_.error('bloom cache file not found, create one instead.')
self.deduper_ = ScalableBloomFilter(100000000, 0.0001, 4)
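# initial capacity 1e8 urls with a 0.01% target error rate; the third
# argument is the growth mode (4 should be pybloom's LARGE_SET_GROWTH)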
else:
with open('data/bloom.data', 'r') as f:
self.deduper_ = ScalableBloomFilter.fromfile(f)
def _dump_to_file(self):
self.logger_.info('dumping data...')
if not os.path.isdir('data'):
os.mkdir('data')
with open('data/bloom.data', 'w') as f:
self.deduper_.tofile(f)
self.logger_.info('dump data finished.')
def close(self):
self._dump_to_file()
class FilterServiceMain(object):
def __init__(self):
self.logger_ = Log('filter_log', 'log/filter.log').log
self.exit_ = False
def close(self, num, fram):
self.exit_ = True
try:
self.socket_.close()
self.handler_.close()
self.logger_.info('close transport')
except:
self.logger_.exception('failed to close transport.')
def run(self, host, port):
# the daemon flag is set True so the service can be stopped by an outside signal
self.socket_ = TSocket.TServerSocket(host, port)
self.handler_ = FilterHandler(self.logger_)
self.service = TServer.TThreadedServer(UrlFilterService.Processor(self.handler_),
self.socket_,
TTransport.TBufferedTransportFactory(),
TCompactProtocol.TCompactProtocolFactory(),
daemon=True)
self.logger_.info('begin server on %s, %s' % (host, port))
print 'begin server on %s, %s' % (host, port)
self.service.serve()
scheduler = FilterServiceMain()
signal.signal(signal.SIGINT, scheduler.close)
signal.signal(signal.SIGTERM, scheduler.close)
if __name__ == '__main__':
option_parser = OptionParser()
option_parser.add_option('-H', '--host', type='string', dest='host',
default='10.150.140.84', help="service host")
option_parser.add_option('-p', '--port', type='int', dest='port',
default=8089, help="service port")
options, _ = option_parser.parse_args()
scheduler.run(options.host, options.port)
|
[
"[email protected]"
] | |
6951735b5119448cb7a86cf403b941f92733e4b0
|
f46966a5e49a6138182635a4850738a18eec01e5
|
/scripts/utils/bcbio_prep_cwl_genomes.py
|
d704120ef6fa0e7407cca8ec06c5c6a3272e0319
|
[
"MIT"
] |
permissive
|
jchenpku/bcbio-nextgen
|
44a9247a0e1314aaba66d1f9941540ddb2993bde
|
9ddbfcc6f2595298ae8aad3adfa6a568a2a4c62f
|
refs/heads/master
| 2020-08-01T03:06:30.695158 | 2019-10-07T00:21:32 | 2019-10-07T00:21:32 | 73,585,332 | 1 | 0 |
MIT
| 2019-10-07T00:21:33 | 2016-11-12T23:49:31 |
Python
|
UTF-8
|
Python
| false | false | 2,642 |
py
|
#!/usr/bin/env python
"""Clean and prepare a set of genomes for CWL usage and upload.
bcbio with CWL can read directly from a reference genome folder
without using Galaxy location files. This allows both local and
remote usage on object stores (Arvados, DNAnexus, SevenBridges, Synapse, S3).
This copies from an existing bcbio genome installation, cleaning
and packing directories to be ready for CWL usage and upload.
Usage:
bcbio_prep_cwl_genomes.py <genome_dir>
"""
import glob
import os
import shutil
import subprocess
import sys
import tarfile
from bcbio import utils
def main(base_dir):
for genome_dir in sorted(glob.glob(os.path.join(base_dir, "*", "*"))):
if os.path.isdir(genome_dir):
genome_name = os.path.basename(genome_dir)
genome_out_dir = utils.safe_makedir(os.path.join(os.path.join(os.getcwd(), "genomes", genome_name)))
copy_genome(genome_dir, genome_out_dir)
def copy_genome(orig_dir, out_dir):
print(orig_dir, out_dir)
to_copy = ["versions.csv", "bwa", "config", "coverage", "rnaseq", "rtg", "seq", "snpeff",
"ucsc", "validation", "variation", "viral"]
excludes = {"seq": ["*.fa.gz*", "*.old*", "perl"],
"rnaseq": ["ericscript", "tophat", "kallisto"],
"snpeff": ["transcripts"],
"variation": ["genesplicer", "dbNSFP*"]}
to_tar = ["bwa", "rtg", "snpeff"]
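# Copy strategy: plain files are copied as-is; directories are rsynced with
# per-directory excludes; directories listed in to_tar are then packed into a
# single *-wf.tar.gz so each indexed resource uploads as one object.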
for copy in to_copy:
if os.path.isfile(os.path.join(orig_dir, copy)):
shutil.copy(os.path.join(orig_dir, copy), out_dir)
elif copy in to_tar and len(glob.glob(os.path.join(out_dir, "%s*-wf.tar.gz" % copy))) == 1:
print("already prepped: %s" % glob.glob(os.path.join(out_dir, "%s*-wf.tar.gz" % copy)))
else:
cmd = ["rsync", "-avz"]
for e in excludes.get(copy, []):
cmd += ["--exclude", e]
cmd += ["%s/%s/" % (orig_dir, copy), "%s/%s/" % (out_dir, copy)]
print " ".join(cmd)
subprocess.check_call(cmd)
if copy in to_tar:
with utils.chdir(out_dir):
out_file = copy
dir_files = os.listdir(copy)
if len(dir_files) == 1 and os.path.isdir(os.path.join(copy, dir_files[0])):
out_file += "--%s" % (dir_files[0])
out_file += "-wf.tar.gz"
print("tarball", out_file)
with tarfile.open(out_file, "w:gz") as tar:
tar.add(copy)
shutil.rmtree(copy)
if __name__ == "__main__":
main(*sys.argv[1:])
|
[
"[email protected]"
] | |
000ad2bfe0221337ebe78b33b4c1046aed21085d
|
46b432cd3557038c454601367b878f889c9b6a8f
|
/kiyuna/tutorial04/test_hmm.py
|
b2b0fc5a973faf6fbfb2ad7d8772238651f39b66
|
[] |
no_license
|
tmu-nlp/NLPtutorial2019
|
84ceec06568fd9d899a686658fb8851466133375
|
d77d199c50cd37d70e462209a7bfcd4dee9140a1
|
refs/heads/master
| 2020-05-14T13:34:05.336594 | 2019-09-25T02:25:41 | 2019-09-25T02:25:41 | 181,814,723 | 1 | 0 | null | 2019-08-01T18:53:54 | 2019-04-17T04:04:06 |
Python
|
UTF-8
|
Python
| false | false | 3,896 |
py
|
'''
POS tagging with a hidden Markov model
'''
import os
import sys
import subprocess
from collections import defaultdict
from math import log2
os.chdir(os.path.dirname(os.path.abspath(__file__))) # cd .
def message(text):
print("\33[92m" + text + "\33[0m")
def load_model(model_file):
possible_tags = defaultdict(int)
emission = defaultdict(float)
transition = defaultdict(float)
with open(model_file) as f:
for line in f:
type, context, word, prob = line.split()
possible_tags[context] += 1
if type == 'T':
transition[f"{context} {word}"] = float(prob)
else:
emission[f"{context} {word}"] = float(prob)
return possible_tags, emission, transition
def test_hmm(model_path, test_path, output_path):
λ_1 = 0.90
λ_unk = 1 - λ_1
V = 1e6
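# Emission probabilities are smoothed by linear interpolation with a uniform
# unknown-word model over a vocabulary of size V:
#   P(w|t) = λ_1 * P_ml(w|t) + (1 - λ_1) / V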
possible_tags, emission, transition = load_model(model_path)
res = []
with open(test_path) as f:
for line in f:
words = line.split()
# cost-minimizing DP (Viterbi)
best_score = defaultdict(lambda: float('inf'))
best_edge = defaultdict(str)
best_score["0 <s>"] = 0
best_edge["0 <s>"] = None
for i, word in enumerate(words):
for prev in possible_tags:
for next in possible_tags:
if f"{i} {prev}" not in best_score:
continue
if f"{prev} {next}" not in transition:
continue
score = best_score[f"{i} {prev}"]
Pt = transition[f"{prev} {next}"]
score += -log2(Pt)
Pe = λ_1 * emission[f"{next} {word}"] + λ_unk / V
score += -log2(Pe)
if best_score[f"{i+1} {next}"] > score:
best_score[f"{i+1} {next}"] = score
best_edge[f"{i+1} {next}"] = f"{i} {prev}"
l = len(words)
for tag in possible_tags:
if f"{l} {tag}" not in best_score:
continue
if f"{tag} </s>" not in transition:
continue
Pt = transition[f"{tag} </s>"]
score = best_score[f"{l} {tag}"] + -log2(Pt)
if best_score[f"{l+1} </s>"] > score:
best_score[f"{l+1} </s>"] = score
best_edge[f"{l+1} </s>"] = f"{l} {tag}"
tags = []
next_edge = best_edge[f"{l+1} </s>"]
while next_edge != "0 <s>":
pos, tag = next_edge.split()
tags.append(tag)
next_edge = best_edge[next_edge]
tags.reverse()
res.append(" ".join(tags) + '\n')
with open(output_path, 'w') as f:
f.writelines(res)
if __name__ == '__main__':
is_test = sys.argv[1:] == ["test"]
if is_test:
message("[*] test")
model = './model_test.txt'
test = '../../test/05-test-input.txt'
res = './result_test.pos'
ans = '../../test/05-test-answer.txt'
else:
message("[*] wiki")
model = './model_wiki.txt'
test = '../../data/wiki-en-test.norm'
res = './result_wiki.pos'
ans = '../../data/wiki-en-test.pos'
test_hmm(model, test, res)
if is_test:
subprocess.run(f'diff -s {res} {ans}'.split())
else:
subprocess.run(f'perl ../../script/gradepos.pl {ans} {res}'.split())
message("[+] Done!")
'''
Accuracy: 90.82% (4144/4563)
Most common mistakes:
NNS --> NN 45
NN --> JJ 27
JJ --> DT 22
NNP --> NN 22
VBN --> NN 12
JJ --> NN 12
NN --> IN 11
NN --> DT 10
NNP --> JJ 8
VBP --> VB 7
'''
|
[
"[email protected]"
] | |
c697740729c72361e89fa3f8b66eec1705d07e84
|
09e57dd1374713f06b70d7b37a580130d9bbab0d
|
/data/p4VQE/R3/benchmark/startPyquil348.py
|
c41069924278a31fe96eac76877e55e4208814cf
|
[
"BSD-3-Clause"
] |
permissive
|
UCLA-SEAL/QDiff
|
ad53650034897abb5941e74539e3aee8edb600ab
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
refs/heads/main
| 2023-08-05T04:52:24.961998 | 2021-09-19T02:56:16 | 2021-09-19T02:56:16 | 405,159,939 | 2 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,196 |
py
|
# qubit number=4
# total number=13
import pyquil
from pyquil.api import local_forest_runtime, QVMConnection
from pyquil import Program, get_qc
from pyquil.gates import *
import numpy as np
conn = QVMConnection()
def make_circuit()-> Program:
prog = Program() # circuit begin
prog += H(1) # number=2
prog += H(2) # number=3
prog += H(3) # number=4
prog += Y(3) # number=5
prog += SWAP(1,0) # number=6
prog += SWAP(1,0) # number=7
prog += CNOT(1,0) # number=10
prog += X(0) # number=11
prog += CNOT(1,0) # number=12
prog += X(0) # number=9
# circuit end
return prog
def summarise_results(bitstrings) -> dict:
d = {}
for l in bitstrings:
if d.get(l) is None:
d[l] = 1
else:
d[l] = d[l] + 1
return d
if __name__ == '__main__':
prog = make_circuit()
qvm = get_qc('4q-qvm')
results = qvm.run_and_measure(prog,1024)
bitstrings = np.vstack([results[i] for i in qvm.qubits()]).T
bitstrings = [''.join(map(str, l)) for l in bitstrings]
writefile = open("../data/startPyquil348.csv","w")
print(summarise_results(bitstrings), file=writefile)
writefile.close()
|
[
"[email protected]"
] | |
66c35ef831aaa59121f0b9b48d719fee7b050b34
|
078686dd88ff399cb3f9f773d237a7b18adf513a
|
/fund_crawl.py
|
2e11bb1c2315571f53e2f78a3e04f58a7555f55c
|
[] |
no_license
|
kh7160/lotto
|
b1995bb9488a02f9c0656779cb6bb118aa1d66b0
|
9c6b764bcc7244729d8ad39637de3d029f8f4b26
|
refs/heads/master
| 2023-02-28T00:12:27.295284 | 2021-02-01T10:49:20 | 2021-02-01T10:49:20 | 334,917,528 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 842 |
py
|
import requests
from bs4 import BeautifulSoup
import fund_parse
url = 'https://dhlottery.co.kr/common.do?method=main'
resp = requests.get(url)
soup = BeautifulSoup(resp.text, 'html.parser')
# crawl the group number and the six winning numbers
group = soup.select('.group .num span')
group = group[0].text
# the six numbers sit in spans with classes num al720_color1 .. al720_color6
num = [int(soup.find_all('span', {'class': 'num al720_color%d' % i})[0].text) for i in range(1, 7)]
# mysql update
fund_parse.fund_update_group(group)
fund_parse.fund_update_number(num)
|
[
"[email protected]"
] | |
ddff08d9864dfe1076ecf400d73e63b3b20a37df
|
1a663b69c47ac56c38aed5704fc403df82b48491
|
/teafacto/scripts/theanowrap.py
|
331e4da792e0cef33af941df2b7e907443d1db42
|
[
"MIT"
] |
permissive
|
lukovnikov/teafacto
|
9c0dda1dbb1abbcff795097a3522178ad5395852
|
5e863df8d061106ad705c0837f2d2ca4e08db0e4
|
refs/heads/master
| 2020-04-04T05:53:56.616520 | 2017-02-08T21:03:17 | 2017-02-08T21:03:17 | 46,288,607 | 2 | 5 | null | 2016-04-13T12:25:47 | 2015-11-16T16:52:23 |
Python
|
UTF-8
|
Python
| false | false | 710 |
py
|
from teafacto.core.base import tensorops as T, Val, param
import numpy as np
import sys
x = Val(np.random.random((10,10)))
#y = Val(np.random.random((10,10)))
y = param((10, 10), name="y").uniform()
w = param((10, 10), name="w").uniform()
#z = T.dot(x, y)
z = (x + y)
u = z * w
s = T.nnet.sigmoid
s2 = T.nnet.sigmoid
print s == s2
sys.exit()
print z.allparams
print T.dot
print z.ndim
print z.dimswap
zd = z.dimswap(1,0)
print z.dimswap(0, 1).allparams
print y.dimswap(0, 1).allparams
print T.nnet.conv.conv2d
print u.norm(2).allparams
print u.dimswap(0, 1).allparams
print T.nnet.softmax(z).allparams
zs = T.nnet.sigmoid(z)
zs = zs + x
zs.autobuild()
zs.autobuild()
us = T.nnet.sigmoid(u)
print us.allparams
|
[
"[email protected]"
] | |
59c6f29c6c88c672ad008ad803c796881d0de0c6
|
938a089e9b5e876a3b48932274171da7a4e7aa42
|
/bench/genesys2.py
|
2332f797a6bd9cebe7f8ad88338e320f41377567
|
[
"BSD-3-Clause",
"BSD-2-Clause"
] |
permissive
|
rprinz08/liteeth
|
aa94e0eb790ba571ea59e98697d11300a57b3d03
|
dc10f82753efd236e1811a72c4be2c27cefd2c68
|
refs/heads/master
| 2023-07-18T17:17:06.441779 | 2021-09-10T08:06:47 | 2021-09-10T08:06:47 | 260,763,015 | 0 | 0 |
NOASSERTION
| 2020-05-02T19:47:32 | 2020-05-02T19:47:32 | null |
UTF-8
|
Python
| false | false | 2,680 |
py
|
#!/usr/bin/env python3
#
# This file is part of LiteEth.
#
# Copyright (c) 2020 Florent Kermarrec <[email protected]>
# SPDX-License-Identifier: BSD-2-Clause
import os
import argparse
from migen import *
from litex_boards.platforms import genesys2
from litex_boards.targets.genesys2 import _CRG
from litex.soc.cores.clock import *
from litex.soc.interconnect.csr import *
from litex.soc.integration.soc_core import *
from litex.soc.integration.builder import *
from liteeth.phy.s7rgmii import LiteEthPHYRGMII
# Bench SoC ----------------------------------------------------------------------------------------
class BenchSoC(SoCCore):
def __init__(self, sys_clk_freq=int(50e6)):
platform = genesys2.Platform()
# SoCMini ----------------------------------------------------------------------------------
SoCMini.__init__(self, platform, clk_freq=sys_clk_freq,
ident = "LiteEth bench on Genesys2",
ident_version = True
)
# CRG --------------------------------------------------------------------------------------
self.submodules.crg = _CRG(platform, sys_clk_freq)
# Etherbone --------------------------------------------------------------------------------
self.submodules.ethphy = LiteEthPHYRGMII(
clock_pads = self.platform.request("eth_clocks"),
pads = self.platform.request("eth"),
with_hw_init_reset = False)
self.add_etherbone(phy=self.ethphy, buffer_depth=255)
# SRAM -------------------------------------------------------------------------------------
self.add_ram("sram", 0x20000000, 0x1000)
# Leds -------------------------------------------------------------------------------------
from litex.soc.cores.led import LedChaser
self.submodules.leds = LedChaser(
pads = platform.request_all("user_led"),
sys_clk_freq = sys_clk_freq)
# Main ---------------------------------------------------------------------------------------------
def main():
parser = argparse.ArgumentParser(description="LiteEth Bench on Genesys2")
parser.add_argument("--build", action="store_true", help="Build bitstream")
parser.add_argument("--load", action="store_true", help="Load bitstream")
args = parser.parse_args()
soc = BenchSoC()
builder = Builder(soc, csr_csv="csr.csv")
builder.build(run=args.build)
if args.load:
prog = soc.platform.create_programmer()
prog.load_bitstream(os.path.join(builder.gateware_dir, soc.build_name + ".bit"))
if __name__ == "__main__":
main()
|
[
"[email protected]"
] | |
6387f24c6cee7a4d44c898fadc2886bc1358fc85
|
cb3d1b072391b07ef0e9596df7f223f37683e970
|
/[0333]_Largest_BST_Subtree/Largest_BST_Subtree.py
|
20ac486fdae272035ca2cdb53f05e32e45ab550b
|
[] |
no_license
|
kotori233/LeetCode
|
99620255a64c898457901602de5db150bc35aabb
|
996f9fcd26326db9b8f49078d9454fffb908cafe
|
refs/heads/master
| 2021-09-10T18:00:56.968949 | 2018-03-30T14:38:27 | 2018-03-30T14:38:27 | 103,036,334 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 835 |
py
|
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
def largestBSTSubtree(self, root):
"""
:type root: TreeNode
:rtype: int
"""
self.res = 0
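# dfs returns a (size, max, min) triple per subtree: size is the node count when
# the subtree is itself a BST; an empty child yields (0, -inf, +inf) so leaves
# pass the check, while a broken subtree yields (0, +inf, -inf) so every
# ancestor fails it.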
def dfs(root):
if root is None:
return (0, float('-inf'), float('inf'))
left = dfs(root.left)
right = dfs(root.right)
if root.val > left[1] and root.val < right[2]:
temp = left[0] + right[0] + 1
self.res = max(temp, self.res)
return (temp, max(root.val, right[1]), min(root.val, left[2]))
return (0, float('inf'), float('-inf'))  # broken subtree: make every ancestor fail the BST check
dfs(root)
return self.res
|
[
"[email protected]"
] | |
4fccc4958d08996a263601b37e9b8b1a85416c19
|
8997a0bf1e3b6efe5dd9d5f307e1459f15501f5a
|
/stackoverflow_site__parsing/print__datetime_utc_and_reputation_change__with_missing_dates.py
|
00e9926585256ee0385b09771cb3e87d0bf1e62c
|
[
"CC-BY-4.0"
] |
permissive
|
stepik/SimplePyScripts
|
01092eb1b2c1c33756427abb2debbd0c0abf533f
|
3259d88cb58b650549080d6f63b15910ae7e4779
|
refs/heads/master
| 2023-05-15T17:35:55.743164 | 2021-06-11T22:59:07 | 2021-06-11T22:59:07 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,007 |
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'ipetrash'
import datetime as DT
def generate_range_dates(start_date, end_date) -> list:
date_1 = min(start_date, end_date)
date_2 = max(start_date, end_date)
# add the start date right away
items = [date_1]
while date_1 < date_2:
date_1 += DT.timedelta(days=1)
items.append(date_1)
return items
if __name__ == '__main__':
url = 'https://ru.stackoverflow.com/users/201445/gil9red?tab=reputation'
from print__datetime_utc_and_reputation_change import get_day_by_rep
day_by_rep = get_day_by_rep(url)
start_date, end_date = min(day_by_rep), max(day_by_rep)
print('Start: {}, end: {}'.format(start_date, end_date))
print()
# generate the range of dates
dates = generate_range_dates(start_date, end_date)
# Print
for day in reversed(dates):
print('{:%d/%m/%Y} : {}'.format(day, day_by_rep.get(day, 0)))
|
[
"[email protected]"
] | |
4bdba1ed302a07e95891189723cb8e02be46a173
|
8806a17d66d7abb8434c879215dc09cbfc3b5a25
|
/bin/log.py
|
02e6764c09facc7e70ec062e7792b50d468208ef
|
[] |
no_license
|
chenrun666/JW_purchase
|
f23d1719f447be669134c8fc02b1b8fd9d82cba8
|
9552920259f4014a08b38db88d0d48f0864822d3
|
refs/heads/master
| 2020-04-25T20:44:06.403805 | 2019-03-09T10:05:19 | 2019-03-09T10:05:19 | 173,057,863 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,963 |
py
|
# coding:utf-8
import logging
from logging.handlers import RotatingFileHandler  # rolls backups over by file size
import colorlog  # colored console log output
import time
import datetime
import os
cur_path = os.path.dirname(os.path.realpath(__file__))  # log_path is the directory that stores the logs
log_path = os.path.join(os.path.dirname(cur_path), 'logs')
if not os.path.exists(log_path): os.mkdir(log_path)  # create the logs folder automatically if it does not exist
logName = os.path.join(log_path, '%s.log' % time.strftime('%Y-%m-%d'))  # log file naming
log_colors_config = {
'DEBUG': 'cyan',
'INFO': 'green',
'WARNING': 'yellow',
'ERROR': 'red',
'CRITICAL': 'red',
}
class Log:
def __init__(self, logName=logName):
self.logName = logName
self.logger = logging.getLogger()
self.logger.setLevel(logging.DEBUG)
self.formatter = colorlog.ColoredFormatter(
'%(log_color)s[%(asctime)s] [%(filename)s:%(lineno)d] [%(module)s:%(funcName)s] [%(levelname)s]- %(message)s',
log_colors=log_colors_config)  # log output format
self.handle_logs()
def get_file_sorted(self, file_path):
"""最后修改时间顺序升序排列 os.path.getmtime()->获取文件最后修改时间"""
dir_list = os.listdir(file_path)
if not dir_list:
return
else:
dir_list = sorted(dir_list, key=lambda x: os.path.getmtime(os.path.join(file_path, x)))
return dir_list
def TimeStampToTime(self, timestamp):
"""格式化时间"""
timeStruct = time.localtime(timestamp)
return str(time.strftime('%Y-%m-%d', timeStruct))
def handle_logs(self):
"""处理日志过期天数和文件数量"""
dir_list = ['report'] # 要删除文件的目录名
for dir in dir_list:
dirPath = os.path.abspath(os.path.dirname(os.path.dirname(__file__))) + '/' + dir  # absolute path of the directory to clean
file_list = self.get_file_sorted(dirPath)  # files sorted by modification time
if file_list:  # only when the directory actually contains log files
for i in file_list:
file_path = os.path.join(dirPath, i)  # absolute path of the file
t_list = self.TimeStampToTime(os.path.getctime(file_path)).split('-')
now_list = self.TimeStampToTime(time.time()).split('-')
t = datetime.datetime(int(t_list[0]), int(t_list[1]),
int(t_list[2]))  # convert to datetime.datetime
now = datetime.datetime(int(now_list[0]), int(now_list[1]), int(now_list[2]))
if (now - t).days > 7:  # delete files created more than 7 days ago
self.delete_logs(file_path)
if len(file_list) > 10:  # cap the number of files kept per directory
file_list = file_list[0:-4]
for i in file_list:
file_path = os.path.join(dirPath, i)
print(file_path)
self.delete_logs(file_path)
def delete_logs(self, file_path):
try:
os.remove(file_path)
except PermissionError as e:
Log().warning('删除日志文件失败:{}'.format(e))
def __console(self, level, message):
# create a FileHandler that writes to the local log file
fh = RotatingFileHandler(filename=self.logName, mode='a', maxBytes=1024 * 1024 * 10, backupCount=10,
encoding='utf-8')  # RotatingFileHandler rolls log backups over by size
fh.suffix = "%Y%m%d.log"
fh.setLevel(logging.DEBUG)
fh.setFormatter(self.formatter)
self.logger.addHandler(fh)
# create a StreamHandler for console output
ch = colorlog.StreamHandler()
ch.setLevel(logging.DEBUG)
ch.setFormatter(self.formatter)
self.logger.addHandler(ch)
if level == 'info':
self.logger.info(message)
elif level == 'debug':
self.logger.debug(message)
elif level == 'warning':
self.logger.warning(message)
elif level == 'error':
self.logger.error(message)
# remove the handlers again to avoid duplicated log output
self.logger.removeHandler(ch)
self.logger.removeHandler(fh)
fh.close()  # close the open file
def debug(self, message):
self.__console('debug', message)
def info(self, message):
self.__console('info', message)
def warning(self, message):
self.__console('warning', message)
def error(self, message):
self.__console('error', message)
logger = Log()
if __name__ == "__main__":
log = Log()
log.debug("---测试开始----")
log.info("操作步骤")
log.warning("----测试结束----")
log.error("----测试错误----")
|
[
"[email protected]"
] | |
76f0db3ff3eb3950c75953ea5619bbcd4e1ee88c
|
113bfeda578324908963307670718c5545f30e8b
|
/booksite/booksite/book/migrations/0011_auto_20171205_1611.py
|
c4e55389b90cee0a19f1960233318db14ed070c8
|
[
"Apache-2.0"
] |
permissive
|
tkliuxing/bookspider
|
f0989814716e38fa081cc300f92fc975ff8ac67d
|
bc7ba487f0ab6ea7782f5093bb1d074eac662bdf
|
refs/heads/master
| 2021-01-18T23:31:26.566892 | 2020-03-14T04:04:48 | 2020-03-14T04:04:48 | 21,845,464 | 40 | 36 | null | 2015-11-06T03:58:04 | 2014-07-15T03:51:01 |
CSS
|
UTF-8
|
Python
| false | false | 426 |
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.8 on 2017-12-05 08:11
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('book', '0010_auto_20171205_1436'),
]
operations = [
migrations.AlterUniqueTogether(
name='bookpage',
unique_together=set([('page_number', 'site')]),
),
]
|
[
"[email protected]"
] | |
e51ce24cc9abf704617483f76ca2cd74285aeb65
|
5b93930ce8280b3cbc7d6b955df0bfc5504ee99c
|
/nodes/VanderPlas17Python/E_Chapter4/N_ThreeDimensionalPlotting/A_ThreeDimensionalPoints/index.py
|
0b8df9722eaab651824247c3bb62acae5f54c7eb
|
[] |
no_license
|
nimra/module_gen
|
8749c8d29beb700cac57132232861eba4eb82331
|
2e0a4452548af4fefd4cb30ab9d08d7662122cf4
|
refs/heads/master
| 2022-03-04T09:35:12.443651 | 2019-10-26T04:40:49 | 2019-10-26T04:40:49 | 213,980,247 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,466 |
py
|
# Lawrence McAfee
# ~~~~~~~~ import ~~~~~~~~
from modules.node.HierNode import HierNode
from modules.node.LeafNode import LeafNode
from modules.node.Stage import Stage
from modules.node.block.CodeBlock import CodeBlock as cbk
from modules.node.block.HierBlock import HierBlock as hbk
from modules.node.block.ImageBlock import ImageBlock as ibk
from modules.node.block.ListBlock import ListBlock as lbk
from modules.node.block.MarkdownBlock import MarkdownBlock as mbk
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
blocks = [
# Figure 4-92. An empty three-dimensional axes
#
# With this 3D axes enabled, we can now plot a variety of three-dimensional plot types.
# Three-dimensional plotting is one of the functionalities that benefits immensely from
# viewing figures interactively rather than statically in the notebook; recall that to use
# interactive figures, you can use %matplotlib notebook rather than %matplotlib
# inline when running this code.
#
# Three-Dimensional Points and Lines
# The most basic three-dimensional plot is a line or scatter plot created from sets
# of (x, y, z) triples. In analogy with the more common two-dimensional plots
# discussed earlier, we can create these using the ax.plot3D and ax.scatter3D
# functions. The call signature for these is nearly identical to that of their
# two-dimensional counterparts, so you can refer to “Simple Line Plots” on page 224
# and “Simple Scatter Plots” on page 233 for more information on controlling the
# output. Here we'll plot a trigonometric spiral, along with some points drawn
# randomly near the line (Figure 4-93):
# In[4]: ax = plt.axes(projection='3d')
#
# # Data for a three-dimensional line
# zline = np.linspace(0, 15, 1000)
# xline = np.sin(zline)
# yline = np.cos(zline)
# ax.plot3D(xline, yline, zline, 'gray')
#
# # Data for three-dimensional scattered points
# zdata = 15 * np.random.random(100)
# xdata = np.sin(zdata) + 0.1 * np.random.randn(100)
# ydata = np.cos(zdata) + 0.1 * np.random.randn(100)
# ax.scatter3D(xdata, ydata, zdata, c=zdata, cmap='Greens');
#
#
#
#
# Figure 4-93. Points and lines in three dimensions
#
# Notice that by default, the scatter points have their transparency adjusted to give a
# sense of depth on the page. While the three-dimensional effect is sometimes difficult
# to see within a static image, an interactive view can lead to some nice intuition about
# the layout of the points.
#
# Three-Dimensional Contour Plots
# Analogous to the contour plots we explored in “Density and Contour Plots” on page
# 241, mplot3d contains tools to create three-dimensional relief plots using the same
# inputs. Like two-dimensional ax.contour plots, ax.contour3D requires all the input
# data to be in the form of two-dimensional regular grids, with the Z data evaluated at
# each point. Here we'll show a three-dimensional contour diagram of a
# three-dimensional sinusoidal function (Figure 4-94):
# In[5]: def f(x, y):
# return np.sin(np.sqrt(x ** 2 + y ** 2))
#
# x = np.linspace(-6, 6, 30)
# y = np.linspace(-6, 6, 30)
#
# X, Y = np.meshgrid(x, y)
# Z = f(X, Y)
# In[6]: fig = plt.figure()
# ax = plt.axes(projection='3d')
# ax.contour3D(X, Y, Z, 50, cmap='binary')
# ax.set_xlabel('x')
# ax.set_ylabel('y')
# ax.set_zlabel('z');
#
#
#
#
#
]
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
class Content(LeafNode):
def __init__(self):
super().__init__(
"Three-Dimensional Points and Lines",
# Stage.REMOVE_EXTRANEOUS,
# Stage.ORIG_BLOCKS,
# Stage.CUSTOM_BLOCKS,
# Stage.ORIG_FIGURES,
# Stage.CUSTOM_FIGURES,
# Stage.CUSTOM_EXERCISES,
)
[self.add(a) for a in blocks]
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
class ThreeDimensionalPoints(HierNode):
def __init__(self):
super().__init__("Three-Dimensional Points and Lines")
self.add(Content())
# eof
|
[
"[email protected]"
] | |
8a501952490fa9d33985f24cf23aa7cb69298554
|
452be58b4c62e6522724740cac332ed0fe446bb8
|
/src/starboard/android/shared/gyp_configuration.gypi
|
12dd79875f4d6246ee3cd44f16732f163bbd4628
|
[
"Apache-2.0"
] |
permissive
|
blockspacer/cobalt-clone-cab7770533804d582eaa66c713a1582f361182d3
|
b6e802f4182adbf6a7451a5d48dc4e158b395107
|
0b72f93b07285f3af3c8452ae2ceaf5860ca7c72
|
refs/heads/master
| 2020-08-18T11:32:21.458963 | 2019-10-17T13:09:35 | 2019-10-17T13:09:35 | 215,783,613 | 1 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,374 |
gypi
|
# Copyright 2016 The Cobalt Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Platform specific configuration for Android on Starboard. Automatically
# included by gyp_cobalt in all .gyp files by Cobalt together with base.gypi.
#
{
'variables': {
'target_os': 'android',
'final_executable_type': 'shared_library',
'gtest_target_type': 'shared_library',
'sb_widevine_platform' : 'android',
'gl_type': 'system_gles2',
'enable_remote_debugging': 0,
'linker_flags': [
# The NDK default "ld" is actually the gold linker for all architectures
# except arm64 (aarch64) where it's the bfd linker. Don't use either of
# those, rather use lld everywhere. See release notes for NDK 19:
# https://developer.android.com/ndk/downloads/revision_history
'-fuse-ld=lld',
],
# Define platform specific compiler and linker flags.
# Refer to base.gypi for a list of all available variables.
'compiler_flags_host': [
'-O2',
],
'compiler_flags_debug': [
'-frtti',
'-O0',
],
'compiler_flags_devel': [
'-frtti',
'-O2',
],
'compiler_flags_qa': [
'-fno-rtti',
'-gline-tables-only',
],
'compiler_flags_qa_size': [
'-Os',
],
'compiler_flags_qa_speed': [
'-O2',
],
'compiler_flags_gold': [
'-fno-rtti',
'-gline-tables-only',
],
'compiler_flags_gold_size': [
'-Os',
],
'compiler_flags_gold_speed': [
'-O2',
],
'platform_libraries': [
'-lEGL',
'-lGLESv2',
'-lOpenSLES',
'-landroid',
'-llog',
'-lmediandk',
],
'conditions': [
['cobalt_fastbuild==0', {
'compiler_flags_debug': [
'-g',
],
'compiler_flags_devel': [
'-g',
],
'compiler_flags_qa': [
'-gline-tables-only',
],
'compiler_flags_gold': [
'-gline-tables-only',
],
}],
],
},
'target_defaults': {
'target_conditions': [
['sb_pedantic_warnings==1', {
'cflags': [
'-Wall',
'-Wextra',
'-Wunreachable-code',
# Don't get pedantic about warnings from base macros. These must be
# disabled after the -Wall above, so this has to be done here rather
# than in the platform's target toolchain.
# TODO: Rebase base and use static_assert instead of COMPILE_ASSERT
'-Wno-unused-local-typedef', # COMPILE_ASSERT
'-Wno-missing-field-initializers', # LAZY_INSTANCE_INITIALIZER
# It's OK not to use some input parameters. Note that the order
# matters: Wall implies Wunused-parameter and Wno-unused-parameter
# has no effect if specified before Wall.
'-Wno-unused-parameter',
],
}],
['_type=="executable"', {
# Android Lollipop+ requires relocatable executables.
'cflags': [
'-fPIE',
],
'ldflags': [
'-pie',
],
},{
# Android requires relocatable shared libraries.
'cflags': [
'-fPIC',
],
}],
['use_asan==1', {
'cflags': [
'-fsanitize=address',
'-fno-omit-frame-pointer',
],
'ldflags': [
'-fsanitize=address',
# Force linking of the helpers in sanitizer_options.cc
'-Wl,-u_sanitizer_options_link_helper',
],
'defines': [
'ADDRESS_SANITIZER',
],
}],
['use_tsan==1', {
'cflags': [
'-fsanitize=thread',
'-fno-omit-frame-pointer',
],
'ldflags': [
'-fsanitize=thread',
],
'defines': [
'THREAD_SANITIZER',
],
}],
],
}, # end of target_defaults
}
|
[
"[email protected]"
] | |
6914467b4e480fb1fed13898dda10452a6241fef
|
51b6d2fc53d5c632fcf01319842baebf13901e84
|
/atcoder.jp/arc032/arc032_1/Main.py
|
66bb6d66546bb0ff6cd9f30580c3f42ba9e3c722
|
[] |
no_license
|
mono-0812/procon
|
35db3b2c21eff74fbd7b52db07f249380f6834ef
|
68a4b53880a228a0164052b23d1326363efcbc20
|
refs/heads/master
| 2023-05-30T17:02:58.935074 | 2021-06-27T12:15:10 | 2021-06-27T12:15:10 | 345,896,553 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 188 |
py
|
n = int(input())
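# val = 1 + 2 + ... + n is the n-th triangular number; bark WANWAN if it is
# prime, otherwise (composite or 1) bark BOWWOW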
val = 0
for i in range(1,n+1):
val += i
for i in range(2,val//2):
if val%i == 0:
print("BOWWOW")
exit()
if val == 1:
print("BOWWOW")
exit()
print("WANWAN")
|
[
"[email protected]"
] | |
8e0ec5c953585aa962691f0bce2d260c8e78caa8
|
11c036911cf893325199d9e9a91a11cd1dca7c90
|
/all-paths-from-source-to-target/solution.py
|
1fd9a15570b8173bfb5bd501c9d9b6d36d73959b
|
[] |
no_license
|
arpiagar/HackerEarth
|
34f817f69e94d88657c1d8991a55aca302cdc890
|
4a94f1b11a353ab6b2837a1ac77bfbd7c91f91d2
|
refs/heads/master
| 2021-07-18T14:23:05.124943 | 2021-02-09T21:58:12 | 2021-02-09T21:58:12 | 19,204,412 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 640 |
py
|
#https://leetcode.com/problems/all-paths-from-source-to-target/submissions/
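# DFS with path copying: extend the current path one node at a time and record
# a copy whenever the target (node len(graph)-1) is reached; the input is a DAG,
# so no visited set is needed.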
class Solution:
def allPathsSourceTarget(self, graph: List[List[int]]) -> List[List[int]]:
adj_map ={}
for i in range(len(graph)):
adj_map[i] = graph[i]
start = 0
out = []
self.findpath(start, len(graph)-1, [], adj_map, out)
return out
def findpath(self, current, end, temp, adj_map, out):
if current == end:
out.append(temp+[current])
temp.append(current)
for elem in adj_map[current]:
self.findpath(elem, end, [x for x in temp],adj_map, out)
|
[
"[email protected]"
] | |
43caf1de2da7fac86bcfdb234a60cee558ff0e0a
|
7d23056a789ded9ff2b9e14f9c57e59295cdfd6d
|
/samples/src/com/zoho/crm/api/initializer/init.py
|
e6722a40d14a4971a467de5fc2f5fd8877382104
|
[] |
no_license
|
L1nuxFNC/zohocrm-python-sdk
|
2e825fe4d7c6fb1374a5747cbd1e39b0dd4b706d
|
bba7328de07b137d2cb6e2aac31b8f57e0803026
|
refs/heads/master
| 2023-06-05T09:17:35.549980 | 2021-05-13T12:45:59 | 2021-05-13T12:45:59 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,893 |
py
|
from zcrmsdk.src.com.zoho.crm.api.user_signature import UserSignature
from zcrmsdk.src.com.zoho.crm.api.dc import INDataCenter, USDataCenter, EUDataCenter, CNDataCenter, AUDataCenter
from zcrmsdk.src.com.zoho.api.authenticator.store import DBStore, FileStore
from zcrmsdk.src.com.zoho.api.logger import Logger
from zcrmsdk.src.com.zoho.crm.api.initializer import Initializer
from zcrmsdk.src.com.zoho.api.authenticator.oauth_token import OAuthToken, TokenType
class SDKInitializer(object):
@staticmethod
def initialize():
"""
Create an instance of Logger Class that takes two parameters
1 -> Level of the log messages to be logged. Can be configured by typing Logger.Levels "." and choose any level from the list displayed.
2 -> Absolute file path, where messages need to be logged.
"""
logger = Logger.get_instance(level=Logger.Levels.INFO, file_path="/Users/user_name/Documents/python_sdk_log.log")
# Create an UserSignature instance that takes user Email as parameter
user = UserSignature(email="[email protected]")
"""
Configure the environment
which is of the pattern Domain.Environment
Available Domains: USDataCenter, EUDataCenter, INDataCenter, CNDataCenter, AUDataCenter
Available Environments: PRODUCTION(), DEVELOPER(), SANDBOX()
"""
environment = USDataCenter.PRODUCTION()
"""
Create a Token instance that takes the following parameters
1 -> OAuth client id.
2 -> OAuth client secret.
3 -> OAuth redirect URL.
4 -> REFRESH/GRANT token.
5 -> token type.
"""
token = OAuthToken(client_id="clientId", client_secret="clientSecret", redirect_url="redirectURL", token="REFRESH/ GRANT Token", token_type=TokenType.REFRESH)  # or TokenType.GRANT
"""
Create an instance of TokenStore
1 -> Absolute file path of the file to persist tokens
"""
store = FileStore(file_path='/Users/username/Documents/python_sdk_tokens.txt')
"""
Create an instance of TokenStore
1 -> DataBase host name. Default value "localhost"
2 -> DataBase name. Default value "zohooauth"
3 -> DataBase user name. Default value "root"
4 -> DataBase password. Default value ""
5 -> DataBase port number. Default value "3306"
"""
store = DBStore()
store = DBStore(host='host_name', database_name='database_name', user_name='user_name', password='password',
port_number='port_number')
"""
A Boolean value for the key (auto_refresh_fields) to allow or prevent auto-refreshing of the modules' fields in the background.
if True - all the modules' fields will be auto-refreshed in the background whenever there is any change.
if False - the fields will not be auto-refreshed in the background. The user can manually delete the file(s) or the specific module's fields using methods from ModuleFieldsHandler
"""
auto_refresh_fields = True
"""
The path containing the absolute directory path (in the key resource_path) to store user-specific files containing information about fields in modules.
"""
resource_path = '/Users/user_name/Documents/python-app'
"""
Call the static initialize method of Initializer class that takes the following arguments
1 -> UserSignature instance
2 -> Environment instance
3 -> Token instance
4 -> TokenStore instance
5 -> Logger instance
6 -> auto_refresh_fields
7 -> resource_path
"""
Initializer.initialize(user=user, environment=environment, token=token, store=store, logger=logger, auto_refresh_fields=auto_refresh_fields, resource_path=resource_path)
SDKInitializer.initialize()
|
[
"[email protected]"
] | |
dd6398e4756bc1d70633d09a2c01a4591bf45d5a
|
dc99d95671170444cd7bf02e37da6ecda4a5f19e
|
/apps/courses/forms.py
|
7c3607216ee4eed7c75516ebceebca0b96f618d5
|
[] |
no_license
|
bbright3493/python_real_war
|
734d49ed9f7e1800d24dc754424a07b69d7d8c1f
|
6e43bb7d814920222f3310bd6fd9f04cb3d5bbf1
|
refs/heads/master
| 2020-03-30T06:08:40.249185 | 2018-10-22T07:33:41 | 2018-10-22T07:33:41 | 150,841,381 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 358 |
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 18-5-7 10:05 AM
# @Author : Ztsong
from django import forms
from .models import ProgramUpload
#
# class ProgramUploadForm(forms.ModelForm):
# class Meta:
# model = ProgramUpload
# fields = ['image']
class ProgramUploadForm(forms.Form):
image = forms.ImageField()
|
[
"[email protected]"
] | |
ec626fcce05227e389111ecdb0c34538cbe6e418
|
0090756d7a6eb6ab8389ad23b20e89cd68dbd0e4
|
/배열insert.py
|
b895832beb6bb14ce872d0f5f7be1610194d477c
|
[] |
no_license
|
ssh6189/2019.12.16
|
5c3093e03ac793d5f0a93cf99e78c6483fcee6d8
|
c1021bb72b3fdc05d7f5e8ae350bbd6eee65b0d3
|
refs/heads/master
| 2020-12-13T19:19:04.558270 | 2020-01-17T08:47:04 | 2020-01-17T08:47:04 | 234,507,219 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 396 |
py
|
import numpy as np
a = np.arange(1, 10).reshape(3,3)
print(a)
# flatten a to 1-D and insert 999 at index 1 (np.insert returns a new array)
print(np.insert(a, 1, 999))
# insert along axis 0 at index 1: 999 fills a new row at row index 1
print(np.insert(a, 1, 999, axis=0))
# insert along axis 1 at index 1: 999 fills a new column at column index 1
print(np.insert(a, 1, 999, axis=1))
|
[
"[email protected]"
] | |
44685fe6f9efa4068a850e9767859e5f04694261
|
1564d12d61f669ce9f772f3ef7563167f7fe13bf
|
/codeforces/606/B.MakeThemOdd.py
|
9c2d937efa9b206dced25914e93f323bacc2266a
|
[] |
no_license
|
sakshamk6999/codingPractice
|
73ec4873defb0f0d2e47173150a589ee12e5e0a1
|
f727aac6d87448b19fc9d48660dc6978fe5edc14
|
refs/heads/master
| 2020-12-01T20:22:36.299535 | 2020-02-04T05:55:53 | 2020-02-04T05:55:53 | 230,757,937 | 0 | 0 | null | 2020-02-12T20:38:12 | 2019-12-29T14:00:22 |
Python
|
UTF-8
|
Python
| false | false | 849 |
py
|
from collections import defaultdict
import heapq
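# Strategy: repeatedly halve the largest remaining even value until it turns
# odd. heapq is a min-heap, so values are stored negated to pop the maximum;
# rec tracks which values are already queued so duplicates collapse into one.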
for _ in range(int(input())):
n = int(input())
rec = {}
rec = defaultdict(lambda : 0, rec)
a = sorted(list(map(int, input().split())))
e = []
l = 0
for i in a:
if i % 2 == 0 and rec[-1 * i] == 0:
e.append(-1 * i)
rec[-1 * i] = 1
l += 1
heapq.heapify(e)
ans = 0
while l > 0:
# print(e)
ans += 1
temp = heapq.heappop(e)
# print("temp", -1 * temp)
rec[temp] = 0
temp = (-1 * temp) // 2
if temp % 2 == 0:
if rec[-1 * temp] == 1:
# print("temp is in", - 1 * temp)
l -= 1
else:
rec[-1 * temp] = 1
heapq.heappush(e, -1 * temp)
else:
l -= 1
print(ans)
|
[
"[email protected]"
] | |
0d00be6ffa67dcb44dadf1e7fb59c96d3cefdc76
|
dabc9c7ec7cce125a12c6243ff67fd91e620d636
|
/tap/tests/test_pytest_plugin.py
|
c91e8b40631e9c79c21ada77df44a0db95c9ba65
|
[
"BSD-3-Clause",
"BSD-2-Clause"
] |
permissive
|
Mark-E-Hamilton/tappy
|
7634209c2862c9e837b58602d4b59636fd9a8e89
|
62c1a4ef1d9e724d3c7bbb31361c17c3bf071d04
|
refs/heads/master
| 2021-01-15T09:04:09.813683 | 2016-03-21T04:51:45 | 2016-03-21T04:51:45 | 53,630,217 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,332 |
py
|
# Copyright (c) 2016, Matt Layman
try:
from unittest import mock
except ImportError:
import mock
import tempfile
from tap.plugins import _pytest
from tap.tests import TestCase
from tap.tracker import Tracker
class TestPytestPlugin(TestCase):
def setUp(self):
"""The pytest plugin uses module scope so a fresh tracker
must be installed each time."""
# When running this suite with pytest, save and restore the tracker.
self._tracker = _pytest.tracker
_pytest.tracker = Tracker()
def tearDown(self):
_pytest.tracker = self._tracker
def _make_config(self):
config = mock.Mock()
config.option.tap_stream = False
config.option.tap_files = False
config.option.tap_outdir = None
config.option.tap_combined = False
return config
def test_includes_options(self):
group = mock.Mock()
parser = mock.Mock()
parser.getgroup.return_value = group
_pytest.pytest_addoption(parser)
self.assertEqual(group.addoption.call_count, 4)
def test_tracker_stream_set(self):
config = self._make_config()
config.option.tap_stream = True
_pytest.pytest_configure(config)
self.assertTrue(_pytest.tracker.streaming)
def test_tracker_outdir_set(self):
outdir = tempfile.mkdtemp()
config = self._make_config()
config.option.tap_outdir = outdir
_pytest.pytest_configure(config)
self.assertEqual(_pytest.tracker.outdir, outdir)
def test_tracker_combined_set(self):
config = self._make_config()
config.option.tap_combined = True
_pytest.pytest_configure(config)
self.assertTrue(_pytest.tracker.combined)
def test_track_when_call_report(self):
"""Only the call reports are tracked."""
_pytest.tracker = mock.Mock()
report = mock.Mock(when='setup', outcome='passed')
_pytest.pytest_runtest_logreport(report)
self.assertFalse(_pytest.tracker.add_ok.called)
def test_tracks_ok(self):
_pytest.tracker = mock.Mock()
location = ('test_file.py', 1, 'TestFake.test_me')
report = mock.Mock(when='call', outcome='passed', location=location)
_pytest.pytest_runtest_logreport(report)
_pytest.tracker.add_ok.assert_called_once_with(
'TestFake', 'TestFake.test_me')
def test_tracks_not_ok(self):
_pytest.tracker = mock.Mock()
location = ('test_file.py', 1, 'TestFake.test_me')
report = mock.Mock(when='call', outcome='failed', location=location)
_pytest.pytest_runtest_logreport(report)
_pytest.tracker.add_not_ok.assert_called_once_with(
'TestFake', 'TestFake.test_me', diagnostics='')
def test_tracks_skip(self):
_pytest.tracker = mock.Mock()
location = ('test_file.py', 1, 'TestFake.test_me')
longrepr = ('', '', 'Skipped: a reason')
report = mock.Mock(
when='call', outcome='skipped', location=location,
longrepr=longrepr)
_pytest.pytest_runtest_logreport(report)
_pytest.tracker.add_skip.assert_called_once_with(
'TestFake', 'TestFake.test_me', 'a reason')
def test_generates_reports_for_stream(self):
config = self._make_config()
config.option.tap_stream = True
_pytest.tracker = mock.Mock()
_pytest.pytest_unconfigure(config)
_pytest.tracker.generate_tap_reports.assert_called_once_with()
def test_generates_reports_for_files(self):
config = self._make_config()
config.option.tap_files = True
_pytest.tracker = mock.Mock()
_pytest.pytest_unconfigure(config)
_pytest.tracker.generate_tap_reports.assert_called_once_with()
def test_generates_reports_for_combined(self):
config = self._make_config()
config.option.tap_combined = True
_pytest.tracker = mock.Mock()
_pytest.pytest_unconfigure(config)
_pytest.tracker.generate_tap_reports.assert_called_once_with()
def test_skips_reporting_with_no_output_option(self):
config = self._make_config()
_pytest.tracker = mock.Mock()
_pytest.pytest_unconfigure(config)
self.assertFalse(_pytest.tracker.generate_tap_reports.called)
|
[
"[email protected]"
] | |
e36cba2db79f18ed6432af22f03c4f53dd4f61b1
|
2dfbb97b47fd467f29ffb26faf9a9f6f117abeee
|
/leetcode/242.py
|
0b7a2589d14a456369352fe3820fb247d6675b0b
|
[] |
no_license
|
liuweilin17/algorithm
|
0e04b2d36dfb6b7b1b0e0425daf69b62273c54b5
|
d3e8669f932fc2e22711e8b7590d3365d020e189
|
refs/heads/master
| 2020-12-30T11:03:40.085105 | 2020-04-10T03:46:01 | 2020-04-10T03:46:01 | 98,844,919 | 3 | 1 | null | 2018-10-05T03:01:02 | 2017-07-31T03:35:14 |
C++
|
UTF-8
|
Python
| false | false | 1,014 |
py
|
###########################################
# Let's Have Some Fun
# File Name: 242.py
# Author: Weilin Liu
# Mail: [email protected]
# Created Time: Fri Oct 19 00:40:47 2018
###########################################
#coding=utf-8
#!/usr/bin/python
# valid anagram
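# counting approach below; for reference, sorted(s) == sorted(t) or
# collections.Counter(s) == collections.Counter(t) would work equally well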
class Solution(object):
def isAnagram(self, s, t):
"""
:type s: str
:type t: str
:rtype: bool
"""
dt = {}
l1 = len(s)
l2 = len(t)
if l1 != l2:
return False
for c in s:
if c in dt.keys():
dt[c] += 1
else:
dt[c] = 1
for c in t:
if c in dt.keys():
dt[c] -= 1
if dt[c] < 0:
return False
else:
return False
return True
if __name__ == '__main__':
so = Solution()
s = "anagram"
t = "nagaram"
print so.isAnagram(s, t)
s = "rat"
t = "car"
print so.isAnagram(s, t)
|
[
"[email protected]"
] | |
7eacb9ca621e2a660599a473bfdbc1136d01a7a6
|
487ce91881032c1de16e35ed8bc187d6034205f7
|
/codes/CodeJamCrawler/16_0_1/bbbeebun/codejam_01.py
|
7a489d790a6fd40192e6c72e498da86daa2ff2b1
|
[] |
no_license
|
DaHuO/Supergraph
|
9cd26d8c5a081803015d93cf5f2674009e92ef7e
|
c88059dc66297af577ad2b8afa4e0ac0ad622915
|
refs/heads/master
| 2021-06-14T16:07:52.405091 | 2016-08-21T13:39:13 | 2016-08-21T13:39:13 | 49,829,508 | 2 | 0 | null | 2021-03-19T21:55:46 | 2016-01-17T18:23:00 |
Python
|
UTF-8
|
Python
| false | false | 1,308 |
py
|
def has_completed(mapping):
count = 0
for key in mapping:
count += mapping[key]
if count == 10:
return True
else:
return False
def update_mapping(current_n, mapping):
current_n_str = str(current_n)
for each in current_n_str:
if mapping[each] == 0:
mapping[each] = 1
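# Look at n, 2n, 3n, ... and mark the digits seen; stop once all ten digits
# 0-9 have appeared. n == 0 never yields new digits, hence INSOMNIA.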
def counting_sheep(n):
if n == 0:
return 'INSOMNIA'
mapping = {
'0':0, '1':0, '2':0,
'3':0, '4':0, '5':0,
'6':0, '7':0, '8':0,
'9':0
}
current_n = n
update_mapping(current_n, mapping)
while not has_completed(mapping):
current_n += n
update_mapping(current_n, mapping)
return current_n
i = 1
dataset = [0,1,2,11,1692,213858,999995,292164,265199,1000000,10,663708,25,674735,762196,519439,205639,686594,851051,506636,72961,571071,380018,721364,271918,124,362718,40,779467,125000,9,4,104652,20,999998,34,133688,911210,71670,403183,3,999999,777164,999991,999996,954404,999997,200,771909,535557,621518,246569,816478,12500,854110,434198,610249,562071,679849,999992,5,427795,889527,739756,866179,8,513404,125,211763,408914,1250,225473,541210,687079,839403,6,557598,816751,584871,857249,999993,999994,467549,364901,988598,659695,402255,657006,637531,224284,441246,192103,166,565718,300682,596698,584551,410726,7,90188]
for each in dataset:
print 'Case #'+str(i) +': ' + str(counting_sheep(each))
i += 1
|
[
"[[email protected]]"
] | |
08d60b0fdf4f6abfda5e2ac10591021283fc44bf
|
8e1be167066e30eff91c26c0757211cf3cf8b016
|
/django/orm/book_authors_proj/apps/books_authors_app/migrations/0001_initial.py
|
5818682c02c3c18e31b135482e2c1adb636304db
|
[] |
no_license
|
dojo-solutions/online-ft-python
|
074d0ba968f5a77eaec1bca0904232f2aa29051a
|
b4f6941d0bba376d121a40a6429b815d5b03c32f
|
refs/heads/master
| 2020-04-21T11:52:31.390772 | 2019-03-02T01:27:54 | 2019-03-02T01:27:54 | 169,542,448 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,422 |
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2019-02-21 18:01
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Author',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('first_name', models.CharField(max_length=45)),
('last_name', models.CharField(max_length=45)),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
],
),
migrations.CreateModel(
name='Book',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=255)),
('desc', models.TextField()),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
],
),
migrations.AddField(
model_name='author',
name='books',
field=models.ManyToManyField(related_name='authors', to='books_authors_app.Book'),
),
]
|
[
"[email protected]"
] | |
2ba20372fe4994021d5cf6e43c9b163c1c106b64
|
05a9e0bb7e33099f94dfc8af53b4837bc5c9d287
|
/python/ext_examples/torch/bench/linear.py
|
1a840cbd0db9eba60313d59db7025e1b6a7852df
|
[] |
no_license
|
HiroIshida/snippets
|
999c09efadae80397cb82a424328bb1dbda4915f
|
f64dcd793184be64682b55bdaee7392fd97a0916
|
refs/heads/master
| 2023-09-01T08:18:42.523625 | 2023-09-01T04:08:20 | 2023-09-01T04:08:20 | 207,662,767 | 7 | 2 | null | 2022-08-01T23:20:42 | 2019-09-10T21:04:01 |
C++
|
UTF-8
|
Python
| false | false | 782 |
py
|
import torch.nn as nn
import tqdm
import numpy as np
import matplotlib.pyplot as plt
import time
import torch
import threadpoolctl
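# Micro-benchmark: build an MLP of the given depth (40-unit Linear+ReLU blocks
# with a sigmoid head) and time the average forward pass over 100 runs,
# optionally including the backward pass.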
def measure_perf(depth, with_grad: bool = False):
lst = []
for _ in range(depth):
lst.append(nn.Linear(40, 40))
lst.append(nn.ReLU())
lst.append(nn.Linear(40, 1))
lst.append(nn.Sigmoid())
net = nn.Sequential(*lst)
arr = np.random.randn(1, 40)
ten = torch.from_numpy(arr).float()
ten.requires_grad_(with_grad)
ts = time.time()
n_trial = 100
for _ in range(n_trial):
val1 = net(ten)
if with_grad:
val1.backward()
perf = (time.time() - ts) / n_trial
return perf
perfs = [measure_perf(n, True) for n in tqdm.tqdm(range(50))]
plt.plot(perfs)
plt.show()
|
[
"[email protected]"
] | |
b5ed3013b2eafda68318a223d46dce0287cafaff
|
32fdc94d1b8d98085db5d1e8caae4161d3e70667
|
/3rd_party/python3.7/lib/python3.7/site-packages/mining-0.2.2-py3.7-linux-x86_64.egg/mining/utils/listc.py
|
298f9060e40d10832fcab747bdea37497e80d1e6
|
[
"Python-2.0"
] |
permissive
|
czfdlut/ticket_proxy
|
fa0f1924a86babfa7ce96cf97e929f7bf78643b7
|
0d7c19448741bc9030484a97c1b8f118098213ad
|
refs/heads/master
| 2022-12-23T05:25:58.207123 | 2019-11-20T03:58:31 | 2019-11-20T03:58:31 | 174,579,562 | 1 | 3 | null | 2022-12-18T01:18:07 | 2019-03-08T17:22:48 |
Python
|
UTF-8
|
Python
| false | false | 310 |
py
|
def __bootstrap__():
global __bootstrap__, __loader__, __file__
import sys, pkg_resources, imp
__file__ = pkg_resources.resource_filename(__name__, 'listc.cpython-37m-x86_64-linux-gnu.so')
__loader__ = None; del __bootstrap__, __loader__
imp.load_dynamic(__name__,__file__)
__bootstrap__()
|
[
"[email protected]"
] | |
32fb9a2a330ac6fa993cae29751e0c894fb2e922
|
1af44bdcbc3c15d3f6e436a7924dfd45f504ab3a
|
/01.jump to python/chpter 2/62em.py
|
4db6566d344540379fdc05693d0ca4cb074461b8
|
[] |
no_license
|
wql7654/bigdata_exam
|
f57c8b475690cbc5978009dbf8008bedff602e2a
|
c07ee711bb84407428ba31165185b9607b6825e8
|
refs/heads/master
| 2023-04-07T00:50:59.563714 | 2021-05-25T02:46:43 | 2021-05-25T02:46:43 | 180,915,985 | 0 | 0 | null | 2023-03-25T01:08:09 | 2019-04-12T02:36:08 |
Jupyter Notebook
|
UTF-8
|
Python
| false | false | 191 |
py
|
a=['life','is','too','hard']
re=" ".join(a)
print(re)
re=re.split()
print(re)
re=','.join(a)
print(re)
re=re.split(',')
print(re)
re.sort()
print(re)
re=" ".join(re)
print(re)
|
[
"[email protected]"
] | |
ede66e2d33e041a80cec2a8771ccc87fe440f7af
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/148/usersdata/268/99980/submittedfiles/testes.py
|
c1a570ae015ca648546489e96edebf5c24b3fe5c
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 142 |
py
|
# -*- coding: utf-8 -*-
n=int(input('ooo'))
i=0
while i*(i+1)*(i+2) < n:
i=i+1
if i*(i+1)*(i+2)==n:
print('S')
else :
print('N')
|
[
"[email protected]"
] | |
60c93a4684a8e005d11c1dc1ee26fb60e25dd162
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03393/s891507939.py
|
38993f4b41b75bc140544df5c2618f773831c0e9
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 463 |
py
|
s = input()
c = list(s)
c2 = set(c)
al = sorted(list("qwertyuiopasdfghjklzxcvbnm"))
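# Find the lexicographically smallest "diverse" string (all letters distinct)
# strictly larger than s: either append an unused letter, or replace some
# position i with the smallest larger letter found at or after i and drop the tail.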
# if s has fewer than 26 letters, append the smallest unused letter
if len(c)<26:
for i in range(26):
if al[i] not in c2:
print(s+al[i])
exit()
if s == "zyxwvutsrqponmlkjihgfedcba":
print(-1)
exit()
rev = "zyxwvutsrqponmlkjihgfedcba"
for i in range(25,-1,-1):
x = sorted(c[i:])
for j in x:
if ord(s[i])<ord(j):
print(s[:i]+j)
exit()
|
[
"[email protected]"
] | |
ebbc9f436c2f66f730686c9789e0cb9cb7aa1ee8
|
5ac72c8484d8b7c2ecb94217e70ffa96c8c83053
|
/server/account/models.py
|
0661b22685cb7c013e9dce0dd4cb818a1fc07399
|
[
"MIT"
] |
permissive
|
buffalos0721/Super-Neutron-Drive
|
975b6a9d20f9dc28d85632f87f50dd37da199f1f
|
d3cbeeae113722099032fb651dd4148670cb86e9
|
refs/heads/master
| 2020-03-26T08:40:40.409045 | 2016-08-18T16:20:36 | 2016-08-18T16:20:36 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 6,280 |
py
|
import urllib
import datetime
from importlib import import_module
from collections import OrderedDict
from django.db import models
from django.conf import settings
from django.utils import timezone
from django.core import validators
from django.contrib.auth.models import AbstractBaseUser, PermissionsMixin, UserManager
from ndrive.utils.lib import cached_method
from ndrive.utils.email import send_mail
import jwt
from paypal.standard.ipn.signals import subscription_signup
SESSION_ENGINE = import_module(settings.SESSION_ENGINE)
class User (AbstractBaseUser, PermissionsMixin):
verified_email = models.EmailField('verified email address', null=True, blank=True)
verified = models.BooleanField(default=False)
newsletter = models.BooleanField('Subscribe to Newsletter', default=False)
first_name = models.CharField('first name', max_length=30, blank=True)
last_name = models.CharField('last name', max_length=30, blank=True)
username = models.CharField('Username', max_length=30, unique=True,
help_text='Required. 30 characters or fewer. Letters, digits and @/./+/-/_ only.',
validators=[
validators.RegexValidator(r'^[\w.@+-]+$', 'Enter a valid username.', 'invalid')
])
email = models.EmailField('E-Mail', unique=True)
is_staff = models.BooleanField('staff status', default=False,
help_text='Designates whether the user can log into this admin site.')
is_active = models.BooleanField('active', default=True,
help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts.')
date_joined = models.DateTimeField('date joined', default=timezone.now)
USERNAME_FIELD = 'username'
REQUIRED_FIELDS = ['email']
objects = UserManager()
def __unicode__ (self):
return self.username
def get_short_name (self):
return self.username
@staticmethod
def autocomplete_search_fields():
return ("id__iexact", "username__icontains", "email__icontains", "first_name__icontains", "last_name__icontains")
def chrome_token (self, session):
return jwt.encode({
'session': session.session_key,
'exp': datetime.datetime(2030, 1, 1)
}, settings.SECRET_KEY)
@staticmethod
def get_session (token):
payload = jwt.decode(token, settings.SECRET_KEY, verify_expiration=False)
return SESSION_ENGINE.SessionStore(payload['session'])
def send_verify (self, request):
if self.email != self.verified_email:
EmailVerify.new_verify(self, request)
def send_pwreset (self, request):
EmailVerify.new_verify(self, request, True)
@cached_method
def subscription (self):
try:
return self.subscription_set.filter(expires__gte=timezone.now())[0]
except:
return None
class EmailVerify (models.Model):
user = models.ForeignKey(User)
email = models.EmailField()
used = models.BooleanField(default=False)
reset = models.BooleanField(default=False)
created = models.DateTimeField(default=timezone.now)
class Meta:
verbose_name = 'E-Mail Verify'
verbose_name_plural = 'E-Mail Verifies'
def __unicode__ (self):
return self.email
def qs (self):
return '?token={}&email={}'.format(self.token(), urllib.quote(self.email))
@cached_method
def token (self):
return jwt.encode({'id': self.id, 'created': unicode(self.created)}, settings.SECRET_KEY)
@staticmethod
def new_verify (user, request, reset=False):
verify = EmailVerify(user=user, email=user.email, reset=reset)
verify.save()
context = {'verify': verify, 'request': request}
if reset:
tpl = 'account/email.password-reset'
send_mail('Password Reset - {site_name}', [verify.email], tpl, context)
else:
tpl = 'account/email.verify'
send_mail('Please Verify Your E-Mail - {site_name}', [verify.email], tpl, context)
return verify
@staticmethod
def verify_token (token, email, age=10, reset=False):
payload = jwt.decode(token, settings.SECRET_KEY)
old = timezone.now() - datetime.timedelta(days=age)
verify = EmailVerify.objects.get(
id=payload['id'],
email=email,
created__gte=old,
used=False,
reset=reset,
)
if not reset:
verify.used = True
verify.save()
return verify
SUBS_TYPES = [
('initiate', 'Initiate'),
('padawan', 'Padawan'),
('knight', 'Knight'),
('master', 'Master'),
('grand-master', 'Grand Master'),
]
SUBSCRIPTIONS = OrderedDict([
('initiate', {
'cost': 2500,
'name': 'Initiate'
}),
('padawan', {
'cost': 5000,
'name': 'Padawan'
}),
('knight', {
'cost': 9900,
'name': 'Knight'
}),
('master', {
'cost': 30000,
'name': 'Master'
}),
('grand-master', {
'cost': 50000,
'name': 'Grand Master'
}),
])
if settings.DEBUG:
SUBSCRIPTIONS['special'] = {'cost': 200, 'name': 'Special'}
SUBS_TYPES.append(('special', 'Special'))
class Subscription (models.Model):
user = models.ForeignKey(User)
name = models.CharField('Display Name for Credits', max_length=30)
stype = models.CharField('Subscription Type', max_length=20, choices=SUBS_TYPES)
stripe_id = models.CharField(max_length=255, blank=True, null=True)
stripe_subs = models.CharField(max_length=255, blank=True, null=True)
paypal_id = models.CharField(max_length=255, blank=True, null=True)
paypal_subs = models.CharField(max_length=255, blank=True, null=True)
expires = models.DateTimeField()
cancelled = models.BooleanField(default=False)
created = models.DateTimeField(auto_now_add=True)
class Meta:
ordering = ('-expires',)
def __unicode__ (self):
return self.user.username
def payment_type (self):
if self.stripe_id:
return 'Stripe'
return 'PayPal'
def paypal_subs_created (sender, **kwargs):
user = User.objects.get(id=sender.custom)
subs = Subscription(
user = user,
name = user.username,
stype = sender.item_number,
expires = timezone.now() + datetime.timedelta(days=365),
paypal_id = sender.payer_email,
paypal_subs = sender.subscr_id,
)
subs.save()
subscription_signup.connect(paypal_subs_created)
|
[
"[email protected]"
] | |
d8e1b1e542edb43a01bb810371c9af69a80d601c
|
1e4d2a66f92b8ef3baddaf76366c1be4ad853328
|
/Safari_Edris_DSC510/SandBox/ImportFiles/venv/Scripts/pip3-script.py
|
d8f7812de43586a23f66d5e0a7f99db0e1b9abc4
|
[] |
no_license
|
dlingerfelt/DSC-510-Fall2019
|
0c4168cf030af48619cfd5e044f425f1f9d376dd
|
328a5a0c8876f4bafb975345b569567653fb3694
|
refs/heads/master
| 2022-12-04T05:04:02.663126 | 2022-11-28T14:58:34 | 2022-11-28T14:58:34 | 204,721,695 | 5 | 23 | null | 2019-12-06T01:15:11 | 2019-08-27T14:30:27 |
Python
|
UTF-8
|
Python
| false | false | 463 |
py
|
#!C:\Users\safar\Documents\GitHub\DSC-510-Fall2019\Safari_Edris_DSC510\SandBox\ImportFiles\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==19.0.3','console_scripts','pip3'
__requires__ = 'pip==19.0.3'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==19.0.3', 'console_scripts', 'pip3')()
)
|
[
"[email protected]"
] | |
2bb14a82bf0195f215a36c5e10aef5136ef02006
|
facb8b9155a569b09ba66aefc22564a5bf9cd319
|
/wp2/era5_scripts/02_preprocessing/combine82/35-tideGauge.py
|
c363a79bc5b9cbdedc37466360109e92883f0129
|
[] |
no_license
|
moinabyssinia/modeling-global-storm-surges
|
13e69faa8f45a1244a964c5de4e2a5a6c95b2128
|
6e385b2a5f0867df8ceabd155e17ba876779c1bd
|
refs/heads/master
| 2023-06-09T00:40:39.319465 | 2021-06-25T21:00:44 | 2021-06-25T21:00:44 | 229,080,191 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,115 |
py
|
# -*- coding: utf-8 -*-
"""
Created on Tue Apr 16 16:11:00 2020
--------------------------------------------
Load predictors for each TG and combine them
--------------------------------------------
@author: Michael Tadesse
"""
import os
import pandas as pd
#define directories
dir_in = '/lustre/fs0/home/mtadesse/eraFiveConcat'
dir_out = '/lustre/fs0/home/mtadesse/ereaFiveCombine'
def combine():
os.chdir(dir_in)
#get names
tg_list_name = os.listdir()
#cd to where the actual file is
os.chdir(dir_in)
x = 35
y = 36
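    # NOTE: x/y pick a single tide-gauge folder (index 35, matching the "35-"
    # prefix in this script's filename), presumably one shard of a batch run.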
for t in range(x, y):
tg_name = tg_list_name[t]
print(tg_name, '\n')
#looping through each TG folder
os.chdir(tg_name)
#defining the path for each predictor
where = os.getcwd()
csv_path = {'slp' : os.path.join(where, 'slp.csv'),\
"wnd_u": os.path.join(where, 'wnd_u.csv'),\
'wnd_v' : os.path.join(where, 'wnd_v.csv')}
first = True
for pr in csv_path.keys():
print(tg_name, ' ', pr)
#read predictor
pred = pd.read_csv(csv_path[pr])
#remove unwanted columns
pred.drop(['Unnamed: 0', 'Unnamed: 0.1'], axis = 1, inplace=True)
#give predictor columns a name
pred_col = list(pred.columns)
for pp in range(len(pred_col)):
if pred_col[pp] == 'date':
continue
pred_col[pp] = pr + str(pred_col[pp])
pred.columns = pred_col
#merge all predictors
if first:
pred_combined = pred
first = False
else:
pred_combined = pd.merge(pred_combined, pred, on = 'date')
#saving pred_combined
os.chdir(dir_out)
pred_combined.to_csv('.'.join([tg_name, 'csv']))
os.chdir(dir_in)
print('\n')
#run script
combine()
|
[
"[email protected]"
] | |
ea48d2765c2ca0ae7d26e05b899fc93cb13349ec
|
e42a61b7be7ec3412e5cea0ffe9f6e9f34d4bf8d
|
/a10sdk/core/system/system_bfd_stats.py
|
2b13f0d88b617ea5ea2c93a905d0181004463e88
|
[
"Apache-2.0"
] |
permissive
|
amwelch/a10sdk-python
|
4179565afdc76cdec3601c2715a79479b3225aef
|
3e6d88c65bd1a2bf63917d14be58d782e06814e6
|
refs/heads/master
| 2021-01-20T23:17:07.270210 | 2015-08-13T17:53:23 | 2015-08-13T17:53:23 | 40,673,499 | 0 | 0 | null | 2015-08-13T17:51:35 | 2015-08-13T17:51:34 | null |
UTF-8
|
Python
| false | false | 4,388 |
py
|
from a10sdk.common.A10BaseClass import A10BaseClass
class Stats(A10BaseClass):
"""This class does not support CRUD Operations please use parent.
:param udp_checksum_error: {"optional": true, "size": "2", "type": "number", "oid": "2", "format": "counter"}
:param invalid_detect_mult: {"optional": true, "size": "2", "type": "number", "oid": "8", "format": "counter"}
:param auth_length_invalid: {"optional": true, "size": "2", "type": "number", "oid": "12", "format": "counter"}
:param auth_key_mismatch: {"optional": true, "size": "2", "type": "number", "oid": "16", "format": "counter"}
:param invalid_my_disc: {"optional": true, "size": "2", "type": "number", "oid": "10", "format": "counter"}
:param multihop_mismatch: {"optional": true, "size": "2", "type": "number", "oid": "4", "format": "counter"}
:param dest_unreachable: {"optional": true, "size": "2", "type": "number", "oid": "20", "format": "counter"}
:param length_too_small: {"optional": true, "size": "2", "type": "number", "oid": "6", "format": "counter"}
:param auth_mismatch: {"optional": true, "size": "2", "type": "number", "oid": "13", "format": "counter"}
:param auth_failed: {"optional": true, "size": "2", "type": "number", "oid": "18", "format": "counter"}
:param auth_type_mismatch: {"optional": true, "size": "2", "type": "number", "oid": "14", "format": "counter"}
:param invalid_ttl: {"optional": true, "size": "2", "type": "number", "oid": "11", "format": "counter"}
:param data_is_short: {"optional": true, "size": "2", "type": "number", "oid": "7", "format": "counter"}
:param session_not_found: {"optional": true, "size": "2", "type": "number", "oid": "3", "format": "counter"}
:param auth_seqnum_invalid: {"optional": true, "size": "2", "type": "number", "oid": "17", "format": "counter"}
:param local_state_admin_down: {"optional": true, "size": "2", "type": "number", "oid": "19", "format": "counter"}
:param ip_checksum_error: {"optional": true, "size": "2", "type": "number", "oid": "1", "format": "counter"}
:param version_mismatch: {"optional": true, "size": "2", "type": "number", "oid": "5", "format": "counter"}
:param auth_key_id_mismatch: {"optional": true, "size": "2", "type": "number", "oid": "15", "format": "counter"}
:param other_error: {"optional": true, "size": "2", "type": "number", "oid": "21", "format": "counter"}
:param invalid_multipoint: {"optional": true, "size": "2", "type": "number", "oid": "9", "format": "counter"}
:param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
"""
def __init__(self, **kwargs):
self.ERROR_MSG = ""
self.b_key = "stats"
self.DeviceProxy = ""
self.udp_checksum_error = ""
self.invalid_detect_mult = ""
self.auth_length_invalid = ""
self.auth_key_mismatch = ""
self.invalid_my_disc = ""
self.multihop_mismatch = ""
self.dest_unreachable = ""
self.length_too_small = ""
self.auth_mismatch = ""
self.auth_failed = ""
self.auth_type_mismatch = ""
self.invalid_ttl = ""
self.data_is_short = ""
self.session_not_found = ""
self.auth_seqnum_invalid = ""
self.local_state_admin_down = ""
self.ip_checksum_error = ""
self.version_mismatch = ""
self.auth_key_id_mismatch = ""
self.other_error = ""
self.invalid_multipoint = ""
for keys, value in kwargs.items():
setattr(self,keys, value)
class Bfd(A10BaseClass):
"""Class Description::
Statistics for the object bfd.
Class bfd supports CRUD Operations and inherits from `common/A10BaseClass`.
    This class is the `"PARENT"` class for this module.
:param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
URL for this object::
`https://<Hostname|Ip address>//axapi/v3/system/bfd/stats`.
"""
def __init__(self, **kwargs):
self.ERROR_MSG = ""
self.required=[]
self.b_key = "bfd"
self.a10_url="/axapi/v3/system/bfd/stats"
self.DeviceProxy = ""
self.stats = {}
for keys, value in kwargs.items():
setattr(self,keys, value)
|
[
"[email protected]"
] | |
c1bb69c3c89f7e74c5290bc657be0da088c70345
|
13696a9691b173d75b11b4aee22b79d4ea6b7c0b
|
/test/test_o_auth_api.py
|
760055ebca9f7d8f8ae0f95734aad1999bf0caef
|
[
"Apache-2.0"
] |
permissive
|
square/connect-python-sdk
|
410613bc4b04f0f70176275591a16c9e49e25ede
|
e00e2889b2dd2c55048219cbe64db79962a68633
|
refs/heads/master
| 2023-06-15T09:24:17.190416 | 2019-08-15T17:44:41 | 2019-08-15T17:44:41 | 64,772,029 | 53 | 45 |
Apache-2.0
| 2020-12-20T18:41:31 | 2016-08-02T16:07:17 |
Python
|
UTF-8
|
Python
| false | false | 1,346 |
py
|
# coding: utf-8
"""
Copyright 2017 Square, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
import os
import sys
import unittest
import squareconnect
from squareconnect.rest import ApiException
from squareconnect.apis.o_auth_api import OAuthApi
class TestOAuthApi(unittest.TestCase):
""" OAuthApi unit test stubs """
def setUp(self):
self.api = squareconnect.apis.o_auth_api.OAuthApi()
def tearDown(self):
pass
def test_obtain_token(self):
print("Start test case for obtain_token")
pass
def test_renew_token(self):
print("Start test case for renew_token")
pass
def test_revoke_token(self):
print("Start test case for revoke_token")
pass
if __name__ == '__main__':
unittest.main()
|
[
"[email protected]"
] | |
0ad4839cc902ab89f8ee4c25b4c3fbf598f4798a
|
8881a4927d893e1e755c0488f76ba7941b379f26
|
/tech_gram_project2/producthunt_project/producthunt_project/urls.py
|
01b11efd49f26851698655f127f6afdfa499ab26
|
[] |
no_license
|
SatishNitk/Django
|
6bb839fcf2bc7d70413e3d56ac98124a7a96a5de
|
d9260c032322a34410d783c39a8f13e8f63b8be4
|
refs/heads/master
| 2020-05-24T23:01:35.767388 | 2019-07-06T13:56:50 | 2019-07-06T13:56:50 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 418 |
py
|
from django.contrib import admin
from django.urls import path,include
from django.conf.urls.static import static
from django.conf import settings
urlpatterns = [
path('admin/', admin.site.urls),
path('product/', include("products.urls")),
path('account/', include("accounts.urls"))
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)  # serve uploaded media so image links open inside the admin
|
[
"[email protected]"
] | |
faf7637b93bf57c9d86f6f84ec0dc2f5c276cca2
|
994ea22f35c635fdf139af9282b0d3a3d86ea34a
|
/ud617-intro_to_hadoop_mapreduce/lesson6/part1/reducer_q3.py
|
d3be0e7c6127a7fdf196f92e9b3177b5ef9970aa
|
[] |
no_license
|
zjyx147/Udacity
|
ac371fbc5b5b456e88b411657ef5a28c3b071c6c
|
d86fadd537dbacc6f8142b043e71527b0448bae3
|
refs/heads/master
| 2022-06-23T14:25:41.242353 | 2019-06-20T20:12:13 | 2019-06-20T20:12:13 | 191,207,247 | 0 | 0 | null | 2022-06-21T22:07:35 | 2019-06-10T16:42:18 |
DIGITAL Command Language
|
UTF-8
|
Python
| false | false | 593 |
py
|
#!/usr/bin/python
import sys
totalNum = 0
totalVal = 0
# Loop over the data.
# It will be in the format key\tval
# where key is the store name and val is the sale amount.
#
# This reducer ignores the store key and simply accumulates the
# total number of sales and their total value across all stores.
for line in sys.stdin:
data_mapped = line.strip().split("\t")
if len(data_mapped) != 2:
# Something has gone wrong. Skip this line.
continue
thisKey, thisSale = data_mapped
totalNum += 1
totalVal += float(thisSale)
print totalNum, totalVal
|
[
"[email protected]"
] | |
cf62c539355e00b0778c2edcea0d321f0c331db4
|
f719fb52b2fee32742c62e0267633a68c228d982
|
/2017-03-29/gen3.py
|
3394a05273741324057417acd390b15bacc994bb
|
[] |
no_license
|
elsys/python2016-2017
|
76e0fcb97b509a6f87fd010479b44ee702d7b2dd
|
290ba35dc1242a9f13a320ada1ec0498acc8fb79
|
refs/heads/master
| 2021-06-18T08:07:12.025390 | 2017-06-14T15:41:12 | 2017-06-14T15:41:12 | 83,579,817 | 3 | 2 | null | null | null | null |
UTF-8
|
Python
| false | false | 253 |
py
|
def fun(v):
print("calling fun() with value v=", v)
return 2*v
# print(fun(1))
def gen(maxv):
while maxv > 0:
print("before yield")
yield fun(maxv)
maxv -= 1
g = gen(3)
print(next(g))
# print(next(g))
"""
for v in g:
print(v)
"""
|
[
"[email protected]"
] | |
cb2ad544ec354652fc3ec9b093ddbc618597cd18
|
44badce6303eb8df34707edf27c5f8f2d2bc2697
|
/redfoot-1.6/lib/redfootlib/rdf/model/schema.py
|
2e583d06866efeaa30576f5f9794e1023a1d9554
|
[] |
no_license
|
jtauber/redfoot-orig
|
d371456f79e8b584f8e58037a5ab33011027484a
|
a5c26c53ba94c6d8970578bfcbc637aafaad1e11
|
refs/heads/master
| 2021-01-13T01:13:24.072000 | 2014-06-22T14:58:45 | 2014-06-22T14:58:45 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,998 |
py
|
from __future__ import generators
from redfootlib.rdf.model.core import Core
from redfootlib.rdf.const import LABEL, COMMENT
from redfootlib.rdf.const import TYPE, STATEMENT
from redfootlib.rdf.const import SUBJECT, PREDICATE, OBJECT
from redfootlib.rdf.const import DOMAIN, SUBCLASSOF
class Schema(Core):
def label(self, subject, default=None):
for s, p, o in self.triples(subject, LABEL, None):
return o
return default or subject
def comment(self, subject, default=None):
for s, p, o in self.triples(subject, COMMENT, None):
return o
return default or self.label(subject)
def typeless_resources(self):
for subject in self.subjects():
if not self.exists(subject, TYPE, None):
yield subject
# TODO: should we have a version of this that answers for subclasses too?
def is_of_type(self, subject, type):
return self.exists(subject, TYPE, type)
def subjects_by_type(self, type, predicate, object):
for subject in self.subjects(predicate, object):
if self.is_of_type(subject, type):
yield subject
def get_statement_uri(self, subject, predicate, object):
"""\
Returns the first statement uri for the given subject, predicate, object.
"""
for (s, p, o) in self.triples(None, TYPE, STATEMENT):
if self.exists(s, SUBJECT, subject)\
and self.exists(s, PREDICATE, predicate)\
and self.exists(s, OBJECT, object):
return s
return None
def possible_properties(self, type):
for object in self.transitive_objects(type, SUBCLASSOF):
for subject in self.subjects(DOMAIN, object):
yield subject
def possible_properties_for_subject(self, subject):
for type in self.objects(subject, TYPE):
for property in self.possible_properties(type):
yield property
|
[
"[email protected]"
] | |
9cc6d69a4edce9161dbfdc879d96259cff1bacef
|
5be8b0f2ee392abeee6970e7a6364ac9a5b8ceaa
|
/xiaojian/first_phase/day06/exersice03.py
|
e8b14b51849de9acde1b36a099be0ce424888398
|
[] |
no_license
|
Wellsjian/20180826
|
424b65f828f0174e4d568131da01dafc2a36050a
|
0156ad4db891a2c4b06711748d2624080578620c
|
refs/heads/master
| 2021-06-18T12:16:08.466177 | 2019-09-01T10:06:44 | 2019-09-01T10:06:44 | 204,462,572 | 0 | 1 | null | 2021-04-20T18:26:03 | 2019-08-26T11:38:09 |
JavaScript
|
UTF-8
|
Python
| false | false | 691 |
py
|
# Read a season from the console and print the months in that season
# season = input("Enter a season: ")
# if season == "spring":
#     print("This season has months 1, 2 and 3")
# elif season == "summer":
#     print("This season has months 4, 5 and 6")
# elif season == "autumn":
#     print("This season has months 7, 8 and 9")
# elif season == "winter":
#     print("This season has months 10, 11 and 12")
# else:
#     print("Invalid input")
season = input("Enter a season: ")
season_dict = {"spring": (1, 2, 3),
               "summer": (4, 5, 6),
               "autumn": (7, 8, 9),
               "winter": (10, 11, 12)
               }
if season in season_dict:
    print(season_dict[season])
else:
    print("Incorrect input")
|
[
"[email protected]"
] | |
bc96195975a91b5368e14f03c4909420a70a4ac3
|
65bf0113da75390c4cf3960b6a409aca15569a06
|
/tests/migrations/0014_apply_report_file.py
|
e3afd0e4e25db2a852b10394c22262f44c292c82
|
[] |
no_license
|
wenpengfan/opsadmin
|
e7701538265253653adb1c8ce490e0ce71d3b4f6
|
3d997259353dc2734ad153c137a91f3530e0a8ec
|
refs/heads/master
| 2023-03-29T11:50:10.756596 | 2020-11-16T02:41:18 | 2020-11-16T02:41:18 | 313,171,594 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 513 |
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.20 on 2020-06-01 13:31
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('tests', '0013_apply_feedback'),
]
operations = [
migrations.AddField(
model_name='apply',
name='report_file',
            field=models.CharField(max_length=255, null=True, verbose_name='Test report file'),
),
]
|
[
"[email protected]"
] | |
0d9a7d280e51e2933b55ef5fd026a4939f72886c
|
f38e78214992de722a6ec2012e844bce7b3c59ed
|
/bin/taskwarrior
|
25c78d0f0862d46f2f10288152304e2e7cfef0a4
|
[
"MIT"
] |
permissive
|
clckwrkbdgr/dotfiles
|
20fb86f54d93ae4936c334898c3d7b1b3820fb06
|
a7e880e189bfa4793f30ff928b049e4a182a38cd
|
refs/heads/master
| 2023-08-31T13:13:47.533868 | 2023-08-30T18:32:00 | 2023-08-30T18:32:00 | 20,396,084 | 2 | 2 |
MIT
| 2022-10-01T16:35:31 | 2014-06-02T07:26:38 |
Python
|
UTF-8
|
Python
| false | false | 1,716 |
#!/usr/bin/env python
import logging
import functools
logger = logging.getLogger('taskwarrior')
from clckwrkbdgr import utils
import clckwrkbdgr.taskwarrior
from clckwrkbdgr.taskwarrior import TaskWarrior, Config
import clckwrkbdgr.logging
import click, click_default_group
import clckwrkbdgr.click
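# Cache the TaskWarrior instance so the config file is only read once per run.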
@functools.lru_cache()
def get_taskwarrior():
return TaskWarrior(Config.read_config())
@clckwrkbdgr.click.windows_noexpand_args
@click.group(cls=click_default_group.DefaultGroup, default='current', default_if_no_args=True)
@click.option('--debug', is_flag=True, help='Enables debug output.')
def cli(debug=False):
""" Provides simple interface to manage user's task flow. """
clckwrkbdgr.logging.init(logger, debug=debug)
@cli.command('current')
@utils.exits_with_return_value
def current_task():
""" Displays current task. """
if get_taskwarrior().get_current_task() is None:
return False
print(get_taskwarrior().get_current_task())
return True
@cli.command('start')
@click.argument('task', required=False)
@utils.exits_with_return_value
def start_task(task=None):
""" Starts given task.
If task is not given, resumes previous task.
"""
return get_taskwarrior().start(task)
@cli.command('stop')
@utils.exits_with_return_value
def stop_task():
""" Stops current task. """
return get_taskwarrior().stop()
@cli.command('list')
@utils.exits_with_return_value
def list_history():
""" Prints task execution history. """
for entry in get_taskwarrior().get_history():
print(entry)
return True
@cli.command('fix')
@utils.exits_with_return_value
def fix_history():
""" Provides interface to fix task history manually. """
return get_taskwarrior().fix_history()
if __name__ == '__main__':
cli()
|
[
"[email protected]"
] | ||
5b2abe106d6315f4695312f7040b4d674324543f
|
6515dee87efbc5edfbf4c117e262449999fcbb50
|
/eet/Merge_k_Sorted_Lists.py
|
a79231a7a08f8900b10c642d099fb90026c69498
|
[] |
no_license
|
wangyunge/algorithmpractice
|
24edca77e180854b509954dd0c5d4074e0e9ef31
|
085b8dfa8e12f7c39107bab60110cd3b182f0c13
|
refs/heads/master
| 2021-12-29T12:55:38.096584 | 2021-12-12T02:53:43 | 2021-12-12T02:53:43 | 62,696,785 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,102 |
py
|
"""
Merge k sorted linked lists and return it as one sorted list. Analyze and describe its complexity.
Example:
Input:
[
1->4->5,
1->3->4,
2->6
]
Output: 1->1->2->3->4->4->5->6
"""
# Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, val=0, next=None):
# self.val = val
# self.next = next
class Solution(object):
def mergeKLists(self, lists):
"""
:type lists: List[ListNode]
:rtype: ListNode
"""
def _merge_two(a, b):
fake_head = ListNode(0)
head = fake_head
while a and b :
if a.val <= b.val:
head.next = a
head = a
a = a.next
else:
head.next = b
head = b
b = b.next
if a:
head.next = a
if b:
head.next = b
return fake_head.next
def _merge_sort(arr):
if len(arr) == 1:
return arr[0]
mid = len(arr) // 2
left = _merge_sort(arr[:mid])
right = _merge_sort(arr[mid:])
return _merge_two(left, right)
return _merge_sort(lists)
# Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, x):
# self.val = x
# self.next = None
import heapq
class Solution(object):
def mergeKLists(self,lists):
if not lists:
return []
heap = []
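        # Seed a min-heap with the head of every list; heapq then always pops
        # the globally smallest remaining node.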
for headers in lists:
if headers:
heapq.heappush(heap,(headers.val,headers))
if not heap:
return []
(value,head) = heapq.heappop(heap)
operator = head
if head.next:
heapq.heappush(heap,(head.next.val,head.next))
while heap:
(value,poped) = heapq.heappop(heap)
operator.next = poped
operator = operator.next
if poped.next:
heapq.heappush(heap,(poped.next.val,poped.next))
return head
|
[
"[email protected]"
] | |
ebf05022393496f5a3c2690de8595fb5f621a652
|
fcfb3f5e94f35aa0d7c5632efec1d1c15d66e856
|
/day9/flask_day3/inherit_demo/app.py
|
826748d83c0c62228ad9455de8a5457081fe0b4b
|
[
"Apache-2.0"
] |
permissive
|
gaohj/wh1904js
|
98a9d1dd63d42766b656f07ce537b5933eaafb78
|
a3af38f8311f79eb9f2e08a3de16dd1e02c40714
|
refs/heads/master
| 2021-07-11T17:16:49.885524 | 2020-01-17T09:48:15 | 2020-01-17T09:48:15 | 232,022,360 | 0 | 0 |
Apache-2.0
| 2021-03-20T02:41:32 | 2020-01-06T04:14:22 |
JavaScript
|
UTF-8
|
Python
| false | false | 322 |
py
|
from flask import Flask,render_template
from flask_script import Manager
app = Flask(__name__)
manager = Manager(app)
@app.route('/')
def hello_world():
return render_template('index.html')
@app.route('/detail/')
def details():
return render_template('detail.html')
if __name__ == '__main__':
manager.run()
|
[
"[email protected]"
] | |
569e5135fac1555cf0fb518269b99b2c71661cc5
|
2f98aa7e5bfc2fc5ef25e4d5cfa1d7802e3a7fae
|
/python/python_9477.py
|
fa57d72004d0e3842548a0d58e2499f639d33ab5
|
[] |
no_license
|
AK-1121/code_extraction
|
cc812b6832b112e3ffcc2bb7eb4237fd85c88c01
|
5297a4a3aab3bb37efa24a89636935da04a1f8b6
|
refs/heads/master
| 2020-05-23T08:04:11.789141 | 2015-10-22T19:19:40 | 2015-10-22T19:19:40 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 164 |
py
|
# Django Testing: Using a login decorator for test cases
class SimpleTest(TestCase):
def setUp(self):
self.client.login(username='foo', password='bar')
|
[
"[email protected]"
] | |
52b1286ab48d460abebb87719f7d65cef1e7009d
|
c62a07c8051d6106717863651004c8186a0e3027
|
/logic.py
|
84c2678f81cea7f404f2c5b6faddd8b4b1335110
|
[] |
no_license
|
isakura313/third_22
|
bf47bef5914ac5debeb33a36dad39566181ed6fb
|
c43911d6b73f638894d14f757a0ec5462e9e8005
|
refs/heads/master
| 2022-04-18T14:27:00.231299 | 2020-04-22T17:01:23 | 2020-04-22T17:01:23 | 257,967,272 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 425 |
py
|
role = input("Enter your role in the project: ")
age = input("Enter your age: ")
age = int(age)
if role == "admin" and age > 18:
    print("You have full permissions")
elif role == "user" and age > 16:
    print("You have some permissions on this project")
else:
    print("This service is closed for quarantine")
|
[
"[email protected]"
] | |
7953194e08d87e2cc8bd5e2a743dc383d4d6458b
|
fc3c9d2143aecedce191bb91dbd01babe7f6d40b
|
/tensorpack/callbacks/dump.py
|
ef62833b31118c6a9f00e80eb5e6c9216d57a65e
|
[
"Apache-2.0"
] |
permissive
|
rahulbprakash/tensorpack
|
0ee10de245f486d17a252354833c98dd713fd6e6
|
b2ec42a8d152760498aa911818d50b01e408bb43
|
refs/heads/master
| 2020-12-30T19:12:08.800662 | 2016-06-09T23:03:37 | 2016-06-09T23:03:37 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,077 |
py
|
# -*- coding: UTF-8 -*-
# File: dump.py
# Author: Yuxin Wu <[email protected]>
import os
import scipy.misc
from scipy.misc import imsave
import numpy as np
from .base import Callback
from ..utils import logger
from ..tfutils import get_op_var_name
__all__ = ['DumpParamAsImage']
class DumpParamAsImage(Callback):
"""
Dump a variable to image(s) after every epoch.
"""
def __init__(self, var_name, prefix=None, map_func=None, scale=255, clip=False):
"""
:param var_name: the name of the variable.
:param prefix: the filename prefix for saved images. Default is the op name.
:param map_func: map the value of the variable to an image or list of
images of shape [h, w] or [h, w, c]. If None, will use identity
:param scale: a multiplier on pixel values, applied after map_func. default to 255
:param clip: whether to clip the result to [0, 255]
"""
op_name, self.var_name = get_op_var_name(var_name)
self.func = map_func
if prefix is None:
self.prefix = op_name
else:
self.prefix = prefix
self.log_dir = logger.LOG_DIR
self.scale = scale
self.clip = clip
def _before_train(self):
# TODO might not work for multiGPU?
self.var = self.graph.get_tensor_by_name(self.var_name)
def _trigger_epoch(self):
val = self.trainer.sess.run(self.var)
if self.func is not None:
val = self.func(val)
if isinstance(val, list):
for idx, im in enumerate(val):
self._dump_image(im, idx)
else:
self._dump_image(val)
def _dump_image(self, im, idx=None):
assert im.ndim in [2, 3], str(im.ndim)
fname = os.path.join(
self.log_dir,
self.prefix + '-ep{:03d}{}.png'.format(
self.epoch_num, '-' + str(idx) if idx else ''))
res = im * self.scale
if self.clip:
res = np.clip(res, 0, 255)
imsave(fname, res.astype('uint8'))
|
[
"[email protected]"
] | |
9d3ebb55f1314362a215d95a4aadf6a840bf824d
|
1b9075ffea7d4b846d42981b41be44238c371202
|
/2009/stable/hardware/firmware/flashrom/actions.py
|
870e4cb6e12699846f65a6f0b2a8ad85380f45fd
|
[] |
no_license
|
pars-linux/contrib
|
bf630d4be77f4e484b8c6c8b0698a5b34b3371f4
|
908210110796ef9461a1f9b080b6171fa022e56a
|
refs/heads/master
| 2020-05-26T20:35:58.697670 | 2011-07-11T11:16:38 | 2011-07-11T11:16:38 | 82,484,996 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 457 |
py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2007-2010 TUBITAK/UEKAE
# Licensed under the GNU General Public License, version 2.
# See the file http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt
from pisi.actionsapi import autotools
from pisi.actionsapi import pisitools
def build():
autotools.make()
def install():
pisitools.dosbin("flashrom")
pisitools.doman("flashrom.8")
pisitools.dodoc("ChangeLog", "COPYING", "README")
|
[
"[email protected]"
] | |
22af9c136349ee70da4d000c5eef00cb1baf0109
|
8ecd899a8558ad0a644ecefa28faf93e0710f6fb
|
/ABC007/ABC007_A.py
|
679623e243c760e493ba9bd56ca1c2569cd69a61
|
[] |
no_license
|
yut-inoue/AtCoder_ABC
|
b93885547049788d452e86b442a4a9f5ee191b0e
|
3d2c4b2b2f8871c75f86040ad07ccd7736ad3dbe
|
refs/heads/master
| 2021-07-03T09:09:20.478613 | 2021-02-21T13:20:31 | 2021-02-21T13:20:31 | 227,140,718 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 152 |
py
|
n = int(input())
#a, b = map(int,input().split())
#l = list(map(int,input().split()))
#l = [list(map(int,input().split())) for i in range(n)]
print(n-1)
|
[
"[email protected]"
] | |
2859050f5f4926044ceeb1a9937dfdf2a9332f07
|
3ec84a6e34f9bc709cb203f8b3f668f2b6697e2a
|
/python20200322-master/class_Python기초/py12패키지/mylib/operation/test.py
|
a8124c496932fed9a4168d433ceb4a82eeb59f3b
|
[] |
no_license
|
eopr12/pythonclass
|
52079bd99358ac73664beed236659b97c8b63d40
|
2526fe255969a799f6c534c9db6bff9e4eccd877
|
refs/heads/master
| 2022-07-10T11:17:31.692754 | 2020-05-16T08:43:00 | 2020-05-16T08:43:00 | 263,377,402 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 153 |
py
|
def test_operation():
result = None
try:
result = "test_operation"
except Exception as ex:
print(ex)
return result
|
[
"[email protected]"
] | |
922ad653e03b85705765df9053e41ed4a995fcc9
|
7e3c7e9bf8e8410b688787bbf41f93e0bce30ef8
|
/misc/fix_keras_optimizer.py
|
4c1e72705ec6e77de0e31f5dd426bd7ffed1acef
|
[] |
no_license
|
directorscut82/msthesis-experiments
|
bb8233d4e54da0b294b3a43f219bc424626e8ad5
|
f86e344c972f2b61c3fa16eae523fd20303e8842
|
refs/heads/master
| 2020-03-23T08:24:19.535200 | 2017-07-27T06:23:18 | 2017-07-27T06:23:18 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 475 |
py
|
#!/usr/bin/env python
"""
Make keras 1.x models usable in keras 2.x.
Run this when you get the following error:
ValueError: Optimizer weight shape (512,) not compatible with provided weight shape (32,)
"""
import glob
import h5py
model_files = sorted(glob.glob('*.h5'))
for model_file in model_files:
print("Update '{}'".format(model_file))
with h5py.File(model_file, 'a') as f:
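        # Keras 1.x saved optimizer state under 'optimizer_weights'; deleting
        # it forces Keras 2.x to rebuild the optimizer state on load.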
if 'optimizer_weights' in f.keys():
del f['optimizer_weights']
|
[
"[email protected]"
] | |
a515fc587646476cc8878bb50f72120b4e6aa5ba
|
bad85cd8d547a071baf4b6590f7e81d13ef1ec0d
|
/assistant/core/views.py
|
2732966b89290e6982d4e90149bce48ffa294e63
|
[
"MIT"
] |
permissive
|
kapiak/ware_prod
|
92e11671059642e14219d5aa8334e0564403db77
|
ae61256890834c434d2e38cc2ccacf00b638665a
|
refs/heads/master
| 2023-01-06T04:36:43.173093 | 2020-09-21T04:06:51 | 2020-09-21T04:06:51 | 310,320,165 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 871 |
py
|
from typing import List
from django.contrib.auth.mixins import LoginRequiredMixin
from django.views.generic import TemplateView
from assistant.orders.models import Order
from assistant.products.models import Product
class DashboardViewMixin(LoginRequiredMixin):
title: str = None
breadcrumbs: List = []
def get_title(self):
return self.title
def get_context_data(self):
context = super().get_context_data()
context.update({'title': self.get_title()})
return context
class DashboardTemplateView(LoginRequiredMixin, TemplateView):
template_name = "core/dashboard.html"
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context.update({
'orders': Order.objects.all(),
'products': Product.objects.all()
})
return context
|
[
"[email protected]"
] | |
f7526d46e57dacaf54913613ea92feeddb67cffd
|
e34cbf5fce48f661d08221c095750240dbd88caf
|
/python/homework/day10_ansibleLike/core/verify.py
|
70613c6ced9cebe0e42908774b56c4de14604d30
|
[] |
no_license
|
willianflasky/growup
|
2f994b815b636e2582594375e90dbcb2aa37288e
|
1db031a901e25bbe13f2d0db767cd28c76ac47f5
|
refs/heads/master
| 2023-01-04T13:13:14.191504 | 2020-01-12T08:11:41 | 2020-01-12T08:11:41 | 48,899,304 | 2 | 0 | null | 2022-12-26T19:46:22 | 2016-01-02T05:04:39 |
C
|
UTF-8
|
Python
| false | false | 790 |
py
|
#!/usr/bin/env python
# -*-coding:utf8-*-
# __author__ = "willian"
import getpass
from lib import mysql_helper
from conf.settings import *
def verify():
conn = mysql_helper.MySQLHandler(db_host, db_port, db_user, db_pass, db_name)
result = conn.select('select * from {0}', 'users')
count = 3
while count > 0:
        _username = input("Enter username: ").strip()
        _password = getpass.getpass("Enter password: ").strip()  # getpass does not work well when debugging in PyCharm
        for user_dic in result:
            if _username == user_dic['username'] and _password == user_dic['password']:
                print("\033[32;1mVerification succeeded!\033[0m")
                return True, user_dic
        count -= 1
    else:
        print("\033[31;1mToo many attempts (limit is 3)!\033[0m")
        return False
|
[
"[email protected]"
] | |
1f274d45c819c75e5909ef811396617f68af6e41
|
32271508e449e8842f38186e5e4528696b41d1f9
|
/tabby/tab/migrations/0025_remove_race_win_market.py
|
16142941a5e3532c0ac93d4c68b1aef608b2742a
|
[] |
no_license
|
Tjorriemorrie/tabby
|
d623ad5be3ae53b9370fd400f362d940e7191ac3
|
09c697bd48fdc4de548c911f1fd81b2a7e4b511b
|
refs/heads/master
| 2022-12-10T10:01:40.317751 | 2019-12-13T04:31:12 | 2019-12-13T04:31:12 | 100,076,546 | 4 | 2 | null | 2022-12-08T06:51:55 | 2017-08-11T23:26:00 |
Jupyter Notebook
|
UTF-8
|
Python
| false | false | 326 |
py
|
# Generated by Django 2.0.1 on 2018-01-30 01:00
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('tab', '0024_auto_20180128_1356'),
]
operations = [
migrations.RemoveField(
model_name='race',
name='win_market',
),
]
|
[
"[email protected]"
] | |
21dae073458e0bac5899c85d1f117f88958119dc
|
fb78fd824e904705fb1ee09db8b3c20cc3902805
|
/django-myshop/myshop/settings.py
|
e93a63838401dfb03886299b9b686dadf4dae54b
|
[] |
no_license
|
Roderich25/mac
|
8469833821ac49c539a744db29db5a41d755ad55
|
4f7fe281c88f0199b85d0ac99ce41ffb643d6e82
|
refs/heads/master
| 2023-01-12T05:55:12.753209 | 2021-11-26T01:16:24 | 2021-11-26T01:16:24 | 207,029,750 | 0 | 0 | null | 2023-01-07T11:49:23 | 2019-09-07T21:51:53 |
Jupyter Notebook
|
UTF-8
|
Python
| false | false | 3,011 |
py
|
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'ea=e^w3s$qfrb9_+5oq962$u(e7xq&me_b%ez7^c!6&6hm-q0d'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'shop.apps.ShopConfig',
'cart.apps.CartConfig',
'orders.apps.OrdersConfig',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'myshop.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'cart.context_processors.cart',
],
},
},
]
WSGI_APPLICATION = 'myshop.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media/')
CART_SESSION_ID = 'cart'
|
[
"[email protected]"
] | |
e350c375091476a2506c1e698410dc3a6adfbfb8
|
30f8afce1ba484183d8e1e14aae76cabb2d92354
|
/pbase/day29/old.py
|
741ae415c047f7297fc6ca49c5aab16131a342b0
|
[] |
no_license
|
brooot/Python_Base_Codes
|
d83e8c3b8a37b86672412c812fdb0d47deb67836
|
a864685e160b5df4162a6f9fb910627eda702aaf
|
refs/heads/master
| 2023-04-10T20:08:39.161289 | 2021-03-25T12:59:23 | 2021-03-25T12:59:23 | 200,570,412 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 790 |
py
|
from socket import *
import sys
class FtpClient(object):
def __init__(self,serveraddr):
self.serveraddr=serveraddr
def do_list(self):
sockfd=socket()
        sockfd.connect(self.serveraddr)
def main():
    if len(sys.argv) < 3:
        print('usage: %s host port' % sys.argv[0])
        sys.exit(1)
host=sys.argv[1]
port=int(sys.argv[2])
BUFFERSIZE=1024
addr=(host,port)
# sockfd=socket()
while True:
print('**command **')
print('**list **')
print('**get **')
print('**put filename**')
print('**quit **')
        data = input('Enter command: ')
ftp=FtpClient(addr)
if data[:4]=='list':
ftp.do_list()
elif data[:3]=='get':
ftp.do_get()
elif data=='put':
pass
else:
            sys.exit(0)
if __name__=='__main__':
main()
|
[
"[email protected]"
] | |
c51c27f98dfdd33ed8055495236836b200efc808
|
93652e0f73558ffa24059647324f79ba043ba241
|
/topi/tests/python/test_topi_bitserial_conv2d.py
|
6df18483a45f9263f685e4ade3c425b75b29eb76
|
[
"Apache-2.0"
] |
permissive
|
souptc/tvm
|
830b1444435b6bda267df305538a783eb687d473
|
a8574e7bb814997cb3920a72035071899635b753
|
refs/heads/master
| 2020-03-25T12:42:20.686770 | 2018-08-06T21:07:38 | 2018-08-06T21:07:38 | 143,789,191 | 1 | 0 |
Apache-2.0
| 2018-08-06T22:18:20 | 2018-08-06T22:18:19 | null |
UTF-8
|
Python
| false | false | 4,821 |
py
|
import os
import numpy as np
import tvm
import topi
import topi.testing
from tvm.contrib.pickle_memoize import memoize
from topi.util import get_const_tuple
from tvm.contrib import util
from tvm.contrib.pickle_memoize import memoize
def generate_quantized_np(shape, bits, out_dtype):
min_val = 0
max_val = 1 << bits
return np.random.randint(min_val, max_val, size=shape).astype(out_dtype)
def verify_bitserial_conv2d_nchw(batch, in_size, in_channel, num_filter, kernel, stride, padding,
activation_bits, weight_bits, dorefa):
in_height = in_width = in_size
input_type='uint32'
out_dtype='int32'
with tvm.target.create('llvm'):
A = tvm.placeholder((batch, in_channel, in_height, in_width), dtype=input_type, name='A')
W = tvm.placeholder((num_filter, in_channel, kernel, kernel), dtype=input_type, name='W')
B = topi.nn.bitserial_conv2d(A, W, stride, padding, activation_bits, weight_bits,
out_dtype=out_dtype, layout="NCHW", dorefa=dorefa)
s = topi.generic.schedule_bitserial_conv2d_nchw([B])
a_shape = get_const_tuple(A.shape)
w_shape = get_const_tuple(W.shape)
dtype = A.dtype
def get_ref_data():
a_np = generate_quantized_np(get_const_tuple(A.shape), activation_bits, input_type)
w_np = generate_quantized_np(get_const_tuple(W.shape), weight_bits, input_type)
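        # DoReFa-style binarization: reinterpret the {0, 1} weight bits as
        # {-1, +1} before computing the reference convolution.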
if dorefa:
w_ = np.copy(w_np).astype(out_dtype)
for x in np.nditer(w_, op_flags=['readwrite']):
x[...] = 1 if x == 1 else -1
b_np = topi.testing.conv2d_nchw_python(a_np.astype(out_dtype), w_, stride, padding)
else:
b_np = topi.testing.conv2d_nchw_python(a_np, w_np, stride, padding)
return a_np, w_np, b_np
a_np, w_np, b_np = get_ref_data()
ctx = tvm.cpu(0)
a = tvm.nd.array(a_np, ctx)
w = tvm.nd.array(w_np, ctx)
b = tvm.nd.array(np.zeros(get_const_tuple(B.shape), dtype=B.dtype), ctx)
func = tvm.build(s, [A, W, B], "llvm")
func(a, w, b)
np.testing.assert_allclose(b.asnumpy(), b_np, rtol=1e-5)
def verify_bitserial_conv2d_nhwc(batch, in_size, in_channel, num_filter, kernel, stride, padding,
activation_bits, weight_bits, dorefa):
in_height = in_width = in_size
input_type='uint32'
out_dtype='int32'
with tvm.target.create('llvm'):
A = tvm.placeholder((batch, in_height, in_width, in_channel), dtype=input_type, name='A')
W = tvm.placeholder((kernel, kernel, in_channel, num_filter), dtype=input_type, name='W')
B = topi.nn.bitserial_conv2d(A, W, stride, padding, activation_bits, weight_bits, out_dtype=out_dtype,
layout="NHWC", dorefa=dorefa)
s = topi.generic.schedule_bitserial_conv2d_nhwc([B])
a_shape = get_const_tuple(A.shape)
w_shape = get_const_tuple(W.shape)
dtype = A.dtype
def get_ref_data():
a_np = generate_quantized_np(get_const_tuple(A.shape), activation_bits, input_type)
w_np = generate_quantized_np(get_const_tuple(W.shape), weight_bits, input_type)
if dorefa:
w_ = np.copy(w_np).astype(out_dtype)
for x in np.nditer(w_, op_flags=['readwrite']):
x[...] = 1 if x == 1 else -1
b_np = topi.testing.conv2d_nhwc_python(a_np, w_, stride, padding).astype(out_dtype)
else:
b_np = topi.testing.conv2d_nhwc_python(a_np, w_np, stride, padding).astype(out_dtype)
return a_np, w_np, b_np
a_np, w_np, b_np = get_ref_data()
ctx = tvm.cpu(0)
a = tvm.nd.array(a_np, ctx)
w = tvm.nd.array(w_np, ctx)
b = tvm.nd.array(np.zeros(get_const_tuple(B.shape), dtype=B.dtype), ctx)
func = tvm.build(s, [A, W, B], 'llvm')
func(a, w, b)
np.testing.assert_allclose(b.asnumpy(), b_np, rtol=1e-5)
def test_bitserial_conv2d():
in_size = 56
ic, oc = 64, 64
k = 3
stride = 1
pad = 1
verify_bitserial_conv2d_nchw(1, in_size, ic, oc, k, stride, pad, 1, 1, True)
verify_bitserial_conv2d_nchw(1, in_size, ic, oc, k, stride, pad, 2, 1, True)
verify_bitserial_conv2d_nchw(1, in_size, ic, oc, k, stride, pad, 1, 1, False)
verify_bitserial_conv2d_nchw(1, in_size, ic, oc, k, stride, pad, 2, 1, False)
verify_bitserial_conv2d_nchw(1, in_size, ic, oc, k, stride, pad, 2, 2, False)
verify_bitserial_conv2d_nhwc(1, in_size, ic, oc, k, stride, pad, 1, 1, True)
verify_bitserial_conv2d_nhwc(1, in_size, ic, oc, k, stride, pad, 2, 1, True)
verify_bitserial_conv2d_nhwc(1, in_size, ic, oc, k, stride, pad, 1, 1, False)
verify_bitserial_conv2d_nhwc(1, in_size, ic, oc, k, stride, pad, 2, 1, False)
verify_bitserial_conv2d_nhwc(1, in_size, ic, oc, k, stride, pad, 2, 2, False)
if __name__ == "__main__":
test_bitserial_conv2d()
|
[
"[email protected]"
] | |
3d1f1756528afaa87544ba2b6c62e67f3b6572f7
|
b2c24abff86b28ca8a495b3a3c3227f070737aa2
|
/parlai/agents/AdaND/utils.py
|
2467ef5a277ddf9fdc24d14669efbf63843aff07
|
[
"MIT"
] |
permissive
|
hengyicai/AdaND
|
d5dda1b2fcd2abd17be6603de632f0515382b37b
|
5e3fefb1cf40c42215a37246efc64958ae6db005
|
refs/heads/master
| 2023-09-01T07:38:49.076947 | 2020-10-19T04:58:00 | 2020-10-19T04:58:00 | 204,633,631 | 10 | 2 |
MIT
| 2023-08-11T19:52:23 | 2019-08-27T06:20:39 |
Python
|
UTF-8
|
Python
| false | false | 947 |
py
|
import torch.nn as nn
def reverse(lst):
return lst[::-1]
class FeedForward(nn.Module):
def __init__(self, input_dim, out_dim, hidden_sizes=(512,),
activation="Tanh", bias=True, dropout=0.1):
super(FeedForward, self).__init__()
self.activation = getattr(nn, activation)()
n_inputs = [input_dim] + list(hidden_sizes)
n_outputs = list(hidden_sizes) + [out_dim]
self.linears = nn.ModuleList([nn.Linear(n_in, n_out, bias=bias)
for n_in, n_out in zip(n_inputs, n_outputs)])
self.num_layer = len(self.linears)
self.dropout_layer = nn.Dropout(dropout)
def forward(self, input_):
x = input_
i = 0
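        # Apply dropout and activation after every layer except the last,
        # leaving the output layer as a plain linear projection.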
for linear in self.linears:
x = linear(x)
if i < self.num_layer - 1:
x = self.dropout_layer(x)
x = self.activation(x)
i += 1
return x
|
[
"[email protected]"
] | |
fdef0e55fea15ec9925ee84443a708abafdfecc5
|
4a8775eac5a5f39400848b4c81476c49ddfbd871
|
/apps/api-test/urls.py
|
a999262fcf5944dbfda9b7441dd035df4a93df14
|
[] |
no_license
|
wdudek82/quotarium-backend
|
ec6d73c13ed06a201066442f108cdbcc4777da5e
|
b37cbbe1a136f89fe10ed6d6418a69d585bec8ff
|
refs/heads/master
| 2022-12-10T18:32:37.564838 | 2018-07-08T20:40:28 | 2018-07-08T20:40:28 | 140,035,629 | 0 | 0 | null | 2022-12-08T02:17:15 | 2018-07-06T22:39:13 |
Python
|
UTF-8
|
Python
| false | false | 142 |
py
|
from django.conf.urls import url
from .views import UserViewSet
urlpatterns = [
url(r'^users/$', UserViewSet.as_view(), name='users'),
]
|
[
"[email protected]"
] | |
e6a2fb17e898a3dedb7ffb531fb3c9dcd46ee0a7
|
6189f34eff2831e3e727cd7c5e43bc5b591adffc
|
/WebMirror/management/rss_parser_funcs/feed_parse_extractRandnovelstlsamatchateaWordpressCom.py
|
d0cebc880f2171d48c773fdaf78dc3e6e389d55b
|
[
"BSD-3-Clause"
] |
permissive
|
fake-name/ReadableWebProxy
|
24603660b204a9e7965cfdd4a942ff62d7711e27
|
ca2e086818433abc08c014dd06bfd22d4985ea2a
|
refs/heads/master
| 2023-09-04T03:54:50.043051 | 2023-08-26T16:08:46 | 2023-08-26T16:08:46 | 39,611,770 | 207 | 20 |
BSD-3-Clause
| 2023-09-11T15:48:15 | 2015-07-24T04:30:43 |
Python
|
UTF-8
|
Python
| false | false | 586 |
py
|
def extractRandnovelstlsamatchateaWordpressCom(item):
'''
Parser for 'randnovelstlsamatchatea.wordpress.com'
'''
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol) or "preview" in item['title'].lower():
return None
tagmap = [
('PRC', 'PRC', 'translated'),
('Loiterous', 'Loiterous', 'oel'),
]
for tagname, name, tl_type in tagmap:
if tagname in item['tags']:
return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
return False
|
[
"[email protected]"
] | |
4aa8272025f036b52ea729420003ccaed04615fc
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02955/s157641213.py
|
4c49cb084d398b88a82c7768e464c3400e1d3697
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 693 |
py
|
from collections import deque
def isok(x):
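    # Cost (number of unit moves) to make every pile divisible by x: sort the
    # residues mod x, then greedily cancel small residues (cost l) against
    # large ones (cost x - r) from both ends of the deque.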
que=deque(sorted(z%x for z in a))
res=0
while que:
l=que[0]
if l==0:
que.popleft()
continue
r=que[-1]
if r==0:
que.pop()
continue
d=min(l,x-r)
que[0]-=d
que[-1]=(que[-1]+d)%x
res+=d
return res
n,k=map(int,input().split())
a=list(map(int,input().split()))
sum_=sum(a)
fac=set()
for i in range(1,sum_+1):
if i*i>sum_:
break
if sum_%i==0:
fac.add(i)
fac.add(sum_//i)
fac=sorted(fac,reverse=True)
ans=1
for x in fac:
c=isok(x)
if c<=k:
ans=x
break
print(ans)
|
[
"[email protected]"
] | |
25c76f936b1e618ae4f59f11a453aeb716d710ca
|
4c0062f3b45afe6a087f0e8b0b9292448ce8680e
|
/inwike/wsgi.py
|
b62adb52993addcae9133236c57a9f24c5e90cd2
|
[] |
no_license
|
mitshel/inwike
|
89846286824d4dd322edb4d51836af8d86da00d2
|
e89bd4ccb9c3a71d17692d14def6e1041596d0f9
|
refs/heads/master
| 2020-06-20T21:04:00.623930 | 2019-07-19T20:14:03 | 2019-07-19T20:14:03 | 197,248,276 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 389 |
py
|
"""
WSGI config for inwike project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'inwike.settings')
application = get_wsgi_application()
|
[
"[email protected]"
] | |
daf4ddaea769085c50cf8f4f15f0287de9a5ab16
|
7256596fc6437c7f3cd1947f9f88bc556df6ba56
|
/programs_in_python/programming_excercise/1.py
|
7aa1303ea03548fa583c4aa0857c6f32292d692b
|
[] |
no_license
|
panu2306/Python-Articles
|
fd02cf70635e4a63eae8b691597b6858c40832b8
|
7585dbdca92264a8f52cfb3c1b918b29814d3bd1
|
refs/heads/master
| 2020-12-27T17:33:28.576776 | 2020-05-06T14:55:10 | 2020-05-06T14:55:10 | 237,990,657 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 645 |
py
|
'''
Write a program which will find all such numbers which are divisible by 7 but are not a multiple of 5,
between 2000 and 3200 (both included).
The numbers obtained should be printed in a comma-separated sequence on a single line.
'''
# Using List in Python:
def multiple_of_seven(start, end):
l = []
for i in range(start, end+1):
if((i%7 == 0) and (i%5 != 0)):
l.append(str(i))
return l
print(','.join(multiple_of_seven(2000, 3200)))
# Using yield in Python:
def multiple_seven(start, end):
for i in range(start, end+1):
if((i%7==0) and (i%5!=0)):
yield(str(i))
for i in multiple_seven(2000, 3200):
print(i, end=',')
|
[
"[email protected]"
] | |
c7e086c6ea45c41cf28e897e3b175a4f462aca19
|
ccf94dcb6b1500fcbbd56964ae8c4832a496b8b3
|
/python/baiduads-sdk-auto/test/test_app_info_item.py
|
deaf0a9f6d02f3a631c0a04ff600f2afd04a818c
|
[
"Apache-2.0"
] |
permissive
|
baidu/baiduads-sdk
|
24c36b5cf3da9362ec5c8ecd417ff280421198ff
|
176363de5e8a4e98aaca039e4300703c3964c1c7
|
refs/heads/main
| 2023-06-08T15:40:24.787863 | 2023-05-20T03:40:51 | 2023-05-20T03:40:51 | 446,718,177 | 16 | 11 |
Apache-2.0
| 2023-06-02T05:19:40 | 2022-01-11T07:23:17 |
Python
|
UTF-8
|
Python
| false | false | 637 |
py
|
"""
dev2 api schema
'dev2.baidu.com' api schema # noqa: E501
Generated by: https://openapi-generator.tech
"""
import sys
import unittest
import baiduads
from baiduads.appprocess.model.app_info_item import AppInfoItem
class TestAppInfoItem(unittest.TestCase):
"""AppInfoItem unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testAppInfoItem(self):
"""Test AppInfoItem"""
# FIXME: construct object with mandatory attributes with example values
# model = AppInfoItem() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
|
[
"[email protected]"
] | |
5bd0e53ba54a4a57cf01aa9a0f830f42c969bd2f
|
e08e7bb643b81899d261bbdada63754eb32da2e8
|
/demos/helloworld/jinja2-support/main.py
|
fff720215c30da226ea87bdea8861ee34f58e750
|
[
"Apache-2.0"
] |
permissive
|
tao12345666333/app-turbo
|
95baa0e0d7f7172183591c2bc177efc9ae0e1b37
|
8717ba5631e47c476e277c3a897d85b5a93f9082
|
refs/heads/master
| 2020-12-25T04:45:26.575354 | 2016-12-11T15:35:12 | 2016-12-11T15:35:12 | 31,700,837 | 0 | 0 | null | 2015-03-05T07:14:08 | 2015-03-05T07:14:08 | null |
UTF-8
|
Python
| false | false | 478 |
py
|
#-*- coding:utf-8 -*-
from tornado.options import define, options
import tornado.options
import setting
import turbo.register
import turbo.app
#uncomment this to init state manager: store
#import store
turbo.register.register_app(setting.SERVER_NAME, setting.TURBO_APP_SETTING, setting.WEB_APPLICATION_SETTING, __file__, globals())
define("port", default=8888, type=int)
if __name__ == '__main__':
tornado.options.parse_command_line()
turbo.app.start(options.port)
|
[
"[email protected]"
] | |
22731541dd107b93c3c9efbc5cf7a570dc5ca82e
|
a46d135ba8fd7bd40f0b7d7a96c72be446025719
|
/packages/python/plotly/plotly/validators/pointcloud/hoverlabel/font/_color.py
|
ab79906bd35243f454c553b46a4a5012a256ba50
|
[
"MIT"
] |
permissive
|
hugovk/plotly.py
|
5e763fe96f225d964c4fcd1dea79dbefa50b4692
|
cfad7862594b35965c0e000813bd7805e8494a5b
|
refs/heads/master
| 2022-05-10T12:17:38.797994 | 2021-12-21T03:49:19 | 2021-12-21T03:49:19 | 234,146,634 | 0 | 0 |
MIT
| 2020-01-15T18:33:43 | 2020-01-15T18:33:41 | null |
UTF-8
|
Python
| false | false | 473 |
py
|
import _plotly_utils.basevalidators
class ColorValidator(_plotly_utils.basevalidators.ColorValidator):
def __init__(
self, plotly_name="color", parent_name="pointcloud.hoverlabel.font", **kwargs
):
super(ColorValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
array_ok=kwargs.pop("array_ok", True),
edit_type=kwargs.pop("edit_type", "none"),
**kwargs
)
|
[
"[email protected]"
] | |
b1e8dff10bd7f06b4f82282a4a65779bd9215537
|
781e2692049e87a4256320c76e82a19be257a05d
|
/all_data/exercism_data/python/hamming/ccca6c36f7464721986b3e6214962018.py
|
73c19fda905d9bdfc0b57e3963e8c92516bc8d4d
|
[] |
no_license
|
itsolutionscorp/AutoStyle-Clustering
|
54bde86fe6dbad35b568b38cfcb14c5ffaab51b0
|
be0e2f635a7558f56c61bc0b36c6146b01d1e6e6
|
refs/heads/master
| 2020-12-11T07:27:19.291038 | 2016-03-16T03:18:00 | 2016-03-16T03:18:42 | 59,454,921 | 4 | 0 | null | 2016-05-23T05:40:56 | 2016-05-23T05:40:56 | null |
UTF-8
|
Python
| false | false | 239 |
py
|
def distance(strand1, strand2):
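    # Hamming distance: the number of positions at which the strands differ.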
if len(strand1) != len(strand2):
        raise ValueError('length mismatch: %d != %d' % (len(strand1), len(strand2)))
hd = 0
for i in xrange(len(strand1)):
if strand1[i] != strand2[i]:
hd += 1
return hd
|
[
"[email protected]"
] | |
0ddbf86f3bedb7bcc299a23d41f190b92dc242af
|
bc2c2d63ac18dfa6b5171ff97ad6b88f647dc282
|
/mininet/wifi/util.py
|
00b9511e070c1f3f235e10d02d277a5b7d286e1e
|
[
"LicenseRef-scancode-x11-stanford"
] |
permissive
|
MatthiasEckhart/mininet-wifi
|
ca8cadccb62db7ce6221ab0dcf4af7a79a93e74e
|
95392e59f82e1380730b0b3f4e375a04839316ce
|
refs/heads/master
| 2020-03-16T00:52:39.895729 | 2018-05-11T09:17:26 | 2018-05-11T09:17:26 | 132,427,457 | 1 | 2 | null | 2018-05-07T08:03:36 | 2018-05-07T08:03:36 | null |
UTF-8
|
Python
| false | false | 1,014 |
py
|
"Utility functions for Mininet-WiFi"
from mininet.util import retry
def moveIntfNoRetry(intf, dstNode, printError=False):
"""Move interface to node, without retrying.
intf: string, interface
dstNode: destination Node
printError: if true, print error"""
from mininet.wifi.node import Station, Car, AP
if (isinstance(dstNode, Station) or isinstance(dstNode, Car)
or isinstance(dstNode, AP) and 'eth' not in str(intf)):
if isinstance(dstNode, Station) or isinstance(dstNode, Car):
return True
else:
return True
def moveIntf(intf, dstNode, printError=True,
retries=3, delaySecs=0.001):
"""Move interface to node, retrying on failure.
intf: string, interface
dstNode: destination Node
printError: if true, print error"""
from mininet.wifi.node import AP
if not isinstance(dstNode, AP):
retry(retries, delaySecs, moveIntfNoRetry, intf, dstNode,
printError=printError)
|
[
"[email protected]"
] | |
094fb1998b4fb1a04b1860e17d4d7bcda5a15b28
|
9e988c0dfbea15cd23a3de860cb0c88c3dcdbd97
|
/sdBs/AllRun/pg_0848+249/sdB_PG_0848+249_lc.py
|
3419530c1b53949ad19342e48dd7c6716eb727b3
|
[] |
no_license
|
tboudreaux/SummerSTScICode
|
73b2e5839b10c0bf733808f4316d34be91c5a3bd
|
4dd1ffbb09e0a599257d21872f9d62b5420028b0
|
refs/heads/master
| 2021-01-20T18:07:44.723496 | 2016-08-08T16:49:53 | 2016-08-08T16:49:53 | 65,221,159 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 346 |
py
|
from gPhoton.gAperture import gAperture
def main():
gAperture(band="NUV", skypos=[132.907667,24.697419], stepsz=30., csvfile="/data2/fleming/GPHOTON_OUTPU/LIGHTCURVES/sdBs/sdB_PG_0848+249 /sdB_PG_0848+249_lc.csv", maxgap=1000., overwrite=True, radius=0.00555556, annulus=[0.005972227,0.0103888972], verbose=3)
if __name__ == "__main__":
main()
|
[
"[email protected]"
] | |
4880c6673cf71e0c7ee5ecb34afce54a4736b043
|
48156b85839d832ecfe8fdf0a0e17b5ebf8b9460
|
/75.findPeak.py
|
1672766c60cae18647eadcca3b6c3ce2ede0c597
|
[] |
no_license
|
Aissen-Li/lintcode
|
7dc2564fcec20667f073d9219fe049808c5de625
|
4d2a717956a75197ce1dfa1094cdd5ab3a1d2004
|
refs/heads/master
| 2020-11-28T16:43:21.760691 | 2020-01-14T15:08:45 | 2020-01-14T15:08:45 | 229,871,626 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 612 |
py
|
class Solution:
"""
@param A: An integers array.
@return: return any of peek positions.
"""
def findPeak(self, A):
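        # Binary search for a peak: always move toward a strictly larger
        # neighbour, so the remaining range must still contain a peak.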
if A[1] > A[2]:
return 1
if A[len(A) - 2] > A[len(A) - 3]:
return len(A) - 2
start, end = 0, len(A) - 1
while start + 1 < end:
mid = (start + end) // 2
if A[mid] > A[mid + 1] and A[mid] > A[mid - 1]:
return mid
if A[mid] < A[mid + 1]:
start = mid + 1
else:
end = mid - 1
return start if A[start] >= A[end] else end
|
[
"[email protected]"
] | |
81e8042a40de433fce29be36bc546150bd69ec66
|
87e60b0504be11c6997f1b20b72e9428cc128342
|
/python/cowbells/data/tqplot.py
|
5ca6659a29428f017baacabacc79523cfcbe6ff4
|
[] |
no_license
|
brettviren/cowbells
|
70a85856fdfc54526c847f115d5dc01ec85ec215
|
1ceca86383f4f774d56c3f159658518242875bc6
|
refs/heads/master
| 2021-01-10T18:44:41.531525 | 2014-04-09T15:17:29 | 2014-04-09T15:17:29 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 7,644 |
py
|
#!/usr/bin/env python
'''
Make some plots from the TQ tree.
'''
import ROOT
import math
def liberate(tobj):
ROOT.SetOwnership(tobj,0)
return tobj
def draw_stats(pad, h, fitnum =111):
h.Draw()
pad.Modified()
pad.Update()
stats = h.FindObject("stats")
if stats:
stats.SetOptStat(1110)
stats.SetOptFit(fitnum)
return
class Plots(object):
expected_led_time = "abs(tmin[%(chn)d]-1530) < 30"
def __init__(self, tree, canvas = None, pdffile = 'tqplot.pdf'):
self.tree = tree
self.pdffile = pdffile
if not canvas:
canvas = ROOT.TCanvas("tqtree","tqtree debug", 0,0, 1000, 700)
self.canvas = canvas
def cprint(self,extra=''):
self.canvas.Print('%s%s'%(self.pdffile,extra), 'pdf')
def do_twoXtwo(self, what, chn=0):
self.canvas.Clear()
self.canvas.Divide(2,2)
for count, what in enumerate(what):
pad = self.canvas.cd(count+1)
pad.SetLogy(True)
self.tree.Draw("%s[%d]"%(what,chn))
return
def do_minmax(self, chn=0):
self.do_twoXtwo(['qmin','qmax','tmin','tmax'], chn)
def do_stats(self, chn=0):
self.do_twoXtwo(['avg','mean','rms','sigma'], chn)
def do_sumn(self, chn=0):
self.do_twoXtwo(['n3','n4','sum3','sum4'], chn)
def do_34(self, chn=0, maxq=400, opt="", logy=True, fit=(25,100)):
self.canvas.Clear()
self.canvas.Divide(2,2)
todraw = "n%(nsig)d[%(chn)d]*mean[%(chn)d] -sum%(nsig)d[%(chn)d]"
for count,nsig in enumerate([3,4]):
pad = self.canvas.cd(count+1)
pad.SetLogy(logy)
self.tree.Draw(todraw%locals(),"",opt)
for count,nsig in enumerate([3,4]):
pad = self.canvas.cd(count+3)
pad.SetLogy(logy)
h = liberate(ROOT.TH1F("spe%d"%nsig,'sum(ADC) >%d sigma above ped'%nsig,maxq,0,maxq))
self.tree.Draw(todraw%locals()+">>spe%d"%nsig,"",opt)
if fit:
h.Fit("gaus","","", *fit)
h.Draw()
pad.Modified()
pad.Update()
stats = h.FindObject("stats")
if stats:
stats.SetOptStat(1110)
stats.SetOptFit(111)
continue
return
def do_34_50(self, chn=0, opt="", logy=True):
self.do_34(chn=chn, maxq=50, opt=opt, logy=logy,fit=None)
def do_34vEntry(self, chn=0):
self.canvas.Clear()
self.canvas.Divide(2,2)
measure = "n%(nsig)d[%(chn)d]*mean[%(chn)d]-sum%(nsig)d[%(chn)d]"
for count,nsig in enumerate([3,4]):
pad = self.canvas.cd(count+1)
m = measure % locals()
m += ':Entry$'
c = ""
print m
self.tree.Draw(m,c,'colz')
for count,nsig in enumerate([3,4]):
pad = self.canvas.cd(count+3)
m = measure % locals()
c = "%s > 0 && %s < 400" % (m,m)
m += ':Entry$'
print m
print c
self.tree.Draw(m,c,'colz')
return
def do_fit(self, chn=0):
self.canvas.Clear()
self.canvas.Divide(2,2)
toplot = "mean[%(chn)d] sigma[%(chn)d] mean[%(chn)d]:Entry$ sigma[%(chn)d]:Entry$"
toplot = toplot % locals()
for count,what in enumerate(toplot.split()):
pad = self.canvas.cd(count+1)
opt = ""
if 'Entry$' in what:
opt = "COLZ"
self.tree.Draw(what,"",opt)
continue
return
def _fit_pe(self, chn=0, cuts=None,
spe=(60,110), dpe=(115,220), tpe=(225,350), qmeas = 'qpeak'):
'''
Fit single/double PE peak of qpeak.
'''
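        # Strategy: fit a Gaussian to the 1PE peak, subtract it, fit the 2PE
        # peak on the residual, repeat for 3PE, then run a combined
        # three-Gaussian fit; the spe/dpe/tpe fit windows are tuning choices.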
if cuts is None:
cuts = self.expected_led_time
nbins, minq, maxq = 500, 0, 500
cuts = cuts%locals()
what = "%(qmeas)s[%(chn)d]"%locals()
h = liberate(ROOT.TH1F('hqpeak', "%s {%s}" % (qmeas, cuts,), nbins, minq, maxq))
self.tree.Draw('%s >> hqpeak'%what, cuts)
pe1 = liberate(h.Clone())
pe1.Fit("gaus","L","",*spe)
fit1 = pe1.GetFunction("gaus")
fit1.SetRange(minq,maxq)
fit1.SetLineColor(2)
pe2 = liberate(h.Clone())
pe2.Add(fit1, -1)
pe2.Fit("gaus","L","",*dpe)
fit2 = pe2.GetFunction("gaus")
fit2.SetRange(spe[0],maxq)
fit2.SetLineColor(4)
pe3 = liberate(h.Clone())
pe3.Add(fit2, -1)
pe3.Fit("gaus","L","",*tpe)
fit3 = pe3.GetFunction("gaus")
#fit3.SetRange(dpe[0],maxq)
fit3.SetLineColor(6)
pe123 = liberate(h.Clone())
dfit = liberate(ROOT.TF1("dfit","gaus(0)+gaus(3)+gaus(6)",10,tpe[1]))
for ind in range(3):
dfit.SetParameter(ind, fit1.GetParameter(ind))
dfit.SetParameter(ind+3,fit2.GetParameter(ind))
dfit.SetParameter(ind+6,fit3.GetParameter(ind))
pe123.Fit(dfit,"L","",10,maxq)
dfit = pe123.GetFunction("dfit")
dfit.SetRange(10,maxq)
dfit.SetLineColor(7)
self.canvas.Clear()
self.canvas.Divide(2,2)
pad = self.canvas.cd(1)
draw_stats(pad, pe1)
pad = self.canvas.cd(2)
draw_stats(pad, pe2)
pad = self.canvas.cd(3)
draw_stats(pad, pe3)
pad = self.canvas.cd(4)
draw_stats(pad, pe123, 111111111)
a1 = fit1.Integral(minq,maxq)
if not a1:
            print('No fit 1')
return
a2 = fit2.Integral(minq,maxq)
c1 = fit1.GetParameter(0)
c2 = fit2.GetParameter(0)
mu1 = fit1.GetParameter(1)
mu2 = fit2.GetParameter(1)
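        # For Poisson-distributed PE counts P(2)/P(1) = mu/2, so the mean
        # number of PE can be estimated from the fitted peak areas as
        # mu = 2*A2/A1.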
mupe = 2.0*a2/a1
        print('Mean <PE> of source = 2*%.1f/%.1f = %.3f' % (a2, a1, mupe))
        mu2mu1_frac = 0
        if mu1: mu2mu1_frac = mu2/mu1
        print('Ratio of PE2/PE1: %.1f/%.1f = %.3f (~2?)' % (mu2, mu1, mu2mu1_frac))
        if mupe > 0:
            print('Prob 0PE: %.3f' % (math.exp(-1*mupe),))
return
def do_pe_fits(self, chn=0, cuts = None):
for qmeas in ['qpeak','qpeaks3','qpeaks4','qpeaks5','qwin']:
self._fit_pe(chn=chn,qmeas=qmeas,cuts=cuts)
self.cprint()
continue
return
def do_interspersed_led_cuts(self):
# Cuts to select LEDs interspersed with cosmic muon triggers
self.canvas.Clear()
self.canvas.Divide(2,2)
pad = self.canvas.cd(1)
pad.SetLogy(True)
self.tree.Draw("mean[2]-qmin[2]","mean[2]-qmin[2]<1000")
pad = self.canvas.cd(2)
pad.SetLogy(True)
self.tree.Draw("qnpeaks[0]","mean[2]-qmin[2]<100")
pad = self.canvas.cd(3)
pad.SetLogy(True)
self.tree.Draw("tmin[0]","mean[2]-qmin[2]<100 && qnpeaks[0] == 1")
pad = self.canvas.cd(4)
pad.SetLogy(False)
self.tree.Draw("qpeak[0]")
def all(self, chn = 0):
self.cprint('[')
for what in [
'minmax','stats','fit','sumn',
'34','34_50', '34vEntry',
]:
meth = getattr(self, 'do_%s' % what)
meth(chn)
self.cprint()
self.do_interspersed_led_cuts()
self.cprint()
self.do_pe_fits(chn)
self.cprint(']')
if __name__ == '__main__':
import sys
fp = ROOT.TFile.Open(sys.argv[1])
tree = fp.Get("tq")
try:
pdf = sys.argv[2]
except IndexError:
pdf = None
p = Plots(tree, pdffile=pdf)
p.all()
|
[
"[email protected]"
] | |
cd7a6e39bddcd867989015fc0c40cc09c18bc796
|
d86e9d59784097a7262fa9337585a36bd58a6d29
|
/cvxbenchmarks/lib/data/epsilon/epopt/problems/hinge_l2.py
|
41b8b42fd08be15cf32527c0526e7dc334f6548e
|
[] |
no_license
|
nishi951/cvxbenchmarks
|
2ae36e75c42c8bd35fafac98bad5d9d88168bd68
|
932141d8e4e929860011bf25c41e941e2f8fbd76
|
refs/heads/master
| 2021-01-11T07:23:32.260811 | 2018-09-15T22:23:14 | 2018-09-15T22:23:14 | 62,177,196 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 400 |
py
|
"""Standard SVM, i.e.. hinge loss w/ l2 regularization."""
from epopt.problems import problem_util
import cvxpy as cp
import epopt as ep
import numpy as np
import scipy.sparse as sp
def create(**kwargs):
A, b = problem_util.create_classification(**kwargs)
lam = 1
x = cp.Variable(A.shape[1])
f = ep.hinge_loss(x, A, b) + lam*cp.sum_squares(x)
return cp.Problem(cp.Minimize(f))
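# Usage sketch (not from the original file; assumes
# problem_util.create_classification accepts the m/n keyword arguments used
# elsewhere in this benchmark suite):
#
#   prob = create(m=100, n=10)
#   prob.solve()
#   print(prob.value)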
|
[
"[email protected]"
] | |
87a1365fd6a9f6ccca02348485d3a70abebd022f
|
34a26b713021f15d94d416b9728bac50d283ed5f
|
/interno_pymedigital-9.0/sale_order_invoice_amount/models/sale_order.py
|
7dc71e3de0680f62a283f98ad86d6675e8ad4e35
|
[] |
no_license
|
Brahim820/odoo-1
|
7641b2a0ef411fb3b82f806a11e88b9880875a46
|
d8ee18a7dc467ff250113a0a3df3fcf1e876b321
|
refs/heads/master
| 2020-04-07T10:58:09.517097 | 2018-11-19T16:56:45 | 2018-11-19T16:56:45 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,690 |
py
|
# -*- encoding: utf-8 -*-
from openerp import api, fields, models, _
from openerp.tools import float_is_zero
from openerp.exceptions import UserError
class SaleOrder(models.Model):
_inherit = 'sale.order'
@api.multi
def action_invoice_create_from_amount(self, grouped=False, amount=0):
if amount == 0:
            raise UserError(_('The amount to invoice should be greater than zero.'))
for order in self:
group_key = order.id if grouped else (order.partner_invoice_id.id, order.currency_id.id)
inv_obj = self.env['account.invoice']
invoices = {}
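            # Walk the lines from smallest to largest quantity still to
            # invoice, converting the remaining amount into an invoiced
            # quantity at each line's discounted unit price.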
for line in order.order_line.sorted(key=lambda l: l.qty_to_invoice):
if group_key not in invoices:
inv_data = order._prepare_invoice()
invoice = inv_obj.create(inv_data)
invoices[group_key] = invoice
elif group_key in invoices:
vals = {}
if order.name not in invoices[group_key].origin.split(', '):
vals['origin'] = invoices[group_key].origin + ', ' + order.name
if order.client_order_ref and order.client_order_ref not in invoices[group_key].name.split(', '):
vals['name'] = invoices[group_key].name + ', ' + order.client_order_ref
invoices[group_key].write(vals)
                discount = 1 - (line.discount or 0.0) / 100.0
                if line.price_unit > 0 and discount > 0:
paid_qty = amount / (line.price_unit * discount)
else:
paid_qty = line.product_uom_qty
to_invoice = 0
if line.qty_to_invoice > 0:
if paid_qty >= line.qty_to_invoice:
to_invoice = line.qty_to_invoice
else:
to_invoice = paid_qty
name = line.name + ' desde ' + str(round(line.qty_invoiced, 2)) + ' a ' + str(
round(line.qty_invoiced + to_invoice, 2)) + ' de ' + str(round(line.product_uom_qty, 2))
line.invoice_line_create_from_amount(invoices[group_key].id, to_invoice, name)
amount -= to_invoice * line.price_unit
            if amount > 0:
                lines = order.order_line.filtered(lambda l: l.product_uom_qty - l.qty_invoiced > 0)
                for line in lines.sorted(
                        key=lambda l: (l.product_uom_qty - l.qty_invoiced) * l.price_unit):
                    discount = 1 - (line.discount or 0.0) / 100.0
                    if line.price_unit > 0 and discount > 0:
                        paid_qty = amount / (line.price_unit * discount)
                    else:
                        paid_qty = line.product_uom_qty
residual_qty = line.product_uom_qty - line.qty_invoiced
to_invoice = 0
if residual_qty > 0:
if round(paid_qty, 5) > round(residual_qty, 5):
to_invoice = residual_qty
else:
to_invoice = paid_qty
name = ' Pago anticipado: ' + line.name + ' desde ' + str(round(line.qty_invoiced, 2)) + ' a ' + str(
round(line.qty_invoiced + to_invoice, 2)) + ' de ' + str(round(line.product_uom_qty, 2))
line.invoice_line_create_from_amount(invoices[group_key].id, to_invoice, name)
amount -= to_invoice * line.price_unit
if not invoices:
raise UserError(_('There is no invoicable line.'))
for invoice in invoices.values():
if not invoice.invoice_line_ids:
raise UserError(_('There is no invoicable line.'))
# If invoice is negative, do a refund invoice instead
if invoice.amount_untaxed < 0:
invoice.type = 'out_refund'
for line in invoice.invoice_line_ids:
line.quantity = -line.quantity
# Use additional field helper function (for account extensions)
for line in invoice.invoice_line_ids:
line._set_additional_fields(invoice)
# Necessary to force computation of taxes. In account_invoice, they are triggered
# by onchanges, which are not triggered when doing a create.
invoice.compute_taxes()
        #TODO: move this computation into the main function
        # to avoid problems with functions that call super,
        # as in the l10n_ec_sri_sale module
resx = [inv.id for inv in invoices.values()]
invx = self.env['account.invoice'].browse(resx)
for i in invx:
i.compute_sri_invoice_amounts()
return [inv.id for inv in invoices.values()]
class SaleOrderLine(models.Model):
_inherit = 'sale.order.line'
@api.multi
def invoice_line_create_from_amount(self, invoice_id, qty, name):
"""
Create an invoice line. The quantity to invoice can be positive (invoice) or negative
(refund).
:param name: char
:param invoice_id: integer
:param qty: float quantity to invoice
"""
precision = self.env['decimal.precision'].precision_get('Product Unit of Measure')
for line in self:
if not float_is_zero(qty, precision_digits=precision):
vals = line._prepare_invoice_line(qty=qty)
vals.update({'name': name, 'invoice_id': invoice_id, 'sale_line_ids': [(6, 0, [line.id])]})
self.env['account.invoice.line'].create(vals)
|
[
"[email protected]"
] | |
6436176ee36f61be4e18fceb8292042e2a8cd3bd
|
ccbb7fb8fda4d936e765263f05a435058b397bd9
|
/src/guiltytargets/ppi_network_annotation/model/__init__.py
|
ab512ca4a78d001836dbb692256bd93d16deee04
|
[
"MIT"
] |
permissive
|
GuiltyTargets/guiltytargets
|
5a5d3ba9e45867a64c81a91529ae6689f8be447f
|
c20a5cae6c9cc71c2ca73080a862abe986bc34c0
|
refs/heads/master
| 2022-02-13T03:30:49.705239 | 2021-12-22T12:51:20 | 2021-12-22T12:51:20 | 154,318,881 | 10 | 5 | null | null | null | null |
UTF-8
|
Python
| false | false | 339 |
py
|
# -*- coding: utf-8 -*-
"""Package that includes classes for data models."""
from .attribute_network import AttributeNetwork # noqa: F401
from .filtered_network import FilteredNetwork # noqa: F401
from .gene import Gene # noqa: F401
from .labeled_network import LabeledNetwork # noqa: F401
from .network import Network # noqa: F401
|
[
"[email protected]"
] | |
3f7145a11f4c1d019d782a5fae6848a3d4d3f507
|
1d892928c70ee9ddf66f2a37a8e083d2632c6e38
|
/nova/api/openstack/compute/contrib/rescue.py
|
7bf815a37979d0e68811a4baac694cc8f191f500
|
[
"Apache-2.0"
] |
permissive
|
usc-isi/essex-baremetal-support
|
74196c3f1332ee3cdeba9c263faff0ac0567d3cf
|
a77daf8ef56cf41e38de36621eda25ed3f180156
|
refs/heads/master
| 2021-05-19T03:12:11.929550 | 2020-07-24T14:15:26 | 2020-07-24T14:15:26 | 4,702,421 | 0 | 1 |
Apache-2.0
| 2020-07-24T14:15:27 | 2012-06-18T15:19:41 | null |
UTF-8
|
Python
| false | false | 3,310 |
py
|
# Copyright 2011 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The rescue mode extension."""
import webob
from webob import exc
from nova.api.openstack import common
from nova.api.openstack import extensions as exts
from nova.api.openstack import wsgi
from nova import compute
from nova import exception
from nova import flags
from nova import log as logging
from nova import utils
FLAGS = flags.FLAGS
LOG = logging.getLogger(__name__)
authorize = exts.extension_authorizer('compute', 'rescue')
class RescueController(wsgi.Controller):
def __init__(self, *args, **kwargs):
super(RescueController, self).__init__(*args, **kwargs)
self.compute_api = compute.API()
def _get_instance(self, context, instance_id):
try:
return self.compute_api.get(context, instance_id)
except exception.InstanceNotFound:
msg = _("Server not found")
raise exc.HTTPNotFound(msg)
@wsgi.action('rescue')
@exts.wrap_errors
def _rescue(self, req, id, body):
"""Rescue an instance."""
context = req.environ["nova.context"]
authorize(context)
if body['rescue'] and 'adminPass' in body['rescue']:
password = body['rescue']['adminPass']
else:
password = utils.generate_password(FLAGS.password_length)
instance = self._get_instance(context, id)
try:
self.compute_api.rescue(context, instance,
rescue_password=password)
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error,
'rescue')
return {'adminPass': password}
@wsgi.action('unrescue')
@exts.wrap_errors
def _unrescue(self, req, id, body):
"""Unrescue an instance."""
context = req.environ["nova.context"]
authorize(context)
instance = self._get_instance(context, id)
try:
self.compute_api.unrescue(context, instance)
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error,
'unrescue')
return webob.Response(status_int=202)
class Rescue(exts.ExtensionDescriptor):
"""Instance rescue mode"""
name = "Rescue"
alias = "os-rescue"
namespace = "http://docs.openstack.org/compute/ext/rescue/api/v1.1"
updated = "2011-08-18T00:00:00+00:00"
def get_controller_extensions(self):
controller = RescueController()
extension = exts.ControllerExtension(self, 'servers', controller)
return [extension]
|
[
"[email protected]"
] | |
c7a1755f0e7fbc0d4edee7b813130bfb252193cf
|
2acf64fca88200f4a4ada46f5da4f96702bafa06
|
/stubs/facebook_business/adobjects/hotelroom.pyi
|
0c74f95680b203626a30e5473345113f0c61cb3b
|
[] |
no_license
|
vlab-research/adopt
|
bf6cdbfb751f7d85674e3925b207639e7d9d92c4
|
66347b00996e26910290e4fdb883e4231cc614af
|
refs/heads/master
| 2023-04-12T12:16:23.061861 | 2021-05-18T14:17:01 | 2021-05-18T14:17:01 | 278,025,786 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,736 |
pyi
|
from facebook_business.adobjects.abstractcrudobject import AbstractCrudObject as AbstractCrudObject
from facebook_business.adobjects.abstractobject import AbstractObject as AbstractObject
from facebook_business.adobjects.objectparser import ObjectParser as ObjectParser
from facebook_business.api import FacebookRequest as FacebookRequest
from facebook_business.typechecker import TypeChecker as TypeChecker
from typing import Any, Optional
class HotelRoom(AbstractCrudObject):
def __init__(self, fbid: Optional[Any] = ..., parent_id: Optional[Any] = ..., api: Optional[Any] = ...) -> None: ...
class Field(AbstractObject.Field):
applinks: str = ...
base_price: str = ...
currency: str = ...
description: str = ...
id: str = ...
images: str = ...
margin_level: str = ...
name: str = ...
room_id: str = ...
sale_price: str = ...
url: str = ...
def api_delete(self, fields: Optional[Any] = ..., params: Optional[Any] = ..., batch: Optional[Any] = ..., success: Optional[Any] = ..., failure: Optional[Any] = ..., pending: bool = ...): ...
def api_get(self, fields: Optional[Any] = ..., params: Optional[Any] = ..., batch: Optional[Any] = ..., success: Optional[Any] = ..., failure: Optional[Any] = ..., pending: bool = ...): ...
def api_update(self, fields: Optional[Any] = ..., params: Optional[Any] = ..., batch: Optional[Any] = ..., success: Optional[Any] = ..., failure: Optional[Any] = ..., pending: bool = ...): ...
def get_pricing_variables(self, fields: Optional[Any] = ..., params: Optional[Any] = ..., batch: Optional[Any] = ..., success: Optional[Any] = ..., failure: Optional[Any] = ..., pending: bool = ...): ...
|
[
"[email protected]"
] | |
1bda6fd6e7271cebb0d5a3ec0f810bf5ba116d12
|
386d1b6557f4cbaf20794cd222f3b7b8598ef6a6
|
/data/clean_data/A1/18.py
|
165e341e4a20acdb7adcf06e11bc7e769b947482
|
[] |
no_license
|
woowei0102/code2pro
|
3baf86985f911264362963c503f12d20bdc1f89f
|
0b16c62a1cb9053ab59edd7a52e1b3b39fdf66dc
|
refs/heads/main
| 2023-06-28T23:09:23.998798 | 2021-07-13T11:49:27 | 2021-07-13T11:49:27 | 385,585,282 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 710 |
py
|
class Account:
def __init__(self, name):
self.name = name
self._balance = 0
    def deposit(self, amount):
        self._balance = self._balance + amount
        print('{}存了NT${:,.0f}元.'.format(self.name, amount))
    def withdraw(self, amount):
        if amount <= self._balance:
            self._balance = self._balance - amount
            print('{}提了NT${:,.0f}元.'.format(self.name, amount))
        else:
            print('{}的存款不足.'.format(self.name))
def show(self):
print('{}餘額NT${:,.0f}元.'.format(self.name,self._balance))
userA = Account("Jack")
userA.withdraw(1000)
userA.deposit(5000)
userA.withdraw(1000)
userA.show()
|
[
"[email protected]"
] | |
d6de7da64fe8278c4dcc7e25bc1fdf741e82efa8
|
d7e9bf5d59343f9ea1670fc529e1afa8fdcbf337
|
/Section-04/create_tables.py
|
2cd9ed6868b3e5aafebbbf768358599456b3f6fa
|
[] |
no_license
|
tyday/solid-guacamole
|
2610985f3156d44144cf40dd65b040898fb8c159
|
f1a1544ae831c18c2acf558afdf8a1d4c9991152
|
refs/heads/master
| 2020-05-05T09:01:56.946260 | 2019-04-14T17:49:13 | 2019-04-14T17:50:21 | 179,888,284 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 424 |
py
|
import sqlite3
connection = sqlite3.connect('data.db')
cursor = connection.cursor()
create_table = "CREATE TABLE IF NOT EXISTS users (id INTEGER PRIMARY KEY, username text, password text)"
cursor.execute(create_table)
create_table = "CREATE TABLE IF NOT EXISTS items (name text, price real)"
cursor.execute(create_table)
cursor.execute("INSERT INTO items VALUES ('test', 10.99)")
connection.commit()
connection.close()
|
[
"[email protected]"
] | |
caa8266f63e9454a80ff08be34a5a07f072d0f01
|
98a359465e6e0620accede5b87b819aed663179d
|
/schol_library/migrations/0059_auto_20190922_1729.py
|
f4818e0050c290afbb640c3b9136a5ea6ce4a2ed
|
[] |
no_license
|
mustavfaa/back-end
|
88f8674bd6c2f8d0c4984a2a3d34f2aece3ec8d1
|
6635e8f504c7a7ba9709121b4dd8d5ccecdf05ca
|
refs/heads/main
| 2023-08-15T10:48:03.461138 | 2021-09-27T15:26:03 | 2021-09-27T15:26:03 | 410,938,832 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,011 |
py
|
# Generated by Django 2.2 on 2019-09-22 11:29
import datetime
from django.db import migrations, models
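# Note: the datetime defaults below were frozen when this migration was
# generated; they are fixed timestamps, not re-evaluated when the migration
# runs.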
class Migration(migrations.Migration):
dependencies = [
('schol_library', '0058_auto_20190922_1144'),
]
operations = [
migrations.AddField(
model_name='requestedition',
name='checkid',
field=models.BooleanField(blank=True, default=False, verbose_name='статус'),
),
migrations.AlterField(
model_name='checkidrequestedition',
name='date_time',
field=models.DateTimeField(blank=True, default=datetime.datetime(2019, 9, 22, 17, 29, 19, 729564), null=True, verbose_name='время просмотра'),
),
migrations.AlterField(
model_name='requestedition',
name='date_time',
field=models.DateTimeField(blank=True, default=datetime.datetime(2019, 9, 22, 17, 29, 19, 728450), verbose_name='время заявки'),
),
]
|
[
"[email protected]"
] | |
eb129243a035487b54c5721c6288ed1cc40cdb22
|
96a34a048c783a75736bf0ec775df22142f9ee53
|
/packages/models-library/src/models_library/services_access.py
|
9e121fad95a0a38e9c550ccca50cfff86227dfc2
|
[
"MIT"
] |
permissive
|
ITISFoundation/osparc-simcore
|
77e5b9f7eb549c907f6ba2abb14862154cc7bb66
|
f4c57ffc7b494ac06a2692cb5539d3acfd3d1d63
|
refs/heads/master
| 2023-08-31T17:39:48.466163 | 2023-08-31T15:03:56 | 2023-08-31T15:03:56 | 118,596,920 | 39 | 29 |
MIT
| 2023-09-14T20:23:09 | 2018-01-23T10:48:05 |
Python
|
UTF-8
|
Python
| false | false | 666 |
py
|
"""Service access rights models
"""
from pydantic import BaseModel, Field
from pydantic.types import PositiveInt
GroupId = PositiveInt
class ServiceGroupAccessRights(BaseModel):
execute_access: bool = Field(
default=False,
description="defines whether the group can execute the service",
)
write_access: bool = Field(
default=False, description="defines whether the group can modify the service"
)
class ServiceAccessRights(BaseModel):
access_rights: dict[GroupId, ServiceGroupAccessRights] | None = Field(
None,
alias="accessRights",
description="service access rights per group id",
)
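
# Usage sketch (not part of the module; assumes pydantic accepts the
# camelCase alias on input, the default behaviour for aliased fields):
#   rights = ServiceAccessRights(
#       accessRights={1: ServiceGroupAccessRights(execute_access=True)}
#   )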
|
[
"[email protected]"
] | |
5431703d1c4fa12874ad6fb9cb4a6c792be79bb7
|
0809ea2739d901b095d896e01baa9672f3138825
|
/ORMproject1/testApp/migrations/0002_proxyemployee_proxyemployee2.py
|
947459ac9771a7ef22b74ac0159c4d06da01f56a
|
[] |
no_license
|
Gagangithub1988/djangoprojects
|
dd001f2184e78be2fb269dbfdc8e3be1dd71ce43
|
ea236f0e4172fbf0f71a99aed05ed7c7b38018e2
|
refs/heads/master
| 2022-11-15T23:46:46.134247 | 2020-07-15T06:37:51 | 2020-07-15T06:37:51 | 273,479,403 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 791 |
py
|
# Generated by Django 3.0.5 on 2020-04-30 05:40
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('testApp', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='ProxyEmployee',
fields=[
],
options={
'proxy': True,
'indexes': [],
'constraints': [],
},
bases=('testApp.employee',),
),
migrations.CreateModel(
name='ProxyEmployee2',
fields=[
],
options={
'proxy': True,
'indexes': [],
'constraints': [],
},
bases=('testApp.employee',),
),
]
|
[
"[email protected]"
] | |
f3f75af6d875f307aaf0c5dd59ebde978c2efb5d
|
19101bf9478c585f73540f1962494a0315ccd0a6
|
/ax/models/tests/test_alebo_initializer.py
|
8c37f09dce95a0e81f510189ed5551873bcd1268
|
[
"MIT"
] |
permissive
|
liusulin/Ax
|
4ca1dcaa34f129d25faa2f52a8094b5f6e399eba
|
850b6975b7c7f9960ad5461e71d0304b2670232a
|
refs/heads/main
| 2023-07-14T01:02:38.044397 | 2021-08-18T15:34:06 | 2021-08-18T15:35:11 | 397,664,102 | 1 | 0 |
MIT
| 2021-08-18T16:16:10 | 2021-08-18T16:16:09 | null |
UTF-8
|
Python
| false | false | 1,047 |
py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
from ax.models.random.alebo_initializer import ALEBOInitializer
from ax.utils.common.testutils import TestCase
class ALEBOSobolTest(TestCase):
def testALEBOSobolModel(self):
B = np.array([[1.0, 2.0, 3.0], [2.0, 3.0, 4.0]])
Q = np.linalg.pinv(B) @ B
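        # pinv(B) @ B is the orthogonal projector onto the row space of B, so
        # any point in the embedded subspace must satisfy Q @ z == z.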
# Test setting attributes
m = ALEBOInitializer(B=B)
self.assertTrue(np.allclose(Q, m.Q))
# Test gen
Z, w = m.gen(5, bounds=[(-1.0, 1.0)] * 3)
self.assertEqual(Z.shape, (5, 3))
self.assertTrue(Z.min() >= -1.0)
self.assertTrue(Z.max() <= 1.0)
# Verify that it is in the subspace
self.assertTrue(np.allclose(Q @ Z.transpose(), Z.transpose()))
m = ALEBOInitializer(B=B, nsamp=1)
with self.assertRaises(ValueError):
m.gen(2, bounds=[(-1.0, 1.0)] * 3)
|
[
"[email protected]"
] | |
66f2df3cf8c49c743f988bcbdddae4207bad389c
|
c0c8aeb5aaf08925d8c9e1d660b02c89cbc7ad71
|
/Algorithms/Medium/105. Construct Binary Tree from Preorder and Inorder Traversal/answer.py
|
bec4709a6dc30054d5688961993bb42736c611cf
|
[
"Apache-2.0"
] |
permissive
|
kenwoov/PlayLeetCode
|
b2fdc43d799c37683a9efdc31c4df159cf553bf5
|
4012a2f0a099a502df1e5df2e39faa75fe6463e8
|
refs/heads/master
| 2022-12-17T05:54:22.775972 | 2020-09-26T14:08:43 | 2020-09-26T14:08:43 | 214,839,611 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 653 |
py
|
from typing import List
class TreeNode:
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution:
def buildTree(self, preorder: List[int], inorder: List[int]) -> TreeNode:
if not preorder:
return None
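        # preorder[0] is the root; its index in inorder splits the remaining
        # values into the left and right subtrees.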
root = TreeNode(preorder[0])
mid = inorder.index(preorder[0])
root.left = self.buildTree(preorder[1:mid+1], inorder[:mid])
root.right = self.buildTree(preorder[mid+1:], inorder[mid+1:])
return root
if __name__ == "__main__":
s = Solution()
result = s.buildTree([3, 9, 20, 15, 7], [9, 3, 15, 20, 7])
print(result)
|
[
"[email protected]"
] |