Dataset schema (column: dtype, observed min/max):

- blob_id: string (length 40)
- directory_id: string (length 40)
- path: string (3 to 281 chars)
- content_id: string (length 40)
- detected_licenses: list (0 to 57 items)
- license_type: 2 classes
- repo_name: string (6 to 116 chars)
- snapshot_id: string (length 40)
- revision_id: string (length 40)
- branch_name: 313 classes
- visit_date: timestamp[us]
- revision_date: timestamp[us]
- committer_date: timestamp[us]
- github_id: int64 (18.2k to 668M, nullable)
- star_events_count: int64 (0 to 102k)
- fork_events_count: int64 (0 to 38.2k)
- gha_license_id: 17 classes
- gha_event_created_at: timestamp[us]
- gha_created_at: timestamp[us]
- gha_language: 107 classes
- src_encoding: 20 classes
- language: 1 class
- is_vendor: bool (2 classes)
- is_generated: bool (2 classes)
- length_bytes: int64 (4 to 6.02M)
- extension: 78 classes
- content: string (2 to 6.02M chars)
- authors: list (1 item)
- author: string (0 to 175 chars)
509b760ca822fb225e313cddf49de04c4015a78a
|
1ab5bc2e1b7a48ef1fd0f3d89618ce7ad55c15a5
|
/quizlet.py
|
223f164d449452f8fe8d2674bf439bce4182a385
|
[] |
no_license
|
joequery/quizlet
|
bbb99bf8db6c5d0bdecc37383098d50db60f6f00
|
25eabec0668cf4a90657a2e7e7f8258b00bd93bb
|
refs/heads/master
| 2020-03-30T23:11:57.211977 | 2013-04-22T15:59:11 | 2013-04-22T15:59:11 | 9,089,231 | 39 | 7 | null | 2013-04-11T03:31:15 | 2013-03-29T00:24:02 |
Python
|
UTF-8
|
Python
| false | false | 3,138 |
py
|
import requests
import json
import os
import re
import sys
from quizlet_secret import QUIZLET_CLIENT_ID
###########################################################################
# Constants
###########################################################################
SET_DIR = os.path.join(os.path.dirname(os.path.realpath(__file__)), "sets")
###########################################################################
# Helper functions
###########################################################################
def get_answer_parts(termStr):
answerPartList = list(filter(None, termStr.split("\n")))  # list() so callers can pop() parts (needed on Python 3)
return answerPartList
def get_keyterms(answerPart):
keyterms = re.findall(r'\[[^]]+\]', answerPart)
# get rid of surrounding brackets
for i,keyterm in enumerate(keyterms):
keyterms[i] = keyterm[1:-1]
return keyterms
def user_answer_index(answer, parts):
answer = answer.lower()
for i, part in enumerate(parts):
keyterms = get_keyterms(part)
nonMatchedTerm = 0
for keyterm in keyterms:
keyterm = keyterm.lower()
if answer.find(keyterm) == -1:
nonMatchedTerm += 1
if nonMatchedTerm == 0:
return i
return -1
def make_quizlet_request(endpoint):
params = {"client_id": QUIZLET_CLIENT_ID, "whitespace": 0}
apiPrefix = "https://api.quizlet.com/2.0"
url = os.path.join(apiPrefix, endpoint)
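# (os.path.join happens to produce a valid URL here because the POSIX
# separator is "/"; urllib's urljoin would be the portable choice)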
r = requests.get(url=url, params=params)
dictFromJSON = json.loads(r.content)
# Force status code key. Quizlet doesn't put one in for 200, only errors
dictFromJSON['http_code'] = r.status_code
return dictFromJSON
def get_flashcard_set(setID):
return make_quizlet_request("sets/%s" % setID)
def save_flashcard_set_terms_to_file(flashcardSet, f):
termsJSON = json.dumps(flashcardSet['terms'])
f.write(termsJSON)
def load_flashcard_set_terms_from_file(f):
termJSON = f.read()
return json.loads(termJSON)
def check_answer(userAnswer, answerParts):
'''
If userAnswer is in the answerParts list, remove it from the list and
return True. Otherwise return False
'''
answerIndex = user_answer_index(userAnswer, answerParts)
if answerIndex != -1:
answerParts.pop(answerIndex)
return True
else:
return False
def hintify(answerPart):
'''
Return a hint string for an answerPart: the first letter of each bracketed
word is kept and the remaining letters are masked with underscores.
'''
answerStr = list(answerPart)
inBracket = False
i = 0
startOfNewWord = False
while i < len(answerStr):
if not inBracket and answerStr[i] != '[':
i += 1
elif not inBracket and answerStr[i] == '[':
inBracket = True
i += 2
elif inBracket and answerStr[i] != ']':
if answerStr[i] == " ":
startOfNewWord = True
else:
if startOfNewWord:
startOfNewWord = False
else:
answerStr[i] = "_"
i += 1
elif inBracket and answerStr[i] == ']':
inBracket = False
i += 1
return "".join(answerStr)
|
[
"[email protected]"
] | |
499e78c1d4c85fbf210f6bfa70ebea06fd01bca7
|
dc3b2dfb012d4a2738f7fecbc128b7cb3b1090e0
|
/utils.py
|
e28f15eb879683d5278f67ff4d1680ec38cdb3b4
|
[] |
no_license
|
jonathanlctt/ihs_srht
|
5f8d93796e4facb3759f121c95a144ee4270cdfa
|
6cc7e687da68cb470f636e4755579028b48a19bb
|
refs/heads/main
| 2023-04-10T00:18:04.789764 | 2023-04-01T03:11:03 | 2023-04-01T03:11:03 | 306,413,609 | 2 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 589 |
py
|
import numpy as np
from time import time
def timeit(method):
def timed(*args):
start = time()
result = method(*args)
end = time()
return result, end-start
return timed
def generate_example(n=1024, d=64, nu=1.):
A = 1./np.sqrt(n)*np.random.randn(n, d)
U, _, V = np.linalg.svd(A, full_matrices=False)
Sigma = np.array([0.9/(ii+1) for ii in range(d)])
A = U @ np.diag(Sigma) @ V  # rebuild A with spectrum Sigma (svd already returns V transposed)
xpl = 1./np.sqrt(d) * np.random.randn(d,)
b = A @ xpl + 1./np.sqrt(n) * np.random.randn(n,)
de = np.sum( Sigma ** 2 / (Sigma ** 2 + nu ** 2) )
return A, b, de
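# A minimal usage sketch combining the two helpers above:
#   A, b, de = generate_example(n=1024, d=64, nu=1.0)
#   lstsq_timed = timeit(np.linalg.lstsq)
#   (x_hat, *_), elapsed = lstsq_timed(A, b, None)
# de is the effective dimension sum_i s_i^2/(s_i^2 + nu^2) at
# regularization level nu.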
|
[
"[email protected]"
] | |
876d42eca7d958444943cfd5e550208f8781fe15
|
43c24c890221d6c98e4a45cd63dba4f1aa859f55
|
/test/cpython/test_copy_reg.py
|
2f49eb711e1c3c92f0d6818c85ca52b10e5eb43c
|
[
"Python-2.0",
"Apache-2.0",
"BSD-2-Clause"
] |
permissive
|
jmgc/pyston
|
c8e4df03c33c6b81d20b7d51a781d9e10148238e
|
9f672c1bbb75710ac17dd3d9107da05c8e9e8e8f
|
refs/heads/master
| 2020-12-11T07:51:58.968440 | 2020-09-11T14:38:38 | 2020-09-11T14:38:38 | 39,242,644 | 0 | 0 |
NOASSERTION
| 2020-09-11T14:38:39 | 2015-07-17T08:09:31 |
Python
|
UTF-8
|
Python
| false | false | 44 |
py
|
../../from_cpython/Lib/test/test_copy_reg.py
|
[
"[email protected]"
] | |
b4190faf905e258263c3afd8f9712baf92b92424
|
6f14bd1f6a17d5129d79ad47661f317cb69d7079
|
/TempestD_converter/decode_tempest_for_QA.py
|
25486b773920fde82a67809cf30a4fa43462db11
|
[] |
no_license
|
CSU-CIRA/tempestd_data_assimilation
|
2874970f40c532f026f25a72cf31b4172f18fd4f
|
f6d425368012cd8af9c42d427b2fc20af3942751
|
refs/heads/master
| 2023-04-05T16:57:18.109051 | 2021-04-29T12:13:48 | 2021-04-29T12:13:48 | 271,883,194 | 0 | 2 | null | null | null | null |
UTF-8
|
Python
| false | false | 9,353 |
py
|
# coding: utf-8
# Stock modules
import os
import sys
import math
import argparse
import glob
import re
import numpy as np
import numpy.ma as ma
import datetime
import pytz
import h5py
import logging
# Downloaded specialty modules
import ncepbufr
# Local modules
from tempest_h5_to_bufr import TD_record
'''
"python decode_tempest_for_QA.py bufr_file h5_data_dir"
Decodes the Tempest-D BUFR file and compares the pixel values in every subset
to the corresponding pixel values in the HDF5 file.
'''
# For converting the iwp & lwp values
KG_TO_G = 1000
class TD_HDF5_File:
'''
Encapsulates an HDF5 file. Specifically keys it to the time range given in
the file name.
'''
# Free the file data if this many match failures have been seen
MAX_MATCH_FAILURES = 10
# TEMPEST_L1_LG_pub_20181208T030000_20181208T090000_v1.41.h5
REGEX = r'TEMPEST.*_(\d{8}T\d{6})_(\d{8}T\d{6})_.*\.h5'
FNAME_MATCHER = re.compile(REGEX)
def __init__(self, filepath):
self.filepath = filepath
# Init the file object
log.info('opening %s', filepath)
self.file_obj = h5py.File(filepath, 'r')
# Read the data in only when we need it
self.h5_data = None
# Determine the file's time range by parsing its name
fname_match = self.FNAME_MATCHER.search(filepath)
dt = datetime.datetime.strptime(
fname_match.group(1),
'%Y%m%dT%H%M%S'
)
# The pixel records we want to compare these to use timezone aware
# datetimes so these need to be timezone aware too
self.dt_start = pytz.utc.localize(dt)
dt = datetime.datetime.strptime(
fname_match.group(2),
'%Y%m%dT%H%M%S'
)
self.dt_end = pytz.utc.localize(dt)
# Count how many times the time range match has failed
self.matchFailures = 0
def read_file(self, record):
'''
Read the h5 datasets into memory using the dataset keys in the record
data array
'''
self.h5_data = {}
for dset_name in record.data:
self.h5_data[dset_name] = self.file_obj[dset_name][:]
def free_data(self):
'''
At least try to free the file data from memory
'''
for dset_name, h5_d in self.h5_data.items():
del h5_d
# If the above loop doesn't do it, this should
self.h5_data = None
def in_range(self, record):
'''
Returns True if the record was in file's time range, and False if not.
'''
if record.datetime() >= self.dt_start and record.datetime() <= self.dt_end:
return True
else:
self.matchFailures += 1
if self.matchFailures >= self.MAX_MATCH_FAILURES and self.h5_data:
# Data in the BUFR files should be pretty contiguous by
# date/time, so we probably don't need this hdf5 data anymore
self.free_data()
# But just in case we do - start over
self.matchFailures = 0
return False
def check_pixel(self, record):
'''
We should know that the hdf5 file time range matches the record time,
so the record's data should be in here and it should match up properly
'''
if not self.h5_data:
self.read_file(record)
row = record.sangle_idx
col = record.sline_idx
for dset_name, h5_d in self.h5_data.items():
pixel_val = record.data[dset_name]
h5_val = h5_d[row, col]
#log.debug('pixel_val: %s, h5_val: %s', pixel_val, h5_val)
if pixel_val == 10E10:
# h5 val should be nan
if np.isnan(h5_val) or h5_val < -990:
result = True
else:
result = False
elif h5_val < -300:
result = False
elif np.issubdtype(h5_d.dtype, np.integer):
# For h5 integer datasets check for equality
result = (int(pixel_val) == h5_val)
elif dset_name == '/iwp' or dset_name == '/lwp':
# For the water path datasets check for closeness within 1000th
result = math.isclose(pixel_val, h5_val,
abs_tol=0.001)
else:
# For the other float datasets check for closeness within
# 10000th
result = math.isclose(pixel_val, h5_val,
abs_tol=0.0001)
if not result:
log.warning('BUFR & HDF5 values don\'t match:')
log.warning('HDF5 file: %s, row: %s, col: %s', self.filepath, row, col)
log.warning('dataset: %s', dset_name)
log.warning('bufr val: %s, h5 val: %s\n', pixel_val, h5_val)
#sys.exit(1)
class PixelChecker:
'''
Checks the accuracy of the pixel data from a BUFR file against one or more
input HDF5 files, to QA the conversion of the HDF5 files to BUFR
'''
def __init__(self, h5_dir):
# Open and initialize all the HDF5 files
h5_glob = os.path.join(h5_dir, '*.h5')
h5_paths = glob.glob(h5_glob)
self.filelist = []
for h5_path in h5_paths:
self.filelist.append(TD_HDF5_File(h5_path))
# Sort by starting datetime
self.filelist.sort(key = lambda x: x.dt_start)
def check(self, pixel):
'''
Makes sure the original hdf5 file pixel data is correctly matched by
the BUFR pixel data contained in the pixel record
'''
was_range_matched = False
was_verified = False
for h5_file in self.filelist:
if h5_file.in_range(pixel):
was_range_matched = True
if h5_file.check_pixel(pixel):
was_verified = True
if not was_range_matched:
literal = (
'No HDF5 file\'s time range matched this pixel\'s datetime:'
)
log.warning('%s\n%s', literal, str(pixel.__dict__))
return was_verified
#
# Main Program
#
# Setup logging. Note that the output directly from BUFRLIB goes to stdout,
# so the logging output has to do the same for both to go to the same
# place.
log = logging.getLogger(__name__)
logging.basicConfig(
format='%(asctime)s %(levelname)-8s%(name)s: %(message)s',
level=logging.INFO
)
parser = argparse.ArgumentParser()
parser.add_argument(
'bufr_file',
help='The BUFR file to check'
)
parser.add_argument(
'input_hdf5_dir',
help='The directory that contains the Tempest-D HDF5 files to check against'
)
#parser.add_argument(
# 'increment',
# help='The size of the steps through the BUFR file pixels'
#)
parser_args = parser.parse_args()
# Open the HDF5 files and get them set up for pixel data checking
checker = PixelChecker(parser_args.input_hdf5_dir)
bufr = ncepbufr.open(parser_args.bufr_file)
while bufr.advance() == 0:
bufr_header = '{:10d}{:6d}{:^10}'.format(
bufr.msg_date, bufr.msg_counter, bufr.msg_type)
log.info(bufr_header)
while bufr.load_subset() == 0:
pixel = TD_record()
pixel.data = {}
#scalarstr1 = 'SAID YEAR MNTH DAYS HOUR MINU SECO CLATH CLONH CHSQ'
# Squeeze out any extra singleton dimensions and fill in any masked
# values with the fill value - 10E10
hdr = bufr.read_subset(pixel.scalarstr1).squeeze().filled()
#log.debug('hdr: %s', hdr)
# Put the time info in both the pixel time members and the data dict so
# that both getting the object's datetime and looking up the data in
# the hdf5 files is easy
pixel.year = pixel.data['/year'] = int(hdr[1])
pixel.month = pixel.data['/month'] = int(hdr[2])
pixel.day = pixel.data['/day'] = int(hdr[3])
pixel.hour = pixel.data['/hour'] = int(hdr[4])
pixel.min = pixel.data['/minute'] = int(hdr[5])
pixel.sec = pixel.data['/second'] = int(hdr[6])
#log.debug('hdr[7]: %s, %s', hdr[7], type(hdr[7]))
pixel.data['/pixel latitude'] = hdr[7]
pixel.data['/pixel longitude'] = hdr[8]
pixel.data['/chi'] = hdr[9]
#scalarstr2 = 'CLAVR SAZA BEARAZ SOZA SOLAZI SANG FOVN SLNM'
hdr = bufr.read_subset(pixel.scalarstr2).squeeze().filled()
pixel.data['/converge'] = hdr[0]
pixel.data['/zenith_angle'] = hdr[1]
pixel.data['/scan_angle'] = hdr[5]
pixel.sangle_idx = int(hdr[6])
pixel.sline_idx = int(hdr[7])
#ilwpstr = 'COLN ILWP'
obs = bufr.read_subset(pixel.ilwpstr, rep=True).squeeze().filled()
# Convert back to g m^-1
obs[1, :][obs[1, :] < 9E9] *= KG_TO_G
#log.debug('ilwp obs: %s', obs)
pixel.data['/iwp'] = obs[1, 0]
pixel.data['/lwp'] = obs[1, 1]
#tmbrstr = 'CHNM TMBR'
obs = bufr.read_subset(pixel.tmbrstr, rep=True).squeeze().filled()
#log.debug('tmbr obs: %s', obs)
pixel.data['/Tb 89 GHz'] = obs[1, 0]
pixel.data['/Tb 165 GHz'] = obs[1, 1]
pixel.data['/Tb 176 GHz'] = obs[1, 2]
pixel.data['/Tb 180 GHz'] = obs[1, 3]
pixel.data['/Tb 182 GHz'] = obs[1, 4]
checker.check(pixel)
bufr.close()
|
[
"[email protected]"
] | |
4a2ecbc07be3e1defbf0fed67e0ccfd2451a3d05
|
a35ec94d6346fa20ad33aed7b00d3326f67ffa7a
|
/e_commerce/urls.py
|
50e7eef5b4fe3d2b88ef6142825341877d16b05e
|
[] |
no_license
|
UsamaMashood/e_commerce_project
|
b2d4c4843c9edc72f4f90341d944647be4dbb098
|
09faf224321ed4de0ea55cc2143d8610f2ed5d99
|
refs/heads/master
| 2020-05-23T05:52:07.758188 | 2019-05-14T16:03:18 | 2019-05-14T16:03:18 | 186,632,129 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 352 |
py
|
from django.contrib import admin
from django.urls import path, include
from django.conf import settings
from django.conf.urls.static import static
urlpatterns = [
path('admin/', admin.site.urls),
path('shop/', include('shop.urls')),
path('blog/', include('blog.urls')),
] + static(settings.MEDIA_URL, document_root = settings.MEDIA_ROOT)
|
[
"[email protected]"
] | |
23b8ea48b8dcdfd520fd983a55990ac4992ded00
|
4017add8fa767cf2eca9163791aa65ee77c67a07
|
/code/gradient_descent/first.py
|
2a722d65bc85c82cd02c686f9e1e382f1907852a
|
[] |
no_license
|
ducksfrogs/numpy_data_ana
|
00c0928f2ddc7a8ad0ea9ecdefa3815a8d880969
|
9d89bc377a3015c19c74f6b5aa500f2f2f08cdb1
|
refs/heads/master
| 2022-10-19T22:37:10.314453 | 2020-06-09T00:05:23 | 2020-06-09T00:05:23 | 268,383,582 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,939 |
py
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.datasets import load_boston
dataset = load_boston()
samples, label, feature_names = dataset.data, dataset.target, dataset.feature_names
bostondf = pd.DataFrame(dataset.data)
bostondf.columns = dataset.feature_names
bostondf['Target price'] = dataset.target
bostondf.head()
bostondf.plot(x='RM', y='Target price', style='o')
def prediction(X, coefficient, intercept):
return X*coefficient + intercept
def cost_function(X, Y, coefficient, intercept):
MSE = 0.0
for i in range(len(X)):
MSE += (Y[i] -(coefficient*X[i] + intercept))**2
return MSE / len(X)
def update_weights(X, Y, coefficient, intercept, learning_rate):
coefficient_derivative = 0
intercept_derivative = 0
for i in range(len(X)):
coefficient_derivative += -2*X[i] *(Y[i] -(coefficient * X[i] + intercept))
intercept_derivative += -2*(Y[i] - (coefficient* X[i] + intercept))
coefficient -= (coefficient_derivative / len(X)) * learning_rate
intercept -= (intercept_derivative / len(X)) * learning_rate
return coefficient, intercept
def train(X, Y, coefficient, intercept, learning_rate, iteration):
cost_hist = []
for i in range(iteration):
coefficient, intercept = update_weights(X, Y, coefficient, intercept, learning_rate)
cost = cost_function(X, Y, coefficient, intercept)
cost_hist.append(cost)
return coefficient, intercept, cost_hist
learning_rate = 0.01
iteration = 10001
coefficient = 0.3
intercept = 2
X = bostondf.iloc[:, 5:6].values
Y = bostondf.iloc[:, 13:14].values
# coefficient, intercept, cost_history = train(X, Y, coefficient, intercept, learning_rate, iteration)
coefficient, intercept, cost_history = train(X, Y, coefficient, intercept=2, learning_rate=0.01, iteration=10001)
y_hat = X*coefficient + intercept
plt.plot(X, Y, 'bo')
plt.plot(X, y_hat)
plt.show()
|
[
"[email protected]"
] | |
563989d6542731165bdd0c43292b32598e100512
|
ecca799453ce5939ce703915fb461f628a77eabe
|
/course/csic5011/2023/homework/homework4/LI_YAKUN_MATH 5473_HW4/HW4_T2.py
|
308c51fdca776aad5a412338b2b88be11a6738f7
|
[] |
no_license
|
yao-lab/yao-lab.github.io
|
830c3835f69d3f285a533b2cdc2c032dfc9eb12d
|
08c96fb16a7516e5db556b135cc6a49964bc398c
|
refs/heads/master
| 2023-06-08T00:50:12.585105 | 2023-05-29T11:52:48 | 2023-05-29T11:52:48 | 124,191,804 | 39 | 37 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,487 |
py
|
import numpy as np
import math
import random
import pandas as pd
from cvxopt import matrix, solvers
from cvxopt.modeling import variable, op,sum, dot
import matplotlib.pyplot as plt
N = 20
d = 20
K = 20
S = np.zeros((N, K), dtype = float)
def function(a):
if a == 0 :
return -1
else:
return 1
for n in range(1,N+1):
A = np.random.normal(loc=0, scale=1, size=(n, d))
for k in range(1, n+1):
for i in range(1, 50+1):
# Make a sparse x0
x0 = np.zeros(d)
t = random.sample(range(d), k)
rand_bino = np.random.binomial(1, 0.5, k)
result = map(function, rand_bino)
result_list = list(result)
x0[t]=result_list
# Draw a standard Gaussian Random Matrix
A = np.random.normal(loc=0, scale=1, size=(n, d))
b = np.dot(A, x0)
# = [-1 if x0[i]<0 else 1 for i in range(len(x0))]
A = A.T
A = matrix(A)
b = matrix(b)
#c = matrix(c)
# Solve the linear programming problem
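# (this is basis pursuit: minimize ||x||_1 subject to A x = b; for a
# k-sparse x0 and n Gaussian measurements, exact recovery is expected
# with high probability once n is sufficiently large relative to k)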
x = variable(d)
op(sum(abs(x)),[dot(A,x) == b]).solve()
x = np.asarray(x.value)
x = np.squeeze(x)
dist = np.sqrt(np.sum(np.square(x-x0)))
if dist <= 1e-3:
S[n-1, k-1] += 1  # 0-based indices; n and k are 1-based loop counters
S = S/50
plt.imshow(S, origin = 'lower', extent = [0, K, 0, N])
plt.xlabel('k-axis')
plt.ylabel('n-axis')
plt.show()
|
[
"[email protected]"
] | |
cac3526b274c70a2aac8b446caaa0e96ec4ca392
|
70ea2dc70e9c0aea1fe9af0db8ff362bdfa50676
|
/main_cls.py
|
b414051a8d4a13db9d12b83b48e9063d571b7edf
|
[] |
no_license
|
guanjz20/MM21_FME_solution
|
b314814f4b49e18e9a6cf3cccc1063bb7e8d9f42
|
7919833a2112c6387a5a49fdcf0c43e75394a0bb
|
refs/heads/master
| 2023-06-16T05:00:55.101198 | 2021-07-11T07:28:07 | 2021-07-11T07:28:07 | 384,887,886 | 4 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 16,176 |
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from genericpath import exists
import os
from typing import Final
import cv2
import sys
from matplotlib.pyplot import xcorr
from numpy.random import f, sample, shuffle
from torch.utils.data import dataset
from config import parser
if len(sys.argv) > 1:
# use shell args
args = parser.parse_args()
print('Use shell args.')
else:
# Debug
args_list = [
'--dataset',
'SAMM',
'--print-freq',
'1',
'--snap',
'debug',
'--data_option',
'wt_diff',
'--gpus',
'0',
'--batch_size',
'2',
'--input_size',
'128',
'--length',
'64',
'-L',
'12',
'--workers',
'0',
]
args = parser.parse_args(args_list)
# os setting
os.environ["MKL_NUM_THREADS"] = "1"
os.environ["NUMEXPR_NUM_THREADS"] = "1"
os.environ["OMP_NUM_THREADS"] = "1"
cv2.ocl.setUseOpenCL(False)
cv2.setNumThreads(0)
if args.gpus is not None:
os.environ['CUDA_DEVICE_ORDER'] = 'PCI_BUS_ID'
os.environ["CUDA_VISIBLE_DEVICES"] = args.gpus
import re
import logging
import time
import torch
import os.path as osp
import torch.nn as nn
import numpy as np
import pandas as pd
import torch.distributed as dist
from torch.nn import DataParallel
from torch.nn.parallel import DistributedDataParallel
from datetime import datetime
from tqdm import tqdm
from pprint import pformat
from timm.utils import setup_default_logging, NativeScaler, reduce_tensor, distribute_bn
from timm.data.distributed_sampler import OrderedDistributedSampler
from contextlib import suppress
from model.network import Two_Stream_RNN_Cls, load_pretrained_model
from dataset.me_dataset import SAMMDataset, CASME_2Dataset
import utils
import trainer_cls as trainer
# torch.multiprocessing.set_start_method('spawn')
torch.backends.cudnn.benchmark = True
# check resume
RESUME = osp.exists(args.resume)
# check finetune
if len(args.finetune_list) > 0:
assert RESUME
FINETUNE = True
else:
FINETUNE = False
_logger = logging.getLogger('train')
# resume
if RESUME:
setattr(args, 'save_root', 'results/{}'.format(osp.basename(args.resume)))
else:
snapshot_name = '_'.join(
[args.snap, datetime.now().strftime("%Y%m%d-%H%M%S")])
if len(args.store_name) == 0:
args.store_name = snapshot_name
setattr(args, 'save_root', 'results/{}'.format(args.store_name))
# make dirs
if args.local_rank == 0:
utils.check_rootfolders(args)
else:
time.sleep(1)
# setup logging
setup_default_logging(
log_path=os.path.join(args.save_root, args.root_log, 'run.log'))
_logger.info("save experiment to :{}".format(args.save_root))
# save args
if args.local_rank == 0:
args_string = pformat(args.__dict__)
_logger.info(args_string)
# reset random
torch.manual_seed(args.seed)
torch.cuda.manual_seed(args.seed)
torch.cuda.manual_seed_all(args.seed)
np.random.seed(args.seed)
# if distributed
if args.distributed and 'WORLD_SIZE' in os.environ:
args.distributed = int(os.environ['WORLD_SIZE']) > 1
args.device = 'cuda'
args.world_size = 1
args.rank = 0 # global rank
if args.distributed:
args.device = 'cuda:%d' % args.local_rank
torch.cuda.set_device(args.local_rank)
dist.init_process_group(backend='nccl', init_method='env://')
args.world_size = dist.get_world_size()
args.rank = dist.get_rank()
_logger.info(
'Training in distributed mode with multiple processes, 1 GPU per process. Process %d, total %d.'
% (args.rank, args.world_size))
# else:
# _logger.info('Training with a single process on 1 GPUs.')
assert args.rank >= 0
utils.synchronize()
# loss_fn
criterion = utils.Focal_Loss(alpha=args.focal_alpha)
# leave one subject out cross validation
img_dirs = utils.get_img_dirs(args.dataset)
img_dirs_dict = utils.leave_one_out(
img_dirs, args.dataset) # key -> [train_set, val_set]
# finetune and resume
if RESUME:
total_MNA = np.load(osp.join(args.resume, args.root_output,
'cross_validation_MNA_dict.npy'),
allow_pickle=True).item()
match_regions_record_all = np.load(osp.join(
args.resume, args.root_output, 'match_regions_record_all.npy'),
allow_pickle=True).item()
if not FINETUNE:
keys1 = list(total_MNA.keys())
# keys2 = list(match_regions_record_all.keys())
rm_key = keys1[-1] # after python 3.6, order is guaranteed
if args.delete_last:
# delete the last subject results
total_MNA, match_regions_record_all = utils.delete_records(
total_MNA, match_regions_record_all, rm_key)
if args.local_rank == 0:
_logger.info('resume from subject {} (include)'.format(rm_key))
elif args.local_rank == 0:
_logger.info('resume from subject {} (not include)'.format(rm_key))
else:
if args.local_rank == 0:
_logger.info('finetune subjects: [{}]'.format(','.join(
args.finetune_list)))
else:
total_MNA = {} # store all cross-validation results
match_regions_record_all = {}
utils.synchronize()
for vi, (val_id, [train_dirs, val_dirs]) in enumerate(img_dirs_dict.items()):
# leave {val_id} out...
# FINETUNE has higher priority than RESUME
if FINETUNE and (val_id not in args.finetune_list):
continue # skip subjects that do not need finetune
if RESUME and (not FINETUNE) and (val_id in total_MNA):
continue # skip from resume
if val_id in args.finetune_list:
# delete records
total_MNA, match_regions_record_all = utils.delete_records(
total_MNA, match_regions_record_all, val_id)
if args.data_option == 'diff':
inchannel = args.L
elif args.data_option == 'wt_diff':
inchannel = 4 * args.L
elif args.data_option == 'wt_dr':
inchannel = (
args.L + 1 - 11 +
1) * 2 * 4 # gauss kernel size = 11, *2 = dr1,dr2, *4 = 4 bands
# amp
amp_autocast = suppress # do nothing
loss_scaler = None
if args.amp:
amp_autocast = torch.cuda.amp.autocast
loss_scaler = NativeScaler()
if args.local_rank == 0:
_logger.info(
'Using native Torch AMP. Training in mixed precision.')
else:
if args.local_rank == 0:
_logger.info('AMP not enabled. Training in float32.')
# model
model = Two_Stream_RNN_Cls(mlp_hidden_units=args.hidden_units,
inchannel=inchannel,
outchannel=2)
# load pretrained
if osp.exists(args.load_pretrained):
model = load_pretrained_model(model, args.load_pretrained,
args.load_bn)
if args.local_rank == 0:
_logger.info('Load pretrained model from {}[load_bn: {}]'.format(
args.load_pretrained, args.load_bn))
# pytorch_total_params = sum(p.numel() for p in model.parameters()
# if p.requires_grad)
# print("Total Params: {}".format(pytorch_total_params))
model = model.cuda()
# setup synchronized BatchNorm for distributed training
if args.distributed:
model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)
# if args.local_rank == 0:
# _logger.info(
# 'Converted model to use Synchronized BatchNorm. WARNING: You may have issues if using '
# 'zero initialized BN layers (enabled by default for ResNets) while sync-bn enabled.'
# )
# optimizer
if args.optim == 'SGD':
optimizer = torch.optim.SGD(
[p for p in model.parameters() if p.requires_grad],
args.lr,
momentum=args.momentum,
weight_decay=args.weight_decay)
elif args.optim == 'Adam':
optimizer = torch.optim.Adam(
[p for p in model.parameters() if p.requires_grad],
args.lr,
weight_decay=args.weight_decay)
else:
raise NotImplementedError
# setup distributed training
if args.distributed:
model = DistributedDataParallel(model,
device_ids=[args.local_rank],
find_unused_parameters=True)
else:
model = DataParallel(model).cuda()
# dataset
Dataset = SAMMDataset if args.dataset == 'SAMM' else CASME_2Dataset
def create_dataset():
train_dataset = Dataset(
mode='train',
img_dirs=train_dirs,
seq_len=args.length,
step=args.step,
# step=1000, # !!
time_len=args.L,
input_size=args.input_size,
data_aug=args.data_aug,
data_option=args.data_option)
val_dataset = Dataset(
mode='test',
img_dirs=val_dirs,
seq_len=args.length,
step=args.length, # assert no overlap
# step=1000, # !!
time_len=args.L,
input_size=args.input_size,
data_aug=False,
data_option=args.data_option)
return train_dataset, val_dataset
train_dataset, val_dataset = create_dataset()
if args.distributed:
val_sampler = OrderedDistributedSampler(val_dataset)
train_sampler = torch.utils.data.distributed.DistributedSampler(
train_dataset)
else:
val_sampler = None
train_sampler = None
train_loader = torch.utils.data.DataLoader(train_dataset,
shuffle=train_sampler is None,
sampler=train_sampler,
batch_size=args.batch_size,
drop_last=False,
num_workers=args.workers,
pin_memory=False)
val_loader = torch.utils.data.DataLoader(val_dataset,
batch_size=args.batch_size,
shuffle=False,
sampler=val_sampler,
num_workers=0,
pin_memory=False,
drop_last=False)
if args.local_rank == 0:
_logger.info('<' * 10 + ' {} '.format(val_id) + '<' * 10)
best_f_score = -1000.0
best_loss = 1000.0
val_accum_epochs = 0
for epoch in range(args.epochs):
if train_sampler is not None:
train_sampler.set_epoch(epoch)
utils.adjust_learning_rate(optimizer, epoch, args.lr,
args.weight_decay, args.lr_steps,
args.lr_decay_factor)
trainer.train(train_loader, model, criterion, optimizer, epoch,
_logger, args, amp_autocast, loss_scaler)
utils.synchronize()
# bn syn
if args.distributed:
if args.local_rank == 0:
_logger.info("Distributing BatchNorm running means and vars")
distribute_bn(model, args.world_size,
True) # true for reduce, false for broadcast
# logging
if (epoch + 1) % args.eval_freq == 0 or epoch == args.epochs - 1:
loss_val, pred_and_gt = trainer.validate(val_loader, model,
criterion, _logger, args,
amp_autocast)
# distributed synchronize
pred_and_gt = utils.synchronize_pred_and_gt(
pred_and_gt, epoch, args)
# eval
if args.local_rank == 0:
precision, recall, f_score, MNA, match_regions_record = utils.evaluate_bi_labels(
pred_and_gt, val_id, epoch, args)
else:
f_score = -10.0
MNA = (0, 0, 0)
# precision, recall, f_score, MNA, match_regions_record = utils.evaluate_bi_labels(
# pred_and_gt, val_id, epoch, args)
utils.synchronize()
# synchronize
f_score = utils.synchronize_f_score(f_score, args)
_logger.info('f_score of processor {}: {:.4f}'.format(
args.local_rank, f_score))
MNA = utils.synchronize_list(MNA, args)
_logger.info('MNA of processor {}: {}'.format(
args.local_rank, MNA))
is_equal_score = f_score == best_f_score
is_best_loss = loss_val < best_loss
best_loss = min(loss_val, best_loss)
is_best_score = f_score > best_f_score
best_f_score = max(best_f_score, f_score)
# save checkpoint
if args.local_rank == 0:
_logger.info(
'Test[{}]: loss_val: {:.4f} (best: {:.4f}), f-score: {:.4f} (best: {:.4f})'
.format(epoch, loss_val, best_loss, f_score, best_f_score))
utils.save_checkpoint(
{
'epoch': epoch + 1,
'state_dict': model.state_dict(),
},
is_best_score,
args.save_root,
args.root_model,
filename=val_id)
utils.synchronize()
if is_best_score or (is_equal_score and
MNA[1] < total_MNA.get(val_id, [0, 0, 0])[1]):
val_accum_epochs = 0
total_MNA.update(
{val_id:
MNA}) # processor 0 need this record for branch selection
if args.local_rank == 0:
match_regions_record_all.update(
match_regions_record
) # only processor 0 need this record
out_dir = osp.join(args.save_root, args.root_output,
val_id)
os.makedirs(out_dir, exist_ok=True)
np.save(osp.join(out_dir, 'match_regions_record_best.npy'),
match_regions_record)
# all
np.save(
osp.join(args.save_root, args.root_output,
'cross_validation_MNA_dict.npy'), total_MNA)
np.save(
osp.join(args.save_root, args.root_output,
'match_regions_record_all.npy'),
match_regions_record_all)
precision, recall, f_score = utils.calculate_metric_from_dict_MNA(
total_MNA)
_logger.info(
'Test[all] Avg f-score now: {:.4f}'.format(f_score))
utils.synchronize()
else:
val_accum_epochs += 1
if val_accum_epochs >= args.early_stop:
_logger.info(
"validation ccc did not improve over {} epochs, stop processor {}"
.format(args.early_stop, args.local_rank))
break
if args.local_rank == 0:
precision_all, recall_all, f_score_all = utils.calculate_metric_from_dict_MNA(
total_MNA)
_logger.critical(
'[{}][{}]/[{}] f_score: {:.4f}, precision_all: {:.4f}, recall_all: {:.4f}, f_score_all: {:.4f}'
.format(val_id, vi + 1, len(img_dirs_dict), best_f_score,
precision_all, recall_all, f_score_all))
# store results
if args.local_rank == 0:
np.save(
osp.join(args.save_root, args.root_output,
'cross_validation_MNA_dict.npy'), total_MNA)
np.save(
osp.join(args.save_root, args.root_output,
'match_regions_record_all.npy'), match_regions_record_all)
_logger.info('ALL DONE')
exit()
|
[
"[email protected]"
] | |
b18d2508a46f66081f28699a952ef5d57a521629
|
ed8a031dd8a34eb614798ceb553e70439a0000ce
|
/Blog/Blog/urls.py
|
5f8d259fe1dfe5ceb11ec8801966c066111e0b44
|
[] |
no_license
|
gunnalasreekanthreddy/myblog_using_django
|
d86a90528dcd9ef78826ef25d9b638afd088320e
|
94307d90450cd4a4a615b15c6cfa0418520cb4ef
|
refs/heads/master
| 2020-06-27T03:33:00.079948 | 2019-08-02T12:05:38 | 2019-08-02T12:05:38 | 199,832,879 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,440 |
py
|
"""Blog URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from myblog import views
from django.conf import settings
from django.conf.urls.static import static
urlpatterns = [
path('admin/', admin.site.urls),
path('',views.hello),
# path('addblogcategory/',views.addblogcategory),
# path('details/',views.details),
path('addcategory/',views.addcateg),
# path('addblog/',views.addblog),
# path('addblogui/',views.addblogui),
# path('auth/',views.auth),
path('createblog/',views.createblog),
# path('authui/',views.authui),
path('authcreate/', views.authcreate),
]
if settings.DEBUG:
urlpatterns += static(settings.STATIC_URL,
document_root=settings.STATIC_DIR)
# static(settings.STATIC_URL, document_root=settings.STATIC_DIR)
|
[
"[email protected]"
] | |
c79e23af4259ba1c22d26c8aa3efba74db913669
|
3fb24e6505ffdc3a3961c467bc54ba7c0b526454
|
/gravityRK4_resized.py
|
bb4decb7fef61bfc5574688ebcdd950f6745dc2a
|
[
"MIT"
] |
permissive
|
martinohanlon/minecraft-planets
|
8d76018cb1dc4154becf9ec836f1e00488673b9e
|
c7017eb9be6260c8c664891a77063305ac97ae57
|
refs/heads/master
| 2020-06-02T07:50:33.539646 | 2013-03-07T21:19:43 | 2013-03-07T21:19:43 | 8,636,766 | 2 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 10,722 |
py
|
#!/usr/bin/env python
"""
An improved version of my Python-based gravity simulator, using Runge-Kutta
4th order solution of the differential equations - coded during Xmas 2012.
Happy holidays, everyone!
I've always been fascinated by space - ever since I read 'The Family of
the Sun', when I was young. And I always wanted to simulate what I've read
about Newton's gravity law, and see what happens in... a universe of my own
making :-)
So: The following code 'sprays' some 'planets' randomly, around a sun,
inside a 900x600 window (the values are below, change them at will).
Afterwards, it applies a very simple set of laws:
- Gravity, inversely proportional to the square of the distance, and linearly
proportional to the product of the two masses
- Elastic collissions of two objects if they are close enough to touch: a
merged object is then created, that maintains the momentum (mass*velocity)
and the mass of the two merged ones.
- This updated version of the code is using the RK4 solution of the velocity/
acceleration differential equation, and is in fact based on the excellent
blog of Glenn Fiedler (http://gafferongames.com)
Use the numeric keypad's +/- to zoom in/out, and press SPACE to toggle
showing/hiding the orbits trace.
Blog post at:
http://users.softlab.ntua.gr/~ttsiod/gravity.html
http://ttsiodras.github.com/gravity.html
Thanassis Tsiodras
[email protected]
"""
import sys
import math
import pygame
import random
from collections import defaultdict
# The window size
WIDTH, HEIGHT = 50, 50
WIDTHD2, HEIGHTD2 = WIDTH/2., HEIGHT/2.
# The number of simulated planets
PLANETS = 30
# The density of the planets - used to calculate their mass
# from their volume (i.e. via their radius)
DENSITY = 0.001
# The gravity coefficient - it's my universe, I can pick whatever I want :-)
GRAVITYSTRENGTH = 1.e4
# The global list of planets
g_listOfPlanets = []
class State:
"""Class representing position and velocity."""
def __init__(self, x, y, vx, vy):
self._x, self._y, self._vx, self._vy = x, y, vx, vy
def __repr__(self):
return 'x:{x} y:{y} vx:{vx} vy:{vy}'.format(
x=self._x, y=self._y, vx=self._vx, vy=self._vy)
class Derivative:
"""Class representing velocity and acceleration."""
def __init__(self, dx, dy, dvx, dvy):
self._dx, self._dy, self._dvx, self._dvy = dx, dy, dvx, dvy
def __repr__(self):
return 'dx:{dx} dy:{dy} dvx:{dvx} dvy:{dvy}'.format(
dx=self._dx, dy=self._dy, dvx=self._dvx, dvy=self._dvy)
class Planet:
"""Class representing a planet. The "_st" member is an instance of "State",
carrying the planet's position and velocity - while the "_m" and "_r"
members represents the planet's mass and radius."""
def __init__(self, initialState=None):
#if PLANETS == 1:
if initialState != None:
# A nice example of a planet orbiting around our sun :-)
#self._st = State(15, 25, 0, 0.2)
self._st = initialState
else:
# otherwise pick a random position and velocity
self._st = State(
float(random.randint(0, WIDTH)),
float(random.randint(0, HEIGHT)),
float(random.randint(0, 40)/100.)-0.2,
float(random.randint(0, 40)/100.)-0.2)
self._r = 0.55
self.setMassFromRadius()
self._merged = False
def __repr__(self):
return repr(self._st)
def acceleration(self, state, unused_t):
"""Calculate acceleration caused by other planets on this one."""
ax = 0.0
ay = 0.0
for p in g_listOfPlanets:
if p is self or p._merged:
continue # ignore ourselves and merged planets
dx = p._st._x - state._x
dy = p._st._y - state._y
dsq = dx*dx + dy*dy # distance squared
dr = math.sqrt(dsq) # distance
force = GRAVITYSTRENGTH*self._m*p._m/dsq if dsq>1e-10 else 0.
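# (note: the self._m factor makes this the gravitational force on this
# planet, so the value accumulated below is force rather than
# acceleration in the strict F = m*a sense)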
# Accumulate acceleration...
ax += force*dx/dr
ay += force*dy/dr
return (ax, ay)
def initialDerivative(self, state, t):
"""Part of Runge-Kutta method."""
ax, ay = self.acceleration(state, t)
return Derivative(state._vx, state._vy, ax, ay)
def nextDerivative(self, initialState, derivative, t, dt):
"""Part of Runge-Kutta method."""
state = State(0., 0., 0., 0.)
state._x = initialState._x + derivative._dx*dt
state._y = initialState._y + derivative._dy*dt
state._vx = initialState._vx + derivative._dvx*dt
state._vy = initialState._vy + derivative._dvy*dt
ax, ay = self.acceleration(state, t+dt)
return Derivative(state._vx, state._vy, ax, ay)
def updatePlanet(self, t, dt):
"""Runge-Kutta 4th order solution to update planet's pos/vel."""
a = self.initialDerivative(self._st, t)
b = self.nextDerivative(self._st, a, t, dt*0.5)
c = self.nextDerivative(self._st, b, t, dt*0.5)
d = self.nextDerivative(self._st, c, t, dt)
dxdt = 1.0/6.0 * (a._dx + 2.0*(b._dx + c._dx) + d._dx)
dydt = 1.0/6.0 * (a._dy + 2.0*(b._dy + c._dy) + d._dy)
dvxdt = 1.0/6.0 * (a._dvx + 2.0*(b._dvx + c._dvx) + d._dvx)
dvydt = 1.0/6.0 * (a._dvy + 2.0*(b._dvy + c._dvy) + d._dvy)
self._st._x += dxdt*dt
self._st._y += dydt*dt
self._st._vx += dvxdt*dt
self._st._vy += dvydt*dt
def setMassFromRadius(self):
"""From _r, set _m: The volume is (4/3)*Pi*(r^3)..."""
self._m = DENSITY*4.*math.pi*(self._r**3.)/3.
def setRadiusFromMass(self):
"""Reversing the setMassFromRadius formula, to calculate radius from
mass (used after merging of two planets - mass is added, and new
radius is calculated from this)"""
self._r = (3.*self._m/(DENSITY*4.*math.pi))**(0.3333)
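# (0.3333 approximates the exact exponent 1/3: r = (3m/(4*pi*DENSITY))**(1/3)
# inverts the volume formula in setMassFromRadius)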
def main():
pygame.init()
win=pygame.display.set_mode((WIDTH, HEIGHT))
keysPressed = defaultdict(bool)
def ScanKeyboard():
while True:
# Update the keysPressed state:
evt = pygame.event.poll()
if evt.type == pygame.NOEVENT:
break
elif evt.type in [pygame.KEYDOWN, pygame.KEYUP]:
keysPressed[evt.key] = evt.type == pygame.KEYDOWN
global g_listOfPlanets, PLANETS
if len(sys.argv) == 2:
PLANETS = int(sys.argv[1])
# And God said: Let there be lights in the firmament of the heavens...
g_listOfPlanets = []
#for i in xrange(0, PLANETS):
#g_listOfPlanets.append(Planet())
g_listOfPlanets.append(Planet(State(15, 25, 0, 0.2)))
g_listOfPlanets.append(Planet(State(35, 25, 0, -0.2)))
g_listOfPlanets.append(Planet(State(5, 25, 0, 0.15)))
g_listOfPlanets.append(Planet(State(37, 37, 0, -0.15)))
#g_listOfPlanets.append(Planet())
def planetsTouch(p1, p2):
dx = p1._st._x - p2._st._x
dy = p1._st._y - p2._st._y
dsq = dx*dx + dy*dy
dr = math.sqrt(dsq)
return dr<=(p1._r + p2._r)
sun = Planet()
sun._st._x, sun._st._y = WIDTHD2, HEIGHTD2
sun._st._vx = sun._st._vy = 0.
sun._m *= 100
sun.setRadiusFromMass()
g_listOfPlanets.append(sun)
for p in g_listOfPlanets:
if p is sun:
continue
if planetsTouch(p, sun):
p._merged = True # ignore planets inside the sun
# Zoom factor, changed at runtime via the '+' and '-' numeric keypad keys
zoom = 1.0
# t and dt are unused in this simulation, but are in general,
# parameters of engine (acceleration may depend on them)
t, dt = 0., 1.
bClearScreen = True
pygame.display.set_caption('Gravity simulation (SPACE: show orbits, '
'keypad +/- : zoom in/out)')
while True:
t += dt
pygame.display.flip()
if bClearScreen: # Show orbits or not?
win.fill((0, 0, 0))
win.lock()
for p in g_listOfPlanets:
if not p._merged: # for planets that have not been merged, draw a
# circle based on their radius, but take zoom factor into account
pygame.draw.circle(win, (255, 255, 255),
(int(WIDTHD2+zoom*WIDTHD2*(p._st._x-WIDTHD2)/WIDTHD2),
int(HEIGHTD2+zoom*HEIGHTD2*(p._st._y-HEIGHTD2)/HEIGHTD2)),
int(p._r*zoom), 0)
win.unlock()
ScanKeyboard()
# Update all planets' positions and speeds (should normally double
# buffer the list of planet data, but turns out this is good enough :-)
for p in g_listOfPlanets:
if p._merged or p is sun:
continue
# Calculate the contributions of all the others to its acceleration
# (via the gravity force) and update its position and velocity
p.updatePlanet(t, dt)
# See if we should merge the ones that are close enough to touch,
# using elastic collisions (conservation of total momentum)
for p1 in g_listOfPlanets:
if p1._merged:
continue
for p2 in g_listOfPlanets:
if p1 is p2 or p2._merged:
continue
if planetsTouch(p1, p2):
if p1._m < p2._m:
p1, p2 = p2, p1 # p1 is the biggest one (mass-wise)
p2._merged = True
if p1 is sun:
continue # No-one can move the sun :-)
newvx = (p1._st._vx*p1._m+p2._st._vx*p2._m)/(p1._m+p2._m)
newvy = (p1._st._vy*p1._m+p2._st._vy*p2._m)/(p1._m+p2._m)
p1._m += p2._m # maintain the mass (just add them)
p1.setRadiusFromMass() # new mass --> new radius
p1._st._vx, p1._st._vy = newvx, newvy
# update zoom factor (numeric keypad +/- keys)
if keysPressed[pygame.K_KP_PLUS]:
zoom /= 0.99
if keysPressed[pygame.K_KP_MINUS]:
zoom /= 1.01
if keysPressed[pygame.K_ESCAPE]:
break
if keysPressed[pygame.K_SPACE]:
while keysPressed[pygame.K_SPACE]:
ScanKeyboard()
bClearScreen = not bClearScreen
verb = "show" if bClearScreen else "hide"
pygame.display.set_caption(
'Gravity simulation (SPACE: '
'%s orbits, keypad +/- : zoom in/out)' % verb)
if __name__ == "__main__":
try:
import psyco
psyco.profile()
except ImportError:
print 'Psyco not found, ignoring it'
main()
|
[
"[email protected]"
] | |
fda009d969b4c11b4518f554302e60e88490b46b
|
0f09759025db447fe63b3af0af80c3e31e2a887f
|
/scripts/cell/taskScripts/Bangzhushenmiren.py
|
06b8d8ab06630b18f47c4ebd930e3d56d5de5726
|
[] |
no_license
|
jevonhuang/huanhuoserver
|
d7db1cd4c67d8be2da4dc9ec84ef8f23e891c537
|
caa8a87cd303b4d0368a0a6397fc1d47685c3bc3
|
refs/heads/master
| 2020-12-07T16:47:40.668507 | 2018-04-02T10:12:01 | 2018-04-02T10:12:01 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 741 |
py
|
# -*- coding: utf-8 -*-
import KBEngine
from KBEDebug import *
class Bangzhushenmiren(object):
def __init__(self, owner, selfIndex, npcName, npcTaskIndex):
DEBUG_MSG("Bangzhushenmiren:__init__")
self.owner = owner
self.selfIndex = selfIndex
self.npcName = npcName
self.npcTaskIndex = npcTaskIndex
self.owner.setAttr("Bangzhushenmiren_TaskCounter", 1)
self.oldTaskCounter = self.owner.getAttr("Bangzhushenmiren_TaskCounter")
def detectTaskCompleteness(self):
self.owner.setAttr("Bangzhushenmiren_TaskCounter", 0)
if self.owner.getAttr("Bangzhushenmiren_TaskCounter") == 0:
self.owner.setTaskFinish(self.npcName, self.npcTaskIndex, self.selfIndex)
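# (note: detectTaskCompleteness zeroes the counter and immediately tests it,
# so the task is always reported finished; presumably the counter is meant
# to be decremented elsewhere as sub-goals complete)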
|
[
"[email protected]"
] | |
d3c7eba1a1d9d06ee6edec913210550daf9fc33e
|
94c64fc5191243d41322e1ddeefa53fe368cd79f
|
/its_triage/models/account_move.py
|
42063865f86c305e92c83d7d97420f3310e279f4
|
[] |
no_license
|
solo-jr/its_kassim
|
342784c4c7bac65f72b8dcf89e72fd9cb6cef2c3
|
4fe85ea56c95da8980c0f351121fcde9a4b10b26
|
refs/heads/main
| 2023-05-11T17:25:03.280755 | 2021-05-29T21:44:54 | 2021-05-29T21:44:54 | 368,870,158 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,240 |
py
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2021 IT-Solutions.mg. All Rights Reserved
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from odoo import fields, models
class AccountMove(models.Model):
_inherit = 'account.move'
exchange_rate = fields.Monetary(string='Exchange rate')
transfer_fee = fields.Monetary(string='Transfer fee')
other_expenses = fields.Monetary(string='Other Expenses')
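# (a note on the API: fields.Monetary resolves its currency through the
# model's currency_id field by default, which account.move already defines)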
|
[
"[email protected]"
] | |
8526d76d462eb31cb9b6edae46331fdb9552850a
|
7a5b729a660a35d0d80c9836202025a719f026fb
|
/general codes/mod10_10.py
|
8c80260bc5a7216f6b44eceaaa9cf816b84db9ad
|
[] |
no_license
|
Harshit2009/My-Programs-
|
7a05eb3369b98010805752a0234867b726c4ac0e
|
1ac60faeb0ba514f2c35bcb82be43654b5cef785
|
refs/heads/master
| 2023-01-13T18:58:26.088714 | 2020-11-19T08:31:43 | 2020-11-19T08:31:43 | 269,538,702 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 35 |
py
|
import mod10
mod10.mod10(0,1,10)
|
[
"[email protected]"
] | |
61875b4157b2a3997a65fe8185632e870931a8fd
|
8cdf2632e4858aac74bcb276701fea329bda253b
|
/max_subarray_dc.py
|
e275901b747449d2fa8b6112f59f4c25a10c1c5e
|
[] |
no_license
|
victorhslima98/Complexidade_de_Algoritmos
|
b90fba85e97d7eb74238fa778b1f10b190d4efe7
|
50f666e5e4f2d9e2b7d7481499d36f256229505a
|
refs/heads/master
| 2020-05-18T22:17:07.535425 | 2019-05-03T02:26:44 | 2019-05-03T02:26:44 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 830 |
py
|
from max_crossing_subarray import max_crossing_subarray
from math import floor
def max_sum_subarray(a, low, high):
if low == high:
return [low, high, a[low]]
else:
center = int(floor((low+high)/2))
(left_low, left_high, left_sum) = max_sum_subarray(a, low, center)
(right_low, right_high, right_sum) = max_sum_subarray(a, center+1, high)
(cross_low, cross_high, cross_sum) = max_crossing_subarray(a, low, center, high)
if (left_sum >= right_sum) and (left_sum >= cross_sum):
return left_low, left_high, left_sum
elif (right_sum >= left_sum) and (right_sum >= cross_sum):
return right_low, right_high, right_sum
else:
return cross_low, cross_high, cross_sum
def main(a):
return max_sum_subarray(a, 0, len(a) - 1)
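# Usage sketch (assumes max_crossing_subarray implements the standard CLRS
# crossing-subarray routine):
#   main([1, -3, 4, -1, 2, -10]) -> (2, 4, 5)   # a[2:5] == [4, -1, 2]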
|
[
"[email protected]"
] | |
87d3f4edd0f90cf21b88a93a2fb448387e317cda
|
cfe58567a8f5a07bfc3bbf75bd0aeb674de10b35
|
/Git_Vundle_Vim_BashIt_Linux.py
|
6a11a10a3eba110ee303aee2694a84b9908e615f
|
[] |
no_license
|
chrisrosa418/vimrc
|
eb215e8db560542e66ad6d030a489ad23707978a
|
8c0d42ae8b110a79e9d343a570cef598fd66adc2
|
refs/heads/master
| 2021-01-23T07:03:06.720252 | 2018-07-17T00:20:27 | 2018-07-17T00:20:27 | 40,999,557 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 796 |
py
|
#Install Git, Vim, Vundle and Bash-it
from subprocess import Popen, PIPE
import os
#Fresh Install
update = "sudo apt-get update"
os.system(update)
#Install Git
git = "sudo apt-get install git"
os.system(git)
#Install Vim
vim = "sudo apt-get install vim"
os.system(vim)
#Install Vundle
vundle = "git clone https://github.com/VundleVim/Vundle.vim.git ~/.vim/bundle/Vundle.vim"
os.system(vundle)
#Create .vimrc
vimrc = "wget -O ~/.vimrc wget https://raw.githubusercontent.com/chrisrosa418/vimrc/master/.vimrc"
os.system(vimrc)
#Install Command
install = "vim +PluginInstall +qall"
os.system(install)
#Clone Bash It
bashit = "git clone --depth=1 https://github.com/Bash-it/bash-it.git ~/.bash_it"
os.system(bashit)
#Install Command
bashinstall = "~/.bash_it/install.sh"
os.system(bashinstall)
|
[
"[email protected]"
] | |
ecbe3b9041201d2af65fc820530e64b9a5aa5439
|
a1382b2bcfea1485130bd69835343fdbacef218a
|
/preimage/models/weighted_degree_model.py
|
8d782f0a7d4c7c26682f3d9cda360a4d4dfd44f9
|
[
"BSD-2-Clause"
] |
permissive
|
a-ro/preimage
|
6dde6285b7918bcdd2522a855214b921fc32d0c6
|
5489e8ab34bdd5f1f1f2eb18718425373ede44bb
|
refs/heads/master
| 2021-03-30T15:53:08.386991 | 2015-07-17T02:14:28 | 2015-07-17T02:14:28 | 33,873,950 | 7 | 3 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,987 |
py
|
__author__ = 'amelie'
from preimage.features.weighted_degree_feature_space import WeightedDegreeFeatureSpace
from preimage.inference.graph_builder import GraphBuilder
from preimage.models.model import Model
class WeightedDegreeModel(Model):
def __init__(self, alphabet, n, is_using_length=True):
self._graph_builder = GraphBuilder(alphabet, n)
self._is_normalized = True
Model.__init__(self, alphabet, n, is_using_length)
def fit(self, inference_parameters):
Model.fit(self, inference_parameters)
self._feature_space_ = WeightedDegreeFeatureSpace(self._alphabet, self._n, inference_parameters.Y_train,
self._is_normalized)
def predict(self, Y_weights, y_lengths):
if self._is_using_length:
self._verify_y_lengths_is_not_none_when_use_length(y_lengths)
Y_predictions = self._predict_with_length(Y_weights, y_lengths)
else:
Y_predictions = self._predict_without_length(Y_weights)
return Y_predictions
def _predict_with_length(self, Y_weights, y_lengths):
Y_predictions = []
for y_weights, y_length in zip(Y_weights, y_lengths):
n_gram_weights = self._feature_space_.compute_weights(y_weights, y_length)
y_predicted = self._graph_builder.find_max_string(n_gram_weights, y_length)
Y_predictions.append(y_predicted)
return Y_predictions
def _predict_without_length(self, Y_weights):
Y_predictions = []
for y_weights in Y_weights:
n_gram_weights = self._feature_space_.compute_weights(y_weights, self._max_length_)
y_predicted = self._graph_builder.find_max_string_in_length_range(n_gram_weights, self._min_length_,
self._max_length_, self._is_normalized)
Y_predictions.append(y_predicted)
return Y_predictions
|
[
"[email protected]"
] | |
f434d074c2a942412002f5c9efc9a15c033dacc0
|
5472a3f913e1a6698b9dab902545f0ba02e7a02e
|
/pbay_url.py
|
30c40f18b360964362158d06ed0107620e90d399
|
[] |
no_license
|
Arrowheadahp/piratebay-search-and-download
|
bf38956588ce6da8caf25cec653bec76409cfd79
|
0fe8db913215e4a0b00a9153e7085728e7d3ecf7
|
refs/heads/master
| 2020-05-31T05:56:18.592671 | 2019-07-20T06:15:26 | 2019-07-20T06:15:26 | 190,131,141 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 566 |
py
|
from bs4 import BeautifulSoup
from urllib.request import Request, urlopen
import webbrowser
def soupcreate(url):
req = Request(url, headers={'User-Agent': 'Mozilla/5.0'})
webpage = urlopen(req).read()
#print ('url page read')
return(BeautifulSoup(webpage,features="lxml"))
def geturl():
proxylist=soupcreate('https://piratebay-proxylist.se/')
proxy=proxylist.find('td',{'class':'url'})
proxyurl=proxy.get('data-href')
return (proxyurl)
if __name__=='__main__':
print (geturl())
webbrowser.open(geturl())
|
[
"[email protected]"
] | |
c63a208fac10bb36594b6582a3444350532e6641
|
01df7cdc75e4a889896c84c4258696eda72b181c
|
/main1.py
|
adeaaf8b02599969255013def03122c560737aba
|
[
"MIT"
] |
permissive
|
mycroftsherlock/ai-zhinengyinxiang
|
254bc62798486bb8f88b4810daa3c3f2c4ca5f03
|
eca5b478de20203149cdb0e5e9f8b6ffd85cb248
|
refs/heads/master
| 2020-07-14T16:17:42.923769 | 2019-08-30T14:19:14 | 2019-08-30T14:19:14 | 205,351,208 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 18,088 |
py
|
# -*- coding: utf-8 -*-
import pyaudio
import viVoicecloud as vv
from sjtu.audio import findDevice
import re
device_in = findDevice("ac108","input")
Sample_channels = 1
Sample_rate = 16000
Sample_width = 2
time_seconds = 0.5
p = pyaudio.PyAudio()
stream = p.open(
rate=Sample_rate,
format=p.get_format_from_width(Sample_width),
channels=Sample_channels,
input=True,
input_device_index=device_in,
start=False)
vv.Login()
ASR=vv.asr()
while True:
try:
import pyaudio
import viVoicecloud as vv
from sjtu.audio import findDevice
device_in = findDevice("ac108","input")
Sample_channels = 1
Sample_rate = 16000
Sample_width = 2
time_seconds = 0.5 # duration of each recording chunk; 0.2-0.5 s recommended
p = pyaudio.PyAudio()
stream = p.open(
rate=Sample_rate,
format=p.get_format_from_width(Sample_width),
channels=Sample_channels,
input=True,
input_device_index=device_in,
start = False)
ASR=vv.asr() # instantiate
ASR.SessionBegin(language='Chinese') # start speech recognition
stream.start_stream()
print ('***Listening...')
# record and upload to iFlytek; status returns 3 once a full sentence is detected
status=0
while status!=3:
frames=stream.read(int(Sample_rate*time_seconds))
ret,status,recStatus=ASR.AudioWrite(frames)
stream.stop_stream()
print ('---GetResult...')
text1=ASR.GetResult() # get the result
ASR.SessionEnd() # end speech recognition
print (text1)
temp=re.match("[盘潘判盼攀畔磐叛泮槃][盐眼演烟延岩燕严研]",text1)
if temp!=None:
break
except Exception as e:
print(e)
print('stopped')
vv.Logout()
stream.close()
break
temp=re.match("[\u4E00-\u9FA5]*音乐",text1)
if temp!=None:
print("进入播放音乐模式")
#!/usr/bin/python3
# -*- coding: utf-8 -*-
import viVoicecloud as vv # import the module
t = vv.tts() # instantiate
t.say(text="你要英文还是中文?",voice="xiaofeng")
while True:
# -*- coding: utf-8 -*-
import pyaudio
import viVoicecloud as vv
from sjtu.audio import findDevice
device_in = findDevice("ac108","input")
Sample_channels = 1
Sample_rate = 16000
Sample_width = 2
time_seconds = 0.5 # duration of each recording chunk; 0.2-0.5 s recommended
p = pyaudio.PyAudio()
stream = p.open(
rate=Sample_rate,
format=p.get_format_from_width(Sample_width),
channels=Sample_channels,
input=True,
input_device_index=device_in,
start = False)
ASR=vv.asr() # instantiate
ASR.SessionBegin(language='Chinese') # start speech recognition
stream.start_stream()
print ('***Listening...')
# record and upload to iFlytek; status returns 3 once a full sentence is detected
status=0
while status!=3:
frames=stream.read(int(Sample_rate*time_seconds))
ret,status,recStatus=ASR.AudioWrite(frames)
stream.stop_stream()
print ('---GetResult...')
lang=ASR.GetResult() # get the result
ASR.SessionEnd() # end speech recognition
print (lang)
temp=re.match("[\u4E00-\u9FA5]*[英|中]",lang)
if temp!=None:
break
# -*- coding: utf-8 -*-
temp=re.match("[\u4E00-\u9FA5]*英文",lang)
if temp!=None:
# -*- coding: utf-8 -*-
import pyaudio
import viVoicecloud as vv
from sjtu.audio import findDevice
device_in = findDevice("ac108","input")
Sample_channels = 1
Sample_rate = 16000
Sample_width = 2
time_seconds = 0.5 # duration of each recording chunk; 0.2-0.5 s recommended
p = pyaudio.PyAudio()
stream = p.open(
rate=Sample_rate,
format=p.get_format_from_width(Sample_width),
channels=Sample_channels,
input=True,
input_device_index=device_in,
start = False)
ASR=vv.asr() # instantiate
ASR.SessionBegin(language='English') # start speech recognition
stream.start_stream()
print ('***Listening...')
# record and upload to iFlytek; status returns 3 once a full sentence is detected
status=0
while status!=3:
frames=stream.read(int(Sample_rate*time_seconds))
ret,status,recStatus=ASR.AudioWrite(frames)
stream.stop_stream()
print ('---GetResult...')
enmu=ASR.GetResult() # get the result
ASR.SessionEnd() # end speech recognition
print (enmu)
enmu=enmu.strip(".")
#!/usr/bin/python3
# -*- coding: utf-8 -*-
#第一步:检索歌曲
import urllib
import urllib.request
url = "http://tingapi.ting.baidu.com/v1/restserver/ting?"
url += "from=webapp_music"
url += "&method=baidu.ting.search.catalogSug"
url += "&format=json"
keywords = enmu
keywords_encoded = urllib.parse.quote(keywords) # convert to URL-encoded form
print(keywords_encoded)
url += "&query="+keywords_encoded
ref = urllib.request.urlopen(url)
result = ref.read()
print (result)
# Step 2: get the link
import json
dict1 = json.loads(str(result,encoding='utf-8'))
#print(dict1)
songid = dict1["song"][0]["songid"]
url2 = "http://music.taihe.com/data/music/fmlink?"
url2 += "songIds="+songid
ref2 = urllib.request.urlopen(url2)
result2 = ref2.read()
#print (result2)
dict2 = json.loads(str(result2,encoding='utf-8'))
#print(dict2)
songLink = dict2["data"]["songList"][0]["songLink"]
#print(songLink)
# Step 3: download or play
#urllib.request.urlretrieve(songLink,"myMusic.mp3") # download
import vlc
p = vlc.MediaPlayer(songLink)
p.play() # play directly
import time
time.sleep(2)
while p.is_playing(): # poll every 0.5 s until the music finishes
time.sleep(0.5)
temp=re.match("[\u4E00-\u9FA5]*中文",lang)
if temp!=None:
# -*- coding: utf-8 -*-
import pyaudio
import viVoicecloud as vv
from sjtu.audio import findDevice
device_in = findDevice("ac108","input")
Sample_channels = 1
Sample_rate = 16000
Sample_width = 2
time_seconds = 0.5 # duration of each recording chunk; 0.2-0.5 s recommended
p = pyaudio.PyAudio()
stream = p.open(
rate=Sample_rate,
format=p.get_format_from_width(Sample_width),
channels=Sample_channels,
input=True,
input_device_index=device_in,
start = False)
ASR=vv.asr() # instantiate the ASR engine
ASR.SessionBegin(language='Chinese') # start the speech-recognition session
stream.start_stream()
print ('***Listening...')
# record and upload to iFlytek; status becomes 3 once the utterance is judged complete
status=0
while status!=3:
frames=stream.read(int(Sample_rate*time_seconds))
ret,status,recStatus=ASR.AudioWrite(frames)
stream.stop_stream()
print ('---GetResult...')
enmu=ASR.GetResult() # fetch the recognition result
ASR.SessionEnd() # end the speech-recognition session
print (enmu)
enmu=enmu.strip("。")
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# Step 1: search for the song
import urllib
import urllib.request
url = "http://tingapi.ting.baidu.com/v1/restserver/ting?"
url += "from=webapp_music"
url += "&method=baidu.ting.search.catalogSug"
url += "&format=json"
keywords = enmu
keywords_encoded = urllib.parse.quote(keywords) # URL-encode the keywords
print(keywords_encoded)
url += "&query="+keywords_encoded
ref = urllib.request.urlopen(url)
result = ref.read()
print (result)
# Step 2: get the song link
import json
dict1 = json.loads(str(result,encoding='utf-8'))
#print(dict1)
songid = dict1["song"][0]["songid"]
url2 = "http://music.taihe.com/data/music/fmlink?"
url2 += "songIds="+songid
ref2 = urllib.request.urlopen(url2)
result2 = ref2.read()
#print (result2)
dict2 = json.loads(str(result2,encoding='utf-8'))
#print(dict2)
songLink = dict2["data"]["songList"][0]["songLink"]
#print(songLink)
# Step 3: download or play
#urllib.request.urlretrieve(songLink,"myMusic.mp3") # download instead of streaming
import vlc
p = vlc.MediaPlayer(songLink)
p.play() # play directly
import time
time.sleep(2)
while p.is_playing(): # poll every 0.5 s until the music finishes
time.sleep(0.5)
temp=re.match("[\u4E00-\u9FA5]*聊天",text1)
if temp!=None:
time_seconds = 0.5
p = pyaudio.PyAudio()
stream = p.open(
rate=Sample_rate,
format=p.get_format_from_width(Sample_width),
channels=Sample_channels,
input=True,
input_device_index=device_in,
start=False)
ASR=vv.asr()
while True:
try:
ASR.SessionBegin(language='Chinese')
stream.start_stream()
print ('***Listening...')
status=0
while status!=3:
frames=stream.read(int(Sample_rate*time_seconds),exception_on_overflow = False)
ret,status,recStatus=ASR.AudioWrite(frames)
stream.stop_stream()
print ('---GetResult...')
qa=ASR.GetResult()
ASR.SessionEnd()
print (qa)
import viVoicecloud as vv
from sjtu.answer import aiui_answer,my_answer
t = vv.tts()
q = qa
if q=="exit":
break
else:
if not my_answer(q,t):
aiui_answer(q,vv,t)
except Exception as e:
print(e)
print('stopped')
vv.Logout()
stream.close()
break
temp=re.match("[\u4E00-\u9FA5]*[转|赚|转|砖|篆]",text1)
if temp!=None:
print("进入语音转换")
# -*- coding: utf-8 -*-
import pyaudio
import viVoicecloud as vv
from sjtu.audio import findDevice
device_in = findDevice("ac108","input")
Sample_channels = 1
Sample_rate = 16000
Sample_width = 2
time_seconds = 0.5
p = pyaudio.PyAudio()
stream = p.open(
rate=Sample_rate,
format=p.get_format_from_width(Sample_width),
channels=Sample_channels,
input=True,
input_device_index=device_in,
start=False)
ASR=vv.asr()
while True:
try:
ASR.SessionBegin(language='Chinese')
stream.start_stream()
print ('***Listening...')
status=0
while status!=3:
frames=stream.read(int(Sample_rate*time_seconds),exception_on_overflow = False)
ret,status,recStatus=ASR.AudioWrite(frames)
stream.stop_stream()
print ('---GetResult...')
words=ASR.GetResult()
ASR.SessionEnd()
print (words)
except Exception as e:
print(e)
print('stopped')
vv.Logout()
stream.close()
p.terminate()
break
temp=re.match("[\u4E00-\u9FA5]*翻译",text1)
if temp!=None:
print("开始翻译")
#!/usr/bin/python3
# -*- coding: utf-8 -*-
import viVoicecloud as vv # import the viVoicecloud module
t = vv.tts() # instantiate the TTS engine
t.say(text="你是要翻译成英文还是翻译成中文",voice="xiaomeng")
# -*- coding: utf-8 -*-
import pyaudio
import viVoicecloud as vv
from sjtu.audio import findDevice
device_in = findDevice("ac108","input")
Sample_channels = 1
Sample_rate = 16000
Sample_width = 2
time_seconds = 0.5 # length of each recording slice; 0.2-0.5 s is recommended
p = pyaudio.PyAudio()
stream = p.open(
rate=Sample_rate,
format=p.get_format_from_width(Sample_width),
channels=Sample_channels,
input=True,
input_device_index=device_in,
start = False)
ASR=vv.asr() # instantiate the ASR engine
ASR.SessionBegin(language='Chinese') # start the speech-recognition session
stream.start_stream()
print ('***Listening...')
# record and upload to iFlytek; status becomes 3 once the utterance is judged complete
status=0
while status!=3:
frames=stream.read(int(Sample_rate*time_seconds))
ret,status,recStatus=ASR.AudioWrite(frames)
stream.stop_stream()
print ('---GetResult...')
lang1=ASR.GetResult() # fetch the recognition result
ASR.SessionEnd() # end the speech-recognition session
print (lang1)
temp=re.match("[\u4E00-\u9FA5]*英文",lang1)
if temp!=None:
# -*- coding: utf-8 -*-
import pyaudio
import viVoicecloud as vv
from sjtu.audio import findDevice
device_in = findDevice("ac108","input")
Sample_channels = 1
Sample_rate = 16000
Sample_width = 2
time_seconds = 0.5
p = pyaudio.PyAudio()
stream = p.open(
rate=Sample_rate,
format=p.get_format_from_width(Sample_width),
channels=Sample_channels,
input=True,
input_device_index=device_in,
start=False)
ASR=vv.asr()
while True:
try:
ASR.SessionBegin(language='Chinese')
stream.start_stream()
print ('***Listening...')
status=0
while status!=3:
frames=stream.read(int(Sample_rate*time_seconds),exception_on_overflow = False)
ret,status,recStatus=ASR.AudioWrite(frames)
stream.stop_stream()
print ('---GetResult...')
lang2=ASR.GetResult()
ASR.SessionEnd()
print (lang2)
import viVoicecloud as vv
tr = vv.baidu_translate()
result = tr.translate(lang2,"zh","en")
print(result)
t = vv.tts() # instantiate the TTS engine
t.say(text=result,voice="henry")
except Exception as e:
print(e)
print('stopped')
vv.Logout()
stream.close()
p.terminate()
break
temp=re.match("[\u4E00-\u9FA5]*中文",lang1)
if temp!=None:
# -*- coding: utf-8 -*-
import pyaudio
import viVoicecloud as vv
from sjtu.audio import findDevice
device_in = findDevice("ac108","input")
Sample_channels = 1
Sample_rate = 16000
Sample_width = 2
time_seconds = 0.5
p = pyaudio.PyAudio()
stream = p.open(
rate=Sample_rate,
format=p.get_format_from_width(Sample_width),
channels=Sample_channels,
input=True,
input_device_index=device_in,
start=False)
ASR=vv.asr()
while True:
try:
ASR.SessionBegin(language='English')
stream.start_stream()
print ('***Listening...')
status=0
while status!=3:
frames=stream.read(int(Sample_rate*time_seconds),exception_on_overflow = False)
ret,status,recStatus=ASR.AudioWrite(frames)
stream.stop_stream()
print ('---GetResult...')
lang2=ASR.GetResult()
ASR.SessionEnd()
print (lang2)
import viVoicecloud as vv
tr = vv.baidu_translate()
result = tr.translate(lang2, "en", "zh")
print(result)
t = vv.tts() # instantiate the TTS engine
t.say(text=result,voice="xiaofeng")
except Exception as e:
print(e)
print('stopped')
vv.Logout()
stream.close()
p.terminate()
break
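# A minimal refactoring sketch: the record-and-recognize block above is pasted
# verbatim into every branch. This helper assumes only the viVoicecloud/pyaudio
# APIs already used in this script; the helper name listen_once is hypothetical.
import viVoicecloud as vv

def listen_once(stream, language='Chinese', sample_rate=16000, slice_seconds=0.5):
    """Record one utterance from an open pyaudio stream and return the text."""
    ASR = vv.asr()
    ASR.SessionBegin(language=language)
    stream.start_stream()
    status = 0
    while status != 3:  # status 3 means the utterance has ended
        frames = stream.read(int(sample_rate * slice_seconds), exception_on_overflow=False)
        ret, status, recStatus = ASR.AudioWrite(frames)
    stream.stop_stream()
    text = ASR.GetResult()
    ASR.SessionEnd()
    return text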
|
[
"[email protected]"
] | |
f2e3a44793aa3a254b55ee606674a23f99ca7d8a
|
d1eee92848bcb6c7179a004bbe37e0c3656821ed
|
/scripts/bba.py
|
66ab1873dae8aaeebe4635127086b646344f58ab
|
[] |
no_license
|
sswaans/CS244Project
|
b1dba7b2d6e7d225a7f530326fd598df764b363f
|
21dfada26b361dea8da97e786884f325f0a56216
|
refs/heads/master
| 2022-10-23T11:43:24.369486 | 2020-06-11T06:11:02 | 2020-06-11T06:11:02 | 265,962,353 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 9,611 |
py
|
from queue import Queue
import random
import matplotlib.pyplot as plt
class BBASim:
def __init__(self, rates, chunkSec, bufSize, reservoirSize, cushionSize, capacity):
self.rates = rates # Available video rates (Mbps)
self.chunkSec = chunkSec # Number of seconds in a chunk
self.bufSize = bufSize # Maximum number of seconds in buffer
self.reservoirSize = max(reservoirSize, chunkSec) # How many seconds of video we should always have (use min rate if below)
self.cushionSize = cushionSize # How many seconds of video we should have before hitting max rate
self.capacity = capacity # Network capacity, C (Mbps)
self.buffer = 0 # Number of seconds of video we have buffered
self.rate = rates[0] # Current video rate
self.rateQueue = Queue() # Keeps track of which rates of video have been downloaded
self.partialChunkMb = 0 # Number of Mb we've already downloaded of current chunk
self.initialBufferComplete = False # Whether or not we have buffered the very first chunk of video
self.log = ""
self.bufferVals = [] # For graphing, list of all buffer values over time
self.rateVals = [] # For graphing, list of all rate values over time
self.capacityVals = [] # For graphing, list of all capacity values over time
def __rateMap(self):
if self.buffer <= self.reservoirSize:
return self.rates[0]
elif self.buffer >= self.cushionSize:
return self.rates[-1]
else: # linear between rmin and rmax
percentCushion = (self.buffer - self.reservoirSize) / (self.cushionSize - self.reservoirSize)
return self.rates[0] + (percentCushion * (self.rates[-1] - self.rates[0]))
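    # Worked example of the rate map (values from the __main__ block below):
    # with rates [1, 2.5, 5, 8, 16, 45], reservoirSize = 60 and cushionSize = 192,
    # a buffer of 126 s gives percentCushion = (126 - 60) / (192 - 60) = 0.5,
    # so the suggested rate is 1 + 0.5 * (45 - 1) = 23 Mbps.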
def __getNextRate(self):
self.log += "Previous rate: " + str(self.rate) + "\n"
ratePlus = self.rates[-1] if self.rate == self.rates[-1] else min([rate for rate in self.rates if rate > self.rate])
rateMinus = self.rates[0] if self.rate == self.rates[0] else max([rate for rate in self.rates if rate < self.rate])
rateSuggest = self.__rateMap()
self.log += "Suggested rate: " + str(rateSuggest) + "\n"
rateNext = self.rate
if rateSuggest == self.rates[0] or rateSuggest == self.rates[-1]:
rateNext = rateSuggest
elif rateSuggest >= ratePlus:
rateNext = max([rate for rate in self.rates if rate < rateSuggest])
elif rateSuggest <= rateMinus:
rateNext = min([rate for rate in self.rates if rate > rateSuggest])
# Have to pick a "safe" rate, not "risky" (i.e. chunk must finish before buffer runs below reservoir)
        if self.buffer > self.reservoirSize and rateNext * self.chunkSec / self.rates[0] > self.buffer - self.reservoirSize:
            availableRates = [rate for rate in self.rates if rate * self.chunkSec / self.rates[0] <= self.buffer - self.reservoirSize]
            if not availableRates:
                rateNext = self.rates[0]
            else:
                rateNext = max(availableRates)
# Custom addition: never return a (rate * chunk sec) greater than buffer size
# if rateNext * self.chunkSec > self.bufSize:
# availableRates = [rate for rate in self.rates if rate * self.chunkSec <= self.bufSize]
# if not availableRates:
# return -1
# rateNext = max(availableRates)
self.log += "New rate: " + str(rateNext) + "\n"
return rateNext
def printLog(self, error=None):
if error:
self.log += error
self.log += "bufSize: " + str(self.bufSize) + "\n"
self.log += "chunkSec: " + str(self.chunkSec) + "\n"
self.log += "cushionFrac: " + str(self.cushionSize / self.bufSize) + "\n"
self.log += "capacity: " + str(self.capacity) + "\n"
self.log += "reservoirFrac: " + str(self.reservoirSize / self.bufSize) + "\n"
self.log += "\n\n\n\n"
print(self.log)
def simulateSecond(self, capacity=None):
# TODO: Currently only support integer chunkSec >= 1. Adding support for floats is nontrivial.
# -----DRAIN-----
self.log += "DRAIN\n"
if self.initialBufferComplete:
if self.buffer <= 0:
error = "NO CHUNK FULLY DOWNLOADED!\n"
self.printLog(error)
return False
drainRate = self.rateQueue.get(block=False)
self.buffer -= 1
if self.buffer < 0:
error = "BUFFER RAN EMPTY!\n"
self.printLog(error)
return False
self.log += "Drained rate: " + str(drainRate) + "\n"
self.log += "Approx blocks in queue: " + str(self.rateQueue.qsize()) + "\n"
self.log += "=======================================\n"
# ----DOWNLOAD-----
self.log += "DOWNLOAD\n"
# If user supplied new capacity, update
if capacity:
self.capacity = capacity
if self.partialChunkMb == 0:
newRate = self.__getNextRate()
if newRate < 0:
error = "BUFFER TOO SMALL, NO SUITABLE RATE.\n"
self.printLog(error)
return False
self.rate = newRate
bufRemaining = self.bufSize - self.buffer
self.log += "Buffer remaining: " + str(bufRemaining) + "\n"
if bufRemaining > 0:
capacityRemaining = self.capacity
chunkRemaining = self.rate * self.chunkSec - self.partialChunkMb
self.log += "Capacity remaining: " + str(capacityRemaining) + "\n"
self.log += "Chunk remaining: " + str(chunkRemaining) + "\n"
# If we can, download a full single chunk and reevaluate rate
while bufRemaining >= chunkRemaining / self.rate and chunkRemaining <= capacityRemaining:
self.log += "Finishing chunk\n"
capacityRemaining -= chunkRemaining
bufRemaining -= chunkRemaining / self.rate
for _ in range(self.chunkSec):
self.rateQueue.put(self.rate)
self.buffer += chunkRemaining / self.rate
self.initialBufferComplete = True
if min(capacityRemaining, bufRemaining) > 0:
newRate = self.__getNextRate()
if newRate < 0:
error = "BUFFER TOO SMALL, NO SUITABLE RATE.\n"
self.printLog(error)
return False
self.rate = newRate
chunkRemaining = self.rate * self.chunkSec
self.partialChunkMb = 0
self.log += "----------------------\n"
self.log += "Buffer remaining: " + str(bufRemaining) + "\n"
self.log += "Capacity remaining: " + str(capacityRemaining) + "\n"
self.log += "Chunk remaining: " + str(chunkRemaining) + "\n"
# If we can't download a full single chunk, download as much as capacity and
# remaining buffer allow and note how much of the chunk we downloaded
MbDown = min(capacityRemaining, bufRemaining * self.rate)
self.buffer += MbDown / self.rate
self.partialChunkMb += MbDown
self.log += "Couldn't finish chunk, downloaded " + str(self.partialChunkMb) + "\n"
else:
self.log += "Buffer full, no download this cycle\n"
self.log += "============================\n"
self.log += "============================\n"
self.bufferVals.append(self.buffer)
self.rateVals.append(self.rate)
self.capacityVals.append(self.capacity)
return True
def getGraphVals(self):
return self.bufferVals, self.rateVals, self.capacityVals
if __name__ == "__main__":
rates = [1, 2.5, 5, 8, 16, 45]
bufSizes = [5, 10, 50, 100, 240, 1000]
chunkSecs = [1, 2, 3, 4, 5, 10]
cushionFracs = [0.25, 0.5, 0.75, 0.9, 1.0]
capacities = [1, 2, 3, 5, 10, 30, 50]
reservoirFracs = [0.1, 0.25, 0.5, 0.75, 1.0]
# Test with fixed capacities
ratePrev = rates[0]
# for bufSize in bufSizes:
# for chunkSec in chunkSecs:
# if chunkSec > bufSize:
# continue
# for cushionFrac in cushionFracs:
# for capacity in capacities:
# for reservoirFrac in reservoirFracs:
# if reservoirFrac > cushionFrac:
# continue
# bbaSim = BBASim(rates, chunkSec, bufSize, reservoirFrac * bufSize, cushionFrac * bufSize, capacity)
# for i in range(100):
# success = bbaSim.simulateSecond()
# if not success:
# break
# if bufSize == 240 and chunkSec == 4 and cushionFrac == 0.9 and capacity == 5 and reservoirFrac == 0.1:
# bbaSim.printLog()
# Test with random capacities
# for bufSize in bufSizes:
# for chunkSec in chunkSecs:
# if chunkSec > bufSize:
# continue
# for cushionFrac in cushionFracs:
# for reservoirFrac in reservoirFracs:
# if reservoirFrac > cushionFrac:
# continue
# capacity = random.choice(capacities)
# bbaSim = BBASim(rates, chunkSec, bufSize, reservoirFrac * bufSize, cushionFrac * bufSize, capacity)
# for i in range(100):
# capacity = random.choice(capacities)
# success = bbaSim.simulateSecond(capacity)
# if not success:
# break
# Generate graphs
fig, ax = plt.subplots()
capacity = random.choice(capacities)
capacityIndex = capacities.index(capacity)
bbaSim = BBASim(rates, 4, 240, 0.25 * 240, 0.8 * 240, capacity)
for i in range(200):
availableIndexes = [capacityIndex]
if capacityIndex > 0:
availableIndexes.append(capacityIndex - 1)
if capacityIndex < len(capacities) - 1:
availableIndexes.append(capacityIndex + 1)
capacityIndex = random.choice(availableIndexes)
capacity = capacities[capacityIndex]
success = bbaSim.simulateSecond(capacity)
if not success:
break
bufferVals, rateVals, capacityVals = bbaSim.getGraphVals()
xVals = [i for i in range(200)]
reservoirVals = [0.25 * 240 for i in range(200)]
cushionVals = [0.8 * 240 for i in range(200)]
ax.plot(xVals, rateVals, label='Rate', color='b')
ax.plot(xVals, capacityVals, label='Capacity', color='r')
ax.set_ylabel('Mbps')
ax.set_xlabel('Time (seconds)')
ax.legend()
fig.tight_layout()
plt.grid(True)
plt.savefig("RateCapacity.png")
ax.clear()
ax.plot(xVals, bufferVals, label='Buffer occupancy', color='g')
ax.plot(xVals, reservoirVals, label='Reservoir', color='orange')
ax.plot(xVals, cushionVals, label="Cushion", color="purple")
ax.set_ylabel("Occupancy (seconds)")
ax.set_xlabel("Time (seconds)")
ax.legend()
plt.ylim(0, 240)
fig.tight_layout()
plt.grid(True)
plt.savefig("Buffer.png")
|
[
"[email protected]"
] | |
fba52c98ec47e1d97dde2ada860f69add0d7d445
|
daca5cf1750496799a07f436644efac8bf1c68cc
|
/src/fiduciaPro/urls.py
|
10bb528a6c5e7ee3deb887367f8e6f46f4f675e2
|
[] |
no_license
|
TasifTarikul/Fiducia
|
88ed3c00e734156fb715a68ed0b22e88f3687338
|
eca356ba247f94dc02e6a6b0d5527cc8e74dd774
|
refs/heads/master
| 2022-05-19T22:39:02.088312 | 2020-12-11T12:31:23 | 2020-12-11T12:31:23 | 241,008,454 | 0 | 0 | null | 2022-04-22T23:06:49 | 2020-02-17T03:05:36 |
JavaScript
|
UTF-8
|
Python
| false | false | 1,138 |
py
|
"""fiduciaPro URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from django.contrib.auth import views as auth_views
from django.conf import settings
from django.conf.urls.static import static
urlpatterns = [
path('', include('UserApp.urls')),
path('logout/', auth_views.LogoutView.as_view(next_page='UserApp:homePage'), name='userProfilelogout'),
path('admin/', admin.site.urls),
]
if settings.DEBUG:
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
[
"[email protected]"
] | |
d302c5d3ee83f2c09c5d7ac3a31c6e14d9b5b0cb
|
32661df15c5eef6e7f14f388aa16af254e6c4141
|
/main/data_feature_muti_main.py
|
d06df63f2be0a357a2bfabb27bcacac686d73dd4
|
[] |
no_license
|
hyren01/KunShan-stdmatcher
|
10dcb14f21e68cb3831b468b671eecef7ce4fb1a
|
37afd06b5a8374e4ab54e71142f13960030da3ed
|
refs/heads/master
| 2022-11-05T11:58:46.333595 | 2020-06-22T06:56:23 | 2020-06-22T06:56:23 | 274,062,449 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,509 |
py
|
import logging
import multiprocessing
import time
import ibm_db
from configuration import Config
from dao import get_db2_connect
from utils.common_util import date_trans
from main.data_feature_main import analyse_table_feature
from utils.log_util import init_log
init_log('../logs/feature', level=logging.DEBUG)
if __name__ == '__main__':
conf = Config()
start_date_str = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
output_conn = None
if conf.output_db == "db2":
output_conn = get_db2_connect(conf.output_db_url)
import dao.output.db2_helper as output_helper
else:
logging.error("输出配置数据库未适配 :{}".format(conf.output_db))
exit(-1)
    # fetch the table configuration info
analysis_conf_dict = output_helper.get_config_info(output_conn, conf.output_schema)
    # read the analysis progress of every table
analysis_schedule_dict = output_helper.get_analysis_schedule(output_conn, conf.output_schema)
    # read the unload (extraction) method for every table
ana_alg_dict = output_helper.get_tab_alg(output_conn, conf.output_schema)
    # holds the tables that still need analysis
table_need_analysis_dict = {}
for (sys_code, ori_table_code) in analysis_conf_dict:
if analysis_conf_dict[(sys_code, ori_table_code)]['FEATURE_FLAG'] == '1' and \
analysis_schedule_dict[(sys_code, ori_table_code)]['FEATURE_SCHE'] == '0':
etl_date = analysis_conf_dict[(sys_code, ori_table_code)]['ETL_DATE']
date_offset = analysis_conf_dict[(sys_code, ori_table_code)]['DATE_OFFSET']
etl_dates = date_trans(etl_date, date_offset)
table_need_analysis_dict[(sys_code, ori_table_code)] = {'alg': ana_alg_dict[(sys_code, ori_table_code)],
'etl_dates': etl_dates}
# else:
# logging.error("待分析表表名重复:{}.{}".format(sys_code, ori_table_code))
# exit(-1)
logging.info("本次共分析{}张表".format(len(table_need_analysis_dict)))
    # close the database connection
ibm_db.close(output_conn)
pool = multiprocessing.Pool(processes=5)
for (sys_code, ori_table_code) in table_need_analysis_dict:
pool.apply_async(analyse_table_feature,
args=(conf, sys_code, ori_table_code, table_need_analysis_dict[(sys_code, ori_table_code)]['alg'], table_need_analysis_dict[(sys_code, ori_table_code)]['etl_dates'], start_date_str))
pool.close()
pool.join()
|
[
"[email protected]"
] | |
4c273131315b983cb2e4a345e77c2afd9fd4eb39
|
e4276439d0bdb0e0009900527852542854d59e23
|
/scoreboard.py
|
2f3ad26aa2e9885ed005aa162e9a2aa5cc81ec6d
|
[] |
no_license
|
Daceman07/Frogger_game
|
b688ab1001138f2509bcf0a593d2228fa391668d
|
7f9c26da1d93e0207e817b76065c66af3b8df11b
|
refs/heads/master
| 2023-06-29T10:21:01.238055 | 2021-08-05T22:11:33 | 2021-08-05T22:11:33 | 393,181,997 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 581 |
py
|
from turtle import Turtle
FONT = ("Courier", 20, "normal")
class Scoreboard(Turtle):
def __init__(self):
super().__init__()
self.penup()
self.hideturtle()
self.level = 1
self.goto(-280, 260)
self.write(f"Level: {self.level}", align="Left", font=FONT)
def update(self):
self.clear()
self.level += 1
self.goto(-280, 260)
self.write(f"Level: {self.level}", align="Left", font=FONT)
def game_over(self):
        self.goto(-70, 0)
        self.write("Game Over", align="left", font=FONT)
|
[
"[email protected]"
] | |
22a0efd61428ca996199ba140cb48190c54006e0
|
acb8e84e3b9c987fcab341f799f41d5a5ec4d587
|
/langs/0/bfi.py
|
e4c0d8a77b1235838847b5ced1684c62c97867da
|
[] |
no_license
|
G4te-Keep3r/HowdyHackers
|
46bfad63eafe5ac515da363e1c75fa6f4b9bca32
|
fb6d391aaecb60ab5c4650d4ae2ddd599fd85db2
|
refs/heads/master
| 2020-08-01T12:08:10.782018 | 2016-11-13T20:45:50 | 2016-11-13T20:45:50 | 73,624,224 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 486 |
py
|
import sys
def printFunction(lineRemaining):
if lineRemaining[0] == '"' and lineRemaining[-1] == '"':
if len(lineRemaining) > 2:
#data to print
lineRemaining = lineRemaining[1:-1]
            print(' '.join(lineRemaining))
else:
            print()
def main(fileName):
with open(fileName) as f:
for line in f:
data = line.split()
if data[0] == 'bfI':
printFunction(data[1:])
else:
                print('ERROR')
return
if __name__ == '__main__':
main(sys.argv[1])
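# A hedged usage sketch (the file name program.bfi is hypothetical): given a
# file containing the line
#     bfI " hello world "
# running `python bfi.py program.bfi` splits the line into tokens, strips the
# surrounding quote tokens, and prints: hello world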
|
[
"[email protected]"
] | |
5253d398213d1c154ea2dffba964210fbf476c74
|
e33ecdb112045327344dce2ae8b0612848938f24
|
/cotidia/socialshare/conf.py
|
1e7d660917e60fb666ce61f86598c24b02e1edef
|
[
"BSD-3-Clause"
] |
permissive
|
guillaumepiot/cotidia-social-share
|
939125b97474bb34e8a94cd0fa6d6919026c029c
|
9c926bb86e7f158f2b59eaddcf09eba459c009b6
|
refs/heads/master
| 2020-04-21T07:28:24.520846 | 2019-03-26T14:00:00 | 2019-03-26T14:00:00 | 169,393,675 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 187 |
py
|
from django.conf import settings
from appconf import AppConf
class SocialShareConf(AppConf):
FACEBOOK_APP_ID = "[Not implemented]"
class Meta:
prefix = 'socialshare'
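# With django-appconf, the Meta prefix is prepended to each attribute name, so
# the default above becomes the setting SOCIALSHARE_FACEBOOK_APP_ID and can be
# overridden from the project settings; a sketch (the app id is a placeholder):
#
#   # settings.py
#   SOCIALSHARE_FACEBOOK_APP_ID = "1234567890"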
|
[
"[email protected]"
] | |
243077a75698f29576100138de41aa8a654a2058
|
3129e4c7e2b6264c6fea1cf7a182a1b600f506ff
|
/src/server/server/migrations/0001_initial.py
|
0ffc0e17608a8cdc6108c1bfce2e72750f5db506
|
[
"MIT"
] |
permissive
|
ITCoders/MobileCloudIR
|
a8202aebd2173378f317514dd3191bd93635c81e
|
b48e8ab0bda8c3764320ef8c51bd0c70c01663a8
|
refs/heads/master
| 2021-05-15T13:19:44.725078 | 2018-03-18T17:02:55 | 2018-03-18T17:02:55 | 107,083,232 | 3 | 1 |
MIT
| 2018-03-18T17:02:56 | 2017-10-16T05:48:11 |
Java
|
UTF-8
|
Python
| false | false | 963 |
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.6 on 2017-10-18 19:14
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='DataRepository',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('ip', models.CharField(db_index=True, max_length=30)),
('data_path', models.CharField(db_index=True, max_length=50)),
],
),
migrations.CreateModel(
name='OnlineDevices',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('ip', models.CharField(db_index=True, max_length=20, unique=True)),
],
),
]
|
[
"[email protected]"
] | |
f5520c23088623002d67820d418372da588ac78d
|
7a4ae475933d65998ad401fa13e8ebdc198446ce
|
/Python/Exercicio05.py
|
e48a2639dd392b92fb4dbcd9ad652af01f45d5c6
|
[] |
no_license
|
vlkonageski/Logica
|
b26f20b0ee8e1bdcc4fb0125af18ba44af6eb6a5
|
962aa382dc4af41712f0ca1f59339d8435bfa4b2
|
refs/heads/master
| 2023-04-03T14:33:40.034303 | 2021-04-20T00:48:34 | 2021-04-20T00:48:34 | 303,716,293 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 405 |
py
|
"""
Faça um algoritimo que pergunte quanto voce ganha por hora e o numero de horas trabalhadas no mes.
Calcule e mostre o total do seu salario no referido mes.
"""
valor_hora = float(input("Informe quanto voce ganha por hora: "))
horas_trabalhadas = int(input("Informe a quantidade de horas trabalhadas: "))
salario = horas_trabalhadas * valor_hora
print("O seu salario é R$ {:.2f}".format(salario))
|
[
"[email protected]"
] | |
5d379cb7e6ff7287440a81755fb4498393ba2973
|
e2585a04a371f1bec5581b481f0da7dcb69b06f9
|
/client/migrations/0006_client_bonus.py
|
a7515d18c1d536cd420519ec9a6b48efa243a359
|
[] |
no_license
|
cianidtop/prokat-crm
|
fd1edca0a3d389e62b5f4917c65dd5d91ff8e9b7
|
13b60dadeabdb22643c55e911202c48d9727a9bd
|
refs/heads/main
| 2023-08-10T08:50:59.063119 | 2021-09-13T08:09:27 | 2021-09-13T08:09:27 | 405,884,997 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 438 |
py
|
# Generated by Django 3.0.4 on 2020-03-22 15:41
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('client', '0005_auto_20200316_1555'),
]
operations = [
migrations.AddField(
model_name='client',
name='bonus',
field=models.IntegerField(default=0, max_length=4, verbose_name='Бонусный счет:'),
),
]
|
[
"root@e7cfa4b601be"
] |
root@e7cfa4b601be
|
b9b99e68650ad4506aabc58561593edbd3eb674b
|
6dc10e1f30c558fa274e8d7935bf9e8d34b6bfba
|
/sparql_shim/parsers.py
|
07b5b5406dadb17b3aa1dfd894fcbde9d9b58fd0
|
[] |
no_license
|
npilon/sparql-shim
|
f7131ec0350af490c932332b0c3888834cc4fb98
|
0497211e7f2b88c0fe0fa9310f4f6a14bca4a1fb
|
refs/heads/master
| 2020-12-24T17:17:39.564463 | 2010-12-05T03:11:29 | 2010-12-05T03:11:29 | 1,124,415 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 759 |
py
|
from cStringIO import StringIO
import rdflib
from pymantic import content_type_to_rdflib_format
def parse_graph(request, content_type):
request.body_graph = rdflib.Graph()
request.body_graph.parse(StringIO(request.body),
format=content_type_to_rdflib_format[content_type])
def parse_n3(context, request):
try:
parse_graph(request, 'text/rdf+n3')
return True
except:
return False
def parse_rdfxml(context, request):
try:
parse_graph(request, 'application/rdf+xml')
return True
    except Exception:
return False
def parse_ntriples(context, request):
try:
parse_graph(request, 'text/plain')
return True
except:
return False
|
[
"[email protected]"
] | |
8d704be2ad0bccea7611b5a9eac75d47a7e74899
|
f448b9635d076d88a4439e937eec7dd050cc316a
|
/xx.py
|
6a1bcaeeb2767fb3a0468cbdf1fb2786afa1066f
|
[] |
no_license
|
udaytejam/practicesamples
|
c7e6ba2e30f52138b3b22414c57ddc1f9e94162a
|
acda24dfe5c3aff60b688c9b434b83a3132b0af1
|
refs/heads/master
| 2021-01-10T02:03:51.456102 | 2015-10-05T11:23:42 | 2015-10-05T11:23:42 | 43,500,701 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 168 |
py
|
globvar = 10
def read1():
print(globvar)
def write1():
global globvar
globvar = 5
def write2():
globvar = 15
read1()
write1()
read1()
write2()
read1()
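# Expected output: 10, 5, 5. The first read1() prints the module-level
# globvar (10); write1() rebinds it to 5 via the `global` statement, so the
# second read1() prints 5; write2() only creates a local variable, so the
# final read1() still prints 5.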
|
[
"[email protected]"
] | |
f4e823b8074caad03b76b275a87e60d38b5c1ea2
|
df04a9fce4208476afdc35c32d8c8a4ef388bfba
|
/pandas/core/array_algos/take.py
|
31cbadb0e442bcce4d507c8f7fab585186ff5bb8
|
[
"BSD-3-Clause"
] |
permissive
|
burbanom/pandas
|
6c0807ee6ab72eb1d7a94764c551e6aa7972b023
|
285dae9d29f1a0693b7c3e62ede5449ec3812c2a
|
refs/heads/master
| 2021-06-19T10:48:21.732456 | 2021-03-16T15:33:07 | 2021-03-16T15:33:07 | 181,310,248 | 0 | 0 |
BSD-3-Clause
| 2021-03-16T15:33:10 | 2019-04-14T13:06:32 |
Python
|
UTF-8
|
Python
| false | false | 19,176 |
py
|
from __future__ import annotations
import functools
from typing import (
TYPE_CHECKING,
Optional,
overload,
)
import numpy as np
from pandas._libs import (
algos as libalgos,
lib,
)
from pandas._typing import ArrayLike
from pandas.core.dtypes.cast import maybe_promote
from pandas.core.dtypes.common import (
ensure_int64,
ensure_platform_int,
)
from pandas.core.dtypes.missing import na_value_for_dtype
from pandas.core.construction import ensure_wrapped_if_datetimelike
if TYPE_CHECKING:
from pandas.core.arrays.base import ExtensionArray
@overload
def take_nd(
arr: np.ndarray,
indexer,
axis: int = ...,
out: Optional[np.ndarray] = ...,
fill_value=...,
allow_fill: bool = ...,
) -> np.ndarray:
...
@overload
def take_nd(
arr: ExtensionArray,
indexer,
axis: int = ...,
out: Optional[np.ndarray] = ...,
fill_value=...,
allow_fill: bool = ...,
) -> ArrayLike:
...
def take_nd(
arr: ArrayLike,
indexer,
axis: int = 0,
out: Optional[np.ndarray] = None,
fill_value=lib.no_default,
allow_fill: bool = True,
) -> ArrayLike:
"""
Specialized Cython take which sets NaN values in one pass
This dispatches to ``take`` defined on ExtensionArrays. It does not
currently dispatch to ``SparseArray.take`` for sparse ``arr``.
Note: this function assumes that the indexer is a valid(ated) indexer with
no out of bound indices.
Parameters
----------
arr : np.ndarray or ExtensionArray
Input array.
indexer : ndarray
1-D array of indices to take, subarrays corresponding to -1 value
        indices are filled with fill_value
axis : int, default 0
Axis to take from
out : ndarray or None, default None
Optional output array, must be appropriate type to hold input and
fill_value together, if indexer has any -1 value entries; call
maybe_promote to determine this type for any fill_value
fill_value : any, default np.nan
Fill value to replace -1 values with
allow_fill : boolean, default True
If False, indexer is assumed to contain no -1 values so no filling
will be done. This short-circuits computation of a mask. Result is
undefined if allow_fill == False and -1 is present in indexer.
Returns
-------
subarray : np.ndarray or ExtensionArray
May be the same type as the input, or cast to an ndarray.
"""
if fill_value is lib.no_default:
fill_value = na_value_for_dtype(arr.dtype, compat=False)
if not isinstance(arr, np.ndarray):
# i.e. ExtensionArray,
# includes for EA to catch DatetimeArray, TimedeltaArray
return arr.take(indexer, fill_value=fill_value, allow_fill=allow_fill)
arr = np.asarray(arr)
return _take_nd_ndarray(arr, indexer, axis, out, fill_value, allow_fill)
def _take_nd_ndarray(
arr: np.ndarray,
indexer,
axis: int,
out: Optional[np.ndarray],
fill_value,
allow_fill: bool,
) -> np.ndarray:
if indexer is None:
indexer = np.arange(arr.shape[axis], dtype=np.int64)
dtype, fill_value = arr.dtype, arr.dtype.type()
else:
indexer = ensure_int64(indexer, copy=False)
indexer, dtype, fill_value, mask_info = _take_preprocess_indexer_and_fill_value(
arr, indexer, out, fill_value, allow_fill
)
flip_order = False
if arr.ndim == 2 and arr.flags.f_contiguous:
flip_order = True
if flip_order:
arr = arr.T
axis = arr.ndim - axis - 1
if out is not None:
out = out.T
# at this point, it's guaranteed that dtype can hold both the arr values
# and the fill_value
if out is None:
out_shape_ = list(arr.shape)
out_shape_[axis] = len(indexer)
out_shape = tuple(out_shape_)
if arr.flags.f_contiguous and axis == arr.ndim - 1:
# minor tweak that can make an order-of-magnitude difference
# for dataframes initialized directly from 2-d ndarrays
# (s.t. df.values is c-contiguous and df._mgr.blocks[0] is its
# f-contiguous transpose)
out = np.empty(out_shape, dtype=dtype, order="F")
else:
out = np.empty(out_shape, dtype=dtype)
func = _get_take_nd_function(
arr.ndim, arr.dtype, out.dtype, axis=axis, mask_info=mask_info
)
func(arr, indexer, out, fill_value)
if flip_order:
out = out.T
return out
def take_1d(
arr: ArrayLike,
indexer: np.ndarray,
fill_value=None,
allow_fill: bool = True,
) -> ArrayLike:
"""
Specialized version for 1D arrays. Differences compared to `take_nd`:
- Assumes input array has already been converted to numpy array / EA
- Assumes indexer is already guaranteed to be int64 dtype ndarray
- Only works for 1D arrays
To ensure the lowest possible overhead.
Note: similarly to `take_nd`, this function assumes that the indexer is
a valid(ated) indexer with no out of bound indices.
TODO(ArrayManager): mainly useful for ArrayManager, otherwise can potentially
be removed again if we don't end up with ArrayManager.
"""
if not isinstance(arr, np.ndarray):
# ExtensionArray -> dispatch to their method
# error: Argument 1 to "take" of "ExtensionArray" has incompatible type
# "ndarray"; expected "Sequence[int]"
return arr.take(
indexer, # type: ignore[arg-type]
fill_value=fill_value,
allow_fill=allow_fill,
)
if not allow_fill:
return arr.take(indexer)
indexer, dtype, fill_value, mask_info = _take_preprocess_indexer_and_fill_value(
arr, indexer, None, fill_value, allow_fill
)
# at this point, it's guaranteed that dtype can hold both the arr values
# and the fill_value
out = np.empty(indexer.shape, dtype=dtype)
func = _get_take_nd_function(
arr.ndim, arr.dtype, out.dtype, axis=0, mask_info=mask_info
)
func(arr, indexer, out, fill_value)
return out
def take_2d_multi(
arr: np.ndarray, indexer: np.ndarray, fill_value=np.nan
) -> np.ndarray:
"""
Specialized Cython take which sets NaN values in one pass.
"""
# This is only called from one place in DataFrame._reindex_multi,
# so we know indexer is well-behaved.
assert indexer is not None
assert indexer[0] is not None
assert indexer[1] is not None
row_idx, col_idx = indexer
row_idx = ensure_int64(row_idx)
col_idx = ensure_int64(col_idx)
# error: Incompatible types in assignment (expression has type "Tuple[Any, Any]",
# variable has type "ndarray")
indexer = row_idx, col_idx # type: ignore[assignment]
mask_info = None
# check for promotion based on types only (do this first because
# it's faster than computing a mask)
dtype, fill_value = maybe_promote(arr.dtype, fill_value)
if dtype != arr.dtype:
# check if promotion is actually required based on indexer
row_mask = row_idx == -1
col_mask = col_idx == -1
row_needs = row_mask.any()
col_needs = col_mask.any()
mask_info = (row_mask, col_mask), (row_needs, col_needs)
if not (row_needs or col_needs):
# if not, then depromote, set fill_value to dummy
# (it won't be used but we don't want the cython code
# to crash when trying to cast it to dtype)
dtype, fill_value = arr.dtype, arr.dtype.type()
# at this point, it's guaranteed that dtype can hold both the arr values
# and the fill_value
out_shape = len(row_idx), len(col_idx)
out = np.empty(out_shape, dtype=dtype)
func = _take_2d_multi_dict.get((arr.dtype.name, out.dtype.name), None)
if func is None and arr.dtype != out.dtype:
func = _take_2d_multi_dict.get((out.dtype.name, out.dtype.name), None)
if func is not None:
func = _convert_wrapper(func, out.dtype)
if func is not None:
func(arr, indexer, out=out, fill_value=fill_value)
else:
_take_2d_multi_object(
arr, indexer, out, fill_value=fill_value, mask_info=mask_info
)
return out
@functools.lru_cache(maxsize=128)
def _get_take_nd_function_cached(
ndim: int, arr_dtype: np.dtype, out_dtype: np.dtype, axis: int
):
"""
Part of _get_take_nd_function below that doesn't need `mask_info` and thus
can be cached (mask_info potentially contains a numpy ndarray which is not
hashable and thus cannot be used as argument for cached function).
"""
tup = (arr_dtype.name, out_dtype.name)
if ndim == 1:
func = _take_1d_dict.get(tup, None)
elif ndim == 2:
if axis == 0:
func = _take_2d_axis0_dict.get(tup, None)
else:
func = _take_2d_axis1_dict.get(tup, None)
if func is not None:
return func
tup = (out_dtype.name, out_dtype.name)
if ndim == 1:
func = _take_1d_dict.get(tup, None)
elif ndim == 2:
if axis == 0:
func = _take_2d_axis0_dict.get(tup, None)
else:
func = _take_2d_axis1_dict.get(tup, None)
if func is not None:
func = _convert_wrapper(func, out_dtype)
return func
return None
def _get_take_nd_function(
ndim: int, arr_dtype: np.dtype, out_dtype: np.dtype, axis: int = 0, mask_info=None
):
"""
Get the appropriate "take" implementation for the given dimension, axis
and dtypes.
"""
func = None
if ndim <= 2:
# for this part we don't need `mask_info` -> use the cached algo lookup
func = _get_take_nd_function_cached(ndim, arr_dtype, out_dtype, axis)
if func is None:
def func(arr, indexer, out, fill_value=np.nan):
indexer = ensure_int64(indexer)
_take_nd_object(
arr, indexer, out, axis=axis, fill_value=fill_value, mask_info=mask_info
)
return func
def _view_wrapper(f, arr_dtype=None, out_dtype=None, fill_wrap=None):
def wrapper(
arr: np.ndarray, indexer: np.ndarray, out: np.ndarray, fill_value=np.nan
):
if arr_dtype is not None:
arr = arr.view(arr_dtype)
if out_dtype is not None:
out = out.view(out_dtype)
if fill_wrap is not None:
fill_value = fill_wrap(fill_value)
f(arr, indexer, out, fill_value=fill_value)
return wrapper
def _convert_wrapper(f, conv_dtype):
def wrapper(
arr: np.ndarray, indexer: np.ndarray, out: np.ndarray, fill_value=np.nan
):
if conv_dtype == object:
# GH#39755 avoid casting dt64/td64 to integers
arr = ensure_wrapped_if_datetimelike(arr)
arr = arr.astype(conv_dtype)
f(arr, indexer, out, fill_value=fill_value)
return wrapper
_take_1d_dict = {
("int8", "int8"): libalgos.take_1d_int8_int8,
("int8", "int32"): libalgos.take_1d_int8_int32,
("int8", "int64"): libalgos.take_1d_int8_int64,
("int8", "float64"): libalgos.take_1d_int8_float64,
("int16", "int16"): libalgos.take_1d_int16_int16,
("int16", "int32"): libalgos.take_1d_int16_int32,
("int16", "int64"): libalgos.take_1d_int16_int64,
("int16", "float64"): libalgos.take_1d_int16_float64,
("int32", "int32"): libalgos.take_1d_int32_int32,
("int32", "int64"): libalgos.take_1d_int32_int64,
("int32", "float64"): libalgos.take_1d_int32_float64,
("int64", "int64"): libalgos.take_1d_int64_int64,
("int64", "float64"): libalgos.take_1d_int64_float64,
("float32", "float32"): libalgos.take_1d_float32_float32,
("float32", "float64"): libalgos.take_1d_float32_float64,
("float64", "float64"): libalgos.take_1d_float64_float64,
("object", "object"): libalgos.take_1d_object_object,
("bool", "bool"): _view_wrapper(libalgos.take_1d_bool_bool, np.uint8, np.uint8),
("bool", "object"): _view_wrapper(libalgos.take_1d_bool_object, np.uint8, None),
("datetime64[ns]", "datetime64[ns]"): _view_wrapper(
libalgos.take_1d_int64_int64, np.int64, np.int64, np.int64
),
}
_take_2d_axis0_dict = {
("int8", "int8"): libalgos.take_2d_axis0_int8_int8,
("int8", "int32"): libalgos.take_2d_axis0_int8_int32,
("int8", "int64"): libalgos.take_2d_axis0_int8_int64,
("int8", "float64"): libalgos.take_2d_axis0_int8_float64,
("int16", "int16"): libalgos.take_2d_axis0_int16_int16,
("int16", "int32"): libalgos.take_2d_axis0_int16_int32,
("int16", "int64"): libalgos.take_2d_axis0_int16_int64,
("int16", "float64"): libalgos.take_2d_axis0_int16_float64,
("int32", "int32"): libalgos.take_2d_axis0_int32_int32,
("int32", "int64"): libalgos.take_2d_axis0_int32_int64,
("int32", "float64"): libalgos.take_2d_axis0_int32_float64,
("int64", "int64"): libalgos.take_2d_axis0_int64_int64,
("int64", "float64"): libalgos.take_2d_axis0_int64_float64,
("float32", "float32"): libalgos.take_2d_axis0_float32_float32,
("float32", "float64"): libalgos.take_2d_axis0_float32_float64,
("float64", "float64"): libalgos.take_2d_axis0_float64_float64,
("object", "object"): libalgos.take_2d_axis0_object_object,
("bool", "bool"): _view_wrapper(
libalgos.take_2d_axis0_bool_bool, np.uint8, np.uint8
),
("bool", "object"): _view_wrapper(
libalgos.take_2d_axis0_bool_object, np.uint8, None
),
("datetime64[ns]", "datetime64[ns]"): _view_wrapper(
libalgos.take_2d_axis0_int64_int64, np.int64, np.int64, fill_wrap=np.int64
),
}
_take_2d_axis1_dict = {
("int8", "int8"): libalgos.take_2d_axis1_int8_int8,
("int8", "int32"): libalgos.take_2d_axis1_int8_int32,
("int8", "int64"): libalgos.take_2d_axis1_int8_int64,
("int8", "float64"): libalgos.take_2d_axis1_int8_float64,
("int16", "int16"): libalgos.take_2d_axis1_int16_int16,
("int16", "int32"): libalgos.take_2d_axis1_int16_int32,
("int16", "int64"): libalgos.take_2d_axis1_int16_int64,
("int16", "float64"): libalgos.take_2d_axis1_int16_float64,
("int32", "int32"): libalgos.take_2d_axis1_int32_int32,
("int32", "int64"): libalgos.take_2d_axis1_int32_int64,
("int32", "float64"): libalgos.take_2d_axis1_int32_float64,
("int64", "int64"): libalgos.take_2d_axis1_int64_int64,
("int64", "float64"): libalgos.take_2d_axis1_int64_float64,
("float32", "float32"): libalgos.take_2d_axis1_float32_float32,
("float32", "float64"): libalgos.take_2d_axis1_float32_float64,
("float64", "float64"): libalgos.take_2d_axis1_float64_float64,
("object", "object"): libalgos.take_2d_axis1_object_object,
("bool", "bool"): _view_wrapper(
libalgos.take_2d_axis1_bool_bool, np.uint8, np.uint8
),
("bool", "object"): _view_wrapper(
libalgos.take_2d_axis1_bool_object, np.uint8, None
),
("datetime64[ns]", "datetime64[ns]"): _view_wrapper(
libalgos.take_2d_axis1_int64_int64, np.int64, np.int64, fill_wrap=np.int64
),
}
_take_2d_multi_dict = {
("int8", "int8"): libalgos.take_2d_multi_int8_int8,
("int8", "int32"): libalgos.take_2d_multi_int8_int32,
("int8", "int64"): libalgos.take_2d_multi_int8_int64,
("int8", "float64"): libalgos.take_2d_multi_int8_float64,
("int16", "int16"): libalgos.take_2d_multi_int16_int16,
("int16", "int32"): libalgos.take_2d_multi_int16_int32,
("int16", "int64"): libalgos.take_2d_multi_int16_int64,
("int16", "float64"): libalgos.take_2d_multi_int16_float64,
("int32", "int32"): libalgos.take_2d_multi_int32_int32,
("int32", "int64"): libalgos.take_2d_multi_int32_int64,
("int32", "float64"): libalgos.take_2d_multi_int32_float64,
("int64", "int64"): libalgos.take_2d_multi_int64_int64,
("int64", "float64"): libalgos.take_2d_multi_int64_float64,
("float32", "float32"): libalgos.take_2d_multi_float32_float32,
("float32", "float64"): libalgos.take_2d_multi_float32_float64,
("float64", "float64"): libalgos.take_2d_multi_float64_float64,
("object", "object"): libalgos.take_2d_multi_object_object,
("bool", "bool"): _view_wrapper(
libalgos.take_2d_multi_bool_bool, np.uint8, np.uint8
),
("bool", "object"): _view_wrapper(
libalgos.take_2d_multi_bool_object, np.uint8, None
),
("datetime64[ns]", "datetime64[ns]"): _view_wrapper(
libalgos.take_2d_multi_int64_int64, np.int64, np.int64, fill_wrap=np.int64
),
}
def _take_nd_object(
arr: np.ndarray,
indexer: np.ndarray,
out: np.ndarray,
axis: int,
fill_value,
mask_info,
):
if mask_info is not None:
mask, needs_masking = mask_info
else:
mask = indexer == -1
needs_masking = mask.any()
if arr.dtype != out.dtype:
arr = arr.astype(out.dtype)
if arr.shape[axis] > 0:
arr.take(ensure_platform_int(indexer), axis=axis, out=out)
if needs_masking:
outindexer = [slice(None)] * arr.ndim
outindexer[axis] = mask
out[tuple(outindexer)] = fill_value
def _take_2d_multi_object(
arr: np.ndarray, indexer: np.ndarray, out: np.ndarray, fill_value, mask_info
) -> None:
# this is not ideal, performance-wise, but it's better than raising
# an exception (best to optimize in Cython to avoid getting here)
row_idx, col_idx = indexer
if mask_info is not None:
(row_mask, col_mask), (row_needs, col_needs) = mask_info
else:
row_mask = row_idx == -1
col_mask = col_idx == -1
row_needs = row_mask.any()
col_needs = col_mask.any()
if fill_value is not None:
if row_needs:
out[row_mask, :] = fill_value
if col_needs:
out[:, col_mask] = fill_value
for i in range(len(row_idx)):
u_ = row_idx[i]
for j in range(len(col_idx)):
v = col_idx[j]
out[i, j] = arr[u_, v]
def _take_preprocess_indexer_and_fill_value(
arr: np.ndarray,
indexer: np.ndarray,
out: Optional[np.ndarray],
fill_value,
allow_fill: bool,
):
mask_info = None
if not allow_fill:
dtype, fill_value = arr.dtype, arr.dtype.type()
mask_info = None, False
else:
# check for promotion based on types only (do this first because
# it's faster than computing a mask)
dtype, fill_value = maybe_promote(arr.dtype, fill_value)
if dtype != arr.dtype and (out is None or out.dtype != dtype):
# check if promotion is actually required based on indexer
mask = indexer == -1
needs_masking = mask.any()
mask_info = mask, needs_masking
if needs_masking:
if out is not None and out.dtype != dtype:
raise TypeError("Incompatible type for fill_value")
else:
# if not, then depromote, set fill_value to dummy
# (it won't be used but we don't want the cython code
# to crash when trying to cast it to dtype)
dtype, fill_value = arr.dtype, arr.dtype.type()
return indexer, dtype, fill_value, mask_info
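# A minimal usage sketch (not part of the pandas source): a -1 entry in the
# indexer marks a missing position, so the int64 input is promoted to float64
# and the gap is filled with NaN.
#
# >>> import numpy as np
# >>> take_nd(np.array([1, 2, 3]), np.array([0, 2, -1]))
# array([ 1.,  3., nan])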
|
[
"[email protected]"
] | |
e4c2ae0d87d85c79e418b6439a463334c40e3c8a
|
a6f735889e77d2469e9bac9fb253adb872cdc766
|
/server.py
|
8027bc50c4e12d3a0c98022096c13a4c03f43e03
|
[] |
no_license
|
DevCodes1/portfo
|
dc9c84783526cfa9d008ef88cc161fd4965cde03
|
c6afe9d26388d78590abfd9a3c3030ebbc7b59c7
|
refs/heads/main
| 2023-01-07T12:25:17.918485 | 2020-11-10T14:52:02 | 2020-11-10T14:52:02 | 311,688,183 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,078 |
py
|
# linked to a text-file database (CSV file)
from flask import Flask, render_template, url_for, request, redirect
import csv
app = Flask(__name__)
print(__name__)
@app.route('/')
def home():
    return render_template('index.html')
@app.route('/<string:page_name>')
def html_page(page_name):
return render_template(page_name)
# write submitted form data to the CSV file
def write_to_csv(data):
with open('database.csv', mode='a') as database:
email = data['email']
subject = data['subject']
message = data['message']
csv_writer = csv.writer(database, delimiter=' ',
quotechar='|', quoting=csv.QUOTE_MINIMAL)
csv_writer.writerow([email, subject, message])
# database.write(f'\n{email},\t\t{subject}, \t\t{message}')
@app.route('/submit_form', methods=['POST', 'GET'])
def submit_Form():
if request.method == 'POST':
data = request.form.to_dict()
write_to_csv(data)
# print(data)
return redirect('/thankyou.html')
else:
        return 'Something went wrong'
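# A hedged usage note (host/port assume the Flask dev server defaults): a POST
# to /submit_form with email, subject and message fields appends a row to
# database.csv and redirects to /thankyou.html, e.g.
#
#   curl -X POST -d "email=a@b.c" -d "subject=hi" -d "message=hello" \
#        http://localhost:5000/submit_form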
|
[
"[email protected]"
] | |
6eb8b235fd81ce33dab57b99690a47093bc66b94
|
9c47fbb2761cc50b7b0be67decb20c377dd1d078
|
/CodeWars/Python/020-Count_IP_Addresses.py
|
77ab020caab135c43570828f47bde8c58e3618d3
|
[
"MIT"
] |
permissive
|
IsFilimonov/Interviews
|
782ec1f5d82373c20df0edaaeb56cfb0d493a9e7
|
3b9858f43ef6b7a2b5e565ef58406e4018edbf97
|
refs/heads/main
| 2022-12-12T13:16:25.750870 | 2022-11-30T11:31:38 | 2022-11-30T11:31:38 | 213,611,039 | 4 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 123 |
py
|
import ipaddress
def ips_between(start, end):
return int(ipaddress.ip_address(end)) - int(ipaddress.ip_address(start))
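# Example: ips_between("10.0.0.0", "10.0.0.50") returns 50; because the
# ipaddress module parses both address families, the same one-liner works for
# IPv6 too (e.g. ips_between("2001:db8::", "2001:db8::ff") returns 255).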
|
[
"[email protected]"
] | |
75209e21a5a4c7a82156680d8a773a25f5018b16
|
08a754c83ad8455077670abb2736ba0e3a8e3817
|
/work/clean_files.py
|
698ffebf605b6cb54a6e64902dae6aa010a73639
|
[] |
no_license
|
momoforever/momo
|
f4a52dbf7b24fad70fe9ffc61a8333e951aeabff
|
040173dc519bdfdd3c10b28f6cecf62c6015d23b
|
refs/heads/master
| 2021-01-17T15:36:20.004289 | 2016-06-25T00:27:54 | 2016-06-25T00:27:54 | 56,226,060 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,176 |
py
|
#!/lab/gw_test_framework/app/venv/python3.5-rhes6.x86_64-epglib2/bin/python
# coding:utf-8
import os
import datetime
def clean_file(file_dir, days):
lists = os.listdir(file_dir)
file_lists = []
count = 0
for i in range(len(lists)):
path = os.path.join(file_dir, lists[i])
if os.path.isfile(path):
if lists[i] != r'*':
file_lists.append(lists[i])
for i in range(len(file_lists)):
path = os.path.join(file_dir, file_lists[i])
if os.path.isdir(path):
continue
timestamp = os.path.getmtime(path)
file_date = datetime.datetime.fromtimestamp(timestamp)
now = datetime.datetime.now()
if (now - file_date) > datetime.timedelta(days=days):
            print('file date is: %s' % file_date)
            print('removing: %s' % path)
os.remove(path)
count = count + 1
else:
            print('file %s is safe' % path)
    print('total %s files are deleted.' % count)
if __name__ == '__main__':
file_dir = input('please input the path:')
days = int(input('please input the days:'))
clean_file(file_dir, days)
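# Sample session (path and age are illustrative):
#   please input the path:/tmp/logs
#   please input the days:30
# Files directly under /tmp/logs whose mtime is more than 30 days old are
# removed; newer ones are reported as safe, then the deletion count is printed.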
|
[
"[email protected]"
] | |
2947dd334a9962628fbd6ad140d2c25e8e572f97
|
ba54b70f93fe7f9d114623d76b1ad3f88309d66f
|
/main/views/public.py
|
2b2679a267e16f85bdc1a067bb156f7ebb7f755b
|
[] |
no_license
|
loobinsk/newprj
|
9769b2f26092ce7dd8612fce37adebb307b01b8b
|
c6aa6a46973fb46375f4b05a86fe76207a8ae16d
|
refs/heads/master
| 2023-05-07T00:28:44.242163 | 2021-05-25T08:22:05 | 2021-05-25T08:22:05 | 370,617,690 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 14,630 |
py
|
#-*- coding: utf-8 -*-
from registration.backends.default.views import RegistrationView
from main.form import RegisterForm, AuthForm, FeedbackForm, ReclameForm
from django.views.generic import View, TemplateView, FormView
from django.contrib.auth.views import login, logout
from main.models import Company, Advert, Tariff, Town
from uprofile.models import User
from django.contrib.auth import authenticate, login as auth_login, logout as auth_logout
from django.utils.decorators import method_decorator
from annoying.decorators import ajax_request
from django.views.decorators.csrf import csrf_exempt
from sorl.thumbnail import get_thumbnail
from gutils.views import BreadcrumbMixin, AjaxableResponseMixin
from django.core.urlresolvers import reverse
from django.shortcuts import get_object_or_404
from mail_templated import send_mail_admins, send_mail
from django.conf import settings
from django.contrib.auth.signals import user_logged_in
from datetime import datetime, timedelta
from ucomment.signals import comment_create
import re
from django.db.models import Count
from cache_utils.decorators import cached
from django.contrib.sites.models import Site
def user_check_sessions(sender, user, request, **kwargs):
"""
    Check the user's sessions and close all other sessions
"""
from user_sessions.models import Session
Session.objects.filter(user=user).exclude(session_key=request.session.session_key).delete()
user_logged_in.connect(user_check_sessions)
def comment_send_notice(sender, user, **kwargs):
"""
    Send a notification about a new comment
"""
m = re.search('^company_(\d+)$', sender.key)
if m:
company_list = Company.objects.filter(id=m.group(1))
if company_list:
if company_list[0].owner:
if company_list[0].owner != user:
send_mail('main/email/comment-notice.html', context={
'subject': 'У вашего агентства появился новый отзыв',
'comment': sender,
'company': company_list[0]
},
recipient_list=[company_list[0].owner.email],
fail_silently=True)
if settings.SITE_ID == 1:
comment_create.connect(comment_send_notice)
class RegisterView(AjaxableResponseMixin, BreadcrumbMixin, RegistrationView):
def __init__(self, *argc, **kwargs):
super(RegisterView, self).__init__(*argc, **kwargs)
self.form_class = RegisterForm
def get_initial(self, request=None):
initial = super(RegisterView, self).get_initial(request)
initial['company_town'] = self.request.current_town.id
return initial
def form_valid(self, request, form):
response = super(AjaxableResponseMixin, self).form_valid(request, form)
if self.request.is_ajax():
data = {
'id': self.object.pk if hasattr(self, 'object') else None,
'object': self.get_model_dict(),
}
return self.render_to_json_response(data)
else:
return response
def register(self, request, form):
form.cleaned_data['email'] = form.cleaned_data['username']
new_user = super(RegisterView, self).register(request, form)
if form.cleaned_data['agent_status'] == RegisterForm.REGISTER_STATUS_COMPANY:
town = get_object_or_404(Town, id=form.cleaned_data['company_town'])
company = Company(
owner=new_user,
title=form.cleaned_data['company_name'],
tel=form.cleaned_data['company_tel'],
email=form.cleaned_data['username'],
address=form.cleaned_data['company_address'],
fact_address=form.cleaned_data['company_fact_address'],
ogrn=form.cleaned_data['company_ogrn'],
inn=form.cleaned_data['company_inn'],
person=form.cleaned_data['company_person'],
town=town
)
company.save()
new_user.company = company
            new_user.tel = form.cleaned_data['company_tel']
new_user.gen_access_code()
new_user.save()
send_mail('main/email/reg-notice.html',
{'company': company, 'subject': u'Поступила новая заявка на регистрацию от %s' % company.title},
recipient_list=settings.NOTICE_REGISTER_EMAIL)
elif form.cleaned_data['agent_status'] == RegisterForm.REGISTER_STATUS_AGENT:
if form.cleaned_data['company_town'] == '1':
company = Company.objects.get(id=form.cleaned_data['agent_company_msk'])
elif form.cleaned_data['company_town'] == '2':
company = Company.objects.get(id=form.cleaned_data['agent_company_spb'])
if not company.is_real:
company.is_real = True
company.status = Company.STATUS_MODERATE
# company.owner = new_user
if not company.tel:
company.tel = form.cleaned_data['company_tel']
if not company.email:
company.email = new_user.email
if not company.owner:
company.owner = new_user
company.save()
new_user.company = company
new_user.tel = form.cleaned_data['company_tel']
new_user.first_name = form.cleaned_data['agent_name']
new_user.gen_access_code()
new_user.status = User.STATUS_MODERATE
new_user.save()
exist_users = company.user_set.filter(agent_email=new_user.email)
if exist_users:
new_user.extnum = exist_users[0].extnum
new_user.save()
exist_users[0].advert_set.all().update(user=new_user)
exist_users[0].delete()
send_mail('main/email/reg-agent-notice.html', {
'user': new_user,
'subject': u'Поступила новая заявка на регистрацию от агента %s' % new_user.username
},
recipient_list=settings.NOTICE_REGISTER_EMAIL)
request.session['registration_email'] = form.cleaned_data['username']
request.session.modified = True
return new_user
def get_breadcrumbs(self):
return [('Агентствам недвижимости', reverse('registration_register'))]
def get_model_dict(self):
return {
'message': u'Регистрация завершена',
'url': reverse('registration_complete')
}
class RegisterCompleteView(TemplateView):
template_name='registration/registration_complete.html'
def get_context_data(self, **kwargs):
context = super(RegisterCompleteView, self).get_context_data(**kwargs)
mail_servers = [
("mail.ru","Почта Mail.Ru","https://e.mail.ru/"),
("bk.ru","Почта Mail.Ru (bk.ru)","https://e.mail.ru/"),
("list.ru","Почта Mail.Ru (list.ru)","https://e.mail.ru/"),
("inbox.ru","Почта Mail.Ru (inbox.ru)","https://e.mail.ru/"),
("yandex.ru","Яндекс.Почта","https://mail.yandex.ru/"),
("ya.ru","Яндекс.Почта","https://mail.yandex.ru/"),
("yandex.ua","Яндекс.Почта","https://mail.yandex.ua/"),
("yandex.by","Яндекс.Почта","https://mail.yandex.by/"),
("yandex.kz","Яндекс.Почта","https://mail.yandex.kz/"),
("yandex.com","Yandex.Mail","https://mail.yandex.com/"),
("gmail.com","Почта Gmail","https://mail.google.com/"),
("googlemail.com","Почта Gmail","https://mail.google.com/"),
("outlook.com","Почта Outlook.com","https://mail.live.com/"),
("hotmail.com","Почта Outlook.com (Hotmail)","https://mail.live.com/"),
("live.ru","Почта Outlook.com (live.ru)","https://mail.live.com/"),
("live.com","Почта Outlook.com (live.com)","https://mail.live.com/"),
("me.com","Почта iCloud Mail","https://www.icloud.com/"),
("icloud.com","Почта iCloud Mail","https://www.icloud.com/"),
("rambler.ru","Рамблер-Почта","https://mail.rambler.ru/"),
("yahoo.com","Почта Yahoo! Mail","https://mail.yahoo.com/"),
("ukr.net","Почта ukr.net","https://mail.ukr.net/"),
("i.ua","Почта I.UA","http://mail.i.ua/"),
("bigmir.net","Почта Bigmir.net","http://mail.bigmir.net/"),
("tut.by","Почта tut.by","https://mail.tut.by/"),
("inbox.lv","Inbox.lv","https://www.inbox.lv/"),
("mail.kz","Почта mail.kz","http://mail.kz/"),
]
email = self.request.session.get('registration_email')
if email:
for server in mail_servers:
if server[0].lower() in email.lower():
context['mail_server'] = server
return context
class LoginView(View):
def get(self, *args, **kwargs):
return login(self.request, authentication_form=AuthForm)
def post(self, *args, **kwargs):
return login(self.request, authentication_form=AuthForm)
class LoginView_Moder(LoginView):
def get(self, *args, **kwargs):
return login(self.request, authentication_form=AuthForm, template_name='registration/moder/login.html')
def post(self, *args, **kwargs):
return login(self.request, authentication_form=AuthForm, template_name='registration/moder/login.html')
class AjaxLoginView(View):
@method_decorator(csrf_exempt)
def dispatch(self, request, *args, **kwargs):
return super(AjaxLoginView, self).dispatch(request, *args, **kwargs)
@method_decorator(ajax_request)
def post(self, *args, **kwargs):
context = {}
form = AuthForm(self.request, data=self.request.POST)
if form.is_valid():
user = authenticate(username=form.cleaned_data['username'], password=form.cleaned_data['password'])
if user is not None:
if user.is_active:
auth_login(self.request, user)
context['success'] = True
context['message'] = 'Добро пожаловать'
context['username'] = user.get_full_name()
if user.image:
try:
thumb = get_thumbnail(user.image, '100x100', crop='center', quality=99)
context['image'] = thumb.url
except:
context['image'] = ''
else:
context['image'] = ''
company = user.company
if company:
context['activated'] = company.status == Company.STATUS_ACTIVE
context['company'] = company.title
else:
context['activated'] = True
context['company'] = ''
else:
context['success'] = False
context['message'] = 'Аккаунт заблокирован'
else:
# Return an 'invalid login' error message.
context['success'] = False
context['message'] = 'Неправильные имя пользователя или пароль'
else:
context['success'] = False
a = []
for error in form.errors:
for e in form.errors[error]:
a.append(e)
context['message'] = '<br>'.join(a)
return context
class LogoutView_Moder(LoginView):
def get(self, *args, **kwargs):
return logout(self.request, next_page='/', template_name='registration/moder/login.html')
def post(self, *args, **kwargs):
return logout(self.request, next_page='/', template_name='registration/moder/login.html')
class HomeView(TemplateView):
template_name = 'main/home.html'
def get_context_data(self, **kwargs):
context = super(HomeView, self).get_context_data(**kwargs)
town = self.request.current_town
        # statistics
context['count_adverts'] = self.get_count_adverts()
context['count_companies'] = self.get_count_companies()
        # latest listings
context['vip_list'] = Advert.objects.filter(company=None,
town=town,
need=Advert.NEED_SALE,
status=Advert.STATUS_VIEW,
date__gte=datetime.now() - timedelta(days=30))\
.filter(Advert.ARCHIVE_NO_QUERY)\
.annotate(image_count=Count('images'))\
.exclude(image_count=0)\
.order_by('?')[:5]
context['last_advert_list'] = Advert.objects.filter(town=town, need=Advert.NEED_SALE, status=Advert.STATUS_VIEW).order_by('-date')[:5]
context['arenda_advert_list'] = Advert.objects\
.filter(adtype=Advert.TYPE_LEASE, town=town, need=Advert.NEED_SALE)\
.filter(estate=Advert.ESTATE_LIVE, status=Advert.STATUS_VIEW)\
.order_by('-date')[:4]
context['sale_advert_list'] = Advert.objects \
.filter(adtype=Advert.TYPE_SALE, town=town, need=Advert.NEED_SALE) \
.filter(estate=Advert.ESTATE_LIVE, status=Advert.STATUS_VIEW) \
.order_by('-date')[:4]
return context
@cached(3600)
def get_count_adverts(self):
return Advert.objects.filter(status=Advert.STATUS_VIEW).count()
@cached(3600)
def get_count_companies(self):
return Company.objects.all().count()
def page_not_found(request, template_name='404.html'):
from django.views.defaults import page_not_found
return page_not_found(request, template_name)
def page_not_found_moder(request, template_name='404.html'):
from django.views.defaults import page_not_found
return page_not_found(request, template_name='404-moder.html')
|
[
"[email protected]"
] | |
97e11e4eb41ae9950e9f027a56876c03c99d3f64
|
37a273eb995edcc6267ffa0f9409ccf6897845c6
|
/climetlab/mockup.py
|
9ee6f3e5eddd62605127bd2fdbf47d75844f3d5a
|
[
"Apache-2.0"
] |
permissive
|
dingxinjun/climetlab
|
3f6fe6a6547c5e29df4ab2bbcdd378d7c43ce1ce
|
9ae9210f81e754f3d7d97aa75fd29c5b1a581533
|
refs/heads/main
| 2023-08-15T15:17:01.458040 | 2021-09-27T11:52:35 | 2021-09-27T11:52:35 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,708 |
py
|
# (C) Copyright 2020 ECMWF.
#
# This software is licensed under the terms of the Apache Licence Version 2.0
# which can be obtained at http://www.apache.org/licenses/LICENSE-2.0.
# In applying this licence, ECMWF does not waive the privileges and immunities
# granted to it by virtue of its status as an intergovernmental organisation
# nor does it submit to any jurisdiction.
#
from climetlab.sources import Source
class TestingMockup:
def __init__(self, *args, **kwargs):
self.args = args
self.kwargs = kwargs
class TestingXarrayAttrs(dict):
pass
class TestingXarrayDims(list):
pass
class TestingDatasetAsXarray(TestingMockup):
def __init__(self, *args, **kwargs):
super(TestingDatasetAsXarray, self).__init__(*args, **kwargs)
self.attrs = TestingXarrayAttrs()
self.dims = TestingXarrayDims()
# TODO: make this generic
def min(self, *args, **kwargs):
print(f"xr.min({args}, {kwargs})")
return 42.0
    def max(self, *args, **kwargs):
        print(f"xr.max({args}, {kwargs})")
        return 42.0
def map(self, *args, **kwargs):
print("xr.map(...)")
# print(f'xr.map({args}, {kwargs})')
return self
def sortby(self, *args, **kwargs):
print(f"xr.sortby({args}, {kwargs})")
return self
def __getitem__(self, key):
print(f"xr.__getitem__({key})")
return self
def __setitem__(self, key, value):
print(f"xr.__setitem__({key})=...")
# print(f'xr.__setitem__({key})={value}')
return self
def chunk(self, *args, **kwargs):
print(f"xr.chunk({args}, {kwargs})")
return self
def astype(self, *args, **kwargs):
print(f"xr.astype({args}, {kwargs})")
return self
def to_zarr(self, *args, **kwargs):
print(f"xr.to_zarr({args}, {kwargs})")
return self
def __getattr__(self, name):
print(f"xr.{name} (unkwown)")
return self
class DatasetMockup(TestingMockup):
    def __init__(self, *args, **kwargs):
        self.args = args
        self.kwargs = kwargs
        print(f"Climetlab DatasetMockup : args={args}, kwargs={kwargs}")
        super(DatasetMockup, self).__init__(**kwargs)
def to_xarray(self, *args, **kwargs):
return TestingDatasetAsXarray(*self.args, **self.kwargs)
class SourceMockup(Source):
def __init__(self, *args, **kwargs):
self.args = args
self.kwargs = kwargs
print(f"Climetlab SourceMockup : args={args}, kwargs={kwargs}")
super(SourceMockup, self).__init__(**kwargs)
def to_xarray(self, *args, **kwargs):
return TestingDatasetAsXarray(*self.args, **self.kwargs)
|
[
"[email protected]"
] | |
ab60c94e17edc63d738db0bc96f31a64f747d412
|
21fe5be2ec275416c06a09a006be9b68f31230dc
|
/regexURLfinder.py
|
abc821259e59374967b2a5406edc0e7f089c1609
|
[] |
no_license
|
heyquentin/automate-the-boring-stuff
|
0ead12d761cf4837433f081867cd90b1b1fc2913
|
d34654990734b6dab655c33a42a8da2973a7f0d1
|
refs/heads/master
| 2021-03-16T21:00:45.053373 | 2020-04-07T02:29:31 | 2020-04-07T02:29:31 | 246,943,001 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,745 |
py
|
import re
# TODO: Create a text variable with URLs in it
text = """But I must explain to you how all this mistaken idea of denouncing pleasure and https://www.lipsum.com/ praising pain was born and I will give you a complete account of the system,
and expound the actual teachings of the great explorer of the truth, the master-builder of human happiness. No one rejects, dislikes, or avoids pleasure itself, because it is pleasure, but
because those who do not know how to pursue pleasure rationally encounter consequences that are extremely painful. Nor again is there anyone who loves or pursues or desires to obtain
pain of itself, https://automatetheboringstuff.com/2e/chapter7/ because it is pain, but because occasionally circumstances occur in which toil and pain can procure him some great pleasure.
To take a trivial example, which of us ever undertakes laborious physical exercise, except to obtain some advantage from it? But who has any right to find fault with a man who chooses to
enjoy a pleasure that has no annoying consequences, or one who avoids a pain that produces no resultant pleasure?"""
# TODO: Create a regex to pull out the URLs
## Import the regex module with import re.
## Create a Regex object with the re.compile() function. (Remember to use a raw string.)
## Pass the string you want to search into the Regex object’s search() method. This returns a Match object.
## Call the Match object’s group() method to return a string of the actual matched text.
URLMatch = re.compile(r'https?://\S+')  # a run of non-whitespace after http:// or https:// (also catches a URL at the very end of the text)
matchObject = URLMatch.findall(text)
# TODO: Print the URLs from the text to the terminal
print('Here are all the URLs in your text')
for url in matchObject:
    print(url)
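# For contrast, a minimal sketch of the search()/group() workflow described in
# the comments above -- it returns only the first match, using the same pattern:
firstMatch = URLMatch.search(text)
if firstMatch:
    print('First URL via search():', firstMatch.group())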
|
[
"[email protected]"
] | |
b1532dca490f5b992fcd2d4448901b761f3b2807
|
025dc1fa797b0de25b556365d23bddb848ab8ce0
|
/colossus/apps/lists/mixins.py
|
ec6726113587e1a0aef7d4b9d7aedb437406729a
|
[
"MIT"
] |
permissive
|
ramanaditya/colossus
|
eab49ec33031b8542b07e3aaebc36467a97786d6
|
11b34a216b2021a5da79cd6e347aef842f7b0c72
|
refs/heads/master
| 2023-03-30T12:39:12.948490 | 2021-03-25T17:11:32 | 2021-03-25T17:11:32 | 340,977,981 | 1 | 0 |
MIT
| 2021-03-25T16:34:54 | 2021-02-21T18:51:05 |
Python
|
UTF-8
|
Python
| false | false | 1,295 |
py
|
from django.http import Http404
from django.shortcuts import get_object_or_404
from django.views.generic.base import ContextMixin
from colossus.apps.subscribers.constants import TemplateKeys
from colossus.apps.subscribers.models import SubscriptionFormTemplate
from .models import MailingList
class MailingListMixin(ContextMixin):
__mailing_list = None
@property
def mailing_list(self):
if self.__mailing_list is None:
self.__mailing_list = get_object_or_404(MailingList, pk=self.kwargs.get('pk'))
return self.__mailing_list
def get_context_data(self, **kwargs):
if 'menu' not in kwargs:
kwargs['menu'] = 'lists'
if 'mailing_list' not in kwargs:
kwargs['mailing_list'] = self.mailing_list
return super().get_context_data(**kwargs)
class FormTemplateMixin:
def get_object(self):
mailing_list_id = self.kwargs.get('pk')
key = self.kwargs.get('form_key')
if key not in TemplateKeys.LABELS.keys():
raise Http404
form_template, created = SubscriptionFormTemplate.objects.get_or_create(
key=key,
mailing_list_id=mailing_list_id
)
if created:
form_template.load_defaults()
return form_template
|
[
"[email protected]"
] | |
16a2aa4eab29fc1a01f30df04ebadd198816e166
|
eb1861482c91eea76efc1a237bb12ffb09e902d1
|
/d5.py
|
e6b81f16a9d710200e34dd774c998f0ae1649801
|
[] |
no_license
|
SindreSkrede/AdventOfCode2018
|
24e6a181c30505edc2a4839097585ec50dec2bd0
|
3f793c84d2940b1fcca0dc49fde80a499ad83025
|
refs/heads/master
| 2020-04-09T12:28:28.459835 | 2019-02-10T11:32:16 | 2019-02-10T11:32:16 | 160,351,520 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 451 |
py
|
d_org = open("data/d5.txt").read()[:-1]
#d_org = "dabAcCaCBAcCcaDA"
sol = {}
for l in "abcdefghijklmnopqrstuvwxyz":
d = d_org.replace(l,"").replace(l.upper(),"")
while(True):
stop = True
for i in range(len(d)-1):
x = ord(d[i])
y = ord(d[i+1])
diff = abs(x - y)
if (diff == 32):
d = d[0:i] + d[i+2:]
stop = False
break
if (stop):
break
sol[l] = len(d)
print(l, len(d))
print(min(sol.items(), key=lambda x : x[1]))
|
[
"[email protected]"
] | |
6547662049ad68d0f522d83608f4a1bc4000f4a6
|
58e78ec0c9997e416546e49bff5a1c65de7e1059
|
/E_Choice/main/urls.py
|
d2630d17c7a47131b2205420ccb7d3586b91595e
|
[] |
no_license
|
bramlap/E-Choice
|
f79d66dc2828b3a54c5b905a12d7697961c0d8cd
|
5e34c02cc615a426d8a6bd099bc59785606336f8
|
refs/heads/master
| 2020-04-18T00:32:29.483072 | 2016-08-31T08:04:03 | 2016-08-31T08:04:03 | 66,548,714 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,132 |
py
|
"""ftc URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.9/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url
# from django.contrib.auth import views as auth_views
from . import views
urlpatterns = [
url(r'^login', views.login),
url(r'^logout', views.logout),
url(r'^loggedin', views.loggedin),
url(r'^vragen', views.vragen),
url(r'^weging', views.weging),
url(r'^docent', views.docent),
url(r'^pdf_export', views.pdf_export),
url(r'^page_not_permitted', views.page_not_permitted),
url(r'^opleiding_kiezen', views.opleiding_kiezen),
]
|
[
"[email protected]"
] | |
272f220f3f3ebc85c30ee2deceb3ddb12740a3f5
|
f3bfd5c7639e24072e97033fd2366879d7480598
|
/quizsolver.py
|
358540cdf87b3010e90cb99b0ab5d28063701daf
|
[] |
no_license
|
dimnikolos/quizsolver
|
cffa8699d513b9cd7cfa9f362de0fcbd7783ab2a
|
49c6359e4632151475168685b8dbb648a07996df
|
refs/heads/master
| 2021-05-16T03:02:38.500288 | 2017-11-23T21:05:33 | 2017-11-23T21:05:33 | 20,495,095 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,216 |
py
|
def quizsolver1():
for a in range(0,10):
for b in range(0,10):
for c in range(0,10):
for d in range(0,10):
for e in range(0,10):
for f in range(1,10):
for g in range(0,10):
if (((10*a+b)*(10*b+c)==d*100+a*10+d)
                                    and ((10*e+c)//f == f)
and ((100*b+10*c+d)*g==1000*b+100*b+10*e+a)
and ((10*a+b)*(10*e+c)==100*b+10*c+d)
                                    and ((10*b+c)//f == g)
and ((100*d+10*a+d) * f == 1000*b+100*b+10*e+a)):
print(''.join([str(a),str(b),str(c),str(d),str(e),
str(f),str(g)]))
def quizsolver2():
for a in range(0,10):
for b in range(0,10):
for d in range(0,10):
for f in range(1,10):
for g in range(0,10):
c = (f*f) % 10
                        e = (f*f) // 10  # floor division (the original Python 2 '/' floor-divides ints)
if (((10*a+b)*(10*b+c)==d*100+a*10+d)
                            and ((10*e+c)//f == f)
and ((100*b+10*c+d)*g==1000*b+100*b+10*e+a)
and ((10*a+b)*(10*e+c)==100*b+10*c+d)
                            and ((10*b+c)//f == g)
and ((100*d+10*a+d) * f == 1000*b+100*b+10*e+a)):
print(''.join([str(a),str(b),str(c),str(d),
str(e),str(f),str(g)]))
quizsolver1()
quizsolver2()
|
[
"[email protected]"
] | |
072d371ce95370c4977fcc64b3a3e77c06ca6c30
|
5f07c38899e350b0b776510fd7d7831d44cf1404
|
/drfmixins/drfmixins/settings.py
|
d599783f7b76ad7f17b66c1c6fd0e90c0991e475
|
[] |
no_license
|
shubham454/Django-Rest
|
b733f1d47ada9df452e912dcd8acad48a7ec4c75
|
3d94f57cab3537c51caa68807d5fcdf8883d2d2c
|
refs/heads/master
| 2022-12-14T20:37:11.835794 | 2020-08-13T18:43:26 | 2020-08-13T18:43:26 | 287,354,715 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,133 |
py
|
"""
Django settings for drfmixins project.
Generated by 'django-admin startproject' using Django 2.2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'z&7-uzdyn7cex&u5yzfw&wh$j8_v71pu@!4rc9lu@c#8y(!_^('
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'testapp'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'drfmixins.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'drfmixins.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
|
[
"[email protected]"
] | |
fe495c9341a1eb554cfe5de479bfbb5f57e1bab2
|
677fbc1a97c53aa8c56da070539afc3d8f48dd8c
|
/libraries/botbuilder-core/botbuilder/core/user_state.py
|
9bc0d12586bb993faaaba0ade4b488957cd3ee8d
|
[
"LicenseRef-scancode-generic-cla",
"MIT"
] |
permissive
|
crazyrex/botbuilder-python
|
e1e83110e57ffd92c6687d7ec3856a631efd501c
|
f2f1a8bb4171bbebdc42b94a68b2c2cf7aea8683
|
refs/heads/master
| 2020-03-31T03:49:09.024171 | 2018-09-19T18:53:44 | 2018-09-19T18:53:44 | 151,879,387 | 1 | 0 |
MIT
| 2018-10-06T20:34:16 | 2018-10-06T20:34:16 | null |
UTF-8
|
Python
| false | false | 1,463 |
py
|
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
from .turn_context import TurnContext
from .bot_state import BotState
from .storage import Storage
class UserState(BotState):
"""
Reads and writes user state for your bot to storage.
"""
    no_key_error_message = 'UserState: channel_id and/or user_id missing from context.activity.'
def __init__(self, storage: Storage, namespace=''):
"""
Creates a new UserState instance.
:param storage:
:param namespace:
"""
self.namespace = namespace
def call_get_storage_key(context):
key = self.get_storage_key(context)
if key is None:
raise AttributeError(self.no_key_error_message)
else:
return key
super(UserState, self).__init__(storage, call_get_storage_key)
def get_storage_key(self, context: TurnContext) -> str:
"""
Returns the storage key for the current user state.
:param context:
:return:
"""
activity = context.activity
channel_id = getattr(activity, 'channel_id', None)
user_id = getattr(activity.from_property, 'id', None) if hasattr(activity, 'from_property') else None
storage_key = None
if channel_id and user_id:
storage_key = f"user/{channel_id}/{user_id}/{self.namespace}"
return storage_key
|
[
"[email protected]"
] | |
9f25d416bd468bb65eb3923ab99d32b912f60ca7
|
3e85618c79a1a934fec543e1327e772ca081a5b9
|
/N1949.py
|
2c0945dcd3d845154cc7480e681a4eb6834ef180
|
[] |
no_license
|
ghdus4185/SWEXPERT
|
72d79aa4a668452327a676a644b952bab191c79b
|
4dc74ad74df7837450de4ce55526dac7760ce738
|
refs/heads/master
| 2020-07-16T18:31:22.153239 | 2019-12-20T04:18:30 | 2019-12-20T04:18:30 | 205,843,190 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,019 |
py
|
import sys
sys.stdin = open('sample_input.txt', 'r')
# Start from the highest peaks.
# From a cell we may step to a strictly lower neighbour, or cut one
# higher/equal neighbour to make it passable; saving the cut can pay off later, so:
# 1) try moving to each lower cell
# 2) for a higher-or-equal cell, branch on cutting it or not cutting it
# 3) when cutting is enough to pass, cut no more than needed --
#    exactly one below our own height
def f(i, j, c, e):  # c: cuts remaining, e: trail length so far
di = [0, 1, 0, -1]
dj = [1, 0, -1, 0]
global N, K, maxV, visited, arr
if maxV < e:
maxV = e
    visited[i][j] = 1  # mark the cell as part of the current trail
    # explore the four neighbours
for k in range(4):
ni = i + di[k]
nj = j + dj[k]
        if ni >= 0 and ni < N and nj >= 0 and nj < N:  # bounds check
            if arr[i][j] > arr[ni][nj]:
                f(ni, nj, c, e+1)  # move to the strictly lower neighbour
            elif visited[ni][nj] == 0 and c > 0 and arr[i][j] > arr[ni][nj]-K:
                # cut the neighbour, then move onto it
                org = arr[ni][nj]  # remember the original height
                arr[ni][nj] = arr[i][j] - 1  # cut to exactly one below our height
                f(ni, nj, 0, e+1)
                arr[ni][nj] = org  # restore the height when backtracking
    visited[i][j] = 0  # unmark after all directions: the cell may belong to another trail
    return
T = int(input())
for tc in range(T):
N, K = map(int, input().split())
arr = [list(map(int, input().split())) for _ in range(N)]
visited = [[0]*N for _ in range(N)]
h = 0
for i in range(N):
for j in range(N):
if h < arr[i][j]:
h = arr[i][j]
maxV = 0
for i in range(N):
for j in range(N):
if arr[i][j] == h:
f(i, j, 1, 1)
print('#{} {}'.format(tc+1, maxV))
|
[
"[email protected]"
] | |
c9ad73bbe82399296d85ed65b6b54ec0055bcc34
|
1b43b76d5a7db101b19edfe240309e5e16543170
|
/src/identifiers.py
|
8101d73c479986363ad5d9529089d35d4a745985
|
[] |
no_license
|
mfilmer/SDD
|
464adfa4422ea8c084fff7fb5e5fae86611d02c5
|
0505147e6725e52945e81f4133e9e87bfb695b11
|
refs/heads/master
| 2020-05-18T19:53:52.481472 | 2014-08-18T03:29:53 | 2014-08-18T03:29:53 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 954 |
py
|
# Identifiers
class Single(object):
def __init__(self, name):
self._name = name
def __str__(self):
return self._name
def __repr__(self):
return "{}({})".format(type(self), self._name)
class Alignment(Single):
pass
Unaligned = Alignment('Unaligned')
Good = Alignment('Good')
Bad = Alignment('Bad')
class State(Single):
def __repr__(self):
return "State('{}')".format(self._name)
CreateGame = State('CreateGame')
MakeTeam = State('MakeTeam')
VoteTeam = State('VoteTeam')
OnMission = State('OnMission')
GameOver = State('GameOver')
class TeamVote(Single):
pass
Approve = TeamVote('Approve')
Reject = TeamVote('Reject')
class MissionBehavior(Single):
pass
Pass = MissionBehavior('Pass')
Fail = MissionBehavior('Fail')
# Reason the winning team won
class VictoryReason(Single):
pass
WinThreeMissions = VictoryReason('WinThreeMissions')
FiveRejectedTeams = VictoryReason('FiveRejectedTeams')
|
[
"[email protected]"
] | |
1a4112e5ec12d83e80e2b90b621e04a155b02728
|
61744d85bbf2aefdf0fc27006acc2e742db9e152
|
/misoKG-master/legacy_code/run_multiKG_DragAndLift.py
|
7611d9296675d8bd95723646ed6c3787b57bcfc3
|
[] |
no_license
|
sunatthegilddotcom/perovskite-4
|
896da29694830a6b98c33050f1aa41258310bd59
|
dd21c8b6217c5859783a6a92e9b082aeea98f9e8
|
refs/heads/master
| 2021-07-03T13:36:08.618860 | 2017-09-25T02:18:44 | 2017-09-25T02:18:44 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 8,361 |
py
|
from joblib import Parallel, delayed
import numpy   # used throughout below but previously only available via the star imports
import pandas  # needed for pandas.read_sql_table
import scipy.optimize
from moe.optimal_learning.python.data_containers import HistoricalData, SamplePoint
from moe.optimal_learning.python.geometry_utils import ClosedInterval
from moe.optimal_learning.python.python_version.domain import TensorProductDomain as pythonTensorProductDomain
from moe.optimal_learning.python.python_version.gaussian_process import GaussianProcess
from moe.optimal_learning.python.python_version.covariance import SquareExponential
from moe.optimal_learning.python.python_version.expected_improvement import ExpectedImprovement
from moe.optimal_learning.python.cpp_wrappers.gaussian_process import GaussianProcessNew
from moe.optimal_learning.python.cpp_wrappers.covariance import MixedSquareExponential as cppMixedSquareExponential
from multifidelity_KG.model.covariance_function import MixedSquareExponential
from multifidelity_KG.voi.knowledge_gradient import *
from multifidelity_KG.voi.optimization import *
from multifidelity_KG.result_container import BenchmarkResult
import sql_util
import sample_initial_points
from assembleToOrder.assembleToOrder import AssembleToOrder
from multifidelity_KG.obj_functions import Rosenbrock
from mothers_little_helpers import process_parallel_results, load_init_points_for_all_IS, load_vals
__author__ = 'jialeiwang'
### The following parameters must be adapted for each simulator
numIS = 2
truth_IS = 0
exploitation_IS = 2 # IS to use when VOI does not work
func_name = 'rosenbrock'
init_data_pickle_filename = "rosenbrock_2_IS"
benchmark_result_table_name = "rosenbrock_multiKG_newCost_2"
obj_func_max = Rosenbrock(numIS, mult=-1.0) # used by KG
obj_func_min = Rosenbrock(numIS, mult=1.0) # our original problems are all assumed to be minimization!
# less important params
exploitation_threshold = 1e-5
num_x_prime = 3000
num_discretization_before_ranking = num_x_prime * 3
num_iterations = 100
num_threads = 64
num_multistart = 64
num_candidate_start_points = 500
### end parameter
search_domain = pythonTensorProductDomain([ClosedInterval(bound[0], bound[1]) for bound in obj_func_max._search_domain])
noise_and_cost_func = obj_func_min.noise_and_cost_func
# Load initial data from pickle
init_pts = load_init_points_for_all_IS("pickles", init_data_pickle_filename, obj_func_min._numIS)
init_vals = load_vals("pickles", init_data_pickle_filename, obj_func_min._numIS)
#init_pts, init_vals = sample_initial_points.load_data_from_a_min_problem("pickles", init_data_pickle_filename)
# setup benchmark result container
multi_kg_result = BenchmarkResult(num_iterations, obj_func_max._dim, benchmark_result_table_name)
kg_hyper_param = pandas.read_sql_table('multifidelity_kg_hyperparam_' + func_name, sql_util.sql_engine).mean(axis=0).values
kg_data = HistoricalData(obj_func_max._dim + 1)
best_sampled_val = numpy.inf
for i in range(obj_func_max._num_IS):
IS_pts = numpy.hstack(((i + 1) * numpy.ones(len(init_pts[i])).reshape((-1, 1)), init_pts[i]))
# multiply all values by -1 since we assume that the training data stems from the minimization version
# but misoKG uses the maximization version
vals = -1.0 * numpy.array(init_vals[i])
# obtain what used to be sample_vars
noise_vars = numpy.array([noise_and_cost_func(i+1, pt)[0] for pt in init_pts[i]])
kg_data.append_historical_data(IS_pts, vals, noise_vars)
# find the best initial value
if numpy.amin(init_vals[i]) < best_sampled_val:
best_sampled_val = numpy.amin(init_vals[i])
best_sampled_point = init_pts[i][numpy.argmin(init_vals[i]), :]
truth_at_best_sampled = obj_func_min.evaluate(truth_IS, best_sampled_point)
kg_cov = MixedSquareExponential(hyperparameters=kg_hyper_param, total_dim=obj_func_max._dim + 1, num_is=obj_func_max._num_IS)
kg_cov_cpp = cppMixedSquareExponential(hyperparameters=kg_hyper_param)
kg_gp_cpp = GaussianProcessNew(kg_cov_cpp, kg_data, obj_func_max._num_IS)
for kg_n in range(num_iterations):
print "itr {0}, {1}".format(kg_n, benchmark_result_table_name)
### First discretize points and then only keep the good points idea
discretization_points = search_domain.generate_uniform_random_points_in_domain(num_discretization_before_ranking)
discretization_points = numpy.hstack((numpy.zeros((num_discretization_before_ranking,1)), discretization_points))
all_mu = kg_gp_cpp.compute_mean_of_points(discretization_points)
sorted_idx = numpy.argsort(all_mu)
all_zero_x_prime = discretization_points[sorted_idx[-num_x_prime:], :]
### idea ends
# all_zero_x_prime = numpy.hstack((numpy.zeros((num_x_prime,1)), search_domain.generate_uniform_random_points_in_domain(num_x_prime)))
def min_kg_unit(start_pt, IS):
func_to_min, grad_func = negative_kg_and_grad_given_x_prime(IS, all_zero_x_prime, noise_and_cost_func, kg_gp_cpp)
return bfgs_optimization_grad(start_pt, func_to_min, grad_func, obj_func_max._search_domain)
def compute_kg_unit(x, IS):
return compute_kg_given_x_prime(IS, x, all_zero_x_prime, noise_and_cost_func(IS, x)[0], noise_and_cost_func(IS, x)[1], kg_gp_cpp)
def find_mu_star(start_pt):
return bfgs_optimization(start_pt, negative_mu_kg(kg_gp_cpp), obj_func_max._search_domain)
min_negative_kg = numpy.inf
with Parallel(n_jobs=num_threads) as parallel:
for i in range(obj_func_max._num_IS):
start_points_prepare = search_domain.generate_uniform_random_points_in_domain(num_candidate_start_points)
kg_vals = parallel(delayed(compute_kg_unit)(x, i+1) for x in start_points_prepare)
sorted_idx_kg = numpy.argsort(kg_vals)
start_points = start_points_prepare[sorted_idx_kg[-num_multistart:], :]
parallel_results = parallel(delayed(min_kg_unit)(pt, i+1) for pt in start_points)
inner_min, inner_min_point = process_parallel_results(parallel_results)
if inner_min < min_negative_kg:
min_negative_kg = inner_min
point_to_sample = inner_min_point
sample_IS = i + 1
print "IS {0}, KG {1}".format(i+1, -inner_min)
start_points_prepare = search_domain.generate_uniform_random_points_in_domain(num_candidate_start_points)
mu_vals = kg_gp_cpp.compute_mean_of_points(numpy.hstack((numpy.zeros((num_candidate_start_points, 1)), start_points_prepare)))
start_points = start_points_prepare[numpy.argsort(mu_vals)[-num_multistart:], :]
parallel_results = parallel(delayed(find_mu_star)(pt) for pt in start_points)
negative_mu_star, mu_star_point = process_parallel_results(parallel_results)
print "mu_star found"
if -min_negative_kg < exploitation_threshold:
sample_IS = exploitation_IS
print "KG search failed, do exploitation"
point_to_sample = mu_star_point
sample_val = obj_func_min.evaluate(sample_IS, point_to_sample)
predict_mean = kg_gp_cpp.compute_mean_of_points(numpy.concatenate(([0], point_to_sample)).reshape((1,-1)))[0]
predict_var = kg_gp_cpp.compute_variance_of_points(numpy.concatenate(([0], point_to_sample)).reshape((1,-1)))[0,0]
cost = noise_and_cost_func(sample_IS, point_to_sample)[1]
mu_star_var = kg_gp_cpp.compute_variance_of_points(numpy.concatenate(([0], mu_star_point)).reshape((1,-1)))[0,0]
mu_star_truth = obj_func_min.evaluate(truth_IS, mu_star_point)
multi_kg_result.add_entry(point_to_sample, sample_IS, sample_val, best_sampled_val, truth_at_best_sampled, predict_mean, predict_var, cost, -min_negative_kg, mu_star=negative_mu_star, mu_star_var=mu_star_var, mu_star_truth=mu_star_truth, mu_star_point=mu_star_point)
print "pt: {0} \n IS: {1} \n val: {2} \n voi: {3} \n best_sample_truth: {4} \n mu_star_point: {5} \n mu_star_truth: {6} \n total cost: {7}".format(
point_to_sample, sample_IS, sample_val, -min_negative_kg, truth_at_best_sampled, mu_star_point, mu_star_truth, multi_kg_result._total_cost
)
if sample_val < best_sampled_val:
best_sampled_val = sample_val
best_sampled_point = point_to_sample
truth_at_best_sampled = obj_func_min.evaluate(truth_IS, best_sampled_point)
kg_gp_cpp.add_sampled_points([SamplePoint(numpy.concatenate(([sample_IS], point_to_sample)), -sample_val, noise_and_cost_func(sample_IS, point_to_sample)[0])])
|
[
"[email protected]"
] | |
e88fda088c2f404fcd59acff5d348e1c622ee84c
|
222469236b22fe44d4cfdd791fcdeaada73138ba
|
/migrations/versions/e84c5458f62c_add_all_models.py
|
b6f6d698e3633c9afc088ed7a1cde6df46d56fbe
|
[
"MIT"
] |
permissive
|
vshalt/flask-reddit
|
9b807d098e1ce0c3a9858528a01baf2e6800e259
|
72059a6bb1a10af7937fe5db9833fd2513b1db51
|
refs/heads/main
| 2023-01-13T06:49:21.685939 | 2020-11-21T12:49:13 | 2020-11-21T12:49:13 | 314,663,793 | 2 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,347 |
py
|
"""add: all models
Revision ID: e84c5458f62c
Revises: 985b5267334c
Create Date: 2020-11-16 01:36:12.374598
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'e84c5458f62c'
down_revision = '985b5267334c'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('communities',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(length=128), nullable=True),
sa.Column('description', sa.String(length=512), nullable=True),
sa.Column('timestamp', sa.DateTime(), nullable=True),
sa.Column('user_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['user_id'], ['users.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_table('community_participants',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('user_id', sa.Integer(), nullable=True),
sa.Column('community_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['community_id'], ['communities.id'], ),
sa.ForeignKeyConstraint(['user_id'], ['users.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_table('posts',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('title', sa.String(length=256), nullable=True),
sa.Column('body', sa.Text(), nullable=True),
sa.Column('timestamp', sa.DateTime(), nullable=True),
sa.Column('community_id', sa.Integer(), nullable=True),
sa.Column('author_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['author_id'], ['users.id'], ),
sa.ForeignKeyConstraint(['community_id'], ['communities.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_table('post_votes',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('count', sa.Integer(), nullable=True),
sa.Column('user_id', sa.Integer(), nullable=True),
sa.Column('post_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['post_id'], ['posts.id'], ),
sa.ForeignKeyConstraint(['user_id'], ['users.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_table('replies',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('body', sa.Text(), nullable=True),
sa.Column('timestamp', sa.DateTime(), nullable=True),
sa.Column('author_id', sa.Integer(), nullable=True),
sa.Column('post_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['author_id'], ['users.id'], ),
sa.ForeignKeyConstraint(['post_id'], ['posts.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_table('reply_votes',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('count', sa.Integer(), nullable=True),
sa.Column('user_id', sa.Integer(), nullable=True),
sa.Column('reply_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['reply_id'], ['replies.id'], ),
sa.ForeignKeyConstraint(['user_id'], ['users.id'], ),
sa.PrimaryKeyConstraint('id')
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('reply_votes')
op.drop_table('replies')
op.drop_table('post_votes')
op.drop_table('posts')
op.drop_table('community_participants')
op.drop_table('communities')
# ### end Alembic commands ###
|
[
"[email protected]"
] | |
4e3fa15f47c92e5b5c108003def6343323d488e5
|
bd5cf1a71604fe2b9ec5ca77803e396c802ac02e
|
/codeforces/ppc/02contest/l.py
|
9acb8a4ae8ee4ca34c966cb87e4f8b67bce29f53
|
[] |
no_license
|
rafaelmakaha/competition-programming
|
405e665dd7d3b42f40e086c9ab006e9e6a9d9307
|
6840b53dcb042497d3446bb267786984e0360fb9
|
refs/heads/master
| 2023-04-01T22:45:25.067308 | 2021-04-03T15:00:33 | 2021-04-03T15:00:33 | 177,287,902 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,652 |
py
|
'''
L. Way Too Long Words
time limit per test1 second
memory limit per test256 megabytes
inputstandard input
outputstandard output
Sometimes some words like "localization" or "internationalization" are so long that writing them many times in one text is quite tiresome.
Let's consider a word too long, if its length is strictly more than 10 characters. All too long words should be replaced with a special abbreviation.
This abbreviation is made like this: we write down the first and the last letter of a word and between them we write the number of letters between the first and the last letters. That number is in decimal system and doesn't contain any leading zeroes.
Thus, "localization" will be spelt as "l10n", and "internationalization» will be spelt as "i18n".
You are suggested to automatize the process of changing the words with abbreviations. At that all too long words should be replaced by the abbreviation and the words that are not too long should not undergo any changes.
Input
The first line contains an integer n (1 ≤ n ≤ 100). Each of the following n lines contains one word. All the words consist of lowercase Latin letters and possess the lengths of from 1 to 100 characters.
Output
Print n lines. The i-th line should contain the result of replacing of the i-th word from the input data.
'''
def main():
    n = int(input())
    ans = []
    for _ in range(n):
        word = input()
        if len(word) > 10:
            ans.append(word[0] + str(len(word) - 2) + word[-1])
        else:
            ans.append(word)
    for abbreviation in ans:
        print(abbreviation)
main()
|
[
"[email protected]"
] | |
a4e7abec0d0fbe8262a04058bd93baabf0bee303
|
c99006d4bfe69b72757b267f2c51fb1ffcf4d8aa
|
/baseline3-8050.py
|
be726a05653739fc9140058e3c93df3c375b1eae
|
[] |
no_license
|
a550461053/2019CCF-jinnan
|
24a6f5dce8956b4d61427e6099d49cbada534f1f
|
2633f2d23a5e54a0db5b0074c9220a9846129454
|
refs/heads/master
| 2020-04-17T11:56:21.211932 | 2019-01-19T15:28:36 | 2019-01-19T15:28:36 | 166,560,518 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 10,646 |
py
|
# coding=utf-8
# @Time : 2019-01-19 10:07
# @Author : Batista-yu
# @Contact : [email protected]
# @license : (C) Copyright 2016-2018, Batista Yu Limited.
'''
'''
import numpy as np
import pandas as pd
import lightgbm as lgb
import xgboost as xgb
from sklearn.linear_model import BayesianRidge
from sklearn.model_selection import KFold, RepeatedKFold
from sklearn.preprocessing import OneHotEncoder, LabelEncoder
from scipy import sparse
import warnings
import time
import sys
import os
import re
import datetime
import matplotlib.pyplot as plt
import seaborn as sns
# import plotly.offline as py
# py.init_notebook_mode(connected=True)
# import plotly.graph_objs as go
# import plotly.tools as tls
from sklearn.metrics import mean_squared_error
from sklearn.metrics import log_loss
import logging
logging.basicConfig(level=logging.DEBUG, filename="baseline_logfile_1_15",
filemode="a+", format="%(asctime)-15s %(levelname)-8s %(message)s")
pre_root_path = "data/pre-data"
result_path = "result"
train = pd.read_csv(pre_root_path + '/jinnan_round1_train_20181227.csv', encoding = 'gb18030')
test = pd.read_csv(pre_root_path + '/jinnan_round1_testA_20181227.csv', encoding = 'gb18030')
print('load data')
target_col = "收率"
# 删除异常值
print(train[train['收率'] < 0.87])
train = train[train['收率'] > 0.87]
train.loc[train['B14'] == 40, 'B14'] = 400
train = train[train['B14']>=400]
# merge the datasets; fix anomalous values along the way
target = train['收率']
train.loc[train['A25'] == '1900/3/10 0:00', 'A25'] = train['A25'].value_counts().values[0]
train['A25'] = train['A25'].astype(int)
train.loc[train['B14'] == 40, 'B14'] = 400
# test.loc[test['B14'] == 385, 'B14'] = 385
test_select = {}
for v in [280, 385, 390, 785]:
print(v)
print(test[test['B14'] == v]['样本id'])
test_select[v] = test[test['B14'] == v]['样本id'].index
print(test[test['B14'] == v]['样本id'].index)
print(test_select[v])
del train['收率']
data = pd.concat([train,test],axis=0,ignore_index=True)
data = data.fillna(-1)
def timeTranSecond(t):
try:
t, m, s = t.split(":")
except:
if t == '1900/1/9 7:00':
return 7 * 3600 / 3600
elif t == '1900/1/1 2:30':
return (2 * 3600 + 30 * 60) / 3600
elif t == -1:
return -1
else:
return 0
try:
tm = (int(t) * 3600 + int(m) * 60 + int(s)) / 3600
except:
return (30 * 60) / 3600
return tm
for f in ['A5', 'A7', 'A9', 'A11', 'A14', 'A16', 'A24', 'A26', 'B5', 'B7']:
try:
data[f] = data[f].apply(timeTranSecond)
except:
        print(f, 'should have been dropped earlier!')
def getDuration(se):
try:
sh, sm, eh, em = re.findall(r"\d+\.?\d*", se)
except:
if se == -1:
return -1
try:
if int(sh) > int(eh):
tm = (int(eh) * 3600 + int(em) * 60 - int(sm) * 60 - int(sh) * 3600) / 3600 + 24
else:
tm = (int(eh) * 3600 + int(em) * 60 - int(sm) * 60 - int(sh) * 3600) / 3600
except:
if se == '19:-20:05':
return 1
elif se == '15:00-1600':
return 1
return tm
for f in ['A20', 'A28', 'B4', 'B9', 'B10', 'B11']:
data[f] = data.apply(lambda df: getDuration(df[f]), axis=1)
data['样本id'] = data['样本id'].apply(lambda x: x.split('_')[1])
data['样本id'] = data['样本id'].astype(int)
# basic preprocessing done; start assembling the paired data
train = data[:train.shape[0]]
test = data[train.shape[0]:]
train['target'] = list(target)
new_train = train.copy()
new_train = new_train.sort_values(['样本id'], ascending=True)
train_copy = train.copy()
train_copy = train_copy.sort_values(['样本id'], ascending=True)
# double the length of train by duplicating it
train_len = len(new_train)
new_train = pd.concat([new_train, train_copy])
# append the doubled train after test
new_test = test.copy()
new_test = pd.concat([new_test, new_train])
import sys
# start taking backward differences
diff_train = pd.DataFrame()
ids = list(train_copy['样本id'].values)
print(ids)
from tqdm import tqdm
import os
# build the new (pairwise-differenced) training set
if os.path.exists(pre_root_path + '/diff_train.csv'):
diff_train = pd.read_csv(pre_root_path + '/diff_train.csv')
else:
for i in tqdm(range(1, train_len)):
        # diff at offsets -1, -2, ..., -(train_len-1) rows to obtain every pairwise comparison between experiments
diff_tmp = new_train.diff(-i)
diff_tmp = diff_tmp[:train_len]
diff_tmp.columns = [col_ + '_difference' for col_ in
diff_tmp.columns.values]
        # attach the sample ids after differencing
diff_tmp['样本id'] = ids
diff_train = pd.concat([diff_train, diff_tmp])
diff_train.to_csv(pre_root_path + '/diff_train.csv', index=False)
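# A toy illustration of the pairwise-differencing trick above (a sketch, not
# part of the pipeline): with train rows [a, b, c] duplicated into
# [a, b, c, a, b, c], diff(-1) over the first three rows yields
# [a-b, b-c, c-a] and diff(-2) yields [a-c, b-a, c-b]; together every
# ordered pair of samples is compared exactly once.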
# build the new test set the same way
diff_test = pd.DataFrame()
ids_test = list(test['样本id'].values)
test_len = len(test)
if os.path.exists(pre_root_path + '/diff_test.csv'):
diff_test = pd.read_csv(pre_root_path + '/diff_test.csv')
else:
for i in tqdm(range(test_len, test_len+train_len)):
        # diff at offsets -test_len, -test_len-1, ..., -(test_len+train_len-1) so every test row is compared with every train row
diff_tmp = new_test.diff(-i)
diff_tmp = diff_tmp[:test_len]
diff_tmp.columns = [col_ + '_difference' for col_ in
diff_tmp.columns.values]
# 求完差值后加上样本id
diff_tmp['样本id'] = ids_test
diff_test = pd.concat([diff_test, diff_tmp])
diff_test = diff_test[diff_train.columns]
diff_test.to_csv(pre_root_path + '/diff_test.csv', index=False)
print(train.columns.values)
# the target, aligned with the row order of train
train_target = train['target']
train.drop(['target'], axis=1, inplace=True)
# join the raw features back on
diff_train = pd.merge(diff_train, train, how='left', on='样本id')
diff_test = pd.merge(diff_test, test, how='left', on='样本id')
target = diff_train['target_difference']
diff_train.drop(['target_difference'], axis=1, inplace=True)
diff_test.drop(['target_difference'], axis=1, inplace=True)
X_train = diff_train
y_train = target
X_test = diff_test
print(X_train.columns.values)
param = {'num_leaves': 120, #31
'min_data_in_leaf': 20,
'objective': 'regression',
'max_depth': -1,
'learning_rate': 0.01,
# "min_child_samples": 30,
"boosting": "gbdt",
"feature_fraction": 0.9,
"bagging_freq": 1,
"bagging_fraction": 0.9,
"bagging_seed": 11,
"metric": 'mse',
"lambda_l2": 0.1,
# "lambda_l1": 0.1,
'num_thread': 4,
"verbosity": -1}
groups = X_train['样本id'].values
folds = KFold(n_splits=5, shuffle=True, random_state=2018)
oof_lgb = np.zeros(len(diff_train))
predictions_lgb = np.zeros(len(diff_test))
feature_importance = pd.DataFrame()
feature_importance['feature_name'] = X_train.columns.values
for fold_, (trn_idx, val_idx) in enumerate(folds.split(X_train, y_train)):
print("fold n°{}".format(fold_ + 1))
dev = X_train.iloc[trn_idx]
val = X_train.iloc[val_idx]
trn_data = lgb.Dataset(dev, y_train.iloc[trn_idx])
val_data = lgb.Dataset(val, y_train.iloc[val_idx])
num_round = 20000 # 3000
clf = lgb.train(param, trn_data, num_round, valid_sets=[trn_data, val_data], verbose_eval=5,
early_stopping_rounds=100)
oof_lgb[val_idx] = clf.predict(val, num_iteration=clf.best_iteration)
predictions_lgb += clf.predict(X_test, num_iteration=clf.best_iteration) / folds.n_splits
importance = clf.feature_importance(importance_type='gain')
feature_name = clf.feature_name()
tmp_df = pd.DataFrame({'feature_name': feature_name, 'importance': importance})
feature_importance = pd.merge(feature_importance, tmp_df, how='left',
on='feature_name')
print(len(feature_importance['feature_name']))
print(len(diff_train))
feature_importance.to_csv(result_path + '/eda/feature_importance2.csv', index=False)
# reconstruct the train target: predicted difference plus the compared sample's true target
diff_train['compare_id'] = diff_train['样本id'] - diff_train['样本id_difference']
train['compare_id'] = train['样本id']
train['compare_target'] = list(train_target)
# join the compared samples' targets back on
diff_train = pd.merge(diff_train, train[['compare_id', 'compare_target']], how='left', on='compare_id')
print(diff_train.columns.values)
diff_train['pre_target_diff'] = oof_lgb
diff_train['pre_target'] = diff_train['pre_target_diff'] + diff_train['compare_target']
mean_result = diff_train.groupby('样本id')['pre_target'].mean().reset_index(name='pre_target_mean')
true_result = train[['样本id', 'compare_target']]
mean_result = pd.merge(mean_result, true_result, how='left', on='样本id')
print(mean_result)
print("CV score: {:<8.8f}".format(mean_squared_error(oof_lgb, target)))
logging.info("Lgb CV score: {:<8.8f}".format(mean_squared_error(oof_lgb, target)))
print("CV score: {:<8.8f}".format(mean_squared_error(mean_result['pre_target_mean'].values, mean_result['compare_target'].values)))
logging.info("Lgb CV score: {:<8.8f}".format(mean_squared_error(mean_result['pre_target_mean'].values, mean_result['compare_target'].values)))
# pre_target = mean_result['pre_target_mean'].values
# true_target = mean_result['']
# reconstruct the test target the same way
diff_test['compare_id'] = diff_test['样本id'] - diff_test['样本id_difference']
diff_test = pd.merge(diff_test, train[['compare_id', 'compare_target']], how='left', on='compare_id')
diff_test['pre_target_diff'] = predictions_lgb
diff_test['pre_target'] = diff_test['pre_target_diff'] + diff_test['compare_target']
mean_result_test = diff_test.groupby(diff_test['样本id'], sort=False)['pre_target'].mean().reset_index(name='pre_target_mean')
print(mean_result_test)
test = pd.merge(test, mean_result_test, how='left', on='样本id')
sub_df = pd.read_csv(pre_root_path + '/jinnan_round1_submit_20181227.csv', header=None)
sub_df[1] = test['pre_target_mean']
# sub_df[1] = sub_df[1].apply(lambda x:round(x, 3))
sub_df.to_csv(result_path + '/jinnan_round1_submit_20181227_3_2.csv', index=0, header=0)  # save a copy, without index or header rows
print('save done!')
for v in test_select.keys():
if v == 280:
x = 0.947
elif v == 385 or v == 785:
x = 0.879
elif v == 390:
x = 0.89
print(v)
print(test_select[v])
# sub_df.iloc[test_select[v]][1] = x
sub_df.loc[test_select[v], 1] = x
sub_df.to_csv(result_path + '/jinnan_round_submit_diff_2.csv', index=False, header=False)
print(len(diff_train))
# training's l2: 0.00014802 valid_1's l2: 0.000148992
|
[
"[email protected]"
] | |
50fd88816adb4f8b0d7547c4fe8995661dad64a1
|
47538a35f3900a3bd06913a64b52ad77874a0941
|
/practice3.py
|
3507322f90b6d5f74358cfde1e2f7c12f4818ba5
|
[] |
no_license
|
thxxx/Python_learning
|
ef42ae95f785fa91409b1b8f839e759d5e17a935
|
d2a9f200a745a5b613ed75006f23b9abf5c62378
|
refs/heads/main
| 2023-06-06T14:48:57.427760 | 2021-06-27T00:13:34 | 2021-06-27T00:13:34 | 326,104,816 | 0 | 0 | null | 2021-06-27T00:13:34 | 2021-01-02T04:01:17 |
Python
|
UTF-8
|
Python
| false | false | 1,661 |
py
|
import os
import requests
from bs4 import BeautifulSoup
from babel.numbers import format_currency
os.system("clear")
url = "https://www.iban.com/currency-codes"
countries = []
request = requests.get(url)
soup = BeautifulSoup(request.text, "html.parser")
table = soup.find("table")
rows = table.find_all("tr")[1:]
for row in rows :
items = row.find_all("td")
name = items[0].text
code =items[2].text
if name and code:
if name != "No universal currency":
country = {
'name':name.capitalize(),
'code':code,
}
countries.append(country)
def ask() -> str:
    try:
        num = int(input("#: "))
        if num >= len(countries):  # valid indexes are 0..len(countries)-1
            print("Choose a number from the list.")
            return ask()
        country = countries[num]
        result = country['code']
        print(result)
        return result
    except ValueError:
        print("That wasn't a number.")
        return ask()
def ask_amount(first:str, second:str):
    try:
        money = int(input(f"How many {first} do you want to convert to {second}? \n"))
    except ValueError:
        print("That wasn't a number.")
        return ask_amount(first, second)
    print(money)
print("Where are you from? Choose a country by number")
for index, country in enumerate(countries):
print(f"#{index} {country['name']}")
ask()
print("Now choose another country.")
ask()
#ask_amount(first_country,second_country)
"""
Use the 'format_currency' function to format the output of the conversion
format_currency(AMOUNT, CURRENCY_CODE, locale="ko_KR" (no need to change this one))
"""
print(format_currency(5000, "KRW", locale="ko_KR"))
|
[
"[email protected]"
] | |
bd159b3012e8e7eba9eba97fabcc90c2eaa40352
|
431418fb8be3eb611cb417538e7a06e5a2e1267d
|
/talosblockchain/talosvc/test/test_policy.py
|
87fef750c16853ba27307ad05f0bc3971025e7e2
|
[
"Apache-2.0"
] |
permissive
|
chunchuan-wang/droplet-engine
|
a4f6e3c4d6d50fe40942ff44bf67865b77f3ebfa
|
6f542c456ba51162aec183c22f56efbe0accfc4b
|
refs/heads/main
| 2023-01-03T05:03:02.612949 | 2020-11-03T12:32:46 | 2020-11-03T12:32:46 | 300,635,447 | 0 | 0 |
Apache-2.0
| 2020-10-02T14:11:00 | 2020-10-02T14:10:59 | null |
UTF-8
|
Python
| false | false | 1,527 |
py
|
import unittest
from talosvc.policy import Policy, create_policy_from_json_str
from talosvc.config import *
from talosvc.policydb import create_db, TalosPolicyDB
import binascii
class TestPolicy(unittest.TestCase):
def test_1(self):
policy = Policy("Me", 12, 1, "nonce")
policy.add_share(["A", "B"])
policy.add_time_interval(12, 3)
print policy.to_json()
res = """{"_start_points": [12], "_shares": ["A", "B"], "_intervals": [12]
, "_version": 1, "_owner": "Me", "_stream_id": 12, "_nonce": "nonce"}"""
self.assertEquals(res, policy.to_json())
def test_2(self):
state = TalosPolicyDB("./talos-virtualchain.db")
policyA = state.get_policy("mtr5ENEQ73HZMeZvUEjXdWRJvMhQJMHzcJ", 1)
policyA_str = policyA.to_json()
polcyB = create_policy_from_json_str(policyA_str)
self.assertEquals(policyA_str, polcyB.to_json())
print policyA_str
class TestRandom(unittest.TestCase):
def test_rand(self):
str = get_policy_cmd_create_str(1, 12, 13, 24, "ABDGFHDTARSGDTSF")
res = parse_policy_cmd_create_data(str[3:])
print res
def test_addd(self):
str = get_policy_cmd_addaccess_str()
res = parse_policy_cmd_create_data(str[3:])
print res
    def test_create_db(self):  # renamed: a second test_rand would shadow the one above
db = create_db("test.db")
db.close()
print "ok"
def test_dem1(self):
data = get_policy_cmd_create_str(1, 1, 100, 200, '\x00' * 16)
print binascii.hexlify(data)
|
[
"[email protected]"
] | |
028717923cb8ac9e5f0ec24005740682c5357995
|
dcf8dc9d8c248a1932d3e5fa5c9f8fc380d030a2
|
/setup.py
|
70f047b608a86f3f008fe7fa6d74edde15e21a66
|
[
"MIT"
] |
permissive
|
xqdzn/pyzatt
|
8e59f0cd0d8806f006bf393065b39f63b7792075
|
d61ae5cdc93400b7714bfbb434f76bbde64f631a
|
refs/heads/master
| 2022-12-23T16:29:59.694383 | 2020-09-09T23:43:43 | 2020-09-09T23:43:43 | 293,735,267 | 0 | 0 |
MIT
| 2020-09-08T07:32:48 | 2020-09-08T07:32:47 | null |
UTF-8
|
Python
| false | false | 1,516 |
py
|
#!/usr/bin/env python
"""The setup script."""
from setuptools import setup, find_packages
with open('README.rst') as readme_file:
readme = readme_file.read()
with open('HISTORY.rst') as history_file:
history = history_file.read()
requirements = ['Click>=7.0', ]
setup_requirements = ['pytest-runner', ]
test_requirements = ['pytest>=3', ]
setup(
author="Alexander Marin",
author_email='[email protected]',
python_requires='>=3.5',
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
],
description="Python lib to access ZKTeco's standalone devices",
entry_points={
'console_scripts': [
'pyzatt=pyzatt.cli:main',
],
},
install_requires=requirements,
license="MIT license",
long_description=readme + '\n\n' + history,
include_package_data=True,
keywords='pyzatt',
name='pyzatt',
packages=find_packages(include=['pyzatt', 'pyzatt.*']),
setup_requires=setup_requirements,
test_suite='tests',
tests_require=test_requirements,
url='https://github.com/adrobinoga/pyzatt',
version='2.0.0',
zip_safe=False,
)
|
[
"[email protected]"
] | |
6263e039ad9af13ef791509c181faa8db28b8c59
|
95328183fc4a8be5dfac47d36eca7d6c62393432
|
/server/helpers/generate_link.py
|
bbc5d0a6397e5287aa48e10ab39f906672e99658
|
[] |
no_license
|
KirillIvano/pickles
|
cbf2426b1a51d10c9fb8c37aa22d0fec00a14f58
|
3cf53403093f99c257883a276ac79d7f2b588a05
|
refs/heads/master
| 2023-08-07T16:42:24.904373 | 2021-07-14T20:57:34 | 2021-07-14T20:57:34 | 277,870,222 | 0 | 0 | null | 2021-04-02T13:41:26 | 2020-07-07T16:38:27 |
TypeScript
|
UTF-8
|
Python
| false | false | 119 |
py
|
from aglobell.settings import HOST
def get_order_link(order):
return f'{HOST}/order/{order.id}?key={order.hash}'
|
[
"[email protected]"
] | |
7cf273190d96620eee668d08dc2db2285ad01b3b
|
0e5c7b5e82d11ff3af358e8d83a76adda0dcbb53
|
/generator/generator.py
|
50350dc31c14d8e56a496c40a1110e8016ce88e1
|
[] |
no_license
|
zkroliko/ADPTO-Project--Queens
|
64d8dfa95866ad1759bf62ef69f610608ff27408
|
34fa1eab0507f9d356abdc85caa5e614702f89af
|
refs/heads/master
| 2021-01-20T20:36:36.932686 | 2016-10-09T17:16:23 | 2016-10-09T17:16:23 | 63,703,642 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,162 |
py
|
#!/usr/bin/env python
# coding=utf-8
# generator.py
import random
from sys import *
PRINT_MOVES = True
MAX_TRIES_FOR_TARGET = 10
MAX_TOTAL_EXPONENT = 25 # 60
MAX_BOARD_SIZE = 128
directions = {
"top_left": (-1, -1),
"top": (0, -1),
"top_right": (1, -1),
"right": (1, 0),
"bottom_right": (1, 1),
"bottom": (0, 1),
"bottom_left": (-1, 1),
"left": (-1, 0)
}
class Board():
def __init__(self, size):
self.size = size
self.queens = {}
def printOut(self):
        M = max(pow(2, power) for queen, power in self.queens.iteritems())
        L = len(str(M)) + 1  # width of the widest printed value plus a space ('2 ^ M' was XOR, not exponentiation)
for row in range(0, self.size):
r = ""
for cell in range(0, self.size):
x = str(pow(2, self.queens[(row, cell)])) if (row, cell) in self.queens else "0"
r += x + " " * (L - len(x) + 1)
print r
print ""
    # Try to move the queen at source; returns the destination square, or None/False if stuck
def move(self, source):
if self.queens[source] <= 0:
return False
for direction, offset in directions.iteritems():
possibility = self.findMove(source, direction)
if possibility:
self.queens[source] -= 1
self.queens[possibility] = self.queens[source]
return possibility
return None
# Find move in direction
def findMove(self, source, direction):
x, y = source[0] + directions[direction][0], source[1] + directions[direction][1]
possible = []
while (x, y) not in self.queens and 0 <= x < self.size and 0 <= y < self.size:
possible.append((x, y))
x += directions[direction][0]
y += directions[direction][1]
        if len(possible) > 0:
            return random.choice(possible)  # uniform(0, n) could (rarely) return n exactly
        else:
            return None
# Generate initial points
def createInitial(target, board):
initial = board.queens
count = 0
powerLeft = MAX_TOTAL_EXPONENT
while len(initial) < target:
        x = random.randrange(board.size)
        y = random.randrange(board.size)
power = int(random.triangular(0, powerLeft))
powerLeft -= power
if (x, y) not in initial:
initial[(x, y)] = power
# Creates the problem
def divide(board, minimalMoves):
checked = set()
moves = []
while len(moves) < minimalMoves and len(checked) < len(board.queens):
        queen = random.choice(board.queens.keys())
        while queen in checked:
            queen = random.choice(board.queens.keys())
if queen not in checked:
move = board.move(queen)
while move and len(moves) < minimalMoves:
moves.append((move,queen))
move = board.move(queen)
if move:
moves.append((move,queen))
# Queen is moving here
checked.add(queen)
return moves if len(moves) >= minimalMoves else []
# Printing moves
def printMoves(moves):
moves.reverse()
for move in moves:
print("%s %s %s %s" % (move[0][0], move[0][1], move[1][0], move[1][1]))
# Main
def main():
if (len(argv) != 4):
print("USE: generator.py <board size> <queen count> <targetMoves>")
exit(-1)
queenTarget = int(argv[2])
boardSize = int(argv[1])
if (boardSize not in range(1, MAX_BOARD_SIZE)):
print("%s is a bad board size, range is (0,%s)" % (boardSize, MAX_BOARD_SIZE))
exit(-1)
target_moves = int(argv[3])
while (target_moves > 0):
found = False
tries = 0
while not found and tries < MAX_TRIES_FOR_TARGET:
board = Board(int(argv[1]))
createInitial(queenTarget, board)
found = divide(board, target_moves)
if len(found) > 0:
# FOUND THE RESULT
print board.size
print queenTarget
board.printOut()
if PRINT_MOVES:
printMoves(found)
return 0
            tries += 1  # count the attempt; decrementing would loop forever on failure
target_moves -= 1
main()
|
[
"[email protected]"
] | |
3aeef00917e1279b07e812d3dc03007fc0495598
|
3545d589a6d33f44c0da75ed12e64d8530677c70
|
/examples/anime.py
|
7b53f9dcbccdb65db224694eddb77cd23316b714
|
[
"MIT"
] |
permissive
|
pengode-handal/animec
|
f6ff2e335e19be5d728c545ca595263a24fe5e24
|
1f1940e69e13e68e6e4891f786694ee2dfd8994d
|
refs/heads/main
| 2023-06-29T21:19:13.001375 | 2021-08-09T03:55:01 | 2021-08-09T03:55:01 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,056 |
py
|
from animec import Anime, NoResultFound
def get_anime(name: str):
try:
anime = Anime(name)
except NoResultFound:
return None
return anime
def body(base: Anime):
display_body = f"""
Name: {base.name}
Alt Titles: {base.alt_titles}
Description: {base.description}
Episodes: {base.episodes}
Aired: {base.aired}
Broadcast: {base.broadcast}
Rating: {base.rating}
Ranking: {base.ranked}
    Popularity: {base.popularity}
Type: {base.type}
Status: {base.status}
Producers: {", ".join(base.producers)}
Genres: {", ".join(base.genres)}
Opening Themes: {", ".join(base.opening_themes)}
Ending Themes: {", ".join(base.ending_themes)}
"""
return display_body
def prompt():
inp = input("\nPlease input the name of the anime you wish to search for: ")
anime = get_anime(inp)
if anime:
return body(anime)
else:
return "\nIt looks like I couldn't find the anime you were looking for."
print(prompt())
|
[
"[email protected]"
] | |
d9477781652f2bd42dbb5cac6cabe820b018c6c5
|
187b62e1b808fa92b04912c211301a5695a2d1fe
|
/Hackerrank/algorithms - practice/implementation/10-bon-appetit.py
|
4595a6c29fa1219a1e8c86ae65bc03e81a98c08e
|
[] |
no_license
|
tonymontaro/Algorithmic-Problem-Solving
|
fda498ee841a88e9e43ad624136e86b4e101e587
|
290e4c3af79ff5477b31a314a08eb24f3690890b
|
refs/heads/master
| 2021-06-07T04:49:22.786439 | 2020-11-24T10:49:00 | 2020-11-24T10:49:00 | 122,983,998 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 260 |
py
|
def bon_appetit(n, k, bill, ar):
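    """HackerRank 'Bon Appetit': two friends split the bill evenly, but one
    of them skipped item k, so her fair share is half of the remaining items.
    Return 'Bon Appetit' if she was charged exactly that, otherwise how much
    she was overcharged. (Framing recalled from the classic puzzle; the code
    itself only assumes the two-way split.)"""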
correct_bill = (sum(ar) - ar[k]) / 2
return 'Bon Appetit' if correct_bill == bill else int(
(bill - correct_bill))
print(bon_appetit(4, 1, 12, [3, 10, 2, 9])) # 5
print(bon_appetit(4, 1, 7, [3, 10, 2, 9]))  # Bon Appetit
|
[
"[email protected]"
] | |
b0e9a2e9dea4cf099fab569ff2818440f8a53bc4
|
2c0bcae51ffbd26080af47616992963c38ba9e83
|
/bridge/methods.py
|
c34cdaf16bc3272cd7fd2e0ea1ca321a534e247a
|
[
"MIT"
] |
permissive
|
Emilien-P/CompVis-project
|
ba1b39575a18e00d5e8ac59e5a6568923f4f07e5
|
41b236e120d134319135f5e8b240b065b3fb180e
|
refs/heads/master
| 2020-03-18T07:43:41.758660 | 2018-06-08T19:10:58 | 2018-06-08T19:10:58 | 134,469,945 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 15,598 |
py
|
"""
An entire file for you to expand. Add methods here, and the client should be
able to call them with json-rpc without any editing to the pipeline.
"""
#def count(number):
# """It counts. Duh. Note: intentionally written to break on non-ints"""
# return int(number) + 1
import argparse
import os
import shutil
import time
import sys
from PIL import Image
import dataloaderhelper
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import torchvision.models as models
import torch.utils.data as data
model_names = sorted(name for name in models.__dict__
if name.islower() and not name.startswith("__")
and callable(models.__dict__[name]))
parser = argparse.ArgumentParser(description='PyTorch ImageNet Training')
parser.add_argument('--data', '-d', metavar='DIR', default='hold2',
help='path to dataset')
parser.add_argument('--port', '-por', metavar='PORT', default='8000')
parser.add_argument('--arch', '-a', metavar='ARCH', default='resnet50',
choices=model_names,
help='model architecture: ' +
' | '.join(model_names) +
' (default: resnet50)')
parser.add_argument('-j', '--workers', default=4, type=int, metavar='N',
help='number of data loading workers (default: 4)')
parser.add_argument('--epochs', default=90, type=int, metavar='N',
help='number of total epochs to run')
parser.add_argument('--start-epoch', default=0, type=int, metavar='N',
help='manual epoch number (useful on restarts)')
parser.add_argument('-b', '--batch-size', default=1, type=int,
metavar='N', help='mini-batch size (default: 256)')
parser.add_argument('--lr', '--learning-rate', default=0.01, type=float,
metavar='LR', help='initial learning rate')
parser.add_argument('--momentum', default=0.9, type=float, metavar='M',
help='momentum')
parser.add_argument('--weight-decay', '--wd', default=1e-4, type=float,
metavar='W', help='weight decay (default: 1e-4)')
parser.add_argument('--print-freq', '-p', default=10, type=int,
metavar='N', help='print frequency (default: 10)')
parser.add_argument('--resume', type=str, metavar='PATH',
help='path to latest checkpoint', default='model_best.pth.tar')
parser.add_argument('-e', '--evaluate', dest='evaluate', action='store_true',
help='evaluate model on validation set')
parser.add_argument('--pretrained', dest='pretrained', action='store_true',
help='use pre-trained model')
parser.add_argument('--world-size', default=1, type=int,
help='number of distributed processes')
parser.add_argument('--dist-url', default='tcp://224.66.41.62:23456', type=str,
help='url used to set up distributed training')
parser.add_argument('--dist-backend', default='gloo', type=str,
help='distributed backend')
parser.add_argument('--folderToTest', default='hold2', type=str)
best_prec1 = 0
#def count(number):
# return int(number) + 1
def count(number):
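    """Classify the image(s) under the folder `number` and return an HTML
    snippet listing the top-5 food predictions.

    The name `count` is kept from the json-rpc example this file replaced;
    the argument is really a path, not a number. The function restores the
    checkpoint named by --resume, puts the model in eval mode, and formats
    the top-5 class names (joined with <br> tags) for the client.
    """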
pathtofile = number
global args, best_prec1
args = parser.parse_args()
args.distributed = args.world_size > 1
if args.distributed:
dist.init_process_group(backend=args.dist_backend, init_method=args.dist_url,
world_size=args.world_size)
# create model
if args.pretrained:
print("=> using pre-trained model '{}'".format(args.arch))
model = models.__dict__[args.arch](pretrained=True)
else:
print("=> creating model '{}'".format(args.arch))
model = models.__dict__[args.arch]()
if not args.distributed:
if args.arch.startswith('alexnet') or args.arch.startswith('vgg'):
model.features = torch.nn.DataParallel(model.features)
model.cuda()
else:
model = torch.nn.DataParallel(model).cuda()
else:
model.cuda()
model = torch.nn.parallel.DistributedDataParallel(model)
# define loss function (criterion) and optimizer
criterion = nn.CrossEntropyLoss().cuda()
optimizer = torch.optim.SGD(model.parameters(), args.lr,
momentum=args.momentum,
weight_decay=args.weight_decay)
# optionally resume from a checkpoint
if args.resume:
if os.path.isfile(args.resume):
print("=> loading checkpoint '{}'".format(args.resume))
checkpoint = torch.load(args.resume)
args.start_epoch = checkpoint['epoch']
best_prec1 = checkpoint['best_prec1']
model.load_state_dict(checkpoint['state_dict'])
optimizer.load_state_dict(checkpoint['optimizer'])
print("=> loaded checkpoint '{}' (epoch {})"
.format(args.resume, checkpoint['epoch']))
else:
print("=> no checkpoint found at '{}'".format(args.resume))
#print("made it past resume")
# cudnn.benchmark = True
# Data loading code
# basestr = 'foodfolder/train'
# traindir = os.path.join(basestr, 'train')
#valdir =
# traindir = basestr
# valdir = basestr
#print("made it to train loading")
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
classes = ("Apple Pie", "Baby back ribs","Baklava","Beef carpaccio","Beef tartare","Beet salad","Beignets","Bibimbap","Bread pudding","Breakfast burrito","Bruschetta","Caesar salad","Cannoli","Caprese salad","Carrot cake","Ceviche","Cheesecake","Cheese plate","Chicken curry","Chicken quesadilla","Chicken wings","Chocolate cake","Chocolate mousse","Churros","Clam chowder","Club sandwich","Crab cakes","Creme brulee","Croque madame","Cup cakes","Deviled eggs","Donuts","Dumplings","Edamame","Eggs benedict","Escargots","Falafel","Filet mignon","Fish and chips","Foie gras","French fries","French onion soup","French toast","Fried calamari","Fried rice","Frozen yogurt","Garlic bread","Gnocchi","Greek salad","Grilled cheese sandwich","Grilled salmon","Guacamole","Gyoza","Hamburger","Hot and sour soup","Hot dog","Huevos rancheros","Hummus","Ice cream","Lasagna","Lobster bisque","Lobster roll sandwich","Macaroni and cheese","Macarons","Miso soup","Mussels","Nachos","Omelette","Onion rings","Oysters","Pad thai","Paella","Pancakes","Panna cotta","Peking duck","Pho","Pizza","Pork chop","Poutine","Prime rib","Pulled pork sandwich","Ramen","Ravioli","Red velvet cake","Risotto","Samosa","Sashimi","Scallops","Seaweed salad","Shrimp and grits","Spaghetti bolognese","Spaghetti carbonara","Spring rolls","Steak","Strawberry shortcake","Sushi","Tacos","Takoyaki","Tiramisu","Tuna tartare","Waffles")
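    # (presumably) the Food-101 label set, ordered to match the model's 101 output classes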
model.eval()
keepGoing = True
# print ("folder to test " + str(args.folderToTest))
import numpy as np
while(keepGoing):
# path = input()
# try:
# path = input()
# except EOFError:
# return
# print("trying to cont")
# print(args.folderToTest)
# return
# path = args.folderToTest
# data = sys.stdin.readlines()
# print(data)
path = pathtofile
if path == "exit":
keepGoing = False
else:
# data = torch.utils.data.DataLoader(
# datasets.ImageFolder(path,
# transforms.Compose([
# transforms.Resize(256),
# transforms.CenterCrop(224),
# transforms.ToTensor(),
# normalize,
# ])))
data = torch.utils.data.DataLoader(
dataloaderhelper.ImageFolder(path,
transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
normalize,
])))
top5_accuracies = np.zeros((101, 1))
top1_accuracies = np.zeros((101, 1))
# print(data)
with torch.no_grad():
k = 0
for i, (image, target, filepath) in enumerate(data):
#k += 1
prediction = model(image)
# Compute top 5
temp = np.argsort(np.array(prediction))
top5 = reversed(temp.flatten()[-5:])
# Compute top 1
top1 = np.argmax(prediction)
# j = int(target.item()) # folder number.. so also output node true val if setup
# if j in np.array(top5):
# top5_accuracies[j] += 1
# if j == top1:
# top1_accuracies[top1] += 1
print("filepath " + str(filepath))
#print("number of classes " + str(len(classes)))
#print("index we want " + str(top1.item()-1))
#print("top1 prediction is " + classes[top1.item()-1])
# print(image)
# print(target)
predcounter = 1
                    stringtoret = ""
for pred in top5:
print("top " + str(predcounter) + " prediction is " + classes[pred])
stringtoret = stringtoret + '\n' + "top " + str(predcounter) + " prediction is " + classes[pred] + '<br>'
predcounter = predcounter + 1
# top1_accuracies /= 250
# top5_accuracies /= 250
# np.save("top1_acc", top1_accuracies)
# np.save("top5_acc", top5_accuracies)
# print(top1_accuracies)
sys.stdout.flush()
# TODO REMOVE
keepGoing = False
if 'Hot' in stringtoret:
stringtoret = stringtoret + '<br> <strong>This is likely to be a Hot Dawg! Please watch out for Gluten and Meat product.</strong>'
return stringtoret
def train(train_loader, model, criterion, optimizer, epoch):
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
# switch to train mode
model.train()
end = time.time()
# print(train_loader)
#print("made here 4")
for i, (input, target) in enumerate(train_loader):
# measure data loading time
#print("made here 5")
data_time.update(time.time() - end)
target = target.cuda(non_blocking=True)
#print("made here 6")
# compute output
output = model(input)
loss = criterion(output, target)
#print("made here 7")
# measure accuracy and record loss
prec1, prec5 = accuracy(output, target, topk=(1, 5))
losses.update(loss.item(), input.size(0))
top1.update(prec1[0], input.size(0))
top5.update(prec5[0], input.size(0))
#print("made here 8")
# compute gradient and do SGD step
optimizer.zero_grad()
loss.backward()
optimizer.step()
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % args.print_freq == 0:
print('Epoch: [{0}][{1}/{2}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
epoch, i, len(train_loader), batch_time=batch_time,
data_time=data_time, loss=losses, top1=top1, top5=top5))
sys.stdout.flush()
def validate(val_loader, model, criterion):
batch_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
# switch to evaluate mode
model.eval()
with torch.no_grad():
end = time.time()
for i, (input, target) in enumerate(val_loader):
target = target.cuda(non_blocking=True)
# compute output
output = model(input)
loss = criterion(output, target)
# measure accuracy and record loss
prec1, prec5 = accuracy(output, target, topk=(1, 5))
losses.update(loss.item(), input.size(0))
top1.update(prec1[0], input.size(0))
top5.update(prec5[0], input.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % args.print_freq == 0:
print('Test: [{0}/{1}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
i, len(val_loader), batch_time=batch_time, loss=losses,
top1=top1, top5=top5))
print(' * Prec@1 {top1.avg:.3f} Prec@5 {top5.avg:.3f}'
.format(top1=top1, top5=top5))
return top1.avg
def save_checkpoint(state, is_best, filename='checkpoint.pth.tar'):
torch.save(state, filename)
if is_best:
shutil.copyfile(filename, 'model_best.pth.tar')
class AverageMeter(object):
#"Computes and stores the average and current value""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def adjust_learning_rate(optimizer, epoch):
#""Sets the learning rate to the initial LR decayed by 10 every 30 epochs""
lr = args.lr * (0.1 ** (epoch // 30))
for param_group in optimizer.param_groups:
param_group['lr'] = lr
def accuracy(output, target, topk=(1,)):
#""Computes the precision@k for the specified values of k""
with torch.no_grad():
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)
res.append(correct_k.mul_(100.0 / batch_size))
return res
if __name__ == '__main__':
global args
args = parser.parse_args()
count(args.folderToTest)
#"""
|
[
"[email protected]"
] | |
ed769fa51d32f54a61f3ed5cf8b7c1edf229e81c
|
6474c3439723587026def4cb936f668698aae1a7
|
/users/migrations/0001_initial.py
|
be24f00c5070d7f4b2ff9b95dff8dbbdf9b39316
|
[] |
no_license
|
shadowdevcode/blog-app
|
37219760b02a56926bbcbea878c6f8c28d9c0431
|
6c3c9723429be20162cba61db36fb2c2a1cb554d
|
refs/heads/master
| 2022-12-07T14:06:09.941766 | 2019-07-23T07:41:33 | 2019-07-23T07:41:33 | 167,966,809 | 1 | 0 | null | 2022-11-22T03:24:30 | 2019-01-28T13:27:46 |
JavaScript
|
UTF-8
|
Python
| false | false | 777 |
py
|
# Generated by Django 2.1.5 on 2019-01-27 16:30
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Profile',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('image', models.ImageField(default='default.jpg', upload_to='profile_pics')),
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
|
[
"[email protected]"
] | |
f352972ed899074c94eae89a2beb3d3b03c245b5
|
5f6248b0a950c64f11a6b7a4604dbb8b353c2338
|
/flaskr/__init__.py
|
8ea451354f48b54e77637cd92495c08fa2003c6e
|
[] |
no_license
|
hnumair/flask-tutorial
|
63817ecf6e8e26aa77b5a03948d1206ab4c3b931
|
7401a2fa58c67226a5fcd6c8ca28a4da3d458205
|
refs/heads/master
| 2023-02-21T06:08:47.586276 | 2021-01-20T12:52:26 | 2021-01-20T12:52:26 | 328,558,265 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,016 |
py
|
import os
from flask import Flask
def create_app(test_config=None):
# create and configure the app
print(__name__)
app = Flask(__name__, instance_relative_config=True)
app.config.from_mapping(
SECRET_KEY='abcd',
DATABASE=os.path.join(app.instance_path, 'flaskr.sqlite'),
)
if test_config is None:
# load the instance config, if it exists, when not testing
app.config.from_pyfile('config.py', silent=True)
else:
# load the test config if passed
app.config.from_mapping(test_config)
    # ensure the instance folder exists
try:
os.makedirs(app.instance_path)
except OSError:
pass
# simple page for hello world
@app.route('/hello')
def hello():
return 'Hello, World!'
from . import db
db.init_app(app)
from . import auth
app.register_blueprint(auth.bp)
from . import blog
app.register_blueprint(blog.bp)
app.add_url_rule('/', endpoint='index')
return app
|
[
"[email protected]"
] | |
2d25948fc47ae05e17ec0c8404dc6012cc0a51f0
|
f9c7969c8649c484f2460fb245a3d5bd6870fa5a
|
/ch07/exercises/exercise 35.py
|
85def5a86980f358fd4a9a1b39f5216c13556056
|
[] |
no_license
|
Pshypher/tpocup
|
78cf97d51259bfea944dc205b9644bb1ae4ab367
|
b05b05728713637b1976a8203c2c97dbbfbb6a94
|
refs/heads/master
| 2022-05-18T13:11:31.417205 | 2020-01-07T13:50:06 | 2020-01-07T13:50:06 | 260,133,112 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 373 |
py
|
# Unless stated otherwise, variables are assumed to be of the str data type
def reverse_string(S):
"""Return the string S in reverse order using a for loop."""
S_reverse = ""
for ch in S:
S_reverse = ch + S_reverse
return S_reverse
# Prompt user for a string
chars = input("Enter a sequence of alphanumeric chars: ")
print(reverse_string(chars))
|
[
"[email protected]"
] | |
aafb7acdd937d5e18a31b404b96b813a8b2089d1
|
1f70692436b1f70f62eab6cd88cb4305ddb80823
|
/StockRight/stockQuotes/migrations/0004_stock_quantity.py
|
275ee1340f5cbbca256288673e9da622045e9485
|
[] |
no_license
|
rohanpatel711/CMPT350-StockRight
|
28ec2b5946396d71b60c59b1193081d610c165df
|
febe702bae4c91ac13492adcc110ff2bef561644
|
refs/heads/master
| 2022-04-11T16:47:44.457551 | 2020-04-02T15:10:30 | 2020-04-02T15:10:30 | 249,769,810 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 388 |
py
|
# Generated by Django 3.0.4 on 2020-04-01 04:51
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('stockQuotes', '0003_auto_20200331_2056'),
]
operations = [
migrations.AddField(
model_name='stock',
name='quantity',
field=models.IntegerField(default=0),
),
]
|
[
"[email protected]"
] | |
7f379fc9f27c5f050bdf2abf7b91ae3ec247be87
|
71c48ce854552f32d436218ce2d185afadf80785
|
/test/functional/feature_logging.py
|
f09595dbebd1a20869942f3db26a60595b672b91
|
[
"MIT"
] |
permissive
|
fizzgig656/ConneX
|
7fd75abb3437728a2b5fe456e3853bf8d4ad978c
|
2a67367ef9e28109a536d5d6e4ddc11e4831022c
|
refs/heads/master
| 2020-05-15T22:09:07.873339 | 2019-03-25T07:45:30 | 2019-03-25T07:45:30 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,978 |
py
|
#!/usr/bin/env python3
# Copyright (c) 2017-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test debug logging."""
import os
from test_framework.test_framework import CONNEXTestFramework
from test_framework.test_node import ErrorMatch
class LoggingTest(CONNEXTestFramework):
def set_test_params(self):
self.num_nodes = 1
self.setup_clean_chain = True
def relative_log_path(self, name):
return os.path.join(self.nodes[0].datadir, "regtest", name)
def run_test(self):
# test default log file name
default_log_path = self.relative_log_path("debug.log")
assert os.path.isfile(default_log_path)
# test alternative log file name in datadir
self.restart_node(0, ["-debuglogfile=foo.log"])
assert os.path.isfile(self.relative_log_path("foo.log"))
# test alternative log file name outside datadir
tempname = os.path.join(self.options.tmpdir, "foo.log")
self.restart_node(0, ["-debuglogfile=%s" % tempname])
assert os.path.isfile(tempname)
# check that invalid log (relative) will cause error
invdir = self.relative_log_path("foo")
invalidname = os.path.join("foo", "foo.log")
self.stop_node(0)
        exp_stderr = r"Error: Could not open debug log file \S+$"
self.nodes[0].assert_start_raises_init_error(["-debuglogfile=%s" % (invalidname)], exp_stderr, match=ErrorMatch.FULL_REGEX)
assert not os.path.isfile(os.path.join(invdir, "foo.log"))
# check that invalid log (relative) works after path exists
self.stop_node(0)
os.mkdir(invdir)
self.start_node(0, ["-debuglogfile=%s" % (invalidname)])
assert os.path.isfile(os.path.join(invdir, "foo.log"))
# check that invalid log (absolute) will cause error
self.stop_node(0)
invdir = os.path.join(self.options.tmpdir, "foo")
invalidname = os.path.join(invdir, "foo.log")
self.nodes[0].assert_start_raises_init_error(["-debuglogfile=%s" % invalidname], exp_stderr, match=ErrorMatch.FULL_REGEX)
assert not os.path.isfile(os.path.join(invdir, "foo.log"))
# check that invalid log (absolute) works after path exists
self.stop_node(0)
os.mkdir(invdir)
self.start_node(0, ["-debuglogfile=%s" % (invalidname)])
assert os.path.isfile(os.path.join(invdir, "foo.log"))
# check that -nodebuglogfile disables logging
self.stop_node(0)
os.unlink(default_log_path)
assert not os.path.isfile(default_log_path)
self.start_node(0, ["-nodebuglogfile"])
assert not os.path.isfile(default_log_path)
# just sanity check no crash here
self.stop_node(0)
self.start_node(0, ["-debuglogfile=%s" % os.devnull])
if __name__ == '__main__':
LoggingTest().main()
|
[
"[email protected]"
] | |
6905fda86703d56d27ced0178a27ebf687bb1da0
|
d18df0ec22dc766496d4b0c2dcdcc933bdf332d8
|
/utils.py
|
f15c3122cd9f699a4a7cf4c18cdcaea62d5eff1b
|
[] |
no_license
|
thanhlt998/tktdtt
|
edc6610a28e09482f0746db258eed5323636abaa
|
64f32e62fb3b2d5d6ef6c2a0e74294bdff4b2057
|
refs/heads/master
| 2022-03-21T07:24:59.104986 | 2019-12-17T02:32:25 | 2019-12-17T02:32:25 | 208,956,173 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,151 |
py
|
from pyvi.ViTokenizer import ViTokenizer
import re
from dateutil.parser import parse
import json
def tokenize(terms):
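    """Word-segment Vietnamese text with ViTokenizer and quote each token.

    Multi-word tokens come back joined by underscores; those are restored to
    spaces and wrapped in double quotes, which (presumably) keeps them as
    exact phrases in the search query built downstream.
    """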
terms = ViTokenizer.tokenize(terms)
terms = [f"\"{re.sub(r'_', ' ', term)}\"" for term in re.findall(r'\S+', terms)]
return ' '.join(terms)
def time_str2iso_format(time_str, is_24h_format=True):
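    """Extract the first timestamp-looking substring from time_str and
    return it formatted as '%Y-%m-%dT%H:%M:%SZ' (parsed by dateutil)."""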
time = re.search(fr'\d[\d/:,\- ]+[\d{"AMP" if is_24h_format else ""}]+', time_str)[0]
time = parse(time)
return time.strftime('%Y-%m-%dT%H:%M:%SZ')
def read_jsonl_file(fn):
docs = []
with open(fn, mode='r', encoding='utf8') as f:
for line in f:
docs.append(json.loads(line))
    return docs
def read_json_file(fn):
with open(fn, mode='r', encoding='utf8') as f:
docs = json.load(f)
    return docs
def dump_jsonl_file(fn, docs):
    with open(fn, mode='w', encoding='utf8') as f:
        for doc in docs:
            # one JSON document per line, as the .jsonl format expects
            f.write(json.dumps(doc, ensure_ascii=False) + '\n')
if __name__ == '__main__':
# docs = read_json_file('data/data_baomoi.json')
docs = read_jsonl_file('data/24h.jsonl')
print(docs[:2])
|
[
"[email protected]"
] | |
940b7535dac448f5b89c7bf95f5086ad745a8ee8
|
08e9d00e2545c6a89c3d6b45443303796a636d8e
|
/Chapter4/tuanhtran/iterativeFeatureSelection.py
|
17598f1094dbdac42fd3e3b01b3500082bf499b7
|
[] |
no_license
|
tuantran1810/MLWithPython
|
cde92dd81153228943dd98ce6382b53bf02a2401
|
93a5ff1ffba7cbe85f944c5177f0981ff05a0572
|
refs/heads/master
| 2022-01-08T22:45:17.650813 | 2019-07-02T16:09:40 | 2019-07-02T16:09:40 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,283 |
py
|
from sklearn.feature_selection import RFE
from sklearn.datasets import load_breast_cancer
from sklearn.feature_selection import SelectFromModel
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
import numpy as np
import matplotlib.pyplot as plt
splitStr = "\n" + "=" * 100 + "\n"
cancer = load_breast_cancer()
rng = np.random.RandomState(42)
noise = rng.normal(size = (len(cancer.data), 50))
X_w_noise = np.hstack([cancer.data, noise])
X_train, X_test, y_train, y_test = train_test_split(X_w_noise, cancer.target,
random_state = 0, test_size = 0.5)
select = RFE(RandomForestClassifier(n_estimators = 100, random_state = 42),
n_features_to_select = 40).fit(X_train, y_train)
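# RFE refits the forest repeatedly, dropping the least important features
# until 40 remain; get_support() then yields a boolean mask of the survivors.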
mask = select.get_support()
plt.matshow(mask.reshape(1, -1), cmap = 'gray_r')
plt.xlabel("Feature index")  # each column of the mask is one feature
plt.yticks(())
plt.show()
X_train_rfe = select.transform(X_train)
X_test_rfe = select.transform(X_test)
logreg = LogisticRegression().fit(X_train_rfe, y_train)
print("Logreg on training set: {:.3f}".format(logreg.score(X_train_rfe, y_train)))
print("Logreg on test set: {:.3f}".format(logreg.score(X_test_rfe, y_test)))
print("RFE score: {:.3f}".format(select.score(X_test, y_test)))
|
[
"[email protected]"
] | |
10bf66a31ac6a7609891546fa9062c1561711a39
|
13fd82d61ce17bd1389b977632fc46e2dbadf81c
|
/Linked Lists.py
|
49e09e5cdf9e45d2960d6ad046cb7c3c50024233
|
[] |
no_license
|
Pranay-sopho/Data-Structures-and-Algorithms-in-Python
|
ed1f502d5b55f95f3d15b3a978fb2c865dfa2844
|
cfaf8ea5c21f3f86e97f5c5eff7486e1b4f64815
|
refs/heads/master
| 2021-01-01T18:41:11.976237 | 2017-07-12T22:44:52 | 2017-07-12T22:44:52 | 98,408,721 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 15,019 |
py
|
class Empty(Exception):
pass
class LinkedStack:
class _Node:
__slots__ = '_element', '_next'
def __init__(self, element, next):
self._element = element
self._next = next
def __init__(self):
self._head = None
self._size = 0
def __len__(self):
return self._size
def is_empty(self):
return self._size == 0
def top(self):
if self.is_empty():
raise Empty('Stack is empty')
return self._head._element
def push(self, element):
self._head = self._Node(element, self._head)
self._size += 1
def pop(self):
if self.is_empty():
raise Empty('Stack is empty')
answer = self._head._element
self._head = self._head._next
self._size -= 1
return answer
"""
data = LinkedStack()
data.push(4)
data.push(3)
print(data.pop())
print(data.is_empty())
print(data.top())
"""
class LinkedQueue:
class _Node:
__slots__ = '_element', '_next'
def __init__(self, element, next):
self._element = element
self._next = next
def __init__(self):
self._head = None
self._tail = None
self._size = 0
def __len__(self):
return self._size
def is_empty(self):
return self._size == 0
def first(self):
if self.is_empty():
raise Empty('Queue is empty')
return self._head._element
def enqueue(self, element):
newest = self._Node(element, None)
if self.is_empty():
self._head = newest
else:
self._tail._next = newest
self._tail = newest
self._size += 1
def dequeue(self):
if self.is_empty():
raise Empty('Queue is empty')
answer = self._head._element
self._head = self._head._next
self._size -= 1
if self.is_empty():
self._tail = None
return answer
class CircularQueue:
class _Node:
__slots__ = '_element', '_next'
def __init__(self, element, next):
self._element = element
self._next = next
def __init__(self):
self._tail = None
self._size = 0
def __len__(self):
return self._size
def is_empty(self):
return self._size == 0
def first(self):
if self.is_empty():
raise Empty('Queue is empty')
return self._tail._next._element
def dequeue(self):
if self.is_empty():
raise Empty('Queue is empty')
head = self._tail._next
answer = head._element
head = head._next
self._size -= 1
if self.is_empty():
self._tail = None
return answer
def enqueue(self, element):
newest = self._Node(element, None)
if self.is_empty():
newest._next = newest
else:
newest._next = self._tail._next
self._tail._next = newest
self._tail = newest
self._size += 1
def rotate(self):
if self._size > 0:
self._tail = self._tail._next
class _DoublyLinkedBase:
class _Node:
__slots__ = '_element', '_prev', '_next'
def __init__(self, element, prev, next):
self._element = element
self._prev = prev
self._next = next
def __init__(self):
self._header = self._Node(None, None, None)
self._trailer = self._Node(None, None, None)
self._header._next = self._trailer
self._trailer._prev = self._header
self._size = 0
def __len__(self):
return self._size
def is_empty(self):
return self._size == 0
def _insert_between(self, element, predecessor, successor):
newest = self._Node(element, predecessor, successor)
predecessor._next = newest
successor._prev = newest
self._size += 1
return newest
def _delete_node(self, node):
node._prev._next = node._next
node._next._prev = node._prev
self._size -= 1
answer = node._element
node._element = node._prev = node._next = None
return answer
class LinkedDeque(_DoublyLinkedBase):
def first(self):
if self.is_empty():
raise Empty('Queue is empty')
return self._header._next._element
def last(self):
if self.is_empty():
raise Empty('Queue is empty')
return self._trailer._prev._element
def insert_first(self, element):
self._insert_between(element, self._header, self._header._next)
def insert_last(self, element):
self._insert_between(element, self._trailer._prev, self._trailer)
def delete_first(self):
if self.is_empty():
raise Empty('Queue is empty')
first = self._delete_node(self._header._next)
return first
def delete_last(self):
if self.is_empty():
raise Empty('Queue is empty')
last = self._delete_node(self._trailer._prev)
return last
"""
data = LinkedDeque()
print(len(data))
data.insert_first(3)
data.insert_first(65)
data.insert_last(54)
print(data.first())
print(data.is_empty())
data.delete_first()
data.delete_last()
print(data.first())
print(data.last())
"""
class PositionalList(_DoublyLinkedBase):
class Position:
def __init__(self, container, node):
self._container = container
self._node = node
def element(self):
return self._node._element
def __eq__(self, other):
return type(other) is type(self) and other._node is self._node
def __ne__(self, other):
return not (self == other)
def _validate(self, p):
"""
start = p._container._header._next
pos = self.Position(p._container, start)
while pos != None:
if pos == p:
return True
start = start._next
pos = self.Position(p._container, start)
return False
"""
if not isinstance(p, self.Position):
raise TypeError('p must be Proper Position Type')
if p._container is not self:
raise ValueError('p does not belong to this container')
if p._node._next is None:
raise ValueError('p is no longer valid')
return p._node
def _make_position(self, node):
# if node._prev is None or node._next is None:
if node is self._header or node is self._trailer:
return None
else:
return self.Position(self, node)
def first(self):
return self._make_position(self._header._next)
def last(self):
return self._make_position(self._trailer._prev)
def before(self, p):
node = self._validate(p)
return self._make_position(node._prev)
def after(self, p):
node = self._validate(p)
return self._make_position(node._next)
def __iter__(self):
cursor = self.first()
# while cursor != None:
while cursor is not None:
yield cursor.element()
cursor = self.after(cursor)
def _insert_between(self, element, predecessor, successor):
node = super()._insert_between(element, predecessor, successor)
return self._make_position(node)
def add_first(self, element):
return self._insert_between(element, self._header, self._header._next)
def add_last(self, element):
return self._insert_between(element, self._trailer._prev, self._trailer)
def add_before(self, position, element):
node = self._validate(position)
return self._insert_between(element, node._prev, node)
def add_after(self, position, element):
node = self._validate(position)
return self._insert_between(element, node, node._next)
def delete(self, position):
node = self._validate(position)
return self._delete_node(node)
def replace(self, position, element):
node = self._validate(position)
old_value = node._element
node._element = element
        return old_value  # return the element previously stored at the position
def insertion_sort(L):
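    """Sort a PositionalList of comparable elements into nondecreasing order."""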
marker = L.first()
while marker != L.last():
pivot = L.after(marker)
value = pivot.element()
if value > marker.element():
marker = pivot
else:
walk = marker
while walk != L.first() and L.before(walk).element() > pivot.element():
walk = L.before(walk)
L.delete(pivot)
L.add_before(walk, value)
"""
L = PositionalList()
L.add_first(5)
L.add_last(4)
L.add_first(6)
L.add_last(3)
print(L.first().element())
print(L.last().element())
insertion_sort(L)
print(L.first().element())
print(L.last().element())
"""
# -------Priority Queues--------
class PriorityQueueBase:
class _Item:
__slots__ = '_key', '_value'
def __init__(self, k, v):
self._key = k
self._value = v
def __lt__(self, other):
return self._key < other._key
def is_empty(self):
return len(self) == 0
class UnsortedPriorityQueue(PriorityQueueBase):
def _find_min(self):
if self.is_empty():
raise Empty('Priority Queue is Empty')
small = self._data.first()
walk = self._data.after(small)
while walk is not None:
if walk.element() < small.element():
small = walk
walk = self._data.after(walk)
return small
def __init__(self):
self._data = PositionalList()
def __len__(self):
return len(self._data)
def add(self, key, value):
self._data.add_last(self._Item(key, value))
def min(self):
p = self._find_min()
item = p.element()
return (item._key, item._value)
def remove_min(self):
p = self._find_min()
item = self._data.delete(p)
return (item._key, item._value)
class SortedPriorityQueue(PriorityQueueBase):
def __init__(self):
self._data = PositionalList()
def __len__(self):
return len(self._data)
def add(self, key, value):
newest = self._Item(key, value)
walk = self._data.last()
while walk is not None and newest < walk.element():
walk = self._data.before(walk)
if walk is None:
self._data.add_first(newest)
else:
self._data.add_after(walk, newest)
def min(self):
if self.is_empty():
raise Empty('Priority Queue is Empty.')
p = self._data.first()
item = p.element()
return (item._key, item._value)
def remove_min(self):
if self.is_empty():
raise Empty('Priority Queue is Empty.')
item = self._data.delete(self._data.first())
return (item._key, item._value)
class HeapPriorityQueue(PriorityQueueBase):
def _parent(self, j):
return (j - 1) // 2
def _left(self, j):
return 2 * j + 1
def _right(self, j):
return 2 * j + 2
def _has_left(self, j):
return self._left(j) < len(self._data)
def _has_right(self, j):
return self._right(j) < len(self._data)
def _swap(self, i, j):
self._data[i], self._data[j] = self._data[j], self._data[i]
def _upheap(self, j):
parent = self._parent(j)
if j > 0 and self._data[j] < self._data[parent]:
self._swap(j, parent)
self._upheap(parent)
def _downheap(self, j):
if self._has_left(j):
left = self._left(j)
small_child = left
if self._has_right(j):
right = self._right(j)
if self._data[right] < self._data[left]:
small_child = right
if self._data[j] > self._data[small_child]:
self._swap(j, small_child)
self._downheap(small_child)
def __init__(self, contents=()):
self._data = [self._Item(k, v) for k, v in contents]
if len(self._data) > 1:
self._heapify()
def __len__(self):
return len(self._data)
def add(self, key, value):
self._data.append(self._Item(key, value))
self._upheap(len(self._data) - 1)
def min(self):
if self.is_empty():
raise Empty('Priority queue is empty')
item = self._data[0]
return (item._key, item._value)
def remove_min(self):
if self.is_empty():
raise Empty('Priority queue is empty')
self._swap(0, len(self._data) - 1)
item = self._data.pop()
self._downheap(0)
return (item._key, item._value)
def _heapify(self):
start = self._parent(len(self) - 1)
for j in range(start, -1, -1):
self._downheap(j)
def pq_sort(C):
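    """Sort a PositionalList C in place by draining it into a heap-based
    priority queue and adding the elements back in nondecreasing order."""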
n = len(C)
P = HeapPriorityQueue()
for j in range(n):
element = C.delete(C.first())
P.add(element, element)
for k in range(n):
(k, v) = P.remove_min()
C.add_last(v)
class AdaptableHeapPriorityQueue(HeapPriorityQueue):
class Locator(HeapPriorityQueue._Item):
__slots__ = '_index'
def __init__(self, k, v, j):
super().__init__(k, v)
self._index = j
def _swap(self, i, j):
super()._swap(i, j)
self._data[i]._index = i
self._data[j]._index = j
def _bubble(self, j):
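        """Restore the heap property at index j, moving the entry up or down as needed."""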
if j > 0 and self._data[j] < self._data[self._parent(j)]:
self._upheap(j)
else:
self._downheap(j)
def add(self, key, value):
token = self.Locator(key, value, len(self._data))
self._data.append(token)
self._upheap(len(self._data) - 1)
return token
def update(self, loc, newkey, newval):
j = loc._index
if not (0 <= j < len(self) and self._data[j] is loc):
raise ValueError('Invalid Locator')
loc._key = newkey
        loc._value = newval  # the item slot is '_value'; '_val' would hit __slots__ and raise AttributeError
self._bubble(j)
def remove(self, loc):
j = loc._index
if not (0 <= j < len(self) and self._data[j] is loc):
raise ValueError('Invalid Locator')
if j == len(self) - 1:
self._data.pop()
else:
self._swap(j, len(self) - 1)
self._data.pop()
self._bubble(j)
return (loc._key, loc._value)
|
[
"[email protected]"
] | |
dd52beff462a1f2913ede44aad3668e054509274
|
49743d1b594284c18af9370e8907bcc7e66443ca
|
/nhotel/jythonui/jythonuiserver/resources/packages/jsutil.py
|
38cb56cb12e8f032a34212d03f0679fa0b1d8825
|
[] |
no_license
|
stanislawbartkowski/javahotel
|
1a06ce1eecc8508787be8fbcce697471d0c8d2b3
|
13f65d0fda5238dc5cb944aaa90c7275ce186ef9
|
refs/heads/master
| 2021-01-25T07:21:48.960529 | 2017-07-31T10:55:43 | 2017-07-31T10:55:43 | 34,128,037 | 2 | 2 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,409 |
py
|
from com.google.gson import JsonObject
from com.google.gson import JsonParser
import cutil,con,miscutil
def toList(s,listid,dialname=None,listname=None) :
elem = JsonParser().parse(s)
object = elem.getAsJsonObject()
array = object.getAsJsonArray(listid)
list=[]
lform = None
if dialname != None : lform = miscutil.toListMap(dialname,listname)
for i in range (array.size()) :
ma = {}
o = array.get(i)
# print i,o
s = o.entrySet()
for e in s :
# print e,e.getKey(),e.getValue()
if e.getValue().isJsonNull() : val = None
else :
if lform != None : (ttype,after) = miscutil.getColumnDescr(lform,e.getKey())
p = o.getAsJsonPrimitive(e.getKey())
if p.isBoolean() : val = p.getAsBoolean()
elif p.isNumber() :
val = p.getAsDouble()
if lform != None :
if ttype == cutil.INT : val = int(val)
else :
val = p.getAsString()
if lform != None :
if ttype == cutil.DATE : val = con.StoDate(val)
if ttype == cutil.DATETIME : val = con.StoDate(val,True)
ma[e.getKey()] = val
list.append(ma)
return list
|
[
"[email protected]"
] | |
5ddf5e813713edbb4b9e1d05174fb7a38507ae8f
|
e50b8bc3e15c9480d9e433dd5ec41139345d6e2e
|
/zparkl_demo/apps/zparkl_demo/urls.py
|
adc693c842c065cb7312476dfa0ee241c9bbd35f
|
[] |
no_license
|
artminster/zparkl-demo
|
927bce1be24a9d906c8417dd6136c2f955be1ba6
|
8938d8c2feedd8ad9f38e8567578a26e18a98379
|
refs/heads/master
| 2020-06-06T15:06:39.400176 | 2013-12-20T02:40:51 | 2013-12-20T02:40:51 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 266 |
py
|
from django.conf.urls.defaults import patterns, include, url
from django.contrib.auth.decorators import login_required, permission_required
from django.conf import settings
from views import *
urlpatterns = patterns('',
url(r"^$", Home.as_view(), name="home"),
)
|
[
"[email protected]"
] | |
c989db8a045c7efe3de706c8ce506ed845b322b4
|
b9c84f50509e5d35c59a7978f3345966cd8828b1
|
/fyp/Model/migrations/0003_auto_20210205_1554.py
|
46c81463fb967b43f521e37e429c041c396ff1a9
|
[] |
no_license
|
FY-Zhang/FYP
|
b483615e9463f2afcda899299715914e2d57ae09
|
56284a7b1be1aa035bb33c102043035eb5c74a40
|
refs/heads/master
| 2023-04-18T10:32:29.141580 | 2021-04-28T09:14:38 | 2021-04-28T09:14:38 | 332,101,437 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 325 |
py
|
# Generated by Django 3.1.2 on 2021-02-05 07:54
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('Model', '0002_auto_20210205_1551'),
]
operations = [
migrations.RenameModel(
old_name='User',
new_name='Users',
),
]
|
[
"[email protected]"
] | |
c8cf806d2df5d35fa3ff2f8cfe24065527558025
|
ec46cb07a709a04af186e06d810938a6e73a4ea4
|
/src/tests.py
|
041d3a277f758730ed786e8344f49d4e13f59930
|
[] |
no_license
|
nathanesau/BinaryTreeVisualizer
|
819fa795114e011ef111af1a156d4c7bde11736c
|
bf46e5cf07269a15c4a2976997c4d1e1c9b02a24
|
refs/heads/master
| 2020-08-28T18:00:17.514676 | 2019-10-26T22:32:26 | 2019-10-26T22:32:26 | 217,776,904 | 2 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 92 |
py
|
import unittest
from bst import BST, TestBST
if __name__ == '__main__':
unittest.main()
|
[
"[email protected]"
] | |
ec13aae73282e9274cd0ebbdc680e4d555db294b
|
90b47d053812f54ebc62b94f996a44836d405085
|
/daily_charge.py
|
2b5765282995ed56f007c455ebc34283ef94b2a4
|
[] |
no_license
|
zeus911/aws_billing_monitor
|
be05301c8e20a146185d1e47aa19136e8c69b939
|
98965b591b7bfafe2791de9d13f68ebb3a82f6a3
|
refs/heads/master
| 2020-12-30T16:59:43.004161 | 2015-05-19T07:58:50 | 2015-05-19T07:58:50 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,708 |
py
|
#!/usr/bin/env python
import MySQLdb
import logging
import datetime
from datetime import timedelta
def Daily_charge(table):
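    """Fill in yesterday's daily charge for the given billing table.

    The daily figure is the difference between two cumulative month-to-date
    totals; on the 1st of the month the cumulative total restarts, so
    yesterday's total is used as-is.
    """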
#Now=datetime.date.today()
#Now=datetime.date.today() + timedelta(days=-5)
Yesterday=datetime.date.today() + timedelta(days=-1)
OneDayBefore=Yesterday + timedelta(days=-1)
conn = MySQLdb.connect(user='root', db='instance', passwd='', host='localhost')
cursor = conn.cursor()
sql1="select total_all from %s where date='%s'" % (table,OneDayBefore)
cursor.execute(sql1)
infos1=cursor.fetchall()[0][0]
# the charges of the day before yesterday
sql2="select total_all from %s where date='%s'" % (table,Yesterday)
cursor.execute(sql2)
infos2=cursor.fetchall()[0][0]
# the charges of yesterday
if str(Yesterday).split('-')[1] == str(OneDayBefore).split('-')[1]: #in the same month
print "ok"
Day_charge=infos2-infos1 # the difference of two numbers
sql3="update %s set total_today='%s' where date='%s'" % (table,Day_charge,Yesterday)
cursor.execute(sql3)
elif str(Yesterday).split('-')[2] == '01': # the first day of the month
print "ok 01"
Day_charge=infos2
sql4="update %s set total_today='%s' where date='%s'" % (table,Day_charge,Yesterday)
cursor.execute(sql4)
    else :
        # months differ but yesterday is not the 1st -- should not happen for
        # consecutive dates, so leave the row untouched
        print "not ok"
conn.commit()
cursor.close()
conn.close()
if __name__ == '__main__':
logging.basicConfig(level=logging.DEBUG,
format='%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s',
datefmt='%a, %Y-%m-%d %H:%M:%S',
filename='debug_Daily.log',
filemode='w')
Daily_charge('billing_info_account1') #your billing tables in the db
Daily_charge('billing_info_account2')
Daily_charge('billing_info_account3')
|
[
"[email protected]"
] | |
229e39097ba9ce78b622007287da9fc967f81220
|
b3c8dda0112573aa7d393781aa143d2fdd01443f
|
/41:Learning to Speak Object_Oriented/ex41-1debug.py
|
e2be5641f9801edda005800e839f827319f4ac5a
|
[] |
no_license
|
YukyCookie/learn-python-three-the-hard-way
|
34fbd58e379dba5dd4ca0feddf89b79c2d62891c
|
462bd850571ecf32c6eec2b5ee7bd0dc40b8a59f
|
refs/heads/master
| 2020-06-21T15:11:52.702279 | 2019-11-03T02:53:45 | 2019-11-03T02:53:45 | 197,488,523 | 5 | 2 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,064 |
py
|
import random
from urllib.request import urlopen
import sys
WORD_URL = "http://learncodethehardway.org/words.txt"
WORDS = []
PHRASES = {
"class %%%(%%%):":
"Make a class named %%% that is-a %%%.",
"class %%%(object):\n\tdef __init__(self, @@@)":
"class %%% has-a __init__ that takes self and *** params.",
"class %%%(object):\n\tdef *** (self, @@@)":
"class %%% has-a function *** that takes self and @@@ params.",
"*** = %%%()":
"Set *** to an instance of class %%%.",
"***.***(@@@)":
"From *** get the *** function, call it with params self, @@@.",
"***.*** = '***'":
"From *** get the *** attribute and set it to '***'."
}
# do they want to drill phrases first
if len(sys.argv) == 2 and sys.argv[1] == "english":
PHRASE_FIRST = True
else:
PHRASE_FIRST = False
# load up the words from the website
for word in urlopen(WORD_URL).readlines():
WORDS.append(str(word.strip(), encoding="utf-8"))
snippets = list(PHRASES.keys())
print(">>>> snippets: ", snippets)
print("")
random.shuffle(snippets)
print(">>>> snippets/shuffled: ", snippets)
for snippet in snippets:
phrase = PHRASES[snippet]
print(">>>> snippet: ", snippet)
print(">>>> phrase: ", phrase)
print("")
class_names = [w.capitalize() for w in
random.sample(WORDS, snippet.count("%%%"))]
print(">>>> class_names: ", class_names)
other_names = random.sample(WORDS, snippet.count("***"))
print(">>>> other_names: ", other_names)
results = []
param_names = []
print(">>>> before param_names: ", param_names)
for i in range(0, snippet.count("@@@")):
param_count = random.randint(1,3)
        print(">>>> param_names before append: ", param_names)
        param_names.append(', '.join(
            random.sample(WORDS, param_count)))
        print(">>>> param_names after append: ", param_names)
print(">>>> after param_names: ", param_names)
for sentence in snippet, phrase:
print("")
print(">>>>>>>>>>>>>> sentence: ", sentence)
result = sentence[:]
print(">>>> result/sentence: ", result)
# fake class names
        print(">>>> replacing %%%: ")
for word in class_names:
print(">>>> before result/classnames: ", result)
result = result.replace("%%%", word, 1)
print(">>>> after result/classnames: ", result)
# fake other names
        print(">>>> replacing ***: ")
for word in other_names:
print(">>>> before result/othernames: ", result)
result = result.replace("***", word, 1)
print(">>>> after result/othernames: ", result)
# fake parameter lists
        print(">>>> replacing @@@: ")
for word in param_names:
print(">>>> before result/paramnames: ", result)
result = result.replace("@@@", word, 1)
print(">>>> after result/paramnames: ", result)
results.append(result)
        print(">>>> results inside the loop: ", results)
print("")
    print(">>>> results after the loop: ", results)
question, answer = results
if PHRASE_FIRST:
question, answer = answer, question
print(question)
input("> ")
print(f"ANSWER: {answer}\n\n")
|
[
"[email protected]"
] | |
5f39f871c28451f45bdd8d8b12b694b82f7509cc
|
c40680fdb9fec4a40372a5b85103baae668c7493
|
/etrade/migrations/0008_paper_paper_reference_value.py
|
80f08f71c5e2611acafaac872be85086b2e2aa85
|
[] |
no_license
|
Henrique-Costardi/Sportstrader
|
32d70229dfdb58f6213a0892043524fbfe64fed8
|
7e09b2d3ddc11a8d68ec0c17b91ff3b944efa0bd
|
refs/heads/master
| 2021-01-20T14:16:31.874398 | 2017-05-08T03:28:13 | 2017-05-08T03:28:13 | 83,870,265 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 465 |
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-03-08 22:14
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('etrade', '0007_paper_last_transaction'),
]
operations = [
migrations.AddField(
model_name='paper',
name='paper_reference_value',
field=models.FloatField(default=1.0),
),
]
|
[
"[email protected]"
] | |
6117082d0bb54f9eb0eaf89afb5636e8fe4cab5f
|
f92f341079ed56cb3966a984bd1b107b28f04079
|
/ESP8266/ESP8266_SDK/tools/gen_appbin.py
|
d5629015685218fd9084c14646e79b042d48d004
|
[
"LicenseRef-scancode-warranty-disclaimer",
"GPL-1.0-or-later",
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] |
permissive
|
FrauBluher/ShR2
|
6aeca9ef632076e6630f0c8e53da2e5ee4c38421
|
e23e5482c3695833eebf992e9047a2efa2f3076a
|
refs/heads/master
| 2023-01-05T16:17:05.921040 | 2015-06-10T23:35:28 | 2015-06-10T23:35:28 | 17,427,876 | 1 | 3 |
MIT
| 2022-12-26T20:04:52 | 2014-03-05T04:25:07 |
C++
|
UTF-8
|
Python
| false | false | 6,999 |
py
|
#!/usr/bin/python
#
# File : gen_appbin.py
# This file is part of Espressif's generate bin script.
# Copyright (C) 2013 - 2016, Espressif Systems
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of version 3 of the GNU General Public License as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program. If not, see <http://www.gnu.org/licenses/>.
"""This file is part of Espressif's generate bin script.
argv[1] is elf file name
argv[2] is version num"""
import string
import sys
import os
import re
import binascii
import struct
TEXT_ADDRESS = 0x40100000
# app_entry = 0
# data_address = 0x3ffb0000
# data_end = 0x40000000
# text_end = 0x40120000
CHECKSUM_INIT = 0xEF
chk_sum = CHECKSUM_INIT
blocks = 0
def write_file(file_name,data):
if file_name is None:
print 'file_name cannot be none\n'
sys.exit(0)
fp = open(file_name,'ab')
if fp:
fp.seek(0,os.SEEK_END)
fp.write(data)
fp.close()
else:
print '%s write fail\n'%(file_name)
def combine_bin(file_name,dest_file_name,start_offset_addr,need_chk):
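    """Append one section to the flash image (summary inferred from the code
    below): write an <offset, padded length> little-endian header, copy the
    section bytes, zero-pad to a 4-byte (checksummed) or 16-byte boundary,
    and XOR every payload byte into the global chk_sum when need_chk is set.
    """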
global chk_sum
global blocks
if dest_file_name is None:
print 'dest_file_name cannot be none\n'
sys.exit(0)
if file_name:
fp = open(file_name,'rb')
if fp:
########## write text ##########
fp.seek(0,os.SEEK_END)
data_len = fp.tell()
if data_len:
if need_chk:
tmp_len = (data_len + 3) & (~3)
else:
tmp_len = (data_len + 15) & (~15)
data_bin = struct.pack('<II',start_offset_addr,tmp_len)
write_file(dest_file_name,data_bin)
fp.seek(0,os.SEEK_SET)
data_bin = fp.read(data_len)
write_file(dest_file_name,data_bin)
if need_chk:
for loop in range(len(data_bin)):
chk_sum ^= ord(data_bin[loop])
# print '%s size is %d(0x%x),align 4 bytes,\nultimate size is %d(0x%x)'%(file_name,data_len,data_len,tmp_len,tmp_len)
tmp_len = tmp_len - data_len
if tmp_len:
data_str = ['00']*(tmp_len)
data_bin = binascii.a2b_hex(''.join(data_str))
write_file(dest_file_name,data_bin)
if need_chk:
for loop in range(len(data_bin)):
chk_sum ^= ord(data_bin[loop])
blocks = blocks + 1
fp.close()
else:
print '!!!Open %s fail!!!'%(file_name)
def gen_appbin():
global chk_sum
global blocks
if len(sys.argv) != 6:
print 'Usage: gen_appbin.py eagle.app.out boot_mode flash_mode flash_clk_div flash_size'
sys.exit(0)
elf_file = sys.argv[1]
boot_mode = sys.argv[2]
flash_mode = sys.argv[3]
flash_clk_div = sys.argv[4]
flash_size = sys.argv[5]
flash_data_line = 16
data_line_bits = 0xf
irom0text_bin_name = 'eagle.app.v6.irom0text.bin'
text_bin_name = 'eagle.app.v6.text.bin'
data_bin_name = 'eagle.app.v6.data.bin'
rodata_bin_name = 'eagle.app.v6.rodata.bin'
flash_bin_name ='eagle.app.flash.bin'
BIN_MAGIC_FLASH = 0xE9
BIN_MAGIC_IROM = 0xEA
data_str = ''
sum_size = 0
if os.getenv('COMPILE')=='gcc' :
cmd = 'xtensa-lx106-elf-nm -g ' + elf_file + ' > eagle.app.sym'
else :
cmd = 'xt-nm -g ' + elf_file + ' > eagle.app.sym'
os.system(cmd)
fp = file('./eagle.app.sym')
if fp is None:
print "open sym file error\n"
sys.exit(0)
lines = fp.readlines()
fp.close()
entry_addr = None
p = re.compile('(\w*)(\sT\s)(call_user_start)$')
for line in lines:
m = p.search(line)
if m != None:
entry_addr = m.group(1)
# print entry_addr
if entry_addr is None:
print 'no entry point!!'
sys.exit(0)
data_start_addr = '0'
p = re.compile('(\w*)(\sA\s)(_data_start)$')
for line in lines:
m = p.search(line)
if m != None:
data_start_addr = m.group(1)
# print data_start_addr
rodata_start_addr = '0'
p = re.compile('(\w*)(\sA\s)(_rodata_start)$')
for line in lines:
m = p.search(line)
if m != None:
rodata_start_addr = m.group(1)
# print rodata_start_addr
# write flash bin header
#============================
# SPI FLASH PARAMS
#-------------------
#flash_mode=
# 0: QIO
# 1: QOUT
# 2: DIO
# 3: DOUT
#-------------------
#flash_clk_div=
# 0 : 80m / 2
# 1 : 80m / 3
# 2 : 80m / 4
# 0xf: 80m / 1
#-------------------
#flash_size=
# 0 : 512 KB
# 1 : 256 KB
# 2 : 1024 KB
# 3 : 2048 KB
# 4 : 4096 KB
#-------------------
# END OF SPI FLASH PARAMS
#============================
byte2=int(flash_mode)&0xff
byte3=(((int(flash_size)<<4)| int(flash_clk_div))&0xff)
if boot_mode == '2':
# write irom bin head
data_bin = struct.pack('<BBBBI',BIN_MAGIC_IROM,4,byte2,byte3,long(entry_addr,16))
sum_size = len(data_bin)
write_file(flash_bin_name,data_bin)
# irom0.text.bin
combine_bin(irom0text_bin_name,flash_bin_name,0x0,0)
data_bin = struct.pack('<BBBBI',BIN_MAGIC_FLASH,3,byte2,byte3,long(entry_addr,16))
sum_size = len(data_bin)
write_file(flash_bin_name,data_bin)
# text.bin
combine_bin(text_bin_name,flash_bin_name,TEXT_ADDRESS,1)
# data.bin
if data_start_addr:
combine_bin(data_bin_name,flash_bin_name,long(data_start_addr,16),1)
# rodata.bin
combine_bin(rodata_bin_name,flash_bin_name,long(rodata_start_addr,16),1)
# write checksum header
sum_size = os.path.getsize(flash_bin_name) + 1
sum_size = flash_data_line - (data_line_bits&sum_size)
if sum_size:
data_str = ['00']*(sum_size)
data_bin = binascii.a2b_hex(''.join(data_str))
write_file(flash_bin_name,data_bin)
write_file(flash_bin_name,chr(chk_sum & 0xFF))
if boot_mode == '1':
sum_size = os.path.getsize(flash_bin_name)
data_str = ['FF']*(0x10000-sum_size)
data_bin = binascii.a2b_hex(''.join(data_str))
write_file(flash_bin_name,data_bin)
fp = open(irom0text_bin_name,'rb')
if fp:
data_bin = fp.read()
write_file(flash_bin_name,data_bin)
fp.close()
else :
print '!!!Open %s fail!!!'%(flash_bin_name)
sys.exit(0)
cmd = 'rm eagle.app.sym'
os.system(cmd)
if __name__=='__main__':
gen_appbin()
|
[
"[email protected]"
] | |
42d9e0ac5b905f545f05a2e2624b82db8a952a70
|
d0f802ff64e68e12b2b0fab84d4f13d047b3ff1e
|
/_11_1_Linear_Regression_Source.py
|
9f3d302b50bbf179b11ce7d1f7281b3e8140c1b8
|
[] |
no_license
|
joohongkeem/MachineLearning
|
592d977e1db775034fc841bf2306d48f9902df5c
|
0be89190b0d53a3a1f1801ae43202451b648550d
|
refs/heads/master
| 2020-03-22T21:25:59.824090 | 2018-07-23T00:20:28 | 2018-07-23T00:20:28 | 140,687,644 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 7,528 |
py
|
# Run module(Hot key : F5)
print("-------------------------------------------------")
print("# 20180712","Made by joohongkeem#".rjust(38),sep=' ',end='\n')
print("-------------------------------------------------")
# Linear regression example 1 (least squares method)
#
import matplotlib.pyplot as plt
import numpy as np
def predict(x):  # function that computes the predicted value
return w0 + w1 * x
sample_data = [[10, 25], [20, 45], [30, 65], [50, 105]]  # data used to fit w0 and w1
X_train = []
y_train = []
X_train_a = []   # x coordinates for plotting with matplotlib
y_train_a = []   # y coordinates for plotting with matplotlib
total_size = 0   # total number of samples (n)
sum_xy = 0 # Σ(x*y)
sum_x = 0 # Σ(x)
sum_y = 0 # Σ(y)
sum_x_square = 0 # Σ(x^2)
for row in sample_data:   # row runs over [10,25] -> [20,45] -> [30,65] -> [50,105]
X_train = row[0]
y_train = row[1]
X_train_a.append(row[0])
y_train_a.append(row[1])
sum_xy += X_train * y_train
sum_x += X_train
sum_y += y_train
sum_x_square += X_train * X_train
total_size += 1
w1 = (total_size * sum_xy - sum_x * sum_y) / (total_size * sum_x_square - sum_x * sum_x)
w0 = (sum_x_square * sum_y - sum_xy * sum_x) / (total_size * sum_x_square - sum_x * sum_x)
X_test = 40
y_predict = predict(X_test)
print("가중치: ", w1)
print("상수 : ", w0)
print("예상 값 :", " x 값 :", X_test, " y_predict :", y_predict)
# Draw the graph
#
x_new = np.arange(0, 51)   # x values from 0 to 50 for drawing the line
y_new = predict(x_new)     # predicted y for those x values
                           # >> lets us check that the line passes through every point!
plt.scatter(X_train_a, y_train_a, label="data")   # plot the sample points
plt.scatter(X_test, y_predict, label="predict")
plt.plot(x_new, y_new, 'r-', label="regression")  # draw the regression line
plt.xlabel("House Size")
plt.ylabel("House Price")
plt.title("Linear Regression")
plt.legend()   # label the series (data in blue, predict in orange)
plt.show()
print("-------------------------------------------------")
# Linear regression example 2 (matrix computation with numpy)
#
import matplotlib.pyplot as plt
import statsmodels.api as sm
import numpy as np
def predict(x):
return w0 + w1*x
X1 =np.array([ [10], [20],[30], [50]])
# [[10]
# [20]
# [30]
# [50]]
y_label =np.array([ [25], [45],[65], [105] ])
X_train = sm.add_constant(X1)  # augmentation: prepend a column of ones for the intercept
# printing X_train gives:
# [[ 1. 10.]
# [ 1. 20.]
# [ 1. 30.]
# [ 1. 50.]]
w = np.dot(np.dot(np.linalg.inv(np.dot(X_train.T, X_train)), X_train.T), y_label)
print('w',w,sep='\n')
# normal equation: w = (X^T X)^-1 X^T y ; (2x4 matrix).(4x1 matrix) --> 2x1 matrix
w0 = w[0]
w1 = w[1]
X_test = 40
y_predict = predict(X_test)
print("가중치: ", w1)
print("상수 : ", w0)
print("예상 값 :", " x 값 :", X_test, " y_predict :", y_predict)
print("-------------------------------------------------")
# Linear regression example 3 (using the scikit-learn library)
#
from sklearn.linear_model import LinearRegression
import statsmodels.api as sm
import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics import mean_squared_error
# 1. Create the model object: instantiate the model class
model = LinearRegression(fit_intercept=True)  # include an intercept (constant) term
# 2. Training: fit(X, y) takes the dataset X and target values y
#    and trains the model object
X_train =np.array([ [10], [20],[30], [50]])
y_train =np.array([ [25], [45],[65], [105] ])
model.fit(X_train, y_train)
# 3. Prediction: predict(X) takes one or more inputs X and predicts the
#    target value y with the trained model
X_test = 40
y_predict = model.predict(X_test)  # prediction for the test input
y_pred = model.predict(X_train)    # predictions for the training samples
# mean_squared_error(predictions, targets)
# compares predictions on the sample data against the actual y_train
mse = mean_squared_error(y_pred, y_train)
print(mse)
print("가중치: ", model.coef_)
print("상수 : ", model.intercept_)
print("예상 값 :", " x 값 :", X_test, " y_predict :", y_predict)
print("-------------------------------------------------")
# Linear regression example 4 (fitting randomly generated data)
#
from sklearn.linear_model import LinearRegression
from sklearn.datasets import make_regression
import matplotlib.pyplot as plt
import numpy as np
# data-generating helper provided by scikit-learn
X_train, y_train, coef = \
make_regression(n_samples=50, n_features=1, bias=50, noise=20,coef=True, random_state=1)
# Inputs
#   n_samples = 50      >> number of sample points: 50
#   n_features = 1      >> the independent variable (feature) is 1-dimensional
#   n_targets = default >> the dependent variable (target) is 1-dimensional (default)
#   bias = 50           >> y-intercept of 50
#   noise = 20          >> std. dev. of the noise added to the dependent variable (output)
#   coef = True         >> also return the coefficients of the linear model
#   random_state = 1    >> seed for the random number generator
#
# Outputs
#   X : 2-D array of shape [n_samples, n_features]; sample matrix of the independent variable
#   y : 1-D array of shape [n_samples] or 2-D array of shape [n_samples, n_targets];
#       sample vector y of the dependent variable
#   coef : 1-D array of shape [n_features] or 2-D array of shape [n_features, n_targets]
#          >> coefficient vector w of the linear model
model = LinearRegression(fit_intercept=True) # fit the intercept term
model.fit(X_train, y_train)
# Generate x values for drawing the regression line:
# 100 evenly spaced points between the min and max of X_train
x_new = np.linspace(np.min(X_train), np.max(X_train), 100)
# linspace(x1, x2, n) returns n evenly spaced points between x1 and x2
# (inclusive); the spacing is (x2-x1)/(n-1).
# numpy's default is n = 50; the call above asks for 100.
# reshape the 1-row, N-column data into N rows and 1 column
X_new = x_new.reshape(-1, 1)
# predicted y values for the plot --> x values for the line and their predictions
y_predict = model.predict(X_new)
# predictions on the training data
y_pred = model.predict(X_train)
mse = mean_squared_error(y_train, y_pred)
print('mse =',mse)
# Plot the results
plt.scatter(X_train, y_train, c='r', label="data")
plt.plot(X_new, y_predict, 'g-', label="regression")
plt.xlabel("x")
plt.ylabel("y")
plt.title("Linear Regression")
plt.legend()
plt.show()
|
[
"[email protected]"
] | |
fb7b8c96b8ddf267dc54ca2f53581f447373a03a
|
bea753dd89c38df2611bb300f460eaa2cbb82e6a
|
/watcher/common/policies/data_model.py
|
768240d239a15bb3f47c14abe0824b75162cafbc
|
[
"Apache-2.0",
"CC-BY-3.0"
] |
permissive
|
openstack/watcher
|
c28bd146b4239fc187f34958c4295cd9df76a12a
|
1e11c490a7ec300974c589dff804858cdeb7860c
|
refs/heads/master
| 2023-09-01T18:12:40.545788 | 2023-08-29T11:21:46 | 2023-08-29T11:21:46 | 35,901,539 | 69 | 39 |
Apache-2.0
| 2018-05-11T15:29:38 | 2015-05-19T18:46:11 |
Python
|
UTF-8
|
Python
| false | false | 1,052 |
py
|
# Copyright 2019 ZTE Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_policy import policy
from watcher.common.policies import base
DATA_MODEL = 'data_model:%s'
rules = [
policy.DocumentedRuleDefault(
name=DATA_MODEL % 'get_all',
check_str=base.RULE_ADMIN_API,
description='List data model.',
operations=[
{
'path': '/v1/data_model',
'method': 'GET'
}
]
),
]
def list_rules():
return rules
|
[
"[email protected]"
] | |
2ca7726a97e24168ecf4147fb619ac3d3540182e
|
d1808d8cc5138489667b7845466f9c573591d372
|
/notebooks/Reproducible Papers/Syngine_2016/figure_2_source_width.py
|
7eb1deaeb1cbee060358396def82df02fcfa286e
|
[] |
no_license
|
krischer/seismo_live
|
e140777900f6246a677bc28b6e68f0a168ec41ab
|
fcc615aee965bc297e8d53da5692abb2ecd6fd0c
|
refs/heads/master
| 2021-10-20T22:17:42.276096 | 2019-11-27T23:21:16 | 2019-11-28T10:44:21 | 44,953,995 | 69 | 59 | null | 2020-05-22T11:00:52 | 2015-10-26T08:00:42 |
Python
|
UTF-8
|
Python
| false | false | 5,880 |
py
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.4'
# jupytext_version: 1.2.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + {"deletable": true, "editable": true, "cell_type": "markdown"}
# <div style='background-image: url("../../share/images/header.svg") ; padding: 0px ; background-size: cover ; border-radius: 5px ; height: 250px'>
# <div style="float: right ; margin: 50px ; padding: 20px ; background: rgba(255 , 255 , 255 , 0.7) ; width: 50% ; height: 150px">
# <div style="position: relative ; top: 50% ; transform: translatey(-50%)">
# <div style="font-size: xx-large ; font-weight: 900 ; color: rgba(0 , 0 , 0 , 0.8) ; line-height: 100%">Computational Seismology</div>
# <div style="font-size: large ; padding-top: 20px ; color: rgba(0 , 0 , 0 , 0.5)">Reproducible Papers - Syngine Paper</div>
# </div>
# </div>
# </div>
# + {"deletable": true, "editable": true, "cell_type": "markdown"}
# ---
#
# # Figure 2: Source Width Parameter
#
# This notebook is part of the supplementary materials for the Syngine paper and reproduces figure 2.
#
# Requires matplotlib >= 1.5 and an ObsPy version with the syngine client (>= 1.0) as well as instaseis.
#
# ##### Authors:
# * Lion Krischer ([@krischer](https://github.com/krischer))
# + {"deletable": true, "editable": true}
# %matplotlib inline
import obspy
import matplotlib.pyplot as plt
import numpy as np
plt.style.use("seaborn-whitegrid")
import copy
import io
import instaseis
import json
import requests
# + {"deletable": true, "editable": true}
SYNGINE_URL = "http://service.iris.edu/irisws/syngine/1/query"
# + {"deletable": true, "editable": true}
network = "IU"
station = "ANMO"
# Get station information from the IRIS FDSN service.
from obspy.clients.fdsn import Client
c = Client("IRIS")
print(c.get_stations(network=network, station=station, format="text")[0][0])
# + {"deletable": true, "editable": true}
# The param file is only used to extract the source parameters. This is
# thus consistent with the other figures but can of course also be done
# differently.
filename = "chile_param.txt"
# Parse the finite source with instaseis.
finite_source = instaseis.FiniteSource.from_usgs_param_file(filename)
# Compute the centroid of it.
finite_source.compute_centroid()
# src is now the centroid of the finite source.
src = finite_source.CMT
# Common query parameters.
params_common = {
# IU.ANMO
"receiverlatitude": 34.95,
"receiverlongitude": -106.46,
"dt": 0.1,
"origintime": src.origin_time,
"components": "Z",
"model": "ak135f_2s",
"format": "miniseed",
"units": "velocity"}
# Parameters only needed for the point source.
params_ps = copy.deepcopy(params_common)
params_ps["sourcelatitude"] = src.latitude
params_ps["sourcelongitude"] = src.longitude
params_ps["sourcedepthinmeters"] = src.depth_in_m
params_ps["sourcemomenttensor"] = ",".join(
str(getattr(src, _i)) for _i in ("m_rr", "m_tt", "m_pp", "m_rt", "m_rp", "m_tp"))
print(finite_source)
print(finite_source.CMT)
# + {"deletable": true, "editable": true}
import copy
import collections
seis = collections.OrderedDict()
source_widths = [2.5, 5, 10, 25, 50, 100]
# Request one seismogram for each source width.
for sw in source_widths:
p = copy.deepcopy(params_ps)
# The sourcewidth parameter steers the width of the STF.
p["sourcewidth"] = sw
# Send it along.
r = requests.get(url=SYNGINE_URL, params=p)
assert r.ok, str(r.reason)
# Get the data and parse it as an ObsPy object.
with io.BytesIO(r.content) as f:
tr = obspy.read(f)[0]
seis[sw] = tr
# Plot only some phases.
tr.slice(tr.stats.starttime + 1000, tr.stats.starttime + 1500).plot()
# + {"deletable": true, "editable": true}
import matplotlib.gridspec as gridspec
# Plotting setup.
fig = plt.figure(figsize=(10, 3))
gs1 = gridspec.GridSpec(1, 1, wspace=0, hspace=0, left=0.05,
right=0.62, bottom=0.14, top=0.99)
ax1 = fig.add_subplot(gs1[0])
gs2 = gridspec.GridSpec(1, 1, wspace=0, hspace=0, left=0.65,
right=0.94, bottom=0.14, top=0.99)
ax2 = fig.add_subplot(gs2[0])
plt.sca(ax1)
# Now plot all the seismograms.
for _i, (sw, tr) in enumerate(seis.items()):
tr.normalize()
plt.plot(tr.times(), 2.0 * tr.data - _i * 3, color="0.1")
plt.legend()
plt.xlim(0, 2000)
plt.yticks([0, -3, -6, -9, -12, -15], [str(_i) for _i in source_widths])
plt.ylim(-17, 2)
plt.xlabel("Time since event origin [sec]")
plt.ylabel("Source width [sec]")
plt.sca(ax2)
# Use an internal instaseis function to get the used STF.
from instaseis.server.util import get_gaussian_source_time_function
dt = 0.01
# Plot all the source time functions.
for _i, sw in enumerate(source_widths):
sr = get_gaussian_source_time_function(sw, dt)[1]
#sr = np.concatenate([sr2, np.zeros(1000)])
alpha = 0.4 - _i * 0.4 / len(source_widths)
plt.fill_between(np.arange(len(sr)) * dt - sw, sr, color="0.0", alpha=alpha, linewidth=0)
if sw == 25:
plt.plot(np.arange(len(sr)) * dt - sw, sr, color="0.0", lw=2)
ax2.annotate('25 sec', xy=(5, 0.07), xytext=(8, 0.10),
arrowprops=dict(facecolor='black', shrink=0.05))
plt.grid(True)
plt.xlim(-20, 20)
plt.ylim(-0.0005, 0.16)
plt.xticks([-10, 0, 10])
plt.yticks([0, 0.04, 0.08, 0.12])
plt.xlabel("Time [sec]")
plt.ylabel("Slip rate [m/sec]")
ax2.yaxis.tick_right()
ax2.yaxis.set_label_position("right")
ax2.yaxis.set_tick_params(length=2)
ax2.yaxis.set_tick_params(pad=4)
ax2.xaxis.set_tick_params(length=2)
ax2.xaxis.set_tick_params(pad=4)
ax2.xaxis.set_tick_params(color="#CCCCCC")
ax2.yaxis.set_tick_params(color="#CCCCCC")
plt.savefig("source_width.pdf")
|
[
"[email protected]"
] | |
759aec4daa0e5dfea558e0ed071b43b9ea35f424
|
fae6b5956dd9b9982f81fa00307e95144c7f1415
|
/user/migrations/0005_friendrequest_sender_name.py
|
656b97881f8a18df0fb91bfb9d94321b70404726
|
[] |
no_license
|
BohdanDziadyk/socialNetworkAPI
|
94abc7168fa654a87930d9b02d5aa51fe84dbfae
|
77d91f3b5dff40635044354b465081b84f778cbf
|
refs/heads/master
| 2023-03-19T07:54:07.336733 | 2021-03-09T15:17:59 | 2021-03-09T15:17:59 | 309,443,227 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 405 |
py
|
# Generated by Django 3.1.3 on 2021-02-04 14:03
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('user', '0004_auto_20210201_1403'),
]
operations = [
migrations.AddField(
model_name='friendrequest',
name='sender_name',
field=models.CharField(blank=True, max_length=20),
),
]
|
[
"[email protected]"
] | |
1fb5bfb391c21ca3c77f093c94956dfbc3c708eb
|
11336266140cabb0063623105cf7b1e3962e7ba4
|
/scraping_scripts/get_metadata.py
|
b01bc77717f7729da537c093068a92832bd9e4a0
|
[] |
no_license
|
mugak/tweet-sentiment-analyzer
|
6d98a3300b359a840154ef7a3152043652c4630f
|
c164f6774add022049d3f1b7ddbd88cb18ca94a2
|
refs/heads/master
| 2020-05-30T13:34:14.046816 | 2019-10-08T02:03:20 | 2019-10-08T02:03:20 | 189,764,637 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,780 |
py
|
import tweepy
import json
import math
import glob
import csv
import zipfile
import zlib
from tweepy import TweepError
from time import sleep
# CHANGE THIS TO THE USER YOU WANT
user = 'elonmusk'
with open('api_keys.json') as f:
keys = json.load(f)
auth = tweepy.OAuthHandler(keys['consumer_key'], keys['consumer_secret'])
auth.set_access_token(keys['access_token'], keys['access_token_secret'])
api = tweepy.API(auth)
user = user.lower()
output_file = '{}.json'.format(user)
output_file_short = '{}_short.json'.format(user)
compression = zipfile.ZIP_DEFLATED
with open('all_ids.json') as f:
ids = json.load(f)
print('total ids: {}'.format(len(ids)))
all_data = []
start = 0
end = 100
limit = len(ids)
i = math.ceil(limit / 100)
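# statuses_lookup accepts at most 100 ids per request, hence the batches of 100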
for go in range(i):
print('currently getting {} - {}'.format(start, end))
sleep(6) # needed to prevent hitting API rate limit
id_batch = ids[start:end]
start += 100
end += 100
tweets = api.statuses_lookup(id_batch)
for tweet in tweets:
all_data.append(dict(tweet._json))
print('metadata collection complete')
print('creating master json file')
with open(output_file, 'w') as outfile:
json.dump(all_data, outfile)
print('creating zipped master json file')
zf = zipfile.ZipFile('{}.zip'.format(user), mode='w')
zf.write(output_file, compress_type=compression)
zf.close()
results = []
def is_retweet(entry):
return 'retweeted_status' in entry.keys()
def get_source(entry):
if '<' in entry["source"]:
return entry["source"].split('>')[1].split('<')[0]
else:
return entry["source"]
with open(output_file) as json_data:
data = json.load(json_data)
for entry in data:
t = {
"created_at": entry["created_at"],
"text": entry["text"],
"in_reply_to_screen_name": entry["in_reply_to_screen_name"],
"retweet_count": entry["retweet_count"],
"favorite_count": entry["favorite_count"],
"source": get_source(entry),
"id_str": entry["id_str"],
"is_retweet": is_retweet(entry)
}
results.append(t)
print('creating minimized json master file')
with open(output_file_short, 'w') as outfile:
json.dump(results, outfile)
with open(output_file_short) as master_file:
data = json.load(master_file)
fields = ["favorite_count", "source", "text", "in_reply_to_screen_name", "is_retweet", "created_at", "retweet_count", "id_str"]
print('creating CSV version of minimized json master file')
f = csv.writer(open('{}.csv'.format(user), 'w'))
f.writerow(fields)
for x in data:
f.writerow([x["favorite_count"], x["source"], x["text"], x["in_reply_to_screen_name"], x["is_retweet"], x["created_at"], x["retweet_count"], x["id_str"]])
|
[
"[email protected]"
] | |
caf3885eb69dffefab2740e33c5b3de38ac0c871
|
a7153a124669a93c611fe64a61e4efac96b61d7d
|
/tool/test_singleidcard_split_logo.py
|
0a5be9e871dfcde33f711996dcdfc50149773e12
|
[
"MIT"
] |
permissive
|
hks5201106166/segmentation_models.pytorch
|
a3c95792fa82c34bd59b13f8488927452a55d7db
|
440a53dd9520378f7f2225a2a81eee6f5a67cc00
|
refs/heads/master
| 2022-11-15T22:34:54.938859 | 2020-07-09T08:41:04 | 2020-07-09T08:41:04 | 271,442,584 | 0 | 0 | null | 2020-06-11T03:28:07 | 2020-06-11T03:28:07 | null |
UTF-8
|
Python
| false | false | 7,210 |
py
|
#-*-coding:utf-8-*-
import os
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
import numpy as np
import cv2
import matplotlib.pyplot as plt
from torch.utils.data import DataLoader
from torch.utils.data import Dataset as BaseDataset
import torch
import numpy as np
import segmentation_models_pytorch as smp
import albumentations as albu
import time
import matplotlib.pyplot as plt
class Dataset(BaseDataset):
"""CamVid Dataset. Read images, apply augmentation and preprocessing transformations.
Args:
images_dir (str): path to images folder
masks_dir (str): path to segmentation masks folder
class_values (list): values of classes to extract from segmentation mask
augmentation (albumentations.Compose): data transformation pipeline
(e.g. flip, scale, etc.)
preprocessing (albumentations.Compose): data preprocessing
(e.g. normalization, shape manipulation, etc.)
"""
CLASSES = ['background', 'id', 'id_reverse']
def __init__(
self,
images_dir,
masks_dir,
classes=None,
augmentation=None,
preprocessing=None,
):
self.ids = os.listdir(images_dir)
self.images_fps = [os.path.join(images_dir, image_id) for image_id in self.ids]
self.masks_fps = [os.path.join(masks_dir, image_id) for image_id in self.ids]
# convert str names to class values on masks
self.class_values = [self.CLASSES.index(cls.lower()) for cls in classes]
self.augmentation = augmentation
self.preprocessing = preprocessing
def __getitem__(self, i):
# read data
image = cv2.imread(self.images_fps[i])
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
mask = cv2.imread(self.masks_fps[i], 0)
# extract certain classes from mask (e.g. cars)
masks = [(mask == v) for v in self.class_values]
mask = np.stack(masks, axis=-1).astype('float')
# apply augmentations
if self.augmentation:
sample = self.augmentation(image=image, mask=mask)
image, mask = sample['image'], sample['mask']
# apply preprocessing
if self.preprocessing:
sample = self.preprocessing(image=image, mask=mask)
image, mask = sample['image'], sample['mask']
return image, mask
def __len__(self):
return len(self.ids)
# DATA_DIR = './data/CamVid/'
#
# # load repo with data if it is not exists
# if not os.path.exists(DATA_DIR):
# print('Loading data...')
# os.system('git clone https://github.com/alexgkendall/SegNet-Tutorial ./data')
# print('Done!')
# helper function for data visualization
def visualize(**images):
"""PLot images in one row."""
n = len(images)
plt.figure(figsize=(16, 5))
for i, (name, image) in enumerate(images.items()):
plt.subplot(1, n, i + 1)
plt.xticks([])
plt.yticks([])
plt.title(' '.join(name.split('_')).title())
plt.imshow(image)
plt.show()
def get_validation_augmentation():
"""Add paddings to make image shape divisible by 32"""
test_transform = [
albu.Resize(512, 512)
]
return albu.Compose(test_transform)
def to_tensor(x, **kwargs):
return x.transpose(2, 0, 1).astype('float32')
def get_preprocessing(preprocessing_fn):
"""Construct preprocessing transform
Args:
preprocessing_fn (callbale): data normalization function
(can be specific for each pretrained neural network)
Return:
transform: albumentations.Compose
"""
_transform = [
albu.Lambda(image=preprocessing_fn),
albu.Lambda(image=to_tensor),
]
return albu.Compose(_transform)
# same image with different random transforms
ENCODER = 'resnet18'
ENCODER_WEIGHTS = 'imagenet'
CLASSES = ['background','id','id_reverse']
ACTIVATION = 'softmax2d' # could be None for logits or 'softmax2d' for multicalss segmentation
DEVICE = 'cuda'
# create segmentation model with pretrained encoder
model = smp.FPN(
encoder_name=ENCODER,
encoder_weights=ENCODER_WEIGHTS,
classes=len(CLASSES),
activation=ACTIVATION,
)
preprocessing_fn = smp.encoders.get_preprocessing_fn(ENCODER, ENCODER_WEIGHTS)
loss = smp.utils.losses.DiceLoss()
metrics = [
smp.utils.metrics.IoU(threshold=0.5),
]
optimizer = torch.optim.Adam([
dict(params=model.parameters(), lr=0.0001),
])
# load best saved checkpoint
best_model = torch.load('/home/simple/mydemo/ocr_project/segment/segmentation_models.pytorch/best_model.pth').cuda()
# create test dataset
path='/home/simple//mydemo/ocr_project/segment/data/segmet_logo/remove_logo_and_aug_image3/train'
train_or_test='/'
# path='/home/simple/mydemo/segmentation_models_mulclass/'
# train_or_test='error_data/'
# images_name=os.listdir(path+train_or_test)
images_name=os.listdir('/home/simple/mydemo/ocr_project/segment/data/segmet_logo/remove_logo_and_aug_image3/train/')
for index,image_name in enumerate(images_name):
print(index)
#n = np.random.choice(len(test_dataset))
#image_vis = test_dataset_vis[n][0].astype('uint8')
image = cv2.imread('/home/simple/mydemo/ocr_project/segment/data/segmet_logo/remove_logo_and_aug_image3/train/'+image_name)
#print(image_name)
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
image1 = image[:, 449:898, :]
image = image[:, 0:449, :]
# cv2.imshow('mask', image)
# cv2.waitKey(0)
transform=get_validation_augmentation()
image_resize=transform(image=image)['image']
preprocessing=get_preprocessing(preprocessing_fn)
image_cuda=preprocessing(image=image_resize)['image']
#gt_mask = gt_mask.squeeze().transpose((1,2,0))[:,:,1]
x_tensor = torch.from_numpy(image_cuda).to(DEVICE).unsqueeze(0)
t1=time.perf_counter()  # time.clock() was removed in Python 3.8
pr_mask = best_model.predict(x_tensor)
y_logo_detection = torch.nn.Softmax2d()(pr_mask)
logo_mask = y_logo_detection.cpu().numpy()
logo_mask = np.uint8(np.argmax(logo_mask, axis=1)[0]*255)
logo_mask = cv2.resize(logo_mask, dsize=(image.shape[1],image.shape[0]))
kernel = np.ones((10, 10), np.uint8)
logo_mask = cv2.erode(logo_mask, kernel=np.ones((5, 5), np.uint8))
logo_mask = cv2.dilate(logo_mask, kernel=np.ones((10, 10), np.uint8))
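# the erode-then-dilate pair above acts as a morphological opening: small
# speckles are removed from the mask while the main logo region is regrown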
contours, hierarchy = cv2.findContours(logo_mask.astype('uint8'), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
ls = []
for contour in contours:
l = cv2.contourArea(contour)
ls.append(l)
index_max = np.argmax(ls)
x, y, w, h = cv2.boundingRect(contours[index_max])
# cv2.rectangle(image, (x, y), (x + w, y + h), (255, 255, 255), 2)
image_roi=image[y:y+h,x:x+w]
image1_roi=image1[y:y+h,x:x+w]
train_image=np.hstack([image1_roi,image_roi])
cv2.imwrite('/home/simple/mydemo/ocr_project/segment/data/segmet_logo/data_remove_the_logo/'+image_name,train_image)
# cv2.imshow('mask',train_image)
# cv2.waitKey(1000)
t2=time.perf_counter()
#print(t2-t1)
# visualize(
# image=image_vis,
# ground_truth_mask=gt_mask,
# predicted_mask=mask
# )
|
[
"[email protected]"
] | |
3abcc4770b5d3213f9bbe698c4fd2bd2e30bc2df
|
015ce35e6344d1726173594ae509dfc1ca6f856d
|
/3-OOP and DSA/4-Recursion/Study/5-fibonichi.py
|
cd8fcc970c153783d338b2223d11fd4aeb930ddb
|
[] |
no_license
|
ayman-elkassas/Python-Notebooks
|
4af80df75c15a6ac3049450b3920d500fef0e581
|
26a8265f458c40ac22965d55722f32a650851683
|
refs/heads/master
| 2023-04-03T19:12:17.707673 | 2021-04-10T21:32:37 | 2021-04-10T21:32:37 | 356,699,690 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 223 |
py
|
# F(n) = F(n-2) + F(n-1) for n > 1.
def fib(n):
if n<=1:
return n
else:
return fib(n-1)+fib(n-2)
print(fib(5))
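# A memoized variant (sketch; fib_memo is not in the original source, and it
# assumes Python 3.2+ for functools.lru_cache). The naive recursion above
# takes exponentially many calls; this one takes O(n).
from functools import lru_cache

@lru_cache(maxsize=None)
def fib_memo(n):
    if n <= 1:
        return n
    return fib_memo(n - 1) + fib_memo(n - 2)

print(fib_memo(5))  # 5, same result as fib(5)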
|
[
"[email protected]"
] | |
63a6cc1020a6c44e7812410db00dc5bd93090c4c
|
50573423a9bc4034cdded596032a42ede5cdb458
|
/Derivacao Numerica.py
|
f84993c3ec78a44c4bed40407c5a4ce2788088e8
|
[
"MIT"
] |
permissive
|
gabrieltardochi/numerical-algorithms
|
1bf5c9d9280d81a23dac875745016de30bfae5f2
|
2ae95f25d1aca76082ad1df9e06cc2d168870acc
|
refs/heads/main
| 2023-03-13T19:03:27.204155 | 2021-03-14T18:42:32 | 2021-03-14T18:42:32 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,786 |
py
|
import math
def getFlt(flt):
flt = str(flt)
was_neg = False
if not ("e" in flt):
return flt[:10]
if flt.startswith('-'):
flt = flt[1:]
was_neg = True
str_vals = str(flt).split('e')
coef = float(str_vals[0])
exp = int(str_vals[1])
return_val = ''
if int(exp) > 0:
return_val += str(coef).replace('.', '')
return_val += ''.join(['0' for _ in range(0, abs(exp - len(str(coef).split('.')[1])))])
elif int(exp) < 0:
return_val += '0.'
return_val += ''.join(['0' for _ in range(0, abs(exp) - 1)])
return_val += str(coef).replace('.', '')
if was_neg:
return_val='-'+return_val
return return_val[:11]
def f(x):
return pow(x, 2) - math.log(x)
def drv_real(x):
return (2*x)-(1/x)
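# The schemes below approximate f'(x0) with second-order finite differences:
#   forward  (calcularad)  : f'(x0) ~ (-3f(x0) + 4f(x0+h) - f(x0+2h)) / (2h)
#   backward (calcularatr) : f'(x0) ~ ( 3f(x0) - 4f(x0-h) + f(x0-2h)) / (2h)
#                            (the code applies this stencil with h negated)
#   central  (calcularcent): f'(x0) ~ ( f(x0+h) - f(x0-h)) / (2h)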
def calcularad(hs,x0):
print("adiantada")
i = 6
for h in hs:
if i == 6:
print(" h 3f(x0) 4f(x0+h) f(x0+2*h) f'*(x0) f'(x0) erro")
for k in range(i):
print(" ", end='')
i -= 1
parte1 = 3*f(x0)
parte2 = 4*f(x0+h)
parte3 = f(x0 + 2*h)
flinha_calc = (1/(2*h)) * ((parte2)-(parte1)-(parte3))
flinha_real = drv_real(x0)
erro = abs(flinha_real-flinha_calc)/abs(flinha_real)
print(h,": ",getFlt(parte1),getFlt(parte2),getFlt(parte3),getFlt(flinha_calc),getFlt(flinha_real),getFlt(erro))
print()
def calcularatr(hs,x0):
print("atrasada")
i = 6
for h in hs:
if i == 6:
print(" h f(x0-2h) 4f(x0-h) 3f(x0) f'*(x0) f'(x0) erro")
for k in range(i):
print(" ", end='')
i -= 1
h = -1*h
parte1 = f(x0-2*h)
parte2 = 4 * f(x0 - h)
parte3 = 3 * f(x0)
flinha_calc = (1 / (2 * h)) * ((parte1) + (-1*parte2) + (parte3))
flinha_real = drv_real(x0)
erro = abs(flinha_real - flinha_calc) / abs(flinha_real)
print(h, ": ", getFlt(parte1), getFlt(parte2), getFlt(parte3), getFlt(flinha_calc), getFlt(flinha_real), getFlt(erro))
print()
def calcularcent(hs,x0):
print("central")
i = 6
for h in hs:
if i == 6:
print(" h f(x0+h) f(x0-h) f'*(x0) f'(x0) erro")
for k in range(i):
print(" ", end='')
i -= 1
parte1 = f(x0+h)
parte2 = f(x0-h)
flinha_calc = (1/(2*h)) * ((parte1)+(-1*parte2))
flinha_real = drv_real(x0)
erro = abs(flinha_real-flinha_calc)/abs(flinha_real)
print(h,": ",getFlt(parte1),getFlt(parte2),getFlt(flinha_calc),getFlt(flinha_real),getFlt(erro))
print()
h_list = [0.1,0.01,0.001,0.0001]
xzero = 1
calcularad(h_list,xzero)
calcularatr(h_list,xzero)
calcularcent(h_list,xzero)
|
[
"[email protected]"
] | |
5d91b705be958d470d09162659872e29488acbda
|
3f3ebf4a4053a51e438a03cc6495f9a287a5b0bd
|
/at210/fun_dir.py
|
bade6ffb530ff2e474af831c123fd03f2a7f2992
|
[
"MIT"
] |
permissive
|
asakura-yoshifumi/publication20200818
|
d838b4b3e649c1738f4fd66f5a2b7fab8cd86a5a
|
7d22fa48b3fc5fb06255da69be65030217df38f1
|
refs/heads/main
| 2023-03-10T05:38:19.598677 | 2021-02-22T05:05:31 | 2021-02-22T05:05:31 | 319,922,256 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 22,904 |
py
|
#!/usr/bin/env python
################################################################################
# MIT License
#
# Copyright (c) 2021 Yoshifumi Asakura
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
################################################################################
################
### packages
###############
'''
Basic tools for calculation, plus helpers for directories and runs.
'''
################
### packages
################
import numpy as np
#from scipy.optimize import minimize
#from scipy import stats
import time
import sys
import re
import pandas as pd
import os
import shutil
import pyper
#import math
#import matplotlib.pyplot as plt
#import seaborn as sns
import platform as plf
import subprocess as sbp
import datetime as dtt
import traceback as tb
### handwrite
################
### field diff
################
def field_d1(field, xy, dx = 1, perio = True):
### field is an array
### xy is the direction, x or y, to differentiate along
### dx should be 1
### periodic or not
if xy == "x":
pass
elif xy == "y":
field = field.T
#
else:
print("error, not defined direction.")
sys.exit()
#
if field.shape[0] == 1:
return(np.zeros((1, 1)))
#
if perio:
K = np.zeros((field.shape[0], field.shape[0]))\
+ np.diag(np.ones((field.shape[0] - 1)), k = 1)\
- np.diag(np.ones((field.shape[0] - 1)), k = -1)\
+ np.diag((1.0,), k = (1 - field.shape[0]))\
- np.diag((1.0,), k = (field.shape[0] - 1))
#
#print(K)
out = K.dot(field)
#
else:
K = np.zeros((field.shape[0], field.shape[0]))\
+ np.diag(np.ones((field.shape[0] - 1)), k = 1)\
- np.diag(np.ones((field.shape[0] - 1)), k = -1)\
+ np.diag((1.0,), k = (1 - field.shape[0]))\
- np.diag((1.0,), k = (field.shape[0] - 1))
K[0, 1] = 0.0
K[field.shape[0] - 1, field.shape[0] - 2] = 0.0
out = K.dot(field)
#
#
if xy == "y":
out = out.T
#
#
return(out)
#
#
def field_d2(field, xy, dx = 1, perio = True):
### field is an array
### xy is the direction, x or y, to differentiate along
### dx should be 1
### periodic or not
if xy == "x":
pass
elif xy == "y":
field = field.T
#
else:
print("error, not defined direction.")
sys.exit()
#
if field.shape[0] == 1:
return(np.zeros((1, 1)))
#
if perio:
K = np.zeros((field.shape[0], field.shape[0]))\
- np.diag(np.ones((field.shape[0])), k = 0) * 2.0\
+ np.diag(np.ones((field.shape[0] - 1)), k = 1)\
+ np.diag(np.ones((field.shape[0] - 1)), k = -1)\
+ np.diag((1.0,), k = (1 - field.shape[0]))\
+ np.diag((1.0,), k = (field.shape[0] - 1))
#
#print(K)
out = K.dot(field)
#
else:
K = np.zeros((field.shape[0], field.shape[0]))\
- np.diag(np.ones((field.shape[0])), k = 0) * 2.0\
+ np.diag(np.ones((field.shape[0] - 1)), k = 1)\
+ np.diag(np.ones((field.shape[0] - 1)), k = -1)\
+ np.diag((1.0,), k = (1 - field.shape[0]))\
+ np.diag((1.0,), k = (field.shape[0] - 1))
K[0, 1] = 2.0
K[field.shape[0] - 1, field.shape[0] - 2] = 2.0
out = K.dot(field)
#
if xy == "y":
out = out.T
#
#
return(out)
#
#
def di1(field, xy, dx = 1, perio = True):
### this calculates differential of the field, instead of just diff
return(0.5 * field_d1(field, xy, dx, perio) / dx)
#
def di2(field, xy, dx = 1, perio = True):
return(1.0 * field_d2(field, xy, dx, perio) / (dx**2.0))
#
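# Minimal usage sketch (assumed input; kept commented out so importing the
# module stays side-effect free): a linear ramp along x has constant slope.
#   f = np.arange(5, dtype=float).reshape(5, 1)
#   di1(f, "x", dx=1.0, perio=False)  # interior rows evaluate to 1.0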
################
###
################
################
###
################
################
###
################
################
### directory setting
################
def dir_reset(dirname, options = True):
if os.path.exists(dirname) and options:
shutil.rmtree(dirname)
os.mkdir(dirname)
#
elif not os.path.exists(dirname):
os.mkdir(dirname)
#
#
################
###
################
class Name_Maker:
def __init__(self, outdir, outhead, ext, reg_com = None, sim_com = None):
self.outdir = outdir
self.outhead = outhead
self.ext = ext
self.reg_com = reg_com
self.sim_com = sim_com
#
### versions information
filename = "%s/versions_info.txt" % self.outdir
dir_reset(self.outdir, False)
with open(filename, mode = "w") as f:
tmp = sbp.run("pip list --format columns".split(" "), stdout = sbp.PIPE)
mes = [
"conducted in",
os.getcwd(),
"\nenvironments",
sys.version,
"\n",
plf.platform(),
"\n",
tmp.stdout.decode()
]
f.write("\n".join(mes))
#
reg = "%s/regression" % self.outdir
sim = "%s/simulation" % self.outdir
dir_reset(reg, False)
dir_reset(sim, False)
self.head_d_f_reg = "%s/%s" % (reg, self.outhead)
self.head_d_f_sim = "%s/%s" % (sim, self.outhead)
#
### markdown using list
self.mdlist = [
filename, ### txt file
"%s/summary_reg.csv" % self.outdir,
"%s/summary_sim.csv" % self.outdir
]
self.mdlist2 = [
filename, ### txt file
"%s/summary_reg.csv" % self.outdir,
"%s/summary_sim.csv" % self.outdir
]
self.comp_only_head = [
filename, ### txt file
"%s/summary_reg.csv" % self.outdir,
"%s/summary_sim.csv" % self.outdir
]
self.comp_label = []
self.comp_only = []
#
self.prev = "yet"
#
#
def get_passed_paras(self):
out = "%s/passed_parameters.csv" % self.outdir
return(out)
#
def get_sim_sum(self):
return(self.mdlist[2])
#
#
def set_sim(self, paras, mod_choice, ind_level):
self.paras = paras
self.mod_choice = mod_choice
self.i_paras, self.i_mod_choice = ind_level
#
### prepare
self.param_num_str = "m%02d_p%02d" %(self.i_mod_choice, self.i_paras)
#
#
def get_in_reg(self):
return(self.tablepaths, self.methods_df, self.i_meth, self.i_input)
#
def get_in_sim(self):
return(self.paras, self.mod_choice, self.i_paras, self.i_mod_choice)
#
#
def names_sim(self, another_color = []):
out = {}
out["heat_name"] = "%s_%s_field_Euler.png" %(self.head_d_f_sim, self.param_num_str)
out["Vxname"] = "%s_%s_field_Vx.png" %(self.head_d_f_sim, self.param_num_str)
out["Vyname"] = "%s_%s_field_Vy.png" %(self.head_d_f_sim, self.param_num_str)
out["Rhoname"] = "%s_%s_field_Rho.png" %(self.head_d_f_sim, self.param_num_str)
out["ERKname"] = "%s_%s_field_ERK.png" %(self.head_d_f_sim, self.param_num_str)
out["part_name"] = "%s_%s_particles_track.png" %(self.head_d_f_sim, self.param_num_str)
out["part_table"] = "%s_%s_particles_track.csv" %(self.head_d_f_sim, self.param_num_str)
out["Lag_Rdata"] = "%s_%s_sim/Lagrange.Rdata" %(self.head_d_f_sim, self.param_num_str)
out["Lag_npy"] = "%s_%s_sim/Lagrange.npy" %(self.head_d_f_sim, self.param_num_str)
out["integ_place"] = "%s_%s_sim" %(self.head_d_f_sim, self.param_num_str)
out["integ_value"] = "%s_%s_sim/integrated.npy" %(self.head_d_f_sim, self.param_num_str)
out["integ_time"] = "%s_%s_sim/timerange.npy" %(self.head_d_f_sim, self.param_num_str)
out["integ_coord"] = "%s_%s_sim/coord.npy" %(self.head_d_f_sim, self.param_num_str)
out["animation"] = "%s_%s_animation" %(self.head_d_f_sim, self.param_num_str)
out["ani_capture"] = "%s_%s_ani_cap_last.png" %(self.head_d_f_sim, self.param_num_str)
out["source"] = "%s_%s_sim/source.npy" %(self.head_d_f_sim, self.param_num_str)
out["erks_name"] = "%s_%s_ERK_last.png" %(self.head_d_f_sim, self.param_num_str)
out["erk_shape"] = "%s_%s_sim/erk_shape.Rdata" %(self.head_d_f_sim, self.param_num_str)
out["Rdata"] = "%s_%s_sim/fields.Rdata" %(self.head_d_f_sim, self.param_num_str)
out["another_color"] = {
"all": "%s_%s_field_color2_Euler.png" %(self.head_d_f_sim, self.param_num_str),
"Vx": "%s_%s_field_color2_Vx.png" %(self.head_d_f_sim, self.param_num_str),
"Vy": "%s_%s_field_color2_Vy.png" %(self.head_d_f_sim, self.param_num_str),
"Rho": "%s_%s_field_color2_Rho.png" %(self.head_d_f_sim, self.param_num_str),
"ERK": "%s_%s_field_color2_ERK.png" %(self.head_d_f_sim, self.param_num_str),
"part": "%s_%s_particles_color2.png" %(self.head_d_f_sim, self.param_num_str)
}
self.array_place = "%s_%s_fields" %(self.head_d_f_sim, self.param_num_str)
out["array_place"] = self.array_place
out["pmodel_track"] = "%s_%s_Pmodel_track.png" %(self.head_d_f_sim, self.param_num_str)
out["pmodel_color"] = "%s_%s_Pmodel_track_color2.png" %(self.head_d_f_sim, self.param_num_str)
out["pmodel_Rdata"] = "%s_%s_sim/Pmodel.Rdata" %(self.head_d_f_sim, self.param_num_str)
out["pmodel_npy"] = "%s_%s_sim/Pmodel.npy" %(self.head_d_f_sim, self.param_num_str)
#
out["comp_R"] = "%s_%s_sim/compare.Rdata" %(self.head_d_f_sim, self.param_num_str)
out["comp_gif"] = "%s_%s_particles_compare.gif" %(self.head_d_f_sim, self.param_num_str)
out["comp_all"] = "%s_%s_p_track_compare.png" %(self.head_d_f_sim, self.param_num_str)
#
out["param_num"] = self.param_num_str
#
self.mdlist.append(out["heat_name"])
self.mdlist.append(out["part_name"])
#
line = os.getcwd()
### select save fig
tmp_h = "%s/%s" % (line, out["heat_name"])
if "heat" in another_color:
tmp_h = "%s/%s" % (line, out["another_color"].get("all"))
tmp_p = "%s/%s" % (line, out["part_name"])
if "part" in another_color:
tmp_p = "%s/%s" % (line, out["another_color"].get("part"))
#
table_md = [
"|%s_field_Euler|%s_particles_track|" % (self.param_num_str, self.param_num_str),
"|---|---|",
"|||\n" % (tmp_h, tmp_p)
]; #print("\n".join(table_md))
if not self.sim_com is None:
if self.i_mod_choice < len(self.sim_com):
table_md = [self.sim_com[self.i_mod_choice]] + table_md
#self.mdlist2.append("\n".join([self.sim_com[self.i_mod_choice] + "\n"]))
#print("\n".join(table_md))
### select save fig 2nd row
tmp_v = "%s/%s" % (line, out["Vxname"])
if "Vx" in another_color:
tmp_v = "%s/%s" % (line, out["another_color"].get("Vx"))
tmp_a = "%s/%s" % (line, out["ani_capture"])
#
table_md2 = [
"|%s_field_Vx|%s_ani_capture|" % (self.param_num_str, self.param_num_str),
"|---|---|",
"|||\n" % (tmp_v, tmp_a)
]
#
### select save fig 3rd row
#tmp_r = "%s/%s" % (line, out["Rhoname"]) ### later, overwrite instead
tmp_r = "%s/%s" % (line, out["comp_all"])
tmp_p = "%s/%s" % (line, out["pmodel_track"])
#
table_md3 = [
"|%s_compare|%s_Pmodel_track|" % (self.param_num_str, self.param_num_str),
"|---|---|",
"|||\n" % (tmp_r, tmp_p)
]
#
if (not self.prev == "sim") and (not self.prev == "yet"):
#table_md = ["<div style='page-break-before:always'></div>\n"] + table_md
self.mdlist2.append("\n<div style='page-break-before:always'></div>\n")
self.mdlist2.append("\n".join(table_md))
self.mdlist2.append("\n".join(table_md2))
self.mdlist2.append("\n".join(table_md3))
self.mdlist2.append("\n<div style='page-break-before:always'></div>\n")
self.prev = "sim"
#
self.comp_label.append("%s_compare" % self.param_num_str)
self.comp_only.append(tmp_r)
#
return(out)
#
def get_mdlist(self):
'''
returns a list to include in markdown summary
'''
time_stmp = dtt.datetime.today().strftime("%Y%m%d_%H%M")[2:]
out = "%s/%s_summary.md" % (self.outdir, time_stmp)
return([out, self.mdlist])
#
def get_mdlist2(self, insertion = []):
'''
returns a list to include in markdown summary with figures in tables
'''
time_stmp = dtt.datetime.today().strftime("%Y%m%d_%H%M")[2:]
out = "%s/%s_summary.md" % (self.outdir, time_stmp)
if len(insertion) > 0:
for j in range(0, len(insertion)):
self.mdlist2.insert((3 + j), insertion[j])
return([out, self.mdlist2])
#
def get_comp_only(self, insertion = []):
#comp_only = self.comp_only
### put all figures into tables
#
if len(self.comp_label) <= 1:
out = []
if len(insertion) > 0:
out = self.comp_only_head
for j in range(0, len(insertion)):
out.insert((3 + j), insertion[j])
return(out)
#
comp_only = []
#
pages = divmod(len(self.comp_only), 6)
if pages[1] == 1:
pages = [pages[0] - 1, 7]
listk = [4, 3]
else:
listk = [pages[1]]
#
def table_command(j):
indice = [6 * j + k for k in range(0, 6)]
for k, ind in enumerate(indice):
if ind >= len(self.comp_only):
indice[k] = 0
return([
"\n<div style='page-break-before:always'></div>\n", #1
"|%s|%s|" % (self.comp_label[indice[0]], self.comp_label[indice[1]]), #2
"|---|---|", #3
"|||" % (self.comp_only[ indice[0]], self.comp_only[ indice[1]]), #4
"|%s|%s|" % (self.comp_label[indice[2]], self.comp_label[indice[3]]), #5
"|||" % (self.comp_only[ indice[2]], self.comp_only[ indice[3]]), #6
"|%s|%s|" % (self.comp_label[indice[4]], self.comp_label[indice[5]]), #7
"|||" % (self.comp_only[ indice[4]], self.comp_only[ indice[5]]), #8
"\n"
])
def odd_table(j, remainder):
if remainder % 2 == 1:
return([
"|%s|end|" % self.comp_label[(6 * j + remainder - 1)],
"||end|" % self.comp_only[ (6 * j + remainder - 1)],
"\n"
])
else:
return(["\n"])
#
#
#
for j in range(0, pages[0]):
comp_only.append("\n".join(table_command(j)))
try:
j += 1; #print(j)
except:
j = 0
try:
for k in range(0, len(listk)):
if listk[k] == 5:
comp_only.append("\n".join(table_command(j)[:6] + odd_table(j, listk[k])))
elif listk[k] == 4:
comp_only.append("\n".join(table_command(j)[:6] + odd_table(j, listk[k])))
elif listk[k] == 3:
comp_only.append("\n".join(table_command(j)[:4] + odd_table(j, listk[k])))
elif listk[k] == 2:
comp_only.append("\n".join(table_command(j)[:4] + odd_table(j, listk[k])))
#
#
#
except:
print("ERROR occured in fun_dir.Name_Maker.get_comp_only")
print(pages)
print(listk)
print(len(self.comp_label))
print(len(self.comp_only))
tb.print_exc()
#
#
if len(insertion) > 0:
out = self.comp_only_head + comp_only
for j in range(0, len(insertion)):
out.insert((3 + j), insertion[j])
#
else:
out = comp_only
#
return(out)
#
def save_fields(self, fields):
dir_f = self.array_place
files = ["%s/array%02d.npy" % (dir_f, j) for j in range(0, len(fields))]
dir_reset(dir_f, False)
#
for j, field_j in enumerate(fields):
np.save(file = files[j], arr = field_j)
#
#
def load_fields(self):
dir_f = "%s_%s_fields" %(self.head_d_f_sim, self.param_num_str)
files = os.listdir(dir_f)
#for file in files:
# print(file)
files2 = []
for file in files:
if ".npy" in file:
print(" loading %s/%s" % (dir_f, file))
files2.append("%s/%s" % (dir_f, file))
out = [np.load(file) for file in files2]
return(out)
#
#
class Data_Arrange:
def __init__(self, resultdir):
self.table = {"compare": [], "euler": [], "lagrange": [], "spring": [], "erkshape": []}
self.label = []
#
self.dir = resultdir
self.tablename = "%s/paths_to_Rdata.csv" % resultdir
#
def add_row(self, names_sim):
self.label.append(names_sim["param_num"])
self.table["compare" ].append(names_sim["comp_R"])
self.table["euler" ].append(names_sim["Rdata"])
self.table["lagrange"].append(names_sim["Lag_Rdata"])
self.table["spring" ].append(names_sim["pmodel_Rdata"])
self.table["erkshape"].append(names_sim["erk_shape"])
#
def get_Rdata_paths(self):
out0 = pd.DataFrame({"label": self.label})
out1 = pd.DataFrame(self.table)
out = pd.concat([out0, out1], axis = 1)
return(out)
#
def save_Rdata_paths(self, file = None, one_Rdata = True):
if file is None:
out = self.tablename
else:
out = file
df = self.get_Rdata_paths()
df.to_csv(out, header = True, index = False)
#
### make all Rdata onto one / setting
if one_Rdata:
dir = "%s/graphs_Rdata" % self.dir
dir_reset(dir)
for j, label in enumerate(self.label):
filej = "%s/%s.Rdata" % (dir, label)
rowj = df.iloc[j, 1:].tolist()
#
r = pyper.R()
for k, path in enumerate(rowj):
r("load('%s')" % path)
#
#
r("save.image('%s')" % filej)
if os.path.exists(filej):
print("saved %s" % filej)
else:
print("failed %s" % filej)
#
#
#
#
def draw_pub_fig(self):
path = "%s/sub_pub_graph.R" % os.path.dirname(__file__)
with open(path, mode = "r") as f:
cmd = f.read()
cmd = re.sub("__result__", self.dir, cmd)
r = pyper.R()
r(cmd)
#
def draw_pub_dx(self, rdata):
str_rdata = "c(" + ", ".join(["'%s'" % l for l in rdata]) + ")"
#
path = "%s/sub_pub_dx.R" % os.path.dirname(__file__)
with open(path, mode = "r") as f:
cmd = f.read()
cmd = re.sub("__result__", self.dir, cmd)
cmd = re.sub("__rdata__", str_rdata, cmd)
r = pyper.R()
r(cmd)
#
################
### path
################
def find_up(path):
if "/" in path:
path0 = path.split("/")[0]
else:
path0 = path
#print(path0)
path1 = "../"
while len(path1) < 30:
if any([path0 in j for j in os.listdir(path1)]):
pathout = path1 + path
break
else:
path1 = "../" + path1
#
#
return(pathout)
#
################
### time counter
################
class Time_keeper:
def __init__(self):
self.start_t = []
self.start_t.append(time.time())
#
def start_count(self, print_i = True):
self.start_t.append(time.time())
if print_i:
print(len(self.start_t))
#
def get_indice(self):
return(len(self.start_t))
#
def get_elapsed(self, index = 0, seconds = False):
took_time = int(time.time() - self.start_t[index])
if seconds:
out = took_time
#
else:
el_hr = divmod(took_time, 3600)
el_mi = divmod(el_hr[1], 60)
out = "%d hr %02d min %02d sec" % (el_hr[0], el_mi[0], el_mi[1])
#
#
#
return(out)
#
#
################
###
################
def main():
pass
################
###
################
if __name__ == '__main__':
main()
###
|
[
"[email protected]"
] | |
2089c0866d0077654c90b360bd87b465295e4e21
|
ebf3a44a24bdca43553307338ff31440d5e2e11a
|
/src/clustering/clustering/similarity.py
|
97c72f51c1aec50a005124b7ea1ce150f8b34d48
|
[
"MIT"
] |
permissive
|
juhuntenburg/pipelines
|
daeb13896023e22350eb33b90007d8a74d592745
|
9904065cccb8e316cece5451f595a24774f07bd5
|
refs/heads/master
| 2020-04-05T09:27:17.263763 | 2017-05-02T20:28:33 | 2017-05-02T20:28:33 | 30,145,021 | 0 | 3 | null | 2015-02-01T12:09:49 | 2015-02-01T12:09:49 | null |
UTF-8
|
Python
| false | false | 1,789 |
py
|
from nipype.interfaces import afni as afni
import os
from nipype.interfaces.base import BaseInterface, \
BaseInterfaceInputSpec, traits, File, TraitedSpec
from nipype.utils.filemanip import split_filename
class SimilarityInputSpec(BaseInterfaceInputSpec):
in_file = File(exists=True, desc='surface data to construct similarity matrix', mandatory=True)
sim = traits.String(exists=True, desc='type of similarity', mandatory=True)
mask = File(exists=True, desc='mask surface which is correlation target', mandatory=True)
class SimilarityOutputSpec(TraitedSpec):
out_file = File(exists=True, desc="similarity matrix output")
class Similarity(BaseInterface):
input_spec = SimilarityInputSpec
output_spec = SimilarityOutputSpec
def _run_interface(self, runtime):
## correlation matrix ##
corr = afni.AutoTcorrelate()
corr.inputs.in_file = self.inputs.in_file
corr.inputs.mask= self.inputs.mask
corr.inputs.mask_only_targets = self.inputs.sim!='temp'
corr.inputs.out_file = os.path.abspath(self.inputs.sim+'.1D')
##pipe output through another correlation, unless sim type is temp##
corr_res = corr.run()
if self.inputs.sim!='temp':
## similarity matrix ##
similarity = afni.AutoTcorrelate()
similarity.inputs.polort = -1
similarity.inputs.eta2 = self.inputs.sim=='eta2'
similarity.inputs.in_file = corr.inputs.out_file
similarity.inputs.out_file = os.path.abspath(self.inputs.sim+'.1D')
sim_res = similarity.run()
return runtime
def _list_outputs(self):
outputs = self._outputs().get()
outputs["out_file"] = os.path.abspath(self.inputs.sim+'.1D')
return outputs
|
[
"[email protected]"
] | |
c8f3b068e8ec104fe2c76cd33556bc435eca51e3
|
12bd6a1d7b4e4776601baf5448c9730084b81c5c
|
/venv/lib/python3.7/keyword.py
|
484e685dec5dd061a4b68d5771964c9ae143fd60
|
[] |
no_license
|
danijelkecman/weather-flask
|
e711f99a2cad705cd2671291ef17064dcdf04bba
|
1510668704dddca971e54094bac5c84deddfda24
|
refs/heads/master
| 2023-05-13T11:39:56.610433 | 2023-05-02T23:19:14 | 2023-05-02T23:19:14 | 156,313,066 | 1 | 0 | null | 2023-05-02T23:19:15 | 2018-11-06T02:18:15 |
Python
|
UTF-8
|
Python
| false | false | 61 |
py
|
/Users/danijel/.pyenv/versions/3.7.0/lib/python3.7/keyword.py
|
[
"[email protected]"
] | |
ce1e8ee4af6bb77e44595abb85662bd95b62a293
|
8b0add00a2c76ebae9476d36a39e324ade213b86
|
/Matplotlib/ScatterPlot.py
|
694a5215e195e072016ce537ed0042d0ae434cef
|
[] |
no_license
|
HarshKothari21/DataAnalysis_Practice
|
e51269b4132facd3eb7ac85fe3424f94941c6c49
|
7fd1049c5150a2e535bcb36032b6d636c86d4e69
|
refs/heads/master
| 2023-01-19T12:02:18.478609 | 2020-11-27T10:53:02 | 2020-11-27T10:53:02 | 250,902,051 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 997 |
py
|
import pandas as pd
from matplotlib import pyplot as plt
plt.style.use('seaborn')
# x = [5, 7, 8, 5, 6, 7, 9, 2, 3, 4, 4, 4, 2, 6, 3, 6, 8, 6, 4, 1]
# y = [7, 4, 3, 9, 1, 3, 2, 5, 2, 4, 8, 7, 1, 6, 4, 9, 7, 7, 5, 1]
# colors = [7, 5, 9, 7, 5, 7, 2, 5, 3, 7, 1, 2, 8, 1, 9, 2, 5, 6, 7, 5]
# sizes = [209, 486, 381, 255, 191, 315, 185, 228, 174,
# 538, 239, 394, 399, 153, 273, 293, 436, 501, 397, 539]
# plt.scatter(x, y, s=sizes, c=colors, cmap="Greens", edgecolor='black', linewidth=1, alpha=0.75)
# cbar = plt.colorbar()
# cbar.set_label('satisfaction')
data = pd.read_csv('data4.csv')
view_count = data['view_count']
likes = data['likes']
ratio = data['ratio']
plt.scatter(view_count, likes, c=ratio, cmap='summer', edgecolor='black', linewidth=1, alpha=0.75)
cbar = plt.colorbar()
cbar.set_label('Like Dislike Ratio')
plt.xscale('log')
plt.yscale('log')
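# log-log axes keep the heavy-tailed view/like counts readable: most videos
# cluster at low counts while a handful reach into the millions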
plt.title('Trending YouTube Videos')
plt.xlabel('View Count')
plt.ylabel('Total Likes')
plt.tight_layout()
plt.show()
|
[
"[email protected]"
] | |
bc07120ad5e34e021098976f2146c1caf97acd3d
|
fd0cc608faf0447e236ffdb2564e8a96853f1ba6
|
/07_intermediate_python/python-patterns-master/patterns/other/graph_search.py
|
968e4342b0d719056fb76d07453df790fcc8a200
|
[] |
no_license
|
niaid/python_biologist
|
6d27bf3f86a7e249443607dffb1bad9846fd2a79
|
f6cc03d03f10d679b270fd7066382501d9620226
|
refs/heads/master
| 2023-07-19T22:59:09.297053 | 2022-05-10T15:01:21 | 2022-05-10T15:01:21 | 252,785,320 | 9 | 2 | null | 2022-05-09T14:16:36 | 2020-04-03T16:33:39 |
OpenEdge ABL
|
UTF-8
|
Python
| false | false | 1,976 |
py
|
class GraphSearch:
"""Graph search emulation in python, from source
http://www.python.org/doc/essays/graphs/"""
def __init__(self, graph):
self.graph = graph
def find_path(self, start, end, path=None):
path = path or []
path.append(start)
if start == end:
return path
for node in self.graph.get(start, []):
if node not in path:
newpath = self.find_path(node, end, path[:])
if newpath:
return newpath
def find_all_path(self, start, end, path=None):
path = path or []
path.append(start)
if start == end:
return [path]
paths = []
for node in self.graph.get(start, []):
if node not in path:
newpaths = self.find_all_path(node, end, path[:])
paths.extend(newpaths)
return paths
def find_shortest_path(self, start, end, path=None):
path = path or []
path.append(start)
if start == end:
return path
shortest = None
for node in self.graph.get(start, []):
if node not in path:
newpath = self.find_shortest_path(node, end, path[:])
if newpath:
if not shortest or len(newpath) < len(shortest):
shortest = newpath
return shortest
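    # A BFS variant (sketch; find_shortest_path_bfs is not in the original
    # source): on unweighted graphs, breadth-first search reaches the end
    # node along a shortest path while visiting each node at most once.
    def find_shortest_path_bfs(self, start, end):
        from collections import deque
        queue = deque([[start]])
        seen = {start}
        while queue:
            path = queue.popleft()
            node = path[-1]
            if node == end:
                return path
            for nxt in self.graph.get(node, []):
                if nxt not in seen:
                    seen.add(nxt)
                    queue.append(path + [nxt])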
def main():
"""
# example of graph usage
>>> graph = {'A': ['B', 'C'], 'B': ['C', 'D'], 'C': ['D'], 'D': ['C'], 'E': ['F'], 'F': ['C']}
# initialization of new graph search object
>>> graph1 = GraphSearch(graph)
>>> print(graph1.find_path('A', 'D'))
['A', 'B', 'C', 'D']
>>> print(graph1.find_all_path('A', 'D'))
[['A', 'B', 'C', 'D'], ['A', 'B', 'D'], ['A', 'C', 'D']]
>>> print(graph1.find_shortest_path('A', 'D'))
['A', 'B', 'D']
"""
if __name__ == "__main__":
import doctest
doctest.testmod()
|
[
"[email protected]"
] | |
54bf32eb364d3ed74bc83adcfc5cf42f98ff7f41
|
cae5ac194790a05c6c6976fa2751a6be9a33fa89
|
/tests/rackspace_test_boot.py
|
f70e8c3e22cbaa5cd8cdadb874c4ca020919ddad
|
[] |
no_license
|
moonstruck/kozinaki
|
54142b21a6d941623df2376e4f7721cef07a6fd6
|
19309c13a6ef74ac5f72920843022cd076fbe50e
|
refs/heads/master
| 2020-06-06T07:17:36.782154 | 2014-12-23T23:23:41 | 2014-12-23T23:23:41 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,616 |
py
|
# Copyright (c) 2014 CompuNova Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Boot test for Kozinaki Rackspace provider
"""
import unittest
from libcloud.compute.types import NodeState
from base import KozinakiTestBase
class KozinakiRackspaceTestCase(KozinakiTestBase):
def test_boot_ok(self):
instance, image, metadata = self.create_test_objects(
name='test',
size_id='2',
image_id='df924994-b686-449a-86e3-1876998022aa',
provider_name='RACKSPACE',
provider_region='')
self.log.info('Spawn execution')
self.driver.spawn(
context=None,
instance=instance,
image_meta=image,
injected_files=None,
admin_password=None,
network_info=None,
block_device_info=None)
node = self.get_node(instance, state=NodeState.RUNNING)
self.assertEqual(node.state, NodeState.RUNNING)
self.assertEqual(node.name, metadata['provider_instance_name'])
if __name__ == '__main__':
unittest.main()
|
[
"[email protected]"
] | |
25fdbb891b81e16ae59e248f5e55d405a5193f6b
|
7c0417790c363ad38befb58b6cab7254abcefaa4
|
/scripts/deploy_lottery.py
|
d7b0b785bf82bc1306331c9b103195ef32473311
|
[] |
no_license
|
DieKant/smartcontract-lottery
|
453b33468c8773f3d138b5e65d81231dfd2e270f
|
7cfb7eef14e45b3dd18a5c70997920822d973746
|
refs/heads/main
| 2023-08-24T13:15:25.089065 | 2021-10-17T20:12:51 | 2021-10-17T20:12:51 | 412,749,018 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,420 |
py
|
from scripts.helpful_scripts import get_account, get_contract, fund_wiht_link
from brownie import Lottery, network, config
import time
def deploy_lottery():
# pass get_account my local address so the deploy targets a testnet,
# or an index if I want to use ganache
# id="codecamp-training" for mocks
account = get_account()
print(account)
lottery = Lottery.deploy(
# pass the name of the contract to use (take only the address, so we point at it instead of fetching the whole thing)
get_contract("eth_usd_price_feed").address,
get_contract("vrf_coordinator").address,
get_contract("link_token").address,
config["networks"][network.show_active()]["fee"],
config["networks"][network.show_active()]["keyhash"],
{"from": account},
# if there is no verify key in the config, default to False
publish_source=config["networks"][network.show_active()].get("verify", False),
)
print("deploy completato")
# faccio il return per usarla nei test, questo non cambia il funzionamento
return lottery
def start_lottery():
account = get_account()
# grab the most recently deployed contract to operate on
lottery = Lottery[-1]
starting_tx = lottery.startLottery({"from": account})
# wait for the transaction issued by the call above
starting_tx.wait(1)
print("lotteria partita")
def enter_lottery():
account = get_account()
lottery = Lottery[-1]
# send a little extra in case the fee estimate is off
value = lottery.getEntranceFee() + 100000000
tx = lottery.enter({"from": account, "value": value})
tx.wait(1)
print("ora sei un partecipante della lotteria")
# in questa funzione ci servirà del link nel contratto perche dobbiamo prendere il numero random dall'oracle che va pagato
def end_lottery():
account = get_account()
lottery = Lottery[-1]
# fund the contract with LINK
tx = fund_wiht_link(lottery.address)
tx.wait(1)
ending_transaction = lottery.endLottery({"from": account})
ending_transaction.wait(1)
# also wait here, because the chainlink node takes time to send back the random number
time.sleep(60)
print(f"{lottery.recentWinner()} is the new winner!")
def main():
deploy_lottery()
start_lottery()
enter_lottery()
end_lottery()
|
[
"[email protected]"
] | |
6fc250290cd0b7389544fbe3a86bdc07265dc7d7
|
8eccc4cab7ba7292c932468163c711d4058e3b90
|
/app/inheritance/abstract/migrations/0003_auto_20191223_0612.py
|
5f9ce7809d3b1fe08e15168d3691200f35a33369
|
[] |
no_license
|
zehye/django-document-wps12
|
97b1aa4be5a56b949ba59ac92e8d0c5cb3e22f73
|
086fdc581ba3f2db7bc39a6eb906fd97cc61c415
|
refs/heads/master
| 2022-09-08T12:46:19.110011 | 2019-12-26T09:07:15 | 2019-12-26T09:07:15 | 228,784,564 | 0 | 0 | null | 2022-08-23T17:59:03 | 2019-12-18T07:37:14 |
Python
|
UTF-8
|
Python
| false | false | 737 |
py
|
# Generated by Django 3.0 on 2019-12-23 06:12
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('abstract', '0002_auto_20191223_0539'),
]
operations = [
migrations.AlterField(
model_name='childa',
name='m2m',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='abstract_childa', to='abstract.Student'),
),
migrations.AlterField(
model_name='childb',
name='m2m',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='abstract_childb', to='abstract.Student'),
),
]
|
[
"[email protected]"
] | |
4b5e6169ff8d2976efc0b118d1a59ece273b810b
|
fbb1550dc5437d672ed0137bd7711eba3290dee3
|
/students/smckellips/lesson01/inventory_management/inventory_class.py
|
a23818804808193fa47c12c1f674b366c65919ab
|
[] |
no_license
|
JavaRod/SP_Python220B_2019
|
2cc379daf5290f366cf92dc317b9cf68e450c1b3
|
5dac60f39e3909ff05b26721d602ed20f14d6be3
|
refs/heads/master
| 2022-12-27T00:14:03.097659 | 2020-09-27T19:31:12 | 2020-09-27T19:31:12 | 272,602,608 | 1 | 0 | null | 2020-06-16T03:41:14 | 2020-06-16T03:41:13 | null |
UTF-8
|
Python
| false | false | 738 |
py
|
'''
Module for inventory functions.
'''
class Inventory:
'''
Class for inventory functions.
'''
def __init__(self, product_code, description, market_price, rental_price):
self.product_code = product_code
self.description = description
self.market_price = market_price
self.rental_price = rental_price
def return_as_dictionary(self):
'''
Return the inventory class as a dictionary.
'''
output_dict = {}
output_dict['product_code'] = self.product_code
output_dict['description'] = self.description
output_dict['market_price'] = self.market_price
output_dict['rental_price'] = self.rental_price
return output_dict
|
[
"[email protected]"
] | |
7e5363f7bc158f952ae5fcf883d622a0fa2cd660
|
83ed8b754703a1c9e661c90f0763bfebbc0f2606
|
/数据处理/计财Excel/excel_jicai.py
|
35dffa6e8fac69b1cf98e1de6347fdde61ce573e
|
[] |
no_license
|
zbh123/hobby
|
4ce267a20e1af7f2accd2bde8d39af269efa319b
|
2215c406fe7700bf150fd536dd56823a2e4733d1
|
refs/heads/master
| 2021-08-02T10:31:34.683391 | 2021-07-26T07:26:16 | 2021-07-26T07:26:16 | 150,555,879 | 4 | 0 | null | 2021-07-27T07:34:28 | 2018-09-27T08:41:44 |
Python
|
UTF-8
|
Python
| false | false | 11,085 |
py
|
#!python3
# -*- coding:utf-8 -*-
import re
from datetime import datetime, date
import xlrd, xlwt
import time
import os, sys
from xlutils.copy import copy
"""
股票质押明细表操作,
1,选取自有资金。
2,批注及备注中包含本月
3,提取字段
"""
def open_excel(excel_file):
"""
读取excel函数
args:excel_file(excel文件,目录在py文件同目录)
returns:book
"""
try:
book = xlrd.open_workbook(excel_file) # 文件名,把文件与py文件放在同一目录下
return book
except:
print("open excel file failed!")
def filter_sheet(excel_file, target_folder, now_month):
"""
Filter the sheets of the excel file.
:param excel_file:
:return:
"""
book = open_excel(excel_file)  # open the excel file
sheets = book.sheet_names()  # get all sheet names
# find the sheet named '待购回交易(汇总)' (pending-buyback summary) and process it
for sheet in sheets:
if sheet != '待购回交易(汇总)':
continue
# process the matching sheet
handle_excel(book, sheet, target_folder, now_month)
break
def handle_excel(book, sheet, target_folder, now_month):
"""
Process the sheet: keep only '自有资金' (own-funds) rows.
:param book:
:param sheet:
:return:
"""
# create the output workbook
workbook = xlwt.Workbook(encoding='utf-8')
worksheet = workbook.add_sheet('待赎回交易(处理后)')
# read the rows of the source sheet
sh = book.sheet_by_name(sheet)
row_num = sh.nrows
# copy the header row into the new sheet
row_data = sh.row_values(0)
for i, content in enumerate(row_data):
worksheet.write(0, i, content)
# process each data row
r = 1
for row in range(1, row_num):
row_data = sh.row_values(row)
# funding source; keep only '自有资金' (own-funds) rows
investor = row_data[1]
if investor != '自有资金':
continue
dateFormat = xlwt.XFStyle()
# write this row into the new sheet
for i, content in enumerate(row_data):
# date columns need special formatting
if i == 0 or i == 26:
date_value = xlrd.xldate_as_tuple(content, 0)
date_value = date(*date_value[:3]).strftime('%Y/%m/%d')
date_value = time_format(date_value)
dateFormat.num_format_str = 'yyyy/m/d'
worksheet.write(r, i, date_value, dateFormat)
else:
worksheet.write(r, i, content)
# advance the output row
r = r + 1
workbook.save(target_folder + '/自有资金-待赎回交易.xlsx')
def handle_comment(target_file, now_month):
"""
Process the comment column (column 23).
:return:
"""
# read the file written in the previous step
book = open_excel(target_file)
sh = book.sheet_by_index(0)
row_num = sh.nrows
colx_num = sh.ncols
# open a writable copy
workbook = copy(book)
worksheet = workbook.get_sheet(0)
# worksheet.write(0, colx_num, '批注')
for row in range(1, row_num):
row_data = sh.row_values(row)
comment = row_data[23]
# (optionally write the raw comment into the last column)
# worksheet.write(row, colx_num, comment)
# process the comment: split it and, when a part mentions this month, write its dates and amounts into trailing columns
com = comment.split(';')
index_row = 0 # tracks the rightmost column used on this row
for c in com:
print(c)
if not (now_month + '/' in c):
continue
# extract the dates and amounts from this part
date_reg_exp = re.compile('\d{4}[-/]\d{1,2}[-/]\d{1,2}')
matches_list = date_reg_exp.findall(c)
print(matches_list)
# amounts: grab the number preceding 万 (ten-thousands) or 元 (yuan)
for matches in matches_list:
c_no_date = c.replace(matches, '')
print(c_no_date)
c_num_unit = re.findall(r'\d+(?:\.\d+)?万', c_no_date)
print(c_num_unit)
c_num2_unit = re.findall(r'\d+(?:\.\d+)?元', c_no_date)
print(c_num2_unit)
            # write the extracted values into the sheet
            index_date = 0  # columns used by this month's dates
            index_money_w = 0  # columns used by 万 amounts
            index_money_y = 0  # columns used by 元 amounts
            for index, d in enumerate(matches_list):  # 'd' avoids shadowing datetime.date
                if now_month + '/' in d:
                    worksheet.write(row, colx_num + index_date + index_row, d)
                    index_date = index_date + 1
for index2, c_num in enumerate(c_num_unit):
c_num = re.findall(r'\d+(?:\.\d+)?', c_num)
                worksheet.write(row, colx_num + index_date + index2 + index_row, float(c_num[0]) * 10000)
index_money_w = index2 + 1
for index3, c_num2 in enumerate(c_num2_unit):
c_num2 = re.findall(r'\d+(?:\.\d+)?', c_num2)
worksheet.write(row, colx_num + index_date + index_money_w + index3 + index_row, c_num2[0])
index_money_y = index3 + 1
index_row = index_date + index_money_w + index_money_y + index_row
workbook.save(target_file)
def handle_remarks(target_file, now_month):
"""
处理备注
:return:
"""
    # read the file written in the previous step
book = open_excel(target_file)
sh = book.sheet_by_index(0)
row_num = sh.nrows
colx_num = sh.ncols
    # set up a writable copy of the workbook
workbook = copy(book)
worksheet = workbook.get_sheet(0)
# worksheet.write(0, colx_num, '备注')
for row in range(1, row_num):
row_data = sh.row_values(row)
remarks = row_data[27]
        # write the raw remark into the last column first (currently disabled)
# worksheet.write(row, colx_num, remarks)
        # process the remark: split it into segments; when a segment carries this month's date plus numbers, append them to the row
com = remarks.split(';')
index_row = 0
for c in com:
# print(c)
if not (now_month + '/' in c):
continue
if not ('变更' in c):
continue
            # extract the dates in this segment
            date_reg_exp = re.compile(r'\d{4}[-/]\d{1,2}[-/]\d{1,2}')
matches_list = date_reg_exp.findall(c)
            # strip the deferral ('延期') dates first
for matches in matches_list:
c = c.replace('延期' + matches, '')
c = c.replace('延期到' + matches, '')
print('----' + c)
            # take the dates and rates before/after each change ('变更'); normally one split, but several changes may occur
array = c.split('变更')
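            # e.g. (hypothetical) '2021/7/1费率变更为5.5%' splits into
            # ['2021/7/1费率', '为5.5%']: the date is read from the part before
            # 变更 and the percentage from the part after it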
            for index, seg in enumerate(array):  # 'seg' avoids shadowing builtin str
                if index == len(array) - 1:
                    break
                date_reg_exp = re.compile(r'\d{4}[-/]\d{1,2}[-/]\d{1,2}')
                matches_date_list = date_reg_exp.findall(seg)
print(matches_date_list)
per_reg_exp = re.compile(r"\d+\.\d*%|\d*%")
matches_per_list = per_reg_exp.findall(array[index + 1])
print(matches_per_list)
                # if the segment contains 分之 (a fraction) preceded by no date or by a current-month date, extract it
date_fenshu = ''
fenshu = ''
if array[index + 1].find("分之") != -1:
index_temp = array[index + 1].find("分之")
c_bef = array[index + 1][0:index_temp - 1]
d_reg_exp = re.compile(r'\d{4}[-/]\d{1,2}[-/]\d{1,2}')
m_date_list = d_reg_exp.findall(c_bef)
if len(m_date_list) == 0:
fenshu = array[index + 1][int(index_temp) - 1: int(index_temp) + 3]
elif now_month + '/' in m_date_list[len(m_date_list) - 1]:
date_fenshu = m_date_list[len(m_date_list) - 1]
fenshu = array[index + 1][int(index_temp) - 1: int(index_temp) + 3]
print(date_fenshu)
print(fenshu)
                chg_date = matches_date_list[len(matches_date_list) - 1]
                per = matches_per_list[0]
                if now_month + '/' in chg_date:
                    worksheet.write(row, colx_num + index_row, chg_date)
                    worksheet.write(row, colx_num + 1 + index_row, per)
                    # only a fraction, no extra date
if fenshu != '' and date_fenshu == '':
worksheet.write(row, colx_num + 2 + index_row, fenshu)
index_row = index_row + 1 + 2
                    # both a fraction and its date
elif fenshu != '' and date_fenshu != '':
worksheet.write(row, colx_num + 2 + index_row, date_fenshu)
worksheet.write(row, colx_num + 3 + index_row, fenshu)
index_row = index_row + 1 + 3
else:
index_row = index_row + 1 + 1
if now_month + '/' in date_fenshu:
if fenshu != '' and date_fenshu != '':
worksheet.write(row, colx_num + index_row, date_fenshu)
worksheet.write(row, colx_num + 1 + index_row, fenshu)
index_row = index_row + 2
workbook.save(target_file)
def time_format(date_value):
"""
时间格式化 去掉月份,日期前面的0
:param date_value:
:return:
"""
dates = date_value.split('/')
if len(dates) == 3:
month = dates[1].lstrip('0')
day = dates[2].lstrip('0')
return dates[0] + '/' + month + '/' + day
elif len(dates) == 2:
month = dates[1].lstrip('0')
return dates[0] + '/' + month
else:
return date_value
if __name__ == '__main__':
source_file = r'D:\0RPA\计划财务部\财务rpa\魏丽Excel\科目余额表.xls'
# source_file = r'C:\Users\LiGuangxi\Desktop\RPA需求\计财\股票质押明细表(仅供参考,请核对).xlsx'
target_file = r'D:\0RPA\计划财务部\财务rpa\魏丽Excel'
now_time = time.strftime("%Y%m%d", time.localtime(time.time()))
    # exit with an error if the source file is missing
    if not os.path.exists(source_file):
        print("source file not found")
sys.exit(1)
    # create the target folder if it does not exist
target_folder = target_file + '/' + now_time
if not os.path.exists(target_folder):
os.makedirs(target_folder)
    # current month
now_month = time.strftime("%Y/%m", time.localtime(time.time()))
    # ---------------------start: change now_month below to process any other month---------------------
    # now_month = '2020/12'
    # ---------------------end: change now_month above to process any other month-----------------------
    # filter the source workbook
    filter_sheet(source_file, target_folder, now_month)
    # parse the comment and remark columns
handle_comment(target_folder + '/自有资金-待赎回交易.xlsx', now_month)
handle_remarks(target_folder + '/自有资金-待赎回交易.xlsx', now_month)
|
[
"[email protected]"
] | |
ea6c75d7901bc3c2c2b99057719760e4d00c157d
|
0b1790e0f3b230ea1b2b08578370e0ef332be8f6
|
/manage.py
|
1cf317f5f02620f5f108e49add30f228229b34e2
|
[] |
no_license
|
SA-Deve/ProjectBlog
|
d3a4d6f56903085027aae7ce38f5c129f004ed05
|
f0c0c3d7f3d72415e4883627f9b799bd9dfa3876
|
refs/heads/master
| 2022-12-07T15:53:25.519635 | 2020-08-16T16:27:06 | 2020-08-16T16:27:06 | 287,935,569 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 632 |
py
|
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'ProjectBlogs.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
|
[
"[email protected]"
] | |
08b1a08138cf2a9f104b5f00cfba5cf8fb7aaa24
|
de6f57fa8391d447a50b1fe2f394cc2fc0488bfa
|
/BookMyShow/urls.py
|
7470e069be75c7a4371b1370572efd74c250c991
|
[] |
no_license
|
himdhiman/BMS-2
|
ce8db13d88dacd27b45757f5d30b78717041d0f8
|
440886028006211a1995f9d28d21fde9caf7fb0a
|
refs/heads/master
| 2021-09-27T17:25:10.187898 | 2021-01-21T15:40:19 | 2021-01-21T15:40:19 | 205,708,449 | 1 | 0 | null | 2021-09-22T17:58:58 | 2019-09-01T17:16:10 |
JavaScript
|
UTF-8
|
Python
| false | false | 1,031 |
py
|
"""BookMyShow URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from movies.views import SearchView
urlpatterns = [
path('admin/', admin.site.urls),
path('accounts/', include('auth.urls')),
path('', include('movies.urls')),
path('cinema/', include('cinema.urls')),
path('tickets/', include('tickets.urls')),
path('search/', SearchView.as_view(), name = 'search')
]
|
[
"[email protected]"
] | |
e4586b0f5765576ab04d9219405f92bf1d7bdeb1
|
511ba6b5e456be55bd07f20699572b3d351013de
|
/app/lol/forms.py
|
a78e21890ddd3460f71025f6b77fcced7f034716
|
[] |
no_license
|
Specimen209/leagr2
|
02a03b0d1c365c101e3f21423c0f91c6d82355d0
|
ed502b1ddd605510e915b9d73f3dcce3117667c0
|
refs/heads/master
| 2022-10-09T06:47:15.247732 | 2020-02-28T14:31:01 | 2020-02-28T14:31:01 | 239,011,167 | 0 | 0 | null | 2022-09-16T18:17:14 | 2020-02-07T19:52:49 |
HTML
|
UTF-8
|
Python
| false | false | 358 |
py
|
from flask_wtf import FlaskForm
from wtforms.fields.html5 import EmailField, TelField
from wtforms import validators, StringField, PasswordField, TextAreaField, SubmitField, BooleanField
from flask_wtf.file import FileField, FileAllowed
from wtforms.ext.sqlalchemy.fields import QuerySelectField
from flask_ckeditor import CKEditorField
from .. import db
|
[
"[email protected]"
] | |
30fd4c0678c66be6af14d4791127bff677a27ddd
|
a69924d0ccdb289af3ca9bca14236dd881e5ab99
|
/machineLearning.py
|
b35658c7b2c797cf64d806c51eaf53e81e675d0d
|
[] |
no_license
|
RGuseynov/Financial_Inclusion
|
7926a330bcb28d73d67aa62fd8459573d19faa26
|
84814f010d654c2c72c03f575b2e34abac5968bc
|
refs/heads/master
| 2023-01-31T05:14:52.300097 | 2020-12-01T15:46:52 | 2020-12-01T15:46:52 | 317,588,421 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,344 |
py
|
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score,recall_score,precision_score,f1_score
from sklearn.metrics import confusion_matrix
from sklearn.feature_selection import SelectKBest, chi2
from sklearn.model_selection import GridSearchCV
from sklearn import tree
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import OneHotEncoder
from matplotlib import pyplot
import xgboost as xgb
from sklearn import svm
df = pd.read_csv("Data/Train_v2.csv")
df = df.drop(["uniqueid"], axis=1)
# # Vector as cell value
# X_categorical = df.select_dtypes(include=[object])
# enc = OneHotEncoder(handle_unknown='ignore')
# for column in X_categorical.columns:
# temp_df = pd.DataFrame(enc.fit_transform(X_categorical[[column]]).toarray())
# X_categorical[column] = temp_df.to_numpy().tolist()
# Dataset balancing
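# (undersampling: keep every "Yes" row and draw an equally sized random
#  sample of "No" rows so the two classes end up balanced 1:1)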
number_of_Yes = df.groupby(["bank_account"])["bank_account"].count()["Yes"]
df_No_account = df[df["bank_account"] == "No"]
df_Yes_account = df[df["bank_account"] == "Yes"]
df_No_account_Sample = df_No_account.sample(number_of_Yes)
df_Balanced = pd.concat([df_Yes_account, df_No_account_Sample], ignore_index=True)
le = LabelEncoder()
y = le.fit_transform(df_Balanced["bank_account"])
X = df_Balanced.drop(["bank_account"], axis=1)
X = pd.get_dummies(X)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
# # Best features selection
# selector = SelectKBest(chi2, k=10)
# selector.fit_transform(X_train, y_train)
# cols = selector.get_support(indices=True)
# best_X_train = X_train.iloc[:,cols]
# # Decision tree classification
# clf = tree.DecisionTreeClassifier()
# clf.fit(X_train, y_train)
# y_pred = clf.predict(X_test)
# print(accuracy_score(y_test, y_pred))
# print(confusion_matrix(y_test, y_pred))
def KBestTreeClassificationLoop(X, y):
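    """For every feature count k, draw a fresh train/test split, keep the k
    best chi2-selected features, fit a decision tree, and log
    accuracy/precision/recall/F1 per k to a CSV file."""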
row_list = []
for i in range(1, len(X.columns)):
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
selector = SelectKBest(chi2, k=i)
selector.fit_transform(X_train, y_train)
cols = selector.get_support(indices=True)
best_X_train = X_train.iloc[:,cols]
best_X_test = X_test.iloc[:,cols]
clf = tree.DecisionTreeClassifier()
clf.fit(best_X_train, y_train)
y_pred = clf.predict(best_X_test)
temp_dict = {"nombre_de_features": i,
"accuracy": accuracy_score(y_test, y_pred),
"precision": precision_score(y_test, y_pred),
"recall": recall_score(y_test, y_pred),
"f1_score": f1_score(y_test, y_pred)
}
print(confusion_matrix(y_test, y_pred))
row_list.append(temp_dict)
temp_df = pd.DataFrame(row_list)
temp_df.to_csv("training_analysis/TreeCLassificationFeaturesNumberBalanced2.csv")
# KBestTreeClassificationLoop(X, y)
# # XGboost classifier
# model=xgb.XGBClassifier(learning_rate=0.01, max_depth=20)
# model.fit(X_train, y_train)
# # plot
# # xgb.plot_importance(model)
# # pyplot.show()
# y_pred = model.predict(X_test)
# print(accuracy_score(y_test, y_pred))
# print(recall_score(y_test, y_pred))
# print(f1_score(y_test, y_pred))
# print(confusion_matrix(y_test, y_pred))
# # all features with their importance score
# zipped = list(zip(X_train.columns, model.feature_importances_))
# zipped = sorted(zipped, key = lambda tup: tup[1], reverse=True)
# # only valuable features
# zipped2 = list(filter(lambda tup: tup[1] > 0, zipped))
# #XGBoost number features loop
# row_list = []
# for i in range(0, len(zipped2)):
# x_temp = X_train[[t[0] for t in zipped2][0: i + 1]]
# print(x_temp)
# model.fit(x_temp, y_train)
# x_temp_test = X_test[[t[0] for t in zipped2][0: i + 1]]
# print(model.score(x_temp_test,y_test))
# y_temp_pred = model.predict(x_temp_test)
# temp_dict = {"nombre_de_features": i+1,
# "accuracy": accuracy_score(y_test, y_temp_pred),
# "precision": precision_score(y_test, y_temp_pred),
# "recall": recall_score(y_test, y_temp_pred),
# "f1_score": f1_score(y_test, y_temp_pred)
# }
# row_list.append(temp_dict)
# df_xgboost_features_result = pd.DataFrame(row_list)
# df_xgboost_features_result.to_csv("training_analysis/XGBoostFeaturesNumberBalanced.csv")
|
[
"[email protected]"
] | |
fb18237e50b60ad7bbcfdb1e3feb562d44904519
|
a2844a2cc2c45d93c27bead19dcfd1c473f3aad2
|
/authapp/views.py
|
9d6b0958e31cfa93e0987a6be44923178f58c8f3
|
[] |
no_license
|
ASV1870asv1977/asv-server2
|
39aa36425e17de2173302779d721adec2d3f637e
|
6f1cbf58b46aa02999afcb23693c7a7e389d0290
|
refs/heads/master
| 2023-07-24T03:58:20.646106 | 2021-08-31T12:03:53 | 2021-08-31T12:03:53 | 392,986,942 | 0 | 0 | null | 2021-08-31T12:03:53 | 2021-08-05T09:42:27 |
Python
|
UTF-8
|
Python
| false | false | 3,489 |
py
|
from django.shortcuts import render, HttpResponseRedirect
from authapp.forms import ShopUserLoginForm, ShopUserRegisterForm, ShopUserProfileEdit
from django.contrib import auth
from django.urls import reverse
from django.conf import settings
from django.core.mail import send_mail
from authapp.forms import ShopUserEditForm
from authapp.models import ShopUser
def login(request):
    title = 'login'
login_form = ShopUserLoginForm(data=request.POST or None)
next = request.GET['next'] if 'next' in request.GET.keys() else ''
#print('next', next)
if request.method == 'POST' and login_form.is_valid():
username = request.POST['username']
password = request.POST['password']
user = auth.authenticate(username=username, password=password)
if user and user.is_active:
auth.login(request, user)
if 'next' in request.POST.keys():
#print('redirect next', request.POST['next'])
return HttpResponseRedirect(request.POST['next'])
else:
return HttpResponseRedirect(reverse('main'))
content = {
'title': title,
'login_form': login_form,
'next': next
}
return render(request, 'authapp/login.html', content)
def logout(request):
auth.logout(request)
return HttpResponseRedirect(reverse('main'))
def register(request):
    title = 'registration'
if request.method == 'POST':
register_form = ShopUserRegisterForm(request.POST, request.FILES)
if register_form.is_valid():
user = register_form.save()
if send_verify_mail(user):
print('success sending')
else:
print('sending failed')
return HttpResponseRedirect(reverse('auth:login'))
else:
register_form = ShopUserRegisterForm()
content = {'title': title, 'register_form': register_form}
return render(request, 'authapp/register.html', content)
def edit(request):
    title = 'profile editing'
if request.method == 'POST':
edit_form = ShopUserEditForm(request.POST, request.FILES, instance=request.user)
profile_form = ShopUserProfileEdit(request.POST, instance=request.user.shopuserprofile)
if edit_form.is_valid() and profile_form.is_valid():
edit_form.save()
return HttpResponseRedirect(reverse('auth:edit'))
else:
edit_form = ShopUserEditForm(instance=request.user)
profile_form = ShopUserProfileEdit(instance=request.user.shopuserprofile)
content = {'title': title, 'edit_form': edit_form, 'profile_form': profile_form}
return render(request, 'authapp/edit.html', content)
def verify(request, email, activation_key):
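    """Activate the account if the emailed activation key matches and has not
    expired, then log the user in and render the verification page."""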
user = ShopUser.objects.filter(email=email).first()
if user:
if user.activation_key == activation_key and not user.is_activation_key_expired():
user.is_active = True
user.save()
auth.login(request, user)
return render(request, 'authapp/verify.html')
return HttpResponseRedirect(reverse('main'))
def send_verify_mail(user):
subject = 'Verify your account'
link = reverse('auth:verify', args=[user.email, user.activation_key])
message = f'{settings.DOMAIN}{link}'
return send_mail(subject, message, settings.EMAIL_HOST_USER, [user.email], fail_silently=False)
|
[
"[email protected]"
] | |
029f69b24d71aad0ea9e3c1ab946b1a407f9b9fa
|
bf450b34ed441b775ea9914bf6bebed9610a3dbb
|
/hgame2020/week1/Pwn/Number Killer/Number.py
|
3396043b86bc406dbc5600cb3f18ba98194ca7fe
|
[] |
no_license
|
p199yw4ng/CTF
|
1a6b182bb42de8cf3585d1b805406b296dea41a2
|
6c81576e191ece03523595fe128f4e752289ff83
|
refs/heads/master
| 2020-12-20T16:48:39.445655 | 2020-02-08T05:31:16 | 2020-02-08T05:31:16 | 236,143,538 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 457 |
py
|
from pwn import *
import time
context.log_level = 'debug'
context(arch='amd64', os='linux')
cn = remote('47.103.214.163', 20001)
# cn = process('./Number_Killer')
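# The decimal constants sent below are presumably the exploit payload encoded
# as 64-bit integers (packed addresses/instruction words); their exact meaning
# depends on the Number_Killer binary and is an assumption here.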
print(cn.recv())
# loop body reconstructed (assumption): send the filler value 13 times,
# pausing briefly each time; the payload numbers below are then sent once
for i in range(1, 14):
    cn.sendline('47244640256')
    sleep(0.1)
cn.sendline('4196237')
sleep(0.1)
cn.sendline('7074926021049463112')
sleep(0.1)
cn.sendline('-1458805190845043095')
sleep(0.1)
cn.sendline('5212724049075524360')
sleep(0.1)
cn.sendline('5562984097417')
cn.interactive()
|
[
"[email protected]"
] | |
278b8e682bc501a77202c694f122550190dcb82e
|
5109bc49adc5525d3cd50c4f736600dbee9996cb
|
/066-Easy-PlusOne.py
|
a47f8c76129221ef0bb6423c54795e8811dc9444
|
[] |
no_license
|
mariobeaulieu/leetcode
|
447df79e4a0457014345f33cc545a63cb82c2066
|
abfbb97080158dc5532dc2bd2de7b9b449fdc7f5
|
refs/heads/master
| 2020-05-14T20:21:00.355757 | 2019-09-30T01:53:49 | 2019-09-30T01:53:49 | 181,942,959 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 518 |
py
|
#!/usr/bin/env python
import sys
from typing import List
class Solution:
def plusOne(self, digits: List[int]) -> List[int]:
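        """Add one to the integer encoded as a digit list, carrying in place;
        e.g. [1, 2, 9] -> [1, 3, 0] and [9, 9] -> [1, 0, 0]."""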
ll=len(digits)
for i in range(ll):
ll2=ll-1-i
digits[ll2]+=1
if digits[ll2]<10:
break
digits[ll2]=0
if ll2==0:
digits.insert(0,1)
return digits
s = Solution()
myList = list(map(int, sys.argv[1:]))
print('Result of plusOne on the input list: %s' % s.plusOne(myList))
|
[
"[email protected]"
] | |
9c038c2afd1ea1459a7bf0d73d5d30b1fb93b2df
|
0a58c0f9537a8291445f753f759b3892efc3a5bc
|
/services/venv/bin/rst2odt_prepstyles.py
|
b2976f588cca641e3c412ea5062751d31eb62985
|
[] |
no_license
|
jasonshere/MAPS
|
3f34fe3ce505aa123e55b4eb8f367e39eca50dfd
|
7f9c9691d29d48a6503e61c9cb990bb5670884d4
|
refs/heads/master
| 2020-03-27T23:14:05.105897 | 2018-10-08T06:42:00 | 2018-10-08T06:42:00 | 147,303,805 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,766 |
py
|
#!/Users/JasonLee/Repository/IoT_Assignment_2/doctor_patient/services/venv/bin/python3
# $Id: rst2odt_prepstyles.py 5839 2009-01-07 19:09:28Z dkuhlman $
# Author: Dave Kuhlman <[email protected]>
# Copyright: This module has been placed in the public domain.
"""
Fix a word-processor-generated styles.odt for odtwriter use: Drop page size
specifications from styles.xml in STYLE_FILE.odt.
"""
#
# Author: Michael Schutte <[email protected]>
from lxml import etree
import sys
import zipfile
from tempfile import mkstemp
import shutil
import os
NAMESPACES = {
"style": "urn:oasis:names:tc:opendocument:xmlns:style:1.0",
"fo": "urn:oasis:names:tc:opendocument:xmlns:xsl-fo-compatible:1.0"
}
def prepstyle(filename):
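    # Strategy: parse styles.xml, drop every fo:* attribute (page size,
    # margins) from the style:page-layout-properties elements, then rewrite
    # the archive member-by-member so everything else is preserved.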
zin = zipfile.ZipFile(filename)
styles = zin.read("styles.xml")
root = etree.fromstring(styles)
for el in root.xpath("//style:page-layout-properties",
namespaces=NAMESPACES):
for attr in el.attrib:
if attr.startswith("{%s}" % NAMESPACES["fo"]):
del el.attrib[attr]
tempname = mkstemp()
zout = zipfile.ZipFile(os.fdopen(tempname[0], "w"), "w",
zipfile.ZIP_DEFLATED)
for item in zin.infolist():
if item.filename == "styles.xml":
zout.writestr(item, etree.tostring(root))
else:
zout.writestr(item, zin.read(item.filename))
zout.close()
zin.close()
shutil.move(tempname[1], filename)
def main():
args = sys.argv[1:]
if len(args) != 1:
        print(__doc__, file=sys.stderr)
        print("Usage: %s STYLE_FILE.odt\n" % sys.argv[0], file=sys.stderr)
sys.exit(1)
filename = args[0]
prepstyle(filename)
if __name__ == '__main__':
main()
# vim:tw=78:sw=4:sts=4:et:
|
[
"[email protected]"
] | |
2dd331830c8da0eca6ca46d05d214d1443501f2f
|
4ede275efc8bc9f9ef121dc37215d2f0d8453e36
|
/primer1.py
|
20a96af89513d28f097429ac8bc17040ee3ff8f6
|
[] |
no_license
|
shanthivimalanataraajan01/code
|
bfa8a441b0c360aebd02248ad4433cc21889c3d2
|
ea467ae1eefd68a5dceaa53aab7149d31bd5faf6
|
refs/heads/master
| 2020-04-15T05:01:03.625422 | 2019-05-17T09:35:45 | 2019-05-17T09:35:45 | 164,405,963 | 0 | 2 | null | null | null | null |
UTF-8
|
Python
| false | false | 177 |
py
|
#vimala
#hi
m, n = map(int, input().split())
primes = []
# collect the primes strictly between m and n (values below 2 are never prime)
for k in range(m + 1, n):
    if k > 1:
        for i in range(2, k):
            if k % i == 0:
                break
        else:
            primes.append(str(k))
print(' '.join(primes))
|
[
"[email protected]"
] | |
efae7520995f8ad3e0624364bbf8175761cae2fd
|
3c40387650df6d93c4b226d6eaed7b77ec97c1bd
|
/box-file-management/simple-sql.py
|
e44dd2cf532b890f01129c9c37544c5d47220368
|
[] |
no_license
|
stephenberndt/Data_Projects
|
fe04bb9c252de5afa59c2bba30d37cfba130b51b
|
953860500bf16c955d00cd3b5778b56e49d732f7
|
refs/heads/master
| 2020-03-12T17:35:35.922492 | 2018-04-23T20:09:19 | 2018-04-23T20:09:19 | 130,739,834 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,322 |
py
|
# -*- coding: utf-8 -*-
import psycopg2
from sqlalchemy import create_engine
import pandas as pd
import json
from configparser import ConfigParser
parser = ConfigParser()
parser.read('config.ini')
db_name = parser.get('Redshift', 'db_name')
host = parser.get('Redshift', 'host')
port = parser.get('Redshift', 'port')
username = parser.get('Redshift', 'username')
pwd = parser.get('Redshift', 'pwd')
conn_string = 'postgresql://' + username + ':' + pwd + '@' + host + ':' + port + '/' + db_name
engine = create_engine(conn_string).connect()
print('connected to Redshift')
# with open('sql-queries.json') as sql_file:
# sql_data = json.load(sql_file)
# for query_data in sql_data:
# query, name = query_data['sql'], query_data['name']
# print('fetching query results for ' + name)
# data_frame = pd.read_sql_query(query, engine)
# print(str(data_frame.shape[0]) + ' rows found')
# print('writing .csv')
# data_frame.to_csv(temp_file_dir + name, index=False)
# print('wrote ' + name)
# break
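# list every user-created schema, skipping pg_* and other system namespaces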
query = "select nspname from pg_namespace WHERE nspname NOT LIKE 'pg%%' AND nspname NOT IN ('logs', 'public', 'information_schema') ORDER BY nspname asc;"
data_frame = pd.read_sql_query(query, engine)
print(data_frame)
|
[
"[email protected]"
] |