blob_id (string, 40) | directory_id (string, 40) | path (string, 5-283) | content_id (string, 40) | detected_licenses (sequence, 0-41 items) | license_type (string, 2 classes) | repo_name (string, 7-96) | snapshot_id (string, 40) | revision_id (string, 40) | branch_name (string, 58 classes) | visit_date (timestamp[us]) | revision_date (timestamp[us]) | committer_date (timestamp[us]) | github_id (int64, 12.7k-662M, nullable) | star_events_count (int64, 0-35.5k) | fork_events_count (int64, 0-20.6k) | gha_license_id (string, 11 classes) | gha_event_created_at (timestamp[us]) | gha_created_at (timestamp[us]) | gha_language (string, 43 classes) | src_encoding (string, 9 classes) | language (string, 1 class) | is_vendor (bool) | is_generated (bool) | length_bytes (int64, 7-5.88M) | extension (string, 30 classes) | content (string, 7-5.88M) | authors (sequence, 1 item) | author (string, 0-73)
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---
6538486fea182499046079d127ad288a683d5a91 | 5a45cf619c853e125a9b02b894dfd5235b21b95a | /monkey_study/study_python04_function.py | 0e0b8a417e6f6cf2fe3ab96e6a0e27770254f368 | [] | no_license | SmartComputerMonkey/python | 833554d2a6a4ae3b95a8a00baba062a77a7b7f78 | 37dab2648bd2e71f01672a5459f3f7faff28e22a | refs/heads/master | 2021-09-02T12:20:13.736401 | 2018-01-02T15:06:29 | 2018-01-02T15:06:29 | 116,026,718 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 948 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = "Monkey"
#Set operations
#What they are for
#1. Deduplication: turning a list into a set automatically removes duplicates (the result is unordered)
list_1 = [1,4,5,7,3,6,7,9]
list_1 = set(list_1)
print(list_1,type(list_1))
#2. Relation tests: intersection, difference, union, etc. between two collections of data
list_2 = set([2,6,0,66,22,8,4])
print(list_1,list_2)
#intersection
print(list_1.intersection(list_2))
#union
print(list_1.union(list_2))
#difference
print(list_1.difference(list_2))
print(list_2.difference(list_1))
#subset
list_3 = set([1,3,7])
print(list_3.issubset(list_1))
print(list_1.issubset(list_3))
#symmetric difference
print(list_1.symmetric_difference(list_2))
print("-------------------------------")
#isdisjoint returns True if the two sets have no elements in common
list_4 = set([5,6,8])
print(list_3.isdisjoint(list_4))
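#For reference, a minimal sketch of the operator forms of the set methods used above
#(standard Python set operators; set_a/set_b are illustrative names, not from the lesson):
set_a = set([1, 2, 3])
set_b = set([2, 3, 4])
print(set_a & set_b)   # intersection, same as set_a.intersection(set_b)
print(set_a | set_b)   # union, same as set_a.union(set_b)
print(set_a - set_b)   # difference, same as set_a.difference(set_b)
print(set_a ^ set_b)   # symmetric difference, same as set_a.symmetric_difference(set_b)
print(set_a <= set_b)  # subset test, same as set_a.issubset(set_b)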
#Basic function syntax and features
#Parameters and local variables
#Return values
#Recursion
#Anonymous (lambda) functions
#Introduction to functional programming
#Higher-order functions
#Built-in functions
| [
"[email protected]"
] | |
45cf4712437f3c195508edc962277b1397c02ff0 | 734b4605f0a8fc217e9d0eb9062e76d70ada61b2 | /noxfile.py | 260a1972b6558b609426f95693c250ddadec6433 | [
"BSD-3-Clause"
] | permissive | themperek/cocotb | ab77f6509ad1203b05675962435c562265384c1f | a6cf91bbc9c88323c3dd5bbceddc4abb5c650a27 | refs/heads/master | 2023-02-19T06:57:31.478370 | 2022-02-27T13:13:29 | 2022-02-27T13:13:29 | 28,962,065 | 0 | 1 | NOASSERTION | 2022-02-27T12:50:24 | 2015-01-08T11:01:13 | Python | UTF-8 | Python | false | false | 2,535 | py | # Copyright cocotb contributors
# Licensed under the Revised BSD License, see LICENSE for details.
# SPDX-License-Identifier: BSD-3-Clause
import glob
import nox
test_deps = ["coverage", "pytest", "pytest-cov"]
dev_deps = [
"black",
"isort",
"mypy",
"pre-commit",
"nox",
"tox",
"flake8",
"clang-format",
]
@nox.session
def tests(session: nox.Session) -> None:
"""run cocotb regression suite"""
session.env["CFLAGS"] = "-Werror -Wno-deprecated-declarations -g --coverage"
session.env["COCOTB_LIBRARY_COVERAGE"] = "1"
session.env["CXXFLAGS"] = "-Werror"
session.env["LDFLAGS"] = "--coverage"
session.install(*test_deps)
session.install("-e", ".")
session.run("pytest")
session.run("make", "test", external=True)
coverage_files = glob.glob("**/.coverage.cocotb", recursive=True)
session.run("coverage", "combine", "--append", *coverage_files)
@nox.session
def docs(session: nox.Session) -> None:
"""invoke sphinx-build to build the HTML docs"""
session.install("-r", "documentation/requirements.txt")
session.install("-e", ".")
outdir = session.cache_dir / "docs_out"
session.run(
"sphinx-build", "./documentation/source", str(outdir), "--color", "-b", "html"
)
index = (outdir / "index.html").resolve().as_uri()
session.log(f"Documentation is available at {index}")
@nox.session
def docs_linkcheck(session: nox.Session) -> None:
"""invoke sphinx-build to linkcheck the docs"""
session.install("-r", "documentation/requirements.txt")
session.install("-e", ".")
outdir = session.cache_dir / "docs_out"
session.run(
"sphinx-build",
"./documentation/source",
str(outdir),
"--color",
"-b",
"linkcheck",
)
@nox.session
def docs_spelling(session: nox.Session) -> None:
"""invoke sphinx-build to spellcheck the docs"""
session.install("-r", "documentation/requirements.txt")
session.install("-e", ".")
outdir = session.cache_dir / "docs_out"
session.run(
"sphinx-build",
"./documentation/source",
str(outdir),
"--color",
"-b",
"spelling",
)
@nox.session(reuse_venv=True)
def dev(session: nox.Session) -> None:
"""Build a development environment and optionally run a command given as extra args"""
session.install(*test_deps)
session.install(*dev_deps)
session.install("-e", ".")
if session.posargs:
session.run(*session.posargs, external=True)
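# Example invocations (assuming nox is installed; these comments are not part of the original file):
#   nox -s tests            # run the cocotb regression suite
#   nox -s docs             # build the HTML documentation
#   nox -s dev -- pytest    # build a dev environment, then run an arbitrary command in it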
| [
"[email protected]"
] | |
4217f6b86b63e379d08ad2b72ab3612fb8b4a7f4 | e518f1493a2c5ea52dbe98d7de7e078459fcff30 | /Supervised_Learning/data_prep.py | 2f1f0b30418febfdbc14993f40e6c30ea2070b40 | [] | no_license | wegesdal/udacity-ml | aa63232ef1573cead7623b8c5b6684f9da5797bf | e875cb92130bebe1c02afd7f24da4ec63ae9269c | refs/heads/master | 2021-01-08T16:23:44.766653 | 2020-02-28T17:24:23 | 2020-02-28T17:24:23 | 242,078,937 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 824 | py | import numpy as np
import pandas as pd
admissions = pd.read_csv('~/udacity-ml/Grad_Descent_Backprop/binary.csv')
# Make dummy variables for rank
data = pd.concat([admissions, pd.get_dummies(admissions['rank'], prefix='rank')], axis=1)
data = data.drop('rank', axis=1)
# Standardize features
for field in ['gre', 'gpa']:
mean, std = data[field].mean(), data[field].std()
data.loc[:,field] = (data[field]-mean)/std
# Split off random 10% of the data for testing
np.random.seed(21)
sample = np.random.choice(data.index, size=int(len(data)*0.9), replace=False)
data, test_data = data.iloc[sample], data.drop(sample)
print(data.shape)
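# Sanity-check note (assuming the classic 400-row UCLA admissions csv): this prints (360, 7),
# i.e. 90% of the rows, with 'rank' replaced by the four rank_1..rank_4 dummy columns.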
# Split into features and targets
features, targets = data.drop('admit', axis=1), data['admit']
features_test, targets_test = test_data.drop('admit', axis=1), test_data['admit'] | [
"[email protected]"
] | |
6aba9aaca7ac55616fed63285f0ad9fca9ee20de | 54f352a242a8ad6ff5516703e91da61e08d9a9e6 | /Source Codes/CodeJamData/11/43/8.py | 1daa4631c758b6fd37cdadd9427a6c8fdeb77665 | [] | no_license | Kawser-nerd/CLCDSA | 5cbd8a4c3f65173e4e8e0d7ed845574c4770c3eb | aee32551795763b54acb26856ab239370cac4e75 | refs/heads/master | 2022-02-09T11:08:56.588303 | 2022-01-26T18:53:40 | 2022-01-26T18:53:40 | 211,783,197 | 23 | 9 | null | null | null | null | UTF-8 | Python | false | false | 855 | py | import sys
import fractions
infile = sys.stdin
def genprimes():
composites = {}
candidate = 2
while True:
if candidate not in composites:
yield candidate
composites[candidate*candidate] = [candidate]
else:
for p in composites[candidate]:
composites.setdefault(p + candidate, []).append(p)
del composites[candidate]
candidate += 1
powers = set()
pmax = 1e12
for p in genprimes():
power = p*p
if power>pmax: break
while power<pmax:
powers.add(power)
power *= p
def get_spread(N):
if N==1: return 0
# 1 plus the count of prime powers p**k (k >= 2) that are <= N
return 1 + sum(1 for p in powers if N>=p)
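# Tracing the code above (an illustration, not an official test case): the prime powers
# collected in `powers` that are <= 10 are {4, 8, 9}, so get_spread(10) returns 1 + 3 = 4.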
T = int(infile.readline())
for i in xrange(T):
N = int(infile.readline())
print("Case #%d: %d" % (i+1, get_spread(N)))
| [
"[email protected]"
] | |
8e6e213730e86c9fdb515f40d12b56ea422c269c | a2b3184d40a6a9b519184efd5e9a81e26057e9a0 | /my_stuff/download_Example.py | ad5e69a1aa3d0fbd5767273687b455fcf6fb5704 | [] | no_license | plevp/my_first_repo | 823c65f0fde9b86dd5b400a1831d21d85ea0d6d0 | 470dfdc7507031d745a732f467bd2aff64ed4588 | refs/heads/master | 2020-04-16T06:34:53.691638 | 2017-02-18T22:15:54 | 2017-02-18T22:15:54 | 21,724,169 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 553 | py | import urllib2
import sys
import os
import BeautifulSoup
from subprocess import call
try:
url = "http://web.eecs.umich.edu/~radev/coursera-slides"
s = urllib2.urlopen(url)
soup = BeautifulSoup.BeautifulSoup(s)
print soup.prettify()
links = soup.findAll("a")
print len(links)
for item in links:
print item.text
print url + "/" + item['href']
call(["wget", "-P", "all_slides", url + "/" + item['href']])
except Exception:
print "Error in connection. Please check URL again."
## http://web.eecs.umich.edu/~radev/coursera-slides/
| [
"[email protected]"
] | |
6aff66d18128171ac9f85987412455f9d5b05661 | ea4fad285f1dcad4c48bb888df5e4b6afb533b7b | /Cap12_modulos/01_datetime/script7.py | 9abdd0802a1df451df0294cf8af6877965bd0cde | [] | no_license | frclasso/FCGurus_python1_turma_novembro2019 | 0856cb8eaa25ec20ae207960666f1c12f96160e9 | 5f97eb4b40d115d4c9846be91448e9183823bea8 | refs/heads/master | 2021-02-16T20:38:27.969672 | 2020-06-18T00:17:51 | 2020-06-18T00:17:51 | 245,042,800 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 239 | py | from datetime import datetime, timedelta
# D-1 scenario (yesterday's date)
now = datetime.now().date()
print(now)
delta = timedelta(days=1)
ontem = now - delta
print(ontem)
data_futura = timedelta(days=7)
print(f'One week from now: {now + data_futura}') | [
"[email protected]"
] | |
2f15ae48032d0568fa73e6ec1d2b3b2099398289 | da35eb3d2cf487e2d726246f523b04d55c323fb4 | /vsm/ui/views/__init__.py | e64e76d922ec0b19a7ef9d1062d4a186bcb0ca49 | [
"MIT"
] | permissive | rabramley/volca_sample_manager | 89282d042133d063f579a6b56d36441d9444feef | 6c43371f8a7087c620e4b89de0eaa84d5a6ee904 | refs/heads/master | 2022-11-09T16:52:12.940718 | 2020-06-22T18:24:18 | 2020-06-22T18:24:18 | 255,284,346 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 55 | py | __all__ = [
"home",
"sample",
"bank",
] | [
"[email protected]"
] | |
e80b5c44eb6124ea2d6363b55ff01e9442c18c23 | 0806547e7c839c16cc674d22d26a0b7d635c5bc7 | /motioncontrol/test.py | 6f4cd3f8df6e579ccf31ddba99d6e38b14cebf7b | [] | no_license | vinx-2105/service_bot | 957643f9a1c98f407aebd304baa5c1c15df0f643 | a5512ac23abc3de5c1621dfb9de4eaf26bd092c9 | refs/heads/master | 2020-04-08T00:36:18.274938 | 2019-03-09T16:20:44 | 2019-03-09T16:20:44 | 158,858,221 | 3 | 0 | null | 2018-12-01T16:40:38 | 2018-11-23T16:54:32 | Python | UTF-8 | Python | false | false | 850 | py | import sensorcontrol as sc
import motorcontrol2 as mc
from time import sleep
s1=3
s2=5
s3=7
s4=11 #near=12
s5=13
m11=29
m12=33
m21=35
m22=37
motors=mc.MotorControl(m11,m12,m21,m22)
motors.stop()
#motors.moveForward()
#sleep(5)
#motors.turnRightHard()
motors.turnLeftHard()
#motors.moveForward()
#motors.moveBackward()
sleep(5)
#motors.moveForward()
#sleep(0.2)
#i=1
#sum=0
#while i<=40:
# motors.moveForward()
# sleep(0.01)
# motors.stop()
# sleep(0.04)
#if pos==10 or pos==100:
# sum=sum+1
# i=i+1
#motors.stop()
#if end point then break
# elif position==2 :
# self.motors.turnLeftHard
# elif position==1 :
# self.motors.turnLeft
# elif position==0 :
# self.motors.moveForward
# elif position==-2 :
# self.motors.turnRightHard
# elif position==-1 :
# self.motors.turnRight
# sleep(.1)
# self.motors.stop();
| [
"[email protected]"
] | |
09997bd5dea4b9c39aa7c798831e80d5613be12e | e3365bc8fa7da2753c248c2b8a5c5e16aef84d9f | /indices/bure.py | 03da775208dc23e73d5b48559ccb7fa18d6d8e33 | [] | no_license | psdh/WhatsintheVector | e8aabacc054a88b4cb25303548980af9a10c12a8 | a24168d068d9c69dc7a0fd13f606c080ae82e2a6 | refs/heads/master | 2021-01-25T10:34:22.651619 | 2015-09-23T11:54:06 | 2015-09-23T11:54:06 | 42,749,205 | 2 | 3 | null | 2015-09-23T11:54:07 | 2015-09-18T22:06:38 | Python | UTF-8 | Python | false | false | 101 | py | ii = [('LyelCPG.py', 1), ('DibdTRL2.py', 2), ('ClarGE3.py', 1), ('DibdTRL.py', 3), ('EvarJSP.py', 1)] | [
"[email protected]"
] | |
c49ae8d8c49637e49f65333414193c822ec7852d | 93ae61e5f79ff927d94f41c7700c0dc0dff89e6a | /WaveletStatistics/WaveletStatistics.py | f252b8b050e7f18f76499e064ae2265745d68e21 | [] | no_license | mohanakannan9/hcp_qc_tools | 9449ecd61b5bf04bf094028021049dff49c1d96c | 18629756f2076320ebb75efa6c46446298286ecc | refs/heads/master | 2021-01-18T14:23:13.968763 | 2015-06-23T04:37:15 | 2015-06-23T04:37:15 | 37,896,724 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,352 | py | '''
Created on Jun 19, 2012
@author: Tony
'''
import os
import csv
import time
import math
import numpy
import pywt
import socket
import argparse
import nipy as nip
import nibabel as nib
from scipy import stats
import matplotlib.cm as cm
import matplotlib.pyplot as pyplot
import matplotlib.animation as animation
#import matplotlib.image as pyimg
#import pymorph as pym
print "Running on " + socket.gethostname()
#===============================================================================
# PARSE INPUT
#===============================================================================
parser = argparse.ArgumentParser(description="Test program to do wavelet statistics on a nifit image...")
parser.add_argument("-D", "--ImageDir", dest="niftiDir", default=os.getcwd(), type=str, help="specify nifti directory...")
parser.add_argument("-N", "--Image", dest="niftiFile", type=str, help="specify nifti file...")
parser.add_argument("-O", "--OutputDir", dest="outputDir", type=str, help="specify where to write the output files...")
parser.add_argument("-F", "--FractionElim", dest="fracElimSlices", type=float, default=0.19, help="specify what fraction of top and bottom slices to eliminate...")
parser.add_argument("-U", "--UseFractionElim", dest="fracElimSlicesUse", type=str, default="y", help="specify if fraction of slices to be eliminated...")
InputArgs = parser.parse_args()
niftiDir = InputArgs.niftiDir
niftiFile = InputArgs.niftiFile
outputDir = InputArgs.outputDir
fracElimSlices = InputArgs.fracElimSlices
fracElimSlicesUse = InputArgs.fracElimSlicesUse
#===============================================================================
sTime = time.time()
printMasks = False
showAni = False
showSNR = False
printStats = True
osName = os.name
hostName = socket.gethostname()
numpy.seterr(divide = 'ignore')
waveName = 'coif1';
waveLevel = 5
#===============================================================================
# FUNCTIONS
#===============================================================================
def fPrintWaveletStats( inputMat, inputType, inputSubject, outputDir ):
StatsListSz = inputMat.shape
headerStr = ['Volume', 'cA', 'cH Scale 1', 'cV Scale 1', 'cD Scale 1', 'cH Scale 2', 'cV Scale 2', 'cD Scale 2', 'cH Scale 3', 'cV Scale 3', 'cD Scale 3', 'cH Scale 4', 'cV Scale 4', 'cD Scale 4', 'cH Scale 5', 'cV Scale 5', 'cD Scale 5']
fileName = inputSubject + '_WaveletKurt' +inputType+ '_' +str(len(numpyNipyDataSz))+ '.txt'
if not os.path.exists(outputDir):
os.makedirs(outputDir)
fileStatsId = csv.writer(open(outputDir +os.sep+ fileName, 'wb'), delimiter='\t')
fileStatsId.writerow(headerStr)
if len(StatsListSz) == 2:
fileStatsId.writerow([1, inputMat[0,0], inputMat[1,1], inputMat[2,1], inputMat[3,1], inputMat[1,2], inputMat[2,2], inputMat[3,2], inputMat[1,3], inputMat[2,3], inputMat[3,3], inputMat[1,4], inputMat[2,4], inputMat[3,4], inputMat[1,5], inputMat[2,5], inputMat[3,5]])
else:
for j in xrange(0, StatsListSz[2]):
fileStatsId.writerow([j, inputMat[0,0,j], inputMat[1,1,j], inputMat[2,1,j], inputMat[3,1,j], inputMat[1,2,j], inputMat[2,2,j], inputMat[3,2,j], inputMat[1,3,j], inputMat[2,3,j], inputMat[3,3,j], inputMat[1,4,j], inputMat[2,4,j], inputMat[3,4,j], inputMat[1,5,j], inputMat[2,5,j], inputMat[3,5,j]])
#===============================================================================
def fStripSession( inputName ):
# check for session on input subject string...
if (inputName.find("_strc") != -1) or (inputName.find("_diff") != -1) or (inputName.find("_fnc") != -1) or (inputName.find("_xtr") != -1):
# strip out the session stuff. Total hack with the index < stuff...
sessionIdx = inputName.index("_")
inputSubject = inputName[0:sessionIdx]
try:
fileIdx = inputName.index(".")
# ACK! Hard coding...
if (sessionIdx < 8):
outputName = inputSubject +'.'+ inputName[fileIdx:]
except:
sessionIdxEnd = inputName[sessionIdx+1:].index("_")
inputName = inputName[sessionIdxEnd+sessionIdx+2:]
outputName = inputSubject +'_'+ inputName
else:
outputName = inputName
return outputName
#===============================================================================
def fStripExtension( inputName ):
inputNameNoExtension, inputNameExtension = os.path.splitext(inputName)
if inputNameExtension == '.gz':
inputNameNoExtension, inputNameExtension = os.path.splitext(inputNameNoExtension)
return inputNameNoExtension
else:
return inputNameNoExtension
#===============================================================================
def fPrintVec( inputVec, nSlices, nFrames, inputSubject, outputDir ):
StatsListLen = len(inputVec)
headerStr = ['Volume', 'Slice', 'contrastRMS']
fileName = inputSubject + '_contrastRMS_' +str(len(numpyNipyDataSz))+ '.txt'
if not os.path.exists(outputDir):
os.makedirs(outputDir)
fileStatsId = csv.writer(open(outputDir +os.sep+ fileName, 'wb'), delimiter='\t')
fileStatsId.writerow(headerStr)
linIdx = 0
for i in xrange(0, nFrames):
for j in xrange(0, nSlices):
fileStatsId.writerow([(i+1), (j+1), inputVec[linIdx]])
linIdx += 1
#===============================================================================
inputFileName = niftiFile
niftiFile = niftiDir +os.sep+ niftiFile
print "File: " + niftiFile
nimBabel = nib.load(niftiFile)
nimBabelAffine = nimBabel.get_affine()
nimBabelScale = [nimBabelAffine[0,0], nimBabelAffine[1,1], nimBabelAffine[2,2]]
numpyNipyData = nip.load_image(niftiFile)
numpyNipyData = numpy.float32(numpy.asarray(numpyNipyData))
numpyNipyDataSz = numpy.asarray(numpyNipyData.shape)
#set up bool for keeping middle slices...
nElimSlices = round(numpyNipyDataSz[2] * fracElimSlices)
boolElimSlices = numpy.zeros(numpyNipyDataSz[2], dtype=numpy.int)
if (fracElimSlicesUse == 'y'):
boolElimSlices[nElimSlices:(numpyNipyDataSz[2] - nElimSlices)] = 1
else:
boolElimSlices[0:numpyNipyDataSz[2]] = 1
ElimSlicesIdx = numpy.nonzero(boolElimSlices)
print "Eliminating " +str( round(numpyNipyDataSz[2] - sum(boolElimSlices)) )+ " of " +str(numpyNipyDataSz[2])+ " slices ..."
kurtList = list()
kurtMeanList = list()
kurtStdList = list()
contrastList = list()
if len(numpyNipyDataSz) == 4:
kurtMatFunctionalMean = numpy.zeros([4, waveLevel+1, numpyNipyDataSz[3]])
kurtMatFunctionalStd = numpy.zeros([4, waveLevel+1, numpyNipyDataSz[3]])
kurtMatFunctionalMax = numpy.zeros([4, waveLevel+1, numpyNipyDataSz[3]])
kurtMatFunctionalMin = numpy.zeros([4, waveLevel+1, numpyNipyDataSz[3]])
for h in xrange(0, numpyNipyDataSz[3]):
currVol = numpyNipyData[:,:,:,h]
kurtMat = numpy.zeros([numpyNipyDataSz[2], 4, waveLevel+1])
for i in xrange(0, numpyNipyDataSz[2]):
numpyData = currVol[:,:,i]
# RMS contrast of this slice: root-mean-square deviation of the voxels from the slice mean
currRMS = math.sqrt( numpy.sum((numpy.mean(numpyData) - numpyData.ravel())**2 ) / (numpyNipyDataSz[0] * numpyNipyDataSz[1]) )
contrastList.append(currRMS)
#[phi, psi, x] = pywt.Wavelet('db2').wavefun(level=4)
#cA, (cH, cV, cD) = pywt.dwt2(numpyData, 'db3')
coeffs = pywt.wavedec2(numpyData, waveName, level=waveLevel)
for j in xrange(0, waveLevel+1):
currCoeffs = numpy.asarray(coeffs[j])
#===================================================================
# low pass coeffs
#===================================================================
if len(currCoeffs.shape) == 2:
n, min_max, mean, var, skew, kurt = stats.describe(currCoeffs.ravel())
kurtList.append(kurt)
kurtMat[i,0,j] = kurt
#===================================================================
# HVD coeffs
#===================================================================
elif len(currCoeffs.shape) == 3:
for k in xrange(0, 3):
hvdCoeffs = currCoeffs[k,:]
n, min_max, mean, var, skew, kurt = stats.describe(hvdCoeffs.ravel())
kurtList.append(kurt)
kurtMat[i,k+1,j] = kurt
kurtMeanList.append(numpy.mean(kurtList))
kurtStdList.append(numpy.std(kurtList))
# kurtMatMean = numpy.mean(kurtMat, axis=0)
# kurtMatStd = numpy.std(kurtMat, axis=0)
# kurtMatMax = numpy.max(kurtMat, axis=0)
# kurtMatMin = numpy.min(kurtMat, axis=0)
kurtMatMean = numpy.mean(kurtMat[ElimSlicesIdx[0],:,:], axis=0)
kurtMatStd = numpy.std(kurtMat[ElimSlicesIdx[0],:,:], axis=0)
kurtMatMax = numpy.max(kurtMat[ElimSlicesIdx[0],:,:], axis=0)
kurtMatMin = numpy.min(kurtMat[ElimSlicesIdx[0],:,:], axis=0)
kurtMatFunctionalMean[:,:,h] = kurtMatMean
kurtMatFunctionalStd[:,:,h] = kurtMatStd
kurtMatFunctionalMax[:,:,h] = kurtMatMax
kurtMatFunctionalMin[:,:,h] = kurtMatMin
kurtMatMean = kurtMatFunctionalMean
kurtMatStd = kurtMatFunctionalStd
kurtMatMax = kurtMatFunctionalMax
kurtMatMin = kurtMatFunctionalMin
elif len(numpyNipyDataSz) == 3:
currVol = numpyNipyData
kurtMat = numpy.zeros([numpyNipyDataSz[2], 4, waveLevel+1])
for i in xrange(0, numpyNipyDataSz[2]):
numpyData = currVol[:,:,i]
currRMS = math.sqrt( numpy.sum((numpy.mean(numpyData) - numpyData.ravel())**2 ) / (numpyNipyDataSz[0] * numpyNipyDataSz[1]) )
contrastList.append(currRMS)
#print numpyData.shape()
#cA, (cH, cV, cD) = pywt.dwt2(numpyData, 'db3')
coeffs = pywt.wavedec2(numpyData, waveName, level=waveLevel)
# print [coeffs[1,0]]
for j in xrange(0, waveLevel+1):
currCoeffs = numpy.asarray(coeffs[j])
# tmpCoeffs = [[[currCoeffs[0]], [currCoeffs[1,0]], [currCoeffs[1,1]], [currCoeffs[1,2]]]]
#===================================================================
# low pass coeffs
#===================================================================
if len(currCoeffs.shape) == 2:
n, min_max, mean, var, skew, kurt = stats.describe(currCoeffs.ravel())
kurtList.append(kurt)
kurtMat[i,0,j] = kurt
# pyplot.imshow(currCoeffs)
# pyplot.show()
#===================================================================
# HVD coeffs
#===================================================================
elif len(currCoeffs.shape) == 3:
for k in xrange(0, 3):
hvdCoeffs = currCoeffs[k,:]
n, min_max, mean, var, skew, kurt = stats.describe(hvdCoeffs.ravel())
kurtList.append(kurt)
kurtMat[i,k+1,j] = kurt
# pyplot.imshow(hvdCoeffs, interpolation='none', cmap=cm.gray)
# pyplot.show()
kurtMeanList.append(numpy.mean(kurtList))
kurtStdList.append(numpy.std(kurtList))
# kurtMatMeanNS = numpy.mean(kurtMat, axis=0)
# kurtMatStd = numpy.std(kurtMat, axis=0)
# kurtMatMax = numpy.max(kurtMat, axis=0)
# kurtMatMin = numpy.min(kurtMat, axis=0)
kurtMatMean = numpy.mean(kurtMat[ElimSlicesIdx[0],:,:], axis=0)
kurtMatStd = numpy.std(kurtMat[ElimSlicesIdx[0],:,:], axis=0)
kurtMatMax = numpy.max(kurtMat[ElimSlicesIdx[0],:,:], axis=0)
kurtMatMin = numpy.min(kurtMat[ElimSlicesIdx[0],:,:], axis=0)
#pyplot.imshow(kurtMat[:,:,0], interpolation='none', cmap=cm.gray)
#pyplot.show()
tTime = time.time() - sTime
print("Duration: %s" % tTime)
if printStats:
niftiFileName = fStripExtension( inputFileName )
outputName = fStripSession( niftiFileName )
fPrintWaveletStats( kurtMatMean, 'Mean', outputName, outputDir )
fPrintWaveletStats( kurtMatStd, 'STD', outputName, outputDir )
fPrintWaveletStats( kurtMatMax, 'Max', outputName, outputDir )
fPrintWaveletStats( kurtMatMin, 'Min', outputName, outputDir )
if len(numpyNipyDataSz) == 3:
fPrintVec( contrastList, numpyNipyDataSz[2], 1, outputName, outputDir )
else:
fPrintVec( contrastList, numpyNipyDataSz[2], numpyNipyDataSz[3], outputName, outputDir )
#pyplot.figure()
#if len(numpyNipyDataSz) == 4:
## pyplot.plot(kurtMeanList, linestyle='--', marker='o', color='r', markersize=5)
# pyplot.errorbar(xrange(0,len(kurtMeanList)), kurtMeanList, yerr=kurtStdList, linestyle='--', marker='o', color='r', markersize=5)
#else:
# pyplot.hist(kurtList, bins=100)
## pyplot.plot(kurtMeanList, linestyle='--', marker='o', color='r', markersize=5)
# pyplot.figure()
# pyplot.plot(contrastList, linestyle='--', marker='o', color='r', markersize=5)
#pyplot.show()
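# Example invocation, with hypothetical paths (the flags are defined by the argparse setup above):
#   python WaveletStatistics.py -D /data/nifti -N scan.nii.gz -O /data/qc_output -F 0.19 -U y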
| [
"[email protected]"
] | |
ba180a8e28a502a2bcab2b680e0e5285541eb57a | f9970fd94692bfd7ec392f399dd37f998df3e96c | /lstm_example.py | af28af4699da0e0023ffbe65babc8aca8204d63b | [] | no_license | nickc92/RNNBaroque | 3b7805a5f9bd6f951483d0c61aec15f9337d5586 | a051ee2bb49d7ba9f6ce91cdb10d234a6aad1770 | refs/heads/master | 2021-01-01T05:44:38.675654 | 2016-04-16T00:59:53 | 2016-04-16T00:59:53 | 56,019,573 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,027 | py | '''
Recurrent network example. Trains a 2 layered LSTM network to learn
text from a user-provided input file. The network can then be used to generate
text using a short string as seed (refer to the variable generation_phrase).
This example is partly based on Andrej Karpathy's blog
(http://karpathy.github.io/2015/05/21/rnn-effectiveness/)
and a similar example in the Keras package (keras.io).
The inputs to the network are batches of sequences of characters and the corresponding
targets are the characters in the text shifted to the right by one.
Assuming a sequence length of 5, a training point for a text file
"The quick brown fox jumps over the lazy dog" would be
INPUT : 'T','h','e',' ','q'
OUTPUT: 'u'
The loss function compares (via categorical crossentropy) the prediction
with the output/target.
Also included is a function to generate text using the RNN given the first
character.
About 20 or so epochs are necessary to generate text that "makes sense".
Written by @keskarnitish
Pre-processing of text uses snippets of Karpathy's code (BSD License)
'''
from __future__ import print_function
import numpy as np
import theano
import theano.tensor as T
import lasagne, time
import urllib2 #For downloading the sample text file. You won't need this if you are providing your own file.
try:
in_text = urllib2.urlopen('https://s3.amazonaws.com/text-datasets/nietzsche.txt').read()
#You can also use your own file
#The file must be a simple text file.
#Simply edit the file name below and uncomment the line.
#in_text = open('your_file.txt', 'r').read()
in_text = in_text.decode("utf-8-sig").encode("utf-8")
except Exception as e:
print("Please verify the location of the input file/URL.")
print("A sample txt file can be downloaded from https://s3.amazonaws.com/text-datasets/nietzsche.txt")
raise IOError('Unable to Read Text')
generation_phrase = "The quick brown fox jumps" #This phrase will be used as seed to generate text.
#This snippet loads the text file and creates dictionaries to
#encode characters into a vector-space representation and vice-versa.
chars = list(set(in_text))
data_size, vocab_size = len(in_text), len(chars)
char_to_ix = { ch:i for i,ch in enumerate(chars) }
ix_to_char = { i:ch for i,ch in enumerate(chars) }
#Lasagne Seed for Reproducibility
lasagne.random.set_rng(np.random.RandomState(1))
# Sequence Length
SEQ_LENGTH = 20
# Number of units in the two hidden (LSTM) layers
N_HIDDEN = 512
# Optimization learning rate
LEARNING_RATE = .01
# All gradients above this will be clipped
GRAD_CLIP = 100
# How often should we check the output?
PRINT_FREQ = 1000
# Number of epochs to train the net
NUM_EPOCHS = 50
# Batch Size
BATCH_SIZE = 128
def gen_data(p, batch_size = BATCH_SIZE, data=in_text, return_target=True):
'''
This function produces a semi-redundant batch of training samples from the location 'p' in the provided string (data).
For instance, assuming SEQ_LENGTH = 5 and p=0, the function would create batches of
5 characters of the string (starting from the 0th character and stepping by 1 for each semi-redundant batch)
as the input and the next character as the target.
To make this clear, let us look at a concrete example. Assume that SEQ_LENGTH = 5, p = 0 and BATCH_SIZE = 2
If the input string was "The quick brown fox jumps over the lazy dog.",
For the first data point,
x (the inputs to the neural network) would correspond to the encoding of 'T','h','e',' ','q'
y (the targets of the neural network) would be the encoding of 'u'
For the second point,
x (the inputs to the neural network) would correspond to the encoding of 'h','e',' ','q', 'u'
y (the targets of the neural network) would be the encoding of 'i'
The data points are then stacked (into a three-dimensional tensor of size (batch_size,SEQ_LENGTH,vocab_size))
and returned.
Notice that there is overlap of characters between the batches (hence the name, semi-redundant batch).
'''
x = np.zeros((batch_size,SEQ_LENGTH,vocab_size))
y = np.zeros(batch_size)
for n in range(batch_size):
ptr = n
for i in range(SEQ_LENGTH):
x[n,i,char_to_ix[data[p+ptr+i]]] = 1.
if(return_target):
y[n] = char_to_ix[data[p+ptr+SEQ_LENGTH]]
return x, np.array(y,dtype='int32')
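# Shape check, following directly from the code above: with the default settings,
# x has shape (BATCH_SIZE, SEQ_LENGTH, vocab_size) holding one-hot characters, and
# y has shape (BATCH_SIZE,) holding the integer index of each next character.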
def main(num_epochs=NUM_EPOCHS):
print("Building network ...")
# First, we build the network, starting with an input layer
# Recurrent layers expect input of shape
# (batch size, SEQ_LENGTH, num_features)
l_in = lasagne.layers.InputLayer(shape=(None, None, vocab_size))
# We now build the LSTM layer which takes l_in as the input layer
# We clip the gradients at GRAD_CLIP to prevent the problem of exploding gradients.
l_forward_1 = lasagne.layers.LSTMLayer(
l_in, N_HIDDEN, grad_clipping=GRAD_CLIP,
nonlinearity=lasagne.nonlinearities.tanh)
l_forward_2 = lasagne.layers.LSTMLayer(
l_forward_1, N_HIDDEN, grad_clipping=GRAD_CLIP,
nonlinearity=lasagne.nonlinearities.tanh)
# The l_forward layer creates an output of dimension (batch_size, SEQ_LENGTH, N_HIDDEN)
# Since we are only interested in the final prediction, we isolate that quantity and feed it to the next layer.
# The output of the sliced layer will then be of size (batch_size, N_HIDDEN)
l_forward_slice = lasagne.layers.SliceLayer(l_forward_2, -1, 1)
# The sliced output is then passed through the softmax nonlinearity to create probability distribution of the prediction
# The output of this stage is (batch_size, vocab_size)
l_out = lasagne.layers.DenseLayer(l_forward_slice, num_units=vocab_size, W = lasagne.init.Normal(), nonlinearity=lasagne.nonlinearities.softmax)
# Theano tensor for the targets
target_values = T.ivector('target_output')
# lasagne.layers.get_output produces a variable for the output of the net
network_output = lasagne.layers.get_output(l_out)
# The loss function is calculated as the mean of the (categorical) cross-entropy between the prediction and target.
cost = T.nnet.categorical_crossentropy(network_output,target_values).mean()
# Retrieve all parameters from the network
all_params = lasagne.layers.get_all_params(l_out,trainable=True)
# Compute AdaGrad updates for training
print("Computing updates ...")
updates = lasagne.updates.adagrad(cost, all_params, LEARNING_RATE)
# Theano functions for training and computing cost
print("Compiling functions ...")
train = theano.function([l_in.input_var, target_values], cost, updates=updates, allow_input_downcast=True)
compute_cost = theano.function([l_in.input_var, target_values], cost, allow_input_downcast=True)
# In order to generate text from the network, we need the probability distribution of the next character given
# the state of the network and the input (a seed).
# In order to produce the probability distribution of the prediction, we compile a function called probs.
probs = theano.function([l_in.input_var],network_output,allow_input_downcast=True)
# The next function generates text given a phrase of length at least SEQ_LENGTH.
# The phrase is set using the variable generation_phrase.
# The optional input "N" is used to set the number of characters of text to predict.
def try_it_out(N=200):
'''
This function uses the user-provided string "generation_phrase" and current state of the RNN generate text.
The function works in three steps:
1. It converts the string set in "generation_phrase" (which must be over SEQ_LENGTH characters long)
to encoded format. We use the gen_data function for this. By providing the string and asking for a single batch,
we are converting the first SEQ_LENGTH characters into encoded form.
2. We then use the LSTM to predict the next character and store it in a (dynamic) list sample_ix. This is done by using the 'probs'
function which was compiled above. Simply put, given the output, we compute the probabilities of the target and pick the one
with the highest predicted probability.
3. Once this character has been predicted, we construct a new sequence using all but first characters of the
provided string and the predicted character. This sequence is then used to generate yet another character.
This process continues for "N" characters.
To make this clear, let us again look at a concrete example.
Assume that SEQ_LENGTH = 5 and generation_phrase = "The quick brown fox jumps".
We initially encode the first 5 characters ('T','h','e',' ','q'). The next character is then predicted (as explained in step 2).
Assume that this character was 'J'. We then construct a new sequence using the last 4 (=SEQ_LENGTH-1) characters of the previous
sequence ('h','e',' ','q') , and the predicted letter 'J'. This new sequence is then used to compute the next character and
the process continues.
'''
assert(len(generation_phrase)>=SEQ_LENGTH)
sample_ix = []
x,_ = gen_data(len(generation_phrase)-SEQ_LENGTH, 1, generation_phrase,0)
for i in range(N):
# Pick the character that got assigned the highest probability
ix = np.argmax(probs(x).ravel())
# Alternatively, to sample from the distribution instead:
# ix = np.random.choice(np.arange(vocab_size), p=probs(x).ravel())
sample_ix.append(ix)
x[:,0:SEQ_LENGTH-1,:] = x[:,1:,:]
x[:,SEQ_LENGTH-1,:] = 0
x[0,SEQ_LENGTH-1,sample_ix[-1]] = 1.
random_snippet = generation_phrase + ''.join(ix_to_char[ix] for ix in sample_ix)
print("----\n %s \n----" % random_snippet)
print("Training ...")
print("Seed used for text generation is: " + generation_phrase)
p = 0
totT = totN = 0.0
try:
for it in xrange(data_size * num_epochs / BATCH_SIZE):
try_it_out() # Generate text from the fixed seed phrase, using the current network weights.
avg_cost = 0;
for ll in range(PRINT_FREQ):
x,y = gen_data(p)
print(p, ll, PRINT_FREQ)
p += SEQ_LENGTH + BATCH_SIZE - 1
if(p+BATCH_SIZE+SEQ_LENGTH >= data_size):
print('Carriage Return')
p = 0;
t1 = time.time()
avg_cost += train(x, y)
t2 = time.time()
totT += t2 - t1
totN += 1
print('avg train time:', totT / totN)
print("Epoch {} average loss = {}".format(it*1.0*PRINT_FREQ/data_size*BATCH_SIZE, avg_cost / PRINT_FREQ))
except KeyboardInterrupt:
pass
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
bec52e31f3f9e2329eccde05952eb6b19156e2d2 | 37e87b3d5e1ee9009f0ea0671bc0c6edf0e233b7 | /1754.py | 745bbc7fc7f3f3e7d42efdd00102de494df876ed | [] | no_license | Jane11111/Leetcode2021 | d9f4987792938597bf89ff72ba6bbcb4a3f9d081 | a95b871578aae0103066962c33b8c0f4ec22d0f2 | refs/heads/master | 2023-07-14T21:29:41.196752 | 2021-08-23T03:28:02 | 2021-08-23T03:28:02 | 344,804,297 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,142 | py | # -*- coding: utf-8 -*-
# @Time : 2021-08-02 10:57
# @Author : zxl
# @FileName: 1754.py
class Solution:
def recSolve(self, word1, word2, dic):
# memoized greedy merge: dic caches results keyed by the two remaining suffixes
if word1 in dic and word2 in dic[word1]:
return dic[word1][word2]
if len(word1) > len(word2):
# the merge is symmetric in its arguments, so normalize the order for the cache
return self.recSolve(word2, word1, dic)
if word1 not in dic:
dic[word1] = {}
if len(word1) == 0 and len(word2) == 0:
return ''
if len(word1) == 0:
s = word2
elif len(word2)== 0:
s = word1
else:
if word1[0]>word2[0]:
s = word1[0]+self.recSolve(word1[1:],word2,dic)
elif word1[0]<word2[0]:
s = word2[0]+self.recSolve(word1,word2[1:],dic)
else:
s1 = word1[0]+self.recSolve(word1[1:],word2,dic)
s2 = word2[0]+self.recSolve(word1,word2[1:],dic)
s = max(s1,s2)
dic[word1][word2] = s
return dic[word1][word2]
def largestMerge(self, word1: str, word2: str) -> str:
ans = self.recSolve(word1,word2, {})
return ans
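# Worked example (traced through the greedy recursion above):
# Solution().largestMerge("cabaa", "bcaaa") returns "cbcabaaaaa"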
| [
"[email protected]"
] | |
9cd58d170c9cab86fd373a9198bfa34b390ef2b4 | b049ec2f36bb63537ca5b73717635f2dc0126cda | /082_Remove_Duplicates_from_Sorted_List_II/082_3.py | 060c0985c2219257f846524bcbf2f4b26f2269b7 | [] | no_license | massquantity/LeetCode | 01d29fe8922b7545140015efbda0f71b04043124 | e298cdab86a4de81bf5a44579c54b5bc7bcb1618 | refs/heads/master | 2021-07-20T07:18:38.109707 | 2021-07-03T04:11:15 | 2021-07-03T04:11:15 | 135,297,184 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 593 | py | class Solution:
def deleteDuplicates(self, head):
"""
:type head: ListNode
:rtype: ListNode
"""
if head is None:
return head
dummy = ListNode(-1)  # ListNode is the standard LeetCode singly-linked node (fields: val, next), assumed provided by the judge
prev, curr = dummy, head
dummy.next = head
while curr and curr.next:
while curr.next and curr.val == curr.next.val:
curr = curr.next
if prev.next == curr:
prev = prev.next
else:
prev.next = curr.next
curr = curr.next
return dummy.next | [
"[email protected]"
] | |
8c5f1d9b0745df1b33cb6e671fe0a3981a7cb2b4 | 7f5a40df7ff2c27511c76ae0beb163032cf51e0a | /verificadorPreco/asgi.py | f98af017258f143d7d9af9ecb01178077ef2b819 | [] | no_license | GabsZero/verificador-precos | 951218af29be53fbc921871485a2661de96a37ed | 3ec890034040604eb26d6c0f362e530b8628cde3 | refs/heads/master | 2023-06-19T05:17:27.135758 | 2021-07-15T01:57:25 | 2021-07-15T01:57:25 | 377,661,784 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 409 | py | """
ASGI config for verificadorPreco project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'verificadorPreco.settings')
application = get_asgi_application()
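# Example of serving this application (assumes an ASGI server such as uvicorn is installed):
#   uvicorn verificadorPreco.asgi:application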
| [
"[email protected]"
] | |
570459047883049dee58a550be237d8fe4f4553c | 0ca559977e4956d568fd935f36eab4b80c1f43af | /ACILoginTest.py | 8c4e04fe0e10ea2f1bff3e78f7b582404fd1350b | [] | no_license | andersnlindstrom/hello-world | aca393f64770982a297e3d3ca0cc637e9c8b4aa2 | 37fe2b4f0ff7d0764d097a8a88d3ae6d53eaed65 | refs/heads/master | 2020-05-04T10:45:26.281476 | 2019-04-05T12:36:26 | 2019-04-05T12:36:26 | 179,094,533 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 584 | py | import requests
url = "https://apic/api/aaaLogin.json"
payload = "{\n \"aaaUser\":{\n\t\"attributes\":{\n\t\t\"name\":\"admin\",\n\t\t\"pwd\":\"cisco123\"\n\t}\n }\n}"
headers = {
'cache-control': "no-cache",
'postman-token': "68f4acae-0950-10e4-5c8a-7687bc2c3c1d"
}
try:
response = requests.request("POST", url, data=payload, headers=headers)
print(response.text)
except (requests.exceptions.ConnectionError) as err_msg:
print "Cisco ACI authentication failed. Ensure yuor controller has been provisioned"
print "The Python error message is", err_msg
| [
"[email protected]"
] | |
ed87c08e4fe58665c25ec1881f82a5337ac1ea37 | 2d227925231be797cc78b644358ecd3adf00fba7 | /ce/c006.py | 1d4b91367360be704c383fd204a29c486ecb8dc8 | [] | no_license | egalli64/pythonesque | 6bb107189d4556d832175d41366ea0b18ed6ea1d | 154042c5ae5cf43a0ae2c03d509fc48d1dc19eb8 | refs/heads/master | 2023-07-07T05:50:14.711023 | 2023-07-01T10:52:24 | 2023-07-01T10:52:24 | 53,720,525 | 21 | 7 | null | null | null | null | UTF-8 | Python | false | false | 1,114 | py | """
CodeEval Longest Common Subsequence
author: Manny [email protected]
info: http://thisthread.blogspot.com/2017/02/codeeval-longest-common-subsequence.html
https://www.codeeval.com/open_challenges/6/
"""
import sys
def solution(line):
ver, hor = line.split(';')
t = [[0 for j in range(len(hor)+1)] for i in range(len(ver)+1)]
for i in range(len(ver)):
for j in range(len(hor)):
t[i + 1][j + 1] = t[i][j] + 1 if ver[i] == hor[j] else max(t[i+1][j], t[i][j+1])
i, j = len(ver), len(hor)
result = [None] * t[i][j]
cur = -1
while i > 0 and j > 0:
if t[i][j] == t[i-1][j]:
i -= 1
elif t[i][j] == t[i][j-1]:
j -= 1
else:
result[cur] = ver[i-1]
cur -= 1
i -= 1
j -= 1
return ''.join(result)
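# Worked example (traced through the DP table and backtrack above):
# solution("ABC;AC") fills the table, walks back from t[3][2], and returns "AC".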
if __name__ == '__main__':
if len(sys.argv) == 2:
test_cases = open(sys.argv[1], 'r')
for test in test_cases:
print(solution(test.rstrip('\n')))
test_cases.close()
else:
print('Data filename expected as argument!')
| [
"[email protected]"
] | |
989f929f6df8b2b70b0efbbf999227eacf81b9e6 | 212f6e4848796aecfb5b4ab8d640003d1ca6f33c | /apps/Main/migrations/0042_test_folder.py | 7a6ac9801f61b4c5eed050962dacb5c3b6403bb7 | [] | no_license | OlegEfremov/MathBaseRepo | 257827db5efd1263e0e955c9ad7b2fcf8b1c4722 | f3ca9b5f9e8b502a770bde9b2e205fa0cf6ecffa | refs/heads/master | 2022-12-02T05:23:42.020082 | 2020-04-30T16:22:32 | 2020-04-30T16:22:32 | 252,153,596 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,791 | py | # Generated by Django 2.0.2 on 2018-12-17 20:23
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import mptt.fields
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('Main', '0041_test_generated_is_archived'),
]
operations = [
migrations.CreateModel(
name='Test_Folder',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(default='DEFAULT_FOLDER_NAME', max_length=1000)),
('system_name', models.CharField(default='DEFAULT_FOLDER_SYSTEM_NAME', max_length=1000)),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('lft', models.PositiveIntegerField(db_index=True, editable=False)),
('rght', models.PositiveIntegerField(db_index=True, editable=False)),
('tree_id', models.PositiveIntegerField(db_index=True, editable=False)),
('level', models.PositiveIntegerField(db_index=True, editable=False)),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
('parent', mptt.fields.TreeForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='subfolders', to='Main.Test_Folder')),
('test', models.ManyToManyField(blank=True, related_name='solution_folders', to='Main.Test_Generated')),
],
options={
'abstract': False,
},
),
]
| [
"[email protected]"
] | |
29e80556b879a6f36290bc913a188945529be4e2 | 4e013a13f4569403f95faa2f753c09bfb51cba97 | /models.py | e66d5741650279a03c190f465fb9e8730e9b72e8 | [] | no_license | jmlc101/build-a-blog | 876554b954751f40359e1e1b3ae1edd87c1aef27 | cbedebf5f1a677d23f0df2dd0d86779ef99342c7 | refs/heads/master | 2021-05-05T15:22:51.185613 | 2018-01-30T18:56:54 | 2018-01-30T18:56:54 | 117,300,290 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 257 | py | from app import db
class Blog(db.Model):
id = db.Column(db.Integer, primary_key=True)
title = db.Column(db.String(120))
body = db.Column(db.String(2000))
def __init__(self, title, body):
self.title = title
self.body = body | [
"[email protected]"
] | |
71f18c299dfb9cc9acedc7d2a7d11aad44c6280d | 49e17d736df9889b3a0d91705abd0f3ed579d17c | /quests/Rune_Memories.py | 32215014bbf320e31eaed75dda4cd9080a7bfd6c | [] | no_license | TheWhirl/RunescapeQuestWebsite | 4f258c04a1c1e6bb9f6d9e0fa63fdcab452ccfc2 | 8d5dacbc8251bd1f2dded4ffa04400ed48e0f1fb | refs/heads/master | 2020-05-16T02:54:35.603906 | 2018-12-23T13:03:58 | 2018-12-23T13:03:58 | 182,643,424 | 0 | 0 | null | 2019-04-22T07:22:00 | 2019-04-22T07:21:59 | null | UTF-8 | Python | false | false | 468 | py | import os
import sys
sys.path.insert(0,
os.path.dirname(os.path.realpath(__file__))[
0:-len("quests")])
from QuestInfo import Quest
class Rune_Memories(Quest):
def __init__(self):
super().__init__("Rune Memories")
self.age = 5
self.difficulty = "Novice"
self.length = "Long"
self.quest_points = 1
self.magic = 27
self.runecrafting = 20
self.construction = 25
| [
"[email protected]"
] | |
dcfded448d6345ebdb38ac170e01942675fa0d6d | e594d131c45606c750cf1e673ae538c84a74b37b | /passeggio_random.py | 0dc4b36693c0cb14e59cf5d00ccea609f07415ef | [
"Apache-2.0"
] | permissive | profeder/picar | 99e21dfd59237c8b2ad4fa666a78825004724d99 | ab8c7b663839c105f234f7435b0828ee1b089b3c | refs/heads/main | 2023-02-15T03:41:42.252624 | 2021-01-10T16:51:20 | 2021-01-10T16:51:20 | 326,012,890 | 0 | 0 | Apache-2.0 | 2021-01-10T16:51:21 | 2021-01-01T16:17:33 | Python | UTF-8 | Python | false | false | 1,022 | py | import logging
import random
import time
import picar_4wd as fc
SCAN_SLEEP = 0.1
MIN_DIST_FOLLOW = 30
def lettura_media(angolo):
res = 0
for i in range(3):
res = res + fc.get_distance_at(angolo)
time.sleep(0.1)
logging.warning('average reading: ' + str(res/3))
return res / 3
def scan_dx():
score = 0
for i in range(30, 91, 10):
score = lettura_media(i)
time.sleep(SCAN_SLEEP)
return score
def scan_sx():
score = 0
for i in range(-30, -91, -10):
score = lettura_media(i)
time.sleep(SCAN_SLEEP)
return score
def main_loop():
if lettura_media(0) > MIN_DIST_FOLLOW:
fc.forward(20)
while lettura_media(0) > MIN_DIST_FOLLOW:
pass
fc.stop()
lval = scan_sx()
rval = scan_dx()
turn_time = 0.5 + random.random()*1
if rval < lval:
fc.turn_right(20)
else:
fc.turn_left(20)
time.sleep(turn_time)
if __name__ == '__main__':
while True:
main_loop()
| [
"[email protected]"
] | |
f1534e1fb1fcaf82d7446a42500e6405d5618b50 | fa8f727395f4443ddc7ec2d85e7e95fd04296e31 | /TemplatesForeachApplication/settings.py | 4af23de2b9fad1417932c68aa5ff85332827a800 | [] | no_license | Sudeep152/DjangoFirst | a201b7b7e1abf978cc720ba7ff218362b30ee0f0 | b637cc554877e7265849d67d1d023c0c5a3671ef | refs/heads/main | 2023-02-22T16:11:58.037499 | 2021-01-30T06:43:45 | 2021-01-30T06:43:45 | 334,343,764 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,221 | py | """
Django settings for TemplatesForeachApplication project.
Generated by 'django-admin startproject' using Django 3.1.5.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
import os
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
TEMPLATES_DIR = os.path.join(BASE_DIR, 'templates')
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '%lnfp3qmj@s(ps$!qguv7tj1sxfcc#8zn@(s))f(f7c4ez8li!'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'course',
'fees',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'TemplatesForeachApplication.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [TEMPLATES_DIR],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'TemplatesForeachApplication.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
| [
"[email protected]"
] | |
e8ddcddc4326d24787d09b33f0101c842cddc72f | d58d32c97f0d1dd1340100ae2a1480b138a57223 | /mysite/polls/migrations/0001_initial.py | fd4b3d358ee6767d5e0669ec594c52a105c4cfc2 | [] | no_license | imvivek71/go | e550130ff72bda9d7bf1dfe00ddb0d0e5ef87c1d | 7c1eb161fc9722a9a4584650a35ad0d83bcfd71a | refs/heads/master | 2020-04-19T23:27:42.372659 | 2019-02-23T13:18:45 | 2019-02-23T13:18:45 | 168,496,283 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,165 | py | # Generated by Django 2.1.5 on 2019-01-29 08:23
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Choice',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('choice_text', models.CharField(max_length=200)),
('votes', models.IntegerField(default=0)),
],
),
migrations.CreateModel(
name='Question',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('question_text', models.CharField(max_length=100)),
('pub_date', models.DateTimeField(verbose_name='Date_published')),
],
),
migrations.AddField(
model_name='choice',
name='question',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='polls.Question'),
),
]
| [
"[email protected]"
] | |
63c7dd9959ca37a1284a4920c6a40d2fac2de29d | e3acba4c1cb6b7a7f4744e9917adc7df3108de43 | /checkout/migrations/0001_initial.py | 3c45898332d2354a65285e45246cbc682cec2bcc | [] | no_license | AntoineZakaria/ta3refa | 2ebec89d9acc78d2caa6d9af39fa60814d880fd2 | 041730988c52d9ef57164f62b268274e4fc20a89 | refs/heads/master | 2023-02-14T13:39:07.736232 | 2021-01-14T11:01:04 | 2021-01-14T11:01:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 657 | py | # Generated by Django 3.1.3 on 2021-01-09 17:02
import django.contrib.postgres.fields
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Cart',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('user_id', models.IntegerField()),
('products', django.contrib.postgres.fields.ArrayField(base_field=models.CharField(max_length=100), default=None, size=None)),
],
),
]
| [
"[email protected]"
] | |
b941dbdf6608660fd07a54ac390d3b319989cc89 | 64e00b058909c83e436b18b50636f280660fa5fb | /HW3_client.py | 30304d82436798fb01d757d4aa7bf394642fd769 | [] | no_license | TheBell/HW3 | 726c995de4777973a2fcc6b21eaa112e5e87dcdb | d305a647f7425250fb49bea260a41fd35cd694d7 | refs/heads/master | 2021-04-27T20:39:08.650147 | 2018-02-21T19:31:30 | 2018-02-21T19:31:30 | 122,383,261 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 146 | py | #!/usr/bin/env python3
"""
The client prompts the user for server information, then requests the desired resource.
"""
__author__ = "Brian Bell" | [
"[email protected]"
] | |
0b062341f348382fb2174eea58bec2dd55d84e8f | 691d8f198075f558e2563b61cb63ddc198ad3cf6 | /django_simple_file_handler/tests/functions.py | 364b1c260c9f4500915fe796c51dd534020d7b3f | [
"MIT"
] | permissive | jonathanrickard/django-simple-file-handler | 826e160c6730e49cf410fe6bf66c9352bd4eb7c0 | 1dfd1332cb199b155c9951162cb0b5d18a219852 | refs/heads/master | 2023-06-23T20:09:06.867318 | 2023-06-18T20:39:08 | 2023-06-18T20:39:08 | 133,997,025 | 8 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,157 | py | from io import (
BytesIO,
)
from PIL import (
Image,
)
from django.conf import (
settings,
)
from django.contrib.auth.models import (
User,
)
from django.core.files.uploadedfile import (
SimpleUploadedFile,
)
from django.test import (
RequestFactory,
)
from django.urls import (
reverse,
)
def custom_subdirectory(path):
try:
directory = settings.FILE_HANDLER_DIRECTORY
except AttributeError:
directory = ''
return f'{directory}{path}'
def pillow_settings():
try:
return settings.FILE_HANDLER_PILLOW
except AttributeError:
return {}
def create_image_file():
temp_handle = BytesIO()
image_file = Image.new(
'RGB',
(72, 72),
(0, 0, 255),
)
image_file.save(
temp_handle,
'jpeg',
)
temp_handle.seek(0)
return SimpleUploadedFile(
'test_image.jpeg',
temp_handle.read(),
'image/jpeg',
)
def create_user():
user = User.objects.create_user(
username='test_user',
password='test_password',
)
return user
def create_document_instance(model_name):
document_instance = model_name.objects.create(
title='Test Document',
extra_text='Test extra text',
saved_file=SimpleUploadedFile(
'test_file.pdf',
'test file content'.encode(),
'application/pdf',
),
)
return document_instance
def create_unprocessed_image_instance(model_name):
image_instance = model_name.objects.create(
title='Test Image',
extra_text='Test extra text',
saved_file=create_image_file(),
)
return image_instance
def create_processed_image_instance(model_name):
image_instance = model_name.objects.create(
extra_text='Test extra text',
output_width=200,
output_height=100,
saved_file=create_image_file(),
)
return image_instance
def create_pdf_instance(model_name):
pdf_instance = model_name.objects.create(
title='PDF file name',
extra_text='Test extra text',
template_location='django_simple_file_handler/tests/pdf_test.html',
template_data={
'title_name': 'Title of PDF',
'test_value': 'A test value string',
},
)
return pdf_instance
def create_response(self):
request = RequestFactory().get(reverse(
self.reverse_name,
kwargs={
'proxy_slug': self.test_instance.proxy_slug,
},
))
request.user = create_user()
return self.test_view(request, self.test_instance.proxy_slug)
def attribute_exists(instance_attribute):
return instance_attribute is not None
def status_code_equals(self, attr_name, status_code):
error_msg = f"For view '{attr_name}', the status code returned was not '{str(status_code)}'"
self.assertEqual(self.response.status_code, status_code, error_msg)
def file_equals(self, attr_name):
error_msg = f"For view '{attr_name}', the assigned file was not returned"
self.assertEqual(self.response.content, self.test_instance.saved_file.read(), error_msg)
| [
"[email protected]"
] | |
4247728a0bc10edab8316c0580e6e45f50a400bc | d4f2e2e3552ab4b111f78cfbad0d30c144201093 | /2016-12-20/condition.py | 59437d61dfcf241af110530a1ce821908201cf9e | [
"Apache-2.0"
] | permissive | dongweiming/mp | c1e9f6f2c1fd8adbd4d7b8ffc45c5cc288cdcd80 | 129c31c818e1f0c39c983aad1f2f1ad9fa7efb1c | refs/heads/master | 2023-04-29T07:56:27.198574 | 2022-10-30T04:20:09 | 2022-10-30T04:21:27 | 75,051,758 | 96 | 35 | Apache-2.0 | 2023-04-17T17:34:17 | 2016-11-29T06:44:53 | Python | UTF-8 | Python | false | false | 1,139 | py | import asyncio
import functools
async def consumer(cond, name, second):
await asyncio.sleep(second)
with await cond:
await cond.wait()
print('{}: Resource is available to consumer'.format(name))
async def producer(cond):
await asyncio.sleep(2)
for n in range(1, 3):
with await cond:
print('notifying consumer {}'.format(n))
cond.notify(n=n)
await asyncio.sleep(0.1)
async def producer2(cond):
await asyncio.sleep(2)
with await cond:
print('Making resource available')
cond.notify_all()
async def main(loop):
condition = asyncio.Condition()
task = loop.create_task(producer(condition))
consumers = [consumer(condition, name, index)
for index, name in enumerate(('c1', 'c2'))]
await asyncio.wait(consumers)
task = loop.create_task(producer2(condition))
consumers = [consumer(condition, name, index)
for index, name in enumerate(('c1', 'c2'))]
await asyncio.wait(consumers)
task.cancel()
loop = asyncio.get_event_loop()
loop.run_until_complete(main(loop))
loop.close()
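# Note for readers on newer Python versions: the `with await cond:` acquire idiom used above
# is the old asyncio style; current asyncio spells the same acquire/release as `async with cond:`.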
| [
"[email protected]"
] | |
e0cf13b8071e20ef7e9bafa62f969adf7f851808 | e60804cd0f7b22b37af05fdc68eba510bc751df5 | /backend/app/settings/globals.py | f5735cd931c6cb22e8fa362f8340421fcf372c3d | [
"MIT"
] | permissive | kartikeyhadiya/base-fastapi-postgresql | f3254b75c491667ffce30be86f7de04f0399dac0 | 7e3a2916910155cd83b10cd7fec42eba7b1d3a95 | refs/heads/master | 2023-06-28T03:50:59.968192 | 2019-12-15T12:33:42 | 2019-12-15T12:33:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,340 | py | from pathlib import Path
from typing import Optional
from starlette.config import Config
from starlette.datastructures import CommaSeparatedStrings
from ..models.pydantic.database import DatabaseURL
p: Path = Path(__file__).parents[2] / ".env"
config: Config = Config(p if p.exists() else None)
DATABASE: str = config("POSTGRES_DB", cast=str)
DB_USER: Optional[str] = config("POSTGRES_USER", cast=str, default=None)
DB_PASSWORD: Optional[str] = config(
"POSTGRES_PASSWORD", cast=str, default=None
)
DB_HOST: str = config("DB_HOST", cast=str, default="postgres_db")
DB_PORT: int = config("DB_PORT", cast=int, default=5432)
DATABASE_CONFIG: DatabaseURL = DatabaseURL(
drivername="asyncpg",
username=DB_USER,
password=DB_PASSWORD,
host=DB_HOST,
port=DB_PORT,
database=DATABASE,
)
ALEMBIC_CONFIG: DatabaseURL = DatabaseURL(
drivername="postgresql+psycopg2",
username=DB_USER,
password=DB_PASSWORD,
host=DB_HOST,
port=DB_PORT,
database=DATABASE,
)
REDIS_IP: str = config("REDIS_IP", cast=str, default="redis")
REDIS_PORT: int = config("REDIS_PORT", cast=int, default=6379)
REDIS_PASSWORD: Optional[str] = config("REDIS_PASSWORD", cast=str, default=None)
ARQ_BACKGROUND_FUNCTIONS: Optional[CommaSeparatedStrings] = config(
"ARQ_BACKGROUND_FUNCTIONS", cast=CommaSeparatedStrings, default=None
)
| [
"[email protected]"
] | |
1944c86dc0a809a8876e5cf7f16fa6264c4e98f9 | 8ce2ef401bfa8a7edc075f30671ceb7e12001566 | /tensorflow/python/data/experimental/kernel_tests/optimization/map_and_batch_fusion_test.py | ddf3cbbcc358d765beb4bca3ae4ffdf26f2da9ca | [
"Apache-2.0"
] | permissive | TomZRoid/tensorflow | e8167a31dcd707279365c8ee5ec283c00edaafba | 89390faf68c153ef8bea0e20ba128c0d54cee0e0 | refs/heads/master | 2020-03-30T22:38:50.662448 | 2018-11-08T06:25:34 | 2018-11-08T06:25:34 | 151,673,686 | 2 | 0 | Apache-2.0 | 2018-10-05T05:15:45 | 2018-10-05T05:15:44 | null | UTF-8 | Python | false | false | 1,813 | py | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the `MapAndBatchFusion` optimization."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.data.experimental.ops import optimization
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import errors
from tensorflow.python.platform import test
class MapAndBatchFusionTest(test_base.DatasetTestBase):
def testMapAndBatchFusion(self):
dataset = dataset_ops.Dataset.range(10).apply(
optimization.assert_next(
["MapAndBatch"])).map(lambda x: x * x).batch(10)
options = dataset_ops.Options()
options.experimental_map_and_batch_fusion = True
dataset = dataset.with_options(options)
iterator = dataset.make_one_shot_iterator()
get_next = iterator.get_next()
with self.cached_session() as sess:
self.assertAllEqual([x * x for x in range(10)], sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
if __name__ == "__main__":
test.main()
| [
"[email protected]"
] | |
4cc35bb659adad7f05acfac926f47dc77eb1db92 | 872e7437a22e684c0fecce76e679464bb68e434d | /Anna_old/py_scripts/bioscripts/get_records.py | 6b5b4dc9c4715dfd10d01a01288f03c984e341a0 | [] | no_license | tskalicky/ngs | d9a3c4da69544c9d5ae72c71167f2078137d1775 | 6056c627cdf23eca357bbb7cba0465abf6398f43 | refs/heads/master | 2020-12-30T22:34:28.112079 | 2018-10-31T09:09:39 | 2018-10-31T09:09:39 | 80,538,797 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 474 | py | #!/usr/bin/python
from Bio import SeqIO
# IDs of the scaffolds to keep; a set gives O(1) membership tests
wanted_ids = {
    "utg000008l|quiver|quiver|quiver",
    "utg000035l|quiver|quiver|quiver",
}
fasta = '/home/anna/bioinformatics/genomes/trypanoplasma/pacbio_consensus_quiver3.fasta'
results = []
for record in SeqIO.parse(fasta, "fasta"):
    if record.id in wanted_ids:
results.append(record)
outpath = '/home/anna/bioinformatics/genomes/trypanoplasma/pacbio_consensus_quiver3_most_covered_scaffolds.fasta'
SeqIO.write(results, outpath, "fasta")
| [
"[email protected]"
] | |
ffeab7b06ec3fa59a92748cd8b44f1c7fda09477 | ee53b0262007b2f0db0fe15b2ad85f65fafa4e25 | /Leetcode/1802. Maximum Value at a Given Index in a Bounded Array.py | 57f92a8cbed1157b36ba6a7e5e1529599eefce52 | [] | no_license | xiaohuanlin/Algorithms | bd48caacb08295fc5756acdac609be78e143a760 | 157cbaeeff74130e5105e58a6b4cdf66403a8a6f | refs/heads/master | 2023-08-09T05:18:06.221485 | 2023-08-08T11:53:15 | 2023-08-08T11:53:15 | 131,491,056 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,128 | py | '''
You are given three positive integers: n, index, and maxSum. You want to construct an array nums (0-indexed) that satisfies the following conditions:
nums.length == n
nums[i] is a positive integer where 0 <= i < n.
abs(nums[i] - nums[i+1]) <= 1 where 0 <= i < n-1.
The sum of all the elements of nums does not exceed maxSum.
nums[index] is maximized.
Return nums[index] of the constructed array.
Note that abs(x) equals x if x >= 0, and -x otherwise.
Example 1:
Input: n = 4, index = 2, maxSum = 6
Output: 2
Explanation: nums = [1,2,2,1] is one array that satisfies all the conditions.
There are no arrays that satisfy all the conditions and have nums[2] == 3, so 2 is the maximum nums[2].
Example 2:
Input: n = 6, index = 1, maxSum = 10
Output: 3
Constraints:
1 <= n <= maxSum <= 10^9
0 <= index < n
'''
import unittest
from typing import *
class Solution:
def maxValue(self, n: int, index: int, maxSum: int) -> int:
def get_result(x):
left_p = max(x - index, 1)
left_len = x - left_p + 1
left = (left_p + x) * left_len // 2
right_p = max(x - (n - index - 1), 1)
right_len = x - right_p + 1
right = (right_p + x) * right_len // 2
return left + right + (n - (left_len + right_len - 1)) - x
start = 1
end = maxSum
while start < end:
middle = start + (end - start) // 2
if get_result(middle) < maxSum:
start = middle + 1
elif get_result(middle) > maxSum:
end = middle - 1
else:
return middle
return start if get_result(start) <= maxSum else start - 1
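# Illustrative trace of get_result: for n=4, index=2, maxSum=6 the candidate
# peak x=2 prices the cheapest valid array [1, 1, 2, 1] -- left arm 1+2, right
# arm 2+1, minus the doubly counted peak, plus 1 for the slot outside both
# arms: 3 + 3 - 2 + 1 = 5 <= 6, so the binary search keeps probing upward.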
class TestSolution(unittest.TestCase):
def test_case(self):
examples = (
((4, 2, 6), 2),
)
for first, second in examples:
self.assert_function(first, second)
def assert_function(self, first, second):
self.assertEqual(Solution().maxValue(*first), second,
msg="first: {}; second: {}".format(first, second))
if __name__ == '__main__':
    unittest.main()
 | [
"[email protected]"
] | |
42907b1228eb954d41ab0118f4a4e4f1361bf709 | b95112b5b66551a15ddb5075dd9f6e343d10ee26 | /code/get_distances.py | 9a2ea6e1b19a67542e5ca4ede04ad9875bd924c6 | [] | no_license | yrahul3910/neunet-less-data | ae3221ebec84f1ff49e2d079730ebf3e706a3345 | 1ad31d57a02a259ea31121a8f905ef4a8b7e32b9 | refs/heads/master | 2022-03-27T16:16:13.602531 | 2019-12-14T23:37:05 | 2019-12-14T23:37:05 | 215,572,984 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 383 | py | import sys
if len(sys.argv) < 2:
print('Usage: get_distances DEFAULT [SKIP_LIST]')
sys.exit()
benchmark = sys.argv[1]
skip_list = []
if len(sys.argv) == 3:
skip_list = [int(x) for x in sys.argv[2].split(',')]
for line in sys.stdin:
diff = sum([x != y for i, (x, y) in enumerate(zip(benchmark, line)) if i not in skip_list])
print(line[:-1], '\t', str(diff))
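# Hypothetical invocation: printf 'AACC\nAAGG\n' | python get_distances.py AAAA 1
# prints each line followed by its mismatch count against the benchmark,
# ignoring position 1 because of the skip list.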
| [
"[email protected]"
] | |
88988fca20b95aa138cf1a987f42ff4bc6f62a97 | f1239327d95cca232eab7d4fea44ebf5b7bc6bd5 | /admincode/pipelines.py | 31fe2c676dd12aabe94f2bef544a2f72e779db86 | [
"MIT"
] | permissive | znwang25/china_admincode | f18a1903d00a3aac869b850413542457a46eb3a1 | f934efcba67df5069abd96985db2cecd91745940 | refs/heads/master | 2020-04-18T01:34:40.472033 | 2019-11-01T20:21:13 | 2019-11-01T20:21:13 | 167,123,701 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 885 | py | # -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
import csv
from admincode import settings
def write_to_csv(item):
    # open via a context manager so the file handle is closed after each write
    with open(settings.CSV_FILE_PATH, 'a') as f:
        writer = csv.writer(f, lineterminator='\n')
        writer.writerow([item[key] for key in ['year', 'prov_name',
                                               'city_name', 'city_code',
                                               'county_name', 'county_code',
                                               'town_name', 'town_code']])
class CsvExportPipeline(object):
def process_item(self, item, spider):
write_to_csv(item)
return item
class AdmincodePipeline(object):
def process_item(self, item, spider):
return item
| [
"[email protected]"
] | |
6ddbb987fa0dd007cc0828650cb5d5937259c11f | e5fc82e9239fcf42278f6d9e4e1354051dc8caa3 | /movie/utilities/utilities.py | 5d81c420f551eefbaf4e6d6077ec49a1fed9e2d2 | [] | no_license | nelf450/cs235A2 | d5e960954be0ce8650669112165ee22f2036728b | c2ae823eaca5afcfcbe3d2b49e8f097c9f2ad272 | refs/heads/master | 2023-01-03T12:14:19.536071 | 2020-10-25T03:53:01 | 2020-10-25T03:53:01 | 307,019,382 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 760 | py | from flask import Blueprint, request, render_template, redirect, url_for, session
import movie.adapters.repository as repo
import movie.utilities.services as services
# Configure Blueprint.
utilities_blueprint = Blueprint(
'utilities_bp', __name__)
def get_tags_and_urls():
tag_names = services.get_tag_names(repo.repo_instance)
tag_urls = dict()
for tag_name in tag_names:
tag_urls[tag_name] = url_for('news_bp.articles_by_tag', tag=tag_name)
return tag_urls
def get_selected_articles(quantity=3):
articles = services.get_random_articles(quantity, repo.repo_instance)
for article in articles:
article['hyperlink'] = url_for('news_bp.articles_by_date', date=article['date'].isoformat())
return articles
| [
"[email protected]"
] | |
eb8f9cb1d25fe781f7b72df3fa2d511e38289fea | 4b64581f4466aa2b72b33033109666c9c3c0b05c | /LaunchQueens.py | 18bf1933779ab250f609b83422205893ee9ecd46 | [] | no_license | nbah22/genetic-algorithms | d7328411a07e22f44f3e74489278d1f1f0b468d9 | 499287acfbab4cf205e6ed8fe70916294a49770c | refs/heads/master | 2020-04-06T03:39:50.718211 | 2015-06-28T23:01:59 | 2015-06-28T23:01:59 | 13,321,641 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,010 | py | import Queens
import Genetic
# You can change some parameters and see what happens
# This can be done in the graphical interface as well
params = {'random_parents': False,
'size': 45,
'num_of_children': 200,
'max_num_of_mutations': 0, # The larger are fields - the more mutations you will need
'max_num_of_old_mutations': 2, # Number of mutations of old generation
'equal_individuals_are_allowed': False,
'equal_parents_are_allowed': False,
'random_individs_added_each_cycle': 20,
'number_of_fathers': 3,
'seed': None} # Seed is a parameter which no one should use. Ever.
# But you can try setting it to "BVVVVqqqq1VVVaqqqtVVVWqqqrVVVVqqqq1VVVaqqqtVVVWqqqrVVVVqqqq1VVVaqqqtVVVWqqqrVVVVqqqq1VVVaqqqtVVVWqqqrVVVV"
p = Queens.Population(x_size=8, y_size=8, **params) # With an 8x8 board like this one,
Genetic.GUI(p, columns=5, title='Queens') # it is better to make max_num_of_mutations=7 or more
| [
"[email protected]"
] | |
17c702bc5916dd7aea358f9bcdb2e71b7c61a6dc | dfdaf4966cd3b0c47b3b5ccf41133b1f4baac403 | /setup.py | 406c4a03dc12cd57c9effdc8526db74c58e05dd2 | [
"MIT"
] | permissive | Sh4zKh4n/Pytorch_pdegraph | 9758e8262e89c76f8e399e8ebdf14bde00a232bd | fade6817e437b606c43221a5ca13bdaeec563fff | refs/heads/master | 2022-12-11T17:59:10.760167 | 2020-08-28T08:12:44 | 2020-08-28T08:12:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 780 | py | import pathlib
from setuptools import setup, find_packages
HERE = pathlib.Path(__file__).parent
README = (HERE / "README.md").read_text()
install_requires = [
'torch',
'numpy',
'mnist',
'tqdm',
'scikit-learn',
'scikit-image',
'matplotlib',
'open3d-python',
]
setup(
name="torch_pdegraph",
version="1.1.2",
description="Running paritial difference equations (PDEs) on graphs",
long_description=README,
long_description_content_type="text/markdown",
url="https://github.com/aGIToz/Pytorch_pdegraph",
python_requires='>=3.6',
install_requires=install_requires,
packages=find_packages(),
author="Amitoz AZAD",
author_email="[email protected]",
license="MIT",
include_package_data=True,
)
| [
"[email protected]"
] | |
208056b3f005ddfc905796dddb9364838d0a6d02 | d554b1aa8b70fddf81da8988b4aaa43788fede88 | /5 - Notebooks e Data/1 - Análises numéricas/Arquivos David/Atualizados/logDicas-master/data/2019-1/223/users/4480/codes/1582_994.py | fc8433cd4c0f2fea91b41300cbedfed0b955ef4e | [] | no_license | JosephLevinthal/Research-projects | a3bc3ca3b09faad16f5cce5949a2279cf14742ba | 60d5fd6eb864a5181f4321e7a992812f3c2139f9 | refs/heads/master | 2022-07-31T06:43:02.686109 | 2020-05-23T00:24:26 | 2020-05-23T00:24:26 | 266,199,309 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 42 | py | print ('Universidade Federal do Amazonas') | [
"[email protected]"
] | |
e310f2d2eed1605ce5b40d533cafd8c25822b9a4 | 25ebc03b92df764ff0a6c70c14c2848a49fe1b0b | /daily/20191218/example_monogusa/09capture-with-handofcats.py | d2d792f64692b455c6adba2f7ca93f33b2510763 | [] | no_license | podhmo/individual-sandbox | 18db414fafd061568d0d5e993b8f8069867dfcfb | cafee43b4cf51a321f4e2c3f9949ac53eece4b15 | refs/heads/master | 2023-07-23T07:06:57.944539 | 2023-07-09T11:45:53 | 2023-07-09T11:45:53 | 61,940,197 | 6 | 0 | null | 2022-10-19T05:01:17 | 2016-06-25T11:27:04 | Python | UTF-8 | Python | false | false | 614 | py | from handofcats.injector import Injector
def hello(*, name: str, age: int, debug: bool = False) -> None:
pass
def use_normal():
import argparse
parser = argparse.ArgumentParser(prog="app.py")
Injector(hello).inject(parser, callback=id)
print(parser.format_help())
def use_gencode():
from codeobject import Module, codeobject
@codeobject
def ArgumentParser(m: Module, name: str) -> Module:
pass
m = Module()
parser = m.let("parser", ArgumentParser(prog="app.py"))
Injector(hello).inject(parser, callback=m.stmt)
print(m)
use_normal()
use_gencode()
| [
"[email protected]"
] | |
1c1e5e463bda3a74a20c39f85114242681831ddd | 1925c535d439d2d47e27ace779f08be0b2a75750 | /leetcode/check_if_a_string_contains_all_binary_codes_of_size_k.py | 90b69cee7a1b726a338e511297af4446937bbbc6 | [] | no_license | arthurDz/algorithm-studies | ee77d716041671c4b8bb757d8d96f3d10b6589f7 | 1e4d23dd0c40df34f58d71c7ca3e6491be732075 | refs/heads/master | 2023-04-27T12:17:06.209278 | 2021-04-30T20:16:18 | 2021-04-30T20:16:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,235 | py | # Given a binary string s and an integer k.
# Return True if every binary code of length k is a substring of s. Otherwise, return False.
# Example 1:
# Input: s = "00110110", k = 2
# Output: true
# Explanation: The binary codes of length 2 are "00", "01", "10" and "11". They can all be found as substrings at indices 0, 1, 3 and 2 respectively.
# Example 2:
# Input: s = "00110", k = 2
# Output: true
# Example 3:
# Input: s = "0110", k = 1
# Output: true
# Explanation: The binary codes of length 1 are "0" and "1", it is clear that both exist as a substring.
# Example 4:
# Input: s = "0110", k = 2
# Output: false
# Explanation: The binary code "00" is of length 2 and doesn't exist in the array.
# Example 5:
# Input: s = "0000000001011100", k = 4
# Output: false
# Constraints:
# 1 <= s.length <= 5 * 10^5
# s consists of 0's and 1's only.
# 1 <= k <= 20
import collections  # the deque below needs this; the import was missing
def hasAllCodes(s: str, k: int) -> bool:  # module-level function, so no `self`
if len(s) < 2 ** k: return False
d = set()
window = collections.deque(s[:k])
i = 0
d.add(''.join(window))
while i < len(s) - k:
window.popleft()
window.append(s[i + k])
d.add(''.join(window))
i += 1
    return len(d) == 2 ** k
 | [
"[email protected]"
] | |
123c97dcd7e1ada86865ddbcf3b3efd923ec16f9 | 88ce88b6bd1094b36d6cde6e6f0468505874944b | /auth_management/exceptions.py | e58b73fd7999bf105b7b89e91c34ac59275a16c5 | [] | no_license | ncadet-dev/spotify_app | 7577c88cca8e62399ee608b0741e97f9edeed820 | d96abf6e89794146844aa61339c8b4fe82af4e47 | refs/heads/main | 2023-08-06T11:19:52.067745 | 2021-10-06T15:05:23 | 2021-10-06T15:05:23 | 413,426,433 | 0 | 0 | null | 2021-10-05T09:07:02 | 2021-10-04T13:10:06 | Python | UTF-8 | Python | false | false | 184 | py | class RefreshTokenError(Exception):
pass
class PostRefreshError(Exception):
pass
class GetTokenError(Exception):
pass
class SpotifyCallbackError(Exception):
pass
| [
"[email protected]"
] | |
110659b82bd5ddd48884ff74d5b4155c136c50cf | 987ee03ea7792f6baa025b7ae124e0b72e86cb04 | /tensorflow_lattice/python/parallel_combination_test.py | a13601c33c85338f6f1b52e965873280c1bf3b5a | [
"Apache-2.0"
] | permissive | synergy-robotics-a-b/lattice | dfb3f6f8e97fa0d9d68076656c8a491e27e1ddf1 | c570ca899d7b4942c34f9c522639aeff2891d089 | refs/heads/master | 2021-01-05T08:41:49.884489 | 2020-02-16T20:20:58 | 2020-02-16T20:20:58 | 240,958,475 | 0 | 0 | Apache-2.0 | 2020-02-16T20:11:31 | 2020-02-16T20:11:31 | null | UTF-8 | Python | false | false | 4,949 | py | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for Lattice Layer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow_lattice.python import lattice_layer as ll
from tensorflow_lattice.python import parallel_combination_layer as pcl
class ParallelCombinationTest(parameterized.TestCase, tf.test.TestCase):
def setUp(self):
super(ParallelCombinationTest, self).setUp()
self.disable_all = False
def testParallelCombinationSingleInput(self):
if self.disable_all:
return
all_calibrators = pcl.ParallelCombination()
for i in range(3):
# Its not typical to use 1-d Lattice layer for calibration, but lets do it
# to avoid redundant dependency on PWLCalibration layer.
calibrator = ll.Lattice(
lattice_sizes=[2], output_min=0.0, output_max=i + 1.0)
all_calibrators.append(calibrator)
# Given output range specified below linear initializer will have lattice to
# simply sum up inputs.
simple_sum = ll.Lattice(
lattice_sizes=[5] * 3,
kernel_initializer="linear_initializer",
output_min=0.0,
output_max=12.0,
name="SummingLattice")
model = keras.models.Sequential()
model.add(all_calibrators)
model.add(simple_sum)
test_inputs = np.asarray([
[0.0, 0.0, 0.0],
[0.1, 0.2, 0.3],
[1.0, 1.0, 1.0],
])
predictions = model.predict(test_inputs)
print("predictions")
print(predictions)
self.assertTrue(np.allclose(predictions, np.asarray([[0.0], [1.4], [6.0]])))
def testParallelCombinationMultipleInputs(self):
if self.disable_all:
return
input_layers = [keras.layers.Input(shape=[1]) for _ in range(3)]
all_calibrators = pcl.ParallelCombination(single_output=False)
for i in range(3):
# Its not typical to use 1-d Lattice layer for calibration, but lets do it
# to avoid redundant dependency on PWLCalibration layer.
calibrator = ll.Lattice(
lattice_sizes=[2], output_min=0.0, output_max=i + 1.0)
all_calibrators.append(calibrator)
# Given output range specified below linear initializer will have lattice to
# simply sum up inputs.
simple_sum = ll.Lattice(
lattice_sizes=[5] * 3,
kernel_initializer="linear_initializer",
output_min=0.0,
output_max=12.0,
name="SummingLattice",
trainable=False)
output = simple_sum(all_calibrators(input_layers))
model = keras.models.Model(inputs=input_layers, outputs=output)
test_inputs = [
np.asarray([[0.0], [0.1], [1.0]]),
np.asarray([[0.0], [0.2], [1.0]]),
np.asarray([[0.0], [0.3], [1.0]]),
]
predictions = model.predict(test_inputs)
print("predictions")
print(predictions)
self.assertTrue(np.allclose(predictions, np.asarray([[0.0], [1.4], [6.0]])))
def testParallelCombinationClone(self):
if self.disable_all:
return
input_layers = [keras.layers.Input(shape=[1]) for _ in range(3)]
all_calibrators = pcl.ParallelCombination(single_output=False)
for i in range(3):
# Its not typical to use 1-d Lattice layer for calibration, but lets do it
# to avoid redundant dependency on PWLCalibration layer.
calibrator = ll.Lattice(
lattice_sizes=[2], output_min=0.0, output_max=i + 1.0)
all_calibrators.append(calibrator)
# Given output range specified below linear initializer will have lattice to
# simply sum up inputs.
simple_sum = ll.Lattice(
lattice_sizes=[5] * 3,
kernel_initializer="linear_initializer",
output_min=0.0,
output_max=12.0,
name="SummingLattice",
trainable=False)
output = simple_sum(all_calibrators(input_layers))
model = keras.models.Model(inputs=input_layers, outputs=output)
clone = keras.models.clone_model(model)
test_inputs = [
np.asarray([[0.0], [0.1], [1.0]]),
np.asarray([[0.0], [0.2], [1.0]]),
np.asarray([[0.0], [0.3], [1.0]]),
]
predictions = clone.predict(test_inputs)
print("predictions")
print(predictions)
self.assertTrue(np.allclose(predictions, np.asarray([[0.0], [1.4], [6.0]])))
if __name__ == "__main__":
tf.test.main()
| [
"[email protected]"
] | |
43ed1c4789051834c5c479e059e2bf7302d8d337 | f558bd0fa161866efbf98e457f2758c528b9858e | /web/urls.py | bbd1475ea4cd555d2b31127f1d3acdc659747485 | [] | no_license | mk2devdrc/prospective | 7fdd2d5459598fd2370bf922b9da4dac89009cb0 | d76bd27a26429fa13c03ed72e2cb9b8fb037abbb | refs/heads/master | 2021-07-06T05:06:21.918720 | 2017-09-30T16:48:50 | 2017-09-30T16:48:50 | 105,322,138 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 417 | py |
from django.conf.urls import url
from .views import HomeView, NewsDetail, LingalaNewsDetail, LingalaHomeView
urlpatterns = [
url(r'^$', HomeView.as_view(), name='home'),
url(r'^lingala-news/$', LingalaHomeView.as_view(), name='ndaku'),
url(r'^news/(?P<pk>\d+)/$', NewsDetail.as_view(), name='news-detail'),
url(r'^article/(?P<pk>\d+)/$', LingalaNewsDetail.as_view(), name='lingala-news-detail'),
]
| [
"[email protected]"
] | |
de015127c98a226b7aa9d3066d8bdafd60ee2f8d | 4aa6dbb5ea8634f5ece8c348c11847e922cf7300 | /14lab/venv/bin/wheel | feeb6e7a4ae558e26a5138dcac8657ad1e12b9ba | [] | no_license | TarasBeshlei/Phyton_Labs_Repository | 691bcfbcf3eb24030789831ecdf4687fad10d077 | 4794d64ed71cf63923a542c951ce8ccfb0eb9f7c | refs/heads/master | 2020-03-17T14:09:20.387704 | 2018-06-10T18:22:50 | 2018-06-10T18:22:50 | 133,660,921 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 254 | #!/Users/tarasbeshlei/PycharmProjects/14lab/venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from wheel.tool import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"[email protected]"
] | ||
ab566c1bb95c162f58c9548bfbe6a1038e553846 | e9326fcc42808c1a53de42ee18cb92512b467c67 | /plugins/Tester/smoothMeshPreview.py | 01941cd47eb7a3c62f839f029c0dd6d1aef7e73d | [
"MIT"
] | permissive | sol-ansano-kim/medic | 40bae740c5c9cd24823b5ca6a6d0481d540c8253 | af644292ab859d97b96612a8d1f13dff4d8a3411 | refs/heads/master | 2023-08-24T18:14:48.554708 | 2023-08-19T00:52:49 | 2023-08-19T00:52:49 | 76,765,218 | 68 | 21 | MIT | 2023-08-19T00:52:50 | 2016-12-18T05:45:18 | C++ | UTF-8 | Python | false | false | 836 | py | import medic
from maya import OpenMaya
class SmoothMeshPreview(medic.PyTester):
def __init__(self):
super(SmoothMeshPreview, self).__init__()
def Name(self):
return "SmoothMeshPreview"
def Description(self):
return "Mesh(s) is set 'smooth mesh preview' on"
def Match(self, node):
return node.object().hasFn(OpenMaya.MFn.kMesh)
def IsFixable(self):
return True
def test(self, node):
plug = node.dg().findPlug("displaySmoothMesh")
if not plug:
return None
if plug.asInt() == 0:
return None
return medic.PyReport(node)
def fix(self, report, params):
plug = report.node().dg().findPlug("displaySmoothMesh")
plug.setInt(0)
return True
def Create():
return SmoothMeshPreview()
| [
"[email protected]"
] | |
8135ad047d4ca05966dbca2d26535b5a3edddf97 | a335acaaf118c287bd98e1db518b5a84ade263b1 | /ziming/plot.py | e3cf3635cf2e812d305f42f30b15461b7f2f5a9f | [] | no_license | sserpolar/RPi-PID-Control | ef37f2022b34d1ecdbabf71fa895f83b8158b0b5 | 452ab109cd0f183d3ab6f49f648e134e774ada77 | refs/heads/master | 2021-03-01T01:52:51.210754 | 2020-03-08T00:17:48 | 2020-03-08T00:17:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 723 | py | import datetime as dt
import matplotlib.pyplot as plt
import serial as sl
import matplotlib.animation as animation
arduino = sl.Serial('/dev/cu.usbmodem14101',9600)
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
#ax = plt.axes(xlim=(0,100),ylim=(0,100))
x = []
y = []
def animate(i, x, y):
    # readline() returns raw bytes; decode and convert before plotting
    x_read = float(arduino.readline(100).decode().strip())
    y_read = float(arduino.readline(100).decode().strip())
    x.append(x_read)
    y.append(y_read)
    # trim the shared lists in place so they never grow beyond 20 points
    del x[:-20]
    del y[:-20]
    ax.clear()
    ax.plot(x, y)
# Format plot
# plt.xticks(rotation=45, ha='right')
# plt.subplots_adjust(bottom=0.30)
ani = animation.FuncAnimation(fig, animate, fargs=(x, y), interval=1000)
plt.show()
| [
"[email protected]"
] | |
d78e0cb287626db951e263789077b6a94e58ca96 | 7c0ea37adb824e803b175e94e014ba8b6f947300 | /englishPremierLeaguePlayerImageScraper.py | 0ff2d36d8de26bc72a0ee893e91b7da9d8f96a50 | [] | no_license | gitter-badger/ratingsAggreGator | 991fd72754d71e91ec841ed959a15ad712210c52 | 00f55c47c17dce68bde28be03812ef2a319f7335 | refs/heads/master | 2020-12-28T20:54:08.977478 | 2014-12-17T15:06:41 | 2014-12-17T15:06:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,063 | py | # -*- coding: utf-8 -*-
# for scraping
import urllib2
from bs4 import BeautifulSoup
# just for console output - not needed in production
# <production>
from pprint import pprint # makes viewing the json nicer
import sys
# </production>
import unicodedata
import json
# name of file to write to
json_data=open("app/storage/tottPlayers.json")
# load the json file
data = json.load(json_data)
for name in data:
    name = name.replace(" ", "-")
    name = name.lower()
    # convert all the unicode characters into their ascii base chars
    # (the result has to be assigned back for the conversion to take effect)
    name = unicodedata.normalize('NFKD', name).encode('ascii', 'ignore')
    url = "https://uk.eurosport.yahoo.com/football/players/" + name
    #url = "https://uk.eurosport.yahoo.com/football/players/hugo-lloris/"
    # scrape the player image for the url variable page
    html = urllib2.urlopen(url)
    soup = BeautifulSoup(html)
    # find the image with class=photo
    imgs = soup.find_all('img', {'class':"photo"})
    # print each matching image tag (the actual download is not implemented)
    for img in imgs:
        print img
# close the JSON file once, after the loop
json_data.close()
| [
"[email protected]"
] | |
525e1a3aa1635693d0ad0fe5e94982a6497ea136 | e55dcd2fa719a1375062792e1d4fc991fd4d8b59 | /advent4.1.py | b9a707e9ede470679b42530ee543c4a8e0c968cc | [] | no_license | crsayen/aoc19 | b9868c7a47a7bb5eb4a14ae9108e3cdbf9ada7ea | 3a1d7e1aaec13a841a9309b17b2edf9a56987a09 | refs/heads/master | 2021-02-14T02:48:45.951986 | 2020-03-21T23:19:49 | 2020-03-21T23:19:49 | 244,759,393 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 188 | py | print( len( list( filter(
lambda x: sorted(x) == list(x) and len(set(x)) < len(x),
[ str(i) for i in range(245318,765747)]
)
)
)
)
| [
"[email protected]"
] | |
000eccbdf5bd4384aeabf11d2d6e4a9053d1d7de | 6863c783fe9dc0804c66948f1684f1bdf7d27152 | /pylottery.py | a2dc6b18e45fe4d8f388bf395109aa5d33cc53d6 | [] | no_license | leonardobiffi/PyLottery | 86ad26acbb4f50ece0c47888037a7d00d5bc7714 | 4d3d461dd0bedcb2e102cc5efee77b53b2833983 | refs/heads/master | 2020-03-26T19:13:46.200990 | 2018-08-18T21:40:29 | 2018-08-18T21:40:29 | 145,254,418 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,040 | py | #!/usr/bin/python3
import random
def tamanhoJogo(jogos):
    """
    Return the indices of the three games that currently hold the fewest numbers.
    """
    tamanho = {}
    j = 0
    for i in jogos:
        tamanho[j] = len(i)
        j += 1
    lista = []
    for i in range(3):
        menor = min(tamanho, key=tamanho.get)
        lista.append(menor)
        tamanho.pop(menor)
    return lista
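# Illustrative example: tamanhoJogo([[1], [1, 2], [1, 2, 3], []]) -> [3, 0, 1],
# the indices of the three games currently holding the fewest numbers.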
def main():
    """
    Main entry point.
    """
    # list with the numbers 1-25 in sequence
    list_num = list(range(1, 26))
    # read the 5 numbers to exclude from the list
    list_remove = []
    for i in range(1, 6):
        while 1:
            n = int(input('Digite o {}º Número [1-25]: '.format(i)))
            # check that the entered number is within 1-25
            if 1 <= n <= 25:
                # check whether the number was already entered
                if n not in list_remove:
                    list_remove.append(n)
                    break
                else:
                    print('O Número {} já foi digitado, Escolha outro número...'.format(n))
            else:
                print('O Número precisa estar entre 1 e 25, Digite novamente...')
    # show the chosen numbers
    print('Os números escolhidos para exclusão foram {}'.format(list_remove))
    # remove the 5 chosen numbers from the list of 25
    for i in list_remove:
        list_num.remove(i)
    # games: a list of 4 sequences
    jogos = [[],[],[],[]]
    # the first 3 sequences to fill, chosen at random
    id_jogo = random.sample(range(0,4),3)
    for i in list_num:
        # fill the first batch of games
        if len(id_jogo) > 0:
            for j in id_jogo:
                jogos[j].append(i)
            id_jogo = []
        else:
            menor = tamanhoJogo(jogos)
            for k in menor:
                jogos[k].append(i)
    print("Jogos Gerados:")
    n = 1
    for j in jogos:
        print("Jogo Nº{} -> {}".format(n,j))
        n += 1
# run the main function
main()
 | [
"[email protected]"
] | |
2f821830b7cb2d8d648335b132f96d30a25c668d | f8203899e7842979bae0c53953ae47c821636d18 | /d01/d01/spiders/d011.py | 17d7586846febb4a6782680d7416cfb68ee8c6e2 | [] | no_license | Hodo7amShichiYA/Spider_of_sbbbs | b43bda17dfb4e4db79c437b495b19e2bc1b167db | 20ded7e5628085fb48369fbccc35dd7c53563562 | refs/heads/master | 2020-04-26T22:19:29.089809 | 2019-03-05T03:59:04 | 2019-03-05T03:59:04 | 173,869,412 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 747 | py | import scrapy
class d01(scrapy.Spider):
name = "sbbbs" # 定义蜘蛛名
start_urls = ['http://www.mcbbs.net/forum-serverpack-1.html']
def parse(self, response):
        # change the XPath below to extract other elements as needed
titles = response.xpath('//a[@class="s xst"]/text()').extract()[6:]
links = response.xpath('//a[@class="s xst"]/@href').extract()[6:]
for title, link in zip(titles, links):
print(title, 'http://www.mcbbs.net/'+link)
        for i in range(1, 21):
            # change this URL pattern to crawl any board; page numbering starts at 1
            all_pages = 'http://www.mcbbs.net/forum-serverpack-%s.html' % i
yield scrapy.Request(
url=all_pages,
callback=self.parse
)
| [
"[email protected]"
] | |
b9910e6316e3960560fee7410bd0e6d805e0eb0e | da4dffe79e819bbf6e465dca91f6e6554075d7d1 | /homepage/urls.py | c11125dee780d6c4700d87d9258b8efc0a4d06f0 | [] | no_license | nijasnazar33/djangotweet | 4b8fe7db546a51b720c79e5e1accf2a17a028010 | 669561259a517029e7b0eca64907546f89b3412b | refs/heads/master | 2021-01-21T11:00:08.397498 | 2017-10-23T17:55:07 | 2017-10-23T17:55:07 | 91,717,382 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 191 | py | from django.conf.urls import url
from . import views
app_name = 'homepage'
urlpatterns = [
url(r'^$', views.homepage, name='home'),
    url(r'^tweet$', views.post_tweet, name='tweet')
]
| [
"[email protected]"
] | |
4d8d220df9039a463611466e77177472111529df | d75552622878007331992896e7789991bd1c286e | /paymentsApp/server.py | f764b734ad225fa4f6b5ae9183ebe8dadf548000 | [] | no_license | niko-vulic/stripeDemo | a48cdccd6e7d3166a0f8ab439e159509114c3361 | 6c563eec17d09b0d7ecbb9e0ee6b448dc591f67f | refs/heads/master | 2023-08-11T02:57:42.699163 | 2021-10-01T13:01:41 | 2021-10-01T13:01:41 | 411,742,278 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,441 | py | #! /usr/bin/env python3.6
"""
server.py
Stripe Sample.
Python 3.6 or newer required.
"""
import os
from flask import Flask, redirect, request
import stripe
# This is a sample test API key. Sign in to see examples pre-filled with your key.
with open('../api_key.txt', 'r') as keyFile:
    stripe.api_key = keyFile.readline().rstrip()
PRICE_ID_BAG = 'price_1JfYvZIlYoX5Aj60qt1ng5C6'
PRICE_ID_BIN = 'price_1JfYvZIlYoX5Aj60vDbJNMb4'
app = Flask(__name__,
static_url_path='',
static_folder='public')
YOUR_DOMAIN = 'http://localhost:4242'
@app.route('/create-checkout-session', methods=['POST'])
def create_checkout_session():
try:
checkout_session = stripe.checkout.Session.create(
line_items=[
{
# TODO: replace this with the `price` of the product you want to sell
'price': PRICE_ID_BAG,
'quantity': 1,
},
{
'price' : PRICE_ID_BIN,
'quantity': 1,
}
],
payment_method_types=[
'card',
],
mode='payment',
success_url=YOUR_DOMAIN + '/success.html',
cancel_url=YOUR_DOMAIN + '/cancel.html',
)
except Exception as e:
return str(e)
return redirect(checkout_session.url, code=303)
if __name__ == '__main__':
app.run(port=4242)
| [
"[email protected]"
] | |
e2d8680645410a5c6393bb727e6e36244893314e | 942f1c46c63f631633f142c472c72a96730cb899 | /nesi/vendors/EdgeCore/baseCommandProcessor.py | da70ae143f35ccda075546a5d0a732d9f37fc338 | [
"BSD-2-Clause",
"BSD-3-Clause"
] | permissive | inexio/NESi | 8dbd316a5d97678a2d87ca06d80b885d7a95be9e | 920b23ccaf293733b4b571e4df27929c036257f7 | refs/heads/master | 2023-04-05T14:05:25.240645 | 2021-04-19T14:36:44 | 2021-04-19T14:36:44 | 290,719,851 | 32 | 3 | BSD-2-Clause | 2021-02-24T11:38:26 | 2020-08-27T08:26:24 | Python | UTF-8 | Python | false | false | 2,391 | py | # This file is part of the NESi software.
#
# Copyright (c) 2020
# Original Software Design by Ilya Etingof <https://github.com/etingof>.
#
# Software adapted by inexio <https://github.com/inexio>.
# - Janis Groß <https://github.com/unkn0wn-user>
# - Philip Konrath <https://github.com/Connyko65>
# - Alexander Dincher <https://github.com/Dinker1996>
#
# License: https://github.com/inexio/NESi/LICENSE.rst
from nesi import exceptions
from nesi.devices.softbox.cli import base
class BaseCommandProcessor(base.CommandProcessor):
"""Create CLI REPR loop for example switch."""
def map_states(self, object, type):
if object.admin_state == '0':
if type in ('port', 'card'):
object.admin_state = 'Down'
elif object.admin_state == '1':
if type in ('port', 'card'):
object.admin_state = 'Up'
        if object.operational_state == '0':
            if type == 'port':
                object.operational_state = 'Down'
        elif object.operational_state == '1':
            if type == 'port':
                object.operational_state = 'Up'
def do_exit(self, command, *args, context=None):
exc = exceptions.TerminalExitError()
raise exc
def do_quit(self, command, *args, context=None):
exc = exceptions.TerminalExitError()
exc.return_to = 'sysexit'
raise exc
def on_unknown_command(self, command, *args, context=None):
raise exceptions.CommandSyntaxError(command=command)
def create_spacers(self, positions, args):
spacers = []
previous_pos = 0
i = 0
for position in positions:
spacer = position - (previous_pos + len(str(args[i])))
spacers.append(spacer)
previous_pos = position
i += 1
return spacers
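    # Illustrative example: create_spacers([10, 20], ["ab", "cdef"]) -> [8, 6],
    # the padding needed so each printed column starts at its target position.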
def user_input(self, prompt, allow_history=True, tmp_boundary=None):
self._write(prompt)
prompt_end_pos = self.prompt_end_pos
self.prompt_end_pos = len(prompt) - 1
if not allow_history:
self.history_enabled = False
if len(self.line_buffer) != 0:
input = self.line_buffer.pop(0)
else:
input = self._read(tmp_boundary).strip()
if not allow_history:
self.history_enabled = True
self.prompt_end_pos = prompt_end_pos
return input
| [
"[email protected]"
] | |
a2e235204de6344d779d65c61785099129a621ec | c173af0aaaf6d7b6ba6d0ecf0eb87156b1219e33 | /gru.py | 6b604f9abcaa4eaae2b651b11d796a0fa2c5b7ae | [] | no_license | mattcoldwater/rnn_visual | 7be0029009766219434f1d091797ea4a24fb6594 | 84df6d06cc18f3005f5a94875eea6cfe949beecb | refs/heads/main | 2023-02-02T06:13:31.427287 | 2020-12-19T03:47:09 | 2020-12-19T03:47:09 | 313,677,701 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 15,835 | py | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import argparse
import time
import shutil
import os
os.environ["CUDA_VISIBLE_DEVICES"] = '0'
import os.path as osp
import csv
import numpy as np
import pickle
import torch
import torch.nn as nn
import torch.optim as optim
from torch.optim.lr_scheduler import ReduceLROnPlateau
import torchvision.models as models
import torch.nn.utils.rnn as rnn_utils
from data_gru import NTUDataLoaders, AverageMeter, NTUSmallDataLoaders
from networks import LSTM_Simple, GRU_Simple, GRU_Att
import gc
# python gru.py --aug 0 --experiment debug1 --print_freq 500 --batch_size 256 --lr 0.005 --train 1 --max_epoches 5
# python gru.py --aug 0 --experiment debug1 --print_freq 500 --batch_size 256 --lr 0.005 --train 1 --max_epoches 5 --att 3
# python gru.py --aug 0 --experiment debug1 --print_freq 500 --batch_size 256 --lr 0.005 --train 0 --max_epoches 1 --att 3
##############
# 3 att
# python gru.py --aug 0 --experiment att3_gru0 --print_freq 500 --batch_size 256 --lr 0.005 --train 1 --max_epoches 100 --att 3
# python gru.py --aug 0 --experiment att3_gru0 --print_freq 500 --batch_size 256 --lr 0.005 --train 0 --max_epoches 1 --att 3
# 0 att
# python gru.py --aug 0 --experiment att0_gru3 --print_freq 500 --batch_size 256 --lr 0.005 --train 1 --max_epoches 100
# python gru.py --aug 0 --experiment att0_gru3 --print_freq 500 --batch_size 256 --lr 0.005 --train 0 --max_epoches 1
# 1 att
# python gru.py --aug 0 --experiment att1_gru2 --print_freq 500 --batch_size 256 --lr 0.005 --train 1 --max_epoches 100 --att 1
# python gru.py --aug 0 --experiment att1_gru2 --print_freq 500 --batch_size 256 --lr 0.005 --train 0 --max_epoches 1 --att 1
# 2 att
# python gru.py --aug 0 --experiment att2_gru1 --print_freq 500 --batch_size 256 --lr 0.005 --train 1 --max_epoches 100 --att 2
# python gru.py --aug 0 --experiment att2_gru1 --print_freq 500 --batch_size 256 --lr 0.005 --train 0 --max_epoches 1 --att 2
# 4 att
# python gru.py --aug 0 --experiment att4_gru0 --print_freq 500 --batch_size 256 --lr 0.005 --train 1 --max_epoches 100 --att 4 --nlayer 4
# python gru.py --aug 0 --experiment att4_gru0 --print_freq 500 --batch_size 256 --lr 0.005 --train 0 --max_epoches 1 --att 4 --nlayer 4
##################
##############
# 0 att
# python gru.py --dropout 0.5 --aug 0 --experiment att0_gru3_d --print_freq 500 --batch_size 256 --lr 0.005 --train 1 --max_epoches 100
# python gru.py --dropout 0.5 --aug 0 --experiment att0_gru3_d --print_freq 500 --batch_size 256 --lr 0.005 --train 0 --max_epoches 1
# 3 att
# python gru.py --dropout 0.5 --aug 0 --experiment att3_gru0_d --print_freq 500 --batch_size 256 --lr 0.005 --train 1 --max_epoches 100 --att 3
# python gru.py --dropout 0.5 --aug 0 --experiment att3_gru0_d --print_freq 500 --batch_size 256 --lr 0.005 --train 0 --max_epoches 1 --att 3
##################
# python gru.py --aug 0 --experiment debug1 --print_freq 500 --batch_size 4 --lr 0.005 --train 1 --max_epoches 1 --stop_i 3 --debug
# python gru.py --aug 0 --experiment debug1 --print_freq 500 --batch_size 4 --lr 0.005 --train 1 --max_epoches 1 --stop_i 3 --debug --att 3
# python gru.py --aug 0 --experiment debug1 --print_freq 500 --batch_size 4 --lr 0.005 --train 1 --max_epoches 1 --stop_i 3 --debug --att 3 --dropout 0.5
# python gru.py --aug 0 --experiment debug1 --print_freq 500 --batch_size 4 --lr 0.005 --train 1 --max_epoches 1 --stop_i 3 --debug --dropout 0.5 --nlayer 4 --att 4
args = argparse.ArgumentParser(description='Rnn Visual')
## For debug Only!
args.add_argument('--stop_i', type=int, default=-1,
help='for debug')
args.add_argument('--debug', action='store_true',
help='use samller dataset')
## For release,
args.add_argument('--experiment', type=str, default='debug1',
help='the experiment name')
args.add_argument('--data_path', type=str, default='/content/ntu/',
help='NTU Data Path')
args.add_argument('--max_epoches', type=int, default=200,
help='start number of epochs to run')
args.add_argument('--lr', type=float, default=0.005,
help='initial learning rate')
args.add_argument('--dropout', type=float, default=0,
help='dropout rate')
args.add_argument('--nlayer', type=int, default=3,
help='nlayer')
args.add_argument('--att', type=int, default=0,
help='attention layer num')
args.add_argument('--lr_factor', type=float, default=0.1,
help='the ratio to reduce lr on each step')
args.add_argument('--optimizer', type=str, default='Adam',
help='the optimizer type')
args.add_argument('--print_freq', '-p', type=int, default=20,
help='print frequency (default: 20)')
args.add_argument('-b', '--batch_size', type=int, default=32,
help='mini-batch size (default: 256)')
args.add_argument('--case', type=int, default=0,
help='select which case')
args.add_argument('--aug', type=int, default=1,
help='data augmentation')
args.add_argument('--workers', type=int, default=8,
help='number of data loading workers')
args.add_argument('--train', type=int, default=1,
help='train or test')
args = args.parse_args()
def main(results):
num_classes = 120
n_gru_layers = args.nlayer
gru_hidden_size = 100
feature_size = 150
dropout = args.dropout
atten = [False, False, False, False]
if args.att == 1:
atten = [True, False, False, False]
elif args.att == 2:
atten = [True, True, False, False]
elif args.att == 3:
atten = [True, True, True, False]
elif args.att == 4:
atten = [True, True, True, True]
batch_first = True
# model = LSTM_Simple(num_classes=num_classes)
model = GRU_Att(num_classes=num_classes, layers=n_gru_layers, hidden_size=gru_hidden_size,
input_size=feature_size, atten=atten, batch_first=batch_first, dropout=dropout)
model = model.cuda()
# define loss function (criterion) and optimizer
criterion = nn.CrossEntropyLoss().cuda()
optimizer = optim.Adam(model.parameters(), lr=args.lr)
best = -np.Inf
scheduler = ReduceLROnPlateau(optimizer, mode='max', factor=args.lr_factor,
patience=2, cooldown=2, verbose=True)
# Data loading
gc.collect()
if args.debug: # Debug, smaller dataset
ntu_loaders = NTUSmallDataLoaders(args.case, args.aug, data_path=args.data_path)
else:
ntu_loaders = NTUDataLoaders(args.case, args.aug, data_path=args.data_path)
train_loader = ntu_loaders.get_train_loader(args.batch_size, args.workers)
val_loader = ntu_loaders.get_val_loader(args.batch_size, args.workers)
test_loader = ntu_loaders.get_test_loader(args.batch_size, args.workers)
train_size = ntu_loaders.get_train_size()
val_size = ntu_loaders.get_val_size()
test_size = ntu_loaders.get_test_size()
print('Train on %d samples, validate on %d samples, test on %d samples' %
(train_size, val_size, test_size))
if not args.debug: # Debug
assert (len(train_loader) + len(val_loader) + len(test_loader) ) * args.batch_size >= 100000
best_epoch = 0
best_hidden = None
output_dir = root_path
checkpoint = osp.join(output_dir, '%s_best.pth' % args.case)
pred_dir = osp.join(output_dir, '%s_pred.txt' % args.case)
label_dir = osp.join(output_dir, '%s_label.txt' % args.case)
att_dir = osp.join(output_dir, '%s_att.pkl' % args.case)
len_dir = osp.join(output_dir, '%s_len.pkl' % args.case)
x_dir = osp.join(output_dir, '%s_x.pkl' % args.case)
y_dir = osp.join(output_dir, '%s_y.pkl' % args.case)
visual_dirs = [att_dir, len_dir, x_dir, y_dir]
earlystop_cnt = 0
csv_file = osp.join(output_dir, '%s_log.csv' % args.case)
log_res = list()
# Training
if args.train == 1:
for epoch in range(args.max_epoches):
# train for one epoch
t_start = time.time()
train_loss, train_acc, train_hidden = train(train_loader, model, criterion, optimizer, epoch, best_hidden=best_hidden)
# evaluate on validation set
val_loss, val_acc = validate(val_loader, model, criterion)
log_res += [[train_loss, train_acc, val_loss, val_acc]]
print('Epoch-{:<3d} {:.1f}s\t'
'Train: loss {:.4f}\taccu {:.4f}\tValid: loss {:.4f}\taccu {:.4f}'
.format(epoch + 1, time.time() - t_start, train_loss, train_acc, val_loss, val_acc))
current = val_acc.cpu()
if np.greater(current, best):
print('Epoch %d: val_acc improved from %.4f to %.4f, '
'saving model to %s'
% (epoch + 1, best, current, checkpoint))
best = current
best_epoch = epoch + 1
best_hidden = train_hidden
save_checkpoint({
'epoch': epoch + 1,
'state_dict': model.state_dict(),
'best': best,
'monitor': 'val_acc',
'optimizer': optimizer.state_dict(),
'best_hidden': best_hidden,
}, checkpoint)
earlystop_cnt = 0
else:
print('Epoch %d: val_acc did not improve' % (epoch + 1))
earlystop_cnt += 1
scheduler.step(current)
if earlystop_cnt > 8:
print('Epoch %d: early stopping' % (epoch + 1))
break
print('Best val_acc: %.4f from epoch-%d' % (best, best_epoch))
# save log
with open(csv_file, 'w') as fw:
cw = csv.writer(fw)
cw.writerow(['loss', 'acc', 'val_loss', 'val_acc'])
cw.writerows(log_res)
        print('Save train and validation log into %s' % csv_file)
# Testing
test(test_loader, model, checkpoint, results, pred_dir, label_dir, visual_dirs, atten)
def train(train_loader, model, criterion, optimizer, epoch, best_hidden=None):
losses = AverageMeter()
acces = AverageMeter()
model.train()
h = model.init_hidden(args.batch_size, best_hidden)
for i, (inputs, x_len, target) in enumerate(train_loader):
# https://discuss.pytorch.org/t/solved-why-we-need-to-detach-variable-which-contains-hidden-representation/1426
h = h.detach() # h.data
model.zero_grad()
optimizer.zero_grad() # clear gradients out before each mini-batch
output, h = model(inputs, x_len, h)
target = target.cuda(non_blocking=True)
loss = criterion(output, target)
# measure accuracy and record loss
acc = accuracy(output.data, target)
losses.update(loss.item(), inputs.size(0))
acces.update(acc[0], inputs.size(0))
# backward
loss.backward()
# gradient clipping
nn.utils.clip_grad_norm_(model.parameters(), max_norm=1)
optimizer.step() # update parameters
if (i + 1) % args.print_freq == 0:
print('Epoch-{:<3d} {:3d} batches\t'
'loss {loss.val:.4f} ({loss.avg:.4f})\t'
'accu {acc.val:.3f} ({acc.avg:.3f})'.format(
epoch + 1, i + 1, loss=losses, acc=acces))
if args.stop_i == i: break
return losses.avg, acces.avg, h.detach().cpu()
def validate(val_loader, model, criterion, best_hidden=None):
losses = AverageMeter()
acces = AverageMeter()
h = model.init_hidden(args.batch_size, best_hidden)
# switch to evaluation mode
model.eval()
for i, (inputs, x_len, target) in enumerate(val_loader):
with torch.no_grad():
h = h.detach()
output, h = model(inputs, x_len, h)
target = target.cuda(non_blocking=True)
with torch.no_grad():
loss = criterion(output, target)
# measure accuracy and record loss
acc = accuracy(output.data, target)
losses.update(loss.item(), inputs.size(0))
acces.update(acc[0], inputs.size(0))
return losses.avg, acces.avg
def test(test_loader, model, checkpoint, results, path, label_path, visual_dirs, atten):
acces = AverageMeter()
# load learnt model that obtained best performance on validation set
    state = torch.load(checkpoint)  # load the checkpoint once
    model.load_state_dict(state['state_dict'], strict=False)
    best_hidden = state['best_hidden']
# print(best_hidden.shape) # 3, 4, 100
# switch to evaluation mode
h = model.init_hidden(args.batch_size, best_hidden)
model.eval()
preds, label = list(), list()
t_start = time.time()
for i, (inputs, x_len, target) in enumerate(test_loader):
with torch.no_grad():
h = h.detach()
if i == 19 and atten[0]:
output, h, attentions = model(inputs, x_len, h, visual=True)
[att_dir, len_dir, x_dir, y_dir] = visual_dirs
with open(att_dir, 'wb') as ff:
# batch * seq_len * 150
pickle.dump(attentions.detach().cpu().numpy(), ff, pickle.HIGHEST_PROTOCOL)
with open(len_dir, 'wb') as ff:
# batch
pickle.dump(x_len.detach().cpu().numpy(), ff, pickle.HIGHEST_PROTOCOL)
with open(x_dir, 'wb') as ff:
# batch * seq_len * 150
pickle.dump(inputs.detach().cpu().numpy(), ff, pickle.HIGHEST_PROTOCOL)
with open(y_dir, 'wb') as ff:
# batch
pickle.dump(target.detach().cpu().numpy(), ff, pickle.HIGHEST_PROTOCOL)
else:
output, h = model(inputs, x_len, h)
output = output.cpu()
pred = output.data.numpy()
target = target.numpy()
preds = preds + list(pred)
label = label + list(target)
preds = np.array(preds)
label = np.array(label)
preds_label = np.argmax(preds, axis=-1)
total = ((label-preds_label)==0).sum()
total = float(total)
print("Model Accuracy:%.2f" % (total / len(label)*100))
results.append(round(float(total/len(label)*100),2))
np.savetxt(path, preds, fmt = '%f')
np.savetxt(label_path, label, fmt = '%f')
def accuracy(output, target):
batch_size = target.size(0)
_, pred = output.topk(1, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
correct = correct.view(-1).float().sum(0, keepdim=True)
return correct.mul_(100.0 / batch_size)
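# Illustrative example: for output logits [[0.1, 0.9], [0.8, 0.2]] and
# target [1, 0], both top-1 predictions are correct, so accuracy returns a
# tensor holding 100.0.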
def save_checkpoint(state, filename='checkpoint.pth.tar', is_best=False):
torch.save(state, filename)
if is_best:
shutil.copyfile(filename, 'model_best.pth.tar')
if __name__ == '__main__':
root_path = '/content/results_'+args.experiment
if not osp.exists(root_path):
os.mkdir(root_path)
# get the number of total cases of certain dataset
cases = 2 # 'C-Subject' 'C-Setup'
results = list()
for case in range(cases):
print('case', case)
args.case = case
main(results)
print()
np.savetxt(root_path + '/result.txt', results, fmt = '%f')
print(results)
    print('ave:', np.array(results).mean())
 | [
"[email protected]"
] | |
69c652cee462458bb2db86a54d1df8cbfb004867 | 1c006b2d9a04385efe3894659c9f47b86093a6c2 | /src/shared/protocol/flt/src/Header.py | 0627944faad3670e5786c686009c5f679560d3ee | [
"Apache-2.0"
] | permissive | kpotash/bond-trader | 7f665264e1b3e80d6827feae5ff6b6203dd31a47 | cb29c2f75b64b27a8f93308566c0c3cc8426b336 | refs/heads/master | 2020-05-26T00:34:34.837057 | 2014-09-01T03:49:41 | 2014-09-01T03:49:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,012 | py |
import Message
from dateutil import parser
class Header(Message.Message):
def msgType(self, value = None):
if None == value:
return self.__buf[0:2].strip()
else:
self.__buf = self.__buf[:0] + str(value).ljust(2, " ")[:2] + self.__buf[2:]
def brokerId(self, value = None):
if None == value:
return self.__buf[2:7].strip()
else:
self.__buf = self.__buf[:2] + str(value).ljust(5, " ")[:5] + self.__buf[7:]
def assetClass(self, value = None):
if None == value:
val = self.__buf[7:9].strip()
return int(val) if val else 0
else:
self.__buf = self.__buf[:7] + str(value).rjust(2, " ")[:2] + self.__buf[9:]
def msgSeqNo(self, value = None):
if None == value:
val = self.__buf[9:14].strip()
return int(val) if val else 0
else:
self.__buf = self.__buf[:9] + str(value).rjust(5, " ")[:5] + self.__buf[14:]
def sendTime(self, value = None):
if None == value:
return parser.parse(self.__buf[14:40])
else:
self.__buf = self.__buf[:14] + value.isoformat().ljust(26, "0")[:26] + self.__buf[40:]
def bodyLength(self, value = None):
if None == value:
val = self.__buf[40:45].strip()
return int(val) if val else 0
else:
self.__buf = self.__buf[:40] + str(value).rjust(5, " ")[:5] + self.__buf[45:]
def __init__(self):
self.__buf = " " * 45
def __repr__(self):
return (super(Header, self).__repr__()
+ ", buffer length: {0}".format(len(self.__buf))
+ "\nmsgType: [" + str(self.msgType()) + "]"
+ "\nbrokerId: [" + str(self.brokerId()) + "]"
+ "\nassetClass: [" + str(self.assetClass()) + "]"
+ "\nmsgSeqNo: [" + str(self.msgSeqNo()) + "]"
+ "\nsendTime: [" + str(self.sendTime()) + "]"
+ "\nbodyLength: [" + str(self.bodyLength()) + "]")
| [
"[email protected]"
] | |
f8b965867f05d23d429655dfbe85f81713996f13 | eb5db30573b14ae5fc285bc156597aa246fb7800 | /2021/days/9/d9.py | 85ef06019e2e831ef3934e5afa2853f860cbd409 | [] | no_license | petevdp/advent | bce2bc2d468d13f89ae1e455a5944c14cba3939e | fea4cdd9c0afe11580df841147116e357eb98612 | refs/heads/master | 2023-02-22T18:06:59.129497 | 2023-01-03T01:21:47 | 2023-01-03T01:21:47 | 230,809,651 | 0 | 0 | null | 2023-02-11T02:22:45 | 2019-12-29T22:17:38 | Python | UTF-8 | Python | false | false | 1,787 | py | # %%
from math import prod
from itertools import product, islice
from typing import NamedTuple
from collections import defaultdict
class Coord(NamedTuple):
x: int
y: int
def get_input(path="./input"):
cave_map = {}
with open(path) as f:
for i, line in enumerate(f.read().strip().split("\n")):
for j, num in enumerate(map(int, list(line))):
cave_map[Coord(j, i)] = num
return cave_map
I = get_input()
NEIGHBOR_DELTAS = {Coord(1, 0),
Coord(-1, 0),
Coord(0, 1),
Coord(0, -1),
}
def get_neighbors(coord):
for d in NEIGHBOR_DELTAS:
n_coord = Coord(coord.x - d.x, coord.y - d.y)
n = I.get(n_coord)
if n is not None:
yield n_coord
def get_upstream_neighbors(coord):
for n_coord in get_neighbors(coord):
if I[n_coord] > I[coord]:
yield n_coord
def get_low_points():
    # a point is a low point when every in-bounds neighbor is strictly higher
    for coord in I:
        if len([*get_upstream_neighbors(coord)]) == len([*get_neighbors(coord)]):
            yield coord
def part1():
return sum([I[p] + 1 for p in get_low_points()])
def part2():
basin_sizes = []
for low_point in get_low_points():
basin_points = set()
locs_to_check = {low_point}
while len(locs_to_check) > 0:
curr = locs_to_check.pop()
basin_points.add(curr)
for coord in get_upstream_neighbors(curr):
if I[coord] == 9 or coord in basin_points:
continue
locs_to_check.add(coord)
basin_sizes.append(len(basin_points))
return prod(islice(sorted(basin_sizes, reverse=True), 3))
print("p1: ", part1())
print("p2: ", part2())
| [
"[email protected]"
] | |
1bcfc69637c816b8ad9a31732db0e3e249dd6045 | 7ad486dd24efd1a1b981bc0ab92d10f2d0d67484 | /socializing/socializing/wsgi.py | ef6919022c49060f795c27706c296596c0d69f4f | [] | no_license | skerdilajdhoxha/bloggerista | 49667f390b84594ed66d64447861fbc32c051c2f | f89f60585009453d47061b771e297f79b67d0e32 | refs/heads/master | 2021-01-13T12:17:40.975566 | 2017-09-24T00:05:16 | 2017-09-24T00:05:16 | 78,279,063 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 399 | py | """
WSGI config for socializing project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "socializing.settings")
application = get_wsgi_application()
| [
"[email protected]"
] | |
73c00f601e495fc7b7f37d8e43c1b3b2f92ed274 | b749fb5cdfebc50a72b6b67e3cdec8fcdf06b279 | /ejercicio_django/Shoppy/Shoppy/settings.py | 29ff454ec752086b4c32c0d565f4f07ab9dfe3f5 | [] | no_license | rcaracheo/PythonDjango2016 | 119a0a0a46dcd14249092bb5a66ec2461e1dc1bc | c136aaf29b3479eed59952121013a61c8e8d77e6 | refs/heads/master | 2021-01-16T20:12:13.503224 | 2016-02-23T02:57:51 | 2016-02-23T02:57:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,594 | py | """
Django settings for Shoppy project.
Generated by 'django-admin startproject' using Django 1.9.2.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'q)&*3)ehi!#furc&)nfa0y909di=^5xea9imuph5v59gw_t00k'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = [
'products.apps.ProductsConfig',
'clients.apps.ClientsConfig',
'userprofiles.apps.UserprofilesConfig',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'Shoppy.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
            # directory searched for all project-level templates
os.path.join(BASE_DIR, 'templates'),
],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'Shoppy.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'es'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
# define the path for the static files
STATICFILES_DIRS = [
os.path.join(BASE_DIR, 'static')
]
STATIC_URL = '/static/'
# specify where uploaded images are handled
MEDIA_ROOT = 'media'
MEDIA_URL ='/media/' | [
"[email protected]"
] | |
7c6c80d22bbfbf8e8e0d3374f30b231ef0101084 | 22e6f489e0108728eb7ddd86bc7a310e32a8a5cd | /_validate_init.py | cf531e613b74035e1e1bd7d2622f4da2dcf0f0c2 | [] | no_license | darrent1974/csiroct_gui | 0db797bbb6ee9e6e74db82e55230c9be3cb257a7 | 679c2902b3122a7af0115557a114dbf92b37bb5e | refs/heads/master | 2020-07-29T21:49:49.970338 | 2019-09-21T11:36:37 | 2019-09-21T11:36:37 | 209,974,876 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,653 | py | """
DO NOT MODIFY
This file is used to validate your publish settings.
"""
from __future__ import print_function
import os
import sys
import importlib
components_package = 'csiroct_gui'
components_lib = importlib.import_module(components_package)
missing_dist_msg = 'Warning {} was not found in `{}.__init__.{}`!!!'
missing_manifest_msg = '''
Warning {} was not found in `MANIFEST.in`!
It will not be included in the build!
'''
with open('MANIFEST.in', 'r') as f:
manifest = f.read()
def check_dist(dist, filename):
# Support the dev bundle.
if filename.endswith('dev.js'):
return True
return any(
filename in x
for d in dist
for x in (
[d.get('relative_package_path')]
if not isinstance(d.get('relative_package_path'), list)
else d.get('relative_package_path')
)
)
def check_manifest(filename):
return filename in manifest
def check_file(dist, filename):
if not check_dist(dist, filename):
print(
missing_dist_msg.format(filename, components_package, '_js_dist'),
file=sys.stderr
)
if not check_manifest(filename):
print(missing_manifest_msg.format(filename),
file=sys.stderr)
for cur, _, files in os.walk(components_package):
for f in files:
if f.endswith('js'):
# noinspection PyProtectedMember
check_file(components_lib._js_dist, f)
elif f.endswith('css'):
# noinspection PyProtectedMember
check_file(components_lib._css_dist, f)
elif not f.endswith('py'):
check_manifest(f)
| [
"[email protected]"
] | |
2717efe3f5ccd767c14dae4d6a22b9acc6630463 | bdb747f21b3bdcccf1c62c45a0ed0983453eff7d | /DUHaat_repo/wsgi.py | 33cb3dbed3e6d6d711fc5b3b3e3e08a686011ddf | [] | no_license | rakibsxyz/DuHaat | 2a19225c77f430c87f63d62feac742daf2d3f24f | 3c01e7a44838225fbe52a1ee2df15fd5ebed5f57 | refs/heads/master | 2020-09-04T17:25:45.115480 | 2019-11-05T19:14:28 | 2019-11-05T19:14:28 | 219,831,026 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 399 | py | """
WSGI config for DUHaat_repo project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'DUHaat_repo.settings')
application = get_wsgi_application()
| [
"[email protected]"
] | |
6df12d3c420cbf59fc22dda5431dbff3d9053ca5 | 358452ad16fac0bbd70b365133a393fb7ea8bb1d | /Shannon&ShannonElias.py | 4c11429d9a20249607027d8f2efcf6f05df7b818 | [] | no_license | Diegobr21/InformationTheoryActs | 5683aa6d4e943fb3793ab749626fca122a2515b5 | 2bae8750dccd9ab298b226dbed6f326b0673be22 | refs/heads/develop | 2023-05-29T10:19:33.413981 | 2021-06-02T23:30:17 | 2021-06-02T23:30:17 | 341,319,544 | 0 | 0 | null | 2021-05-02T17:13:51 | 2021-02-22T19:53:16 | Python | UTF-8 | Python | false | false | 353 | py | from expansion_binaria import *
n = int(input('Enter the number of input frequencies: '))
frecuencias = []
for i in range(n):
    frecuencias.append(int(input(f'Enter frequency #{i + 1}: ')))
F_i = [0 for i in range(len(frecuencias))]
for i in range(len(F_i)):
if(i != 0):
F_i[i] = frecuencias[i -1] + F_i[i - 1]
print(F_i)
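# --- Illustrative sketch (added for clarity; not part of the original
# assignment, and it does not use the unknown expansion_binaria API) ---
# Shannon-Fano-Elias coding turns the cumulative totals above into codewords:
# each symbol x gets the first ceil(log2(1/p(x))) + 1 bits of the binary
# expansion of F_bar(x) = F(x) + p(x)/2. Assumes every frequency is positive.
import math

def binary_expansion(value, bits):
    # First `bits` bits of the fractional binary expansion of `value`.
    out = ''
    for _ in range(bits):
        value *= 2
        bit = int(value)
        out += str(bit)
        value -= bit
    return out

total = sum(frecuencias)
for i, freq in enumerate(frecuencias):
    p = freq / total
    f_bar = (F_i[i] + freq / 2) / total
    length = math.ceil(math.log2(1 / p)) + 1
    print(f'symbol {i + 1}: p={p:.3f} codeword={binary_expansion(f_bar, length)}')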
| [
"[email protected]"
] | |
20be259ec7b4d702f1bf25b5ab6b3905499b1437 | 351f4305b4617607c9a835161caab2c1ebb0984b | /project/mystorage/serializers.py | 75b51b878301ae5586d2c8ce2f8559440a9ed957 | [] | no_license | su-mmer/Django-Rest-Framework-Practice | 366e642979affd6bc8883e3897928bb416767073 | 03ba23a2e05eafc631dfc449df4f96ba2cc85302 | refs/heads/master | 2020-08-29T11:56:46.311248 | 2019-10-28T10:53:59 | 2019-10-28T10:53:59 | 218,024,921 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,007 | py | from .models import Essay, Album, Files
from rest_framework import serializers
from rest_framework.parsers import MultiPartParser, FormParser
class EssaySerializer(serializers.ModelSerializer):
    # author is assigned automatically
author_name = serializers.ReadOnlyField(source = 'author.username')
    # read-only so it cannot be modified by clients
class Meta:
model = Essay
fields = ('pk', 'title', 'body', 'author_name')
class AlbumSerializer(serializers.ModelSerializer):
author_name = serializers.ReadOnlyField(source = 'author.username')
image = serializers.ImageField(use_url = True)
class Meta:
model = Album
fields = ('pk', 'author_name', 'image', 'desc')
class FilesSerializer(serializers.ModelSerializer):
author_name = serializers.ReadOnlyField(source = 'author.username')
myfiles = serializers.FileField(use_url = True)
class Meta:
model = Files
fields = ('pk', 'author_name', 'myfiles', 'desc') | [
"[email protected]"
] | |
be93be576c7a3ab73bd98f6a24157ea38b574474 | 9e1b576181d37cd9f1a9a928e0ec7f8709532616 | /tests/test_tf_installation.py | d3fc639b660c982fc0c81229ccf21e616a53c465 | [
"MIT"
] | permissive | epiception/conda-envs | 740232fbb145a26611dbc57a9881699d2fdcfdcb | 12f971b275317f596edb68b00d9b3172521e1e7d | refs/heads/master | 2020-06-17T08:48:27.146490 | 2019-07-05T21:32:42 | 2019-07-05T21:32:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 149 | py | import tensorflow as tf
print ("Tensorflow version is: " + str(tf.__version__))
sess = tf.Session(config=tf.ConfigProto(log_device_placement=True))
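# Note: tf.Session / tf.ConfigProto are TensorFlow 1.x APIs; under TF 2.x this
# smoke test would need tf.compat.v1.Session(...) instead.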
| [
"[email protected]"
] | |
1dd6df95270703d99548b7418f4d7d489d73ea75 | fa346a2d5886420e22707a7be03599e634b230a9 | /temboo/Library/Facebook/Reading/Comments.py | 817a8e4924ba298d8dd8ae34188b7b3e705c3d1d | [] | no_license | elihuvillaraus/entity-resolution | cebf937499ed270c3436b1dd25ab4aef687adc11 | 71dd49118a6e11b236861289dcf36436d31f06bc | refs/heads/master | 2021-12-02T17:29:11.864065 | 2014-01-08T04:29:30 | 2014-01-08T04:29:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,026 | py | # -*- coding: utf-8 -*-
###############################################################################
#
# Comments
# Retrieves comments for a specified Graph API object.
#
# Python version 2.6
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
from temboo.outputs.Facebook.FacebookComment import FacebookComment
import json
class Comments(Choreography):
def __init__(self, temboo_session):
"""
Create a new instance of the Comments Choreo. A TembooSession object, containing a valid
set of Temboo credentials, must be supplied.
"""
Choreography.__init__(self, temboo_session, '/Library/Facebook/Reading/Comments')
def new_input_set(self):
return CommentsInputSet()
def _make_result_set(self, result, path):
return CommentsResultSet(result, path)
def _make_execution(self, session, exec_id, path):
return CommentsChoreographyExecution(session, exec_id, path)
class CommentsInputSet(InputSet):
"""
An InputSet with methods appropriate for specifying the inputs to the Comments
Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
"""
def set_AccessToken(self, value):
"""
Set the value of the AccessToken input for this Choreo. ((required, string) The access token retrieved from the final step of the OAuth process.)
"""
InputSet._set_input(self, 'AccessToken', value)
def set_Fields(self, value):
"""
Set the value of the Fields input for this Choreo. ((optional, string) A comma separated list of fields to return (i.e. id,name).)
"""
InputSet._set_input(self, 'Fields', value)
def set_Limt(self, value):
"""
Set the value of the Limt input for this Choreo. ((optional, integer) Used to page through results. Limits the number of records returned in the response.)
"""
InputSet._set_input(self, 'Limt', value)
def set_ObjectID(self, value):
"""
Set the value of the ObjectID input for this Choreo. ((required, string) The id of a graph api object to get comments for.)
"""
InputSet._set_input(self, 'ObjectID', value)
def set_Offset(self, value):
"""
Set the value of the Offset input for this Choreo. ((optional, integer) Used to page through results. Returns results starting from the specified number.)
"""
InputSet._set_input(self, 'Offset', value)
def set_ResponseFormat(self, value):
"""
Set the value of the ResponseFormat input for this Choreo. ((optional, string) The format that the response should be in. Can be set to xml or json. Defaults to json.)
"""
InputSet._set_input(self, 'ResponseFormat', value)
def set_Since(self, value):
"""
Set the value of the Since input for this Choreo. ((optional, date) Used for time-based pagination. Values can be a unix timestamp or any date accepted by strtotime.)
"""
InputSet._set_input(self, 'Since', value)
def set_Until(self, value):
"""
Set the value of the Until input for this Choreo. ((optional, date) Used for time-based pagination. Values can be a unix timestamp or any date accepted by strtotime.)
"""
InputSet._set_input(self, 'Until', value)
class CommentsResultSet(ResultSet):
"""
A ResultSet with methods tailored to the values returned by the Comments Choreo.
The ResultSet object is used to retrieve the results of a Choreo execution.
"""
def getJSONFromString(self, str):
return json.loads(str)
def get_HasNext(self):
"""
Retrieve the value for the "HasNext" output from this Choreo execution. ((boolean) A boolean flag indicating that a next page exists.)
"""
return self._output.get('HasNext', None)
def get_HasPrevious(self):
"""
Retrieve the value for the "HasPrevious" output from this Choreo execution. ((boolean) A boolean flag indicating that a previous page exists.)
"""
return self._output.get('HasPrevious', None)
def get_Response(self):
"""
Retrieve the value for the "Response" output from this Choreo execution. (The response from Facebook. Corresponds to the ResponseFormat input. Defaults to JSON.)
"""
return self._output.get('Response', None)
def getComments(self):
"""
The comment for this graph object
"""
return [FacebookComment(le) for le in self.getJSONFromString(self._output.get('Response', [])).get("data", [])]
class CommentsChoreographyExecution(ChoreographyExecution):
def _make_result_set(self, response, path):
return CommentsResultSet(response, path)
| [
"[email protected]"
] | |
fc963b9a72250626ca1b8f53b5928b488a4c5672 | ebb3ea3ee89fba55758227e3b4b35660dd1a3c4d | /tools/__init__.py | 41e51c4ceb3de11049b8163eacb3abfbaf7c6246 | [] | no_license | mamazu/Platformer | f762e51b18fe2a7584d4fab4d9d0cfdbaa1b92be | c35c777ca4e24261e40d3ebef9f6c0bbb92df17a | refs/heads/master | 2021-01-11T17:45:42.912965 | 2017-03-24T21:50:19 | 2017-03-24T21:50:19 | 79,838,198 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 68 | py | if __name__ == "__main__":
print("This is a module")
quit()
| [
"[email protected]"
] | |
7898833fe7c1ed8ac9c9f7097f5c7cf53f16f80c | f39dc9cc34f385d8fe627035f5f35b8b02da9798 | /items/loot/dragon_sword.py | 23a8208339f16df9e324c34c92839822624d1a99 | [] | no_license | NikAlexSer/RogueBot | 600fdcd2633cb8de0b78768f5072dcd4894c7ba9 | fefcb2b5fc866c0c7df1084d1de53287b1cd933c | refs/heads/master | 2020-12-29T00:29:16.745304 | 2016-09-05T08:13:28 | 2016-09-05T08:13:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 804 | py | name = 'Драконий убийца'
description = (
    "It used to be one of the dragon's teeth, but now you can put it to its intended use"
)
price = 1000
fightable = True
def fight_use(user, reply, room):
if room.code_name == 'dragon' or room.code_name == 'quinquepede':
        reply('This may look like cheating, but you have won this duel.')
user.won(reply)
return 0
else:
msg = (
            'In some magical and abstract way this sword bounced off the monster and hit you in the knee.\n'
            "It isn't a dragon, after all."
)
reply(msg)
user.make_damage(20, 40, reply, name=name)
return 0 | [
"[email protected]"
] | |
c7b182554b0de3362170d0fe63c80c2eceb54be4 | d2f91b93ad42aaefa5fc315a9b3a5d45d07fa705 | /slbman/venv/Lib/site-packages/aliyunsdkecs/request/v20140526/DescribePhysicalConnectionsRequest.py | b5c6d435434a6a186174e4f7bf930b93c20e79e7 | [] | no_license | junlongzhou5566/managePlatform | 66cb5bc5b176147ff0038819924f7efa8df1d556 | 3201ba1a11b05c86db5f42aa9ca8eaf1cc20e216 | refs/heads/master | 2021-03-29T00:58:23.337808 | 2020-03-17T09:50:21 | 2020-03-17T09:50:21 | 247,910,365 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,855 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
class DescribePhysicalConnectionsRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Ecs', '2014-05-26', 'DescribePhysicalConnections','ecs')
def get_Filters(self):
return self.get_query_params().get('Filters')
def set_Filters(self,Filters):
for i in range(len(Filters)):
for j in range(len(Filters[i].get('Values'))):
if Filters[i].get('Values')[j] is not None:
self.add_query_param('Filter.' + str(i + 1) + '.Value.'+str(j + 1), Filters[i].get('Values')[j])
if Filters[i].get('Key') is not None:
self.add_query_param('Filter.' + str(i + 1) + '.Key' , Filters[i].get('Key'))
def get_ResourceOwnerId(self):
return self.get_query_params().get('ResourceOwnerId')
def set_ResourceOwnerId(self,ResourceOwnerId):
self.add_query_param('ResourceOwnerId',ResourceOwnerId)
def get_ResourceOwnerAccount(self):
return self.get_query_params().get('ResourceOwnerAccount')
def set_ResourceOwnerAccount(self,ResourceOwnerAccount):
self.add_query_param('ResourceOwnerAccount',ResourceOwnerAccount)
def get_ClientToken(self):
return self.get_query_params().get('ClientToken')
def set_ClientToken(self,ClientToken):
self.add_query_param('ClientToken',ClientToken)
def get_OwnerAccount(self):
return self.get_query_params().get('OwnerAccount')
def set_OwnerAccount(self,OwnerAccount):
self.add_query_param('OwnerAccount',OwnerAccount)
def get_PageSize(self):
return self.get_query_params().get('PageSize')
def set_PageSize(self,PageSize):
self.add_query_param('PageSize',PageSize)
def get_UserCidr(self):
return self.get_query_params().get('UserCidr')
def set_UserCidr(self,UserCidr):
self.add_query_param('UserCidr',UserCidr)
def get_OwnerId(self):
return self.get_query_params().get('OwnerId')
def set_OwnerId(self,OwnerId):
self.add_query_param('OwnerId',OwnerId)
def get_PageNumber(self):
return self.get_query_params().get('PageNumber')
def set_PageNumber(self,PageNumber):
self.add_query_param('PageNumber',PageNumber) | [
"[email protected]@qq.com"
] | [email protected]@qq.com |
9c4a76f5f68f7fa8377da427d2cea7db34426b7d | ac9334216a536004f5fe042263c7ae171fdca271 | /migrations/versions/950bee344977_.py | 3fb2c52270382e08a5be5094c09873e6c9254e2a | [] | no_license | metalsalmon/remote_monitoring | 08510fe4ac9ecfc1411c7c87413829ef0cb43ed5 | 3d23d39e75540d0322181b810e3f4a1c0c0d5d59 | refs/heads/main | 2023-05-28T04:39:28.007354 | 2021-06-11T20:00:44 | 2021-06-11T20:00:44 | 346,487,267 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,819 | py | """empty message
Revision ID: 950bee344977
Revises: e1612c42a720
Create Date: 2021-04-11 23:32:21.620355
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '950bee344977'
down_revision = 'e1612c42a720'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('device', sa.Column('created_at', sa.DateTime(), server_default=sa.text('now()'), nullable=True))
op.add_column('device', sa.Column('updated_at', sa.DateTime(), server_default=sa.text('now()'), nullable=True))
op.add_column('monitoring', sa.Column('created_at', sa.DateTime(), server_default=sa.text('now()'), nullable=True))
op.add_column('monitoring', sa.Column('updated_at', sa.DateTime(), server_default=sa.text('now()'), nullable=True))
op.add_column('package', sa.Column('created_at', sa.DateTime(), server_default=sa.text('now()'), nullable=True))
op.add_column('package', sa.Column('updated_at', sa.DateTime(), server_default=sa.text('now()'), nullable=True))
op.add_column('task', sa.Column('created_at', sa.DateTime(), server_default=sa.text('now()'), nullable=True))
op.add_column('task', sa.Column('updated_at', sa.DateTime(), server_default=sa.text('now()'), nullable=True))
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('task', 'updated_at')
op.drop_column('task', 'created_at')
op.drop_column('package', 'updated_at')
op.drop_column('package', 'created_at')
op.drop_column('monitoring', 'updated_at')
op.drop_column('monitoring', 'created_at')
op.drop_column('device', 'updated_at')
op.drop_column('device', 'created_at')
# ### end Alembic commands ###
| [
"[email protected]"
] | |
66041a83a9acec3ff7db307d6826a333b8ffb02e | 8132857837286e5df8a4ec32d4f23ad734d6da24 | /myvenv/bin/player.py | 9587565474f8a795893514901f6ffbe506a9b7e6 | [] | no_license | JavierMerinoMomblona/Project | 20ea2ee2b5fa7a2d4110defad1c7287a5f9a52e4 | d8f318c1b33bb61aff0377fb7192c8bf0a561348 | refs/heads/master | 2021-01-20T03:09:03.118517 | 2017-06-07T07:50:08 | 2017-06-07T07:50:08 | 89,500,645 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,117 | py | #!/home/zener/Escritorio/Project/myvenv/bin/python3
#
# The Python Imaging Library
# $Id$
#
from __future__ import print_function
import sys
if sys.version_info[0] > 2:
import tkinter
else:
import Tkinter as tkinter
from PIL import Image, ImageTk
# --------------------------------------------------------------------
# an image animation player
class UI(tkinter.Label):
def __init__(self, master, im):
if isinstance(im, list):
# list of images
self.im = im[1:]
im = self.im[0]
else:
# sequence
self.im = im
if im.mode == "1":
self.image = ImageTk.BitmapImage(im, foreground="white")
else:
self.image = ImageTk.PhotoImage(im)
tkinter.Label.__init__(self, master, image=self.image, bg="black", bd=0)
self.update()
duration = im.info.get("duration", 100)
self.after(duration, self.next)
def next(self):
if isinstance(self.im, list):
try:
im = self.im[0]
del self.im[0]
self.image.paste(im)
except IndexError:
return # end of list
else:
try:
im = self.im
im.seek(im.tell() + 1)
self.image.paste(im)
except EOFError:
return # end of file
duration = im.info.get("duration", 100)
self.after(duration, self.next)
self.update_idletasks()
# --------------------------------------------------------------------
# script interface
if __name__ == "__main__":
if not sys.argv[1:]:
print("Syntax: python player.py imagefile(s)")
sys.exit(1)
filename = sys.argv[1]
root = tkinter.Tk()
root.title(filename)
if len(sys.argv) > 2:
# list of images
print("loading...")
im = []
for filename in sys.argv[1:]:
im.append(Image.open(filename))
else:
# sequence
im = Image.open(filename)
UI(root, im).pack()
root.mainloop()
| [
"[email protected]"
] | |
b7a20171862937dfd200ebe2f84aa71b9545acdf | 3591038a7059c508f0348dd108774ec9b4da8aff | /sampleapi/urls.py | d19094ac33b405fade6e04bf8ce84b935179b1a3 | [] | no_license | shimayu22/sample-same-bitrhday-filter | 3714c286807522905cfc795a2cb08fbf0fba855e | e775779f0c9e12a5127f14ce6a2eb593f1eef377 | refs/heads/main | 2023-01-22T19:29:16.366079 | 2020-12-04T05:47:01 | 2020-12-04T05:47:01 | 318,034,427 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 263 | py | from django.urls import path, include
from rest_framework import routers
from sampleapi import views
router = routers.DefaultRouter()
router.register('sampleapi', views.SampleViewSet)
app_name = 'sampleapi'
urlpatterns = [
path('', include(router.urls))
] | [
"[email protected]"
] | |
aff0c71df2d45a0a7a4311c12e2db3a4653a78a2 | bfbf4c7df621a8190f00f58d981b6ccfb4dc3382 | /41_time_series/bitcoin_test.py | 336710402ee99e7f267207540c8768ba034d139b | [] | no_license | zjtprince/DataAnalysisTraining | 5786725431c7f47041826bc08cad0109c9902c77 | 4c2167e0edbddbeb74621d2a02d3ee56f85586a2 | refs/heads/master | 2023-01-28T16:42:05.457291 | 2020-12-13T06:38:04 | 2020-12-13T06:38:04 | 320,966,541 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,416 | py | import pandas as pd
from datetime import datetime
from statsmodels.tsa.arima_model import ARMA
import warnings
import matplotlib.pyplot as plt
from itertools import product
warnings.filterwarnings('ignore')
data = pd.read_csv('~/Documents/bitcoin/bitcoin_2012-01-01_to_2018-10-31.csv')
print(data.head())
# print(data.info())
data['Timestamp'] = pd.to_datetime(data['Timestamp'])
data.index = data['Timestamp']
print(data.head())
data_m = data.resample('M').mean()
data_q = data.resample('Q-DEC').mean()
data_y = data.resample('A-DEC').mean()
fig = plt.figure(figsize=(15,7))
plt.rcParams['font.sans-serif']=['SimHei']
plt.suptitle('Bitcoin price (USD)', fontsize=20)
plt.subplot(221)
plt.plot(data['Weighted_Price'], '-', label='daily')
plt.legend()
plt.subplot(222)
plt.plot(data_m['Weighted_Price'], '-', label='monthly')
plt.legend()
plt.subplot(223)
plt.plot(data_q['Weighted_Price'], '-', label='quarterly')
plt.legend()
plt.subplot(224)
plt.plot(data_y['Weighted_Price'], '-', label='yearly')
plt.legend()
# plt.show()
q_range = range(0,5)
p_range = range(0,5)
params = product(q_range,p_range)
param_list = list(params)
print(param_list)
results = []
best_aic = float('inf')
for param in param_list:
try:
model = ARMA(data_m['Weighted_Price'],order=(param[0],param[1])).fit()
except ValueError:
print("参数错误",param)
continue
aic = model.aic
if aic < best_aic:
best_model = model
best_aic = aic
best_param = param
results.append([param,model.aic])
result_table = pd.DataFrame(results)
result_table.columns=['parameters','aic']
print("best model:", best_model.summary())
print("best param:", best_param)
data_month2 = data_m[['Weighted_Price']]
date_list = [datetime(2018, 11, 30), datetime(2018, 12, 31), datetime(2019, 1, 31), datetime(2019, 2, 28), datetime(2019, 3, 31),
datetime(2019, 4, 30), datetime(2019, 5, 31), datetime(2019, 6, 30)]
future = pd.DataFrame(index=date_list, columns=data_m.columns)
data_month2 = pd.concat([data_month2,future])
data_month2['forecast']= best_model.predict(start=0, end=91)
print(data_month2.head())
plt.figure(figsize=(20, 7))
data_month2['Weighted_Price'].plot(label='actual price')
data_month2['forecast'].plot(label='forecast', ls='--', color='r')
plt.legend()
plt.title('Bitcoin price (monthly)')
plt.xlabel('Time')
plt.ylabel('Price (USD)')
plt.show() | [
"[email protected]"
] | |
a00369c266d381ed72f02fee382f751a5e84f1d8 | 7fa7cfa9024eaa17e897a0c518c6aab5dc23c91a | /view/add_file.py | 87fcb80f08b6e597af20443c7081a61313f6ad66 | [] | no_license | leejunbo/mnm | 35a34712c30671e36b69b78df5643d31c62612fc | a8eaad2362820e677bee43bb35f31ea3ab206ada | refs/heads/master | 2020-03-27T00:59:06.780282 | 2018-08-24T03:07:16 | 2018-08-24T03:07:16 | 145,672,205 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 840 | py | def add_domain_file(cls_path,cls_name,cls_ip,domain, ip, port=""):
path = cls_path+"/"+cls_name+"/"+cls_ip
f = open(path+"/" + domain +".conf", "w+", encoding='utf8')
f.write(
"\tserver{ \n"
"\t\tlisten 80;\n"
"\t\tserver_name " + domain + ";\n"
"\t\tlocation / {\n"
"\t\tproxy_pass http://" + ip + port+";\n"
"\t\tproxy_redirect off;\n"
"\t\tproxy_set_header Host $host;\n"
"\t\tproxy_set_header X-Real-IP $remote_addr;\n"
"\t\tproxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;\n"
"\t\tproxy_connect_timeout 600;\n"
"\t\tproxy_read_timeout 600;\n"
"\t\t}\n"
"\t}\n")
f.close()
    h = open(cls_path+"/"+cls_name+"/hosts", "a+", encoding='utf8')
h.write(ip + " " + domain + "\n")
h.close()
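# Example usage (illustrative; the paths, names, and addresses are made up):
#
#   add_domain_file('/etc/nginx/conf.d', 'cluster1', '10.0.0.5',
#                   'example.com', '10.0.0.5', ':8080')
#
# writes cluster1/10.0.0.5/example.com.conf with a proxy_pass block for
# http://10.0.0.5:8080 and appends "10.0.0.5 example.com" to cluster1/hosts.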
| [
"[email protected]"
] | |
2eb62323804f63d3092822f470a42ecd66af9815 | 449b523a8ff9603ad2d3a5c3a0e7eb31fd862be6 | /myutils/elk/elk.py | c85da860e38c7da0e17f8a35ac0d760ac8f01e7e | [] | no_license | mrgrit/utils | d0d56afcfa43be8a48a7d9bbe6fb97d9d598cbda | 2e5cd7d7562f571edf8220d1bc4f2de0abde0a0c | refs/heads/master | 2021-07-25T05:16:59.852032 | 2017-11-04T05:14:36 | 2017-11-04T05:14:36 | 108,482,701 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,575 | py | import queue
from threading import Thread
import json
from datetime import date
import os
import time
import datetime
from datetime import date
import pandas as pd
import xlrd
from elasticsearch import Elasticsearch
import pycurl
import requests
max_result_window = 5000000
es = Elasticsearch([{'host': '10.24.50.20', 'port': 9200}])
logq = queue.Queue()
day = datetime.date.today()
def get_logday() :
logday_list = []
logday = ''
today = str(datetime.date.today())
today = today.split('-')
logday = str(today[0]) + str(today[1])+str(today[2])
return logday
def write_log(msg) :
#log_file = logdir+str(logday)+logfile
now = datetime.datetime.now()
day = datetime.date.today()
f = open('./'+str(day)+'_log.txt','a')
log_message = '['+ str(now) + '] ' + msg + '\n'
f.write(log_message)
f.close()
logdir = os.path.join('H:\\tf10','log',get_logday())
def read_logfull():
for root, dirs, files in os.walk(logdir):
for fname in files:
logfile = os.path.join(root,fname)
print(logfile)
if 'ips' in fname:
df = pd.read_excel(logfile)
df["B/S"]="ips"
for i in range(len(df)):
ips = df.loc[[i],['B/S','사건일시','Source IP','Source Port','Destination IP','Destination Port','결과','Method','횟수','비고']] #9
logq.put_nowait(ips)
elif 'fw' in fname:
df = pd.read_excel(logfile)
df["B/S"]="fw"
for i in range(len(df)):
fw = df.loc[[i],['B/S','수집일시','Source IP','Destination IP','Destination Port','Protocol','결과']] #6
logq.put_nowait(fw)
elif 'waf' in fname:
df = pd.read_excel(logfile)
df["B/S"]="waf"
for i in range(len(df)):
waf = df.loc[[i],['B/S','사건일시','Source IP','Source Port','Destination IP','Destination Port','결과','Method','Get/Post','비고']]
logq.put_nowait(waf)
elif 'web' in fname:
df = pd.read_excel(logfile)
df["B/S"]="web"
for i in range(len(df)):
web = df.loc[[i],['B/S','사건일시','Source IP','Destination IP','결과','Method','비고']]
logq.put_nowait(web)
def write_logfull() :
while True:
if logq.qsize() == 0 :
time.sleep(2)
print("sleeping")
write_log("sleeping")
else:
for i in range(logq.qsize()):
log=logq.get_nowait()
if logq.qsize() % 200 == 0 :
print(logq.qsize())
qsiz = "Queue Size = " + str(logq.qsize())
write_log(qsiz)
if log.iloc[0,0] == 'ips' :
print(log.iloc[0,0])
ips_json = {'logtype':log.iloc[0,0],'time':log.iloc[0,1],'sip':log.iloc[0,2],'sport':str(log.iloc[0,3]),'dip':log.iloc[0,4],'dport':str(log.iloc[0,5]),'result':log.iloc[0,6],'method':log.iloc[0,7],'not':str(log.iloc[0,8]),'att_code':log.iloc[0,9]}
url = 'http://10.24.50.20:9200/ais/logfull/'
payload = ips_json
headers = {'Content-type':'application/json'}
r = requests.post(url, data = json.dumps(payload), headers=headers)
elif log.iloc[0,0] == 'fw' :
fw_json = {'logtype':log.iloc[0,0],'time':log.iloc[0,1],'sip':log.iloc[0,2],'dip':log.iloc[0,3],'dport':str(log.iloc[0,4]),'protocol':log.iloc[0,5],'result':log.iloc[0,6]}
url = 'http://10.24.50.20:9200/ais/logfull/'
payload = fw_json
headers = {'Content-type':'application/json'}
r = requests.post(url, data = json.dumps(payload), headers=headers)
elif log.iloc[0,0] == 'waf' :
waf_json = {'logtype':log.iloc[0,0],'time':log.iloc[0,1],'sip':log.iloc[0,2],'sport':str(log.iloc[0,3]),'dip':log.iloc[0,4],'dport':str(log.iloc[0,5]),'result':log.iloc[0,6],'method':log.iloc[0,7],'gnp':log.iloc[0,8],'att_code':log.iloc[0,9]}
url = 'http://10.24.50.20:9200/ais/logfull/'
payload = waf_json
headers = {'Content-type':'application/json'}
r = requests.post(url, data = json.dumps(payload), headers=headers)
elif log.iloc[0,0] == 'web' :
web_json = {'logtype':log.iloc[0,0],'time':log.iloc[0,1],'sip':log.iloc[0,2],'dip':log.iloc[0,3],'result':log.iloc[0,4],'method':log.iloc[0,5],'att_code':log.iloc[0,6]}
url = 'http://10.24.50.20:9200/ais/logfull/'
payload = web_json
headers = {'Content-type':'application/json'}
r = requests.post(url, data = json.dumps(payload), headers=headers)
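# Note: write_logfull() POSTs each log row as its own document; the imported
# elasticsearch package also offers helpers.bulk(), which could batch a whole
# queue drain into a single request if indexing throughput becomes an issue.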
def main():
write_log("start")
read_th = Thread(target=read_logfull)
write_th = Thread(target=write_logfull)
#read_th.demon = True
#write_th.demon = True
read_th.start()
write_th.start()
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
7f8820cdc1afaa4e19ce098706a5396267019fb1 | 8295fbbadc6c5cdda363cb9401363ab0cfba6f82 | /Python/5kyu_TheHashtagGenerator.py | a814f641ce843383cf97d35d354fe975064cc62e | [] | no_license | codingPineAppl3/codewarskata | 41d60d0bacc7b5b114eaadbd5085debc8a3681b2 | 2975f0fac276dd62822a1d75b0a8ca9920d5e14a | refs/heads/master | 2022-11-21T18:07:20.600474 | 2020-07-24T08:46:08 | 2020-07-24T08:46:08 | 282,054,537 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 150 | py | import string
def generate_hashtag(s):
if s == '' or len(s) > 140:
return False
else:
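        # Examples (as shown by the kata's sample tests):
        #   generate_hashtag('')                -> False  (empty input)
        #   generate_hashtag('Jaba Daba Doo')   -> '#JabaDabaDoo'
        #   generate_hashtag('a' * 141)         -> False  (input longer than 140)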
return '#' + s.title().replace(' ', '') | [
"[email protected]"
] | |
4393994aaeb11a94ff7b2fd0c48e2dd2d36c7608 | 54d7c3738617048788944d134667b50e9fa9e452 | /Prueba_FPGA/FullMode/FullMode_v3/top_v3.py | 43cd753f8f8f98806eb5e6ece2d7ddc691a07e63 | [] | no_license | diegoa314/tesis | f0cf4851981e9fbb1e9e97b6aae620bf1945a41d | 973483d6ee0736379538ebede03cd0e7d5edeab1 | refs/heads/master | 2020-10-01T08:02:11.128730 | 2020-08-10T21:30:23 | 2020-08-10T21:30:23 | 227,494,618 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,694 | py | from migen import *
import sys
sys.path.append('/home/diegoaranda/Documents/Tesis/Prueba_FPGA')
import cmoda7
sys.path.append('/home/diegoaranda/Documents/Tesis/8b10b')
from encoder_comb import *
sys.path.append('/home/diegoaranda/Documents/Tesis/Prueba_FPGA/FullMode')
from transmitter40b import *
sys.path.append('/home/diegoaranda/Documents/Tesis/PRBS')
from prbs import *
sys.path.append('/home/diegoaranda/Documents/Tesis/FIFO')
from fifo2 import *
from fsm_v2_1 import *
class Top_v3(Module):
def __init__(self,platform=0):
        self.tx_output=platform.request("serial").tx # serial output
        self.trans_en=platform.request("user_btn") # master enable for transmission
self.write_en=platform.request("user_btn")
self.fifo_full_led=platform.request("user_led")
self.fifo_empty_led=platform.request("user_led")
"""
        # Uncomment for simulation
self.tx_output=Signal()
self.trans_en=Signal()
self.write_en=Signal()
self.fifo_empty_led=Signal()
self.fifo_full_led=Signal()
"""
# # #
self.reset=Signal()
data_gen_en=Signal()
self.data=data=Signal(32)
fsm=Fsm_v2_1() #FSM
encoder1=Encoder()
encoder2=Encoder()
encoder3=Encoder()
encoder4=Encoder()
        transmitter=Transmitter40b() # 40-bit (4x10b) transmitter
fifo=SyncFIFOBuffered(width=32, depth=32)
self.submodules+=[encoder1,encoder2,encoder3,encoder4,transmitter,fsm,fifo]
self.comb+=[
            # signal assignments
fsm.tx_done.eq(transmitter.tx_40bdone),
fsm.trans_en.eq(self.trans_en),
fsm.fifo_empty.eq(~fifo.readable),
fsm.fifo_full.eq(~fifo.writable),
fsm.write_en.eq(self.write_en),
transmitter.trans_en.eq(fsm.tx_en),
data_gen_en.eq(fsm.data_gen_en),
fifo.we.eq(fsm.fifo_we),
fifo.re.eq(fsm.fifo_re),
self.tx_output.eq(transmitter.tx_serial),
fifo.din.eq(self.data),
encoder1.data_in.eq(fifo.dout[0:8]),
encoder2.data_in.eq(fifo.dout[8:16]),
encoder3.data_in.eq(fifo.dout[16:24]),
encoder4.data_in.eq(fifo.dout[24:32]),
If(fsm.encoder_ready,
transmitter.data_in[0:10].eq(encoder1.data_out),
transmitter.data_in[10:20].eq(encoder2.data_out),
transmitter.data_in[20:30].eq(encoder3.data_out),
transmitter.data_in[30:40].eq(encoder4.data_out),
),
            # no special (K) characters are sent
encoder1.k.eq(0),
encoder2.k.eq(0),
encoder3.k.eq(0),
encoder4.k.eq(0),
self.fifo_full_led.eq(~fifo.writable),
self.fifo_empty_led.eq(~fifo.readable),
self.reset.eq(self.trans_en & self.write_en)
]
self.sync+=[
            # The disparity assignment must be sequential because of the
            # mutual dependency between the two signals (disp_in and disp_out)
If(fsm.change_disp,
encoder1.disp_in.eq(encoder1.disp_out),
encoder2.disp_in.eq(encoder2.disp_out),
encoder3.disp_in.eq(encoder3.disp_out),
encoder4.disp_in.eq(encoder4.disp_out)
),
If(data_gen_en & ~self.reset,
If(self.data==0,
self.data.eq(1)
).Else(
self.data.eq(self.data<<1)
)
),
If(self.reset, self.data.eq(1))
]
plat=cmoda7.Platform()
dut=Top_v3(platform=plat)
plat.build(dut)
"""
def tb(dut):
yield dut.trans_en.eq(1)
for i in range(10):
yield
yield dut.trans_en.eq(0)
yield dut.write_en.eq(1)
for i in range(100):
yield
yield dut.write_en.eq(0)
for i in range(10):
yield
yield dut.trans_en.eq(1)
for i in range(1000):
yield
yield dut.trans_en.eq(1)
yield dut.write_en.eq(1)
for i in range(10):
yield
yield dut.trans_en.eq(0)
yield dut.write_en.eq(1)
for i in range(100):
yield
yield dut.write_en.eq(0)
for i in range(10):
yield
yield dut.trans_en.eq(1)
for i in range(500):
yield
dut=Top_v3()
run_simulation(dut,tb(dut),vcd_name="top_v3.vcd")
""" | [
"[email protected]"
] | |
2d930ec01c0e552c9e873a6319556a5188e44369 | fc3753c9e35f4f1741554060e3c1993930d4ba1b | /PaddleOCR/PPOCRLabel/setup.py | d33238a800ddc620f469ead15ab355cd0e718de2 | [
"Apache-2.0"
] | permissive | TangJiamin/Ultra_light_OCR_No.23 | 0ba5fdce78993e6843875ed6d81e6708878defc6 | 594aa286dc2f88614141838ce45c164647226cdb | refs/heads/master | 2023-06-12T21:44:54.444624 | 2021-07-09T06:36:20 | 2021-07-09T06:36:20 | 383,319,713 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,880 | py | # Copyright (c) <2015-Present> Tzutalin
# Copyright (C) 2013 MIT, Computer Science and Artificial Intelligence Laboratory. Bryan Russell, Antonio Torralba,
# William T. Freeman. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and
# associated documentation files (the "Software"), to deal in the Software without restriction, including without
# limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the
# Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of
# the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT
# NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
# CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages, Command
from sys import platform as _platform
from shutil import rmtree
import sys
import os
here = os.path.abspath(os.path.dirname(__file__))
NAME = 'labelImg'
REQUIRES_PYTHON = '>=3.0.0'
REQUIRED_DEP = ['pyqt5', 'lxml']
about = {}
with open(os.path.join(here, 'libs', '__init__.py')) as f:
exec(f.read(), about)
with open('README.rst') as readme_file:
readme = readme_file.read()
with open('HISTORY.rst') as history_file:
history = history_file.read()
# OS specific settings
SET_REQUIRES = []
if _platform == "linux" or _platform == "linux2":
# linux
print('linux')
elif _platform == "darwin":
# MAC OS X
SET_REQUIRES.append('py2app')
required_packages = find_packages()
required_packages.append('labelImg')
APP = [NAME + '.py']
OPTIONS = {
'argv_emulation': True,
'iconfile': 'resources/icons/app.icns'
}
class UploadCommand(Command):
"""Support setup.py upload."""
    description = 'Build and publish the package.'
user_options = []
@staticmethod
def status(s):
"""Prints things in bold."""
print('\033[1m{0}\033[0m'.format(s))
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
try:
self.status('Removing previous builds…')
rmtree(os.path.join(here, 'dist'))
except OSError:
self.status('Fail to remove previous builds..')
pass
self.status('Building Source and Wheel (universal) distribution…')
os.system(
'{0} setup.py sdist bdist_wheel --universal'.format(sys.executable))
self.status('Uploading the package to PyPI via Twine…')
os.system('twine upload dist/*')
self.status('Pushing git tags…')
os.system('git tag -d v{0}'.format(about['__version__']))
os.system('git tag v{0}'.format(about['__version__']))
# os.system('git push --tags')
sys.exit()
setup(
app=APP,
name=NAME,
version=about['__version__'],
description="LabelImg is a graphical image annotation tool and label object bounding boxes in images",
long_description=readme + '\n\n' + history,
author="TzuTa Lin",
author_email='[email protected]',
url='https://github.com/tzutalin/labelImg',
python_requires=REQUIRES_PYTHON,
package_dir={'labelImg': '.'},
packages=required_packages,
entry_points={
'console_scripts': [
'labelImg=labelImg.labelImg:main'
]
},
include_package_data=True,
install_requires=REQUIRED_DEP,
license="MIT license",
zip_safe=False,
keywords='labelImg labelTool development annotation deeplearning',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
],
package_data={'data/predefined_classes.txt': ['data/predefined_classes.txt']},
options={'py2app': OPTIONS},
setup_requires=SET_REQUIRES,
# $ setup.py publish support.
cmdclass={
'upload': UploadCommand,
}
)
| [
"[email protected]"
] | |
60716b5d9dba982f6c0d54154ed12c4e22c43271 | 6f09561b1f11cba0403b13b7e7ec9ebc75bbae94 | /utils/flags/_base.py | 5755228263b288a359e1fc8396a5c52f5159e093 | [] | no_license | a68b57/sea_dog | af7c43a4cb76656d5a2e655aa987f2af496a88ed | 2b77492163068e8cb5580d32c5a3cdc9df1bd0f4 | refs/heads/master | 2020-04-08T19:54:37.031276 | 2019-01-03T09:39:49 | 2019-01-03T09:39:49 | 159,675,929 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 5,618 | py | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Flags which will be nearly universal across models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import flags
import tensorflow as tf
from utils.flags._conventions import help_wrap
from utils.logs import hooks_helper
def define_base(data_dir=True, model_dir=True, clean=True, train_epochs=True,
epochs_between_evals=True, stop_threshold=True, batch_size=True,
num_gpu=True, hooks=True, export_dir=True):
"""Register base flags.
Args:
data_dir: Create a flag for specifying the input data directory.
model_dir: Create a flag for specifying the model file directory.
train_epochs: Create a flag to specify the number of training epochs.
epochs_between_evals: Create a flag to specify the frequency of testing.
stop_threshold: Create a flag to specify a threshold accuracy or other
eval metric which should trigger the end of training.
batch_size: Create a flag to specify the batch size.
num_gpu: Create a flag to specify the number of GPUs used.
hooks: Create a flag to specify hooks for logging.
export_dir: Create a flag to specify where a SavedModel should be exported.
Returns:
A list of flags for core.py to marks as key flags.
"""
key_flags = []
if data_dir:
flags.DEFINE_string(
name="data_dir", short_name="dd", default="/tmp",
help=help_wrap("The location of the input data."))
key_flags.append("data_dir")
if model_dir:
flags.DEFINE_string(
name="model_dir", short_name="md", default="/tmp",
help=help_wrap("The location of the model checkpoint files."))
key_flags.append("model_dir")
if clean:
flags.DEFINE_boolean(
name="clean", default=False,
help=help_wrap("If set, model_dir will be removed if it exists."))
key_flags.append("clean")
if train_epochs:
flags.DEFINE_integer(
name="train_epochs", short_name="te", default=1,
help=help_wrap("The number of epochs used to data_batch_1.bin."))
key_flags.append("train_epochs")
if epochs_between_evals:
flags.DEFINE_integer(
name="epochs_between_evals", short_name="ebe", default=1,
help=help_wrap("The number of training epochs to run between "
"evaluations."))
key_flags.append("epochs_between_evals")
if stop_threshold:
flags.DEFINE_float(
name="stop_threshold", short_name="st",
default=None,
help=help_wrap("If passed, training will stop at the earlier of "
"train_epochs and when the evaluation metric is "
"greater than or equal to stop_threshold."))
if batch_size:
flags.DEFINE_integer(
name="batch_size", short_name="bs", default=32,
help=help_wrap("Batch size for training and evaluation. When using "
"multiple gpus, this is the global batch size for "
"all devices. For example, if the batch size is 32 "
"and there are 4 GPUs, each GPU will get 8 examples on "
"each step."))
key_flags.append("batch_size")
if num_gpu:
flags.DEFINE_integer(
name="num_gpus", short_name="ng",
default=1 if tf.test.is_gpu_available() else 0,
help=help_wrap(
"How many GPUs to use with the DistributionStrategies API. The "
"default is 1 if TensorFlow can detect a GPU, and 0 otherwise."))
if hooks:
# Construct a pretty summary of hooks.
hook_list_str = (
u"\ufeff Hook:\n" + u"\n".join([u"\ufeff {}".format(key) for key
in hooks_helper.HOOKS]))
flags.DEFINE_list(
name="hooks", short_name="hk", default="LoggingTensorHook",
help=help_wrap(
u"A list of (case insensitive) strings to specify the names of "
u"training hooks.\n{}\n\ufeff Example: `--hooks ProfilerHook,"
u"ExamplesPerSecondHook`\n See official.utils.logs.hooks_helper "
u"for details.".format(hook_list_str))
)
key_flags.append("hooks")
if export_dir:
flags.DEFINE_string(
name="export_dir", short_name="ed", default=None,
help=help_wrap("If set, a SavedModel serialization of the model will "
"be exported to this directory at the end of training. "
"See the README for more details and relevant links.")
)
key_flags.append("export_dir")
return key_flags
def get_num_gpus(flags_obj):
"""Treat num_gpus=-1 as 'use all'."""
if flags_obj.num_gpus != -1:
return flags_obj.num_gpus
from tensorflow.python.client import device_lib # pylint: disable=g-import-not-at-top
local_device_protos = device_lib.list_local_devices()
return sum([1 for d in local_device_protos if d.device_type == "GPU"])
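# Minimal usage sketch (illustrative; assumes absl's normal flag parsing and
# is not part of this module's public surface):
#
#   import sys
#   from absl import flags
#   define_base()
#   flags.FLAGS(sys.argv)    # e.g. prog --batch_size=64 --num_gpus=-1
#   print(get_num_gpus(flags.FLAGS))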
| [
"[email protected]"
] | |
0edbaca525d80501f909ef612a51414f6cc1dfb8 | 2caad56ceeb9ff2840783373b9c38d14cf3eefbe | /GarageDeamon/Main.py | 039519d14434ab3c23287ba804aa617784de00da | [
"Apache-2.0"
] | permissive | schitic/garage | e2ab3764e2c61e456ef0e3a42674be8b60fc092d | ee5d7700d9e2d9af6a2aba0f02a6fdf5ec9c940c | refs/heads/master | 2020-05-27T00:54:43.734721 | 2019-06-29T09:52:05 | 2019-06-29T09:52:05 | 188,430,579 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,194 | py | #!/usr/local/bin/python
from GarageDeamon.Loader import ActorLoader, SensorLoader
from GarageDeamon.Logger import LogCreator
import logging
import RPi.GPIO as GPIO
import signal
import sys
class GarageDeamon:
def __init__(self):
# Start logging
self.log = LogCreator()
signal.signal(signal.SIGINT, self.sigint_handler)
# Load the sensors
self.sensors = SensorLoader.get_modules()
for sensor in self.sensors.keys():
self.log.write('Sensor: %s' % sensor, 'Loaded')
self.sensors[sensor].run()
# Load the actors
self.actors = ActorLoader.get_modules()
for actor in self.actors.keys():
self.log.write('Actors: %s' % actor, 'Loaded')
    def sigint_handler(self, signum, frame):
for sensor in self.sensors.keys():
self.sensors[sensor].close()
GPIO.setmode(GPIO.BCM)
GPIO.cleanup()
sys.exit(0)
def run(self):
while True:
continue
if __name__ == '__main__':
logging.basicConfig(format="%(levelname)-8s: %(message)s")
logging.getLogger().setLevel(logging.INFO)
garage = GarageDeamon()
garage.run()
| [
"[email protected]"
] | |
c12710419cb9815b8a919a42a5c142b1d2df886e | 4a312f99444ef43be9e42702f032ab3540589f2a | /models/tganv2_gen.py | 1313d0b50864727405d54304226a5f51f589e85b | [] | no_license | Zasder3/Tganv2-PyTorch-Train-Sparsely--Generate-Densely | ef8005d25b5485e9963b7a8c4585300cb09f8fdf | fe40dde8117a4f6c55d1a5a9d163abaa96150c0c | refs/heads/master | 2022-11-14T21:43:36.451941 | 2020-07-10T02:01:35 | 2020-07-10T02:01:35 | 278,507,068 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 7,278 | py | import torch
import torch.nn as nn
import numpy as np
import math
class CLSTM_cell(nn.Module):
def __init__(self, n_filters):
"""Convolutional LSTM Cell
Args:
n_filters (int): Number of LSTM channels
"""
super(CLSTM_cell, self).__init__()
self.w_x = nn.Conv2d(n_filters, n_filters * 4, kernel_size=3,
padding=1)
self.w_h = nn.Conv2d(n_filters, n_filters * 4, kernel_size=3,
padding=1, bias=False)
def forward(self, x, h=None, c=None):
xifoc = self.w_x(x)
xi, xf, xo, xc = xifoc.chunk(4, dim=1)
if h is not None:
hi, hf, ho, hc = self.w_h(h).chunk(4, dim=1)
else:
hi, hf, ho, hc = torch.zeros_like(xifoc).chunk(4, dim=1)
if c is None:
c = torch.zeros_like(x)
ci = torch.sigmoid(xi + hi)
cf = torch.sigmoid(xf + hf)
co = torch.sigmoid(xo + ho)
cc = cf * c + ci * torch.tanh(xc + hc)
ch = torch.tanh(cc) * co
return ch, cc
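# The arithmetic above is the standard ConvLSTM update with 3x3 convolutions:
#   i = sigmoid(W_xi * x + W_hi * h)    f = sigmoid(W_xf * x + W_hf * h)
#   o = sigmoid(W_xo * x + W_ho * h)    c' = f . c + i . tanh(W_xc * x + W_hc * h)
#   h' = o . tanh(c')
# where w_x / w_h each produce all four gate pre-activations in one conv.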
class CLSTM(nn.Module):
def __init__(self, n_filters, n_frames):
"""Full Convolutional LSTM
Args:
n_filters (int): Number of LSTM channels
n_frames (int): Frames to generate
"""
super(CLSTM, self).__init__()
self.cell = CLSTM_cell(n_filters)
self.n_frames = n_frames
def forward(self, z):
# Assume z is in proper convolutional shape
out = torch.stack([torch.zeros_like(z)]*self.n_frames, dim=1)
h, c = None, None
for i in range(self.n_frames):
h, c = self.cell(z, h, c)
out[:, i] = h
z = torch.zeros_like(z)
return out
class Up(nn.Module):
def __init__(self, cin, cout):
"""Upscale and convolutions in ResNet setup
Args:
cin (int): in channels
cout (int): out channels
"""
super(Up, self).__init__()
self.relu = nn.ReLU()
# define main branch
self.upsample = nn.Upsample(scale_factor=2)
self.bn1 = nn.BatchNorm2d(cin)
self.convm1 = nn.Conv2d(cin, cout, kernel_size=3, padding=1)
self.bn2 = nn.BatchNorm2d(cout)
self.convm2 = nn.Conv2d(cout, cout, kernel_size=3, padding=1)
# define skip branch
self.sconv = nn.Conv2d(cin, cout, kernel_size=1)
# initialize
nn.init.xavier_uniform_(self.convm1.weight, gain=math.sqrt(2))
nn.init.xavier_uniform_(self.convm2.weight, gain=math.sqrt(2))
nn.init.xavier_uniform_(self.sconv.weight)
def forward(self, x):
# compute main
h = self.bn1(x)
h = self.relu(h)
h = self.upsample(h)
h = self.convm1(h)
h = self.bn2(h)
h = self.relu(h)
h = self.convm2(h)
# compute skip
s = self.upsample(x)
s = self.sconv(s)
return h + s
class Render(nn.Module):
def __init__(self, cin, colors=3):
"""Render an image given the parameters
Args:
cin (int): in channels
colors (int, optional): Color channels. Defaults to 3.
"""
super(Render, self).__init__()
self.bn = nn.BatchNorm2d(cin)
self.relu = nn.ReLU()
self.conv = nn.Conv2d(cin, colors, kernel_size=3, padding=1)
def forward(self, x):
x = self.bn(x)
x = self.relu(x)
x = self.conv(x)
x = torch.tanh(x)
return x
class Generator_CLSTM(nn.Module):
def __init__(self, z_dim=256,
tempc=1024,
zt_dim=3,
upchannels=[512, 256, 128],
subchannels=[64, 32, 32],
n_frames=16,
colors=3):
"""Full generator CLSTM model
Args:
z_dim (int, optional): Latent z. Defaults to 256.
tempc (int, optional): CLSTM channels. Defaults to 1024.
zt_dim (int, optional): CLSTM window size. Defaults to 3.
upchannels (list, optional): Defaults to [512, 256, 128].
subchannels (list, optional): Defaults to [64, 32, 32].
n_frames (int, optional): Frames to generate. Defaults to 16.
colors (int, optional): Number of colors. Defaults to 3.
"""
super(Generator_CLSTM, self).__init__()
assert len(subchannels) == 3
self.tempc = tempc
self.zt_dim = zt_dim
self.colors = colors
self.fc = nn.Linear(z_dim, zt_dim**2 * tempc)
self.temp = CLSTM(tempc, n_frames)
self.build = nn.Sequential()
for i in range(len(upchannels)):
if not i:
self.build.add_module('Up1', Up(tempc, upchannels[0]))
else:
self.build.add_module(f'Up{i+1}', Up(upchannels[i-1],
upchannels[i]))
self.buildr = Render(upchannels[-1], colors=colors)
self.sup1 = Up(upchannels[-1], subchannels[0])
self.sup1r = Render(subchannels[0], colors=colors)
self.sup2 = Up(subchannels[0], subchannels[1])
self.sup2r = Render(subchannels[1], colors=colors)
self.sup3 = Up(subchannels[1], subchannels[2])
self.sup3r = Render(subchannels[2], colors=colors)
def subsample(self, h, N, T, frames=4):
# to vid
_, C, H, W = h.shape
h = h.view(N, T, C, H, W)
h = h[:, np.random.randint(min(frames, T))::frames]
N, T, C, H, W = h.shape
# to train
h = h.contiguous().view(N * T, C, H, W)
return h, T
def forward(self, z, test=False):
"""Compute generator forward pass
Args:
z (torch.Tensor): Latent z [batch_size, z_dim]
test (bool, optional): Produce test videos. Defaults to False.
Returns:
list(torch.Tensor) or torch.Tensor: Subsampled or regular videos
"""
h = self.fc(z)
h = h.view(-1, self.tempc, self.zt_dim, self.zt_dim)
h = self.temp(h)
N, T, C, H, W = h.shape
h = h.view(N*T, C, H, W)
h = self.build(h)
outsize = self.zt_dim * 2 ** (len(self.build) + 3)
if test:
h = self.sup1(h)
h = self.sup2(h)
h = self.sup3(h)
h = self.sup3r(h).view(N, T, self.colors, outsize,
outsize).transpose(1, 2)
return h
else:
# render 1st
x1 = self.buildr(h).view(N, T, self.colors, outsize // 8,
outsize // 8)
h, T = self.subsample(h, N, T)
h = self.sup1(h)
# render 2nd
x2 = self.sup1r(h).view(N, T, self.colors, outsize // 4,
outsize // 4)
h, T = self.subsample(h, N, T)
h = self.sup2(h)
# render 3rd
x3 = self.sup2r(h).view(N, T, self.colors, outsize // 2,
outsize // 2)
h, T = self.subsample(h, N, T)
h = self.sup3(h)
# render 4th
x4 = self.sup3r(h).view(N, T, self.colors, outsize, outsize)
return x1, x2, x3, x4
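# Minimal smoke test (illustrative sketch; not from the original repository):
if __name__ == '__main__':
    gen = Generator_CLSTM(n_frames=4)
    z = torch.randn(2, 256)
    x1, x2, x3, x4 = gen(z)        # training path: four subsampled renders
    video = gen(z, test=True)      # test path: (N, C, T, H, W) full video
    print(x1.shape, x2.shape, x3.shape, x4.shape, video.shape)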
| [
"[email protected]"
] | |
ddb20467c5291607a5703e8e18cb1de813e55600 | f37bb5242abb372f6d01b72907f770cbce5a5dd2 | /excise_precursors.py | a32ede6b75d28290d850dd20540ca2f2e7ad94a2 | [] | no_license | Frankyyoung24/mornafinder | a3b19e03b223d5fdf36f8417c87c4468a0ddb644 | faa9793b00180e31a4246cdaa280891ca83bd4db | refs/heads/master | 2020-04-30T05:23:53.124611 | 2019-03-20T01:21:22 | 2019-03-20T01:21:22 | 176,626,401 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,258 | py | #!/usr/bin/env python
from __future__ import print_function
import sys
import re
import argparse
import getopt
from port import print_stderr, str_reverse, tr, substr
import os
hash_pos = {}
count_lines = 0
count_excisions = 0
freq_min = 2
usage = '''
{} file_fasta file_arf precursor.coords
This script excises potential miRNA precursor sequences from a genome.
The fasta file should be the relevant genome, and the arf file should
contain the read mappings.
The precursors.coords designates where to write the precursor genome coordinates to.
-a integer Only excise if the potential mature microRNA is represented
by a number of reads equal to or higher than the integer
(default 2).
-b Output progress to screen'''.format(sys.argv[0])
def insertfeature(db, strand, db_beg, db_end, freq):
    global hash_pos
    positions = hash_pos.setdefault(db, {}).setdefault(strand, {}).setdefault(db_beg, {})
    positions[db_end] = positions.get(db_end, 0) + freq
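# hash_pos layout: chromosome -> strand -> read start -> read end -> summed
# read frequency for all reads mapping to that exact interval.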
def find_freq(freq):
m = re.search(r'_x(\d+)', freq)
if m:
m = m.groups()
        return int(m[0])
else:
print_stderr('Problem with read format\n')
return 1
def parse_file_arf(file_arf):
global count_lines
lines = int(os.popen('cat {} | wc -l'.format(file_arf)).read().strip())
if options.get('-b') == '':
print_stderr('reading the mapping file into memory, total lines={}\n'.format(lines))
try:
        FILENAME = open(file_arf, 'r')
except IOError:
print('Could not open file {}'.format(file_arf))
sys.exit(-1)
while True:
        line = FILENAME.readline()
if not line:
break
m = re.match(r'^(\S+)\s+(\S+)\s+(\S+)\s+(\S+)\s+(\S+)\s+(\S+)\s+(\S+)\s+(\S+)\s+(\S+)\s+(\S+)\s+(\S+)\s+(\S+)\s+(\S+)', line)
if m:
m = m.groups()
query = m[0]
query_map_lng = int(m[1])
query_beg = int(m[2])
query_end = int(m[3])
query_seq = m[4]
db = m[5]
db_map_lng = int(m[6])
db_beg = int(m[7])
db_end = int(m[8])
db_seq = m[9]
strand = m[10]
edits = m[11]
edit_string = m[12]
            freq = find_freq(query)
insertfeature(db, strand, db_beg, db_end, freq)
count_lines += 1
        if options.get('-b') == '':
pass
FILENAME.close()
def find_freq_max_downstream(db, strand, db_beg, db_end):
global hash_pos
freq_max = 0
for pos_beg in range(db_beg + 1, db_end + 70 + 1):
try:
hash_pos[db][strand][pos_beg]
except KeyError:
pass
else:
# Defined
pos_ends = sorted(hash_pos[db][strand][pos_beg].keys())
for pos_end in pos_ends:
freq = hash_pos[db][strand][pos_beg][pos_end]
if freq > freq_max:
freq_max = freq
return freq_max
def max2(a, b):
return a if a > b else b
def min2(a, b):
return a if a < b else b
def excise_position(db_seq, db_lng, excise_beg, excise_end):
excise_beg_limit = max2(1, excise_beg)
excise_end_limit = min2(db_lng, excise_end)
excise_lng = excise_end_limit - excise_beg_limit + 1
# excise_seq = substr($$db_seq,$excise_beg_limit-1,$excise_lng);
excise_seq = substr(db_seq, excise_beg_limit - 1, excise_lng)
return excise_seq
def com(sequence):
return tr(sequence, 'acgtuACGTU', 'TGCAATGCAA')
def rev(sequence):
return str_reverse(sequence)
def revcom(sequence):
return rev(com(sequence))
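# Example: com('AACG') == 'TTGC', so revcom('AACG') == 'CGTT'.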
def print_positions(PF, db, strand, db_seq, db_lng, excise_beg, excise_end):
global count_excisions
excise_seq = excise_position(db_seq, db_lng, excise_beg, excise_end)
if strand == '-':
excise_seq = revcom(excise_seq)
# print ">$$db\_$count_excisions\n$excise_seq\n";
# print PF ">$$db\_$count_excisions\t$$strand\t$$excise_beg\t$$excise_end\n";
print('>{}_{}\n{}'.format(db, count_excisions, excise_seq))
PF.write('>{}_{}\t{}\t{}\t{}\n'.format(
db,
count_excisions,
strand,
excise_beg,
excise_end
))
count_excisions += 1
def excise(PF, db, db_seq):
global freq_min
global hash_pos
strands = sorted(hash_pos[db])
for strand in strands:
db_lng = len(db_seq)
db_limit = 0
db_begs = sorted(hash_pos[db][strand].keys())
for db_beg in db_begs:
db_ends = sorted(hash_pos[db][strand][db_beg])
for db_end in db_ends:
freq = hash_pos[db][strand][db_beg][db_end]
freq_max_ds = find_freq_max_downstream(db, strand, db_beg, db_end)
if freq < freq_min or freq < freq_max_ds or db_beg > db_limit:
continue
excise_beg = db_beg - 70
excise_end = db_end + 20
# print out in fasta format
print_positions(PF, db, strand, db_seq, db_lng, excise_beg, excise_end)
excise_beg = db_beg - 20
excise_end = db_end + 70
print_positions(PF, db, strand, db_seq, db_lng, excise_beg, excise_end)
# the most 3' position that has yet been excised
db_limit = excise_end
def parse_genome_and_excise(PF, file_fasta):
    try:
        FASTA = open(file_fasta, 'r')
    except IOError:
        print('Could not open file {}'.format(file_fasta))
        sys.exit(-1)
    _id = None
    desc = None
    sequence = ''
    for line in FASTA:
        line = line.strip()
        m = re.match(r'^>(\S+)(.*)', line)
        if m:
            # A new header: excise from the previous contig, if any.
            if _id is not None:
                excise(PF, _id, sequence)
            _id = m.group(1)
            desc = m.group(2)
            sequence = ''
        else:
            sequence += line
    # Excise from the final contig in the file.
    if _id is not None:
        excise(PF, _id, sequence)
    FASTA.close()
if __name__ == '__main__':
if len(sys.argv) < 4:
print(usage)
sys.exit(-1)
parser = argparse.ArgumentParser()
parser.add_argument('file_fasta', help=usage)
parser.add_argument('file_arf', help=usage)
parser.add_argument('coord_file', help=usage)
args = parser.parse_args(sys.argv[1:4])
file_fasta = args.file_fasta
file_arf = args.file_arf
coord_file = args.coord_file
opts, argss = getopt.getopt(sys.argv[4:], 'a:b')
options = dict(opts)
try:
PF = open(coord_file, 'w+')
    except IOError:
print('cannot create file {}'.format(coord_file))
sys.exit(-1)
if options.get('-a'):
freq_min = int(options.get('-a'))
if options.get('-b') == '':
print_stderr('finding lengths of genome contigs\n')
parse_file_arf(file_arf)
if options.get('-b') == '':
print_stderr('reading the genome into memory and excising potential precursors\n')
parse_genome_and_excise(PF, file_fasta)
if options.get('-b') == '':
print_stderr('potential precursors excised\n')
    PF.close()
| [
"[email protected]"
] | |
b508f86c0c3c578717c29afdc02b6162f9c09a52 | 00f152d7e7c9fd6a903dd010adf133a5b1e6ee92 | /jurnal/migrations/0001_initial.py | 2d4432f0342de656560e0518b841f9a01cbfba54 | [] | no_license | dastan2807/minfin | 19ed1bf5ecb077686fd190ab908477be1681b0c0 | 293ff2503f85432a54c26bf8162a2eae33f5d017 | refs/heads/master | 2023-04-24T19:56:27.552266 | 2021-04-19T07:24:14 | 2021-04-19T07:24:14 | 357,477,222 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 910 | py | # Generated by Django 3.1.6 on 2021-03-09 08:23
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Jurnal',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('date', models.DateField(auto_now=True)),
('name', models.CharField(max_length=200, verbose_name='Ф.И.О.')),
('text_prich', models.CharField(max_length=255, verbose_name='Причина выезда')),
('time_exit', models.CharField(max_length=25, verbose_name='Отметка об убытии')),
('time_enter', models.CharField(max_length=25, verbose_name='Отметка о прибытии')),
],
),
]
| [
"[email protected]"
] | |
d5c8ce90c05eaf291e3fdf6e3ab0d68e747fd05b | 300757d6dc740015bd8ffbb94bc9abe7d6a756e1 | /interface/apps/core/fields.py | 4fabfa88ff13946859b56ceae5b27f20994b67d1 | [
"BSD-2-Clause-Views"
] | permissive | orzubalsky/inventory | 817d17e6f0d00a2ac8a73e1ab1ba081515d79d82 | 436a45e504c5c77206226f146787f208600b6ead | refs/heads/master | 2021-01-10T20:10:34.879441 | 2013-12-07T17:29:57 | 2013-12-07T17:29:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,227 | py | from django.db.models import *
class ArrowField(FloatField):
"""
This is basically a FloatField, with its own form widget
"""
def formfield(self, **kwargs):
# This is a fairly standard way to set up some defaults
# while letting the caller override them.
defaults = {'form_class': ArrowField}
defaults.update(kwargs)
return super(ArrowField, self).formfield(**defaults)
class LineField(TextField):
"""
    Stores either one or two string keys and a floating-point value
"""
__metaclass__ = SubfieldBase
def __init__(self, *args, **kwargs):
self.token = kwargs.pop('token', ',')
super(LineField, self).__init__(*args, **kwargs)
def to_python(self, value):
if not value:
return
if isinstance(value, list):
return value
return value.split(self.token)
def get_db_prep_value(self, value):
if not value:
return
assert(isinstance(value, list) or isinstance(value, tuple))
return self.token.join([unicode(s) for s in value])
def value_to_string(self, obj):
value = self._get_val_from_obj(obj)
return self.get_db_prep_value(value)
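# Example usage (hypothetical model, assuming an old-style Django release
# where SubfieldBase is still available):
#
#   class Plot(Model):
#       line = LineField(token=',')
#
#   Plot(line=['x', 'y', 0.5]) stores the string "x,y,0.5" in the database
#   and loads it back as ['x', 'y', '0.5'] (note: items come back as strings).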
| [
"[email protected]"
] | |
d1d9d6d2566f5c1b96b3ccf428145aeb695f0e09 | fef19f8073762de02edfc4227e9d971fd4b3ba63 | /Part_I_Basics/exercises/exercise_7_8_deli.py | 5b36c6bb18b1962b7eda790ca2e52487bc3f89a4 | [] | no_license | shaoda06/python_work | a461bed73d3e6ef9fddaacc5abeb01d3c75de3e0 | f7c3f77e8165019536c76edd9fbe24615f58e433 | refs/heads/master | 2020-05-07T20:36:48.695960 | 2020-02-10T23:22:54 | 2020-02-10T23:22:54 | 180,868,360 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 812 | py | # 7-8. Deli: Make a list called sandwich_orders and fill it with the names of
# various sandwiches. Then make an empty list called finished_sandwiches.
# Loop through the list of sandwich orders and print a message for each
# order, such as I made your tuna sandwich. As each sandwich is made, move it
# to the list of finished sandwiches. After all the sandwiches have been
# made, print a message listing each sandwich that was made.
sandwich_orders = ['sandwich 01', 'sandwich 02', 'sandwich 03', 'sandwich 04']
finished_sandwiches = []
while sandwich_orders:
finished_sandwich = sandwich_orders.pop()
finished_sandwiches.append(finished_sandwich)
print("I made your " + finished_sandwich)
print("Sandwiches I made: ")
for finished_sandwich in finished_sandwiches:
print(finished_sandwich)
| [
"[email protected]"
] | |
ec75025a07918ec8efd71c026a763f57b1f89aed | 5f6adaf9a8927bd598e25d96040e4a5d34d905fb | /v2/python-crash-course/projects/mpl/dice_visual.py | d7116f47a2be17eb128afde2dacd5c44bb4e031c | [] | no_license | jsore/notes | 10d9da625dd8f6d1c1731c6aad40c6bdbf15a23f | 09b658d7425e2f6c0653810a2a73d8b001fb9288 | refs/heads/master | 2021-10-12T17:55:30.501885 | 2021-10-11T21:48:27 | 2021-10-11T21:48:27 | 158,454,447 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,347 | py | # roll a D6, print the results and sanity check the results
from plotly.graph_objs import Bar, Layout
from plotly import offline
from die import Die
# create two D6 dice, make some rolls and store them
die_1 = Die()
# die_2 = Die()
# a 10 sided die
die_2 = Die(10)
results = []
# for roll_num in range(1000):
for roll_num in range(50_000):
result = die_1.roll() + die_2.roll()
results.append(result)
# analyze results
frequencies = []
max_result = die_1.num_sides + die_2.num_sides
# for value in range(2, max_result+1):
# frequency = results.count(value)
# frequencies.append(frequency)
#
# or, using a list comprehension of my design:
frequencies = [results.count(value) for value in range(2, max_result+1)]
# print(frequencies)
# visualize the results
# Plotly doesn't accept range results directly, explicitly
# convert it to a list first
x_values = list(range(2, max_result+1))
data = [Bar(x=x_values, y=frequencies)]
x_axis_config = {'title': 'Result', 'dtick': 1}
y_axis_config = {'title': 'Frequency of Result'}
# my_layout = Layout(title='Results of rolling two D6 dice 1,000 times',
my_layout = Layout(title='Results of rolling a D6 and D10 50,000 times',
xaxis=x_axis_config, yaxis=y_axis_config)
offline.plot({'data': data, 'layout': my_layout}, filename='d6_d10.html')
# print(frequencies)
| [
"[email protected]"
] | |
d62aa9230dce227610caed3b3e0b53237f5eabc4 | d818226860d0ed797c243748c29ef968181e5c24 | /Assignments/Week-2/Q3.py | 7962d8044ccbd230f58e1f5ce0c90fb235c26a31 | [] | no_license | iShibi/DAA | 06a9d9efe89c10b0060aa2b8d78dcacea845505a | 24773e80de0f79e877ce6ff0df1f3bdf43bc1173 | refs/heads/main | 2023-07-15T13:45:07.899928 | 2021-08-25T07:46:39 | 2021-08-25T07:46:39 | 386,178,536 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,138 | py | # Problem: Given an array of nonnegative integers, design an algorithm and a program to count the
# number of pairs of integers such that their difference is equal to a given key, K
'''
Input:
2
5
1 51 84 21 31
20
10
24 71 16 92 12 28 48 14 20 22
4
'''
def binary_search(arr, arr_size, key):
left = 0
right = arr_size - 1
while (left <= right):
mid = left + ((right - left) // 2)
if (key == arr[mid]):
return True
elif (key < arr[mid]):
right = mid - 1
else:
left = mid + 1
return False
def pair_counter(arr, arr_size, difference):
count = 0
for i in range(arr_size):
key = arr[i] + difference
key_found = binary_search(arr, arr_size, key)
if (key_found):
count += 1
return count
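# After sorting (O(n log n)), each of the n elements triggers one binary
# search (O(log n)), so pair_counter runs in O(n log n) overall.
# Example: arr = [1, 21, 31, 51, 84] with difference 20 matches the pairs
# (1, 21) and (31, 51), so the count is 2.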
def main():
test_cases = int(input())
for _ in range(test_cases):
arr_size = int(input())
arr = list(map(int, input().split(' ')))
arr.sort()
difference = int(input())
count = pair_counter(arr, arr_size, difference)
print(f'{count}')
main()
'''
Output:
2
4
''' | [
"[email protected]"
] | |
09b30d4fe64b66d29f5074b472e7849b91f29656 | 7c1c5f90018ad0210a8af77ffa498c14a03fd67b | /Ejercicios/Ejercicio4.py | 6ac99dfbf3baeb2bfc397d7f8dd37ccac35097e1 | [] | no_license | yesheng01/Python | d1f44324a74f46efe7e7926f8ab8e5433f582bc6 | 4bcec73dd72acee0379610b607275f0151a40d46 | refs/heads/master | 2023-04-30T14:58:44.483650 | 2021-05-19T17:17:03 | 2021-05-19T17:17:03 | 363,954,212 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 220 | py | import random
secret = random.randint(0, 10)
guess = int(input("Enter a number to guess: "))
if guess == secret:
    print("You guessed the number; it was:")
    print(secret)
else:
    print("You did not guess the number") | [
"[email protected]"
] | |
ab00202d074ba58c601f2e880f68762b98f44b30 | 03ff74fff064b69e5b41af42372a6cc33738c294 | /project_advance_views/Blog/migrations/0010_auto_20190213_0508.py | 0eeb124c5b9ce6e724cc710c6900785a4bb8bb8d | [] | no_license | rosaridho/DJANGO_MVC | 189a6ba400984e91dd0b057072f8e60348619242 | 44e6f0bd0dcd7e9d268dc2580770c79d54aeda85 | refs/heads/master | 2022-11-28T04:57:03.973009 | 2019-02-13T13:55:28 | 2019-02-13T13:55:28 | 170,068,288 | 0 | 0 | null | 2022-11-22T03:25:38 | 2019-02-11T04:34:06 | HTML | UTF-8 | Python | false | false | 397 | py | # Generated by Django 2.1.5 on 2019-02-13 05:08
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('Blog', '0009_auto_20190213_0506'),
]
operations = [
migrations.AlterField(
model_name='artikel',
name='gambar',
field=models.ImageField(upload_to='../static/img'),
),
]
| [
"[email protected]"
] | |
2cbdf69f111c9413e10a9c43442208ae677b6d1b | 5d5330cef6eab6e023cc098d790b8ca70ec4b7f7 | /reddit_bot.py | d5f92deaa5801a99125590f7f8ecff569d8cb6c7 | [] | no_license | ParthKasat/Reddit-Comment-Bot | 0e70e3a3db99d23cff688a20ef9ab7cf39599c6d | a5d2ee76175e5512470ae443ec449f27392cff60 | refs/heads/master | 2021-03-13T14:05:39.291076 | 2020-05-30T12:26:19 | 2020-05-30T12:26:19 | 246,687,043 | 0 | 0 | null | null | null | null | WINDOWS-1250 | Python | false | false | 3,492 | py | # -*- coding: cp1252 -*-
import praw
import time
import os
import random
import re
quotes = [
'And you have my bow.',
'THEY’RE TAKING THE HOBBITS TO ISENGARD. ',
'A red sun rises. Blood has been spilled this night.',
'THIS FOREST IS OLD. VERY OLD. FULL OF MEMORIES...AND ANGER.',
'YOUR FRIENDS ARE WITH YOU, ARAGORN.',
'A PLAGUE UPON THE STIFF NECKS OF DWARVES.',
'Shall I describe it to you? Or would you like me to find you a box?',
'WHAT ABOUT SIDE BY SIDE WITH A FRIEND?',
'This is no ordinary Ranger. He is Aragorn, son of Arathorn. You owe him your allegiance.',
'There is a fell voice on the air.',
'A lament for Gandalf',
'Have you heard nothing Lord Elrond has said? The Ring must be destroyed.',
'Come, Gimli! We are gaining on them!',
'You would die before your stroke fell!',
'One small bite is enough to fill the stomach of a grown man!',
'Something draws near. I can feel it.',
'Im on 17!',
'Final count: fourty-two.',
'He was twitching.',
'I feel something, a slight tingle in my fingers, I think its affecting me!',
'Hurry! Frodo and Sam have reached the eastern shore. You mean not to follow them.',
'They run as if the very whips of their masters were behind them!',
'I have not the heart to tell you. For me the grief is still too near.',
'That is one of the Mearas, unless my eyes are cheated by some spell.',
'We have trusted you this far and you have not led us astray. Forgive me. I was wrong to despair. '
]
def bot_login():
print ("Logging in...")
r = praw.Reddit(client_id='KzlFs2WOXCy4IA',
client_secret = 'h4_dFmp5HZMYoV8FtkooeQ4KAPs',
username='legolasbot',
password='Parth.kasat3232',
user_agent='legolasbot (by /u/legolasbot)')
print ("Logged in!")
return r
keywords = ['Legolas', 'legolas']
def run_bot(r, subreddit, comment_replied_to):
    for comment in r.subreddit(subreddit).comments(limit=40):
        # r.user.me() is a method; comparing against the unbound attribute
        # would never match, so the bot could end up replying to itself.
        if re.search("legolas", comment.body, re.IGNORECASE) and comment.id not in comment_replied_to and comment.author != r.user.me():
            print("Legolas has been found")
            random_item = random.choice(quotes)
            comment.reply(random_item)
            comment_replied_to.append(comment.id)
            with open("comment_replied_to.txt", "a") as f:
                f.write(comment.id + "\n")
            print("sleeping for 10 seconds in " + subreddit)
            time.sleep(10)
def get_saved_comments():
if not os.path.isfile("comment_replied_to.txt"):
comment_replied_to = []
else:
with open("comment_replied_to.txt", "r") as f:
comment_replied_to = f.read()
comment_replied_to = comment_replied_to.split("\n")
return comment_replied_to
r = bot_login()
comment_replied_to = get_saved_comments()
while True:
    run_bot(r, "lotr", comment_replied_to)
    run_bot(r, "lotrmemes", comment_replied_to)
| [
"[email protected]"
] | |
a8fb8eef1296caf6df290c606178f4ae0661760e | 3784e8bd314141f0b1331aba62da086ac759078b | /lists/migrations/0001_initial.py | b365b5667c9f173d31add7549f12ba3811215ba7 | [] | no_license | raultr/TddWithPython2ed | de05cfc606e4a1035cc0d1d2a9df147dfcf8ff55 | fdaadd46d0e3066c0fd72f575c838198f6be543b | refs/heads/master | 2021-04-27T21:50:44.778271 | 2018-04-16T22:56:43 | 2018-04-16T22:56:43 | 122,408,292 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 424 | py | # Generated by Django 2.0.2 on 2018-03-07 03:28
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Item',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
],
),
]
| [
"[email protected]"
] | |
5532f494f2ef358d96e325ef055515c9012608eb | da93ce28d8d7e77886a3aaeb4026126adeb18256 | /test/test_plots.py | b16c7656893533c6d86922164d34643dbbb7d459 | [
"MIT"
] | permissive | javilor/pysmap | 45185fa8c9f8261e656f1fd8256ec30a23297007 | cf677a13d98c201d647e21eb240a55e948868241 | refs/heads/master | 2020-12-28T19:33:44.359653 | 2016-08-03T20:00:34 | 2016-08-03T20:00:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,931 | py | import os
import unittest
from datetime import datetime
from test.config import config
from pysmap import SmappCollection
from pysmap import plots
class TestPlots(unittest.TestCase):
def test_control(self):
self.assertTrue(True)
def test_tweet_field_grouped_by_timeslice_hours(self):
output_path = '{}/chart_tests/Bar-{}-bar.html'.format(os.path.dirname(os.path.realpath(__file__)), datetime.now())
file_path = '{}/{}'.format(os.path.dirname(os.path.realpath(__file__)), config['json']['valid'])
collection = SmappCollection('json', file_path)
def custom_filter(tweet):
if '#JadeHelm' in tweet['text']:
return True
return False
plots.bar_graph_tweet_field_grouped_by_period(collection, '', [], custom_filter, 'hours', datetime(2015,9,1), datetime(2015,11,30), output_path)
def test_tweet_field_grouped_by_timeslice_days(self):
output_path = '{}/chart_tests/Bar-{}-bar.html'.format(os.path.dirname(os.path.realpath(__file__)), datetime.now())
file_path = '{}/{}'.format(os.path.dirname(os.path.realpath(__file__)), config['json']['valid'])
collection = SmappCollection('json', file_path)
def custom_filter(tweet):
return True
plots.bar_graph_tweet_field_grouped_by_period(collection, '', [], custom_filter, 'days', datetime(2015,9,1), datetime(2015,11,30), output_path)
def test_tweet_field_grouped_by_timeslice_weeks(self):
output_path = '{}/chart_tests/Bar-{}-bar.html'.format(os.path.dirname(os.path.realpath(__file__)), datetime.now())
file_path = '{}/{}'.format(os.path.dirname(os.path.realpath(__file__)), config['json']['valid'])
collection = SmappCollection('json', file_path)
def custom_filter(tweet):
return True
plots.bar_graph_tweet_field_grouped_by_period(collection, '', [], custom_filter, 'weeks', datetime(2015,9,1), datetime(2015,11,30), output_path)
def test_tweet_field_grouped_by_timeslice_months(self):
output_path = '{}/chart_tests/Bar-{}-bar.html'.format(os.path.dirname(os.path.realpath(__file__)), datetime.now())
file_path = '{}/{}'.format(os.path.dirname(os.path.realpath(__file__)), config['json']['valid'])
collection = SmappCollection('json', file_path)
def custom_filter(tweet):
return True
plots.bar_graph_tweet_field_grouped_by_period(collection, '', [], custom_filter, 'months', datetime(2015,9,1), datetime(2015,11,30), output_path)
def test_tweet_field_grouped_by_timeslice_years(self):
output_path = '{}/chart_tests/Bar-{}-bar.html'.format(os.path.dirname(os.path.realpath(__file__)), datetime.now())
file_path = '{}/{}'.format(os.path.dirname(os.path.realpath(__file__)), config['json']['valid'])
collection = SmappCollection('json', file_path)
def custom_filter(tweet):
return True
plots.bar_graph_tweet_field_grouped_by_period(collection, '', [], custom_filter, 'years', datetime(2015,9,1), datetime(2015,11,30), output_path)
def test_tweet_field_grouped_by_timeslice_custom_filter(self):
output_path = '{}/chart_tests/Bar-{}-bar.html'.format(os.path.dirname(os.path.realpath(__file__)), datetime.now())
file_path = '{}/{}'.format(os.path.dirname(os.path.realpath(__file__)), config['json']['valid'])
collection = SmappCollection('json', file_path)
def custom_filter(tweet):
if '#JadeHelm' in tweet['text']:
return True
return False
plots.bar_graph_tweet_field_grouped_by_period(collection, '', [], custom_filter, 'days', datetime(2015,9,1), datetime(2015,11,30), output_path)
def test_tweet_field_grouped_by_timeslice_single_level_field(self):
output_path = '{}/chart_tests/Bar-{}-bar.html'.format(os.path.dirname(os.path.realpath(__file__)), datetime.now())
file_path = '{}/{}'.format(os.path.dirname(os.path.realpath(__file__)), config['json']['valid'])
collection = SmappCollection('json', file_path)
def custom_filter(tweet):
return True
plots.bar_graph_tweet_field_grouped_by_period(collection, 'id_str', ['661283295670493185'], custom_filter, 'months', datetime(2015,9,1), datetime(2015,11,30), output_path)
def test_tweet_field_grouped_by_timeslice_compound_field(self):
output_path = '{}/chart_tests/Bar-{}-bar.html'.format(os.path.dirname(os.path.realpath(__file__)), datetime.now())
file_path = '{}/{}'.format(os.path.dirname(os.path.realpath(__file__)), config['json']['valid'])
collection = SmappCollection('json', file_path)
def custom_filter(tweet):
return True
plots.bar_graph_tweet_field_grouped_by_period(collection, 'user.time_zone', ['Pacific Time (US & Canada)'], custom_filter, 'months', datetime(2015,9,1), datetime(2015,11,30), output_path)
| [
"[email protected]"
] | |
5e8f1800dbb09da5655a3988e1d33f9394c22cf0 | 99291222b1b54a17a10c1662f428648349bb15b3 | /examples/airflow_ingest/airflow_simple_dag.py | e687e51dd076503fc625080373449148e9ef54ec | [
"Apache-2.0"
] | permissive | rberrelleza/dagster | ee3c9c80f092b6d817e8bd546eb0d3c5852c0de3 | 7ad30528a4a92945967d68e59e27727a1e839c2b | refs/heads/master | 2022-12-03T11:12:32.585772 | 2020-08-07T23:25:30 | 2020-08-07T23:25:30 | 286,546,665 | 1 | 1 | Apache-2.0 | 2020-08-10T18:09:13 | 2020-08-10T18:09:12 | null | UTF-8 | Python | false | false | 828 | py | # pylint: disable=pointless-statement
from airflow import models
from airflow.operators.bash_operator import BashOperator
from airflow.operators.dummy_operator import DummyOperator
from airflow.utils.dates import days_ago
args = {
'start_date': days_ago(2),
}
simple_dag = models.DAG(dag_id='simple_dag', default_args=args, schedule_interval=None)
run_this_last = DummyOperator(task_id='sink_task', dag=simple_dag,)
for i in range(3):
task = BashOperator(
task_id='get_task_instance_' + str(i),
bash_command='echo "{{ task_instance_key_str }}" && sleep 1',
dag=simple_dag,
)
task >> run_this_last
also_run_this = BashOperator(
task_id='get_date',
bash_command='echo "execution_date={{ execution_date }} | ts={{ ts }}"',
dag=simple_dag,
)
also_run_this >> run_this_last
| [
"[email protected]"
] | |
6c722f6089a18873fe08626dfd4350be1d26bbe2 | c166d8a39a5f1c3aa66b7eb87005da0362f86a45 | /mypdf/myapp/migrations/0006_my_pdf_image.py | d0c38b4dab2e18cabf9e575699e5d95133553246 | [] | no_license | Neeraj452/NeerajPDF.gethub.io | f3ce201223e75e5968f31210bbcdab872950d6ec | 235473da3f34b2f2500d55c56ae83b9bf4e8ad5d | refs/heads/master | 2022-11-05T10:08:14.682992 | 2020-06-27T04:56:42 | 2020-06-27T04:56:42 | 275,302,185 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 416 | py | # Generated by Django 3.0.5 on 2020-04-26 01:20
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('myapp', '0005_auto_20200426_0202'),
]
operations = [
migrations.AddField(
model_name='my_pdf',
name='image',
field=models.ImageField(blank=True, null=True, upload_to='books/image/'),
),
]
| [
"[email protected]"
] | |
1872be9679ed8fea47a535b1690d027b326a9ce1 | 933b6a50ce1d60373015c472c21bd9bd6dfdb44b | /lib/pusher_utils.py | f5dd1dc730969cc31b8307019f64ee8c06f0815b | [] | no_license | dlf412/push_to_url | 9fb9334de47d4513d4a5602ffeafe84ec3a0de0e | fbd22deed50cce32f3b695a90d4f09bee7e0225d | refs/heads/master | 2022-10-20T03:04:33.114591 | 2022-10-08T08:56:17 | 2022-10-08T08:56:17 | 60,494,043 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,415 | py | #!/usr/bin/env python
# encoding: utf-8
from hashlib import md5, sha512
from time import time as now
import requests
from requests import RequestException as PushError
def generate_push_url(url, apikey, invalid_sec):
ts = int(now()) + invalid_sec
at = md5(sha512("{}{}".format(apikey, ts)).hexdigest()).hexdigest()
return "{}?ts={}&at={}".format(url, ts, at)
def push2customer(url, data, timeout):
headers = {"Connection": "Keep-Alive", "Accept": "*/*"}
if isinstance(data, dict):
r = requests.post(url, json=data, headers=headers, timeout=timeout)
else:
if isinstance(data, unicode):
# support utf8 encoding bytes only
data = data.encode("utf-8")
r = requests.post(url, data=data, headers=headers, timeout=timeout)
if r.status_code != 200:
try:
msg = r.json()['msg']
except:
r.raise_for_status()
else:
http_error_msg = '%s Server Error: %s for url: %s, msg: %s' % (
r.status_code, r.reason, r.url, msg)
raise requests.HTTPError(http_error_msg)
if __name__ == "__main__":
url = "http://127.0.0.1:8088/mw/matches"
apikey = "#!@$%^dlf$%@!*"
sec = 30
push_url = generate_push_url(url, apikey, sec)
push2customer(push_url, u'{"key1": "中文", "key2": "value2"}', sec)
push2customer(push_url, u"<a>测试what? </a>", sec)
| [
"[email protected]"
] | |
f4da4a713f6c8a9b04ce91771f32a79242ad9ed9 | 859582d1ecb81f1024ed5fd337e57bcfa5119cf4 | /app/models/user.py | 945b768567f5bdb7668a52554b490aa10cc76d9a | [
"MIT"
] | permissive | lxy370/PickLight | 01a7534311398c71ce39ee3b5e5fb5cbbbac51dc | 6e88a81f336e7fc239a6aa7afc2c10c80b250829 | refs/heads/master | 2020-05-07T08:22:30.321410 | 2019-03-17T15:29:11 | 2019-03-17T15:29:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,477 | py | # -*- coding: UTF-8 -*-
__author__ = 'Sliver'
from flask import current_app
from flask_login import UserMixin
from sqlalchemy import Integer, String, Column, Float, Boolean
from werkzeug.security import generate_password_hash, check_password_hash
from itsdangerous import TimedJSONWebSignatureSerializer as Serializer
from app.models.base import Base, db
from app.models.gift import Gift
from app.models.wish import Wish
from app.models.drift import Drift
from app import login_manger
from app.libs.helper import is_isbn_or_key
from app.libs.enums import PendingStatus
from app.spider.bookgetter import BookGetter
class User(UserMixin, Base):
__tablename__ = 'user'
id = Column(Integer, primary_key=True)
nickname = Column(String(24), nullable=False)
phone_number = Column(String(18), unique=True)
    _password = Column('password', String(128), nullable=False)  # stored under the table column name 'password'
email = Column(String(50), unique=True, nullable=False)
confirmed = Column(Boolean, default=False)
beans = Column(Float, default=0)
send_counter = Column(Integer, default=0)
receive_counter = Column(Integer, default=0)
wx_open_id = Column(String(50))
wx_name = Column(String(32))
    @property
    def password(self):
        # Expose the hashed password as a read-only property.
        return self._password
    @password.setter
    def password(self, raw):
        # Hash the raw password before storing it.
        self._password = generate_password_hash(raw)
    def check_password(self, raw):
        # Check whether the raw password matches the stored hash.
        return check_password_hash(self._password, raw)
    def can_save_to_list(self, isbn):
        # Check that the argument is a well-formed ISBN.
        if is_isbn_or_key(isbn) != 'isbn':
            return False
        bookgetter = BookGetter()
        bookgetter.search_by_isbn(isbn)
        # Check that the book can actually be found through the API.
        if not bookgetter.first:
            return False
        # Additional rules:
        # 1. A user may not gift several copies of the same book at once.
        # 2. A user cannot be both the giver and the requester of a book.
        # The book may be added only if it is in neither the gift list
        # nor the wish list.
        gifting = Gift.query.filter_by(uid=self.id, isbn=isbn,
                                       launched=False).first()
        wishing = Wish.query.filter_by(uid=self.id, isbn=isbn,
                                       launched=False).first()
        if not gifting and not wishing:
            return True
        else:
            return False
    def can_send_drift(self):
        # The user needs at least one bean to request a book.
        if self.beans < 1:
            return False
        # Number of books the user has successfully gifted.
        success_gifts_count = Gift.query.filter_by(
            uid=self.id, launched=True).count()
        # Number of books the user has successfully received.
        success_receive_count = Drift.query.filter_by(
            requester_id=self.id, pending=PendingStatus.Success).count()
        # For every two books requested, the user must have gifted one.
        return True if success_receive_count // 2 <= success_gifts_count else False
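    # Example: a user who has received 5 books (5 // 2 == 2) needs at least
    # 2 successful gifts before another request is allowed.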
    @property
    def summary(self):
        # Return a short profile summary for the user.
        return {'nickname': self.nickname,
                'beans': self.beans,
                'email': self.email,
                'send_receive': str(self.send_counter) + '/' + str(self.receive_counter),
                'create_datetime': self.create_datetime.strftime('%Y-%m-%d')}
    def generate_token(self, expiration=600):
        # Sign a token with the app secret and an expiry (600 s by default).
        s = Serializer(current_app.config['SECRET_KEY'], expiration)
        return s.dumps({'id': self.id}).decode('utf-8')  # decode bytes to a utf-8 string
    @staticmethod
    def reset_password(token, new_password):
        # Decode the token to recover the user id.
        s = Serializer(current_app.config['SECRET_KEY'])
        try:
            data = s.loads(token.encode('utf-8'))
        except Exception:
            # Return False when the token is expired or forged.
            return False
        uid = data.get('id')
        with db.auto_commit():
            user = User.query.get(uid)  # fetch the row for this primary key
            user.password = new_password
        return True
@login_manger.user_loader
def get_user(uid):
    return User.query.get(int(uid))  # fetch the row for this primary key
| [
"[email protected]"
] | |
4dddc5101c9ecead38ab7a6e3dfa5601de36522c | 05a50cac9824f300cbb27594d2ce81c7eb59ddf9 | /VetDataHub/urls.py | 9649b1095ddba9daf49a064432c2ccfa9eb9d679 | [
"MIT"
] | permissive | nkoroi/KVDH | 7817809fa0f55dba753a40e06ae3cf9b3f410df7 | 622a2331325de113b95791b74c1f74383dcbd7f1 | refs/heads/master | 2021-07-10T03:34:01.589942 | 2017-10-02T09:18:47 | 2017-10-02T09:18:47 | 105,516,748 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,314 | py | """VetDataHub URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.10/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
from VetDataHub import views
from vdh import views as vdh_views
from registration.backends.simple.views import RegistrationView
from accounts.forms import *
from accounts.views import *
from wagtail.wagtailadmin import urls as wagtailadmin_urls
from wagtail.wagtaildocs import urls as wagtaildocs_urls
from wagtail.wagtailcore import urls as wagtail_urls
app_name = 'vdh'
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^$', views.home , name='home'),
url(r'^accounts/', include('registration.backends.simple.urls')),
url(r'^accounts/register/$', UserFormView.as_view(), name='registration_register'),
# url(r'^users/$', vdh_views.user, name='user'),
url(r'^vpp-edit-profile/$', vdh_views.vpp_edit_profile, name= 'vpp_edit_profile'),
url(r'^vet-edit-profile/$', vdh_views.vet_edit_profile, name= 'vet_edit_profile'),
url(r'^med-producer-edit-profile/$', vdh_views.med_producer_edit_profile, name= 'med_producer_edit_profile'),
url(r'^med-distributer-edit-profile/$', vdh_views.med_distributer_edit_profile, name= 'med_distributer_edit_profile'),
url(r'^users/dashboard$', vdh_views.dashboard, name='dashboard'),
url(r'^data/clinical-work-data$', vdh_views.clinical_work_data, name= 'clinical_work_data'),
url(r'^data/med-prod-data$', vdh_views.med_producer_data, name= 'med_producer_data'),
url(r'^data/med-dist-data$', vdh_views.med_distributer_data, name= 'med_distributer_data'),
url(r'^dash/', include(wagtailadmin_urls)),
url(r'^documents/', include(wagtaildocs_urls)),
url(r'^pages/', include(wagtail_urls)),
] | [
"[email protected]"
] | |
e9d73fa529be24c12126c57aee29a3464f5e754b | 53b8560a0b3d7e55b330f95c350c61359c5e41a2 | /Data Wrangling with MongoDB/Lesson07-Quiz14-Inserting-Multiple-Documents/insert.py | 06995ce9ac35bd74c49ecbb6ee2e87c82ef82ef0 | [] | no_license | nbrahman/Udacity | 2c5426c6cf82ed0066bb3e14afaf8b73b088b75c | a7b0a894c01fb1240a8dd25dac84b6ef060b9752 | refs/heads/master | 2021-01-01T20:02:01.915859 | 2017-07-29T16:12:37 | 2017-07-29T16:12:37 | 98,743,968 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 698 | py | #!/usr/bin/env python
"""
Add a single line of code to the insert_autos function that will insert the
automobile data into the 'autos' collection. The data variable that is
returned from the process_file function is a list of dictionaries, as in the
example in the previous video.
"""
from autos import process_file
def insert_autos(infile, db):
    data = process_file(infile)
    # Insert all documents in a single batch call (PyMongo 3+ API).
    db.autos.insert_many(data)
if __name__ == "__main__":
# Code here is for local use on your own computer.
from pymongo import MongoClient
client = MongoClient("mongodb://localhost:27017")
db = client.examples
insert_autos('autos-small.csv', db)
print(db.autos.find_one()) | [
"[email protected]"
] | |
bf56a0dc1f6922ec054372e2560c38e84666b4ce | a41422faf07bfca1e6a543d3fb50525a972f6424 | /datasets.py | 41ee173946aba961bd1e7a077460591a04135195 | [] | no_license | lichingcat/gifsplanation | f22a19475da50b497be741f278ba8b3ae23a2f8c | a974ac28d64c086915e75c7cc83f51b74baf35dc | refs/heads/main | 2023-06-18T20:53:29.827644 | 2021-07-20T23:54:44 | 2021-07-20T23:54:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,405 | py |
import torchvision, torchvision.transforms
import sys, os
sys.path.insert(0,"../torchxrayvision/")
import torchxrayvision as xrv
import matplotlib.pyplot as plt
import torch
from torch.nn import functional as F
import glob
import numpy as np
import skimage, skimage.filters
import captum, captum.attr
import torch, torch.nn
import pickle
import attribution
import pandas as pd
def get_data(dataset_str, transforms=True, size=224):
dataset_dir = "/home/groups/akshaysc/joecohen/"
if transforms:
transform = torchvision.transforms.Compose([xrv.datasets.XRayCenterCrop(),
xrv.datasets.XRayResizer(size)])
else:
transform = None
datasets = []
if "covid" in dataset_str:
dataset = xrv.datasets.COVID19_Dataset(
imgpath=dataset_dir + "/covid-chestxray-dataset/images",
csvpath=dataset_dir + "/covid-chestxray-dataset/metadata.csv",
transform=transform)
datasets.append(dataset)
if "pc" in dataset_str:
dataset = xrv.datasets.PC_Dataset(
imgpath=dataset_dir + "/PC/images-224",
transform=transform, unique_patients=False)
datasets.append(dataset)
if "rsna" in dataset_str:
dataset = xrv.datasets.RSNA_Pneumonia_Dataset(
imgpath=dataset_dir + "/kaggle-pneumonia-jpg/stage_2_train_images_jpg",
transform=transform,unique_patients=False, pathology_masks=True)
datasets.append(dataset)
if "nih" in dataset_str:
dataset = xrv.datasets.NIH_Dataset(
imgpath=dataset_dir + "/NIH/images-224",
transform=transform, unique_patients=False, pathology_masks=True)
datasets.append(dataset)
if "nilarge" in dataset_str:
dataset = xrv.datasets.NIH_Dataset(
imgpath=dataset_dir + "/NIH/ChestXray-NIHCC/images",
transform=transform, unique_patients=False, pathology_masks=True)
datasets.append(dataset)
if "siim" in dataset_str:
dataset = xrv.datasets.SIIM_Pneumothorax_Dataset(
imgpath=dataset_dir + "SIIM_TRAIN_TEST/dicom-images-train/",
csvpath=dataset_dir + "SIIM_TRAIN_TEST/train-rle.csv",
transform=transform, unique_patients=False, masks=True)
datasets.append(dataset)
if "chex" in dataset_str:
dataset = xrv.datasets.CheX_Dataset(
imgpath=dataset_dir + "/CheXpert-v1.0-small",
csvpath=dataset_dir + "/CheXpert-v1.0-small/train.csv",
transform=transform, unique_patients=False)
datasets.append(dataset)
if "google" in dataset_str:
dataset = xrv.datasets.NIH_Google_Dataset(
imgpath=dataset_dir + "/NIH/images-224",
transform=transform)
datasets.append(dataset)
if "mimic_ch" in dataset_str:
dataset = xrv.datasets.MIMIC_Dataset(
imgpath=dataset_dir + "/images-224-MIMIC/files",
csvpath=dataset_dir + "/MIMICCXR-2.0/mimic-cxr-2.0.0-chexpert.csv.gz",
metacsvpath=dataset_dir + "/MIMICCXR-2.0/mimic-cxr-2.0.0-metadata.csv.gz",
transform=transform, unique_patients=False)
datasets.append(dataset)
if "openi" in dataset_str:
dataset = xrv.datasets.Openi_Dataset(
imgpath=dataset_dir + "/OpenI/images/",
transform=transform)
datasets.append(dataset)
if "vin" in dataset_str:
dataset = xrv.datasets.VinBrain_Dataset(
imgpath=dataset_dir + "vinbigdata-chest-xray-abnormalities-detection/train",
csvpath=dataset_dir + "vinbigdata-chest-xray-abnormalities-detection/train.csv",
pathology_masks=True,
transform=transform)
datasets.append(dataset)
newlabels = set()
for d in datasets:
newlabels = newlabels.union(d.pathologies)
newlabels = sorted(newlabels)
#newlabels.remove("Support Devices")
#print("labels",list(newlabels))
for d in datasets:
xrv.datasets.relabel_dataset(list(newlabels), d, silent=True)
if len(datasets) > 1:
dmerge = xrv.datasets.Merge_Dataset(datasets)
else:
dmerge = datasets[0]
print(dmerge.string())
return dmerge
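# Example usage (the imgpath values above are cluster-specific assumptions):
#   d = get_data("nih", transforms=True, size=224)
#   sample = d[0]  # torchxrayvision datasets yield dicts with "img" and "lab"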
| [
"[email protected]"
] | |
0dce11e7728e7a633f374d4822fdb1e8af240194 | b2ce0c6f6eeebb46b4d9bea75b336a002143d83a | /workshop1-python/face_finder_realtime_snapper.py | e9464b7f2b6f33e69e81003ece69790056eb7c74 | [] | no_license | anirudhb/deco3eday | 72e0457d2c5ca59bf9b92ae8f26422111dd3fb8a | fc34a9bfac362cd8dfcdfb24ed78e072597dcd37 | refs/heads/master | 2020-05-09T18:20:19.581177 | 2019-11-16T20:46:35 | 2019-11-16T20:46:35 | 181,338,344 | 0 | 0 | null | 2019-11-16T20:46:37 | 2019-04-14T16:35:42 | Python | UTF-8 | Python | false | false | 2,809 | py | import numpy as np
import cv2 as cv
rectangles = False
face_cascade = cv.CascadeClassifier("haarcascade_frontalface_default.xml")
eye_cascade = cv.CascadeClassifier("haarcascade_eye_tree_eyeglasses.xml")
smile_cascade = cv.CascadeClassifier("haarcascade_smile.xml")
nose_cascade = cv.CascadeClassifier("haarcascade_mcs_nose.xml")
#smiley = cv.imread("smiley.jpg")
catnose = cv.imread("catnose.jpg")
print(catnose.shape, catnose.shape[0], catnose.shape[1])
cap = cv.VideoCapture(0)
while True:
ret, img = cap.read()
grey = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
faces = face_cascade.detectMultiScale(grey, 1.3, 5)
for (x,y,w,h) in faces:
if rectangles:
cv.rectangle(img, (x,y), (x+w,y+h), (255,0,0), 2)
roi_gray = grey[y:y+h, x:x+w]
roi_color = img[y:y+h, x:x+w]
eyes = eye_cascade.detectMultiScale(roi_gray)
for (ex,ey,ew,eh) in eyes:
if rectangles:
                cv.rectangle(roi_color, (ex-10,ey-10), (ex-10+ew,ey-10+eh), (0,0,0), 2)
cv.rectangle(roi_color, (ex,ey), (ex+ew,ey+eh), (0,255,0), 2)
smiles = smile_cascade.detectMultiScale(roi_gray)
for (sx,sy,sw,sh) in smiles:
if rectangles:
cv.rectangle(roi_color, (sx,sy), (sx+sw,sy+sh), (0,0,255), 2)
#cv.addWeighted(smiley, 1, roi_color, 0, 0, roi_color)
#ss = smiley.shape
#`img[sy:sy+ss[0], sx:sx+ss[0]] = cv.addWeighted(smiley, 1, img[sy:sy+ss[0], sx:sx+ss[0]], 1, 0)
break # Only one smile per face.
noses = nose_cascade.detectMultiScale(roi_gray)
for (nx,ny,nw,nh) in noses:
            print(nx, ny, nw, nh)
if rectangles:
cv.rectangle(roi_color, (nx,ny), (nx+nw,ny+nh), (255,255,0), 2)
# Move the nose a little so that it is centered onto the nose.
cn = catnose.shape
            print(cn)
            # Integer division keeps the slice indices ints under Python 3.
            fx = y + ny - (cn[0] // 2)
            fx2 = y + ny + nw + (cn[0] // 2)
            fy = x + nx - (cn[0] // 2)
            fy2 = x + nx + nh + (cn[0] // 2)
fx = max([0,fx])
fx2 = max([0,fx2])
fy = max([0,fy])
fy2 = max([0,fy2])
sl = img[fx:fx2, fy:fy2]
#cv.imshow("img", sl)
#cv.waitKey(0)
            print(sl.shape)
# Slice out
nn = cv.resize(catnose, (sl.shape[1], sl.shape[0]))
            print(nn.shape)
#cv.imshow("img", nn)
#cv.waitKey(0)
img[fx:fx2, fy:fy2] = cv.addWeighted(nn, 1, sl, 1, 0)
break # Only one nose per face.
#nsm = cv.resize(smiley, img.shape[:-1])
#img = cv.addWeighted(img, 0., nsm, 1., 0.)
cv.imshow("img", img)
cv.waitKey(1)
cap.release()
cv.destroyAllWindows()
| [
"[email protected]"
] | |
6146fd84ce66a4085dbb03cc7e8f50eac208595a | 5fbf2e1f3eca4893865aad5b2217552f9cf1399a | /commonlit/settings.py | 58ac5ff31f161ba69a73dfcd5e2934d1a33296f3 | [] | no_license | gautierdag/commonlit | 8d05ae5f0781ee495410dd502f80c990920c1589 | 1f5553ef00d2e326c767d9a50df0be5e07d606bc | refs/heads/main | 2023-08-04T00:27:52.189681 | 2021-09-30T11:37:48 | 2021-09-30T11:37:48 | 380,170,696 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 229 | py | from pydantic import BaseSettings
class Settings(BaseSettings):
wandb_api_key: str
class Config:
env_file = ".env"
env_file_encoding = "utf-8"
case_sensitive = False
env_settings = Settings()
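# Example: with a .env file containing WANDB_API_KEY=..., the key is read at
# import time and exposed as env_settings.wandb_api_key (pydantic v1
# BaseSettings behaviour).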
| [
"[email protected]"
] | |
67c98c9b9e286dcaee963e51d9d74d2fbb0d7091 | 14028bea18dcd4f89fca2306bf51dcbf6acabb44 | /apps/fhir/bluebutton/tests/test_fhir_resources_read_search_w_validation.py | 02943d51949acf9d4fc60ef3ea7a8e05e6c3d19d | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | CMSgov/bluebutton-web-server | 5694c7149d9f2f6efed9a2814c928d8a7539e4cb | fb0904c0b9d77dfb00523fe6ce69b946b640441e | refs/heads/master | 2023-09-01T18:17:31.088628 | 2023-08-25T20:43:14 | 2023-08-25T20:43:14 | 50,062,960 | 30 | 33 | NOASSERTION | 2023-09-14T10:24:34 | 2016-01-20T21:52:00 | Python | UTF-8 | Python | false | false | 22,028 | py | import json
from django.test.client import Client
from django.urls import reverse
from httmock import all_requests, HTTMock
from oauth2_provider.models import get_access_token_model
from apps.test import BaseApiTest
AccessToken = get_access_token_model()
C4BB_PROFILE_URLS = {
"INPATIENT": "http://hl7.org/fhir/us/carin-bb/StructureDefinition/C4BB-ExplanationOfBenefit-Inpatient-Institutional",
"OUTPATIENT": "http://hl7.org/fhir/us/carin-bb/StructureDefinition/C4BB-ExplanationOfBenefit-Outpatient-Institutional",
"PHARMACY": "http://hl7.org/fhir/us/carin-bb/StructureDefinition/C4BB-ExplanationOfBenefit-Pharmacy",
"NONCLINICIAN": "http://hl7.org/fhir/us/carin-bb/StructureDefinition/C4BB-ExplanationOfBenefit-Professional-NonClinician",
}
C4BB_SYSTEM_TYPES = {
"IDTYPE": "http://hl7.org/fhir/us/carin-bb/CodeSystem/C4BBIdentifierType",
}
def get_response_json(resource_file_name):
    with open("./apps/fhir/bluebutton/tests/fhir_resources/{}.json".format(resource_file_name), 'r') as response_file:
        return json.load(response_file)
class FHIRResourcesReadSearchTest(BaseApiTest):
def setUp(self):
# create read and write capabilities
self.read_capability = self._create_capability('Read', [])
self.write_capability = self._create_capability('Write', [])
self._create_capability('patient', [
["GET", r"\/v1\/fhir\/Patient\/\-\d+"],
["GET", r"\/v1\/fhir\/Patient\/\d+"],
["GET", "/v1/fhir/Patient"],
])
self._create_capability('coverage', [
["GET", r"\/v1\/fhir\/Coverage\/.+"],
["GET", "/v1/fhir/Coverage"],
])
self._create_capability('eob', [
["GET", r"\/v1\/fhir\/ExplanationOfBenefit\/.+"],
["GET", "/v1/fhir/ExplanationOfBenefit"],
])
# Setup the RequestFactory
self.client = Client()
def _assertHasC4BBProfile(self, resource, c4bb_profile, v2=False):
meta_profile = None
try:
meta_profile = resource['meta']['profile'][0]
except KeyError:
pass
if not v2:
self.assertIsNone(meta_profile)
else:
self.assertIsNotNone(meta_profile)
self.assertEqual(meta_profile, c4bb_profile)
def _assertHasC4BBIdentifier(self, resource, c4bb_type, v2=False):
identifiers = None
try:
identifiers = resource['identifier']
except KeyError:
pass
self.assertIsNotNone(identifiers)
hasC4BB = False
for id in identifiers:
try:
system = id['type']['coding'][0]['system']
if system == c4bb_type:
hasC4BB = True
break
except KeyError:
pass
if v2:
self.assertTrue(hasC4BB)
else:
self.assertFalse(hasC4BB)
def test_read_patient_request(self):
self._read_patient_request(False)
def test_read_patient_request_v2(self):
self._read_patient_request(True)
def _read_patient_request(self, v2=False):
# create the user
first_access_token = self.create_token('John', 'Smith')
@all_requests
def catchall(url, req):
return {
'status_code': 200,
'content': get_response_json("patient_read_{}".format('v2' if v2 else 'v1')),
}
with HTTMock(catchall):
response = self.client.get(
reverse(
'bb_oauth_fhir_patient_read_or_update_or_delete'
if not v2 else 'bb_oauth_fhir_patient_read_or_update_or_delete_v2',
kwargs={'resource_id': '-20140000008325'}),
{'hello': 'world'},
Authorization="Bearer %s" % (first_access_token))
self.assertEqual(response.status_code, 200)
self._assertHasC4BBIdentifier(response.json(), C4BB_SYSTEM_TYPES['IDTYPE'], v2)
def test_search_patient_request(self):
self._search_patient_request(False)
def test_search_patient_request_v2(self):
self._search_patient_request(True)
def _search_patient_request(self, v2=False):
# create the user
first_access_token = self.create_token('John', 'Smith')
ver = 'v1' if not v2 else 'v2'
@all_requests
def catchall(url, req):
return {
'status_code': 200,
'content': get_response_json("patient_search_{}".format(ver)),
}
with HTTMock(catchall):
response = self.client.get(
reverse(
'bb_oauth_fhir_patient_search' if not v2 else 'bb_oauth_fhir_patient_search_v2'),
{'count': 5, 'hello': 'world'},
Authorization="Bearer %s" % (first_access_token))
self.assertEqual(response.status_code, 200)
# check C4BB in resource as v2 charactor
self.assertIsNotNone(response.json()['entry'])
self.assertTrue(len(response.json()['link']) > 0)
for r in response.json()['entry']:
self._assertHasC4BBIdentifier(r['resource'], C4BB_SYSTEM_TYPES['IDTYPE'], v2)
def test_search_eob_by_parameters_request(self):
self._search_eob_by_parameters_request(False)
def test_search_eob_by_parameters_request_v2(self):
self._search_eob_by_parameters_request(True)
def _search_eob_by_parameters_request(self, v2=False):
# create the user
first_access_token = self.create_token('John', 'Smith')
ver = 'v1' if not v2 else 'v2'
@all_requests
def catchall(url, req):
self.assertIn("https://fhir.backend.bluebutton.hhsdevcloud.us/{}/fhir/ExplanationOfBenefit/".format(ver), req.url)
self.assertIn("_format=application%2Fjson%2Bfhir", req.url)
return {
'status_code': 200,
'content': get_response_json("eob_search_pt_{}".format(ver)),
}
# Test service-date with valid parameter starting with "lt"
with HTTMock(catchall):
response = self.client.get(
reverse('bb_oauth_fhir_eob_search' if not v2 else 'bb_oauth_fhir_eob_search_v2'),
{'service-date': 'lt2022-11-18'},
Authorization="Bearer %s" % (first_access_token))
self.assertEqual(response.status_code, 200)
# assert v1 and v2 eob
for r in response.json()['entry']:
self._assertHasC4BBProfile(r['resource'], C4BB_PROFILE_URLS['PHARMACY'], v2)
# Test service-date range with valid parameter starting with "lt" and "ge"
# example url:
# http://localhost:8000/v2/fhir/ExplanationOfBenefit?
# _format=application%2Fjson%2Bfhir&startIndex=0
# &_count=10&patient=-20000000000001
# &service-date=gt2000-01-01
# &service-date=le2022-11-18
with HTTMock(catchall):
search_url = reverse('bb_oauth_fhir_eob_search' if not v2 else 'bb_oauth_fhir_eob_search_v2')
response = self.client.get(search_url + "?service-date=gt2000-01-01&service-date=le2022-11-18",
Authorization="Bearer %s" % (first_access_token))
self.assertEqual(response.status_code, 200)
# assert v1 and v2 eob
for r in response.json()['entry']:
self._assertHasC4BBProfile(r['resource'], C4BB_PROFILE_URLS['PHARMACY'], v2)
# Test service-date with invalid parameter starting with "dd"
with HTTMock(catchall):
response = self.client.get(
reverse('bb_oauth_fhir_eob_search' if not v2 else 'bb_oauth_fhir_eob_search_v2'),
{'service-date': 'dd2022-11-18'},
Authorization="Bearer %s" % (first_access_token))
content = json.loads(response.content.decode("utf-8"))
self.assertEqual(content['detail'], 'the service-date operator is not valid')
self.assertEqual(response.status_code, 400)
# Test _lastUpdated with valid parameter starting with "lt"
with HTTMock(catchall):
response = self.client.get(
reverse('bb_oauth_fhir_eob_search' if not v2 else 'bb_oauth_fhir_eob_search_v2'),
{'_lastUpdated': 'lt2019-11-22T14:00:00-05:00'},
Authorization="Bearer %s" % (first_access_token))
self.assertEqual(response.status_code, 200)
# assert v1 and v2 eob
# noticed resources ids are different: in v1 the id is like: "id": "carrier--20587716665",
# in v2: "id": "pde--3269834580",
# will check resource id in the loop upon confirm with BFD
for r in response.json()['entry']:
self._assertHasC4BBProfile(r['resource'], C4BB_PROFILE_URLS['PHARMACY'], v2)
# Test _lastUpdated with invalid parameter starting with "zz"
with HTTMock(catchall):
response = self.client.get(
reverse('bb_oauth_fhir_eob_search' if not v2 else 'bb_oauth_fhir_eob_search_v2'),
{'_lastUpdated': 'zz2020-11-22T14:00:00-05:00'},
Authorization="Bearer %s" % (first_access_token))
content = json.loads(response.content.decode("utf-8"))
self.assertEqual(content['detail'], 'the _lastUpdated operator is not valid')
self.assertEqual(response.status_code, 400)
# Test type= with single valid value: "pde"
with HTTMock(catchall):
response = self.client.get(
reverse(
'bb_oauth_fhir_eob_search' if not v2 else 'bb_oauth_fhir_eob_search_v2'),
{'type': 'pde'},
Authorization="Bearer %s" % (first_access_token))
self.assertEqual(response.status_code, 200)
# assert v1 and v2 eob
for r in response.json()['entry']:
self._assertHasC4BBProfile(r['resource'], C4BB_PROFILE_URLS['PHARMACY'], v2)
# Test type= with multiple (all valid values)
with HTTMock(catchall):
response = self.client.get(
reverse('bb_oauth_fhir_eob_search' if not v2 else 'bb_oauth_fhir_eob_search_v2'),
{'type': 'carrier,'
'pde,'
'dme,'
'hha,'
'hospice,'
'inpatient,'
'outpatient,'
'snf,'
'https://bluebutton.cms.gov/resources/codesystem/eob-type|carrier,'
'https://bluebutton.cms.gov/resources/codesystem/eob-type|pde,'
'https://bluebutton.cms.gov/resources/codesystem/eob-type|dme,'
'https://bluebutton.cms.gov/resources/codesystem/eob-type|hha,'
'https://bluebutton.cms.gov/resources/codesystem/eob-type|hospice,'
'https://bluebutton.cms.gov/resources/codesystem/eob-type|inpatient,'
'https://bluebutton.cms.gov/resources/codesystem/eob-type|outpatient,'
'https://bluebutton.cms.gov/resources/codesystem/eob-type|snf'},
Authorization="Bearer %s" % (first_access_token))
self.assertEqual(response.status_code, 200)
# assert v1 and v2 eob
for r in response.json()['entry']:
self._assertHasC4BBProfile(r['resource'], C4BB_PROFILE_URLS['PHARMACY'], v2)
# Test type= with an invalid type
with HTTMock(catchall):
response = self.client.get(
reverse('bb_oauth_fhir_eob_search' if not v2 else 'bb_oauth_fhir_eob_search_v2'),
{'type': 'carrier,'
'INVALID-TYPE,'
'dme,'},
Authorization="Bearer %s" % (first_access_token))
content = json.loads(response.content.decode("utf-8"))
self.assertEqual(content['detail'], 'the type parameter value is not valid')
self.assertEqual(response.status_code, 400)
def test_read_eob_request(self):
self._read_eob_request(False)
def test_read_eob_request_v2(self):
self._read_eob_request(True)
def _read_eob_request(self, v2=False):
# create the user
first_access_token = self.create_token('John', 'Smith')
@all_requests
def catchall(url, req):
return {
'status_code': 200,
'content': get_response_json("eob_read_carrier_{}".format('v2' if v2 else 'v1')),
}
with HTTMock(catchall):
# here the eob carrier id serve as fake id
response = self.client.get(
reverse(
'bb_oauth_fhir_eob_read_or_update_or_delete'
if not v2 else 'bb_oauth_fhir_eob_read_or_update_or_delete_v2',
kwargs={'resource_id': 'carrier--22639159481'}),
Authorization="Bearer %s" % (first_access_token))
# assert v1 and v2 eob read using carrier id
self._assertHasC4BBProfile(response.json(), C4BB_PROFILE_URLS['NONCLINICIAN'], v2)
def test_read_eob_inpatient_request(self):
self._read_eob_inpatient_request(False)
def test_read_eob_inpatient_request_v2(self):
self._read_eob_inpatient_request(True)
def _read_eob_inpatient_request(self, v2=False):
# create the user
first_access_token = self.create_token('John', 'Smith')
@all_requests
def catchall(url, req):
return {
'status_code': 200,
'content': get_response_json("eob_read_in_pt_{}".format('v2' if v2 else 'v1')),
}
with HTTMock(catchall):
response = self.client.get(
reverse(
'bb_oauth_fhir_eob_read_or_update_or_delete'
if not v2 else 'bb_oauth_fhir_eob_read_or_update_or_delete_v2',
kwargs={'resource_id': 'inpatient-4436342082'}),
Authorization="Bearer %s" % (first_access_token))
self.assertEqual(response.status_code, 200)
# assert v1 and v2 eob inpatient
self._assertHasC4BBProfile(response.json(), C4BB_PROFILE_URLS['INPATIENT'], v2)
def test_read_eob_outpatient_request(self):
self._read_eob_outpatient_request(False)
def test_read_eob_outpatient_request_v2(self):
self._read_eob_outpatient_request(True)
def _read_eob_outpatient_request(self, v2=False):
# create the user
first_access_token = self.create_token('John', 'Smith')
@all_requests
def catchall(url, req):
return {
'status_code': 200,
'content': get_response_json("eob_read_out_pt_{}".format('v2' if v2 else 'v1')),
}
with HTTMock(catchall):
response = self.client.get(
reverse(
'bb_oauth_fhir_eob_read_or_update_or_delete'
if not v2 else 'bb_oauth_fhir_eob_read_or_update_or_delete_v2',
kwargs={'resource_id': 'outpatient-4388491497'}),
Authorization="Bearer %s" % (first_access_token))
self.assertEqual(response.status_code, 200)
# assert v1 and v2 eob outpatient
self._assertHasC4BBProfile(response.json(), C4BB_PROFILE_URLS['OUTPATIENT'], v2)
def test_read_coverage_request(self):
self._read_coverage_request(False)
def test_read_coverage_request_v2(self):
self._read_coverage_request(True)
def _read_coverage_request(self, v2=False):
# create the user
first_access_token = self.create_token('John', 'Smith')
@all_requests
def catchall(url, req):
return {'status_code': 200,
'content': get_response_json("coverage_read_{}".format('v2' if v2 else 'v1')), }
with HTTMock(catchall):
response = self.client.get(
reverse(
'bb_oauth_fhir_coverage_read_or_update_or_delete'
if not v2 else 'bb_oauth_fhir_coverage_read_or_update_or_delete_v2',
kwargs={'resource_id': 'coverage_id'}),
Authorization="Bearer %s" % (first_access_token))
self.assertEqual(response.status_code, 200)
subId = None
relationship = None
try:
subId = response.json()['subscriberId']
except KeyError:
pass
try:
relationship = response.json()['relationship']
except KeyError:
pass
if not v2:
self.assertIsNone(subId)
self.assertIsNone(relationship)
else:
self.assertIsNotNone(subId)
self.assertIsNotNone(relationship)
def test_search_coverage_request(self):
self._search_coverage_request(False)
def test_search_coverage_request_v2(self):
self._search_coverage_request(True)
def _search_coverage_request(self, v2=False):
# create the user
first_access_token = self.create_token('John', 'Smith')
@all_requests
def catchall(url, req):
return {'status_code': 200,
'content': get_response_json("coverage_search_{}".format('v2' if v2 else 'v1')), }
with HTTMock(catchall):
response = self.client.get(
reverse(
'bb_oauth_fhir_coverage_search'
if not v2 else 'bb_oauth_fhir_coverage_search_v2'),
Authorization="Bearer %s" % (first_access_token))
self.assertEqual(response.status_code, 200)
# assert v1 and v2 coverage resources
for r in response.json()['entry']:
subId = None
relationship = None
try:
subId = r['resource']['subscriberId']
except KeyError:
pass
try:
relationship = r['resource']['relationship']
except KeyError:
pass
if not v2:
self.assertIsNone(subId)
self.assertIsNone(relationship)
else:
self.assertIsNotNone(subId)
self.assertIsNotNone(relationship)
def test_fhir_meta_request(self):
self._query_fhir_meta(False)
def test_fhir_meta_request_v2(self):
self._query_fhir_meta(True)
def _query_fhir_meta(self, v2=False):
# create the user
first_access_token = self.create_token('John', 'Smith')
@all_requests
def catchall(url, req):
return {
'status_code': 200,
'content': get_response_json("fhir_meta_{}".format('v2' if v2 else 'v1')),
}
with HTTMock(catchall):
response = self.client.get(
reverse(
'fhir_conformance_metadata'
if not v2 else 'fhir_conformance_metadata_v2',),
Authorization="Bearer %s" % (first_access_token))
self.assertEqual(response.status_code, 200)
self.assertEqual(response.json()["resourceType"], "CapabilityStatement")
self.assertEqual(response.json()["fhirVersion"], '4.0.0' if v2 else '3.0.2')
def test_userinfo_request(self):
self._query_userinfo(False)
def test_userinfo_request_v2(self):
self._query_userinfo(True)
def _query_userinfo(self, v2=False):
# create the user
first_access_token = self.create_token('John', 'Smith')
@all_requests
def catchall(url, req):
return {
'status_code': 200,
'content': get_response_json("userinfo_{}".format('v2' if v2 else 'v1')),
}
with HTTMock(catchall):
response = self.client.get(
reverse(
'openid_connect_userinfo'
if not v2 else 'openid_connect_userinfo_v2',),
Authorization="Bearer %s" % (first_access_token))
# identical response for v1 and v2
self.assertEqual(response.status_code, 200)
self.assertEqual(response.json()["sub"], response.json()["patient"])
def test_err_response_status_will_return_400_for_40x(self):
# 401 will also return 400
self._err_response_caused_by_illegalarguments(False, 401)
def test_err_response_404_will_return_4o4(self):
self._err_response_caused_by_illegalarguments(False, 404, 404)
def test_err_response_500_will_return_502(self):
self._err_response_caused_by_illegalarguments(False, 500, 502)
def _err_response_caused_by_illegalarguments(self, v2=False, bfd_status_code=500, expected_code=400):
# create the user
first_access_token = self.create_token('John', 'Smith')
@all_requests
def catchall(url, req):
return {
'status_code': bfd_status_code,
'content': get_response_json("resource_error_response"),
}
with HTTMock(catchall):
response = self.client.get(
reverse(
'bb_oauth_fhir_patient_read_or_update_or_delete'
if not v2 else 'bb_oauth_fhir_patient_read_or_update_or_delete_v2',
kwargs={'resource_id': '-20140000008325'}),
{'hello': 'world'},
Authorization="Bearer %s" % (first_access_token))
self.assertEqual(response.status_code, expected_code)
| [
"[email protected]"
] | |
6254bd3142e966181c216656c684fe9d81042c50 | 357d2b6e8aaee85da51811f0b15e984fac9e011d | /foodflex/__main__.py | 9a0ffde04554252810d03d204bb5d55785dd1f7a | [
"MIT",
"Bitstream-Vera"
] | permissive | Chasbob/food-flex-discord | 658486156367c078654d71ddfd4c4475b9b6c845 | ed2641f938b575815de8789c8c816cdb23b998e7 | refs/heads/master | 2020-09-22T12:33:42.008636 | 2020-04-06T12:34:42 | 2020-04-06T12:34:42 | 225,196,090 | 0 | 0 | MIT | 2019-12-01T16:55:37 | 2019-12-01T16:55:37 | null | UTF-8 | Python | false | false | 71 | py | from . import foodflex
if __name__ == "__main__":
foodflex.main()
| [
"[email protected]"
] | |
a70da63ae03009e13e1e3cec021e48b1e4d3eed3 | cfbebe41bba40833b1863e0c36fa92d0cef9bdc4 | /find_procedure.py | 0742151bd31f8a830ce5bdca5c06710bdef2b8d2 | [] | no_license | VoloshinAndrei/HomeWork_2_4 | 91721429f072354c8a6a8c4477eca7bd7db0a455 | 40e6f41ab2f17991abed07ab0e4a361a11dc2c96 | refs/heads/master | 2021-04-27T00:08:59.752946 | 2018-03-21T18:31:15 | 2018-03-21T18:31:15 | 123,757,247 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,869 | py | # Задание
# I need to find one file among dozens of others
# I know some fragments of this file (from memory or from another source)
# I search only among .sql files
# 1. the program waits for the string to search for (input())
# once the string has been entered, the program searches for it in all files
# prints the list of matching files, one per line
# prints the number of matching files
# 2. waits for input again
# the search now runs only over the files found in step 1
# 3. waits for input again
# ...
# There is no need to program an exit.
# It is enough to stop the program forcibly, e.g. by pressing Ctrl + C
# Example on real data
# python3 find_procedure.py
# Enter a string: INSERT
# ... a long list of files ...
# Total: 301
# Enter a string: APPLICATION_SETUP
# ... a long list of files ...
# Total: 26
# Enter a string: A400M
# ... a long list of files ...
# Total: 17
# Enter a string: 0.0
# Migrations/000_PSE_Application_setup.sql
# Migrations/100_1-32_PSE_Application_setup.sql
# Total: 2
# Enter a string: 2.0
# Migrations/000_PSE_Application_setup.sql
# Total: 1
# remember to organize your own code into functions
import os
import chardet
migrations = 'Migrations'
current_dir = os.path.dirname(os.path.abspath(__file__))
migrations_dir = os.path.join(current_dir, migrations)
def get_all_filename(dirname):
files = []
for file in os.listdir(dirname):
if file.endswith(".sql"):
files.append(file)
return files
def find_str_to_file(find_str, file_name):
with open(os.path.join(migrations_dir, file_name), 'rb') as f:
data = f.read()
result = chardet.detect(data)
s = data.decode(result['encoding']).upper()
return find_str in s
def main_def():
filename_list = get_all_filename(migrations_dir)
while True:
        findstr = input('Enter a string to search: ').upper()
filename_list_new = []
for filename in filename_list:
if find_str_to_file(findstr, filename):
print(filename)
filename_list_new.append(filename)
        print('Total:', len(filename_list_new))
filename_list = filename_list_new
main_def()
| [
"[email protected]"
] |