blob_id: stringlengths 40..40
directory_id: stringlengths 40..40
path: stringlengths 3..281
content_id: stringlengths 40..40
detected_licenses: listlengths 0..57
license_type: stringclasses, 2 values
repo_name: stringlengths 6..116
snapshot_id: stringlengths 40..40
revision_id: stringlengths 40..40
branch_name: stringclasses, 313 values
visit_date: timestamp[us]
revision_date: timestamp[us]
committer_date: timestamp[us]
github_id: int64, 18.2k..668M (nullable ⌀)
star_events_count: int64, 0..102k
fork_events_count: int64, 0..38.2k
gha_license_id: stringclasses, 17 values
gha_event_created_at: timestamp[us]
gha_created_at: timestamp[us]
gha_language: stringclasses, 107 values
src_encoding: stringclasses, 20 values
language: stringclasses, 1 value
is_vendor: bool, 2 classes
is_generated: bool, 2 classes
length_bytes: int64, 4..6.02M
extension: stringclasses, 78 values
content: stringlengths 2..6.02M
authors: listlengths 1..1
author: stringlengths 0..175
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
4fe3ec642a231dba1847cf965c868e08f7f03cdf
|
2320daecca65d435d78e37f4fd213cd41119a07a
|
/src/lib/Client/Tools/Systemd.py
|
e3f6a4169b113dfb5a559301bbe703489ddc3340
|
[
"mpich2",
"LicenseRef-scancode-other-permissive",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
mikemccllstr/bcfg2
|
06538645d12a606aa5558def717dfc24400f6eea
|
826f385767ccf9f608fcfbe35e381a9dbc59db4b
|
refs/heads/master
| 2021-01-17T09:52:32.800185 | 2011-10-16T03:06:31 | 2011-10-16T04:41:21 | 2,004,294 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,957 |
py
|
# This is the bcfg2 support for systemd
"""This is systemd support."""
import Bcfg2.Client.Tools
import Bcfg2.Client.XML
class Systemd(Bcfg2.Client.Tools.SvcTool):
"""Systemd support for Bcfg2."""
name = 'Systemd'
__execs__ = ['/bin/systemctl']
__handles__ = [('Service', 'systemd')]
__req__ = {'Service': ['name', 'status']}
def get_svc_command(self, service, action):
return "/bin/systemctl %s %s.service" % (action, service.get('name'))
    def VerifyService(self, entry, _):
"""Verify Service status for entry."""
cmd = "/bin/systemctl status %s.service " % (entry.get('name'))
raw = ''.join(self.cmd.run(cmd)[1])
        if raw.find('Loaded: error') >= 0:
entry.set('current_status', 'off')
status = False
        elif raw.find('Active: active') >= 0:
entry.set('current_status', 'on')
if entry.get('status') == 'off':
status = False
else:
status = True
        else:
entry.set('current_status', 'off')
if entry.get('status') == 'on':
status = False
else:
status = True
return status
    def InstallService(self, entry):
"""Install Service entry."""
# don't take any actions for mode = 'manual'
if entry.get('mode', 'default') == 'manual':
self.logger.info("Service %s mode set to manual. Skipping "
"installation." % (entry.get('name')))
return True
if entry.get('status') == 'on':
pstatus = self.cmd.run(self.get_svc_command(entry, 'enable'))[0]
pstatus = self.cmd.run(self.get_svc_command(entry, 'start'))[0]
else:
pstatus = self.cmd.run(self.get_svc_command(entry, 'stop'))[0]
pstatus = self.cmd.run(self.get_svc_command(entry, 'disable'))[0]
return not pstatus
|
[
"[email protected]"
] | |
10a42a6cd0ea1f64e2bcbb93e47165bbf0af7d3a
|
4b6add2be2207c64f9bab6d2611dff45ba655613
|
/02_ProgramFlow/21_augmented assignment loop_ CHALLENGE.py
|
5cee4ab942734bfa75165ae646111589d6b832b8
|
[] |
no_license
|
bidhutdhakal/python
|
65a69d4c31239dc087dda41a6aba8aab021b351c
|
7174a33684df020b1afa339fa2d783e7614adb79
|
refs/heads/master
| 2022-11-15T02:10:13.250935 | 2020-07-19T10:23:17 | 2020-07-19T10:23:17 | 275,973,717 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 666 |
py
|
print("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~1~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~")
# Early computers could only perform addition. In order to multiply one number by another, they performed repeated addition.
# For example, 5 * 8 was performed by adding 5 eight times.
# 5 + 5 + 5 + 5 + 5 + 5 + 5 + 5 = 40
# Use a for loop, and augmented assignment, to give answer the result of multiplying number by multiplier.
number = 5
multiplier = 8
answer = 0
# add your loop after this comment
for i in range(multiplier):
answer += number
print(answer)
print("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~2~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~")
|
[
"[email protected]"
] | |
a02d20ef716ffb5e171da88ab492aba679f903ae
|
c250f7e1f1cbbc45bf7a6f28f8169cf6e7c74b38
|
/venv/Scripts/pip3-script.py
|
91405bcc9a5225247189fbc367eae4dcef36f1aa
|
[] |
no_license
|
Binetou1996/FirstProject1
|
40d74e4d1154a842ca2edbee0431ffcf17521c82
|
4af9b0269ac1c4e4ad2557e48b54934b4af5ca5f
|
refs/heads/master
| 2020-04-10T23:01:25.721331 | 2018-12-20T18:49:56 | 2018-12-20T18:49:56 | 161,339,363 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 429 |
py
|
#!C:\Users\HP\PycharmProjects\DjangoProject\FirstProject\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==10.0.1','console_scripts','pip3'
__requires__ = 'pip==10.0.1'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==10.0.1', 'console_scripts', 'pip3')()
)
|
[
"[email protected]"
] | |
512035201e3a6c4d9414e34ad579f8b05cf82917
|
ff55d72d5265d42ce57f0e95347421c23c0e32f5
|
/python/p3
|
6b727111864558ab2950eca30cd757cb4a929756
|
[] |
no_license
|
tolgaman/testrepo
|
927f563cdd8eb00557b231766cd20260b63f9d11
|
39fa5d1e82ccfb3931bc4a73bd41e187a005fe1f
|
refs/heads/master
| 2020-04-05T22:44:04.441075 | 2019-07-24T22:12:17 | 2019-07-24T22:12:17 | 32,182,896 | 0 | 0 | null | 2015-03-13T21:58:09 | 2015-03-13T21:58:09 | null |
UTF-8
|
Python
| false | false | 1,885 |
#!/bin/python
pwd="lnzxklcvgonolxoy"
import email, getpass, imaplib, os
detach_dir = '.' # directory where to save attachments (default: current)
user = raw_input("Enter your GMail username:")
#pwd = getpass.getpass("Enter your password: ")
# connecting to the gmail imap server
m = imaplib.IMAP4_SSL("imap.gmail.com")
m.login(user,pwd)
m.select("cs2043") # here you a can choose a mail box like INBOX instead
# use m.list() to get all the mailboxes
resp, items = m.search(None, "ALL") # you could filter using the IMAP rules here (check http://www.example-code.com/csharp/imap-search-critera.asp)
items = items[0].split() # getting the mails id
for emailid in items:
resp, data = m.fetch(emailid, "(RFC822)") # fetching the mail, "`(RFC822)`" means "get the whole stuff", but you can ask for headers only, etc
email_body = data[0][1] # getting the mail content
mail = email.message_from_string(email_body) # parsing the mail content to get a mail object
#Check if any attachments at all
if mail.get_content_maintype() != 'multipart':
continue
print "["+mail["From"]+"] :" + mail["Subject"]
    # we use walk to create a generator so we can iterate on the parts and forget about the recursive headache
for part in mail.walk():
# multipart are just containers, so we skip them
if part.get_content_maintype() == 'multipart':
continue
# is this part an attachment ?
if part.get('Content-Disposition') is None:
continue
#filename = part.get_filename()
filename = mail["From"] + "_hw1answer"
att_path = os.path.join(detach_dir, filename)
#Check if its already there
if not os.path.isfile(att_path) :
# finally write the stuff
fp = open(att_path, 'wb')
fp.write(part.get_payload(decode=True))
fp.close()
|
[
"[email protected]"
] | ||
db38ea1d5adfd8537211137c5b9e2bdacf5cf52f
|
12ea3e4c0e35d60736bb3d9aa2ae4cfdb7951679
|
/20oct15/parsepdbexample.py
|
c681d086a5f60621f6441042e3e9049306b21505
|
[] |
no_license
|
sstagg/bch5884
|
6b87b844dbd368428933a17ed5b4ffd7fed1d087
|
5b5d5f3ccebc56db0f9b370d6f1152b49099dff7
|
refs/heads/master
| 2023-01-13T19:51:47.950043 | 2020-11-24T17:27:17 | 2020-11-24T17:27:17 | 290,234,706 | 3 | 15 | null | null | null | null |
UTF-8
|
Python
| false | false | 329 |
py
|
#!/usr/bin/env python3
import sys
pdbfilename=sys.argv[1]
f=open(pdbfilename)
lines=f.readlines()
f.close()
records=[]
for line in lines:
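	# PDB ATOM/HETATM records are fixed-width: columns 31-38 hold the x coordinate and columns 77-78 the element symbol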
x=float(line[30:38])
element=line[76:78].strip()
if element=="C":
mass=12.01
elif element=="N":
mass=14.01
else:
mass=None
records.append([x,element,mass])
print (records)
|
[
"[email protected]"
] | |
bf43644aded3dbfbc4cbf8ae8a0f2aa704ba25ce
|
d416d916492cd48b75d9f21bbf01625072df4820
|
/DMeans.py
|
1e889061d9f6adf6fa81f3c7c017a52a9224cc51
|
[] |
no_license
|
ZongweiZhou1/Multitude
|
da4392940d60e1cc827e31bdf75ce64e6d9f688d
|
db2a9df7ac00f13635b8a1408d9a01bbed1f846c
|
refs/heads/master
| 2020-06-26T00:09:55.397847 | 2020-01-13T03:33:35 | 2020-01-13T03:33:35 | 199,463,718 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,131 |
py
|
import numpy as np
import random
from time import time
from dynmeans_py.dynmeans import DynMeans
from utils.matching import matching
class DMeans():
def __init__(self, v_lambda, T_Q, K_tau, nRestarts=10, match_func=matching):
Q = v_lambda / T_Q
tau = (T_Q * (K_tau - 1.0) + 1.0) / (T_Q - 1.0)
self.dynmeans = DynMeans(v_lambda, Q, tau)
if nRestarts <= 0:
raise ValueError('libdynmeans: ERROR: Cannot have nRestarts <=0')
self.nRestarts = nRestarts
self.match_func = match_func
def cluster(self, newobs, ref_obs, verbose=False):
"""
:param newobs: N x 2, double
:param ref_obs: M x 2, double
:param verbose:
:return:
"""
tStart = time()
if len(newobs) == 0:
raise ValueError('libdynmeans: ERROR: newobservations is empty')
newobservations = newobs.flatten().tolist()
ref_observations = ref_obs.flatten().tolist()
self.dynmeans.set_data(newobservations, ref_observations)
observations_num = len(newobs)
randOrderings = list(range(observations_num))
finalObj = np.inf
if verbose:
print("libdynmeans: Clustering {} datapoints with {}"
" restarts.".format(observations_num, self.nRestarts))
for i in range(self.nRestarts):
self.dynmeans.set_tmpVariables(observations_num)
obj, prevobj = np.inf, np.inf
random.shuffle(randOrderings)
for j in range(100):
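                # alternate observation assignment and parameter updates until the objective stops improving (at most 100 iterations); the objective should decrease monotonically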
prevobj = obj
self.dynmeans.assignObservations(randOrderings)
prms = np.array(self.dynmeans.first_updateParams()).reshape(-1, 2)
mars, mrds = self.match_func(prms, ref_obs) # input args are all 2d
obj = self.dynmeans.setParameters(mars, mrds)
if obj > prevobj:
print("Error: obj > prevobj - monotonicity violated! Check your distance/set parameter functions...")
print("obj: {}, prevobj:{}".format(obj, prevobj))
break
if verbose:
print("libdymeans: Trail: [{}/{}] objective: {}".format(i+1, self.nRestarts, obj))
self.dynmeans.pin_debug(1)
if obj == prevobj:
break
if obj < finalObj:
finalObj = obj
self.dynmeans.set_finalPrms()
if verbose:
print('libdynmeans: Done clustering. Min Objective: {}'.format(finalObj))
self.dynmeans.pin_debug(2)
finalLabels = self.dynmeans.updateState()
tTaken = time() - tStart
return finalLabels, finalObj, tTaken
if __name__=='__main__':
v_lambda = 0.05
T_Q = 6.8
K_tau = 1.01
dmeans = DMeans(v_lambda, T_Q, K_tau)
for i in range(100):
newobs = np.random.rand(50, 2)
ref_obs = np.random.rand(8, 2)
finalLabels, finalObj, tTaken = dmeans.cluster(newobs, ref_obs, verbose=True)
print(finalLabels)
print(finalObj)
print(tTaken)
|
[
"[email protected]"
] | |
d0118703cd1be666f42813c975c6d146b4aba794
|
f72a82c5d8bc76078b9969ddd45d2cb94b65653d
|
/ccc/py/ccc15j3_oneline.py
|
a0e5b0f32ce89b79a0d8630a467725a6f845a374
|
[
"MIT"
] |
permissive
|
tylertian123/CompSciSolutions
|
1f76d6d7b60358ea24812b96b9f3d7d54c5ffdc8
|
33769a20ea613439f92055b40deeac4927cb0a91
|
refs/heads/master
| 2021-06-17T20:55:49.812172 | 2021-02-21T05:42:24 | 2021-02-21T05:42:24 | 179,722,285 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 194 |
py
|
for c in input():print(c,(min((abs(ord(c)-ord(v)),v)for v in'aeiou')[1]+((chr(ord(c)+1)if chr(ord(c)+1)not in'aeiou'else chr(ord(c)+2))if c!='z'else'z'))if c not in('aeiou')else'',sep='',end='')
|
[
"[email protected]"
] | |
138016f8d6a2dbb02fd72456d8ec41902932b14f
|
58bc1c34ccf4804c56b0eb40f05dbeb80e1aa01e
|
/Curso Python/FuncionesRecursivas/Recursividad.py
|
3d8a57fc4e62fba55a262c1b012194c430a895b5
|
[] |
no_license
|
CarlosG4rc/CursoPy
|
fa19b3cd7a8ea79ff6c249581813878c80a3def3
|
8faff0a37f05885be7bdf81564aad0c1f419d6eb
|
refs/heads/master
| 2023-01-01T09:52:02.814611 | 2020-10-26T20:03:04 | 2020-10-26T20:03:04 | 285,904,791 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 381 |
py
|
def cuenta_atras(num):
num -= 1
if num > 0:
print(num)
cuenta_atras(num)
else:
(print("Boooooom!!!"))
print("Fin de la funcion", num)
cuenta_atras(5)
def factorial(num):
print("Valor inicial ->",num)
if num > 1:
num = num * factorial(num -1)
print("valor final ->",num)
return num
print(factorial(5))
|
[
"[email protected]"
] | |
b6f61d9cae0e2d0f9dc640ed398d6d16ccf52c85
|
2e80c9630036d493400a5d568f2bae952ddd438a
|
/contents/1_Demo/05장 클래스/상속03_재정의.py
|
8eec3a91f2b0fe05c6a0375d0847b8878f3d9b58
|
[] |
no_license
|
gregor77/start-python
|
2a18847f93c3630d4106d342b63632b5a1ec119d
|
26481941f61eddd1f34aa53aa4c6f4afd0c03580
|
refs/heads/master
| 2021-01-10T10:56:43.225164 | 2015-10-29T08:20:26 | 2015-10-29T08:20:26 | 44,936,642 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,379 |
py
|
class Person:
" Super Class "
def __init__(self, name, phoneNumber):
self.Name = name
self.PhoneNumber = phoneNumber
def PrintInfo(self):
print("Info(Name:{0}, Phone Number: {1}".format(self.Name,
self.PhoneNumber))
def PrintPersonData(self):
print("Person(Name:{0}, Phone Number: {1}".format(self.Name, self.PhoneNumber))
class Student(Person):
" Sub Class "
def __init__(self, name, phoneNumber, subject, studentID):
        # Explicitly call the Person constructor
Person.__init__(self, name, phoneNumber)
self.Subject = subject
self.StudentID = studentID
def PrintStudentData(self):
print("Student(Subject: {0}, Student ID: {1}".format(self.Subject,
self.StudentID))
    def PrintInfo(self):  # Override Person's PrintInfo() method
print("Info(Name:{0}, Phone Number:{1}".format(self.Name,
self.PhoneNumber))
print("Info(Subject:{0}, Student ID:{1}".format(self.Subject,
self.StudentID))
p = Person("Derick", "010-222-3333")
s = Student("Marry", "010-333-4444", "Computer Scient", "990000")
|
[
"[email protected]"
] | |
74603ae44b78d8c4712a09d82fe4f89f48dc5a9a
|
2e420d38c709da85426511c4f11ebf1e48d6ce42
|
/Database/DatabaseValue.py
|
1a309861858d7432308d723b7fdf35a7371e7354
|
[] |
no_license
|
NynkevK/miniProjectProg
|
50a7ec309dabbfefd30bd6b6135ce5dc6e129254
|
393ad325c8ac1d616a72c802d9a4f675a36519ae
|
refs/heads/master
| 2021-01-13T14:30:32.916039 | 2017-12-10T14:17:27 | 2017-12-10T14:17:27 | 72,853,957 | 0 | 2 | null | 2020-04-24T09:46:06 | 2016-11-04T14:16:05 |
Python
|
UTF-8
|
Python
| false | false | 217 |
py
|
# Written by Robin
import datetime
class DatabaseValue:
Id = 0
License = ""
Firstname = ""
Lastname = ""
Email = ""
Arrival = ""
Departure = ""
Bill = 0
|
[
"[email protected]"
] | |
2ac346b17ec1259ea4d8fd11ea85aa3c5131e6e8
|
7edda81b50d243d34cd6ff03ae17398695e5118e
|
/venv/bin/pip3
|
3cf9ce2a9b5d6bf3534ad9be55a5cab5e5aa01e4
|
[] |
no_license
|
TyRolla/CAF2Script
|
0e8b3278f489f14d5064d1d58ba46f26273cf8dd
|
8005636bc85f145415208ddcd419b2915e73ee52
|
refs/heads/master
| 2022-12-31T01:24:16.487165 | 2020-10-17T21:53:58 | 2020-10-17T21:53:58 | 304,975,623 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 408 |
#!/Users/tyuehara/PycharmProjects/CAFScript/venv/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==19.0.3','console_scripts','pip3'
__requires__ = 'pip==19.0.3'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==19.0.3', 'console_scripts', 'pip3')()
)
|
[
"[email protected]"
] | ||
ec1a5719f569715605b75d20d9dea2e9ea1a20ef
|
eee741a9d6d55357fb597e0cc3379085f47c2c13
|
/processData.py
|
85071304b5d9fe473ea285664cbd0cd5dac57f28
|
[] |
no_license
|
mbstacy/gdal_ok_mesonet_data_process
|
6505be783056eeade9664782035c284d76f29e1c
|
18fe989560d54cc0fff336462c26897778daeaef
|
refs/heads/master
| 2021-01-10T07:32:55.865328 | 2016-02-23T22:42:48 | 2016-02-23T22:42:48 | 52,396,676 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,487 |
py
|
#!/usr/bin/env python
'''
Created on Feb 2, 2016
@author: ledapsTwo
'''
from osgeo import gdal,osr
from os import path
from csv import DictReader
import shlex,sys
import pandas as pd
import numpy as np
class raster:
def __init__(self,inFile):
gf = gdal.Open(inFile)
self.raster = gf
self.grid = gf.ReadAsArray()
#get number of rows and columns in the shape
self.numGrids = 1
if len(self.grid.shape) == 3:
self.numGrids,self.numRows,self.numCols = self.grid.shape
else:
self.numRows,self.numCols = self.grid.shape
#get projection and spatial reference infomation
srs = osr.SpatialReference()
srs.ImportFromWkt(gf.GetProjection())
srsLatLong = srs.CloneGeogCS()
self.srs = srs ; self.srsLatLong = srsLatLong
#create coordinate transform object for sample/line to lon/lat conversion
self.ct = osr.CoordinateTransformation(srs, srsLatLong)
#create coordinate transform object for lon/lat to sample/line conversion
self.ctInv = osr.CoordinateTransformation(srsLatLong, srs)
#get geographic transform information in cartesian space
self.geoMatrix = gf.GetGeoTransform()
#with no north correction this is equal to (pixel height * pixel width) = -900
dev = (self.geoMatrix[1] * self.geoMatrix[5]) - (self.geoMatrix[2] * self.geoMatrix[4])
#divide height/width components by this -900 to get a decimal degrees value
self.gtinv = (self.geoMatrix[0], self.geoMatrix[5]/dev, -1 * self.geoMatrix[2]/dev, self.geoMatrix[3], -1 * self.geoMatrix[4]/dev, self.geoMatrix[1]/dev)
def parseMesonetFile():
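	# convert the whitespace-delimited Mesonet .mdf file to a CSV once, then return a DictReader over that CSV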
mesoCSV = "{0}.csv".format(mesoFile.split('.')[0]) #path.join(curDir,'%s.csv'%path.basename(mesoFile).split('.')[0])
if not path.exists(mesoCSV):
with open(mesoFile,'r') as f1:
data = f1.read()
data_list=data.split('\n')
table = []
for line in data_list[2:-1]:
table.append(shlex.split(line))
headers = table.pop(0)
df = pd.DataFrame(table,columns=headers)
outFile = path.basename(mesoFile).split('.')[0]
df.to_csv("%s.csv" % (outFile),index=False)
f = open(mesoCSV,'r')
aSites = DictReader(f)
return aSites
def convertLatLontoPixelLine(inGrid,lat,lon):
#convert lon/lat to cartesian coordinates
x,y,z = inGrid.ctInv.TransformPoint(lon,lat,0)
#subtract out upper left pixel coordinates to move origin to upper-left corner of the grid
u = x - inGrid.gtinv[0]
v = y - inGrid.gtinv[3]
#print lon,lat,x,y,u,v
#multiply u & v by 0.333333 or -0.333333 to convert cartesian to pixel/line combo
col = (inGrid.gtinv[1] * u) + (inGrid.gtinv[2] * v)
row = (inGrid.gtinv[4] * u) + (inGrid.gtinv[5] * v)
#print lon,lat,x,y,u,v,col,row
return row,col
def convertPixelLinetoLatLong(inGrid,row,col):
X = (inGrid.geoMatrix[0] + (inGrid.geoMatrix[1] * col) + (inGrid.geoMatrix[2] * row)) + inGrid.geoMatrix[1]/2.0
Y = (inGrid.geoMatrix[3] + (inGrid.geoMatrix[4] * col) + (inGrid.geoMatrix[5] * row)) + inGrid.geoMatrix[5]/2.0
(lon, lat, height) = inGrid.ct.TransformPoint(X,Y)
lon = round(lon,11) ; lat = round(lat,11)
return lat,lon
def main():
#read in TIF file as a raster object
tif = raster(tifFile)
#read in mesonet data and break at each new line
aSites = parseMesonetFile()
#print(aSites)
aOut = []
#walk through each site, pull the lat/lon and determine point on raster grid
for mesoSite in aSites:
#print (mesoSite)
siteID = mesoSite["STID"] #the site ID from the CSV
stNum = mesoSite["STNM"] #station number
stTime = mesoSite["TIME"] #station time
lat = float(mesoSite["LATT"]) #the latitude from the CSV
lon = float(mesoSite["LONG"]) #the longitude from the CSV
#the row and column on the raster above this mesonet site
rasterRow,rasterColumn = convertLatLontoPixelLine(tif, lat, lon)
#the value on the raster at this grid point
rasterValue = tif.grid[rasterRow,rasterColumn]
#build skeleton for header and station lines
header = "STID,STNM,TIME,LATT,LONG,RASTERVAL"
strOut = "%s,%s,%s,%s,%s,%s"%(siteID,stNum,stTime,lat,lon,rasterValue)
#walk through all attributes and place into above strings
for param in sorted(mesoSite.keys()):
#skip any of these as they have already been defined above
if param in ["STID","STNM","TIME","LATT","LONG"]: continue
header += ",%s"%param
strOut += ",%s"%mesoSite[param]
#add header first so it will be at the top of the output file
if header not in aOut: aOut.append(header)
#append station attributes to list
aOut.append(strOut)
#convert list to block of text and write to file
outFile = open("summary%s.csv"%ext,'w')
outFile.write("\n".join(aOut))
outFile.close()
print ("DONE")
if __name__ == "__main__":
#global curDir ; curDir = path.dirname(path.realpath(__file__))
global tifFile ; tifFile = sys.argv[1] #path.join(curDir,'y12.modisSSEBopET.tif')
global mesoFile ; mesoFile = sys.argv[2] #path.join(curDir,'2012_annual.mdf')
global ext; ext = ""
main()
|
[
"[email protected]"
] | |
aec4535762758f06ac7c4f621312ac9b23538101
|
97d96e5e323e46035441ee78238c57b38f5a8ea4
|
/channel/chat_app/admin.py
|
e8c66503fb52fcc0093e46195a13c6b2196faa12
|
[] |
no_license
|
sirakej/channels
|
38e6a1a189ba4312118ca06a25fbed06fde0a781
|
89464aad1446da8539b568a684bf20aabdb1c984
|
refs/heads/master
| 2020-03-26T07:13:20.243292 | 2018-08-14T00:44:56 | 2018-08-14T00:44:56 | 144,642,494 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 178 |
py
|
from django.contrib import admin
from .models import Channel
from .models import Post
# Register your models here.
admin.site.register(Channel)
admin.site.register(Post)
|
[
"[email protected]"
] | |
3b0e9d753652300a56b3b60e6c050f0d5717337a
|
b6fa02f758343ec3031787b98e34fcd56b4f0d40
|
/app/auth/forms.py
|
0c91dd77fb4e0624adfbeff45f8492d55545a73b
|
[
"MIT"
] |
permissive
|
kmunge/blog
|
4e8514416ca3fdc233731897608a0daa3270e9f8
|
73d83b9c4cf9b8c7a1a631d63be77a00ea469f1f
|
refs/heads/master
| 2022-12-10T04:26:37.903251 | 2019-06-04T09:08:16 | 2019-06-04T09:08:16 | 189,951,902 | 1 | 0 | null | 2022-09-16T18:02:46 | 2019-06-03T07:00:23 |
Python
|
UTF-8
|
Python
| false | false | 1,264 |
py
|
from flask_wtf import FlaskForm
from wtforms import StringField,PasswordField,BooleanField,SubmitField
from wtforms.validators import Required,Email,EqualTo
from ..models import User
from wtforms import ValidationError
class RegistrationForm(FlaskForm):
email = StringField('Your Email Address',validators=[Required(),Email()])
username = StringField('Enter your username',validators = [Required()])
password = PasswordField('Password',validators = [Required(), EqualTo('password_confirm',message = 'Passwords must match')])
password_confirm = PasswordField('Confirm Passwords',validators = [Required()])
submit = SubmitField('Sign Up')
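    # WTForms automatically runs the validate_<fieldname> methods below as extra inline validators for those fields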
def validate_email(self,data_field):
if User.query.filter_by(email =data_field.data).first():
raise ValidationError('There is an account with that email')
def validate_username(self,data_field):
if User.query.filter_by(username = data_field.data).first():
raise ValidationError('That username is taken')
class LoginForm(FlaskForm):
email = StringField('Your Email Address',validators=[Required(),Email()])
password = PasswordField('Password',validators =[Required()])
remember = BooleanField('Remember me')
submit = SubmitField('Sign In')
|
[
"[email protected]"
] | |
044ef7733d33340e7cf093fa5b1b04a826c31548
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/adjectives/_summary.py
|
18d09be192ac1b4023f64ab173806411d3dcea87
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 417 |
py
|
#calss header
class _SUMMARY():
def __init__(self,):
self.name = "SUMMARY"
self.definitions = [u'done suddenly, without discussion or legal arrangements: ']
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.specie = 'adjectives'
def run(self, obj1, obj2):
self.jsondata[obj2] = {}
self.jsondata[obj2]['properties'] = self.name.lower()
return self.jsondata
|
[
"[email protected]"
] | |
0505e8092673d4890abc22dce0ecea40a9971c6e
|
a343666b05d8064e66cba91281eeaec9a1139728
|
/CNN_IMDB.py
|
1ffc44df4293707a260a58be2ffd9fc03fda31b1
|
[] |
no_license
|
AnhPhamFIT/Deeplearning
|
57a1507a1771f847be74bff312d952bdf772fb84
|
9c9ee65f9a09012bec7dc24d71d1aa4a036ea230
|
refs/heads/master
| 2020-12-02T19:49:08.059526 | 2017-07-06T06:18:12 | 2017-07-06T06:18:12 | 96,394,719 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,321 |
py
|
'''
import numpy
from keras.datasets import imdb
from matplotlib import pyplot
# load the dataset
(X_train, y_train), (X_test, y_test) = imdb.load_data()
X = numpy.concatenate((X_train, X_test), axis=0)
y = numpy.concatenate((y_train, y_test), axis=0)
# summarize size
print("Training data: ")
print(X.shape)
print(y.shape)
# Summarize number of classes
print("Classes: ")
print(numpy.unique(y))
# Summarize number of words
print("Number of words: ")
print(len(numpy.unique(numpy.hstack(X))))
# Summarize review length
print("Review length: ")
result = [len(x) for x in X]
print("Mean %.2f words (%f)" % (numpy.mean(result), numpy.std(result)))
# plot review length
pyplot.boxplot(result)
pyplot.show()
'''
'''
# MLP for the IMDB problem
import numpy
from keras.datasets import imdb
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import Flatten
from keras.layers.embeddings import Embedding
from keras.preprocessing import sequence
# fix random seed for reproducibility
seed = 7
numpy.random.seed(seed)
# load the dataset but only keep the top n words, zero the rest
top_words = 5000
(X_train, y_train), (X_test, y_test) = imdb.load_data(num_words=top_words)
max_words = 500
X_train = sequence.pad_sequences(X_train, maxlen=max_words)
X_test = sequence.pad_sequences(X_test, maxlen=max_words)
# create the model
model = Sequential()
model.add(Embedding(top_words, 32, input_length=max_words))
model.add(Flatten())
model.add(Dense(250, activation='relu'))
model.add(Dense(1, activation='sigmoid'))
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
print(model.summary())
# Fit the model
model.fit(X_train, y_train, validation_data=(X_test, y_test), epochs=2, batch_size=128, verbose=2)
# Final evaluation of the model
scores = model.evaluate(X_test, y_test, verbose=0)
print("Accuracy: %.2f%%" % (scores[1]*100))
'''
# CNN for the IMDB problem
import numpy
from keras.datasets import imdb
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import Flatten
from keras.layers.convolutional import Conv1D
from keras.layers.convolutional import MaxPooling1D
from keras.layers.embeddings import Embedding
from keras.preprocessing import sequence
# fix random seed for reproducibility
seed = 7
numpy.random.seed(seed)
# load the dataset but only keep the top n words, zero the rest
top_words = 5000
(X_train, y_train), (X_test, y_test) = imdb.load_data(num_words=top_words)
# pad dataset to a maximum review length in words
max_words = 500
X_train = sequence.pad_sequences(X_train, maxlen=max_words)
X_test = sequence.pad_sequences(X_test, maxlen=max_words)
# create the model
model = Sequential()
model.add(Embedding(top_words, 32, input_length=max_words))
model.add(Conv1D(filters=32, kernel_size=3, padding='same', activation='relu'))
model.add(MaxPooling1D(pool_size=2))
model.add(Flatten())
model.add(Dense(250, activation='relu'))
model.add(Dense(1, activation='sigmoid'))
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
print(model.summary())
# Fit the model
model.fit(X_train, y_train, validation_data=(X_test, y_test), epochs=2, batch_size=128, verbose=2)
# Final evaluation of the model
scores = model.evaluate(X_test, y_test, verbose=0)
print("Accuracy: %.2f%%" % (scores[1]*100))
|
[
"[email protected]"
] | |
9d987c578fd5533a6ec79b97bf8e6020dc51ffdd
|
ed061360b729e0f3d56b68862ee3bd4dbc4729c1
|
/interesting/dp/32. Longest Valid Parentheses.py
|
0fcce32c198811483f25ee2abe569462ee45c69a
|
[] |
no_license
|
qian99/leetcode-question
|
14a84003e576a8ee5ba16e3f8f652d00a5f4949b
|
bab87caf6217bf99e0c36b1cf5fefee80bb77b07
|
refs/heads/master
| 2021-01-11T20:52:57.068209 | 2018-10-12T01:48:34 | 2018-10-12T01:48:34 | 79,202,799 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 678 |
py
|
class Solution(object):
def longestValidParentheses(self, s):
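        # dp[i] holds the length of the longest valid substring ending at s[i-1] (dp is 1-indexed):
        # a ')' that closes an adjacent '(' extends dp[i-2] by 2; a ')' that closes an earlier '('
        # adds 2 to dp[i-1] plus any valid run ending just before that matching '('.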
n = len(s)
result = 0
dp = [0 for i in range(n + 1)]
i = 1
while i <= n:
j = i - 1
if s[j] == '(':
dp[i] = 0
elif j > 0:
if s[j - 1] == '(':
dp[i] = dp[i - 2] + 2
elif s[j - 1] == ')' and j - dp[i - 1] - 1 >= 0 and s[j - dp[i - 1] - 1] == '(':
dp[i] = dp[i - 1] + 2
if i - dp[i - 1] - 2 >= 0:
dp[i] += dp[i - dp[i - 1] - 2]
result = max(dp[i], result)
i += 1
return result
|
[
"[email protected]"
] | |
b7a3d70a976e7c845fe9fc09d9bf302fca00091a
|
cbd0af707921223221a5a81831155dc4fdfa8084
|
/git291-python/objects/mass_pass.py
|
e67beabd3e041b239f945553c7e81881b431b7fa
|
[] |
no_license
|
291dev/python
|
492365db1b91c1b016d966fa1ca0aad1f6cfb440
|
46750a091fa5cb2dc4d9d55b6617779a86e6c712
|
refs/heads/master
| 2022-12-24T13:01:25.186727 | 2020-09-30T01:10:26 | 2020-09-30T01:10:26 | 298,525,719 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 601 |
py
|
class Person(object):
kind = 'human'
def __init__(self):
self.words = []
def talk(self):
print('talk')
def add_words(self, word):
self.words.append(word)
@classmethod
    def what_is_my_kind(cls):
        return cls.kind
class Car(object):
def run(self):
print('run')
class PersonCar(Car, Person):
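    # multiple inheritance: attribute lookup follows the MRO (PersonCar -> Car -> Person -> object)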
def fly(self):
print('fly')
person = Person()
car = Car()
personcar = PersonCar()
personcar.talk()
personcar.run()
personcar.fly()
print(personcar.kind)
personcar.add_words('mike')
personcar.add_words('make')
print(personcar.words)
print(PersonCar.what_is_my_kind())
|
[
"[email protected]"
] | |
480d27a5b612049855b51f12f126b608179977c2
|
aff72737860c226e56e3910ea0442648ef4d01a4
|
/programs/itertr.py
|
4bbf3a9e4a45ae3b980930b0b30bc0e36b585674
|
[] |
no_license
|
sivaprasadkonduru/Python-Programs
|
3db77270e085f785ecd0d9049557395010cfa1cd
|
aadace30e921f02210398bce20ab48d330a71812
|
refs/heads/master
| 2022-04-29T15:28:50.357467 | 2022-03-18T18:14:42 | 2022-03-18T18:14:42 | 94,407,324 | 2 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,082 |
py
|
'''
#Iterators:
#1. An iterator is an object which implements the iterator protocol.
#An iterator consists of 2 methods, __iter__ and __next__.
__iter__ returns the iterator object itself.
__next__ returns the next value from the iterator. If there are no more items to return,
then it should raise a StopIteration exception.
'''
s = "python"
for e in s:
print(e, end="")#it will return python
print() #it will return empty
n=iter(s)
print(n.__next__())
print(n.__next__())
print(n.__next__())
print(list(n))
#print(n.__next__())  # it would throw a StopIteration exception.
class pow():
def __init__(self,max=0):
self.max=max
def __iter__(self):
self.n=0
return self
def __next__(self):
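        # yield successive powers of two, 2**0 through 2**max, then signal exhaustion with StopIteration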
if self.n <= self.max:
result=2 ** self.n
self.n += 1
return result
else:
raise StopIteration
a=pow(8)
i=iter(a)
print(i.__next__())
print(i.__next__())
print(i.__next__())
print(i.__next__())
print(i.__next__())
print(i.__next__())
print(i.__next__())
|
[
"[email protected]"
] | |
e1dd18dadc4b80f6d2c18e37f5c8196b2b2ca032
|
0495e5acc5cd58b1a38748868215b9369ab8bfbb
|
/asteroWorking2WMFShip/polygon.py
|
e1dba87dc48b879cd03ba67655c04dd57abfddec
|
[] |
no_license
|
MacMeDan/astroids
|
537bdc459ad2bb5153f96c5ef5411e68b70ad104
|
5ccbd882f542e04dc104dfa7d20815ff9ece1feb
|
refs/heads/master
| 2021-01-17T19:52:58.860197 | 2013-12-15T02:18:35 | 2013-12-15T02:18:35 | 15,190,353 | 2 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,491 |
py
|
import math
import pygame.draw
from point import Point
import config
class Polygon:
def __init__(self, shape, position, rotation, color, surfaceing):
self.position = position
self.rotation = rotation
self.color = color
self.active = True
self.cache_points = (None, None, None)
self.dx = 0
self.dy = 0
self.surfaceing = surfaceing
# first, we find the shape's top-most left-most boundary, its origin
(origin_x, origin_y) = (shape[0].x, shape[0].y)
for p in shape:
if p.x < origin_x:
origin_x = p.x
if p.y < origin_y:
origin_y = p.y
# then, we orient all of its points relative to its origin
shifted = []
for p in shape:
shifted.append(Point(p.x - origin_x, p.y - origin_y))
# now shift them all based on the center of gravity
self.shape = shifted
self.center = self._findCenter()
self.shape = []
for p in shifted:
self.shape.append(Point(p.x - self.center.x, p.y - self.center.y))
# apply the rotation and offset to the shape of the polygon
def getPoints(self):
(oldrotation, oldposition, oldpoints) = self.cache_points
if oldrotation == self.rotation and oldposition == self.position:
return oldpoints
angle = math.radians(self.rotation)
sin = math.sin(angle)
cos = math.cos(angle)
points = []
for p in self.shape:
x = p.x * cos - p.y * sin + self.position.x
y = p.x * sin + p.y * cos + self.position.y
points.append(Point(x, y))
self.cache_points = (self.rotation, self.position, points)
return points
# test if the given point is inside this polygon
def contains(self, point):
points = self.getPoints()
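        # crossing-number (ray casting) test: count the edges crossed by a vertical ray from the point; an odd count means the point is inside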
crossingNumber = 0
for i in range(len(points)):
j = (i + 1) % len(points)
if (((points[i].x < point.x and point.x <= points[j].x) or
(points[j].x < point.x and point.x <= points[i].x)) and
(point.y > points[i].y + (points[j].y - points[i].y) /
(points[j].x - points[i].x) * (point.x - points[i].x))):
crossingNumber += 1
return crossingNumber % 2 == 1
def intersect(self, other_polygon):
fPoints = self.getPoints()
oPoints = other_polygon.getPoints()
for p in fPoints:
if other_polygon.contains(p):
return True
for b in oPoints:
if self.contains(b):
return True
else:
return False
def _findArea(self):
shape = self.shape
sum = 0.0
for i in range(len(shape)):
j = (i + 1) % len(self.shape)
sum += shape[i].x * shape[j].y - shape[j].x * shape[i].y
return abs(0.5 * sum)
def _findCenter(self):
shape = self.shape
(sum_x, sum_y) = (0.0, 0.0)
for i in range(len(shape)):
j = (i + 1) % len(self.shape)
sum_x += (shape[i].x + shape[j].x) * \
(shape[i].x * shape[j].y - shape[j].x * shape[i].y)
sum_y += (shape[i].y + shape[j].y) * \
(shape[i].x * shape[j].y - shape[j].x * shape[i].y)
area = self._findArea()
return Point(abs(sum_x / (6.0 * area)), abs(sum_y / (6.0 * area)))
def game_logic(self, keys, newkeys):
raise NotImplementedError()
def paint(self, surface):
if self.active:
points = self.getPoints()
self.shape2 =[]
for a in points:
self.shape2.append(a.pair())
pygame.draw.polygon(surface,self.color,self.shape2,0)
def move(self):
x,y = self.position.pair()
x += self.dx
y += self.dy
self.rotate(self.rotationSpeed)
self.position = Point(x,y)
if y < -20: #this makes your ship go completely off screen before wrapping to the other side
y = config.SCREEN_Y +20
if x < -20:
x = config.SCREEN_X +20
if y > config.SCREEN_Y +20:
y = -20
if x > config.SCREEN_X +20:
x = 20
self.position = Point(x,y)
def rotate(self,degrees):
pygame.transform.rotate(self.surfaceing, degrees)
self.rotation -= degrees
def isActive(self):
return self.active
def set_inactive(self):
self.active = False
|
[
"[email protected]"
] | |
60a71622737aa6e8a866253cebae37379422f533
|
7d84000f2977def7118b4c93a47b9d71c4ee38f8
|
/app/src/utils/log_streamer.py
|
ad37f010c1610fdbb84800feadcdb0afd9627020
|
[] |
no_license
|
tensorci/core
|
d405d17099987163dfc589711345ce414ace406e
|
50d18bb43f73b1d5d47fefad543c2554e87a6520
|
refs/heads/master
| 2021-03-19T13:27:26.219591 | 2020-12-03T01:14:57 | 2020-12-03T01:14:57 | 110,917,313 | 0 | 0 | null | 2020-12-03T01:15:26 | 2017-11-16T03:20:09 |
Python
|
UTF-8
|
Python
| false | false | 3,800 |
py
|
import log_formatter
from src import logger, dbi
from pyredis import redis
from src.helpers.definitions import tci_keep_alive
# TODO: This file is disgusting -- make it less disgusting
def should_complete_stream(data, deployment):
# Check if last_entry was specified in the log. Complete the stream if so.
complete = data.get('last_entry') == 'True'
# Check to see if this was an error log. Complete the stream if so.
if data.get('level') == 'error':
# Fail the deployment and log that this happened internally
logger.error('DEPLOYMENT FAILED: uid={}'.format(deployment.uid))
deployment.fail()
complete = True
return complete
def stream_deploy_logs(deployment, stream_key=None, block=30000):
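  # stream deploy logs from a Redis stream: XRANGE fetches the first entry, then blocking XREAD calls pick up
  # everything after the last seen timestamp, yielding a keep-alive marker when nothing arrives within `block` ms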
complete = False
first_log = redis.xrange(stream_key, count=1)
# If logs already exist, yield the first one and then
# iterate over timestamps to continue yielding
if first_log:
ts, data = first_log[0]
first_log_yielded = False
while not complete:
try:
# yield the first log and continue
if not first_log_yielded:
first_log_yielded = True
complete = should_complete_stream(data, deployment)
yield log_formatter.deploy_log(data)
continue
# Get all logs since timestamp=ts
result = redis.xread(block=block, **{stream_key: ts})
if not result:
yield tci_keep_alive + '\n'
continue
items = result.get(stream_key)
if not items:
yield tci_keep_alive + '\n'
continue
for item in items:
ts, data = item
complete = should_complete_stream(data, deployment)
yield log_formatter.deploy_log(data)
except:
break
else:
ts = '0-0'
while not complete:
try:
# Get all logs since timestamp=ts
result = redis.xread(block=block, **{stream_key: ts})
if not result:
yield tci_keep_alive + '\n'
continue
items = result.get(stream_key)
if not items:
yield tci_keep_alive + '\n'
continue
for item in items:
ts, data = item
complete = should_complete_stream(data, deployment)
yield log_formatter.deploy_log(data)
except:
break
def stream_train_logs(deployment, block=30000):
stream_key = deployment.train_log()
first_log = redis.xrange(stream_key, count=1)
# If logs already exist, yield the first one and then
# iterate over timestamps to continue yielding
if first_log:
ts, data = first_log[0]
first_log_yielded = False
while True:
try:
# yield the first log and continue
if not first_log_yielded:
first_log_yielded = True
yield log_formatter.training_log(data, with_color=True)
continue
# Get all logs since timestamp=ts
result = redis.xread(block=block, **{stream_key: ts})
if not result:
yield tci_keep_alive + '\n'
continue
items = result.get(stream_key)
if not items:
yield tci_keep_alive + '\n'
continue
for item in items:
ts, data = item
yield log_formatter.training_log(data, with_color=True)
except:
break
else:
ts = '0-0'
while True:
try:
# Get all logs since timestamp=ts
result = redis.xread(block=block, **{stream_key: ts})
if not result:
yield tci_keep_alive + '\n'
continue
items = result.get(stream_key)
if not items:
yield tci_keep_alive + '\n'
continue
for item in items:
ts, data = item
yield log_formatter.training_log(data, with_color=True)
except:
break
|
[
"[email protected]"
] | |
a0d550e2fdb493ba6c99d7490c06e07da09bcdde
|
eb9f655206c43c12b497c667ba56a0d358b6bc3a
|
/python/helpers/typeshed/stubs/aws-xray-sdk/aws_xray_sdk/core/sampling/reservoir.pyi
|
322d1d38c3d821602e3e08cb5f590e0f85608dd7
|
[
"Apache-2.0",
"MIT"
] |
permissive
|
JetBrains/intellij-community
|
2ed226e200ecc17c037dcddd4a006de56cd43941
|
05dbd4575d01a213f3f4d69aa4968473f2536142
|
refs/heads/master
| 2023-09-03T17:06:37.560889 | 2023-09-03T11:51:00 | 2023-09-03T12:12:27 | 2,489,216 | 16,288 | 6,635 |
Apache-2.0
| 2023-09-12T07:41:58 | 2011-09-30T13:33:05 | null |
UTF-8
|
Python
| false | false | 337 |
pyi
|
from enum import Enum
class Reservoir:
def __init__(self) -> None: ...
def borrow_or_take(self, now, can_borrow): ...
def load_quota(self, quota, TTL, interval) -> None: ...
@property
def quota(self): ...
@property
def TTL(self): ...
class ReservoirDecision(Enum):
TAKE: str
BORROW: str
NO: str
|
[
"[email protected]"
] | |
093f1c7e217d53e6170f3cbd036e6d7c263c909d
|
60262a70aa339390447a085856052512156b267d
|
/locators.py
|
27c7a3d424158d810ce01546c5917e6b1fe37b72
|
[
"MIT"
] |
permissive
|
luisdsantiago/auto_shopper
|
2977d94e1ce1bbc2acae5b0df0120a5fff1537c7
|
7c5df6dc6aca70e36abe79daa1952f25d9f00126
|
refs/heads/main
| 2023-07-27T14:58:58.075317 | 2021-09-12T18:30:11 | 2021-09-12T18:30:11 | 405,237,411 | 1 | 0 | null | 2021-09-11T00:40:41 | 2021-09-10T23:34:54 |
Python
|
UTF-8
|
Python
| false | false | 326 |
py
|
from selenium.webdriver.common.by import By
class MainPageLocators:
"""A class for main page locators. All main page locators should come here"""
GO_BUTTON = (By.ID, 'submit')
class SearchResultsPageLocators:
"""A class for search results locators. All search results locators should
come here"""
pass
|
[
"[email protected]"
] | |
9db8df813e7136b0ac07b7bfa9473d9701fbf616
|
776d97324adae9d929f90b701e6bf003df041bec
|
/0x0F-python-object_relational_mapping/4-cities_by_state.py
|
f3073c7627aa4e521b8aeef60a4670e4e66812f5
|
[] |
no_license
|
jblanco75/holbertonschool-higher_level_programming
|
6be3284ecd3a8168425cb82d0bee1f9321825a56
|
94b1db9f120e234ec61a6db044113e7f4618d3ef
|
refs/heads/main
| 2023-08-04T04:16:15.821683 | 2021-09-23T00:32:24 | 2021-09-23T00:32:24 | 361,894,154 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 636 |
py
|
#!/usr/bin/python3
"""script that lists all cities from the database hbtn_0e_4_usa"""
import sys
import MySQLdb
if __name__ == '__main__':
conn = MySQLdb.connect(host="localhost",
port=3306,
user=sys.argv[1],
passwd=sys.argv[2],
db=sys.argv[3])
cur = conn.cursor()
cur.execute("SELECT cities.id, cities.name, states.name\
FROM cities INNER JOIN states ON states.id = cities.state_id")
query_rows = cur.fetchall()
for row in query_rows:
print(row)
cur.close()
conn.close()
|
[
"[email protected]"
] | |
1f9a1493d3564a0265cdf0674188cf10e8bc3d9f
|
cdb7bb6215cc2f362f2e93a040c7d8c5efe97fde
|
/S/SumofSquareNumbers.py
|
cf292e41b9e81430c4e5ec8bd82b7e683dccd33d
|
[] |
no_license
|
bssrdf/pyleet
|
8861bbac06dfe0f0f06f6ad1010d99f8def19b27
|
810575368ecffa97677bdb51744d1f716140bbb1
|
refs/heads/master
| 2023-08-20T05:44:30.130517 | 2023-08-19T21:54:34 | 2023-08-19T21:54:34 | 91,913,009 | 2 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,367 |
py
|
''''
-Medium-
*Two Pointers*
Given a non-negative integer c, decide whether there're two integers a
and b such that a^2 + b^2 = c.
Example 1:
Input: c = 5
Output: true
Explanation: 1 * 1 + 2 * 2 = 5
Example 2:
Input: c = 3
Output: false
Example 3:
Input: c = 4
Output: true
Example 4:
Input: c = 2
Output: true
Example 5:
Input: c = 1
Output: true
Constraints:
0 <= c <= 2^31 - 1
'''
class Solution(object):
def judgeSquareSumMap(self, c):
"""
:type c: int
:rtype: bool
"""
m = set()
for i in range(int(c**0.5)+1):
i2 = i*i
m.add(i2)
if c - i2 in m:
return True
return False
def judgeSquareSum(self, c):
"""
:type c: int
:rtype: bool
"""
l, r = 0, int(c**0.5)
while l <= r:
n = l*l + r*r
if n == c: return True
elif n < c: l += 1
else: r -= 1
# print(l, r)
return False
if __name__ == '__main__':
print(Solution().judgeSquareSum(5))
print(Solution().judgeSquareSum(4))
print(Solution().judgeSquareSum(3))
print(Solution().judgeSquareSum(2))
print(Solution().judgeSquareSum(1))
print(Solution().judgeSquareSum(100000000))
print(Solution().judgeSquareSum(200000000))
|
[
"[email protected]"
] | |
84854d9fe6c10d92a0252c98664fb81ff407fd17
|
ff93afb0e4df31647381c797c12fb65f4a749cc1
|
/vitu/account/account.py
|
551bde3a72de645f1b0bbff76e3c089e05ac47ca
|
[
"Apache-2.0"
] |
permissive
|
LakeSky/vitu
|
4d96d1c460d837feeb4b119b24bf97f0bec3ee27
|
1fe60d51ace096c9c26db8f5c9f401dafe2f8b1a
|
refs/heads/master
| 2021-04-07T15:25:32.304503 | 2020-03-11T22:17:34 | 2020-03-11T22:17:34 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 28,094 |
py
|
'''/*---------------------------------------------------------------------------------------------
* Copyright (c) VituTech. All rights reserved.
* Licensed under the Apache License 2.0. See License.txt in the project root for license information.
*--------------------------------------------------------------------------------------------*/
'''
import math
import numpy as np
from vitu.trade.order.order import Order
from vitu.core.match_engine import match_engine
from vitu.utils.trade_utils import order_update_position
from vitu.utils.error_utils import Errors
from vitu.utils.log_utils import logger
class Account(object):
__slots__ = [
'context',
'name',
'exchange',
'asset_class',
'current_position',
'history_orders',
'orders'
]
def __init__(self, context, name, exchange, asset_class, current_position):
"""
:param name: spot_account
:param exchange: binance/okex
:param asset_class: spot/contract
:param current_position: {'btc':Position,'eth':Position}
"""
self.context = context
self.name = name
self.exchange = exchange
self.asset_class = asset_class
self.current_position = current_position # key = 'btc'..., value = Position
self.history_orders = dict() # key = id, value = Order
        self.orders = list()                        # stores today's orders
def sell(self, symbol_exchange, price, qty):
"""
:param symbol_exchange: "BTC/USDT.binance"
        :param price: limit price
        :param qty: quantity
:return:
"""
exchange = symbol_exchange.split('.')[1]
symbol = symbol_exchange.split('.')[0]
        base_currency = symbol.split('/')[0].lower()  # btc, the base currency
        quote_currency = symbol.split('/')[1].lower()  # usdt, the quote currency
if not isinstance(price,(int,float)) or np.isnan(price):
raise Errors.INVALID_PRICE
if not isinstance(qty, (int, float)) or np.isnan(qty):
raise Errors.INVALID_AMOUNT
        # sell only if price <= current_price; buy only if price >= current_price
current_price = self.context.get_price(symbol_exchange)
if price > current_price:
            logger.warning('A limit sell order must be priced at or below the market price; sell failed.')
return
if base_currency not in self.current_position.keys():
            logger.warning('No {} asset held; sell failed.'.format(base_currency))
return
qty = math.floor(qty*100000000)/100000000
price = math.floor(price*100000000)/100000000
amount = math.floor((qty*price)*100000000)/100000000
        # check that there are enough funds to place the order
if self.current_position[base_currency].detail()['available'] < qty:
            logger.warning('Insufficient {}; sell failed.'.format(base_currency))
return
        # check that the order is not below the minimum order size or the minimum order amount
min_order_qty = self.context.min_order[exchange][symbol.lower()]['min_order_qty']
min_order_amount = self.context.min_order[exchange][symbol.lower()]['min_order_qty']
if qty < min_order_qty:
            logger.warning('Below the minimum order size of {} {}; sell failed.'.format('%.6f'%min_order_qty,base_currency.upper()))
return
if amount < min_order_amount:
            logger.warning('Below the minimum order amount of {} {}; sell failed.'.format('%.6f'%min_order_amount,quote_currency.upper()))
return
        # place the order
order = Order(self.context, self.name, symbol, self.exchange, self.asset_class, 'sell', 'limit', price, qty)
order_id = order['id']
self.history_orders[order_id] = order
order_update_position(self.context, self.current_position, order['side'], exchange, symbol, price, qty)
match_engine(self.current_position, self.context, order)
if not self.orders or self.orders[0]['create_time'] == self.context.current_datetime():
self.orders.append(order.detail())
        logger.info('{} sold successfully! price: {} qty: {}'.format(base_currency, price, qty))
# return order.detail()
return order_id
def buy(self, symbol_exchange, price, qty):
"""
:param symbol_exchange: "BTC/USDT.binance"
        :param price: limit price
        :param qty: quantity
:return:
"""
exchange = symbol_exchange.split('.')[1]
symbol = symbol_exchange.split('.')[0]
        base_currency = symbol.split('/')[0].lower()  # btc, the base currency
        quote_currency = symbol.split('/')[1].lower()  # usdt, the quote currency
if not isinstance(price,(int,float)) or np.isnan(price):
raise Errors.INVALID_PRICE
if not isinstance(qty, (int, float)) or np.isnan(qty):
raise Errors.INVALID_AMOUNT
        # sell only if price <= current_price; buy only if price >= current_price
current_price = self.context.get_price(symbol_exchange)
if price < current_price:
            logger.warning('A limit buy order must be priced at or above the market price; buy failed.')
return
if quote_currency not in self.current_position.keys():
            logger.warning('No {} asset held; buy failed.'.format(quote_currency))
return
qty = math.floor(qty*100000000)/100000000
price = math.floor(price*100000000)/100000000
amount = math.floor((qty*price)*100000000)/100000000
        # check that there are enough funds to place the order
if self.current_position[quote_currency].detail()['available'] < amount:
            logger.warning('Insufficient {}; buy failed.'.format(quote_currency))
return
        # check that the order is not below the minimum order size or the minimum order amount
min_order_qty = self.context.min_order[exchange][symbol.lower()]['min_order_qty']
min_order_amount = self.context.min_order[exchange][symbol.lower()]['min_order_qty']
if qty < min_order_qty:
            logger.warning('Below the minimum order size of {} {}; buy failed.'.format(min_order_qty,base_currency.upper()))
return
if amount < min_order_amount:
            logger.warning('Below the minimum order amount of {} {}; buy failed.'.format(min_order_amount,quote_currency.upper()))
return
        # place the order
order = Order(self.context, self.name, symbol, self.exchange, self.asset_class, 'buy', 'limit', price, qty)
order_id = order['id']
self.history_orders[order_id] = order
order_update_position(self.context, self.current_position, order['side'], exchange, symbol, price, qty)
match_engine(self.current_position, self.context, order)
if not self.orders or self.orders[0]['create_time'] == self.context.current_datetime():
self.orders.append(order.detail())
        logger.info('{} bought successfully! price: {} qty: {}'.format(base_currency, price, qty))
# return order.detail()
return order_id
# def sell_value(self, symbol_exchange, price, value):
# """
# :param symbol_exchange: "BTC/USDT.binance"
# :param price: 限价
# :param value: 卖出多少资产价值
# :return:
# """
# exchange = symbol_exchange.split('.')[1]
# symbol = symbol_exchange.split('.')[0]
# base_currency = symbol.split('/')[0].lower() # btc 基础货币
# quote_currency = symbol.split('/')[1].lower() # usdt 计价货币
# if not isinstance(price, (int, float)) or np.isnan(price):
# raise Errors.INVALID_PRICE
# if not isinstance(value, (int, float)) or np.isnan(value):
# raise Errors.INVALID_AMOUNT
# # 低于current_price才能卖出,高于current_price才能买入
# current_price = self.context.get_price(symbol_exchange)
# if price > current_price:
# logger.warning('限价单价格需要小于等于市场价才能卖出,卖出失败.')
# return
# if base_currency not in self.current_position.keys():
# logger.warning('没有 {} 资产,卖出失败.'.format(base_currency))
# return
#
# qty = math.floor((value/current_price) * 100000000) / 100000000
# price = math.floor(price*100000000)/100000000
# amount = math.floor((qty*price)*100000000)/100000000
#
# # 判断是否有足够资金下单
# if self.current_position[base_currency].detail()['available'] < qty:
# logger.warning('{} 不足,卖出失败.'.format(base_currency))
# return
# # 判断是否小于最下下单精度、最小下单金额
# min_order_qty = self.context.min_order[exchange][symbol.lower()]['min_order_qty']
# min_order_amount = self.context.min_order[exchange][symbol.lower()]['min_order_qty']
# if qty < min_order_qty:
# logger.warning('不足下单最小精度 {} {},卖出失败.'.format('%.6f'%min_order_qty,base_currency.upper()))
# return
# if amount < min_order_amount:
# logger.warning('不足下单最小金额 {} {},卖出失败.'.format('%.6f'%min_order_amount,quote_currency.upper()))
# return
#
# # 下单
# order = Order(self.context, self.name, symbol, self.exchange, self.asset_class, 'sell', 'limit', price, qty)
# order_id = order['id']
# self.history_orders[order_id] = order
# order_update_position(self.context, self.current_position, order['side'], exchange, symbol, price, qty)
# match_engine(self.current_position, self.context, order)
# if not self.orders or self.orders[0]['create_time'] == self.context.current_datetime():
# self.orders.append(order.detail())
# logger.info('{} 卖出成功! 价格:{} 数量:{}'.format(base_currency, price, qty))
# return order_id
#
# def buy_value(self, symbol_exchange, price, value):
# """
# :param symbol_exchange: "BTC/USDT.binance"
# :param price: 限价
# :param value: 价值
# :return:
# """
# exchange = symbol_exchange.split('.')[1]
# symbol = symbol_exchange.split('.')[0]
# base_currency = symbol.split('/')[0].lower() # btc 基础货币
# quote_currency = symbol.split('/')[1].lower() # usdt 计价货币
# if not isinstance(price,(int,float)) or np.isnan(price):
# raise Errors.INVALID_PRICE
# if not isinstance(value, (int, float)) or np.isnan(value):
# raise Errors.INVALID_AMOUNT
# # 低于current_price才能卖出,高于current_price才能买入
# current_price = self.context.get_price(symbol_exchange)
# if price < current_price:
# logger.warning('限价单价格需要大于等于市场价才能买入,买入失败.')
# return
# if quote_currency not in self.current_position.keys():
# logger.warning('没有 {} 资产,买入失败.'.format(quote_currency))
# return
#
# qty = math.floor((value/current_price) * 100000000) / 100000000
# price = math.floor(price*100000000)/100000000
# amount = math.floor((qty*price)*100000000)/100000000
#
# # 判断是否有足够资金下单
# if self.current_position[quote_currency].detail()['available'] < amount:
# logger.warning('{} 不足,买入失败.'.format(quote_currency))
# return
# # 判断是否小于最下下单精度、最小下单金额
# min_order_qty = self.context.min_order[exchange][symbol.lower()]['min_order_qty']
# min_order_amount = self.context.min_order[exchange][symbol.lower()]['min_order_qty']
# if qty < min_order_qty:
# logger.warning('不足下单最小精度 {} {},买入失败.'.format(min_order_qty,base_currency.upper()))
# return
# if amount < min_order_amount:
# logger.warning('不足下单最小金额 {} {},买入失败.'.format(min_order_amount,quote_currency.upper()))
# return
#
# # 下单
# order = Order(self.context, self.name, symbol, self.exchange, self.asset_class, 'buy', 'limit', price, qty)
# order_id = order['id']
# self.history_orders[order_id] = order
# order_update_position(self.context, self.current_position, order['side'], exchange, symbol, price, qty)
# match_engine(self.current_position, self.context, order)
# if not self.orders or self.orders[0]['create_time'] == self.context.current_datetime():
# self.orders.append(order.detail())
# logger.info('{} 买入成功! 价格:{} 数量:{}'.format(base_currency,price,qty))
# return order_id
#
# def sell_target_value(self, symbol_exchange, price, target_value):
# """
# :param symbol_exchange: "BTC/USDT.binance"
# :param price: 限价
# :param target_value: 卖出到剩余多少资产价值
# :return:
# """
# exchange = symbol_exchange.split('.')[1]
# symbol = symbol_exchange.split('.')[0]
# base_currency = symbol.split('/')[0].lower() # btc 基础货币
# quote_currency = symbol.split('/')[1].lower() # usdt 计价货币
# if not isinstance(price, (int, float)) or np.isnan(price):
# raise Errors.INVALID_PRICE
# if not isinstance(target_value, (int, float)) or np.isnan(target_value):
# raise Errors.INVALID_AMOUNT
# # 低于current_price才能卖出,高于current_price才能买入
# current_price = self.context.get_price(symbol_exchange)
# if price > current_price:
# logger.warning('限价单价格需要小于等于市场价才能卖出,卖出失败.')
# return
# if base_currency not in self.current_position.keys():
# logger.warning('没有 {} 资产,卖出失败.'.format(base_currency))
# return
#
# available = self.current_position[base_currency].detail()['available']
#
# value = abs(target_value-available)
# qty = math.floor((value / current_price) * 100000000) / 100000000
# print(available,target_value,qty)
# if qty < 0:
# logger.warning('{} 不足,卖出失败.'.format(base_currency))
# return
# price = math.floor(price*100000000)/100000000
# amount = math.floor((qty*price)*100000000)/100000000
#
# # 判断是否有足够资金下单
# if self.current_position[base_currency].detail()['available'] < qty:
# logger.warning('{} 不足,卖出失败.'.format(base_currency))
# return
# # 判断是否小于最下下单精度、最小下单金额
# min_order_qty = self.context.min_order[exchange][symbol.lower()]['min_order_qty']
# min_order_amount = self.context.min_order[exchange][symbol.lower()]['min_order_qty']
# if qty < min_order_qty:
# logger.warning('不足下单最小精度 {} {},卖出失败.'.format('%.6f'%min_order_qty,base_currency.upper()))
# return
# if amount < min_order_amount:
# logger.warning('不足下单最小金额 {} {},卖出失败.'.format('%.6f'%min_order_amount,quote_currency.upper()))
# return
#
# # 下单
# order = Order(self.context, self.name, symbol, self.exchange, self.asset_class, 'sell', 'limit', price, qty)
# order_id = order['id']
# self.history_orders[order_id] = order
# order_update_position(self.context, self.current_position, order['side'], exchange, symbol, price, qty)
# match_engine(self.current_position, self.context, order)
# if not self.orders or self.orders[0]['create_time'] == self.context.current_datetime():
# self.orders.append(order.detail())
# logger.info('{} 卖出成功! 价格:{} 数量:{}'.format(base_currency, price, qty))
# return order_id
#
# def buy_target_value(self, symbol_exchange, price, target_value):
# """
# :param symbol_exchange: "BTC/USDT.binance"
# :param price: 限价
# :param target_value: 买入到剩余多少价值
# :return:
# """
# exchange = symbol_exchange.split('.')[1]
# symbol = symbol_exchange.split('.')[0]
# base_currency = symbol.split('/')[0].lower() # btc 基础货币
# quote_currency = symbol.split('/')[1].lower() # usdt 计价货币
# if not isinstance(price,(int,float)) or np.isnan(price):
# raise Errors.INVALID_PRICE
# if not isinstance(target_value, (int, float)) or np.isnan(target_value):
# raise Errors.INVALID_AMOUNT
# # 低于current_price才能卖出,高于current_price才能买入
# current_price = self.context.get_price(symbol_exchange)
# if price < current_price:
# logger.warning('限价单价格需要大于等于市场价才能买入,买入失败.')
# return
# if quote_currency not in self.current_position.keys():
# logger.warning('没有 {} 资产,买入失败.'.format(quote_currency))
# return
#
# available = self.current_position[quote_currency].detail()['available']
# value = math.floor((available - target_value) * 100000000) / 100000000
# qty = math.floor((value / current_price) * 100000000) / 100000000
# print(available,target_value,qty)
#
# if qty < 0:
# logger.warning('{} 不足,买入失败.'.format(quote_currency))
# return
# price = math.floor(price*100000000)/100000000
# amount = math.floor((qty*price)*100000000)/100000000
#
# # 判断是否有足够资金下单
# if self.current_position[quote_currency].detail()['available'] < amount:
# logger.warning('{} 不足,买入失败.'.format(quote_currency))
# return
# # 判断是否小于最下下单精度、最小下单金额
# min_order_qty = self.context.min_order[exchange][symbol.lower()]['min_order_qty']
# min_order_amount = self.context.min_order[exchange][symbol.lower()]['min_order_qty']
# if qty < min_order_qty:
# logger.warning('不足下单最小精度 {} {},买入失败.'.format(min_order_qty,base_currency.upper()))
# return
# if amount < min_order_amount:
# logger.warning('不足下单最小金额 {} {},买入失败.'.format(min_order_amount,quote_currency.upper()))
# return
#
# # 下单
# order = Order(self.context, self.name, symbol, self.exchange, self.asset_class, 'buy', 'limit', price, qty)
# order_id = order['id']
# self.history_orders[order_id] = order
# order_update_position(self.context, self.current_position, order['side'], exchange, symbol, price, qty)
# match_engine(self.current_position, self.context, order)
# if not self.orders or self.orders[0]['create_time'] == self.context.current_datetime():
# self.orders.append(order.detail())
# logger.info('{} 买入成功! 价格:{} 数量:{}'.format(base_currency,price,qty))
# return order_id
#
# def sell_target_pct(self, symbol_exchange, price, target_pct):
# """
# :param symbol_exchange: "BTC/USDT.binance"
# :param price: 限价
# :param target_pct: 卖出到资产百分比的数量
# :return:
# """
# exchange = symbol_exchange.split('.')[1]
# symbol = symbol_exchange.split('.')[0]
# base_currency = symbol.split('/')[0].lower() # btc 基础货币
# quote_currency = symbol.split('/')[1].lower() # usdt 计价货币
# if not isinstance(price, (int, float)) or np.isnan(price):
# raise Errors.INVALID_PRICE
# if not isinstance(target_pct, (int, float)) or np.isnan(target_pct):
# raise Errors.INVALID_AMOUNT
# # 低于current_price才能卖出,高于current_price才能买入
# current_price = self.context.get_price(symbol_exchange)
# if price > current_price:
# logger.warning('限价单价格需要小于等于市场价才能卖出,卖出失败.')
# return
# if base_currency not in self.current_position.keys():
# logger.warning('没有 {} 资产,卖出失败.'.format(base_currency))
# return
#
# qty = math.floor((self.current_position[base_currency].detail()['available'] * (1-target_pct)) * 100000000) / 100000000
# price = math.floor(price*100000000)/100000000
# amount = math.floor((qty*price)*100000000)/100000000
#
# # 下单
# order = Order(self.context, self.name, symbol, self.exchange, self.asset_class, 'sell', 'limit', price, qty)
# order_id = order['id']
# self.history_orders[order_id] = order
# order_update_position(self.context, self.current_position, order['side'], exchange, symbol, price, qty)
# match_engine(self.current_position, self.context, order)
# if not self.orders or self.orders[0]['create_time'] == self.context.current_datetime():
# self.orders.append(order.detail())
# logger.info('{} 卖出成功! 价格:{} 数量:{}'.format(base_currency, price, qty))
# return order_id
def sell_pct(self, symbol_exchange, price, pct):
"""
:param symbol_exchange: "BTC/USDT.binance"
:param price: 限价
:param pct: 卖出资产百分比的数量
:return:
"""
exchange = symbol_exchange.split('.')[1]
symbol = symbol_exchange.split('.')[0]
        base_currency = symbol.split('/')[0].lower()  # btc, the base currency
        # quote_currency = symbol.split('/')[1].lower()  # usdt, the quote currency
if not isinstance(price, (int, float)) or np.isnan(price):
raise Errors.INVALID_PRICE
if not isinstance(pct, (int, float)) or np.isnan(pct):
raise Errors.INVALID_AMOUNT
        if base_currency not in self.current_position.keys():
            logger.warning('No {} position available, sell failed.'.format(base_currency))
            return
        base_min_qty = self.context.min_order[exchange][symbol.lower()]['min_order_qty']
        if self.current_position[base_currency].detail()['available'] < base_min_qty:
            logger.warning('{} balance is below the minimum order size {} {}, sell failed.'.format(base_currency, '%.6f' % base_min_qty, base_currency.upper()))
            return
        # in this engine a limit order can only sell at or below current_price and only buy at or above it
        current_price = self.context.get_price(symbol_exchange)
        if price > current_price:
            logger.warning('A limit sell order must be priced at or below the market price, sell failed.')
            return
qty = math.floor((self.current_position[base_currency].detail()['available']*pct)*1000000)/1000000
price = math.floor(price*1000000)/1000000
order = Order(self.context, self.name, symbol, self.exchange, self.asset_class, 'sell', 'limit', price, qty)
order_id = order['id']
self.history_orders[order_id] = order
order_update_position(self.context, self.current_position, order['side'], exchange, symbol, price, qty)
match_engine(self.current_position, self.context, order)
if not self.orders or self.orders[0]['create_time'] == self.context.current_datetime():
self.orders.append(order.detail())
        logger.info('{} sold successfully! price: {} quantity: {}'.format(base_currency, price, qty))
# return order.detail()
return order_id
def buy_pct(self, symbol_exchange, price, pct):
"""
:param symbol_exchange: "BTC/USDT.binance"
:param price: 限价
:param pct: 买入资产百分比的数量
:return:
"""
exchange = symbol_exchange.split('.')[1]
symbol = symbol_exchange.split('.')[0]
        base_currency = symbol.split('/')[0].lower()  # btc, the base currency
        quote_currency = symbol.split('/')[1].lower()  # usdt, the quote currency
if not isinstance(price,(int,float)) or np.isnan(price):
raise Errors.INVALID_PRICE
        if quote_currency not in self.current_position.keys():
            logger.warning('No {} position available, buy failed.'.format(quote_currency))
            return
        quote_min_amount = self.context.min_order[exchange][symbol.lower()]['min_order_amount']
        if self.current_position[quote_currency].detail()['available'] < quote_min_amount:
            logger.warning('{} balance is below the minimum order amount {} {}, buy failed.'.format(quote_currency, quote_min_amount, quote_currency.upper()))
            return
        qty = math.floor((self.current_position[quote_currency].detail()['available'] * pct) / price * 1000000) / 1000000
        if not isinstance(qty, (int, float)) or np.isnan(qty):
            raise Errors.INVALID_AMOUNT
        # in this engine a limit order can only sell at or below current_price and only buy at or above it
        current_price = self.context.get_price(symbol_exchange)
        if price < current_price:
            logger.warning('A limit buy order must be priced at or above the market price, buy failed.')
            return
price = math.floor(price*1000000)/1000000
order = Order(self.context, self.name, symbol, self.exchange, self.asset_class, 'buy', 'limit', price, qty)
order_id = order['id']
self.history_orders[order_id] = order
order_update_position(self.context, self.current_position, order['side'], exchange, symbol, price, qty)
match_engine(self.current_position, self.context, order)
if not self.orders or self.orders[0]['create_time'] == self.context.current_datetime():
self.orders.append(order.detail())
        logger.info('{} bought successfully! price: {} quantity: {}'.format(base_currency, price, qty))
# return order.detail()
return order_id
def get_positions(self):
"""
        :return: all current positions, keyed by asset
"""
return {asset:position.to_dict() for asset,position in self.current_position.items()}
def get_position(self, asset):
"""
        :param asset: asset (currency) name, case-insensitive
        :return: the position detail for the given asset
"""
asset = asset.lower()
if asset not in self.current_position.keys():
asset_position = {'asset':asset,'available':0,'frozen':0,'avg_cost_usdt':0,'avg_cost_btc':0}
return asset_position
else:
return self.current_position[asset].to_dict()
# def is_holding(self, asset):
# if asset.lower() in self.get_positions().keys():
# return True
# else:
# return False
def get_order(self, order_id):
"""
        :param order_id: order id
        :return: the order detail matching order_id
"""
return self.history_orders[order_id]
def get_orders(self, status):
"""
:param status: 'Filled' | 'Partial_Filled' | 'Pending' | 'Cancelled' | 'Withdrawn'
        :return: all orders with the given status
"""
return [order for order in self.history_orders.values() if order['status'] == status]
def get_trades(self, order_id):
"""
        :param order_id: order id
        :return: the trade details belonging to the order matching order_id
"""
return self.history_orders[order_id].trades
@property
def __dict__(self):
return {key: self.__getattribute__(key) for key in self.__slots__}
def to_dict(self):
return self.__dict__
def __repr__(self):
return "Account(name: {}, exchange: {}, asset_class: {}, current_position: {}, open_orders: {})".format(
self.name, self.exchange, self.asset_class, self.current_position, self.history_orders)
if __name__ == '__main__':
pass
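# ---------------------------------------------------------------------------
# Editor's usage sketch (hypothetical, for illustration only).
# A strategy callback is assumed to receive an already-constructed `account`
# object of this class; the symbol, prices and variable names below are
# made-up placeholders. Note that in this backtest engine a limit buy only
# fills when priced at or above the market price, a limit sell only when
# priced at or below it, and quantities are truncated to 6 decimal places.
#
#   order_id = account.buy_pct('BTC/USDT.binance', price=41000.0, pct=0.5)   # spend 50% of the quote balance
#   btc_pos  = account.get_position('btc')                                   # {'asset': 'btc', 'available': ..., 'frozen': ..., ...}
#   order_id = account.sell_pct('BTC/USDT.binance', price=40500.0, pct=1.0)  # sell all available BTC
#   filled   = account.get_orders('Filled')                                  # orders already filled by the match engine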
|
[
"[email protected]"
] | |
55a7f8d39ad426e5df393667b76874cfa1f88fa0
|
6b4dcde85d83b7cec439ab6017951759b6afd840
|
/src/controller/player_controller.py
|
3dd8da5250515dca65a49f757edc1a58fb6aa76d
|
[] |
no_license
|
SirDavidLudwig/Quong
|
359577849be3cc57cb908daadb430f385c63888c
|
4f7e9818897dbe3b0239f223f5bd7a82ced3a1a6
|
refs/heads/master
| 2021-01-09T06:01:12.144481 | 2017-04-24T02:28:28 | 2017-04-24T02:28:28 | 80,892,499 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,282 |
py
|
from . controller import *
import core.quong
import pygame
from core.utils import *
class PlayerController(Controller):
def __init__(self, paddle):
super(PlayerController, self).__init__(paddle)
self.__client = getQuong().getClient()
def onEvent(self, event):
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_UP or event.key == pygame.K_LEFT:
self.setDirection(self.getDirection() | Controller.LEFT_UP)
elif event.key == pygame.K_DOWN or event.key == pygame.K_RIGHT:
self.setDirection(self.getDirection() | Controller.RIGHT_DOWN)
self.__client.send({'c':'u', 'd':self.getDirection()})
elif event.type == pygame.KEYUP:
if event.key == pygame.K_UP or event.key == pygame.K_LEFT:
self.setDirection(self.getDirection() & ~Controller.LEFT_UP)
elif event.key == pygame.K_DOWN or event.key == pygame.K_RIGHT:
self.setDirection(self.getDirection() & ~Controller.RIGHT_DOWN)
self.__client.send({'c':'u', 'd':self.getDirection()})
elif event.type == core.quong.SOCKET_RECIEVE:
if event.entity == 'paddle' and event.id == self.getPaddle().getId():
self.getPaddle().setX(event.pos[0])
self.getPaddle().setY(event.pos[1])
self.setDirection(event.direction)
print(event.direction)
print("Setting position")
|
[
"[email protected]"
] | |
ae2a05e3496bf2f98c31bd079b0241da2f586458
|
12829fc90e0b4d54bdcd92eec13d81eca4763872
|
/gibson/tests.py
|
5f57c60e1698dc04e92669e1857757b66584ce0d
|
[
"MIT"
] |
permissive
|
jamesbond007dj/django-models
|
056f064e020d05ae10ee33a62e6959c85d5172ee
|
5529ee921770b8db091bbe33a7e38087f5592caf
|
refs/heads/master
| 2021-09-23T12:23:50.446739 | 2020-01-15T07:29:37 | 2020-01-15T07:29:37 | 233,948,148 | 0 | 0 |
MIT
| 2021-09-22T18:27:14 | 2020-01-14T22:26:35 |
Python
|
UTF-8
|
Python
| false | false | 1,824 |
py
|
from django.contrib.auth import get_user_model
from django.test import TestCase
from django.urls import reverse
from .models import Gibson
class GibsonTests(TestCase):
def setUp(self):
self.user = get_user_model().objects.create_user(
username='jb',
email='[email protected]',
password='7942qwer'
)
self.gibson = Gibson.objects.create(
title='SG Custom Shop Deluxe',
body='Gibson Custom Shop is the pinnacle of craftsmanship, quality, and sound excellence. Each instrument celebrates Gibson`s legacy through accuracy, authenticity and attention to detail.' ,
author=self.user,
)
def test_string_representation(self):
gibson = Gibson(title='SG Custom Shop Deluxe')
self.assertEqual(str(gibson), gibson.title)
def test_gibson_content(self):
self.assertEqual(f'{self.gibson.title}', 'SG Custom Shop Deluxe')
self.assertEqual(f'{self.gibson.author}', 'jb')
self.assertEqual(f'{self.gibson.body}', 'Gibson Custom Shop is the pinnacle of craftsmanship, quality, and sound excellence. Each instrument celebrates Gibson`s legacy through accuracy, authenticity and attention to detail.')
def test_gibson_list_view(self):
response = self.client.get(reverse('home'))
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'home.html')
def test_gibson_detail_view(self):
response = self.client.get('/gibson/1/')
no_response = self.client.get('/gibson/100000/')
self.assertEqual(response.status_code, 200)
self.assertEqual(no_response.status_code, 404)
self.assertContains(response, 'SG Custom Shop Deluxe')
self.assertTemplateUsed(response, 'gibson_detail.html')
|
[
"[email protected]"
] | |
e4ca7ab4e69bb201e913b6fc374b71b556c316ba
|
af2a4bf0d9d69b4d11bf6bcfb6d8ef84e7857e41
|
/custom_components/media_player/plex.py
|
228e24b745d4dfd3724c9feffe909833fb645d49
|
[] |
no_license
|
blinkwise/homeassistant-config
|
f930d72487aa6b25476c3abf36d7e703d83805ba
|
d414eb2eaf685e6aee41512064c401c99f9effcf
|
refs/heads/master
| 2020-06-23T06:56:31.022551 | 2017-07-19T19:42:02 | 2017-07-19T19:42:02 | 94,219,695 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 32,858 |
py
|
"""
Support to interface with the Plex API.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/media_player.plex/
"""
import json
import logging
import os
from datetime import timedelta
from urllib.parse import urlparse
import requests
import voluptuous as vol
from homeassistant import util
from homeassistant.components.media_player import (
MEDIA_TYPE_MUSIC, MEDIA_TYPE_TVSHOW, MEDIA_TYPE_VIDEO, PLATFORM_SCHEMA,
SUPPORT_NEXT_TRACK, SUPPORT_PAUSE, SUPPORT_PLAY, SUPPORT_PREVIOUS_TRACK,
SUPPORT_STOP, SUPPORT_TURN_OFF, SUPPORT_VOLUME_MUTE, SUPPORT_VOLUME_SET,
MediaPlayerDevice)
from homeassistant.const import (
DEVICE_DEFAULT_NAME, STATE_IDLE, STATE_OFF, STATE_PAUSED, STATE_PLAYING)
from homeassistant.helpers import config_validation as cv
from homeassistant.helpers.event import track_utc_time_change
from homeassistant.loader import get_component
REQUIREMENTS = ['plexapi==2.0.2']
_CONFIGURING = {}
_LOGGER = logging.getLogger(__name__)
MIN_TIME_BETWEEN_SCANS = timedelta(seconds=10)
MIN_TIME_BETWEEN_FORCED_SCANS = timedelta(seconds=1)
PLEX_CONFIG_FILE = 'plex.conf'
CONF_INCLUDE_NON_CLIENTS = 'include_non_clients'
CONF_USE_EPISODE_ART = 'use_episode_art'
CONF_USE_CUSTOM_ENTITY_IDS = 'use_custom_entity_ids'
CONF_SHOW_ALL_CONTROLS = 'show_all_controls'
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Optional(CONF_INCLUDE_NON_CLIENTS, default=False):
cv.boolean,
vol.Optional(CONF_USE_EPISODE_ART, default=False):
cv.boolean,
vol.Optional(CONF_USE_CUSTOM_ENTITY_IDS, default=False):
cv.boolean,
})
def config_from_file(filename, config=None):
"""Small configuration file management function."""
if config:
# We're writing configuration
try:
with open(filename, 'w') as fdesc:
fdesc.write(json.dumps(config))
except IOError as error:
_LOGGER.error("Saving config file failed: %s", error)
return False
return True
else:
# We're reading config
if os.path.isfile(filename):
try:
with open(filename, 'r') as fdesc:
return json.loads(fdesc.read())
except IOError as error:
_LOGGER.error("Reading config file failed: %s", error)
# This won't work yet
return False
else:
return {}
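# Editor's sketch of how config_from_file is used (the file path and host/token
# values are placeholders): calling it with only a filename reads the JSON config
# and returns {} when the file does not exist, while passing a dict switches the
# function into write mode and returns True/False for success.
#
#   conf = config_from_file('/config/plex.conf')                                          # -> dict of saved hosts/tokens, or {}
#   ok = config_from_file('/config/plex.conf', {'192.168.1.10:32400': {'token': 'XYZ'}})  # -> True on success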
def setup_platform(hass, config, add_devices_callback, discovery_info=None):
"""Setup the Plex platform."""
# get config from plex.conf
file_config = config_from_file(hass.config.path(PLEX_CONFIG_FILE))
if file_config:
# Setup a configured PlexServer
host, token = file_config.popitem()
token = token['token']
# Via discovery
elif discovery_info is not None:
# Parse discovery data
host = discovery_info.get('host')
_LOGGER.info("Discovered PLEX server: %s", host)
if host in _CONFIGURING:
return
token = None
else:
return
setup_plexserver(host, token, hass, config, add_devices_callback)
def setup_plexserver(host, token, hass, config, add_devices_callback):
"""Setup a plexserver based on host parameter."""
import plexapi.server
import plexapi.exceptions
try:
plexserver = plexapi.server.PlexServer('http://%s' % host, token)
except (plexapi.exceptions.BadRequest, plexapi.exceptions.Unauthorized,
plexapi.exceptions.NotFound) as error:
_LOGGER.info(error)
# No token or wrong token
request_configuration(host, hass, config,
add_devices_callback)
return
# If we came here and configuring this host, mark as done
if host in _CONFIGURING:
request_id = _CONFIGURING.pop(host)
configurator = get_component('configurator')
configurator.request_done(request_id)
_LOGGER.info("Discovery configuration done")
# Save config
if not config_from_file(
hass.config.path(PLEX_CONFIG_FILE), {host: {
'token': token
}}):
_LOGGER.error("Failed to save configuration file")
_LOGGER.info('Connected to: http://%s', host)
plex_clients = {}
plex_sessions = {}
track_utc_time_change(hass, lambda now: update_devices(), second=30)
@util.Throttle(MIN_TIME_BETWEEN_SCANS, MIN_TIME_BETWEEN_FORCED_SCANS)
# pylint: disable=too-many-branches
def update_devices():
"""Update the devices objects."""
try:
devices = plexserver.clients()
except plexapi.exceptions.BadRequest:
_LOGGER.exception('Error listing plex devices')
return
except OSError:
_LOGGER.error("Could not connect to plex server at http://%s",
host)
return
new_plex_clients = []
for device in devices:
# For now, let's allow all deviceClass types
if device.deviceClass in ['badClient']:
continue
if device.machineIdentifier not in plex_clients:
new_client = PlexClient(config, device, None,
plex_sessions, update_devices,
update_sessions)
plex_clients[device.machineIdentifier] = new_client
new_plex_clients.append(new_client)
else:
plex_clients[device.machineIdentifier].refresh(device, None)
# add devices with a session and no client (ex. PlexConnect Apple TV's)
if config.get(CONF_INCLUDE_NON_CLIENTS):
for machine_identifier, session in plex_sessions.items():
if (machine_identifier not in plex_clients
and machine_identifier is not None):
new_client = PlexClient(config, None, session,
plex_sessions, update_devices,
update_sessions)
plex_clients[machine_identifier] = new_client
new_plex_clients.append(new_client)
else:
plex_clients[machine_identifier].refresh(None, session)
for machine_identifier, client in plex_clients.items():
# force devices to idle that do not have a valid session
if client.session is None:
client.force_idle()
if new_plex_clients:
add_devices_callback(new_plex_clients)
@util.Throttle(MIN_TIME_BETWEEN_SCANS, MIN_TIME_BETWEEN_FORCED_SCANS)
def update_sessions():
"""Update the sessions objects."""
try:
sessions = plexserver.sessions()
except plexapi.exceptions.BadRequest:
_LOGGER.exception("Error listing plex sessions")
return
plex_sessions.clear()
for session in sessions:
if (session.player is not None and
session.player.machineIdentifier is not None):
plex_sessions[session.player.machineIdentifier] = session
update_sessions()
update_devices()
def request_configuration(host, hass, config, add_devices_callback):
"""Request configuration steps from the user."""
configurator = get_component('configurator')
    # If we are already configuring this host, report the failure to the user
if host in _CONFIGURING:
configurator.notify_errors(_CONFIGURING[host],
'Failed to register, please try again.')
return
def plex_configuration_callback(data):
"""The actions to do when our configuration callback is called."""
setup_plexserver(host,
data.get('token'), hass, config,
add_devices_callback)
_CONFIGURING[host] = configurator.request_config(
hass,
'Plex Media Server',
plex_configuration_callback,
description=('Enter the X-Plex-Token'),
entity_picture='/static/images/logo_plex_mediaserver.png',
submit_caption='Confirm',
fields=[{
'id': 'token',
'name': 'X-Plex-Token',
'type': ''
}])
# pylint: disable=too-many-instance-attributes, too-many-public-methods
class PlexClient(MediaPlayerDevice):
"""Representation of a Plex device."""
# pylint: disable=too-many-arguments
def __init__(self, config, device, session, plex_sessions,
update_devices, update_sessions):
"""Initialize the Plex device."""
from plexapi.utils import NA
self.force_idle()
self.na_type = NA
self._name = None
self.config = config
self._machine_identifier = None
self.plex_sessions = plex_sessions
self.update_devices = update_devices
self.update_sessions = update_sessions
# Music
self._media_album_artist = None
self._media_album_name = None
self._media_artist = None
self._media_track = None
# TV Show
self._media_episode = None
self._media_season = None
self._media_series_title = None
self.refresh(device, session)
# Assign custom entity ID if desired
if self.config.get(CONF_USE_CUSTOM_ENTITY_IDS):
prefix = ''
# allow for namespace prefixing when using custom entity names
if config.get("entity_namespace"):
prefix = config.get("entity_namespace") + '_'
# rename the entity id
if self.machine_identifier:
self.entity_id = "%s.%s%s" % (
'media_player', prefix,
self.machine_identifier.lower().replace('-', '_'))
else:
if self.name:
self.entity_id = "%s.%s%s" % (
'media_player', prefix,
self.name.lower().replace('-', '_'))
# pylint: disable=too-many-branches, too-many-statements
def refresh(self, device, session):
"""Refresh key device data."""
# new data refresh
if session:
self._session = session
if device:
self._device = device
self._session = None
if self._device:
self._machine_identifier = self._convert_na_to_none(
self._device.machineIdentifier)
self._name = self._convert_na_to_none(
self._device.title) or DEVICE_DEFAULT_NAME
self._device_protocol_capabilities = (
self._device.protocolCapabilities)
# set valid session, preferring device session
if self._device and self.plex_sessions.get(
self._device.machineIdentifier, None):
self._session = self._convert_na_to_none(self.plex_sessions.get(
self._device.machineIdentifier, None))
if self._session:
self._media_position = self._convert_na_to_none(
self._session.viewOffset)
self._media_content_id = self._convert_na_to_none(
self._session.ratingKey)
self._media_content_rating = self._convert_na_to_none(
self._session.contentRating)
else:
self._media_position = None
self._media_content_id = None
# player dependent data
if self._session and self._session.player:
self._is_player_available = True
self._machine_identifier = self._convert_na_to_none(
self._session.player.machineIdentifier)
self._name = self._convert_na_to_none(self._session.player.title)
self._player_state = self._session.player.state
self._session_username = self._convert_na_to_none(
self._session.username)
self._make = self._convert_na_to_none(self._session.player.device)
else:
self._player_state = 'idle'
self._is_player_available = False
if self._player_state == 'playing':
self._is_player_active = True
self._state = STATE_PLAYING
elif self._player_state == 'paused':
self._is_player_active = True
self._state = STATE_PAUSED
elif self.device:
self._is_player_active = False
self.force_idle()
else:
self._is_player_active = False
self.force_idle()
self._state = STATE_OFF
if self._is_player_active and self._session is not None:
self._session_type = self._session.type
self._media_duration = self._convert_na_to_none(
self._session.duration)
else:
self._session_type = None
self._media_duration = None
# media type
if self._session_type == 'clip':
_LOGGER.debug("Clip content type detected, compatibility may "
"vary: %s", self.entity_id)
self._media_content_type = MEDIA_TYPE_TVSHOW
elif self._session_type == 'episode':
self._media_content_type = MEDIA_TYPE_TVSHOW
elif self._session_type == 'movie':
self._media_content_type = MEDIA_TYPE_VIDEO
elif self._session_type == 'track':
self._media_content_type = MEDIA_TYPE_MUSIC
else:
self._media_content_type = None
# title (movie name, tv episode name, music song name)
if self._session:
self._media_title = self._convert_na_to_none(self._session.title)
# Movies
if (self.media_content_type == MEDIA_TYPE_VIDEO and
self._convert_na_to_none(self._session.year) is not None):
self._media_title += ' (' + str(self._session.year) + ')'
# TV Show
if (self._is_player_active and
self._media_content_type is MEDIA_TYPE_TVSHOW):
# season number (00)
if callable(self._convert_na_to_none(self._session.seasons)):
self._media_season = self._convert_na_to_none(
self._session.seasons()[0].index).zfill(2)
elif self._convert_na_to_none(
self._session.parentIndex) is not None:
self._media_season = self._session.parentIndex.zfill(2)
else:
self._media_season = None
# show name
self._media_series_title = self._convert_na_to_none(
self._session.grandparentTitle)
# episode number (00)
if self._convert_na_to_none(
self._session.index) is not None:
self._media_episode = str(self._session.index).zfill(2)
else:
self._media_season = None
self._media_series_title = None
self._media_episode = None
# Music
if (self._is_player_active and
self._media_content_type == MEDIA_TYPE_MUSIC):
self._media_album_name = self._convert_na_to_none(
self._session.parentTitle)
self._media_album_artist = self._convert_na_to_none(
self._session.grandparentTitle)
self._media_track = self._convert_na_to_none(self._session.index)
self._media_artist = self._convert_na_to_none(
self._session.originalTitle)
# use album artist if track artist is missing
if self._media_artist is None:
_LOGGER.debug("Using album artist because track artist was "
"not found: %s", self.entity_id)
self._media_artist = self._media_album_artist
else:
self._media_album_name = None
self._media_album_artist = None
self._media_track = None
self._media_artist = None
# set app name to library name
if (self._session is not None
and self._session.librarySectionID is not None):
self._app_name = self._convert_na_to_none(
self._session.server.library.sectionByID(
self._session.librarySectionID).title)
else:
self._app_name = ''
# media image url
if self._session is not None:
thumb_url = self._get_thumbnail_url(self._session.thumb)
if (self.media_content_type is MEDIA_TYPE_TVSHOW
and not self.config.get(CONF_USE_EPISODE_ART)):
thumb_url = self._get_thumbnail_url(
self._session.grandparentThumb)
if thumb_url is None:
_LOGGER.debug("Using media art because media thumb "
"was not found: %s", self.entity_id)
thumb_url = self._get_thumbnail_url(self._session.art)
self._media_image_url = thumb_url
else:
self._media_image_url = None
def _get_thumbnail_url(self, property_value):
"""Return full URL (if exists) for a thumbnail property."""
if self._convert_na_to_none(property_value) is None:
return None
if self._session is None or self._session.server is None:
return None
url = self._session.server.url(property_value)
response = requests.get(url, verify=False)
if response and response.status_code == 200:
return url
def force_idle(self):
"""Force client to idle."""
self._app_name = ''
self._device = None
self._device_protocol_capabilities = None
self._is_player_active = False
self._is_player_available = False
self._make = ''
self._media_content_id = None
self._media_content_rating = None
self._media_content_type = None
self._media_duration = None
self._media_image_url = None
self._media_title = None
self._player_state = 'idle'
self._previous_volume_level = 1 # Used in fake muting
self._session = None
self._session_type = None
self._session_username = None
self._state = STATE_IDLE
self._volume_level = 1 # since we can't retrieve remotely
self._volume_muted = False # since we can't retrieve remotely
@property
def unique_id(self):
"""Return the id of this plex client."""
return '{}.{}'.format(self.__class__, self.machine_identifier or
self.name)
@property
def name(self):
"""Return the name of the device."""
return self._name
@property
def machine_identifier(self):
"""Return the machine identifier of the device."""
return self._machine_identifier
@property
def app_name(self):
"""Return the library name of playing media."""
return self._app_name
@property
def device(self):
"""Return the device, if any."""
return self._device
@property
def session(self):
"""Return the session, if any."""
return self._session
@property
def state(self):
"""Return the state of the device."""
return self._state
def update(self):
"""Get the latest details."""
self.update_devices(no_throttle=True)
self.update_sessions(no_throttle=True)
# pylint: disable=no-self-use, singleton-comparison
def _convert_na_to_none(self, value):
"""Convert PlexAPI _NA() instances to None."""
# PlexAPI will return a "__NA__" object which can be compared to
# None, but isn't actually None - this converts it to a real None
# type so that lower layers don't think it's a URL and choke on it
if value is self.na_type:
return None
else:
return value
@property
def _active_media_plexapi_type(self):
"""Get the active media type required by PlexAPI commands."""
if self.media_content_type is MEDIA_TYPE_MUSIC:
return 'music'
else:
return 'video'
@property
def media_content_id(self):
"""Content ID of current playing media."""
return self._media_content_id
@property
def media_content_type(self):
"""Content type of current playing media."""
if self._session_type == 'clip':
_LOGGER.debug("Clip content type detected, "
"compatibility may vary: %s", self.entity_id)
return MEDIA_TYPE_TVSHOW
elif self._session_type == 'episode':
return MEDIA_TYPE_TVSHOW
elif self._session_type == 'movie':
return MEDIA_TYPE_VIDEO
elif self._session_type == 'track':
return MEDIA_TYPE_MUSIC
else:
return None
@property
def media_artist(self):
"""Artist of current playing media, music track only."""
return self._media_artist
@property
def media_album_name(self):
"""Album name of current playing media, music track only."""
return self._media_album_name
@property
def media_album_artist(self):
"""Album artist of current playing media, music track only."""
return self._media_album_artist
@property
def media_track(self):
"""Track number of current playing media, music track only."""
return self._media_track
@property
def media_duration(self):
"""Duration of current playing media in seconds."""
return self._media_duration
@property
def media_image_url(self):
"""Image url of current playing media."""
return self._media_image_url
@property
def media_title(self):
"""Title of current playing media."""
return self._media_title
@property
def media_season(self):
"""Season of curent playing media (TV Show only)."""
return self._media_season
@property
def media_series_title(self):
"""The title of the series of current playing media (TV Show only)."""
return self._media_series_title
@property
def media_episode(self):
"""Episode of current playing media (TV Show only)."""
return self._media_episode
@property
def make(self):
"""The make of the device (ex. SHIELD Android TV)."""
return self._make
@property
def supported_features(self):
"""Flag media player features that are supported."""
if not self._is_player_active:
return None
# force show all controls
if self.config.get(CONF_SHOW_ALL_CONTROLS):
return (SUPPORT_PAUSE | SUPPORT_PREVIOUS_TRACK |
SUPPORT_NEXT_TRACK | SUPPORT_STOP |
SUPPORT_VOLUME_SET | SUPPORT_PLAY |
SUPPORT_TURN_OFF | SUPPORT_VOLUME_MUTE)
# only show controls when we know what device is connecting
if not self._make:
return None
# no mute support
elif self.make.lower() == "shield android tv":
_LOGGER.debug(
"Shield Android TV client detected, disabling mute "
"controls: %s", self.entity_id)
return (SUPPORT_PAUSE | SUPPORT_PREVIOUS_TRACK |
SUPPORT_NEXT_TRACK | SUPPORT_STOP |
SUPPORT_VOLUME_SET | SUPPORT_PLAY |
SUPPORT_TURN_OFF)
# Only supports play,pause,stop (and off which really is stop)
elif self.make.lower().startswith("tivo"):
_LOGGER.debug(
"Tivo client detected, only enabling pause, play, "
"stop, and off controls: %s", self.entity_id)
return (SUPPORT_PAUSE | SUPPORT_PLAY | SUPPORT_STOP |
SUPPORT_TURN_OFF)
# Not all devices support playback functionality
# Playback includes volume, stop/play/pause, etc.
elif self.device and 'playback' in self._device_protocol_capabilities:
return (SUPPORT_PAUSE | SUPPORT_PREVIOUS_TRACK |
SUPPORT_NEXT_TRACK | SUPPORT_STOP |
SUPPORT_VOLUME_SET | SUPPORT_PLAY |
SUPPORT_TURN_OFF | SUPPORT_VOLUME_MUTE)
else:
return None
def _local_client_control_fix(self):
"""Detect if local client and adjust url to allow control."""
if self.device is None:
return
# if this device's machineIdentifier matches an active client
# with a loopback address, the device must be local or casting
for client in self.device.server.clients():
if ("127.0.0.1" in client.baseurl and
client.machineIdentifier == self.device.machineIdentifier):
# point controls to server since that's where the
                # playback is occurring
_LOGGER.debug(
"Local client detected, redirecting controls to "
"Plex server: %s", self.entity_id)
server_url = self.device.server.baseurl
client_url = self.device.baseurl
self.device.baseurl = "{}://{}:{}".format(
urlparse(client_url).scheme,
urlparse(server_url).hostname,
str(urlparse(client_url).port))
def set_volume_level(self, volume):
"""Set volume level, range 0..1."""
if self.device and 'playback' in self._device_protocol_capabilities:
self._local_client_control_fix()
self.device.setVolume(
int(volume * 100), self._active_media_plexapi_type)
self._volume_level = volume # store since we can't retrieve
@property
def volume_level(self):
"""Return the volume level of the client (0..1)."""
if (self._is_player_active and self.device and
'playback' in self._device_protocol_capabilities):
return self._volume_level
@property
def is_volume_muted(self):
"""Return boolean if volume is currently muted."""
if self._is_player_active and self.device:
return self._volume_muted
def mute_volume(self, mute):
"""Mute the volume.
Since we can't actually mute, we'll:
- On mute, store volume and set volume to 0
- On unmute, set volume to previously stored volume
"""
if not (self.device and
'playback' in self._device_protocol_capabilities):
return
self._volume_muted = mute
if mute:
self._previous_volume_level = self._volume_level
self.set_volume_level(0)
else:
self.set_volume_level(self._previous_volume_level)
def media_play(self):
"""Send play command."""
if self.device and 'playback' in self._device_protocol_capabilities:
self._local_client_control_fix()
self.device.play(self._active_media_plexapi_type)
def media_pause(self):
"""Send pause command."""
if self.device and 'playback' in self._device_protocol_capabilities:
self._local_client_control_fix()
self.device.pause(self._active_media_plexapi_type)
def media_stop(self):
"""Send stop command."""
if self.device and 'playback' in self._device_protocol_capabilities:
self._local_client_control_fix()
self.device.stop(self._active_media_plexapi_type)
def turn_off(self):
"""Turn the client off."""
# Fake it since we can't turn the client off
self.media_stop()
def media_next_track(self):
"""Send next track command."""
if self.device and 'playback' in self._device_protocol_capabilities:
self._local_client_control_fix()
self.device.skipNext(self._active_media_plexapi_type)
def media_previous_track(self):
"""Send previous track command."""
if self.device and 'playback' in self._device_protocol_capabilities:
self._local_client_control_fix()
self.device.skipPrevious(self._active_media_plexapi_type)
# pylint: disable=W0613
def play_media(self, media_type, media_id, **kwargs):
"""Play a piece of media."""
if not (self.device and
'playback' in self._device_protocol_capabilities):
return
src = json.loads(media_id)
media = None
if media_type == 'MUSIC':
media = self.device.server.library.section(
src['library_name']).get(src['artist_name']).album(
src['album_name']).get(src['track_name'])
elif media_type == 'EPISODE':
media = self._get_tv_media(
src['library_name'], src['show_name'],
src['season_number'], src['episode_number'])
elif media_type == 'PLAYLIST':
media = self.device.server.playlist(src['playlist_name'])
elif media_type == 'VIDEO':
media = self.device.server.library.section(
src['library_name']).get(src['video_name'])
import plexapi.playlist
if (media and media_type == 'EPISODE' and
isinstance(media, plexapi.playlist.Playlist)):
# delete episode playlist after being loaded into a play queue
self._client_play_media(media=media, delete=True,
shuffle=src['shuffle'])
elif media:
self._client_play_media(media=media, shuffle=src['shuffle'])
def _get_tv_media(self, library_name, show_name, season_number,
episode_number):
"""Find TV media and return a Plex media object."""
target_season = None
target_episode = None
show = self.device.server.library.section(library_name).get(
show_name)
if not season_number:
playlist_name = "{} - {} Episodes".format(
self.entity_id, show_name)
return self.device.server.createPlaylist(
playlist_name, show.episodes())
for season in show.seasons():
if int(season.seasonNumber) == int(season_number):
target_season = season
break
if target_season is None:
_LOGGER.error("Season not found: %s\\%s - S%sE%s", library_name,
show_name,
str(season_number).zfill(2),
str(episode_number).zfill(2))
else:
if not episode_number:
playlist_name = "{} - {} Season {} Episodes".format(
self.entity_id, show_name, str(season_number))
return self.device.server.createPlaylist(
playlist_name, target_season.episodes())
for episode in target_season.episodes():
if int(episode.index) == int(episode_number):
target_episode = episode
break
if target_episode is None:
_LOGGER.error("Episode not found: %s\\%s - S%sE%s",
library_name, show_name,
str(season_number).zfill(2),
str(episode_number).zfill(2))
return target_episode
def _client_play_media(self, media, delete=False, **params):
"""Instruct Plex client to play a piece of media."""
if not (self.device and
'playback' in self._device_protocol_capabilities):
_LOGGER.error("Client cannot play media: %s", self.entity_id)
return
import plexapi.playqueue
playqueue = plexapi.playqueue.PlayQueue.create(
self.device.server, media, **params)
# Delete dynamic playlists used to build playqueue (ex. play tv season)
if delete:
media.delete()
self._local_client_control_fix()
server_url = self.device.server.baseurl.split(':')
self.device.sendCommand('playback/playMedia', **dict({
'machineIdentifier': self.device.server.machineIdentifier,
'address': server_url[1].strip('/'),
'port': server_url[-1],
'key': media.key,
'containerKey':
'/playQueues/{}?window=100&own=1'.format(
playqueue.playQueueID),
}, **params))
@property
def device_state_attributes(self):
"""Return the scene state attributes."""
attr = {}
attr['media_content_rating'] = self._media_content_rating
attr['session_username'] = self._session_username
attr['media_library_name'] = self._app_name
return attr
|
[
"[email protected]"
] | |
ac3766b055900a07ace3f3a8bd1dbf254696aa9f
|
afa2996d12a3ee96a8f969db85d19e76044a6738
|
/Greedy/16 Goodland Electricity
|
f340e2781eb2f6a8525b6d74e6cebbc14e46d18d
|
[
"MIT"
] |
permissive
|
raj713335/HackerRank_Algorithms
|
08a37d44ff5d38be007465d88e3807df9a745505
|
7d42364ce59d2755b436f607166c37edcf093904
|
refs/heads/main
| 2023-08-22T21:26:45.611471 | 2021-10-09T21:03:40 | 2021-10-09T21:03:40 | 374,290,147 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,226 |
#!/bin/python3
"""https://www.hackerrank.com/challenges/pylons/problem"""
import math
import os
import random
import re
import sys
#
# Complete the 'pylons' function below.
#
# The function is expected to return an INTEGER.
# The function accepts following parameters:
# 1. INTEGER k
# 2. INTEGER_ARRAY arr
#
def pylons(k, arr):
# Write your code here
i=0
count=0
last_power_plant=-1
while(i<len(arr)):
i+=k-1
if i>=len(arr):
i=len(arr)-1
        # print(i)  # leftover debug output
while (i>last_power_plant):
if arr[i]==1:
count+=1
last_power_plant=i
break
else:
i-=1
if (i==last_power_plant):
return -1
i+=k
return count
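# Editor's worked example, believed to match the sample case from the problem
# statement: k = 2, arr = [0, 1, 1, 1, 1, 0] -> 2.
#   - i jumps ahead to index k-1 = 1; arr[1] == 1, so a plant at city 1 covers cities 0..2
#   - i advances by k to 3, then by k-1 to 4; arr[4] == 1, so a plant at city 4 covers cities 3..5
#   - every city is covered, so pylons(2, [0, 1, 1, 1, 1, 0]) returns 2
# If the backwards scan reaches the last placed plant without finding a usable
# city, the function returns -1 (coverage is impossible).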
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
first_multiple_input = input().rstrip().split()
n = int(first_multiple_input[0])
k = int(first_multiple_input[1])
arr = list(map(int, input().rstrip().split()))
result = pylons(k, arr)
fptr.write(str(result) + '\n')
fptr.close()
|
[
"[email protected]"
] | ||
2592cd0cd2bea747a58634eb2386b2e46bdca291
|
a0015a3dc121c8fcdd5d2eadd522ece03b4ceec8
|
/docs/cornell CS class/Lesson 29. Coroutines/demos/read2.py
|
fe3ecc58b16f7d80b45c890599a931e740dcc82a
|
[
"MIT"
] |
permissive
|
LizzieDeng/kalman_fliter_analysis
|
fc40d475797dbddba5f9f2dfb224fbf68d77865f
|
50e728f32c496c3fcbb8ca3ee00857b999b88d99
|
refs/heads/main
| 2023-03-03T02:46:19.020078 | 2021-02-05T07:53:10 | 2021-02-05T07:53:10 | 329,243,328 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,137 |
py
|
"""
A module to show off a long-running function as a coroutine.
This module shows another advantage of a coroutine. We can
interleave two functions as they load from a file. There are
reasons we might want to do this when working with large data,
but they are beyond the scope of this course.
Author: Walker M. White
Date: November 2, 2020
"""
def merge(dict1,dict2):
"""
Returns a new dictionary merging (joining keys) dict1
and dict2.
If a key appears in only one of dict1 or dict2, the
value is the value from that dictionary. If it is in
both, the value is the sum of values.
Example: merge({'a':1,'b':2},{'b':3,'c':4}) returns
{'a':1,'b':5,'c':4}
Parameter dict1: The first dictionary to merge
Precondition: dict1 a dictionary with int or float values
Parameter dict2: The second dictionary to merge
Precondition: dict2 a dictionary with int or float values
"""
result = dict(dict1) # Makes a (shallow) copy
for k in dict2:
if k in dict1:
            result[k] = result[k] + dict2[k]
        else:
            result[k] = dict2[k]
return result
def add_word(word,counts):
"""
Adds a word to a word-count dictionary.
The keys of the dictionaries are strings, and the values
are integers. If the word is already in the dictionary,
adding it will increase the value by 1. Otherwise it
will add the key and assign it a value for 1.
    Example: If count = {'a':1,'b':1}, add_word('a',count)
alters count to be {'a':2,'b':1}
Parameter word: The word to add
Precondition: word is a string
Parameter counts: The word-count dictionary
Precondition: count is a dictionary with string keys
and integer values
"""
if word in counts:
counts[word] = counts[word]+1
else:
counts[word] = 1
def wordcount(fname):
"""
Returns a dictionary with the individual word count of
fname
    This function opens the specified text file and creates
a dictionary from it. The keys of the dictionaries are
words (i.e. adjacent letters with no spaces or
punctuation). For example, in the string 'Who are you?',
the words are 'Who', 'are', and 'you'. The values are
the number of times that word (paying attention to
capitalization) appears in the file.
This function is a generator-based coroutine that stops
at every 10% of the file to return its amount of progress
to the original caller (the function that calls next()).
Parameter fname: The file name
Precondition: fname is a string and the name of a text
file
"""
# Load the entire file into a single string
file = open(fname)
text = file.read()
file.close()
counts = {}
word = '' # Accumulator to build a word
for pos in range(len(text)):
# Yield every 10%
if pos % (len(text)//10) == 0:
# Indicate the amount of progress we made
yield round(100*pos/len(text))
# Build up the word, one letter at a time
x = text[pos]
if x.isalpha():
word = word+x
else: # Word ends
# Add it if not empty
if word != '':
add_word(word,counts)
word = '' # Reset the accumulator
# Add the last word
if word != '':
add_word(word,counts)
return counts
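# Editor's sketch of driving a single wordcount() generator by hand; the
# loadfiles() function below does the same thing, interleaved for two files.
#
#   loader = wordcount('warpeace10.txt')
#   try:
#       while True:
#           print('Loaded ' + str(next(loader)) + '%')
#   except StopIteration as e:
#       counts = e.args[0]   # the dict produced by the generator's return statement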
def loadfiles(fname1,fname2):
"""
Creates a word-count dictionary for fname1, fname2 and
prints the combined size
The size of the word-count dictionary is the number of
distinct words in the file.
This function is the parent of wordcount, pushing it
forward with the next() function until it is done
reading the file. This function creates two wordcount
coroutines and interleaves them.
Parameter fname1: The first file name
Precondition: fname1 is a string and the name of a text file
Parameter fname2: The second file name
Precondition: fname2 is a string and the name of a text file
"""
loader1 = wordcount(fname1)
loader2 = wordcount(fname2)
result = {}
# We keep going as long as either loader is working
while (not loader1 is None) or (not loader2 is None):
# Load the next batch from fname1
if not loader1 is None:
try:
amount = next(loader1)
print('Loaded '+str(amount)+'% of '+repr(fname1))
except StopIteration as e:
result = merge(result,e.args[0]) # Access the return value
loader1 = None # We are done
# Load the next batch from fname2
if not loader2 is None:
try:
amount = next(loader2)
print('Loaded '+str(amount)+'% of '+repr(fname2))
except StopIteration as e:
result = merge(result,e.args[0]) # Access the return value
loader2 = None # We are done
print('Read a total of '+str(len(result))+' words.')
if __name__ == '__main__':
loadfiles('warpeace10.txt','kingjames10.txt')
|
[
"[email protected]"
] | |
8f90747f878c89576c75229672a53f42de5d6cc6
|
3e74384cda370ae442bc020648c596ce90e7f1cf
|
/flask_appbuilder/tests/test_security_permissions.py
|
16c13f2fad47f94ff633805e126483da9af60f23
|
[
"BSD-3-Clause"
] |
permissive
|
thesuperzapper/Flask-AppBuilder
|
846b34bb32768bd4f821fb4e8b73a09c8ba0d414
|
357c8100e96034c736860737a41c2004aa4e993b
|
refs/heads/master
| 2022-06-23T05:06:17.059006 | 2022-05-25T10:16:23 | 2022-05-25T10:16:23 | 251,502,746 | 0 | 1 |
BSD-3-Clause
| 2020-03-31T04:46:27 | 2020-03-31T04:46:26 | null |
UTF-8
|
Python
| false | false | 4,942 |
py
|
from flask_appbuilder import SQLA
from flask_appbuilder.tests.base import FABTestCase
from flask_login import AnonymousUserMixin
class SecurityPermissionsTestCase(FABTestCase):
def setUp(self):
from flask import Flask
from flask_appbuilder import AppBuilder
self.app = Flask(__name__)
self.app.config.from_object("flask_appbuilder.tests.config_security")
self.app.config["FAB_ADD_SECURITY_VIEWS"] = False
self.db = SQLA(self.app)
self.appbuilder = AppBuilder(self.app, self.db.session)
self._db_role_1 = self.appbuilder.sm.add_role("DB_ROLE1")
self._pvm1 = self.appbuilder.sm.add_permission_view_menu(
"can_show", "ModelDBView"
)
self._pvm2 = self.appbuilder.sm.add_permission_view_menu(
"can_delete", "ModelDBView"
)
self.appbuilder.sm.add_permission_role(self._db_role_1, self._pvm1)
self.appbuilder.sm.add_permission_role(self._db_role_1, self._pvm2)
# Insert test data
self._user01 = self.create_user(
self.appbuilder,
"user1",
"password1",
"",
first_name="user01",
last_name="user",
email="[email protected]",
role_names=["FAB_ROLE1", "DB_ROLE1"],
)
self._user02 = self.create_user(
self.appbuilder,
"user2",
"password1",
"",
first_name="user02",
last_name="user",
email="[email protected]",
role_names=["DB_ROLE1"],
)
self._user03 = self.create_user(
self.appbuilder,
"user3",
"password1",
"",
first_name="user03",
last_name="user",
email="[email protected]",
role_names=["FAB_ROLE2"],
)
self._user04 = self.create_user(
self.appbuilder,
"user4",
"password1",
"",
first_name="user04",
last_name="user",
email="[email protected]",
role_names=["FAB_ROLE1", "FAB_ROLE2"],
)
def tearDown(self):
self.appbuilder.get_session.delete(self._user01)
self.appbuilder.get_session.delete(self._user02)
self.appbuilder.get_session.delete(self._user03)
self.appbuilder.get_session.delete(self._user04)
self.appbuilder.get_session.delete(self._pvm1)
self.appbuilder.get_session.delete(self._pvm2)
self.appbuilder.get_session.delete(self._db_role_1)
self.appbuilder.get_session.commit()
def test_get_user_permissions_mixed(self):
"""
Security Permissions: Get user permissions mixes role types
"""
assert {
("can_list", "Model1View"),
("can_list", "Model2View"),
("can_show", "ModelDBView"),
("can_delete", "ModelDBView"),
} == self.appbuilder.sm.get_user_permissions(self._user01)
def test_get_user_permissions_db(self):
"""
Security Permissions: Get user permissions DB role type
"""
assert {
("can_delete", "ModelDBView"),
("can_show", "ModelDBView"),
} == self.appbuilder.sm.get_user_permissions(self._user02)
def test_get_user_permissions_builtin(self):
"""
Security Permissions: Get user permissions builtin role type
"""
assert {
("can_list", "Model3View"),
("can_list", "Model4View"),
} == self.appbuilder.sm.get_user_permissions(self._user03)
def test_get_user_permissions_builtin_multiple(self):
"""
Security Permissions: Get user permissions multiple builtin role type
"""
assert {
("can_list", "Model2View"),
("can_list", "Model1View"),
("can_list", "Model3View"),
("can_list", "Model4View"),
} == self.appbuilder.sm.get_user_permissions(self._user04)
def test_get_anonymous_permissions(self):
"""
Security Permissions: Get anonymous user permissions
"""
assert set() == self.appbuilder.sm.get_user_permissions(AnonymousUserMixin())
def test_get_role_permissions_builtin(self):
"""
Security Permissions: Get role permissions builtin
"""
role = self.appbuilder.sm.find_role("FAB_ROLE1")
assert {
("can_list", "Model2View"),
("can_list", "Model1View"),
} == self.appbuilder.sm.get_role_permissions(role)
def test_get_role_permissions_db(self):
"""
Security Permissions: Get role permissions db
"""
role = self.appbuilder.sm.find_role("DB_ROLE1")
assert {
("can_show", "ModelDBView"),
("can_delete", "ModelDBView"),
} == self.appbuilder.sm.get_role_permissions(role)
|
[
"[email protected]"
] | |
d50eab1307396bc0e84d299a1bc905d2e2726013
|
1d09a4459ba64f92f75048fecd9e38b10f9e9eda
|
/单词数量.py
|
bf99cf89120678d78ca7e8d6827b8a68c66b4cd2
|
[] |
no_license
|
kanghanjun/python
|
e0e3edace5a1c507f5041bbb880730ed94ae25b5
|
205abf5fecce2eea3c1478cb275d7c35bd2ba271
|
refs/heads/master
| 2021-05-14T16:08:29.107368 | 2018-01-02T13:20:56 | 2018-01-02T13:20:56 | 116,013,931 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 446 |
py
|
# Initialize and preprocess the input data
inputstr = input()
inputstr = inputstr.lower()
lst = (inputstr).split(' ')
# Use a dictionary to store the number of times each word appears
dit = {}
max = 0
maxkey = ""
for item in lst:
if (dit.get(item, 0)):
dit[item] = dit[item] + 1
else:
dit[item] = 1
    # On each update, track the running maximum count and its word
if (dit[item] > max):
max = dit[item]
maxkey = item
print(maxkey)
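# Editor's note: an equivalent approach using the standard library, shown only
# for comparison (tie-breaking between equally frequent words may differ):
#
#   from collections import Counter
#   words = input().lower().split(' ')
#   print(Counter(words).most_common(1)[0][0])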
|
[
"[email protected]"
] | |
4454142f9c4e8872c8f957ac0c309aecc8018b55
|
206850b00a70e56cbb16f128778479c289f81a66
|
/blog/migrations/0001_initial.py
|
a0494f61a8f1b6215120dd4b246a61674d42c55e
|
[] |
no_license
|
xsddota/xiao
|
d00a63a69eddf4bfcc9456f06378b0d54294c36f
|
23b5d231927f0fbcb7827df3d4d8f5ee85ad7140
|
refs/heads/master
| 2020-03-26T05:41:03.715372 | 2018-08-13T12:02:36 | 2018-08-13T12:02:36 | 131,020,630 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 656 |
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.12 on 2018-04-27 14:36
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='BlogPost',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=150)),
('body', models.TextField()),
('timestamp', models.DateTimeField()),
],
),
]
|
[
"[email protected]"
] | |
c11e4f3e37a3b5b7c7c5195b4b6bb40bbf957818
|
f121f894eacd4632869be7df343c7d63e319ab03
|
/UCSB_Python_Mods/rmsd.py
|
9796eacf02d7333f858d7a53f6414051c6e1b6e8
|
[] |
no_license
|
jeff-wang/JmsScripts
|
955bc8436c0092be45c725c818a757cdb6745fd5
|
5cec8112637be7a19c4aac893f612aa8c354b733
|
refs/heads/master
| 2023-06-01T12:34:33.233127 | 2020-01-09T01:31:46 | 2020-01-09T01:31:46 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 25,179 |
py
|
#!/usr/bin/env python
#LAST MODIFIED: 12-19-08
Usage = """Calculates rmsd between pdb files.
Usage : rmsd.py [OPTIONS] PDBFILE1 PDBFILE2 ...
OR: rmsd.py [OPTIONS] --traj [--avg=n] [--frac=X] REFPDBFILE TRJFILE PRMTOPTILE
OPTIONS : "--first" will only compare the first to all other pdb files
"--align" to align the sequences before rmsd comparison
"--compres='1,3-5'" to use residues 1 and 3-5 to minimize RMSD
"--calcres='1-5,9'" to calculate rmsd only for residues 1-5 and 9
"--frac=X" to compute fraction of structures within X angstroms
"--nskip=X" number of configs in trajectory to skip (default is 0)
"--nread=X" number of configs in trajectory to read; -1 is all (default -1)
"--nstride=X" read configs every nstride frames (default is 1)
"""
#check for instructions
import sys
if __name__ == "__main__" and len(sys.argv) == 1:
print Usage
sys.exit()
from numpy import *
import copy, os, coords, random
import geometry, sequence, protein, scripttools
def RMSD(Pos1, Pos2, Align = False, Center = True,
CompInd = None, CalcInd = None, Verbose = False,
RetAlignment = False):
"""Calculates the RMSD between two conformations.
* Pos1: array of dimensions [N,3] for conformation 1
* Pos2: array of dimensions [N,3] for conformation 2
* Align: True = modify Pos2 to be aligned to Pos1
(default is False)
* CompInd: indices in [0,N) for positions in Pos to perform alignment
* CalcInd: indices in [0,N) for positions in Pos to compute RMSD
* Verbose: true to report shape/size mismatches
* RetAlignment: True to return the translation vectors and rotation matrix"""
#clean indices
AllInd = arange(len(Pos1), dtype = int)
if all(CompInd == AllInd): CompInd = None
if all(CalcInd == AllInd): CalcInd = None
#get indices
if CompInd is None:
p1, p2, n = Pos1, Pos2, len(Pos1)
else:
p1, p2, n = Pos1.take(CompInd, axis=0), Pos2.take(CompInd, axis=0), len(CompInd)
#check for correct shapes
if not shape(p1) == shape(p2):
if Verbose: print "Position vectors are not the same size."
return
elif not len(shape(p1)) == 2:
if Verbose: print "Position vectors are not the correct rank."
return
#get alignment
Pos1Vec, Pos2Vec, RotMat, Resid = geometry.AlignmentRMSD(p1, p2, Center = Center)
#compute rmsd
if not all(CompInd == CalcInd):
if CalcInd is None:
p1, p2, n = Pos1, Pos2, len(Pos1)
else:
p1, p2, n = Pos1.take(CalcInd, axis=0), Pos2.take(CalcInd, axis=0), len(CalcInd)
p1 = p1 + Pos1Vec
p2 = dot(p2 + Pos2Vec, RotMat)
Resid = sum((p1 - p2)**2, axis=None)
r = sqrt(Resid / float(n))
#align Pos2 to Pos1
if Align: Pos2[:,:] = dot(Pos2 + Pos2Vec, RotMat) - Pos1Vec
if RetAlignment:
return r, Pos1Vec, Pos2Vec, RotMat
else:
return r
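# Editor's usage sketch: two synthetic 4-atom conformations, the second a rigid
# translation of the first, so the fitted RMSD should come out (numerically) zero.
# With Align = True the second coordinate array is modified in place so that it
# sits on top of the first.
#
#   Pos1 = array([[0., 0., 0.], [1., 0., 0.], [1., 1., 0.], [0., 1., 0.]])
#   Pos2 = Pos1 + array([5., -3., 2.])
#   r = RMSD(Pos1, Pos2)                 # ~0.0 after optimal superposition
#   r = RMSD(Pos1, Pos2, Align = True)   # also aligns Pos2 onto Pos1 in place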
def GetProteinClassMasks(p, AtomMask = None, CompResInd = None,
CalcResInd = None):
if AtomMask is None:
AtomInd = array(range(len(p.Atoms)), int)
else:
AtomInd = p.AtomInd(AtomName = AtomMask)
CompInd, CalcInd = None, None
if not CompResInd is None:
CompInd = p.AtomInd(AtomName = AtomMask, ResNum = CompResInd)
CompInd = [i for (i,j) in enumerate(AtomInd) if j in CompInd]
if not CalcResInd is None:
CalcInd = p.AtomInd(AtomName = AtomMask, ResNum = CalcResInd)
CalcInd = [i for (i,j) in enumerate(AtomInd) if j in CalcInd]
return AtomInd, CompInd, CalcInd
def GetProteinClassMasks2(p1, p2, AtomMask = None,
CompResInd = None, CalcResInd = None):
AtomInd1, AtomInd2, CompInd, CalcInd = [], [], [], []
count = 0
for (i, a1) in enumerate(p1.Atoms):
ResNum = p1.AtomResNum[i]
InComp, InCalc = True, True
if not CompResInd is None: InComp = ResNum in CompResInd
if not CalcResInd is None: InCalc = ResNum in CalcResInd
j = p2.AtomNum(ResNum, a1.Name, NotFoundError = False)
if j >= 0:
AtomInd1.append(i)
AtomInd2.append(j)
if InComp: CompInd.append(count)
if InCalc: CalcInd.append(count)
count += 1
AtomInd1 = array(AtomInd1, int)
AtomInd2 = array(AtomInd2, int)
CompInd = array(CompInd, int)
CalcInd = array(CalcInd, int)
return AtomInd1, AtomInd2, CompInd, CalcInd
def GetCoordsObjMasks(c, AtomMask = None, CompResInd = None,
CalcResInd = None):
if AtomMask is None:
AtomInd = array(range(len(c.AtomNames)), int)
else:
AtomInd = [i for (i, an) in enumerate(c.AtomNames)
if an.strip() in AtomMask]
AtomInd = array(AtomInd, int)
CompInd, CalcInd = None, None
if not CompResInd is None:
CompInd = [i for (i, an) in enumerate(c.AtomNames)
if an.strip() in AtomMask and c.AtomRes[i] in CompResInd]
CompInd = [i for (i,j) in enumerate(AtomInd) if j in CompInd]
if not CalcResInd is None:
CalcInd = [i for (i, an) in enumerate(c.AtomNames)
if an.strip() in AtomMask and c.AtomRes[i] in CalcResInd]
CalcInd = [i for (i,j) in enumerate(AtomInd) if j in CalcInd]
return AtomInd, CompInd, CalcInd
def RMSDProteinClass(p1, p2, Center = True, Backbone = True, AlignSeq = False,
CompResInd = None, CalcResInd = None, UpdateBFactors = False,
AlignAtoms = False):
"Returns (RMSD,NRes)"
#align the sequences
p2orig = p2
if AlignSeq:
Map = sequence.SeqMapClass(p1.Seq, p2.Seq)
p1 = p1[Map.a:Map.b]
p2 = p2[Map.c:Map.d]
#filter res indices
n = min(len(p1), len(p2))
if not CompResInd is None: CompResInd = [i for i in CompResInd if i < n]
if not CalcResInd is None: CalcResInd = [i for i in CalcResInd if i < n]
#get the right atoms
if Backbone:
AtomMask = ["N", "CA", "C"]
else:
AtomMask = None
if AlignAtoms:
AtomInd1, AtomInd2, CompInd, CalcInd = GetProteinClassMasks2(p1, p2,
AtomMask = AtomMask, CompResInd = CompResInd, CalcResInd = CalcResInd)
else:
AtomInd1, CompInd, CalcInd = GetProteinClassMasks(p1, AtomMask = AtomMask,
CompResInd = CompResInd,
CalcResInd = CalcResInd)
AtomInd2, CompInd, CalcInd = GetProteinClassMasks(p2, AtomMask = AtomMask,
CompResInd = CompResInd,
CalcResInd = CalcResInd)
#filter positions
Pos1, Pos2 = p1.Pos.take(AtomInd1, axis=0), p2.Pos.take(AtomInd2, axis=0).copy()
#compute rmsd
r = RMSD(Pos1, Pos2, Align = True, Center = Center,
CompInd = CompInd, CalcInd = CalcInd)
#determine num of residues used in rmsd calculation
if CalcResInd is None:
NRes = len(p1)
else:
NRes = len(CalcResInd)
#see if we need to update bfactors
if UpdateBFactors:
if CalcInd is None: CalcInd = range(len(AtomInd2))
#map indices in the (possibly sequence-trimmed) p2 back to atoms of p2orig;
#Map only exists when AlignSeq was used
Shift = p2orig.Res[Map.c].StartAtom if AlignSeq else 0
for i in CalcInd:
an = AtomInd2[i] + Shift
p2orig.Atoms[an].BFactor = sqrt(sum((Pos1[i] - Pos2[i])**2))
return r, NRes
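#Hedged usage sketch: comparing backbone atoms of two ProteinClass objects with
#sequence alignment first (p1, p2 assumed to be ProteinClass instances):
#    r, NRes = RMSDProteinClass(p1, p2, Backbone = True, AlignSeq = True)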
def RMSDPdb(PdbFile1, PdbFile2, Center = True, Backbone = True,
AlignSeq = False, CompResInd = None, CalcResInd = None):
p1 = protein.ProteinClass(Pdb = PdbFile1)
p2 = protein.ProteinClass(Pdb = PdbFile2)
return RMSDProteinClass(p1, p2, Center = Center, Backbone = Backbone,
AlignSeq = AlignSeq, CompResInd = CompResInd,
CalcResInd = CalcResInd)
def ClusterMSS(CoordsObj, Cutoff, MaxIter = 3, MaxCluster = None,
MaxClusterWork = None, Method = 0, CompInd = None, CalcInd = None,
Weights = None, Verbose = True, IterMaxCluster = False,
IterNormalize = False):
"""Clusters conformations in a trajectory based on RMSD distance.
* CoordsObj: an object exposing the functions GetNextCoords() which
returns an array object of the next set of coordinates (or None
if at the end) and has optional list variable Mask, and
Reset() which moves back to the first set of coordinates
* Cutoff: maximum RMSD distance of a configuration to a cluster
* MaxIter: maximum number of iterations to perform
* MaxCluster: maximum number of clusters; negative values will force
all configs to be members of a cluster (default is none)
* MaxClusterWork: maximum number of working clusters
* Method: 0 to cluster such that each RMSD between a configuration
and the average cluster configuration is below Cutoff; 1 is
same except no alignment is performed
* CompInd: indices in [0,N) for positions in Pos to perform alignment
* CalcInd: indices in [0,N) for positions in Pos to compute RMSD
* Weights: weighting factor for each conformation
* IterMaxCluster: True will dump all but MaxCluster configs each iter
* IterNormalize: True will dump previous iter contribs to centroids
"""
def BasicRMSD(Pos1, Pos2, Ind = None):
"Calculates the rmsd between two configurations without alignment."
if Ind is None:
rmsdsq = sum((Pos1-Pos2)**2) / float(size(Pos1,0))
else:
rmsdsq = sum((Pos1[Ind]-Pos2[Ind])**2) / float(size(Pos1[Ind],0))
rmsdsq = max([rmsdsq,0.])
return sqrt(rmsdsq)
Iteration = 0 #iteration number
WeightSum = [] #total weights of clusters
PosSum = [] #list of cluster configuration arrays
FinalIters = 0 #number of iterations without additions/deletions of clusters
NCoord = len(CoordsObj)
if Weights is None: Weights = ones(NCoord, float)
Weights = array(Weights, float)
if not len(Weights) == NCoord:
raise IndexError, "Incorrect number of array elements in Weights."
#filter weights for too low values
Weights = Weights.copy()
Weights = Weights / Weights.max()
Weights[Weights < 1.e-100] = 0.
NFrameTot = int(sum(Weights > 0))
StartInd, NewStartInd = 0, -1
print "Using %d conformations in %d trajectory frames." % (NFrameTot, len(CoordsObj))
while FinalIters < 2:
Iteration += 1
FinalIters += 1
if Iteration > MaxIter:
if Verbose: print "Did not converge within maximum number of iterations"
break
if Verbose: print "Cluster iteration %d" % Iteration
if Verbose: print "Starting with %d clusters" % len(PosSum)
CoordsObj.Reset()
ClustNum = zeros(NCoord, int) #cluster number of each configuration, starting at 1
NAddThis = [0]*len(PosSum) #number of configs added to each cluster this iteration
ThisFrame = 0
PosSumThis = copy.deepcopy(PosSum)
WeightSumThis = copy.deepcopy(WeightSum)
#check where to start
if NewStartInd >= 0: StartInd = NewStartInd
NewStartInd = -1
for CurInd in range(StartInd, NCoord) + range(0, StartInd):
CurPos = CoordsObj[CurInd]
#check for zero weight
CurWeight = Weights[CurInd]
if CurWeight == 0.:
ClustNum[CurInd] = 0
continue
ThisFrame += 1
ind = -1 #cluster number assigned to this config; -1 means none
#calculate the rmsd between this configuration and each cluster config,
#but stop when a rmsd is found which is below the cutoff
minRMSD = 1.e300
for (i, PosSumi) in enumerate(PosSum):
ThisPos = PosSumi / WeightSum[i]
if Method == 0:
#rmsd between new config and average cluster config
r = RMSD(ThisPos, CurPos, Align = True, Center = True,
CompInd = CompInd, CalcInd = CalcInd)
else:
#rmsd between new config and average cluster config, without alignment
r = BasicRMSD(ThisPos, CurPos, Ind = CalcInd)
minRMSD = min(minRMSD, r)
if r < Cutoff:
#go with a cluster if rmsd is within the cutoff
ind = i
break
if ind >= 0:
#add the configuration to the cluster
PosSum[ind] = PosSum[ind] + CurPos * CurWeight
WeightSum[ind] = WeightSum[ind] + CurWeight
NAddThis[ind] = NAddThis[ind] + 1
ClustNum[CurInd] = ind+1
elif MaxClusterWork is None or len(PosSum) < MaxClusterWork:
#create a new cluster with this config, as long as it
#doesn't exceed the maximum number of working clusters
if minRMSD == 1.e300: minRMSD = 0.
if Verbose: print "Adding cluster: config %d (%d/%d) | min RMSD %.1f | %d clusters tot" % (CoordsObj.Index+1,
ThisFrame, NFrameTot, minRMSD, len(PosSum)+1)
PosSum.append(CurPos * CurWeight)
WeightSum.append(CurWeight)
NAddThis.append(1)
ClustNum[CurInd] = len(PosSum)
FinalIters = 0
else:
#cluster is nothing
ClustNum[CurInd] = 0
FinalIters = 0
if NewStartInd < 0:
NewStartInd = CurInd
if Verbose: print "Ran out of clusters. Next iteration starting from config %d" % (CoordsObj.Index+1,)
#remove contribution to centroids from all but this round
if IterNormalize:
for i in range(len(PosSumThis)):
PosSum[i] = PosSum[i] - PosSumThis[i]
WeightSum[i] = WeightSum[i] - WeightSumThis[i]
del PosSumThis
del WeightSumThis
#loop through clusters
i = 0
while i < len(PosSum):
#remove clusters that have no additions this iteration
if NAddThis[i] == 0:
if Verbose: print "Removing cluster %d" % (i+1,)
del PosSum[i]
del WeightSum[i]
del NAddThis[i]
for (k, cn) in enumerate(ClustNum):
if cn > i + 1:
ClustNum[k] -= 1
elif cn == i + 1:
ClustNum[k] = -1
FinalIters = 0
else:
i += 1
#sort clusters and then remove any beyond MaxCluster
ClustNum = array(ClustNum, int)
PosSum, ClustNum, WeightSum, NAddThis = __SortClust(PosSum, ClustNum, Weights, WeightSum, NAddThis, Verbose)
#crop off any extraneous clusters; clusterless configs
#are assigned a cluster index of 0
if IterMaxCluster and not MaxCluster is None and len(PosSum) > abs(MaxCluster):
del PosSum[abs(MaxCluster):]
WeightSum = WeightSum[:abs(MaxCluster)]
NAddThis = NAddThis[:abs(MaxCluster)]
ClustNum[abs(ClustNum) > abs(MaxCluster)] = 0
#crop off any extraneous clusters; clusterless configs
#are assigned a cluster index of 0
if not IterMaxCluster and not MaxCluster is None and len(PosSum) > abs(MaxCluster):
del PosSum[abs(MaxCluster):]
WeightSum = WeightSum[:abs(MaxCluster)]
NAddThis = NAddThis[:abs(MaxCluster)]
ClustNum[abs(ClustNum) > abs(MaxCluster)] = 0
#finalize things
if Verbose: print "Calculating average structures"
Pos = [x / y for (x, y) in zip(PosSum, WeightSum)]
del PosSum
del WeightSum
#get cluster populations
ClustWeights, ClustPop = __CalcClustPop(Pos, ClustNum, Weights)
#if there is a maximum cluster specification that's negative, force
#everything to the closest cluster
if not MaxCluster is None and MaxCluster < 0:
ClustWeights, ClustPop, Pos, ClustNum = __CalcForceClust(CoordsObj, ClustWeights, ClustPop, Pos, ClustNum, Weights,
CompInd, CalcInd, Verbose)
#calculate final rmsd values for configs and clusters
Pos, ClustNum, ConfRmsd, ClustRmsd = __CalcRmsd(CoordsObj, Pos, ClustNum, CompInd, CalcInd, Verbose)
if Verbose: print "%d configurations sorted into %d clusters" % (NFrameTot, len(Pos))
return Pos, ClustNum, ClustWeights, ClustPop, ConfRmsd, ClustRmsd
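#Hedged usage sketch, assuming Crd is a coords-like trajectory object as
#described in the docstring above (Cutoff value is illustrative):
#    Pos, ClustNum, ClustWeights, ClustPop, ConfRmsd, ClustRmsd = ClusterMSS(
#        Crd, Cutoff = 2.0, MaxIter = 10, MaxCluster = -10)
#A negative MaxCluster forces every frame into one of the retained clusters.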
def __SortClust(Pos, ClustNum, Weights, WeightSum, NAddThis, Verbose = True):
if Verbose: print "Reordering clusters by population"
Sums = [(sum(Weights[abs(ClustNum) == i+1]), i) for i in range(len(Pos))]
Sums.sort()
Sums.reverse()
#resort the arrays
Pos = [Pos[j] for (i,j) in Sums]
WeightSum = [WeightSum[j] for (i,j) in Sums]
NAddThis = [NAddThis[j] for (i,j) in Sums]
#create a dictionary which will tell us the new cluster
#number for a given old cluster number
Trans = {0:0}
for i in range(len(Pos)):
ind = Sums[i][1] + 1
Trans[ind] = i + 1
Trans[-ind] = -i - 1
#update ClustNum with the rearranged cluster numbers
ClustNum = array([Trans[i] for i in ClustNum], int)
return Pos, ClustNum, WeightSum, NAddThis
def __CalcClustPop(Pos, ClustNum, Weights):
#update the cluster population
ClustWeights = array([sum(Weights[abs(ClustNum) == i+1]) for i in range(len(Pos))], float)
#update the populations
ClustPop = [float(sum(abs(ClustNum) == i+1)) for i in range(len(Pos))]
ClustPop = array(ClustPop, float)
return ClustWeights, ClustPop
def __CalcForceClust(CoordsObj, ClustWeights, ClustPop, Pos, ClustNum, Weights,
CompInd = None, CalcInd = None, Verbose = True):
#count the number of clusterless configurations
c = sum(ClustNum == 0)
if Verbose: print "Forcing %d extraneous configurations to existing clusters" % c
#find the nearest cluster to each clusterless config and assign it
CoordsObj.Reset()
for (j, CurPos) in enumerate(CoordsObj):
if ClustNum[j] == 0:
ind = -1
minr = 0.
for (i, Posi) in enumerate(Pos):
r = RMSD(Posi, CurPos, Align = False, Center = True,
CompInd = CompInd, CalcInd = CalcInd)
if r < minr or ind < 0:
ind = i
minr = r
ClustNum[j] = ind + 1
ClustWeights[ind] = ClustWeights[ind] + Weights[j]
ClustPop[ind] = ClustPop[ind] + 1.
return ClustWeights, ClustPop, Pos, ClustNum
def __CalcRmsd(CoordsObj, Pos, ClustNum,
CompInd = None, CalcInd = None, Verbose = True):
if Verbose: print "Calculating cluster rmsd values"
#calculate the pairwise cluster rmsd values
ClustRmsd = zeros((len(Pos), len(Pos)),float)
for (i, Posi) in enumerate(Pos):
for (j, Posj) in enumerate(Pos):
if j <= i: continue
ClustRmsd[i,j] = RMSD(Posi, Posj, Align=False, Center=True,
CompInd = CompInd, CalcInd = CalcInd)
ClustRmsd[j,i] = ClustRmsd[i,j]
if Verbose: print "Calculating final rmsd values"
#loop through configs and find the one with the lowest
#rmsd in each cluster
ConfRmsd = -1. * ones(len(ClustNum), float)
MinRmsd = [-1]*len(Pos)
CoordsObj.Reset()
for (CurInd, CurPos) in enumerate(CoordsObj):
i = abs(ClustNum[CurInd]) - 1
if i >= 0:
ConfRmsd[CurInd] = RMSD(Pos[i], CurPos, Align=False, Center=True,
CompInd = CompInd, CalcInd = CalcInd)
if MinRmsd[i] < 0:
MinRmsd[i] = CurInd
elif ConfRmsd[MinRmsd[i]] > ConfRmsd[CurInd]:
MinRmsd[i] = CurInd
#loop through the configs again and extract the
#coords of the minimum-rmsd configs for each clust
if Verbose: print "Finding nearest cluster structures"
for (i, ind) in enumerate(MinRmsd):
ClustNum[ind] = -ClustNum[ind]
Pos[i] = CoordsObj.Get(ind, coords.NoMask)
return Pos, ClustNum, ConfRmsd, ClustRmsd
def SaveClustResults(Pos, ClustNum, ClustWeights, ClustPop, ConfRmsd, ClustRmsd,
Prefix = "clust", ConfIndices = None, Verbose = False):
#make the indices
if ConfIndices is None:
ConfIndices = range(0, len(ConfRmsd))
#calculate the percent
x = sum(ClustWeights)
ClustPct = [100.*w / x for w in ClustWeights]
#save results file
s = "CLUSTER POPULATION:\nCluster number, Number of configs, Percent\n"
s += "\n".join(["%-5d %-7d %.2f" % (i+1, ClustPop[i], ClustPct[i])
for i in range(0,len(Pos))])
s += "\n\nCLUSTER-TO-CLUSTER RMSD:\nCluster number, Cluster number, RMSD\n"
for i in range(0,len(Pos)):
for j in range(i+1,len(Pos)):
s += "%-5d %-5d %-8.2f\n" % (i+1, j+1, ClustRmsd[i,j])
s += "\n\nCLUSTER CONFIG NUMBERS:\nCluster number, Config number, RMSD\n"
ClustConf = [(abs(cn), i) for (i, cn) in enumerate(ClustNum) if cn < 0]
ClustConf.sort()
for (cn, i) in ClustConf:
s += "%-5d %-5d %-8.2f\n" % (cn, ConfIndices[i], ConfRmsd[i])
s += "\n\nCLUSTER MEMBERSHIP:\nConfig number, Cluster number, RMSD\n"
for (i, r) in enumerate(ConfRmsd):
if r < 0.:
s += "%-7d %-5s %-8s\n" % (ConfIndices[i], "--", "--")
else:
s += "%-7d %-5d %-8.2f\n" % (ConfIndices[i], ClustNum[i], ConfRmsd[i])
fn = Prefix + "results.txt"
file(fn,"w").write(s)
#save cluster files
if Verbose:
for i in range(0,len(Pos)):
data = [(ConfRmsd[j], ConfIndices[j]) for j in range(0,len(ConfRmsd))
if abs(ClustNum[j]) == i + 1]
data.sort()
s = "\n".join(["%-7d %-8.2f" % (j[1],j[0]) for j in data]) + "\n"
fn = Prefix + "%04d-%dpc.txt" % (i+1, int(ClustPct[i]))
file(fn, "w").write(s)
#======== COMMAND-LINE RUNNING ========
def GetResList(Arg):
if Arg is None or Arg == "": return None
ResList = []
for s in """'"[]""":
Arg = Arg.replace(s,"")
for l in Arg.split(","):
if "-" in l:
a, b = [int(x) for x in l.split("-")]
ResList.extend(range(a-1, b))
else:
a = int(l)
ResList.append(a - 1)
ResList.sort()
ResList = [x for (i,x) in enumerate(ResList) if not x in ResList[i+1:]]
return ResList
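#Example: residue ranges are given 1-based on the command line and returned as
#0-based indices, e.g. GetResList("1-3,7") -> [0, 1, 2, 6]; GetResList("") -> None.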
def RepRMSD(r):
if not type(r) is list: r = [r]
if None in r:
return 'NA'
else:
return "%-8.2f" % mean(r)
def ClipRMSD(r):
if r is None:
return 1.e300
else:
return r
if __name__ == "__main__":
Args = scripttools.ParseArgs(sys.argv[1:])
#get options
Align = "align" in Args["FLAGS"]
CompResInd = GetResList(Args.get("compres", None))
CalcResInd = GetResList(Args.get("calcres", None))
#check for a trajectory
if "traj" in Args["FLAGS"]:
PdbRef, TrjFile, PrmtopFile = Args["ARGS"][:3]
NAvg = int(Args.get("avg", 1))
Cut = float(Args.get("frac", 0.))
NSkip = int(Args.get("nskip", 0))
NRead = int(Args.get("nread", -1))
NStride = int(Args.get("nstride", 1))
Trj = coords.TrjClass(TrjFile, PrmtopFile, NSkip = NSkip,
NRead = NRead, NStride = NStride)
pTrj = protein.ProteinClass()
pTrj.LinkTrj(Trj)
pRef = protein.ProteinClass(Pdb = PdbRef)
print "%-10s %-8s %-8s %-5s" % ("Frame","BB_RMSD", "All_RMSD", "NRes")
i = 0
y1, y2 = [], []
z1, z2 = [], []
for Pos in Trj:
i += 1
x1, NRes = RMSDProteinClass(pRef, pTrj, Backbone = True, AlignSeq = Align,
CompResInd = CompResInd, CalcResInd = CalcResInd)
x2, NRes = RMSDProteinClass(pRef, pTrj, Backbone = False, AlignSeq = Align,
CompResInd = CompResInd, CalcResInd = CalcResInd)
y1.append(x1)
y2.append(x2)
z1.append(ClipRMSD(x1))
z2.append(ClipRMSD(x2))
if i % NAvg == 0:
print "%-10d %-8s %-8s %-5d" % (Trj.Index+1, RepRMSD(y1), RepRMSD(y2), NRes)
y1, y2 = [], []
pTrj.UnlinkTrj()
if Cut > 0.:
z1 = array(z1, float)
z2 = array(z2, float)
frac1 = sum(z1 <= Cut) / float(len(z1))
frac2 = sum(z2 <= Cut) / float(len(z2))
print "\nFraction of trajectory within %.2f A:" % Cut
print " %.5f backbone" % frac1
print " %.5f all-atom" % frac2
else:
#get pdbs
Pdbs = [f for f in Args["ARGS"] if os.path.isfile(f)]
#check for files
for f in [f for f in Args["ARGS"] if not os.path.isfile(f)]:
print "Could not find %s." % f
N = len(Pdbs)
Filenames = [os.path.basename(x) for x in Pdbs]
if N <= 1:
print "Nothing to compare."
sys.exit()
if "first" in Args["FLAGS"]:
MaxLen1 = len(Filenames[0])
MaxLen2 = max([len(fn) for fn in Filenames[1:]])
print "%-*s %-*s %-8s %-8s %-5s" % (MaxLen1, "Pdb1", MaxLen2, "Pdb2",
"BB_RMSD", "All_RMSD", "NRes")
p1 = protein.ProteinClass(Pdb = Pdbs[0])
for j in range(1, N):
p2 = protein.ProteinClass(Pdb = Pdbs[j])
x1, NRes = RMSDProteinClass(p1, p2, Backbone = True, AlignSeq = Align,
CompResInd = CompResInd, CalcResInd = CalcResInd)
x2, NRes = RMSDProteinClass(p1, p2, Backbone = False, AlignSeq = Align,
CompResInd = CompResInd, CalcResInd = CalcResInd)
print "%-*s %-*s %-8s %-8s %-5d" % (MaxLen1, Filenames[0],
MaxLen2, Filenames[j],
RepRMSD(x1), RepRMSD(x2), NRes)
else:
MaxLen = max([len(fn) for fn in Filenames])
print "%-*s %-*s %-8s %-8s %-5s" % (MaxLen, "Pdb1", MaxLen, "Pdb2",
"BB_RMSD", "All_RMSD", "NRes")
Prots = [protein.ProteinClass(Pdb = f) for f in Pdbs]
for i in range(0, N):
for j in range(i+1, N):
x1, NRes = RMSDProteinClass(Prots[i], Prots[j], Backbone = True, AlignSeq = Align,
CompResInd = CompResInd, CalcResInd = CalcResInd)
x2, NRes = RMSDProteinClass(Prots[i], Prots[j], Backbone = False, AlignSeq = Align,
CompResInd = CompResInd, CalcResInd = CalcResInd)
print "%-*s %-*s %-8s %-8s %-5d" % (MaxLen, Filenames[i],
MaxLen, Filenames[j],
RepRMSD(x1), RepRMSD(x2), NRes)
|
[
"[email protected]"
] | |
ee13787901e1cb2cb22e3ad0a896df200708d570
|
1a166165ab8287d01cbb377a13efdb5eff5dfef0
|
/sdk/network/azure-mgmt-network/azure/mgmt/network/v2020_04_01/aio/operations/_ddos_custom_policies_operations.py
|
374762f783c3c434a29e76a5bebf00abdd56790d
|
[
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
] |
permissive
|
manoj0806/azure-sdk-for-python
|
7a14b202ff80f528abd068bf50334e91001a9686
|
aab999792db1132232b2f297c76800590a901142
|
refs/heads/master
| 2023-04-19T16:11:31.984930 | 2021-04-29T23:19:49 | 2021-04-29T23:19:49 | 363,025,016 | 1 | 0 |
MIT
| 2021-04-30T04:23:35 | 2021-04-30T04:23:35 | null |
UTF-8
|
Python
| false | false | 20,334 |
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class DdosCustomPoliciesOperations:
"""DdosCustomPoliciesOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2020_04_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def _delete_initial(
self,
resource_group_name: str,
ddos_custom_policy_name: str,
**kwargs
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-04-01"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'ddosCustomPolicyName': self._serialize.url("ddos_custom_policy_name", ddos_custom_policy_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ddosCustomPolicies/{ddosCustomPolicyName}'} # type: ignore
async def begin_delete(
self,
resource_group_name: str,
ddos_custom_policy_name: str,
**kwargs
) -> AsyncLROPoller[None]:
"""Deletes the specified DDoS custom policy.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param ddos_custom_policy_name: The name of the DDoS custom policy.
:type ddos_custom_policy_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
ddos_custom_policy_name=ddos_custom_policy_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'ddosCustomPolicyName': self._serialize.url("ddos_custom_policy_name", ddos_custom_policy_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ddosCustomPolicies/{ddosCustomPolicyName}'} # type: ignore
async def get(
self,
resource_group_name: str,
ddos_custom_policy_name: str,
**kwargs
) -> "_models.DdosCustomPolicy":
"""Gets information about the specified DDoS custom policy.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param ddos_custom_policy_name: The name of the DDoS custom policy.
:type ddos_custom_policy_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DdosCustomPolicy, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2020_04_01.models.DdosCustomPolicy
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.DdosCustomPolicy"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-04-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'ddosCustomPolicyName': self._serialize.url("ddos_custom_policy_name", ddos_custom_policy_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('DdosCustomPolicy', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ddosCustomPolicies/{ddosCustomPolicyName}'} # type: ignore
async def _create_or_update_initial(
self,
resource_group_name: str,
ddos_custom_policy_name: str,
parameters: "_models.DdosCustomPolicy",
**kwargs
) -> "_models.DdosCustomPolicy":
cls = kwargs.pop('cls', None) # type: ClsType["_models.DdosCustomPolicy"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-04-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'ddosCustomPolicyName': self._serialize.url("ddos_custom_policy_name", ddos_custom_policy_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'DdosCustomPolicy')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('DdosCustomPolicy', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('DdosCustomPolicy', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ddosCustomPolicies/{ddosCustomPolicyName}'} # type: ignore
async def begin_create_or_update(
self,
resource_group_name: str,
ddos_custom_policy_name: str,
parameters: "_models.DdosCustomPolicy",
**kwargs
) -> AsyncLROPoller["_models.DdosCustomPolicy"]:
"""Creates or updates a DDoS custom policy.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param ddos_custom_policy_name: The name of the DDoS custom policy.
:type ddos_custom_policy_name: str
:param parameters: Parameters supplied to the create or update operation.
:type parameters: ~azure.mgmt.network.v2020_04_01.models.DdosCustomPolicy
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either DdosCustomPolicy or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2020_04_01.models.DdosCustomPolicy]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.DdosCustomPolicy"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_initial(
resource_group_name=resource_group_name,
ddos_custom_policy_name=ddos_custom_policy_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('DdosCustomPolicy', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'ddosCustomPolicyName': self._serialize.url("ddos_custom_policy_name", ddos_custom_policy_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ddosCustomPolicies/{ddosCustomPolicyName}'} # type: ignore
async def update_tags(
self,
resource_group_name: str,
ddos_custom_policy_name: str,
parameters: "_models.TagsObject",
**kwargs
) -> "_models.DdosCustomPolicy":
"""Update a DDoS custom policy tags.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param ddos_custom_policy_name: The name of the DDoS custom policy.
:type ddos_custom_policy_name: str
:param parameters: Parameters supplied to update DDoS custom policy resource tags.
:type parameters: ~azure.mgmt.network.v2020_04_01.models.TagsObject
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DdosCustomPolicy, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2020_04_01.models.DdosCustomPolicy
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.DdosCustomPolicy"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-04-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.update_tags.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'ddosCustomPolicyName': self._serialize.url("ddos_custom_policy_name", ddos_custom_policy_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'TagsObject')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('DdosCustomPolicy', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ddosCustomPolicies/{ddosCustomPolicyName}'} # type: ignore
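# Hedged usage sketch (not part of the generated module; object names are
# illustrative): these operations are normally reached through the versioned
# async client, which attaches them as the `ddos_custom_policies` attribute:
#
#     from azure.identity.aio import DefaultAzureCredential
#     from azure.mgmt.network.v2020_04_01.aio import NetworkManagementClient
#
#     async with NetworkManagementClient(DefaultAzureCredential(), subscription_id) as client:
#         policy = await client.ddos_custom_policies.get(resource_group, policy_name)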
|
[
"[email protected]"
] | |
46b142b96d6ec205f215bf65fe76cf618722fad6
|
7236d1d4873faa9735fd5e2d4598b211a370f731
|
/project/n/projects/projects/ecommapp/users/migrations/0007_myuser_date_join.py
|
d2f2c4be22f4cc171f14f93f40710f105bb9009e
|
[] |
no_license
|
Dreambigxz/my_first_django_app
|
05f5a5d330d72084489f9306fca9ca232af13999
|
9e21ebcbe63c7394280558d2977ef8a796960e0d
|
refs/heads/main
| 2023-01-03T18:45:20.712074 | 2020-10-23T09:05:47 | 2020-10-23T09:05:47 | 306,180,592 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 453 |
py
|
# Generated by Django 3.0.8 on 2020-09-03 16:55
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('users', '0006_auto_20200903_1752'),
]
operations = [
migrations.AddField(
model_name='myuser',
name='date_join',
field=models.DateField(default=django.utils.timezone.now),
),
]
|
[
"[email protected]"
] | |
fe14ddadd2c999acc769a9198b0b5ef3dfa6dab1
|
167c6226bc77c5daaedab007dfdad4377f588ef4
|
/python/ql/src/Security/CWE-377/InsecureTemporaryFile.py
|
3f2357e0a19b2399eb7180d8bb003938d2610822
|
[
"MIT"
] |
permissive
|
github/codeql
|
1eebb449a34f774db9e881b52cb8f7a1b1a53612
|
d109637e2d7ab3b819812eb960c05cb31d9d2168
|
refs/heads/main
| 2023-08-20T11:32:39.162059 | 2023-08-18T14:33:32 | 2023-08-18T14:33:32 | 143,040,428 | 5,987 | 1,363 |
MIT
| 2023-09-14T19:36:50 | 2018-07-31T16:35:51 |
CodeQL
|
UTF-8
|
Python
| false | false | 184 |
py
|
from tempfile import mktemp
def write_results(results):
filename = mktemp()
with open(filename, "w+") as f:
f.write(results)
print("Results written to", filename)
|
[
"[email protected]"
] | |
0f6adf9068518e8bc6f8cb156925ad34697c4170
|
043632ae21132223485f480f49863e8f1fa776d5
|
/site-packages/robotlibcore.py
|
c9b7acc9f31ea02c262d6c3e56296367e9b6c036
|
[] |
no_license
|
Eswarinisha/bapacho
|
4b0da361cf7624c13ae04eda27ef43b6682dc4a7
|
67300307e39a005f077a66c28c9e055001940915
|
refs/heads/master
| 2023-05-08T13:01:50.521561 | 2021-06-08T11:20:39 | 2021-06-08T11:20:39 | 307,718,868 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 11,117 |
py
|
# Copyright 2017- Robot Framework Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generic test library core for Robot Framework.
Main usage is easing creating larger test libraries. For more information and
examples see the project pages at
https://github.com/robotframework/PythonLibCore
"""
import inspect
import os
import sys
from robot.utils import PY_VERSION
try:
import typing
except ImportError:
typing = None
from robot.api.deco import keyword # noqa F401
from robot import __version__ as robot_version
PY2 = sys.version_info < (3,)
RF31 = robot_version < '3.2'
__version__ = '2.1.0'
class HybridCore(object):
def __init__(self, library_components):
self.keywords = {}
self.keywords_spec = {}
self.attributes = {}
self.add_library_components(library_components)
self.add_library_components([self])
def add_library_components(self, library_components):
self.keywords_spec['__init__'] = KeywordBuilder.build(self.__init__)
for component in library_components:
for name, func in self.__get_members(component):
if callable(func) and hasattr(func, 'robot_name'):
kw = getattr(component, name)
kw_name = func.robot_name or name
self.keywords[kw_name] = kw
self.keywords_spec[kw_name] = KeywordBuilder.build(kw)
# Expose keywords as attributes both using original
# method names as well as possible custom names.
self.attributes[name] = self.attributes[kw_name] = kw
def __get_members(self, component):
if inspect.ismodule(component):
return inspect.getmembers(component)
if inspect.isclass(component):
raise TypeError('Libraries must be modules or instances, got '
'class {!r} instead.'.format(component.__name__))
if type(component) != component.__class__:
raise TypeError('Libraries must be modules or new-style class '
'instances, got old-style class {!r} instead.'
.format(component.__class__.__name__))
return self.__get_members_from_instance(component)
def __get_members_from_instance(self, instance):
# Avoid calling properties by getting members from class, not instance.
cls = type(instance)
for name in dir(instance):
owner = cls if hasattr(cls, name) else instance
yield name, getattr(owner, name)
def __getattr__(self, name):
if name in self.attributes:
return self.attributes[name]
raise AttributeError('{!r} object has no attribute {!r}'
.format(type(self).__name__, name))
def __dir__(self):
if PY2:
my_attrs = dir(type(self)) + list(self.__dict__)
else:
my_attrs = super().__dir__()
return sorted(set(my_attrs) | set(self.attributes))
def get_keyword_names(self):
return sorted(self.keywords)
class DynamicCore(HybridCore):
def run_keyword(self, name, args, kwargs=None):
return self.keywords[name](*args, **(kwargs or {}))
def get_keyword_arguments(self, name):
spec = self.keywords_spec.get(name)
return spec.argument_specification
def get_keyword_tags(self, name):
return self.keywords[name].robot_tags
def get_keyword_documentation(self, name):
if name == '__intro__':
return inspect.getdoc(self) or ''
spec = self.keywords_spec.get(name)
return spec.documentation
def get_keyword_types(self, name):
spec = self.keywords_spec.get(name)
if spec is None:
raise ValueError('Keyword "%s" not found.' % name)
return spec.argument_types
def __get_keyword(self, keyword_name):
if keyword_name == '__init__':
return self.__init__
if keyword_name.startswith('__') and keyword_name.endswith('__'):
return None
method = self.keywords.get(keyword_name)
if not method:
raise ValueError('Keyword "%s" not found.' % keyword_name)
return method
def get_keyword_source(self, keyword_name):
method = self.__get_keyword(keyword_name)
path = self.__get_keyword_path(method)
line_number = self.__get_keyword_line(method)
if path and line_number:
return '%s:%s' % (path, line_number)
if path:
return path
if line_number:
return ':%s' % line_number
return None
def __get_keyword_line(self, method):
try:
lines, line_number = inspect.getsourcelines(method)
except (OSError, IOError, TypeError):
return None
for increment, line in enumerate(lines):
if line.strip().startswith('def '):
return line_number + increment
return line_number
def __get_keyword_path(self, method):
try:
return os.path.normpath(inspect.getfile(method))
except TypeError:
return None
class KeywordBuilder(object):
@classmethod
def build(cls, function):
return KeywordSpecification(
argument_specification=cls._get_arguments(function),
documentation=inspect.getdoc(function) or '',
argument_types=cls._get_types(function)
)
@classmethod
def _get_arguments(cls, function):
arg_spec = cls._get_arg_spec(function)
argument_specification = cls._get_default_and_named_args(
arg_spec, function
)
argument_specification.extend(cls._get_var_args(arg_spec))
kw_only_args = cls._get_kw_only(arg_spec)
if kw_only_args:
argument_specification.extend(kw_only_args)
argument_specification.extend(cls._get_kwargs(arg_spec))
return argument_specification
@classmethod
def _get_arg_spec(cls, function):
if PY2:
return inspect.getargspec(function)
return inspect.getfullargspec(function)
@classmethod
def _get_default_and_named_args(cls, arg_spec, function):
args = cls._drop_self_from_args(function, arg_spec)
args.reverse()
defaults = list(arg_spec.defaults) if arg_spec.defaults else []
formated_args = []
for arg in args:
if defaults:
formated_args.append(
cls._format_defaults(arg, defaults.pop())
)
else:
formated_args.append(arg)
formated_args.reverse()
return formated_args
@classmethod
def _drop_self_from_args(cls, function, arg_spec):
return arg_spec.args[1:] if inspect.ismethod(function) else arg_spec.args
@classmethod
def _get_var_args(cls, arg_spec):
if arg_spec.varargs:
return ['*%s' % arg_spec.varargs]
return []
@classmethod
def _get_kwargs(cls, arg_spec):
if PY2:
return ['**%s' % arg_spec.keywords] if arg_spec.keywords else []
return ['**%s' % arg_spec.varkw] if arg_spec.varkw else []
@classmethod
def _get_kw_only(cls, arg_spec):
kw_only_args = []
if PY2:
return kw_only_args
for arg in arg_spec.kwonlyargs:
if not arg_spec.kwonlydefaults or arg not in arg_spec.kwonlydefaults:
kw_only_args.append(arg)
else:
value = arg_spec.kwonlydefaults.get(arg, '')
kw_only_args.append(cls._format_defaults(arg, value))
return kw_only_args
@classmethod
def _format_defaults(cls, arg, value):
if RF31:
return '%s=%s' % (arg, value)
return arg, value
@classmethod
def _get_types(cls, function):
if function is None:
return function
types = getattr(function, 'robot_types', ())
if types is None or types:
return types
return cls._get_typing_hints(function)
@classmethod
def _get_typing_hints(cls, function):
if PY2:
return {}
try:
hints = typing.get_type_hints(function)
except Exception:
hints = function.__annotations__
arg_spec = cls._get_arg_spec(function)
all_args = cls._args_as_list(function, arg_spec)
for arg_with_hint in list(hints):
# remove return and self statements
if arg_with_hint not in all_args:
hints.pop(arg_with_hint)
default = cls._get_defaults(arg_spec)
return cls._remove_optional_none_type_hints(hints, default)
@classmethod
def _args_as_list(cls, function, arg_spec):
function_args = []
function_args.extend(cls._drop_self_from_args(function, arg_spec))
if arg_spec.varargs:
function_args.append(arg_spec.varargs)
function_args.extend(arg_spec.kwonlyargs or [])
if arg_spec.varkw:
function_args.append(arg_spec.varkw)
return function_args
# Copied from: robot.running.arguments.argumentparser
@classmethod
def _remove_optional_none_type_hints(cls, type_hints, defaults):
# If argument has None as a default, typing.get_type_hints adds
# optional None to the information it returns. We don't want that.
for arg, default in defaults:
if default is None and arg in type_hints:
type_ = type_hints[arg]
if cls._is_union(type_):
types = type_.__args__
if len(types) == 2 and types[1] is type(None): # noqa
type_hints[arg] = types[0]
return type_hints
# Copied from: robot.running.arguments.argumentparser
@classmethod
def _is_union(cls, typing_type):
if PY_VERSION >= (3, 7) and hasattr(typing_type, '__origin__'):
typing_type = typing_type.__origin__
return isinstance(typing_type, type(typing.Union))
@classmethod
def _get_defaults(cls, arg_spec):
if not arg_spec.defaults:
return {}
names = arg_spec.args[-len(arg_spec.defaults):]
return zip(names, arg_spec.defaults)
class KeywordSpecification(object):
def __init__(self, argument_specification=None, documentation=None, argument_types=None):
self.argument_specification = argument_specification
self.documentation = documentation
self.argument_types = argument_types
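# Hedged usage sketch (class and keyword names below are illustrative, not part
# of this module): a larger library typically aggregates keyword components:
#
#     class _BrowserKeywords(object):
#         @keyword
#         def open_start_page(self):
#             pass
#
#     class MyLibrary(DynamicCore):
#         def __init__(self):
#             DynamicCore.__init__(self, [_BrowserKeywords()])
#
# Robot Framework then discovers keywords via get_keyword_names() and executes
# them through run_keyword(), as implemented above.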
|
[
"[email protected]"
] | |
67d8405dae494c985db55a7991291fe6a81e390a
|
38c10c01007624cd2056884f25e0d6ab85442194
|
/third_party/chromite/cbuildbot/autotest_rpc_errors.py
|
1ee19f4a5238f93886962b5f9968b1f009275cf6
|
[
"BSD-3-Clause",
"LGPL-2.0-or-later",
"GPL-1.0-or-later",
"MIT",
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
zenoalbisser/chromium
|
6ecf37b6c030c84f1b26282bc4ef95769c62a9b2
|
e71f21b9b4b9b839f5093301974a45545dad2691
|
refs/heads/master
| 2022-12-25T14:23:18.568575 | 2016-07-14T21:49:52 | 2016-07-23T08:02:51 | 63,980,627 | 0 | 2 |
BSD-3-Clause
| 2022-12-12T12:43:41 | 2016-07-22T20:14:04 | null |
UTF-8
|
Python
| false | false | 670 |
py
|
# Copyright 2015 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Error codes used for the Autotest RPC Client, Proxy, and Server.
This is a copy of scripts/slave-internal/autotest_rpc/autotest_rpc_errors.py
from https://chrome-internal.googlesource.com/chrome/tools/build.
"""
CLIENT_CANNOT_CONNECT = 1
CLIENT_HTTP_CODE = 2
CLIENT_EMPTY_RESPONSE = 3
CLIENT_NO_RETURN_CODE = 4
PROXY_CANNOT_SEND_REQUEST = 11
PROXY_CONNECTION_LOST = 12
PROXY_TIMED_OUT = 13
SERVER_NO_COMMAND = 21
SERVER_NO_ARGUMENTS = 22
SERVER_UNKNOWN_COMMAND = 23
SERVER_BAD_ARGUMENT_COUNT = 24
|
[
"[email protected]"
] | |
cc30e9d67261942ea1e7ab2d69f8c72d5c1abeb2
|
e9cd817d3da30418faaba07d0413bee73beae28d
|
/comp_204/24_lists.py
|
b9ef8f46c3fcfc7e5717999c8ef6177c0d73db12
|
[] |
no_license
|
pierreTklein/ReviewSessions
|
b12460c4f7db0c8fc086783d45afedb0769ef1cb
|
92b445bfa4a6520fe7e3ce34c3d352f37a5d87a7
|
refs/heads/master
| 2020-04-10T02:27:58.294803 | 2018-12-07T17:02:28 | 2018-12-07T17:02:28 | 160,744,980 | 0 | 0 | null | 2018-12-07T17:02:30 | 2018-12-06T23:23:51 |
Python
|
UTF-8
|
Python
| false | false | 192 |
py
|
thisList = [1, 4, 1, 2, 3, 10]
a = thisList.reverse()
b = thisList.sort()
c = sorted(thisList)
d = min(thisList)
e = max(thisList)
print(a)
print(b)
print(c)
print(b is c)
print(d)
print(e)
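# Note: list.reverse() and list.sort() mutate the list in place and return None,
# so a and b print as None, while sorted() returns a new sorted list (c).
# That is also why "b is c" prints False; d and e are 1 and 10.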
|
[
"[email protected]"
] | |
5b25230969a0ac12fc34559b94806650e5289631
|
aef287bf41ac5ad2b1f45b3087094103db514489
|
/FuncionesAvanzadas/EjecicioComprimir.py
|
2c02606cded79cfcbeac711eec92f7d8da021684
|
[] |
no_license
|
macantivbl/Python
|
1180c3e6d694ecbfb97eefecb0fba8374f9d7d88
|
2e7663a0a5790caf05e8439940b48fbd220000cb
|
refs/heads/master
| 2023-07-07T07:05:46.549667 | 2021-08-17T03:47:41 | 2021-08-17T03:47:41 | 385,390,584 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 141 |
py
|
lista = ['a','b','c','b','d','m','n','n']
duplicados = list(set([value for value in lista if lista.count(value)>1 ]))
print(duplicados)
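# Nota (hedged alternative): lista.count(value) inside the comprehension makes
# this O(n^2); collections.Counter finds duplicates in one pass, e.g.
#     from collections import Counter
#     duplicados = [v for v, n in Counter(lista).items() if n > 1]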
|
[
"[email protected]"
] | |
a83555606a892205a49c7f27d850b79e4d48341c
|
375e5bfe9c6e9e2fb962fd58e38c3ee0ec1a29d1
|
/Lab3 - Duality/ej4.py
|
65eb4c4ded2942f2251cefef48e1853f2cd32cff
|
[] |
no_license
|
luciabouza/Optimization-theory-and-algorithm
|
1cf90395b0c4383e906333a5672d9a3504b75627
|
23333663f1fc5dc8846efd6ed6f3fd2ca76cef44
|
refs/heads/main
| 2023-05-27T12:28:11.647820 | 2021-06-10T16:32:12 | 2021-06-10T16:32:12 | 375,763,709 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 857 |
py
|
import cvxpy as cp
import numpy as np
import matplotlib.pyplot as plt
def resolver(d2):
p = cp.Variable(3)
g1 = cp.Variable(1)
g2 = cp.Variable(1)
t = cp.Variable(1)
c1 = g1
c2 = 4 * (g2 - 40)
cost = c1 + t
constraints = [ [1,0,1]@p==g1, g2+[0,1,1]@p==d2, [1,-1,0]@p==10 , [-1,-1,1]@p==0 , [0,1,0]@p<= 30 ,[0,-1,0]@p<= 30, g1>=0, g2>=0, c2<=t , 0<=t]
prob = cp.Problem(cp.Minimize(cost),constraints)
prob.solve()
return prob.value, g1, g2, p, constraints
g1, g2, p2, l = [], [], [], []
for d2 in range(201):
value, g1i, g2i, p, constraints = resolver(d2)
g1.append(g1i.value)
g2.append(g2i.value)
p2.append(p[1].value)
l.append(constraints[2].dual_value)
plt.grid()
plt.xlabel('d2')
plt.plot(g1)
plt.plot(g2)
plt.plot(p2)
plt.plot(l)
plt.legend(['g1', 'g2', 'p2', 'lambda'])
plt.show()
|
[
"[email protected]"
] | |
116d387dd717fabe096b4ea161ad403d2870e88a
|
33976fddb32feae0b6b5d38b0a8994490fc4b1db
|
/src/ar6/constants/gases.py
|
4df95e992cbd6ed95181fc2ed1bf4bafd19e54c8
|
[
"MIT"
] |
permissive
|
chrisroadmap/ar6
|
e72e4bad8d1c1fa2751513dbecddb8508711859c
|
2f948c862dbc158182ba47b863395ec1a4aa7998
|
refs/heads/main
| 2023-04-16T22:57:02.280787 | 2022-09-27T13:31:38 | 2022-09-27T13:31:38 | 305,981,969 | 27 | 20 |
MIT
| 2022-09-27T13:31:38 | 2020-10-21T10:02:03 |
Jupyter Notebook
|
UTF-8
|
Python
| false | false | 6,315 |
py
|
"""
Gas properties
"""
# Number of bromine atoms
br_atoms = {
'CCl4': 0,
'CFC11': 0,
'CFC113': 0,
'CFC114': 0,
'CFC115': 0,
'CFC12': 0,
'CH2Cl2': 0,
'CH3Br': 1,
'CH3CCl3': 0,
'CH3Cl': 0,
'CHCl3': 0,
'HCFC141b': 0,
'HCFC142b': 0,
'HCFC22': 0,
'Halon1211': 1,
'Halon1301': 1,
'Halon2402': 2,
}
# Number of chlorine atoms
cl_atoms = {
'CCl4': 4,
'CFC11': 3,
'CFC113': 3,
'CFC114': 2,
'CFC115': 1,
'CFC12': 2,
'CH2Cl2': 2,
'CH3Br': 0,
'CH3CCl3': 3,
'CH3Cl': 1,
'CHCl3': 3,
'HCFC141b': 2,
'HCFC142b': 1,
'HCFC22': 1,
'Halon1211': 0,
'Halon1301': 0,
'Halon2402': 0,
}
# Fractional release (for ozone depletion)
# References:
# Daniel, J. and Velders, G.: A focus on information and options for
# policymakers, in: Scientific Assessment of Ozone Depletion, WMO, 2011
# Newman et al., 2007: A new formulation of equivalent effective stratospheric
# chlorine (EESC)
fracrel = {
'CCl4': 0.56,
'CFC11': 0.47,
'CFC113': 0.29,
'CFC114': 0.12,
'CFC115': 0.04,
'CFC12': 0.23,
'CH2Cl2': 0, # no literature value available
'CH3Br': 0.60,
'CH3CCl3': 0.67,
'CH3Cl': 0.44,
'CHCl3': 0, # no literature value available
'HCFC141b': 0.34,
'HCFC142b': 0.17,
'HCFC22': 0.13,
'Halon1211': 0.62,
'Halon1301': 0.28,
'Halon2402': 0.65,
}
# Conversion between GHG names in GHG spreadsheet and RCMIP.
ghg_to_rcmip_names={
'HFC-125': 'HFC125',
'HFC-134a': 'HFC134a',
'HFC-143a': 'HFC143a',
'HFC-152a': 'HFC152a',
'HFC-227ea': 'HFC227ea',
'HFC-23': 'HFC23',
'HFC-236fa': 'HFC236fa',
'HFC-245fa': 'HFC245fa',
'HFC-32': 'HFC32',
'HFC-365mfc': 'HFC365mfc',
'HFC-43-10mee': 'HFC4310mee',
'NF3': 'NF3',
'C2F6': 'C2F6',
'C3F8': 'C3F8',
'n-C4F10': 'C4F10',
'n-C5F12': 'C5F12',
'n-C6F14': 'C6F14',
'i-C6F14': None,
'C7F16': 'C7F16',
'C8F18': 'C8F18',
'CF4': 'CF4',
'c-C4F8': 'cC4F8',
'SF6': 'SF6',
'SO2F2': 'SO2F2',
'CCl4': 'CCl4',
'CFC-11': 'CFC11',
'CFC-112': 'CFC112',
'CFC-112a': None,
'CFC-113': 'CFC113',
'CFC-113a': None,
'CFC-114': 'CFC114',
'CFC-114a': None,
'CFC-115': 'CFC115',
'CFC-12': 'CFC12',
'CFC-13': None,
'CH2Cl2': 'CH2Cl2',
'CH3Br': 'CH3Br',
'CH3CCl3': 'CH3CCl3',
'CH3Cl': 'CH3Cl',
'CHCl3': 'CHCl3',
'HCFC-124': None,
'HCFC-133a': None,
'HCFC-141b': 'HCFC141b',
'HCFC-142b': 'HCFC142b',
'HCFC-22': 'HCFC22',
'HCFC-31': None,
'Halon-1211': 'Halon1211',
'Halon-1301': 'Halon1301',
'Halon-2402': 'Halon2402',
}
# Hodnebrog et al., 2020: https://agupubs.onlinelibrary.wiley.com/doi/full/10.1029/2019RG000691
# unless stated
lifetimes = {
'CH4': 11.8, # chapter 6
'N2O': 109, # AR6 SOD
'HFC-125': 30,
'HFC-134a': 14,
'HFC-143a': 51,
'HFC-152a': 1.6,
'HFC-227ea': 36,
'HFC-23': 228,
'HFC-236fa': 213,
'HFC-245fa': 7.9,
'HFC-32': 5.4,
'HFC-365mfc': 8.9,
'HFC-43-10mee': 17,
'NF3': 569,
'C2F6': 10000,
'C3F8': 2600,
'n-C4F10': 2600,
'n-C5F12': 4100,
'n-C6F14': 3100,
'i-C6F14': 3100, # assumed
'C7F16': 3000,
'C8F18': 3000,
'CF4': 50000,
'c-C4F8': 3200,
'SF6': 3200,
'SO2F2': 36,
'CCl4': 32,
'CFC-11': 52,
'CFC-112': 63.6,
'CFC-112a': 52,
'CFC-113': 93,
'CFC-113a': 55,
'CFC-114': 189,
'CFC-114a': 105,
'CFC-115': 540,
'CFC-12': 102,
'CFC-13': 640,
'CH2Cl2': 0.4932,
'CH3Br': 0.8,
'CH3CCl3': 5,
'CH3Cl': 0.9,
'CHCl3': 0.5014,
'HCFC-124': 5.9,
'HCFC-133a': 4.6,
'HCFC-141b': 9.4,
'HCFC-142b': 18,
'HCFC-22': 11.9,
'HCFC-31': 1.2,
'Halon-1211': 16,
'Halon-1301': 72,
'Halon-2402': 28,
}
# Ozone depleting substances
ods_species = [
'CCl4',
'CFC11',
'CFC113',
'CFC114',
'CFC115',
'CFC12',
'CH2Cl2',
'CH3Br',
'CH3CCl3',
'CH3Cl',
'CHCl3',
'HCFC141b',
'HCFC142b',
'HCFC22',
'Halon1211',
'Halon1301',
'Halon2402',
]
# radiative efficiencies
# source: Hodnebrog et al 2020 https://agupubs.onlinelibrary.wiley.com/doi/full/10.1029/2019RG000691
radeff = {
'HFC-125': 0.23378,
'HFC-134a': 0.16714,
'HFC-143a': 0.168,
'HFC-152a': 0.10174,
'HFC-227ea': 0.27325,
'HFC-23': 0.19111,
'HFC-236fa': 0.25069,
'HFC-245fa': 0.24498,
'HFC-32': 0.11144,
'HFC-365mfc': 0.22813,
'HFC-43-10mee': 0.35731,
'NF3': 0.20448,
'C2F6': 0.26105,
'C3F8': 0.26999,
'n-C4F10': 0.36874,
'n-C5F12': 0.4076,
'n-C6F14': 0.44888,
'i-C6F14': 0.44888,
'C7F16': 0.50312,
'C8F18': 0.55787,
'CF4': 0.09859,
'c-C4F8': 0.31392,
'SF6': 0.56657,
'SO2F2': 0.21074,
'CCl4': 0.16616,
'CFC-11': 0.25941,
'CFC-112': 0.28192,
'CFC-112a': 0.24564,
'CFC-113': 0.30142,
'CFC-113a': 0.24094,
'CFC-114': 0.31433,
'CFC-114a': 0.29747,
'CFC-115': 0.24625,
'CFC-12': 0.31998,
'CFC-13': 0.27752,
'CH2Cl2': 0.02882,
'CH3Br': 0.00432,
'CH3CCl3': 0.06454,
'CH3Cl': 0.00466,
'CHCl3': 0.07357,
'HCFC-124': 0.20721,
'HCFC-133a': 0.14995,
'HCFC-141b': 0.16065,
'HCFC-142b': 0.19329,
'HCFC-22': 0.21385,
'HCFC-31': 0.068,
'Halon-1211': 0.30014,
'Halon-1301': 0.29943,
'Halon-2402': 0.31169,
}
rcmip_to_ghg_names = {v: k for k, v in ghg_to_rcmip_names.items()}
|
[
"[email protected]"
] | |
c528cf17d4c0b1fcd15cc529cebd10f3ba3b5ec7
|
394d81287d175851334ff2bc78c0dbd471923815
|
/chapter2/weather.py
|
c826fd5a5ab63877dcfb40731b2a73c59688d652
|
[
"MIT"
] |
permissive
|
rongpenl/design-patterns-python
|
b14200b91af017bf08701c1c570059d25efc5c83
|
cbe7e6da6ae94604abad256bd807af1642fee915
|
refs/heads/master
| 2022-12-28T00:00:26.853030 | 2020-09-22T04:30:03 | 2020-09-22T04:30:03 | 297,237,711 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,049 |
py
|
# observer pattern
from abc import ABC, abstractmethod
class Observer(ABC):
def __init__(self):
pass
@abstractmethod
def update(self, temperature: float, humidity: float, pressure: float):
pass
class Display(ABC):
def __init__(self):
pass
@abstractmethod
def display(self):
pass
class Subject(ABC):
def __init__(self):
pass
@abstractmethod
def register_observer(self, observer: Observer):
pass
@abstractmethod
def remove_observer(self, observer: Observer):
pass
@abstractmethod
def notify_observer(self):
pass
class WeatherData(Subject):
def __init__(self):
self._observers = set()
self._temperature = None
self._humidity = None
self._pressure = None
def register_observer(self, observer: Observer):
self._observers.add(observer)
def remove_observer(self, observer: Observer):
self._observers.remove(observer)
def notify_observer(self):
for observer in self._observers:
observer.update(self._temperature, self._humidity, self._pressure)
# print("Weather Data is updating {}".format(str(observer)))
def measure_changed(self):
self.notify_observer()
def set_measurement(self, temp: float, humidity: float, pressure: float):
self._temperature = temp
self._humidity = humidity
self._pressure = pressure
self.measure_changed()
class CurrentConditionDisplay(Observer, Display):
def __init__(self, weather_data: WeatherData):
self._weather_data = weather_data
self._weather_data.register_observer(self)
self._temp = None
self._humidity = None
self._pressure = None
def __repr__(self):
return "a condition display"
def update(self, temperature: float, humidity: float, pressure: float):
self._temp = temperature
self._humidity = humidity
self._pressure = pressure
self.display()
def display(self):
print("humidity:{},pressure:{},temperature:{}".format(self._humidity,
self._pressure,
self._temp))
class StatisticsDisplay(Observer, Display):
def __init__(self, weather_data: WeatherData):
self._weather_data = weather_data
self._weather_data.register_observer(self)
self._temperatures = []
self._stats = {"min": None, "max": None, "ave": None}
def __repr__(self):
return "a statistics display"
def update(self, temperature: float, humidity: float, pressure: float):
self._temperatures.append(temperature)
self._stats["min"] = min(self._temperatures)
self._stats["max"] = max(self._temperatures)
self._stats["ave"] = sum(self._temperatures)/(len(self._temperatures))
self.display()
def display(self):
print("Avg/Max/Min temperature = {}/{}/{}".format(self._stats["ave"],
self._stats["max"],
self._stats["min"]))
class PredictionDisplay(Observer, Display):
def __init__(self, weather_data: WeatherData):
self._weather_data = weather_data
self._weather_data.register_observer(self)
def __repr__(self):
return "a prediction display"
def update(self, temperature: float, humidity: float, pressure: float):
self.display()
def display(self):
print("Building Predictions")
class WeatherStation():
def __init__(self):
weather_data = WeatherData()
CurrentConditionDisplay(weather_data)
StatisticsDisplay(weather_data)
PredictionDisplay(weather_data)
weather_data.set_measurement(1, 101, 10001)
weather_data.set_measurement(2, 102, 10002)
weather_data.set_measurement(3, 103, 10003)
if __name__ == "__main__":
WeatherStation()
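# A minimal sketch (not part of the original module) of how a further observer
# would plug in: subclass Observer and Display, register against a WeatherData
# subject, and it gets called on every set_measurement(). The class name
# HeatIndexDisplay is hypothetical.
#
#   class HeatIndexDisplay(Observer, Display):
#       def __init__(self, weather_data: WeatherData):
#           weather_data.register_observer(self)
#       def update(self, temperature: float, humidity: float, pressure: float):
#           self.display()
#       def display(self):
#           print("Computing heat index")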
|
[
"[email protected]"
] | |
e7b7e42217feeb3f01953b5944a02e89c881750e
|
8ef9d7cb0358228eae8a59db54229ef6c565c8bf
|
/celery_apps/tasks2.py
|
e9e9e230086eb41828828cd47d60bb8e9790a607
|
[] |
no_license
|
puguojingm/celery-learn
|
91bba39770b72ca6005dda000f6ae27f6b21fdf1
|
8f0a9538a7a39ca44abaaa148ec36d5c7dfc596c
|
refs/heads/master
| 2022-01-30T23:27:26.651631 | 2019-06-13T15:33:35 | 2019-06-13T15:33:35 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 130 |
py
|
from celery_apps import app
import time
@app.task
def multiply(x,y):
print ("multiply",x,y)
time.sleep(3)
return x*y
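# Hypothetical usage sketch (not part of this module), assuming a broker and a
# worker for the celery_apps app are running:
#
#   result = multiply.delay(2, 3)    # enqueue the task, returns an AsyncResult
#   print(result.get(timeout=10))    # block until the worker returns 6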
|
[
"[email protected]"
] | |
fdd5e59bb7f0d8b1e7d05261aae50d508479adfd
|
b0740629e6f17c9626315a95f25dd82d2be45a14
|
/tradingApp/migrations/0008_cart_gnumber.py
|
07b2fbe43308b0c78afb900c6e862d273962bb46
|
[] |
no_license
|
helloluoc/tradingsystem
|
19cea7c4959424993c15c02a6b0efa814dff5eef
|
d372c2ee3cdbcdfe4a1cd86edb164dbae876bc8b
|
refs/heads/master
| 2022-12-13T01:27:16.035413 | 2019-06-17T09:49:49 | 2019-06-17T09:49:49 | 141,142,090 | 0 | 1 | null | 2022-12-08T02:16:50 | 2018-07-16T13:25:32 |
Python
|
UTF-8
|
Python
| false | false | 450 |
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.7 on 2018-07-04 16:21
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('tradingApp', '0007_auto_20180704_1608'),
]
operations = [
migrations.AddField(
model_name='cart',
name='gnumber',
field=models.IntegerField(default=0),
),
]
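# For reference only (an assumption, not part of the generated migration), the
# model change behind this operation would look roughly like:
#
#   class Cart(models.Model):
#       ...
#       gnumber = models.IntegerField(default=0)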
|
[
"[email protected]"
] | |
66105928bd45916298b44a3115b1fea9210a9202
|
a3b07b1b616aed0010d9a7b9c781130c41551409
|
/majcats/majcat_single/majcat_single/asgi.py
|
fb304cb28f65f1f35f58e21a66c000865048dbba
|
[] |
no_license
|
AmeRefrain/majcat
|
a43c2ca760fbeaf00017c6eee3afc5442800fe80
|
64caa5def271c1ec455e1da3b29af5a9575acc30
|
refs/heads/master
| 2023-03-29T03:26:34.599538 | 2021-03-31T08:00:46 | 2021-03-31T08:00:46 | 352,010,808 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 403 |
py
|
"""
ASGI config for majcat_single project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'majcat_single.settings')
application = get_asgi_application()
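# A typical way to serve this application (an assumption, not part of the
# generated template) is with an ASGI server such as uvicorn:
#
#   uvicorn majcat_single.asgi:application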
|
[
"[email protected]"
] | |
9d1a437f349da26546e39c27cdd068e8674d9110
|
d1b677b4591c0adfc576607a244b565e2c767bbc
|
/Udacity-ML/titanic_survival_exploration-master_0/titanic_visualizations.py
|
6261a1b041bc05c09c34234a68c7c1abb8e08362
|
[
"MIT"
] |
permissive
|
quoniammm/happy-machine-learning
|
4614a09266fb778047462b2085250b39a14c7854
|
ffe3f5f24b4639a67707ba3b41e74601a379acb5
|
refs/heads/master
| 2021-06-20T01:44:17.051853 | 2017-07-31T02:40:56 | 2017-07-31T02:40:56 | 74,884,527 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,426 |
py
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
def filter_data(data, condition):
"""
    Remove rows that do not match the condition provided.
    Takes a pandas DataFrame and a single condition string as input and
    returns the filtered DataFrame.
    The condition should be a string of the following format:
    '<field> <op> <value>'
    where the following operations are valid: >, <, >=, <=, ==, !=
    Example: "Sex == 'male'" or 'Age < 18'
"""
field, op, value = condition.split(" ")
# convert value into number or strip excess quotes if string
try:
value = float(value)
    except (ValueError, TypeError):
value = value.strip("\'\"")
# get booleans for filtering
if op == ">":
matches = data[field] > value
elif op == "<":
matches = data[field] < value
elif op == ">=":
matches = data[field] >= value
elif op == "<=":
matches = data[field] <= value
elif op == "==":
matches = data[field] == value
elif op == "!=":
matches = data[field] != value
else: # catch invalid operation codes
raise Exception("Invalid comparison operator. Only >, <, >=, <=, ==, != allowed.")
# filter data and outcomes
data = data[matches].reset_index(drop = True)
return data
def survival_stats(data, outcomes, key, filters = []):
"""
Print out selected statistics regarding survival, given a feature of
interest and any number of filters (including no filters)
"""
# Check that the key exists
if key not in data.columns.values :
print "'{}' is not a feature of the Titanic data. Did you spell something wrong?".format(key)
return False
# Return the function before visualizing if 'Cabin' or 'Ticket'
# is selected: too many unique categories to display
if(key == 'Cabin' or key == 'PassengerId' or key == 'Ticket'):
print "'{}' has too many unique categories to display! Try a different feature.".format(key)
return False
# Merge data and outcomes into single dataframe
all_data = pd.concat([data, outcomes], axis = 1)
# Apply filters to data
for condition in filters:
all_data = filter_data(all_data, condition)
# Create outcomes DataFrame
all_data = all_data[[key, 'Survived']]
# Create plotting figure
plt.figure(figsize=(8,6))
# 'Numerical' features
if(key == 'Age' or key == 'Fare'):
# Remove NaN values from Age data
all_data = all_data[~np.isnan(all_data[key])]
# Divide the range of data into bins and count survival rates
min_value = all_data[key].min()
max_value = all_data[key].max()
value_range = max_value - min_value
# 'Fares' has larger range of values than 'Age' so create more bins
if(key == 'Fare'):
bins = np.arange(0, all_data['Fare'].max() + 20, 20)
if(key == 'Age'):
bins = np.arange(0, all_data['Age'].max() + 10, 10)
# Overlay each bin's survival rates
nonsurv_vals = all_data[all_data['Survived'] == 0][key].reset_index(drop = True)
surv_vals = all_data[all_data['Survived'] == 1][key].reset_index(drop = True)
plt.hist(nonsurv_vals, bins = bins, alpha = 0.6,
color = 'red', label = 'Did not survive')
plt.hist(surv_vals, bins = bins, alpha = 0.6,
color = 'green', label = 'Survived')
# Add legend to plot
plt.xlim(0, bins.max())
plt.legend(framealpha = 0.8)
# 'Categorical' features
else:
# Set the various categories
if(key == 'Pclass'):
values = np.arange(1,4)
if(key == 'Parch' or key == 'SibSp'):
values = np.arange(0,np.max(data[key]) + 1)
if(key == 'Embarked'):
values = ['C', 'Q', 'S']
if(key == 'Sex'):
values = ['male', 'female']
# Create DataFrame containing categories and count of each
frame = pd.DataFrame(index = np.arange(len(values)), columns=(key,'Survived','NSurvived'))
for i, value in enumerate(values):
frame.loc[i] = [value, \
len(all_data[(all_data['Survived'] == 1) & (all_data[key] == value)]), \
len(all_data[(all_data['Survived'] == 0) & (all_data[key] == value)])]
# Set the width of each bar
bar_width = 0.4
# Display each category's survival rates
for i in np.arange(len(frame)):
nonsurv_bar = plt.bar(i-bar_width, frame.loc[i]['NSurvived'], width = bar_width, color = 'r')
surv_bar = plt.bar(i, frame.loc[i]['Survived'], width = bar_width, color = 'g')
plt.xticks(np.arange(len(frame)), values)
plt.legend((nonsurv_bar[0], surv_bar[0]),('Did not survive', 'Survived'), framealpha = 0.8)
# Common attributes for plot formatting
plt.xlabel(key)
plt.ylabel('Number of Passengers')
plt.title('Passenger Survival Statistics With \'%s\' Feature'%(key))
plt.show()
# Report number of passengers with missing values
if sum(pd.isnull(all_data[key])):
nan_outcomes = all_data[pd.isnull(all_data[key])]['Survived']
print "Passengers with missing '{}' values: {} ({} survived, {} did not survive)".format( \
key, len(nan_outcomes), sum(nan_outcomes == 1), sum(nan_outcomes == 0))
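# Hypothetical usage sketch (not part of the original module), assuming `data`
# and `outcomes` are the Titanic DataFrames loaded elsewhere in the project:
#
#   survival_stats(data, outcomes, 'Sex')
#   survival_stats(data, outcomes, 'Age', ["Sex == 'male'", 'Age < 18'])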
|
[
"[email protected]"
] | |
2cb33275754ec783f5f546a411cf0fe226a579eb
|
f7982a468b6f76dc72c53e7c3644ae4e7e6f2f49
|
/pyEX/refdata/ric.py
|
6e4ab19987f1f4ec33a268a2b177446c705a78b6
|
[
"Apache-2.0"
] |
permissive
|
timkpaine/pyEX
|
55002c3718214c6e207976ab3661a47108c6c114
|
f678c791d05bc28911e25807241c392a9ee8134f
|
refs/heads/main
| 2023-08-20T00:17:53.162803 | 2022-11-22T02:51:13 | 2022-11-22T02:51:13 | 109,551,372 | 350 | 95 |
Apache-2.0
| 2023-09-11T12:26:54 | 2017-11-05T04:21:16 |
Python
|
UTF-8
|
Python
| false | false | 1,129 |
py
|
# *****************************************************************************
#
# Copyright (c) 2021, the pyEX authors.
#
# This file is part of the pyEX library, distributed under the terms of
# the Apache License 2.0. The full license can be found in the LICENSE file.
#
from functools import wraps
import pandas as pd
from ..common import _get
def ricLookup(ric, token="", version="stable", filter="", format="json"):
"""This call converts a RIC to an iex symbol
https://iexcloud.io/docs/api/#ric-mapping
8am, 9am, 12pm, 1pm UTC daily
Args:
ric (str): ric to lookup
token (str): Access token
version (str): API version
filter (str): filters: https://iexcloud.io/docs/api/#filter-results
format (str): return format, defaults to json
Returns:
dict or DataFrame or list: result
"""
return _get(
"ref-data/ric?ric={}".format(ric),
token=token,
version=version,
filter=filter,
format=format,
)
@wraps(ricLookup)
def ricLookupDF(*args, **kwargs):
return pd.DataFrame(ricLookup(*args, **kwargs))
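# Hypothetical usage sketch (not part of the original module); the RIC value
# and token below are placeholders:
#
#   ricLookup("AAPL.O", token="YOUR_IEX_TOKEN")
#   ricLookupDF("AAPL.O", token="YOUR_IEX_TOKEN")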
|
[
"[email protected]"
] | |
ba60fe38e13a738a0edfb12fb060eeb0f659882c
|
3bd6ae1adf1d723dd8daae733ddbebfbe92938b8
|
/leetcode/989.py
|
266d07552feede32647d03f5d80de0877b6da50e
|
[] |
no_license
|
maxwang967/kick-start
|
ede5bdfca18477bf649be5049f421e9700d6c92c
|
5fc90eb3aafa41dced1ef682dcce2a07b558a545
|
refs/heads/master
| 2023-03-31T00:38:38.584384 | 2021-04-02T06:05:10 | 2021-04-02T06:05:10 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,356 |
py
|
from typing import List
class Solution:
def addToArrayForm(self, A: List[int], K: int) -> List[int]:
result = []
K_array = [int(x) for x in str(K)]
K_n = len(K_array)
A_n = len(A)
carry = 0
border = min(A_n, K_n)
for i in range(border):
cur_K = K_n - 1 - i
cur_A = A_n - 1 - i
add_K = K_array[cur_K]
add_A = A[cur_A]
s = add_A + add_K + carry
carry = s // 10
dig = s % 10
result.append(dig)
if A_n == K_n and carry == 1:
result.append(carry)
elif border == A_n:
for i in range(border, K_n):
cur_K = K_n - 1 - i
add_K = K_array[cur_K]
s = add_K + carry
carry = s // 10
dig = s % 10
result.append(dig)
if i == K_n - 1 and carry == 1:
result.append(carry)
elif border == K_n:
for i in range(border, A_n):
cur_A = A_n - 1 - i
add_A = A[cur_A]
s = add_A + carry
carry = s // 10
dig = s % 10
result.append(dig)
if i == A_n - 1 and carry == 1:
result.append(carry)
return result[::-1]
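# A shorter alternative for reference (an assumed-equivalent approach, not the
# original submission): treat A as one integer, add K, and split the sum back
# into digits.
#
#   def add_to_array_form(A, K):
#       return [int(d) for d in str(int("".join(map(str, A))) + K)]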
|
[
"[email protected]"
] | |
a42658845c9f20032a391940e548d739fa593468
|
c8453f83242cd525a98606f665d9f5d9e84c6335
|
/lib/googlecloudsdk/third_party/apis/bigquery/v2/bigquery_v2_messages.py
|
c9af6411f99770bce94fba3d09d11478fa6e7675
|
[
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] |
permissive
|
paulfoley/GCP-Cloud_SDK
|
5188a04d8d80a2709fa3dba799802d57c7eb66a1
|
bec7106686e99257cb91a50f2c1b1a374a4fc66f
|
refs/heads/master
| 2021-06-02T09:49:48.309328 | 2017-07-02T18:26:47 | 2017-07-02T18:26:47 | 96,041,222 | 1 | 1 |
NOASSERTION
| 2020-07-26T22:40:49 | 2017-07-02T18:19:52 |
Python
|
UTF-8
|
Python
| false | false | 98,916 |
py
|
"""Generated message classes for bigquery version v2.
A data platform for customers to create, manage, share and query data.
"""
# NOTE: This file is autogenerated and should not be edited by hand.
from apitools.base.protorpclite import messages as _messages
from apitools.base.py import encoding
from apitools.base.py import extra_types
package = 'bigquery'
class BigqueryDatasetsDeleteRequest(_messages.Message):
"""A BigqueryDatasetsDeleteRequest object.
Fields:
datasetId: Dataset ID of dataset being deleted
deleteContents: If True, delete all the tables in the dataset. If False
and the dataset contains tables, the request will fail. Default is False
projectId: Project ID of the dataset being deleted
"""
datasetId = _messages.StringField(1, required=True)
deleteContents = _messages.BooleanField(2)
projectId = _messages.StringField(3, required=True)
class BigqueryDatasetsDeleteResponse(_messages.Message):
"""An empty BigqueryDatasetsDelete response."""
class BigqueryDatasetsGetRequest(_messages.Message):
"""A BigqueryDatasetsGetRequest object.
Fields:
datasetId: Dataset ID of the requested dataset
projectId: Project ID of the requested dataset
"""
datasetId = _messages.StringField(1, required=True)
projectId = _messages.StringField(2, required=True)
class BigqueryDatasetsInsertRequest(_messages.Message):
"""A BigqueryDatasetsInsertRequest object.
Fields:
dataset: A Dataset resource to be passed as the request body.
projectId: Project ID of the new dataset
"""
dataset = _messages.MessageField('Dataset', 1)
projectId = _messages.StringField(2, required=True)
class BigqueryDatasetsListRequest(_messages.Message):
"""A BigqueryDatasetsListRequest object.
Fields:
all: Whether to list all datasets, including hidden ones
filter: An expression for filtering the results of the request by label.
The syntax is "labels.<name>[:<value>]". Multiple filters can be ANDed
together by connecting with a space. Example:
"labels.department:receiving labels.active". See Filtering datasets
using labels for details.
maxResults: The maximum number of results to return
pageToken: Page token, returned by a previous call, to request the next
page of results
projectId: Project ID of the datasets to be listed
"""
all = _messages.BooleanField(1)
filter = _messages.StringField(2)
maxResults = _messages.IntegerField(3, variant=_messages.Variant.UINT32)
pageToken = _messages.StringField(4)
projectId = _messages.StringField(5, required=True)
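# Hypothetical usage sketch (not part of the generated module): request
# messages are ordinary protorpclite message objects constructed with keyword
# arguments matching the fields above, e.g.
#
#   req = BigqueryDatasetsListRequest(projectId='my-project', maxResults=10)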
class BigqueryDatasetsPatchRequest(_messages.Message):
"""A BigqueryDatasetsPatchRequest object.
Fields:
dataset: A Dataset resource to be passed as the request body.
datasetId: Dataset ID of the dataset being updated
projectId: Project ID of the dataset being updated
"""
dataset = _messages.MessageField('Dataset', 1)
datasetId = _messages.StringField(2, required=True)
projectId = _messages.StringField(3, required=True)
class BigqueryDatasetsUpdateRequest(_messages.Message):
"""A BigqueryDatasetsUpdateRequest object.
Fields:
dataset: A Dataset resource to be passed as the request body.
datasetId: Dataset ID of the dataset being updated
projectId: Project ID of the dataset being updated
"""
dataset = _messages.MessageField('Dataset', 1)
datasetId = _messages.StringField(2, required=True)
projectId = _messages.StringField(3, required=True)
class BigqueryJobsCancelRequest(_messages.Message):
"""A BigqueryJobsCancelRequest object.
Fields:
jobId: [Required] Job ID of the job to cancel
projectId: [Required] Project ID of the job to cancel
"""
jobId = _messages.StringField(1, required=True)
projectId = _messages.StringField(2, required=True)
class BigqueryJobsGetQueryResultsRequest(_messages.Message):
"""A BigqueryJobsGetQueryResultsRequest object.
Fields:
jobId: [Required] Job ID of the query job
maxResults: Maximum number of results to read
pageToken: Page token, returned by a previous call, to request the next
page of results
projectId: [Required] Project ID of the query job
startIndex: Zero-based index of the starting row
timeoutMs: How long to wait for the query to complete, in milliseconds,
before returning. Default is 10 seconds. If the timeout passes before
the job completes, the 'jobComplete' field in the response will be false
"""
jobId = _messages.StringField(1, required=True)
maxResults = _messages.IntegerField(2, variant=_messages.Variant.UINT32)
pageToken = _messages.StringField(3)
projectId = _messages.StringField(4, required=True)
startIndex = _messages.IntegerField(5, variant=_messages.Variant.UINT64)
timeoutMs = _messages.IntegerField(6, variant=_messages.Variant.UINT32)
class BigqueryJobsGetRequest(_messages.Message):
"""A BigqueryJobsGetRequest object.
Fields:
jobId: [Required] Job ID of the requested job
projectId: [Required] Project ID of the requested job
"""
jobId = _messages.StringField(1, required=True)
projectId = _messages.StringField(2, required=True)
class BigqueryJobsInsertRequest(_messages.Message):
"""A BigqueryJobsInsertRequest object.
Fields:
job: A Job resource to be passed as the request body.
projectId: Project ID of the project that will be billed for the job
"""
job = _messages.MessageField('Job', 1)
projectId = _messages.StringField(2, required=True)
class BigqueryJobsListRequest(_messages.Message):
"""A BigqueryJobsListRequest object.
Enums:
ProjectionValueValuesEnum: Restrict information returned to a set of
selected fields
StateFilterValueValuesEnum: Filter for job state
Fields:
allUsers: Whether to display jobs owned by all users in the project.
Default false
maxResults: Maximum number of results to return
pageToken: Page token, returned by a previous call, to request the next
page of results
projectId: Project ID of the jobs to list
projection: Restrict information returned to a set of selected fields
stateFilter: Filter for job state
"""
class ProjectionValueValuesEnum(_messages.Enum):
"""Restrict information returned to a set of selected fields
Values:
full: Includes all job data
minimal: Does not include the job configuration
"""
full = 0
minimal = 1
class StateFilterValueValuesEnum(_messages.Enum):
"""Filter for job state
Values:
done: Finished jobs
pending: Pending jobs
running: Running jobs
"""
done = 0
pending = 1
running = 2
allUsers = _messages.BooleanField(1)
maxResults = _messages.IntegerField(2, variant=_messages.Variant.UINT32)
pageToken = _messages.StringField(3)
projectId = _messages.StringField(4, required=True)
projection = _messages.EnumField('ProjectionValueValuesEnum', 5)
stateFilter = _messages.EnumField('StateFilterValueValuesEnum', 6, repeated=True)
class BigqueryJobsQueryRequest(_messages.Message):
"""A BigqueryJobsQueryRequest object.
Fields:
projectId: Project ID of the project billed for the query
queryRequest: A QueryRequest resource to be passed as the request body.
"""
projectId = _messages.StringField(1, required=True)
queryRequest = _messages.MessageField('QueryRequest', 2)
class BigqueryProjectsListRequest(_messages.Message):
"""A BigqueryProjectsListRequest object.
Fields:
maxResults: Maximum number of results to return
pageToken: Page token, returned by a previous call, to request the next
page of results
"""
maxResults = _messages.IntegerField(1, variant=_messages.Variant.UINT32)
pageToken = _messages.StringField(2)
class BigqueryTabledataInsertAllRequest(_messages.Message):
"""A BigqueryTabledataInsertAllRequest object.
Fields:
datasetId: Dataset ID of the destination table.
projectId: Project ID of the destination table.
tableDataInsertAllRequest: A TableDataInsertAllRequest resource to be
passed as the request body.
tableId: Table ID of the destination table.
"""
datasetId = _messages.StringField(1, required=True)
projectId = _messages.StringField(2, required=True)
tableDataInsertAllRequest = _messages.MessageField('TableDataInsertAllRequest', 3)
tableId = _messages.StringField(4, required=True)
class BigqueryTabledataListRequest(_messages.Message):
"""A BigqueryTabledataListRequest object.
Fields:
datasetId: Dataset ID of the table to read
maxResults: Maximum number of results to return
pageToken: Page token, returned by a previous call, identifying the result
set
projectId: Project ID of the table to read
startIndex: Zero-based index of the starting row to read
tableId: Table ID of the table to read
"""
datasetId = _messages.StringField(1, required=True)
maxResults = _messages.IntegerField(2, variant=_messages.Variant.UINT32)
pageToken = _messages.StringField(3)
projectId = _messages.StringField(4, required=True)
startIndex = _messages.IntegerField(5, variant=_messages.Variant.UINT64)
tableId = _messages.StringField(6, required=True)
class BigqueryTablesDeleteRequest(_messages.Message):
"""A BigqueryTablesDeleteRequest object.
Fields:
datasetId: Dataset ID of the table to delete
projectId: Project ID of the table to delete
tableId: Table ID of the table to delete
"""
datasetId = _messages.StringField(1, required=True)
projectId = _messages.StringField(2, required=True)
tableId = _messages.StringField(3, required=True)
class BigqueryTablesDeleteResponse(_messages.Message):
"""An empty BigqueryTablesDelete response."""
class BigqueryTablesGetRequest(_messages.Message):
"""A BigqueryTablesGetRequest object.
Fields:
datasetId: Dataset ID of the requested table
projectId: Project ID of the requested table
tableId: Table ID of the requested table
"""
datasetId = _messages.StringField(1, required=True)
projectId = _messages.StringField(2, required=True)
tableId = _messages.StringField(3, required=True)
class BigqueryTablesInsertRequest(_messages.Message):
"""A BigqueryTablesInsertRequest object.
Fields:
datasetId: Dataset ID of the new table
projectId: Project ID of the new table
table: A Table resource to be passed as the request body.
"""
datasetId = _messages.StringField(1, required=True)
projectId = _messages.StringField(2, required=True)
table = _messages.MessageField('Table', 3)
class BigqueryTablesListRequest(_messages.Message):
"""A BigqueryTablesListRequest object.
Fields:
datasetId: Dataset ID of the tables to list
maxResults: Maximum number of results to return
pageToken: Page token, returned by a previous call, to request the next
page of results
projectId: Project ID of the tables to list
"""
datasetId = _messages.StringField(1, required=True)
maxResults = _messages.IntegerField(2, variant=_messages.Variant.UINT32)
pageToken = _messages.StringField(3)
projectId = _messages.StringField(4, required=True)
class BigqueryTablesPatchRequest(_messages.Message):
"""A BigqueryTablesPatchRequest object.
Fields:
datasetId: Dataset ID of the table to update
projectId: Project ID of the table to update
table: A Table resource to be passed as the request body.
tableId: Table ID of the table to update
"""
datasetId = _messages.StringField(1, required=True)
projectId = _messages.StringField(2, required=True)
table = _messages.MessageField('Table', 3)
tableId = _messages.StringField(4, required=True)
class BigqueryTablesUpdateRequest(_messages.Message):
"""A BigqueryTablesUpdateRequest object.
Fields:
datasetId: Dataset ID of the table to update
projectId: Project ID of the table to update
table: A Table resource to be passed as the request body.
tableId: Table ID of the table to update
"""
datasetId = _messages.StringField(1, required=True)
projectId = _messages.StringField(2, required=True)
table = _messages.MessageField('Table', 3)
tableId = _messages.StringField(4, required=True)
class BigtableColumn(_messages.Message):
"""A BigtableColumn object.
Fields:
encoding: [Optional] The encoding of the values when the type is not
STRING. Acceptable encoding values are: TEXT - indicates values are
alphanumeric text strings. BINARY - indicates values are encoded using
HBase Bytes.toBytes family of functions. 'encoding' can also be set at
the column family level. However, the setting at this level takes
precedence if 'encoding' is set at both levels.
fieldName: [Optional] If the qualifier is not a valid BigQuery field
identifier i.e. does not match [a-zA-Z][a-zA-Z0-9_]*, a valid identifier
must be provided as the column field name and is used as field name in
queries.
onlyReadLatest: [Optional] If this is set, only the latest version of
value in this column are exposed. 'onlyReadLatest' can also be set at
the column family level. However, the setting at this level takes
precedence if 'onlyReadLatest' is set at both levels.
qualifierEncoded: [Required] Qualifier of the column. Columns in the
parent column family that has this exact qualifier are exposed as .
field. If the qualifier is valid UTF-8 string, it can be specified in
the qualifier_string field. Otherwise, a base-64 encoded value must be
set to qualifier_encoded. The column field name is the same as the
column qualifier. However, if the qualifier is not a valid BigQuery
field identifier i.e. does not match [a-zA-Z][a-zA-Z0-9_]*, a valid
identifier must be provided as field_name.
qualifierString: A string attribute.
type: [Optional] The type to convert the value in cells of this column.
The values are expected to be encoded using HBase Bytes.toBytes function
when using the BINARY encoding value. Following BigQuery types are
allowed (case-sensitive) - BYTES STRING INTEGER FLOAT BOOLEAN Default
type is BYTES. 'type' can also be set at the column family level.
However, the setting at this level takes precedence if 'type' is set at
both levels.
"""
encoding = _messages.StringField(1)
fieldName = _messages.StringField(2)
onlyReadLatest = _messages.BooleanField(3)
qualifierEncoded = _messages.BytesField(4)
qualifierString = _messages.StringField(5)
type = _messages.StringField(6)
class BigtableColumnFamily(_messages.Message):
"""A BigtableColumnFamily object.
Fields:
columns: [Optional] Lists of columns that should be exposed as individual
fields as opposed to a list of (column name, value) pairs. All columns
whose qualifier matches a qualifier in this list can be accessed as ..
Other columns can be accessed as a list through .Column field.
encoding: [Optional] The encoding of the values when the type is not
STRING. Acceptable encoding values are: TEXT - indicates values are
alphanumeric text strings. BINARY - indicates values are encoded using
HBase Bytes.toBytes family of functions. This can be overridden for a
specific column by listing that column in 'columns' and specifying an
encoding for it.
familyId: Identifier of the column family.
onlyReadLatest: [Optional] If this is set only the latest version of value
are exposed for all columns in this column family. This can be
overridden for a specific column by listing that column in 'columns' and
specifying a different setting for that column.
type: [Optional] The type to convert the value in cells of this column
family. The values are expected to be encoded using HBase Bytes.toBytes
function when using the BINARY encoding value. Following BigQuery types
are allowed (case-sensitive) - BYTES STRING INTEGER FLOAT BOOLEAN
Default type is BYTES. This can be overridden for a specific column by
listing that column in 'columns' and specifying a type for it.
"""
columns = _messages.MessageField('BigtableColumn', 1, repeated=True)
encoding = _messages.StringField(2)
familyId = _messages.StringField(3)
onlyReadLatest = _messages.BooleanField(4)
type = _messages.StringField(5)
class BigtableOptions(_messages.Message):
"""A BigtableOptions object.
Fields:
columnFamilies: [Optional] List of column families to expose in the table
schema along with their types. This list restricts the column families
that can be referenced in queries and specifies their value types. You
can use this list to do type conversions - see the 'type' field for more
details. If you leave this list empty, all column families are present
in the table schema and their values are read as BYTES. During a query
only the column families referenced in that query are read from
Bigtable.
ignoreUnspecifiedColumnFamilies: [Optional] If field is true, then the
column families that are not specified in columnFamilies list are not
exposed in the table schema. Otherwise, they are read with BYTES type
values. The default value is false.
readRowkeyAsString: [Optional] If field is true, then the rowkey column
families will be read and converted to string. Otherwise they are read
with BYTES type values and users need to manually cast them with CAST if
necessary. The default value is false.
"""
columnFamilies = _messages.MessageField('BigtableColumnFamily', 1, repeated=True)
ignoreUnspecifiedColumnFamilies = _messages.BooleanField(2)
readRowkeyAsString = _messages.BooleanField(3)
class CsvOptions(_messages.Message):
"""A CsvOptions object.
Fields:
allowJaggedRows: [Optional] Indicates if BigQuery should accept rows that
are missing trailing optional columns. If true, BigQuery treats missing
trailing columns as null values. If false, records with missing trailing
columns are treated as bad records, and if there are too many bad
records, an invalid error is returned in the job result. The default
value is false.
allowQuotedNewlines: [Optional] Indicates if BigQuery should allow quoted
data sections that contain newline characters in a CSV file. The default
value is false.
encoding: [Optional] The character encoding of the data. The supported
values are UTF-8 or ISO-8859-1. The default value is UTF-8. BigQuery
decodes the data after the raw, binary data has been split using the
values of the quote and fieldDelimiter properties.
fieldDelimiter: [Optional] The separator for fields in a CSV file.
BigQuery converts the string to ISO-8859-1 encoding, and then uses the
first byte of the encoded string to split the data in its raw, binary
state. BigQuery also supports the escape sequence "\t" to specify a tab
separator. The default value is a comma (',').
quote: [Optional] The value that is used to quote data sections in a CSV
file. BigQuery converts the string to ISO-8859-1 encoding, and then uses
the first byte of the encoded string to split the data in its raw,
binary state. The default value is a double-quote ('"'). If your data
does not contain quoted sections, set the property value to an empty
string. If your data contains quoted newline characters, you must also
set the allowQuotedNewlines property to true.
skipLeadingRows: [Optional] The number of rows at the top of a CSV file
that BigQuery will skip when reading the data. The default value is 0.
This property is useful if you have header rows in the file that should
be skipped.
"""
allowJaggedRows = _messages.BooleanField(1)
allowQuotedNewlines = _messages.BooleanField(2)
encoding = _messages.StringField(3)
fieldDelimiter = _messages.StringField(4)
quote = _messages.StringField(5, default=u'"')
skipLeadingRows = _messages.IntegerField(6)
class Dataset(_messages.Message):
"""A Dataset object.
Messages:
AccessValueListEntry: A AccessValueListEntry object.
LabelsValue: [Experimental] The labels associated with this dataset. You
can use these to organize and group your datasets. You can set this
property when inserting or updating a dataset. See Labeling Datasets for
more information.
Fields:
access: [Optional] An array of objects that define dataset access for one
or more entities. You can set this property when inserting or updating a
dataset in order to control who is allowed to access the data. If
unspecified at dataset creation time, BigQuery adds default dataset
access for the following entities: access.specialGroup: projectReaders;
access.role: READER; access.specialGroup: projectWriters; access.role:
WRITER; access.specialGroup: projectOwners; access.role: OWNER;
access.userByEmail: [dataset creator email]; access.role: OWNER;
creationTime: [Output-only] The time when this dataset was created, in
milliseconds since the epoch.
datasetReference: [Required] A reference that identifies the dataset.
defaultTableExpirationMs: [Optional] The default lifetime of all tables in
the dataset, in milliseconds. The minimum value is 3600000 milliseconds
(one hour). Once this property is set, all newly-created tables in the
dataset will have an expirationTime property set to the creation time
plus the value in this property, and changing the value will only affect
new tables, not existing ones. When the expirationTime for a given table
is reached, that table will be deleted automatically. If a table's
expirationTime is modified or removed before the table expires, or if
you provide an explicit expirationTime when creating a table, that value
takes precedence over the default expiration time indicated by this
property.
description: [Optional] A user-friendly description of the dataset.
etag: [Output-only] A hash of the resource.
friendlyName: [Optional] A descriptive name for the dataset.
id: [Output-only] The fully-qualified unique name of the dataset in the
format projectId:datasetId. The dataset name without the project name is
given in the datasetId field. When creating a new dataset, leave this
field blank, and instead specify the datasetId field.
kind: [Output-only] The resource type.
labels: [Experimental] The labels associated with this dataset. You can
use these to organize and group your datasets. You can set this property
when inserting or updating a dataset. See Labeling Datasets for more
information.
lastModifiedTime: [Output-only] The date when this dataset or any of its
tables was last modified, in milliseconds since the epoch.
location: [Experimental] The geographic location where the dataset should
reside. Possible values include EU and US. The default value is US.
selfLink: [Output-only] A URL that can be used to access the resource
again. You can use this URL in Get or Update requests to the resource.
"""
class AccessValueListEntry(_messages.Message):
"""A AccessValueListEntry object.
Fields:
domain: [Pick one] A domain to grant access to. Any users signed in with
the domain specified will be granted the specified access. Example:
"example.com".
groupByEmail: [Pick one] An email address of a Google Group to grant
access to.
role: [Required] Describes the rights granted to the user specified by
the other member of the access object. The following string values are
supported: READER, WRITER, OWNER.
specialGroup: [Pick one] A special group to grant access to. Possible
values include: projectOwners: Owners of the enclosing project.
projectReaders: Readers of the enclosing project. projectWriters:
Writers of the enclosing project. allAuthenticatedUsers: All
authenticated BigQuery users.
userByEmail: [Pick one] An email address of a user to grant access to.
For example: [email protected].
view: [Pick one] A view from a different dataset to grant access to.
Queries executed against that view will have read access to tables in
this dataset. The role field is not required when this field is set.
If that view is updated by any user, access to the view needs to be
granted again via an update operation.
"""
domain = _messages.StringField(1)
groupByEmail = _messages.StringField(2)
role = _messages.StringField(3)
specialGroup = _messages.StringField(4)
userByEmail = _messages.StringField(5)
view = _messages.MessageField('TableReference', 6)
@encoding.MapUnrecognizedFields('additionalProperties')
class LabelsValue(_messages.Message):
"""[Experimental] The labels associated with this dataset. You can use
these to organize and group your datasets. You can set this property when
inserting or updating a dataset. See Labeling Datasets for more
information.
Messages:
AdditionalProperty: An additional property for a LabelsValue object.
Fields:
additionalProperties: Additional properties of type LabelsValue
"""
class AdditionalProperty(_messages.Message):
"""An additional property for a LabelsValue object.
Fields:
key: Name of the additional property.
value: A string attribute.
"""
key = _messages.StringField(1)
value = _messages.StringField(2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
access = _messages.MessageField('AccessValueListEntry', 1, repeated=True)
creationTime = _messages.IntegerField(2)
datasetReference = _messages.MessageField('DatasetReference', 3)
defaultTableExpirationMs = _messages.IntegerField(4)
description = _messages.StringField(5)
etag = _messages.StringField(6)
friendlyName = _messages.StringField(7)
id = _messages.StringField(8)
kind = _messages.StringField(9, default=u'bigquery#dataset')
labels = _messages.MessageField('LabelsValue', 10)
lastModifiedTime = _messages.IntegerField(11)
location = _messages.StringField(12)
selfLink = _messages.StringField(13)
class DatasetList(_messages.Message):
"""A DatasetList object.
Messages:
DatasetsValueListEntry: A DatasetsValueListEntry object.
Fields:
datasets: An array of the dataset resources in the project. Each resource
contains basic information. For full information about a particular
dataset resource, use the Datasets: get method. This property is omitted
when there are no datasets in the project.
etag: A hash value of the results page. You can use this property to
determine if the page has changed since the last request.
kind: The list type. This property always returns the value
"bigquery#datasetList".
nextPageToken: A token that can be used to request the next results page.
This property is omitted on the final results page.
"""
class DatasetsValueListEntry(_messages.Message):
"""A DatasetsValueListEntry object.
Messages:
LabelsValue: [Experimental] The labels associated with this dataset. You
can use these to organize and group your datasets.
Fields:
datasetReference: The dataset reference. Use this property to access
specific parts of the dataset's ID, such as project ID or dataset ID.
friendlyName: A descriptive name for the dataset, if one exists.
id: The fully-qualified, unique, opaque ID of the dataset.
kind: The resource type. This property always returns the value
"bigquery#dataset".
labels: [Experimental] The labels associated with this dataset. You can
use these to organize and group your datasets.
"""
@encoding.MapUnrecognizedFields('additionalProperties')
class LabelsValue(_messages.Message):
"""[Experimental] The labels associated with this dataset. You can use
these to organize and group your datasets.
Messages:
AdditionalProperty: An additional property for a LabelsValue object.
Fields:
additionalProperties: Additional properties of type LabelsValue
"""
class AdditionalProperty(_messages.Message):
"""An additional property for a LabelsValue object.
Fields:
key: Name of the additional property.
value: A string attribute.
"""
key = _messages.StringField(1)
value = _messages.StringField(2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
datasetReference = _messages.MessageField('DatasetReference', 1)
friendlyName = _messages.StringField(2)
id = _messages.StringField(3)
kind = _messages.StringField(4, default=u'bigquery#dataset')
labels = _messages.MessageField('LabelsValue', 5)
datasets = _messages.MessageField('DatasetsValueListEntry', 1, repeated=True)
etag = _messages.StringField(2)
kind = _messages.StringField(3, default=u'bigquery#datasetList')
nextPageToken = _messages.StringField(4)
class DatasetReference(_messages.Message):
"""A DatasetReference object.
Fields:
datasetId: [Required] A unique ID for this dataset, without the project
name. The ID must contain only letters (a-z, A-Z), numbers (0-9), or
underscores (_). The maximum length is 1,024 characters.
projectId: [Optional] The ID of the project containing this dataset.
"""
datasetId = _messages.StringField(1)
projectId = _messages.StringField(2)
class ErrorProto(_messages.Message):
"""A ErrorProto object.
Fields:
debugInfo: Debugging information. This property is internal to Google and
should not be used.
location: Specifies where the error occurred, if present.
message: A human-readable description of the error.
reason: A short error code that summarizes the error.
"""
debugInfo = _messages.StringField(1)
location = _messages.StringField(2)
message = _messages.StringField(3)
reason = _messages.StringField(4)
class ExplainQueryStage(_messages.Message):
"""A ExplainQueryStage object.
Fields:
computeRatioAvg: Relative amount of time the average shard spent on CPU-
bound tasks.
computeRatioMax: Relative amount of time the slowest shard spent on CPU-
bound tasks.
id: Unique ID for stage within plan.
name: Human-readable name for stage.
readRatioAvg: Relative amount of time the average shard spent reading
input.
readRatioMax: Relative amount of time the slowest shard spent reading
input.
recordsRead: Number of records read into the stage.
recordsWritten: Number of records written by the stage.
status: Current status for the stage.
steps: List of operations within the stage in dependency order
(approximately chronological).
waitRatioAvg: Relative amount of time the average shard spent waiting to
be scheduled.
waitRatioMax: Relative amount of time the slowest shard spent waiting to
be scheduled.
writeRatioAvg: Relative amount of time the average shard spent on writing
output.
writeRatioMax: Relative amount of time the slowest shard spent on writing
output.
"""
computeRatioAvg = _messages.FloatField(1)
computeRatioMax = _messages.FloatField(2)
id = _messages.IntegerField(3)
name = _messages.StringField(4)
readRatioAvg = _messages.FloatField(5)
readRatioMax = _messages.FloatField(6)
recordsRead = _messages.IntegerField(7)
recordsWritten = _messages.IntegerField(8)
status = _messages.StringField(9)
steps = _messages.MessageField('ExplainQueryStep', 10, repeated=True)
waitRatioAvg = _messages.FloatField(11)
waitRatioMax = _messages.FloatField(12)
writeRatioAvg = _messages.FloatField(13)
writeRatioMax = _messages.FloatField(14)
class ExplainQueryStep(_messages.Message):
"""A ExplainQueryStep object.
Fields:
kind: Machine-readable operation type.
substeps: Human-readable stage descriptions.
"""
kind = _messages.StringField(1)
substeps = _messages.StringField(2, repeated=True)
class ExternalDataConfiguration(_messages.Message):
"""A ExternalDataConfiguration object.
Fields:
autodetect: [Experimental] Try to detect schema and format options
automatically. Any option specified explicitly will be honored.
bigtableOptions: [Optional] Additional options if sourceFormat is set to
BIGTABLE.
compression: [Optional] The compression type of the data source. Possible
values include GZIP and NONE. The default value is NONE. This setting is
ignored for Google Cloud Bigtable, Google Cloud Datastore backups and
Avro formats.
csvOptions: Additional properties to set if sourceFormat is set to CSV.
googleSheetsOptions: [Optional] Additional options if sourceFormat is set
to GOOGLE_SHEETS.
ignoreUnknownValues: [Optional] Indicates if BigQuery should allow extra
values that are not represented in the table schema. If true, the extra
values are ignored. If false, records with extra columns are treated as
bad records, and if there are too many bad records, an invalid error is
returned in the job result. The default value is false. The sourceFormat
property determines what BigQuery treats as an extra value: CSV:
Trailing columns JSON: Named values that don't match any column names
Google Cloud Bigtable: This setting is ignored. Google Cloud Datastore
backups: This setting is ignored. Avro: This setting is ignored.
maxBadRecords: [Optional] The maximum number of bad records that BigQuery
can ignore when reading data. If the number of bad records exceeds this
value, an invalid error is returned in the job result. The default value
is 0, which requires that all records are valid. This setting is ignored
for Google Cloud Bigtable, Google Cloud Datastore backups and Avro
formats.
schema: [Optional] The schema for the data. Schema is required for CSV and
JSON formats. Schema is disallowed for Google Cloud Bigtable, Cloud
Datastore backups, and Avro formats.
sourceFormat: [Required] The data format. For CSV files, specify "CSV".
For Google sheets, specify "GOOGLE_SHEETS". For newline-delimited JSON,
specify "NEWLINE_DELIMITED_JSON". For Avro files, specify "AVRO". For
Google Cloud Datastore backups, specify "DATASTORE_BACKUP".
[Experimental] For Google Cloud Bigtable, specify "BIGTABLE". Please
note that reading from Google Cloud Bigtable is experimental and has to
be enabled for your project. Please contact Google Cloud Support to
enable this for your project.
sourceUris: [Required] The fully-qualified URIs that point to your data in
Google Cloud. For Google Cloud Storage URIs: Each URI can contain one
'*' wildcard character and it must come after the 'bucket' name. Size
limits related to load jobs apply to external data sources. For Google
Cloud Bigtable URIs: Exactly one URI can be specified and it has be a
fully specified and valid HTTPS URL for a Google Cloud Bigtable table.
For Google Cloud Datastore backups, exactly one URI can be specified,
and it must end with '.backup_info'. Also, the '*' wildcard character is
not allowed.
"""
autodetect = _messages.BooleanField(1)
bigtableOptions = _messages.MessageField('BigtableOptions', 2)
compression = _messages.StringField(3)
csvOptions = _messages.MessageField('CsvOptions', 4)
googleSheetsOptions = _messages.MessageField('GoogleSheetsOptions', 5)
ignoreUnknownValues = _messages.BooleanField(6)
maxBadRecords = _messages.IntegerField(7, variant=_messages.Variant.INT32)
schema = _messages.MessageField('TableSchema', 8)
sourceFormat = _messages.StringField(9)
sourceUris = _messages.StringField(10, repeated=True)
class GetQueryResultsResponse(_messages.Message):
"""A GetQueryResultsResponse object.
Fields:
cacheHit: Whether the query result was fetched from the query cache.
errors: [Output-only] All errors and warnings encountered during the
running of the job. Errors here do not necessarily mean that the job has
completed or was unsuccessful.
etag: A hash of this response.
jobComplete: Whether the query has completed or not. If rows or totalRows
are present, this will always be true. If this is false, totalRows will
not be available.
jobReference: Reference to the BigQuery Job that was created to run the
query. This field will be present even if the original request timed
out, in which case GetQueryResults can be used to read the results once
the query has completed. Since this API only returns the first page of
results, subsequent pages can be fetched via the same mechanism
(GetQueryResults).
kind: The resource type of the response.
numDmlAffectedRows: [Output-only, Experimental] The number of rows
affected by a DML statement. Present only for DML statements INSERT,
UPDATE or DELETE.
pageToken: A token used for paging results.
rows: An object with as many results as can be contained within the
maximum permitted reply size. To get any additional rows, you can call
GetQueryResults and specify the jobReference returned above. Present
only when the query completes successfully.
schema: The schema of the results. Present only when the query completes
successfully.
totalBytesProcessed: The total number of bytes processed for this query.
totalRows: The total number of rows in the complete query result set,
which can be more than the number of rows in this single page of
results. Present only when the query completes successfully.
"""
cacheHit = _messages.BooleanField(1)
errors = _messages.MessageField('ErrorProto', 2, repeated=True)
etag = _messages.StringField(3)
jobComplete = _messages.BooleanField(4)
jobReference = _messages.MessageField('JobReference', 5)
kind = _messages.StringField(6, default=u'bigquery#getQueryResultsResponse')
numDmlAffectedRows = _messages.IntegerField(7)
pageToken = _messages.StringField(8)
rows = _messages.MessageField('TableRow', 9, repeated=True)
schema = _messages.MessageField('TableSchema', 10)
totalBytesProcessed = _messages.IntegerField(11)
totalRows = _messages.IntegerField(12, variant=_messages.Variant.UINT64)
class GoogleSheetsOptions(_messages.Message):
"""A GoogleSheetsOptions object.
Fields:
skipLeadingRows: [Optional] The number of rows at the top of a sheet that
BigQuery will skip when reading the data. The default value is 0. This
property is useful if you have header rows that should be skipped. When
autodetect is on, behavior is the following: * skipLeadingRows
unspecified - Autodetect tries to detect headers in the first row. If
they are not detected, the row is read as data. Otherwise data is read
starting from the second row. * skipLeadingRows is 0 - Instructs
autodetect that there are no headers and data should be read starting
from the first row. * skipLeadingRows = N > 0 - Autodetect skips N-1
rows and tries to detect headers in row N. If headers are not detected,
row N is just skipped. Otherwise row N is used to extract column names
for the detected schema.
"""
skipLeadingRows = _messages.IntegerField(1)
class Job(_messages.Message):
"""A Job object.
Fields:
configuration: [Required] Describes the job configuration.
etag: [Output-only] A hash of this resource.
id: [Output-only] Opaque ID field of the job
jobReference: [Optional] Reference describing the unique-per-user name of
the job.
kind: [Output-only] The type of the resource.
selfLink: [Output-only] A URL that can be used to access this resource
again.
statistics: [Output-only] Information about the job, including starting
time and ending time of the job.
status: [Output-only] The status of this job. Examine this value when
polling an asynchronous job to see if the job is complete.
user_email: [Output-only] Email address of the user who ran the job.
"""
configuration = _messages.MessageField('JobConfiguration', 1)
etag = _messages.StringField(2)
id = _messages.StringField(3)
jobReference = _messages.MessageField('JobReference', 4)
kind = _messages.StringField(5, default=u'bigquery#job')
selfLink = _messages.StringField(6)
statistics = _messages.MessageField('JobStatistics', 7)
status = _messages.MessageField('JobStatus', 8)
user_email = _messages.StringField(9)
class JobCancelResponse(_messages.Message):
"""A JobCancelResponse object.
Fields:
job: The final state of the job.
kind: The resource type of the response.
"""
job = _messages.MessageField('Job', 1)
kind = _messages.StringField(2, default=u'bigquery#jobCancelResponse')
class JobConfiguration(_messages.Message):
"""A JobConfiguration object.
Messages:
LabelsValue: [Experimental] The labels associated with this job. You can
use these to organize and group your jobs. Label keys and values can be
no longer than 63 characters, can only contain lowercase letters,
numeric characters, underscores and dashes. International characters are
allowed. Label values are optional. Label keys must start with a letter
and each label in the list must have a different key.
Fields:
copy: [Pick one] Copies a table.
dryRun: [Optional] If set, don't actually run this job. A valid query will
return a mostly empty response with some processing statistics, while an
invalid query will return the same error it would if it wasn't a dry
run. Behavior of non-query jobs is undefined.
extract: [Pick one] Configures an extract job.
labels: [Experimental] The labels associated with this job. You can use
these to organize and group your jobs. Label keys and values can be no
longer than 63 characters, can only contain lowercase letters, numeric
characters, underscores and dashes. International characters are
allowed. Label values are optional. Label keys must start with a letter
and each label in the list must have a different key.
load: [Pick one] Configures a load job.
query: [Pick one] Configures a query job.
"""
@encoding.MapUnrecognizedFields('additionalProperties')
class LabelsValue(_messages.Message):
"""[Experimental] The labels associated with this job. You can use these
to organize and group your jobs. Label keys and values can be no longer
than 63 characters, can only contain lowercase letters, numeric
characters, underscores and dashes. International characters are allowed.
Label values are optional. Label keys must start with a letter and each
label in the list must have a different key.
Messages:
AdditionalProperty: An additional property for a LabelsValue object.
Fields:
additionalProperties: Additional properties of type LabelsValue
"""
class AdditionalProperty(_messages.Message):
"""An additional property for a LabelsValue object.
Fields:
key: Name of the additional property.
value: A string attribute.
"""
key = _messages.StringField(1)
value = _messages.StringField(2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
copy = _messages.MessageField('JobConfigurationTableCopy', 1)
dryRun = _messages.BooleanField(2)
extract = _messages.MessageField('JobConfigurationExtract', 3)
labels = _messages.MessageField('LabelsValue', 4)
load = _messages.MessageField('JobConfigurationLoad', 5)
query = _messages.MessageField('JobConfigurationQuery', 6)
class JobConfigurationExtract(_messages.Message):
"""A JobConfigurationExtract object.
Fields:
compression: [Optional] The compression type to use for exported files.
Possible values include GZIP and NONE. The default value is NONE.
destinationFormat: [Optional] The exported file format. Possible values
include CSV, NEWLINE_DELIMITED_JSON and AVRO. The default value is CSV.
Tables with nested or repeated fields cannot be exported as CSV.
destinationUri: [Pick one] DEPRECATED: Use destinationUris instead,
passing only one URI as necessary. The fully-qualified Google Cloud
Storage URI where the extracted table should be written.
destinationUris: [Pick one] A list of fully-qualified Google Cloud Storage
URIs where the extracted table should be written.
fieldDelimiter: [Optional] Delimiter to use between fields in the exported
data. Default is ','
printHeader: [Optional] Whether to print out a header row in the results.
Default is true.
sourceTable: [Required] A reference to the table being exported.
"""
compression = _messages.StringField(1)
destinationFormat = _messages.StringField(2)
destinationUri = _messages.StringField(3)
destinationUris = _messages.StringField(4, repeated=True)
fieldDelimiter = _messages.StringField(5)
printHeader = _messages.BooleanField(6, default=True)
sourceTable = _messages.MessageField('TableReference', 7)
class JobConfigurationLoad(_messages.Message):
"""A JobConfigurationLoad object.
Fields:
allowJaggedRows: [Optional] Accept rows that are missing trailing optional
columns. The missing values are treated as nulls. If false, records with
missing trailing columns are treated as bad records, and if there are
too many bad records, an invalid error is returned in the job result.
The default value is false. Only applicable to CSV, ignored for other
formats.
allowQuotedNewlines: Indicates if BigQuery should allow quoted data
sections that contain newline characters in a CSV file. The default
value is false.
autodetect: [Experimental] Indicates if we should automatically infer the
options and schema for CSV and JSON sources.
createDisposition: [Optional] Specifies whether the job is allowed to
create new tables. The following values are supported: CREATE_IF_NEEDED:
If the table does not exist, BigQuery creates the table. CREATE_NEVER:
The table must already exist. If it does not, a 'notFound' error is
returned in the job result. The default value is CREATE_IF_NEEDED.
Creation, truncation and append actions occur as one atomic update upon
job completion.
destinationTable: [Required] The destination table to load the data into.
encoding: [Optional] The character encoding of the data. The supported
values are UTF-8 or ISO-8859-1. The default value is UTF-8. BigQuery
decodes the data after the raw, binary data has been split using the
values of the quote and fieldDelimiter properties.
fieldDelimiter: [Optional] The separator for fields in a CSV file. The
separator can be any ISO-8859-1 single-byte character. To use a
character in the range 128-255, you must encode the character as UTF8.
BigQuery converts the string to ISO-8859-1 encoding, and then uses the
first byte of the encoded string to split the data in its raw, binary
state. BigQuery also supports the escape sequence "\t" to specify a tab
separator. The default value is a comma (',').
ignoreUnknownValues: [Optional] Indicates if BigQuery should allow extra
values that are not represented in the table schema. If true, the extra
values are ignored. If false, records with extra columns are treated as
bad records, and if there are too many bad records, an invalid error is
returned in the job result. The default value is false. The sourceFormat
property determines what BigQuery treats as an extra value: CSV:
Trailing columns JSON: Named values that don't match any column names
maxBadRecords: [Optional] The maximum number of bad records that BigQuery
can ignore when running the job. If the number of bad records exceeds
this value, an invalid error is returned in the job result. The default
value is 0, which requires that all records are valid.
nullMarker: [Optional] Specifies a string that represents a null value in
a CSV file. For example, if you specify "\N", BigQuery interprets "\N"
as a null value when loading a CSV file. The default value is the empty
string. If you set this property to a custom value, BigQuery still
interprets the empty string as a null value for all data types except
for STRING and BYTE. For STRING and BYTE columns, BigQuery interprets
the empty string as an empty value.
projectionFields: [Experimental] If sourceFormat is set to
"DATASTORE_BACKUP", indicates which entity properties to load into
BigQuery from a Cloud Datastore backup. Property names are case
sensitive and must be top-level properties. If no properties are
specified, BigQuery loads all properties. If any named property isn't
found in the Cloud Datastore backup, an invalid error is returned in the
job result.
quote: [Optional] The value that is used to quote data sections in a CSV
file. BigQuery converts the string to ISO-8859-1 encoding, and then uses
the first byte of the encoded string to split the data in its raw,
binary state. The default value is a double-quote ('"'). If your data
does not contain quoted sections, set the property value to an empty
string. If your data contains quoted newline characters, you must also
set the allowQuotedNewlines property to true.
schema: [Optional] The schema for the destination table. The schema can be
omitted if the destination table already exists, or if you're loading
data from Google Cloud Datastore.
schemaInline: [Deprecated] The inline schema. For CSV schemas, specify as
"Field1:Type1[,Field2:Type2]*". For example, "foo:STRING, bar:INTEGER,
baz:FLOAT".
schemaInlineFormat: [Deprecated] The format of the schemaInline property.
    schemaUpdateOptions: [Experimental] Allows the schema of the destination
table to be updated as a side effect of the load job. Schema update
options are supported in two cases: when writeDisposition is
WRITE_APPEND; when writeDisposition is WRITE_TRUNCATE and the
destination table is a partition of a table, specified by partition
decorators. For normal tables, WRITE_TRUNCATE will always overwrite the
schema. One or more of the following values are specified:
ALLOW_FIELD_ADDITION: allow adding a nullable field to the schema.
ALLOW_FIELD_RELAXATION: allow relaxing a required field in the original
schema to nullable.
skipLeadingRows: [Optional] The number of rows at the top of a CSV file
that BigQuery will skip when loading the data. The default value is 0.
This property is useful if you have header rows in the file that should
be skipped.
sourceFormat: [Optional] The format of the data files. For CSV files,
specify "CSV". For datastore backups, specify "DATASTORE_BACKUP". For
newline-delimited JSON, specify "NEWLINE_DELIMITED_JSON". For Avro,
specify "AVRO". The default value is CSV.
sourceUris: [Required] The fully-qualified URIs that point to your data in
Google Cloud Storage. Each URI can contain one '*' wildcard character
and it must come after the 'bucket' name.
writeDisposition: [Optional] Specifies the action that occurs if the
destination table already exists. The following values are supported:
WRITE_TRUNCATE: If the table already exists, BigQuery overwrites the
table data. WRITE_APPEND: If the table already exists, BigQuery appends
the data to the table. WRITE_EMPTY: If the table already exists and
contains data, a 'duplicate' error is returned in the job result. The
default value is WRITE_APPEND. Each action is atomic and only occurs if
BigQuery is able to complete the job successfully. Creation, truncation
and append actions occur as one atomic update upon job completion.
"""
allowJaggedRows = _messages.BooleanField(1)
allowQuotedNewlines = _messages.BooleanField(2)
autodetect = _messages.BooleanField(3)
createDisposition = _messages.StringField(4)
destinationTable = _messages.MessageField('TableReference', 5)
encoding = _messages.StringField(6)
fieldDelimiter = _messages.StringField(7)
ignoreUnknownValues = _messages.BooleanField(8)
maxBadRecords = _messages.IntegerField(9, variant=_messages.Variant.INT32)
nullMarker = _messages.StringField(10)
projectionFields = _messages.StringField(11, repeated=True)
quote = _messages.StringField(12, default=u'"')
schema = _messages.MessageField('TableSchema', 13)
schemaInline = _messages.StringField(14)
schemaInlineFormat = _messages.StringField(15)
schemaUpdateOptions = _messages.StringField(16, repeated=True)
skipLeadingRows = _messages.IntegerField(17, variant=_messages.Variant.INT32)
sourceFormat = _messages.StringField(18)
sourceUris = _messages.StringField(19, repeated=True)
writeDisposition = _messages.StringField(20)
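# Illustrative sketch (not part of the generated code; bucket, project, dataset
# and table names are placeholders): a CSV load configuration that skips one
# header row and overwrites the destination table, using the fields documented
# above. Kept as a comment so the module's behaviour is unchanged.
#
#   load_config = JobConfigurationLoad(
#       sourceUris=['gs://example-bucket/data.csv'],
#       sourceFormat='CSV',
#       skipLeadingRows=1,
#       destinationTable=TableReference(projectId='example-project',
#                                       datasetId='example_dataset',
#                                       tableId='example_table'),
#       writeDisposition='WRITE_TRUNCATE')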
class JobConfigurationQuery(_messages.Message):
"""A JobConfigurationQuery object.
Messages:
TableDefinitionsValue: [Optional] If querying an external data source
outside of BigQuery, describes the data format, location and other
properties of the data source. By defining these properties, the data
source can then be queried as if it were a standard BigQuery table.
Fields:
allowLargeResults: If true, allows the query to produce arbitrarily large
result tables at a slight cost in performance. Requires destinationTable
to be set.
createDisposition: [Optional] Specifies whether the job is allowed to
create new tables. The following values are supported: CREATE_IF_NEEDED:
If the table does not exist, BigQuery creates the table. CREATE_NEVER:
The table must already exist. If it does not, a 'notFound' error is
returned in the job result. The default value is CREATE_IF_NEEDED.
Creation, truncation and append actions occur as one atomic update upon
job completion.
defaultDataset: [Optional] Specifies the default dataset to use for
unqualified table names in the query.
destinationTable: [Optional] Describes the table where the query results
should be stored. If not present, a new table will be created to store
the results.
flattenResults: [Optional] Flattens all nested and repeated fields in the
query results. The default value is true. allowLargeResults must be true
if this is set to false.
maximumBillingTier: [Optional] Limits the billing tier for this job.
Queries that have resource usage beyond this tier will fail (without
incurring a charge). If unspecified, this will be set to your project
default.
maximumBytesBilled: [Optional] Limits the bytes billed for this job.
Queries that will have bytes billed beyond this limit will fail (without
incurring a charge). If unspecified, this will be set to your project
default.
parameterMode: [Experimental] Standard SQL only. Set to POSITIONAL to use
positional (?) query parameters or to NAMED to use named (@myparam)
query parameters in this query.
preserveNulls: [Deprecated] This property is deprecated.
priority: [Optional] Specifies a priority for the query. Possible values
include INTERACTIVE and BATCH. The default value is INTERACTIVE.
query: [Required] BigQuery SQL query to execute.
queryParameters: Query parameters for standard SQL queries.
schemaUpdateOptions: [Experimental] Allows the schema of the destination
table to be updated as a side effect of the query job. Schema update
options are supported in two cases: when writeDisposition is
WRITE_APPEND; when writeDisposition is WRITE_TRUNCATE and the
destination table is a partition of a table, specified by partition
decorators. For normal tables, WRITE_TRUNCATE will always overwrite the
schema. One or more of the following values are specified:
ALLOW_FIELD_ADDITION: allow adding a nullable field to the schema.
ALLOW_FIELD_RELAXATION: allow relaxing a required field in the original
schema to nullable.
tableDefinitions: [Optional] If querying an external data source outside
of BigQuery, describes the data format, location and other properties of
the data source. By defining these properties, the data source can then
be queried as if it were a standard BigQuery table.
useLegacySql: Specifies whether to use BigQuery's legacy SQL dialect for
this query. The default value is true. If set to false, the query will
use BigQuery's standard SQL: https://cloud.google.com/bigquery/sql-
reference/ When useLegacySql is set to false, the values of
allowLargeResults and flattenResults are ignored; query will be run as
if allowLargeResults is true and flattenResults is false.
useQueryCache: [Optional] Whether to look for the result in the query
cache. The query cache is a best-effort cache that will be flushed
whenever tables in the query are modified. Moreover, the query cache is
only available when a query does not have a destination table specified.
The default value is true.
userDefinedFunctionResources: [Experimental] Describes user-defined
function resources used in the query.
writeDisposition: [Optional] Specifies the action that occurs if the
destination table already exists. The following values are supported:
WRITE_TRUNCATE: If the table already exists, BigQuery overwrites the
table data. WRITE_APPEND: If the table already exists, BigQuery appends
the data to the table. WRITE_EMPTY: If the table already exists and
contains data, a 'duplicate' error is returned in the job result. The
default value is WRITE_EMPTY. Each action is atomic and only occurs if
BigQuery is able to complete the job successfully. Creation, truncation
and append actions occur as one atomic update upon job completion.
"""
@encoding.MapUnrecognizedFields('additionalProperties')
class TableDefinitionsValue(_messages.Message):
"""[Optional] If querying an external data source outside of BigQuery,
describes the data format, location and other properties of the data
source. By defining these properties, the data source can then be queried
as if it were a standard BigQuery table.
Messages:
AdditionalProperty: An additional property for a TableDefinitionsValue
object.
Fields:
additionalProperties: Additional properties of type
TableDefinitionsValue
"""
class AdditionalProperty(_messages.Message):
"""An additional property for a TableDefinitionsValue object.
Fields:
key: Name of the additional property.
        value: An ExternalDataConfiguration attribute.
"""
key = _messages.StringField(1)
value = _messages.MessageField('ExternalDataConfiguration', 2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
allowLargeResults = _messages.BooleanField(1)
createDisposition = _messages.StringField(2)
defaultDataset = _messages.MessageField('DatasetReference', 3)
destinationTable = _messages.MessageField('TableReference', 4)
flattenResults = _messages.BooleanField(5, default=True)
maximumBillingTier = _messages.IntegerField(6, variant=_messages.Variant.INT32, default=1)
maximumBytesBilled = _messages.IntegerField(7)
parameterMode = _messages.StringField(8)
preserveNulls = _messages.BooleanField(9)
priority = _messages.StringField(10)
query = _messages.StringField(11)
queryParameters = _messages.MessageField('QueryParameter', 12, repeated=True)
schemaUpdateOptions = _messages.StringField(13, repeated=True)
tableDefinitions = _messages.MessageField('TableDefinitionsValue', 14)
useLegacySql = _messages.BooleanField(15)
useQueryCache = _messages.BooleanField(16, default=True)
userDefinedFunctionResources = _messages.MessageField('UserDefinedFunctionResource', 17, repeated=True)
writeDisposition = _messages.StringField(18)
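# Illustrative sketch (not part of the generated code; project, dataset and
# table names are placeholders): a standard-SQL query job that writes its
# result to a destination table, using the fields documented above.
#
#   query_config = JobConfigurationQuery(
#       query='SELECT name, COUNT(*) AS n FROM `example_dataset.example_table` GROUP BY name',
#       useLegacySql=False,
#       destinationTable=TableReference(projectId='example-project',
#                                       datasetId='example_dataset',
#                                       tableId='query_results'),
#       writeDisposition='WRITE_TRUNCATE')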
class JobConfigurationTableCopy(_messages.Message):
"""A JobConfigurationTableCopy object.
Fields:
createDisposition: [Optional] Specifies whether the job is allowed to
create new tables. The following values are supported: CREATE_IF_NEEDED:
If the table does not exist, BigQuery creates the table. CREATE_NEVER:
The table must already exist. If it does not, a 'notFound' error is
returned in the job result. The default value is CREATE_IF_NEEDED.
Creation, truncation and append actions occur as one atomic update upon
job completion.
destinationTable: [Required] The destination table
sourceTable: [Pick one] Source table to copy.
sourceTables: [Pick one] Source tables to copy.
writeDisposition: [Optional] Specifies the action that occurs if the
destination table already exists. The following values are supported:
WRITE_TRUNCATE: If the table already exists, BigQuery overwrites the
table data. WRITE_APPEND: If the table already exists, BigQuery appends
the data to the table. WRITE_EMPTY: If the table already exists and
contains data, a 'duplicate' error is returned in the job result. The
default value is WRITE_EMPTY. Each action is atomic and only occurs if
BigQuery is able to complete the job successfully. Creation, truncation
and append actions occur as one atomic update upon job completion.
"""
createDisposition = _messages.StringField(1)
destinationTable = _messages.MessageField('TableReference', 2)
sourceTable = _messages.MessageField('TableReference', 3)
sourceTables = _messages.MessageField('TableReference', 4, repeated=True)
writeDisposition = _messages.StringField(5)
class JobList(_messages.Message):
"""A JobList object.
Messages:
JobsValueListEntry: A JobsValueListEntry object.
Fields:
etag: A hash of this page of results.
jobs: List of jobs that were requested.
kind: The resource type of the response.
nextPageToken: A token to request the next page of results.
"""
class JobsValueListEntry(_messages.Message):
"""A JobsValueListEntry object.
Fields:
configuration: [Full-projection-only] Specifies the job configuration.
errorResult: A result object that will be present only if the job has
failed.
id: Unique opaque ID of the job.
jobReference: Job reference uniquely identifying the job.
kind: The resource type.
state: Running state of the job. When the state is DONE, errorResult can
be checked to determine whether the job succeeded or failed.
statistics: [Output-only] Information about the job, including starting
time and ending time of the job.
status: [Full-projection-only] Describes the state of the job.
user_email: [Full-projection-only] Email address of the user who ran the
job.
"""
configuration = _messages.MessageField('JobConfiguration', 1)
errorResult = _messages.MessageField('ErrorProto', 2)
id = _messages.StringField(3)
jobReference = _messages.MessageField('JobReference', 4)
kind = _messages.StringField(5, default=u'bigquery#job')
state = _messages.StringField(6)
statistics = _messages.MessageField('JobStatistics', 7)
status = _messages.MessageField('JobStatus', 8)
user_email = _messages.StringField(9)
etag = _messages.StringField(1)
jobs = _messages.MessageField('JobsValueListEntry', 2, repeated=True)
kind = _messages.StringField(3, default=u'bigquery#jobList')
nextPageToken = _messages.StringField(4)
class JobReference(_messages.Message):
"""A JobReference object.
Fields:
jobId: [Required] The ID of the job. The ID must contain only letters
(a-z, A-Z), numbers (0-9), underscores (_), or dashes (-). The maximum
length is 1,024 characters.
projectId: [Required] The ID of the project containing this job.
"""
jobId = _messages.StringField(1)
projectId = _messages.StringField(2)
class JobStatistics(_messages.Message):
"""A JobStatistics object.
Fields:
creationTime: [Output-only] Creation time of this job, in milliseconds
since the epoch. This field will be present on all jobs.
endTime: [Output-only] End time of this job, in milliseconds since the
epoch. This field will be present whenever a job is in the DONE state.
extract: [Output-only] Statistics for an extract job.
load: [Output-only] Statistics for a load job.
query: [Output-only] Statistics for a query job.
startTime: [Output-only] Start time of this job, in milliseconds since the
epoch. This field will be present when the job transitions from the
PENDING state to either RUNNING or DONE.
totalBytesProcessed: [Output-only] [Deprecated] Use the bytes processed in
the query statistics instead.
"""
creationTime = _messages.IntegerField(1)
endTime = _messages.IntegerField(2)
extract = _messages.MessageField('JobStatistics4', 3)
load = _messages.MessageField('JobStatistics3', 4)
query = _messages.MessageField('JobStatistics2', 5)
startTime = _messages.IntegerField(6)
totalBytesProcessed = _messages.IntegerField(7)
class JobStatistics2(_messages.Message):
"""A JobStatistics2 object.
Fields:
billingTier: [Output-only] Billing tier for the job.
cacheHit: [Output-only] Whether the query result was fetched from the
query cache.
numDmlAffectedRows: [Output-only, Experimental] The number of rows
affected by a DML statement. Present only for DML statements INSERT,
UPDATE or DELETE.
queryPlan: [Output-only, Experimental] Describes execution plan for the
query.
referencedTables: [Output-only, Experimental] Referenced tables for the
job. Queries that reference more than 50 tables will not have a complete
list.
schema: [Output-only, Experimental] The schema of the results. Present
only for successful dry run of non-legacy SQL queries.
statementType: [Output-only, Experimental] The type of query statement, if
valid.
totalBytesBilled: [Output-only] Total bytes billed for the job.
totalBytesProcessed: [Output-only] Total bytes processed for the job.
undeclaredQueryParameters: [Output-only, Experimental] Standard SQL only:
list of undeclared query parameters detected during a dry run
validation.
"""
billingTier = _messages.IntegerField(1, variant=_messages.Variant.INT32)
cacheHit = _messages.BooleanField(2)
numDmlAffectedRows = _messages.IntegerField(3)
queryPlan = _messages.MessageField('ExplainQueryStage', 4, repeated=True)
referencedTables = _messages.MessageField('TableReference', 5, repeated=True)
schema = _messages.MessageField('TableSchema', 6)
statementType = _messages.StringField(7)
totalBytesBilled = _messages.IntegerField(8)
totalBytesProcessed = _messages.IntegerField(9)
undeclaredQueryParameters = _messages.MessageField('QueryParameter', 10, repeated=True)
class JobStatistics3(_messages.Message):
"""A JobStatistics3 object.
Fields:
inputFileBytes: [Output-only] Number of bytes of source data in a load
job.
inputFiles: [Output-only] Number of source files in a load job.
outputBytes: [Output-only] Size of the loaded data in bytes. Note that
while a load job is in the running state, this value may change.
outputRows: [Output-only] Number of rows imported in a load job. Note that
while an import job is in the running state, this value may change.
"""
inputFileBytes = _messages.IntegerField(1)
inputFiles = _messages.IntegerField(2)
outputBytes = _messages.IntegerField(3)
outputRows = _messages.IntegerField(4)
class JobStatistics4(_messages.Message):
"""A JobStatistics4 object.
Fields:
destinationUriFileCounts: [Output-only] Number of files per destination
URI or URI pattern specified in the extract configuration. These values
will be in the same order as the URIs specified in the 'destinationUris'
field.
"""
destinationUriFileCounts = _messages.IntegerField(1, repeated=True)
class JobStatus(_messages.Message):
"""A JobStatus object.
Fields:
errorResult: [Output-only] Final error result of the job. If present,
indicates that the job has completed and was unsuccessful.
errors: [Output-only] All errors encountered during the running of the
job. Errors here do not necessarily mean that the job has completed or
was unsuccessful.
state: [Output-only] Running state of the job.
"""
errorResult = _messages.MessageField('ErrorProto', 1)
errors = _messages.MessageField('ErrorProto', 2, repeated=True)
state = _messages.StringField(3)
@encoding.MapUnrecognizedFields('additionalProperties')
class JsonObject(_messages.Message):
"""Represents a single JSON object.
Messages:
AdditionalProperty: An additional property for a JsonObject object.
Fields:
additionalProperties: Additional properties of type JsonObject
"""
class AdditionalProperty(_messages.Message):
"""An additional property for a JsonObject object.
Fields:
key: Name of the additional property.
value: A JsonValue attribute.
"""
key = _messages.StringField(1)
value = _messages.MessageField('JsonValue', 2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
JsonValue = extra_types.JsonValue
class ProjectList(_messages.Message):
"""A ProjectList object.
Messages:
ProjectsValueListEntry: A ProjectsValueListEntry object.
Fields:
etag: A hash of the page of results
kind: The type of list.
nextPageToken: A token to request the next page of results.
projects: Projects to which you have at least READ access.
totalItems: The total number of projects in the list.
"""
class ProjectsValueListEntry(_messages.Message):
"""A ProjectsValueListEntry object.
Fields:
friendlyName: A descriptive name for this project.
id: An opaque ID of this project.
kind: The resource type.
numericId: The numeric ID of this project.
projectReference: A unique reference to this project.
"""
friendlyName = _messages.StringField(1)
id = _messages.StringField(2)
kind = _messages.StringField(3, default=u'bigquery#project')
numericId = _messages.IntegerField(4, variant=_messages.Variant.UINT64)
projectReference = _messages.MessageField('ProjectReference', 5)
etag = _messages.StringField(1)
kind = _messages.StringField(2, default=u'bigquery#projectList')
nextPageToken = _messages.StringField(3)
projects = _messages.MessageField('ProjectsValueListEntry', 4, repeated=True)
totalItems = _messages.IntegerField(5, variant=_messages.Variant.INT32)
class ProjectReference(_messages.Message):
"""A ProjectReference object.
Fields:
projectId: [Required] ID of the project. Can be either the numeric ID or
the assigned ID of the project.
"""
projectId = _messages.StringField(1)
class QueryParameter(_messages.Message):
"""A QueryParameter object.
Fields:
name: [Optional] If unset, this is a positional parameter. Otherwise,
should be unique within a query.
parameterType: [Required] The type of this parameter.
parameterValue: [Required] The value of this parameter.
"""
name = _messages.StringField(1)
parameterType = _messages.MessageField('QueryParameterType', 2)
parameterValue = _messages.MessageField('QueryParameterValue', 3)
class QueryParameterType(_messages.Message):
"""A QueryParameterType object.
Messages:
StructTypesValueListEntry: A StructTypesValueListEntry object.
Fields:
arrayType: [Optional] The type of the array's elements, if this is an
array.
structTypes: [Optional] The types of the fields of this struct, in order,
if this is a struct.
type: [Required] The top level type of this field.
"""
class StructTypesValueListEntry(_messages.Message):
"""A StructTypesValueListEntry object.
Fields:
description: [Optional] Human-oriented description of the field.
name: [Optional] The name of this field.
type: [Required] The type of this field.
"""
description = _messages.StringField(1)
name = _messages.StringField(2)
type = _messages.MessageField('QueryParameterType', 3)
arrayType = _messages.MessageField('QueryParameterType', 1)
structTypes = _messages.MessageField('StructTypesValueListEntry', 2, repeated=True)
type = _messages.StringField(3)
class QueryParameterValue(_messages.Message):
"""A QueryParameterValue object.
Messages:
StructValuesValue: [Optional] The struct field values, in order of the
struct type's declaration.
Fields:
arrayValues: [Optional] The array values, if this is an array type.
structValues: [Optional] The struct field values, in order of the struct
type's declaration.
value: [Optional] The value of this value, if a simple scalar type.
"""
@encoding.MapUnrecognizedFields('additionalProperties')
class StructValuesValue(_messages.Message):
"""[Optional] The struct field values, in order of the struct type's
declaration.
Messages:
AdditionalProperty: An additional property for a StructValuesValue
object.
Fields:
additionalProperties: Additional properties of type StructValuesValue
"""
class AdditionalProperty(_messages.Message):
"""An additional property for a StructValuesValue object.
Fields:
key: Name of the additional property.
value: A QueryParameterValue attribute.
"""
key = _messages.StringField(1)
value = _messages.MessageField('QueryParameterValue', 2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
arrayValues = _messages.MessageField('QueryParameterValue', 1, repeated=True)
structValues = _messages.MessageField('StructValuesValue', 2)
value = _messages.StringField(3)
class QueryRequest(_messages.Message):
"""A QueryRequest object.
Fields:
defaultDataset: [Optional] Specifies the default datasetId and projectId
to assume for any unqualified table names in the query. If not set, all
table names in the query string must be qualified in the format
'datasetId.tableId'.
dryRun: [Optional] If set to true, BigQuery doesn't run the job. Instead,
if the query is valid, BigQuery returns statistics about the job such as
how many bytes would be processed. If the query is invalid, an error
returns. The default value is false.
kind: The resource type of the request.
maxResults: [Optional] The maximum number of rows of data to return per
page of results. Setting this flag to a small value such as 1000 and
then paging through results might improve reliability when the query
result set is large. In addition to this limit, responses are also
limited to 10 MB. By default, there is no maximum row count, and only
the byte limit applies.
parameterMode: [Experimental] Standard SQL only. Set to POSITIONAL to use
positional (?) query parameters or to NAMED to use named (@myparam)
query parameters in this query.
preserveNulls: [Deprecated] This property is deprecated.
query: [Required] A query string, following the BigQuery query syntax, of
the query to execute. Example: "SELECT count(f1) FROM
[myProjectId:myDatasetId.myTableId]".
queryParameters: [Experimental] Query parameters for Standard SQL queries.
timeoutMs: [Optional] How long to wait for the query to complete, in
milliseconds, before the request times out and returns. Note that this
is only a timeout for the request, not the query. If the query takes
longer to run than the timeout value, the call returns without any
results and with the 'jobComplete' flag set to false. You can call
GetQueryResults() to wait for the query to complete and read the
results. The default value is 10000 milliseconds (10 seconds).
useLegacySql: Specifies whether to use BigQuery's legacy SQL dialect for
this query. The default value is true. If set to false, the query will
use BigQuery's standard SQL: https://cloud.google.com/bigquery/sql-
reference/ When useLegacySql is set to false, the values of
allowLargeResults and flattenResults are ignored; query will be run as
if allowLargeResults is true and flattenResults is false.
useQueryCache: [Optional] Whether to look for the result in the query
cache. The query cache is a best-effort cache that will be flushed
whenever tables in the query are modified. The default value is true.
"""
defaultDataset = _messages.MessageField('DatasetReference', 1)
dryRun = _messages.BooleanField(2)
kind = _messages.StringField(3, default=u'bigquery#queryRequest')
maxResults = _messages.IntegerField(4, variant=_messages.Variant.UINT32)
parameterMode = _messages.StringField(5)
preserveNulls = _messages.BooleanField(6)
query = _messages.StringField(7)
queryParameters = _messages.MessageField('QueryParameter', 8, repeated=True)
timeoutMs = _messages.IntegerField(9, variant=_messages.Variant.UINT32)
useLegacySql = _messages.BooleanField(10, default=True)
useQueryCache = _messages.BooleanField(11, default=True)
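# Illustrative sketch (not part of the generated code): a synchronous query
# request with a named query parameter, matching the parameterMode and
# queryParameters fields above. The public Shakespeare sample table is used
# only as an example; all values are placeholders.
#
#   request = QueryRequest(
#       query='SELECT word FROM `bigquery-public-data.samples.shakespeare` '
#             'WHERE corpus = @corpus LIMIT 10',
#       useLegacySql=False,
#       parameterMode='NAMED',
#       queryParameters=[QueryParameter(
#           name='corpus',
#           parameterType=QueryParameterType(type='STRING'),
#           parameterValue=QueryParameterValue(value='hamlet'))],
#       timeoutMs=10000)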
class QueryResponse(_messages.Message):
"""A QueryResponse object.
Fields:
cacheHit: Whether the query result was fetched from the query cache.
errors: [Output-only] All errors and warnings encountered during the
running of the job. Errors here do not necessarily mean that the job has
completed or was unsuccessful.
jobComplete: Whether the query has completed or not. If rows or totalRows
are present, this will always be true. If this is false, totalRows will
not be available.
jobReference: Reference to the Job that was created to run the query. This
field will be present even if the original request timed out, in which
case GetQueryResults can be used to read the results once the query has
completed. Since this API only returns the first page of results,
subsequent pages can be fetched via the same mechanism
(GetQueryResults).
kind: The resource type.
numDmlAffectedRows: [Output-only, Experimental] The number of rows
affected by a DML statement. Present only for DML statements INSERT,
UPDATE or DELETE.
pageToken: A token used for paging results.
rows: An object with as many results as can be contained within the
maximum permitted reply size. To get any additional rows, you can call
GetQueryResults and specify the jobReference returned above.
schema: The schema of the results. Present only when the query completes
successfully.
totalBytesProcessed: The total number of bytes processed for this query.
If this query was a dry run, this is the number of bytes that would be
processed if the query were run.
totalRows: The total number of rows in the complete query result set,
which can be more than the number of rows in this single page of
results.
"""
cacheHit = _messages.BooleanField(1)
errors = _messages.MessageField('ErrorProto', 2, repeated=True)
jobComplete = _messages.BooleanField(3)
jobReference = _messages.MessageField('JobReference', 4)
kind = _messages.StringField(5, default=u'bigquery#queryResponse')
numDmlAffectedRows = _messages.IntegerField(6)
pageToken = _messages.StringField(7)
rows = _messages.MessageField('TableRow', 8, repeated=True)
schema = _messages.MessageField('TableSchema', 9)
totalBytesProcessed = _messages.IntegerField(10)
totalRows = _messages.IntegerField(11, variant=_messages.Variant.UINT64)
class StandardQueryParameters(_messages.Message):
"""Query parameters accepted by all methods.
Enums:
AltValueValuesEnum: Data format for the response.
Fields:
alt: Data format for the response.
fields: Selector specifying which fields to include in a partial response.
key: API key. Your API key identifies your project and provides you with
API access, quota, and reports. Required unless you provide an OAuth 2.0
token.
oauth_token: OAuth 2.0 token for the current user.
prettyPrint: Returns response with indentations and line breaks.
quotaUser: Available to use for quota purposes for server-side
applications. Can be any arbitrary string assigned to a user, but should
not exceed 40 characters. Overrides userIp if both are provided.
trace: A tracing token of the form "token:<tokenid>" to include in api
requests.
userIp: IP address of the site where the request originates. Use this if
you want to enforce per-user limits.
"""
class AltValueValuesEnum(_messages.Enum):
"""Data format for the response.
Values:
json: Responses with Content-Type of application/json
"""
json = 0
alt = _messages.EnumField('AltValueValuesEnum', 1, default=u'json')
fields = _messages.StringField(2)
key = _messages.StringField(3)
oauth_token = _messages.StringField(4)
prettyPrint = _messages.BooleanField(5, default=True)
quotaUser = _messages.StringField(6)
trace = _messages.StringField(7)
userIp = _messages.StringField(8)
class Streamingbuffer(_messages.Message):
"""A Streamingbuffer object.
Fields:
estimatedBytes: [Output-only] A lower-bound estimate of the number of
bytes currently in the streaming buffer.
estimatedRows: [Output-only] A lower-bound estimate of the number of rows
currently in the streaming buffer.
oldestEntryTime: [Output-only] Contains the timestamp of the oldest entry
in the streaming buffer, in milliseconds since the epoch, if the
streaming buffer is available.
"""
estimatedBytes = _messages.IntegerField(1, variant=_messages.Variant.UINT64)
estimatedRows = _messages.IntegerField(2, variant=_messages.Variant.UINT64)
oldestEntryTime = _messages.IntegerField(3, variant=_messages.Variant.UINT64)
class Table(_messages.Message):
"""A Table object.
Messages:
LabelsValue: [Experimental] The labels associated with this table. You can
use these to organize and group your tables. Label keys and values can
be no longer than 63 characters, can only contain lowercase letters,
numeric characters, underscores and dashes. International characters are
allowed. Label values are optional. Label keys must start with a letter
and each label in the list must have a different key.
Fields:
creationTime: [Output-only] The time when this table was created, in
milliseconds since the epoch.
description: [Optional] A user-friendly description of this table.
etag: [Output-only] A hash of this resource.
expirationTime: [Optional] The time when this table expires, in
milliseconds since the epoch. If not present, the table will persist
indefinitely. Expired tables will be deleted and their storage
reclaimed.
externalDataConfiguration: [Optional] Describes the data format, location,
and other properties of a table stored outside of BigQuery. By defining
these properties, the data source can then be queried as if it were a
standard BigQuery table.
friendlyName: [Optional] A descriptive name for this table.
id: [Output-only] An opaque ID uniquely identifying the table.
kind: [Output-only] The type of the resource.
labels: [Experimental] The labels associated with this table. You can use
these to organize and group your tables. Label keys and values can be no
longer than 63 characters, can only contain lowercase letters, numeric
characters, underscores and dashes. International characters are
allowed. Label values are optional. Label keys must start with a letter
and each label in the list must have a different key.
lastModifiedTime: [Output-only] The time when this table was last
modified, in milliseconds since the epoch.
location: [Output-only] The geographic location where the table resides.
This value is inherited from the dataset.
numBytes: [Output-only] The size of this table in bytes, excluding any
data in the streaming buffer.
numLongTermBytes: [Output-only] The number of bytes in the table that are
considered "long-term storage".
numRows: [Output-only] The number of rows of data in this table, excluding
any data in the streaming buffer.
schema: [Optional] Describes the schema of this table.
selfLink: [Output-only] A URL that can be used to access this resource
again.
streamingBuffer: [Output-only] Contains information regarding this table's
streaming buffer, if one is present. This field will be absent if the
table is not being streamed to or if there is no data in the streaming
buffer.
tableReference: [Required] Reference describing the ID of this table.
timePartitioning: [Experimental] If specified, configures time-based
partitioning for this table.
type: [Output-only] Describes the table type. The following values are
supported: TABLE: A normal BigQuery table. VIEW: A virtual table defined
by a SQL query. EXTERNAL: A table that references data stored in an
external storage system, such as Google Cloud Storage. The default value
is TABLE.
view: [Optional] The view definition.
"""
@encoding.MapUnrecognizedFields('additionalProperties')
class LabelsValue(_messages.Message):
"""[Experimental] The labels associated with this table. You can use these
to organize and group your tables. Label keys and values can be no longer
than 63 characters, can only contain lowercase letters, numeric
characters, underscores and dashes. International characters are allowed.
Label values are optional. Label keys must start with a letter and each
label in the list must have a different key.
Messages:
AdditionalProperty: An additional property for a LabelsValue object.
Fields:
additionalProperties: Additional properties of type LabelsValue
"""
class AdditionalProperty(_messages.Message):
"""An additional property for a LabelsValue object.
Fields:
key: Name of the additional property.
value: A string attribute.
"""
key = _messages.StringField(1)
value = _messages.StringField(2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
creationTime = _messages.IntegerField(1)
description = _messages.StringField(2)
etag = _messages.StringField(3)
expirationTime = _messages.IntegerField(4)
externalDataConfiguration = _messages.MessageField('ExternalDataConfiguration', 5)
friendlyName = _messages.StringField(6)
id = _messages.StringField(7)
kind = _messages.StringField(8, default=u'bigquery#table')
labels = _messages.MessageField('LabelsValue', 9)
lastModifiedTime = _messages.IntegerField(10, variant=_messages.Variant.UINT64)
location = _messages.StringField(11)
numBytes = _messages.IntegerField(12)
numLongTermBytes = _messages.IntegerField(13)
numRows = _messages.IntegerField(14, variant=_messages.Variant.UINT64)
schema = _messages.MessageField('TableSchema', 15)
selfLink = _messages.StringField(16)
streamingBuffer = _messages.MessageField('Streamingbuffer', 17)
tableReference = _messages.MessageField('TableReference', 18)
timePartitioning = _messages.MessageField('TimePartitioning', 19)
type = _messages.StringField(20)
view = _messages.MessageField('ViewDefinition', 21)
class TableCell(_messages.Message):
"""A TableCell object.
Fields:
    v: An extra_types.JsonValue attribute.
"""
v = _messages.MessageField('extra_types.JsonValue', 1)
class TableDataInsertAllRequest(_messages.Message):
"""A TableDataInsertAllRequest object.
Messages:
RowsValueListEntry: A RowsValueListEntry object.
Fields:
ignoreUnknownValues: [Optional] Accept rows that contain values that do
not match the schema. The unknown values are ignored. Default is false,
which treats unknown values as errors.
kind: The resource type of the response.
rows: The rows to insert.
skipInvalidRows: [Optional] Insert all valid rows of a request, even if
invalid rows exist. The default value is false, which causes the entire
request to fail if any invalid rows exist.
templateSuffix: [Experimental] If specified, treats the destination table
as a base template, and inserts the rows into an instance table named
"{destination}{templateSuffix}". BigQuery will manage creation of the
instance table, using the schema of the base template table. See
https://cloud.google.com/bigquery/streaming-data-into-bigquery#template-
      tables for considerations when working with template tables.
"""
class RowsValueListEntry(_messages.Message):
"""A RowsValueListEntry object.
Fields:
insertId: [Optional] A unique ID for each row. BigQuery uses this
property to detect duplicate insertion requests on a best-effort
basis.
json: [Required] A JSON object that contains a row of data. The object's
properties and values must match the destination table's schema.
"""
insertId = _messages.StringField(1)
json = _messages.MessageField('JsonObject', 2)
ignoreUnknownValues = _messages.BooleanField(1)
kind = _messages.StringField(2, default=u'bigquery#tableDataInsertAllRequest')
rows = _messages.MessageField('RowsValueListEntry', 3, repeated=True)
skipInvalidRows = _messages.BooleanField(4)
templateSuffix = _messages.StringField(5)
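# Illustrative sketch (not part of the generated code; field values are
# placeholders): a streaming-insert payload in which insertId gives BigQuery a
# best-effort de-duplication key, as described above.
#
#   insert_request = TableDataInsertAllRequest(
#       skipInvalidRows=True,
#       rows=[TableDataInsertAllRequest.RowsValueListEntry(
#           insertId='row-0001',
#           json=JsonObject(additionalProperties=[
#               JsonObject.AdditionalProperty(
#                   key='name',
#                   value=JsonValue(string_value='alice'))]))])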
class TableDataInsertAllResponse(_messages.Message):
"""A TableDataInsertAllResponse object.
Messages:
    InsertErrorsValueListEntry: An InsertErrorsValueListEntry object.
Fields:
insertErrors: An array of errors for rows that were not inserted.
kind: The resource type of the response.
"""
class InsertErrorsValueListEntry(_messages.Message):
"""A InsertErrorsValueListEntry object.
Fields:
errors: Error information for the row indicated by the index property.
index: The index of the row that error applies to.
"""
errors = _messages.MessageField('ErrorProto', 1, repeated=True)
index = _messages.IntegerField(2, variant=_messages.Variant.UINT32)
insertErrors = _messages.MessageField('InsertErrorsValueListEntry', 1, repeated=True)
kind = _messages.StringField(2, default=u'bigquery#tableDataInsertAllResponse')
class TableDataList(_messages.Message):
"""A TableDataList object.
Fields:
etag: A hash of this page of results.
kind: The resource type of the response.
pageToken: A token used for paging results. Providing this token instead
of the startIndex parameter can help you retrieve stable results when an
underlying table is changing.
rows: Rows of results.
totalRows: The total number of rows in the complete table.
"""
etag = _messages.StringField(1)
kind = _messages.StringField(2, default=u'bigquery#tableDataList')
pageToken = _messages.StringField(3)
rows = _messages.MessageField('TableRow', 4, repeated=True)
totalRows = _messages.IntegerField(5)
class TableFieldSchema(_messages.Message):
"""A TableFieldSchema object.
Fields:
description: [Optional] The field description. The maximum length is 16K
characters.
fields: [Optional] Describes the nested schema fields if the type property
is set to RECORD.
mode: [Optional] The field mode. Possible values include NULLABLE,
REQUIRED and REPEATED. The default value is NULLABLE.
name: [Required] The field name. The name must contain only letters (a-z,
A-Z), numbers (0-9), or underscores (_), and must start with a letter or
underscore. The maximum length is 128 characters.
type: [Required] The field data type. Possible values include STRING,
BYTES, INTEGER, INT64 (same as INTEGER), FLOAT, FLOAT64 (same as FLOAT),
BOOLEAN, BOOL (same as BOOLEAN), TIMESTAMP, DATE, TIME, DATETIME, RECORD
(where RECORD indicates that the field contains a nested schema) or
STRUCT (same as RECORD).
"""
description = _messages.StringField(1)
fields = _messages.MessageField('TableFieldSchema', 2, repeated=True)
mode = _messages.StringField(3)
name = _messages.StringField(4)
type = _messages.StringField(5)
class TableList(_messages.Message):
"""A TableList object.
Messages:
TablesValueListEntry: A TablesValueListEntry object.
Fields:
etag: A hash of this page of results.
kind: The type of list.
nextPageToken: A token to request the next page of results.
tables: Tables in the requested dataset.
totalItems: The total number of tables in the dataset.
"""
class TablesValueListEntry(_messages.Message):
"""A TablesValueListEntry object.
Messages:
LabelsValue: [Experimental] The labels associated with this table. You
can use these to organize and group your tables.
ViewValue: Additional details for a view.
Fields:
friendlyName: The user-friendly name for this table.
id: An opaque ID of the table
kind: The resource type.
labels: [Experimental] The labels associated with this table. You can
use these to organize and group your tables.
tableReference: A reference uniquely identifying the table.
type: The type of table. Possible values are: TABLE, VIEW.
view: Additional details for a view.
"""
@encoding.MapUnrecognizedFields('additionalProperties')
class LabelsValue(_messages.Message):
"""[Experimental] The labels associated with this table. You can use
these to organize and group your tables.
Messages:
AdditionalProperty: An additional property for a LabelsValue object.
Fields:
additionalProperties: Additional properties of type LabelsValue
"""
class AdditionalProperty(_messages.Message):
"""An additional property for a LabelsValue object.
Fields:
key: Name of the additional property.
value: A string attribute.
"""
key = _messages.StringField(1)
value = _messages.StringField(2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
class ViewValue(_messages.Message):
"""Additional details for a view.
Fields:
useLegacySql: True if view is defined in legacy SQL dialect, false if
in standard SQL.
"""
useLegacySql = _messages.BooleanField(1)
friendlyName = _messages.StringField(1)
id = _messages.StringField(2)
kind = _messages.StringField(3, default=u'bigquery#table')
labels = _messages.MessageField('LabelsValue', 4)
tableReference = _messages.MessageField('TableReference', 5)
type = _messages.StringField(6)
view = _messages.MessageField('ViewValue', 7)
etag = _messages.StringField(1)
kind = _messages.StringField(2, default=u'bigquery#tableList')
nextPageToken = _messages.StringField(3)
tables = _messages.MessageField('TablesValueListEntry', 4, repeated=True)
totalItems = _messages.IntegerField(5, variant=_messages.Variant.INT32)
class TableReference(_messages.Message):
"""A TableReference object.
Fields:
datasetId: [Required] The ID of the dataset containing this table.
projectId: [Required] The ID of the project containing this table.
tableId: [Required] The ID of the table. The ID must contain only letters
(a-z, A-Z), numbers (0-9), or underscores (_). The maximum length is
1,024 characters.
"""
datasetId = _messages.StringField(1)
projectId = _messages.StringField(2)
tableId = _messages.StringField(3)
class TableRow(_messages.Message):
"""A TableRow object.
Fields:
f: Represents a single row in the result set, consisting of one or more
fields.
"""
f = _messages.MessageField('TableCell', 1, repeated=True)
class TableSchema(_messages.Message):
"""A TableSchema object.
Fields:
fields: Describes the fields in a table.
"""
fields = _messages.MessageField('TableFieldSchema', 1, repeated=True)
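# Illustrative sketch (not part of the generated code; the field names are
# placeholders): a schema with one nested RECORD field, built from the
# TableFieldSchema and TableSchema messages above.
#
#   schema = TableSchema(fields=[
#       TableFieldSchema(name='id', type='INTEGER', mode='REQUIRED'),
#       TableFieldSchema(name='address', type='RECORD', mode='NULLABLE',
#                        fields=[TableFieldSchema(name='city', type='STRING'),
#                                TableFieldSchema(name='zip', type='STRING')])])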
class TimePartitioning(_messages.Message):
"""A TimePartitioning object.
Fields:
expirationMs: [Optional] Number of milliseconds for which to keep the
storage for a partition.
type: [Required] The only type supported is DAY, which will generate one
partition per day based on data loading time.
"""
expirationMs = _messages.IntegerField(1)
type = _messages.StringField(2)
class UserDefinedFunctionResource(_messages.Message):
"""A UserDefinedFunctionResource object.
Fields:
inlineCode: [Pick one] An inline resource that contains code for a user-
      defined function (UDF). Providing an inline code resource is equivalent
to providing a URI for a file containing the same code.
resourceUri: [Pick one] A code resource to load from a Google Cloud
Storage URI (gs://bucket/path).
"""
inlineCode = _messages.StringField(1)
resourceUri = _messages.StringField(2)
class ViewDefinition(_messages.Message):
"""A ViewDefinition object.
Fields:
query: [Required] A query that BigQuery executes when the view is
referenced.
useLegacySql: Specifies whether to use BigQuery's legacy SQL for this
view. The default value is true. If set to false, the view will use
BigQuery's standard SQL: https://cloud.google.com/bigquery/sql-
reference/ Queries and views that reference this view must use the same
flag value.
userDefinedFunctionResources: [Experimental] Describes user-defined
function resources used in the query.
"""
query = _messages.StringField(1)
useLegacySql = _messages.BooleanField(2)
userDefinedFunctionResources = _messages.MessageField('UserDefinedFunctionResource', 3, repeated=True)
|
[
"[email protected]"
] | |
f3d50b3681586ff978d649daee57b59e52e2ef91
|
ea5034be4ef4a58ffeaccb5342754efa47db64a3
|
/env/Lib/site-packages/PyInstaller/utils/_gitrevision.py
|
d8ddc6540d4a70f6310e3f7a3858228ac47d47a3
|
[] |
no_license
|
Airjumper/EX1
|
861fdd6b45f14a61eb37141a6b8c88a9b6313020
|
484190c0d3eb92f77136cfd9159c95bae0ebb403
|
refs/heads/master
| 2023-05-28T12:13:41.227063 | 2023-05-02T04:37:25 | 2023-05-02T04:37:25 | 263,378,611 | 0 | 1 | null | 2020-06-07T14:20:47 | 2020-05-12T15:37:27 |
Python
|
UTF-8
|
Python
| false | false | 572 |
py
|
#
# The content of this file will be filled in with meaningful data
# when creating an archive using `git archive` or by downloading an
# archive from github, e.g. from github.com/.../archive/develop.zip
#
rev = "253feb7e16" # abbreviated commit hash
commit = "253feb7e16cd3a6a6d3ea2fb80c3491d30671e39" # commit hash
date = "2020-06-08 17:27:14 +1000" # commit date
author = "Legorooj <[email protected]>"
ref_names = "HEAD -> develop" # incl. current branch
commit_message = """hooks: add hook for wcwidth, which is breaking CI because of missing files
"""
|
[
"[email protected]"
] | |
af107338dd9af11929b71bfaabf4b14394afe3e3
|
387392395d3b08a9b2980a14ef0a8c69698f02ee
|
/microblog/app/models.py
|
f95b03bdab81a53236278fc8c99cefa6a1ba2da1
|
[] |
no_license
|
keirsjd01/microblog
|
471596f55c52b97e5c21f77d6d9a59bf05f17963
|
54b459042ef0acc4ab65fca427b5f38c1464849b
|
refs/heads/master
| 2021-05-05T00:58:45.677731 | 2018-02-08T12:56:16 | 2018-02-08T12:56:16 | 119,525,103 | 0 | 0 | null | 2018-02-08T12:56:17 | 2018-01-30T11:13:01 |
Python
|
UTF-8
|
Python
| false | false | 2,159 |
py
|
from werkzeug.security import generate_password_hash, check_password_hash
from datetime import datetime
from app import login, db
from flask_login import UserMixin
from hashlib import md5
@login.user_loader
def load_user(id):
return User.query.get(int(id))
class User(UserMixin, db.Model):
id = db.Column(db.Integer, primary_key=True)
username = db.Column(db.String(64), index=True, unique=True)
email = db.Column(db.String(120), index=True, unique=True)
password_hash = db.Column(db.String(128))
posts = db.relationship('Post', backref='author', lazy='dynamic')
def __repr__(self):
return '<User {}>'.format(self.username)
def set_password(self, password):
self.password_hash = generate_password_hash(password)
def check_password(self, password):
return check_password_hash(self.password_hash, password)
def avatar(self, size):
digest = md5(self.email.lower().encode('utf-8')).hexdigest()
return 'https://www.gravatar.com/avatar/{}?d=identicon&s={}'.format(
digest, size)
about_me = db.Column(db.String(140))
last_seen = db.Column(db.DateTime, default=datetime.utcnow)
''' followed = db.relationship(
'User', secondary=followers,
primaryjoin=(followers.c.follower_id == id),
secondaryjoin=(followers.c.followed_id == id),
backref=db.backref('followers', lazy='dynamic'), lazy='dynamic')
'''
class Post(db.Model):
id = db.Column(db.Integer, primary_key=True)
body = db.Column(db.String(140))
timestamp = db.Column(db.DateTime, index=True, default=datetime.utcnow)
user_id = db.Column(db.Integer, db.ForeignKey('user.id'))
def __repr__(self):
return '<Post {}>'.format(self.body)
class Reviews(db.Model):
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(40))
title = db.Column(db.String(40))
review = db.Column(db.String(140))
''' user_id = db.Column(db.Integer, db.ForeignKey('user.id'))'''
'''followers = db.Table('followers',
db.Column('follower_id', db.Integer, db.ForeignKey('user.id')),
db.Column('followed_id', db.Integer, db.ForeignKey('user.id'))
) '''
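# Illustrative usage sketch (not part of the tutorial code): it assumes an
# application context and an initialised database, so it is left as a comment.
# The 'author' keyword comes from the backref declared on User.posts.
#
#   u = User(username='susan', email='susan@example.com')
#   u.set_password('cat')
#   u.check_password('cat')   # True
#   u.avatar(128)             # Gravatar URL derived from the MD5 of the email
#   db.session.add(u)
#   p = Post(body='my first post!', author=u)
#   db.session.add(p)
#   db.session.commit()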
|
[
"[email protected]"
] | |
726083e657d4bfe7dfdd3ffc0d4860c40b2161b0
|
98d9305b1717642bcfb842eecd84d63b6eeaf759
|
/Funtions/Passing_Information.py
|
95f7621fc81d57dd2ebdb67a24a82da35ae5f6f4
|
[] |
no_license
|
er-aditi/Learning-Python
|
5ceb020f4df8db9e34df78edfaecca3e1854c8a9
|
297eda435ee2e1cee643f94ea4c5de6a82e3c8a7
|
refs/heads/master
| 2020-03-24T17:22:22.129081 | 2019-06-19T05:47:26 | 2019-06-19T05:47:26 | 142,856,993 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 106 |
py
|
def greet_user(username):
print("Hello! " + username + ". ")
greet_user('jess')
greet_user('sarah')
|
[
"[email protected]"
] | |
ba1c4757ef1f8b196e08821d7a2d5b70c0bc2c0d
|
416f6e33fd290302a00304fafd064906733cc55f
|
/stock_screening_2020y3q.py
|
10bcf28847b5612641a5460579c60404594472cc
|
[
"Apache-2.0"
] |
permissive
|
jongheeyun/stock_screening
|
c3b8874502070a66132a7b88e8d723ba07510ebb
|
b7a0d2a82436e59fe8d4b89d2900c62252dbd46c
|
refs/heads/master
| 2023-02-20T09:15:25.443675 | 2021-01-16T06:11:14 | 2021-01-16T06:11:14 | 299,737,334 | 1 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 10,636 |
py
|
from typing import List, Any, Union
import requests
from bs4 import BeautifulSoup
import pandas as pd
from openpyxl import load_workbook
from openpyxl import Workbook
def create_column():
url = "https://finance.naver.com/item/main.nhn?code=005930" # 삼성전자
res = requests.get(url)
html = BeautifulSoup(res.text, "html.parser")
cop_anal = html.find("div", {"class", "section cop_analysis"})
sub_section_div = cop_anal.find("div", {"class", "sub_section"})
ifrs_table = sub_section_div.find("table", {"class", "tb_type1 tb_num tb_type1_ifrs"})
    # Year information (report-period header row)
thead = ifrs_table.find("thead")
all_thead_tr = thead.find_all("tr")
all_thead_tr_th = all_thead_tr[1].find_all("th")
    # Build the output column headers
columns = []
columns.append("기업명")
columns.append("시장")
columns.append("연간 영업이익: " + all_thead_tr_th[2].get_text().strip())
columns.append(all_thead_tr_th[3].get_text().strip())
columns.append("YoY(%)")
columns.append("분기 매출 " + all_thead_tr_th[8].get_text().strip())
columns.append(all_thead_tr_th[9].get_text().strip())
columns.append("YoY(%)")
columns.append("QoQ(%)")
columns.append("분기 영업이익" + all_thead_tr_th[8].get_text().strip())
columns.append(all_thead_tr_th[9].get_text().strip())
columns.append("YoY(%)")
columns.append("QoQ(%)")
columns.append("시가총액(억)")
columns.append("목표시총(억)")
columns.append("상승여력(%)")
columns.append("배당율(%)")
columns.append("멀티플")
columns.append("업종")
columns.append("기업개요")
return columns
def get_multiple_value(name, category):
name_multiple_tb = {
"코오롱글로벌": 5,
"뷰웍스": 15,
"스튜디오드래곤": 15,
"에코마케팅": 15,
"카카오": 30,
"NAVER": 30,
"에코프로비엠": 20,
"엘앤에프": 20,
"포스코케미칼": 20,
"한컴위드": 15,
"한글과컴퓨터": 15,
}
category_multiple_tb = {
"조선": 6,
"증권": 5,
"은행": 5,
"해운사": 7,
"건설": 8,
"호텔,레스토랑,레저": 7,
"IT서비스": 10,
"양방향미디어와서비스": 15,
"통신장비": 10,
"게임엔터테인먼트": 15,
"건강관리장비와용품": 15,
"소프트웨어": 15,
"제약": 20,
}
korean_multiple = 10
if name in name_multiple_tb:
return name_multiple_tb.get(name)
if category in category_multiple_tb:
return category_multiple_tb.get(category)
return korean_multiple
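# Quick illustration of the lookup order above (the unmatched company name is
# just an example): an exact name match wins, then the sector table, then the
# default of 10.
#
#   get_multiple_value("카카오", "양방향미디어와서비스")  # -> 30 (name table)
#   get_multiple_value("어떤기업", "증권")                # -> 5  (sector table)
#   get_multiple_value("어떤기업", "기타")                # -> 10 (default)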
columns = create_column()
val_result_wb = Workbook()
val_result_ws = val_result_wb.active
val_result_ws.append(columns)
# The KRX listing page returns an HTML table, so read it with read_html.
# Convert each stock code to a 6-character string, left-padded with zeros.
stock_df = pd.read_html('http://kind.krx.co.kr/corpgeneral/corpList.do?method=download&searchType=13', header=0)[0]
stock_df['종목코드'] = stock_df['종목코드'].map(lambda x: f'{x:0>6}')
stock_arr = stock_df.to_numpy()
for stock_id in range(0, len(stock_arr)):
    val_result_ws.append([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]) # placeholder row (initial values)
    val_result_ws.cell(stock_id + 2, 1, stock_arr[stock_id][0]) # company name
url = "https://finance.naver.com/item/main.nhn?code=" + stock_arr[stock_id][1]
res = requests.get(url)
html = BeautifulSoup(res.text, "html.parser")
# kospi or kosdaq
stock_type = "코넥스"
new_totalinfo_div = html.find("div", {"class", "new_totalinfo"})
if not new_totalinfo_div:
continue
dl_totalinfo = new_totalinfo_div.find("dl", {"class", "blind"})
dd_totalinfo_all = dl_totalinfo.find_all("dd")
dd_text = dd_totalinfo_all[2].get_text().strip()
if "코스닥" in dd_text:
stock_type = "코스닥"
elif "코스피" in dd_text:
stock_type = "코스피"
cop_anal = html.find("div", {"class", "section cop_analysis"})
if not cop_anal:
continue
sub_section_div = cop_anal.find("div", {"class", "sub_section"})
ifrs_table = sub_section_div.find(
"table", {"class", "tb_type1 tb_num tb_type1_ifrs"})
tbody = ifrs_table.find("tbody")
all_tr = tbody.find_all("tr")
if len(all_tr) < 14:
continue
    # Sales row: 4 annual and 6 quarterly figures
sales = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
sales_all_td = all_tr[0].find_all("td")
for i in range(0, len(sales)):
sales_text = sales_all_td[i].get_text().strip().replace(",", "")
if sales_text:
if sales_text[0] == '-':
if len(sales_text) > 1:
sales_text = sales_text[1:]
sales[i] = int(sales_text) * -1
else:
sales[i] = int(sales_text)
    # Operating profit row: 4 annual and 6 quarterly figures
profits = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
profits_all_td = all_tr[1].find_all("td")
for i in range(0, len(profits)):
profit_text = profits_all_td[i].get_text().strip().replace(",", "")
if profit_text:
if profit_text[0] == '-':
if len(profit_text) > 1:
profit_text = profit_text[1:]
profits[i] = int(profit_text) * -1
else:
profits[i] = int(profit_text)
    # Dividend yield
dividend_rate = 0.0
dividend_rate_td = all_tr[14].find_all("td")
dividend_rate_text = dividend_rate_td[3].get_text().strip()
if dividend_rate_text and dividend_rate_text[0] != '-':
dividend_rate = float(dividend_rate_text)
elif dividend_rate_td[2].get_text().strip():
dividend_rate_text = dividend_rate_td[2].get_text().strip()
if dividend_rate_text[0] != '-':
dividend_rate = float(dividend_rate_text)
    # Market cap (in units of 100 million KRW)
market_cap = 0
cur_price = 0
trade_compare_div = html.find("div", {"class", "section trade_compare"})
    if trade_compare_div: # use the value already computed on the page, if present
compare_table = trade_compare_div.find(
"table", {"class", "tb_type1 tb_num"})
tbody = compare_table.find("tbody")
all_tr = tbody.find_all("tr")
cur_price_text = all_tr[0].find("td").get_text().strip().replace(",", "")
        if cur_price_text: # current price
cur_price = int(cur_price_text)
if len(all_tr) > 3:
market_cap_text = all_tr[3].find("td").get_text().strip().replace(",", "")
if market_cap_text:
market_cap = int(market_cap_text)
if market_cap == 0:
tab_con1_div = html.find("div", {"class", "tab_con1"})
stock_total_table = tab_con1_div.find("table")
stock_total_tr = stock_total_table.find_all("tr")
stock_total_text = stock_total_tr[2].find("td").get_text().strip().replace(",", "")
if stock_total_text:
stock_total = int(stock_total_text)
            market_cap = round(cur_price * stock_total / 100000000) # convert to 100M-KRW units
if market_cap == 0:
continue
    # Business sector
business_category = stock_arr[stock_id][2]
trade_compare = html.find("div", {"class", "section trade_compare"})
if trade_compare:
trade_compare = trade_compare.find("h4", {"class", "h_sub sub_tit7"})
trade_compare = trade_compare.find("a")
if trade_compare.get_text().strip():
business_category = trade_compare.get_text().strip()
    # Valuation multiple chosen by company name or sector
multiple = get_multiple_value(stock_arr[stock_id][0], business_category)
    # Target market cap base: prefer this year's expected operating profit -> next quarter's expected operating profit -> (last two quarters) * 2 -> last year's operating profit
base_val = profits[2]
if profits[3] > 0:
base_val = profits[3]
elif profits[9] > 0:
base_val = profits[7] + profits[8] + (profits[9] * 2)
elif profits[7] > 0 and profits[8] > 0:
base_val = (profits[7] + profits[8]) * 2
expected_market_cap = base_val * multiple
    # Upside potential (%)
valuation = round((int(expected_market_cap) / int(market_cap) - 1.0) * 100)
if valuation < 0:
valuation = 0
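    # Illustrative arithmetic (hypothetical numbers): expected_market_cap=1200 and market_cap=800
    # give round((1200 / 800 - 1.0) * 100) = 50, i.e. 50% upside.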
year_profits_yoy = 0
if profits[3] > 0 and profits[2] > 0:
year_profits_yoy = round((profits[3] / profits[2] - 1.0) * 100)
elif profits[2] < 0 and profits[3] > 0:
year_profits_yoy = "흑전"
elif profits[2] < 0 and profits[3] < 0:
year_profits_yoy = "적지"
quarter_sales_qoq = 0
if sales[8] > 0 and sales[9] > 0:
quarter_sales_qoq = round((sales[9] / sales[8] - 1.0) * 100)
elif sales[8] < 0 and sales[9] > 0:
quarter_sales_qoq = "흑전"
elif sales[8] < 0 and sales[9] < 0:
quarter_sales_qoq = "적지"
quarter_sales_yoy = 0
if sales[5] > 0 and sales[9] > 0:
quarter_sales_yoy = round((sales[9] / sales[5] - 1.0) * 100)
quarter_profits_qoq = 0
if profits[8] > 0 and profits[9] > 0:
quarter_profits_qoq = round((profits[9] / profits[8] - 1.0) * 100)
elif profits[8] < 0 and profits[9] > 0:
quarter_profits_qoq = "흑전"
elif profits[8] < 0 and profits[9] < 0:
quarter_profits_qoq = "적지"
quarter_profits_yoy = 0
if profits[5] > 0 and profits[9] > 0:
quarter_profits_yoy = round((profits[9] / profits[5] - 1.0) * 100)
    # Append the computed values as one row
    col_data = (stock_type,  # KOSPI / KOSDAQ
                profits[2],  # operating profit, previous 2 years
                profits[3],
                year_profits_yoy,
                sales[8],  # previous quarter revenue
                sales[9],  # current quarter revenue
                quarter_sales_yoy,  # vs. same quarter last year
                quarter_sales_qoq,  # vs. previous quarter revenue
                profits[8],  # previous quarter operating profit
                profits[9],  # current quarter operating profit
                quarter_profits_yoy,  # vs. same quarter last year
                quarter_profits_qoq,  # vs. previous quarter
                market_cap,  # current market cap
                expected_market_cap,  # target market cap
                valuation,  # upside potential
                dividend_rate,  # dividend yield
                multiple,  # multiple
                business_category,  # business sector
                stock_arr[stock_id][3]  # company description
                )
for idx in range(2, 21):
val_result_ws.cell(stock_id + 2, idx, col_data[idx - 2])
print("#" + str(stock_id) + ": " + stock_arr[stock_id][0])
val_result_wb.save("분기예상실적기준_평가.xlsx")
print("Finished!!")
|
[
"[email protected]"
] | |
260813429914a3e96d817d822aba4102fce06946
|
605d7e741244212091b34e518a8abd0c218437c3
|
/WebInterface/server/src/user/migrations/0004_auto_20210215_2145.py
|
53046c4e677b591a187124be284af52b90d3f033
|
[] |
no_license
|
nlpathak/TwitterSentiment
|
66671ae41b6681ec2fe6a7040613aef995d2dd59
|
8919a73e9ead147e365e5a9ba556b1ee0867e0a8
|
refs/heads/main
| 2023-03-24T17:33:39.577975 | 2021-03-26T01:52:04 | 2021-03-26T01:52:04 | 351,629,768 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 419 |
py
|
# Generated by Django 3.1.6 on 2021-02-15 21:45
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('user', '0003_auto_20210215_2136'),
]
operations = [
migrations.AlterField(
model_name='twitteruser',
name='id',
field=models.CharField(max_length=30, primary_key=True, serialize=False),
),
]
|
[
"[email protected]"
] | |
b53fb27016d732c08a7869d38d13162383b30b32
|
1e09bc56bf2904b349df1873e11da3d527437880
|
/lecture-27/AdjListGraph.py
|
8a03efffe1f2ce76c121133adbb645df489cf2d6
|
[] |
no_license
|
codeforcauseorg-archive/DSA-Live-Python-Jun-0621
|
f3444f5671cb4985644c7432517477c3585c70fb
|
e4fe544178d7851c24755242390f39675b99fabe
|
refs/heads/main
| 2023-08-09T08:31:41.449120 | 2021-09-14T16:44:39 | 2021-09-14T16:44:39 | 384,725,085 | 4 | 2 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,095 |
py
|
class Vertex:
def __init__(self, value):
self.value = value
self.neighbours = {}
class AdjListGraph:
def __init__(self):
self.vertices = dict()
def add_vertex(self, value):
if value not in self.vertices:
self.vertices[value] = Vertex(value)
def add_edge(self, first, second, weight):
if (first in self.vertices) and (second in self.vertices):
vfirst = self.vertices[first]
vsecond = self.vertices[second]
vfirst.neighbours[vsecond] = weight
vsecond.neighbours[vfirst] = weight
    def min_spanning_tree(self):
        # Collect every edge as [weight, source, destination]; each edge appears twice
        # (once per direction), but the union-find check below skips the duplicates.
        edges = []
        for vertex in self.vertices.values():
            for neighbour, weight in vertex.neighbours.items():
                edges.append([weight, vertex.value, neighbour.value])
        sorted_edges = sorted(edges)
        # Kruskal's algorithm: take edges in increasing weight order and keep an edge
        # only if it joins two different components (tracked with union-find).
        parents = {value: None for value in self.vertices}
        acc = 0
        for [weight, source, dest] in sorted_edges:
            if self.union(parents, source, dest):
                acc += weight
        return acc
    def union(self, parents, first, second):
        first = self.find(parents, first)
        second = self.find(parents, second)
        if first == second:
            return False
        parents[first] = second
        return True
    def find(self, parents, item):
        while parents[item] is not None:
            item = parents[item]
        return item
def represent(self):
for vertex in self.vertices.values():
print(vertex.value, end="-> ")
for neighbour in vertex.neighbours:
print("[{} : {}]".format(neighbour.value, vertex.neighbours[neighbour]), end=", ")
print()
graph = AdjListGraph()
graph.add_vertex("A")
graph.add_vertex("B")
graph.add_vertex("C")
graph.add_vertex("D")
graph.add_edge("A", "B", 10)
graph.add_edge("B", "C", 15)
graph.add_edge("D", "C", 10)
graph.add_edge("A", "D", 20)
print(graph.min_spanning_tree())
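# Expected output for the graph above: Kruskal's algorithm keeps A-B (10), C-D (10)
# and B-C (15), so the printed minimum spanning tree weight should be 35.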
|
[
"[email protected]"
] | |
0c09ea627fd4040ffbfb1f4ded9b64413584a698
|
9d253bccbf4d2c150df543cc9f40ce0d28a6a7e1
|
/python_practice/python_task/tstonglao.py
|
87870b2d4dbca56d6c14450b27e3242ccfc5e674
|
[] |
no_license
|
suehuijie/hogwarts_sue
|
209dac68dd2a79caf632def3dc4dc58e21b13a04
|
f89c1ee219e161a3fff31af28a767794e62b6203
|
refs/heads/master
| 2023-05-03T12:27:58.465878 | 2021-05-26T05:39:53 | 2021-05-26T05:39:53 | 330,535,777 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,094 |
py
|
class TongLao:
    # Constructor
def __init__(self, power):
self.hp = 1000
self.power = power
    # see_people method
def see_people(self, name):
if name == "WYZ" or name == "无崖子":
print("师弟!!")
elif name == "李秋水":
print("师弟是我的!!")
elif name == "丁春秋":
print("叛徒,我杀了你!!")
else:
print("传入的参数不正确!")
    # fight_zms method
def fight_zms(self, enemy_hp, enemy_power):
self.hp = self.hp / 2
self.power = self.power * 10
        # Compute both sides' HP after one round
self.hp = self.hp - enemy_power
enemy_hp = enemy_hp - self.power
        # Compare the two HP values; the side with more HP wins
if self.hp > enemy_hp:
print("我赢了")
elif self.hp < enemy_hp:
print("我输了")
else:
print("平局")
if __name__ =="__main__":
tonglao = TongLao(120)
tonglao.see_people("丁春秋")
tonglao.fight_zms(1200, 250)
|
[
"[email protected]"
] | |
3bdab0e8fb8cd020ad4a6db81f0d1e4d49c8d1db
|
42bef12a3128fb79676a91d57595ca6abec96544
|
/django_orm/Like_Book/apps/like_app/views.py
|
318271a27e0b679f9bccd84d300082472d77c26e
|
[] |
no_license
|
chizakirov/django
|
c8bea894d355a72977ecfe86192b7b827fb1f3ea
|
b297b21c38b06d4bc9cd1a248a2ee7626c317cee
|
refs/heads/master
| 2020-04-14T21:50:28.093270 | 2019-01-04T18:48:23 | 2019-01-04T18:48:23 | 164,142,083 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 106 |
py
|
from django.shortcuts import render
def index(request):
return render(request,"like_app/index.html")
|
[
"[email protected]"
] | |
02bd4d8c5b7f5ee571bf79d27ec708e3779ed9cd
|
075557cfa0cf4520664f08dcadd5c6988016790e
|
/sendemail.py
|
81474e4dd61aecd8509eca44ddb1e06a7b2c6189
|
[] |
no_license
|
plt3/basic_selenium_projects
|
7135096e5897f2153c5352ec0d68e0cf69b4b79c
|
2596b5b96401ad387c376f4b99357dc46911c387
|
refs/heads/master
| 2022-09-24T22:59:21.124502 | 2020-05-29T16:01:50 | 2020-05-29T16:01:50 | 266,461,232 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,579 |
py
|
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
import time
from secrets import *
def findbyclass(name, driver, keys='click'):
elem = driver.find_element_by_class_name(name)
if keys == 'click':
elem.click()
else:
elem.send_keys(keys)
def explicitwait(attribute, driver, keys='click'):
try:
elem = WebDriverWait(driver, 10).until(EC.presence_of_element_located(
(By.CLASS_NAME, attribute)))
except:
driver.quit()
if keys == 'click':
elem.click()
else:
elem.send_keys(keys)
def main():
useremail = input('Enter your email address: ')
usersubject = input('Enter the subject of your email: ')
usertext = input('Enter the body of your email: ')
browser = webdriver.Chrome()
browser.get('https://gmail.com')
login = browser.find_element_by_id('identifierId')
login.send_keys(username)
findbyclass('RveJvd', browser)
try:
pwd = WebDriverWait(browser, 10).until(
EC.element_to_be_clickable((By.CSS_SELECTOR, '#password .aCsJod .aXBtI .Xb9hP .whsOnd')))
except:
browser.quit()
pwd.send_keys(password)
findbyclass('RveJvd', browser)
explicitwait('z0', browser)
explicitwait('vO', browser, useremail)
findbyclass('aoT', browser, usersubject)
findbyclass('Am', browser, usertext)
findbyclass('dC', browser)
time.sleep(5)
main()
|
[
"[email protected]"
] | |
40ff69a510aed5bf9dbb06fa02fb21a0e29250b0
|
ab7edcc6b4817ef1cffe166256e0871a4eb270e9
|
/flota/commands/__init__.py
|
d2a9c670c9db1ec88ef6aa827b63e59ece5ffd9b
|
[] |
no_license
|
mayfield/flota
|
cc122562c6d7f45958862dd1096562b469c79c90
|
6046b7440ed714b32978ed408be721ed41dd2c21
|
refs/heads/master
| 2020-12-24T07:29:16.930991 | 2016-11-21T23:40:26 | 2016-11-21T23:40:26 | 56,109,989 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 62 |
py
|
__all__ = [
'images',
'misc',
'ps',
'rmi',
]
|
[
"[email protected]"
] | |
94ab65b6908c161276d618daba1b6975924072fe
|
7b710aeaf389bfec81e8e7423ec2aac8632cbc3e
|
/backend/reduceAirports.py
|
b610d2155805b0171c7d8f70b8fec2c215ddfc01
|
[] |
no_license
|
jasonpul/JetOut
|
e1b8e30a88b86789ebe194bd66cf3890f6294992
|
b7d1a61ba53c76cd8182cb4f7150cd7a42a92ff1
|
refs/heads/master
| 2022-11-23T23:29:31.112838 | 2020-07-29T12:06:17 | 2020-07-29T12:06:17 | 281,679,824 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 409 |
py
|
import json
with open('backend/airports.json') as f:
data = json.load(f)
newData = [{'code': i['code'], 'city': i['city']} for i in data]
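# Sort the reduced entries alphabetically by city: `index` pairs each city name with its
# original position, and the sorted pairs are then used to rebuild the list in that order.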
index = sorted([(city['city'], i) for i, city in enumerate(newData)])
newData = [newData[index[i][1]] for i in range(len(newData))]
newData = newData[28:] + [newData[27],] + newData[:27]
with open('backend/reducedAirports.json', 'w') as f:
json.dump(newData, f)
|
[
"[email protected]"
] | |
cb34e95e6cd76ea0113438cb6b6c0d0a1dbbca99
|
0053dab54b07d9c3817ba631adcb2f92441be8a4
|
/ws/src/drift_agent/rl/handful-of-trials/scripts/mbexp.py
|
714fde2de3ad624b2c0fd128617c1777237f725c
|
[
"MIT"
] |
permissive
|
azeng97/avdrift
|
508e89266c438fbe0a7af460b06daf2585d46171
|
613b84d1839a1a78b6c841d17581ddcd83f68f64
|
refs/heads/master
| 2023-07-25T12:57:26.347903 | 2019-11-16T07:59:15 | 2019-11-16T07:59:15 | 204,614,897 | 0 | 0 | null | 2023-07-06T21:14:26 | 2019-08-27T03:48:14 |
Jupyter Notebook
|
UTF-8
|
Python
| false | false | 1,763 |
py
|
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
import os
import argparse
import pprint
import sys
from dotmap import DotMap
sys.path.insert(1, os.path.join(sys.path[0], '..'))
from dmbrl.misc.MBExp import MBExperiment
from dmbrl.controllers.MPC import MPC
from dmbrl.config import create_config
import drift_gym
def main(env, ctrl_type, ctrl_args, overrides, logdir):
ctrl_args = DotMap(**{key: val for (key, val) in ctrl_args})
cfg = create_config(env, ctrl_type, ctrl_args, overrides, logdir)
cfg.pprint()
if ctrl_type == "MPC":
cfg.exp_cfg.exp_cfg.policy = MPC(cfg.ctrl_cfg)
exp = MBExperiment(cfg.exp_cfg)
os.makedirs(exp.logdir)
with open(os.path.join(exp.logdir, "config.txt"), "w") as f:
f.write(pprint.pformat(cfg.toDict()))
exp.run_experiment()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('-env', type=str, default="drift_car",
help='Environment name: select from [cartpole, reacher, pusher, halfcheetah]')
parser.add_argument('-ca', '--ctrl_arg', action='append', nargs=2, default=[],
help='Controller arguments, see https://github.com/kchua/handful-of-trials#controller-arguments')
parser.add_argument('-o', '--override', action='append', nargs=2, default=[],
help='Override default parameters, see https://github.com/kchua/handful-of-trials#overrides')
parser.add_argument('-logdir', type=str, default='log',
help='Directory to which results will be logged (default: ./log)')
args = parser.parse_args()
main(args.env, "MPC", args.ctrl_arg, args.override, args.logdir)
|
[
"[email protected]"
] | |
a3810577a124499e43ca7c70f2fdc9889b8971e0
|
3df1fca973d127b0118d6c4a89940a2f41be75cc
|
/importpatches.py
|
c1cc6da556b0c954860afd78c1a80bc5c6cf7fad
|
[] |
no_license
|
hroncok/importpatches
|
182ec91294a55ccb2d610ffd220f3b65a598f602
|
e87183e6770f4307cf9452192c4824e6b3a8a5b7
|
refs/heads/master
| 2023-04-29T23:03:52.911202 | 2020-07-17T19:20:25 | 2020-07-17T19:24:17 | 281,077,112 | 0 | 0 | null | 2020-07-20T09:46:43 | 2020-07-20T09:46:43 | null |
UTF-8
|
Python
| false | false | 12,663 |
py
|
import subprocess
from pathlib import Path
import sys
import shlex
import re
import dataclasses
from textwrap import dedent
import tempfile
import shutil
import click # dnf install python3-click
REPO_KEY = 'importpatches.upstream'
PATCH_NUMBER_RE = re.compile(r'^(\d+):')
SPECIAL_PATCH_NUMBERS = {
'python-2.7.1-config.patch': 0,
'python-2.6-rpath.patch': 16,
'python-2.6.4-distutils-rpath.patch': 17,
}
PATCH_SECTION_START = '# (Patches taken from Git)'
PATCH_SECTION_STARTS = {
PATCH_SECTION_START,
'# 00001 #',
'# Modules/Setup.dist is ultimately used by the "makesetup" script to construct'
}
PATCH_SECTION_END = '# (New patches go here ^^^)'
FILENAME_SAFE_RE = re.compile('^[a-zA-Z0-9._-]+$')
BUNDLED_VERSION_RE = re.compile('-_([A-Z]+)_VERSION = "([0-9.]+)"')
BUNDLED_VERSION_BLURB = """
# The following versions of setuptools/pip are bundled when this patch is not applied.
# The versions are written in Lib/ensurepip/__init__.py, this patch removes them.
# When the bundled setuptools/pip wheel is updated, the patch no longer applies cleanly.
# In such cases, the patch needs to be amended and the versions updated here:
"""
def removeprefix(self, prefix):
# PEP-616 backport
if self.startswith(prefix):
return self[len(prefix):]
else:
return self
@dataclasses.dataclass
class PatchInformation:
"""All information needed about a patch"""
number: int
patch_id: str
comment: str
filename: Path
trailer: str = ''
def handle_patch(repo, commit_id, *, tempdir):
"""Handle a single patch, writing it to `tempdir` and returning info
"""
message = run(
'git', 'show', '-s', '--format=%B', commit_id,
cwd=repo,
).stdout.strip()
summary, _, message_body = message.partition('\n')
if match := PATCH_NUMBER_RE.match(summary):
number = int(match.group(1))
paths = list(Path('.').glob(f'{number:05d}-*.patch'))
if len(paths) == 0:
path = Path(slugify(summary) + '.patch')
elif len(paths) == 1:
[path] = paths
else:
paths_msg = ''.join(f'\n {p}' for p in paths)
exit(
'More than one patch file matches {number}: {paths_msg}'
)
    elif summary.endswith('.patch') and FILENAME_SAFE_RE.match(summary):
path = Path(summary)
        if match := re.search(r'\d{5,}', message):
number = int(str(match.group(0)))
elif summary in SPECIAL_PATCH_NUMBERS:
number = SPECIAL_PATCH_NUMBERS[summary]
else:
exit(
f'Cannot find patch number in {commit_id[:9]}: {summary}'
)
else:
exit(
f'Cannot derive patch filename from {commit_id[:9]}: {summary}'
)
patch_path = tempdir / path.name
with open(patch_path, 'w') as f:
proc = run(
'git', 'format-patch', '--stdout', '-1',
'--minimal', '--patience', '--abbrev=78', '--find-renames',
'--zero-commit', '--no-signature',
commit_id,
cwd=repo, stdout=f
)
with open(patch_path) as f:
hash_id = run('git', 'patch-id', '--stable', stdin=f).stdout.split()[0]
spec_comment = []
if summary.endswith('.patch'):
body = removeprefix(message_body.strip(), f'{number:05d} #\n')
spec_comment.append(body)
else:
spec_comment.append(re.sub(PATCH_NUMBER_RE, '', summary))
for line in message_body.splitlines():
if line.lower().startswith('co-authored-by:'):
continue
if re.fullmatch(r'\(cherry picked from commit .{40}\)', line):
continue
spec_comment.append(line)
if number == 189:
trailer = process_rpmwheels_patch(tempdir / path.name)
else:
trailer = ''
return PatchInformation(
number, hash_id, '\n'.join(spec_comment).strip(), path.name,
trailer,
)
def slugify(string):
"""Massage a string for filename safety
This should be similar to how git-format-patch generates filenames.
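    For example (illustrative):
        >>> slugify("00189: Fix Something!")
        '00189-fix-something'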
"""
return re.sub('[^a-z0-9_-]+', '-', string.lower()).strip('-')
def process_rpmwheels_patch(path):
"""Return a "trailer" with %global definitions for patch 189
"""
versions = {}
with path.open() as f:
for line in f:
if line.startswith('-_'):
print(line, BUNDLED_VERSION_RE)
if match := BUNDLED_VERSION_RE.match(line.strip()):
if match[1] in versions:
exit(f'Bundled version for {match[1]} appears twice')
versions[match[1]] = match[2]
version_lines = (
f'%global {name.lower()}_version {ver}\n'
for name, ver in sorted(versions.items())
)
return BUNDLED_VERSION_BLURB + ''.join(version_lines)
def run(*args, echo_stdout=True, **kwargs):
"""Like subprocess.run, but with logging and more appropriate defaults"""
kwargs.setdefault('check', True)
kwargs.setdefault('encoding', 'utf-8')
kwargs.setdefault('stdout', subprocess.PIPE)
prompt = click.style(f'{kwargs.get("cwd", "")}$ ', fg='cyan')
redirs = []
def add_redir(kwarg_name, symbol):
stream = kwargs.get(kwarg_name)
name = getattr(stream, 'name', None)
if name:
note = f' {symbol} {shlex.quote(name)}'
redirs.append(click.style(note, fg='cyan'))
add_redir('stdin', '<')
add_redir('stdout', '>')
click.echo(
prompt + ' '.join(shlex.quote(a) for a in args) + ''.join(redirs),
err=True,
)
result = subprocess.run(args, **kwargs)
if result.stdout != None and result.stdout.strip():
if echo_stdout:
click.echo(result.stdout, err=True)
else:
lines = result.stdout.count("\n")
click.echo(f'[{lines} lines]\n', err=True)
return result
@click.command(context_settings={'help_option_names': ['-h', '--help']})
@click.option(
'-r', '--repo', default=None, metavar='REPO',
help="Repository with upstream code and patches" +
f"(default is taken from Git config option `{REPO_KEY}`)"
)
@click.option(
'-t', '--tag', default=None, metavar='TAG',
help="Git tag corresponding to the upstream release " +
"(default is derived from %{upstream_version} in SPEC) " +
"(example: v3.9.0b4)"
)
@click.option(
'-f', '--head', default=None, metavar='BRANCH',
help="Git commit-ish from which to take patches " +
"(default is derived from --tag) " +
"(example: fedora-3.9)"
)
@click.argument(
'spec', default=None, required=False, type=Path,
)
def main(spec, repo, tag, head):
"""Update Fedora Python dist-git spec & patches from a Git repository
Meant to be run in a local clone of Fedora's pythonX.Y dist-git.
REPO should be a local clone of https://github.com/fedora-python/cpython.
Patches for all commits between TAG and BRANCH in that repository are
formatted into loal files, and the *.spec file is updated with comments
taken from commit messages.
Patches are numbered with numbers from:
https://fedoraproject.org/wiki/SIGs/Python/PythonPatches
(below, NNNNN stands for the patch number)
The commits must have summary, either::
NNNNN: Summary line
...
or the "old and dirty" style (used for Python 2)::
patch-filename.patch
# NNNNN #
...
Patch filenames are preserved, if they begin with ``NNNNN-``.
Patch 189 is handled specially: version numbers of bundled packages
are extracted from it.
Note that patch files are read and written from the current directory,
regardless of the --repo option.
There is no "dry run" option; commit/stash your work before running this.
"""
with tempfile.TemporaryDirectory() as d:
tempdir = Path(d)
if spec == None:
specs = list(Path('.').glob('*.spec'))
if len(specs) != 1:
raise click.UsageError(
"Either there must be a single spec file in current " +
"directory, or SPEC must be given."
)
spec = specs[0].resolve()
click.secho(f'Assuming SPEC is {spec}', fg='yellow')
if repo == None:
proc = run(
'git', 'config', '--get', REPO_KEY, check=False
)
if proc.returncode == 1:
# The section or key is invalid
raise click.UsageError(
f'Could not find upstream repo. Configure with ' +
f'`git config {REPO_KEY} .../cpython` or ' +
f'specify --repo explicitly.'
)
proc.check_returncode()
repo = proc.stdout.strip()
click.secho(f'Assuming --repo={repo}', fg='yellow')
if tag == None:
with spec.open() as f:
rpm_globals = []
for line in f:
line = line.strip()
if line.startswith('%global '):
rpm_globals.append(removeprefix(line, '%global '))
if line.startswith('%global upstream_version'):
upstream_version = run(
'rpm',
*(f'-D{d}' for d in rpm_globals),
'--eval', '%upstream_version'
).stdout.strip()
tag = f'v{upstream_version}'
break
else:
raise click.UsageError(
"Tag of upstream release not found in spec; check " +
"logic in the script or specify --tag explicitly."
)
click.secho(f'Assuming --tag={tag}', fg='yellow')
if head == None:
pybasever = '.'.join(tag.lstrip('v').split('.')[:2])
head = f'fedora-{pybasever}'
click.secho(f'Assuming --head={head}', fg='yellow')
log = run(
'git', 'rev-list', head, '^' + tag, cwd=repo, echo_stdout=False,
).stdout.splitlines()
if len(log) >= 100:
exit(
'There are more than 100 patches. Probably a wrong branch ' +
'was selected; try giving -c explicitly.'
)
patches_section = []
for commit_id in reversed(log):
result = handle_patch(
repo, commit_id, tempdir=tempdir,
)
comment = '\n'.join(
f'# {l}' if l else '#' for l in result.comment.splitlines()
)
section = dedent(f"""
# {result.number:05d} # {result.patch_id}
%s
Patch{result.number}: {result.filename}
""") % comment.replace('%', '%%')
if result.trailer:
section = section.rstrip() + result.trailer
patches_section.append(section)
spec_lines = []
outfile_path = tempdir / spec.name
with open(outfile_path, 'w') as outfile:
with spec.open('r') as infile:
echoing = True
found_start = False
for line in infile:
if line.rstrip() == PATCH_SECTION_END:
echoing = True
if line.rstrip() in PATCH_SECTION_STARTS:
outfile.write(PATCH_SECTION_START + '\n')
if found_start:
exit('Spec has multiple starts of section')
found_start = True
echoing = False
outfile.writelines(patches_section)
outfile.write('\n')
if echoing:
outfile.write(line)
if not found_start:
exit('Patches section not found in spec file')
if not echoing:
exit('End of patches section not found in spec file')
click.secho(f'Updating patches and spec', fg='yellow')
# Remove all existing patches
for path in Path('.').glob('*.patch'):
path.unlink()
# Move all files from tempdir to current directory
for path in tempdir.iterdir():
shutil.move(path, path.name)
click.secho('OK', fg='green')
if __name__ == '__main__':
try:
main()
except SystemExit as e:
if e.code != None:
raise
click.secho(f"{e}", fg='red')
raise SystemExit(1)
|
[
"[email protected]"
] | |
0f7d8ae5196b70d080e081d05be8478206494a1d
|
82cd10c024f284555845f006e518924fed3254c7
|
/Day-06[09-10-2021]/EmployeeProject/EmployeeProject/urls.py
|
256d1ab7ebea77beebcb3a9ed2b40858b129c6a2
|
[] |
no_license
|
AP-Skill-Development-Corporation/APSSDC-Workshop2021
|
61acba18eb55ec2e4bb96ded95d339c73c8ea1ac
|
fe1f5517f99b17bd0ebcf07c70ee26bd23f262ea
|
refs/heads/main
| 2023-08-12T16:29:53.208949 | 2021-10-16T15:47:22 | 2021-10-16T15:47:22 | 413,299,596 | 1 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,549 |
py
|
"""EmployeeProject URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from Employee import views
urlpatterns = [
path('admin/', admin.site.urls),
path('demo/',views.sample),
path('de/<str:name>/',views.dsname),
path('fe/<str:name>/<int:age>/',views.fname),
path('g/<str:fname>/<str:lname>/<int:sal>/',views.emp),
path('gt/<str:empname>/<int:sal>/',views.empdetials),
path('fy/<str:sname>/',views.dname),
path('sty/<str:stname>/<int:year>/<str:branch>/',views.stname),
path('reg/',views.regis),
path('se/',views.index,name="ind"),
path('about/',views.about,name="ab"),
path('contact/',views.contact,name="cnt"),
path('sam/',views.sample),
path('re/',views.register,name="rg"),
path('',views.operations,name="op"),
path('emv/<int:t>/',views.eview,name="ev"),
path('eup/<int:p>/',views.emup,name="ep"),
path('ed/<int:f>/',views.emdl,name="edl"),
]
|
[
"[email protected]"
] | |
76102b927b78f7e554f0600c9d4a21795372affd
|
39e2d61896ea2f44de8c4d9050a486d5183986fd
|
/mysite/settings.py
|
586cd1b096cf48cecb0979a7bad376e6fbe17ca8
|
[] |
no_license
|
Gutimartinez/my-first-blog
|
b47ed9d03f43b352ba4c90cea560fe72f66dc56c
|
15a80c1a4d507763007b3215c0e957a28726b899
|
refs/heads/master
| 2020-04-07T02:53:55.831813 | 2018-11-17T16:35:57 | 2018-11-17T16:35:57 | 157,994,492 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,188 |
py
|
"""
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 2.0.9.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'y=md4gap2cur-1_rhxi=(_!p^alb0%j*1ey$t$2$7u*(-zvwn9'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ["127.0.0.1",".pythonanywhere.com"]
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
"blog",
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mysite.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/
LANGUAGE_CODE = 'es-es'
TIME_ZONE = 'Europe/Berlin'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT= os.path.join(BASE_DIR,"static")
|
[
"[email protected]"
] | |
d2afcde7b614459ed311129cdac65910e807b036
|
4e8b3c58fc430a00d572d8aa32144b8fa0b9e950
|
/testcases/rhsm/rhsm_level_2/tc_ID509791_proper_message_after_yum_operation_when_registered_next_generation_entitlement_server_without_subscription.py
|
1b68c9d9609d1e15a23935d91e46570521775b37
|
[] |
no_license
|
autumn-gao/entitlement-ci
|
4ea83efce13cbbe0c65b77b6d7bc1079d78dfdc7
|
ceeb298cddb8e5805cbeb54917c90718cdf50d91
|
refs/heads/master
| 2020-12-30T17:45:25.124283 | 2016-06-16T09:24:28 | 2016-06-16T09:24:28 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,584 |
py
|
from utils import *
from testcases.rhsm.rhsmbase import RHSMBase
from utils.exception.failexception import FailException
class tc_ID509791_proper_message_after_yum_operation_when_registered_next_generation_entitlement_server_without_subscription(RHSMBase):
def test_run(self):
case_name = self.__class__.__name__
logger.info("========== Begin of Running Test Case %s ==========" % case_name)
try:
username = self.get_rhsm_cons("username")
password = self.get_rhsm_cons("password")
self.sub_register(username, password)
# yum operation
cmd = 'yum repolist'
(ret, output) = self.runcmd(cmd, "list repos")
if ret ==0 and 'This system is registered to Red Hat Subscription Management, but is not receiving updates. You can use subscription-manager to assign subscriptions.' in output and 'repolist: 0' in output:
logger.info("It's successful to show proper message after yum operation when registered without subscription to next generation entitlement server")
else:
raise FailException("Test Failed - failed to show proper message after yum operation when registered without subscription to next generation entitlement server")
except Exception, e:
logger.error(str(e))
self.assert_(False, case_name)
finally:
self.restore_environment()
logger.info("=========== End of Running Test Case: %s ===========" % case_name)
if __name__ == "__main__":
unittest.main()
|
[
"[email protected]"
] | |
fafb5f902128017c9a0e87f966658b6bcc3967fd
|
971ac2d9d99d636328b0a31d5f76ce7355cf81ac
|
/startechbd/startechbd/spiders/startech_laptops.py
|
79bb0fb11dd5e3c7e9141012a6f8698b71c2689f
|
[] |
no_license
|
ST12357/Web_Scraping-Scrapy-Startechbd
|
5b303ce3b9496fabb7769e6c9121f9f517be28b6
|
e7ae889d746ac7525aa3caf9ed6dbb68b45edf95
|
refs/heads/main
| 2023-06-12T13:21:21.894362 | 2021-07-09T20:00:24 | 2021-07-09T20:00:24 | 384,383,110 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,947 |
py
|
import scrapy
class StartechLaptopsSpider(scrapy.Spider):
name = 'startech_laptops'
allowed_domains = ['https://www.startech.com.bd']
start_urls = ['https://www.startech.com.bd/laptop-notebook/laptop?sort=p.price&order=ASC&limit=90',
'https://www.startech.com.bd/laptop-notebook/laptop?sort=p.price&order=ASC&limit=90&page=2',
'https://www.startech.com.bd/laptop-notebook/laptop?sort=p.price&order=ASC&limit=90&page=3',
'https://www.startech.com.bd/laptop-notebook/laptop?sort=p.price&order=ASC&limit=90&page=4',
'https://www.startech.com.bd/laptop-notebook/laptop?sort=p.price&order=ASC&limit=90&page=5',
'https://www.startech.com.bd/laptop-notebook/laptop?sort=p.price&order=ASC&limit=90&page=6',
'https://www.startech.com.bd/desktops?sort=p.price&order=ASC&limit=90',
'https://www.startech.com.bd/desktops?sort=p.price&order=ASC&limit=90&page=2',
'https://www.startech.com.bd/desktops?sort=p.price&order=ASC&limit=90&page=3'
]
def parse(self, response):
print("procesing:"+response.url)
Product_Name = response.xpath(
'//h4[@class="p-item-name"]/a/text()').extract()
Product_Categotry = response.xpath(
'//h6[@class="page-heading m-hide"]/text()').extract()*len(Product_Name)
Product_Description = []
for i in range(len(Product_Name)):
new_des = str(response.xpath(
'//div[@class="short-description"]/ul').extract()[i])
new_des = new_des.replace("\r", ",")
new_des = new_des.replace("<li>", "")
new_des = new_des.replace("</li>", "")
new_des = new_des.replace("<ul>", "")
new_des = new_des.replace("</ul>", "")
Product_Description.append(new_des)
Product_Price = response.xpath(
'//div[@class="p-item-price"]/span/text()').extract()
Product_Availability = []
for i in range(len(Product_Name)):
new_ava = str(response.xpath(
'//div[@class="actions"]/span/text()').extract()[i])
new_ava = new_ava.replace("Add to Compare", "")
Product_Availability.append(new_ava)
Product_URL = response.xpath(
'//h4[@class="p-item-name"]/a/@href').extract()
row_data = zip(Product_Categotry, Product_Name, Product_Description,
Product_Price, Product_Availability, Product_URL,)
for item in row_data:
scraped_info = {
'Page': response.url,
'Product_Category': item[0],
'Product_Name': item[1],
'Product_Description': item[2],
'Product_Price': item[3],
'Product_Availability': item[4],
'Product_URL': item[5]}
yield scraped_info
|
[
"[email protected]"
] | |
5ba4904c79c11ee96c12103d8f17c226aefa5a8c
|
56cac2ee7e7a4e629519964320ceb813e2dd5102
|
/App/migrations/0020_courses_whatsapp_group_link.py
|
80ab79aef5c6a53bb3317ba99ecb330222f47760
|
[] |
no_license
|
LinkCode-Projects/LinkCode-Website
|
70ed4f614847779ea5ba05bb0ebbaa6d9f85eece
|
f6c900b0c53425a36bc669b58c2adc5592ff6c8f
|
refs/heads/main
| 2023-06-26T07:26:07.702031 | 2021-07-31T06:24:28 | 2021-07-31T06:24:28 | 391,266,237 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 389 |
py
|
# Generated by Django 3.1.1 on 2020-12-24 08:17
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('App', '0019_auto_20201219_1235'),
]
operations = [
migrations.AddField(
model_name='courses',
name='whatsapp_group_link',
field=models.URLField(null=True),
),
]
|
[
"[email protected]"
] | |
46b0d0c58218883358493e41caffae0b3d695983
|
7c83af09eb069917d40de0e58fc348fd0b14bb7d
|
/Outputs/avdss_7_res.py
|
b13d73bbf03c4df5bfe47d5330a35aba0f8c5a3a
|
[] |
no_license
|
soujanyaamd/EDA-tool-for-Scheduling-Binding-and-Verilog-file-generation
|
fd4db24c0e5dd0d51757b511a4638494c9174a73
|
8f31f7fea4ef439720e51e50b0057a938362479a
|
refs/heads/master
| 2020-05-14T09:25:20.086747 | 2019-04-16T20:29:03 | 2019-04-16T20:29:03 | 181,739,741 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,492 |
py
|
Python 3.6.5 (v3.6.5:f59c0932b4, Mar 28 2018, 16:07:46) [MSC v.1900 32 bit (Intel)] on win32
Type "copyright", "credits" or "license()" for more information.
>>>
============ RESTART: C:\Users\Soujanya\Desktop\AVD_SS\avdss_7.py ============
SCHEDULING AND BINDING SOLUTION
TIMESLOT 1
operation 1 ----> Add/Sub 1
operation 4 ----> Add/Sub 2
Time slot 1 ----> 1 nanoseconds
TIMESLOT 2
operation 7 ----> Add/Sub 1
operation 2 ----> Mul/Div 1
operation 5 ----> Mul/Div 2
Time required for slot 2 ----> 3 nanoseconds
TIMESLOT 3
operation 6 ----> Sq 1
operation 8 ----> Mul/Div 1
Time required for slot 3 ----> 3 nanoseconds
TIMESLOT 4
operation 9 ----> Sq 1
Time required for slot 4 ----> 2 nanoseconds
TIMESLOT 5
operation 3 ----> Sq 1
operation 10 ----> Add/Sub 1
Time required for slot 5 ----> 2 nanoseconds
TIMESLOT 6
operation 11 ----> Sqrt 1
operation 14 ----> Add/Sub 1
operation 18 ----> Add/Sub 2
Time required for slot 6 ----> 4 nanoseconds
TIMESLOT 7
operation 12 ----> Mul/Div 1
operation 15 ----> Sqrt 1
operation 19 ----> Sqrt 2
Time required for slot 7 ----> 4 nanoseconds
TIMESLOT 8
operation 13 ----> taninv 1
operation 16 ----> Mul/Div 1
operation 20 ----> Mul/Div 2
Time required for slot 8 ----> 4 nanoseconds
TIMESLOT 9
operation 17 ----> taninv 1
operation 21 ----> taninv 2
Time required for slot 9 ----> 4 nanoseconds
Total Time Required to complete the sequence of operations = 27
>>>
|
[
"[email protected]"
] | |
c3d4a68523f3f80696d083566f8b8dab12579800
|
28b3a96744bab96c002033d67b7e7091e0af7322
|
/AdverBrush/venv/Lib/site-packages/appium/webdriver/extensions/performance.py
|
dc266a1490c19378bccfb7257f1ef19eef8660f2
|
[] |
no_license
|
lichenglan/Python-tools
|
37759478acab35a48c3368bea3d501772c1093aa
|
4f6f42572549480182408dfd45af9f07026bb19f
|
refs/heads/master
| 2020-05-31T06:59:34.100962 | 2019-06-04T08:34:48 | 2019-06-04T08:34:48 | 190,153,627 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,399 |
py
|
#!/usr/bin/env python
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from selenium import webdriver
from ..mobilecommand import MobileCommand as Command
class Performance(webdriver.Remote):
def get_performance_data(self, package_name, data_type, data_read_timeout=None):
"""Returns the information of the system state
which is supported to read as like cpu, memory, network traffic, and battery.
Android only.
:Args:
- package_name: The package name of the application
- data_type: The type of system state which wants to read.
It should be one of the supported performance data types.
Check :func:`.get_performance_data_types` for supported types
- data_read_timeout: (optional) The number of attempts to read
:Usage:
self.driver.get_performance_data('my.app.package', 'cpuinfo', 5)
"""
data = {'packageName': package_name, 'dataType': data_type}
if data_read_timeout is not None:
data['dataReadTimeout'] = data_read_timeout
return self.execute(Command.GET_PERFORMANCE_DATA, data)['value']
def get_performance_data_types(self):
"""Returns the information types of the system state
which is supported to read as like cpu, memory, network traffic, and battery.
Android only.
:Usage:
self.driver.get_performance_data_types()
"""
return self.execute(Command.GET_PERFORMANCE_DATA_TYPES)['value']
# pylint: disable=protected-access
def _addCommands(self):
self.command_executor._commands[Command.GET_PERFORMANCE_DATA] = \
('POST', '/session/$sessionId/appium/getPerformanceData')
self.command_executor._commands[Command.GET_PERFORMANCE_DATA_TYPES] = \
('POST', '/session/$sessionId/appium/performanceData/types')
|
[
"[email protected]"
] | |
67c67c45c81ce9055857ab7dd21cd59ef51b000e
|
a2a0c0b54dd6658ae039ebc834e3cc5ec2dd925b
|
/orders/models.py
|
dd3d6f49c37860447a7eb8536d6b28548ce0003d
|
[] |
no_license
|
GiulianoSoria/Pizza
|
feaaf38828f099b4282bc9fe3e33bef0b21f10ae
|
336068ac60016923efa688102f4cb45b64af4b61
|
refs/heads/master
| 2021-06-18T10:29:24.472742 | 2019-11-13T20:34:37 | 2019-11-13T20:34:37 | 199,933,574 | 0 | 0 | null | 2019-07-31T23:00:22 | 2019-07-31T21:48:35 |
Python
|
UTF-8
|
Python
| false | false | 1,272 |
py
|
from django.db import models
# Create your models here.
class Pizza(models.Model):
name = models.CharField(max_length=64)
size = models.CharField(max_length=1)
price = models.FloatField()
def __str__(self):
return f"{self.name} pizza of size {self.size} costs {self.price}"
class Sub(models.Model):
name = models.CharField(max_length=64)
size = models.CharField(max_length=1)
price = models.FloatField()
def __str__(self):
return f"{self.name} sub of size {self.size} costs {self.price}"
class Platter(models.Model):
name = models.CharField(max_length=64)
size = models.CharField(max_length=1)
price = models.FloatField()
def __str__(self):
return f"{self.name} platter of size {self.size} costs {self.price}"
class Salad(models.Model):
name = models.CharField(max_length=64)
price = models.FloatField()
def __str__(self):
return f"{self.name} costs {self.price}"
class Pasta(models.Model):
name = models.CharField(max_length=64)
price = models.FloatField()
def __str__(self):
return f"{self.name} pasta costs {self.price}"
class Topping(models.Model):
name = models.CharField(max_length=64)
def __str__(self):
return f"{self.name}"
|
[
"[email protected]"
] | |
11dcbbd586bc4a03e5877db9fc54ae3107aa578f
|
487a5965d505e0ff5f9b6af73e89baa4495e8247
|
/get_ip.py
|
f1a921f2b1d5d1df04f6033e0f0c27d8635f4432
|
[] |
no_license
|
civissmith/scripts
|
8e91266cf9f6e99f13123dc9eadefe0568870898
|
badb3f53464ca1e99311f1b563c38b326102fbe7
|
refs/heads/master
| 2021-01-17T10:32:19.496137 | 2018-01-17T12:57:29 | 2018-01-17T12:57:29 | 6,525,570 | 0 | 0 | null | 2012-11-14T18:41:59 | 2012-11-03T23:56:57 | null |
UTF-8
|
Python
| false | false | 5,399 |
py
|
#!/usr/bin/python -B
################################################################################
# @Title: get_ip.py
#
# @Author: Phil Smith
#
# @Date: 17-Aug-2013 9:18 PM
#
# @Project: Motorola Surfboard Hack
#
# @Purpose: Logs into the Motorola Surfboard Router, gets the current WAN IP
# address.
#
# Exit Codes:
# 0 - Success
# 1 - Could not find config file
# 2 - Could not get IP, MAC, duration and expiration
# @Revision:
# $Id: get_ip.py,v 1.2 2013/08/19 02:04:39 alpha Exp $
#
################################################################################
import re
import os
import sys
import fcntl
import socket
import struct
import urllib2
#
# Global Constants
#
RCFILE = os.path.expanduser("~") + '/.motorolarc' # This can be made user-independent.
#
# get_credentials() - Gets the username and password from the rc file.
# Returns (None, None) on error
#
def get_credentials():
if not os.path.exists(RCFILE):
print 'Could not find configuration file! (~/.motorolarc)'
exit(1)
#
# RCFILE should have the following two lines
# USER: <username>
# PASSwORD: <password>
#
rc = open(RCFILE, 'r')
for line in rc:
name = re.search(r'USER:\s*(.*)', line, re.IGNORECASE)
password = re.search(r'PASSWORD:\s*(.*)', line, re.IGNORECASE)
if name:
uname = name.group(1)
if password:
passwd = password.group(1)
rc.close()
if uname and passwd:
return uname, passwd
return None, None
#
# End get_credentials
#
#
# main(): Main entry point of this utility
#
def main( ADMIN_NAME, ADMIN_PASSWD ):
#
# Set the router address and port, these could be made into arguments -
# but I really don't care.
#
ROUTER_ADDR = '192.168.0.1'
ROUTER_PORT = 80
TIMEOUT = 2.5 # Seconds
# On the Motorola Surfboard SGB6580, the WAN IP addess is on the RgSetup.asp page.
WAN_IP_PAGE = 'http://' + ROUTER_ADDR + '/RgSetup.asp'
LOGOUT_PAGE = 'http://' + ROUTER_ADDR + '/logout.asp'
# Grab any TCP port - completely arbitrary.
TCP_PORT = 45555
#
# The LOGIN string will attempt to login into the router.
#
# All headers are taken from Wireshark capture.
# Host: header is set to mimic how Wireshark captured the data, instead of the normal
# ADDR, PORT form.
LOGIN = 'POST /goform/login HTTP/1.1\r\n' +\
'Host:%s\r\n' % ROUTER_ADDR +\
'User-Agent: Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:22.0) Gecko/20100101 Firefox/22.0\r\n' +\
'Accept: text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8\r\n' +\
'Accept-Language: en-US,en;q=0.5\r\n' +\
'Accept-Encoding: gzip, deflate\r\n' +\
'Referer: http://%s/login.asp\r\n' % ROUTER_ADDR +\
'Connection: keep-alive\r\n' +\
'Content-Type: application/x-www-form-urlencoded\r\n' +\
'Content-Length: 42\r\n\r\n' +\
'loginUsername=%s&loginPassword=%s' % (ADMIN_NAME, ADMIN_PASSWD)
socket.setdefaulttimeout(TIMEOUT)
#
# Hackish way to get the IP address of the active interface. The
# null_socket is never used, other than to get the address.
#
null_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
null_socket.connect((ROUTER_ADDR, ROUTER_PORT))
loc_addr = null_socket.getsockname()[0]
#
# Setup and send the login request
#
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM, socket.IPPROTO_TCP) # Internet socket using TCP
# Allow the kernel to reuse the socket even if TIME_WAIT has not expired.
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
# Bind the socket to the chosen address and open the multicast port.
sock.bind((loc_addr, TCP_PORT))
sock.connect((ROUTER_ADDR, ROUTER_PORT))
# Send the LOGIN string to gain access to the router
sock.sendto(LOGIN, (ROUTER_ADDR, ROUTER_PORT))
response = urllib2.urlopen(WAN_IP_PAGE)
page_source = response.read()
# Parse the source looking for the WAN IP.
# IP Address
match = re.search(r'<td bgcolor=#E7DAAC><b>IP Address</b></td><td bgcolor=#E7DAAC>(.*)</td></tr>', page_source)
if match:
ip = match.group(1)
# MAC Address
match = re.search(r'<tr><td bgcolor=#E7DAAC> <td bgcolor=#E7DAAC><b>MAC Address</b></td><td bgcolor=#E7DAAC>(.*)</td></tr>', page_source)
if match:
mac = match.group(1)
# Duration
match = re.search(r'<tr><td bgcolor=#E7DAAC> </td><td bgcolor=#E7DAAC><b>Duration</b></td><td bgcolor=#E7DAAC>(.*)</td></tr>', page_source)
if match:
duration = match.group(1)
# Expiration
match = re.search(r'<tr><td bgcolor=#E7DAAC> </td><td bgcolor=#E7DAAC><b>Expires</b></td><td bgcolor=#E7DAAC>(.*)', page_source)
if match:
expire = match.group(1)
#
# Use urllib2 to request the logout, don't care about spoofing the browser anymore.
# The logout needs to occur, otherwise any webbrowser can just point to a guarded page
# and immediately have access.
#
response = urllib2.urlopen(LOGOUT_PAGE)
if ip and mac and duration and expire:
print "WAN IP Address: %15s" % ip
print "MAC Address: %23s" % mac
print "Lease Duration: %26s" % duration
print "Expiration Date: %26s" % expire
else:
print "Could not get all of the data elements!"
exit(2)
#
# Invocation Check
#
if __name__ == "__main__":
uname, passwd = get_credentials()
if uname and passwd:
main( uname, passwd)
|
[
"[email protected]"
] | |
39764d8d79f6697d5e9b2cffeb3f3e9487f9ea0a
|
2eee2da886d2574f030b22771e707e32f56cbaed
|
/chaospy/distributions/collection/chi.py
|
cb04231c2d16b7f21de4aa90574562d6e927b4fc
|
[
"MIT"
] |
permissive
|
lblonk/chaospy
|
1759f050229d1365802320d9b13f6195ec55a72c
|
1759a4307c6134b74ce63ff44973195f1e185f94
|
refs/heads/master
| 2022-11-12T19:50:15.108219 | 2020-07-03T11:13:42 | 2020-07-03T11:13:42 | 276,879,282 | 0 | 0 |
MIT
| 2020-07-03T11:03:14 | 2020-07-03T11:03:13 | null |
UTF-8
|
Python
| false | false | 3,779 |
py
|
"""Chi distribution."""
import numpy
from scipy import special
from ..baseclass import Dist
from ..operators.addition import Add
class chi(Dist):
"""Chi distribution."""
def __init__(self, df=1):
Dist.__init__(self, df=df)
def _pdf(self, x, df):
return x**(df-1.)*numpy.exp(-x*x*0.5)/(2.0)**(df*0.5-1)\
/special.gamma(df*0.5)
def _cdf(self, x, df):
return special.gammainc(df*0.5,0.5*x*x)
def _ppf(self, q, df):
return numpy.sqrt(2*special.gammaincinv(df*0.5, q))
def _lower(self, df):
return 0.
def _mom(self, k, df):
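        # Raw moments of the chi distribution, as implemented by the return expression below:
        # E[X**k] = 2**(k/2) * Gamma((df + k) / 2) / Gamma(df / 2)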
return 2**(.5*k)*special.gamma(.5*(df+k))\
/special.gamma(.5*df)
class Chi(Add):
"""
Chi distribution.
Args:
df (float, Dist):
Degrees of freedom
scale (float, Dist):
Scaling parameter
shift (float, Dist):
Location parameter
Examples:
>>> distribution = chaospy.Chi(2, 4, 1)
>>> distribution
Chi(df=2, scale=4, shift=1)
>>> q = numpy.linspace(0, 1, 5)
>>> distribution.inv(q).round(4)
array([ 1. , 4.0341, 5.7096, 7.6604, 28.1446])
>>> distribution.fwd(distribution.inv(q)).round(4)
array([0. , 0.25, 0.5 , 0.75, 1. ])
>>> distribution.pdf(distribution.inv(q)).round(4)
array([0. , 0.1422, 0.1472, 0.1041, 0. ])
>>> distribution.sample(4).round(4)
array([ 6.8244, 2.9773, 10.8003, 5.5892])
>>> distribution.mom(1).round(4)
6.0133
"""
def __init__(self, df=1, scale=1, shift=0):
self._repr = {"df": df, "scale": scale, "shift": shift}
Add.__init__(self, left=chi(df)*scale, right=shift)
class Maxwell(Add):
"""
Maxwell-Boltzmann distribution
Chi distribution with 3 degrees of freedom
Args:
scale (float, Dist):
Scaling parameter
shift (float, Dist):
Location parameter
Examples:
>>> distribution = chaospy.Maxwell(2, 3)
>>> distribution
Maxwell(scale=2, shift=3)
>>> q = numpy.linspace(0, 1, 5)
>>> distribution.inv(q).round(4)
array([ 3. , 5.2023, 6.0763, 7.0538, 17.0772])
>>> distribution.fwd(distribution.inv(q)).round(4)
array([0. , 0.25, 0.5 , 0.75, 1. ])
>>> distribution.pdf(distribution.inv(q)).round(4)
array([0. , 0.2638, 0.2892, 0.2101, 0. ])
>>> distribution.sample(4).round(4)
array([6.6381, 4.6119, 8.5955, 6.015 ])
>>> distribution.mom(1).round(4)
6.1915
"""
def __init__(self, scale=1, shift=0):
self._repr = {"scale": scale, "shift": shift}
Add.__init__(self, left=chi(3)*scale, right=shift)
class Rayleigh(Add):
"""
Rayleigh distribution
Args:
scale (float, Dist):
Scaling parameter
shift (float, Dist):
Location parameter
Examples:
>>> distribution = chaospy.Rayleigh(2, 3)
>>> distribution
Rayleigh(scale=2, shift=3)
>>> q = numpy.linspace(0, 1, 5)
>>> distribution.inv(q).round(4)
array([ 3. , 4.5171, 5.3548, 6.3302, 16.5723])
>>> distribution.fwd(distribution.inv(q)).round(4)
array([0. , 0.25, 0.5 , 0.75, 1. ])
>>> distribution.pdf(distribution.inv(q)).round(4)
array([0. , 0.2844, 0.2944, 0.2081, 0. ])
>>> distribution.sample(4).round(4)
array([5.9122, 3.9886, 7.9001, 5.2946])
>>> distribution.mom(1).round(4)
5.5066
"""
def __init__(self, scale=1, shift=0):
self._repr = {"scale": scale, "shift": shift}
Add.__init__(self, left=chi(2)*scale, right=shift)
|
[
"[email protected]"
] | |
aed9190de126750148813cee49a36b0191fe03cf
|
e1b09412691d9d375637760f9226e97a9927f108
|
/day17 网络编程再试/小测.py
|
c2a8d129a4144c2cf049d7051243dba1f8e97c09
|
[] |
no_license
|
fallencrasher/python-learning
|
572988c5d3cb6f97dcefdcdaf27101283756ebc7
|
ec712211a4e8895b31212ff750731be60781d0ca
|
refs/heads/master
| 2021-05-20T19:35:07.485620 | 2020-05-18T13:11:45 | 2020-05-18T13:11:45 | 252,392,628 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,632 |
py
|
# 1. Read the code and write down the result [if something errors, just note the error and assume the program keeps running afterwards]
class Foo(object):
a1 = 1
__a2 = 2
def __init__(self,num):
self.num = num
self.__salary = 1000
def show_data(self):
print(self.num+self.a1)
obj = Foo(666)
print(obj.num) #666
print(obj.a1) #1
#print(obj.__salary) # error: name-mangled private instance attribute
#print(obj.__a2) # error: name-mangled private class attribute
print(Foo.a1) #1
#print(Foo.__a2) # error: name-mangled private class attribute
# 2. Read the code and write down the result [if something errors, just note the error and assume the program keeps running afterwards]
class Foo(object):
a1 = 1
def __init__(self,num):
self.num = num
def show_data(self):
print(self.num+self.a1)
obj1 = Foo(666)
obj2 = Foo(999)
print(obj1.num) #666
print(obj1.a1) # 1 (a1 is still the class attribute here; obj1.a1 = 99 only happens two lines later)
obj1.num = 18
obj1.a1 = 99
print(obj1.num) #18
print(obj1.a1) #99
print(obj2.a1) #1
print(obj2.num) #999
print(obj2.num) #999
print(Foo.a1) #1
print(obj1.a1) #99
# 3. Read the code and write down the result; pay attention to return values.
class Foo(object):
def f1(self):
return 999
def f2(self):
v = self.f1()
print('f2')
return v
def f3(self):
print('f3')
return self.f2()
def run(self):
result = self.f3()
print(result)
obj = Foo()
v1 = obj.run() # f3 f2 999
print(v1) # None
# 4. Read the code and write down the result [if something errors, just note the error and assume the program keeps running afterwards]
class Foo(object):
def f1(self):
print('f1')
@staticmethod
def f2():
print('f2')
obj = Foo()
obj.f1() # f1
obj.f2() # f2
#Foo.f1() # error: an instance method cannot be called on the class without an instance (only static and class methods can)
Foo.f2() #f2
# 5. Read the code and write down the result [if something errors, just note the error and assume the program keeps running afterwards]
class Foo(object):
def f1(self):
print('f1')
@classmethod
def f2(cls):
print('f2')
obj = Foo() #
obj.f1() #f1
obj.f2() #f2
#Foo.f1() # error: an instance method cannot be called on the class without an instance (only static and class methods can)
Foo.f2() #f2
# 6. Read the code and write down the result [if something errors, just note the error and assume the program keeps running afterwards]
class Foo(object):
def f1(self):
print('f1')
self.f2()
self.f3()
@classmethod
def f2(cls):
print('f2')
@staticmethod
def f3():
print('f3')
obj = Foo()
obj.f1() # f1 f2 f3
# 7. Read the code and write down the result [if something errors, just note the error and assume the program keeps running afterwards]
class Base(object):
@classmethod
def f2(cls):
print('f2')
@staticmethod
def f3():
print('f3')
class Foo(Base):
def f1(self):
print('f1')
self.f2()
self.f3()
obj = Foo()
obj.f1() # f1 f2 f3
# 8. Read the code and write down the result
class Foo(object):
def __init__(self, num):
self.num = num
v1 = [Foo for i in range(10)]
v2 = [Foo(5) for i in range(10)]
v3 = [Foo(i) for i in range(10)]
print(v1) # a list of 10 references to the Foo class itself, all identical
print(v2) # a list of 10 distinct Foo(5) instances (different memory addresses, even though their contents are the same)
print(v3) # a list of 10 distinct Foo instances, all different
# 9. Read the code and write down the result
class StarkConfig(object):
def __init__(self, num):
self.num = num
def changelist(self, request):
print(self.num, request)
config_obj_list = [StarkConfig(1), StarkConfig(2), StarkConfig(3)]
for item in config_obj_list:
print(item.num) # 1 2 3
# 10. Read the code and write down the result
class StarkConfig(object):
def __init__(self, num):
self.num = num
def changelist(self, request):
print(self.num, request)
config_obj_list = [StarkConfig(1), StarkConfig(2), StarkConfig(3)]
for item in config_obj_list:
item.changelist(666) #1 666 2 666 3 666
# 11. Read the code and write down the result
class Department(object):
def __init__(self,title):
self.title = title
class Person(object):
def __init__(self,name,age,depart):
self.name = name
self.age = age
self.depart = depart
d1 = Department('人事部') #
d2 = Department('销售部')
p1 = Person('武沛齐',18,d1)
p2 = Person('alex',18,d1)
p3 = Person('安安',19,d2)
print(p1.name) #武沛齐
print(p2.age) #18
print(p3.depart) # the repr of the d2 Department object (its memory address)
print(p3.depart.title) # 销售部
# 12. Read the code and write down the result
class Department(object):
def __init__(self,title):
self.title = title
class Person(object):
def __init__(self,name,age,depart):
self.name = name
self.age = age
self.depart = depart
def message(self):
msg = "我是%s,年龄%s,属于%s" %(self.name,self.age,self.depart.title)
print(msg)
d1 = Department('人事部') #
d2 = Department('销售部')
p1 = Person('武沛齐',18,d1)
p2 = Person('alex',18,d1)
p1.message() # 我是武沛齐,年龄18,属于人事部
p2.message() # 我是alex,年龄18,属于人事部 (p2 was created with d1, so its department is 人事部, not 销售部)
|
[
"fallencrasher"
] |
fallencrasher
|
f112463a98d6d41398620a8de1ed9b90dda3443b
|
b6bc7e7ad84e388c3f7cb8827032d8da69ecf8c4
|
/Morphing/UI_assistant.py
|
bd07455e233ae8b22a7e8903dc7beeee5f542646
|
[] |
no_license
|
XiaoyueDuan/Morphing
|
495345b3cec93dbee43d5603ef35852c32020494
|
79ff5029c41b41167fac3d178dd91d945ce0267b
|
refs/heads/master
| 2021-01-19T19:09:55.201133 | 2017-06-01T08:34:55 | 2017-06-01T08:34:55 | 88,403,312 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,600 |
py
|
# -*- coding: utf-8 -*-
"""
File Description:
The .py file aims to assist morphingUI.py with more advanced function,
e.g. add events, interact with morphing algorithm
"""
from Morphing import Interface
import numpy as np
class Valid():
"""
Class Description:
The goal of this class is to check whether the data is enough to run morphing algorithm.
The condition of valid state is all bool type attributes are True.
"""
def __init__(self):
self.loadSourceImg=False
self.loadTargetImg=False
def checkDataType(self,interface):
"""
Function Description;
Check whether the data type in interface is correct
"""
valid=True
if(type(interface.sourceImg) is not np.ndarray):
print('Error: The format of source image is not numpy.ndarray')
valid=False
if(type(interface.targetImg) is not np.ndarray):
print('Error: The format of target image is not numpy.ndarray')
valid=False
if(type(interface.a) is not int):
print('Error: The format of a is not int')
valid=False
if(type(interface.b) is not float):
print('Error: The format of b is not float')
valid=False
if(type(interface.p) is not float):
print('Error: The format of p is not int')
valid=False
if(type(interface.framePerSecond) is not int):
print('Error: The format of frame per second is not int')
valid=False
if(type(interface.timeDur) is not int):
print('Error: The format of time duration is not int')
valid=False
return valid
def checkDataValue(self,interface):
"""
Function Description;
Check whether the data value or size in interface is correct
"""
valid=True
if not self.loadSourceImg:
print('Error: Have not loaded source image')
valid=False
if not self.loadTargetImg:
            print('Error: Have not loaded target image')
valid=False
if interface.startPos.shape[0]<=0:
print('Error: No input source image lines')
valid=False
if interface.terminatePos.shape[0]<=0:
print('Error: No input target image lines')
valid=False
if interface.startPos.shape[1]!=4:
print('Error: The number of column of source image is not equal to 4')
valid=False
if interface.terminatePos.shape[1]!=4:
print('Error: The number of column of target image is not equal to 4')
valid=False
if interface.startPos.shape[0]!=interface.terminatePos.shape[0]:
print('Error: The number of lines in source and target image is different')
valid=False
if interface.a<0:
print('Error: a is less than 0')
valid=False
if interface.b<0:
print('Error: b is less than 0')
valid=False
if interface.p<0:
print('Error: p is less than 0')
valid=False
if interface.framePerSecond<=0:
print('Error: frame per second is less than 0')
valid=False
if interface.timeDur<0:
print('Error: time duration is less than 0')
valid=False
return valid
def isValid(self,interface):
valid=self.checkDataType(interface)
valid=valid and self.checkDataValue(interface)
return valid
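# Minimal usage sketch (hypothetical; `interface` would be a Morphing.Interface instance populated by the UI):
#     checker = Valid()
#     checker.loadSourceImg = checker.loadTargetImg = True  # set once both images are loaded
#     if checker.isValid(interface):
#         pass  # safe to hand the data to the morphing algorithm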
|
[
"[email protected]"
] | |
cd3d97d846876037d74f4ccc46eefb915c555830
|
823b69dffc4a6e28b9e4c27ec176f8ce54d2e586
|
/args/arg_parser.py
|
c2cea4c5d26614670271806fddc28b28fb6b4b19
|
[] |
no_license
|
potlee/pbt
|
1f5af632aa100561da1c284b522a6ca181ea21c1
|
05160eca9f3a557a25d043502f90aca1a7b76b46
|
refs/heads/master
| 2020-03-25T23:48:47.867151 | 2018-06-23T19:40:16 | 2018-06-23T19:40:16 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,599 |
py
|
import argparse
import util
class ArgParser(object):
def __init__(self):
self.parser = argparse.ArgumentParser(description='Population-Based Training')
self.parser.add_argument('--gpu_ids', type=str, default='0',
help='Comma-separated list of GPUs to use.')
self.parser.add_argument('--batch_size', type=int, default=32, help='Batch size.')
self.parser.add_argument('--num_workers', type=int, default=4, help='Number of workers per data loader.')
self.parser.add_argument('--num_epochs', type=int, default=30,
help='Number of epochs to train for. If 0, train forever.')
self.parser.add_argument('--population_size', type=int, default=3,
help='Number of models in a population.')
self.parser.add_argument('--dataset', type=str, default='CIFAR10', choices=('CIFAR10',),
help='Dataset to train on.')
self.parser.add_argument('--ckpt_dir', type=str, default='ckpts/',
help='Directory to save checkpoints and population info.')
self.parser.add_argument('--name', type=str, required=True, help='Experiment name.')
self.parser.add_argument('--model', type=str, default='resnet50', help='Model name.')
self.parser.add_argument('--metric_name', type=str, default='val_loss',
help='Metric to optimize during PBT. Make sure to also set --maximize_metric')
self.parser.add_argument('--maximize_metric', type=util.str_to_bool, default=False,
help='If true, maximize the metric. Else minimize.')
self.parser.add_argument('--max_eval', type=int, default=1000,
help='Max number of examples to evaluate from the training set.')
self.parser.add_argument('--max_ckpts', type=int, default=3,
help='Max number of recent checkpoints to keep per model.')
self.parser.add_argument('--save_dir', type=str, default='logs', help='Directory for saving logs.')
self.parser.add_argument('--learning_rate', type=float, default=1e-1, help='Initial learning rate.')
self.parser.add_argument('--optimizer', type=str, default='sgd', choices=('sgd', 'adam'), help='Optimizer.')
self.parser.add_argument('--sgd_momentum', type=float, default=0.9, help='SGD momentum (SGD only).')
        self.parser.add_argument('--sgd_dampening', type=float, default=0.9, help='SGD dampening (SGD only).')
self.parser.add_argument('--adam_beta_1', type=float, default=0.9, help='Adam beta 1 (Adam only).')
self.parser.add_argument('--adam_beta_2', type=float, default=0.999, help='Adam beta 2 (Adam only).')
self.parser.add_argument('--weight_decay', type=float, default=5e-4,
help='Weight decay (i.e., L2 regularization factor).')
self.parser.add_argument('--iters_per_print', type=int, default=4,
help='Number of iterations between printing loss to the console and TensorBoard.')
self.parser.add_argument('--search_space', type=str, default='lr,momentum,weight_decay')
def parse_args(self):
args = self.parser.parse_args()
args.gpu_ids = [int(i) for i in str(args.gpu_ids).split(',') if int(i) >= 0]
args.device = 'cpu' if len(args.gpu_ids) == 0 else 'cuda:{}'.format(args.gpu_ids[0])
args.search_space = str(args.search_space).split(',')
return args
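
# Usage sketch (hypothetical invocation; flag values are illustrative only):
#
#   parser = ArgParser()
#   args = parser.parse_args()
#   # e.g. `python train.py --name pbt_run --gpu_ids 0,1` yields
#   # args.device == 'cuda:0' and args.search_space == ['lr', 'momentum', 'weight_decay']
#   print(args.device, args.search_space)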
|
[
"[email protected]"
] | |
5a0e45a0bad8693ccc51793e4fe0d1a2503bb752
|
7c37cceac4f8d42e8ef76af367ea9862d7f52166
|
/T1_Rosales_201810531-7/T1_Rosales_201810531-7.py
|
c9580d647cd4b2a2953d0969e146f415f85a74b6
|
[] |
no_license
|
Godiessssseh/Arqui
|
1ea62cf6fc867f52d8020d5a83c00ece835b34c7
|
f7122e739906519e27f7c05d42f67d5abec12a85
|
refs/heads/main
| 2023-06-14T07:51:00.370431 | 2021-07-06T02:57:27 | 2021-07-06T02:57:27 | 383,321,244 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 11,966 |
py
|
#Codigos que se deben usar. BCD, Gray, Excess of 3, Johnson, Paridad, PentaBit, Hamming.
#Se aceptan de la forma bcd, gry, ed3, jsn, par, pbt, ham
#Base cualquiera a decimal, decimal a binario.
codigos = ["bcd","gry","ed3","jsn","par","pbt","ham"] #Solo lo usaremos para revisar casos!
bases_posibles = list("0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ+?")
#Funcion que revisa si es binario o no
def es_binario(valor):
    valor = str(valor)
    if valor == "":
        return False
    for i in valor:
        if i not in "10":
            return False
    return True
#Función que pasa cualquier decimal a numero binario
def dec_to_binary(valor):
valor=int(valor)
binario = ''
while valor // 2 != 0:
binario = str(valor % 2) + binario
valor = valor // 2
return str(valor) + binario
#Funcion que pasa cualquier valor en cualquier base a decimal.
def cualquier_base_to_dec(valor,b):
list = []
pos = 0
decimal =0
#Revisamos caso donde sea unario o igual a 10:
if b==1:
return (len(valor))
elif b == 10:
return valor
else:
for i in str(valor):
list.append(i)
for i in range(len(list)):
if list[i] in bases_posibles:
a = bases_posibles.index(list[i])
list.pop(pos)
list.insert(pos,a)
pos = pos + 1
list.reverse()
pos = 0
for i in list:
decimal += int(list[pos])*(b**pos)
pos+=1
return decimal
#Funcion que pasa de un decimal a cualquier base numerica ( 1<n<64)
def dec_to_cualquier_base(n, b):
n = int(n)
new_number = []
base_nueva = ""
if b == 1:
base_nueva += "1" * n #Caso que sea unario!!
else:
while(n != 0): #Si es 0, ya no hay numero al que buscar resto
new_number.append(str(n % b))
n = n // b
new_number.reverse() #Lo ordenamos de atrás hacia adelante
for pos in range(len(new_number)): #Buscamos el valor en la posicion correspondiente.
base_nueva += bases_posibles[int(new_number[pos])]
return base_nueva
#Si retorno base_nueva, me trae el valor directamente basado en la lista de bases posibles. ej(1e23d)
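# Worked examples (added for clarity; values are illustrative):
#   dec_to_cualquier_base(255, 16)  -> 'ff'      (255 = 15*16 + 15)
#   cualquier_base_to_dec('ff', 16) -> 255       (inverse conversion)
#   dec_to_cualquier_base(5, 1)     -> '11111'   (unary: five ones)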
#Función que pasa un decimal a bcd
def dec_to_bcd(n):
n = int(n)
if (n == 0): # Caso borde donde n es igual a 0
print("0000")
return
rev = 0 # Guardaremos aquí, el reverse de los valores que obtengamos.
while (n > 0):
rev = rev * 10 + (n % 10)
n = n // 10
# Iteramos los valores en el reverse.
a=""
while (rev > 0):
# Se busca el binario para cada dígito usando bitset
b = str(rev % 10)
# Se imprime la conversión a binario para el digito correspondiente
a = a + "{0:04b}".format(int(b, 16))
# Se divide en 10, para usar el proximo dígito.
rev = rev // 10
return a
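# Example (added for clarity): dec_to_bcd(59) -> '01011001'
# (5 -> 0101, 9 -> 1001: one 4-bit group per decimal digit).
# Note: the digits are read back from the reversed integer, so numbers with
# trailing zeros such as 100 lose those zero groups; the example avoids that case.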
#Función que pasa un decimal a exceso de 3
def dec_to_ed3(valor): #Aumento de 3!
valor = str(valor)
ed3 = 0
for i in valor:
d = int(i)+3 #A cada valor se le suma 3, y se pasa a binario.
ed3 = ed3 + int(dec_to_binary(d))
return ed3
def xor_c(a, b): #Función para concatenar
return '0' if (a == b) else '1'
def get_gray(n): #Lista con los valores de la tabla de gray, acorde al largo n del valor que se inserta.
if n == 0:
return ['']
first_half = get_gray(n - 1)
second_half = first_half.copy()
first_half = ['0' + code for code in first_half]
second_half = ['1' + code for code in reversed(second_half)]
return first_half + second_half
#Función que recibe un binario y lo retorna en codigo johnson
def johnson(valor):
list = []
lista2 = []
a = cualquier_base_to_dec(valor,2)
for i in range(10000): #Un rango muy largo solo para buscar el numero.
if int(a)<2**i:
break
flag=True
while flag:
if len(list)==2**i:
flag=False
else:
if list == []:
b = "0" * ((2**i)//2)
list.append(b)
elif "1"* ((2**i)//2) not in list: #Mitad de la tabla, caso cuando son todos los valores equivalente a 1 (1111 o 11111111)
b = b.replace("0", "1", 1) #Reemplazamos por 1 primero
c = b[::-1]
lista2.append(c)
list.append(lista2[0])
lista2 = []
else:
b = b.replace("1", "0", 1) #Se reemplazan por 0.
c = b[::-1]
lista2.append(c)
list.append(lista2[0])
lista2 = []
return list[int(a)]
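# Example (added for clarity): a 2-bit Johnson counter cycles 00 -> 01 -> 11 -> 10,
# so johnson('10') == '11' and johnson('11') == '10' (the argument is a binary
# string whose decimal value indexes into that cycle).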
def paridad(valor): #Caso cuando t es igual a par!
valor = str(valor)
a=0
list=[]
for i in valor:
list.append(int(i))
    if list.count(1) % 2 == 0: #Si hay cantidad par de 1's, se debe retornar un 0. Si es impar, retornamos 1.
        return a
    else:
        return a + 1
def pentaBit (valor): #Caso cuando t es igual a pentabit!
valor = str(valor)
a = len(valor)
if a % 5 ==0:
return 1
else:
return 0
#Funcion que recibe un binario, y nos dice si tiene algun error o no.
def hamming(n):
n = str(n)
d = n
data = list(d)
data.reverse()
c, ch, j, r, error, h, parity_list, h_copy = 0, 0, 0, 0, 0, [], [], []
for k in range(0, len(data)): #Calcular los bits no redundantes
p = (2 ** c)
h.append(int(data[k]))
h_copy.append(data[k])
if (p == (k + 1)):
c = c + 1
for parity in range(0, (len(h))): #Determinar los bits de paridad
ph = (2 ** ch)
if (ph == (parity + 1)):
startIndex = ph - 1
i = startIndex
toXor = []
while (i < len(h)):
block = h[i:i + ph]
toXor.extend(block) #A la lista toXor le agrega la lista block
i += 2 * ph
for z in range(1, len(toXor)):
h[startIndex] = h[startIndex] ^ toXor[z]
parity_list.append(h[parity])
ch += 1
parity_list.reverse() #Lista con la paridad ordenada
error = sum(int(parity_list) * (2 ** i) for i, parity_list in enumerate(parity_list[::-1]))
if ((error) >= len(h_copy)): #Se comparan los valores de error con h.copy, si error es más grande, no se sabe donde esta el error.
return 0 #No se detecta el error
else: #Si h_copy es más grande que error, podemos buscar donde está el error y cambiarlo.
if (h_copy[error - 1] == '0'):
h_copy[error - 1] = '1'
elif (h_copy[error - 1] == '1'):
h_copy[error - 1] = '0'
h_copy.reverse()
return (int(''.join(map(str, h_copy))))
while True: #Debemos revisar todas las entradas, es requisito. Si sale un "-", se acaba el programa! Si la entrada es inválida, el programa se acaba.
entrada = input("")
if entrada=="-":
print("Fin del programa")
break
entrada = entrada.split() #Si entra al while, aplicamos split para separarlo en una lista de strings correspondientes. ["n", "b", "t"]
#Revisar casos! Si b es numero o codigo
#caso de entrada de b
if entrada[1].isnumeric(): #.isnumeric() nos revisa si el string ingresado tiene valores numericos.
if int(entrada[1])>=1 and int(entrada[1])<=64: # Revisar si el valor de t es numerico (cumplir 1<=b<= 64)
b = int(entrada[1]) #Se deja como entero porque es un numero
else:
print("Entrada inválida") #Se printea entrada invalida y vuelve a empezar el while. (Continue hace esto!)
continue
else:
if entrada[1] in codigos:
b = entrada[1]
else:
print("Entrada inválida") #Se printea entrada invalida y vuelve a empezar el while. (Continue hace esto!)
continue
#Caso de entrada de t
if entrada[2].isnumeric():
if int(entrada[2]) >= 1 and int(entrada[2]) <= 64:
t= int(entrada[2])
else:
print("Entrada inválida") #Se printea entrada invalida y vuelve a empezar el while. (Continue hace esto!)
continue
else:
if entrada[2] in codigos: # Revisar si el valor de t es numerico (cumplir 1<=t<= 64) o si está en códigos.
t = entrada[2]
else:
print("Entrada inválida") #Se printea entrada invalida y vuelve a empezar el while. (Continue hace esto!)
continue
#Ya sabemos b, ahora podemos revisar n
if str(b).isnumeric(): #Si es un valor númerico
if entrada[0].isnumeric():
if int(cualquier_base_to_dec(entrada[0],b)) >=1 and int(cualquier_base_to_dec(entrada[0],b)) <= 1000: #Tiene que estar en base 10 y 1<=n<=1000.
n = int(entrada[0])
else:
print("Entrada inválida") # Se printea entrada invalida y vuelve a empezar el while. (Continue hace esto!)
continue
elif cualquier_base_to_dec(entrada[0],b) >=1 and cualquier_base_to_dec(entrada[0],b) <= 1000:
n= entrada[0]
else:
print("Entrada inválida") #Se printea entrada invalida y vuelve a empezar el while. (Continue hace esto!)
continue
else: #Si b es codigo, dejaremos la base = 2. (Binaria)
if es_binario(entrada[0]):
b = 2
if cualquier_base_to_dec(entrada[0], b) >= 1 and cualquier_base_to_dec(entrada[0],b) <= 1000: # Tiene que estar en base 10 y 1<=n<=1000.
n = entrada[0]
else:
print("Entrada inválida") # Se printea entrada invalida y vuelve a empezar el while. (Continue hace esto!)
continue
#Si pasamos lo anterior, se tienen los valores de n, b y t.
if b==t:
if str(b).isnumeric():
print("Base",b,":",n)
else:
print("Codigo", b, ":", n)
elif str(b).isnumeric() and str(t).isnumeric():
n = cualquier_base_to_dec(n,b)
n = dec_to_cualquier_base(n,t)
print("Base",t,":",n)
elif t=="bcd":
n = cualquier_base_to_dec(n,b)
n = dec_to_bcd(n)
print("Codigo BCD:",n)
elif t == "gry":
if es_binario(n):
codes = get_gray(len(str(n)))
print("Codigo Gray:",dec_to_binary(codes.index(str(n))))
else:
num = n
num = cualquier_base_to_dec(n,b)
num = dec_to_binary(num)
codes = get_gray(len(num))
print("Codigo Gray:", dec_to_binary(codes.index(num)))
elif t=="ed3":
n = cualquier_base_to_dec(n,b)
print("Codigo Exceso de 3:", dec_to_ed3(n))
elif t=="jsn":
if es_binario(n):
print("Codigo Johnson:",johnson(n))
else:
n= cualquier_base_to_dec(n,b)
n= dec_to_binary(n)
print("Codigo Johnson:",johnson(n))
elif t =="par":
if es_binario(n):
print("Codigo Paridad:",paridad(n))
else:
n = cualquier_base_to_dec(n,b)
n = dec_to_binary(n)
print("Codigo Paridad:", paridad(n))
elif t =="pbt":
if es_binario(n):
print("Código Pentabit:", pentaBit(n))
else:
n = cualquier_base_to_dec(n,b)
n=dec_to_binary(n)
print("Código Pentabit:", pentaBit(n))
else:
if es_binario(n):
print("Codigo Hamming:",hamming(n))
else:
n = cualquier_base_to_dec(n, b)
n = dec_to_binary(n)
print("Codigo Hamming:", hamming(n))
|
[
"[email protected]"
] | |
1a5cc4dd4b02297aa61785f8fe17d28cdf7bae2c
|
99e494d9ca83ebafdbe6fbebc554ab229edcbacc
|
/.history/Day 1/Test/Answers/NegativeMarking_20210304211811.py
|
d220b7261e6beb16198606a036f3688522eaee56
|
[] |
no_license
|
Datta2901/CCC
|
c0364caa1e4937bc7bce68e4847c8d599aef0f59
|
4debb2c1c70df693d0e5f68b5798bd9c7a7ef3dc
|
refs/heads/master
| 2023-04-19T10:05:12.372578 | 2021-04-23T12:50:08 | 2021-04-23T12:50:08 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 516 |
py
|
t = int(input())
for i in range(t):
questions,requiredscore = map(int,input().split())
if questions * 4 < requiredscore:
print(-1)
continue
attempt = (requiredscore/questions) + 3
accuracy = attempt / 7
    print(format(accuracy*100,'.2f'))
#
# Here Accuracy can be find by using two linear equations
# They are Total Score(Required Score) = 4 * x - 3 * y
# Total Questions = x + y
# Here x is the total number of crct answers
# y is the total number of wrong answers
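# Worked derivation (added for clarity, using the two equations above):
#   Total Score     = 4*x - 3*y
#   Total Questions = x + y            =>  y = questions - x
#   => score = 4*x - 3*(questions - x) = 7*x - 3*questions
#   => x = (score + 3*questions) / 7
#   accuracy = x / questions = (score/questions + 3) / 7
# which is exactly what `attempt` and `accuracy` compute before scaling by 100.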
|
[
"[email protected]"
] | |
cf012a2c7444ec2ae46bcb9d28286cdda0ce7b8c
|
1868b0b0d55c5337e05ba1959523e4e9a51e626d
|
/bucket-list.py
|
c0f1336661fb444efaf04615844b791203c8bb00
|
[] |
no_license
|
DheerajJoshi/AWS-Python-boto3
|
394030bf8d577455dd451d7f6f613bc3f63162fa
|
10ec1f5f75fabc608a0e7351850753068f4fe00c
|
refs/heads/master
| 2021-06-16T08:36:56.332599 | 2017-05-05T10:43:16 | 2017-05-05T10:43:16 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 498 |
py
|
#list buckets using list_buckets function
import boto3
#create an s3 client
s3 = boto3.client('s3', aws_access_key_id = 'AKIAI6KLHZTL7BAVG6', aws_secret_access_key = 'Ds9fkJEgvm2+koAHFkz2d8/xOzR8/5tlZAp9To')
#call s3 to list current buckets
response = s3.list_buckets()
# Get a list of all bucket names from the response
buckets = [bucket['Name'] for bucket in response['Buckets']]
# Print out the bucket list
print("Bucket List: %s" % buckets)
#OUTPUT
#Bucket List: ['clinmdm', 'dheerajnew']
|
[
"[email protected]"
] | |
f1107157cde744af19ff94a0b22673dcd7f675e4
|
23ed27302202a734f5b1c24eefcc42a340064134
|
/Tareas/Tarea 1/Tarea1.py
|
dc04549bca807f97bc693d02a58ca633cacbf353
|
[] |
no_license
|
SergioGonzalez24/PensameintoComputacionalParaIngenieria
|
36cd325a9929ca72f37d4da4660a81ceece1987d
|
7761db38e7bb3088aa316046d5957299336dbfc2
|
refs/heads/main
| 2023-01-05T07:34:03.812978 | 2020-11-02T05:41:36 | 2020-11-02T05:41:36 | 309,269,349 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,767 |
py
|
###Sergio Manuel Gonzalez Vargas
###A01745446
###Tarea 1
#Ejercicio 1
#Escribe un algoritmo para verificar si un precio dado por el usuario es válido o no lo es, para ser válido debe ser un valor positivo.
print("Ejercicio 1")
precio = float(input("Ingrese precio"))
if precio > 0:
print("Valido")
else:
print("Invalido")
print("____________________________________________________________")
#Ejercico 2
#Escribe un algoritmo que muestre la velocidad promedio de un automóvil dadas la distancia recorrida en kilómetros y el tiempo que se tardó en recorrer esa distancia dado en horas.
print("Ejercicio 2")
distancia_km = float(input("Introducir distancia recorrida en KM"))
tiempo_hrs= float(input("Introducir el tiempo que tardo en recorrer la distancia total en hras "))
vel = distancia_km/tiempo_hrs
print(vel, "km/hrs")
print("____________________________________________________________")
#Ejercico 3
#Escribe un algoritmo que dada una longitud en metros, calcule y muestre su equivalente en pies.
print("Ejercicio 3")
longitud = float(input("Escribe la longitud en metros"))
#1m = 3.28pies
longitud = (longitud*3.28)
print(longitud,"ft")
print("____________________________________________________________")
#Ejercico 4
#Escribe un algoritmo que verifique si una persona puede obtener su licencia de conducir. Para hacerlo debe ser mayor de edad (18 años o más) y traer una identificación oficial.
print("Ejercico 4")
edad = int(input("Intruduce tu edad"))
if edad >= 18 :
doc_official = input("¿Tiene identificacion oficial? (escribir si o no)")
if doc_official == "si" :
print("Entregar licencia")
else:
print("no cumple con los requisitos")
else:
print("no tiene 18 años aun ")
|
[
"[email protected]"
] | |
ea36e3acd73f36ccfe19ee2598e91db7300e6d72
|
8685f4a332df175266685ca93d686e925fa83940
|
/alog/permute.py
|
17c6138ca760b3d0f9fe1c8f626c311f785456a9
|
[] |
no_license
|
danghualong/MyAlogrithms
|
843917052d32937d6f2caa5b7135cadbcd10c5c9
|
1f7cd5a372ee0489d72f29f8b22992514e7af98b
|
refs/heads/master
| 2022-12-10T11:00:07.038640 | 2021-03-23T11:00:56 | 2021-03-23T11:00:56 | 89,899,050 | 0 | 0 | null | 2022-12-08T08:19:30 | 2017-05-01T05:09:56 |
Python
|
UTF-8
|
Python
| false | false | 2,014 |
py
|
# Permutation algorithms implemented recursively
class Solution(object):
def __init__(self):
self.result=[]
    # Full permutation algorithm
def permute(self, nums):
"""
:type nums: List[int]
:rtype: List[List[int]]
"""
if(nums==None):
return None
if(len(nums)==1):
return [[nums[0]]]
else:
result=[]
for i in range(len(nums)):
sub=nums[0:i]+nums[i+1:]
list=self.permute(sub)
for item in list:
item.insert(0,nums[i])
result.append(item)
return result
    # Full permutation with duplicates removed
def permuteUnique(self, nums):
"""
:type nums: List[int]
:rtype: List[List[int]]
"""
if(nums==None):
return None
if(len(nums)==1):
return [[nums[0]]]
else:
result=[]
existdNums=[]
for i in range(len(nums)):
                # Skip values already used at this position so duplicate permutations are not generated
if(nums[i] in existdNums):
continue
existdNums.append(nums[i])
sub=nums[0:i]+nums[i+1:]
list=self.permuteUnique(sub)
for item in list:
item.insert(0,nums[i])
result.append(item)
return result
def permute2(self,nums):
if(nums==None):
return None
self.backtrack('',nums,0)
return self.result
def backtrack(self,item,nums,i):
if(i==len(nums)):
self.result.append(item)
else:
for t in range(len(item)+1):
self.backtrack(item[:t]+str(nums[i])+item[t:],nums,i+1)
o=Solution()
items=o.permute([1,2,3,4])
print(items)
print("\n")
items2=o.permuteUnique([1,2,1,4])
print(items2)
print("\n")
items=o.permute2([1,2,3,4])
print(items)
|
[
"[email protected]"
] | |
adc5bbcfc28035977a9e7d0261a34f014180d313
|
e11dd6027353f75e1eb58ab71042e51b603e0c8b
|
/forms.py
|
c6c46533bda642511f5d9e4902ce37cd0ab946a5
|
[] |
no_license
|
noamoss/notifier
|
d7f2951f5936794fd90a0a429966222a28e77694
|
791083288ecd472429a571b00fe1438c563369cc
|
refs/heads/master
| 2022-10-05T06:34:52.409777 | 2016-07-07T21:28:53 | 2016-07-07T21:28:53 | 53,517,809 | 1 | 1 | null | 2022-09-16T17:45:53 | 2016-03-09T17:35:07 |
HTML
|
UTF-8
|
Python
| false | false | 2,557 |
py
|
# -*- coding: utf-8 -*-
# projects/forms.py
import json, feedparser
import urllib.request
from urllib.parse import urlparse
from flask_wtf import Form
from urllib.error import URLError
from wtforms import StringField, DateField, IntegerField, \
SelectField, PasswordField
from wtforms.validators import DataRequired, Length, EqualTo, Email, URL
from feeds import relevant_feeds
class RegisterForm(Form):
email = StringField(
'Email',
validators=[DataRequired(message="שדה חובה"), Email(message="כתובת לא תקינה"), Length(min=6, max=40,message="")]
)
password = PasswordField(
'Password',
validators=[DataRequired(message="שדה חובה"), Length(min=5, max=40, message="סיסמה באורך 5-40 תוים")]
)
confirm = PasswordField(
'Repeat Password',
validators=[DataRequired(message="שדה חובה"),
EqualTo('password',
message='הסיסמאות אינן תואמות')]
)
class LoginForm(Form):
email = StringField(
'Email',
validators=[DataRequired(message="שדה חובה")]
)
password = PasswordField(
'Password',
validators=[DataRequired(message="שדה חובה")]
)
class AddFeedForm(Form):
url = StringField(
'כתובת URL',
validators=[DataRequired(message="יש להזין כתובת למקור המידע"),
URL(message="כתובת מקור לא תקינה")],
)
def validate(self):
rv = Form.validate(self)
if not rv:
return False
if self.url.data in relevant_feeds():
self.url.errors.append("מקור מידע קיים במנוי")
return False
url = urlparse(self.url.data).geturl()
try:
myjson = urllib.request.urlopen(url).read().decode('utf-8')
except URLError:
self.url.errors.append("כתובת לא קיימת")
return False
try:
json_object = json.loads(myjson)
return True
except ValueError:
pass
try:
myatom = feedparser.parse(url)
if myatom.status != 200:
self.url.errors.append('המקור שהוזן אינו בפורמט ATOM')
return False
except ValueError:
self.url.errors.append('המקור שהוזן אינו בפורמט JSON או ATOM')
return False
self.url=url
return True
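
# Usage sketch (hypothetical; requires a Flask request context and CSRF configuration):
#
#   form = AddFeedForm(url='https://example.org/feed.json')
#   if form.validate():
#       pass  # form.url now holds the normalised URL string and can be stored as a feed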
|
[
"[email protected]"
] | |
ee91fc709a5415206b27ceb902754a8eb60d5c7f
|
6cb36cf49ea06e32c953c57d84059170c63ba048
|
/app/sectors/controllers.py
|
a92e59e7fbfb06d0afe89dd7b806bb8738ad20b4
|
[] |
no_license
|
cohenamitc/SecurityCourseProject
|
12a9a28454aa1cf856bf39d332336ada1fc3ef09
|
8fa3872125c368dd17ac814fc0edabca8a1d70b4
|
refs/heads/master
| 2023-05-10T13:26:27.701688 | 2021-06-01T17:04:01 | 2021-06-01T17:04:01 | 363,397,605 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 652 |
py
|
# Import flask dependencies
from flask import Blueprint, request, render_template, \
flash, g, session, redirect, url_for
from flask_login import login_required
# Import password / encryption helper tools
from werkzeug.security import check_password_hash, generate_password_hash
# Import the database object from the main app module
from app import db
# Define the blueprint: 'auth', set its url prefix: app.url/auth
comm_sectors = Blueprint('sectors', __name__, url_prefix='/sectors')
# Set the route and accepted methods
@comm_sectors.route('/')
@login_required
def sectors():
return render_template("sectors/sectors.html")
|
[
"[email protected]"
] | |
49b4bde9f3ad68bd20580c75ff2a5e15b1dc16d4
|
05b704d274f1589c82f458b2cfeca3922dac7543
|
/scripts/multiramp.py
|
a17903317ca2816d47c5808cc3041bbc55c0e176
|
[] |
no_license
|
andreassteinleitner/crazyflieFormation
|
7badde5b43da812d6d08d8f293d61f9dc8147e22
|
7e9d594a8507a46be806f563c0413734339fb9ef
|
refs/heads/master
| 2020-04-17T20:53:12.995403 | 2019-04-17T01:45:18 | 2019-04-17T01:45:18 | 166,925,064 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,285 |
py
|
# -*- coding: utf-8 -*-
#
# || ____ _ __
# +------+ / __ )(_) /_______________ _____ ___
# | 0xBC | / __ / / __/ ___/ ___/ __ `/_ / / _ \
# +------+ / /_/ / / /_/ /__/ / / /_/ / / /_/ __/
# || || /_____/_/\__/\___/_/ \__,_/ /___/\___/
#
# Copyright (C) 2014 Bitcraze AB
#
# Crazyflie Nano Quadcopter Client
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
"""
Simple example that connects Crazyflies at 'URI's, ramp up-down the motors and
disconnects.
"""
import logging
import time
from threading import Thread
import cflib
from cflib.crazyflie import Crazyflie
logging.basicConfig(level=logging.ERROR)
class MotorRampExample:
"""Example that connects to a Crazyflie and ramps the motors up/down and
the disconnects"""
def __init__(self, link_uri):
""" Initialize and run the example with the specified link_uri """
self._cf = Crazyflie(rw_cache='./cache')
self._cf.connected.add_callback(self._connected)
self._cf.disconnected.add_callback(self._disconnected)
self._cf.connection_failed.add_callback(self._connection_failed)
self._cf.connection_lost.add_callback(self._connection_lost)
self._cf.open_link(link_uri)
self.connected = True
print('Connecting to %s' % link_uri)
def _connected(self, link_uri):
""" This callback is called form the Crazyflie API when a Crazyflie
has been connected and the TOCs have been downloaded."""
# Start a separate thread to do the motor test.
# Do not hijack the calling thread!
Thread(target=self._ramp_motors).start()
def _connection_failed(self, link_uri, msg):
"""Callback when connection initial connection fails (i.e no Crazyflie
at the specified address)"""
print('Connection to %s failed: %s' % (link_uri, msg))
self.connected = False
def _connection_lost(self, link_uri, msg):
"""Callback when disconnected after a connection has been made (i.e
Crazyflie moves out of range)"""
print('Connection to %s lost: %s' % (link_uri, msg))
self.connected = False
def _disconnected(self, link_uri):
"""Callback when the Crazyflie is disconnected (called in all cases)"""
print('Disconnected from %s' % link_uri)
self.connected = False
def _ramp_motors(self):
thrust_mult = 1
thrust_step = 500
thrust = 20000
pitch = 0
roll = 0
yawrate = 0
# Unlock startup thrust protection
self._cf.commander.send_setpoint(0, 0, 0, 0)
while thrust >= 20000:
self._cf.commander.send_setpoint(roll, pitch, yawrate, thrust)
time.sleep(0.1)
if thrust >= 25000:
thrust_mult = -1
thrust += thrust_step * thrust_mult
self._cf.commander.send_setpoint(0, 0, 0, 0)
# Make sure that the last packet leaves before the link is closed
# since the message queue is not flushed before closing
time.sleep(0.1)
self._cf.close_link()
if __name__ == '__main__':
# Initialize the low-level drivers (don't list the debug drivers)
cflib.crtp.init_drivers(enable_debug_driver=False)
    # Connect the Crazyflies and ramp them up and down
le0 = MotorRampExample('radio://0/20/2M')
le1 = MotorRampExample('radio://1/40/2M')
le2 = MotorRampExample('radio://0/60/2M')
le3 = MotorRampExample('radio://0/80/2M')
le4 = MotorRampExample('radio://1/100/2M')
while(le0.connected or le1.connected):
time.sleep(0.1)
|
[
"[email protected]"
] | |
c0593805d9fcc7d217660376fbb2688f706642e2
|
0798277f2706998ab80442ac931579eb47f676e5
|
/boundary/property_handler.py
|
45635669e8b5a3731f321b2d7a0d6eb87f6a6557
|
[
"Apache-2.0"
] |
permissive
|
isabella232/pulse-api-cli
|
49ed38b0694ab289802f69ee6df4911cf3378e3f
|
b01ca65b442eed19faac309c9d62bbc3cb2c098f
|
refs/heads/master
| 2023-03-18T00:23:15.295727 | 2016-05-13T15:44:08 | 2016-05-13T15:44:08 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,244 |
py
|
#
# Copyright 2016 BMC Software, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
class PropertyHandler(object):
def __init__(self):
self._properties = None
def _process_properties(self, properties):
"""
Transforms the command line properties into python dictionary
:return:
"""
if properties is not None:
self._properties = {}
for p in properties:
d = p.split('=')
self._properties[d[0]] = d[1]
def _add_property_argument(self, parser, help_text):
parser.add_argument('-p', '--property', dest='properties', action='append',
required=False, metavar='property=value', help=help_text)
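
# Usage sketch (hypothetical values): repeated -p/--property flags arrive as a list
# of "key=value" strings and are folded into a dict by _process_properties, e.g.
#
#   handler = PropertyHandler()
#   handler._process_properties(['location=Boston', 'env=prod'])
#   # handler._properties == {'location': 'Boston', 'env': 'prod'}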
|
[
"[email protected]"
] | |
dc9a84126f4dc2d2e36bdd22635863a4a42193af
|
cd2c889f1923b433a150cf84769b9374822ef43a
|
/auth-service/src/auth_app/migrations/0001_initial.py
|
f90e13dd9715fb04b879361554e75ae17aa8ed56
|
[] |
no_license
|
JesuFemi-O/demo-django-microservice
|
b7c6453d8369ac2ec6844290ddb4155c53c43f00
|
ba5d566d17c4c80e313013f51c91530180a8ea4c
|
refs/heads/master
| 2023-06-02T06:22:59.097114 | 2021-06-21T22:51:45 | 2021-06-21T22:51:45 | 379,007,469 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,481 |
py
|
# Generated by Django 3.2.4 on 2021-06-21 10:39
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
('auth', '0012_alter_user_first_name_max_length'),
]
operations = [
migrations.CreateModel(
name='User',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
('username', models.CharField(db_index=True, max_length=255, unique=True)),
('email', models.CharField(db_index=True, max_length=255, unique=True)),
('first_name', models.CharField(max_length=400)),
('last_name', models.CharField(max_length=400)),
('role', models.CharField(choices=[('INSTRUCTOR', 'Instructor'), ('STUDENT', 'Student'), ('SYSTEM ADMIN', 'System Admin')], default='STUDENT', max_length=58, verbose_name='UserTypes')),
('display_picture', models.URLField(blank=True, max_length=526, null=True)),
('is_active', models.BooleanField(default=True)),
('is_verified', models.BooleanField(default=False)),
('is_staff', models.BooleanField(default=False)),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('auth_provider', models.CharField(default='email', max_length=255)),
('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
],
options={
'abstract': False,
},
),
]
|
[
"[email protected]"
] | |
c63a4eb123b83fd7c5e71ea9e496a117a9d6ffcc
|
6dbb5ae0bcfcdb2895fb1a066fbe14e2ce3f04e4
|
/python/html_parse.py
|
fb09c3fbae045485c40effc1dfc01dc985babfef
|
[
"Apache-2.0"
] |
permissive
|
kev0960/ModooCode
|
8b5ed03eed2dbd9c753bbcf6aeaa704b234cf411
|
e8d5d9cf08077e227e0d36939d7977eeef46b6bb
|
refs/heads/master
| 2023-08-31T17:37:29.406419 | 2023-08-27T01:22:19 | 2023-08-27T01:22:19 | 148,594,006 | 54 | 8 |
Apache-2.0
| 2023-03-01T23:09:31 | 2018-09-13T06:44:32 |
C++
|
UTF-8
|
Python
| false | false | 10,947 |
py
|
from bs4 import BeautifulSoup
import os
from tqdm import tqdm
from html.parser import HTMLParser
class BlogDumpHtmlParser(HTMLParser):
def __init__(self, filename):
super().__init__()
self.in_box = False
self.in_box_with_div = False
self.in_list = False
self.list_type = None
self.in_script = False
self.in_title = False
self.ignore_p = False
self.encounter_title = False
self.in_link = False
self.link_url = None
self.filename = filename
self.div_cnt = 0
self.current_div_cnt = 0
output_file_name = filename.split('.')
output_file_name[-1] = 'md'
output_file_name = '.'.join(output_file_name)
output_file_name = output_file_name.split('/')
output_file_name[1] = 'md'
output_file_name = '/'.join(output_file_name)
self.output_file = open(output_file_name, 'w')
self.output_html = ""
def print_line(self, line):
self.output_html += line
# self.output_file.write(line)
def print(self, line):
self.output_html += (line + '\n')
# self.output_file.write(line + '\n')
def parse(self):
with open(self.filename, 'r') as f:
self.feed(f.read())
        self.output_html = self.output_html[:self.output_html.rfind("공감")]  # drop everything from the "공감" ("Like") widget onward
self.output_file.write(self.output_html)
def handle_starttag(self, tag, attrs):
def check_attr(attrs, attr_name):
for attr in attrs:
if attr[0] == attr_name:
if attr_name == 'class':
return attr[1].split(' ')
return attr[1]
return ""
if tag == 'img':
self.print("".format(
BlogDumpParser.replace_bracket(check_attr(attrs, 'alt')),
BlogDumpParser.srcset_to_src(
BlogDumpParser.replace_bracket(check_attr(attrs, 'srcset')))))
elif tag == 'a':
self.in_link = True
self.link_url = check_attr(attrs, 'href')
elif tag == 'ul':
self.in_list = True
self.list_type = tag
elif tag == 'ol':
self.in_list = True
self.list_type = tag
elif tag == 'li':
if self.in_list:
if self.list_type == 'ul':
self.print_line("* ")
else:
self.print_line("1. ")
elif tag == 'br':
if not self.in_box or not self.in_box_with_div:
self.print("") # New line.
elif tag == 'script':
self.in_script = True
elif tag == 'p' and (not self.ignore_p or self.in_box):
self.print("")
self.ignore_p = False
elif tag == 'span' or tag == 'td':
text_color = BlogDumpParser.what_color_is_this(check_attr(attrs, 'style'))
if text_color == 'light-blue':
self.in_title = True
if not self.encounter_title:
self.print_line("\n### ")
if tag == 'td':
self.ignore_p = True
self.encounter_title = True
elif tag == 'div' and 'txc-textbox' in check_attr(attrs, 'class'):
box_color = BlogDumpParser.what_color_is_this(check_attr(attrs, 'style'))
if box_color == 'blue':
self.print("```cpp")
elif box_color == 'red':
self.print("```warning")
else:
self.print("```info")
self.in_box = True
self.current_div_cnt = self.div_cnt
elif tag == 'div':
self.div_cnt += 1
self.print("")
if self.in_box:
self.in_box_with_div = True
def handle_endtag(self, tag):
if tag == 'ul' or tag == 'ol':
self.in_list = False
elif tag == 'script':
self.in_script = False
elif tag == 'div' and self.div_cnt > self.current_div_cnt:
self.div_cnt -= 1
elif tag == 'div' and self.in_box and self.div_cnt == self.current_div_cnt:
self.in_box = False
self.in_box_with_div = False
self.print("\n```")
def handle_data(self, data):
def check_char(c):
if 'a' <= c <= 'z' or 'A' <= c <= 'Z' or '0' <= c <= '9':
return True
elif c in {
'(', ')', '{', '}', ',', '.', '!', '+', '-', '*', '/', '=', '[', ']',
'<', '>', '~', '_', '&', '%', '|', '?'
}:
return True
return False
def is_code_chunk(s):
for c in s:
if not check_char(c):
return False
return True
def annotate_plain_text(s):
inline_code_start = False
# Figure out some possible "Code chunk" and wrap it with inline code notation.
words = s.split(' ')
i = 0
while i < len(words):
w = words[i]
if not is_code_chunk(w):
if inline_code_start:
# Put end tag
words[i - 1] = words[i - 1] + '`'
inline_code_start = False
if is_code_chunk(w):
if w.startswith('('):
# escape until it finds ')'
while i < len(words):
if ')' in words[i]:
break
i += 1
elif not inline_code_start:
inline_code_start = True
words[i] = '`' + w
i += 1
if inline_code_start:
words[-1] = words[-1] + '`'
annotated = ' '.join(words)
# Filter out some benign ones.
i = 0
while i < len(annotated):
if annotated[i] == '`':
end = annotated.find('`', i + 1)
w = annotated[i + 1:end]
if w in {'C', 'C++', 'Psi', 'C++ 11', 'forwarding)', '.', ')'}:
annotated = annotated[:i - 1] + ' ' + w + annotated[end + 1:]
elif w.isdigit() or (w.startswith('-') and w[1:].isdigit()):
annotated = annotated[:i - 1] + ' ' + w + annotated[end + 1:]
i += 1
return annotated
if self.in_script:
return
stripped = BlogDumpParser.remove_whitespace(data)
if self.in_title:
self.in_title = False
self.encounter_title = True
elif self.in_link:
self.in_link = False
if not BlogDumpParser.is_only_whitespace(stripped):
self.print_line(" [{0}]({1})".format(stripped, self.link_url))
return
elif not BlogDumpParser.is_only_whitespace(stripped):
self.encounter_title = False
if not BlogDumpParser.is_only_whitespace(stripped):
if not self.in_box:
stripped = annotate_plain_text(stripped)
self.print_line(stripped)
if self.encounter_title:
self.print("")
if self.in_list:
self.print("")
class BlogDumpParser:
def __init__(self, filename):
self.filename = filename
self.in_box = False
self.in_list = False
self.in_title = False
self.in_script = False
self.current_box_parent = None
self.current_list_parent = None
self.in_what_env = None
output_file_name = filename.split('.')
output_file_name[-1] = 'md'
output_file_name = '.'.join(output_file_name)
output_file_name = output_file_name.split('/')
output_file_name[1] = 'md'
output_file_name = '/'.join(output_file_name)
self.output_file = open(output_file_name, 'w')
@staticmethod
def remove_whitespace(line):
line = line.translate({ord(i): None for i in '\n'})
return line.rstrip()
@staticmethod
def replace_bracket(s):
s = s.replace("]", "")
s = s.replace(")", "")
return s
@staticmethod
def is_only_whitespace(line):
for i in range(len(line)):
if line[i] != ' ' or line[i] == '\n':
return False
return True
@staticmethod
def srcset_to_src(srcset):
src = srcset.split(' ')[0]
src = src[2:]
src = "http://" + src
return src
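    # Example (added for clarity; URL is illustrative): for a srcset such as
    #   '//t1.daumcdn.net/img.png 1x, //t1.daumcdn.net/img_big.png 2x'
    # only the first candidate is kept, the leading '//' is dropped and 'http://'
    # is prefixed, giving 'http://t1.daumcdn.net/img.png'.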
@staticmethod
def what_color_is_this(style):
if 'rgb(254, 222, 199)' in style:
return 'red'
elif 'rgb(219, 232, 251)' in style:
return 'blue'
elif 'rgb(243, 197, 52)' in style:
return 'yellow'
elif 'rgb(48, 88, 210)' in style or 'rgb(48,88,210)' in style:
return 'light-blue'
def check_box_ended(self, current):
if self.in_box and self.current_box_parent in current.parent:
self.in_box = False
return True
return False
def check_list_ended(self, current):
if self.in_list and self.current_list_parent in current.parent:
self.in_list = False
return True
return False
def print_line(self, line):
self.output_file.write(line)
def print(self, line):
self.output_file.write(line + '\n')
def parse(self):
with open(self.filename) as file:
soup = BeautifulSoup(file, "lxml")
for current in soup.recursiveChildGenerator():
try:
if self.check_box_ended(current):
self.print("\n```")
elif self.check_list_ended(current):
self.print("")
# Check for tags here.
tag = current.name
if tag == 'img':
self.print("".format(
BlogDumpParser.replace_bracket(current.attrs['alt']),
BlogDumpParser.srcset_to_src(
BlogDumpParser.replace_bracket(current.attrs['srcset']))))
elif tag == 'ul' or tag == 'ol':
self.in_list = True
self.current_list_parent = current
self.in_what_env = tag
elif tag == 'li':
if self.in_what_env == 'ul':
self.print_line("\n* ")
elif self.in_what_env == 'ol':
self.print_line("\n1. ")
elif tag == 'br':
self.print("") # New line.
elif tag == 'script':
self.in_script = True
elif tag == 'p':
self.print("")
elif tag == 'span':
text_color = BlogDumpParser.what_color_is_this(
current.attrs['style'])
if text_color == 'light-blue':
self.in_title = True
elif 'txc-textbox' in current.attrs['class']:
box_color = BlogDumpParser.what_color_is_this(
current.attrs['style'])
if box_color == 'blue':
self.print("```cpp")
elif box_color == 'red':
self.print("```warning")
else:
self.print("```info")
self.in_box = True
self.current_box_parent = current
self.in_what_env = box_color
except KeyError:
continue
except:
if self.in_script:
self.in_script = False
continue
stripped = BlogDumpParser.remove_whitespace(current)
if not BlogDumpParser.is_only_whitespace(stripped):
if self.in_title:
self.print_line('###')
self.in_title = False
self.print_line(stripped)
else:
self.in_title = False
self.output_file.close()
if __name__ == "__main__":
#'''
with tqdm(os.listdir('./blog')) as t:
for filename in t:
t.set_description("Dump : " + filename)
if 'comment' in filename:
continue
blog_dump = BlogDumpHtmlParser(os.path.join('./blog', filename))
blog_dump.parse()
'''
blog_dump = BlogDumpHtmlParser(os.path.join('./blog/dump_test.html'))
blog_dump.parse()
'''
|
[
"[email protected]"
] | |
911f054802f77dad2a660f46c44d59dec06745c0
|
7454df56509e6bed0e6e710be0773386d3091942
|
/game.py
|
0d68e641a03949fd3b41ccf35a36e4aa15f2aecf
|
[] |
no_license
|
bhavana212/python-training
|
aad89753681c73a89d110b1660f169532c4cb3a4
|
c2afb6762f2eda3ea72133c753774de72a063ad3
|
refs/heads/master
| 2023-09-04T03:43:20.563698 | 2021-10-03T16:35:56 | 2021-10-03T16:35:56 | 411,286,167 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 550 |
py
|
p1 = input("enter ")
p2 = input("enter ")
def compare(p1,p2):
if(p1 == p2):
print("draw")
elif p1 == "rock":
if p2 == "scissor":
return("p1 wins")
else:
return ("p2 wins")
elif p1 == "scissor":
if p2 == "paper":
return("p1 wins")
else:
return ("p2 wins")
elif p1 == "paper":
if p2 == "rock":
return("p1 wins")
else:
return ("p2 wins")
else:
return ("invalid syntax")
print((compare(p1,p2)))
|
[
"[email protected]"
] | |
5e881becf075b1b52eb57fdc637918870d7dc102
|
aeee98c73119207ec2c00ec6fc574f0c5667e7d0
|
/stage_1.0/sarcasm_detection_v2.py
|
352127319dd44241bf65844e87ab7f0c4604e1ea
|
[] |
no_license
|
jingerzzz/sarcasm-detection
|
03e0d3d3f8681ddfee0cb93d63ecdcaed7755acb
|
d3d07bace4171a329d864ea8dd741bbbf644c7cd
|
refs/heads/master
| 2021-03-31T22:52:45.722700 | 2020-03-18T04:29:09 | 2020-03-18T04:29:09 | 248,137,218 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,506 |
py
|
## v2: Change TfidfVectorizer instance
import pandas as pd
import numpy as np
import random
from io import StringIO
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_selection import chi2
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.naive_bayes import MultinomialNB
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import LinearSVC
from sklearn.model_selection import cross_val_score
train_df = pd.read_csv('train.tsv',delimiter='\t',encoding='utf-8')
# test_df = pd.read_csv('test.tsv',delimiter='\t',encoding='utf-8')
train_df = train_df.rename(columns={train_df.columns[0]:"category", train_df.columns[1]:"comment", train_df.columns[2]:"parent_comment"})
# test_df = test_df.rename(columns={test_df.columns[0]:"category", test_df.columns[1]:"comment", test_df.columns[2]:"parent_comment"})
# Rename the column names for later use
df = train_df
data_size = len(df)
FNAME = "output_v2_data_size={}.txt".format(data_size)
output_file = open(FNAME,'w')
output_file.write("Dataset size:{}\n".format(len(df)))
tfidf = TfidfVectorizer(max_features = 5000)
features = tfidf.fit_transform(df.comment).toarray()
output_file.write("features shape:{}\n".format(features.shape))
print(features)
# f_feature = open("feature.txt",'w')
# for i in range(len(features)):
# f_feature.write("{}".format(features[i]))
# f_feature.close()
labels = df.category
print("done_1")
x_train, x_test, y_train, y_test = train_test_split(features, labels,test_size = .25, random_state = 0)
output_file.write("x_train size:{}\n".format(len(x_train)))
output_file.write("x_test size:{}\n".format(len(x_test)))
# Divide dataset into training set and testing set
# Using linear support vector classifier
lsvc = LinearSVC()
# training the model
lsvc.fit(x_train, y_train)
# getting the score of train and test data
output_file.write("train score: {}\n".format(lsvc.score(x_train, y_train)))
output_file.write("test score: {}\n".format(lsvc.score(x_test, y_test)))
# count_vect = CountVectorizer()
# x_train_counts = count_vect.fit_transform(x_train)
# tfidf_transformer = TfidfTransformer()
# x_train_tfidf = tfidf_transformer.fit_transform(x_train_counts)
# print(x_train_tfidf)
# print("done_2")
# clf = MultinomialNB().fit(x_train_tfidf, y_train)
print("done_3")
# print(clf.predict(count_vect.transform(["Thanks Obama!"])))
# models = [
# RandomForestClassifier(n_estimators=200, max_depth=3, random_state=0),
# LinearSVC(),
# MultinomialNB(),
# LogisticRegression(random_state=0),
# ]
# # Test the accuracies of different models
# CV = 5
# cv_df = pd.DataFrame(index=range(CV * len(models)))
# entries = []
# index=1
# for model in models:
# model_name = model.__class__.__name__
# accuracies = cross_val_score(model, features, labels, scoring='accuracy', cv=CV)
# for fold_index, accuracy in enumerate(accuracies):
# entries.append((model_name, fold_index, accuracy))
# print("model_{}".format(index))
# index=index+1
# cv_df = pd.DataFrame(entries, columns=['model_name', 'fold_index', 'accuracy'])
# cv_df_mean = cv_df.groupby('model_name').accuracy.mean()
# output_file.write("\nAccuracy\n")
# for name, value in cv_df_mean.items():
# output_file.write("{} {}\n".format(name,value))
output_file.close()
|
[
"[email protected]"
] | |
f4793c2f926a71e94cc4d319adbfac6291849d24
|
fa35b0fd2d0e1a570ba82f6e9b484b012fd1f5b0
|
/bop/admin.py
|
883581570ad741a447b46f7d76310b616306d9e9
|
[] |
no_license
|
RaihanEXE99/DevelopersBook
|
97624e8b30b35908f966ecbc8d2bc369bf28439a
|
744e63b5a17a2bdb3d59dacaa1906a0872cb93ba
|
refs/heads/master
| 2023-08-17T06:13:21.534751 | 2021-10-04T14:42:36 | 2021-10-04T14:42:36 | 285,777,293 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 162 |
py
|
from django.contrib import admin
from .models import biox,Notification
# # Register your models here.
admin.site.register(biox)
admin.site.register(Notification)
|
[
"[email protected]"
] | |
6db57545a3a673142f62c0c44d4291bc948050c6
|
39f1106d074988a608966ccb8a24a13a9925abb4
|
/utils.py
|
9e837a61fc596566ecf0abfc87937621658854ff
|
[
"MIT"
] |
permissive
|
KarlXing/ModulationRL
|
59372057275495cd848b98183bb3857474b79155
|
bdd7480b70d25326fe19a5af5f39c7a01b90a034
|
refs/heads/master
| 2020-04-20T10:50:40.208024 | 2019-02-02T07:26:41 | 2019-02-02T07:26:41 | 168,800,185 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,314 |
py
|
import torch
import torch.nn as nn
import numpy as np
from envs import VecNormalize
# Get a render function
def get_render_func(venv):
if hasattr(venv, 'envs'):
return venv.envs[0].render
elif hasattr(venv, 'venv'):
return get_render_func(venv.venv)
elif hasattr(venv, 'env'):
return get_render_func(venv.env)
return None
def get_vec_normalize(venv):
if isinstance(venv, VecNormalize):
return venv
elif hasattr(venv, 'venv'):
return get_vec_normalize(venv.venv)
return None
# Necessary for my KFAC implementation.
class AddBias(nn.Module):
def __init__(self, bias):
super(AddBias, self).__init__()
self._bias = nn.Parameter(bias.unsqueeze(1))
def forward(self, x):
if x.dim() == 2:
bias = self._bias.t().view(1, -1)
else:
bias = self._bias.t().view(1, -1, 1, 1)
return x + bias
def init(module, weight_init, bias_init, gain=1):
weight_init(module.weight.data, gain=gain)
bias_init(module.bias.data)
return module
# https://github.com/openai/baselines/blob/master/baselines/common/tf_util.py#L87
def init_normc_(weight, gain=1):
weight.normal_(0, 1)
weight *= gain / torch.sqrt(weight.pow(2).sum(1, keepdim=True))
def tanh_g(x,g):
x = x/g
return torch.tanh(x)
def sigmoid(x):
return 1.0/(1+np.exp(-x))
def update_mode(evaluations, masks, reward, value, next_value, tonic_g, phasic_g, g, threshold, sigmoid_g, sigmoid_range, natural_value):
value = value.cpu()
next_value = next_value.cpu()
pd_error = reward-value+next_value
evaluations = 0.75*evaluations + 0.25*pd_error
evaluations = evaluations*masks
if sigmoid_g:
if not natural_value:
evaluations_mode = (abs(evaluations)-threshold)*(sigmoid_range/threshold)
else:
evaluations_mode = (evaluations - threshold)*(sigmoid_range/threshold)
evaluations_mode = sigmoid(evaluations_mode)
g = tonic_g+evaluations_mode*(phasic_g-tonic_g)
else:
for i in range(g.shape[0]):
if not natural_value:
g[i][0] = phasic_g if abs(evaluations[i][0]) > threshold else tonic_g
else:
g[i][0] = phasic_g if evaluations[i][0] > threshold else tonic_g
return evaluations, g, pd_error
def update_mode_entropy(device, evaluations, masks, dist_entropy, tonic_g, phasic_g, g, threshold, sigmoid_g, sigmoid_range, natural_value):
evaluations = 0.75*evaluations + 0.25*dist_entropy
evaluations = evaluations*masks
if sigmoid_g:
evaluations_mode = (evaluations - threshold)*(sigmoid_range/threshold)
g = 2*(phasic_g-1)*sigmoid(evaluations_mode)-(phasic_g-2)
mask = (g < 1).to(torch.device('cpu'), dtype=torch.float32)
g = g*(1-mask) + 1/(1-g*mask)
g = torch.clamp(g, tonic_g, phasic_g)
g = 1.0/g
# g = tonic_g+evaluations_mode*(phasic_g-tonic_g)
else:
for i in range(g.shape[0]):
g[i][0] = phasic_g if evaluations[i][0] > threshold else tonic_g
return evaluations, g
def neuro_activity(obs, g, mid = 128):
assert(obs.shape[0] == g.shape[0])
for i in range(obs.shape[0]):
obs[i] = (torch.tanh((obs[i]-mid)/g[i])+1)/2
return obs
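# Illustrative example (added for clarity; shapes and values are hypothetical):
# a larger per-sample gain g gives a softer, flatter response around mid=128.
#
#   obs = torch.full((2, 1), 200.0)
#   g = torch.tensor([[10.0], [80.0]])
#   neuro_activity(obs, g)   # row 0 saturates near 1.0, row 1 stays around 0.86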
def obs_representation(obs, modulation, g_device, input_neuro):
if modulation == 0: # no modulation
if input_neuro:
obs = neuro_activity(obs, g_device)
else:
obs = obs/255
elif modulation == 1: # input modulation
if input_neuro:
obs = neuro_activity(obs, g_device)
else:
obs = obs/255
obs = obs/g_device
else: # f1 modulation
obs = obs/255
return obs
def get_beta(device, dist_entropy, mean_entropy, min_beta, max_beta, current_beta_range, flatness, beta):
mean_entropy = mean_entropy.to(device)
evaluations_mode = (dist_entropy - mean_entropy)*(flatness/mean_entropy)
beta = (2*(current_beta_range - 1) * sigmoid(evaluations_mode)-(current_beta_range - 2)).to(device)
mask = (beta < 1).to(device, dtype=torch.float32)
beta = beta*(1-mask) + 1/(2-beta*mask)
beta = 1.0/beta
beta = torch.clamp(beta, min=min_beta, max=max_beta).unsqueeze(1)
return beta
|
[
"[email protected]"
] | |
460a8c855a8c080167902a083a49032ff11e9f57
|
31bd979746cceaecb1c91209177342f13083c86c
|
/hackerspace/git.py
|
d5b399b3ee79968e2c63a14bee520b410d8f92e1
|
[
"MIT"
] |
permissive
|
shaygover/HackerspaceTemplatePackage
|
dc5a720a081aec498b61a905793b5a0f025a6876
|
8704e9500e33e7b78497c0f3114bda1412c69a6a
|
refs/heads/master
| 2020-11-25T14:09:05.736071 | 2019-12-19T16:55:31 | 2019-12-19T16:55:31 | 228,707,266 | 0 | 0 |
MIT
| 2019-12-17T21:47:06 | 2019-12-17T21:47:05 | null |
UTF-8
|
Python
| false | false | 76 |
py
|
def add_issue(error):
print('TO DO: Add error as issue to git repo...')
|
[
"[email protected]"
] | |
53e9b9b82e55e37c19a8dc7be229c35791a377af
|
c8cb8411fac91198026d61c9a2e33d96937dba93
|
/Logistic_Regression.py
|
ae245c253ce9e50600b10257cb1305349d7940cd
|
[] |
no_license
|
anurag0308/Regression
|
c10219dd0ea6a100c7394f8c50f53fc1595a3fe6
|
ac924b9c965e5451b8a9a3a3893336d4ea01d251
|
refs/heads/master
| 2020-08-07T12:52:16.279411 | 2019-11-16T07:31:18 | 2019-11-16T07:31:18 | 213,457,567 | 3 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,561 |
py
|
# -*- coding: utf-8 -*-
"""
Created on Mon Feb 18 22:50:39 2019
@author: Anurag sharma
"""
import pandas as pd
import numpy as np
from matplotlib import pyplot as plt
from sklearn.preprocessing import MinMaxScaler
traindata1 = pd.read_csv("C:\\Users\\Anurag sharma\\Desktop\\CODE\\ML\\LogisticRegression\\train.csv")
testdata1 = pd.read_csv("C:\\Users\\Anurag sharma\\Desktop\\CODE\\ML\\LogisticRegression\\test.csv")
traindata = traindata1.drop(["Name","Id"],axis=1)
testdata = testdata1.drop(["Name","Id"],axis=1)
#from sklearn.preprocessing import MinMaxScaler
mean =traindata['3P%'].mean()
traindata['3P%'].fillna(mean,inplace=True)
mean =testdata['3P%'].mean()
testdata['3P%'].fillna(mean,inplace=True)
#from sklearn.preprocessing import MinMaxScaler
scaler = MinMaxScaler()
t = scaler.fit_transform(traindata)
a = t.shape
X=np.matrix([np.ones(a[0]),t[:,0],t[:,1],t[:,2],t[:,3],t[:,4],t[:,5],t[:,6],t[:,7],t[:,8],t[:,9],t[:,10],t[:,11],t[:,12],t[:,13],t[:,14],t[:,15],t[:,16],t[:,17],t[:,18]])
m = X.shape[1]
#scaler = MinMaxScaler()
t1 = scaler.fit_transform(testdata)
#Y = testval
Y=np.matrix([np.ones(t1.shape[0]),t1[:,0],t1[:,1],t1[:,2],t1[:,3],t1[:,4],t1[:,5],t1[:,6],t1[:,7],t1[:,8],t1[:,9],t1[:,10],t1[:,11],t1[:,12],t1[:,13],t1[:,14],t1[:,15],t1[:,16],t1[:,17],t1[:,18]])
def sigmoid(z):
return 1/(1+np.exp(-z))
#def cost(h,y):
# return -(y.dot(np.log(h))+(1-y).dot(np.log(1-h)))/m
def logistic_regression(X):
lr=0.001
noi=1000
y= np.matrix(t[:,19])
#m = X.shape[0]
theta = np.zeros([1,20])
te=0.001
cf_list=[]
for i in range(noi):
z= np.dot(X.T,theta.T)
h = sigmoid(z) #hypothesis
#cf = cost (h,y)
cf=-(np.sum((y.dot(np.log(h))+(1-y).dot(np.log(1-h)))))/m
cf_list.append(cf)
if(cf_list[i]<te):
break
elif(len(cf_list)>10 and np.mean(cf_list[-10:])==cf_list[-1]):
break
else:
theta=grad_desc(theta,lr,X)
return(cf_list,theta)
def grad_desc(theta,lr,X):
y= t[:,19]
#m = X.shape[1]
z= sigmoid(np.dot(X.T,theta.T))
c=(z.T-y).dot(X.T)
#for j in range (y):
theta = theta - (lr/m)*c
return theta
def predict(theta,Y):
prob=sigmoid(theta@Y)
values=np.where(prob>=0.5,1,0)
return np.squeeze(values)
cst,theta=logistic_regression(X)
id=testdata1['Id']
df=pd.DataFrame(predict(theta,Y),id)
df.to_csv('C:\\Users\\Anurag sharma\\Desktop\\CODE\\ML\\submission.csv')
|
[
"[email protected]"
] | |
ec88ef12bf7220d9b5f318f6abc8d7d178b9d2a1
|
dc210aa0f882a0762ca8b6ef339e7e5842bdb26b
|
/typeidea/config/models.py
|
edb190e82a7748c57a8507dd06a89691bcb53f22
|
[
"MIT"
] |
permissive
|
liushouchuan13/typeidea
|
e0821c898e15944ee8aaeb280769f5af44ad2322
|
d801f7a9b6b3e9026421cd26b4ef97732345cc81
|
refs/heads/master
| 2021-04-03T01:53:37.296456 | 2018-03-09T08:55:54 | 2018-03-09T08:55:54 | 103,502,663 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,785 |
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models
from django.contrib.auth.models import User
# Create your models here.
class Link(models.Model):
STATUS_ITEMS = (
(1, '正常'),
(2, '删除'),
)
title = models.CharField(max_length=50, verbose_name='标题')
href = models.URLField(verbose_name='链接')
status = models.PositiveIntegerField(default=1, choices=STATUS_ITEMS, verbose_name='状态')
weight = models.PositiveIntegerField(default=1, choices=zip(range(1, 6), range(1, 6)),
verbose_name='权重',
help_text='权重越高位置越靠前')
owner = models.ForeignKey(User, verbose_name='作者')
created_time = models.DateTimeField(auto_now_add=True, verbose_name='创建时间')
class Meta:
verbose_name = verbose_name_plural = '友链'
class SideBar(models.Model):
STATUS_ITEMS = (
(1, '展示'),
(2, '下线'),
)
SIDE_TYPE = (
(1, 'HTML'),
(2, '最新文章'),
(3, '热门文章'),
(4, '最近文章'),
)
title = models.CharField(max_length=50, verbose_name='标题')
display_type = models.PositiveIntegerField(default=1, choices=SIDE_TYPE,
verbose_name='展示类型')
content = models.CharField(max_length=500, blank=True, verbose_name='内容',
help_text='如果设置的不是HTML类型, 可为空')
owner = models.ForeignKey(User, verbose_name='作者')
created_time = models.DateTimeField(auto_now_add=True, verbose_name="创建时间")
class Meta:
verbose_name = verbose_name_plural = '侧边栏'
|
[
"[email protected]"
] | |
4ad42cd6418d521ed2f275b7b73aaa4e7036fbea
|
964b063c2461aad267ddd991fefaf7ab53b1ca94
|
/6-kyu/iq-test/python/solution.py
|
2221bc57375308dc79e1d3f085e299509f860057
|
[] |
no_license
|
lucasbflopes/codewars-solutions
|
26c4e2cd1be19db50cc8c1d9fc117c51c82a2259
|
72ef2c02dde7bd0d5a691e04e3b2a383e892f84b
|
refs/heads/master
| 2022-03-14T01:26:41.816498 | 2019-11-23T17:17:19 | 2019-11-23T17:17:19 | 114,834,447 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 193 |
py
|
def iq_test(numbers):
arr = [i % 2 == 0 for i in [int(j) for j in numbers.split()]]
if arr.count(True) > 1:
return arr.index(False)+1
else:
return arr.index(True)+1
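
# Example usage (added for clarity):
#   iq_test("2 4 7 8 10")  # -> 3 (the single odd number is at position 3)
#   iq_test("3 5 8 7")     # -> 3 (the single even number is at position 3)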
|
[
"[email protected]"
] | |
e2c5a124b1d605b156114ec0a8636fb103cbd5d3
|
2c9677180eeec4e1657765b2095828ba43f041ee
|
/src/python/grpcio/grpc/_runtime_protos.py
|
88863e0306dc137bd06225c46e2e2b19e0c01eca
|
[
"Apache-2.0"
] |
permissive
|
morganwu277/grpc
|
a82e4348184a27b273159808327e7f6778a6d448
|
7c4bdd9c6ba176ad65ecea323de8ea4fd6999cf9
|
refs/heads/master
| 2022-11-29T22:01:10.376948 | 2020-08-18T23:53:02 | 2020-08-18T23:53:02 | 288,634,896 | 0 | 0 |
Apache-2.0
| 2020-08-19T04:43:28 | 2020-08-19T04:40:57 | null |
UTF-8
|
Python
| false | false | 5,718 |
py
|
# Copyright 2020 The gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
def _uninstalled_protos(*args, **kwargs):
raise NotImplementedError(
"Install the grpcio-tools package to use the protos function.")
def _uninstalled_services(*args, **kwargs):
raise NotImplementedError(
"Install the grpcio-tools package to use the services function.")
def _uninstalled_protos_and_services(*args, **kwargs):
raise NotImplementedError(
"Install the grpcio-tools package to use the protos_and_services function."
)
def _interpreter_version_protos(*args, **kwargs):
raise NotImplementedError(
"The protos function is only on available on Python 3.X interpreters.")
def _interpreter_version_services(*args, **kwargs):
raise NotImplementedError(
"The services function is only on available on Python 3.X interpreters."
)
def _interpreter_version_protos_and_services(*args, **kwargs):
raise NotImplementedError(
"The protos_and_services function is only on available on Python 3.X interpreters."
)
def protos(protobuf_path): # pylint: disable=unused-argument
"""Returns a module generated by the indicated .proto file.
THIS IS AN EXPERIMENTAL API.
Use this function to retrieve classes corresponding to message
definitions in the .proto file.
To inspect the contents of the returned module, use the dir function.
For example:
```
protos = grpc.protos("foo.proto")
print(dir(protos))
```
The returned module object corresponds to the _pb2.py file generated
by protoc. The path is expected to be relative to an entry on sys.path
    and all transitive dependencies of the file should also be resolvable
from an entry on sys.path.
To completely disable the machinery behind this function, set the
GRPC_PYTHON_DISABLE_DYNAMIC_STUBS environment variable to "true".
Args:
protobuf_path: The path to the .proto file on the filesystem. This path
        must be resolvable from an entry on sys.path and so must all of its
transitive dependencies.
Returns:
A module object corresponding to the message code for the indicated
.proto file. Equivalent to a generated _pb2.py file.
"""
def services(protobuf_path): # pylint: disable=unused-argument
"""Returns a module generated by the indicated .proto file.
THIS IS AN EXPERIMENTAL API.
Use this function to retrieve classes and functions corresponding to
service definitions in the .proto file, including both stub and servicer
definitions.
To inspect the contents of the returned module, use the dir function.
For example:
```
services = grpc.services("foo.proto")
print(dir(services))
```
The returned module object corresponds to the _pb2_grpc.py file generated
by protoc. The path is expected to be relative to an entry on sys.path
    and all transitive dependencies of the file should also be resolvable
from an entry on sys.path.
To completely disable the machinery behind this function, set the
GRPC_PYTHON_DISABLE_DYNAMIC_STUBS environment variable to "true".
Args:
protobuf_path: The path to the .proto file on the filesystem. This path
        must be resolvable from an entry on sys.path and so must all of its
transitive dependencies.
Returns:
A module object corresponding to the stub/service code for the indicated
.proto file. Equivalent to a generated _pb2_grpc.py file.
"""
def protos_and_services(protobuf_path): # pylint: disable=unused-argument
"""Returns a 2-tuple of modules corresponding to protos and services.
THIS IS AN EXPERIMENTAL API.
The return value of this function is equivalent to a call to protos and a
call to services.
To completely disable the machinery behind this function, set the
GRPC_PYTHON_DISABLE_DYNAMIC_STUBS environment variable to "true".
Args:
protobuf_path: The path to the .proto file on the filesystem. This path
        must be resolvable from an entry on sys.path and so must all of its
transitive dependencies.
Returns:
A 2-tuple of module objects corresponding to (protos(path), services(path)).
"""
if sys.version_info < (3, 5, 0):
protos = _interpreter_version_protos
services = _interpreter_version_services
protos_and_services = _interpreter_version_protos_and_services
else:
try:
import grpc_tools # pylint: disable=unused-import
except ImportError as e:
# NOTE: It's possible that we're encountering a transitive ImportError, so
# we check for that and re-raise if so.
if "grpc_tools" not in e.args[0]:
raise
protos = _uninstalled_protos
services = _uninstalled_services
protos_and_services = _uninstalled_protos_and_services
else:
from grpc_tools.protoc import _protos as protos # pylint: disable=unused-import
from grpc_tools.protoc import _services as services # pylint: disable=unused-import
from grpc_tools.protoc import _protos_and_services as protos_and_services # pylint: disable=unused-import
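# A minimal usage sketch (illustrative only; it assumes grpcio-tools is installed,
# a server is listening on localhost:50051, and the canonical helloworld.proto
# from the gRPC examples is reachable from an entry on sys.path):
#
#   import grpc
#   protos, services = grpc.protos_and_services("helloworld.proto")
#   channel = grpc.insecure_channel("localhost:50051")
#   stub = services.GreeterStub(channel)
#   reply = stub.SayHello(protos.HelloRequest(name="world"))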
|
[
"[email protected]"
] | |
00f065d20644809c36a60a0fbfe0ad0aa3cd6ef9
|
4a0f2cc27cd39b8b8901ade728f3b1dc20c2a2ee
|
/controller/qt_classes/UbNewDocumentViewDelegate.py
|
096e2b7becda90dbfcb58540466702c64771dd6f
|
[] |
no_license
|
teamlm2/lm2_mgis
|
2c016423983a31fcdf15e34508401acf48177f35
|
9144b1234b25665737986995bd1da7492871151c
|
refs/heads/master
| 2021-11-11T23:43:12.647749 | 2021-10-26T07:55:58 | 2021-10-26T07:55:58 | 155,568,182 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,303 |
py
|
# coding=utf8
__author__ = 'B.Ankhbold'
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from sqlalchemy.exc import SQLAlchemyError
from ...model import SettingsConstants
from ...model.SetOfficialDocument import SetOfficialDocument
from ...utils.FileUtils import FileUtils
from ...utils.PluginUtils import PluginUtils
from ...utils.SessionHandler import SessionHandler
from ...utils.DatabaseUtils import *
from ...utils.FilePath import *
from ftplib import *
import shutil
import codecs
NAME_COLUMN = 0
DESCRIPTION_COLUMN = 1
VIEW_COLUMN = 2
FILE_PDF = 'pdf'
FILE_IMAGE = 'png'
class UbNewDocumentViewDelegate(QStyledItemDelegate):
def __init__(self, widget, parent):
super(UbNewDocumentViewDelegate, self).__init__(parent)
self.widget = widget
self.parent = parent
self.session = SessionHandler().session_instance()
self.button = QPushButton("", parent)
self.button.hide()
self.viewIcon = QIcon(":/plugins/lm2/file.png")
def paint(self, painter, option, index):
if index.column() == VIEW_COLUMN:
self.button.setIcon(self.viewIcon)
else:
super(UbNewDocumentViewDelegate, self).paint(painter, option, index)
return
self.button.setGeometry(option.rect)
button_picture = QPixmap.grabWidget(self.button)
painter.drawPixmap(option.rect.x(), option.rect.y(), button_picture)
def editorEvent(self, event, model, option, index):
if index is not None:
if index.isValid() and event.type() == QEvent.MouseButtonRelease:
if event.button() == Qt.RightButton:
return False
if index.column() == VIEW_COLUMN:
ftp = self.widget.item(index.row(), NAME_COLUMN).data(Qt.UserRole)
file_name = self.widget.item(index.row(), NAME_COLUMN).data(Qt.UserRole + 1)
file_type = self.widget.item(index.row(), NAME_COLUMN).data(Qt.UserRole + 2)
# print file_name
# print file_type
# print ftp.pwd()
# print ftp.nlst()
view_pdf = open(FilePath.view_file_path(), 'wb')
view_png = open(FilePath.view_file_png_path(), 'wb')
if file_type == FILE_IMAGE:
ftp.retrbinary('RETR ' + file_name, view_png.write)
else:
ftp.retrbinary('RETR ' + file_name, view_pdf.write)
try:
if file_type == FILE_IMAGE:
QDesktopServices.openUrl(QUrl.fromLocalFile(FilePath.view_file_png_path()))
else:
QDesktopServices.openUrl(QUrl.fromLocalFile(FilePath.view_file_path()))
except SQLAlchemyError, e:
PluginUtils.show_error(self.parent, self.tr("File Error"), self.tr("Could not execute: {0}").format(e.message))
return True
elif index.column() == DESCRIPTION_COLUMN or index.column() == NAME_COLUMN:
return True
else:
index.model().setData(index, 0, Qt.EditRole)
        return False
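# A minimal wiring sketch (illustrative; `documents_table` and `parent_dialog`
# are assumed names, not part of this module). The delegate is meant to be
# attached to the documents QTableWidget so the VIEW_COLUMN cells are painted
# as buttons and a click fetches and opens the selected file over FTP:
#
#   delegate = UbNewDocumentViewDelegate(documents_table, parent_dialog)
#   documents_table.setItemDelegateForColumn(VIEW_COLUMN, delegate)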
|
[
"[email protected]"
] | |
87339e4385a890dc9a46c6e5efc4674cb85aefa2
|
4073f351551c2f73c5659cb3038a68360cc5b369
|
/Arbetsbok/kap 14/övn 14.1, sid. 36 - söka tal.py
|
9b318176e080635b41a000e7aeb4734430c42602
|
[
"MIT"
] |
permissive
|
Pharou/programmering1python
|
b9a5aca72354d3e7e91a5023a621d22a962ecd7c
|
9b689027db1f7fbf06925f3094fcb126880453e4
|
refs/heads/master
| 2022-11-28T06:33:17.295157 | 2020-07-25T11:02:07 | 2020-07-25T11:02:07 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,437 |
py
|
#!/usr/bin/python3.8
# Filename: övn 14.1, sid. 36 - söka tal.py
# Searching
# Programming exercises for chapter 14
# The program first draws 20 random numbers between 1 and 100, stores them all
# in a list, and then prints the list on the screen. After that, the program
# asks the user for a number to search for. Finally, the program checks whether
# the number is in the list and, if it is, prints the index where it was found.
# If the number is not in the list, the user is informed that it is missing.
# Search method: linear search
# Module import
from random import randint
# Function definitions
# Main program
def main():
lista = []
    # Draw 20 random integers between 1 and 100 and append them to the list one after another
for c in range(20):
lista.append(randint(1,100))
    # Print the list
print(lista)
    # Ask the user for the number to search for
    tal = int(input('Ange ett tal som eftersöks: '))
    # Perform a linear search through the whole list
    # Assume the number is not present
index = -1
for i in range(len(lista)):
if tal == lista[i]:
            # If the number is found, record its index and break out of the loop
index = i
break
if index >= 0:
print('Talet ' + str(tal) + ' finns på index ' + str(index) + ' i listan.')
else:
print('Talet ' + str(tal) + ' finns inte i listan.')
## The main program is called
main()
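# Worked example (illustrative): if the list happened to contain [12, 7, 99] and
# the user entered 7, the loop would stop at i == 1 and the program would report
# that the number is at index 1; entering 50 would leave index at -1, so the
# "not in the list" message would be printed instead.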
|
[
"[email protected]"
] |