Dataset schema (column name, type, and observed range):

| Column | Type | Length / range / classes |
|---|---|---|
| blob_id | string | length 40 |
| directory_id | string | length 40 |
| path | string | length 3–281 |
| content_id | string | length 40 |
| detected_licenses | list | 0–57 entries |
| license_type | string | 2 classes |
| repo_name | string | length 6–116 |
| snapshot_id | string | length 40 |
| revision_id | string | length 40 |
| branch_name | string | 313 classes |
| visit_date | timestamp[us] | |
| revision_date | timestamp[us] | |
| committer_date | timestamp[us] | |
| github_id | int64 | 18.2k–668M, nullable (⌀) |
| star_events_count | int64 | 0–102k |
| fork_events_count | int64 | 0–38.2k |
| gha_license_id | string | 17 classes |
| gha_event_created_at | timestamp[us] | |
| gha_created_at | timestamp[us] | |
| gha_language | string | 107 classes |
| src_encoding | string | 20 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 4–6.02M |
| extension | string | 78 classes |
| content | string | length 2–6.02M |
| authors | list | 1 entry |
| author | string | length 0–175 |
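For orientation, here is a minimal sketch of how rows with this schema could be inspected; the Parquet shard name is hypothetical, and `pandas` is just one of several ways to read such a dump:

```python
import pandas as pd

# Hypothetical shard name; the actual dump may be split across several files.
df = pd.read_parquet("train-00000-of-00001.parquet")

# Each row pairs repository/GitHub metadata with the contents of one source file.
row = df.iloc[0]
print(row["repo_name"], row["path"], row["license_type"])
print(row["content"][:200])  # first 200 characters of the file
```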
ea81ef6c8728bf5f7b8d91770753624bd097fcc1
|
1af0bc3634313da229fcf7015abec43981b4162a
|
/hundredandbelow/prob37z.py
|
62642d97a60e29c75b7d610605d80974d2f8757f
|
[] |
no_license
|
errai-/euler
|
b2996e227ede830e8c56252bcce08a92538464df
|
054c391d3aee6bcb9ba4dbf24f4ec6cb5b8b55c3
|
refs/heads/master
| 2022-06-18T09:02:54.573903 | 2018-05-14T08:21:48 | 2018-05-14T08:21:48 | 85,503,541 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 698 |
py
|
# -*- coding: cp1252 -*-
import time
qaw = time.time()
def isprime(a):
q = 1
if a%2 == 0: q = 0
elif a == 1: q = 0
else:
for i in range(3,int(a**0.5+1),2):
if a%i == 0: q = 0
return q
def rlroo(ahaa,pahaa):
    nauru = []
    korsu = []
    for qorso in pahaa:  # prepend a digit 1-9 to each left-extendable candidate
        for i in range(1,10):
            oo = str(i)+qorso
            if isprime(int(oo)) == 1: nauru.append(oo)
    for torso in ahaa:  # append a digit 1-9 to each right-extendable candidate
        for b in range(1,10):
            moo = torso+str(b)
            if isprime(int(moo)) == 1: korsu.append(moo)
    return korsu,nauru
luumu = luu = ['3','5','7']
luumu,luu = rlroo(luumu,luu)
luumu,luu = rlroo(luumu,luu)
luumu,luu = rlroo(luumu,luu)
luumu,luu = rlroo(luumu,luu)
lista = []
for a in luumu:
for b in luu:
cee = a+b
lista.append(cee)
|
[
"[email protected]"
] | |
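For reference, a sketch of a more direct Project Euler 37 check than the digit-growing approach above; it carries its own primality test because the script's `isprime` misclassifies 2, which matters here (the truncations of 23, for example, include 2):

```python
def is_prime(n):
    # trial division; correct for n = 2, unlike the isprime above
    if n < 2:
        return False
    if n % 2 == 0:
        return n == 2
    d = 3
    while d * d <= n:
        if n % d == 0:
            return False
        d += 2
    return True

def is_truncatable(n):
    # every left- and right-truncation of n (including n itself) must be prime
    s = str(n)
    return all(is_prime(int(s[i:])) and is_prime(int(s[:len(s) - i]))
               for i in range(len(s)))

# The problem statement guarantees exactly eleven such primes above 7.
print(sum(n for n in range(11, 1000000) if is_truncatable(n)))
```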
19d1b3c6e44ebc76daa3e1357565e60ada33aa77
|
59b208059eb46daa3c6f68acf78beb4184ebb196
|
/Method1/Sample_Training_unsupervised/Metrics.py
|
80e73cf0f03beac1e8122a179c899e7d59254d70
|
[
"MIT"
] |
permissive
|
PhysicsOfAI/PhysicsAI4Imaging
|
29df0162c9f1609379cebf93b49f8f5b98601d04
|
25d0ec085cf4a1c7c2b7c51c530efc4d185f2a0c
|
refs/heads/master
| 2022-07-02T06:51:59.388188 | 2020-05-11T01:00:11 | 2020-05-11T01:00:11 | 257,477,739 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,521 |
py
|
import numpy as np
import skimage.measure as sm
def psnr(img, ref, axes=(0, 1), max_intensity=None):
""" Compute the peak signal to noise ratio (psnr)
:param img: input image (np.array)
:param ref: reference image (np.array)
:param axes: tuple of axes over which the psnr is computed
:param max_intensity: maximum intensity in the image. If it is None, the maximum value of :ref: is taken.
:return: (mean) psnr
"""
    mse = np.mean(np.abs(np.abs(img) - np.abs(ref)) ** 2, axis=axes)
    max_intensity = np.max(np.abs(ref)) if max_intensity is None else max_intensity
    psnr_vals = 10 * np.log10(max_intensity ** 2 / mse)
    return np.mean(psnr_vals)
def ssim(img, ref, dynamic_range=None, axes=(0, 1)):
""" Compute the structural similarity index.
:param img: input image (np.array)
:param ref: reference image (np.array)
:param dynamic_range: If dynamic_range != None, the same given dynamic range will be used for all slices in the volume.
Otherwise, the dynamic_range is computed slice-per-slice.
:param axes: tuple of axes over which the ssim is computed
:return: (mean) ssim
"""
assert len(axes) == 2
assert img.shape == ref.shape
if img.ndim == 2 and axes == (0, 1):
img = img.copy()[np.newaxis]
ref = ref.copy()[np.newaxis]
elif img.ndim == 2 and axes != (0, 1):
raise ValueError("axes of 2d array have to equal (0,1)")
else:
axes = list(axes)
full_axes = list(range(0, img.ndim))
transpose_axes = [item for item in full_axes if item not in axes] + axes
img = np.transpose(img.copy(), transpose_axes)
img = np.reshape(img, (np.prod(img.shape[:-2]),) + img.shape[-2:])
ref = np.transpose(ref.copy(), transpose_axes)
ref = np.reshape(ref, (np.prod(ref.shape[:-2]),) + ref.shape[-2:])
# ssim averaged over slices
ssim_slices = []
ref_abs = np.abs(ref)
img_abs = np.abs(img)
for i in range(ref_abs.shape[0]):
        if dynamic_range is None:
drange = np.max(ref_abs[i]) - np.min(ref_abs[i])
else:
drange = dynamic_range
        # compare_ssim requires scikit-image < 0.18 (newer versions renamed it structural_similarity)
        _, ssim_i = sm.compare_ssim(img_abs[i], ref_abs[i],
data_range=drange,
gaussian_weights=True,
use_sample_covariance=False,
full=True)
ssim_slices.append(np.mean(ssim_i))
return np.mean(ssim_slices)
|
[
"[email protected]"
] | |
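A short usage sketch for the two metrics above, on synthetic data; the import assumes the file is importable as `Metrics` (per its path), and an scikit-image version that still provides `compare_ssim`:

```python
import numpy as np
from Metrics import psnr, ssim  # assumes the file above is on the path as Metrics

ref = np.random.rand(64, 64, 8)                # a volume of eight 64x64 slices
img = ref + 0.01 * np.random.randn(64, 64, 8)  # noisy reconstruction of it

print(psnr(img, ref, axes=(0, 1)))  # mean PSNR over the eight slices
print(ssim(img, ref, axes=(0, 1)))  # mean SSIM over the eight slices
```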
a13bd7f9da7ea032c84dac021788da7cb8446ba9
|
ac2c3e8c278d0aac250d31fd023c645fa3984a1b
|
/saleor/saleor/wishlist/error_codes.py
|
5f77c477ea3948543085f5817a1d759cf6bc6e85
|
[
"BSD-3-Clause",
"CC-BY-4.0"
] |
permissive
|
jonndoe/saleor-test-shop
|
152bc8bef615382a45ca5f4f86f3527398bd1ef9
|
1e83176684f418a96260c276f6a0d72adf7dcbe6
|
refs/heads/master
| 2023-01-21T16:54:36.372313 | 2020-12-02T10:19:13 | 2020-12-02T10:19:13 | 316,514,489 | 1 | 1 |
BSD-3-Clause
| 2020-11-27T23:29:20 | 2020-11-27T13:52:33 |
TypeScript
|
UTF-8
|
Python
| false | false | 196 |
py
|
from enum import Enum
class WishlistErrorCode(str, Enum):
GRAPHQL_ERROR = "graphql_error"
INVALID = "invalid"
NOT_FOUND = "not_found"
REQUIRED = "required"
UNIQUE = "unique"
|
[
"[email protected]"
] | |
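Mixing `str` into the Enum makes each member a string, so error codes compare and serialize as plain values; a quick sketch:

```python
code = WishlistErrorCode.NOT_FOUND
print(code == "not_found")  # True: the str mixin makes members compare as strings
print(code.value)           # "not_found"
```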
ad2f6648b4306ac0436f87746bef147dfb923663
|
6c616cc95ad9c5989d237b75bb190d336f943a79
|
/cryptography.py
|
84d3cc1145e82b12c1d8383c5ae96eb926bd738c
|
[] |
no_license
|
Hoshizx/CryptographyPython1
|
7ec5400d49e188dec1056a4ba59add53779927a1
|
3eb8ce31a82e2d1752f6c437b2470ba613c96fa9
|
refs/heads/main
| 2023-01-31T01:49:20.384767 | 2020-12-12T03:27:03 | 2020-12-12T03:27:03 | 320,738,967 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 261 |
py
|
while True:
    msg = input("Message: ")
    result_cryptography = ""  # was " ", which put a stray leading space in the output
    i = len(msg) - 1
    while i >= 0:  # walk backwards through the string to reverse it
        result_cryptography = result_cryptography + msg[i]
        i = i - 1
    print("Result: " + result_cryptography)  # equivalent to msg[::-1]
|
[
"[email protected]"
] | |
cf231172191fbe06dde2ae86a1570a03ae34181f
|
e75846f5d88c4c7ea4417735e354a970f882d7e6
|
/backend/app.py
|
d014f61a7ec2307a3ba485650800f7b831cc1fa9
|
[] |
no_license
|
somprasongd-forks/Pybott-Covid-Selftracking
|
5370b8ffc7e192da73c62be4e29ba940e71bb7a7
|
7ffd97f4a9aaf5fa1099c29fe906f4a12c025c2b
|
refs/heads/master
| 2022-06-27T15:26:48.679136 | 2020-05-09T11:30:47 | 2020-05-09T11:30:47 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 21,135 |
py
|
import random
import time
from flask import Flask, abort, request
from flask_cors import CORS
from linebot import LineBotApi, WebhookHandler
from linebot.exceptions import InvalidSignatureError
from linebot.models import *
from BasicFunction.CaseLocationApi import get_location_reccommend_data
from BasicFunction.COVID_ANALYZER import analyze_covid_from_user
from BasicFunction.DailyApi import get_daily_data
from BasicFunction.Firebase_Connect import (delete, get, get_daily_tracking,
post, post_daily_tracking, update,
update_daily_tracking)
from config import Channel_access_token, Channel_secret, Firebase_DB_url , DB_COV_TRACKER , DB_USER_DATA , DB_USER_SESSION ,firebase , rich_menu_id
from FlexMessage.QuestionMsg import *
from FlexMessage.ResultMsg import *
app = Flask(__name__)
cors = CORS(app, resources={r"/api/*": {"origins": "*"}})
app.config['JSON_AS_ASCII'] = False
# Firebase_DB_url = "https://pybott-6th.firebaseio.com/" # Your firebase Application
from BasicFunction.api.api import get_tracking_data_by_uid , get_poll
line_bot_api = LineBotApi(Channel_access_token)
handler = WebhookHandler(Channel_secret)
@app.route("/api/get_user_report/<UID>",methods=["GET"])
def GetUserDaily(UID):
res = get_tracking_data_by_uid(UID=UID)
return res , 200
@app.route("/api/get_polls/")
def GetAll():
res = get_poll()
return res , 200
@app.route("/callback", methods=['POST'])
def callback():
# get X-Line-Signature header value
signature = request.headers['X-Line-Signature']
# get request body as text
body = request.get_data(as_text=True)
app.logger.info("Request body: " + body)
# handle webhook body
try:
handler.handle(body, signature)
except InvalidSignatureError:
print("Invalid signature. Please check your channel access token/channel secret.")
abort(400)
return 'OK'
@handler.add(MessageEvent, message=TextMessage)
def handle_message(event):
# INPUT AND PARSING DATA
REPLY_TOKEN = event.reply_token
MESSAGE_FROM_USER = event.message.text
UID = event.source.user_id
# get user id
profile = line_bot_api.get_profile(UID)
DISPLAY_NAME = profile.display_name
PROFILE_PIC = profile.picture_url
#check user in system?
user = get(uid=UID,firebase_app=firebase , database_name=DB_USER_DATA)
line_bot_api.link_rich_menu_to_user(user_id=UID , rich_menu_id=rich_menu_id)
if not user:
        # new user: create a session record and store the profile
data = {"session" : "None"}
post(uid=UID,data=data,firebase_app=firebase,database_name=DB_USER_SESSION)
data = { "DISPLAY_NAME" : DISPLAY_NAME , "PROFILE_PIC" : PROFILE_PIC }
post(uid=UID,data=data,firebase_app=firebase,database_name=DB_USER_DATA)
user_session = get(uid=UID,firebase_app=firebase , database_name=DB_USER_SESSION)
user_session = user_session["session"]
if user_session == "None":
if MESSAGE_FROM_USER == "เริ่มบันทึกอาการป่วย":
daily_report = {
"มีไข้" : "",
"มีอาการไอ" : "",
"มีอาการเจ็บคอ" : "",
"น้ำมูกไหล" : "",
"เหนื่อยหอบ" : "",
"วันที่" : "",
"score" : 0,
"ข้อเสนอแนะ" : "",
"อาการอื่นๆที่พบ": ""
}
# create user daily report
post_daily_tracking(uid=UID , data=daily_report , firebase_app=firebase , database_name=DB_COV_TRACKER)
# update session
session_data = {"session" : "บันทึกอาการไข้"}
update(uid=UID,new_data=session_data,firebase_app=firebase,database_name=DB_USER_SESSION)
            # Response: send back to the chat room
Bubble = Base.get_or_new_from_json_dict(คำถามอาการไข้(),FlexSendMessage)
line_bot_api.reply_message(REPLY_TOKEN,messages=Bubble)
elif MESSAGE_FROM_USER == "ข้อมูลผู้ติดเชื้อวันนี้":
            # Response: send back to the chat room
Bubble = Base.get_or_new_from_json_dict(get_daily_data(),FlexSendMessage)
line_bot_api.reply_message(REPLY_TOKEN,messages=Bubble)
elif MESSAGE_FROM_USER == "ข้อมูลผู้ติดเชื้อตามพื้นที่":
session_data = {"session" : "ข้อมูลผู้ติดเชื้อตามพื้นที่"}
update(uid=UID,new_data=session_data,firebase_app=firebase,database_name=DB_USER_SESSION)
line_bot_api.reply_message(REPLY_TOKEN,
TextSendMessage(text="กรุณาระบุชื่อจังหวัดที่ท่านต้องการทราบคะ เช่น 'สงขลา'"))
else :
num = [1,2,3,4,5]
time.sleep(random.choice(num))
Fallback_list = ["น้องหมอ ยังไม่มีบริการด้านนี้นะคะ","ขออภัยคะน้องไม่เข้าใจเลยยยจีๆ","ไว้มาถามใหม่ครั้งหน้านะคะ ตอนนี้ยังไม่สะดวกคะ"]
Fallback = random.choice(Fallback_list)
qbtn1 = QuickReplyButton(image_url="https://www.krungsri.com/bank/getmedia/1f37428a-a9e9-4860-9efd-90aeb886d3d5/krungsri-coronavirus-insurance-detail.jpg.aspx?resizemode=1",
action=MessageAction(label="เริ่มบันทึกอาการป่วย",text="เริ่มบันทึกอาการป่วย"))
qbtn2 = QuickReplyButton(image_url="https://www.krungsri.com/bank/getmedia/1f37428a-a9e9-4860-9efd-90aeb886d3d5/krungsri-coronavirus-insurance-detail.jpg.aspx?resizemode=1",
action=MessageAction(label="วันนี้เป็นไงบ้าง",text="ข้อมูลผู้ติดเชื้อวันนี้"))
qbtn3 = QuickReplyButton(image_url="https://www.krungsri.com/bank/getmedia/1f37428a-a9e9-4860-9efd-90aeb886d3d5/krungsri-coronavirus-insurance-detail.jpg.aspx?resizemode=1",
action=MessageAction(label="ข้อมูลผู้ติดเชื้อตามพื้นที่",text="ข้อมูลผู้ติดเชื้อตามพื้นที่"))
qrep = QuickReply(items=[qbtn1,qbtn2,qbtn3])
line_bot_api.reply_message(REPLY_TOKEN,
TextSendMessage(text=Fallback,quick_reply=qrep))
elif MESSAGE_FROM_USER == "ออกจากคำสั่ง":
session_data = {"session" : "None"}
update(uid=UID,new_data=session_data,firebase_app=firebase,database_name=DB_USER_SESSION)
num = [1,2,3,4,5]
time.sleep(random.choice(num))
Fallback_list = ["ออกจากคำสั่งเรียบร้อย กรุณาเลือกคำสั่งใหม่นะคะ","ออกจากคำสั่งเรียบร้อย ถามไรต่อดีเอ่ยยยย","ออกจากคำสั่งเรียบร้อย ไว้มาสอบถามใหม่อีกครั้งนะคะ","ออกจากคำสั่งเรียบร้อย ขอบคุณที่แวะมาใช้บริการนะคะ"]
Fallback = random.choice(Fallback_list)
qbtn1 = QuickReplyButton(image_url="https://www.krungsri.com/bank/getmedia/1f37428a-a9e9-4860-9efd-90aeb886d3d5/krungsri-coronavirus-insurance-detail.jpg.aspx?resizemode=1",
action=MessageAction(label="เริ่มบันทึกอาการป่วย",text="เริ่มบันทึกอาการป่วย"))
qbtn2 = QuickReplyButton(image_url="https://www.krungsri.com/bank/getmedia/1f37428a-a9e9-4860-9efd-90aeb886d3d5/krungsri-coronavirus-insurance-detail.jpg.aspx?resizemode=1",
action=MessageAction(label="วันนี้เป็นไงบ้าง",text="ข้อมูลผู้ติดเชื้อวันนี้"))
qbtn3 = QuickReplyButton(image_url="https://www.krungsri.com/bank/getmedia/1f37428a-a9e9-4860-9efd-90aeb886d3d5/krungsri-coronavirus-insurance-detail.jpg.aspx?resizemode=1",
action=MessageAction(label="ข้อมูลผู้ติดเชื้อตามพื้นที่",text="ข้อมูลผู้ติดเชื้อตามพื้นที่"))
qrep = QuickReply(items=[qbtn1,qbtn2,qbtn3])
line_bot_api.reply_message(REPLY_TOKEN,
TextSendMessage(text=Fallback,quick_reply=qrep))
    ### other commands
else:
if user_session == "บันทึกอาการไข้": # validate session
# "3" != 3
if MESSAGE_FROM_USER in ["0","1","2","3","4","5"]: # validate input
data = {"มีไข้" : MESSAGE_FROM_USER}
update_daily_tracking(uid=UID,new_data=data,firebase_app=firebase,database_name=DB_COV_TRACKER) # update
session_data = {"session" : "บันทึกอาการไอ"}
update(uid=UID,new_data=session_data,firebase_app=firebase,database_name=DB_USER_SESSION) # update
                # Response: send back to the chat room
                Bubble = Base.get_or_new_from_json_dict(คำถามอาการไอ(),FlexSendMessage)  # called, consistent with คำถามอาการไข้() above
line_bot_api.reply_message(REPLY_TOKEN,messages=Bubble)
else :
line_bot_api.reply_message(REPLY_TOKEN,TextSendMessage("กรุณาระบุเป็นตัวเลขเท่านั้นคะ (พิมพ์เลข 1-5)"))
elif user_session == "บันทึกอาการไอ":
if MESSAGE_FROM_USER in ["0","1","2","3","4","5"]: # validate input
data = {"มีอาการไอ" : MESSAGE_FROM_USER}
update_daily_tracking(uid=UID,new_data=data,firebase_app=firebase,database_name=DB_COV_TRACKER) # update
session_data = {"session" : "บันทึกอาการเจ็บคอ"}
update(uid=UID,new_data=session_data,firebase_app=firebase,database_name=DB_USER_SESSION) # update
                # Response: send back to the chat room
                Bubble = Base.get_or_new_from_json_dict(คำถามอาการเจ็บคอ(),FlexSendMessage)
line_bot_api.reply_message(REPLY_TOKEN,Bubble)
else :
line_bot_api.reply_message(REPLY_TOKEN,TextSendMessage("กรุณาระบุเป็นตัวเลขเท่านั้นคะ (พิมพ์เลข 1-5)"))
elif user_session == "บันทึกอาการเจ็บคอ":
if MESSAGE_FROM_USER in ["0","1","2","3","4","5"]: # validate input
data = {"มีอาการเจ็บคอ" : MESSAGE_FROM_USER}
update_daily_tracking(uid=UID,new_data=data,firebase_app=firebase,database_name=DB_COV_TRACKER) # update
session_data = {"session" : "บันทึกอาการน้ำมูกไหล"}
update(uid=UID,new_data=session_data,firebase_app=firebase,database_name=DB_USER_SESSION) # update
                # Response: send back to the chat room
                Bubble = Base.get_or_new_from_json_dict(คำถามอาการน้ำมูกไหล(),FlexSendMessage)
line_bot_api.reply_message(REPLY_TOKEN,Bubble)
else :
line_bot_api.reply_message(REPLY_TOKEN,TextSendMessage("กรุณาระบุเป็นตัวเลขเท่านั้นคะ (พิมพ์เลข 1-5)"))
elif user_session == "บันทึกอาการน้ำมูกไหล":
if MESSAGE_FROM_USER in ["0","1","2","3","4","5"]: # validate input
data = {"น้ำมูกไหล" : MESSAGE_FROM_USER}
update_daily_tracking(uid=UID,new_data=data,firebase_app=firebase,database_name=DB_COV_TRACKER) # update
session_data = {"session" : "บันทึกอาการเหนื่อยหอบ"}
update(uid=UID,new_data=session_data,firebase_app=firebase,database_name=DB_USER_SESSION) # update
                # Response: send back to the chat room
                Bubble = Base.get_or_new_from_json_dict(คำถามอาการเหนื่อยหอบ(),FlexSendMessage)
line_bot_api.reply_message(REPLY_TOKEN,Bubble)
else :
line_bot_api.reply_message(REPLY_TOKEN,TextSendMessage("กรุณาระบุเป็นตัวเลขเท่านั้นคะ (พิมพ์เลข 1-5)"))
elif user_session == "บันทึกอาการเหนื่อยหอบ":
if MESSAGE_FROM_USER in ["0","1","2","3","4","5"]: # validate input
data = {"เหนื่อยหอบ" : MESSAGE_FROM_USER}
update_daily_tracking(uid=UID,new_data=data,firebase_app=firebase,database_name=DB_COV_TRACKER) # update
session_data = {"session" : "บันทึกอาการอื่นๆ"}
update(uid=UID,new_data=session_data,firebase_app=firebase,database_name=DB_USER_SESSION) # update
user_daily_data = get_daily_tracking(uid=UID,firebase_app=firebase,database_name=DB_COV_TRACKER)
result = analyze_covid_from_user(UID,user_daily_data)
post_daily_tracking(uid=UID,data=result,firebase_app=firebase,database_name=DB_COV_TRACKER)
qbtn = QuickReplyButton(image_url="https://www.krungsri.com/bank/getmedia/1f37428a-a9e9-4860-9efd-90aeb886d3d5/krungsri-coronavirus-insurance-detail.jpg.aspx?resizemode=1",
action=MessageAction(label="ไม่มีแล้วจร้า",text="ไม่มีแล้วจร้า"))
qrep = QuickReply(items=[qbtn])
line_bot_api.reply_message(REPLY_TOKEN,TextSendMessage("เรียบร้อยแล้วคะ🧡🧡 \n ท่านมีอาการอื่นๆเพิ่มเติมอีกไหมคะ \n 💪💪 บอกน้องหมอได้นะ",quick_reply=qrep)) # reponse
else :
line_bot_api.reply_message(REPLY_TOKEN,TextSendMessage("กรุณาระบุเป็นตัวเลขเท่านั้นคะ (พิมพ์เลข 1-5)"))
elif user_session == "บันทึกอาการอื่นๆ":
data = {"อาการอื่นๆที่พบ" : MESSAGE_FROM_USER}
update_daily_tracking(uid=UID,new_data=data,firebase_app=firebase,database_name=DB_COV_TRACKER) # update
session_data = {"session" : "None"}
update(uid=UID,new_data=session_data,firebase_app=firebase,database_name=DB_USER_SESSION) # update
user_daily_data = get_daily_tracking(uid=UID,firebase_app=firebase,database_name=DB_COV_TRACKER)
result = analyze_covid_from_user(UID,user_daily_data)
post_daily_tracking(uid=UID,data=result,firebase_app=firebase,database_name=DB_COV_TRACKER)
raw_Bubble = GenerateResultMsg(Profile_name=DISPLAY_NAME , UserId=UID , Dict_daily_data=result)
Bubble = Base.get_or_new_from_json_dict(raw_Bubble,FlexSendMessage)
line_bot_api.reply_message(REPLY_TOKEN,Bubble)
elif user_session == "ข้อมูลผู้ติดเชื้อตามพื้นที่":
raw_Bubble = get_location_reccommend_data(Province=MESSAGE_FROM_USER)
if raw_Bubble:
qbtn1 = QuickReplyButton(image_url="https://www.krungsri.com/bank/getmedia/1f37428a-a9e9-4860-9efd-90aeb886d3d5/krungsri-coronavirus-insurance-detail.jpg.aspx?resizemode=1",
action=MessageAction(label="ออกจากการค้นหา",text="ออกจากคำสั่ง"))
qrep = QuickReply(items=[qbtn1])
text_message = TextSendMessage(text="ออกจากการค้นหาโดยกดปุ่มด้านล่าง หรือ ทำการค้นหาต่อไปได้เลยนะคะ" ,quick_reply=qrep)
Bubble = Base.get_or_new_from_json_dict(raw_Bubble,FlexSendMessage)
line_bot_api.reply_message(REPLY_TOKEN,messages=[Bubble,text_message])
else:
qbtn1 = QuickReplyButton(image_url="https://www.krungsri.com/bank/getmedia/1f37428a-a9e9-4860-9efd-90aeb886d3d5/krungsri-coronavirus-insurance-detail.jpg.aspx?resizemode=1",
action=MessageAction(label="ออกจากการค้นหา",text="ออกจากคำสั่ง"))
qrep = QuickReply(items=[qbtn1])
text_message = TextSendMessage(text="ไม่พบข้อมูลผู้ติดเชื้อจากกรมควบคุมโรคของจังหวัด"+str(MESSAGE_FROM_USER) +"\n กรุณาระบุชื่อจังหวัดใหม่อีกครั้งคะ หรือ กดปุ่มออกจากการค้นหา" ,quick_reply=qrep)
line_bot_api.reply_message(REPLY_TOKEN,messages=text_message)
@handler.add(FollowEvent)
def handler_Follow(event):
UID = event.source.user_id
REPLY_TOKEN = event.reply_token
line_bot_api.link_rich_menu_to_user(user_id=UID , rich_menu_id="richmenu-6852c0838fd90cce0f777268248f4bb2")
    # send a welcome image
image_message = ImageSendMessage(
original_content_url='https://www.krungsri.com/bank/getmedia/1f37428a-a9e9-4860-9efd-90aeb886d3d5/krungsri-coronavirus-insurance-detail.jpg.aspx?resizemode=1',
preview_image_url='https://www.krungsri.com/bank/getmedia/1f37428a-a9e9-4860-9efd-90aeb886d3d5/krungsri-coronavirus-insurance-detail.jpg.aspx?resizemode=1'
)
qbtn1 = QuickReplyButton(image_url="https://www.krungsri.com/bank/getmedia/1f37428a-a9e9-4860-9efd-90aeb886d3d5/krungsri-coronavirus-insurance-detail.jpg.aspx?resizemode=1",
action=MessageAction(label="เริ่มบันทึกอาการป่วย",text="เริ่มบันทึกอาการป่วย"))
qbtn2 = QuickReplyButton(image_url="https://www.krungsri.com/bank/getmedia/1f37428a-a9e9-4860-9efd-90aeb886d3d5/krungsri-coronavirus-insurance-detail.jpg.aspx?resizemode=1",
action=MessageAction(label="วันนี้เป็นไงบ้าง",text="ข้อมูลผู้ติดเชื้อวันนี้"))
qbtn3 = QuickReplyButton(image_url="https://www.krungsri.com/bank/getmedia/1f37428a-a9e9-4860-9efd-90aeb886d3d5/krungsri-coronavirus-insurance-detail.jpg.aspx?resizemode=1",
action=MessageAction(label="ข้อมูลผู้ติดเชื้อตามพื้นที่",text="ข้อมูลผู้ติดเชื้อตามพื้นที่"))
qrep = QuickReply(items=[qbtn1,qbtn2,qbtn3])
text_message = TextSendMessage(text="ยินดีต้อนรับเข้าสู่ บันทึกของผู้กักตัว" ,quick_reply=qrep)
line_bot_api.reply_message(REPLY_TOKEN,messages=[image_message,text_message])
if __name__ == "__main__":
app.run()
|
[
"[email protected]"
] | |
1f7122e1b41a9df89a492a703ee6bd51647f5f8a
|
5861ec37c27a8b5e62733ca13a5632047935b40e
|
/pollsystem/polls/migrations/0001_initial.py
|
fd0a40295a0fabb044c4967e53784ca3183d6625
|
[] |
no_license
|
radhikagupta6/YourPoll
|
27e64d5e8ca14a8cad68bf8e57b1842ba7339346
|
4f895fc0dacca190eaafc4174cd379f94246c764
|
refs/heads/master
| 2022-12-03T04:33:44.903447 | 2020-08-17T11:27:35 | 2020-08-17T11:27:35 | 273,569,662 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,074 |
py
|
# Generated by Django 3.0.3 on 2020-06-19 17:54
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Question',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('question_text', models.CharField(max_length=200)),
('p_date', models.DateTimeField(verbose_name='publishing date')),
],
),
migrations.CreateModel(
name='Choice',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('choice_text', models.CharField(max_length=200)),
('votes', models.IntegerField(default=0)),
('question', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='polls.Question')),
],
),
]
|
[
"[email protected]"
] | |
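For context, the models this initial migration would have been generated from look roughly like the following; this is a reconstruction from the migration operations, not the repository's actual models.py:

```python
from django.db import models

class Question(models.Model):
    question_text = models.CharField(max_length=200)
    p_date = models.DateTimeField('publishing date')

class Choice(models.Model):
    question = models.ForeignKey(Question, on_delete=models.CASCADE)
    choice_text = models.CharField(max_length=200)
    votes = models.IntegerField(default=0)
```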
fd35e0a0388b2ad0ef24ebb3c9bbe24eae376108
|
4a3c416d5a5dec2c5ec32b88be96acdff9438807
|
/BaiduAIP.py
|
f136b9f425141d849533611a65478b9f346dc8bc
|
[] |
no_license
|
Toototototo/xishui-alipay-getData
|
dfb936ed95097ab3dedebbdd543522728f3a0bb1
|
4e94bb991ead912238e3d6006bca018744d6dedc
|
refs/heads/master
| 2020-05-04T15:52:21.107177 | 2019-04-08T10:05:31 | 2019-04-08T10:05:31 | 179,259,343 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,330 |
py
|
# -*- coding: utf-8 -*-
from aip import AipOcr
""" 你的 APPID AK SK """
APP_ID = '15927274'
API_KEY = 'wutPhQANL3aOBuXbP8SnWyrP'
SECRET_KEY = '14snx1YG8TP8eGVQlWsteltFX6DGGebD'
class BaiduAIP(object):
    def __init__(self, picture_path):
        self.client = AipOcr(APP_ID, API_KEY, SECRET_KEY)
        self.picture_path = picture_path  # URL of the image to recognize; was never assigned before
    # Read an image file
    # and return its binary content
def get_file_content(self, file_path):
with open(file_path, 'rb') as fp:
return fp.read()
    # Call the Baidu AIP OCR endpoint and parse the returned data
    # more than one result => retry
    # average probability not above 0.7 => retry
    # success => return the code
def get_code(self):
# image = self.get_file_content(self.picture_path)
""" 如果有可选参数 """
options = {"language_type": "ENG", "detect_direction": "true", "detect_language": "true", "probability": "true"}
response = self.client.basicGeneralUrl(self.picture_path, options)
print(response, self.picture_path)
data = response
        if isinstance(data['words_result'], list) and len(data['words_result']) == 1:
            if data['words_result'][0]['probability']['average'] > 0.7:  # accuracy above 0.7
return data['words_result'][0]['words']
else:
return 'do again'
else:
return 'do again'
|
[
"[email protected]"
] | |
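A hypothetical usage sketch for the class above, assuming the `picture_path` constructor argument introduced in the fix; `basicGeneralUrl` in the baidu-aip SDK takes an image URL:

```python
# The URL is a placeholder, not a real captcha endpoint.
aip = BaiduAIP("http://example.com/captcha.png")
code = aip.get_code()
if code != 'do again':
    print("recognized:", code)
else:
    print("low confidence or multiple lines; retry")
```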
c269ecdcc14dbc4334e23ae671f11b1ed330d2e2
|
2eb5d59b6e9a28e06b8124b6f51c85217629a24f
|
/pybo/views/base_views.py
|
06fca3e60a83941babf9a41197478667f23d62c9
|
[] |
no_license
|
lwjworld88/pybo
|
e44a38e8126c8d8a2494288f7bab8a13037ca46f
|
fbfd9d5a46fdea6ae9c6094e14799b1b7c3da048
|
refs/heads/master
| 2023-03-08T23:42:42.816755 | 2021-02-21T23:38:14 | 2021-02-21T23:38:14 | 341,356,173 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,903 |
py
|
from django.core.paginator import Paginator
from django.db.models import Q, Count
from django.shortcuts import render, get_object_or_404
from ..models import Question
def index(request):
"""
pybo 목록 출력
"""
    # Input parameters
    page = request.GET.get('page', '1')  # paging; defaults to 1 when no page parameter is given (e.g. localhost:8000/?page=1)
    kw = request.GET.get('kw', '')  # search keyword
    so = request.GET.get('so', 'recent')  # sort order
    # Sorting
if so == 'recommend':
question_list = Question.objects.annotate(num_voter=Count('voter')).order_by('-num_voter', '-create_date')
elif so == 'popular':
question_list = Question.objects.annotate(num_answer=Count('answer')).order_by('-num_answer', '-create_date')
else: # recent
question_list = Question.objects.order_by('-create_date')
    # Keyword filtering
if kw:
question_list = question_list.filter(
            Q(subject__icontains=kw) |  # search subject
            Q(content__icontains=kw) |  # search content
            Q(author__username__icontains=kw) |  # search question author
            Q(answer__author__username__icontains=kw)  # search answer author
).distinct()
    # Paging
    paginator = Paginator(question_list, 10)  # show 10 questions per page
page_obj = paginator.get_page(page)
    context = {'question_list': page_obj, 'page': page, 'kw': kw, 'so': so}  # 'so' added to the context
return render(request, 'pybo/question_list.html', context)
def detail(request, question_id):
"""
pybo 목록 출력
"""
question = get_object_or_404(Question, pk=question_id)
context = {'question': question}
return render(request, 'pybo/question_detail.html', context)
|
[
"[email protected]"
] | |
62fed4f8d716eb544aca34dbe492a0dfcc899225
|
4da57c6e9efb0a884449e019ce5c9b5d516d2bb1
|
/exp/kernel_benchmark/bin_clean/amarel_aggr_data.py
|
6d0a278193addea1d73a624d1f74908838af8828
|
[] |
no_license
|
radical-experiments/affinity_model
|
dc848fe1666b2f017d37ba041890462890eba9b5
|
fc67420a2278020eee770680fa7ccef76ed2dfa5
|
refs/heads/master
| 2021-04-06T16:56:26.847920 | 2018-09-25T03:15:47 | 2018-09-25T03:15:47 | 83,361,464 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,431 |
py
|
import os
import sys
import csv
from pprint import pprint
from files_dir_mod import *
def amarel_aggregate(src_path, dst_path):
for dirname in dirnames:
if not os.path.isdir(src_path+'/'+dirname):
print "{0} does not exist".format(dirname)
continue
dir_keywords = dirname.split('/')
pprint(dir_keywords)
machine = dir_keywords[1]
if machine != "amarel":
continue
dir_list = os.listdir(src_path+'/'+dirname)
if dir_list:
kernel = dir_keywords[0]
node_type = dir_keywords[2]
usage = dir_keywords[3]
for meas in measurements:
fd_out = open(dst_path+'/'+dirname+'/'+meas+'.csv', 'w')
writer = csv.writer(fd_out)
for session in dir_list:
with open(src_path+'/'+dirname+'/'+session+'/'+meas+'.csv') as fd_in:
reader = csv.reader(fd_in)
for row in reader:
cleaned_row = row
cleaned_row[0] = session + "__" + cleaned_row[0]
writer.writerow(cleaned_row)
fd_out.close()
pprint(dirname)
pprint(dir_list)
if __name__ == "__main__":
src_path = sys.argv[1]
dst_path = sys.argv[2]
amarel_aggregate(src_path, dst_path)
|
[
"[email protected]"
] | |
4a2b53bd3b55146afd68cccc08de400d3c6b2a95
|
d957aac7c4c73f0547c322bf7eae98b8ca57cf0e
|
/BaekJoon/BaekJoon_1152.py
|
ad635b521490363e520377ed8d2d60c68d928ba3
|
[] |
no_license
|
iamzero-j/PythonAlgorithm
|
babe7499cf5b8b80ce74b0b11075739a4d5ae00b
|
3591d0645768c6af5ace3af36f71167b0053c713
|
refs/heads/master
| 2023-03-02T23:04:25.627784 | 2021-02-16T06:47:41 | 2021-02-16T06:47:41 | 276,972,465 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 282 |
py
|
# Source: Baekjoon -> Strings: problem 1152 https://www.acmicpc.net/problem/1152
# Count the number of words
# Consecutive spaces cannot occur in the input
# Using ins=input().split(" ") would print 1 instead of 0 for an empty input
ins=input().split()
print(len(ins))
|
[
"[email protected]"
] | |
a8be7e35f1526e8eda93504378fb00923e9b8a2d
|
a0072256bee8821b3fe63dfca6a6574f864cc899
|
/app/PyBotEngine/agreeabl.py
|
0183c7f5525963c539342c869a0713163165e1c6
|
[] |
no_license
|
sinsinpub/sin2gae
|
8a81e2c0e7f101119207f58394dee908deb2e2c2
|
2a7f2be0e73611289520127bf888ab8237047f9c
|
refs/heads/master
| 2020-06-01T07:29:05.003964 | 2010-12-11T23:05:49 | 2010-12-11T23:05:49 | 36,711,069 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 10,534 |
py
|
# This Python file uses the following encoding: utf-8
"""
This is agreeabl, a friendly little twitter bot
"""
import os
import urllib
import urllib2
import random
import datetime
import logging
import re
import types
import feedparser
import wsgiref.handlers
from dateutil.parser import parse
from google.appengine.ext.webapp import template
from google.appengine.ext import webapp, db
from google.appengine.api import urlfetch
from google.appengine.api import memcache
from django.utils import simplejson
class TwitterAccount(db.Model):
"""
Username and password for the Twitter account associated with the bot
"""
username = db.StringProperty()
password = db.StringProperty()
class Tracker(db.Model):
"""
Tracker simply stores the date time of the last successfully downloaded
message so we don't process messages twice.
"""
last_tweet = db.DateTimeProperty()
class ReplyMessage(db.Model):
"""
Messages that will be used to reply randomly.
"""
cond = db.StringProperty()
msg = db.StringProperty()
twitter_account = db.GqlQuery("SELECT * FROM TwitterAccount").get()
if twitter_account == None or twitter_account.username == '':
TwitterAccount(username='', password='').put()
raise Exception("Please set up you twitter credentials in your datastore")
else:
username = twitter_account.username
password = twitter_account.password
mentions_url = 'http://%s:%[email protected]/statuses/mentions.atom' % \
(username, password)
status_url = 'http://twitter.com/statuses/update.xml'
friend_url = 'http://twitter.com/friendships/create.xml'
is_friend_url = 'http://twitter.com/friendships/exists.json'
user_profile_url = 'http://twitter.com/users/show/%s.json' % username
user_timeline_url = 'http://twitter.com/statuses/user_timeline/%s.json' % \
username
msg_url = 'http://twitter.com/statuses/show/%s.json'
msg_list = db.GqlQuery("SELECT * FROM ReplyMessage").get()
if msg_list == None:
ReplyMessage(cond='', msg='').put()
msg_list = [
"%s that's what my mum always said and it's hard to argue with her.",
"%s I feel your pain...",
"%s you go girl!",
"%s you say the smartest stuff sometimes.",
"%s yeah, me too.",
"%s I get like that sometimes too.",
"%s good thinking!",
"%s that deserves a hug.",
"%s totally!",
"%s my feelings exactly!",
"%s that is very true",
"%s so true, so true...",
"%s you are so right...",
"%s couldn't agree more.",
"%s if only more people were as thoughtful as you.",
"%s yeah for sure",
"%s you know a tibetan monk once said the same thing to me and it \
always stuck in my mind.",
"%s those there are wise words. Wise words indeed.",
"%s if more people thought like you we wouldn't need laws. Or taxes. \
Or Conroy's clean feed.",
"%s yup like I said before - you just can't live without fresh fruit \
and clean water.",
"%s yeah - it really is the way things are going these days.",
"%s that sure sounds like fun"
]
class Index(webapp.RequestHandler):
"""
Render the homepage. This looks similar to a regular twitter homepage and
shows recent conversations the bot has had.
"""
def get(self):
""" default action for / URL requests """
user_profile = get_from_cache("user_profile", user_profile_url)
user_profile = dict2class(user_profile)
user_timeline = get_from_cache("user_timeline", user_timeline_url)
user_timeline_formated = []
i = 0
for entry in user_timeline:
entry = dict2class(entry)
entry.user = dict2class(entry.user)
entry.text = re.sub(r'(\A|\s)@(\w+)', \
r'\1@<a href="http://www.twitter.com/\2">\2</a>', entry.text)
entry.created_at = \
parse(entry.created_at).strftime("%I:%M%p %A, %d %B %Y")
if entry.user.screen_name == username and \
entry.in_reply_to_status_id != None:
try:
reply_msg = get_from_cache(str(entry.in_reply_to_status_id), \
msg_url % entry.in_reply_to_status_id)
user_timeline.insert(i + 1, dict2class(reply_msg))
except IOError:
broken_url = msg_url % entry.in_reply_to_status_id
logging.warn("Oops. Couldn't fetch " + broken_url)
user_timeline_formated.append(entry)
i += 1
template_values = {
"username": username,
"user_profile": user_profile,
"user_timeline": user_timeline_formated
}
path = os.path.join(os.path.dirname(__file__), 'index.html')
self.response.out.write(template.render(path, template_values))
class Responder(webapp.RequestHandler):
"""
Fetch all mentions from the twitter API and generate responses.
"""
def get(self):
""" default action for /request URL requests """
tracker = db.GqlQuery("SELECT * FROM Tracker").get()
if tracker == None:
tracker = Tracker(last_tweet=datetime.datetime(1970, 1, 1, 0, 0, 0))
prev_last_tweet = tracker.last_tweet
mentions = feedparser.parse(mentions_url)
logging.debug(mentions)
for entry in mentions['entries']:
p = entry.published_parsed
pub_date = datetime.datetime(p[0], p[1], p[2], p[3], p[4], p[5])
if prev_last_tweet < pub_date:
# <title>User: @agreeabl geez I'd love some cookies</title>
author = entry.title.split(": ")[0]
tweet = entry.title.split(": ")[1]
logging.debug(tweet)
#<id>tag:twitter.com,200:http://twitter.com/User/statuses/1</id>
msg_id = entry.id.split('/')[5]
# load reply messages
msgEntries = db.GqlQuery("SELECT * FROM ReplyMessage").fetch(limit=100)
if msgEntries == None:
ReplyMessage(cond='', msg='').put()
msgList = msg_list
else:
msgList = []
for repMsg in msgEntries:
msgList.append(repMsg.msg.encode('utf_8'))
# choose and compile a message
selected_msg = random.choice(msgList)
msg = selected_msg % ('@' + author)
                # only process if this message is directed at the bot
if tweet.split(' ')[0] == '@%s' % username:
if tracker.last_tweet < pub_date:
tracker.last_tweet = pub_date
tracker.put()
reply(msg, msg_id)
if is_friend(author) != 'true':
friend(author)
logging.info('old_last_tweet: %s; new_last_tweet: %s; \
pub_date: %s; msg_id: %s; author: %s; tweet: %s; msg: %s' % \
(prev_last_tweet, tracker.last_tweet, pub_date, msg_id, \
author, tweet, msg))
def reply(msg, msg_id):
""" Format a reply and post it to the Twitter API """
form_fields = {
"status": msg,
"in_reply_to_status_id": msg_id
}
form_data = urllib.urlencode(form_fields)
api_post(status_url, form_data)
def friend(author):
""" Make the bot follow someone """
form_fields = {
"screen_name": author
}
form_data = urllib.urlencode(form_fields)
api_post(friend_url, form_data)
def is_friend(author):
""" Check if the bot is following someone """
query_string = '?user_a=%s&user_b=%s' % (username, author)
return api_get(is_friend_url, query_string).read()
def api_get(url, query_string=""):
"""Make a GET request against the twitter API, handle authentication"""
password_mgr = urllib2.HTTPPasswordMgrWithDefaultRealm()
password_mgr.add_password("Twitter API", "http://twitter.com/", username, \
password)
handler = urllib2.HTTPBasicAuthHandler(password_mgr)
opener = urllib2.build_opener(urllib2.HTTPHandler, handler)
urllib2.install_opener(opener)
return urllib2.urlopen(url + query_string)
def api_post(url, form_data):
"""POST to the twitter API, handle authentication"""
password_mgr = urllib2.HTTPPasswordMgrWithDefaultRealm()
password_mgr.add_password("Twitter API", "http://twitter.com/", username, \
password)
handler = urllib2.HTTPBasicAuthHandler(password_mgr)
opener = urllib2.build_opener(urllib2.HTTPHandler, handler)
urllib2.install_opener(opener)
return urllib2.urlopen(url, form_data)
def get_from_cache(key, url, query_string="", timeout=120):
""" Grab a value from the cache, or go to the API if it's not found """
value = memcache.get(key)
if value is None:
value = simplejson.load(api_get(url, query_string))
memcache.add(key, value, timeout)
return value
def dict2class(dic):
"""Return a class that has same attributes/values as dict key/value"""
#see if it is indeed a dictionary
if type(dic) != types.DictType:
return dic
#define a dummy class
class Dummy:
pass
class_ = Dummy
for elem in dic.keys():
class_.__dict__[elem] = dic[elem]
return class_
class ProxyGet(webapp.RequestHandler):
def get(self):
targetUrl = self.request.get('url')
if targetUrl == '':
self.response.out.write('I\'m in position')
else:
targetUrl = 'http://' + targetUrl
result = urlfetch.fetch(url=targetUrl,method=urlfetch.GET,allow_truncated=True,follow_redirects=False)
            self.response.out.write(result.content)
def main():
""" Handle requests, do CGI stuff """
debug = False
if os.environ['SERVER_NAME'] == 'localhost':
logging.getLogger().setLevel(logging.DEBUG)
debug = True
application = webapp.WSGIApplication(
[
('/', Index),
('/get', ProxyGet),
('/responder', Responder)
],
debug=debug
)
wsgiref.handlers.CGIHandler().run(application)
if __name__ == "__main__":
main()
|
[
"sinsinpub@251f673c-c833-11de-8672-e51b9359e43c"
] |
sinsinpub@251f673c-c833-11de-8672-e51b9359e43c
|
fe616439df2cf983c744ea323919525c2e94cbb2
|
814fd0bea5bc063a4e34ebdd0a5597c9ff67532b
|
/chrome/common/extensions/docs/server2/refresh_tracker_test.py
|
f1f596f1afefe93317d8fa365571a158aa4abe97
|
[
"BSD-3-Clause"
] |
permissive
|
rzr/chromium-crosswalk
|
1b22208ff556d69c009ad292bc17dca3fe15c493
|
d391344809adf7b4f39764ac0e15c378169b805f
|
refs/heads/master
| 2021-01-21T09:11:07.316526 | 2015-02-16T11:52:21 | 2015-02-16T11:52:21 | 38,887,985 | 0 | 0 |
NOASSERTION
| 2019-08-07T21:59:20 | 2015-07-10T15:35:50 |
C++
|
UTF-8
|
Python
| false | false | 1,941 |
py
|
#!/usr/bin/env python
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
from object_store_creator import ObjectStoreCreator
from refresh_tracker import RefreshTracker
class RefreshTrackerTest(unittest.TestCase):
def setUp(self):
self._refresh_tracker = RefreshTracker(ObjectStoreCreator.ForTest())
def testNonExistentRefreshIsIncomplete(self):
self.assertFalse(self._refresh_tracker.GetRefreshComplete('unicorns').Get())
def testEmptyRefreshIsComplete(self):
refresh_id = 'abcdefghijklmnopqrstuvwxyz'
self._refresh_tracker.StartRefresh(refresh_id, []).Get()
self.assertTrue(self._refresh_tracker.GetRefreshComplete(refresh_id).Get())
def testRefreshCompletion(self):
refresh_id = 'this is fun'
self._refresh_tracker.StartRefresh(refresh_id, ['/do/foo', '/do/bar']).Get()
self._refresh_tracker.MarkTaskComplete(refresh_id, '/do/foo').Get()
self.assertFalse(self._refresh_tracker.GetRefreshComplete(refresh_id).Get())
self._refresh_tracker.MarkTaskComplete(refresh_id, '/do/bar').Get()
self.assertTrue(self._refresh_tracker.GetRefreshComplete(refresh_id).Get())
def testUnknownTasksAreIrrelevant(self):
refresh_id = 'i am a banana'
self._refresh_tracker.StartRefresh(refresh_id, ['a', 'b', 'c', 'd']).Get()
self._refresh_tracker.MarkTaskComplete(refresh_id, 'a').Get()
self._refresh_tracker.MarkTaskComplete(refresh_id, 'b').Get()
self._refresh_tracker.MarkTaskComplete(refresh_id, 'c').Get()
self._refresh_tracker.MarkTaskComplete(refresh_id, 'q').Get()
self.assertFalse(self._refresh_tracker.GetRefreshComplete(refresh_id).Get())
self._refresh_tracker.MarkTaskComplete(refresh_id, 'd').Get()
self.assertTrue(self._refresh_tracker.GetRefreshComplete(refresh_id).Get())
if __name__ == '__main__':
unittest.main()
|
[
"[email protected]"
] | |
b2b78ff4ca6a2e23d745096515ed1a5158c35be8
|
309cfe85fcf96531fe7ea745db462b6fe1f837be
|
/River3/python/RiverUtils.py
|
0b3a680d44ec7b58039b230a93e2400825f68d5d
|
[] |
no_license
|
NWU-NISL-BugDetection/river
|
60c6cabc9f3fbf51b427c2af9fe997a384f0f8e4
|
d33042536b66fa0fa68c451e0c22504fc35d81b5
|
refs/heads/master
| 2023-07-24T06:01:40.082441 | 2021-09-01T06:24:35 | 2021-09-01T06:24:35 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 14,663 |
py
|
from typing import List, Dict
import heapq
import numpy as np
from triton import TritonContext
import logging
import argparse
from triton import TritonContext, ARCH, Instruction, MemoryAccess, CPUSIZE, MODE
# The last bytes are fake sentinel inputs, used to detect usages past the end of the buffer
SENTINEL_SIZE = 4
def parseArgs():
# Construct the argument parser
ap = argparse.ArgumentParser()
# Add the arguments to the parser
ap.add_argument("-bp", "--binaryPath", required=True,
help="the test binary location")
ap.add_argument("-entryfuncName", "--entryfuncName", required=False, default="RIVERTestOneInput",
help="the name of the entry function you want to start the test from. By default the function name is 'RIVERTestOneInput'!", type=str)
ap.add_argument("-arch", "--architecture", required=True,
help="architecture of the executable: ARM32, ARM64, X86, X64 are supported")
ap.add_argument("-max", "--maxLen", required=True,
help="maximum size of input length", type=int)
ap.add_argument("-targetAddress", "--targetAddress", required=False, default=None,
help="the target address that your program is trying to reach", type=str)
ap.add_argument("-logLevel", "--logLevel", required=False, default='CRITICAL',
help="set the log level threshold, see the Python logging module documentation for the list of levels. Set it to DEBUG to see everything!", type=str)
ap.add_argument("-secondsBetweenStats", "--secondsBetweenStats", required=False, default='10',
help="the interval (in seconds) between showing new stats", type=int)
ap.add_argument("-outputType", "--outputType", required=False, default='textual',
help="the output interface type, can be visual or textual", type=str)
ap.add_argument("-isTraining", "--isTraining", required=False, default=0,
help="set it to 1 if using an untrained model or to 0 if using a saved model", type=int)
ap.add_argument("-pathToModel", "--pathToModel", required=False, default=None,
help="path to the model to use", type=str)
ap.add_argument("-stateful", "--stateful", required=False, default=False,
help="Either if stateful or stateless (default)", type=str)
ap.add_argument("-outputEndpoint", "--outputEndpoint", required=False, default=None,
help="the HTTP endpoint where test execution data will be sent")
#ap.add_argument("-defaultObsParams", "--defaultObsParams", required=False, default=False,
# help="Default Observation parameters - should be a binary string mapping in order the values from default self.observation_space", type=str)
args = ap.parse_args()
loggingLevel = logging._nameToLevel[args.logLevel]
logging.basicConfig(level=loggingLevel) # filename='example.log', # Set DEBUG or INFO if you want to see more
SECONDS_BETWEEN_STATS = args.secondsBetweenStats
args.targetAddress = None if args.targetAddress is None else int(args.targetAddress, 16)
#assert len(args.defaultObsParams) != 4 # There are 4 default obs types
args.obs_map = 0 #int(args.defaultObsParams[0])
args.obs_path = 0 #int(args.defaultObsParams[1])
args.obs_path_stats = 1 # int(args.defaultObsParams[2])
args.obs_embedding = 0 #int(args.defaultObsParams[3])
# Set the architecture
if args.architecture == "ARM32":
args.architecture = ARCH.ARM32
elif args.architecture == "ARM64":
args.achitecture = ARCH.X86_64
elif args.architecture == "x86":
args.architecture = ARCH.X86
elif args.architecture == "x64":
args.architecture = ARCH.X86_64
else:
assert False, "This architecture is not implemented"
raise NotImplementedError
Input.MAX_LEN = args.maxLen
return args
class ActionFunctors:
# Change a byte at a given index with a new value
@staticmethod
def ChangeByte(params): # inputInstance is Input type
inputInstance = params['inputInstance']
currentInputLen = len(inputInstance.buffer)
if currentInputLen == 0:
return False
indexToChange = params['index'] if 'index' in params else None # Index where to do the change
valueToChange = params['value'] if 'value' in params else None# value to change with, can be none and a random will be added there
if valueToChange is None:
valueToChange = np.random.choice(256)
if indexToChange is None:
indexToChange = np.random.choice(len(inputInstance.buffer))
inputInstance.buffer[indexToChange] = valueToChange
return True
# Erase one or more bytes from a given position
@staticmethod
def EraseBytes(params):
inputInstance = params['inputInstance']
currentInputLen = len(inputInstance.buffer)
if currentInputLen == 0:
return False
indexToStartChange = params['index'] if 'index' in params else None # Index where to do the change
maxLenToDelete = params['maxLen'] if 'maxLen' in params else None
inputInstance = params['inputInstance']
if maxLenToDelete is None: # Randomize a percent from the buffer len
randomPercent = np.random.randint(low=2, high=10)
randomNumItems = float(randomPercent) / len(inputInstance.buffer)
maxLenToDelete = int(max(randomNumItems, 2))
if indexToStartChange is None:
indexToStartChange = np.random.choice(len(inputInstance.buffer))
assert isinstance(inputInstance.buffer, Dict) == False, "Dict kind of buffer not supported for now!"
inputInstance.buffer[indexToStartChange : (indexToStartChange+maxLenToDelete)] = []
return True
# Insert one or more bytes at a given position
@staticmethod
def InsertRandomBytes(params):
index = params['index'] if 'index' in params else None # Index where to do the change
bytesCountToAdd = params['count'] if 'count' in params else None # how many bytes to add
inputInstance = params['inputInstance']
assert isinstance(inputInstance.buffer, Dict) == False, "Dict kind of buffer not supported for now!"
currentInputLen = len(inputInstance.buffer)
if bytesCountToAdd is None: # Randomize a percent from the buffer len
randomPercent = float(np.random.rand() * 1.0) # A maximum of 1 percent to add
randomNumItems = float(randomPercent / 100.0) * len(inputInstance.buffer)
bytesCountToAdd = int(max(randomNumItems, np.random.randint(low=1, high=10)))
if index is None:
index = np.random.choice(currentInputLen) if currentInputLen > 0 else None
oldBuffer = inputInstance.buffer
bytesToAdd = list(np.random.choice(256, bytesCountToAdd))
bytesToAdd = [x.item() for x in bytesToAdd]
#print(f"Adding {len(bytesToAdd)} bytes")
if index is not None:
inputInstance.buffer = oldBuffer[:index] + bytesToAdd + oldBuffer[index:]
else:
inputInstance.buffer = bytesToAdd
inputInstance.checkTrimSize()
return True
# See below to check the significance of params
@staticmethod
def AddDictionaryWord(params):
index = params['index'] if 'index' in params else None # Index where to do the change
override = params['isOverride'] if 'isOverride' in params else False # if it should override or just add
inputInstance = params['inputInstance']
assert isinstance(inputInstance.buffer, Dict) == False, "Dict kind of buffer not supported for now!"
wordToAdd = params['fixedWord'] if 'fixedWord' in params else None # If NONE, a random word from dictionary will be added,
assert wordToAdd is None or isinstance(wordToAdd, list), "this should be a list of bytes !"
currentInputLen = len(inputInstance.buffer)
if index is None:
index = np.random.choice(currentInputLen) if currentInputLen > 0 else 0
if wordToAdd is None:
if len(inputInstance.tokensDictionary) == 0:
return False
wordToAdd = np.random.choice(inputInstance.tokensDictionary)
wordToAdd_len = len(wordToAdd)
assert wordToAdd_len > 0
if override is False:
oldBuffer = inputInstance.buffer
inputInstance.buffer = oldBuffer[:index] + list(wordToAdd) + oldBuffer[index:]
else:
inputInstance.buffer[index : (index+wordToAdd_len)] = wordToAdd
inputInstance.checkTrimSize()
return True
# Data structures to hold inputs.
# Currently we keep the input as a dictionary mapping byte indices to values.
# The motivation is that inputs are often large while only small parts of them change...
# usePlainBuffer = True if the input is not sparse, to represent it as a plain array rather than an index map
class Input:
def __init__(self, buffer : Dict[int, any] = None, bound = None , priority = None, usePlainBuffer=False):
self.buffer = buffer
self.bound = bound
self.priority = priority
        self.usePlainBuffer = usePlainBuffer
def __lt__(self, other):
return self.priority > other.priority
def __str__(self):
maxKeysToShow = 10
keysToShow = sorted(self.buffer)[:maxKeysToShow]
valuesStrToShow = ' '.join(str(self.buffer[k]) for k in keysToShow)
strRes = (f"({valuesStrToShow}..bound: {self.bound}, priority: {self.priority})")
return strRes
# Apply the changes to the buffer, as given in the dictionary mapping from byte index to the new value
def applyChanges(self, changes : Dict[int, any]):
if not self.usePlainBuffer:
self.buffer.update(changes)
else:
for byteIndex,value in changes.items():
self.buffer[byteIndex] = value
    # This is used to apply one of the registered actions.
    # Note that the client has full control and can statically append to the default set of actions.
    # actionContext is the set of parameters needed by the functor of the specific action.
    # Returns True if the action could be applied, False otherwise.
def applyAction(self, actionIndex : int, actionContext : any):
functorForAction = Input.actionFunctors.get(actionIndex)
        assert functorForAction, f"The requested action {actionIndex} is not available in the action set!"
res = functorForAction(actionContext)
self.sanityCheck()
return res
# Static functors to apply action over the existing input
# TODO: implement all others from https://arxiv.org/pdf/1807.07490.pdf
# This is extensible by client using the below functions:
actionFunctors = {0: ActionFunctors.ChangeByte,
1: ActionFunctors.EraseBytes,
2: ActionFunctors.InsertRandomBytes,
3: ActionFunctors.AddDictionaryWord}
tokensDictionary = []
NO_ACTION_INDEX = -1
MAX_LEN = None # Will be set by user parameters
def sanityCheck(self):
#print(len(self.buffer))
#return
# Check 1: is input size in the desired range ?
assert len(self.buffer) <= Input.MAX_LEN, f"Input obtained is bigger than the maximum length !! Max size set in params was {Input.MAX_LEN} while buffer has currently size {len(self.buffer)}"
# Trim if too big
def checkTrimSize(self):
if len(self.buffer) > Input.MAX_LEN:
self.buffer = self.buffer[:Input.MAX_LEN]
@staticmethod
def getNumActionFunctors():
return max(Input.actionFunctors.keys())
# Register new action functor other than the default ones
# Returns back the index of the registered action so you know what to ask for when you want to use applyAction
# actionContext is actually the variables that you pass to your functor
@staticmethod
def registerNewActionFunctor(newActionFunctor):
newIndex = Input.getNumActionFunctors() + 1
Input.actionFunctors[newIndex] = newActionFunctor
# Sets the tokens dictionary for the current problem.
@staticmethod
def setTokensDictionary(tokensDictionary):
Input.tokensDictionary = tokensDictionary
# This is used for the contextual bandits problem
class InputRLGenerational(Input):
def __init__(self):
Input.__init__(self)
self.buffer_parent = None # The input of the parent that generated the below PC
self.priority = -1 # The estimated priority for the state
self.stateEmbedding = None
self.PC = None # The parent path constraint that generated the parent input
self.BBPathInParentPC = None # The same as above but simplified, basically the path of basic blocks obtained by running buffer_parent
self.constraint = None # The constraint needed (SMT) to give to solve to change the PC using action and produce the new input for this structure
self.action = -1 # The action to take (which of the self.PC branches should we modify)
# A priority queue data structure for holding inputs by their priority
class InputsWorklist:
def __init__(self):
self.internalHeap = []
def extractInput(self):
if self.internalHeap:
next_item = heapq.heappop(self.internalHeap)
return next_item
else:
return None
def addInput(self, inp: Input):
heapq.heappush(self.internalHeap, inp)
def __str__(self):
str = f"[{' ; '.join(inpStr.__str__() for inpStr in self.internalHeap)}]"
return str
def __len__(self):
return len(self.internalHeap)
# An example how to use the inputs worklist
def example_InputsWorkList():
worklist = InputsWorklist()
worklist.addInput(Input("aa", 0, 10))
worklist.addInput(Input("bb", 1, 20))
worklist.addInput(Input('cc', 2, 30))
print(worklist)
# Process the list of inputs to convert to bytes if the input was in a string format
def processSeedDict(seedsDict : List[any]):
for idx, oldVal in enumerate(seedsDict):
if isinstance(oldVal, str):
seedsDict[idx] = str.encode(oldVal)
#print(seedsDict)
def riverExp():
import gym
from gym import spaces
obs = {'inputBuffer' : spaces.Box(0, 255, shape=(4096, )),
'inputLen' : spaces.Discrete(4096)}
x = obs['inputLen'].sample()
print(obs['inputLen'].n)
if __name__ == "__main__":
riverExp()
|
[
"[email protected]"
] | |
b9a387605d577d71f54a61961bb4e49480104471
|
0180b1a8e19c0a02e7c00ebe1a58e17347ad1996
|
/BCR2000/consts.py
|
a1a23805ec9ecae2ff31a2bf1a642c416c9ebe69
|
[] |
no_license
|
cce/buttons
|
e486af364c6032b4be75ab9de26f42b8d882c5b0
|
7d4936c91df99f4c6e08f7e347de64361c75e652
|
refs/heads/master
| 2021-01-17T06:56:55.859306 | 2014-12-22T05:03:00 | 2015-11-25T03:42:28 | 46,657,841 | 3 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,666 |
py
|
# Embedded file name: /Users/versonator/Jenkins/live/Binary/Core_Release_64_static/midi-remote-scripts/BCR2000/consts.py
""" The following consts should be substituted with the Sys Ex messages for requesting
a controller's ID response and that response to allow for automatic lookup"""
ID_REQUEST = 0
ID_RESP = 0
GENERIC_STOP = 105
GENERIC_PLAY = 106
GENERIC_REC = 107
GENERIC_LOOP = 108
GENERIC_RWD = -1
GENERIC_FFWD = -1
GENERIC_TRANSPORT = (GENERIC_STOP,
GENERIC_PLAY,
GENERIC_REC,
GENERIC_LOOP,
GENERIC_RWD,
GENERIC_FFWD)
GENERIC_ENC1 = 1
GENERIC_ENC2 = 2
GENERIC_ENC3 = 3
GENERIC_ENC4 = 4
GENERIC_ENC5 = 5
GENERIC_ENC6 = 6
GENERIC_ENC7 = 7
GENERIC_ENC8 = 8
GENERIC_ENCODERS = (GENERIC_ENC1,
GENERIC_ENC2,
GENERIC_ENC3,
GENERIC_ENC4,
GENERIC_ENC5,
GENERIC_ENC6,
GENERIC_ENC7,
GENERIC_ENC8)
GENERIC_SLI1 = 81
GENERIC_SLI2 = 82
GENERIC_SLI3 = 83
GENERIC_SLI4 = 84
GENERIC_SLI5 = 85
GENERIC_SLI6 = 86
GENERIC_SLI7 = 87
GENERIC_SLI8 = 88
GENERIC_SLIDERS = (GENERIC_SLI1,
GENERIC_SLI2,
GENERIC_SLI3,
GENERIC_SLI4,
GENERIC_SLI5,
GENERIC_SLI6,
GENERIC_SLI7,
GENERIC_SLI8)
GENERIC_BUT1 = 73
GENERIC_BUT2 = 74
GENERIC_BUT3 = 75
GENERIC_BUT4 = 76
GENERIC_BUT5 = 77
GENERIC_BUT6 = 78
GENERIC_BUT7 = 79
GENERIC_BUT8 = 80
GENERIC_BUT9 = -1
GENERIC_BUTTONS = (GENERIC_BUT1,
GENERIC_BUT2,
GENERIC_BUT3,
GENERIC_BUT4,
GENERIC_BUT5,
GENERIC_BUT6,
GENERIC_BUT7,
GENERIC_BUT8)
GENERIC_PAD1 = 65
GENERIC_PAD2 = 66
GENERIC_PAD3 = 67
GENERIC_PAD4 = 68
GENERIC_PAD5 = 69
GENERIC_PAD6 = 70
GENERIC_PAD7 = 71
GENERIC_PAD8 = 72
GENERIC_PADS = (GENERIC_PAD1,
GENERIC_PAD2,
GENERIC_PAD3,
GENERIC_PAD4,
GENERIC_PAD5,
GENERIC_PAD6,
GENERIC_PAD7,
GENERIC_PAD8)
|
[
"[email protected]"
] | |
71746fea62c81bca85cdda17b939d1bf146287de
|
a0501ee2c6ea376beb4e1e5e9e656f7bebb7d8c5
|
/problema16.py
|
db816e9cdfb1024fae2653f366033ad8a7dcbafe
|
[] |
no_license
|
jose-brenis-lanegra/T09_Brenis.Niquen
|
0e988c1ae8c89fe7f9cf92010297193e376fc233
|
21f292120244b33496d71dcefeb40c6c8a5b4490
|
refs/heads/master
| 2020-11-25T15:51:01.886824 | 2019-12-18T03:37:34 | 2019-12-18T03:37:34 | 228,745,651 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 169 |
py
|
import libreria
import os
# compute the volume of a tetrahedron
a = int(os.sys.argv[1])
volumen = libreria.volumen_teraedro(a)
print("the volume of the tetrahedron is:", volumen)
|
[
"[email protected]"
] | |
2d05ab4221727d108dcd0eff6b2f0fdfe47b2821
|
57449ec58fd3919d3db1434cf34def46fb17d723
|
/modules/button.py
|
7edc8de5bff8126316676a7f8cd326b315a09f0c
|
[] |
no_license
|
sanetro/clicker
|
ad0ebc8a21219f107c177e6332bf842f4d6ad8ef
|
3cc193990d785099e5c8321c139851d959b59f95
|
refs/heads/master
| 2023-06-24T09:55:43.367197 | 2021-02-28T14:55:25 | 2021-02-28T14:55:25 | 341,620,767 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,123 |
py
|
import pygame
pygame.init()
class BTN():
def __init__(self, color, x, y, width, height, text=''):
self.color = color
self.x = x
self.y = y
self.width = width
self.height = height
self.text = text
def draw(self,win,outline=None):
#Call this method to draw the BTN on the screen
if outline:
pygame.draw.rect(win, outline, (self.x-2,self.y-2,self.width+4,self.height+4),0)
pygame.draw.rect(win, self.color, (self.x,self.y,self.width,self.height),0)
if self.text != '':
font = pygame.font.SysFont('chicago', 60)
text = font.render(self.text, 1, (0,0,0))
win.blit(text, (self.x + (self.width/2 - text.get_width()/2), self.y + (self.height/2 - text.get_height()/2)))
def isOver(self, pos):
#Pos is the mouse position or a tuple of (x, y) coordinates
if pos[0] > self.x and pos[0] < self.x + self.width:
if pos[1] > self.y and pos[1] < self.y + self.height:
return True
return False
|
[
"[email protected]"
] | |
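A minimal event loop exercising the BTN class above; the window size, colors, and label are arbitrary:

```python
import pygame  # the module above already calls pygame.init() on import

win = pygame.display.set_mode((400, 300))
btn = BTN((0, 200, 0), 100, 100, 200, 60, text='Click me')

running = True
while running:
    win.fill((255, 255, 255))
    btn.draw(win, outline=(0, 0, 0))
    pygame.display.update()
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            running = False
        elif event.type == pygame.MOUSEBUTTONDOWN and btn.isOver(event.pos):
            print("button clicked")
pygame.quit()
```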
258c010789b5fbc488913f426f4a2959a0571c81
|
485f2e16739c95cfac04849cd0b5416a25a599e1
|
/PowerAnalyzer.py
|
cd7f315fd9343bbd5adec73cd980a20421999188
|
[
"MIT"
] |
permissive
|
grossamos/StraenWeb
|
e6a07d54eb778ff8c1876eb86f7e199ec0472123
|
887c2a2252bead4bdff66e172586a837ef7e8bff
|
refs/heads/master
| 2022-04-24T01:24:26.611760 | 2020-04-11T21:20:37 | 2020-04-11T21:20:37 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,698 |
py
|
# Copyright 2018 Michael J Simms
"""Performs calculations on power data."""
import inspect
import os
import sys
import DataMgr
import FtpCalculator
import Keys
import SensorAnalyzer
import Units
# Locate and load the statistics module (the functions we're using were made obsolete in Python 3, but we want to keep working in Python 2 as well)
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
libmathdir = os.path.join(currentdir, 'LibMath', 'python')
sys.path.insert(0, libmathdir)
import statistics
class PowerAnalyzer(SensorAnalyzer.SensorAnalyzer):
"""Class for performing calculations on power data."""
def __init__(self, activity_type, activity_user_id, data_mgr):
SensorAnalyzer.SensorAnalyzer.__init__(self, Keys.APP_POWER_KEY, Units.get_power_units_str(), activity_type)
self.data_mgr = data_mgr
self.np_buf = []
self.current_30_sec_buf = []
self.current_30_sec_buf_start_time = 0
self.activity_user_id = activity_user_id
def do_power_record_check(self, record_name, watts):
"""Looks up the existing record and, if necessary, updates it."""
old_value = self.get_best_time(record_name)
if old_value is None or watts > old_value:
self.bests[record_name] = watts
def append_sensor_value(self, date_time, value):
"""Adds another reading to the analyzer."""
SensorAnalyzer.SensorAnalyzer.append_sensor_value(self, date_time, value)
sum_of_readings = 0
num_readings = 0
duration = self.end_time - self.start_time
# Update the buffers needed for the normalized power calculation.
if date_time - self.current_30_sec_buf_start_time > 30000:
if len(self.current_30_sec_buf) > 0:
self.np_buf.append(statistics.mean(self.current_30_sec_buf))
self.current_30_sec_buf = []
self.current_30_sec_buf_start_time = date_time
self.current_30_sec_buf.append(value)
# Search for best efforts.
for reading in reversed(self.readings):
reading_time = reading[0]
sum_of_readings = sum_of_readings + reading[1]
num_readings = num_readings + 1
curr_time_diff = (self.end_time - reading_time) / 1000
if curr_time_diff == 5:
average_power = sum_of_readings / num_readings
self.do_power_record_check(Keys.BEST_5_SEC_POWER, average_power)
if duration < 720:
return
elif curr_time_diff == 720:
average_power = sum_of_readings / num_readings
self.do_power_record_check(Keys.BEST_12_MIN_POWER, average_power)
if duration < 1200:
return
elif curr_time_diff == 1200:
average_power = sum_of_readings / num_readings
self.do_power_record_check(Keys.BEST_20_MIN_POWER, average_power)
if duration < 3600:
return
elif curr_time_diff == 3600:
average_power = sum_of_readings / num_readings
self.do_power_record_check(Keys.BEST_1_HOUR_POWER, average_power)
elif curr_time_diff > 3600:
return
def analyze(self):
"""Called when all sensor readings have been processed."""
results = SensorAnalyzer.SensorAnalyzer.analyze(self)
if len(self.readings) > 0:
results[Keys.MAX_POWER] = self.max
results[Keys.AVG_POWER] = self.avg
#
# Compute normalized power.
#
if len(self.np_buf) > 1:
# Throw away the first 30 second average.
self.np_buf.pop(0)
            # Needed later for the variability index calculation.
ap = statistics.mean(self.np_buf)
# Raise all items to the fourth power.
for idx, item in enumerate(self.np_buf):
item = pow(item, 4)
self.np_buf[idx] = item
# Average the values that were raised to the fourth.
ap2 = statistics.mean(self.np_buf)
# Take the fourth root.
np = pow(ap2, 0.25)
results[Keys.NORMALIZED_POWER] = np
# Compute the variability index (VI = NP / AP).
vi = np / ap
results[Keys.VARIABILITY_INDEX] = vi
# Additional calculations if we have the user's FTP.
if self.activity_user_id and self.data_mgr:
# Get the user's FTP.
ftp = self.data_mgr.retrieve_user_estimated_ftp(self.activity_user_id)
if ftp is not None:
# Compute the intensity factor (IF = NP / FTP).
intfac = np / ftp[0]
results[Keys.INTENSITY_FACTOR] = intfac
# Compute the training stress score (TSS = (t * NP * IF) / (FTP * 36)).
t = (self.end_time - self.start_time) / 1000.0
tss = (t * np * intfac) / (ftp[0] * 36)
results[Keys.TSS] = tss
#
# Compute the threshold power from this workout. Maybe we have a new estimated FTP?
#
ftp_calc = FtpCalculator.FtpCalculator()
ftp_calc.add_activity_data(self.activity_type, self.start_time, self.bests)
estimated_ftp = ftp_calc.estimate()
if estimated_ftp:
results[Keys.THRESHOLD_POWER] = estimated_ftp
return results
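# Worked example of the normalized-power math in analyze() (illustrative numbers):
# two 30-second averages of 100 W and 200 W give
#   mean(100**4, 200**4) = 8.5e8  ->  8.5e8 ** 0.25 ~= 171 W,
# noticeably above the plain 150 W average, as expected for a variable effort.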
|
[
"[email protected]"
] | |
08ae130b1a21fc57296695f30b8ad957650fbdce
|
48dd623ed7a73db4a341846c8e3db005eabcd9a9
|
/mssqltest.py
|
ac93d1902f1ba343f0e8cbe7d5b0c2ce0ae03219
|
[] |
no_license
|
HarishPvhb/DMS
|
25530e0d2bc18a384eebdcda27206fc5a684665a
|
e9684a16d733e9d5b631127af32b9bdc0674077c
|
refs/heads/master
| 2022-12-28T22:59:03.623671 | 2020-10-14T15:03:09 | 2020-10-14T15:03:09 | 303,298,307 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 846 |
py
|
import pyodbc
import binascii
import struct
host = 'td1a4dmcslanqnr.c9mc5tzceqtj.us-west-2.rds.amazonaws.com'
port = '1433'
user = 'root'
password = 'baffle123'
database = 'MSSQL_LargeData'
schema = 'New'
table = 'LargeData'
column = 'col_time'
query = "select {} from {}.{}.{};".format(column, database,schema,table)
connection = pyodbc.connect('DRIVER={ODBC Driver 17 for SQL Server};SERVER='+host+';DATABASE='+database+';UID='+user+';PWD='+password)
cursor = connection.cursor()
cursor.execute(query)
data = cursor.fetchall()
print(data[0])
print(type(data[0]))
'''with open('mssample.txt', 'w') as f:
for item in data:
if type(item[0]) is memoryview:
item = binascii.hexlify(item[0])
f.write("%s\n" % str(item[0]))
else:
f.write("%s\n" % str(item[0]))'''
|
[
"[email protected]"
] | |
70d7f3dbd44a17cd86c110457bf873c417e8deca
|
8f19107b3fb4dae9114f6aec3ed448665742d87d
|
/labs/Topic04-flow/lab04.04-student.py
|
8981447e10afe7e1547747d8961999ad9167a308
|
[] |
no_license
|
Munster2020/HDIP_CSDA
|
a5e7f8f3fd3f3d4b92a2c9a32915d8152af87de6
|
ab21b21849e4efa0a26144fcbe877f6fb6e17a2f
|
refs/heads/master
| 2020-12-14T13:24:14.961945 | 2020-03-25T21:09:13 | 2020-03-25T21:09:13 | 234,757,410 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 635 |
py
|
# A program that reads in students
# until the user enters a blank
# and then prints them all out again
students = []
firstname = input("enter firstname (blank to quit): ").strip()
while firstname != "":
student = {}
student["firstname"] = firstname
lastname = input("enter lastname: ").strip()
student["lastname"] = lastname
students.append(student)
# next student
firstname = input("enter firstname of next (blank to quit): ").strip()
print("here are the students you entered:")
for currentStudent in students:
print("{}{}".format(currentStudent["firstname"],
currentStudent["lastname"]))
|
[
"[email protected]"
] | |
a6f6eba4196833521a55e8c5bcb36c53233d461b
|
470334fb1ca97ee1da3889517e4f38bec66944bb
|
/username/username/wsgi.py
|
5910739352c7649419f57ad514d387de2002bc41
|
[] |
no_license
|
Bhavyareddy/username3
|
40363db4e4c3e522898224a893d9285a0102842a
|
b6e9df81c7082d982f531760a76bfbe59af9b58b
|
refs/heads/master
| 2020-05-30T11:53:47.641907 | 2016-08-10T21:55:59 | 2016-08-10T21:55:59 | 65,456,695 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 393 |
py
|
"""
WSGI config for username project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "username.settings")
application = get_wsgi_application()
|
[
"[email protected]"
] | |
f926af277ef295431c9bbba87a470f29effed838
|
eb8953a78be45c953bb30fd3ac9a4cb6b68a23b8
|
/Week_01/remove-duplicates-from-sorted-array.py
|
bd33741ffabab7541fa27cce6c8601661691b4ad
|
[] |
no_license
|
good-time/algorithm012
|
37bd620fd9d2b484c7c72f030d7732ea8b29f4a4
|
ef03f220f3590a4c33f2ea8db91b69170f6d7f90
|
refs/heads/master
| 2022-12-18T18:22:48.580273 | 2020-09-27T04:36:51 | 2020-09-27T04:36:51 | 279,063,821 | 0 | 0 | null | 2020-07-12T12:52:12 | 2020-07-12T12:52:11 | null |
UTF-8
|
Python
| false | false | 271 |
py
|
from typing import List

class Solution:
def removeDuplicates(self, nums: List[int]) -> int:
if len(nums) == 0: return 0
i = 0
for j in range(1, len(nums)):
if nums[i] != nums[j]:
i += 1
nums[i] = nums[j]
return i+1
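# Example (illustrative): nums = [1, 1, 2] -> returns 2 and nums begins [1, 2, ...];
# `i` marks the end of the deduplicated prefix while `j` scans ahead.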
|
[
"[email protected]"
] | |
ed317eaf3d927e878cd3e726402cd5e2619d3ece
|
654926e03dd260c45e0d59e408283a4305a1bf0e
|
/Train.py
|
7f81c7a908b56dc25fa613c279ab6acd506b55a6
|
[] |
no_license
|
vedika19/Recommendation-System
|
e90bae3c6f0ddb1b6c871035c36e4e875d66a6cf
|
ddf00fa5bc1104ccb800840cb607248131256c79
|
refs/heads/master
| 2020-12-24T12:59:26.567930 | 2016-09-21T13:48:37 | 2016-09-21T13:48:37 | 68,821,271 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,397 |
py
|
import psycopg2
import sys
import pprint
import math
'''RecommendationSystem(uid,mid,rating)
m_id=str(mid)
query = ("SELECT movie1,movie2,similarity FROM u1similarity WHERE movie1= %s OR movie2= %s ;")
data = (m_id,m_id)
conn_string = "host='localhost' dbname='postgres' user='postgres' password=''"
conn2 = psycopg2.connect(conn_string)
cursor2= conn2.cursor()
cursor2.execute(query,data)
records2=cursor2.fetchall()
print records2'''
'''
conn_string = "host='localhost' dbname='postgres' user='postgres' password=''"
conn = psycopg2.connect(conn_string)
cursor= conn.cursor()
query = "SELECT user_id,movie_id,rating FROM u1test where p_rating is NULL;"
cursor.execute(query)
records = cursor.fetchall()
for i in records:
print i
common=[]
uid=str(i[0])
print uid
print i
common={}
query = ("SELECT user_id,movie_id,rating FROM u1base WHERE user_id = %s ;")
data = [uid]
conn1 = psycopg2.connect(conn_string)
cursor1= conn1.cursor()
cursor1.execute(query,data)
records1=cursor1.fetchall()
#print records1
if len(records1)<4:
print 'Cold Start'
#Cold Start()
else :
print 'Recommendation System'
mid=str(i[1])
query = ("SELECT movie1, movie2, similarity FROM u1similarity where (movie1=%s OR movie2=%s) ORDER BY similarity desc LIMIT 500 ;")
data = (mid,mid)
conn_string = "host='localhost' dbname='postgres' user='postgres' password=''"
conn2 = psycopg2.connect(conn_string)
cursor2= conn2.cursor()
cursor2.execute(query,data)
records2=cursor2.fetchall()
print records2
for re in records2:
#print re[1],i[1]
if re[0]==i[1]:
for rec1 in records1:
#print rec1[1],re[1],rec1[1]==re[1]
if rec1[1]==re[1]:
common[re[1]]=rec1[2],re[2]
else:
for rec1 in records1:
if re[0] ==rec1[1]:
common[re[0]]=rec1[2],re[2]
for k,v in common.iteritems():
print k,v
cursor1.close()
cursor2.close()
predicted=0
num=0
den=0
similarity_p=0
for k,v in common.iteritems():
num=num+v[0]*v[1]
den=den+v[1]
if den == 0:
similarity_p=0
else:
similarity_p=num/den
print similarity_p
sp=str(similarity_p)
i0=str(i[0])
i1=str(i[1])
print sp,i0,i1
query = ("UPDATE u1test SET (p_rating) = (%s) where (user_id) = (%s) AND (movie_id)= (%s) ;")
data = (sp,i0,i1)
conn_string = "host='localhost' dbname='postgres' user='postgres' password=''"
conn = psycopg2.connect(conn_string)
cursor1= conn.cursor()
cursor1.execute(query,data)
conn.commit()
# Calculating RMSE
rmse=0
conn_string = "host='localhost' dbname='postgres' user='postgres' password=''"
conn = psycopg2.connect(conn_string)
cursor= conn.cursor()
query = "SELECT rmse FROM u1test "
cursor.execute(query)
records = cursor.fetchall()
for i in records:
rmse=rmse+i[0]
rmse=rmse/len(records)
rmse=math.sqrt(rmse)
print rmse'''
print"THE TOP 50 RECOMMENDED MOVIES"
conn_string = "host='localhost' dbname='postgres' user='postgres' password=''"
conn = psycopg2.connect(conn_string)
cursor= conn.cursor()
query = "SELECT * FROM recommendation order by p_rating desc LIMIT 50"
cursor.execute(query)
records = cursor.fetchall()
for i in records:
cursor2= conn.cursor()
md=str(i[1])
query2 = "SELECT movie_title FROM movie where movie_id = %s ;"
data2=[md]
cursor2.execute(query2,data2)
records1 = cursor2.fetchall()
for j in records1:
print md ,j[0]
|
[
"[email protected]"
] | |
0255e46bd31fd1ecc2393fdf7322e84db39abf47
|
97e60d0ca572d0dc3fc80f8719cd57a707ab6069
|
/bias_zebra_print/stock.py
|
dd94d374c932338a87ab830754b76fb7b1fe5b94
|
[] |
no_license
|
josepato/bias_trunk_v6
|
0c7c86493c88f015c049a139360478cabec7f698
|
b6ab6fc2ff3dc832f26effdba421bcc76d5cabac
|
refs/heads/master
| 2020-06-12T14:18:31.101513 | 2016-12-15T22:55:54 | 2016-12-15T22:55:54 | 75,803,957 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,033 |
py
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>). All Rights Reserved
# $Id$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
#
#Bias Product / PriceList
#
from osv import osv
from osv import fields
import time
import netsvc
#----------------------------------------------------------
# Price List
#----------------------------------------------------------
OLDPRINTSTR = """^XA~TA000~JSN^LT0^MNW^MTT^PON^PMN^LH0,0^JMA^PR4,4^MD0^JUS^LRN^CI0^XZ
^XA
^MMT
^LL0142
^LS0
^FT246,119^A0N,17,16^FH\^FD%s^FS
^FT164,18^A0N,17,16^FH\^FD%s^FS
^FT286,110^A0B,17,16^FH\^FD%s^FS
^FT21,136^A0N,17,16^FH\^FD%s^FS
^FT4,123^A0N,17,16^FH\^FD%s^FS
^FT193,51^A0N,17,16^FH\^FD%s^FS
^FT4,67^A0N,17,16^FH\^FD%s/%s^FS
^FT3,51^A0N,17,16^FH\^FD%s/%s^FS
^FT3,34^A0N,17,16^FH\^FD%s^FS
^FT8,18^A0N,17,16^FH\^FD%s^FS
^PQ%i,0,1,Y^XZ"""
PRINTSTR = """^XA~TA000~JSN^LT0^MNW^MTT^PON^PMN^LH0,0^JMA^PR4,4^MD0^JUS^LRN^CI0^XZ
^XA
^MMT
^LL0850
^LS0
^FT48,731^A0I,17,16^FH\^FD%s^FS
^FT131,831^A0I,17,16^FH\^FD%s^FS
^FT8,740^A0R,17,16^FH\^FD%s^FS
^FT273,713^A0I,17,16^FH\^FD%s^FS
^FT290,727^A0I,17,16^FH\^FD%s^FS
^FT101,799^A0I,17,16^FH\^FD%s^FS
^FT291,782^A0I,17,16^FH\^FD%s/%s^FS
^FT291,799^A0I,17,16^FH\^FD%s/%s^FS
^FT291,815^A0I,17,16^FH\^FD%s^FS
^FT287,832^A0I,17,16^FH\^FD%s^FS
^BY1,3,22^FT291,755^BCI,,Y,N
^FD>:LA>50001>6BB^FS
^PQ%i,0,1,Y^XZ
"""
class stock_picking(osv.osv):
_inherit = "stock.picking"
def getZebraData(self, cr, uid, ids):
if isinstance(ids, (int, long)):
ids = [ids]
res = []
move_obj = self.pool.get('stock.move')
for picking in self.browse(cr, uid, ids):
mydict = {'id': picking.id}
mylines = []
for move in picking.move_lines:
mystr = PRINTSTR %(move.product_id.product_writing_kind_id.name,
move.product_id.product_colection_id.name,
move.product_id.default_code,
move.product_id.product_tmpl_id.categ_id.parent_id.name,
move.product_id.product_writing_metaerial_id.name,
(move.product_id.product_hardware_ids and move.product_id.product_hardware_ids[0].name) or "-",
(move.product_id.product_top_material_ids and move.product_id.product_top_material_ids[0].name) or "-",
(move.product_id.product_bottom_material_ids and move.product_id.product_bottom_material_ids[0].name) or "-",
(move.product_id.product_top_color_ids and move.product_id.product_top_color_ids[0].name) or "-",
(move.product_id.product_bottom_color_ids and move.product_id.product_bottom_color_ids[0].name) or "-",
move.product_id.product_line_id.name,
move.product_id.product_brand_id.name,
move.product_qty)
mylines.append(mystr)
mydict['lines'] = mylines
res.append(mydict)
return res
stock_picking()
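# Note: each PRINTSTR fill produces one ZPL label; the trailing %i feeds the
# ^PQ quantity command, so one label prints per unit of move.product_qty.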
|
[
"[email protected]"
] | |
b563672c1f0906584832778d726b6ba3cac18c7f
|
060ce17de7b5cdbd5f7064d1fceb4ded17a23649
|
/fn_microsoft_defender/fn_microsoft_defender/util/customize.py
|
bb2e546adca2b9b9f81794d806d0518c8a1f2dd2
|
[
"MIT"
] |
permissive
|
ibmresilient/resilient-community-apps
|
74bbd770062a22801cef585d4415c29cbb4d34e2
|
6878c78b94eeca407998a41ce8db2cc00f2b6758
|
refs/heads/main
| 2023-06-26T20:47:15.059297 | 2023-06-23T16:33:58 | 2023-06-23T16:33:58 | 101,410,006 | 81 | 107 |
MIT
| 2023-03-29T20:40:31 | 2017-08-25T14:07:33 |
Python
|
UTF-8
|
Python
| false | false | 6,691 |
py
|
# -*- coding: utf-8 -*-
"""Generate the Resilient customizations required for fn_microsoft_defender"""
import base64
import os
import io
try:
from resilient import ImportDefinition
except ImportError:
# Support Apps running on resilient-circuits < v35.0.195
from resilient_circuits.util import ImportDefinition
RES_FILE = "data/export.res"
def codegen_reload_data():
"""
Parameters required reload codegen for the fn_microsoft_defender package
"""
return {
"package": u"fn_microsoft_defender",
"message_destinations": [u"fn_microsoft_defender"],
"functions": [u"defender_alert_search", u"defender_app_execution", u"defender_collect_machine_investigation_package", u"defender_delete_indicator", u"defender_find_machines", u"defender_find_machines_by_file", u"defender_find_machines_by_filter", u"defender_get_file_information", u"defender_get_incident", u"defender_get_related_alert_information", u"defender_list_indicators", u"defender_machine_isolation", u"defender_machine_scan", u"defender_machine_vulnerabilities", u"defender_quarantine_file", u"defender_set_indicator", u"defender_update_alert", u"defender_update_incident"],
"workflows": [u"defender_atp_app_execution", u"defender_atp_collect_machine_investigation_package", u"defender_atp_delete_indicator", u"defender_atp_find_machines", u"defender_atp_find_machines_by_file_hash", u"defender_atp_get_file_information", u"defender_atp_machine_isolation", u"defender_atp_machine_scan", u"defender_atp_machine_vulnerabilities", u"defender_atp_set_indicator", u"defender_atp_update_alert", u"defender_atp_update_indicator", u"defender_close_incident", u"defender_find_machines_by_filter", u"defender_get_incident", u"defender_get_updated_machine_information", u"defender_list_indicators", u"defender_quarantine_file", u"defender_refresh_incident", u"defender_sync_comment", u"defender_sync_incident"],
"actions": [u"Create Artifact from Indicator", u"Defender Close Incident", u"Defender Find Machine by DNS name", u"Defender Find Machines by File Hash", u"Defender Find Machines by Internal IP Address", u"Defender Get File Information", u"Defender Get Incident", u"Defender List Indicators", u"Defender Machine App Execution Restriction", u"Defender Machine Collect Investigation Package", u"Defender Machine Isolate Action", u"Defender Machine Quarantine File", u"Defender Machine Refresh Information", u"Defender Machine Scan", u"Defender Machine Update Information", u"Defender Machine Vulnerabilities", u"Defender Refresh Incident", u"Defender Set Indicator", u"Defender Sync Comment", u"Defender Sync Incident", u"Defender Update Alert", u"Delete Indicator", u"Update Indicator"],
"incident_fields": [u"defender_classification", u"defender_determination", u"defender_incident_createtime", u"defender_incident_id", u"defender_incident_lastupdatetime", u"defender_incident_url", u"defender_tags"],
"incident_artifact_types": [],
"incident_types": [],
"datatables": [u"defender_alerts", u"defender_indicators", u"defender_machines"],
"automatic_tasks": [],
"scripts": [u"Create Artifact from Indicator"],
}
def customization_data(client=None):
"""
Returns a Generator of ImportDefinitions (Customizations).
Install them using `resilient-circuits customize`
IBM Resilient Platform Version: 39.0.6328
Contents:
- Message Destinations:
- fn_microsoft_defender
- Functions:
- defender_alert_search
- defender_app_execution
- defender_collect_machine_investigation_package
- defender_delete_indicator
- defender_find_machines
- defender_find_machines_by_file
- defender_find_machines_by_filter
- defender_get_file_information
- defender_get_incident
- defender_get_related_alert_information
- defender_list_indicators
- defender_machine_isolation
- defender_machine_scan
- defender_machine_vulnerabilities
- defender_quarantine_file
- defender_set_indicator
- defender_update_alert
- defender_update_incident
- Workflows:
- defender_atp_app_execution
- defender_atp_collect_machine_investigation_package
- defender_atp_delete_indicator
- defender_atp_find_machines
- defender_atp_find_machines_by_file_hash
- defender_atp_get_file_information
- defender_atp_machine_isolation
- defender_atp_machine_scan
- defender_atp_machine_vulnerabilities
- defender_atp_set_indicator
- defender_atp_update_alert
- defender_atp_update_indicator
- defender_close_incident
- defender_find_machines_by_filter
- defender_get_incident
- defender_get_updated_machine_information
- defender_list_indicators
- defender_quarantine_file
- defender_refresh_incident
- defender_sync_comment
- defender_sync_incident
- Rules:
- Create Artifact from Indicator
- Defender Close Incident
- Defender Find Machine by DNS name
- Defender Find Machines by File Hash
- Defender Find Machines by Internal IP Address
- Defender Get File Information
- Defender Get Incident
- Defender List Indicators
- Defender Machine App Execution Restriction
- Defender Machine Collect Investigation Package
- Defender Machine Isolate Action
- Defender Machine Quarantine File
- Defender Machine Refresh Information
- Defender Machine Scan
- Defender Machine Update Information
- Defender Machine Vulnerabilities
- Defender Refresh Incident
- Defender Set Indicator
- Defender Sync Comment
- Defender Sync Incident
- Defender Update Alert
- Delete Indicator
- Update Indicator
- Incident Fields:
- defender_classification
- defender_determination
- defender_incident_createtime
- defender_incident_id
- defender_incident_lastupdatetime
- defender_incident_url
- defender_tags
- Data Tables:
- defender_alerts
- defender_indicators
- defender_machines
- Scripts:
- Create Artifact from Indicator
"""
res_file = os.path.join(os.path.dirname(__file__), RES_FILE)
if not os.path.isfile(res_file):
raise FileNotFoundError("{} not found".format(RES_FILE))
with io.open(res_file, mode='rt') as f:
b64_data = base64.b64encode(f.read().encode('utf-8'))
yield ImportDefinition(b64_data)
|
[
"[email protected]"
] | |
e96fb62e4c0431bce5afe872d8630f24a0f4bb80
|
ac9ed2852d6e8217229cbeda0a6dd5f98953415a
|
/CoursPytorch/CIFAR10/models/resnet/resnet_model.py
|
1351716654670393a01e0a7bba684b9d20d0b8d6
|
[] |
no_license
|
penda-diagne/projetMaster
|
4afaf042aa140875c0f42a7d8cb4a27b2a3e23f1
|
44a74c53c7ae15ab556d46620be2cee8ea5e6cbc
|
refs/heads/master
| 2021-08-19T05:22:06.364165 | 2021-07-05T21:45:25 | 2021-07-05T21:45:25 | 242,139,656 | 1 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,428 |
py
|
import torch.nn as nn

# NOTE: as committed, this file is missing its imports and the 3x3 convolution
# helper it calls; the import and helper defined here are minimal assumptions
# added so the module is self-contained.
def conv3_3(in_channels, out_channels, stride=1):
    # 3x3 convolution with padding, as used throughout the residual layers
    return nn.Conv2d(in_channels, out_channels, kernel_size=3,
                     stride=stride, padding=1, bias=False)

class ResNet(nn.Module):
def __init__(self, block, layers):
super(ResNet, self).__init__()
self.in_channels = 16
self.conv = conv3_3(3, 16)
self.bn = nn.BatchNorm2d(16)
self.relu = nn.ReLU(inplace=True)
self.layer1 = self.make_layer(block, 16, layers[0])
self.layer2 = self.make_layer(block, 32, layers[1], 2)
self.layer3 = self.make_layer(block, 64, layers[2], 2)
self.avg_pool = nn.AvgPool2d(8)
self.fc = nn.Linear(64, 10)
def make_layer(self, block, out_channels, blocks, stride=1):
downsample = None
if (stride != 1) or (self.in_channels != out_channels):
downsample = nn.Sequential(
conv3_3(self.in_channels, out_channels, stride=stride),
nn.BatchNorm2d(out_channels))
layers = []
layers.append(block(self.in_channels, out_channels, stride, downsample))
self.in_channels = out_channels
for i in range(1, blocks):
layers.append(block(out_channels, out_channels))
return nn.Sequential(*layers)
def forward(self, x):
out = self.conv(x.float())
out = self.bn(out)
out = self.relu(out)
out = self.layer1(out)
out = self.layer2(out)
out = self.layer3(out)
out = self.avg_pool(out)
out = out.view(out.size(0), -1)
out = self.fc(out)
return out
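# Illustrative instantiation (the residual block class and layer counts are
# assumptions, not defined in this file):
#   model = ResNet(ResidualBlock, [3, 3, 3])
# [3, 3, 3] basic blocks give the classic 6n+2 = 20-layer CIFAR-10 variant,
# ending in the 10-way linear classifier above.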
|
[
"[email protected]"
] | |
a36bee353387612fcbb653c63cb1367cb93a70da
|
ba5dbb751a44974f1ec1914b580fc79734cd3204
|
/prog_fadd.py
|
96202c1804756e39ececd8f4686d1e9305271fe3
|
[] |
no_license
|
HARINI14/HARINI.R
|
6be01708e2b09e3f5f47fe0c87a6c487063c03f0
|
1c5d9f89181211605859360c2bb3d505aee06990
|
refs/heads/master
| 2021-05-11T12:21:49.788490 | 2018-03-13T09:52:34 | 2018-03-13T09:52:34 | 117,656,536 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 127 |
py
|
def add (x,y):
return x+y
def sub (x,y):
return x-y
print(add(2, 7))
print(sub(7, 2))
print(add(100, 200))
|
[
"[email protected]"
] | |
12b35321e4a6f3f056becf5b21460437b8743274
|
ccdf3dffdc390b774ac3e6ba81c16dc48bc9a5fb
|
/notebooks/basic_functions.py
|
8a3b826197585c65de81174ab78d232d36898224
|
[] |
no_license
|
CCSB-DFCI/HuRI_paper
|
90a69521e41d2a8dad9987895930ecd0ce6504b6
|
16c57919f4e0c3d1a78edf90c105ed42d3022f8f
|
refs/heads/master
| 2022-12-15T19:58:33.625633 | 2020-09-14T17:07:06 | 2020-09-14T17:07:06 | 226,145,775 | 25 | 8 | null | 2022-08-23T18:07:00 | 2019-12-05T16:38:19 |
Jupyter Notebook
|
UTF-8
|
Python
| false | false | 2,174 |
py
|
# script that contains basic functions
import pandas, numpy
from custom_settings import *
# function that returns a set with all ensembl gene IDs of protein coding genes
def get_PC_genome(cursor):
query = """select ensembl_gene_id from {}.protein_coding_genome""".format(DB_NAME)
cursor.execute(query)
genome = set([row[0] for row in cursor])
return genome
# function that loads the GTEx median expression data for every PC gene and tissue restricted
# to all genes that are expressed in at least one tissue above the given expression cutoff
def get_GTEx_expr_df(connect,testis=False):
query = """select * from {}.GTEx_expr_PC_matrix""".format(DB_NAME)
GTEx = pandas.read_sql(query,connect)
GTEx.set_index('ensembl_gene_id',inplace=True)
if not testis:
GTEx.drop('testis',axis='columns',inplace=True)
GTEx = GTEx.loc[GTEx.max(axis=1) > GTEX_EXPR_CUTOFF,]
return GTEx
# function that loads the TiP matrix with all values set to NaN for which the gene is not
# expressed above the given expression cutoff in the respective tissue
def get_GTEx_TiP_df(connect,testis=False):
    if testis:
        table_name = 'GTEx_TiP_PC_matrix'
        GTEx = get_GTEx_expr_df(connect,testis=True)
    else:
        table_name = 'GTEx_TiP_no_testis_PC_matrix'
        GTEx = get_GTEx_expr_df(connect)
query = """select * from {}.{}""".format(DB_NAME,table_name)
TiPmatrix = pandas.read_sql(query,connect)
TiPmatrix.set_index('ensembl_gene_id',inplace=True)
TiPmatrix = TiPmatrix.loc[TiPmatrix.index.isin(GTEx.index.tolist()),]
GTEx_tissues = TiPmatrix.columns.tolist()
for tissue in GTEx_tissues:
TiPmatrix.loc[TiPmatrix.index.isin(GTEx.index[GTEx[tissue]<=GTEX_EXPR_CUTOFF]),[tissue]] = numpy.NaN
return TiPmatrix
# function that returns for every PC gene that is expressed in GTEx based on the given expression cutoff
# the max TiP value from all the tissues where the gene was observed to be expressed
def get_maxTiPvalue_series(connect,testis=False):
    TiPmatrix = get_GTEx_TiP_df(connect, testis=testis)  # honour the testis flag
maxTiP_series = TiPmatrix.max(axis=1,numeric_only=True)
maxTiP_series.dropna(inplace=True)
maxTiP_series.sort_values(inplace=True)
return maxTiP_series
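# Minimal usage sketch (the connector and credentials are assumptions, not part
# of this module):
#
#   import MySQLdb
#   connect = MySQLdb.connect(host='localhost', db=DB_NAME)
#   genome = get_PC_genome(connect.cursor())
#   max_tips = get_maxTiPvalue_series(connect)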
|
[
"[email protected]"
] | |
e64dc24c7f2e713d932e22c3bdbe113dfcac051e
|
0c93c0d2c9b674e26b3ea2431ea8350581354970
|
/minishift/font.py
|
6224ca0dcbaa5af454fb172052df67ed0d010bc0
|
[
"BSD-3-Clause"
] |
permissive
|
henrybell/minishift-python
|
e7af8ffa6201ae4694281cf05137d81076215ede
|
12a351d0260be522087eaecc4b20dd239f7ed9a2
|
refs/heads/master
| 2020-03-22T07:54:43.800853 | 2015-11-16T16:37:46 | 2015-11-16T16:37:46 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 10,573 |
py
|
"""glcdfont, courtesy Adafruit.
Software License Agreement (BSD License)
Copyright (c) 2012 Adafruit Industries. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
- Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
- Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
"""
font = [
[0x00, 0x00, 0x00, 0x00, 0x00],
[0x3E, 0x5B, 0x4F, 0x5B, 0x3E],
[0x3E, 0x6B, 0x4F, 0x6B, 0x3E],
[0x1C, 0x3E, 0x7C, 0x3E, 0x1C],
[0x18, 0x3C, 0x7E, 0x3C, 0x18],
[0x1C, 0x57, 0x7D, 0x57, 0x1C],
[0x1C, 0x5E, 0x7F, 0x5E, 0x1C],
[0x00, 0x18, 0x3C, 0x18, 0x00],
[0xFF, 0xE7, 0xC3, 0xE7, 0xFF],
[0x00, 0x18, 0x24, 0x18, 0x00],
[0xFF, 0xE7, 0xDB, 0xE7, 0xFF],
[0x30, 0x48, 0x3A, 0x06, 0x0E],
[0x26, 0x29, 0x79, 0x29, 0x26],
[0x40, 0x7F, 0x05, 0x05, 0x07],
[0x40, 0x7F, 0x05, 0x25, 0x3F],
[0x5A, 0x3C, 0xE7, 0x3C, 0x5A],
[0x7F, 0x3E, 0x1C, 0x1C, 0x08],
[0x08, 0x1C, 0x1C, 0x3E, 0x7F],
[0x14, 0x22, 0x7F, 0x22, 0x14],
[0x5F, 0x5F, 0x00, 0x5F, 0x5F],
[0x06, 0x09, 0x7F, 0x01, 0x7F],
[0x00, 0x66, 0x89, 0x95, 0x6A],
[0x60, 0x60, 0x60, 0x60, 0x60],
[0x94, 0xA2, 0xFF, 0xA2, 0x94],
[0x08, 0x04, 0x7E, 0x04, 0x08],
[0x10, 0x20, 0x7E, 0x20, 0x10],
[0x08, 0x08, 0x2A, 0x1C, 0x08],
[0x08, 0x1C, 0x2A, 0x08, 0x08],
[0x1E, 0x10, 0x10, 0x10, 0x10],
[0x0C, 0x1E, 0x0C, 0x1E, 0x0C],
[0x30, 0x38, 0x3E, 0x38, 0x30],
[0x06, 0x0E, 0x3E, 0x0E, 0x06],
[0x00, 0x00, 0x00, 0x00, 0x00],
[0x00, 0x00, 0x5F, 0x00, 0x00],
[0x00, 0x07, 0x00, 0x07, 0x00],
[0x14, 0x7F, 0x14, 0x7F, 0x14],
[0x24, 0x2A, 0x7F, 0x2A, 0x12],
[0x23, 0x13, 0x08, 0x64, 0x62],
[0x36, 0x49, 0x56, 0x20, 0x50],
[0x00, 0x08, 0x07, 0x03, 0x00],
[0x00, 0x1C, 0x22, 0x41, 0x00],
[0x00, 0x41, 0x22, 0x1C, 0x00],
[0x2A, 0x1C, 0x7F, 0x1C, 0x2A],
[0x08, 0x08, 0x3E, 0x08, 0x08],
[0x00, 0x80, 0x70, 0x30, 0x00],
[0x08, 0x08, 0x08, 0x08, 0x08],
[0x00, 0x00, 0x60, 0x60, 0x00],
[0x20, 0x10, 0x08, 0x04, 0x02],
[0x3E, 0x51, 0x49, 0x45, 0x3E],
[0x00, 0x42, 0x7F, 0x40, 0x00],
[0x72, 0x49, 0x49, 0x49, 0x46],
[0x21, 0x41, 0x49, 0x4D, 0x33],
[0x18, 0x14, 0x12, 0x7F, 0x10],
[0x27, 0x45, 0x45, 0x45, 0x39],
[0x3C, 0x4A, 0x49, 0x49, 0x31],
[0x41, 0x21, 0x11, 0x09, 0x07],
[0x36, 0x49, 0x49, 0x49, 0x36],
[0x46, 0x49, 0x49, 0x29, 0x1E],
[0x00, 0x00, 0x14, 0x00, 0x00],
[0x00, 0x40, 0x34, 0x00, 0x00],
[0x00, 0x08, 0x14, 0x22, 0x41],
[0x14, 0x14, 0x14, 0x14, 0x14],
[0x00, 0x41, 0x22, 0x14, 0x08],
[0x02, 0x01, 0x59, 0x09, 0x06],
[0x3E, 0x41, 0x5D, 0x59, 0x4E],
[0x7C, 0x12, 0x11, 0x12, 0x7C],
[0x7F, 0x49, 0x49, 0x49, 0x36],
[0x3E, 0x41, 0x41, 0x41, 0x22],
[0x7F, 0x41, 0x41, 0x41, 0x3E],
[0x7F, 0x49, 0x49, 0x49, 0x41],
[0x7F, 0x09, 0x09, 0x09, 0x01],
[0x3E, 0x41, 0x41, 0x51, 0x73],
[0x7F, 0x08, 0x08, 0x08, 0x7F],
[0x00, 0x41, 0x7F, 0x41, 0x00],
[0x20, 0x40, 0x41, 0x3F, 0x01],
[0x7F, 0x08, 0x14, 0x22, 0x41],
[0x7F, 0x40, 0x40, 0x40, 0x40],
[0x7F, 0x02, 0x1C, 0x02, 0x7F],
[0x7F, 0x04, 0x08, 0x10, 0x7F],
[0x3E, 0x41, 0x41, 0x41, 0x3E],
[0x7F, 0x09, 0x09, 0x09, 0x06],
[0x3E, 0x41, 0x51, 0x21, 0x5E],
[0x7F, 0x09, 0x19, 0x29, 0x46],
[0x26, 0x49, 0x49, 0x49, 0x32],
[0x03, 0x01, 0x7F, 0x01, 0x03],
[0x3F, 0x40, 0x40, 0x40, 0x3F],
[0x1F, 0x20, 0x40, 0x20, 0x1F],
[0x3F, 0x40, 0x38, 0x40, 0x3F],
[0x63, 0x14, 0x08, 0x14, 0x63],
[0x03, 0x04, 0x78, 0x04, 0x03],
[0x61, 0x59, 0x49, 0x4D, 0x43],
[0x00, 0x7F, 0x41, 0x41, 0x41],
[0x02, 0x04, 0x08, 0x10, 0x20],
[0x00, 0x41, 0x41, 0x41, 0x7F],
[0x04, 0x02, 0x01, 0x02, 0x04],
[0x40, 0x40, 0x40, 0x40, 0x40],
[0x00, 0x03, 0x07, 0x08, 0x00],
[0x20, 0x54, 0x54, 0x78, 0x40],
[0x7F, 0x28, 0x44, 0x44, 0x38],
[0x38, 0x44, 0x44, 0x44, 0x28],
[0x38, 0x44, 0x44, 0x28, 0x7F],
[0x38, 0x54, 0x54, 0x54, 0x18],
[0x00, 0x08, 0x7E, 0x09, 0x02],
[0x18, 0xA4, 0xA4, 0x9C, 0x78],
[0x7F, 0x08, 0x04, 0x04, 0x78],
[0x00, 0x44, 0x7D, 0x40, 0x00],
[0x20, 0x40, 0x40, 0x3D, 0x00],
[0x7F, 0x10, 0x28, 0x44, 0x00],
[0x00, 0x41, 0x7F, 0x40, 0x00],
[0x7C, 0x04, 0x78, 0x04, 0x78],
[0x7C, 0x08, 0x04, 0x04, 0x78],
[0x38, 0x44, 0x44, 0x44, 0x38],
[0xFC, 0x18, 0x24, 0x24, 0x18],
[0x18, 0x24, 0x24, 0x18, 0xFC],
[0x7C, 0x08, 0x04, 0x04, 0x08],
[0x48, 0x54, 0x54, 0x54, 0x24],
[0x04, 0x04, 0x3F, 0x44, 0x24],
[0x3C, 0x40, 0x40, 0x20, 0x7C],
[0x1C, 0x20, 0x40, 0x20, 0x1C],
[0x3C, 0x40, 0x30, 0x40, 0x3C],
[0x44, 0x28, 0x10, 0x28, 0x44],
[0x4C, 0x90, 0x90, 0x90, 0x7C],
[0x44, 0x64, 0x54, 0x4C, 0x44],
[0x00, 0x08, 0x36, 0x41, 0x00],
[0x00, 0x00, 0x77, 0x00, 0x00],
[0x00, 0x41, 0x36, 0x08, 0x00],
[0x02, 0x01, 0x02, 0x04, 0x02],
[0x3C, 0x26, 0x23, 0x26, 0x3C],
[0x1E, 0xA1, 0xA1, 0x61, 0x12],
[0x3A, 0x40, 0x40, 0x20, 0x7A],
[0x38, 0x54, 0x54, 0x55, 0x59],
[0x21, 0x55, 0x55, 0x79, 0x41],
[0x21, 0x54, 0x54, 0x78, 0x41],
[0x21, 0x55, 0x54, 0x78, 0x40],
[0x20, 0x54, 0x55, 0x79, 0x40],
[0x0C, 0x1E, 0x52, 0x72, 0x12],
[0x39, 0x55, 0x55, 0x55, 0x59],
[0x39, 0x54, 0x54, 0x54, 0x59],
[0x39, 0x55, 0x54, 0x54, 0x58],
[0x00, 0x00, 0x45, 0x7C, 0x41],
[0x00, 0x02, 0x45, 0x7D, 0x42],
[0x00, 0x01, 0x45, 0x7C, 0x40],
[0xF0, 0x29, 0x24, 0x29, 0xF0],
[0xF0, 0x28, 0x25, 0x28, 0xF0],
[0x7C, 0x54, 0x55, 0x45, 0x00],
[0x20, 0x54, 0x54, 0x7C, 0x54],
[0x7C, 0x0A, 0x09, 0x7F, 0x49],
[0x32, 0x49, 0x49, 0x49, 0x32],
[0x32, 0x48, 0x48, 0x48, 0x32],
[0x32, 0x4A, 0x48, 0x48, 0x30],
[0x3A, 0x41, 0x41, 0x21, 0x7A],
[0x3A, 0x42, 0x40, 0x20, 0x78],
[0x00, 0x9D, 0xA0, 0xA0, 0x7D],
[0x39, 0x44, 0x44, 0x44, 0x39],
[0x3D, 0x40, 0x40, 0x40, 0x3D],
[0x3C, 0x24, 0xFF, 0x24, 0x24],
[0x48, 0x7E, 0x49, 0x43, 0x66],
[0x2B, 0x2F, 0xFC, 0x2F, 0x2B],
[0xFF, 0x09, 0x29, 0xF6, 0x20],
[0xC0, 0x88, 0x7E, 0x09, 0x03],
[0x20, 0x54, 0x54, 0x79, 0x41],
[0x00, 0x00, 0x44, 0x7D, 0x41],
[0x30, 0x48, 0x48, 0x4A, 0x32],
[0x38, 0x40, 0x40, 0x22, 0x7A],
[0x00, 0x7A, 0x0A, 0x0A, 0x72],
[0x7D, 0x0D, 0x19, 0x31, 0x7D],
[0x26, 0x29, 0x29, 0x2F, 0x28],
[0x26, 0x29, 0x29, 0x29, 0x26],
[0x30, 0x48, 0x4D, 0x40, 0x20],
[0x38, 0x08, 0x08, 0x08, 0x08],
[0x08, 0x08, 0x08, 0x08, 0x38],
[0x2F, 0x10, 0xC8, 0xAC, 0xBA],
[0x2F, 0x10, 0x28, 0x34, 0xFA],
[0x00, 0x00, 0x7B, 0x00, 0x00],
[0x08, 0x14, 0x2A, 0x14, 0x22],
[0x22, 0x14, 0x2A, 0x14, 0x08],
[0xAA, 0x00, 0x55, 0x00, 0xAA],
[0xAA, 0x55, 0xAA, 0x55, 0xAA],
[0x00, 0x00, 0x00, 0xFF, 0x00],
[0x10, 0x10, 0x10, 0xFF, 0x00],
[0x14, 0x14, 0x14, 0xFF, 0x00],
[0x10, 0x10, 0xFF, 0x00, 0xFF],
[0x10, 0x10, 0xF0, 0x10, 0xF0],
[0x14, 0x14, 0x14, 0xFC, 0x00],
[0x14, 0x14, 0xF7, 0x00, 0xFF],
[0x00, 0x00, 0xFF, 0x00, 0xFF],
[0x14, 0x14, 0xF4, 0x04, 0xFC],
[0x14, 0x14, 0x17, 0x10, 0x1F],
[0x10, 0x10, 0x1F, 0x10, 0x1F],
[0x14, 0x14, 0x14, 0x1F, 0x00],
[0x10, 0x10, 0x10, 0xF0, 0x00],
[0x00, 0x00, 0x00, 0x1F, 0x10],
[0x10, 0x10, 0x10, 0x1F, 0x10],
[0x10, 0x10, 0x10, 0xF0, 0x10],
[0x00, 0x00, 0x00, 0xFF, 0x10],
[0x10, 0x10, 0x10, 0x10, 0x10],
[0x10, 0x10, 0x10, 0xFF, 0x10],
[0x00, 0x00, 0x00, 0xFF, 0x14],
[0x00, 0x00, 0xFF, 0x00, 0xFF],
[0x00, 0x00, 0x1F, 0x10, 0x17],
[0x00, 0x00, 0xFC, 0x04, 0xF4],
[0x14, 0x14, 0x17, 0x10, 0x17],
[0x14, 0x14, 0xF4, 0x04, 0xF4],
[0x00, 0x00, 0xFF, 0x00, 0xF7],
[0x14, 0x14, 0x14, 0x14, 0x14],
[0x14, 0x14, 0xF7, 0x00, 0xF7],
[0x14, 0x14, 0x14, 0x17, 0x14],
[0x10, 0x10, 0x1F, 0x10, 0x1F],
[0x14, 0x14, 0x14, 0xF4, 0x14],
[0x10, 0x10, 0xF0, 0x10, 0xF0],
[0x00, 0x00, 0x1F, 0x10, 0x1F],
[0x00, 0x00, 0x00, 0x1F, 0x14],
[0x00, 0x00, 0x00, 0xFC, 0x14],
[0x00, 0x00, 0xF0, 0x10, 0xF0],
[0x10, 0x10, 0xFF, 0x10, 0xFF],
[0x14, 0x14, 0x14, 0xFF, 0x14],
[0x10, 0x10, 0x10, 0x1F, 0x00],
[0x00, 0x00, 0x00, 0xF0, 0x10],
[0xFF, 0xFF, 0xFF, 0xFF, 0xFF],
[0xF0, 0xF0, 0xF0, 0xF0, 0xF0],
[0xFF, 0xFF, 0xFF, 0x00, 0x00],
[0x00, 0x00, 0x00, 0xFF, 0xFF],
[0x0F, 0x0F, 0x0F, 0x0F, 0x0F],
[0x38, 0x44, 0x44, 0x38, 0x44],
[0x7C, 0x2A, 0x2A, 0x3E, 0x14],
[0x7E, 0x02, 0x02, 0x06, 0x06],
[0x02, 0x7E, 0x02, 0x7E, 0x02],
[0x63, 0x55, 0x49, 0x41, 0x63],
[0x38, 0x44, 0x44, 0x3C, 0x04],
[0x40, 0x7E, 0x20, 0x1E, 0x20],
[0x06, 0x02, 0x7E, 0x02, 0x02],
[0x99, 0xA5, 0xE7, 0xA5, 0x99],
[0x1C, 0x2A, 0x49, 0x2A, 0x1C],
[0x4C, 0x72, 0x01, 0x72, 0x4C],
[0x30, 0x4A, 0x4D, 0x4D, 0x30],
[0x30, 0x48, 0x78, 0x48, 0x30],
[0xBC, 0x62, 0x5A, 0x46, 0x3D],
[0x3E, 0x49, 0x49, 0x49, 0x00],
[0x7E, 0x01, 0x01, 0x01, 0x7E],
[0x2A, 0x2A, 0x2A, 0x2A, 0x2A],
[0x44, 0x44, 0x5F, 0x44, 0x44],
[0x40, 0x51, 0x4A, 0x44, 0x40],
[0x40, 0x44, 0x4A, 0x51, 0x40],
[0x00, 0x00, 0xFF, 0x01, 0x03],
[0xE0, 0x80, 0xFF, 0x00, 0x00],
[0x08, 0x08, 0x6B, 0x6B, 0x08],
[0x36, 0x12, 0x36, 0x24, 0x36],
[0x06, 0x0F, 0x09, 0x0F, 0x06],
[0x00, 0x00, 0x18, 0x18, 0x00],
[0x00, 0x00, 0x10, 0x10, 0x00],
[0x30, 0x40, 0xFF, 0x01, 0x01],
[0x00, 0x1F, 0x01, 0x01, 0x1E],
[0x00, 0x19, 0x1D, 0x17, 0x12],
[0x00, 0x3C, 0x3C, 0x3C, 0x3C],
[0x00, 0x00, 0x00, 0x00, 0x00]
]
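# Each entry appears to be one glyph as five column bytes (bit 0 = top pixel)
# of a 5x7 bitmap font indexed by character code; e.g. font[ord('A')] is
# [0x7C, 0x12, 0x11, 0x12, 0x7C].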
|
[
"[email protected]"
] | |
a9041f6205f5ad6290737d163bd636f00dd384b6
|
bf5942872d653398dc1a91919153609654c7fb7f
|
/apps/resource/tests/test_user.py
|
898f924dc4a4d7f5d7f07a0713e3175d2cc3c5fb
|
[] |
no_license
|
henriquecf/sites_api_django
|
4293c24da202281a6c965fc677d399b90aeef31f
|
f09ca1d8517de2e2e123b8714a9cfd2b46372810
|
refs/heads/master
| 2021-05-27T21:40:24.068292 | 2014-06-09T17:57:55 | 2014-06-09T17:57:55 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 7,622 |
py
|
# -*- coding: utf-8 -*-
import random
from django.core.urlresolvers import reverse
from django.contrib.auth.models import Permission
from django.test import LiveServerTestCase
from rest_framework.test import APILiveServerTestCase
from rest_framework import status
from apps.resource.models import Site, ContribSite, AuthUser
from apps.resource.serializers import AuthUserSerializer, NestedAuthUserSerializer
from test_fixtures import user_accountuser_account_permissions_token_fixture
import test_routines
class UserTestCase(LiveServerTestCase):
def test_authuser_serializer(self):
authuser_serializer = AuthUserSerializer()
read_only_fields = ('date_joined', 'last_login', 'is_active', 'groups', 'user_permissions')
write_only_fields = ('password',)
exclude = ('is_superuser', 'is_staff')
for field in read_only_fields:
self.assertIn(field, authuser_serializer.Meta.read_only_fields)
for field in write_only_fields:
self.assertIn(field, authuser_serializer.Meta.write_only_fields)
for field in exclude:
self.assertIn(field, authuser_serializer.Meta.exclude)
self.assertTrue(authuser_serializer.get_fields()['email'].required)
def test_nested_authuser_serializer(self):
nested_authuser_serializer = NestedAuthUserSerializer()
fields = ('username', 'email', 'id')
for field in fields:
self.assertIn(field, nested_authuser_serializer.Meta.fields)
class UserAPITestCase(APILiveServerTestCase):
model = AuthUser
def setUp(self):
self.url = reverse('user-list')
self.data = {
'user': {
'username': 'other_user',
'password': '123',
'email': '[email protected]',
}
}
self.altered_data = {
'user': {
'username': 'other_user_altered',
'password': '123',
'email': '[email protected]',
}
}
user_accountuser_account_permissions_token_fixture(self)
self.set_authorization_bearer()
self.first_object_response = self.client.post(self.url, self.data)
def alter_data(self, altered_data=False):
username = 'user-{0}'.format(random.randint(1, 999999))
email = '{0}@teste.com'.format(username)
if not altered_data:
data = self.data
else:
data = self.altered_data
data.update({'user': {'username': username, 'email': email, 'password': '123'}})
def set_authorization_bearer(self, token=None):
if not token:
token = self.owner_token
self.client.credentials(HTTP_AUTHORIZATION='Bearer {0}'.format(token))
def test_api_basic_methods(self):
test_routines.test_api_basic_methods_routine(self, alter_data=True, count=2)
def test_admin_permission(self):
test_routines.test_admin_permission_routine(self)
def test_resource_permission(self):
test_routines.test_resource_permission_routine(self, alter_data=True)
def test_custom_object_permission(self):
test_routines.test_custom_object_permission_routine(self, alter_data=True)
def test_accountuser_created_has_same_account_as_request_user(self):
owner_user = AuthUser.objects.get(username=self.owner_token)
account_id = self.first_object_response.data['owner']['id']
self.assertEqual(account_id, owner_user.user.owner.id)
def test_serializer_hyperlinked_fields(self):
fields = []
test_routines.test_serializer_hyperlinked_fields_routine(self, fields=fields)
def test_model_has_custom_permission(self):
test_routines.test_model_has_custom_permission_routine(self)
def test_serializer_read_only_fields(self):
fields = []
test_routines.test_serializer_read_only_fields_routine(self, fields=fields)
def test_excluded_fields(self):
excluded_fields = ['is_superuser', 'is_staff']
for field in excluded_fields:
self.assertNotIn(field, self.first_object_response.data['user'])
def test_assign_and_unassign_groups(self):
self.assertIn('assign_groups', self.first_object_response.data)
group_url = reverse('group-list')
data = {'role': 'A test group'}
perms = Permission.objects.filter(codename__endswith='group')
for perm in perms:
self.owner.user_permissions.add(perm)
response = self.client.post(group_url, data)
self.assertEqual(status.HTTP_201_CREATED, response.status_code, response.data)
group_id = response.data['group']['id']
groups = {'groups': [group_id]}
response = self.client.post(self.first_object_response.data['assign_groups'], groups)
self.assertEqual(status.HTTP_200_OK, response.status_code, response.data)
user = AuthUser.objects.get(id=self.first_object_response.data['user']['id'])
self.assertIn((group_id,), user.groups.values_list('id'))
self.assertIn('unassign_groups', self.first_object_response.data)
response = self.client.post(self.first_object_response.data['unassign_groups'], groups)
self.assertEqual(status.HTTP_200_OK, response.status_code, response.data)
self.assertNotIn((group_id,), user.groups.values_list('id'))
def test_assign_unassign_permissions(self):
self.assertIn('assign_permissions', self.first_object_response.data)
perms = Permission.objects.filter(codename__endswith='group')
perms_ids = []
for perm in perms:
perms_ids.append(perm.id)
permissions = {'permissions': perms_ids}
response = self.client.post(self.first_object_response.data['assign_permissions'], permissions)
self.assertEqual(status.HTTP_200_OK, response.status_code, response.data)
user = AuthUser.objects.get(id=self.first_object_response.data['user']['id'])
for perm in perms:
self.assertIn((perm.id,), user.user_permissions.values_list('id'))
self.assertIn('unassign_permissions', self.first_object_response.data)
response = self.client.post(self.first_object_response.data['unassign_permissions'], permissions)
self.assertEqual(status.HTTP_200_OK, response.status_code, response.data)
for perm in perms:
self.assertNotIn((perm.id,), user.user_permissions.values_list('id'))
def test_owner_must_not_manage_its_permissions(self):
owner_url = reverse('user-detail', args=(self.owner.user.id,))
site, created = ContribSite.objects.get_or_create(domain='testserver')
Site.objects.get_or_create(site=site, owner=self.owner, author=self.owner)
self.owner.user.sites.add(site)
self.owner.save()
owner_response = self.client.get(owner_url)
response = self.client.post(owner_response.data['assign_permissions'], {'permissions': [1]})
self.assertEqual(status.HTTP_400_BAD_REQUEST, response.status_code, response.data)
response = self.client.post(owner_response.data['unassign_permissions'], {'permissions': [1]})
self.assertEqual(status.HTTP_400_BAD_REQUEST, response.status_code, response.data)
response = self.client.post(owner_response.data['assign_groups'], {'groups': [1]})
self.assertEqual(status.HTTP_400_BAD_REQUEST, response.status_code, response.data)
response = self.client.post(owner_response.data['unassign_groups'], {'groups': [1]})
self.assertEqual(status.HTTP_400_BAD_REQUEST, response.status_code, response.data)
|
[
"[email protected]"
] | |
e49b0b1f74b2241ff1009361b14ff4039e1f2e49
|
79a6ffa19f88eb38ae029b7880d810021b10e849
|
/backend/martin_helder/services/state_service.py
|
183c9b646199194eea580183af483c61f6600cfc
|
[] |
no_license
|
JoaoAlvaroFerreira/FEUP-LGP
|
c77ff1c25b570aa03f9f5823649959d39c8c08f5
|
941e8b2870f8724db3d5103dda5157fd597cfcc7
|
refs/heads/master
| 2022-12-06T11:58:52.397938 | 2020-08-24T20:16:47 | 2020-08-24T20:16:47 | 290,025,191 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,368 |
py
|
"""
Service layer for state related operations
"""
from rest_framework.exceptions import ValidationError
from martin_helder.models import State
from martin_helder.views.view_utils import Utils
class StateService:
"""
Service class for state related operations
"""
@staticmethod
def add_state(name, description):
"""
        Method to create a new state
        :param name: State name
        :param description: State description
        :return: ID of the created state
"""
new_state = State.objects.create(name=name, description=description)
new_state.save()
return new_state.id
@staticmethod
def check_state(name, description):
"""
        Returns the state if it exists, otherwise None
:param name: State name
:param description: State description
:return: State found or none
"""
state = State.objects.filter(name=name, description=description)
if state.exists():
return state
return None
@staticmethod
def is_valid_state(id_state):
"""
Checks if the specified state exists
:param id_state: ID of state to be checked
"""
Utils.validate_uuid(id_state)
if not State.objects.filter(id=id_state).exists():
raise ValidationError("The state is not valid!")
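# Minimal usage sketch (names are illustrative):
#
#   state_id = StateService.add_state('open', 'Newly created request')
#   StateService.is_valid_state(state_id)  # raises ValidationError if unknown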
|
[
"[email protected]"
] | |
9f572f21ece74b880a18dc64e33f462fe5217cf1
|
d89ab48367395ac8efca49c4332224a98434b261
|
/rocketball/dna.py
|
ad17f4b439354ac12e03014d8870c1cb1ae325f0
|
[] |
no_license
|
mohanmanju/pyGame
|
c741e1b2644945387948064cb756a967c0b0eeb9
|
4cb14b7519c8535ac3fb16422434690240f7f0c0
|
refs/heads/master
| 2021-01-02T09:35:22.249080 | 2017-08-22T17:46:19 | 2017-08-22T17:46:19 | 99,253,094 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 136 |
py
|
import random

class Dna:
    def __init__(self):
        # four random genes, each uniform in [0, 1)
        self.genes = [random.uniform(0, 1) for _ in range(0, 4)]
|
[
"[email protected]"
] | |
c0705ef0d9e607dd455bfaf0f2917fd5a8ebcf1d
|
88c39688db3835ed45c8efe43d4de2b8ade66b62
|
/models/utils.py
|
c48b64d32d355502cd546c35e8fb19784b4948fd
|
[] |
no_license
|
huyen-spec/NEGCUT
|
f87a77e65b24fe80a1d66e54f6561df528ff6785
|
b6f3c65e9d4be747567c9baba415cadc320071a2
|
refs/heads/main
| 2023-09-03T19:57:47.056295 | 2021-10-21T13:42:22 | 2021-10-21T13:42:22 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 11,245 |
py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import init
import functools
from torch.optim import lr_scheduler
import numpy as np
###############################################################################
# Helper Functions
###############################################################################
def get_filter(filt_size=3):
if(filt_size == 1):
a = np.array([1., ])
elif(filt_size == 2):
a = np.array([1., 1.])
elif(filt_size == 3):
a = np.array([1., 2., 1.])
elif(filt_size == 4):
a = np.array([1., 3., 3., 1.])
elif(filt_size == 5):
a = np.array([1., 4., 6., 4., 1.])
elif(filt_size == 6):
a = np.array([1., 5., 10., 10., 5., 1.])
    elif(filt_size == 7):
        a = np.array([1., 6., 15., 20., 15., 6., 1.])
    else:
        # guard against an otherwise-unbound `a` for unsupported sizes
        raise ValueError('filter size [%d] not supported' % filt_size)
    filt = torch.Tensor(a[:, None] * a[None, :])
filt = filt / torch.sum(filt)
return filt
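# Example: filt_size=3 gives the normalized binomial kernel
#   1/16 * [[1., 2., 1.],
#           [2., 4., 2.],
#           [1., 2., 1.]]
# (outer product of [1, 2, 1] with itself, divided by its sum).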
class Downsample(nn.Module):
def __init__(self, channels, pad_type='reflect', filt_size=3, stride=2, pad_off=0):
super(Downsample, self).__init__()
self.filt_size = filt_size
self.pad_off = pad_off
self.pad_sizes = [int(1. * (filt_size - 1) / 2), int(np.ceil(1. * (filt_size - 1) / 2)), int(1. * (filt_size - 1) / 2), int(np.ceil(1. * (filt_size - 1) / 2))]
self.pad_sizes = [pad_size + pad_off for pad_size in self.pad_sizes]
self.stride = stride
self.off = int((self.stride - 1) / 2.)
self.channels = channels
filt = get_filter(filt_size=self.filt_size)
self.register_buffer('filt', filt[None, None, :, :].repeat((self.channels, 1, 1, 1)))
self.pad = get_pad_layer(pad_type)(self.pad_sizes)
def forward(self, inp):
if(self.filt_size == 1):
if(self.pad_off == 0):
return inp[:, :, ::self.stride, ::self.stride]
else:
return self.pad(inp)[:, :, ::self.stride, ::self.stride]
else:
return F.conv2d(self.pad(inp), self.filt, stride=self.stride, groups=inp.shape[1])
class Upsample(nn.Module):
def __init__(self, channels, pad_type='repl', filt_size=4, stride=2):
super(Upsample, self).__init__()
self.filt_size = filt_size
self.filt_odd = np.mod(filt_size, 2) == 1
self.pad_size = int((filt_size - 1) / 2)
self.stride = stride
self.off = int((self.stride - 1) / 2.)
self.channels = channels
filt = get_filter(filt_size=self.filt_size) * (stride**2)
self.register_buffer('filt', filt[None, None, :, :].repeat((self.channels, 1, 1, 1)))
self.pad = get_pad_layer(pad_type)([1, 1, 1, 1])
def forward(self, inp):
ret_val = F.conv_transpose2d(self.pad(inp), self.filt, stride=self.stride, padding=1 + self.pad_size, groups=inp.shape[1])[:, :, 1:, 1:]
if(self.filt_odd):
return ret_val
else:
return ret_val[:, :, :-1, :-1]
def get_pad_layer(pad_type):
if(pad_type in ['refl', 'reflect']):
PadLayer = nn.ReflectionPad2d
elif(pad_type in ['repl', 'replicate']):
PadLayer = nn.ReplicationPad2d
elif(pad_type == 'zero'):
PadLayer = nn.ZeroPad2d
else:
print('Pad type [%s] not recognized' % pad_type)
return PadLayer
def get_scheduler(optimizer, opt):
"""Return a learning rate scheduler
Parameters:
optimizer -- the optimizer of the network
opt (option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions.
opt.lr_policy is the name of learning rate policy: linear | step | plateau | cosine
For 'linear', we keep the same learning rate for the first <opt.n_epochs> epochs
and linearly decay the rate to zero over the next <opt.n_epochs_decay> epochs.
For other schedulers (step, plateau, and cosine), we use the default PyTorch schedulers.
See https://pytorch.org/docs/stable/optim.html for more details.
"""
if opt.lr_policy == 'linear':
def lambda_rule(epoch):
lr_l = 1.0 - max(0, epoch + opt.epoch_count - opt.n_epochs) / float(opt.n_epochs_decay + 1)
return lr_l
scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda_rule)
elif opt.lr_policy == 'step':
scheduler = lr_scheduler.StepLR(optimizer, step_size=opt.lr_decay_iters, gamma=0.1)
elif opt.lr_policy == 'plateau':
scheduler = lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=0.2, threshold=0.01, patience=5)
elif opt.lr_policy == 'cosine':
scheduler = lr_scheduler.CosineAnnealingLR(optimizer, T_max=opt.n_epochs, eta_min=0)
else:
return NotImplementedError('learning rate policy [%s] is not implemented', opt.lr_policy)
return scheduler
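# Example of the 'linear' policy (illustrative settings): with epoch_count=1,
# n_epochs=100 and n_epochs_decay=100, lambda_rule returns 1.0 through epoch 99,
# then decays roughly linearly -- e.g. (1 - 51/101) ~= 0.50 at epoch 150 --
# reaching ~0.01 at epoch 199.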
def init_weights(net, init_type='normal', init_gain=0.02, debug=False):
"""Initialize network weights.
Parameters:
net (network) -- network to be initialized
init_type (str) -- the name of an initialization method: normal | xavier | kaiming | orthogonal
init_gain (float) -- scaling factor for normal, xavier and orthogonal.
We use 'normal' in the original pix2pix and CycleGAN paper. But xavier and kaiming might
work better for some applications. Feel free to try yourself.
"""
def init_func(m): # define the initialization function
classname = m.__class__.__name__
if hasattr(m, 'weight') and (classname.find('Conv') != -1 or classname.find('Linear') != -1):
if debug:
print(classname)
if init_type == 'normal':
init.normal_(m.weight.data, 0.0, init_gain)
elif init_type == 'xavier':
init.xavier_normal_(m.weight.data, gain=init_gain)
elif init_type == 'kaiming':
init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')
elif init_type == 'orthogonal':
init.orthogonal_(m.weight.data, gain=init_gain)
else:
raise NotImplementedError('initialization method [%s] is not implemented' % init_type)
if hasattr(m, 'bias') and m.bias is not None:
init.constant_(m.bias.data, 0.0)
elif classname.find('BatchNorm2d') != -1: # BatchNorm Layer's weight is not a matrix; only normal distribution applies.
init.normal_(m.weight.data, 1.0, init_gain)
init.constant_(m.bias.data, 0.0)
net.apply(init_func) # apply the initialization function <init_func>
def init_net(net, init_type='normal', init_gain=0.02, gpu_ids=[], debug=False, initialize_weights=True):
"""Initialize a network: 1. register CPU/GPU device (with multi-GPU support); 2. initialize the network weights
Parameters:
net (network) -- the network to be initialized
init_type (str) -- the name of an initialization method: normal | xavier | kaiming | orthogonal
gain (float) -- scaling factor for normal, xavier and orthogonal.
gpu_ids (int list) -- which GPUs the network runs on: e.g., 0,1,2
Return an initialized network.
"""
if len(gpu_ids) > 0:
assert(torch.cuda.is_available())
net.to(gpu_ids[0])
# if not amp:
# net = torch.nn.DataParallel(net, gpu_ids) # multi-GPUs for non-AMP training
if initialize_weights:
init_weights(net, init_type, init_gain=init_gain, debug=debug)
return net
##################################################################################
# Normalization layers
##################################################################################
def get_norm_layer(norm_type='instance'):
"""Return a normalization layer
Parameters:
norm_type (str) -- the name of the normalization layer: batch | instance | none
For BatchNorm, we use learnable affine parameters and track running statistics (mean/stddev).
For InstanceNorm, we do not use learnable affine parameters. We do not track running statistics.
"""
if norm_type == 'batch':
norm_layer = functools.partial(nn.BatchNorm2d, affine=True, track_running_stats=True)
elif norm_type == 'instance':
norm_layer = functools.partial(nn.InstanceNorm2d, affine=False, track_running_stats=False)
elif norm_type == 'none':
def norm_layer(x):
return Identity()
else:
raise NotImplementedError('normalization layer [%s] is not found' % norm_type)
return norm_layer
class Identity(nn.Module):
def forward(self, x):
return x
##################################################################################
# Basic Blocks
##################################################################################
class ResnetBlock(nn.Module):
"""Define a Resnet block"""
def __init__(self, dim, padding_type, norm_layer, use_dropout, use_bias):
"""Initialize the Resnet block
A resnet block is a conv block with skip connections
We construct a conv block with build_conv_block function,
and implement skip connections in <forward> function.
Original Resnet paper: https://arxiv.org/pdf/1512.03385.pdf
"""
super(ResnetBlock, self).__init__()
self.conv_block = self.build_conv_block(dim, padding_type, norm_layer, use_dropout, use_bias)
def build_conv_block(self, dim, padding_type, norm_layer, use_dropout, use_bias):
"""Construct a convolutional block.
Parameters:
dim (int) -- the number of channels in the conv layer.
padding_type (str) -- the name of padding layer: reflect | replicate | zero
norm_layer -- normalization layer
use_dropout (bool) -- if use dropout layers.
use_bias (bool) -- if the conv layer uses bias or not
Returns a conv block (with a conv layer, a normalization layer, and a non-linearity layer (ReLU))
"""
conv_block = []
p = 0
if padding_type == 'reflect':
conv_block += [nn.ReflectionPad2d(1)]
elif padding_type == 'replicate':
conv_block += [nn.ReplicationPad2d(1)]
elif padding_type == 'zero':
p = 1
else:
raise NotImplementedError('padding [%s] is not implemented' % padding_type)
conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias), norm_layer(dim), nn.ReLU(True)]
if use_dropout:
conv_block += [nn.Dropout(0.5)]
p = 0
if padding_type == 'reflect':
conv_block += [nn.ReflectionPad2d(1)]
elif padding_type == 'replicate':
conv_block += [nn.ReplicationPad2d(1)]
elif padding_type == 'zero':
p = 1
else:
raise NotImplementedError('padding [%s] is not implemented' % padding_type)
conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias), norm_layer(dim)]
return nn.Sequential(*conv_block)
def forward(self, x):
"""Forward function (with skip connections)"""
out = x + self.conv_block(x) # add skip connections
return out
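# Usage sketch (shapes are illustrative): a ResnetBlock preserves spatial size
# and channel count, so input and output tensors match:
#   block = ResnetBlock(64, 'reflect', get_norm_layer('instance'),
#                       use_dropout=False, use_bias=True)
#   y = block(torch.randn(1, 64, 32, 32))   # y.shape == (1, 64, 32, 32)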
|
[
"[email protected]"
] | |
5f5e98e0204db775e5b06fd86453f2a62c41f96b
|
6dc685fdb6f4a556225f13a1d26170ee203e9eb6
|
/Windows2016Lab/scripts/Service_Windows2016_Action___create___Task_set_parameters.py
|
57d63f95d0ebaa657302006a67576086a8cb18df
|
[
"MIT"
] |
permissive
|
amaniai/calm
|
dffe6227af4c9aa3d95a08b059eac619b2180889
|
fefc8b9f75e098daa4c88c7c4570495ce6be9ee4
|
refs/heads/master
| 2023-08-15T17:52:50.555026 | 2021-10-10T08:33:01 | 2021-10-10T08:33:01 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 549 |
py
|
username = 'user-{}'.format(_construct_random_password(8,numDigits=4, numLetters=4, numPuncs=0, numCaps=0).lower())
password = _construct_random_password(10,upper=14, numDigits=4)
print('ACCESS_USERNAME={}'.format(username))
print('ACCESS_PASSWORD={}'.format(password))
calm_index = int('@@{calm_array_index}@@')
email_list = '''@@{EMAIL_LIST}@@'''
clean_list = [x for x in email_list.splitlines() if x.strip(' ')]
if calm_index < len(clean_list):
print('EMAIL={}'.format(clean_list[calm_index]))
else:
print('EMAIL={}'.format(clean_list[0]))
|
[
"[email protected]"
] | |
f98630a744fd25a3a7b15fa669dee4269a993028
|
87d1e7edbdf86ab8e5c3558308fbdc24c3e71552
|
/pyconduit/categories/variable.py
|
491c9f63b6e4122d02f11d426b043a9079d3344f
|
[
"MIT"
] |
permissive
|
rudrathedev/pyconduit
|
3b53c1aa698335123a78fcd4d10488c4cb89fc2f
|
deef2b422bd976dbd006c2a5b1774a7c17657771
|
refs/heads/main
| 2023-05-10T09:21:14.860683 | 2021-06-23T23:49:05 | 2021-06-23T23:49:53 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,355 |
py
|
# MIT License
#
# Copyright (c) 2021 Yusuf Cihan
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from pyconduit.other import EMPTY, ConduitError
from typing import Any, List
from pyconduit.category import ConduitCategory
from pyconduit.category import ConduitBlock as conduitblock
from pyconduit.conduit import Conduit
from pyconduit.enums import ConduitStatus
from pyconduit.step import ConduitVariable
# VARIABLE
# Contains blocks to access job variables.
class Variable(ConduitCategory):
"""
Contains blocks to access job variables.
"""
@conduitblock.make(name = "set")
def set_(job__ : Conduit, *, name : str, value : Any = None) -> None:
"""
        Sets a value to a variable. If the variable doesn't exist, creates a new one.
Args:
name:
Name of the variable.
value:
Value of the variable.
"""
job__.variables[name] = ConduitVariable(value)
@conduitblock.make
def create(job__ : Conduit, *, name : str) -> None:
"""
        Creates a new blank variable; if the variable already exists, raises an error.
Args:
name:
Name of the variable.
"""
        assert name not in job__.variables, name  # raise if the variable already exists
job__.variables[name] = ConduitVariable(None)
@conduitblock.make
def get(job__ : Conduit, *, name : str, default : Any = EMPTY) -> ConduitVariable:
"""
        Gets the variable by its name. Raises an error if no default value is provided and the variable doesn't exist.
Args:
name:
Name of the variable.
default:
                If provided, it is returned when the variable is not found. If not provided, an
                error is raised when the variable doesn't exist.
"""
if default != EMPTY:
return job__.variables.get(name, default)
else:
return job__.variables[name]
@conduitblock.make
def delete(job__ : Conduit, *, name : str, silent : bool = True) -> None:
"""
        Deletes the variable by its name. Raises an error if the `silent` flag is set to `False` and the variable doesn't exist.
Args:
name:
Name of the variable.
silent:
                Raises an error if the `silent` flag is set to `False` and the variable doesn't exist.
"""
if silent:
if name in job__.variables:
del job__.variables[name]
else:
del job__.variables[name]
@conduitblock.make
def list_names(job__ : Conduit) -> List[str]:
"""
Lists the variable names.
"""
return list(job__.variables.keys())
@conduitblock.make
def list_values(job__ : Conduit) -> List[ConduitVariable]:
"""
Lists the variable values.
"""
return list(job__.variables.values())
@conduitblock.make
def is_exists(job__ : Conduit, *, name : str) -> bool:
"""
Checks if variable exists.
Args:
name:
Name of the variable.
"""
return name in job__.variables
@conduitblock.make
def count(job__ : Conduit) -> int:
"""
Counts the variables.
"""
return len(job__.variables.keys())
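    # Usage sketch (hedged): inside a conduit job these blocks operate on
    # job.variables, e.g. set_(job, name="count", value=0) stores
    # ConduitVariable(0) under "count", after which is_exists(job, name="count")
    # returns True and count(job) reflects the new entry.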
|
[
"[email protected]"
] | |
76b78408bd66829efe2457f62a6350b9979bf5c0
|
869a29497f5b5c4a343b416cecef43951871dbb9
|
/PyLICS/classifier/treeNode.py
|
e75b1a2e77da6890d2b91529cd0abd49070b3003
|
[
"Apache-2.0"
] |
permissive
|
Toshque/EvoForest
|
93386386c90b9593f64d3e925f8c91f37b45e558
|
af754745dec549d0d13f65cddeee4707c5a84290
|
refs/heads/master
| 2020-12-30T18:50:40.260134 | 2014-06-23T19:21:47 | 2014-06-23T19:21:47 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,386 |
py
|
# -*- coding: utf-8 -*-
"""
Created on Mon Oct 28 21:42:50 2013
@author: sasha_000
"""
import helpers
class treeNode:
def __init__(self,samples,keyStatements,majorant = False):
'''a recursive decision tree class'''
self.isTerminal = True
self.isMajorant = majorant
self.dichotomy = None
self.samples = set(samples)
self.keyStatements = keyStatements
self.updated = False
self.calculateOutput()
def expand(self,dichotomy):
        '''extend the classification by splitting this node with a given dichotomy'''
self.dichotomy = dichotomy
posSamples = set()
negSamples = set()
for sample in self.samples:
if dichotomy.extractValue(sample):
posSamples.add(sample)
else:
negSamples.add(sample)
self.childPositive = treeNode(posSamples,self.keyStatements,self.isMajorant)
self.childNegative = treeNode(negSamples,self.keyStatements,self.isMajorant)
self.isTerminal = False
def classify(self,sample):
'''Classify a sample according to this classification rules'''
if not self.isTerminal:
cls = self.dichotomy.extractValue(sample)
if cls: return self.childPositive.classify(sample)
else: return self.childNegative.classify(sample)
else:
return self.result
def addSample(self,sample):
self.samples.add(sample)
if not self.isTerminal:
if self.dichotomy.extractValue(sample):
self.childPositive.addSample(sample)
else:
self.childNegative.addSample(sample)
self.updated = False
def removeSample(self,sample):
self.samples.remove(sample)
if not self.isTerminal:
if self.dichotomy.extractValue(sample):
self.childPositive.removeSample(sample)
else:
self.childNegative.removeSample(sample)
self.updated = False
def calculateOutput(self):
'''updates result and the entropy of a node'''
if self.updated:
return self.result
if not self.isMajorant:
fchoose = helpers.getAverage
else:
fchoose = helpers.getMajorant
self.result = fchoose(self.keyStatements,self.samples)
self.entropy = helpers.getBoolEntropy(self.samples,self.keyStatements)
self.updated = True
return self.result
def getEntropy(self):
if not self.updated:
self.calculateOutput()
return self.entropy
def getInformationGain(self):
'''information gain of a given dichotomy for the last update'''
assert (not self.isTerminal)
return helpers.getInformationGain(self)
def visualise(self,encoder = None):
classi = self
if self.isTerminal:
return ""
resString = ""
classi.depth = 1
openList = [classi.childNegative,classi.childPositive]
resString+=( classi.depth*2*' '+'IF'+ classi.dichotomy.toString().replace('op_','')+':'+'\n')
classi.childPositive.depth =2
classi.childPositive.pos = True
classi.childNegative.depth =2
classi.childNegative.pos = False
while len(openList) !=0:
cur =openList.pop(len(openList)-1)
if cur.pos:
            prefix = 'THEN '
else:
prefix = 'ELSE '
if not cur.isTerminal:
statement = cur.dichotomy.toString()
resString+= (cur.depth*2*' '+prefix+'IF'+ statement.replace('op_','')+':'+'\n') #until 5.4.2014 there was +str(cur.result) before +':'
cur.childNegative.depth = cur.depth+1
cur.childPositive.pos = True
cur.childPositive.depth = cur.depth+1
cur.childNegative.pos = False
openList.append(cur.childNegative)
openList.append(cur.childPositive)
else:
res = {i.toString():cur.result[i] for i in cur.result}
                if encoder is not None:
                    try:
                        res = encoder.decode(res)
                    except Exception:
                        pass
resString+= (cur.depth*2*' '+prefix+'result ='+str(res)+'\n')
return resString
|
[
"[email protected]"
] | |
a4204b8de8fa12aaab8d15d25093be83ff68d98f
|
05010198ebb8b61fe7a96e5e074125d65850e527
|
/geometricka-kalkulacka.py
|
5ca05cef99a9e3aa5eae3adc6035439ee1d4b801
|
[] |
no_license
|
Jakub-program/python-beginningforme
|
9be83729e6f9d0e7a760f8802866d9c0aa365d8c
|
69bfb4dd40bc2213f74eebe497dce8ede1002f3c
|
refs/heads/main
| 2023-01-28T20:58:40.139546 | 2020-12-13T18:13:40 | 2020-12-13T18:13:40 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 399 |
py
|
strana = float(input('Enter the side of the square in centimetres: '))
cislo_je_spravne = strana > 0
if cislo_je_spravne:
    print('The perimeter of a square with side', strana, 'is', 4 * strana, 'cm')
    print('The area of a square with side', strana, 'is', strana * strana, 'cm2')
else:
    print('The side must be positive, otherwise it will not make a square!')
print('Thank you for using the geometry calculator.')
|
[
"[email protected]"
] | |
001515a2694bbc65e444cc746ce8266e4cb6b53a
|
5850ae6560f23640645f23c5276b037daf45aa64
|
/generate_reports.py
|
96437e8cf4943f58d777c836bd7683a016b3b992
|
[] |
no_license
|
hayashikan/irs-program
|
a38237f513941da1f58ac5954c57425c47f2a94f
|
8c9f3f8417f774e7601475ce75e7ecdb9d6763d6
|
refs/heads/master
| 2021-01-11T12:18:34.706313 | 2017-03-09T14:48:44 | 2017-03-09T14:48:44 | 76,469,288 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 869 |
py
|
# -*- coding: utf-8 -*-
"""
Project: MAM Integrated Reporting System
Author: LIN, Han (Jo)
"""
# import modules
import os
import sys
import inspect
# import modules in subfolder
cmd_subfolder = os.path.realpath(os.path.abspath(os.path.join(
os.path.split(inspect.getfile(inspect.currentframe()))[0], "resources")))
if cmd_subfolder not in sys.path:
sys.path.insert(0, cmd_subfolder)
# import IRS in subfolder "resources"
from IRS import Integrated_Reporting_System
# DO NOT MODIFY CODE BEFORE HERE -----------------------------------------
# run report by following code -------------------------------------------
# 'default.mamspec' file is in the same folder as this program
# you can change the file name as the .mamspec file in this folder
IRS = Integrated_Reporting_System('default.mamspec')
IRS.generate_report() # this command is to generate report
|
[
"[email protected]"
] | |
e1328ca78def8a7232a28d56a67044ccf8e5215d
|
78b36ac94beb3d699ecb85071804807b80cac6b6
|
/lab4.py
|
4b77477ebf73ad2e4c7c6c4234a194ff629dbde0
|
[] |
no_license
|
FolloW12587/moscalenco_labs
|
eee536a515599ed526705581173f69b6dfc0b533
|
e84a0236d7747a241016eaecb5a93792135d1db8
|
refs/heads/main
| 2023-04-06T07:15:00.548929 | 2021-04-09T01:58:30 | 2021-04-09T01:58:30 | 352,858,634 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,882 |
py
|
'''matrix = [
[1,1,0,0,1,1,1,0,0,1,0,1,0],
[0,0,1,1,0,0,0,0,1,0,1,0,1],
[0,1,0,1,0,1,0,1,0,1,0,1,1],
[1,1,0,0,1,0,1,0,0,1,0,1,1],
[0,0,1,1,0,0,0,1,1,0,1,0,0],
[0,1,0,1,0,1,0,1,0,0,1,1,1]
]'''
matrix = [
[1,1,1,1,1,1,1,1,1,1,1,1],
[1,1,1,1,1,1,1,1,1,1,1,1],
[0,1,0,1,0,1,0,1,0,1,0,1],
[0,1,0,1,0,1,0,1,0,1,0,1],
[0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0]
]
'''
matrix = [[1, 1, 0, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1],
[0, 0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 1, 0],
[0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0],
[1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1],
[0, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 1, 0],
[1, 1, 0, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1],
[1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1]]
'''
if __name__ == "__main__":
# R1 = int(input("Введите R1 (Рекомендуется {}): ".format(round(len(matrix[0])/2 + len(matrix[0])/4)))) #маска - должна отличаться от последней найденной маски на R1 и не совпадает со всеми масками по R2
# R2 = int(input("Введите R2: (Рекомендуется {}) ".format(round(len(matrix[0])/2 + len(matrix[0])/4))))
R1 = round(len(matrix[0])/2 + len(matrix[0])/4)
R2 = round(len(matrix[0])/2 + len(matrix[0])/4)
masks = [0,]
for i in range(1, len(matrix)):
mask = masks[-1]
if i == mask:
continue
for m in masks:
a = True
div = 0
for k in range(len(matrix[i])):
if matrix[i][k] != matrix[m][k]:
div += 1
if div < R2:
a = False
break
if not a:
continue
div = 0
for j in range(len(matrix[i])):
if matrix[i][j] != matrix[mask][j]:
div += 1
if div >= R1:
masks.append(i)
print("Маски: ", list(map(lambda x: x+1, masks)))
codes = []
for i in range(len(matrix)):
code = []
for j in range(len(masks)):
if i == masks[j]:
code.append(1)
continue
comp = 0
for k in range(len(matrix[i])):
if matrix[i][k] == matrix[masks[j]][k]:
comp += 1
if comp >= 9:
code.append(1)
else:
code.append(0)
codes.append(code)
print("Коды строк:")
vectors = []
used = []
for code in codes:
print(code)
if code in used:
continue
used.append(code)
c = codes.count(code)
vector = []
prev = -1
for i in range(c):
ind = codes.index(code, prev+1)
vector.append(ind+1)
prev = ind
vectors.append(tuple(vector))
print("Кластеры: " + str(vectors)[1:-1])
|
[
"[email protected]"
] | |
db889d7c5e5cba1d1b2ed71e137b42acf283c13f
|
b89ec2839b4a6bd4e2d774f64be9138f4b71a97e
|
/dataent/patches/v7_2/set_doctype_engine.py
|
6de22a5c653dc5755560998976ce23c246a2026d
|
[
"MIT"
] |
permissive
|
dataent/dataent
|
ec0e9a21d864bc0f7413ea39670584109c971855
|
c41bd5942ffe5513f4d921c4c0595c84bbc422b4
|
refs/heads/master
| 2022-12-14T08:33:48.008587 | 2019-07-09T18:49:21 | 2019-07-09T18:49:21 | 195,729,981 | 0 | 0 |
MIT
| 2022-12-09T17:23:49 | 2019-07-08T03:26:28 |
Python
|
UTF-8
|
Python
| false | false | 231 |
py
|
from __future__ import unicode_literals
import dataent
def execute():
for t in dataent.db.sql('show table status'):
if t[0].startswith('tab'):
dataent.db.sql('update tabDocType set engine=%s where name=%s', (t[1], t[0][3:]))
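# Note: `show table status` rows start with (Name, Engine, ...), so t[0] is the
# table name and t[1] its storage engine; DocType names drop the 'tab' prefix.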
|
[
"[email protected]"
] | |
734245f9722d87772c906af51f97abc8ae257ed2
|
4c5ab1dac432f7b212cf608e5647e3dd2f2c8dcc
|
/game/deck.py
|
c8bc915205606bc35b67d686d009c2dc4565e5d3
|
[] |
no_license
|
erikvanegmond/Machiavelli
|
fe0c342067c29380e207b62255cb117bb53339fe
|
29405eb36f4b1f9d7283deaa64c2deb068a1f458
|
refs/heads/master
| 2021-01-10T12:38:46.178820 | 2016-02-28T15:43:58 | 2016-02-28T15:43:58 | 50,052,853 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 778 |
py
|
import json
import random
import os
from game.card import Card
class Deck:
deck = []
trash = []
def __init__(self):
self.read_deck()
def __repr__(self):
return ", ".join(self.deck)
def draw_card(self):
card = self.deck.pop()
return card
def read_deck(self):
self.deck = []
root_path = os.path.dirname(os.path.abspath(__file__))
deck_file = 'start_deck.json'
with open(root_path+"\\"+deck_file) as f:
deck_list = json.load(f)
for card in deck_list:
self.deck.append(
Card(card['name'], card['cost'], card['color'], card['value'], card['special_ability']))
random.shuffle(self.deck)
deck = Deck()
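# Example: the module-level deck is shuffled on construction, so drawing pops a
# random top card:
#   card = deck.draw_card()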
|
[
"[email protected]"
] | |
9fc4ee68aa42d0c41c149c5974d84484312cf2f3
|
8c0d2c60b3c1cb0df78d5c33faed23bfa35e5fbd
|
/k8s_client.py
|
9ff6845a5a0923d6eb2f968dded6af147191850d
|
[] |
no_license
|
relent0r/k8s_play
|
58d6cce2747d7fe5407c67ee1679da0e0dcfd95a
|
15af4468e04beabc8c8e3163a18f9466eebf0b69
|
refs/heads/master
| 2020-08-04T03:32:46.356682 | 2019-10-03T20:30:38 | 2019-10-03T20:30:38 | 211,988,882 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,323 |
py
|
from kubernetes import client, config
import k8s_config
import logging
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)
class k8sclient():
def __init__(self):
self.host = k8s_config.host
self.port = k8s_config.port
self.token = k8s_config.token
self.schema = "https"
self.uri = '{0}://{1}:{2}/k8s/clusters/local' .format(
self.schema,
self.host,
self.port
)
self.k8s_configuration = client.Configuration()
self.k8s_configuration.host = self.uri
self.k8s_configuration.verify_ssl = k8s_config.verify_ssl
self.k8s_configuration.api_key = {"authorization" : "Bearer " + self.token}
self.api_client = client.ApiClient(self.k8s_configuration)
def get_pods_list(self):
v1 = client.CoreV1Api(self.api_client)
ret = v1.list_pod_for_all_namespaces(watch=False)
for i in ret.items:
print("Pod Name : {0} - Pod IP : {1} - Pod Namespace : {2}" .format(i.metadata.name, i.status.pod_ip, i.metadata.namespace))
print('Fetched')
return 'nothing'
def get_component_status(self):
v1 = client.CoreV1Api(self.api_client)
ret = v1.list_component_status()
for i in ret.items:
logger.info("Component Name : {0} - Status : {1}" .format(i.metadata.name, i.conditions[0].message))
return 'nothing'
def get_namespaces(self):
v1 = client.CoreV1Api(self.api_client)
ret = v1.list_namespace()
for i in ret.items:
logger.info("Namespace : {0}" .format(i.metadata.name))
try:
logger.info("Rancher ID : {0}" .format(i.metadata.annotations['field.cattle.io/projectId']))
except KeyError as e:
logger.info("No Rancher ID")
def get_endpoints(self):
v1 = client.CoreV1Api(self.api_client)
ret = v1.list_endpoints_for_all_namespaces()
        for i in ret.items:  # V1EndpointsList exposes .items directly (no private _value)
print(i)
return 'nothing'
def create_service(self, name_space, service_name, host_port):
v1 = client.CoreV1Api(self.api_client)
        service = client.V1Service()
        service_port = client.V1ServicePort(port=host_port)
        service_spec = client.V1ServiceSpec()
        service.api_version = "v1"
        service.kind = "Service"
        service_spec.type = "LoadBalancer"  # the service type belongs on the spec, not on the Service object
        service_spec.ports = [service_port]
        service.metadata = client.V1ObjectMeta(name=service_name)
        service.spec = service_spec
try:
response = v1.create_namespaced_service(namespace=name_space, body=service)
logger.debug(response)
except Exception as e:
logger.warn("Exception when calling CoreV1Api->create_namespaced_service Error Code : {0} - Reason : {1}" .format(e.status, e.reason))
logger.debug("Error response : {0}" .format(e.body))
def delete_service(self, name_space, service_name):
v1 = client.CoreV1Api(self.api_client)
try:
response = v1.delete_namespaced_service(service_name, name_space)
logger.info("Service Delete : {}" .format(response.status))
logger.debug(response)
except Exception as e:
logger.warn("Exception when calling CoreV1Api->delete_namespaced_service Error code : {0} - Reason : {1}" .format(e.status, e.reason))
def get_services(self, name_space):
v1 = client.CoreV1Api(self.api_client)
try:
response = v1.list_namespaced_service(name_space)
logger.info("Success : " .format(response.status))
logger.debug(response)
except Exception as e:
logger.warn("Error code : {0} - Reason : {1}" .format(e.status, e.reason))
def create_deployment(self, name_space, deployment):
v1 = client.CoreV1Api(self.api_client)
v1apps = client.AppsV1Api(self.api_client)
# Initialize data objects
body = client.V1Deployment()
metadata = client.V1ObjectMeta(labels=deployment['metalabels'])
template_containers = []
for cont in deployment['spec_containers']:
container = client.V1Container(name=cont['name'], image=cont['image'])
template_containers.append(container)
spec_selector = client.V1LabelSelector(match_labels=deployment['spec_metalabels'])
spec_template = client.V1PodTemplateSpec(metadata=metadata, spec=client.V1PodSpec(containers=template_containers))
spec = client.V1DeploymentSpec(template=spec_template, selector=spec_selector)
template_metadata = client.V1ObjectMeta(labels=deployment['spec_metalabels'])
body.api_version = deployment['api_version']
body.kind = deployment['kind']
metadata.name = deployment['metaname']
metadata.namespace = name_space
body.metadata = metadata
body.spec = spec
        try:
            response = v1apps.create_namespaced_deployment(namespace=name_space, body=body)
            logger.info("Success : {0}".format(response.status))
            logger.debug(response)
            return response
        except Exception as e:
            logger.warning("Error Reason : {0}".format(e))
            return None
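# Minimal usage sketch (assumes k8s_config supplies host/port/token and the
# cluster is reachable):
#   k8s = k8sclient()
#   k8s.get_namespaces()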
|
[
"[email protected]"
] | |
39f32cc80893af7d76427d8ec9ad5cade4048447
|
c8c8518611d350841d454c4474944bf28b34e4c6
|
/rna/rna_classifier.py
|
116adf156ec1a09471f437b041523ca8121f4995
|
[] |
no_license
|
guustavov/hybrid_intrusion_detection_classifier
|
6a68c230af5ed341f9cf56cfd3afdc7ac5b18880
|
b63ed7ae352410713d81879348f5b76adf1fd5a2
|
refs/heads/master
| 2021-07-13T00:29:13.383733 | 2019-02-22T18:16:45 | 2019-02-22T18:16:45 | 133,855,660 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,055 |
py
|
from rna_module import RnaModule
import sys
import pandas
import os
import time
sys.path.append(os.path.dirname(os.path.realpath(__file__)) + "/..")
from dataSet import DataSet
class RnaClassifier(object):
    # training data set
data_set = None
    # test data set
test_data_set = None
rna = None
predictions = None
    # current iteration of the cross-validation process
iteration = 0
training_time = 0
test_time = 0
    # folder where result files are saved; this variable can be set in main.py
result_path = ""
def __init__(self):
# print "ANN constructor"
pass
def run(self):
training_time_start = time.time()
# print("RUN ANN classifier")
self.rna.setDataSet(self.data_set)
self.rna.setTestDataSet(self.test_data_set)
        # build the model and train it
self.rna.generateModel()
self.training_time = time.time() - training_time_start
test_time_start = time.time()
        # classify the test examples
self.predictions = self.rna.predictClasses()
self.test_time = time.time() - test_time_start
self.saveResults()
    # saves the classification results to the folder defined in main.py
def saveResults(self):
for i in range(0,len(self.predictions)):
self.test_data_set.set_value(i,'classe',self.predictions[i])
DataSet.saveResults(self.result_path, self.iteration, self.test_data_set)
def setDataSet(self, data_set):
self.data_set = data_set
def getDataSet(self):
return self.data_set
def setTestDataSet(self, test_data_set):
self.test_data_set = test_data_set
def getTestDataSet(self):
return self.test_data_set
def setRna(self, rna):
self.rna = rna
def getRna(self):
return self.rna
def setIteration(self, iteration):
self.iteration = iteration
def setResultPath(self, result_path):
self.result_path = result_path
def getTrainingTime(self):
return self.training_time
def getTestTime(self):
return self.test_time
|
[
"[email protected]"
] | |
dd0eb441e105f56c21813d7d9263c17466d46938
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/217/usersdata/274/113684/submittedfiles/av2_p3_m2.py
|
56a351331cc54ba12f7e3c1497129b302fa40d64
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 916 |
py
|
# -*- coding: utf-8 -*-
n = int(input("Square dimension: "))
while n < 3:
    n = int(input("Square dimension: "))
M = []
for i in range(0, n, 1):
    L = []
    for j in range(0, n, 1):
        L.append(int(input("Row element: ")))
    M.append(L)
somaL = []
for i in range(0, n, 1):
    somaL.append(sum(M[i]))
somaC = []
for j in range(0, n, 1):
    C = 0
    for i in range(0, n, 1):
        C = C + M[i][j]
    somaC.append(C)
b = [somaL[0]]
cont = 0
k = 0
VE = 0
VC = 0
for i in range(0, n, 1):
    if somaL[i] in b:
        continue
    else:
        cont = cont + 1
        k = i
if cont == 1:
    VE = somaL[k]
    VC = somaL[0]
if cont != 1:
    VE = somaL[0]
    VC = somaL[1]
    k = 0
b2 = [somaC[0]]
cont2 = 0
k2 = 0
VE2 = 0
for i in range(0, n, 1):
    if somaC[i] in b2:
        continue
    else:
        cont2 = cont2 + 1
        k2 = i
if cont2 == 1:
    VE2 = somaC[k2]
if cont2 != 1:
    VE2 = somaC[0]
    k2 = 0
O = VC - (VE - M[k][k2])
P = M[k][k2]
print(O)
print(P)
|
[
"[email protected]"
] | |
b3f7a3b1b139856ff26b9d63e72702a595467d51
|
d10e3eebe8ab1c59f9504ec3ca360b4e7c5ff897
|
/04.MachineLearning_ckekim210119/app.py
|
cc4c2a9d5c5c3d978d19ba4585b1638584e25b66
|
[] |
no_license
|
EGEG1212/Python-flask-web-2020
|
bd7bdaf2aa2b9667543a7beb28f735664d50b20f
|
c8cb43d72b7617f86e366da80029707cf1677fa6
|
refs/heads/master
| 2023-02-22T12:34:07.257496 | 2021-01-20T01:14:49 | 2021-01-20T01:14:49 | 320,116,031 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,819 |
py
|
from flask import Flask, render_template, session, request, g
from datetime import timedelta
import os, json, logging
from logging.config import dictConfig
from bp1_seoul.seoul import seoul_bp
from bp2_covid.covid import covid_bp
from bp3_cartogram.carto import carto_bp
from bp4_crawling.crawl import crawl_bp
from bp5_wordcloud.word import word_bp
from bp6_classification.clsf import clsf_bp
from bp7_advanced.aclsf import aclsf_bp
from bp8_regression.rgrs import rgrs_bp
from my_util.weather import get_weather
app = Flask(__name__)
app.secret_key = 'qwert12345'
app.config['SESSION_COOKIE_PATH'] = '/'
app.register_blueprint(seoul_bp, url_prefix='/seoul')
app.register_blueprint(covid_bp, url_prefix='/covid')
app.register_blueprint(carto_bp, url_prefix='/cartogram')
app.register_blueprint(crawl_bp, url_prefix='/crawling')
app.register_blueprint(word_bp, url_prefix='/wordcloud')
app.register_blueprint(clsf_bp, url_prefix='/classification')
app.register_blueprint(aclsf_bp, url_prefix='/advanced')
app.register_blueprint(rgrs_bp, url_prefix='/regression')
with open('./logging.json', 'r') as file:
config = json.load(file)
dictConfig(config)
def get_weather_main():
''' weather = None
try:
weather = session['weather']
except:
app.logger.info("get new weather info")
weather = get_weather()
session['weather'] = weather
session.permanent = True
app.permanent_session_lifetime = timedelta(minutes=60) '''
weather = get_weather()
return weather
@app.route('/')
def index():
menu = {'ho':1, 'da':0, 'ml':0,
'se':0, 'co':0, 'cg':0, 'cr':0, 'wc':0,
'cf':0, 'ac':0, 're':0, 'cu':0}
return render_template('index.html', menu=menu, weather=get_weather_main())
if __name__ == '__main__':
app.run(debug=True)
|
[
"[email protected]"
] | |
0e800540ed3062e989d54a15ac33b4d0c71b1497
|
27dbf8e0530aa36ed814631c293ee79667fde407
|
/testarea/archive/gpiozer.py
|
83e6e78f824150baebf220aec7292c2d576a5174
|
[] |
no_license
|
HydroFly/HydroflyGeneral
|
68cdf2bb2e524f65851fbdc38059a8465e6f4c0d
|
906f2fed5c5ec535949d6409d50c272502b44ab4
|
refs/heads/master
| 2020-04-24T21:10:40.340239 | 2019-04-26T19:21:00 | 2019-04-26T19:21:00 | 172,268,262 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 283 |
py
|
from gpiozero import LED
from time import sleep
led = LED(17)
while True:
input("Press enter to turn on")
led.on()
input("Press enter to turn off")
led.off()
#while True:
# print("on")
# led.on()
# sleep(1)
# print("off")
# led.off()
# sleep(1)
|
[
"[email protected]"
] | |
f44f6d9972814a4e7a1f84001a60cf2ac08ac418
|
5c26eafece0ee85a7ed4b6a34ee52753d7c86e49
|
/polyaxon/estimators/hooks/step_hooks.py
|
0e177575b29f1a02195d3439137b45db2c0d2a1a
|
[
"MIT"
] |
permissive
|
StetHD/polyaxon
|
345257076d484b2267ba20d9d346f1367cdd92d3
|
dabddb9b6ea922a0549e3c6fd7711231f7462fa3
|
refs/heads/master
| 2021-03-19T06:45:51.806485 | 2017-09-26T14:31:26 | 2017-09-26T14:36:13 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 6,184 |
py
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
from collections import OrderedDict
from tensorflow.python.training import basic_session_run_hooks
from polyaxon.estimators.hooks.utils import can_run_hook
class StepLoggingTensorHook(basic_session_run_hooks.LoggingTensorHook):
"""Prints the given tensors once every N local steps or once every N seconds.
A modified version of tensorflow.python.training.basic_session_run_hooks LoggingTensorHook.
    Checks the context for `no_run_hooks_op` before calling the hook.
The tensors will be printed to the log, with `INFO` severity.
Args:
tensors: `dict` that maps string-valued tags to tensors/tensor names,
or `iterable` of tensors/tensor names.
every_n_iter: `int`, print the values of `tensors` once every N local
steps taken on the current worker.
every_n_secs: `int` or `float`, print the values of `tensors` once every N
seconds. Exactly one of `every_n_iter` and `every_n_secs` should be
provided.
formatter: function, takes dict of `tag`->`Tensor` and returns a string.
If `None` uses default printing all tensors.
Raises:
ValueError: if `every_n_iter` is non-positive.
"""
def __init__(self, tensors, every_n_iter=None, every_n_secs=None, formatter=None):
super(StepLoggingTensorHook, self).__init__(tensors, every_n_iter, every_n_secs, formatter)
def before_run(self, run_context): # pylint: disable=unused-argument
self._should_trigger = can_run_hook(run_context)
if self._should_trigger:
return super(StepLoggingTensorHook, self).before_run(run_context)
else:
return None
class StopAtStepHook(basic_session_run_hooks.StopAtStepHook):
"""Monitor to request stop at a specified step.
(A mirror to tensorflow.python.training.basic_session_run_hooks StopAtStepHook.)
This hook requests stop after either a number of steps have been
executed or a last step has been reached. Only one of the two options can be
specified.
if `num_steps` is specified, it indicates the number of steps to execute
after `begin()` is called. If instead `last_step` is specified, it
indicates the last step we want to execute, as passed to the `after_run()`
call.
Args:
num_steps: Number of steps to execute.
last_step: Step after which to stop.
Raises:
ValueError: If one of the arguments is invalid.
"""
def __init__(self, num_steps=None, last_step=None):
super(StopAtStepHook, self).__init__(num_steps, last_step)
class StepCheckpointSaverHook(basic_session_run_hooks.CheckpointSaverHook):
"""Saves checkpoints every N steps or seconds.
(A mirror to tensorflow.python.training.basic_session_run_hooks CheckpointSaverHook.)
Args:
checkpoint_dir: `str`, base directory for the checkpoint files.
save_secs: `int`, save every N secs.
save_steps: `int`, save every N steps.
saver: `Saver` object, used for saving.
checkpoint_basename: `str`, base name for the checkpoint files.
scaffold: `Scaffold`, use to get saver object.
listeners: List of `CheckpointSaverListener` subclass instances.
Used for callbacks that run immediately after the corresponding
CheckpointSaverHook callbacks, only in steps where the
CheckpointSaverHook was triggered.
Raises:
ValueError: One of `save_steps` or `save_secs` should be set.
ValueError: Exactly one of saver or scaffold should be set.
"""
def __init__(self, checkpoint_dir, save_secs=None, save_steps=None, saver=None,
checkpoint_basename="model.ckpt", scaffold=None, listeners=None):
super(StepCheckpointSaverHook, self).__init__(checkpoint_dir, save_secs, save_steps, saver,
checkpoint_basename, scaffold, listeners)
class StepCounterHook(basic_session_run_hooks.StepCounterHook):
"""Steps per second monitor.
    (A mirror to tensorflow.python.training.basic_session_run_hooks StepCounterHook.)
"""
def __init__(self, every_n_steps=100, every_n_secs=None, output_dir=None, summary_writer=None):
super(StepCounterHook, self).__init__(
every_n_steps, every_n_secs, output_dir, summary_writer)
class StepSummarySaverHook(basic_session_run_hooks.SummarySaverHook):
"""Saves summaries every N steps.
    (A mirror to tensorflow.python.training.basic_session_run_hooks SummarySaverHook.)
Args:
save_steps: `int`, save summaries every N steps. Exactly one of
`save_secs` and `save_steps` should be set.
save_secs: `int`, save summaries every N seconds.
output_dir: `string`, the directory to save the summaries to. Only used
if no `summary_writer` is supplied.
summary_writer: `SummaryWriter`. If `None` and an `output_dir` was passed,
one will be created accordingly.
scaffold: `Scaffold` to get summary_op if it's not provided.
summary_op: `Tensor` of type `string` containing the serialized `Summary`
protocol buffer or a list of `Tensor`. They are most likely an output
by TF summary methods like `tf.summary.scalar` or
`tf.summary.merge_all`. It can be passed in as one tensor; if more
than one, they must be passed in as a list.
Raises:
ValueError: Exactly one of scaffold or summary_op should be set.
"""
def __init__(self, save_steps=None, save_secs=None, output_dir=None, summary_writer=None,
scaffold=None, summary_op=None):
super(StepSummarySaverHook, self).__init__(
save_steps, save_secs, output_dir, summary_writer, scaffold, summary_op)
STEP_HOOKS = OrderedDict([
('StepLoggingTensorHook', StepLoggingTensorHook),
('StopAtStepHook', StopAtStepHook),
('StepCheckpointSaverHook', StepCheckpointSaverHook),
('StepCounterHook', StepCounterHook),
('StepSummarySaverHook', StepSummarySaverHook),
])
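# Usage sketch (hedged): hooks from this registry are instantiated and handed
# to an estimator's training loop, e.g.
#   hooks = [StopAtStepHook(last_step=10000),
#            StepCounterHook(every_n_steps=100, output_dir='/tmp/model')]
# Exactly one of `num_steps`/`last_step` may be set on StopAtStepHook,
# mirroring the underlying tf.train behaviour.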
|
[
"[email protected]"
] | |
b284c2b20a27edfd46ff6f14ba59bcd5aff733d3
|
be026334d457b1f78050f8262cd693922c6c8579
|
/onnxruntime/python/tools/transformers/fusion_gpt_attention_megatron.py
|
5418ccf513c770d3ec626ac6520e367c249eaa37
|
[
"MIT"
] |
permissive
|
ConnectionMaster/onnxruntime
|
953c34c6599c9426043a8e5cd2dba05424084e3b
|
bac9c0eb50ed5f0361f00707dd6434061ef6fcfe
|
refs/heads/master
| 2023-04-05T00:01:50.750871 | 2022-03-16T15:49:42 | 2022-03-16T15:49:42 | 183,019,796 | 1 | 0 |
MIT
| 2023-04-04T02:03:14 | 2019-04-23T13:21:11 |
C++
|
UTF-8
|
Python
| false | false | 10,803 |
py
|
#-------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
#--------------------------------------------------------------------------
import numpy as np
from logging import getLogger
from onnx import helper, numpy_helper, TensorProto
from onnx_model import OnnxModel
from fusion_base import Fusion
from fusion_utils import FusionUtils
from fusion_gpt_attention import FusionGptAttentionPastBase
logger = getLogger(__name__)
def is_close(value, expected_value):
return abs(value - expected_value) <= 1e-6
class FusionGptAttentionMegatron(FusionGptAttentionPastBase):
"""
Fuse GPT-2 Attention with past state subgraph from Megatron into one Attention node.
"""
def __init__(self, model: OnnxModel, num_heads: int):
super().__init__(model, num_heads)
def fuse_attention_node(self, matmul_before_split, add_before_split, past, present, input, reshape_qkv, mask):
attention_node_name = self.model.create_node_name('GptAttention')
int32_mask = self.cast_attention_mask(mask)
output = reshape_qkv.output[0]
i = 1 if (add_before_split.input[0] == matmul_before_split.output[0]) else 0
attention_node = helper.make_node(
'Attention',
inputs=[input, matmul_before_split.input[1], add_before_split.input[i], int32_mask, past],
outputs=[output, present],
name=attention_node_name)
attention_node.domain = "com.microsoft"
attention_node.attribute.extend([
helper.make_attribute("num_heads", self.num_heads),
helper.make_attribute("unidirectional", 0) # unidirectional shall not be ON for 4D attention mask
])
nodes_to_add = [attention_node]
self.nodes_to_add.extend(nodes_to_add)
for node in nodes_to_add:
self.node_name_to_graph_name[node.name] = self.this_graph_name
self.nodes_to_remove.append(reshape_qkv)
# we rely on prune_graph() to clean old subgraph nodes
self.prune_graph = True
def match_mask(self, sub_qk, mul_qk, matmul_qk, layernorm_before_attention):
mask_nodes = self.model.match_parent_path(
sub_qk,
['Mul', 'Sub', 'Slice', 'Slice'],
[1, 0, 1, 0]) # yapf: disable
if mask_nodes is None:
logger.debug("fuse_attention: failed to match unidirectional mask path")
return None
(mul_mask, sub_mask, last_slice_mask, slice_mask) = mask_nodes
if mul_qk.input[1] != last_slice_mask.output[0]:
logger.debug("fuse_attention failed: mul_qk.input[1] != last_slice_mask.output[0]")
return None
if not self.utils.check_node_input_value(mul_mask, 1, 10000.0):
logger.debug("fuse_attention failed: mul_mask input 1 is not constant 10000.0")
return None
if not self.utils.check_node_input_value(sub_mask, 0, 1.0):
logger.debug("fuse_attention failed: sub_mask input 0 is not constant 1.0")
return None
if not self.model.find_graph_input(slice_mask.input[0]):
logger.info("expect slick_mask input 0 to be graph input")
return None
if not self.utils.check_node_input_value(last_slice_mask, 1, [0]):
logger.debug("fuse_attention failed: last_slice_mask input 1 (starts) is not constant [0]")
return None
if not self.utils.check_node_input_value(last_slice_mask, 3, [3]):
logger.debug("fuse_attention failed: last_slice_mask input 3 (axes) is not constant [3]")
            return None  # keep failure paths consistent with the other checks
if not self.utils.check_node_input_value(last_slice_mask, 4, [1]):
logger.debug("fuse_attention failed: last_slice_mask input 4 (steps) is not constant [1]")
            return None  # keep failure paths consistent with the other checks
if not self.utils.check_node_input_value(slice_mask, 3, [2]):
logger.debug("fuse_attention failed: slice_mask input 3 (axes) is not constant [2]")
return None
if not self.utils.check_node_input_value(slice_mask, 4, [1]):
logger.debug("fuse_attention failed: slice_mask input 4 (steps) is not constant [1]")
return None
last_slice_path = self.model.match_parent_path(last_slice_mask, ['Unsqueeze', 'Gather', 'Shape', 'MatMul'],
[2, 0, 0, 0])
if last_slice_path is None or last_slice_path[-1] != matmul_qk:
logger.debug("fuse_attention: failed to match last slice path")
return None
first_slice_path = self.model.match_parent_path(slice_mask, ['Unsqueeze', 'Gather', 'Shape', 'MatMul'],
[2, 0, 0, 0])
if first_slice_path is None or first_slice_path[-1] != matmul_qk:
logger.debug("fuse_attention: failed to match first slice path")
return None
first_slice_sub = self.model.match_parent_path(slice_mask, ['Unsqueeze', 'Sub', 'Gather', 'Shape', 'MatMul'],
[1, 0, 0, 0, 0])
if first_slice_sub is None or first_slice_sub[-1] != matmul_qk:
logger.debug("fuse_attention: failed to match last slice sub path")
return None
first_slice_sub_1 = self.model.match_parent_path(slice_mask,
['Unsqueeze', 'Sub', 'Gather', 'Shape', 'LayerNormalization'],
[1, 0, 1, 0, 0])
if first_slice_sub_1 is None or first_slice_sub_1[-1] != layernorm_before_attention:
logger.debug("fuse_attention: failed to match last slice sub path 1")
return None
return slice_mask.input[0]
def fuse(self, normalize_node, input_name_to_nodes, output_name_to_node):
past = None
present = None
qkv_nodes = self.model.match_parent_path(
normalize_node,
['Add', 'Add', 'MatMul', 'Reshape', 'Transpose', 'MatMul'],
[ 0, 1, None, 0, 0, 0],
output_name_to_node=output_name_to_node,
) # yapf: disable
if qkv_nodes is None:
return
(add_skip, add_after_attention, matmul_after_attention, reshape_qkv, transpose_qkv, matmul_qkv) = qkv_nodes
skip_input = add_skip.input[0]
v_nodes = self.model.match_parent_path(
matmul_qkv,
['Concat', 'Transpose', 'Reshape', 'Split', 'Add', 'MatMul', 'LayerNormalization'],
[1, 1, 0, 0, 0, None, 0]) # yapf: disable
if v_nodes is None:
logger.debug("fuse_attention: failed to match v path")
return
(concat_v, transpose_v, reshape_v, split_v, add_before_split, matmul_before_split,
layernorm_before_attention) = v_nodes
if skip_input != layernorm_before_attention.input[0]:
logger.debug("fuse_attention: skip_input != layernorm_before_attention.input[0]")
return
qk_nodes = self.model.match_parent_path(matmul_qkv, ['Softmax', 'Sub', 'Mul', 'MatMul'], [0, 0, 0, 0])
if qk_nodes is None:
logger.debug("fuse_attention: failed to match qk path")
return None
(softmax_qk, sub_qk, mul_qk, matmul_qk) = qk_nodes
if self.model.get_node_attribute(softmax_qk, "axis") != 3:
logger.debug("fuse_attention failed: softmax_qk axis != 3")
return None
attention_mask = self.match_mask(sub_qk, mul_qk, matmul_qk, layernorm_before_attention)
q_nodes = self.model.match_parent_path(matmul_qk, ['Div', 'Transpose', 'Reshape', 'Split'], [0, 0, 0, 0])
if q_nodes is None:
logger.debug("fuse_attention: failed to match q path")
return
(div_q, transpose_q, reshape_q, split_q) = q_nodes
if split_v != split_q:
logger.debug("fuse_attention: skip since split_v != split_q")
return
k_nodes = self.model.match_parent_path(matmul_qk,
['Div', 'Transpose', 'Concat', 'Transpose', 'Reshape', 'Split'],
[1, 0, 0, 1, 0, 0])
if k_nodes is None:
logger.debug("fuse_attention: failed to match k path")
return
(div_k, _, concat_k, transpose_k, reshape_k, split_k) = k_nodes
if split_v != split_k:
logger.debug("fuse_attention: skip since split_v != split_k")
return
i, value = self.model.get_constant_input(reshape_k)
if not (isinstance(value, np.ndarray) and list(value.shape) == [4] and value[0] == 0 and value[1] == 0
and value[2] > 0 and value[3] > 0):
logger.debug("fuse_attention: reshape constant input is not [0, 0, N, H]")
return
num_heads = value[2]
if num_heads != self.num_heads:
logger.info(f"Detected num_heads={num_heads}. Ignore user specified value {self.num_heads}")
self.num_heads = num_heads
hidden_size_per_head = value[3]
i, value = self.model.get_constant_input(div_k)
expected_value = float(np.sqrt(np.sqrt(hidden_size_per_head)))
if not is_close(value, expected_value):
logger.debug(f"fuse_attention: div_k value={value} expected={expected_value}")
return
i, value = self.model.get_constant_input(div_q)
if not is_close(value, expected_value):
logger.debug(f"fuse_attention: div_q value={value} expected={expected_value}")
return
# Match past and present paths
past = self.match_past_pattern_2(concat_k, concat_v, output_name_to_node)
if past is None:
logger.debug("fuse_attention: match past failed")
return
if not self.model.find_graph_input(past):
logger.debug("fuse_attention: past is not graph input.")
# For GPT2LMHeadModel_BeamSearchStep, there is an extra Gather node to select beam index so it is not graph input.
present = self.match_present(concat_v, input_name_to_nodes)
if present is None:
logger.debug("fuse_attention: match present failed")
return
if not self.model.find_graph_output(present):
logger.info("fuse_attention: expect present to be graph output")
return
self.fuse_attention_node(matmul_before_split, add_before_split, past, present,
layernorm_before_attention.output[0], reshape_qkv, attention_mask)
|
[
"[email protected]"
] | |
d067b118d900cc23f8ce358f3377259f6a31eb92
|
922be0d4ff9317d32e2c320480ece35c6f85bf4f
|
/cerabot/__init__.py
|
a760818ea4d2abe2093fdf40cdac94e3dbd992d1
|
[] |
no_license
|
ceradon/cerabot-rewrite
|
bbeed8e818201a9f5b56a813636d36740859290b
|
7ce7a4ddca5a557677f15fdedfe70b3476266a53
|
refs/heads/master
| 2021-01-19T12:31:28.070627 | 2015-01-07T01:09:19 | 2015-01-07T01:09:19 | 28,889,432 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 35 |
py
|
from wiki import api
api = api
|
[
"[email protected]"
] | |
62ad1f4df80fe229b09f7814ab8bfb7a06bf8061
|
1626c64af4451dedfa0ea65b7b6e360e26c79669
|
/Feature_Extraction/字典数据抽取特征.py
|
ddc7fe2aa47e01e31c3a563ac2d4c8437e27b20e
|
[] |
no_license
|
ziranjuanchow/MachineLearning-1
|
e356932bb8b7ff0dbf53234da509a1b96a7637ce
|
351a42493406fc1c1a5d7fd3d70acbb3d589c428
|
refs/heads/master
| 2020-04-19T22:53:49.703582 | 2019-01-24T15:09:07 | 2019-01-24T15:09:07 | 168,482,514 | 1 | 1 | null | 2019-01-31T07:30:58 | 2019-01-31T07:30:58 | null |
UTF-8
|
Python
| false | false | 636 |
py
|
# -*- coding: utf-8 -*-
__author__ = 'liudong'
__date__ = '2018/10/30 2:23 PM'
from sklearn.feature_extraction import DictVectorizer
def dictvec():
"""
    Feature extraction from dictionary data
:return:
"""
measurements = [{'city': 'Beijing', 'temperature': 33.},
{'city': 'London', 'temperature': 12.},
{'city': 'San Fransisco', 'temperature': 18.}]
    dv = DictVectorizer(sparse=False)  # renamed from `dict` to avoid shadowing the builtin
    data = dv.fit_transform(measurements)
    print(dv.get_feature_names())
    print(dv.inverse_transform(data))
print(data)
return None
if __name__ == "__main__":
dictvec()
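# Expected output (one-hot 'city' columns plus the numeric 'temperature' column):
#   ['city=Beijing', 'city=London', 'city=San Fransisco', 'temperature']
#   [[ 1.  0.  0. 33.]
#    [ 0.  1.  0. 12.]
#    [ 0.  0.  1. 18.]]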
|
[
"[email protected]"
] | |
6a4c16868431e1e23eb5da001f0272c6e45ae97e
|
6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4
|
/7ECZC8CBEhy5QkvN3_15.py
|
b7cee2eac0f62400c8ad19d3b56c9c8b2daff2e8
|
[] |
no_license
|
daniel-reich/ubiquitous-fiesta
|
26e80f0082f8589e51d359ce7953117a3da7d38c
|
9af2700dbe59284f5697e612491499841a6c126f
|
refs/heads/master
| 2023-04-05T06:40:37.328213 | 2021-04-06T20:17:44 | 2021-04-06T20:17:44 | 355,318,759 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 151 |
py
|
def how_many_walls(n, w, h):
sum_ = 0
count = 0
wallSquare = w * h
while sum_ <= n:
sum_ += wallSquare
count += 1
return count - 1
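# Example: with walls of 4 * 5 = 20 square units each, the loop exits once
# sum_ first exceeds n, so how_many_walls(100, 4, 5) == 5.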
|
[
"[email protected]"
] | |
15f2e0a46eaa4e6963b4aac82d4da11490982d89
|
92e2a92512c98de0c73748bb8fd344b6a0bbfae6
|
/provision/admin.py
|
4248e502dbea898429c654fb586c445079ed06a5
|
[] |
no_license
|
ashishthedev/elevation
|
7a5834e8885224a71de640b7a82591f5d8b68273
|
cf66b2ffd04b6a4baf2d715ed72e8fc3b84c8104
|
refs/heads/master
| 2021-01-10T14:03:09.490736 | 2020-02-19T03:37:01 | 2020-02-19T03:37:01 | 51,630,406 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 155 |
py
|
from django.contrib import admin
from provision.models import Provisioner
@admin.register(Provisioner)
class ProvisionersAdmin(admin.ModelAdmin):
pass
|
[
"[email protected]"
] | |
612d839588a33be1fa3c6933a0fd9b694f8ee6c8
|
b484f2515f8dd4c9e1d1451aba82d0225693e897
|
/nlps/app.py
|
d74b5efd3df57214e14b673125d3516de794e8c4
|
[] |
no_license
|
titan2351/nlps
|
e521f14e5566ffdab74b259f028025e121159641
|
5d96db5754248317eb0db951821a3d07356537cb
|
refs/heads/main
| 2023-01-28T08:42:45.126902 | 2020-11-23T14:29:55 | 2020-11-23T14:29:55 | 315,046,874 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,606 |
py
|
from flask import Flask, render_template, request, url_for
from flask_wtf import FlaskForm
import os
import pandas as pd
from wtforms.fields import SelectField
from flask_wtf.file import FileField, FileRequired, FileAllowed
from flask_uploads import configure_uploads, IMAGES, UploadSet, DOCUMENTS
from werkzeug.utils import secure_filename, redirect
from src.sentiment import get_sentiment
from flask import send_file
app = Flask(__name__, static_folder="../static/")
app.config['SECRET_KEY'] = 'thisisasecret'
app.config['UPLOADED_DATAFILES_DEST'] = 'uploads/documents'
upset_xlsx = UploadSet('datafiles', DOCUMENTS)
configure_uploads(app, upset_xlsx)
class MyForm(FlaskForm):
xls = FileField("choose xls", [FileRequired(),
FileAllowed(['xls', 'xlsx', 'csv'], 'excel,csv')])
class PostUploadForm(FlaskForm):
choose_col = SelectField(
u'Column available in uploaded file'
)
@app.route('/sentiment', methods=['GET', 'POST'])
def sentiment():
form = MyForm()
if form.validate_on_submit():
filename = upset_xlsx.save(form.xls.data)
print(filename)
return redirect(url_for('transform', fname=filename))
return render_template('sentiment_upload.html', form=form, title="Upload xlsx")
@app.route('/transform/<fname>', methods=['GET', 'POST'])
def transform(fname):
form = PostUploadForm()
filepath = os.path.join(app.config['UPLOADED_DATAFILES_DEST'], fname)
df = pd.read_excel(filepath)
print(df.shape)
ch = [(col, col) for col in list(df.columns)]
form.choose_col.choices = ch
if form.validate_on_submit():
selected_col = request.form.get('choose_col')
# xl_file = request.files['file']
print(selected_col)
if selected_col in list(df.columns):
df_processed = get_sentiment(df, selected_col)
df_processed.to_excel(os.path.join(app.config['UPLOADED_DATAFILES_DEST'], "_proc.xlsx"))
return send_file(os.path.join(app.config['UPLOADED_DATAFILES_DEST'], "_proc.xlsx"), as_attachment=True)
return render_template('sentiment_transformed.html', form=form, title="Choose col")
@app.route('/')
@app.route('/home')
def home():
form = MyForm()
return render_template('home.html', form=form, title="Home")
@app.route('/about')
def about():
form = MyForm()
return render_template('about.html', form=form, title="About")
@app.route('/models')
def models():
form = MyForm()
return render_template('models.html', form=form, title="Models")
if __name__ == '__main__':
app.run(debug=True)
|
[
"[email protected]"
] | |
69a193b7fc5036acfd0ec377bc03b370c4224796
|
3cabfd6ad2d1477b579dadb959d74931daf67c3c
|
/Basic/41_integer_to_binary.py
|
924e2f5e671f4f54537928a58859dc1dae914d85
|
[] |
no_license
|
SunnyRaj94/Basic-Python-And-Data-Structures
|
1a7100d91d3bbd82157feb7bcbd105c8bc7fd765
|
726c044773a0fe7146356c0bee28805eea6d15e2
|
refs/heads/master
| 2020-11-25T07:27:47.441078 | 2019-12-21T07:23:39 | 2019-12-21T07:23:39 | 228,556,873 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 364 |
py
|
"""
Created on 12/12/2019
@author: Sunny Raj
"""
"""
Problem Statement:
Write a Python program to convert an integer to binary, keeping leading zeros
"""
# asking for integer input from user
integer = int(input("enter integer value "))
print(format(integer, '08b'))   # converting to an 8-bit binary format
print(format(integer, '010b'))  # converting to a 10-bit binary format
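# e.g. for integer = 5 the two prints yield '00000101' and '0000000101'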
|
[
"[email protected]"
] | |
c2c5cbfe74a3fa9a9fd7c1cbe877ca5cb4954f87
|
58c64ffecaf6c8b5998c703786405ca8fefcb948
|
/ValidateInput.py
|
25dd3e5ac8466af8d42bb06a53fb84b5abf614fb
|
[] |
no_license
|
JannickStaes/LearningPython
|
bd10a7dd5f4697c8ecbc6da1bbf901fde9c68248
|
3f78759c72320170ab0189cfbbaff35b385c381d
|
refs/heads/master
| 2022-02-26T02:29:52.399371 | 2022-02-13T20:41:15 | 2022-02-13T20:41:15 | 220,836,103 | 0 | 0 | null | 2019-11-10T19:08:08 | 2019-11-10T19:02:09 |
Python
|
UTF-8
|
Python
| false | false | 344 |
py
|
while True:
print('Enter your age.')
age = input()
if age.isdecimal():
break
print('Please enter a number for your age.')
while True:
print('Select a new password (letters and numbers only):')
password = input()
if password.isalnum():
break
print('Password can only have letters and numbers.')
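# Note: str.isdecimal() is True only for decimal digits ('42' -> True,
# '4.2' -> False), while str.isalnum() accepts letters and digits
# ('abc123' -> True, 'abc 123' -> False).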
|
[
"[email protected]"
] | |
677d22f42d470e7e6fab11f89b82637deaaa0fb6
|
be80a2468706ab99c838fa85555c75db8f38bdeb
|
/app/reward/migrations/0002_auto_20180822_0903.py
|
2e25721da289ed95493031d61d3ce8c3cf1f9c9a
|
[] |
no_license
|
kimdohwan/Wadiz
|
5468d218ba069387deabf83376b42a4f69360881
|
91f85f09a7c9a59864b69990127911a112d4bdbd
|
refs/heads/master
| 2021-06-24T06:41:04.111305 | 2019-07-03T12:51:18 | 2019-07-03T12:51:18 | 143,955,968 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,105 |
py
|
# Generated by Django 2.1 on 2018-08-22 00:03
import django.core.validators
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('reward', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='FundingOrder',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('username', models.CharField(max_length=20)),
('phone_number', models.CharField(blank=True, max_length=11, validators=[django.core.validators.RegexValidator(message='Phone number must be 11 numbers', regex='\\d{11}')])),
('address1', models.CharField(max_length=30)),
('address2', models.CharField(max_length=30)),
('comment', models.TextField()),
('requested_at', models.DateTimeField(auto_now_add=True)),
('cancel_at', models.DateTimeField(null=True)),
],
),
migrations.RemoveField(
model_name='funding',
name='address1',
),
migrations.RemoveField(
model_name='funding',
name='address2',
),
migrations.RemoveField(
model_name='funding',
name='cancel_at',
),
migrations.RemoveField(
model_name='funding',
name='comment',
),
migrations.RemoveField(
model_name='funding',
name='phone_number',
),
migrations.RemoveField(
model_name='funding',
name='requested_at',
),
migrations.RemoveField(
model_name='funding',
name='username',
),
migrations.AddField(
model_name='funding',
name='order',
field=models.ForeignKey(default='', on_delete=django.db.models.deletion.CASCADE, related_name='order', to='reward.FundingOrder'),
preserve_default=False,
),
]
|
[
"[email protected]"
] | |
3a03ed2812c020cfd50b5bb2bfe6e9223a67b34c
|
418ed1e9c0d28fb17781689b7499d015431fd1ae
|
/BtagHLT/TTbarSelection/python/ttbarselection_cfi.py
|
a4ab2ba54b77ed31ee40d7adbbf0a57aef55aab5
|
[] |
no_license
|
tropiano/usercode
|
a9f1362dea803dc8c746fd40826af6a405be02b2
|
92b3bb2d346d762f034c8acf0b11693f21723788
|
refs/heads/master
| 2020-06-05T22:32:11.617468 | 2012-12-25T12:00:06 | 2012-12-25T12:00:06 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,489 |
py
|
import FWCore.ParameterSet.Config as cms
ttbarselection = cms.EDAnalyzer('TTbarSelection',
vertexSrc = cms.untracked.InputTag("offlinePrimaryVerticesWithBS"),
electronSrc= cms.untracked.InputTag("selectedElectrons"),
muonSrc= cms.untracked.InputTag("selectedMuons"),
triggerSrc= cms.untracked.InputTag("TriggerEvent"),
triggerName= cms.untracked.string("HLT_BTagIP_Jet50U"),
jetSrc= cms.untracked.InputTag("selectedJets"),
metSrc= cms.untracked.InputTag("patMETs"),
ElectronVeto_PtCut = cms.double(0),
ElectronVeto_EtaCut = cms.double(99999),
RelIso = cms.double(0.2),
MuonVeto_PtCut = cms.double(0),
MuonVeto_EtaCut = cms.double(99999),
Jets_PtCut = cms.double(30),
Jets_EtaCut = cms.double(2.4),
Jets_EmFracCut = cms.double(0.01),
MET_Cut = cms.double(30),
BtagDiscrCut1 = cms.double(4.),
BtagDiscrCut2 = cms.double(4.),
)
|
[
""
] | |
9c4eba45621b58b6f705c1771ddf11d62197ef10
|
290b9d2e1b8a84cebc522ea9a867edf211b60973
|
/DoItJumpTo01/02.py
|
bea5cedffe4ae6f645f0cfa4d3274f216821486a
|
[] |
no_license
|
EllieHan93/Python
|
d2dcedf8b8e3f4ada0268f9f761bcedbc8391314
|
ad9e88617a510e0b4fd6ddb7db74402e41bbb4cc
|
refs/heads/master
| 2020-07-22T20:53:55.091578 | 2019-12-15T11:44:21 | 2019-12-15T11:44:21 | 207,324,663 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 57 |
py
|
#02.py
print("=" *50)
print("My Program")
print("=" *50)
|
[
"[email protected]"
] | |
b8dfe67598216c766a760dde33a81a620149a763
|
5462ef02d1a50c327b9f5196523c616dcfc772a9
|
/src/Emo/settings.py
|
8d0715f49e4c64294596d0db70e02bc9b92a7286
|
[] |
no_license
|
shagun11/django-project-template
|
0c4d40ff18782abe2590154a3896988bc1ee327f
|
78968b8e5415d2878682b20da4a9ed0e83ba5b5e
|
refs/heads/master
| 2021-01-14T14:37:47.214397 | 2015-01-28T08:04:17 | 2015-01-28T08:04:17 | 29,926,082 | 0 | 0 | null | 2015-01-27T17:39:46 | 2015-01-27T17:39:46 | null |
UTF-8
|
Python
| false | false | 5,416 |
py
|
# Django settings for Emo project.
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', '[email protected]'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
'NAME': '', # Or path to database file if using sqlite3.
# The following settings are not used with sqlite3:
'USER': '',
'PASSWORD': '',
'HOST': '', # Empty for localhost through domain sockets or '127.0.0.1' for localhost through TCP.
'PORT': '', # Set to empty string for default.
}
}
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
ALLOWED_HOSTS = []
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/var/www/example.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://example.com/media/", "http://media.example.com/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/var/www/example.com/static/"
STATIC_ROOT = ''
# URL prefix for static files.
# Example: "http://example.com/static/", "http://static.example.com/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'z)6m6z5yv*rpp@6$1c9*@=sxf%!t3(#!n-3v%yn0)lns3t%_0('
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# Uncomment the next line for simple clickjacking protection:
# 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'Emo.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'Emo.wsgi.application'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# Uncomment the next line to enable the admin:
# 'django.contrib.admin',
# Uncomment the next line to enable admin documentation:
# 'django.contrib.admindocs',
)
SESSION_SERIALIZER = 'django.contrib.sessions.serializers.JSONSerializer'
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
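# A minimal sketch (an assumption, not part of the original template) of how a
# console handler could be added on top of the configuration above for local
# development; the handler name 'console' is illustrative:
# LOGGING['handlers']['console'] = {
#     'level': 'DEBUG',
#     'class': 'logging.StreamHandler',
# }
# LOGGING['loggers']['django.request']['handlers'].append('console')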
|
[
"[email protected]"
] | |
1e30a64ef30c526d7e94b66f205c369d97dd8da2
|
fa04309288a0f8b2daae2fd73c8224a1c0ad4d95
|
/eventkit_cloud/tasks/tests/test_task_factory.py
|
b02b4477e9ad630dbbdb95b91ae28bb1c39b5c47
|
[] |
no_license
|
jj0hns0n/eventkit-cloud
|
7bb828c57f29887621e47fe7ce0baa14071ef39e
|
2f749090baf796b507e79251a4c4b30cb0b4e126
|
refs/heads/master
| 2021-01-01T19:45:32.464729 | 2017-07-24T19:01:24 | 2017-07-24T19:01:24 | 98,675,805 | 0 | 0 | null | 2017-07-28T18:16:34 | 2017-07-28T18:16:34 | null |
UTF-8
|
Python
| false | false | 7,545 |
py
|
# -*- coding: utf-8 -*-
import logging
import os
import uuid
from django.contrib.auth.models import Group, User
from django.contrib.gis.geos import GEOSGeometry, Polygon
from django.db import DatabaseError
from django.test import TestCase
from eventkit_cloud.jobs.models import Job, Region, ProviderTask, ExportProvider, License, UserLicense
from eventkit_cloud.tasks.models import ExportRun
from eventkit_cloud.tasks.task_factory import (TaskFactory, create_run, create_finalize_run_task_collection,
get_invalid_licenses)
from mock import patch, Mock, MagicMock
logger = logging.getLogger(__name__)
class TestExportTaskFactory(TestCase):
"""
Test cases for the TaskFactory.
"""
fixtures = ('insert_provider_types.json', 'osm_provider.json',)
    def setUp(self):
self.path = os.path.dirname(os.path.realpath(__file__))
Group.objects.create(name='TestDefaultExportExtentGroup')
self.user = User.objects.create(username='demo', email='[email protected]', password='demo')
bbox = Polygon.from_bbox((-10.85, 6.25, -10.62, 6.40))
the_geom = GEOSGeometry(bbox, srid=4326)
self.job = Job.objects.create(name='TestJob', description='Test description', user=self.user,
the_geom=the_geom)
provider = ExportProvider.objects.get(slug='osm')
self.license = License.objects.create(slug='odbl-test', name='test_osm_license')
provider.license = self.license
provider.save()
UserLicense.objects.create(license=self.license, user=self.user)
provider_task = ProviderTask.objects.create(provider=provider)
self.job.provider_tasks.add(provider_task)
self.region = Region.objects.get(name='Africa')
self.job.region = self.region
self.uid = str(provider_task.uid)
self.job.save()
def test_create_run_success(self):
run_uid = create_run(job_uid=self.job.uid)
self.assertIsNotNone(run_uid)
self.assertIsNotNone(ExportRun.objects.get(uid=run_uid))
@patch('eventkit_cloud.tasks.task_factory.ExportRun')
def test_create_run_failure(self, ExportRun):
ExportRun.objects.create.side_effect = DatabaseError('FAIL')
with self.assertRaises(DatabaseError):
run_uid = create_run(job_uid=self.job.uid)
self.assertIsNone(run_uid)
@patch('eventkit_cloud.tasks.task_factory.get_invalid_licenses')
@patch('eventkit_cloud.tasks.task_factory.finalize_export_provider_task')
@patch('eventkit_cloud.tasks.task_factory.create_task')
@patch('eventkit_cloud.tasks.task_factory.chain')
def test_task_factory(self, task_factory_chain, create_task,
finalize_task, mock_invalid_licenses):
mock_invalid_licenses.return_value = []
run_uid = create_run(job_uid=self.job.uid)
self.assertIsNotNone(run_uid)
self.assertIsNotNone(ExportRun.objects.get(uid=run_uid))
worker = "some_worker"
provider_uuid = uuid.uuid4()
task_runner = MagicMock()
task = Mock()
task_runner().run_task.return_value = (provider_uuid, task)
create_task.return_value = task
task_factory = TaskFactory()
task_factory.type_task_map = {'osm-generic': task_runner, 'osm': task_runner}
task_factory.parse_tasks(run_uid=run_uid, worker=worker)
task_factory_chain.assert_called()
create_task.assert_called()
finalize_task.s.assert_called()
# Test that run is prevented and deleted if the user has not agreed to the licenses.
mock_invalid_licenses.return_value = ['invalid-licenses']
with self.assertRaises(Exception):
task_factory.parse_tasks(run_uid=run_uid, worker=worker)
run = ExportRun.objects.filter(uid=run_uid).first()
self.assertIsNone(run)
def test_get_invalid_licenses(self):
# The license should not be returned if the user has agreed to it.
expected_invalid_licenses = []
invalid_licenses = get_invalid_licenses(self.job)
self.assertEquals(invalid_licenses, expected_invalid_licenses)
# A license should be returned if the user has not agreed to it.
UserLicense.objects.get(license=self.license, user=self.user).delete()
expected_invalid_licenses = [self.license.name]
invalid_licenses = get_invalid_licenses(self.job)
self.assertEquals(invalid_licenses, expected_invalid_licenses)
UserLicense.objects.create(license=self.license, user=self.user)
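# Illustrative note (comments only; an assumption inferred from the assertions
# above, not the project's documented API): test_get_invalid_licenses implies
# that get_invalid_licenses(job) returns the names of provider licenses for
# which no UserLicense row exists for the job's user, roughly:
#   [pt.provider.license.name for pt in job.provider_tasks.all()
#    if not UserLicense.objects.filter(license=pt.provider.license,
#                                      user=job.user).exists()]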
class CreateFinalizeRunTaskCollectionTests(TestCase):
@patch('eventkit_cloud.tasks.task_factory.example_finalize_run_hook_task')
@patch('eventkit_cloud.tasks.task_factory.prepare_for_export_zip_task')
@patch('eventkit_cloud.tasks.task_factory.zip_file_task')
@patch('eventkit_cloud.tasks.task_factory.finalize_run_task_as_errback')
@patch('eventkit_cloud.tasks.task_factory.finalize_run_task')
@patch('eventkit_cloud.tasks.task_factory.chain')
def test_create_finalize_run_task_collection(
self, chain, finalize_run_task, finalize_run_task_as_errback, zip_file_task, prepare_for_export_zip_task, example_finalize_run_hook_task):
""" Checks that all of the expected tasks were prepared and combined in a chain for return.
"""
chain.return_value = 'When not mocked, this would be a celery chain'
# None of these need correspond to real things, they're just to check the inner calls.
run_uid = 1
run_dir = 'test_dir'
worker = 'test_worker'
expected_task_settings = {
'interval': 1, 'max_retries': 10, 'queue': worker, 'routing_key': worker, 'priority': 70}
# This should return a chain of tasks ending in the finalize_run_task, plus a task sig for just the
# finalize_run_task.
finalize_chain, errback = create_finalize_run_task_collection(run_uid=run_uid, run_dir=run_dir, worker=worker)
example_finalize_run_hook_task.si.assert_called_once_with([], run_uid=run_uid)
example_finalize_run_hook_task.si.return_value.set.assert_called_once_with(**expected_task_settings)
prepare_for_export_zip_task.s.assert_called_once_with(run_uid=run_uid)
prepare_for_export_zip_task.s.return_value.set.assert_called_once_with(**expected_task_settings)
zip_file_task.s.assert_called_once_with(run_uid=run_uid)
zip_file_task.s.return_value.set.assert_called_once_with(**expected_task_settings)
finalize_run_task.si.assert_called_once_with(run_uid=run_uid, stage_dir=run_dir)
finalize_run_task.si.return_value.set.assert_called_once_with(**expected_task_settings)
self.assertEqual(finalize_chain, 'When not mocked, this would be a celery chain')
self.assertEqual(errback, finalize_run_task_as_errback.si())
self.assertEqual(chain.call_count, 1)
# Grab the args for the first (only) call
chain_inputs = chain.call_args[0]
# The result of setting the args & settings for each task,
# which unmocked would be a task signature, should be passed to celery.chain
expected_chain_inputs = (
example_finalize_run_hook_task.si.return_value.set.return_value,
prepare_for_export_zip_task.s.return_value.set.return_value,
zip_file_task.s.return_value.set.return_value,
finalize_run_task.si.return_value.set.return_value,
)
self.assertEqual(chain_inputs, expected_chain_inputs)
|
[
"[email protected]"
] | |
27e978b4cc4eba7674939dc5cb9dd6908983204f
|
2ccfe89f6a647dd5c121fcd9ba044c59bc6f5319
|
/gamma/app/settings.py
|
2c596b26ed129a32d51abd0003b8de006d3a3d55
|
[] |
no_license
|
another-it-team/no-more-math
|
ec30409db053e5c04b90ad53923d59654eff5787
|
3d3c7f946e9dd3a44ab892b2f96ac095025dafd8
|
refs/heads/master
| 2020-03-22T10:02:20.014604 | 2018-07-06T16:59:10 | 2018-07-06T16:59:10 | 139,876,832 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 16 |
py
|
../ settings.py
|
[
"[email protected]"
] | |
f6e4d32e7bb52dd5732366ab2f7239dcf9a91069
|
c8a86573c16934149dfc768f44fec1b8152b4e97
|
/interface/jgj_login_test.py
|
d639844963a12ade19b1a63d18599e42963a9ff9
|
[] |
no_license
|
slp520/AutoTest
|
6eb09279168553955a766bfae5ca7bdfdf86ab40
|
fd3203b129f7541ea78faa5a216fe3bf06fdff17
|
refs/heads/master
| 2021-12-14T03:31:29.957372 | 2021-11-12T02:13:01 | 2021-11-12T02:13:01 | 161,150,793 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,274 |
py
|
import requests
import unittest
import json
# Login API tests
class GetLoginTest(unittest.TestCase):
def setUp(self):
self.base_url = 'http://10.12.21.115:61021/jgj/api/user/login'
self.smg_url = 'http://10.12.21.115:61021/jgj/api/user/smgsend'
self.case_name = ''
def test_login(self):
        '''Verify the normal login flow'''
body_login ={"captcha":"1234",
"mobile":"13058019302",
"password":"BxOqKRgGgnfbQp6kLhIdm8jlbMIT8xcK/WpFx0CzJIeSQHyhjrGjSGf4FmMlZ2pdJn9HgirwlClcKf1aHjPSAd9SkSe9Nztkk10L9G6aUDL84e1zKMjXRoeF3g3inkNtBZfkf8YYFUDdTydDulKNpIQRpZuHu83NnG7isr57tkBwg9/fVPIG6P7Irf/35TcH9/s2NeV7hyBCWuDn4Zt6ueaSVjdfJ8u6iklkaqsNpwLDizKPoqoNnaDc/MWGj4zlnqdJJpAxQroRZ8+1AMbsY6bpQTCyI7gQNoq4BCOfz/owRZNEUaaRi/cSMcMUKiJoDWUl/MBnFKx1QSxjGbsQLQ=="}
body_smg = {
"mobile":"13058019302",
"type":"1"
}
header = {}
        r_smg = requests.post(self.smg_url, data=body_smg, headers=header)
        r = requests.post(self.base_url, data=body_login, headers=header)
        result = r.json()
        self.assertEqual(result['code'], '200')
        self.assertEqual(result['msg'], "一切正确,没有错误发生")  # "Everything is correct, no errors occurred"
def test_login_nomobile(self):
        '''Verify login with an empty mobile number'''
body_login = {"captcha": "1234",
"mobile": " ",
"password": "BxOqKRgGgnfbQp6kLhIdm8jlbMIT8xcK/WpFx0CzJIeSQHyhjrGjSGf4FmMlZ2pdJn9HgirwlClcKf1aHjPSAd9SkSe9Nztkk10L9G6aUDL84e1zKMjXRoeF3g3inkNtBZfkf8YYFUDdTydDulKNpIQRpZuHu83NnG7isr57tkBwg9/fVPIG6P7Irf/35TcH9/s2NeV7hyBCWuDn4Zt6ueaSVjdfJ8u6iklkaqsNpwLDizKPoqoNnaDc/MWGj4zlnqdJJpAxQroRZ8+1AMbsY6bpQTCyI7gQNoq4BCOfz/owRZNEUaaRi/cSMcMUKiJoDWUl/MBnFKx1QSxjGbsQLQ=="
}
body_smg = {
"mobile": "13058019302",
"type": "1"
}
header = {}
        r_smg = requests.post(self.smg_url, data=body_smg, headers=header)
        r = requests.post(self.base_url, data=body_login, headers=header)
        result = r.json()
        print(result)
        self.assertEqual(result['code'], '40301')
        self.assertEqual(result['msg'], "参数缺失")  # "Missing parameter"
def test_login_wrong_password(self):
        '''Verify login with a wrong password'''
body_login = {"captcha": "1234",
"mobile": "13058019302",
"password": "Iz2vdNMIDMJHQVEYA6l+ULzwOwN/SVJXVLw+IMlZf7BeAHxZ5nSmMgC7dHPPxiksKq7qzzmoObEwtBFWeJrbH+TY7OSEEIuuFdB57NRFyvDTjSvufFHfOacqlMwIfuC5PYbqiyZmM9EiwDR+n8HF2shoFI0V0P+uiy+Taf0CD+qCcyFYQE7z49zOTYVjOm9kUfW88HNXlOBzlpsHTSJG1A8jOXCwglNGP1ZliXtiGd5tmB4W+E1HA4xU+xuoUO5hEXyqkM/kgXrUWHRDl7V/UROVsXo4aITpHon/ts5tlxdIqDytmIBgEV7dhCIOtpyux+uzCzohTV1p1vXMatOYhw=="
}
body_smg = {
"mobile": "13058019302",
"type": "1"
}
header = {}
        r_smg = requests.post(self.smg_url, data=body_smg, headers=header)
        r = requests.post(self.base_url, data=body_login, headers=header)
        result = r.json()
        print(result)
        self.assertEqual(result['code'], '40352')
        self.assertEqual(result['msg'], "用户名或密码错误")  # "Wrong username or password"
def test_login_nocaptcha(self):
        '''Verify login with an empty captcha'''
body_login = {"captcha": " ",
"mobile": "13058019302",
"password": "BxOqKRgGgnfbQp6kLhIdm8jlbMIT8xcK/WpFx0CzJIeSQHyhjrGjSGf4FmMlZ2pdJn9HgirwlClcKf1aHjPSAd9SkSe9Nztkk10L9G6aUDL84e1zKMjXRoeF3g3inkNtBZfkf8YYFUDdTydDulKNpIQRpZuHu83NnG7isr57tkBwg9/fVPIG6P7Irf/35TcH9/s2NeV7hyBCWuDn4Zt6ueaSVjdfJ8u6iklkaqsNpwLDizKPoqoNnaDc/MWGj4zlnqdJJpAxQroRZ8+1AMbsY6bpQTCyI7gQNoq4BCOfz/owRZNEUaaRi/cSMcMUKiJoDWUl/MBnFKx1QSxjGbsQLQ=="
}
body_smg = {
"mobile": "13058019302",
"type": "1"
}
header = {}
        r_smg = requests.post(self.smg_url, data=body_smg, headers=header)
        r = requests.post(self.base_url, data=body_login, headers=header)
        result = r.json()
        print(result)
        self.assertEqual(result['code'], 40355)
        self.assertEqual(result['msg'], '验证码错误')  # "Wrong captcha"
def test_login_wrong_captcha(self):
        '''Verify login with a wrong captcha'''
body_login = {"captcha": "122122",
"mobile": "13058019302",
"password": "BxOqKRgGgnfbQp6kLhIdm8jlbMIT8xcK/WpFx0CzJIeSQHyhjrGjSGf4FmMlZ2pdJn9HgirwlClcKf1aHjPSAd9SkSe9Nztkk10L9G6aUDL84e1zKMjXRoeF3g3inkNtBZfkf8YYFUDdTydDulKNpIQRpZuHu83NnG7isr57tkBwg9/fVPIG6P7Irf/35TcH9/s2NeV7hyBCWuDn4Zt6ueaSVjdfJ8u6iklkaqsNpwLDizKPoqoNnaDc/MWGj4zlnqdJJpAxQroRZ8+1AMbsY6bpQTCyI7gQNoq4BCOfz/owRZNEUaaRi/cSMcMUKiJoDWUl/MBnFKx1QSxjGbsQLQ=="
}
body_smg = {
"mobile": "13058019302",
"type": "1"
}
header = {}
        r_smg = requests.post(self.smg_url, data=body_smg, headers=header)
        r = requests.post(self.base_url, data=body_login, headers=header)
        result = r.json()
        print(result)
        self.assertEqual(result['code'], 40355)
        self.assertEqual(result['msg'], "验证码错误")  # "Wrong captcha"
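    # A minimal sketch (hypothetical helper, not used by the tests above):
    # every case repeats the same send-captcha-then-login sequence, so it
    # could be factored out roughly like this.
    def _post_login(self, body_login):
        body_smg = {"mobile": "13058019302", "type": "1"}
        requests.post(self.smg_url, data=body_smg, headers={})
        r = requests.post(self.base_url, data=body_login, headers={})
        return r.json()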
if __name__ == '__main__':
    unittest.main()
|
[
"[email protected]"
] | |
b6dd4677c551c223c34be40dc606b4e5ec985489
|
9e236f93f5f4d55af0e98dac56866c976899ee1d
|
/.c9/metadata/workspace/Unused/NotUseful/connection.py
|
0fabf1e2eb77adfd3304b6b2219da1e8ad9ac73a
|
[] |
no_license
|
willyao99/botterthanyouAI
|
965f13dea2437aa4d65301701415ba8977b5640e
|
95792d842c654b6593fe7a895a6ba1266aa01cb7
|
refs/heads/master
| 2021-05-02T06:34:55.721476 | 2017-12-07T14:47:13 | 2017-12-07T14:47:13 | 120,860,006 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 442 |
py
|
{"filter":false,"title":"connection.py","tooltip":"/Unused/NotUseful/connection.py","undoManager":{"mark":-1,"position":-1,"stack":[]},"ace":{"folds":[],"scrolltop":0,"scrollleft":0,"selection":{"start":{"row":12,"column":40},"end":{"row":12,"column":40},"isBackwards":false},"options":{"guessTabSize":true,"useWrapMode":false,"wrapToView":true},"firstLineState":0},"timestamp":1512620019447,"hash":"68b42dc99866235c2900529b40eeb2f73a1f6bda"}
|
[
"[email protected]"
] | |
ac9322695c8338f7c5b6352dc885ec393d8b1b9a
|
ca66a4283c5137f835377c3ed9a37128fcaed037
|
/Lib/site-packages/pandas/tests/indexes/test_base.py
|
48214ef4e92a8217fc8c6c342ae4de28f448658f
|
[] |
no_license
|
NamithaKonda09/majorProject
|
f377f7a77d40939a659a3e59f5f1b771d88889ad
|
4eff4ff18fa828c6278b00244ff2e66522e0cd51
|
refs/heads/master
| 2023-06-04T20:25:38.450271 | 2021-06-24T19:03:46 | 2021-06-24T19:03:46 | 370,240,034 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 104,196 |
py
|
# -*- coding: utf-8 -*-
from collections import defaultdict
from datetime import datetime, timedelta
import math
import operator
import sys
import numpy as np
import pytest
from pandas._libs.tslib import Timestamp
from pandas.compat import (
PY3, PY35, PY36, StringIO, lrange, lzip, range, text_type, u, zip)
from pandas.compat.numpy import np_datetime64_compat
from pandas.core.dtypes.common import is_unsigned_integer_dtype
from pandas.core.dtypes.generic import ABCIndex
import pandas as pd
from pandas import (
CategoricalIndex, DataFrame, DatetimeIndex, Float64Index, Int64Index,
PeriodIndex, RangeIndex, Series, TimedeltaIndex, UInt64Index, date_range,
isna, period_range)
import pandas.core.config as cf
from pandas.core.index import _get_combined_index, ensure_index_from_sequences
from pandas.core.indexes.api import Index, MultiIndex
from pandas.core.sorting import safe_sort
from pandas.tests.indexes.common import Base
import pandas.util.testing as tm
from pandas.util.testing import assert_almost_equal
class TestIndex(Base):
_holder = Index
def setup_method(self, method):
self.indices = dict(unicodeIndex=tm.makeUnicodeIndex(100),
strIndex=tm.makeStringIndex(100),
dateIndex=tm.makeDateIndex(100),
periodIndex=tm.makePeriodIndex(100),
tdIndex=tm.makeTimedeltaIndex(100),
intIndex=tm.makeIntIndex(100),
uintIndex=tm.makeUIntIndex(100),
rangeIndex=tm.makeRangeIndex(100),
floatIndex=tm.makeFloatIndex(100),
boolIndex=Index([True, False]),
catIndex=tm.makeCategoricalIndex(100),
empty=Index([]),
tuples=MultiIndex.from_tuples(lzip(
['foo', 'bar', 'baz'], [1, 2, 3])),
repeats=Index([0, 0, 1, 1, 2, 2]))
self.setup_indices()
def create_index(self):
return Index(list('abcde'))
    def generate_index_types(self, skip_index_keys=()):
"""
Return a generator of the various index types, leaving
out the ones with a key in skip_index_keys
"""
for key, index in self.indices.items():
if key not in skip_index_keys:
yield key, index
def test_can_hold_identifiers(self):
index = self.create_index()
key = index[0]
assert index._can_hold_identifiers_and_holds_name(key) is True
def test_new_axis(self):
new_index = self.dateIndex[None, :]
assert new_index.ndim == 2
assert isinstance(new_index, np.ndarray)
def test_copy_and_deepcopy(self):
new_copy2 = self.intIndex.copy(dtype=int)
assert new_copy2.dtype.kind == 'i'
@pytest.mark.parametrize("attr", ['strIndex', 'dateIndex'])
def test_constructor_regular(self, attr):
# regular instance creation
index = getattr(self, attr)
tm.assert_contains_all(index, index)
def test_constructor_casting(self):
# casting
arr = np.array(self.strIndex)
index = Index(arr)
tm.assert_contains_all(arr, index)
tm.assert_index_equal(self.strIndex, index)
def test_constructor_copy(self):
# copy
arr = np.array(self.strIndex)
index = Index(arr, copy=True, name='name')
assert isinstance(index, Index)
assert index.name == 'name'
tm.assert_numpy_array_equal(arr, index.values)
arr[0] = "SOMEBIGLONGSTRING"
assert index[0] != "SOMEBIGLONGSTRING"
# what to do here?
# arr = np.array(5.)
# pytest.raises(Exception, arr.view, Index)
def test_constructor_corner(self):
# corner case
pytest.raises(TypeError, Index, 0)
@pytest.mark.parametrize("index_vals", [
[('A', 1), 'B'], ['B', ('A', 1)]])
def test_construction_list_mixed_tuples(self, index_vals):
# see gh-10697: if we are constructing from a mixed list of tuples,
# make sure that we are independent of the sorting order.
index = Index(index_vals)
assert isinstance(index, Index)
assert not isinstance(index, MultiIndex)
@pytest.mark.parametrize('na_value', [None, np.nan])
@pytest.mark.parametrize('vtype', [list, tuple, iter])
def test_construction_list_tuples_nan(self, na_value, vtype):
# GH 18505 : valid tuples containing NaN
values = [(1, 'two'), (3., na_value)]
result = Index(vtype(values))
expected = MultiIndex.from_tuples(values)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("cast_as_obj", [True, False])
@pytest.mark.parametrize("index", [
pd.date_range('2015-01-01 10:00', freq='D', periods=3,
tz='US/Eastern', name='Green Eggs & Ham'), # DTI with tz
pd.date_range('2015-01-01 10:00', freq='D', periods=3), # DTI no tz
pd.timedelta_range('1 days', freq='D', periods=3), # td
pd.period_range('2015-01-01', freq='D', periods=3) # period
])
def test_constructor_from_index_dtlike(self, cast_as_obj, index):
if cast_as_obj:
result = pd.Index(index.astype(object))
else:
result = pd.Index(index)
tm.assert_index_equal(result, index)
if isinstance(index, pd.DatetimeIndex):
assert result.tz == index.tz
if cast_as_obj:
# GH#23524 check that Index(dti, dtype=object) does not
# incorrectly raise ValueError, and that nanoseconds are not
# dropped
index += pd.Timedelta(nanoseconds=50)
result = pd.Index(index, dtype=object)
assert result.dtype == np.object_
assert list(result) == list(index)
@pytest.mark.parametrize("index,has_tz", [
(pd.date_range('2015-01-01 10:00', freq='D', periods=3,
tz='US/Eastern'), True), # datetimetz
(pd.timedelta_range('1 days', freq='D', periods=3), False), # td
(pd.period_range('2015-01-01', freq='D', periods=3), False) # period
])
def test_constructor_from_series_dtlike(self, index, has_tz):
result = pd.Index(pd.Series(index))
tm.assert_index_equal(result, index)
if has_tz:
assert result.tz == index.tz
@pytest.mark.parametrize("klass", [Index, DatetimeIndex])
def test_constructor_from_series(self, klass):
expected = DatetimeIndex([Timestamp('20110101'), Timestamp('20120101'),
Timestamp('20130101')])
s = Series([Timestamp('20110101'), Timestamp('20120101'),
Timestamp('20130101')])
result = klass(s)
tm.assert_index_equal(result, expected)
def test_constructor_from_series_freq(self):
# GH 6273
# create from a series, passing a freq
dts = ['1-1-1990', '2-1-1990', '3-1-1990', '4-1-1990', '5-1-1990']
expected = DatetimeIndex(dts, freq='MS')
s = Series(pd.to_datetime(dts))
result = DatetimeIndex(s, freq='MS')
tm.assert_index_equal(result, expected)
def test_constructor_from_frame_series_freq(self):
# GH 6273
# create from a series, passing a freq
dts = ['1-1-1990', '2-1-1990', '3-1-1990', '4-1-1990', '5-1-1990']
expected = DatetimeIndex(dts, freq='MS')
df = pd.DataFrame(np.random.rand(5, 3))
df['date'] = dts
result = DatetimeIndex(df['date'], freq='MS')
assert df['date'].dtype == object
expected.name = 'date'
tm.assert_index_equal(result, expected)
expected = pd.Series(dts, name='date')
tm.assert_series_equal(df['date'], expected)
# GH 6274
# infer freq of same
freq = pd.infer_freq(df['date'])
assert freq == 'MS'
@pytest.mark.parametrize("array", [
np.arange(5), np.array(['a', 'b', 'c']), date_range(
'2000-01-01', periods=3).values
])
def test_constructor_ndarray_like(self, array):
# GH 5460#issuecomment-44474502
# it should be possible to convert any object that satisfies the numpy
# ndarray interface directly into an Index
class ArrayLike(object):
def __init__(self, array):
self.array = array
def __array__(self, dtype=None):
return self.array
expected = pd.Index(array)
result = pd.Index(ArrayLike(array))
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize('dtype', [
int, 'int64', 'int32', 'int16', 'int8', 'uint64', 'uint32',
'uint16', 'uint8'])
def test_constructor_int_dtype_float(self, dtype):
# GH 18400
if is_unsigned_integer_dtype(dtype):
index_type = UInt64Index
else:
index_type = Int64Index
expected = index_type([0, 1, 2, 3])
result = Index([0., 1., 2., 3.], dtype=dtype)
tm.assert_index_equal(result, expected)
def test_constructor_int_dtype_nan(self):
# see gh-15187
data = [np.nan]
expected = Float64Index(data)
result = Index(data, dtype='float')
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("dtype", ['int64', 'uint64'])
def test_constructor_int_dtype_nan_raises(self, dtype):
# see gh-15187
data = [np.nan]
msg = "cannot convert"
with pytest.raises(ValueError, match=msg):
Index(data, dtype=dtype)
def test_constructor_no_pandas_array(self):
ser = pd.Series([1, 2, 3])
result = pd.Index(ser.array)
expected = pd.Index([1, 2, 3])
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("klass,dtype,na_val", [
(pd.Float64Index, np.float64, np.nan),
(pd.DatetimeIndex, 'datetime64[ns]', pd.NaT)
])
def test_index_ctor_infer_nan_nat(self, klass, dtype, na_val):
# GH 13467
na_list = [na_val, na_val]
expected = klass(na_list)
assert expected.dtype == dtype
result = Index(na_list)
tm.assert_index_equal(result, expected)
result = Index(np.array(na_list))
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("pos", [0, 1])
@pytest.mark.parametrize("klass,dtype,ctor", [
(pd.DatetimeIndex, 'datetime64[ns]', np.datetime64('nat')),
(pd.TimedeltaIndex, 'timedelta64[ns]', np.timedelta64('nat'))
])
def test_index_ctor_infer_nat_dt_like(self, pos, klass, dtype, ctor,
nulls_fixture):
expected = klass([pd.NaT, pd.NaT])
assert expected.dtype == dtype
data = [ctor]
data.insert(pos, nulls_fixture)
result = Index(data)
tm.assert_index_equal(result, expected)
result = Index(np.array(data, dtype=object))
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("swap_objs", [True, False])
def test_index_ctor_nat_result(self, swap_objs):
# mixed np.datetime64/timedelta64 nat results in object
data = [np.datetime64('nat'), np.timedelta64('nat')]
if swap_objs:
data = data[::-1]
expected = pd.Index(data, dtype=object)
tm.assert_index_equal(Index(data), expected)
tm.assert_index_equal(Index(np.array(data, dtype=object)), expected)
def test_index_ctor_infer_periodindex(self):
xp = period_range('2012-1-1', freq='M', periods=3)
rs = Index(xp)
tm.assert_index_equal(rs, xp)
assert isinstance(rs, PeriodIndex)
@pytest.mark.parametrize("vals,dtype", [
([1, 2, 3, 4, 5], 'int'), ([1.1, np.nan, 2.2, 3.0], 'float'),
(['A', 'B', 'C', np.nan], 'obj')
])
def test_constructor_simple_new(self, vals, dtype):
index = Index(vals, name=dtype)
result = index._simple_new(index.values, dtype)
tm.assert_index_equal(result, index)
@pytest.mark.parametrize("vals", [
[1, 2, 3], np.array([1, 2, 3]), np.array([1, 2, 3], dtype=int),
# below should coerce
[1., 2., 3.], np.array([1., 2., 3.], dtype=float)
])
def test_constructor_dtypes_to_int64(self, vals):
index = Index(vals, dtype=int)
assert isinstance(index, Int64Index)
@pytest.mark.parametrize("vals", [
[1, 2, 3], [1., 2., 3.], np.array([1., 2., 3.]),
np.array([1, 2, 3], dtype=int), np.array([1., 2., 3.], dtype=float)
])
def test_constructor_dtypes_to_float64(self, vals):
index = Index(vals, dtype=float)
assert isinstance(index, Float64Index)
@pytest.mark.parametrize("cast_index", [True, False])
@pytest.mark.parametrize("vals", [
[True, False, True], np.array([True, False, True], dtype=bool)
])
def test_constructor_dtypes_to_object(self, cast_index, vals):
if cast_index:
index = Index(vals, dtype=bool)
else:
index = Index(vals)
assert isinstance(index, Index)
assert index.dtype == object
@pytest.mark.parametrize("vals", [
[1, 2, 3], np.array([1, 2, 3], dtype=int),
np.array([np_datetime64_compat('2011-01-01'),
np_datetime64_compat('2011-01-02')]),
[datetime(2011, 1, 1), datetime(2011, 1, 2)]
])
def test_constructor_dtypes_to_categorical(self, vals):
index = Index(vals, dtype='category')
assert isinstance(index, CategoricalIndex)
@pytest.mark.parametrize("cast_index", [True, False])
@pytest.mark.parametrize("vals", [
Index(np.array([np_datetime64_compat('2011-01-01'),
np_datetime64_compat('2011-01-02')])),
Index([datetime(2011, 1, 1), datetime(2011, 1, 2)])
])
def test_constructor_dtypes_to_datetime(self, cast_index, vals):
if cast_index:
index = Index(vals, dtype=object)
assert isinstance(index, Index)
assert index.dtype == object
else:
index = Index(vals)
assert isinstance(index, DatetimeIndex)
@pytest.mark.parametrize("cast_index", [True, False])
@pytest.mark.parametrize("vals", [
np.array([np.timedelta64(1, 'D'), np.timedelta64(1, 'D')]),
[timedelta(1), timedelta(1)]
])
def test_constructor_dtypes_to_timedelta(self, cast_index, vals):
if cast_index:
index = Index(vals, dtype=object)
assert isinstance(index, Index)
assert index.dtype == object
else:
index = Index(vals)
assert isinstance(index, TimedeltaIndex)
@pytest.mark.parametrize("attr, utc", [
['values', False],
['asi8', True]])
@pytest.mark.parametrize("klass", [pd.Index, pd.DatetimeIndex])
def test_constructor_dtypes_datetime(self, tz_naive_fixture, attr, utc,
klass):
# Test constructing with a datetimetz dtype
# .values produces numpy datetimes, so these are considered naive
# .asi8 produces integers, so these are considered epoch timestamps
# ^the above will be true in a later version. Right now we `.view`
# the i8 values as NS_DTYPE, effectively treating them as wall times.
index = pd.date_range('2011-01-01', periods=5)
arg = getattr(index, attr)
index = index.tz_localize(tz_naive_fixture)
dtype = index.dtype
# TODO(GH-24559): Remove the sys.modules and warnings
# not sure what this is from. It's Py2 only.
modules = [sys.modules['pandas.core.indexes.base']]
if (tz_naive_fixture and attr == "asi8" and
str(tz_naive_fixture) not in ('UTC', 'tzutc()')):
ex_warn = FutureWarning
else:
ex_warn = None
# stacklevel is checked elsewhere. We don't do it here since
        # Index will have a frame, throwing off the expected.
with tm.assert_produces_warning(ex_warn, check_stacklevel=False,
clear=modules):
result = klass(arg, tz=tz_naive_fixture)
tm.assert_index_equal(result, index)
with tm.assert_produces_warning(ex_warn, check_stacklevel=False):
result = klass(arg, dtype=dtype)
tm.assert_index_equal(result, index)
with tm.assert_produces_warning(ex_warn, check_stacklevel=False):
result = klass(list(arg), tz=tz_naive_fixture)
tm.assert_index_equal(result, index)
with tm.assert_produces_warning(ex_warn, check_stacklevel=False):
result = klass(list(arg), dtype=dtype)
tm.assert_index_equal(result, index)
@pytest.mark.parametrize("attr", ['values', 'asi8'])
@pytest.mark.parametrize("klass", [pd.Index, pd.TimedeltaIndex])
def test_constructor_dtypes_timedelta(self, attr, klass):
index = pd.timedelta_range('1 days', periods=5)
dtype = index.dtype
values = getattr(index, attr)
result = klass(values, dtype=dtype)
tm.assert_index_equal(result, index)
result = klass(list(values), dtype=dtype)
tm.assert_index_equal(result, index)
@pytest.mark.parametrize("value", [[], iter([]), (x for x in [])])
@pytest.mark.parametrize("klass",
[Index, Float64Index, Int64Index, UInt64Index,
CategoricalIndex, DatetimeIndex, TimedeltaIndex])
def test_constructor_empty(self, value, klass):
empty = klass(value)
assert isinstance(empty, klass)
assert not len(empty)
@pytest.mark.parametrize("empty,klass", [
(PeriodIndex([], freq='B'), PeriodIndex),
(PeriodIndex(iter([]), freq='B'), PeriodIndex),
(PeriodIndex((x for x in []), freq='B'), PeriodIndex),
(RangeIndex(step=1), pd.RangeIndex),
(MultiIndex(levels=[[1, 2], ['blue', 'red']],
codes=[[], []]), MultiIndex)
])
def test_constructor_empty_special(self, empty, klass):
assert isinstance(empty, klass)
assert not len(empty)
def test_constructor_overflow_int64(self):
# see gh-15832
msg = ("The elements provided in the data cannot "
"all be casted to the dtype int64")
with pytest.raises(OverflowError, match=msg):
Index([np.iinfo(np.uint64).max - 1], dtype="int64")
@pytest.mark.xfail(reason="see GH#21311: Index "
"doesn't enforce dtype argument")
def test_constructor_cast(self):
msg = "could not convert string to float"
with pytest.raises(ValueError, match=msg):
Index(["a", "b", "c"], dtype=float)
def test_view_with_args(self):
restricted = ['unicodeIndex', 'strIndex', 'catIndex', 'boolIndex',
'empty']
for i in restricted:
ind = self.indices[i]
# with arguments
pytest.raises(TypeError, lambda: ind.view('i8'))
# these are ok
for i in list(set(self.indices.keys()) - set(restricted)):
ind = self.indices[i]
# with arguments
ind.view('i8')
def test_astype(self):
casted = self.intIndex.astype('i8')
# it works!
casted.get_loc(5)
# pass on name
self.intIndex.name = 'foobar'
casted = self.intIndex.astype('i8')
assert casted.name == 'foobar'
def test_equals_object(self):
# same
assert Index(['a', 'b', 'c']).equals(Index(['a', 'b', 'c']))
@pytest.mark.parametrize("comp", [
Index(['a', 'b']), Index(['a', 'b', 'd']), ['a', 'b', 'c']])
def test_not_equals_object(self, comp):
assert not Index(['a', 'b', 'c']).equals(comp)
def test_insert(self):
# GH 7256
# validate neg/pos inserts
result = Index(['b', 'c', 'd'])
# test 0th element
tm.assert_index_equal(Index(['a', 'b', 'c', 'd']),
result.insert(0, 'a'))
# test Nth element that follows Python list behavior
tm.assert_index_equal(Index(['b', 'c', 'e', 'd']),
result.insert(-1, 'e'))
# test loc +/- neq (0, -1)
tm.assert_index_equal(result.insert(1, 'z'), result.insert(-2, 'z'))
# test empty
null_index = Index([])
tm.assert_index_equal(Index(['a']), null_index.insert(0, 'a'))
def test_insert_missing(self, nulls_fixture):
# GH 22295
# test there is no mangling of NA values
expected = Index(['a', nulls_fixture, 'b', 'c'])
result = Index(list('abc')).insert(1, nulls_fixture)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("pos,expected", [
(0, Index(['b', 'c', 'd'], name='index')),
(-1, Index(['a', 'b', 'c'], name='index'))
])
def test_delete(self, pos, expected):
index = Index(['a', 'b', 'c', 'd'], name='index')
result = index.delete(pos)
tm.assert_index_equal(result, expected)
assert result.name == expected.name
def test_delete_raises(self):
index = Index(['a', 'b', 'c', 'd'], name='index')
with pytest.raises((IndexError, ValueError)):
# either depending on numpy version
index.delete(5)
def test_identical(self):
# index
i1 = Index(['a', 'b', 'c'])
i2 = Index(['a', 'b', 'c'])
assert i1.identical(i2)
i1 = i1.rename('foo')
assert i1.equals(i2)
assert not i1.identical(i2)
i2 = i2.rename('foo')
assert i1.identical(i2)
i3 = Index([('a', 'a'), ('a', 'b'), ('b', 'a')])
i4 = Index([('a', 'a'), ('a', 'b'), ('b', 'a')], tupleize_cols=False)
assert not i3.identical(i4)
def test_is_(self):
ind = Index(range(10))
assert ind.is_(ind)
assert ind.is_(ind.view().view().view().view())
assert not ind.is_(Index(range(10)))
assert not ind.is_(ind.copy())
assert not ind.is_(ind.copy(deep=False))
assert not ind.is_(ind[:])
assert not ind.is_(np.array(range(10)))
# quasi-implementation dependent
assert ind.is_(ind.view())
ind2 = ind.view()
ind2.name = 'bob'
assert ind.is_(ind2)
assert ind2.is_(ind)
# doesn't matter if Indices are *actually* views of underlying data,
assert not ind.is_(Index(ind.values))
arr = np.array(range(1, 11))
ind1 = Index(arr, copy=False)
ind2 = Index(arr, copy=False)
assert not ind1.is_(ind2)
def test_asof(self):
d = self.dateIndex[0]
assert self.dateIndex.asof(d) == d
assert isna(self.dateIndex.asof(d - timedelta(1)))
d = self.dateIndex[-1]
assert self.dateIndex.asof(d + timedelta(1)) == d
d = self.dateIndex[0].to_pydatetime()
assert isinstance(self.dateIndex.asof(d), Timestamp)
def test_asof_datetime_partial(self):
index = pd.date_range('2010-01-01', periods=2, freq='m')
expected = Timestamp('2010-02-28')
result = index.asof('2010-02')
assert result == expected
assert not isinstance(result, Index)
def test_nanosecond_index_access(self):
s = Series([Timestamp('20130101')]).values.view('i8')[0]
r = DatetimeIndex([s + 50 + i for i in range(100)])
x = Series(np.random.randn(100), index=r)
first_value = x.asof(x.index[0])
# this does not yet work, as parsing strings is done via dateutil
# assert first_value == x['2013-01-01 00:00:00.000000050+0000']
expected_ts = np_datetime64_compat('2013-01-01 00:00:00.000000050+'
'0000', 'ns')
assert first_value == x[Timestamp(expected_ts)]
def test_booleanindex(self):
boolIndex = np.repeat(True, len(self.strIndex)).astype(bool)
boolIndex[5:30:2] = False
subIndex = self.strIndex[boolIndex]
for i, val in enumerate(subIndex):
assert subIndex.get_loc(val) == i
subIndex = self.strIndex[list(boolIndex)]
for i, val in enumerate(subIndex):
assert subIndex.get_loc(val) == i
def test_fancy(self):
sl = self.strIndex[[1, 2, 3]]
for i in sl:
assert i == sl[sl.get_loc(i)]
@pytest.mark.parametrize("attr", [
'strIndex', 'intIndex', 'floatIndex'])
@pytest.mark.parametrize("dtype", [np.int_, np.bool_])
def test_empty_fancy(self, attr, dtype):
empty_arr = np.array([], dtype=dtype)
index = getattr(self, attr)
empty_index = index.__class__([])
assert index[[]].identical(empty_index)
assert index[empty_arr].identical(empty_index)
@pytest.mark.parametrize("attr", [
'strIndex', 'intIndex', 'floatIndex'])
def test_empty_fancy_raises(self, attr):
# pd.DatetimeIndex is excluded, because it overrides getitem and should
# be tested separately.
empty_farr = np.array([], dtype=np.float_)
index = getattr(self, attr)
empty_index = index.__class__([])
assert index[[]].identical(empty_index)
# np.ndarray only accepts ndarray of int & bool dtypes, so should Index
pytest.raises(IndexError, index.__getitem__, empty_farr)
@pytest.mark.parametrize("sort", [None, False])
def test_intersection(self, sort):
first = self.strIndex[:20]
second = self.strIndex[:10]
intersect = first.intersection(second, sort=sort)
if sort is None:
tm.assert_index_equal(intersect, second.sort_values())
assert tm.equalContents(intersect, second)
# Corner cases
inter = first.intersection(first, sort=sort)
assert inter is first
@pytest.mark.parametrize("index2,keeps_name", [
(Index([3, 4, 5, 6, 7], name="index"), True), # preserve same name
(Index([3, 4, 5, 6, 7], name="other"), False), # drop diff names
(Index([3, 4, 5, 6, 7]), False)])
@pytest.mark.parametrize("sort", [None, False])
def test_intersection_name_preservation(self, index2, keeps_name, sort):
index1 = Index([1, 2, 3, 4, 5], name='index')
expected = Index([3, 4, 5])
result = index1.intersection(index2, sort)
if keeps_name:
expected.name = 'index'
assert result.name == expected.name
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("first_name,second_name,expected_name", [
('A', 'A', 'A'), ('A', 'B', None), (None, 'B', None)])
@pytest.mark.parametrize("sort", [None, False])
def test_intersection_name_preservation2(self, first_name, second_name,
expected_name, sort):
first = self.strIndex[5:20]
second = self.strIndex[:10]
first.name = first_name
second.name = second_name
intersect = first.intersection(second, sort=sort)
assert intersect.name == expected_name
@pytest.mark.parametrize("index2,keeps_name", [
(Index([4, 7, 6, 5, 3], name='index'), True),
(Index([4, 7, 6, 5, 3], name='other'), False)])
@pytest.mark.parametrize("sort", [None, False])
def test_intersection_monotonic(self, index2, keeps_name, sort):
index1 = Index([5, 3, 2, 4, 1], name='index')
expected = Index([5, 3, 4])
if keeps_name:
expected.name = "index"
result = index1.intersection(index2, sort=sort)
if sort is None:
expected = expected.sort_values()
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("index2,expected_arr", [
(Index(['B', 'D']), ['B']),
(Index(['B', 'D', 'A']), ['A', 'B', 'A'])])
@pytest.mark.parametrize("sort", [None, False])
def test_intersection_non_monotonic_non_unique(self, index2, expected_arr,
sort):
# non-monotonic non-unique
index1 = Index(['A', 'B', 'A', 'C'])
expected = Index(expected_arr, dtype='object')
result = index1.intersection(index2, sort=sort)
if sort is None:
expected = expected.sort_values()
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("sort", [None, False])
def test_intersect_str_dates(self, sort):
dt_dates = [datetime(2012, 2, 9), datetime(2012, 2, 22)]
i1 = Index(dt_dates, dtype=object)
i2 = Index(['aa'], dtype=object)
result = i2.intersection(i1, sort=sort)
assert len(result) == 0
def test_intersect_nosort(self):
result = pd.Index(['c', 'b', 'a']).intersection(['b', 'a'])
expected = pd.Index(['b', 'a'])
tm.assert_index_equal(result, expected)
def test_intersection_equal_sort(self):
idx = pd.Index(['c', 'a', 'b'])
tm.assert_index_equal(idx.intersection(idx, sort=False), idx)
tm.assert_index_equal(idx.intersection(idx, sort=None), idx)
@pytest.mark.xfail(reason="Not implemented")
def test_intersection_equal_sort_true(self):
# TODO decide on True behaviour
idx = pd.Index(['c', 'a', 'b'])
sorted_ = pd.Index(['a', 'b', 'c'])
tm.assert_index_equal(idx.intersection(idx, sort=True), sorted_)
@pytest.mark.parametrize("sort", [None, False])
def test_chained_union(self, sort):
# Chained unions handles names correctly
i1 = Index([1, 2], name='i1')
i2 = Index([5, 6], name='i2')
i3 = Index([3, 4], name='i3')
union = i1.union(i2.union(i3, sort=sort), sort=sort)
expected = i1.union(i2, sort=sort).union(i3, sort=sort)
tm.assert_index_equal(union, expected)
j1 = Index([1, 2], name='j1')
j2 = Index([], name='j2')
j3 = Index([], name='j3')
union = j1.union(j2.union(j3, sort=sort), sort=sort)
expected = j1.union(j2, sort=sort).union(j3, sort=sort)
tm.assert_index_equal(union, expected)
@pytest.mark.parametrize("sort", [None, False])
def test_union(self, sort):
        # TODO: Replace with fixture
first = self.strIndex[5:20]
second = self.strIndex[:10]
everything = self.strIndex[:20]
union = first.union(second, sort=sort)
if sort is None:
tm.assert_index_equal(union, everything.sort_values())
assert tm.equalContents(union, everything)
@pytest.mark.parametrize('slice_', [slice(None), slice(0)])
def test_union_sort_other_special(self, slice_):
# https://github.com/pandas-dev/pandas/issues/24959
idx = pd.Index([1, 0, 2])
# default, sort=None
other = idx[slice_]
tm.assert_index_equal(idx.union(other), idx)
tm.assert_index_equal(other.union(idx), idx)
# sort=False
tm.assert_index_equal(idx.union(other, sort=False), idx)
@pytest.mark.xfail(reason="Not implemented")
@pytest.mark.parametrize('slice_', [slice(None), slice(0)])
def test_union_sort_special_true(self, slice_):
# TODO decide on True behaviour
# sort=True
idx = pd.Index([1, 0, 2])
# default, sort=None
other = idx[slice_]
result = idx.union(other, sort=True)
expected = pd.Index([0, 1, 2])
tm.assert_index_equal(result, expected)
def test_union_sort_other_incomparable(self):
# https://github.com/pandas-dev/pandas/issues/24959
idx = pd.Index([1, pd.Timestamp('2000')])
# default (sort=None)
with tm.assert_produces_warning(RuntimeWarning):
result = idx.union(idx[:1])
tm.assert_index_equal(result, idx)
# sort=None
with tm.assert_produces_warning(RuntimeWarning):
result = idx.union(idx[:1], sort=None)
tm.assert_index_equal(result, idx)
# sort=False
result = idx.union(idx[:1], sort=False)
tm.assert_index_equal(result, idx)
@pytest.mark.xfail(reason="Not implemented")
def test_union_sort_other_incomparable_true(self):
# TODO decide on True behaviour
# sort=True
idx = pd.Index([1, pd.Timestamp('2000')])
with pytest.raises(TypeError, match='.*'):
idx.union(idx[:1], sort=True)
@pytest.mark.parametrize("klass", [
np.array, Series, list])
@pytest.mark.parametrize("sort", [None, False])
def test_union_from_iterables(self, klass, sort):
# GH 10149
        # TODO: Replace with fixture
first = self.strIndex[5:20]
second = self.strIndex[:10]
everything = self.strIndex[:20]
case = klass(second.values)
result = first.union(case, sort=sort)
if sort is None:
tm.assert_index_equal(result, everything.sort_values())
assert tm.equalContents(result, everything)
@pytest.mark.parametrize("sort", [None, False])
def test_union_identity(self, sort):
        # TODO: replace with fixture
first = self.strIndex[5:20]
union = first.union(first, sort=sort)
# i.e. identity is not preserved when sort is True
assert (union is first) is (not sort)
union = first.union([], sort=sort)
assert (union is first) is (not sort)
union = Index([]).union(first, sort=sort)
assert (union is first) is (not sort)
@pytest.mark.parametrize("first_list", [list('ba'), list()])
@pytest.mark.parametrize("second_list", [list('ab'), list()])
@pytest.mark.parametrize("first_name, second_name, expected_name", [
('A', 'B', None), (None, 'B', None), ('A', None, None)])
@pytest.mark.parametrize("sort", [None, False])
def test_union_name_preservation(self, first_list, second_list, first_name,
second_name, expected_name, sort):
first = Index(first_list, name=first_name)
second = Index(second_list, name=second_name)
union = first.union(second, sort=sort)
vals = set(first_list).union(second_list)
if sort is None and len(first_list) > 0 and len(second_list) > 0:
expected = Index(sorted(vals), name=expected_name)
tm.assert_index_equal(union, expected)
else:
expected = Index(vals, name=expected_name)
assert tm.equalContents(union, expected)
@pytest.mark.parametrize("sort", [None, False])
def test_union_dt_as_obj(self, sort):
        # TODO: Replace with fixture
firstCat = self.strIndex.union(self.dateIndex)
secondCat = self.strIndex.union(self.strIndex)
if self.dateIndex.dtype == np.object_:
appended = np.append(self.strIndex, self.dateIndex)
else:
appended = np.append(self.strIndex, self.dateIndex.astype('O'))
assert tm.equalContents(firstCat, appended)
assert tm.equalContents(secondCat, self.strIndex)
tm.assert_contains_all(self.strIndex, firstCat)
tm.assert_contains_all(self.strIndex, secondCat)
tm.assert_contains_all(self.dateIndex, firstCat)
@pytest.mark.parametrize("method", ['union', 'intersection', 'difference',
'symmetric_difference'])
def test_setops_disallow_true(self, method):
idx1 = pd.Index(['a', 'b'])
idx2 = pd.Index(['b', 'c'])
with pytest.raises(ValueError, match="The 'sort' keyword only takes"):
getattr(idx1, method)(idx2, sort=True)
def test_map_identity_mapping(self):
# GH 12766
# TODO: replace with fixture
for name, cur_index in self.indices.items():
tm.assert_index_equal(cur_index, cur_index.map(lambda x: x))
def test_map_with_tuples(self):
# GH 12766
# Test that returning a single tuple from an Index
# returns an Index.
index = tm.makeIntIndex(3)
result = tm.makeIntIndex(3).map(lambda x: (x,))
expected = Index([(i,) for i in index])
tm.assert_index_equal(result, expected)
# Test that returning a tuple from a map of a single index
# returns a MultiIndex object.
result = index.map(lambda x: (x, x == 1))
expected = MultiIndex.from_tuples([(i, i == 1) for i in index])
tm.assert_index_equal(result, expected)
def test_map_with_tuples_mi(self):
# Test that returning a single object from a MultiIndex
# returns an Index.
first_level = ['foo', 'bar', 'baz']
multi_index = MultiIndex.from_tuples(lzip(first_level, [1, 2, 3]))
reduced_index = multi_index.map(lambda x: x[0])
tm.assert_index_equal(reduced_index, Index(first_level))
@pytest.mark.parametrize("attr", [
'makeDateIndex', 'makePeriodIndex', 'makeTimedeltaIndex'])
def test_map_tseries_indices_return_index(self, attr):
index = getattr(tm, attr)(10)
expected = Index([1] * 10)
result = index.map(lambda x: 1)
tm.assert_index_equal(expected, result)
def test_map_tseries_indices_accsr_return_index(self):
date_index = tm.makeDateIndex(24, freq='h', name='hourly')
expected = Index(range(24), name='hourly')
tm.assert_index_equal(expected, date_index.map(lambda x: x.hour))
@pytest.mark.parametrize(
"mapper",
[
lambda values, index: {i: e for e, i in zip(values, index)},
lambda values, index: pd.Series(values, index)])
def test_map_dictlike(self, mapper):
# GH 12756
expected = Index(['foo', 'bar', 'baz'])
index = tm.makeIntIndex(3)
result = index.map(mapper(expected.values, index))
tm.assert_index_equal(result, expected)
# TODO: replace with fixture
for name in self.indices.keys():
if name == 'catIndex':
# Tested in test_categorical
continue
elif name == 'repeats':
# Cannot map duplicated index
continue
index = self.indices[name]
expected = Index(np.arange(len(index), 0, -1))
# to match proper result coercion for uints
if name == 'empty':
expected = Index([])
result = index.map(mapper(expected, index))
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("mapper", [
Series(['foo', 2., 'baz'], index=[0, 2, -1]),
{0: 'foo', 2: 2.0, -1: 'baz'}])
def test_map_with_non_function_missing_values(self, mapper):
# GH 12756
expected = Index([2., np.nan, 'foo'])
result = Index([2, 1, 0]).map(mapper)
tm.assert_index_equal(expected, result)
def test_map_na_exclusion(self):
index = Index([1.5, np.nan, 3, np.nan, 5])
result = index.map(lambda x: x * 2, na_action='ignore')
expected = index * 2
tm.assert_index_equal(result, expected)
def test_map_defaultdict(self):
index = Index([1, 2, 3])
default_dict = defaultdict(lambda: 'blank')
default_dict[1] = 'stuff'
result = index.map(default_dict)
expected = Index(['stuff', 'blank', 'blank'])
tm.assert_index_equal(result, expected)
def test_append_multiple(self):
index = Index(['a', 'b', 'c', 'd', 'e', 'f'])
foos = [index[:2], index[2:4], index[4:]]
result = foos[0].append(foos[1:])
tm.assert_index_equal(result, index)
# empty
result = index.append([])
tm.assert_index_equal(result, index)
@pytest.mark.parametrize("name,expected", [
('foo', 'foo'), ('bar', None)])
def test_append_empty_preserve_name(self, name, expected):
left = Index([], name='foo')
right = Index([1, 2, 3], name=name)
result = left.append(right)
assert result.name == expected
@pytest.mark.parametrize("second_name,expected", [
(None, None), ('name', 'name')])
@pytest.mark.parametrize("sort", [None, False])
def test_difference_name_preservation(self, second_name, expected, sort):
        # TODO: replace with fixture
first = self.strIndex[5:20]
second = self.strIndex[:10]
answer = self.strIndex[10:20]
first.name = 'name'
second.name = second_name
result = first.difference(second, sort=sort)
assert tm.equalContents(result, answer)
if expected is None:
assert result.name is None
else:
assert result.name == expected
@pytest.mark.parametrize("sort", [None, False])
def test_difference_empty_arg(self, sort):
first = self.strIndex[5:20]
        first.name = 'name'
result = first.difference([], sort)
assert tm.equalContents(result, first)
assert result.name == first.name
@pytest.mark.parametrize("sort", [None, False])
def test_difference_identity(self, sort):
first = self.strIndex[5:20]
        first.name = 'name'
result = first.difference(first, sort)
assert len(result) == 0
assert result.name == first.name
@pytest.mark.parametrize("sort", [None, False])
def test_difference_sort(self, sort):
first = self.strIndex[5:20]
second = self.strIndex[:10]
result = first.difference(second, sort)
expected = self.strIndex[10:20]
if sort is None:
expected = expected.sort_values()
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("sort", [None, False])
def test_symmetric_difference(self, sort):
# smoke
index1 = Index([5, 2, 3, 4], name='index1')
index2 = Index([2, 3, 4, 1])
result = index1.symmetric_difference(index2, sort=sort)
expected = Index([5, 1])
assert tm.equalContents(result, expected)
assert result.name is None
if sort is None:
expected = expected.sort_values()
tm.assert_index_equal(result, expected)
# __xor__ syntax
expected = index1 ^ index2
assert tm.equalContents(result, expected)
assert result.name is None
@pytest.mark.parametrize('opname', ['difference', 'symmetric_difference'])
def test_difference_incomparable(self, opname):
a = pd.Index([3, pd.Timestamp('2000'), 1])
b = pd.Index([2, pd.Timestamp('1999'), 1])
op = operator.methodcaller(opname, b)
# sort=None, the default
result = op(a)
expected = pd.Index([3, pd.Timestamp('2000'), 2, pd.Timestamp('1999')])
if opname == 'difference':
expected = expected[:2]
tm.assert_index_equal(result, expected)
# sort=False
op = operator.methodcaller(opname, b, sort=False)
result = op(a)
tm.assert_index_equal(result, expected)
@pytest.mark.xfail(reason="Not implemented")
@pytest.mark.parametrize('opname', ['difference', 'symmetric_difference'])
def test_difference_incomparable_true(self, opname):
# TODO decide on True behaviour
# # sort=True, raises
a = pd.Index([3, pd.Timestamp('2000'), 1])
b = pd.Index([2, pd.Timestamp('1999'), 1])
op = operator.methodcaller(opname, b, sort=True)
with pytest.raises(TypeError, match='Cannot compare'):
op(a)
@pytest.mark.parametrize("sort", [None, False])
def test_symmetric_difference_mi(self, sort):
index1 = MultiIndex.from_tuples(self.tuples)
index2 = MultiIndex.from_tuples([('foo', 1), ('bar', 3)])
result = index1.symmetric_difference(index2, sort=sort)
expected = MultiIndex.from_tuples([('bar', 2), ('baz', 3), ('bar', 3)])
if sort is None:
expected = expected.sort_values()
tm.assert_index_equal(result, expected)
assert tm.equalContents(result, expected)
@pytest.mark.parametrize("index2,expected", [
(Index([0, 1, np.nan]), Index([2.0, 3.0, 0.0])),
(Index([0, 1]), Index([np.nan, 2.0, 3.0, 0.0]))])
@pytest.mark.parametrize("sort", [None, False])
def test_symmetric_difference_missing(self, index2, expected, sort):
# GH 13514 change: {nan} - {nan} == {}
# (GH 6444, sorting of nans, is no longer an issue)
index1 = Index([1, np.nan, 2, 3])
result = index1.symmetric_difference(index2, sort=sort)
if sort is None:
expected = expected.sort_values()
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("sort", [None, False])
def test_symmetric_difference_non_index(self, sort):
index1 = Index([1, 2, 3, 4], name='index1')
index2 = np.array([2, 3, 4, 5])
expected = Index([1, 5])
result = index1.symmetric_difference(index2, sort=sort)
assert tm.equalContents(result, expected)
assert result.name == 'index1'
result = index1.symmetric_difference(index2, result_name='new_name',
sort=sort)
assert tm.equalContents(result, expected)
assert result.name == 'new_name'
@pytest.mark.parametrize("sort", [None, False])
def test_difference_type(self, sort):
# GH 20040
# If taking difference of a set and itself, it
# needs to preserve the type of the index
skip_index_keys = ['repeats']
for key, index in self.generate_index_types(skip_index_keys):
result = index.difference(index, sort=sort)
expected = index.drop(index)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("sort", [None, False])
def test_intersection_difference(self, sort):
# GH 20040
# Test that the intersection of an index with an
# empty index produces the same index as the difference
# of an index with itself. Test for all types
skip_index_keys = ['repeats']
for key, index in self.generate_index_types(skip_index_keys):
inter = index.intersection(index.drop(index))
diff = index.difference(index, sort=sort)
tm.assert_index_equal(inter, diff)
@pytest.mark.parametrize("attr,expected", [
('strIndex', False), ('boolIndex', False), ('catIndex', False),
('intIndex', True), ('dateIndex', False), ('floatIndex', True)])
def test_is_numeric(self, attr, expected):
assert getattr(self, attr).is_numeric() == expected
@pytest.mark.parametrize("attr,expected", [
('strIndex', True), ('boolIndex', True), ('catIndex', False),
('intIndex', False), ('dateIndex', False), ('floatIndex', False)])
def test_is_object(self, attr, expected):
assert getattr(self, attr).is_object() == expected
@pytest.mark.parametrize("attr,expected", [
('strIndex', False), ('boolIndex', False), ('catIndex', False),
('intIndex', False), ('dateIndex', True), ('floatIndex', False)])
def test_is_all_dates(self, attr, expected):
assert getattr(self, attr).is_all_dates == expected
def test_summary(self):
self._check_method_works(Index._summary)
# GH3869
ind = Index(['{other}%s', "~:{range}:0"], name='A')
result = ind._summary()
# shouldn't be formatted accidentally.
assert '~:{range}:0' in result
assert '{other}%s' in result
# GH18217
def test_summary_deprecated(self):
ind = Index(['{other}%s', "~:{range}:0"], name='A')
with tm.assert_produces_warning(FutureWarning):
ind.summary()
def test_format(self):
self._check_method_works(Index.format)
# GH 14626
        # Windows has different precision on datetime.datetime.now (it doesn't
        # include microseconds). The default repr for Timestamp shows these,
        # but Index formatting does not, so we skip that case.
now = datetime.now()
if not str(now).endswith("000"):
index = Index([now])
formatted = index.format()
expected = [str(index[0])]
assert formatted == expected
self.strIndex[:0].format()
@pytest.mark.parametrize("vals", [
[1, 2.0 + 3.0j, 4.], ['a', 'b', 'c']])
def test_format_missing(self, vals, nulls_fixture):
# 2845
vals = list(vals) # Copy for each iteration
vals.append(nulls_fixture)
index = Index(vals)
formatted = index.format()
expected = [str(index[0]), str(index[1]), str(index[2]), u('NaN')]
assert formatted == expected
assert index[3] is nulls_fixture
def test_format_with_name_time_info(self):
# bug I fixed 12/20/2011
inc = timedelta(hours=4)
dates = Index([dt + inc for dt in self.dateIndex], name='something')
formatted = dates.format(name=True)
assert formatted[0] == 'something'
def test_format_datetime_with_time(self):
t = Index([datetime(2012, 2, 7), datetime(2012, 2, 7, 23)])
result = t.format()
expected = ['2012-02-07 00:00:00', '2012-02-07 23:00:00']
assert len(result) == 2
assert result == expected
@pytest.mark.parametrize("op", ['any', 'all'])
def test_logical_compat(self, op):
index = self.create_index()
assert getattr(index, op)() == getattr(index.values, op)()
def _check_method_works(self, method):
# TODO: make this a dedicated test with parametrized methods
method(self.empty)
method(self.dateIndex)
method(self.unicodeIndex)
method(self.strIndex)
method(self.intIndex)
method(self.tuples)
method(self.catIndex)
def test_get_indexer(self):
index1 = Index([1, 2, 3, 4, 5])
index2 = Index([2, 4, 6])
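        # get_indexer returns, for each label of the target (index2), its
        # position in the calling index (index1), or -1 where the label is
        # missing (here: 6).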
r1 = index1.get_indexer(index2)
e1 = np.array([1, 3, -1], dtype=np.intp)
assert_almost_equal(r1, e1)
@pytest.mark.parametrize("reverse", [True, False])
@pytest.mark.parametrize("expected,method", [
(np.array([-1, 0, 0, 1, 1], dtype=np.intp), 'pad'),
(np.array([-1, 0, 0, 1, 1], dtype=np.intp), 'ffill'),
(np.array([0, 0, 1, 1, 2], dtype=np.intp), 'backfill'),
(np.array([0, 0, 1, 1, 2], dtype=np.intp), 'bfill')])
def test_get_indexer_methods(self, reverse, expected, method):
index1 = Index([1, 2, 3, 4, 5])
index2 = Index([2, 4, 6])
if reverse:
index1 = index1[::-1]
expected = expected[::-1]
result = index2.get_indexer(index1, method=method)
assert_almost_equal(result, expected)
def test_get_indexer_invalid(self):
# GH10411
index = Index(np.arange(10))
with pytest.raises(ValueError, match='tolerance argument'):
index.get_indexer([1, 0], tolerance=1)
with pytest.raises(ValueError, match='limit argument'):
index.get_indexer([1, 0], limit=1)
@pytest.mark.parametrize(
'method, tolerance, indexer, expected',
[
('pad', None, [0, 5, 9], [0, 5, 9]),
('backfill', None, [0, 5, 9], [0, 5, 9]),
('nearest', None, [0, 5, 9], [0, 5, 9]),
('pad', 0, [0, 5, 9], [0, 5, 9]),
('backfill', 0, [0, 5, 9], [0, 5, 9]),
('nearest', 0, [0, 5, 9], [0, 5, 9]),
('pad', None, [0.2, 1.8, 8.5], [0, 1, 8]),
('backfill', None, [0.2, 1.8, 8.5], [1, 2, 9]),
('nearest', None, [0.2, 1.8, 8.5], [0, 2, 9]),
('pad', 1, [0.2, 1.8, 8.5], [0, 1, 8]),
('backfill', 1, [0.2, 1.8, 8.5], [1, 2, 9]),
('nearest', 1, [0.2, 1.8, 8.5], [0, 2, 9]),
('pad', 0.2, [0.2, 1.8, 8.5], [0, -1, -1]),
('backfill', 0.2, [0.2, 1.8, 8.5], [-1, 2, -1]),
('nearest', 0.2, [0.2, 1.8, 8.5], [0, 2, -1])])
def test_get_indexer_nearest(self, method, tolerance, indexer, expected):
index = Index(np.arange(10))
actual = index.get_indexer(indexer, method=method, tolerance=tolerance)
tm.assert_numpy_array_equal(actual, np.array(expected,
dtype=np.intp))
@pytest.mark.parametrize('listtype', [list, tuple, Series, np.array])
@pytest.mark.parametrize(
'tolerance, expected',
list(zip([[0.3, 0.3, 0.1], [0.2, 0.1, 0.1],
[0.1, 0.5, 0.5]],
[[0, 2, -1], [0, -1, -1],
[-1, 2, 9]])))
def test_get_indexer_nearest_listlike_tolerance(self, tolerance,
expected, listtype):
index = Index(np.arange(10))
actual = index.get_indexer([0.2, 1.8, 8.5], method='nearest',
tolerance=listtype(tolerance))
tm.assert_numpy_array_equal(actual, np.array(expected,
dtype=np.intp))
def test_get_indexer_nearest_error(self):
index = Index(np.arange(10))
with pytest.raises(ValueError, match='limit argument'):
index.get_indexer([1, 0], method='nearest', limit=1)
with pytest.raises(ValueError, match='tolerance size must match'):
index.get_indexer([1, 0], method='nearest',
tolerance=[1, 2, 3])
@pytest.mark.parametrize("method,expected", [
('pad', [8, 7, 0]), ('backfill', [9, 8, 1]), ('nearest', [9, 7, 0])])
def test_get_indexer_nearest_decreasing(self, method, expected):
index = Index(np.arange(10))[::-1]
actual = index.get_indexer([0, 5, 9], method=method)
tm.assert_numpy_array_equal(actual, np.array([9, 4, 0], dtype=np.intp))
actual = index.get_indexer([0.2, 1.8, 8.5], method=method)
tm.assert_numpy_array_equal(actual, np.array(expected, dtype=np.intp))
@pytest.mark.parametrize("method,expected", [
('pad', np.array([-1, 0, 1, 1], dtype=np.intp)),
('backfill', np.array([0, 0, 1, -1], dtype=np.intp))])
def test_get_indexer_strings(self, method, expected):
index = pd.Index(['b', 'c'])
actual = index.get_indexer(['a', 'b', 'c', 'd'], method=method)
tm.assert_numpy_array_equal(actual, expected)
def test_get_indexer_strings_raises(self):
index = pd.Index(['b', 'c'])
with pytest.raises(TypeError):
index.get_indexer(['a', 'b', 'c', 'd'], method='nearest')
with pytest.raises(TypeError):
index.get_indexer(['a', 'b', 'c', 'd'], method='pad', tolerance=2)
with pytest.raises(TypeError):
index.get_indexer(['a', 'b', 'c', 'd'], method='pad',
tolerance=[2, 2, 2, 2])
def test_get_indexer_numeric_index_boolean_target(self):
# GH 16877
numeric_index = pd.Index(range(4))
result = numeric_index.get_indexer([True, False, True])
expected = np.array([-1, -1, -1], dtype=np.intp)
tm.assert_numpy_array_equal(result, expected)
def test_get_indexer_with_NA_values(self, unique_nulls_fixture,
unique_nulls_fixture2):
# GH 22332
# check pairwise, that no pair of na values
# is mangled
if unique_nulls_fixture is unique_nulls_fixture2:
return # skip it, values are not unique
arr = np.array([unique_nulls_fixture,
unique_nulls_fixture2], dtype=np.object)
index = pd.Index(arr, dtype=np.object)
result = index.get_indexer([unique_nulls_fixture,
unique_nulls_fixture2, 'Unknown'])
expected = np.array([0, 1, -1], dtype=np.intp)
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize("method", [None, 'pad', 'backfill', 'nearest'])
def test_get_loc(self, method):
index = pd.Index([0, 1, 2])
assert index.get_loc(1, method=method) == 1
if method:
assert index.get_loc(1, method=method, tolerance=0) == 1
@pytest.mark.parametrize("method", [None, 'pad', 'backfill', 'nearest'])
def test_get_loc_raises_bad_label(self, method):
index = pd.Index([0, 1, 2])
if method:
# Messages vary across versions
if PY36:
msg = 'not supported between'
elif PY35:
msg = 'unorderable types'
else:
if method == 'nearest':
msg = 'unsupported operand'
else:
msg = 'requires scalar valued input'
else:
msg = 'invalid key'
with pytest.raises(TypeError, match=msg):
index.get_loc([1, 2], method=method)
@pytest.mark.parametrize("method,loc", [
('pad', 1), ('backfill', 2), ('nearest', 1)])
def test_get_loc_tolerance(self, method, loc):
index = pd.Index([0, 1, 2])
assert index.get_loc(1.1, method) == loc
assert index.get_loc(1.1, method, tolerance=1) == loc
@pytest.mark.parametrize("method", ['pad', 'backfill', 'nearest'])
def test_get_loc_outside_tolerance_raises(self, method):
index = pd.Index([0, 1, 2])
with pytest.raises(KeyError, match='1.1'):
index.get_loc(1.1, method, tolerance=0.05)
def test_get_loc_bad_tolerance_raises(self):
index = pd.Index([0, 1, 2])
with pytest.raises(ValueError, match='must be numeric'):
index.get_loc(1.1, 'nearest', tolerance='invalid')
def test_get_loc_tolerance_no_method_raises(self):
index = pd.Index([0, 1, 2])
with pytest.raises(ValueError, match='tolerance .* valid if'):
index.get_loc(1.1, tolerance=1)
def test_get_loc_raises_missized_tolerance(self):
index = pd.Index([0, 1, 2])
with pytest.raises(ValueError, match='tolerance size must match'):
index.get_loc(1.1, 'nearest', tolerance=[1, 1])
def test_get_loc_raises_object_nearest(self):
index = pd.Index(['a', 'c'])
with pytest.raises(TypeError, match='unsupported operand type'):
index.get_loc('a', method='nearest')
def test_get_loc_raises_object_tolerance(self):
index = pd.Index(['a', 'c'])
with pytest.raises(TypeError, match='unsupported operand type'):
index.get_loc('a', method='pad', tolerance='invalid')
@pytest.mark.parametrize("dtype", [int, float])
def test_slice_locs(self, dtype):
index = Index(np.array([0, 1, 2, 5, 6, 7, 9, 10], dtype=dtype))
n = len(index)
assert index.slice_locs(start=2) == (2, n)
assert index.slice_locs(start=3) == (3, n)
assert index.slice_locs(3, 8) == (3, 6)
assert index.slice_locs(5, 10) == (3, n)
assert index.slice_locs(end=8) == (0, 6)
assert index.slice_locs(end=9) == (0, 7)
# reversed
index2 = index[::-1]
assert index2.slice_locs(8, 2) == (2, 6)
assert index2.slice_locs(7, 3) == (2, 5)
def test_slice_float_locs(self):
index = Index(np.array([0, 1, 2, 5, 6, 7, 9, 10], dtype=float))
n = len(index)
assert index.slice_locs(5.0, 10.0) == (3, n)
assert index.slice_locs(4.5, 10.5) == (3, 8)
index2 = index[::-1]
assert index2.slice_locs(8.5, 1.5) == (2, 6)
assert index2.slice_locs(10.5, -1) == (0, n)
@pytest.mark.xfail(reason="Assertions were not correct - see GH#20915")
def test_slice_ints_with_floats_raises(self):
# int slicing with floats
# GH 4892, these are all TypeErrors
index = Index(np.array([0, 1, 2, 5, 6, 7, 9, 10], dtype=int))
n = len(index)
pytest.raises(TypeError,
lambda: index.slice_locs(5.0, 10.0))
pytest.raises(TypeError,
lambda: index.slice_locs(4.5, 10.5))
index2 = index[::-1]
pytest.raises(TypeError,
lambda: index2.slice_locs(8.5, 1.5), (2, 6))
pytest.raises(TypeError,
lambda: index2.slice_locs(10.5, -1), (0, n))
def test_slice_locs_dup(self):
index = Index(['a', 'a', 'b', 'c', 'd', 'd'])
assert index.slice_locs('a', 'd') == (0, 6)
assert index.slice_locs(end='d') == (0, 6)
assert index.slice_locs('a', 'c') == (0, 4)
assert index.slice_locs('b', 'd') == (2, 6)
index2 = index[::-1]
assert index2.slice_locs('d', 'a') == (0, 6)
assert index2.slice_locs(end='a') == (0, 6)
assert index2.slice_locs('d', 'b') == (0, 4)
assert index2.slice_locs('c', 'a') == (2, 6)
@pytest.mark.parametrize("dtype", [int, float])
def test_slice_locs_dup_numeric(self, dtype):
index = Index(np.array([10, 12, 12, 14], dtype=dtype))
assert index.slice_locs(12, 12) == (1, 3)
assert index.slice_locs(11, 13) == (1, 3)
index2 = index[::-1]
assert index2.slice_locs(12, 12) == (1, 3)
assert index2.slice_locs(13, 11) == (1, 3)
def test_slice_locs_na(self):
index = Index([np.nan, 1, 2])
assert index.slice_locs(1) == (1, 3)
assert index.slice_locs(np.nan) == (0, 3)
index = Index([0, np.nan, np.nan, 1, 2])
assert index.slice_locs(np.nan) == (1, 5)
def test_slice_locs_na_raises(self):
index = Index([np.nan, 1, 2])
with pytest.raises(KeyError, match=''):
index.slice_locs(start=1.5)
with pytest.raises(KeyError, match=''):
index.slice_locs(end=1.5)
@pytest.mark.parametrize("in_slice,expected", [
(pd.IndexSlice[::-1], 'yxdcb'), (pd.IndexSlice['b':'y':-1], ''),
(pd.IndexSlice['b'::-1], 'b'), (pd.IndexSlice[:'b':-1], 'yxdcb'),
(pd.IndexSlice[:'y':-1], 'y'), (pd.IndexSlice['y'::-1], 'yxdcb'),
(pd.IndexSlice['y'::-4], 'yb'),
# absent labels
(pd.IndexSlice[:'a':-1], 'yxdcb'), (pd.IndexSlice[:'a':-2], 'ydb'),
(pd.IndexSlice['z'::-1], 'yxdcb'), (pd.IndexSlice['z'::-3], 'yc'),
(pd.IndexSlice['m'::-1], 'dcb'), (pd.IndexSlice[:'m':-1], 'yx'),
(pd.IndexSlice['a':'a':-1], ''), (pd.IndexSlice['z':'z':-1], ''),
(pd.IndexSlice['m':'m':-1], '')
])
def test_slice_locs_negative_step(self, in_slice, expected):
index = Index(list('bcdxy'))
s_start, s_stop = index.slice_locs(in_slice.start, in_slice.stop,
in_slice.step)
result = index[s_start:s_stop:in_slice.step]
expected = pd.Index(list(expected))
tm.assert_index_equal(result, expected)
def test_drop_by_str_label(self):
# TODO: Parametrize these after replacing self.strIndex with fixture
n = len(self.strIndex)
drop = self.strIndex[lrange(5, 10)]
dropped = self.strIndex.drop(drop)
expected = self.strIndex[lrange(5) + lrange(10, n)]
tm.assert_index_equal(dropped, expected)
dropped = self.strIndex.drop(self.strIndex[0])
expected = self.strIndex[1:]
tm.assert_index_equal(dropped, expected)
@pytest.mark.parametrize("keys", [['foo', 'bar'], ['1', 'bar']])
def test_drop_by_str_label_raises_missing_keys(self, keys):
with pytest.raises(KeyError, match=''):
self.strIndex.drop(keys)
def test_drop_by_str_label_errors_ignore(self):
# TODO: Parametrize these after replacing self.strIndex with fixture
# errors='ignore'
n = len(self.strIndex)
drop = self.strIndex[lrange(5, 10)]
mixed = drop.tolist() + ['foo']
dropped = self.strIndex.drop(mixed, errors='ignore')
expected = self.strIndex[lrange(5) + lrange(10, n)]
tm.assert_index_equal(dropped, expected)
dropped = self.strIndex.drop(['foo', 'bar'], errors='ignore')
expected = self.strIndex[lrange(n)]
tm.assert_index_equal(dropped, expected)
def test_drop_by_numeric_label_loc(self):
# TODO: Parametrize numeric and str tests after self.strIndex fixture
index = Index([1, 2, 3])
dropped = index.drop(1)
expected = Index([2, 3])
tm.assert_index_equal(dropped, expected)
def test_drop_by_numeric_label_raises_missing_keys(self):
index = Index([1, 2, 3])
with pytest.raises(KeyError, match=''):
index.drop([3, 4])
@pytest.mark.parametrize("key,expected", [
(4, Index([1, 2, 3])), ([3, 4, 5], Index([1, 2]))])
def test_drop_by_numeric_label_errors_ignore(self, key, expected):
index = Index([1, 2, 3])
dropped = index.drop(key, errors='ignore')
tm.assert_index_equal(dropped, expected)
@pytest.mark.parametrize("values", [['a', 'b', ('c', 'd')],
['a', ('c', 'd'), 'b'],
[('c', 'd'), 'a', 'b']])
@pytest.mark.parametrize("to_drop", [[('c', 'd'), 'a'], ['a', ('c', 'd')]])
def test_drop_tuple(self, values, to_drop):
# GH 18304
index = pd.Index(values)
expected = pd.Index(['b'])
result = index.drop(to_drop)
tm.assert_index_equal(result, expected)
removed = index.drop(to_drop[0])
for drop_me in to_drop[1], [to_drop[1]]:
result = removed.drop(drop_me)
tm.assert_index_equal(result, expected)
removed = index.drop(to_drop[1])
for drop_me in to_drop[1], [to_drop[1]]:
pytest.raises(KeyError, removed.drop, drop_me)
@pytest.mark.parametrize("method,expected,sort", [
('intersection', np.array([(1, 'A'), (2, 'A'), (1, 'B'), (2, 'B')],
dtype=[('num', int), ('let', 'a1')]),
False),
('intersection', np.array([(1, 'A'), (1, 'B'), (2, 'A'), (2, 'B')],
dtype=[('num', int), ('let', 'a1')]),
None),
('union', np.array([(1, 'A'), (1, 'B'), (1, 'C'), (2, 'A'), (2, 'B'),
(2, 'C')], dtype=[('num', int), ('let', 'a1')]),
None)
])
def test_tuple_union_bug(self, method, expected, sort):
index1 = Index(np.array([(1, 'A'), (2, 'A'), (1, 'B'), (2, 'B')],
dtype=[('num', int), ('let', 'a1')]))
index2 = Index(np.array([(1, 'A'), (2, 'A'), (1, 'B'),
(2, 'B'), (1, 'C'), (2, 'C')],
dtype=[('num', int), ('let', 'a1')]))
result = getattr(index1, method)(index2, sort=sort)
assert result.ndim == 1
expected = Index(expected)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("attr", [
'is_monotonic_increasing', 'is_monotonic_decreasing',
'_is_strictly_monotonic_increasing',
'_is_strictly_monotonic_decreasing'])
def test_is_monotonic_incomparable(self, attr):
index = Index([5, datetime.now(), 7])
assert not getattr(index, attr)
def test_get_set_value(self):
# TODO: Remove function? GH 19728
values = np.random.randn(100)
date = self.dateIndex[67]
assert_almost_equal(self.dateIndex.get_value(values, date), values[67])
self.dateIndex.set_value(values, date, 10)
assert values[67] == 10
@pytest.mark.parametrize("values", [
['foo', 'bar', 'quux'], {'foo', 'bar', 'quux'}])
@pytest.mark.parametrize("index,expected", [
(Index(['qux', 'baz', 'foo', 'bar']),
np.array([False, False, True, True])),
(Index([]), np.array([], dtype=bool)) # empty
])
def test_isin(self, values, index, expected):
result = index.isin(values)
tm.assert_numpy_array_equal(result, expected)
def test_isin_nan_common_object(self, nulls_fixture, nulls_fixture2):
# Test cartesian product of null fixtures and ensure that we don't
# mangle the various types (save a corner case with PyPy)
# all nans are the same
if (isinstance(nulls_fixture, float) and
isinstance(nulls_fixture2, float) and
math.isnan(nulls_fixture) and
math.isnan(nulls_fixture2)):
tm.assert_numpy_array_equal(Index(['a', nulls_fixture]).isin(
[nulls_fixture2]), np.array([False, True]))
elif nulls_fixture is nulls_fixture2: # should preserve NA type
tm.assert_numpy_array_equal(Index(['a', nulls_fixture]).isin(
[nulls_fixture2]), np.array([False, True]))
else:
tm.assert_numpy_array_equal(Index(['a', nulls_fixture]).isin(
[nulls_fixture2]), np.array([False, False]))
def test_isin_nan_common_float64(self, nulls_fixture):
if nulls_fixture is pd.NaT:
pytest.skip("pd.NaT not compatible with Float64Index")
# Float64Index overrides isin, so must be checked separately
tm.assert_numpy_array_equal(Float64Index([1.0, nulls_fixture]).isin(
[np.nan]), np.array([False, True]))
# we cannot compare NaT with NaN
tm.assert_numpy_array_equal(Float64Index([1.0, nulls_fixture]).isin(
[pd.NaT]), np.array([False, False]))
@pytest.mark.parametrize("level", [0, -1])
@pytest.mark.parametrize("index", [
Index(['qux', 'baz', 'foo', 'bar']),
# Float64Index overrides isin, so must be checked separately
Float64Index([1.0, 2.0, 3.0, 4.0])])
def test_isin_level_kwarg(self, level, index):
values = index.tolist()[-2:] + ['nonexisting']
expected = np.array([False, False, True, True])
tm.assert_numpy_array_equal(expected, index.isin(values, level=level))
index.name = 'foobar'
tm.assert_numpy_array_equal(expected,
index.isin(values, level='foobar'))
@pytest.mark.parametrize("level", [1, 10, -2])
@pytest.mark.parametrize("index", [
Index(['qux', 'baz', 'foo', 'bar']),
# Float64Index overrides isin, so must be checked separately
Float64Index([1.0, 2.0, 3.0, 4.0])])
def test_isin_level_kwarg_raises_bad_index(self, level, index):
with pytest.raises(IndexError, match='Too many levels'):
index.isin([], level=level)
@pytest.mark.parametrize("level", [1.0, 'foobar', 'xyzzy', np.nan])
@pytest.mark.parametrize("index", [
Index(['qux', 'baz', 'foo', 'bar']),
Float64Index([1.0, 2.0, 3.0, 4.0])])
def test_isin_level_kwarg_raises_key(self, level, index):
with pytest.raises(KeyError, match='must be same as name'):
index.isin([], level=level)
@pytest.mark.parametrize("empty", [[], Series(), np.array([])])
def test_isin_empty(self, empty):
# see gh-16991
index = Index(["a", "b"])
expected = np.array([False, False])
result = index.isin(empty)
tm.assert_numpy_array_equal(expected, result)
@pytest.mark.parametrize("values", [
[1, 2, 3, 4],
[1., 2., 3., 4.],
[True, True, True, True],
["foo", "bar", "baz", "qux"],
pd.date_range('2018-01-01', freq='D', periods=4)])
def test_boolean_cmp(self, values):
index = Index(values)
result = (index == values)
expected = np.array([True, True, True, True], dtype=bool)
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize("name,level", [
(None, 0), ('a', 'a')])
def test_get_level_values(self, name, level):
expected = self.strIndex.copy()
if name:
expected.name = name
result = expected.get_level_values(level)
tm.assert_index_equal(result, expected)
def test_slice_keep_name(self):
index = Index(['a', 'b'], name='asdf')
assert index.name == index[1:].name
# instance attributes of the form self.<name>Index
@pytest.mark.parametrize('index_kind',
['unicode', 'str', 'date', 'int', 'float'])
def test_join_self(self, join_type, index_kind):
res = getattr(self, '{0}Index'.format(index_kind))
joined = res.join(res, how=join_type)
assert res is joined
@pytest.mark.parametrize("method", ['strip', 'rstrip', 'lstrip'])
def test_str_attribute(self, method):
# GH9068
index = Index([' jack', 'jill ', ' jesse ', 'frank'])
expected = Index([getattr(str, method)(x) for x in index.values])
result = getattr(index.str, method)()
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("index", [
Index(range(5)), tm.makeDateIndex(10),
MultiIndex.from_tuples([('foo', '1'), ('bar', '3')]),
period_range(start='2000', end='2010', freq='A')])
def test_str_attribute_raises(self, index):
with pytest.raises(AttributeError, match='only use .str accessor'):
index.str.repeat(2)
@pytest.mark.parametrize("expand,expected", [
(None, Index([['a', 'b', 'c'], ['d', 'e'], ['f']])),
(False, Index([['a', 'b', 'c'], ['d', 'e'], ['f']])),
(True, MultiIndex.from_tuples([('a', 'b', 'c'), ('d', 'e', np.nan),
('f', np.nan, np.nan)]))])
def test_str_split(self, expand, expected):
index = Index(['a b c', 'd e', 'f'])
if expand is not None:
result = index.str.split(expand=expand)
else:
result = index.str.split()
tm.assert_index_equal(result, expected)
def test_str_bool_return(self):
# test boolean case, should return np.array instead of boolean Index
index = Index(['a1', 'a2', 'b1', 'b2'])
result = index.str.startswith('a')
expected = np.array([True, True, False, False])
tm.assert_numpy_array_equal(result, expected)
assert isinstance(result, np.ndarray)
def test_str_bool_series_indexing(self):
index = Index(['a1', 'a2', 'b1', 'b2'])
s = Series(range(4), index=index)
result = s[s.index.str.startswith('a')]
expected = Series(range(2), index=['a1', 'a2'])
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("index,expected", [
(Index(list('abcd')), True), (Index(range(4)), False)])
def test_tab_completion(self, index, expected):
# GH 9910
result = 'str' in dir(index)
assert result == expected
def test_indexing_doesnt_change_class(self):
index = Index([1, 2, 3, 'a', 'b', 'c'])
assert index[1:3].identical(pd.Index([2, 3], dtype=np.object_))
assert index[[0, 1]].identical(pd.Index([1, 2], dtype=np.object_))
def test_outer_join_sort(self):
left_index = Index(np.random.permutation(15))
right_index = tm.makeDateIndex(10)
with tm.assert_produces_warning(RuntimeWarning):
result = left_index.join(right_index, how='outer')
# right_index in this case because DatetimeIndex has join precedence
# over Int64Index
with tm.assert_produces_warning(RuntimeWarning):
expected = right_index.astype(object).union(
left_index.astype(object))
tm.assert_index_equal(result, expected)
def test_nan_first_take_datetime(self):
index = Index([pd.NaT, Timestamp('20130101'), Timestamp('20130102')])
result = index.take([-1, 0, 1])
expected = Index([index[-1], index[0], index[1]])
tm.assert_index_equal(result, expected)
def test_take_fill_value(self):
# GH 12631
index = pd.Index(list('ABC'), name='xxx')
result = index.take(np.array([1, 0, -1]))
expected = pd.Index(list('BAC'), name='xxx')
tm.assert_index_equal(result, expected)
# fill_value
result = index.take(np.array([1, 0, -1]), fill_value=True)
expected = pd.Index(['B', 'A', np.nan], name='xxx')
tm.assert_index_equal(result, expected)
# allow_fill=False
result = index.take(np.array([1, 0, -1]), allow_fill=False,
fill_value=True)
expected = pd.Index(['B', 'A', 'C'], name='xxx')
tm.assert_index_equal(result, expected)
def test_take_fill_value_none_raises(self):
index = pd.Index(list('ABC'), name='xxx')
msg = ('When allow_fill=True and fill_value is not None, '
'all indices must be >= -1')
with pytest.raises(ValueError, match=msg):
index.take(np.array([1, 0, -2]), fill_value=True)
with pytest.raises(ValueError, match=msg):
index.take(np.array([1, 0, -5]), fill_value=True)
def test_take_bad_bounds_raises(self):
index = pd.Index(list('ABC'), name='xxx')
with pytest.raises(IndexError, match='out of bounds'):
index.take(np.array([1, -5]))
@pytest.mark.parametrize("name", [None, 'foobar'])
@pytest.mark.parametrize("labels", [
[], np.array([]), ['A', 'B', 'C'], ['C', 'B', 'A'],
np.array(['A', 'B', 'C']), np.array(['C', 'B', 'A']),
# Must preserve name even if dtype changes
pd.date_range('20130101', periods=3).values,
pd.date_range('20130101', periods=3).tolist()])
def test_reindex_preserves_name_if_target_is_list_or_ndarray(self, name,
labels):
# GH6552
index = pd.Index([0, 1, 2])
index.name = name
assert index.reindex(labels)[0].name == name
@pytest.mark.parametrize("labels", [
[], np.array([]), np.array([], dtype=np.int64)])
def test_reindex_preserves_type_if_target_is_empty_list_or_array(self,
labels):
# GH7774
index = pd.Index(list('abc'))
assert index.reindex(labels)[0].dtype.type == np.object_
@pytest.mark.parametrize("labels,dtype", [
(pd.Int64Index([]), np.int64),
(pd.Float64Index([]), np.float64),
(pd.DatetimeIndex([]), np.datetime64)])
def test_reindex_doesnt_preserve_type_if_target_is_empty_index(self,
labels,
dtype):
# GH7774
index = pd.Index(list('abc'))
assert index.reindex(labels)[0].dtype.type == dtype
def test_reindex_no_type_preserve_target_empty_mi(self):
index = pd.Index(list('abc'))
result = index.reindex(pd.MultiIndex(
[pd.Int64Index([]), pd.Float64Index([])], [[], []]))[0]
assert result.levels[0].dtype.type == np.int64
assert result.levels[1].dtype.type == np.float64
def test_groupby(self):
index = Index(range(5))
result = index.groupby(np.array([1, 1, 2, 2, 2]))
expected = {1: pd.Index([0, 1]), 2: pd.Index([2, 3, 4])}
tm.assert_dict_equal(result, expected)
@pytest.mark.parametrize("mi,expected", [
(MultiIndex.from_tuples([(1, 2), (4, 5)]), np.array([True, True])),
(MultiIndex.from_tuples([(1, 2), (4, 6)]), np.array([True, False]))])
def test_equals_op_multiindex(self, mi, expected):
# GH9785
# test comparisons of multiindex
df = pd.read_csv(StringIO('a,b,c\n1,2,3\n4,5,6'), index_col=[0, 1])
result = df.index == mi
tm.assert_numpy_array_equal(result, expected)
def test_equals_op_multiindex_identify(self):
df = pd.read_csv(StringIO('a,b,c\n1,2,3\n4,5,6'), index_col=[0, 1])
result = df.index == df.index
expected = np.array([True, True])
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize("index", [
MultiIndex.from_tuples([(1, 2), (4, 5), (8, 9)]),
Index(['foo', 'bar', 'baz'])])
def test_equals_op_mismatched_multiindex_raises(self, index):
df = pd.read_csv(StringIO('a,b,c\n1,2,3\n4,5,6'), index_col=[0, 1])
with pytest.raises(ValueError, match="Lengths must match"):
df.index == index
def test_equals_op_index_vs_mi_same_length(self):
mi = MultiIndex.from_tuples([(1, 2), (4, 5), (8, 9)])
index = Index(['foo', 'bar', 'baz'])
result = mi == index
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize("dt_conv", [
pd.to_datetime, pd.to_timedelta])
def test_dt_conversion_preserves_name(self, dt_conv):
# GH 10875
index = pd.Index(['01:02:03', '01:02:04'], name='label')
assert index.name == dt_conv(index).name
@pytest.mark.skipif(not PY3, reason="compat test")
@pytest.mark.parametrize("index,expected", [
# ASCII
# short
(pd.Index(['a', 'bb', 'ccc']),
u"""Index(['a', 'bb', 'ccc'], dtype='object')"""),
# multiple lines
(pd.Index(['a', 'bb', 'ccc'] * 10),
u"""\
Index(['a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc',
'a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc',
'a', 'bb', 'ccc', 'a', 'bb', 'ccc'],
dtype='object')"""),
# truncated
(pd.Index(['a', 'bb', 'ccc'] * 100),
u"""\
Index(['a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a',
...
'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc'],
dtype='object', length=300)"""),
# Non-ASCII
# short
(pd.Index([u'あ', u'いい', u'ううう']),
u"""Index(['あ', 'いい', 'ううう'], dtype='object')"""),
# multiple lines
(pd.Index([u'あ', u'いい', u'ううう'] * 10),
(u"Index(['あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', "
u"'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう',\n"
u" 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', "
u"'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう',\n"
u" 'あ', 'いい', 'ううう', 'あ', 'いい', "
u"'ううう'],\n"
u" dtype='object')")),
# truncated
(pd.Index([u'あ', u'いい', u'ううう'] * 100),
(u"Index(['あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', "
u"'あ', 'いい', 'ううう', 'あ',\n"
u" ...\n"
u" 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', "
u"'ううう', 'あ', 'いい', 'ううう'],\n"
u" dtype='object', length=300)"))])
def test_string_index_repr(self, index, expected):
result = repr(index)
assert result == expected
@pytest.mark.skipif(PY3, reason="compat test")
@pytest.mark.parametrize("index,expected", [
# ASCII
# short
(pd.Index(['a', 'bb', 'ccc']),
u"""Index([u'a', u'bb', u'ccc'], dtype='object')"""),
# multiple lines
(pd.Index(['a', 'bb', 'ccc'] * 10),
u"""\
Index([u'a', u'bb', u'ccc', u'a', u'bb', u'ccc', u'a', u'bb', u'ccc', u'a',
u'bb', u'ccc', u'a', u'bb', u'ccc', u'a', u'bb', u'ccc', u'a', u'bb',
u'ccc', u'a', u'bb', u'ccc', u'a', u'bb', u'ccc', u'a', u'bb', u'ccc'],
dtype='object')"""),
# truncated
(pd.Index(['a', 'bb', 'ccc'] * 100),
u"""\
Index([u'a', u'bb', u'ccc', u'a', u'bb', u'ccc', u'a', u'bb', u'ccc', u'a',
...
u'ccc', u'a', u'bb', u'ccc', u'a', u'bb', u'ccc', u'a', u'bb', u'ccc'],
dtype='object', length=300)"""),
# Non-ASCII
# short
(pd.Index([u'あ', u'いい', u'ううう']),
u"""Index([u'あ', u'いい', u'ううう'], dtype='object')"""),
# multiple lines
(pd.Index([u'あ', u'いい', u'ううう'] * 10),
(u"Index([u'あ', u'いい', u'ううう', u'あ', u'いい', "
u"u'ううう', u'あ', u'いい', u'ううう', u'あ',\n"
u" u'いい', u'ううう', u'あ', u'いい', u'ううう', "
u"u'あ', u'いい', u'ううう', u'あ', u'いい',\n"
u" u'ううう', u'あ', u'いい', u'ううう', u'あ', "
u"u'いい', u'ううう', u'あ', u'いい', u'ううう'],\n"
u" dtype='object')")),
# truncated
(pd.Index([u'あ', u'いい', u'ううう'] * 100),
(u"Index([u'あ', u'いい', u'ううう', u'あ', u'いい', "
u"u'ううう', u'あ', u'いい', u'ううう', u'あ',\n"
u" ...\n"
u" u'ううう', u'あ', u'いい', u'ううう', u'あ', "
u"u'いい', u'ううう', u'あ', u'いい', u'ううう'],\n"
u" dtype='object', length=300)"))])
def test_string_index_repr_compat(self, index, expected):
result = unicode(index) # noqa
assert result == expected
@pytest.mark.skipif(not PY3, reason="compat test")
@pytest.mark.parametrize("index,expected", [
# short
(pd.Index([u'あ', u'いい', u'ううう']),
(u"Index(['あ', 'いい', 'ううう'], "
u"dtype='object')")),
# multiple lines
(pd.Index([u'あ', u'いい', u'ううう'] * 10),
(u"Index(['あ', 'いい', 'ううう', 'あ', 'いい', "
u"'ううう', 'あ', 'いい', 'ううう',\n"
u" 'あ', 'いい', 'ううう', 'あ', 'いい', "
u"'ううう', 'あ', 'いい', 'ううう',\n"
u" 'あ', 'いい', 'ううう', 'あ', 'いい', "
u"'ううう', 'あ', 'いい', 'ううう',\n"
u" 'あ', 'いい', 'ううう'],\n"
u" dtype='object')""")),
# truncated
(pd.Index([u'あ', u'いい', u'ううう'] * 100),
(u"Index(['あ', 'いい', 'ううう', 'あ', 'いい', "
u"'ううう', 'あ', 'いい', 'ううう',\n"
u" 'あ',\n"
u" ...\n"
u" 'ううう', 'あ', 'いい', 'ううう', 'あ', "
u"'いい', 'ううう', 'あ', 'いい',\n"
u" 'ううう'],\n"
u" dtype='object', length=300)"))])
def test_string_index_repr_with_unicode_option(self, index, expected):
# Enable Unicode option -----------------------------------------
with cf.option_context('display.unicode.east_asian_width', True):
result = repr(index)
assert result == expected
@pytest.mark.skipif(PY3, reason="compat test")
@pytest.mark.parametrize("index,expected", [
# short
(pd.Index([u'あ', u'いい', u'ううう']),
(u"Index([u'あ', u'いい', u'ううう'], "
u"dtype='object')")),
# multiple lines
(pd.Index([u'あ', u'いい', u'ううう'] * 10),
(u"Index([u'あ', u'いい', u'ううう', u'あ', u'いい', "
u"u'ううう', u'あ', u'いい',\n"
u" u'ううう', u'あ', u'いい', u'ううう', "
u"u'あ', u'いい', u'ううう', u'あ',\n"
u" u'いい', u'ううう', u'あ', u'いい', "
u"u'ううう', u'あ', u'いい',\n"
u" u'ううう', u'あ', u'いい', u'ううう', "
u"u'あ', u'いい', u'ううう'],\n"
u" dtype='object')")),
# truncated
(pd.Index([u'あ', u'いい', u'ううう'] * 100),
(u"Index([u'あ', u'いい', u'ううう', u'あ', u'いい', "
u"u'ううう', u'あ', u'いい',\n"
u" u'ううう', u'あ',\n"
u" ...\n"
u" u'ううう', u'あ', u'いい', u'ううう', "
u"u'あ', u'いい', u'ううう', u'あ',\n"
u" u'いい', u'ううう'],\n"
u" dtype='object', length=300)"))])
def test_string_index_repr_with_unicode_option_compat(self, index,
expected):
# Enable Unicode option -----------------------------------------
with cf.option_context('display.unicode.east_asian_width', True):
result = unicode(index) # noqa
assert result == expected
def test_cached_properties_not_settable(self):
index = pd.Index([1, 2, 3])
with pytest.raises(AttributeError, match="Can't set attribute"):
index.is_unique = False
def test_get_duplicates_deprecated(self):
index = pd.Index([1, 2, 3])
with tm.assert_produces_warning(FutureWarning):
index.get_duplicates()
def test_tab_complete_warning(self, ip):
# https://github.com/pandas-dev/pandas/issues/16409
pytest.importorskip('IPython', minversion="6.0.0")
from IPython.core.completer import provisionalcompleter
code = "import pandas as pd; idx = pd.Index([1, 2])"
ip.run_code(code)
with tm.assert_produces_warning(None):
with provisionalcompleter('ignore'):
list(ip.Completer.completions('idx.', 4))
class TestMixedIntIndex(Base):
# Mostly the tests from common.py for which the results differ
# in py2 and py3 because ints and strings are uncomparable in py3
# (GH 13514)
_holder = Index
def setup_method(self, method):
self.indices = dict(mixedIndex=Index([0, 'a', 1, 'b', 2, 'c']))
self.setup_indices()
def create_index(self):
return self.mixedIndex
def test_argsort(self):
index = self.create_index()
if PY36:
with pytest.raises(TypeError, match="'>|<' not supported"):
result = index.argsort()
elif PY3:
with pytest.raises(TypeError, match="unorderable types"):
result = index.argsort()
else:
result = index.argsort()
expected = np.array(index).argsort()
tm.assert_numpy_array_equal(result, expected, check_dtype=False)
def test_numpy_argsort(self):
index = self.create_index()
if PY36:
with pytest.raises(TypeError, match="'>|<' not supported"):
result = np.argsort(index)
elif PY3:
with pytest.raises(TypeError, match="unorderable types"):
result = np.argsort(index)
else:
result = np.argsort(index)
expected = index.argsort()
tm.assert_numpy_array_equal(result, expected)
def test_copy_name(self):
# Check that "name" argument passed at initialization is honoured
# GH12309
index = self.create_index()
first = index.__class__(index, copy=True, name='mario')
second = first.__class__(first, copy=False)
# Even though "copy=False", we want a new object.
assert first is not second
tm.assert_index_equal(first, second)
assert first.name == 'mario'
assert second.name == 'mario'
s1 = Series(2, index=first)
s2 = Series(3, index=second[:-1])
s3 = s1 * s2
assert s3.index.name == 'mario'
def test_copy_name2(self):
# Check that adding a "name" parameter to the copy is honored
# GH14302
index = pd.Index([1, 2], name='MyName')
index1 = index.copy()
tm.assert_index_equal(index, index1)
index2 = index.copy(name='NewName')
tm.assert_index_equal(index, index2, check_names=False)
assert index.name == 'MyName'
assert index2.name == 'NewName'
index3 = index.copy(names=['NewName'])
tm.assert_index_equal(index, index3, check_names=False)
assert index.name == 'MyName'
assert index.names == ['MyName']
assert index3.name == 'NewName'
assert index3.names == ['NewName']
def test_union_base(self):
index = self.create_index()
first = index[3:]
second = index[:5]
result = first.union(second)
expected = Index([0, 1, 2, 'a', 'b', 'c'])
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("klass", [
np.array, Series, list])
def test_union_different_type_base(self, klass):
# GH 10149
index = self.create_index()
first = index[3:]
second = index[:5]
result = first.union(klass(second.values))
assert tm.equalContents(result, index)
def test_unique_na(self):
idx = pd.Index([2, np.nan, 2, 1], name='my_index')
expected = pd.Index([2, np.nan, 1], name='my_index')
result = idx.unique()
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("sort", [None, False])
def test_intersection_base(self, sort):
# (same results for py2 and py3 but sortedness not tested elsewhere)
index = self.create_index()
first = index[:5]
second = index[:3]
expected = Index([0, 1, 'a']) if sort is None else Index([0, 'a', 1])
result = first.intersection(second, sort=sort)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("klass", [
np.array, Series, list])
@pytest.mark.parametrize("sort", [None, False])
def test_intersection_different_type_base(self, klass, sort):
# GH 10149
index = self.create_index()
first = index[:5]
second = index[:3]
result = first.intersection(klass(second.values), sort=sort)
assert tm.equalContents(result, second)
@pytest.mark.parametrize("sort", [None, False])
def test_difference_base(self, sort):
# (same results for py2 and py3 but sortedness not tested elsewhere)
index = self.create_index()
first = index[:4]
second = index[3:]
result = first.difference(second, sort)
expected = Index([0, 'a', 1])
if sort is None:
expected = Index(safe_sort(expected))
tm.assert_index_equal(result, expected)
def test_symmetric_difference(self):
# (same results for py2 and py3 but sortedness not tested elsewhere)
index = self.create_index()
first = index[:4]
second = index[3:]
result = first.symmetric_difference(second)
expected = Index([0, 1, 2, 'a', 'c'])
tm.assert_index_equal(result, expected)
def test_logical_compat(self):
index = self.create_index()
assert index.all() == index.values.all()
assert index.any() == index.values.any()
@pytest.mark.parametrize("how", ['any', 'all'])
@pytest.mark.parametrize("dtype", [
None, object, 'category'])
@pytest.mark.parametrize("vals,expected", [
([1, 2, 3], [1, 2, 3]), ([1., 2., 3.], [1., 2., 3.]),
([1., 2., np.nan, 3.], [1., 2., 3.]),
(['A', 'B', 'C'], ['A', 'B', 'C']),
(['A', np.nan, 'B', 'C'], ['A', 'B', 'C'])])
def test_dropna(self, how, dtype, vals, expected):
# GH 6194
index = pd.Index(vals, dtype=dtype)
result = index.dropna(how=how)
expected = pd.Index(expected, dtype=dtype)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("how", ['any', 'all'])
@pytest.mark.parametrize("index,expected", [
(pd.DatetimeIndex(['2011-01-01', '2011-01-02', '2011-01-03']),
pd.DatetimeIndex(['2011-01-01', '2011-01-02', '2011-01-03'])),
(pd.DatetimeIndex(['2011-01-01', '2011-01-02', '2011-01-03', pd.NaT]),
pd.DatetimeIndex(['2011-01-01', '2011-01-02', '2011-01-03'])),
(pd.TimedeltaIndex(['1 days', '2 days', '3 days']),
pd.TimedeltaIndex(['1 days', '2 days', '3 days'])),
(pd.TimedeltaIndex([pd.NaT, '1 days', '2 days', '3 days', pd.NaT]),
pd.TimedeltaIndex(['1 days', '2 days', '3 days'])),
(pd.PeriodIndex(['2012-02', '2012-04', '2012-05'], freq='M'),
pd.PeriodIndex(['2012-02', '2012-04', '2012-05'], freq='M')),
(pd.PeriodIndex(['2012-02', '2012-04', 'NaT', '2012-05'], freq='M'),
pd.PeriodIndex(['2012-02', '2012-04', '2012-05'], freq='M'))])
def test_dropna_dt_like(self, how, index, expected):
result = index.dropna(how=how)
tm.assert_index_equal(result, expected)
def test_dropna_invalid_how_raises(self):
msg = "invalid how option: xxx"
with pytest.raises(ValueError, match=msg):
pd.Index([1, 2, 3]).dropna(how='xxx')
def test_get_combined_index(self):
result = _get_combined_index([])
expected = Index([])
tm.assert_index_equal(result, expected)
def test_repeat(self):
repeats = 2
index = pd.Index([1, 2, 3])
expected = pd.Index([1, 1, 2, 2, 3, 3])
result = index.repeat(repeats)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("index", [
pd.Index([np.nan]), pd.Index([np.nan, 1]),
pd.Index([1, 2, np.nan]), pd.Index(['a', 'b', np.nan]),
pd.to_datetime(['NaT']), pd.to_datetime(['NaT', '2000-01-01']),
pd.to_datetime(['2000-01-01', 'NaT', '2000-01-02']),
pd.to_timedelta(['1 day', 'NaT'])])
def test_is_monotonic_na(self, index):
assert index.is_monotonic_increasing is False
assert index.is_monotonic_decreasing is False
assert index._is_strictly_monotonic_increasing is False
assert index._is_strictly_monotonic_decreasing is False
def test_repr_summary(self):
with cf.option_context('display.max_seq_items', 10):
result = repr(pd.Index(np.arange(1000)))
assert len(result) < 200
assert "..." in result
@pytest.mark.parametrize("klass", [Series, DataFrame])
def test_int_name_format(self, klass):
index = Index(['a', 'b', 'c'], name=0)
result = klass(lrange(3), index=index)
assert '0' in repr(result)
def test_print_unicode_columns(self):
df = pd.DataFrame({u("\u05d0"): [1, 2, 3],
"\u05d1": [4, 5, 6],
"c": [7, 8, 9]})
repr(df.columns) # should not raise UnicodeDecodeError
@pytest.mark.parametrize("func,compat_func", [
(str, text_type), # unicode string
(bytes, str) # byte string
])
def test_with_unicode(self, func, compat_func):
index = Index(lrange(1000))
if PY3:
func(index)
else:
compat_func(index)
def test_intersect_str_dates(self):
dt_dates = [datetime(2012, 2, 9), datetime(2012, 2, 22)]
index1 = Index(dt_dates, dtype=object)
index2 = Index(['aa'], dtype=object)
result = index2.intersection(index1)
expected = Index([], dtype=object)
tm.assert_index_equal(result, expected)
class TestIndexUtils(object):
@pytest.mark.parametrize('data, names, expected', [
([[1, 2, 3]], None, Index([1, 2, 3])),
([[1, 2, 3]], ['name'], Index([1, 2, 3], name='name')),
([['a', 'a'], ['c', 'd']], None,
MultiIndex([['a'], ['c', 'd']], [[0, 0], [0, 1]])),
([['a', 'a'], ['c', 'd']], ['L1', 'L2'],
MultiIndex([['a'], ['c', 'd']], [[0, 0], [0, 1]],
names=['L1', 'L2'])),
])
def test_ensure_index_from_sequences(self, data, names, expected):
result = ensure_index_from_sequences(data, names)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize('opname', ['eq', 'ne', 'le', 'lt', 'ge', 'gt',
'add', 'radd', 'sub', 'rsub',
'mul', 'rmul', 'truediv', 'rtruediv',
'floordiv', 'rfloordiv',
'pow', 'rpow', 'mod', 'divmod'])
def test_generated_op_names(opname, indices):
index = indices
if isinstance(index, ABCIndex) and opname == 'rsub':
# pd.Index.__rsub__ does not exist; though the method does exist
# for subclasses. see GH#19723
return
opname = '__{name}__'.format(name=opname)
method = getattr(index, opname)
assert method.__name__ == opname
@pytest.mark.parametrize('index_maker', tm.index_subclass_makers_generator())
def test_index_subclass_constructor_wrong_kwargs(index_maker):
# GH #19348
with pytest.raises(TypeError, match='unexpected keyword argument'):
index_maker(foo='bar')
def test_deprecated_fastpath():
with tm.assert_produces_warning(FutureWarning):
idx = pd.Index(
np.array(['a', 'b'], dtype=object), name='test', fastpath=True)
expected = pd.Index(['a', 'b'], name='test')
tm.assert_index_equal(idx, expected)
with tm.assert_produces_warning(FutureWarning):
idx = pd.Int64Index(
np.array([1, 2, 3], dtype='int64'), name='test', fastpath=True)
expected = pd.Index([1, 2, 3], name='test', dtype='int64')
tm.assert_index_equal(idx, expected)
with tm.assert_produces_warning(FutureWarning):
idx = pd.RangeIndex(0, 5, 2, name='test', fastpath=True)
expected = pd.RangeIndex(0, 5, 2, name='test')
tm.assert_index_equal(idx, expected)
with tm.assert_produces_warning(FutureWarning):
idx = pd.CategoricalIndex(['a', 'b', 'c'], name='test', fastpath=True)
expected = pd.CategoricalIndex(['a', 'b', 'c'], name='test')
tm.assert_index_equal(idx, expected)
authors: ["[email protected]"] | author: ""

blob_id: 2afef828f3e42fd38f81af846f9e6dca18544acf | directory_id: 23bb29a6cf89c57cea79d1d4a52890a232582b87
path: /setup.py | content_id: fb4d4c5626042066f7a2148ae399aa1ccba42226
detected_licenses: [] | license_type: no_license | repo_name: dave-m/domaincheck
snapshot_id: f52cf211c50c631b16c6972f0330028713f7840e | revision_id: c2ba6647dd85786c00f8913e84db8dbb20b0e143 | branch_name: refs/heads/master
visit_date: 2021-01-20T04:24:54.770287 | revision_date: 2014-02-23T20:04:00 | committer_date: 2014-02-23T20:04:00 | github_id: 6432702
star_events_count: 1 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 439 | extension: py
content:
#!/usr/bin/env python
# setuptools is required here: install_requires, include_package_data and
# zip_safe are setuptools options that plain distutils would silently ignore.
from setuptools import setup

setup(name='DomainCheck',
      version='0.1',
      description='Domain check website',
      author='David Mcilwee',
      author_email='[email protected]',
      url='',
      packages=['domaincheck'],
      include_package_data=True,
      zip_safe=False,
      install_requires=['Flask', 'dnspython']
      )
authors: ["[email protected]"] | author: ""

blob_id: 5f0af102f38a471e26634f5388a653dc173c417c | directory_id: 4ced1662347a724208d1fa9eafa00500538eb530
path: /src/squareservice/squareservice/genproto/demo_pb2_grpc.py | content_id: 7cf4ec955753621c1f10e73205b547c443aab929
detected_licenses: ["MIT"] | license_type: permissive | repo_name: brymck/gke-site
snapshot_id: 5e554057f540006c4a7ce5e2542c3289da1ece9c | revision_id: 1a4c36e3ebfb5fd5e85645e8a56e6c081eb807dd | branch_name: refs/heads/master
visit_date: 2020-04-14T08:31:43.662530 | revision_date: 2019-01-22T10:48:05 | committer_date: 2019-01-22T10:48:05 | github_id: 163737493
star_events_count: 0 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 4023 | extension: py
content:
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
import grpc
from . import demo_pb2 as demo__pb2
class HelloServiceStub(object):
# missing associated documentation comment in .proto file
pass
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.GetGreeting = channel.unary_unary(
'/gkesite.HelloService/GetGreeting',
request_serializer=demo__pb2.GreetingRequest.SerializeToString,
response_deserializer=demo__pb2.Greeting.FromString,
)
class HelloServiceServicer(object):
# missing associated documentation comment in .proto file
pass
def GetGreeting(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_HelloServiceServicer_to_server(servicer, server):
rpc_method_handlers = {
'GetGreeting': grpc.unary_unary_rpc_method_handler(
servicer.GetGreeting,
request_deserializer=demo__pb2.GreetingRequest.FromString,
response_serializer=demo__pb2.Greeting.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'gkesite.HelloService', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
class SquareServiceStub(object):
# missing associated documentation comment in .proto file
pass
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.GetSquare = channel.unary_unary(
'/gkesite.SquareService/GetSquare',
request_serializer=demo__pb2.SquareRequest.SerializeToString,
response_deserializer=demo__pb2.SquareResponse.FromString,
)
class SquareServiceServicer(object):
# missing associated documentation comment in .proto file
pass
def GetSquare(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_SquareServiceServicer_to_server(servicer, server):
rpc_method_handlers = {
'GetSquare': grpc.unary_unary_rpc_method_handler(
servicer.GetSquare,
request_deserializer=demo__pb2.SquareRequest.FromString,
response_serializer=demo__pb2.SquareResponse.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'gkesite.SquareService', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
class CountServiceStub(object):
# missing associated documentation comment in .proto file
pass
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.GetCount = channel.unary_unary(
'/gkesite.CountService/GetCount',
request_serializer=demo__pb2.CountRequest.SerializeToString,
response_deserializer=demo__pb2.CountResponse.FromString,
)
class CountServiceServicer(object):
# missing associated documentation comment in .proto file
pass
def GetCount(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_CountServiceServicer_to_server(servicer, server):
rpc_method_handlers = {
'GetCount': grpc.unary_unary_rpc_method_handler(
servicer.GetCount,
request_deserializer=demo__pb2.CountRequest.FromString,
response_serializer=demo__pb2.CountResponse.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'gkesite.CountService', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
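
# --- Illustrative client usage (not part of the generated file) ---
# A minimal sketch of how these stubs are typically consumed; the server
# address is an assumption, and the request fields are omitted.
#
#   import grpc
#   from . import demo_pb2, demo_pb2_grpc
#
#   channel = grpc.insecure_channel('localhost:50051')
#   stub = demo_pb2_grpc.SquareServiceStub(channel)
#   response = stub.GetSquare(demo_pb2.SquareRequest())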
authors: ["[email protected]"] | author: ""

blob_id: 9bc9825150a1f5bfa183564bdebfb36753c63a47 | directory_id: cdfce941968359d04e928f1bf137234d36c64b6b
path: /extras/ml-tweets/__init__.py | content_id: 77bfb9bfa1dd7d9f5d9690f58e14ef3c17c97c91
detected_licenses: [] | license_type: no_license | repo_name: lucmski/botornot
snapshot_id: 35bafeedc90724a0b0e3f40ac5bad647aea51110 | revision_id: 5357f43c85628d540c4d48391e37d19af9c8733a | branch_name: refs/heads/master
visit_date: 2020-07-15T13:49:19.016905 | revision_date: 2019-09-01T20:30:37 | committer_date: 2019-09-01T20:30:37 | github_id: 205576853
star_events_count: 1 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 163 | extension: py
content:
from . import getTweets
from . import analyzeTweets
from . import plotTweets
from . import getStockData
from . import analyzeStockData
from . import plotStockData
authors: ["[email protected]"] | author: ""

blob_id: c2420aa4c06b80b0a03746ec46732f57882ffada | directory_id: a3719ba9c5c834479f9e200816be0391863ebe78
path: /lesson_5_dz2.py | content_id: 6dafe1f089ebe94f6abaa254b11c418ffc151047
detected_licenses: [] | license_type: no_license | repo_name: NikDestrave/GeekBrains_Python
snapshot_id: 94745faf11b864ce3322fc393bcb3b8656bcffbb | revision_id: c9bf127c1580a6ba2037cfea002fb0e39163ed72 | branch_name: refs/heads/master
visit_date: 2020-12-20T17:29:44.077461 | revision_date: 2020-08-30T05:05:29 | committer_date: 2020-08-30T05:05:29 | github_id: 235931971
star_events_count: 0 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 506 | extension: py
content:
# 2. Create a text file (by hand, not programmatically), save several lines
# in it, then count the number of lines and the number of words in each line.
with open('file2.txt', 'r') as f:
    print(f'Number of lines = {len(f.readlines())}')
    f.seek(0)
    count = [len(line.split()) for line in f]
    print(count)
    print(f'Number of words = {sum(count)}')
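
# For example, if file2.txt contains the three lines
#   one two
#   three
#   four five six
# the script prints:
#   Number of lines = 3
#   [2, 1, 3]
#   Number of words = 6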
authors: ["[email protected]"] | author: ""

blob_id: 0a10e0805cfdfb512f97c9daac68c0b5bb312b75 | directory_id: f5b313dadf3b7e3d96ece1a456e21bc9bd1b26b4
path: /utils/validates.py | content_id: e588d8b546ba3bb53ddafbd578685cb173184215
detected_licenses: [] | license_type: no_license | repo_name: qq370154918/dev_04
snapshot_id: 547de9730bd680ba8b9ac1c6f819a000aa9e6e21 | revision_id: 5ac9ebea729cc1817754416de967adbe58e46fc5 | branch_name: refs/heads/master
visit_date: 2023-07-29T13:14:19.261719 | revision_date: 2020-12-29T08:02:58 | committer_date: 2020-12-29T08:02:58 | github_id: 284427132
star_events_count: 0 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: 2021-09-22T19:38:32 | gha_created_at: 2020-08-02T09:04:42 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 600 | extension: py
content:
from rest_framework import serializers
from projects.models import Projects
from interfaces.models import Interfaces
from envs.models import Envs
def is_exist_project_id(value):
if not Projects.objects.filter(id=value).exists():
        raise serializers.ValidationError('Project id does not exist')
def is_exist_interface_id(value):
if not Interfaces.objects.filter(id=value).exists():
        raise serializers.ValidationError('Interface id does not exist')
def is_exist_env_id(value):
if not Envs.objects.filter(id=value).exists():
        raise serializers.ValidationError('Environment id does not exist')
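
# Illustrative usage (not part of the original module): these validators are
# meant to be attached to serializer fields. The serializer name below is a
# made-up example.
#
#   class TestcaseSerializer(serializers.ModelSerializer):
#       project_id = serializers.IntegerField(validators=[is_exist_project_id])
#       env_id = serializers.IntegerField(validators=[is_exist_env_id])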
authors: ["[email protected]"] | author: ""

blob_id: 05289f7ef1d3bf5a16d16440b251f0bf7002e2b3 | directory_id: ff1477deb2b0bf0580ea512c1843a4085e639932
path: /main.py | content_id: 521a0f3e15a8d864423d29388bc782737b4fb0e9
detected_licenses: ["MIT"] | license_type: permissive | repo_name: SkylerHoward/O
snapshot_id: f7ff9955499483f4368e01cd5c2991970b160d29 | revision_id: 989246a5cdc297ab9f76cb6b26daebd799a03741 | branch_name: refs/heads/master
visit_date: 2021-07-08T19:57:12.042530 | revision_date: 2017-10-07T13:33:44 | committer_date: 2017-10-07T13:33:44 | github_id: 106098904
star_events_count: 0 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 484 | extension: py
content:
import weather, morning, time, sleep, events
from datetime import datetime
from speak import *
speak = speech()
def main():
while True:
command = input('> ')
if command == 'sleep':
speak.speak('Good night.')
for line in sleep.main():
speak.speak(line)
if command == 'quit':
quit()
if command == 'events':
te = events.today()
speak.speak('You have {} events today'.format(len(te)))
for line in te:
speak.speak(line)
main()
authors: ["[email protected]"] | author: ""

blob_id: bfe394598000549c8aa731dc5185e43ee6e450f1 | directory_id: 15581a76b36eab6062e71d4e5641cdfaf768b697
path: /Leetcode Contests/Biweekly Contest 24/Minimum Value to Get Positive Step by Step Sum.py | content_id: ed393ceda76cec842051a7cd8dd259618306c947
detected_licenses: [] | license_type: no_license | repo_name: MarianDanaila/Competitive-Programming
snapshot_id: dd61298cc02ca3556ebc3394e8d635b57f58b4d2 | revision_id: 3c5a662e931a5aa1934fba74b249bce65a5d75e2 | branch_name: refs/heads/master
visit_date: 2023-05-25T20:03:18.468713 | revision_date: 2023-05-16T21:45:08 | committer_date: 2023-05-16T21:45:08 | github_id: 254296597
star_events_count: 0 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 318 | extension: py
content:
from typing import List


class Solution:
    def minStartValue(self, nums: List[int]) -> int:
        # Track the running prefix sum and its minimum; the start value has
        # to lift the lowest prefix sum up to at least 1. (Renamed from the
        # original `sum`/`min`, which shadowed the Python builtins.)
        prefix_sum = 0
        min_prefix = nums[0]
        for num in nums:
            prefix_sum += num
            if prefix_sum < min_prefix:
                min_prefix = prefix_sum
        if min_prefix >= 0:
            return 1
        else:
            return abs(min_prefix) + 1
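
# Example from the problem statement: the prefix sums of [-3, 2, -3, 4, 2]
# are -3, -1, -4, 0, 2; the minimum is -4, so the answer is abs(-4) + 1 = 5.
#   Solution().minStartValue([-3, 2, -3, 4, 2])  # -> 5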
authors: ["[email protected]"] | author: ""

blob_id: 38eda1c03fac7753d07cce611e24ce538fed844e | directory_id: 69c1e3b0fcb12839c820cff744bf492546b2dbf7
path: /func.py | content_id: 7f360613f39cf79bc62ec90376538283e359602b
detected_licenses: [] | license_type: no_license | repo_name: 260734/260734
snapshot_id: 71d844ae3032a762d8a96f3052df3a7ba4b0f844 | revision_id: 40c02327ff82ad89d3583595913460f245f5ca77 | branch_name: refs/heads/master
visit_date: 2023-04-17T08:20:42.848803 | revision_date: 2021-04-26T19:50:39 | committer_date: 2021-04-26T19:50:39 | github_id: 359469206
star_events_count: 0 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 220 | extension: py
content:
def square(x):
return x * x
def cube(x):
return x * x * x
# create a dictionary of functions
funcs = {
'square': square,
'cube': cube,
}
x = 2
for func in sorted(funcs):
print(func, funcs[func](x))
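
# With x = 2 the loop prints the functions in sorted key order:
#   cube 8
#   square 4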
authors: ["[email protected]"] | author: ""

blob_id: 3f2801ee0162f33263eb9044744a84e3a7a154e9 | directory_id: 587973fdf376f448b90f44f713742ea02062666b
path: /config.py | content_id: 418ebf67d4cb96815ebbce4d111bc70640c10183
detected_licenses: [] | license_type: no_license | repo_name: whistlepark/UCSD-ECE148-WI20-TEAM6
snapshot_id: 79383160cd4c8cc458903bf45b5dd2ca4dbed1bd | revision_id: 9db60ed81146959c295963c47b780fcf3d20bc9f | branch_name: refs/heads/master
visit_date: 2021-01-15T03:27:32.500974 | revision_date: 2020-03-08T23:58:24 | committer_date: 2020-03-08T23:58:24 | github_id: 242863416
star_events_count: 0 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 13067 | extension: py
content:
"""
CAR CONFIG
This file is read by your car application's manage.py script to change the car
performance.
EXAMPLE
-----------
import dk
cfg = dk.load_config(config_path='~/mycar/config.py')
print(cfg.CAMERA_RESOLUTION)
"""
import os
#PATHS
CAR_PATH = PACKAGE_PATH = os.path.dirname(os.path.realpath(__file__))
DATA_PATH = os.path.join(CAR_PATH, 'data')
MODELS_PATH = os.path.join(CAR_PATH, 'models')
#VEHICLE
DRIVE_LOOP_HZ = 20 # the vehicle loop will pause if faster than this speed.
MAX_LOOPS = None # the vehicle loop can abort after this many iterations, when given a positive integer.
#CAMERA
CAMERA_TYPE = "PICAM" # (PICAM|WEBCAM|CVCAM|CSIC|V4L|MOCK)
IMAGE_W = 160
IMAGE_H = 120
IMAGE_DEPTH = 3 # default RGB=3, make 1 for mono
CAMERA_FRAMERATE = DRIVE_LOOP_HZ
# For CSIC camera - If the camera is mounted in a rotated position, changing the below parameter will correct the output frame orientation
CSIC_CAM_GSTREAMER_FLIP_PARM = 0 # (0 => none , 4 => Flip horizontally, 6 => Flip vertically)
#9865, overrides only if needed, i.e. TX2..
PCA9685_I2C_ADDR = 0x40 #I2C address, use i2cdetect to validate this number
PCA9685_I2C_BUSNUM = None #None will auto detect, which is fine on the pi. But other platforms should specify the bus num.
#DRIVETRAIN
#These options specify which chasis and motor setup you are using. Most are using SERVO_ESC.
#DC_STEER_THROTTLE uses HBridge pwm to control one steering dc motor, and one drive wheel motor
#DC_TWO_WHEEL uses HBridge pwm to control two drive motors, one on the left, and one on the right.
#SERVO_HBRIDGE_PWM use ServoBlaster to output pwm control from the PiZero directly to control steering, and HBridge for a drive motor.
DRIVE_TRAIN_TYPE = "SERVO_ESC" # SERVO_ESC|DC_STEER_THROTTLE|DC_TWO_WHEEL|SERVO_HBRIDGE_PWM
#STEERING
STEERING_CHANNEL = 1 #channel on the 9685 pwm board 0-15
STEERING_LEFT_PWM = 460 #pwm value for full left steering
STEERING_RIGHT_PWM = 290 #pwm value for full right steering
#THROTTLE
THROTTLE_CHANNEL = 0 #channel on the 9685 pwm board 0-15
THROTTLE_FORWARD_PWM = 500 #pwm value for max forward throttle
THROTTLE_STOPPED_PWM = 370 #pwm value for no movement
THROTTLE_REVERSE_PWM = 220 #pwm value for max reverse throttle
#DC_STEER_THROTTLE with one motor as steering, one as drive
#these GPIO pinouts are only used for the DRIVE_TRAIN_TYPE=DC_STEER_THROTTLE
HBRIDGE_PIN_LEFT = 18
HBRIDGE_PIN_RIGHT = 16
HBRIDGE_PIN_FWD = 15
HBRIDGE_PIN_BWD = 13
#DC_TWO_WHEEL - with two wheels as drive, left and right.
#these GPIO pinouts are only used for the DRIVE_TRAIN_TYPE=DC_TWO_WHEEL
HBRIDGE_PIN_LEFT_FWD = 18
HBRIDGE_PIN_LEFT_BWD = 16
HBRIDGE_PIN_RIGHT_FWD = 15
HBRIDGE_PIN_RIGHT_BWD = 13
#TRAINING
#The DEFAULT_MODEL_TYPE will choose which model will be created at training time. This chooses
#between different neural network designs. You can override this setting by passing the command
#line parameter --type to the python manage.py train and drive commands.
DEFAULT_MODEL_TYPE = 'linear' #(linear|categorical|rnn|imu|behavior|3d|localizer|latent)
BATCH_SIZE = 128 #how many records to use when doing one pass of gradient descent. Use a smaller number if your GPU is running out of memory.
TRAIN_TEST_SPLIT = 0.8 #what percent of records to use for training. the remaining used for validation.
MAX_EPOCHS = 100 #how many times to visit all records of your data
SHOW_PLOT = True #would you like to see a pop up display of final loss?
VEBOSE_TRAIN = True #would you like to see a progress bar with text during training?
USE_EARLY_STOP = True #would you like to stop the training if we see it's not improving fit?
EARLY_STOP_PATIENCE = 5 #how many epochs to wait before no improvement
MIN_DELTA = .0005 #early stop will want this much loss change before calling it improved.
PRINT_MODEL_SUMMARY = True #print layers and weights to stdout
OPTIMIZER = None #adam, sgd, rmsprop, etc.. None accepts default
LEARNING_RATE = 0.001 #only used when OPTIMIZER specified
LEARNING_RATE_DECAY = 0.0 #only used when OPTIMIZER specified
SEND_BEST_MODEL_TO_PI = False #change to true to automatically send best model during training
CACHE_IMAGES = True #keep images in memory. Will speed up successive epochs, but will fail if there is not enough memory.
PRUNE_CNN = False #This will remove weights from your model. The primary goal is to increase performance.
PRUNE_PERCENT_TARGET = 75 # The desired percentage of pruning.
PRUNE_PERCENT_PER_ITERATION = 20 # Percentage of pruning performed per iteration.
PRUNE_VAL_LOSS_DEGRADATION_LIMIT = 0.2 # The max amount of validation loss that is permitted during pruning.
PRUNE_EVAL_PERCENT_OF_DATASET = .05 # percent of dataset used to perform evaluation of model.
#Pi login information
#When using the continuous train option, these credentials will
#be used to copy the final model to your vehicle. If not using this option, no need to set these.
PI_USERNAME = "pi" # username on pi
PI_PASSWD = "raspberry" # password is optional. Only used from Windows machine. Ubuntu and mac users should copy their public keys to the pi. `ssh-copy-id username@hostname`
PI_HOSTNAME = "raspberrypi.local" # the network hostname or ip address
PI_DONKEY_ROOT = "/home/pi/mycar" # the location of the mycar dir on the pi. this will be used to help locate the final model destination.
# Region of interest cropping
# only supported in Categorical and Linear models.
# If these crop values are too large, they will cause the stride values to become negative and the model will not be valid.
ROI_CROP_TOP = 0 #the number of rows of pixels to ignore on the top of the image
ROI_CROP_BOTTOM = 0 #the number of rows of pixels to ignore on the bottom of the image
#Model transfer options
#When copying weights during a model transfer operation, should we freeze a certain number of layers
#to the incoming weights and not allow them to change during training?
FREEZE_LAYERS = False #default False will allow all layers to be modified by training
NUM_LAST_LAYERS_TO_TRAIN = 7 #when freezing layers, how many layers from the last should be allowed to train?
#JOYSTICK
USE_JOYSTICK_AS_DEFAULT = False #when starting the manage.py, when True, will not require a --js option to use the joystick
JOYSTICK_MAX_THROTTLE = 0.5 #this scalar is multiplied with the -1 to 1 throttle value to limit the maximum throttle. This can help if you drop the controller or just don't need the full speed available.
JOYSTICK_STEERING_SCALE = 1.0       #some people want a steering that is less sensitive. This scalar is multiplied with the steering -1 to 1. It can be negative to reverse dir.
AUTO_RECORD_ON_THROTTLE = False #if true, we will record whenever throttle is not zero. if false, you must manually toggle recording with some other trigger. Usually circle button on joystick.
CONTROLLER_TYPE='F710' #(ps3|ps4|xbox|nimbus|wiiu|F710|rc3)
USE_NETWORKED_JS = False #should we listen for remote joystick control over the network?
NETWORK_JS_SERVER_IP = "192.168.0.1"#when listening for network joystick control, which ip is serving this information
JOYSTICK_DEADZONE = 0.0 # when non zero, this is the smallest throttle before recording triggered.
JOYSTICK_THROTTLE_DIR = 1.0 # use -1.0 to flip forward/backward, use 1.0 to use joystick's natural forward/backward
#For the categorical model, this limits the upper bound of the learned throttle
#it's very IMPORTANT that this value is matched from the training PC config.py and the robot.py
#and ideally wouldn't change once set.
MODEL_CATEGORICAL_MAX_THROTTLE_RANGE = 0.5
#RNN or 3D
SEQUENCE_LENGTH = 3 #some models use a number of images over time. This controls how many.
#IMU
HAVE_IMU = False                #when true, this adds an Mpu6050 part and records the data. Can be used with a
#SOMBRERO
HAVE_SOMBRERO = False #set to true when using the sombrero hat from the Donkeycar store. This will enable pwm on the hat.
#RECORD OPTIONS
RECORD_DURING_AI = False #normally we do not record during ai mode. Set this to true to get image and steering records for your Ai. Be careful not to use them to train.
#LED
HAVE_RGB_LED = False #do you have an RGB LED like https://www.amazon.com/dp/B07BNRZWNF
LED_INVERT = False #COMMON ANODE? Some RGB LED use common anode. like https://www.amazon.com/Xia-Fly-Tri-Color-Emitting-Diffused/dp/B07MYJQP8B
#LED board pin number for pwm outputs
#These are physical pinouts. See: https://www.raspberrypi-spy.co.uk/2012/06/simple-guide-to-the-rpi-gpio-header-and-pins/
LED_PIN_R = 12
LED_PIN_G = 10
LED_PIN_B = 16
#LED status color, 0-100
LED_R = 0
LED_G = 0
LED_B = 1
#LED Color for record count indicator
REC_COUNT_ALERT = 1000 #how many records before blinking alert
REC_COUNT_ALERT_CYC = 15 #how many cycles of 1/20 of a second to blink per REC_COUNT_ALERT records
REC_COUNT_ALERT_BLINK_RATE = 0.4 #how fast to blink the led in seconds on/off
#first number is record count, second tuple is color ( r, g, b) (0-100)
#when record count exceeds that number, the color will be used
RECORD_ALERT_COLOR_ARR = [ (0, (1, 1, 1)),
(3000, (5, 5, 5)),
(5000, (5, 2, 0)),
(10000, (0, 5, 0)),
(15000, (0, 5, 5)),
(20000, (0, 0, 5)), ]
#LED status color, 0-100, for model reloaded alert
MODEL_RELOADED_LED_R = 100
MODEL_RELOADED_LED_G = 0
MODEL_RELOADED_LED_B = 0
#BEHAVIORS
#When training the Behavioral Neural Network model, make a list of the behaviors,
#Set the TRAIN_BEHAVIORS = True, and use the BEHAVIOR_LED_COLORS to give each behavior a color
TRAIN_BEHAVIORS = False
BEHAVIOR_LIST = ['Left_Lane', "Right_Lane"]
BEHAVIOR_LED_COLORS = [ (0, 10, 0), (10, 0, 0) ] #RGB tuples 0-100 per channel
#Localizer
#The localizer is a neural network that can learn to predict its location on the track.
#This is an experimental feature that needs more development. But it can currently be used
#to predict the segment of the course, where the course is divided into NUM_LOCATIONS segments.
TRAIN_LOCALIZER = False
NUM_LOCATIONS = 10
BUTTON_PRESS_NEW_TUB = False #when enabled, makes it easier to divide our data into one tub per track length if we make a new tub on each X button press.
#DonkeyGym
#Only on Ubuntu linux, you can use the simulator as a virtual donkey and
#issue the same python manage.py drive command as usual, but have them control a virtual car.
#This enables that, and sets the path to the simulator and the environment.
#You will want to download the simulator binary from: https://github.com/tawnkramer/donkey_gym/releases/download/v18.9/DonkeySimLinux.zip
#then extract that and modify DONKEY_SIM_PATH.
DONKEY_GYM = False
DONKEY_SIM_PATH = "path to sim" #"/home/tkramer/projects/sdsandbox/sdsim/build/DonkeySimLinux/donkey_sim.x86_64"
DONKEY_GYM_ENV_NAME = "donkey-generated-track-v0" # ("donkey-generated-track-v0"|"donkey-generated-roads-v0"|"donkey-warehouse-v0"|"donkey-avc-sparkfun-v0")
#publish camera over network
#This is used to create a tcp service to publish the camera feed
PUB_CAMERA_IMAGES = False
#When racing, to give the ai a boost, configure these values.
AI_LAUNCH_DURATION = 0.0 # the ai will output throttle for this many seconds
AI_LAUNCH_THROTTLE = 0.0 # the ai will output this throttle value
AI_LAUNCH_ENABLE_BUTTON = 'R2' # this keypress will enable this boost. It must be enabled before each use to prevent accidental trigger.
AI_LAUNCH_KEEP_ENABLED = False # when False ( default) you will need to hit the AI_LAUNCH_ENABLE_BUTTON for each use. This is safest. When this True, is active on each trip into "local" ai mode.
#Scale the output of the throttle of the ai pilot for all model types.
AI_THROTTLE_MULT = 1.0 # this multiplier will scale every throttle value for all output from NN models
#Path following
PATH_FILENAME = "donkey_path.pkl" #the path will be saved to this filename
PATH_SCALE = 5.0 # the path display will be scaled by this factor in the web page
PATH_OFFSET = (0, 0) # 255, 255 is the center of the map. This offset controls where the origin is displayed.
PATH_MIN_DIST = 0.3 # after travelling this distance (m), save a path point
PID_P = -10.0 # proportional mult for PID path follower
PID_I = 0.000 # integral mult for PID path follower
PID_D = -0.2 # differential mult for PID path follower
PID_THROTTLE = 0.2 # constant throttle value during path following
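# roughly, the follower applies standard PID to the cross-track error:
# steering = PID_P*err + PID_I*sum(err) + PID_D*d(err)/dt, so with these
# defaults an error of 0.1 contributes about -10.0*0.1 = -1.0 to steering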
SAVE_PATH_BTN = "cross" # joystick button to save path
RESET_ORIGIN_BTN = "triangle" # joystick button to press to move car back to origin
|
[
"[email protected]"
] | |
a7a836d34d05d89c6a4baa2e076e811ad85b12d4
|
adffddf23696048e0cef2eb09137b8124d87b1b8
|
/app/routes.py
|
5237df198c13bf37d87574bbdc297ff2ad1a7fc0
|
[] |
no_license
|
sonicdm/dgc_finder
|
4ba88bb136a8f6017cf8f97cdfb6326aae7c4241
|
5dd70d78fa83620fe7e96a947e5eb5510664f5e8
|
refs/heads/master
| 2020-03-19T00:30:54.935090 | 2018-05-30T18:47:02 | 2018-05-30T18:47:02 | 135,486,383 | 0 | 0 | null | 2018-05-30T19:08:24 | 2018-05-30T19:08:24 | null |
UTF-8
|
Python
| false | false | 760 |
py
|
from app import app
from flask import render_template, redirect, url_for, session
from app.functions import get_dgcr, gmaps_geolocator
import requests
import json
from app.forms import CityStateForm
from config import Config
@app.route('/', methods=['GET', 'POST'])
@app.route('/index', methods=['GET', 'POST'])
def index():
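    # Render the search form; on a valid POST, stash the lookup results and
    # redirect (POST/redirect/GET) so /results can display them from the session.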
form = CityStateForm()
if form.validate_on_submit():
session['courses'] = get_dgcr(form.city_field.data, form.state_field.data)
return redirect(url_for('results'))
return render_template('index.html', title='Home', form=form)
@app.route('/results', methods=['GET'])
def results():
return render_template('results.html', title='Course Results', results=session['courses'], gmaps_url=Config.GMAPS_URL)
|
[
"[email protected]"
] | |
c425f31861bfb4ccd22b1b6cf8fc228238168d3c
|
0257cede18ea0beb18486cc249adab1a2b15c6b7
|
/testmodules/RT/cartridge/server_side_bundling_libs_and_force_clean_build.py
|
de769345e1704967c0655f3998b015948839dfd5
|
[] |
no_license
|
xiama/automations
|
13c0308afc4fa0d3267025f8529f97d80f5cf6fb
|
4cf4e1ab5b249b23510f3929f46f3768529788f1
|
refs/heads/master
| 2021-01-17T05:36:02.990075 | 2013-11-29T04:26:00 | 2013-11-29T04:26:00 | 14,791,833 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 22,431 |
py
|
import os
import common
import OSConf
import rhtest
WORK_DIR = os.path.dirname(os.path.abspath(__file__))
class OpenShiftTest(rhtest.Test):
INTERACTIVE = False
def initialize(self):
try:
self.test_variant = self.get_variant()
except:
self.test_variant = 'perl'
if not common.app_types.has_key(self.test_variant):
raise Exception("Invalid/Unknown variable: OPENSHIFT_test_name")
self.info("VARIANT: %s"%self.test_variant)
self.app_name = common.getRandomString(10)
self.app_type = common.app_types[self.test_variant]
common.env_setup()
# print test case summary
self.info("""
[US561][rhc-cartridge] PHP: Pear pre-processing
[US561][rhc-cartridge] Perl: Cpan pre-processing
[US561][rhc-cartridge] Python: Easy_install pre-processing
[US561][rhc-cartridge] Ruby: Gem pre-processing
[US561][rhc-cartridge] Jboss: Maven pre-processing
[US1107][rhc-cartridge] PHP app libraries cleanup using force_clean_build marker
[US1107][rhc-cartridge] PERL app libraries cleanup using force_clean_build marker
[US1107][rhc-cartridge] WSGI app libraries cleanup using force_clean_build marker
[US1107][rhc-cartridge] RACK app libraries cleanup using force_clean_build marker
[US1107][rhc-cartridge] JBOSSAS app libraries cleanup using force_clean_build marker
[US590][Runtime][rhc-cartridge]nodejs app modules cleanup using force_clean_build marker""")
def finalize(self):
pass
class ServerSideBundlingLibsAndForceCleanBuild(OpenShiftTest):
def test_method(self):
# 1.Create an app
self.add_step("1. Create an %s app" % (self.test_variant),
common.create_app,
function_parameters=[self.app_name, self.app_type, self.config.OPENSHIFT_user_email, self.config.OPENSHIFT_user_passwd],
expect_description="the app should be created successfully",
expect_return=0)
# 2.Customize this app
if self.test_variant == "php":
cmd = "echo 'channel://pear.php.net/Validate-0.8.4' >%s/deplist.txt && cp -f %s/app_template/php_pear.php %s/php/index.php" %(self.app_name, WORK_DIR, self.app_name)
elif self.test_variant in ("ruby", "rack", "ruby-1.9"):
cmd = """cd %s && echo "source 'http://rubygems.org'\ngem 'rack'\ngem 'pg'" > Gemfile && sed -i "/require 'thread-dump'/ d" config.ru && bundle install""" %(self.app_name)
elif self.test_variant in ("python","wsgi"):
cmd = "cd %s && sed -i '9s/^#//g' setup.py && cp %s/app_template/wsgi-test.tar.gz ./ && tar xzvf wsgi-test.tar.gz" %(self.app_name, WORK_DIR)
elif self.test_variant == "perl":
cmd = """cd %s && echo -e '#!/usr/bin/perl\nprint "Content-type: text/html\\r\\n\\r\\n";\nprint "Welcome to OpenShift\\n";' >perl/index.pl && echo YAML >>deplist.txt""" %(self.app_name)
elif self.test_variant in ("jbossas", "jbosseap"):
cmd = "cd %s && cp %s/app_template/helloworld.tar.gz ./ && tar zxf helloworld.tar.gz" %(self.app_name, WORK_DIR)
elif self.test_variant in ("nodejs"):
cmd = """cd %s && sed -i '{\n/dependencies/ a\\\n "optimist": "0.3.4"\n}' package.json && sed -i "4 i var argv = require('optimist').argv;" server.js""" % (self.app_name)
else:
return self.failed("%s failed: Invalid test_variant" % self.__class__.__name__)
self.add_step("2.Customize this app",
cmd,
expect_description="the git repo should be modified successfully",
expect_return=0)
# 3.Git push all the changes
if self.test_variant == "php":
exp_str = "install ok: channel://pear.php.net/Validate-0.8.4"
elif self.test_variant in ("ruby", "rack", "ruby-1.9"):
exp_str = "Installing pg"
elif self.test_variant in ("python", "wsgi"):
exp_str = "Adding Django [\d.]+ to easy-install.pth"
elif self.test_variant == "perl":
exp_str = "Successfully installed YAML"
elif self.test_variant in ("jbossas", "jbosseap"):
exp_str = "remote: Downloading: .*javax.*"
elif self.test_variant in ("nodejs"):
exp_str = "remote: npm info install [email protected]"
else:
return self.failed("%s failed: Invalid test_variant" % self.__class__.__name__)
self.add_step("3.Git push all the changes",
"cd %s && touch x && git add . && git commit -am t && git push" %(self.app_name),
expect_description="Git push should succeed",
expect_return=0,
expect_str=[exp_str])
# 4. Generate test script
if self.test_variant == "php":
cmd_str = "ls ${OPENSHIFT_HOMEDIR}php/phplib/pear/pear/download/Validate-0.8.4.tgz && ls ${OPENSHIFT_HOMEDIR}php/phplib/pear/pear/php/Validate.php"
elif self.test_variant in ("ruby", "rack"):
cmd_str = "ls -la ${OPENSHIFT_REPO_DIR}vendor/bundle/ruby/1.8*/gems/pg*"
elif self.test_variant in ("ruby-1.9"):
cmd_str = "ls -la ${OPENSHIFT_REPO_DIR}vendor/bundle/ruby/1.9*/gems/pg*"
elif self.test_variant in ("python", "wsgi"):
cmd_str = "ls ${OPENSHIFT_HOMEDIR}python/virtenv/lib/python2.6/site-packages/Django*"
elif self.test_variant == "perl":
cmd_str = "ls ${OPENSHIFT_HOMEDIR}perl/perl5lib/lib/perl5/YAML"
elif self.test_variant in ("jbossas", "jbosseap"):
cmd_str = "ls ${OPENSHIFT_HOMEDIR}.m2/repository/javax"
elif self.test_variant in ("nodejs"):
cmd_str = "ls ${OPENSHIFT_REPO_DIR}node_modules/optimist/"
else:
return self.failed("%s failed: Invalid test_variant" % self.__class__.__name__)
shell_script = '''#!/bin/bash
command="%s"
echo "$command"
eval "$command"
test $? == 0 && echo "RESULT=0" || echo "RESULT=1"''' %(cmd_str)
self.add_step("4.Write .openshift/action_hooks/deploy",
"echo '%s' >%s/.openshift/action_hooks/deploy; \n chmod +x %s/.openshift/action_hooks/deploy" %(shell_script, self.app_name, self.app_name),
expect_return=0)
# 5.Check the dependencies are installed
self.add_step("5.Check the dependencies are installed vir git hooks",
"cd %s && touch xx && git add . && git commit -am t && git push" %(self.app_name),
expect_description="Check should PASS",
expect_return=0,
expect_str=["RESULT=0"])
# 6.Check app via browser
def get_app_url(self, suffix=""):
def closure():
return OSConf.get_app_url(self.app_name)+suffix
return closure
url_suffix=""
if self.test_variant == "php":
test_html = "get_correct_number"
elif self.test_variant in ("ruby", "rack", "ruby-1.9"):
test_html = "Welcome to OpenShift"
elif self.test_variant in ("python", "wsgi"):
test_html = "Congratulations on your first Django-powered page"
elif self.test_variant == "perl":
test_html = "Welcome to OpenShift"
elif self.test_variant in ("jbossas", "jbosseap"):
test_html = "Hello World!"
url_suffix = "/HelloWorld/HelloWorld"
elif self.test_variant in ("nodejs"):
test_html = "Welcome to OpenShift"
else:
return self.failed("%s failed: Invalid test_variant" % self.__class__.__name__)
self.add_step("6.Check app via browser",
common.grep_web_page,
function_parameters=[get_app_url(self, url_suffix), test_html, "-H 'Pragma: no-cache' -L", 5, 9],
expect_description="'%s' should be found in the web page" % (test_html),
expect_return=0)
# 7. Using the installed package
if self.test_variant == "php":
exp_str = ""
unexp_str = "remote: downloading"
elif self.test_variant in ("ruby", "rack", "ruby-1.9"):
exp_str = "remote: Using pg"
unexp_str = "remote: Installing"
elif self.test_variant in ("python", "wsgi"):
exp_str = ""
unexp_str = "remote: Downloading"
elif self.test_variant == "perl":
exp_str = ""
unexp_str = "remote: Fetching"
elif self.test_variant in ("jbossas", "jbosseap"):
exp_str = ""
unexp_str = "remote: Downloading"
elif self.test_variant in ("nodejs"):
exp_str = ""
unexp_str = "remote: npm http GET.*optimist"
else:
return self.failed("%s failed: Invalid test_variant" % self.__class__.__name__)
self.add_step("7. Re-using the installed libs, no new installation",
"cd %s && touch xxx && git add . && git commit -am t && git push" %(self.app_name),
expect_description="Check should PASS",
expect_return=0,
expect_str=[exp_str],
unexpect_str=[unexp_str])
# 8. More test for rack app
if self.test_variant in ( "rack","ruby", "ruby-1.9"):
self.add_step(
"8. Edit Gemfile to add another gem we want to install,",
'''cd %s && echo "gem 'rhc'" >>Gemfile ; bundle check ; bundle install ; sed -i "s/rhc \(.*\)/rhc \(0.71.2\)/g" Gemfile.lock''' %(self.app_name),
expect_return=0)
self.add_step(
"9. Re-using the installed libs, and install new libs",
"cd %s && git add . && git commit -am t && git push" %(self.app_name),
expect_return=0,
expect_str=["remote: Using pg", "remote: Installing rhc"])
else:
self.info("skip step 8")
self.info("skip step 9")
        # 10. Touch an empty force_clean_build file in your local git repo
        self.add_step("10. Touch an empty force_clean_build file in your local git repo.",
"touch %s/.openshift/markers/force_clean_build" %(self.app_name),
expect_description="Successfully touched force_clean_build",
expect_return=0)
# 11. Remove libraries
if self.test_variant == "php":
cmd = "echo '' > %s/deplist.txt" %(self.app_name)
elif self.test_variant in ("jbossas", "jbosseap"):
cmd = "echo 'No denpendency need to be remove for jbossas app'"
elif self.test_variant in ("ruby", "rack", "ruby-1.9"):
cmd = "cd %s && sed -i '$d' Gemfile && bundle check" %(self.app_name)
elif self.test_variant in ("python", "wsgi"):
cmd = "cd %s && sed -i '9s/^/#/g' setup.py" %(self.app_name)
elif self.test_variant == "perl":
cmd = "echo '' > %s/deplist.txt" %(self.app_name)
elif self.test_variant in ("nodejs"):
cmd = "cd %s && sed -i '{/optimist/ d}' package.json" % (self.app_name)
else:
return self.failed("%s failed: Invalid test_variant" % self.__class__.__name__)
self.add_step("11. Remove libraries dependency",
cmd,
expect_description="Modification succeed",
expect_return=0)
# 12. re-write .openshift/action_hooks/deploy
if self.test_variant == "php":
cmd_str = "ls ${OPENSHIFT_HOMEDIR}php/phplib/pear/pear/download/Validate-0.8.4.tgz || ls ${OPENSHIFT_HOMEDIR}php/phplib/pear/pear/php/Validate.php"
cmd = """sed -i 's#command="ls.*"#command="%s"#g' %s/.openshift/action_hooks/deploy""" %(cmd_str, self.app_name)
elif self.test_variant in ("ruby", "rack"):
cmd_str = "ls ${OPENSHIFT_REPO_DIR}vendor/bundle/ruby/1.8*/gems/rhc*"
cmd = """sed -i 's#command="ls.*"#command="%s"#g' %s/.openshift/action_hooks/deploy""" %(cmd_str, self.app_name)
elif self.test_variant in ("ruby-1.9"):
cmd_str = "ls ${OPENSHIFT_REPO_DIR}vendor/bundle/ruby/1.9*/gems/rhc*"
cmd = """sed -i 's#command="ls.*"#command="%s"#g' %s/.openshift/action_hooks/deploy""" %(cmd_str, self.app_name)
elif self.test_variant == "perl":
cmd_str = "ls ${OPENSHIFT_HOMEDIR}perl/perl5lib/lib || ls ~/.cpanm/work"
cmd = """sed -i 's#command="ls.*"#command="%s"#g' %s/.openshift/action_hooks/deploy""" %(cmd_str, self.app_name)
elif self.test_variant in ("python", "wsgi"):
cmd = "echo 'No need to re-write for wsgi app'"
elif self.test_variant in ("jbossas", "jbosseap"):
cmd = "echo 'No need to re-write for jbossas app'"
elif self.test_variant in ("nodejs"):
cmd = "echo 'No need to re-write for jbossas app'"
else:
return self.failed("%s failed: Invalid test_variant" % self.__class__.__name__)
self.add_step("12. Re-write .openshift/action_hooks/deploy",
cmd,
expect_return=0)
# 13. git push all the changes
if self.test_variant in ("jbossas", "jbosseap"):
str_list = [".openshift/markers/force_clean_build found", "remote: Downloading"]
unexpect_str_list = []
elif self.test_variant in ("ruby", "rack", "ruby-1.9"):
str_list = ["remote: Installing pg", "ls: cannot access", "RESULT=1"]
unexpect_str_list = ["remote: Installing rhc"]
elif self.test_variant == "php":
str_list = [".openshift/markers/force_clean_build found", "ls: cannot access", "RESULT=1"]
unexpect_str_list = ["remote: downloading"]
elif self.test_variant == "perl":
str_list = [".openshift/markers/force_clean_build found", "ls: cannot access", "RESULT=1"]
unexpect_str_list = ["remote: Fetching"]
elif self.test_variant in ("python", "wsgi"):
str_list = [".openshift/markers/force_clean_build found", "ls: cannot access", "RESULT=1"]
unexpect_str_list = ["remote: Downloading"]
elif self.test_variant in ("nodejs"):
str_list = ["force_clean_build marker found! Recreating npm modules", "ls: cannot access", "RESULT=1"]
unexpect_str_list = []
else:
return self.failed("%s failed: Invalid test_variant" % self.__class__.__name__)
self.add_step("13. git push all the changes",
"cd %s && touch xxxx && git add . && git commit -am t && git push"
%(self.app_name),
expect_description="libraries are removed successfully",
expect_return=0,
expect_str=str_list,
unexpect_str=unexpect_str_list)
# 14.Check app via browser
url_suffix=""
if self.test_variant == "php":
test_html = ""
unexpect_test_html = "get_correct_number"
elif self.test_variant in ("ruby", "rack", "ruby-1.9"):
test_html = "Welcome to OpenShift"
unexpect_test_html = "NO_XX"
elif self.test_variant in ("python","wsgi"):
test_html = "Internal Server Error"
unexpect_test_html = "Congratulations on your first Django-powered page"
elif self.test_variant == "perl":
test_html = "Welcome to OpenShift"
unexpect_test_html = "NO_XX"
elif self.test_variant in ("jbossas", "jbosseap"):
test_html = "Hello World!"
unexpect_test_html = "NO_XX"
url_suffix = "/HelloWorld/HelloWorld"
elif self.test_variant in ("nodejs"):
test_html = "Service Temporarily Unavailable"
unexpect_test_html = "Welcome to OpenShift"
else:
return self.failed("%s failed: Invalid test_variant" % self.__class__.__name__)
self.add_step(
"14.Check app via browser, php/wsgi app should NOT availale Now, jbossas/perl/rack still working fine",
"curl -L -H 'Pragma: no-cache' %s",
string_parameters = [get_app_url(self, url_suffix)],
expect_str=[test_html],
unexpect_str=[unexpect_test_html],
try_interval=9,
try_count=6)
# 15. Add libraries back
if self.test_variant == "php":
cmd = "echo 'channel://pear.php.net/Validate-0.8.4' > %s/deplist.txt" %(self.app_name)
elif self.test_variant in ("jbossas", "jbosseap"):
cmd = "echo 'No denpendency need to be remove for jbossas app'"
elif self.test_variant in ("ruby", "rack", "ruby-1.9"):
cmd = '''cd %s && echo "gem 'rhc'" >>Gemfile && bundle check && sed -i "s/rhc \(.*\)/rhc \(0.71.2\)/g" Gemfile.lock''' %(self.app_name)
elif self.test_variant in ("python", "wsgi"):
cmd = "cd %s && sed -i '9s/^#//g' setup.py" %(self.app_name)
elif self.test_variant == "perl":
cmd = "echo 'YAML' > %s/deplist.txt" %(self.app_name)
elif self.test_variant in ("nodejs"):
cmd = """cd %s && sed -i '{\n/dependencies/ a\\\n "optimist": "0.3.4"\n}' package.json""" % (self.app_name)
else:
return self.failed("%s failed: Invalid test_variant" % self.__class__.__name__)
if self.test_variant in ("jbossas", "jbosseap"):
self.info("skip step 15 for jbossas app")
else:
self.add_step("15. Added libraries denpendency back",
cmd,
expect_return=0)
# 16. re-write .openshift/action_hooks/deploy
if self.test_variant == "php":
cmd_str = "ls ${OPENSHIFT_HOMEDIR}php/phplib/pear/pear/download/Validate-0.8.4.tgz \&\& ls ${OPENSHIFT_HOMEDIR}php/phplib/pear/pear/php/Validate.php"
cmd = """sed -i 's#command="ls.*"#command="%s"#g' %s/.openshift/action_hooks/deploy""" %(cmd_str, self.app_name)
elif self.test_variant in ("ruby", "rack", "ruby-1.9"):
cmd = "echo 'No need to re-write for rack app'"
elif self.test_variant == "perl":
cmd_str = "ls ${OPENSHIFT_HOMEDIR}perl/perl5lib/lib \&\& ls ~/.cpanm/work"
cmd = """sed -i 's#command="ls.*"#command="%s"#g' %s/.openshift/action_hooks/deploy""" %(cmd_str, self.app_name)
elif self.test_variant in ("python","wsgi"):
cmd = "echo 'No need to re-write for wsgi app'"
elif self.test_variant in ("jbossas", "jbosseap"):
cmd = "echo 'No need to re-write for jbossas app'"
elif self.test_variant in ("nodejs"):
cmd = "echo 'No need to re-write for nodejs app'"
else:
return self.failed("%s failed: Invalid test_variant" % self.__class__.__name__)
if self.test_variant in ("jbossas", "jbosseap"):
print "\nskip step 16 for jbossas app"
else:
self.add_step(
"16. Re-write .openshift/action_hooks/deploy",
cmd,
expect_return=0)
# 17. git push all the changes
if self.test_variant in ("jbossas", "jbosseap"):
str_list = [".openshift/markers/force_clean_build found", "remote: Downloading"]
elif self.test_variant in ("ruby", "rack", "ruby-1.9"):
str_list = ["remote: Installing pg", "remote: Installing rhc", "RESULT=0"]
unexpect_str_list = ["No such file or directory"]
elif self.test_variant == "php":
str_list = [".openshift/markers/force_clean_build found", "remote: downloading", "RESULT=0"]
unexpect_str_list = ["No such file or directory"]
elif self.test_variant == "perl":
str_list = [".openshift/markers/force_clean_build found", "remote: Fetching", "RESULT=0"]
unexpect_str_list = ["No such file or directory"]
elif self.test_variant in ("python", "wsgi"):
str_list = [".openshift/markers/force_clean_build found", "remote: Downloading", "RESULT=0"]
unexpect_str_list = ["No such file or directory"]
        elif self.test_variant in ("nodejs"):
            str_list = ["RESULT=0"]
            unexpect_str_list = []
else:
return self.failed("%s failed: Invalid test_variant" % self.__class__.__name__)
if self.test_variant in ("jbossas", "jbosseap"):
self.info("skip step 17 for jbossas app")
else:
self.add_step("17. git push all the changes",
"cd %s && touch xxxxx && git add . && git commit -am t && git push" %(self.app_name),
expect_description="libraries are removed successfully",
expect_return=0,
expect_str=str_list,
unexpect_str=unexpect_str_list)
# 18.Check app via browser
if self.test_variant == "php":
test_html = "get_correct_number"
elif self.test_variant in ("rack","ruby", "ruby-1.9"):
test_html = "Welcome to OpenShift"
elif self.test_variant in ( "wsgi", "python") :
test_html = "Congratulations on your first Django-powered page"
elif self.test_variant == "perl":
test_html = "Welcome to OpenShift"
elif self.test_variant in ("jbossas", "jbosseap"):
test_html = "Hello World!"
elif self.test_variant in ("nodejs"):
test_html = "Welcome to OpenShift"
else:
return self.failed("%s failed: Invalid test_variant" % self.__class__.__name__)
if self.test_variant in ("jbossas", "jbosseap"):
self.info("skip step 18 for jbossas app")
else:
self.add_step(
"18.Check app via browser, now all kinds of app should work fine",
"curl -H 'Pragma: no-cache' %s",
string_parameters = [get_app_url(self)],
expect_return=0,
expect_str=[test_html],
try_interval=9,
try_count=3)
self.run_steps()
return self.passed("%s passed" % self.__class__.__name__)
class OpenShiftTestSuite(rhtest.TestSuite):
pass
def get_suite(conf):
suite = OpenShiftTestSuite(conf)
suite.add_test(ServerSideBundlingLibsAndForceCleanBuild)
return suite
def run(conf):
suite = get_suite(conf)
suite()
#
# vim: set tabstop=4:shiftwidth=4:expandtab:
|
[
"[email protected]"
] | |
e4ae96c0131406c2419a148c0186b3269acfa42f
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03964/s755365360.py
|
9f2a66cabd6d3f24f2aafce6d59b731dbfbc227f
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 619 |
py
|
import bisect
import collections
import copy
import functools
import heapq
import math
import sys
from collections import deque
from collections import defaultdict
input = sys.stdin.readline
MOD = 10**9+7
N = int(input())
T = [0]*N
A = [0]*N
for i in range(N):
T[i],A[i] = map(int,(input().split()))
t,a = T[0],A[0]
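# For each later ratio (T[i], A[i]), binary-search the smallest multiplier
# `now` with T[i]*now >= t and A[i]*now >= a, so both vote totals never
# decrease while the exact T:A ratio is preserved.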
for i in range(1,N):
s = T[i] + A[i]
now = 1
l = 1
r = 10**18//s + 1
mae = -1
while now != mae:
mae = now
if T[i]*now < t or A[i]*now < a:
l = now
else:
r = now
now = (l+r+1)//2
t,a = T[i]*now,A[i]*now
print(t+a)
|
[
"[email protected]"
] | |
75ef2897c41de5031b13106195ac168311611da6
|
626fdb9df49fc36fff5018a489e6089b0986ebd8
|
/04/summing_one_million.py
|
3aa0ce9559bdaa647f203af788d6bfee0ca59c89
|
[] |
no_license
|
ypan1988/python_work
|
7bbf9ee2badb981bd2b309f39e07d49d761002c0
|
464f86f8e9348f4c604928d7db63c75e83aefd10
|
refs/heads/main
| 2023-03-27T11:12:28.934378 | 2021-03-28T22:00:28 | 2021-03-28T22:00:28 | 352,447,281 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 94 |
py
|
numbers = list(range(1,1000001))
print(min(numbers))
print(max(numbers))
print(sum(numbers))
|
[
"[email protected]"
] | |
79113f4805093df2885ff54448e5bccd23cc252a
|
b11273fa7e74564f9e6b5b7448df266c7880ddac
|
/SmartP.py
|
2aa08aad4ad7ba9b9f945552c369cf286a8448d2
|
[] |
no_license
|
VNDIRECT/EngineP
|
5a08873d368846776775f9bf329c88804e9a9150
|
9a9f44f4a35a91ccdbf4bcf892077bae2373b157
|
refs/heads/master
| 2021-01-19T03:41:00.908508 | 2017-10-04T06:40:11 | 2017-10-04T06:40:11 | 65,744,760 | 4 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,110 |
py
|
# -*- coding: utf-8 -*-
"""
Created on Sun Aug 14 18:38:25 2016
@author: phucn_000
"""
import json
from engineP import compute
from portopt import markowitz
import finfo
from error import CommonError
from flask import Flask, request, jsonify
from crossdomain import crossdomain
import time
app = Flask(__name__)
def parse_portfolio(request):
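    # Expects query params like ?symbols=AAA,BBB&quantities=10,20&cash=5000
    # and returns ({'AAA': 10, 'BBB': 20}, 5000).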
symbols = request.args.get('symbols').split(',')
    quantities = list(map(int, request.args.get('quantities').split(',')))
cash_param = request.args.get('cash')
cash = int(cash_param) if cash_param is not None else 0
if len(symbols) != len(quantities):
        raise CommonError('symbols and quantities must be of the same length')
myP = {k: v for k,v in zip(symbols, quantities)}
return myP, cash
@app.route("/")
@crossdomain(origin="*")
def hello():
# try:
portfolio, cash = parse_portfolio(request)
return json.dumps(compute(portfolio, cash))
# except Exception as e:
# raise CommonError(e.message)
@app.route("/markowitz")
@crossdomain(origin="*")
def markowitz_endpoint():
# try:
portfolio, cash = parse_portfolio(request)
return json.dumps(markowitz(portfolio, cash))
# except Exception as e:
# raise CommonError(e.message)
## Currently not working, should fork another process instead
# @app.route("/refresh")
# @crossdomain(origin="*")
# def refresh_price():
# """
# This endpoint refresh all price data
# """
# try:
# price = finfo.PriceStash()
# price.full_refetch()
# return jsonify({'status': 'OK'})
# except Exception as e:
# raise CommonError('Error while refresh: {}'.format(e.message))
# @app.errorhandler(CommonError)
# def handle_error(error):
# response = jsonify(error.to_dict())
# response.status_code = error.status_code
# return response
@app.route("/error")
@crossdomain(origin="*")
def error_endpoint():
raise CommonError('This endpoint has an error')
if __name__ == "__main__":
app.run(host='0.0.0.0', threaded=True, debug=True)
|
[
"[email protected]"
] | |
526512060ec60f64cab763dcdc20a58c882fa21b
|
e3040a2e23a856e319e02037dc6baf3882c796b9
|
/samples/openapi3/client/petstore/python/petstore_api/paths/pet_find_by_status/get.py
|
bca423ad68f208522270ab2159908c0f06ae7b00
|
[
"Apache-2.0"
] |
permissive
|
mishin/openapi-generator
|
2ed2e0739c0cc2a627c25191d5898071d9294036
|
3ed650307513d552404f3d76487f3b4844acae41
|
refs/heads/master
| 2023-06-10T03:01:09.612130 | 2022-10-14T08:29:15 | 2022-10-14T08:29:15 | 271,080,285 | 0 | 0 |
Apache-2.0
| 2023-05-30T02:01:25 | 2020-06-09T18:29:41 |
Java
|
UTF-8
|
Python
| false | false | 12,472 |
py
|
# coding: utf-8
"""
Generated by: https://openapi-generator.tech
"""
from dataclasses import dataclass
import typing_extensions
import urllib3
from urllib3._collections import HTTPHeaderDict
from petstore_api import api_client, exceptions
from datetime import date, datetime # noqa: F401
import decimal # noqa: F401
import functools # noqa: F401
import io # noqa: F401
import re # noqa: F401
import typing # noqa: F401
import typing_extensions # noqa: F401
import uuid # noqa: F401
import frozendict # noqa: F401
from petstore_api import schemas # noqa: F401
from petstore_api.model.pet import Pet
from . import path
# Query params
class StatusSchema(
schemas.ListSchema
):
class MetaOapg:
class items(
schemas.EnumBase,
schemas.StrSchema
):
class MetaOapg:
enum_value_to_name = {
"available": "AVAILABLE",
"pending": "PENDING",
"sold": "SOLD",
}
@schemas.classproperty
def AVAILABLE(cls):
return cls("available")
@schemas.classproperty
def PENDING(cls):
return cls("pending")
@schemas.classproperty
def SOLD(cls):
return cls("sold")
def __new__(
cls,
arg: typing.Union[typing.Tuple[typing.Union[MetaOapg.items, str, ]], typing.List[typing.Union[MetaOapg.items, str, ]]],
_configuration: typing.Optional[schemas.Configuration] = None,
) -> 'StatusSchema':
return super().__new__(
cls,
arg,
_configuration=_configuration,
)
def __getitem__(self, i: int) -> MetaOapg.items:
return super().__getitem__(i)
RequestRequiredQueryParams = typing_extensions.TypedDict(
'RequestRequiredQueryParams',
{
'status': typing.Union[StatusSchema, list, tuple, ],
}
)
RequestOptionalQueryParams = typing_extensions.TypedDict(
'RequestOptionalQueryParams',
{
},
total=False
)
class RequestQueryParams(RequestRequiredQueryParams, RequestOptionalQueryParams):
pass
request_query_status = api_client.QueryParameter(
name="status",
style=api_client.ParameterStyle.FORM,
schema=StatusSchema,
required=True,
)
_auth = [
'http_signature_test',
'petstore_auth',
]
class SchemaFor200ResponseBodyApplicationXml(
schemas.ListSchema
):
class MetaOapg:
@staticmethod
def items() -> typing.Type['Pet']:
return Pet
def __new__(
cls,
arg: typing.Union[typing.Tuple['Pet'], typing.List['Pet']],
_configuration: typing.Optional[schemas.Configuration] = None,
) -> 'SchemaFor200ResponseBodyApplicationXml':
return super().__new__(
cls,
arg,
_configuration=_configuration,
)
def __getitem__(self, i: int) -> 'Pet':
return super().__getitem__(i)
class SchemaFor200ResponseBodyApplicationJson(
schemas.ListSchema
):
class MetaOapg:
@staticmethod
def items() -> typing.Type['Pet']:
return Pet
def __new__(
cls,
arg: typing.Union[typing.Tuple['Pet'], typing.List['Pet']],
_configuration: typing.Optional[schemas.Configuration] = None,
) -> 'SchemaFor200ResponseBodyApplicationJson':
return super().__new__(
cls,
arg,
_configuration=_configuration,
)
def __getitem__(self, i: int) -> 'Pet':
return super().__getitem__(i)
@dataclass
class ApiResponseFor200(api_client.ApiResponse):
response: urllib3.HTTPResponse
body: typing.Union[
SchemaFor200ResponseBodyApplicationXml,
SchemaFor200ResponseBodyApplicationJson,
]
headers: schemas.Unset = schemas.unset
_response_for_200 = api_client.OpenApiResponse(
response_cls=ApiResponseFor200,
content={
'application/xml': api_client.MediaType(
schema=SchemaFor200ResponseBodyApplicationXml),
'application/json': api_client.MediaType(
schema=SchemaFor200ResponseBodyApplicationJson),
},
)
@dataclass
class ApiResponseFor400(api_client.ApiResponse):
response: urllib3.HTTPResponse
body: schemas.Unset = schemas.unset
headers: schemas.Unset = schemas.unset
_response_for_400 = api_client.OpenApiResponse(
response_cls=ApiResponseFor400,
)
_status_code_to_response = {
'200': _response_for_200,
'400': _response_for_400,
}
_all_accept_content_types = (
'application/xml',
'application/json',
)
class BaseApi(api_client.Api):
@typing.overload
def _find_pets_by_status_oapg(
self,
query_params: RequestQueryParams = frozendict.frozendict(),
accept_content_types: typing.Tuple[str] = _all_accept_content_types,
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
skip_deserialization: typing_extensions.Literal[False] = ...,
) -> typing.Union[
ApiResponseFor200,
]: ...
@typing.overload
def _find_pets_by_status_oapg(
self,
query_params: RequestQueryParams = frozendict.frozendict(),
accept_content_types: typing.Tuple[str] = _all_accept_content_types,
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
) -> api_client.ApiResponseWithoutDeserialization: ...
@typing.overload
def _find_pets_by_status_oapg(
self,
query_params: RequestQueryParams = frozendict.frozendict(),
accept_content_types: typing.Tuple[str] = _all_accept_content_types,
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
skip_deserialization: bool = ...,
) -> typing.Union[
ApiResponseFor200,
api_client.ApiResponseWithoutDeserialization,
]: ...
def _find_pets_by_status_oapg(
self,
query_params: RequestQueryParams = frozendict.frozendict(),
accept_content_types: typing.Tuple[str] = _all_accept_content_types,
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
skip_deserialization: bool = False,
):
"""
Finds Pets by status
:param skip_deserialization: If true then api_response.response will be set but
api_response.body and api_response.headers will not be deserialized into schema
class instances
"""
self._verify_typed_dict_inputs_oapg(RequestQueryParams, query_params)
used_path = path.value
prefix_separator_iterator = None
for parameter in (
request_query_status,
):
parameter_data = query_params.get(parameter.name, schemas.unset)
if parameter_data is schemas.unset:
continue
if prefix_separator_iterator is None:
prefix_separator_iterator = parameter.get_prefix_separator_iterator()
serialized_data = parameter.serialize(parameter_data, prefix_separator_iterator)
for serialized_value in serialized_data.values():
used_path += serialized_value
_headers = HTTPHeaderDict()
# TODO add cookie handling
if accept_content_types:
for accept_content_type in accept_content_types:
_headers.add('Accept', accept_content_type)
response = self.api_client.call_api(
resource_path=used_path,
method='get'.upper(),
headers=_headers,
auth_settings=_auth,
stream=stream,
timeout=timeout,
)
if skip_deserialization:
api_response = api_client.ApiResponseWithoutDeserialization(response=response)
else:
response_for_status = _status_code_to_response.get(str(response.status))
if response_for_status:
api_response = response_for_status.deserialize(response, self.api_client.configuration)
else:
api_response = api_client.ApiResponseWithoutDeserialization(response=response)
if not 200 <= response.status <= 299:
raise exceptions.ApiException(api_response=api_response)
return api_response
class FindPetsByStatus(BaseApi):
# this class is used by api classes that refer to endpoints with operationId fn names
@typing.overload
def find_pets_by_status(
self,
query_params: RequestQueryParams = frozendict.frozendict(),
accept_content_types: typing.Tuple[str] = _all_accept_content_types,
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
skip_deserialization: typing_extensions.Literal[False] = ...,
) -> typing.Union[
ApiResponseFor200,
]: ...
@typing.overload
def find_pets_by_status(
self,
query_params: RequestQueryParams = frozendict.frozendict(),
accept_content_types: typing.Tuple[str] = _all_accept_content_types,
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
) -> api_client.ApiResponseWithoutDeserialization: ...
@typing.overload
def find_pets_by_status(
self,
query_params: RequestQueryParams = frozendict.frozendict(),
accept_content_types: typing.Tuple[str] = _all_accept_content_types,
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
skip_deserialization: bool = ...,
) -> typing.Union[
ApiResponseFor200,
api_client.ApiResponseWithoutDeserialization,
]: ...
def find_pets_by_status(
self,
query_params: RequestQueryParams = frozendict.frozendict(),
accept_content_types: typing.Tuple[str] = _all_accept_content_types,
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
skip_deserialization: bool = False,
):
return self._find_pets_by_status_oapg(
query_params=query_params,
accept_content_types=accept_content_types,
stream=stream,
timeout=timeout,
skip_deserialization=skip_deserialization
)
class ApiForget(BaseApi):
# this class is used by api classes that refer to endpoints by path and http method names
@typing.overload
def get(
self,
query_params: RequestQueryParams = frozendict.frozendict(),
accept_content_types: typing.Tuple[str] = _all_accept_content_types,
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
skip_deserialization: typing_extensions.Literal[False] = ...,
) -> typing.Union[
ApiResponseFor200,
]: ...
@typing.overload
def get(
self,
query_params: RequestQueryParams = frozendict.frozendict(),
accept_content_types: typing.Tuple[str] = _all_accept_content_types,
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
) -> api_client.ApiResponseWithoutDeserialization: ...
@typing.overload
def get(
self,
query_params: RequestQueryParams = frozendict.frozendict(),
accept_content_types: typing.Tuple[str] = _all_accept_content_types,
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
skip_deserialization: bool = ...,
) -> typing.Union[
ApiResponseFor200,
api_client.ApiResponseWithoutDeserialization,
]: ...
def get(
self,
query_params: RequestQueryParams = frozendict.frozendict(),
accept_content_types: typing.Tuple[str] = _all_accept_content_types,
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
skip_deserialization: bool = False,
):
return self._find_pets_by_status_oapg(
query_params=query_params,
accept_content_types=accept_content_types,
stream=stream,
timeout=timeout,
skip_deserialization=skip_deserialization
)
|
[
"[email protected]"
] | |
44be46ea1085141780882e156c30d8eab17a15da
|
bb8cc28951ee8dd94f98db23024c76059129f1df
|
/modelo/tipoSolicitud.py
|
2ee3a07efaf4d76f8e99d7f49a31f625d2365c66
|
[] |
no_license
|
jhonn2002/PQR_Municipio
|
1021d119113296acdd5ba87e038f2ba486fd2bd1
|
39e874d08a3f8f79b92e037342aed5dac0988c21
|
refs/heads/master
| 2023-09-01T02:37:22.639579 | 2021-10-13T00:37:59 | 2021-10-13T00:37:59 | 416,543,647 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 322 |
py
|
from app import bd
class TipoSolicitud(bd.Model):
    __tablename__ = 'tiposolicitud' # table name
idTipoSolicitud = bd.Column(bd.Integer, primary_key=True, autoincrement=True)
tipoSolicitud = bd.Column(bd.String(30),nullable=False)
def __repr__(self):
return f'{self.tipoSolicitud}'
|
[
"[email protected]"
] | |
06ce341e0e7626e2104a0667155275b069268653
|
55c250525bd7198ac905b1f2f86d16a44f73e03a
|
/Python/Kivy/pycon2013/html5slides/scripts/md/render.py
|
b5ef0975e20eb201985c57c5b48cd150050171da
|
[] |
no_license
|
NateWeiler/Resources
|
213d18ba86f7cc9d845741b8571b9e2c2c6be916
|
bd4a8a82a3e83a381c97d19e5df42cbababfc66c
|
refs/heads/master
| 2023-09-03T17:50:31.937137 | 2023-08-28T23:50:57 | 2023-08-28T23:50:57 | 267,368,545 | 2 | 1 | null | 2022-09-08T15:20:18 | 2020-05-27T16:18:17 | null |
UTF-8
|
Python
| false | false | 129 |
py
|
version https://git-lfs.github.com/spec/v1
oid sha256:6e3940fcf589334234bc7943dfc2c0d8e860fc139a432eae485128714022232c
size 1807
|
[
"[email protected]"
] | |
ae75e51110b700b347454faed95d66f5be954da0
|
a1973fa91ca1883be6143771feece9dcbdf84a72
|
/algo/wordBreak.py
|
432c3188003f0d2d9ed9c848f60717372d33c728
|
[] |
no_license
|
ajaytani/python
|
a05d3d9607ead75d8e6dc5eadadbfb28f7116623
|
0751bb5f22fdf512e46771391b6464d11773dc19
|
refs/heads/master
| 2021-01-19T23:44:18.968570 | 2020-08-11T06:34:51 | 2020-08-11T06:34:51 | 89,016,107 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 840 |
py
|
def wordBreak(s,wordDict):
n = len(s)
    if not s or n == 0:
return False
dp = [[] for i in range(n+1)]
dp[0] = [0]
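    # dp[i] holds every split point j where s[j:i] is a dictionary word;
    # backTracking later walks these links from n down to 0 to build sentences.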
for i in range(1,n+1):
for j in range(i):
if s[j:i] in wordDict:
dp[i].append(j)
print dp
res = []
backTracking(dp,s,n,res,'')
return res
def backTracking(dp,s,idx,res,line):
for i in dp[idx]:
print 'line : ' + line
print 'idx : ',idx
print 'dp[idx] ', dp[idx]
newline = s[i:idx] + ' ' + line if line else s[i:idx]
print 'newline : ' + newline
if i == 0:
res.append(newline)
else:
print 'backtracking call:' + newline
backTracking(dp,s,i,res,newline)
s = 'catsanddogcat'
wordDict = ['cat','cats','and','sand','dog']
print(wordBreak(s,wordDict))
|
[
"[email protected]"
] | |
e5b0887d810d27576528bafda388fdfd915d3c4f
|
c6320d68968de93ce9d686f5a59bb34909d089bb
|
/03_Polynomial_Regression/polynomial_regression_rad.py
|
fafb65739a4f26fa1c7981097fe77412704b96b8
|
[] |
no_license
|
rbartosinski/MachineLearningRes
|
0835e6b9f94c309bf2ce8ff7ceb73912a7eeea63
|
5a1af15e77d589149aa1cb22cb96f56956fd9a0f
|
refs/heads/master
| 2020-04-07T00:58:03.692579 | 2019-01-11T13:49:12 | 2019-01-11T13:49:12 | 157,925,825 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,313 |
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Nov 20 15:04:28 2018
@author: radek
"""
# load libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# load the dataset
dataset = pd.read_csv('Position_Salaries.csv')
X = dataset.iloc[:, 1:2].values
y = dataset.iloc[:, 2].values
# fit Linear Regression to the dataset
from sklearn.linear_model import LinearRegression
lin_reg = LinearRegression()
lin_reg.fit(X, y)
# fit Polynomial Regression to the dataset
from sklearn.preprocessing import PolynomialFeatures
poly_reg = PolynomialFeatures(degree=4)
X_poly = poly_reg.fit_transform(X)
lin_reg2 = LinearRegression()
lin_reg2.fit(X_poly, y)
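# PolynomialFeatures(degree=4) expands x into [1, x, x^2, x^3, x^4], so the
# "polynomial" model is still an ordinary linear regression on those features.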
# visualize the Linear Regression fit
plt.scatter(X, y, color='red')
plt.plot(X, lin_reg.predict(X), color='blue')
plt.title('Position level vs. Salary (Linear Regression)')
plt.xlabel('Position level')
plt.ylabel('Salary')
plt.show()
# visualize the Polynomial Regression fit
X_grid = np.arange(min(X), max(X), 0.1)
X_grid = X_grid.reshape((len(X_grid), 1))
plt.scatter(X, y, color='red')
plt.plot(X_grid, lin_reg2.predict(poly_reg.fit_transform(X_grid)), color='blue')
plt.title('Position level vs. Salary (Polynomial Regression)')
plt.xlabel('Position level')
plt.ylabel('Salary')
plt.show()
# prediction from Linear Regression
lin_reg.predict([[6.5]])
# prediction from Polynomial Regression
lin_reg2.predict(poly_reg.fit_transform([[6.5]]))
|
[
"[email protected]"
] | |
5a1e071972d89f69b241aff120e8fcd705ae1ca1
|
cc0d06e2aad3d30152c4a3f3356befdc58748313
|
/2.til8.oktober/plot_wavepacket.py
|
a4583b0987077f652a46aaf25eff8dbe8cd4c6bb
|
[] |
no_license
|
lasse-steinnes/IN1900
|
db0bb4da33fa024d4fe9207337c0f1d956197c50
|
c8d97c2903078471f8e419f88cc8488d9b8fc7da
|
refs/heads/master
| 2020-12-14T15:34:36.429764 | 2020-01-18T19:59:46 | 2020-01-18T19:59:46 | 234,789,653 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 473 |
py
|
###
## Define the function
from numpy import exp, sin, pi, linspace
bølge = lambda x,t=0: exp(-(x-3*t)**2)*sin(3*pi*(x-t))
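# Gaussian envelope exp(-(x-3t)^2) times carrier wave sin(3*pi*(x-t)):
# the packet is centred at x = 3t, so at t = 0 it sits at the origin.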
## Create the interval for x
x_matrise = linspace(-4,4,1500)
# So that
bølge_t0 = bølge(x_matrise)
### Plot the function
import matplotlib.pyplot as plt
plt.plot(x_matrise, bølge_t0, label = 'wave packet for t=0')
plt.legend()
plt.xlabel("x")
plt.ylabel("Amplitude")
plt.show()
## Example run
"""
>> python plot_wavepacket.py
(plot)
"""
|
[
"[email protected]"
] | |
84ef03241f898679f28eceb6fc11716406bc5283
|
dc85229e6da452a54577cef2740de9413f3e4528
|
/ArmstrongNumbers.py
|
a1bd165f3ec84125cad4064709e22ed5a15d6f2c
|
[] |
no_license
|
mutlukilic/Python
|
1ea2549027dc5e120934c06fca8f98e3e1865148
|
180407f14fd480cc70f0758b7b6f1554aa2630f9
|
refs/heads/master
| 2021-01-13T04:17:20.684858 | 2017-02-16T11:48:24 | 2017-02-16T11:48:24 | 77,472,728 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 244 |
py
|
basamak_sayisi = int(input("Enter the number of digits: "))
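# An Armstrong (narcissistic) number equals the sum of its digits, each raised
# to the power of the digit count, e.g. 153 = 1**3 + 5**3 + 3**3.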
for i in range(10**(basamak_sayisi-1),10**basamak_sayisi):
armstrong = 0
for j in str(i):
armstrong += int(j)**basamak_sayisi
if(armstrong == i):
print(i)
|
[
"[email protected]"
] | |
608f2efe8d41666029740500651dc44a0669ddd8
|
5f76538e2f3ee8ef0650ca3a98241de0a6995a69
|
/rec/recommender/lipczac_spread_alpha.py
|
39e0e077e9d14f36d4f45c2c2e1ec2dc10923dbd
|
[] |
no_license
|
infsaulo/greenmeter
|
e0422bbef1a757a154da7a7b00f25f1bf6c7d82f
|
61453017f51cfe53eb9b0f455d79ac545d6ff7a1
|
refs/heads/master
| 2020-04-05T23:45:16.562022 | 2014-01-28T14:45:52 | 2014-01-28T14:45:52 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,643 |
py
|
#!/usr/bin/env python
import sys, os
import random
import time
import math
from collections import defaultdict
from operator import itemgetter
from object.textual_features_concat import get_next_concat_TF_weighted
from metrics.term_freq import compute_FF_and_train_length, compute_TS
from metrics.cooccur import compute_cooccur_and_entropy_field, compute_cooccur_one_field_to_another, compute_intersection_confidence
from lipczac_all_fields import recommend_content
from lipczac_only_assoc import recommend_assoc_combined
from util import *
# Compute metrics and recommend
def recommend(obj, title_intersec, desc_intersec, tag_tag, title_tag, num_rec, title_scale, desc_scale, content_scale, alpha):
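    # Blend three rankings: content scores and association scores keep their
    # scale factors, while alpha trades association evidence off against
    # term-spread (TS) evidence from the TITLE and DESCRIPTION fields.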
content_score = recommend_content(obj, title_intersec, desc_intersec, title_scale, desc_scale)
assoc_score = recommend_assoc_combined(obj, title_intersec, desc_intersec, tag_tag, title_tag, title_scale, desc_scale)
rescore(content_score, content_scale)
rescore(assoc_score, alpha)
spread_score = compute_TS(obj, ["TITLE", "DESCRIPTION"])
rescore(spread_score, 1.0 - alpha)
return merge_scores((content_score, assoc_score, spread_score))
if __name__ == "__main__":
if len(sys.argv) != 10:
print "usage: %s <train file> <test file> <min. support> <min. confidence> <num recomendacoes> <title scale> <desc. scale> <content scale> <alpha>" % sys.argv[0]
sys.exit(-1)
train_filename = sys.argv[1]
test_filename = sys.argv[2]
test_file = open(test_filename)
minsup = float(sys.argv[3])
minconf = float(sys.argv[4])
num_rec = int(sys.argv[5])
title_scale = float(sys.argv[6])
desc_scale = float(sys.argv[7])
content_scale = float(sys.argv[8])
alpha = float(sys.argv[9])
(ff, n) = compute_FF_and_train_length(train_filename, ["TAG", "DESCRIPTION", "TITLE"])
tag_tag = compute_cooccur_and_entropy_field(train_filename, ff["TAG"], minsup, minconf, "TAG")[0]
title_tag = compute_cooccur_one_field_to_another(train_filename, ff, minsup, minconf, "TITLE", "TAG")
title_intersec = compute_intersection_confidence(train_filename, "TITLE", "TAG")
desc_intersec = compute_intersection_confidence(train_filename, "DESCRIPTION", "TAG")
test_object = get_next_concat_TF_weighted(test_file)
while test_object != None:
rec = recommend(test_object, title_intersec, desc_intersec, tag_tag, title_tag, num_rec, title_scale, desc_scale, content_scale, alpha)
for w in get_top_ranked(rec, num_rec):
print w,
print
test_object = get_next_concat_TF_weighted(test_file)
test_file.close()
|
[
"[email protected]"
] | |
717a32ee923895084358b984f07330c001396344
|
1aa015c1d08a4cba09ce749cfe325e996039459c
|
/Pygame Tutorial/TuterForBook04.py
|
ec3cacb39832a8e21245de3de37c82d8be4e9dde
|
[] |
no_license
|
Jerrykim91/Pygame_Study
|
98e9494e661d42229b7e0118095bf9d8636e266e
|
6b8fe0ee239d2f90447a5b09bb742323677dfec5
|
refs/heads/master
| 2022-03-10T18:19:42.281581 | 2019-11-15T19:34:22 | 2019-11-15T19:34:22 | 216,942,852 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,588 |
py
|
# Hands-on practice based on the textbook "Learning Python through Games"
""" draw_ lines0.py"""
# import
import pygame
import sys
import random
from pygame.locals import QUIT
# Initialize pygame
pygame.init()
# Variables
SCREEN_W, SCREEN_H = 400, 300
# x, y = [0, 0]
# Various colors
# 0-255 ( R, G, B )
RED = 255, 0, 0          # red:    R 255, G 0,   B 0
GREEN = 0, 255, 0        # green:  R 0,   G 255, B 0
BLUE = 0, 0, 255         # blue:   R 0,   G 0,   B 255
PURPLE = 127, 0, 127     # purple: R 127, G 0,   B 127
BLACK = 0, 0, 0          # black:  R 0,   G 0,   B 0
GRAY = 127, 127, 127     # gray:   R 127, G 127, B 127
WHITE = 255, 255, 255    # white:  R 255, G 255, B 255
# Window setup
SCREEN = pygame.display.set_mode((SCREEN_W, SCREEN_H))
FPSCLOCK = pygame.time.Clock() # essential for keeping CPU usage under control
# Title
pygame.display.set_caption("Drawing random lines")
# Define the main function
def main():
""" main routine """
run = True
while run:
EVENTS = pygame.event.get()
for event in EVENTS:
if event.type == QUIT:
pygame.quit()
sys.exit()
        SCREEN.fill((255, 255, 255)) # fill the screen with white
pointlist = []
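        # Pick 10 random vertices inside the 400x300 window each frame and
        # join them with a closed (True) 5-px black polyline.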
for _ in range(10):
xpos = random.randint(0, 400)
ypos = random.randint(0, 300)
pointlist.append((xpos, ypos))
pygame.draw.lines(SCREEN, BLACK, True, pointlist, 5)
pygame.display.update()
FPSCLOCK.tick(3)
if __name__=='__main__':
main()
|
[
"[email protected]"
] | |
bcfb6f9b225d1c484807d73d6dfb6de1603ce7c4
|
0bf6b634267f02c04c6fb4ab276b26498cde8ca4
|
/payapp/payapp/settings.py
|
6db1a918dcfd038e4563950e39dd1e4c9aed570e
|
[] |
no_license
|
danielmcv/Paynom
|
dde328fbdf818b35b1004372304d40f3663b10a5
|
5b489ab8b66bd8d0693af0ad0ad69df05a6993d0
|
refs/heads/master
| 2021-01-10T12:47:57.607374 | 2015-10-04T16:55:24 | 2015-10-04T16:55:24 | 43,643,875 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,663 |
py
|
"""
Django settings for payapp project.
Generated by 'django-admin startproject' using Django 1.8.4.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '3psq8(k7cc02=yehcngh%0svtb=2%4#8^rq_)(m2r*t95ihovn'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'userprofiles',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'payapp.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'payapp.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
|
[
"[email protected]"
] | |
ff9215c8ec6c96e42f17608f62f9dce1cdd08011
|
2a7216df71b9ccf62050eb5197ed9e8279a2f2b2
|
/codeforces/CODEFORCES_1189_B.py
|
6c5ebbbd07378cbb3b42d77a6f5665bfa86f6f92
|
[] |
no_license
|
atarw/programming-contest-solutions
|
d8b9cd109630a383b762ab379011c5d8f7235ef8
|
8306b6871f221322e056b191e930862ea4990c55
|
refs/heads/master
| 2021-01-23T22:16:00.388134 | 2020-02-09T21:45:35 | 2020-02-09T21:45:35 | 58,693,100 | 5 | 2 | null | null | null | null |
UTF-8
|
Python
| false | false | 639 |
py
|
#!/usr/bin/python
from __future__ import division
from sys import stdin, stdout, maxint
from fractions import Fraction
import bisect, collections, heapq, itertools, operator, math
N = int(stdin.readline())
arr = sorted(map(int, stdin.readline().split()))
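# Greedy construction: place the largest value in the middle of a deque and
# alternate the remaining values (in decreasing order) onto the left and right
# ends, then verify every element is smaller than the sum of its neighbours.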
dq = collections.deque()
dq.append(arr[N - 1])
n = N - 2
left = True
while n >= 0:
if left:
dq.appendleft(arr[n])
else:
dq.append(arr[n])
left = not left
n -= 1
ans = list(dq)
good = True
for n in xrange(N):
if ans[n] >= ans[(n - 1) % N] + ans[(n + 1) % N]:
good = False
break
if good:
print 'YES'
for i in ans:
stdout.write(str(i) + ' ')
else:
print 'NO'
|
[
"[email protected]"
] | |
43cb69f938df1aeaba48532ef98b1d4c606e5a2e
|
a68b6c704dcbe6d54001ca796f13020290ed265d
|
/src/database.py
|
8f5cc548df9a757c7759d087fb8dac85ab23a2b9
|
[] |
no_license
|
Rescura/cmbsoftware
|
9e9fa8b060673b3ae7472c6ea3cceed3cffb8c0e
|
c506ad3ccd52ae1fc29ba7ad9060d74f81d1ab3b
|
refs/heads/master
| 2022-12-02T05:04:13.595497 | 2020-03-19T13:11:18 | 2020-03-19T13:11:18 | 283,223,224 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,052 |
py
|
import sqlite3
import datetime as dt
import re, math
def returnTime(f_format:str):
"""
    Return the current time.

    Formats the current time as a string using f_format and replaces the
    English weekday abbreviation with its Korean single-character name.
*f_format %Y%m%d %H:%M:%S -> YYYYMMDD HH:MM:SS
"""
d = dt.datetime.today()
dateStr = d.strftime(f_format)
    day = dateStr[6:9]  # slice where f_format is expected to put the three-letter English weekday
return{
'Sun' : dateStr.replace('Sun', '일'),
'Mon' : dateStr.replace('Mon', '월'),
'Tue' : dateStr.replace('Tue', '화'),
'Wed' : dateStr.replace('Wed', '수'),
'Thu' : dateStr.replace('Thu', '목'),
'Fri' : dateStr.replace('Fri', '금'),
'Sat' : dateStr.replace('Sat', '토')
}.get(day, dateStr)
class dbHandler():
CONST_TBL_REGEX = re.compile('_+[0-9]{8}')
def __init__(self):
"""
        Initialize the database handler.
        - Creates the videoList table in videoLists.db if it does not exist.
"""
self.con = sqlite3.connect('./videoLists.db')
self.cur = self.con.cursor()
        # Check whether the table exists and create it if it does not
self.cur.execute('''
CREATE TABLE IF NOT EXISTS videoList (
thumbnailUrl TEXT NOT NULL,
videoId TEXT UNIQUE,
videoTitle TEXT,
            channelTitle TEXT,
duration TEXT,
count INTEGER,
recent TEXT);
''')
def getDataFromDB(self, f_videoId):
query = '''SELECT count, recent FROM videoList WHERE videoId = ?'''
result = self.con.execute(query, (f_videoId,)).fetchone()
if result is None:
print("[Database] 조회 결과 없음")
return False, {"mod_count": 0, "mod_recent": "없음"}
print("[Database] 조회 결과 있음")
return True, {"mod_count": result[0], "mod_recent": result[1]}
    def processData(self, f_ytdata):
        # Mutating f_ytdata while iterating over it skips elements, so
        # partition the rows into two lists instead of calling remove().
        dataFromBoth = []
        dataFromYtOnly = []
        for data in f_ytdata:
            isInDb, dbData = self.getDataFromDB(data["mod_videoId"])
            data.update(dbData)
            if isInDb:
                dataFromBoth.append(data)
            else:
                dataFromYtOnly.append(data)
        finalData = []
        finalData.extend(dataFromBoth)
        finalData.extend(dataFromYtOnly)
        return finalData
def getDataWithoutKeyword(self):
        # TODO: fetch only a sensible number of rows with fetchmany and return those
data = self.cur.execute("SELECT * FROM videoList ORDER BY RECENT DESC").fetchall()
result = []
for video in data:
singleData = {}
singleData["mod_thumbnailUrl"] = video[0]
singleData["mod_videoId"] = video[1]
singleData["mod_videoTitle"] = video[2]
singleData["mod_channelTitle"] = video[3]
singleData["mod_duration"] = video[4]
singleData["mod_count"] = video[5]
singleData["mod_recent"] = video[6]
result.append(singleData)
return result
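# --- Hedged usage sketch (added; not part of the original module) ---
# How the handler above might be driven.  The "mod_*" keys mirror the ones
# the methods already read, but the sample values are made up, and
# constructing dbHandler() creates ./videoLists.db as a side effect.
if __name__ == '__main__':
    handler = dbHandler()
    sample = [{
        'mod_thumbnailUrl': 'https://example.com/thumb.jpg',  # hypothetical
        'mod_videoId': 'abc123',
        'mod_videoTitle': 'demo video',
        'mod_channelTitle': 'demo channel',
        'mod_duration': '3:21',
    }]
    merged = handler.processData(sample)
    print(merged[0]['mod_count'], merged[0]['mod_recent'])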
|
[
"[email protected]"
] | |
7b427a6ea7784fe57b83929971208d4a8ab309f5
|
796f41e9c4ffcd2f182847661d800e43ce35f133
|
/knowledge_extract/src/utils/logger.py
|
2b093cc101a21f494da9d091bb40d366c5c75a4e
|
[] |
no_license
|
DendiHust/bert_pros
|
3ae9fc3d55e6fe7cb251c5f43fe01304c19bd29b
|
ff7b585b678894f552a1d31912a967f3c638c361
|
refs/heads/master
| 2020-12-05T10:21:55.319525 | 2020-01-17T15:58:36 | 2020-01-17T15:58:36 | 232,078,389 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,004 |
py
|
# -*- coding: utf-8 -*-
import os
import time
import logging
import inspect
from src.utils import file_util
dt = time.strftime("%Y%m%d")
handlers = {logging.DEBUG: file_util.get_project_path() + "./log/debug_%s.log" % (dt),
logging.INFO: file_util.get_project_path() + "./log/info_%s.log" % (dt),
logging.WARNING: file_util.get_project_path() + "./log/warn_%s.log" % (dt),
logging.ERROR: file_util.get_project_path() + "./log/error_%s.log" % (dt)}
loggers = {}
if not os.path.exists(file_util.get_project_path() + './log'):
os.mkdir(file_util.get_project_path() + './log')
def init_loggers():
for level in handlers.keys():
path = os.path.abspath(handlers[level])
handlers[level] = logging.FileHandler(path, encoding='utf-8')
# handlers[level] = logging.FileHandler(path)
for level in handlers.keys():
logger = logging.getLogger(str(level))
        # use the level number as the logger name; without distinct names getLogger would return the same logger for every level
logger.addHandler(handlers[level])
logger.setLevel(level)
loggers.update({level: logger})
# the global loggers are created when this module is imported
init_loggers()
def print_now():
return time.strftime('%Y-%m-%d %H:%M:%S', time.localtime())
def get_log_msg(message):
return "[%s] %s" % (print_now(), message)
def get_error_msg(message):
frame, filename, lineNo, functionName, code, unknowField = inspect.stack()[2]
return "[%s] [%s - %s - %s] %s" % (print_now(), filename, lineNo, functionName, message)
def info(message):
message = get_log_msg(message)
loggers[logging.INFO].info(message)
print(message)
def error(message):
message = get_error_msg(message)
loggers[logging.ERROR].error(message)
print(message)
def debug(message):
message = get_log_msg(message)
loggers[logging.DEBUG].debug(message)
print(message)
def warn(message):
message = get_log_msg(message)
loggers[logging.WARNING].warning(message)
print(message)
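# --- Hedged usage sketch (added; not part of the original module) ---
# Importing this module once creates ./log/ and one file per level; the
# level functions defined above are then used directly:
if __name__ == '__main__':
    info('service started')             # -> log/info_YYYYMMDD.log
    warn('disk usage above threshold')  # -> log/warn_YYYYMMDD.log
    error('failed to load model')       # also records file/line/function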
|
[
"[email protected]"
] | |
603cf7453561a065dc33a3f333ab5627734fc5ed
|
3fbd92b1eed0f81e8f06ab4595fa64b40dfbdac5
|
/RNN_R.py
|
9c5d5d5880847eca9d99af3934730643f2cf84af
|
[] |
no_license
|
Lanclaw/TakeItEasy
|
a56e64b3160bce4798fd5afa5b41c15fa827476d
|
0373313000fecf39f56e65dd4b20cc8a384b572d
|
refs/heads/main
| 2023-03-25T06:07:17.047320 | 2021-03-28T02:44:11 | 2021-03-28T02:44:11 | 351,967,066 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,723 |
py
|
import torch
from torch import nn
import numpy as np
import matplotlib.pyplot as plt
TIME_STEP = 10
INPUT_SIZE = 1
LR = 0.02
class RNN(nn.Module):
def __init__(self):
super(RNN, self).__init__()
self.rnn = nn.RNN(
input_size=INPUT_SIZE,
hidden_size=32,
num_layers=1,
batch_first=True
)
self.out = nn.Linear(32, 1)
def forward(self, x, h_state):
r_out, h_state = self.rnn(x, h_state) # r_out:(B_s, t_s, h_s), h_state:(num_layers, b_s, h_s)
out = []
for time_step in range(TIME_STEP):
out.append(self.out(r_out[:, time_step, :])) # one of out(list) : (b_s, output_size) out: (time_step, b_s, output_size)
return torch.stack(out, dim=1), h_state # torch.stack(out, dim=1): (b_s, time_step, output_size)
rnn = RNN()
optimizer = torch.optim.Adam(rnn.parameters(), lr=LR)
loss_func = nn.MSELoss()
h_state = None
plt.ion()
for step in range(100):
start, end = step * np.pi, (step + 1) * np.pi
steps = np.linspace(start, end, TIME_STEP, dtype=np.float32, endpoint=False)
x_np = np.cos(steps)
y_np = np.sin(steps)
x = torch.from_numpy(x_np[np.newaxis, :, np.newaxis])
y = torch.from_numpy(y_np[np.newaxis, :, np.newaxis])
pred, h_state = rnn(x, h_state)
    h_state = h_state.detach()  # detach the hidden state so gradients do not flow back across iterations
loss = loss_func(pred, y)
optimizer.zero_grad()
loss.backward()
optimizer.step()
    print('Step: ', step, '| loss: ', loss.item())
plt.plot(steps, pred.detach().numpy().flatten(), 'r-')
plt.plot(steps, y_np, 'b-')
plt.draw()
plt.pause(0.2)
plt.ioff()
plt.show()
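# --- Hedged inference sketch (added; not part of the original script) ---
# After training, the same (x, h_state) interface serves for a single
# forward pass; shapes follow the (batch, time_step, input_size) layout
# used in the loop above.
with torch.no_grad():
    probe = torch.from_numpy(
        np.cos(np.linspace(0, np.pi, TIME_STEP, dtype=np.float32))
    )[None, :, None]               # (1, TIME_STEP, 1)
    pred, _ = rnn(probe, None)     # fresh hidden state
    print(pred.shape)              # torch.Size([1, 10, 1])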
|
[
"[email protected]"
] | |
180106f795f970567cbc895131bf67dc5169a9ab
|
4fbfabe70290b5b32a2235c17d0a98a3f9f67dab
|
/codingdojochallenge/forms.py
|
f7cca509e8b8df263c23e90480b8b03ea30f9586
|
[] |
no_license
|
joshgendal/coding-dojo-challenge
|
e06ff8a4e2f7178cc5e6a7173bfac34b74d70416
|
a8900b9b46fd7c6f23ecf01b6c5b2461ef18500a
|
refs/heads/main
| 2023-02-14T14:29:14.003040 | 2021-01-08T23:13:06 | 2021-01-08T23:13:06 | 327,711,286 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 811 |
py
|
from django import forms
from codingdojochallenge.models import Shows
class ShowForm(forms.Form):
title = forms.CharField(widget=forms.TextInput(attrs={
'class': 'form-control',
'name': 'title',
'id': 'title-input'
}))
date = forms.DateField(widget=forms.DateInput(attrs={
'class': 'form-control',
'id': 'date-input',
'name': 'date',
'type': 'date'
}))
network = forms.CharField(widget=forms.TextInput(attrs={
'class': 'form-control',
'id': 'network-input',
'name': 'network',
}))
description = forms.CharField(widget=forms.Textarea(attrs={
'class': 'form-control description',
'id': 'description-input',
'name': 'description',
'cols': '30',
'rows': '5',
}))
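# --- Hedged usage sketch (added; not part of the original module) ---
# How the form might be validated inside a configured Django project;
# the field names match the declarations above, the values are made up.
def _demo_validate():
    form = ShowForm({
        'title': 'Example Show',
        'date': '2021-01-08',
        'network': 'Example Network',
        'description': 'A made-up synopsis.',
    })
    if form.is_valid():
        return form.cleaned_data['title']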
|
[
"[email protected]"
] | |
17c9da42b3a256243f283d9d0204ada064184eb8
|
aa4e8b335b6bd3041f91493115aae71af475ca6f
|
/server/activities/migrations/0003_auto__add_field_geospatial_point__add_field_geospatial_radius__add_fie.py
|
a7c2bbe2c551b977dea89c73bda5b85c4da630c3
|
[] |
no_license
|
CulturePlex/DrGlearning
|
565165687544eda9b524f4b6ee7477fe017cd16a
|
1949ae23f76d9858e9ef3b529035b4a15c1dc5d3
|
refs/heads/master
| 2021-03-30T16:13:06.814365 | 2014-07-01T22:41:32 | 2014-07-01T22:41:32 | 2,485,130 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 9,955 |
py
|
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Geospatial.point'
db.add_column('activities_geospatial', 'point', self.gf('django.contrib.gis.db.models.fields.PointField')(default=''), keep_default=False)
# Adding field 'Geospatial.radius'
db.add_column('activities_geospatial', 'radius', self.gf('django.db.models.fields.FloatField')(default=100), keep_default=False)
# Adding field 'Geospatial.area'
db.add_column('activities_geospatial', 'area', self.gf('django.contrib.gis.db.models.fields.PolygonField')(default=None), keep_default=False)
def backwards(self, orm):
# Deleting field 'Geospatial.point'
db.delete_column('activities_geospatial', 'point')
# Deleting field 'Geospatial.radius'
db.delete_column('activities_geospatial', 'radius')
# Deleting field 'Geospatial.area'
db.delete_column('activities_geospatial', 'area')
models = {
'activities.activity': {
'Meta': {'object_name': 'Activity'},
'career': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'activities'", 'symmetrical': 'False', 'through': "orm['activities.Level']", 'to': "orm['knowledges.Career']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language_code': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'query': ('django.db.models.fields.TextField', [], {}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
'activities.geospatial': {
'Meta': {'object_name': 'Geospatial', '_ormbases': ['activities.Activity']},
'activity_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['activities.Activity']", 'unique': 'True', 'primary_key': 'True'}),
'area': ('django.contrib.gis.db.models.fields.PolygonField', [], {}),
'point': ('django.contrib.gis.db.models.fields.PointField', [], {}),
'radius': ('django.db.models.fields.FloatField', [], {})
},
'activities.level': {
'Meta': {'ordering': "['order']", 'object_name': 'Level'},
'activity': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['activities.Activity']"}),
'career': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['knowledges.Career']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'order': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'required': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'type': ('django.db.models.fields.PositiveSmallIntegerField', [], {})
},
'activities.linguistic': {
'Meta': {'object_name': 'Linguistic', '_ormbases': ['activities.Activity']},
'activity_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['activities.Activity']", 'unique': 'True', 'primary_key': 'True'}),
'answer': ('django.db.models.fields.TextField', [], {}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}),
'locked_text': ('django.db.models.fields.TextField', [], {})
},
'activities.relational': {
'Meta': {'object_name': 'Relational', '_ormbases': ['activities.Activity']},
'activity_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['activities.Activity']", 'unique': 'True', 'primary_key': 'True'}),
'graph_edges': ('django.db.models.fields.TextField', [], {}),
'graph_nodes': ('django.db.models.fields.TextField', [], {}),
'scored_nodes': ('django.db.models.fields.TextField', [], {}),
'source_path': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'target_path': ('django.db.models.fields.CharField', [], {'max_length': '30'})
},
'activities.temporal': {
'Meta': {'object_name': 'Temporal', '_ormbases': ['activities.Activity']},
'activity_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['activities.Activity']", 'unique': 'True', 'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}),
'image_datetime': ('django.db.models.fields.DateTimeField', [], {}),
'query_datetime': ('django.db.models.fields.DateTimeField', [], {})
},
'activities.visual': {
'Meta': {'object_name': 'Visual', '_ormbases': ['activities.Activity']},
'activity_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['activities.Activity']", 'unique': 'True', 'primary_key': 'True'}),
'answers': ('django.db.models.fields.TextField', [], {}),
'correct_answer': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}),
'obfuscated_image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}),
'time': ('django.db.models.fields.CharField', [], {'max_length': '10'})
},
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'knowledges.career': {
'Meta': {'object_name': 'Career'},
'description': ('django.db.models.fields.TextField', [], {'default': "''"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'knowledge_field': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'knowledge_fields'", 'symmetrical': 'False', 'to': "orm['knowledges.Knowledge']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'negative_votes': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'positive_votes': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'knowledges.knowledge': {
'Meta': {'object_name': 'Knowledge'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'})
}
}
complete_apps = ['activities']
|
[
"[email protected]"
] | |
5f7476e24a204be9844348f8f59e7887e624cd82
|
3d4692d7853896a421524ca1beb238ee6179db7b
|
/version.py
|
51668005651c6c2ea9b96f57dc22e44839b15e1b
|
[
"MIT"
] |
permissive
|
H2Owater425/apacheLog
|
c1592d963808c59a64b7cdc96aa39e0feb175a32
|
5aecd4b549bdc958182b9db078c9548e0f9488cf
|
refs/heads/main
| 2023-02-28T16:54:29.856558 | 2021-02-11T12:52:26 | 2021-02-11T12:52:26 | 333,651,219 | 2 | 0 |
MIT
| 2021-02-11T11:43:55 | 2021-01-28T05:04:12 |
Python
|
UTF-8
|
Python
| false | false | 25 |
py
|
__version__ = "2020.12.1"
|
[
"[email protected]"
] | |
b5b4c7d2a0e719279f2d3cd706e2f3a26b838bf8
|
d1fef4482a1a4cb7fb22b6fe9b45e6cf1657c001
|
/conf.py
|
67931dec10ce25557f2638a398ec6cd7e55f167b
|
[] |
no_license
|
Voljega/BestArcade
|
a80b15785899115984e6874e85c9808a57c8b138
|
a1498af1ea9a62008af3429feec20b6953f35d46
|
refs/heads/master
| 2023-06-22T08:07:37.007691 | 2023-06-17T21:24:43 | 2023-06-17T21:24:43 | 217,221,075 | 57 | 5 | null | null | null | null |
UTF-8
|
Python
| false | false | 416 |
py
|
def cleanString(string):
return string.rstrip('\n\r ').lstrip()
def loadConf(confFile):
conf = dict()
file = open(confFile, 'r', encoding="utf-8")
for line in file.readlines():
if not line.startswith('#'):
confLine = line.split("=")
if len(confLine) == 2:
conf[cleanString(confLine[0])] = cleanString(confLine[1])
file.close()
return conf
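# --- Hedged usage sketch (added; not part of the original module) ---
# Given a file such as (the path and keys below are made up):
#   # machine.conf
#   romsDir = /opt/roms
#   outputDir = /opt/output
# loadConf returns {'romsDir': '/opt/roms', 'outputDir': '/opt/output'}.
if __name__ == '__main__':
    conf = loadConf('machine.conf')  # hypothetical file name
    print(conf.get('romsDir'))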
|
[
"[email protected]"
] | |
5b8fdc45ed8189fd3a68129d873b5311ea37df1f
|
8fda41bac0be880a7918440e555b6fc2e0afd626
|
/strmultislice.py
|
2be4f39e102a39a43b75a6f85d56d5ec701ce12a
|
[] |
no_license
|
clarkjoypesco/pythonmultislicestring
|
c06123327af6b2872082bb45f9e55a57fa810ceb
|
8a24a63fda14b28e5b0ab720fbca9d13ccc2d5ec
|
refs/heads/master
| 2020-05-07T00:56:27.679478 | 2019-04-09T00:15:01 | 2019-04-09T00:15:01 | 180,251,440 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 369 |
py
|
# Use string slicing to store everything before "NOUN" in substring1,
# everything after "NOUN" and before "VERB" in substring2, and everything after "VERB"
# in substring3.
sentence = "A NOUN went on a walk. It can VERB really fast."
substring1 = sentence[:2]
print substring1
substring2 = sentence[6:30]
print substring2
substring3 = sentence[34:]
print substring3
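# --- Added illustration (not part of the original exercise) ---
# The hardcoded indices above (2, 6, 30, 34) can be derived with str.find,
# which keeps the slices correct if the sentence ever changes:
noun_at = sentence.find("NOUN")          # 2
verb_at = sentence.find("VERB")          # 30
print(sentence[:noun_at])                # same as substring1
print(sentence[noun_at + 4:verb_at])     # same as substring2
print(sentence[verb_at + 4:])            # same as substring3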
|
[
"[email protected]"
] | |
a141044c46de5b0d1469335d2402696d27fdae35
|
05bc43b15ed5d7b8fcc58791d6a1746800c20268
|
/lib/dataset.py
|
f441c7f3ad0b20731dd196abeb0ff09f41325dbf
|
[
"MIT"
] |
permissive
|
daveredrum/Pointnet2.ScanNet
|
521b7cc75b4a06eb1fcf2ac7665acd4b414e27f3
|
f4adc43779aaaefd6138d37183a94185d0718865
|
refs/heads/master
| 2022-02-12T14:07:38.239629 | 2021-12-21T10:10:54 | 2021-12-21T10:10:54 | 196,842,335 | 113 | 22 | null | null | null | null |
UTF-8
|
Python
| false | false | 15,328 |
py
|
import os
import sys
import time
import h5py
import torch
import numpy as np
import multiprocessing as mp
from tqdm import tqdm
from prefetch_generator import background
sys.path.append(".")
from lib.config import CONF
class ScannetDataset():
def __init__(self, phase, scene_list, num_classes=21, npoints=8192, is_weighting=True, use_multiview=False, use_color=False, use_normal=False):
self.phase = phase
assert phase in ["train", "val", "test"]
self.scene_list = scene_list
self.num_classes = num_classes
self.npoints = npoints
self.is_weighting = is_weighting
self.use_multiview = use_multiview
self.use_color = use_color
self.use_normal = use_normal
self.chunk_data = {} # init in generate_chunks()
self._prepare_weights()
def _prepare_weights(self):
self.scene_data = {}
self.multiview_data = {}
scene_points_list = []
semantic_labels_list = []
if self.use_multiview:
multiview_database = h5py.File(CONF.MULTIVIEW, "r", libver="latest")
for scene_id in tqdm(self.scene_list):
scene_data = np.load(CONF.SCANNETV2_FILE.format(scene_id))
label = scene_data[:, 10]
# append
scene_points_list.append(scene_data)
semantic_labels_list.append(label)
self.scene_data[scene_id] = scene_data
if self.use_multiview:
feature = multiview_database.get(scene_id)[()]
self.multiview_data[scene_id] = feature
if self.is_weighting:
labelweights = np.zeros(self.num_classes)
for seg in semantic_labels_list:
tmp,_ = np.histogram(seg,range(self.num_classes + 1))
labelweights += tmp
labelweights = labelweights.astype(np.float32)
labelweights = labelweights/np.sum(labelweights)
self.labelweights = 1/np.log(1.2+labelweights)
else:
self.labelweights = np.ones(self.num_classes)
@background()
def __getitem__(self, index):
start = time.time()
# load chunks
scene_id = self.scene_list[index]
scene_data = self.chunk_data[scene_id]
# unpack
point_set = scene_data[:, :3] # include xyz by default
rgb = scene_data[:, 3:6] / 255. # normalize the rgb values to [0, 1]
normal = scene_data[:, 6:9]
label = scene_data[:, 10].astype(np.int32)
if self.use_multiview:
feature = scene_data[:, 11:]
point_set = np.concatenate([point_set, feature], axis=1)
if self.use_color:
point_set = np.concatenate([point_set, rgb], axis=1)
if self.use_normal:
point_set = np.concatenate([point_set, normal], axis=1)
if self.phase == "train":
point_set = self._augment(point_set)
# prepare mask
curmin = np.min(point_set, axis=0)[:3]
curmax = np.max(point_set, axis=0)[:3]
mask = np.sum((point_set[:, :3] >= (curmin - 0.01)) * (point_set[:, :3] <= (curmax + 0.01)), axis=1) == 3
sample_weight = self.labelweights[label]
sample_weight *= mask
fetch_time = time.time() - start
return point_set, label, sample_weight, fetch_time
def __len__(self):
return len(self.scene_list)
def _augment(self, point_set):
# translate the chunk center to the origin
center = np.mean(point_set[:, :3], axis=0)
coords = point_set[:, :3] - center
p = np.random.choice(np.arange(0.01, 1.01, 0.01), size=1)[0]
if p < 1 / 8:
# random translation
coords = self._translate(coords)
elif p >= 1 / 8 and p < 2 / 8:
# random rotation
coords = self._rotate(coords)
elif p >= 2 / 8 and p < 3 / 8:
# random scaling
coords = self._scale(coords)
elif p >= 3 / 8 and p < 4 / 8:
# random translation
coords = self._translate(coords)
# random rotation
coords = self._rotate(coords)
elif p >= 4 / 8 and p < 5 / 8:
# random translation
coords = self._translate(coords)
# random scaling
coords = self._scale(coords)
elif p >= 5 / 8 and p < 6 / 8:
# random rotation
coords = self._rotate(coords)
# random scaling
coords = self._scale(coords)
elif p >= 6 / 8 and p < 7 / 8:
# random translation
coords = self._translate(coords)
# random rotation
coords = self._rotate(coords)
# random scaling
coords = self._scale(coords)
else:
# no augmentation
pass
# translate the chunk center back to the original center
coords += center
point_set[:, :3] = coords
return point_set
def _translate(self, point_set):
# translation factors
x_factor = np.random.choice(np.arange(-0.5, 0.501, 0.001), size=1)[0]
y_factor = np.random.choice(np.arange(-0.5, 0.501, 0.001), size=1)[0]
z_factor = np.random.choice(np.arange(-0.5, 0.501, 0.001), size=1)[0]
coords = point_set[:, :3]
coords += [x_factor, y_factor, z_factor]
point_set[:, :3] = coords
return point_set
def _rotate(self, point_set):
coords = point_set[:, :3]
# x rotation matrix
        theta = np.random.choice(np.arange(-5, 5.001, 0.001), size=1)[0] * np.pi / 180  # in radians
Rx = np.array(
[[1, 0, 0],
[0, np.cos(theta), -np.sin(theta)],
[0, np.sin(theta), np.cos(theta)]]
)
# y rotation matrix
        theta = np.random.choice(np.arange(-5, 5.001, 0.001), size=1)[0] * np.pi / 180  # in radians
Ry = np.array(
[[np.cos(theta), 0, np.sin(theta)],
[0, 1, 0],
[-np.sin(theta), 0, np.cos(theta)]]
)
# z rotation matrix
        theta = np.random.choice(np.arange(-5, 5.001, 0.001), size=1)[0] * np.pi / 180  # in radians
Rz = np.array(
[[np.cos(theta), -np.sin(theta), 0],
[np.sin(theta), np.cos(theta), 0],
[0, 0, 1]]
)
# rotate
R = np.matmul(np.matmul(Rz, Ry), Rx)
coords = np.matmul(R, coords.T).T
# dump
point_set[:, :3] = coords
return point_set
def _scale(self, point_set):
# scaling factors
factor = np.random.choice(np.arange(0.95, 1.051, 0.001), size=1)[0]
coords = point_set[:, :3]
coords *= [factor, factor, factor]
point_set[:, :3] = coords
return point_set
def generate_chunks(self):
"""
note: must be called before training
"""
print("generate new chunks for {}...".format(self.phase))
for scene_id in tqdm(self.scene_list):
scene = self.scene_data[scene_id]
semantic = scene[:, 10].astype(np.int32)
if self.use_multiview:
feature = self.multiview_data[scene_id]
coordmax = np.max(scene, axis=0)[:3]
coordmin = np.min(scene, axis=0)[:3]
for _ in range(5):
curcenter = scene[np.random.choice(len(semantic), 1)[0],:3]
curmin = curcenter-[0.75,0.75,1.5]
curmax = curcenter+[0.75,0.75,1.5]
curmin[2] = coordmin[2]
curmax[2] = coordmax[2]
curchoice = np.sum((scene[:, :3]>=(curmin-0.2))*(scene[:, :3]<=(curmax+0.2)),axis=1)==3
cur_point_set = scene[curchoice]
cur_semantic_seg = semantic[curchoice]
if self.use_multiview:
cur_feature = feature[curchoice]
if len(cur_semantic_seg)==0:
continue
mask = np.sum((cur_point_set[:, :3]>=(curmin-0.01))*(cur_point_set[:, :3]<=(curmax+0.01)),axis=1)==3
vidx = np.ceil((cur_point_set[mask,:3]-curmin)/(curmax-curmin)*[31.0,31.0,62.0])
vidx = np.unique(vidx[:,0]*31.0*62.0+vidx[:,1]*62.0+vidx[:,2])
isvalid = np.sum(cur_semantic_seg>0)/len(cur_semantic_seg)>=0.7 and len(vidx)/31.0/31.0/62.0>=0.02
if isvalid:
break
# store chunk
if self.use_multiview:
chunk = np.concatenate([cur_point_set, cur_feature], axis=1)
else:
chunk = cur_point_set
choices = np.random.choice(chunk.shape[0], self.npoints, replace=True)
chunk = chunk[choices]
self.chunk_data[scene_id] = chunk
print("done!\n")
class ScannetDatasetWholeScene():
def __init__(self, scene_list, npoints=8192, is_weighting=True, use_color=False, use_normal=False, use_multiview=False):
self.scene_list = scene_list
self.npoints = npoints
self.is_weighting = is_weighting
self.use_color = use_color
self.use_normal = use_normal
self.use_multiview = use_multiview
self._load_scene_file()
def _load_scene_file(self):
self.scene_points_list = []
self.semantic_labels_list = []
if self.use_multiview:
multiview_database = h5py.File(CONF.MULTIVIEW, "r", libver="latest")
self.multiview_data = []
for scene_id in tqdm(self.scene_list):
scene_data = np.load(CONF.SCANNETV2_FILE.format(scene_id))
label = scene_data[:, 10].astype(np.int32)
self.scene_points_list.append(scene_data)
self.semantic_labels_list.append(label)
if self.use_multiview:
feature = multiview_database.get(scene_id)[()]
self.multiview_data.append(feature)
if self.is_weighting:
labelweights = np.zeros(CONF.NUM_CLASSES)
for seg in self.semantic_labels_list:
tmp,_ = np.histogram(seg,range(CONF.NUM_CLASSES + 1))
labelweights += tmp
labelweights = labelweights.astype(np.float32)
labelweights = labelweights/np.sum(labelweights)
self.labelweights = 1/np.log(1.2+labelweights)
else:
self.labelweights = np.ones(CONF.NUM_CLASSES)
@background()
def __getitem__(self, index):
start = time.time()
scene_data = self.scene_points_list[index]
# unpack
point_set_ini = scene_data[:, :3] # include xyz by default
color = scene_data[:, 3:6] / 255. # normalize the rgb values to [0, 1]
normal = scene_data[:, 6:9]
if self.use_color:
point_set_ini = np.concatenate([point_set_ini, color], axis=1)
if self.use_normal:
point_set_ini = np.concatenate([point_set_ini, normal], axis=1)
if self.use_multiview:
multiview_features = self.multiview_data[index]
point_set_ini = np.concatenate([point_set_ini, multiview_features], axis=1)
semantic_seg_ini = self.semantic_labels_list[index].astype(np.int32)
coordmax = point_set_ini[:, :3].max(axis=0)
coordmin = point_set_ini[:, :3].min(axis=0)
xlength = 1.5
ylength = 1.5
nsubvolume_x = np.ceil((coordmax[0]-coordmin[0])/xlength).astype(np.int32)
nsubvolume_y = np.ceil((coordmax[1]-coordmin[1])/ylength).astype(np.int32)
point_sets = list()
semantic_segs = list()
sample_weights = list()
for i in range(nsubvolume_x):
for j in range(nsubvolume_y):
curmin = coordmin+[i*xlength, j*ylength, 0]
curmax = coordmin+[(i+1)*xlength, (j+1)*ylength, coordmax[2]-coordmin[2]]
mask = np.sum((point_set_ini[:, :3]>=(curmin-0.01))*(point_set_ini[:, :3]<=(curmax+0.01)), axis=1)==3
cur_point_set = point_set_ini[mask,:]
cur_semantic_seg = semantic_seg_ini[mask]
if len(cur_semantic_seg) == 0:
continue
choice = np.random.choice(len(cur_semantic_seg), self.npoints, replace=True)
point_set = cur_point_set[choice,:] # Nx3
semantic_seg = cur_semantic_seg[choice] # N
mask = mask[choice]
# if sum(mask)/float(len(mask))<0.01:
# continue
sample_weight = self.labelweights[semantic_seg]
sample_weight *= mask # N
point_sets.append(np.expand_dims(point_set,0)) # 1xNx3
semantic_segs.append(np.expand_dims(semantic_seg,0)) # 1xN
sample_weights.append(np.expand_dims(sample_weight,0)) # 1xN
point_sets = np.concatenate(tuple(point_sets),axis=0)
semantic_segs = np.concatenate(tuple(semantic_segs),axis=0)
sample_weights = np.concatenate(tuple(sample_weights),axis=0)
fetch_time = time.time() - start
return point_sets, semantic_segs, sample_weights, fetch_time
def __len__(self):
return len(self.scene_points_list)
def collate_random(data):
'''
for ScannetDataset: collate_fn=collate_random
return:
coords # torch.FloatTensor(B, N, 3)
feats # torch.FloatTensor(B, N, 3)
semantic_segs # torch.FloatTensor(B, N)
sample_weights # torch.FloatTensor(B, N)
fetch_time # float
'''
# load data
(
point_set,
semantic_seg,
sample_weight,
fetch_time
) = zip(*data)
# convert to tensor
point_set = torch.FloatTensor(point_set)
semantic_seg = torch.LongTensor(semantic_seg)
sample_weight = torch.FloatTensor(sample_weight)
# split points to coords and feats
coords = point_set[:, :, :3]
feats = point_set[:, :, 3:]
# pack
batch = (
coords, # (B, N, 3)
feats, # (B, N, 3)
semantic_seg, # (B, N)
sample_weight, # (B, N)
sum(fetch_time) # float
)
return batch
def collate_wholescene(data):
'''
    for ScannetDatasetWholeScene: collate_fn=collate_wholescene
return:
coords # torch.FloatTensor(B, C, N, 3)
feats # torch.FloatTensor(B, C, N, 3)
semantic_segs # torch.FloatTensor(B, C, N)
sample_weights # torch.FloatTensor(B, C, N)
fetch_time # float
'''
# load data
(
point_sets,
semantic_segs,
sample_weights,
fetch_time
) = zip(*data)
# convert to tensor
point_sets = torch.FloatTensor(point_sets)
semantic_segs = torch.LongTensor(semantic_segs)
sample_weights = torch.FloatTensor(sample_weights)
# split points to coords and feats
coords = point_sets[:, :, :, :3]
feats = point_sets[:, :, :, 3:]
# pack
batch = (
        coords, # (B, C, N, 3)
        feats, # (B, C, N, 3)
        semantic_segs, # (B, C, N)
        sample_weights, # (B, C, N)
sum(fetch_time) # float
)
return batch
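# --- Hedged usage sketch (added; not part of the original module) ---
# How the dataset and collate function above are typically wired together;
# the scene id is made up, and the CONF paths plus the prefetch decorator
# must be set up as in the repository for this to run.
if __name__ == '__main__':
    from torch.utils.data import DataLoader
    scene_list = ['scene0000_00']   # hypothetical ScanNet scene id
    dataset = ScannetDataset('train', scene_list)
    dataset.generate_chunks()       # must be called before iterating
    loader = DataLoader(dataset, batch_size=1, collate_fn=collate_random)
    coords, feats, labels, weights, fetch_time = next(iter(loader))
    print(coords.shape)             # torch.Size([1, 8192, 3])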
|
[
"[email protected]"
] | |
0c1d705b8afb40aed5e9d84199de73a1d02a6882
|
a7d65be2d28290cd7c7ce2b2a394d01db48c9f6a
|
/PlexHey/migrations/0014_remove_profile_driving.py
|
bede856c7f610beab70baaec1d4715f014f01bf8
|
[] |
no_license
|
okenyurimachage/Hyplex
|
bbbe417ca478f94b1c0e9cc0fbf49e0c718fff44
|
972b8400d6e9319e28d2d15b23f3a0b0cc8bd10e
|
refs/heads/master
| 2023-01-22T06:11:23.350458 | 2021-09-21T06:26:36 | 2021-09-21T06:26:36 | 189,965,879 | 1 | 1 | null | 2023-01-10T23:16:16 | 2019-06-03T08:31:48 |
JavaScript
|
UTF-8
|
Python
| false | false | 324 |
py
|
# Generated by Django 2.1.7 on 2019-06-13 20:20
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('PlexHey', '0013_booking_paid'),
]
operations = [
migrations.RemoveField(
model_name='profile',
name='Driving',
),
]
|
[
"[email protected]"
] | |
6952776e38f65ecbbae62a609f1e5ea29c6d4d7f
|
e40ecfc9628c2de1787cc0eceabf2543c553a22f
|
/app/oc_website/management/commands/parse_anidb.py
|
74d4c09b781d6d87821b5d86a71fae37cf105785
|
[] |
no_license
|
old-castle-fansubs/website
|
27975ba7c603d44b3c6b8cb9c5d78cda0793057c
|
d2ecf45fbedf2ea873d3a3a3629c77af7a740429
|
refs/heads/master
| 2023-02-17T01:01:12.540371 | 2022-12-16T12:54:40 | 2022-12-16T13:59:57 | 163,497,607 | 0 | 0 | null | 2023-02-15T19:35:32 | 2018-12-29T09:27:18 |
Python
|
UTF-8
|
Python
| false | false | 783 |
py
|
from django.core.management.base import BaseCommand
from oc_website.models import AniDBEntry
from oc_website.tasks.anidb import fill_missing_anidb_info
class Command(BaseCommand):
help = "Reanalyzes AniDB response for a given ID."
def add_arguments(self, parser):
parser.add_argument(
"id",
type=int,
nargs="*",
help="AniDB ID to refresh metadata of",
)
def handle(self, *_args, **options):
entries = AniDBEntry.objects.all().order_by("anidb_id")
if anidb_ids := options["id"]:
entries = entries.filter(pk__in=anidb_ids)
for entry in entries:
self.stdout.write(f"Analyzing {entry.anidb_id}")
fill_missing_anidb_info(anidb_id=entry.anidb_id)
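# --- Hedged usage note (added; not part of the original command) ---
# As a Django management command, the class above is invoked from the
# project root, e.g.:
#   python manage.py parse_anidb            # refresh every entry
#   python manage.py parse_anidb 123 456    # refresh only those AniDB ids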
|
[
"[email protected]"
] | |
4dbf6716b6a8897abd7eac35d3119830b2ef6e51
|
293a36d744036d8534fa40131f9913a4f05463b7
|
/ZCO14003.py
|
8e9534abe10b2189e7fef6d70d7891979c23b77d
|
[] |
no_license
|
DEVANSHUK97/codechef-solutions
|
ddf8d11b392f274d17d638c2dfe3e34771db0458
|
e517a651c8ee97b363a2931bcd64cca21f979c9a
|
refs/heads/master
| 2023-02-09T01:39:10.181853 | 2021-01-05T02:07:34 | 2021-01-05T02:07:34 | 324,250,324 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 340 |
py
|
# -*- coding: utf-8 -*-
"""
Created on Thu Dec 24 03:22:04 2020
@author: dkhurm
"""
t = int(input())
budgets = []
for _ in range(t):
budgets.append(int(input()))
budgets = sorted(budgets)
how_many_can_afford = list(range(1,t+1))[::-1]
revenue = [budgets[i]*how_many_can_afford[i] for i in range(t)]
ans = max(revenue)
print(ans)
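# --- Worked example (added for illustration) ---
# For budgets [30, 10, 20]: sorted order is [10, 20, 30]; charging each
# budget in turn can sell to 3, 2 and 1 customers respectively, giving
# revenues [30, 40, 30], so the maximum revenue is 40.
assert max(b * k for b, k in zip(sorted([30, 10, 20]), range(3, 0, -1))) == 40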
|
[
"[email protected]"
] |