Dataset schema (one row per source file; string/list lengths and class counts as reported by the dataset viewer):

| column | dtype | range / classes |
|---|---|---|
| blob_id | string | length 40 (fixed) |
| directory_id | string | length 40 (fixed) |
| path | string | length 3–281 |
| content_id | string | length 40 (fixed) |
| detected_licenses | list | length 0–57 |
| license_type | string | 2 classes |
| repo_name | string | length 6–116 |
| snapshot_id | string | length 40 (fixed) |
| revision_id | string | length 40 (fixed) |
| branch_name | string | 313 classes |
| visit_date | timestamp[us] | |
| revision_date | timestamp[us] | |
| committer_date | timestamp[us] | |
| github_id | int64 | 18.2k–668M, nullable (⌀) |
| star_events_count | int64 | 0–102k |
| fork_events_count | int64 | 0–38.2k |
| gha_license_id | string | 17 classes |
| gha_event_created_at | timestamp[us] | |
| gha_created_at | timestamp[us] | |
| gha_language | string | 107 classes |
| src_encoding | string | 20 classes |
| language | string | 1 class (Python) |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 4–6.02M |
| extension | string | 78 classes |
| content | string | length 2–6.02M |
| authors | list | length 1 (fixed) |
| author | string | length 0–175 |
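For orientation, a minimal sketch of how rows with this schema could be streamed and inspected with the Hugging Face `datasets` library; the dataset id `org/python-code-dataset` is a placeholder assumption, not the actual name of this dataset.

```python
# Hedged sketch: stream a few rows of a dataset with the schema above.
# "org/python-code-dataset" is a placeholder id, not the real dataset name.
from datasets import load_dataset

ds = load_dataset("org/python-code-dataset", split="train", streaming=True)
for row in ds.take(3):
    # Field names follow the schema table above.
    print(row["repo_name"], row["path"], row["license_type"],
          row["length_bytes"], "bytes,", row["extension"])
```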
blob_id: 3a89353fe1bf9bc2c3a18a54b8aa626d89c3dc77 | directory_id: 15978aacf0e44a890e36ff94c305aca5a056e5e8 | content_id: 49f8d4065d8cba8ccf18b0da1614f1193e0a14d8
path: /13day/10-有返回的装饰器和通用的装饰器.py | repo_name: ittoyou/1805_python_2 | branch_name: refs/heads/master
detected_licenses: [] | license_type: no_license | snapshot_id: ffbe613d893208b2454ef4f25cc2b8a9951ff047 | revision_id: 1d6331a83598863042912bb26205d34417abed73
visit_date: 2020-03-24T13:58:12.276827 | revision_date: 2018-07-27T07:58:57 | committer_date: 2018-07-27T07:58:57
github_id: null | star_events_count: 0 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 425 | extension: py
content:
def w1(fun):
    def inner(*args, **kwargs):
        print("验证登录")
        return fun(*args, **kwargs)
    return inner


@w1
def play(a, b):
    print("------------%s-----%s----------" % (a, b))
    return "hehe"


ret = play("1", "2")
print(ret)


@w1
def play1():
    print("哈哈哈")


play1()


@w1
def play2(a):
    print("哈哈哈2%s" % a)


play2("嘎嘎")


@w1
def play3():
    return "hahah3"


ret = play3()
print(ret)
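# Note: because inner() both forwards *args/**kwargs and returns fun()'s
# result, the single decorator w1 wraps functions of any signature, with or
# without a return value (play returns "hehe" through the wrapper; play1
# only prints).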
authors: ["[email protected]"] | author: ""

---

blob_id: 249d0fc847698e8656f69bffdac9648ab002c339 | directory_id: 45614a944ffbdb75a0bef955582a722da5ce7492 | content_id: f3a2a4edc43929e36dcdc6408809e7ed0457801f
path: /python/selenium/delta_time.py | repo_name: wccgoog/pass | branch_name: refs/heads/2
detected_licenses: [] | license_type: no_license | snapshot_id: 1c8ab5393547634a27c7543556a75dec771a9e3d | revision_id: 0ec01536ae10b3d99707002c0e726072acb50231
visit_date: 2023-01-15T13:27:26.312648 | revision_date: 2019-10-23T09:30:45 | committer_date: 2019-10-23T09:30:45
github_id: 122,595,075 | star_events_count: 0 | fork_events_count: 2 | gha_license_id: null | gha_event_created_at: 2023-01-07T10:42:38 | gha_created_at: 2018-02-23T08:38:36 | gha_language: JavaScript
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 1,938 | extension: py
content:
# -*- coding: utf-8 -*-
import datetime, time
from selenium.webdriver.common.action_chains import ActionChains
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC


def write_delta_time(n):
    driver = webdriver.Chrome()
    driver.get('http://192.168.0.138:9998')
    driver.maximize_window()
    WebDriverWait(driver, 15).until(EC.presence_of_all_elements_located((By.ID, 'account')))
    driver.find_element_by_id('account').send_keys('5815')  # account
    driver.find_element_by_id('password').send_keys('WW5815')  # password
    start = driver.find_element_by_css_selector('div.handler.handler_bg')
    action = ActionChains(driver)
    action.drag_and_drop_by_offset(start, 250, 0)
    action.perform()  # drag the slider
    driver.find_element_by_id('loginbutton').click()
    WebDriverWait(driver, 15).until(EC.presence_of_all_elements_located((By.ID, "tabwindow_0")))
    f = open('C:/Users/Administrator/Desktop/time.txt', 'a')
    for i in range(n):
        for tab in driver.find_elements_by_css_selector('div.tab_close'):
            tab.click()
        driver.find_element_by_xpath("//ul[@id='jMenu']/li/a/span").click()
        driver.find_element_by_css_selector("li.jmenu-level-0 > ul > li > a > span").click()
        time_start = datetime.datetime.now()
        WebDriverWait(driver, 30).until(EC.frame_to_be_available_and_switch_to_it(0))
        time.sleep(1)  # raises an error without this pause
        WebDriverWait(driver, 15).until(EC.presence_of_all_elements_located((By.XPATH, "//div[@id='ListTable']/div[5]/div/div[5]/div[8]")))
        time_end = datetime.datetime.now()
        time_total = time_end - time_start
        f.write(str(time_total) + '\n')
        driver.switch_to.default_content()
    f.close()


if __name__ == '__main__':
    n = input('输入希望运行的次数: ')
    write_delta_time(int(n))
authors: ["[email protected]"] | author: ""

---

blob_id: 775cd1d52f3be2c7466a0a059e18e4b512147420 | directory_id: 82daa207e1f70369f77f5f9ddd9978c39a0c1b79 | content_id: c57d4fda275c6319a57ccad1573fd26171184395
path: /kirimsms.py | repo_name: tenizbr/bacasms | branch_name: refs/heads/master
detected_licenses: [] | license_type: no_license | snapshot_id: 11b3b1baee22c5fd5ec27d486a69fef7c4a1aebc | revision_id: 0f7e36bc323a6bdc355c951ef9be4e254f70707b
visit_date: 2021-01-01T18:49:19.287454 | revision_date: 2015-06-24T14:33:57 | committer_date: 2015-06-24T14:33:57
github_id: 37,904,605 | star_events_count: 0 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 1,068 | extension: py
content:
#!/usr/bin/env python
"""
sms.py - Used to send txt messages.
"""
import serial
import time


class TextMessage:
    def __init__(self, recipient="0123456789", message="TextMessage.content not set."):
        self.recipient = recipient
        self.content = message

    def setRecipient(self, number):
        self.recipient = number

    def setContent(self, message):
        self.content = message

    def connectPhone(self):
        self.ser = serial.Serial('/dev/ttyACM0', 460800, timeout=5)
        time.sleep(1)

    def sendMessage(self):
        self.ser.write('ATZ\r')
        time.sleep(1)
        self.ser.write('AT+CMGF=1\r')
        time.sleep(1)
        self.ser.write('''AT+CMGS="''' + self.recipient + '''"\r''')
        time.sleep(1)
        self.ser.write(self.content + "\r")
        time.sleep(1)
        self.ser.write(chr(26))
        time.sleep(1)

    def disconnectPhone(self):
        self.ser.close()


sms = TextMessage("0123456789", "This is the message to send.")
sms.connectPhone()
sms.sendMessage()
sms.disconnectPhone()
authors: ["[email protected]"] | author: ""

---

blob_id: 3679dbbc8bc44685045edec9a6d71a1e00d53833 | directory_id: 45ee96b582d7b3e045819db510088d2cb640dfde | content_id: e78f92dafcc73f0e1bfc49baa5f3d15bd4298468
path: /BOJ/Previous/Implementation/완전제곱수.py | repo_name: tom9744/Algorithms | branch_name: refs/heads/master
detected_licenses: [] | license_type: no_license | snapshot_id: e54b649014f3b478bfbc7a0f9e8e56ad5dbc1304 | revision_id: 4496b1c992ab4322289e5a200567f3df00478917
visit_date: 2023-05-06T00:59:12.767655 | revision_date: 2021-05-26T16:26:50 | committer_date: 2021-05-26T16:26:50
github_id: 330,401,584 | star_events_count: 0 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 352 | extension: py
content:
# 1977: Perfect square numbers
import math

M = int(input())
N = int(input())

perfect_square_numbers = []
for number in range(M, N + 1):
    if math.sqrt(number).is_integer():
        perfect_square_numbers.append(number)

if len(perfect_square_numbers) == 0:
    print(-1)
else:
    print(sum(perfect_square_numbers))
    print(perfect_square_numbers[0])
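# Note: math.sqrt() is fine for inputs of this size, but float precision can
# misclassify very large integers; math.isqrt (Python 3.8+) is exact:
#     if math.isqrt(number) ** 2 == number:
#         perfect_square_numbers.append(number)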
authors: ["[email protected]"] | author: ""

---

blob_id: 121f9c22056af85d16160adacd12d9b6a5a75f8b | directory_id: 1d7d47afeee3f8a53c778ef18544a91bcfe119ad | content_id: 505d391d6508f532c9a6cc64d85b257ab5e616f9
path: /demo/lp_demo.py | repo_name: matanost/ARAS-Final-Project | branch_name: refs/heads/master
detected_licenses: [] | license_type: no_license | snapshot_id: d4b181df597dd5638b9033c8de5727e7b19feb25 | revision_id: 624f9881974bfacda165fccf1633bc2d45a45575
visit_date: 2020-09-28T18:26:07.836332 | revision_date: 2020-03-16T18:20:52 | committer_date: 2020-03-16T18:20:52
github_id: 226,835,200 | star_events_count: 0 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 1,404 | extension: py
content:
from LP.linear_programing import simplex_result
import numpy as np

TOLERANCE = 0.001


def class_example():
    A = np.array([[3, 2, 1, 2],
                  [1, 1, 1, 1],
                  [4, 3, 3, 4]])
    b = np.array([225, 117, 420])
    c = np.array([19, 13, 12, 17])
    c = c.transpose()
    s = simplex_result(A, b, c)
    if s < 1827 + TOLERANCE and s > 1827 - TOLERANCE:
        print("success")
    else:
        print("fail")


def homework3_ex2():
    A = np.array([[1, 1, 2],
                  [2, 0, 3],
                  [2, 1, 3]])
    b = np.array([4, 5, 7])
    c = np.array([3, 2, 4])
    c = c.transpose()
    s = simplex_result(A, b, c)
    if s == 10.5:
        print("success")
    else:
        print("fail")


def homework3_ex1():
    A = np.array([[-1, 1],
                  [-2, -2],
                  [-1, 4]])
    b = np.array([-1, -6, 2])
    c = np.array([1, 3])
    c = c.transpose()
    s = simplex_result(A, b, c)
    if s == 'unbounded solution':
        print("success")
    else:
        print("fail")


def test_unbounded():
    A = np.array([[1, 0]])
    b = np.array([3])
    c = np.array([0, 1])
    c = c.transpose()
    s = simplex_result(A, b, c)
    if s == 'unbounded solution':
        print("success")
    else:
        print("fail")


if __name__ == "__main__":
    class_example()
    homework3_ex2()
    test_unbounded()
    homework3_ex1()
authors: ["[email protected]"] | author: ""

---

blob_id: 0cfbe10181f10df8829276d0b38f0d45af306190 | directory_id: 3155cd6616fa6431648d0358369f01410e5f1d3c | content_id: ea77996f52eb4cc37c25bc255a18d416a845944c
path: /src/testpkg/__main__.py | repo_name: subbyte/python-config-install-test | branch_name: refs/heads/main
detected_licenses: ["Apache-2.0"] | license_type: permissive | snapshot_id: 3067e0d7db65d5274cea524c43ceeae116c4ab9b | revision_id: 802a32ca5e953cad67b9ed06c7105ba128d66728
visit_date: 2023-05-07T22:42:37.936625 | revision_date: 2021-06-03T01:43:27 | committer_date: 2021-06-03T01:43:27
github_id: 372,955,818 | star_events_count: 0 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 257 | extension: py
content:
import os
import pathlib

if __name__ == "__main__":
    pp = pathlib.Path(os.path.join(os.getenv("pythonLocation", "/")))
    print(pp)
    ppcfg = pp / "etc" / "testpkg" / "testpkg.conf"
    print(ppcfg)
    with open(ppcfg) as h:
        print(h.read())
authors: ["[email protected]"] | author: ""

---

blob_id: df36af9c597acf1c47522428b4ce793ef2d0c96a | directory_id: 1f11fb9c91d06249388838a991a3c5e2d7c5922e | content_id: 7bc4cadf4aa4cfda367c488a9101fb7bf03c4a61
path: /innovation_dreams/settings.py | repo_name: alexolirib/inovation_dreams_backend | branch_name: refs/heads/master
detected_licenses: [] | license_type: no_license | snapshot_id: ee80f9da35bb7f7af5e6a44c2e17f02406339ba5 | revision_id: 67bfda33e30038307459e41ba6c86ab66facd1dd
visit_date: 2023-05-01T09:59:35.288115 | revision_date: 2020-05-30T23:11:46 | committer_date: 2020-05-30T23:11:46
github_id: 205,766,843 | star_events_count: 1 | fork_events_count: 1 | gha_license_id: null | gha_event_created_at: 2023-04-21T20:37:09 | gha_created_at: 2019-09-02T03:01:52 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 4,066 | extension: py
content:
"""
Django settings for innovation_dreams project.
Generated by 'django-admin startproject' using Django 2.2.4.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
from decouple import config
SECRET_KEY = config('SECRET_KEY')
DEBUG = config('DEBUG', default=False, cast=bool)
ALLOWED_HOSTS = ["*"]
DEFAULT_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
THIRD_PARTY_APPS = [
'rest_framework',
'rest_framework.authtoken',
'corsheaders',
]
CORS_ORIGIN_ALLOW_ALL = True
LOCAL_APPS = [
'endereco',
'usuario',
'projeto'
]
INSTALLED_APPS = DEFAULT_APPS + THIRD_PARTY_APPS + LOCAL_APPS
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'corsheaders.middleware.CorsMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'innovation_dreams.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'innovation_dreams.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
# DATABASES = {
# 'default': {
# 'ENGINE': 'django.db.backends.sqlite3',
# 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
# }
# # 'default': {
# # 'ENGINE': 'django.db.backends.postgresql',
# # 'NAME': 'post gres',
# # 'USER': 'postgres',
# # 'PASSWORD': 'Postgres2018!',
# # 'HOST': '127.0.0.1',
# # 'PORT': '5432',
# # }
# }
from dj_database_url import parse as dburl
default_dburl = 'sqlite:///' + os.path.join(BASE_DIR, 'db.sqlite3')
DATABASES = { 'default': config('DATABASE_URL', default=default_dburl, cast=dburl), }
REST_FRAMEWORK = {
'DEFAULT_AUTHENTICATION_CLASSES': [
'rest_framework.authentication.BasicAuthentication',
'rest_framework.authentication.SessionAuthentication',
]
}
REST_FRAMEWORK = {
'DEFAULT_AUTHENTICATION_CLASSES': [
'rest_framework.authentication.BasicAuthentication',
]
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'pt-br'
TIME_ZONE = 'America/Fortaleza'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
# image
MEDIA_ROOT = 'imagens'
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
MEDIA_URL = '/media/'
authors: ["[email protected]"] | author: ""

---

blob_id: 38882048329053e5b49a2b61b7b397243de6c0e8 | directory_id: aec3410491c7f572a08fd11d050e34629629cf5a | content_id: 73f3c77f19bac3b153158db232d7326a9e3b2050
path: /Blodsukker/blodsukker.py | repo_name: RaStrand/progNaturfag | branch_name: refs/heads/master
detected_licenses: ["MIT"] | license_type: permissive | snapshot_id: ef7858ef305b4b981e7ab3d224944c0da114335a | revision_id: bb3709762fe17436a53f83bc85b4695b73be303d
visit_date: 2021-10-12T02:46:30.000518 | revision_date: 2019-01-31T20:39:07 | committer_date: 2019-01-31T20:39:07
github_id: 268,751,528 | star_events_count: 1 | fork_events_count: 0 | gha_license_id: MIT | gha_event_created_at: 2020-06-02T08:59:14 | gha_created_at: 2020-06-02T08:59:13 | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 459 | extension: py
content:
# -*- coding: utf-8 -*-
"""
Created on Tue Jan 29 18:11:17 2019
@author: emithun
"""
from pylab import *
tid = [0, 10, 20, 45, 60, 90]
kontroll = [4.5, 4.8, 4.3, 4.5, 4.9, 5]
cola = [4.0, 8.0, 9.3, 6.5, 5.4, 4.5]
zero = [4.3, 5.6, 5.4, 4.9, 4.0, 4.2]
nyTid = array(tid)*2
plot(tid,kontroll,tid,cola,tid,zero)
legend(["Kontroll","Cola","Zero"])
xlabel("Tid i minutter")
ylabel("Blodsukkernivå")
title("Blodsukkernivå-forsøk")
show()
authors: ["[email protected]"] | author: ""

---

blob_id: dbb9d90b565890213e8b68e863b19b836cbdd4c3 | directory_id: 17dc68eb7153673aef566aab3c8f1af05d28c721 | content_id: 076d49abfc60580f5d88b9c00566ea0b4f6c9d31
path: /Extension.py | repo_name: gurudarshans1910/project- | branch_name: refs/heads/master
detected_licenses: [] | license_type: no_license | snapshot_id: 06012f39bfe2dfac2ea5571857b92cda58a016f5 | revision_id: 1ad4382a5f58a19f7f52daa82a0a86b592cffeac
visit_date: 2022-11-27T17:54:08.300517 | revision_date: 2020-07-26T11:45:33 | committer_date: 2020-07-26T11:45:33
github_id: 279,602,275 | star_events_count: 0 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 118 | extension: py
content:
file_name = input("Enter the file name \n")
file_extn = file_name.split(".")
print(" The extension is " + repr(file_extn[-1]))
authors: ["[email protected]"] | author: ""

---

blob_id: 11d91f7682d807291ec8c6d20fa64f3166ad3a77 | directory_id: f682c74fb65f0d951821b77bf96cee28d00ae3dd | content_id: a267ae318e81038908bb00ebc4349ddfeb6944bd
path: /博物馆网数据采集子系统/展览爬取/广东省博物馆展览爬取.py | repo_name: 1806-1/Software-engineering | branch_name: refs/heads/main
detected_licenses: [] | license_type: no_license | snapshot_id: 7e5add7b40d123dca0daa39d83a8fc4c16f8cb0d | revision_id: 0a75ed857410bb8e1f882bd8e49504c43590ffd8
visit_date: 2023-05-13T00:07:58.579811 | revision_date: 2021-06-06T08:09:41 | committer_date: 2021-06-06T08:09:41
github_id: 354,178,777 | star_events_count: 0 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 4,947 | extension: py
content:
# -*- coding: utf-8 -*-
"""
Created on Sun May 16 09:21:11 2021

@author: lenovo
"""
import requests
import pandas as pd
import csv
from bs4 import BeautifulSoup

hdrs = {'User-Agent': 'Mozilla/5.0 (X11; Fedora; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko)'}
# URL of the museum's activity list page
url = "http://www.gdmuseum.com/"
r = requests.get(url, headers=hdrs)
soup = BeautifulSoup(r.content.decode('utf8', 'ignore'), 'lxml')
# class_='tz_r_first' is the main body of the activity list; adjust to match the page's tags
div_list = soup.find_all('div', class_='tz_r_first')
# Find the next-level URLs, i.e. the detail page of each activity
anchors = soup.findAll('a')
links = []  # stores the URL of each activity detail page
for tag in soup.find_all('ul', class_='xwdt'):
    anchors = tag.findAll('a')
    # print("anchors", anchors)
    for a in anchors:
        links.append(a['href'])
# print(links)

# Scrape the activity names from the list page
TitleList = []  # stores activity names; the last ten characters of each entry are the activity date
k = 0
for tag in soup.find_all('ul', class_='xwdt'):
    k = k + 1
    title = tag.get_text()
    TitleList.append(title)
# print(TitleList)

IntroList = []  # stores introductions (holds long passages after scraping; only the first sentence is uploaded to the database later)
ImgList = []  # stores image URLs (matches the final Photo list written to the CSV, copied directly)
for kk in links:  # iterate over the detail-page links
    Detailurl = kk
    Detailr = requests.get(Detailurl, headers=hdrs)
    Detailsoup = BeautifulSoup(Detailr.content.decode('utf8', 'ignore'), 'lxml')
    for tag in Detailsoup.findAll('div', class_='yknr_mav'):  # main body of the activity detail page
        img_link = tag.findAll('img')  # find all img tags
        print(img_link)
        for a in img_link:  # iterate over the img tags
            ImgList.append("http://www.gdmuseum.com/" + a['href'])  # the page's img links lack the host part, so prepend it
            print("http://www.gdmuseum.com/" + a['href'])
            break  # keep only the first image
    i = 0  # counter
    for tag in Detailsoup.select('p'):  # the <p class="MsoNormal"> tags hold the text introduction ('calss_' typo removed from the original call)
        i = i + 1
        if i <= 2:  # the first two hold the date and miscellany; the third is the first sentence of the introduction, stored in IntroList
            continue
        Introduce = tag.get_text()
        # print(Introduce)
        if len(Introduce) > 5:  # keep entries longer than 5 characters and stop (i.e. keep only the first sentence)
            IntroList.append(Introduce)
            break
        else:
            continue  # may be whitespace; entries that are too short are not kept
# print(IntroList)
# =============================================================================
# Scraping finished
# Start formatting the data
# =============================================================================
# Lists finally written to the CSV
Name_Act_List = []   # activity names
Time_Act_List = []   # activity dates
Intro_Act_List = []  # activity introductions
Photo_Act_List = []  # activity image URLs
newTitleList = TitleList[0].split('\n')  # TitleList holds one long string with activities separated by '\n'; split it into separate elements
print(newTitleList)
for name in newTitleList:
    lenth = len(name)
    if lenth < 2:  # split() may yield whitespace-only elements; skip the very short ones
        continue
    Time = name[lenth - 10:]  # the last ten characters are exactly the date
    # if len(Time) == 10:
    #     Time_Act_List.append(Time)
    Time_Act_List.append(Time)
    Title = name[:lenth - 10]  # everything before the last ten characters is the activity name
    Name_Act_List.append(Title)
print(Time_Act_List)
print(Name_Act_List)
for intro in IntroList:
    lenth = len(intro)
    a = intro.find('。')  # position of the first full stop
    intro = intro[:a + 1]  # keep everything up to the first full stop as the introduction
    out = "".join(intro.split())  # strip '\x0a' and other layout characters, keeping only the text
    Intro_Act_List.append(out)
    print(out)
print(Intro_Act_List)
Photo_Act_List = ImgList
help_x_list = []
Museum_list = []
for i in range(0, len(Name_Act_List)):
    help_x_list.append(str(i))
    Museum_list.append("广东省博物馆")
# =============================================================================
# Start writing the data to the CSV
# =============================================================================
dataframe = pd.DataFrame({
    '博物馆名称': Museum_list,
    '活动名字': Name_Act_List,
    '活动时间': Time_Act_List,
    '活动介绍': Intro_Act_List,
    '活动图片地址': Photo_Act_List
})
dataframe.to_csv(r"广东省博物馆活动.csv", sep=',')
authors: ["[email protected]"] | author: ""

---

blob_id: 113710c73f25346364b05539416c76394df2f58f | directory_id: 115163545626f29825f09c7968320a492110c2a5 | content_id: 5c82483dd00707ce3ea6ad54c20c86089546b8b4
path: /firebase/firebase/settings.py | repo_name: Rushiahire/Django_firebase | branch_name: refs/heads/main
detected_licenses: [] | license_type: no_license | snapshot_id: 9848143716c6d3fca28ed9d1810edf4120e8bae1 | revision_id: 72fcd69e15da27800623afa74c2159be6950305e
visit_date: 2023-09-02T11:54:38.879866 | revision_date: 2021-11-20T17:04:04 | committer_date: 2021-11-20T17:04:04
github_id: 430,160,654 | star_events_count: 0 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 3,308 | extension: py
content:
"""
Django settings for firebase project.
Generated by 'django-admin startproject' using Django 3.2.8.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
from pathlib import Path
import os
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'django-insecure-4x-y(e!00frpj^u-hlf6xqtnoyy2^3$owabj*^pag#ykfqk@8^'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'firebase_app'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'firebase.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR,'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'firebase.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
authors: ["[email protected]"] | author: ""

---

blob_id: 8dcc5b1858c1ac40df3890d2861e9eaf22442438 | directory_id: f9c98f9c127fa1cd9fba17abe17199fb5440b36b | content_id: 4075e272dbd1935f798f0a5e98afdbdc40b07924
path: /andy_li/django/django_intro/disappearing_ninjas/disappearing_ninjas/apps.py | repo_name: RibRibble/python_april_2017 | branch_name: refs/heads/master
detected_licenses: [] | license_type: no_license | snapshot_id: 162e543f97afc77d44fcc858106e4730d3f7f760 | revision_id: 3cc4240d371a8bad8da2ea085e3675272cca2de3
visit_date: 2021-01-19T01:12:34.667828 | revision_date: 2017-04-27T22:11:53 | committer_date: 2017-04-27T22:11:53
github_id: 87,233,010 | star_events_count: 1 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: 2017-04-04T20:41:44 | gha_created_at: 2017-04-04T20:41:44 | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 177 | extension: py
content:
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.apps import AppConfig


class DisappearingNinjasConfig(AppConfig):
    name = 'disappearing_ninjas'
authors: ["[email protected]"] | author: ""

---

blob_id: c457b7447520af9b8cde005c50bbaf62e7dae776 | directory_id: 08d5e67db673a0e4c46f4c6967e49c20479b254e | content_id: 5a50df81cd5c566baad52588d1b43cac7b6ae96f
path: /airexpress2.py | repo_name: Willenbupt/Python | branch_name: refs/heads/master
detected_licenses: [] | license_type: no_license | snapshot_id: f49235c7cb6e8f0b7c08f0ac106ec68fef9423ec | revision_id: d5cf8c41ac4fbf563389b91478b8b271f6c2986e
visit_date: 2022-06-05T08:33:34.421782 | revision_date: 2020-05-02T06:08:13 | committer_date: 2020-05-02T06:08:13
github_id: null | star_events_count: 0 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 6,267 | extension: py
content:
import sys
import re


class Data:
    exchange_rate = 7
    discount = 0.65
    product_title = None
    final_price = None
    product_name = None
    china_price = None
    product_price = None
    weight = None
    freight_charges = None
    china_product_url = None
    profit = None
    shippment_charge = None
    opponent_price = None
    promotion_cost = None
    airexpress_commission = None
    Feature = None
    Specication = None
    shippment_charge_judge = None
    product_bianti_name = None

    # Enter the text information of the product being listed (complete)
    def get_information(self):
        t = 1
        print("请输入商品名称:")
        Data.product_name = input()
        while t:
            print("请输入产品标题")
            Data.product_title = sys.stdin.readline()
            print('标题字符串的个数是:', len(Data.product_title), '个')
            product_title1 = re.compile(r'\b[a-zA-Z]+\b', re.IGNORECASE).findall(Data.product_title)
            # Convert the uppercase letters in the list to lowercase.
            # If the list mixed strings and integers, non-string types would lack a lower() method.
            product_title2 = [s.lower() for s in product_title1 if isinstance(s, str) == True]
            print(product_title2)
            t = 0
            for i in product_title2:
                count = product_title2.count(i)
                print('标题中每个单词的出现次数是:')
                print(i, ':', count)
                if count >= 3:
                    print("出现标题关键词次数违规:")
                    print(i, ':', count)
                    print("请重新输入标题!")
                    t = 1
        print("请输入货源url")
        Data.china_product_url = input()

    # Enter the text information of the variant being listed
    def get_bianti_information(self):
        print("请输入变体名称:")
        Data.product_bianti_name = input()

    # Enter the competitor product's information
    def get_opponent_info(self):
        print("####接下来是竞品信息####")
        print("对手是否包邮(0包邮/1不包邮):")
        Data.shippment_charge_judge = input()
        if int(Data.shippment_charge_judge) == 1:
            print('请输入邮费:')
            Data.shippment_charge = float(input())
        else:
            print('包邮')
        print("请输入竞品价格")
        Data.opponent_price = float(input())
        if int(Data.shippment_charge_judge) == 1:
            Data.opponent_price = Data.opponent_price + Data.shippment_charge

    # Enter the parameters for the price calculation
    def get_caculate_information(self):
        print('请输入重量:')
        Data.weight = input()
        print("请输入物流价格:")
        Data.freight_charges = float(input())
        print("请输入拿货价格:")
        Data.china_price = float(input())
        print("请输入目标利润:")
        Data.profit = float(input())

    def caculation(self):
        Data.product_price = ((Data.profit + Data.freight_charges + Data.china_price) / Data.exchange_rate) / 0.87
        Data.final_price = Data.product_price / 0.65
        Data.airexpress_commission = Data.product_price * 0.08
        Data.promotion_cost = Data.product_price * 0.05
        # print(product_price, final_price)

    def output_product_bianti_info(self):
        print("#################################变体信息##################################")
        print('变体名称:', Data.product_bianti_name)
        print('重量:', Data.weight)
        print("#######平台抽成#######")
        print('平台佣金:美元$', Data.airexpress_commission)
        print('推广成本:美元$', Data.promotion_cost)
        # print('\033[1;33m 仔细点 \" %s .\"\033[3;31m')
        print("#######价格信息#######")
        print('目标利润: 人民币¥', Data.profit)
        print("物流价格:人民币¥", Data.freight_charges)
        print('拿货价格:人民币¥', Data.china_price)
        print('折扣价格:美元$', Data.product_price)
        print('输出价格:美元$', Data.final_price)

    def output_product_info(self):
        print("#######################################################商品信息###########################################################")
        print('商品名称:', Data.product_name)
        print('商品标题:', Data.product_title)
        print('重量:', Data.weight)
        print('货源地址:', Data.china_product_url)
        print("#######平台抽成#######")
        print('平台佣金:美元$', Data.airexpress_commission)
        print('推广成本:美元$', Data.promotion_cost)
        # print('\033[1;33m 仔细点 \" %s .\"\033[3;31m')
        print("#######价格信息#######")
        print('目标利润: 人民币¥', Data.profit)
        print("物流价格:人民币¥", Data.freight_charges)
        print('拿货价格:人民币¥', Data.china_price)
        print('折扣价格:美元$', Data.product_price)
        print('输出价格:美元$', Data.final_price)

    def output_opponent_info(self):
        print("######竞品数据#######")
        print("对手是否包邮:", Data.shippment_charge)
        print('竞品价格:美元$', Data.opponent_price)


if __name__ == '__main__':
    t1 = 1
    t2 = 1
    t3 = 1
    while t1:
        d = Data()
        d.get_information()
        d.get_caculate_information()
        # Enter the competitor's information
        d.get_opponent_info()
        d.caculation()
        d.output_product_info()
        # Output the competitor's information
        d.output_opponent_info()
        print("是否有其他变体需要计算(0/1):")
        t2 = int(input())
        while t2:
            d.get_bianti_information()
            d.get_caculate_information()
            # Enter the competitor's information
            d.get_opponent_info()
            d.caculation()
            d.output_product_bianti_info()
            # Output the competitor's information
            d.output_opponent_info()
            print("是否有其他变体需要计算(0/1):")
            t2 = int(input())
        print("是否有其他商品需要计算(0/1):")
        t1 = int(input())
authors: ["[email protected]"] | author: ""

---

blob_id: a4fc15a9e217a1380aa452ca9495d4b79182a1b4 | directory_id: 303be1b0357a01faa6f748ac4fb6c4b1c45ac725 | content_id: 4b47e7cc461a25ad6600210ce7f869160ca83be8
path: /06_is_palindrome.py | repo_name: ghufransyed/udacity_cs101 | branch_name: refs/heads/master
detected_licenses: [] | license_type: no_license | snapshot_id: 17747083de3991ac256b7cac1ccff0f71acc7d80 | revision_id: 30084c3ee8f32d9b6edcaf007b08a99da90efc79
visit_date: 2016-09-06T14:29:26.055550 | revision_date: 2014-02-19T12:52:14 | committer_date: 2014-02-19T12:52:14
github_id: null | star_events_count: 0 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 235 | extension: py
content:
# 06_is_palindrome


def is_palindrome(s):
    if s == '':
        return True
    else:
        if s[0] == s[-1]:
            return True and is_palindrome(s[1:-1])
        else:
            return False


print(is_palindrome('abba'))
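# For comparison, the non-recursive idiom: a string is a palindrome exactly
# when it equals its own reverse, i.e. s == s[::-1].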
authors: ["[email protected]"] | author: ""

---

blob_id: b7ee65bfd9352b7f35dcd621f0d341848a5f8836 | directory_id: 1f4e01585bf3fd07fede636c482c88914d3caa61 | content_id: c9ae09a9d8e55180ef487fda941fc02110fca465
path: /tools.py | repo_name: afcarl/genetic_ptsne | branch_name: refs/heads/master
detected_licenses: ["MIT"] | license_type: permissive | snapshot_id: 99b5ce3bd6118ffd85b2af0838c3537f0d63ef2a | revision_id: 4edbc228b0bc77f7263b6285e13a954f7020669e
visit_date: 2020-04-23T18:11:20.132166 | revision_date: 2018-12-21T01:07:30 | committer_date: 2018-12-21T01:07:30
github_id: 171,357,927 | star_events_count: 1 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: 2019-02-18T21:21:36 | gha_created_at: 2019-02-18T21:21:36 | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 820 | extension: py
content:
import pandas as pd
from pathlib import Path


# need a function to write out the transformed data to a csv
# NOTE this function should be altered to write datafile into a specific directory
def write_csv(input_data, specified_filename):
    # turn input_data into a pandas dataframe
    df = pd.DataFrame(input_data)
    # write it out
    df.to_csv(path_or_buf=specified_filename, sep=',', index=False, header=None)


# function that writes to a file called test_specs at the target location
# the 'shape' of a ptsne test, in bytes
# path parameter must be a Path object
def write_test_specs(path, num_gens, gensize):
    path = path / 'test_specs'
    path.write_bytes(bytes([num_gens, gensize]))


# writes dna, as text, to directory located at path
def write_dna(directory, dna):
    path = directory / 'dna.dna'
    path.write_text(dna)
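# Usage sketch (paths and values are illustrative, not from the original);
# note that bytes([num_gens, gensize]) requires both values to be in 0..255:
#     from pathlib import Path
#     out = Path("results")
#     out.mkdir(exist_ok=True)
#     write_test_specs(out, num_gens=10, gensize=50)
#     write_dna(out, "ACGT")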
authors: ["[email protected]"] | author: ""

---

blob_id: ba056f23c31bedff0a62a7a936c8f4d65f3efb17 | directory_id: f86eede4f262d8fa17ff870b1fbab87eae31bafa | content_id: d5432191b5be596cbb7a568461f63687b024b37a
path: /azureml_wrapper.py | repo_name: nasadigital/diplomska-instagram | branch_name: refs/heads/master
detected_licenses: [] | license_type: no_license | snapshot_id: 84a356883404e9ebad3d28395177e69ecd48122e | revision_id: 7c65a53f6046c26e0ec7ff50de0e89773a32e657
visit_date: 2020-03-30T17:21:34.799690 | revision_date: 2020-02-03T06:48:53 | committer_date: 2020-02-03T06:48:53
github_id: 151,452,063 | star_events_count: 0 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: 2020-01-28T23:16:21 | gha_created_at: 2018-10-03T17:23:56 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 1,101 | extension: py
content:
from azureml.core import Dataset, Run
from load_data import train_test_bert, prep_train_test_bert
import os  # used by train_mlp() below
import random

random.seed(4)

run = Run.get_context()
workspace = run.experiment.workspace
dataset = Dataset.get_by_name(workspace=workspace, name='dataset')
dataset.download(target_path='.', overwrite=False)
dist = Dataset.get_by_name(workspace=workspace, name='dataset_dist')
dist.download(target_path='.', overwrite=False)


def bert_precompute():
    prep_train_test_bert('./media.csv', './dist.dat', './models/1024dRoBertAModel',
                         10, result_path='./result1024dRoBertA.txt', check=1,
                         pretrained_weights='roberta-base')


def train_mlp():
    os.makedirs(os.path.dirname('./outputs/'), exist_ok=True)
    precalced = Dataset.get_by_name(workspace, name='distilbert-base-uncased_pack')
    precalced.download(target_path='./outputs/', overwrite=False)
    train_test_bert('./media.csv', './dist.dat', './models/768dBertModel',
                    10, result_path='./result768dBert.txt', check=1,
                    pretrained_weights='distilbert-base-uncased')


train_mlp()
authors: ["[email protected]"] | author: ""

---

blob_id: f29c2824cc885dac4691b3a988b7b70954d4d8f4 | directory_id: 21dc20848ecd3bb471668890d90d7a71e6a5e200 | content_id: 12b28919f968a77f6b92473f2b3e5b1ed7e44dfb
path: /VulcanoFileFormat/vulcano_file_format/exporter.py | repo_name: Zingam/VulcanoFileFormat | branch_name: refs/heads/master
detected_licenses: [] | license_type: no_license | snapshot_id: 0165ef33f49174c3421db4d1f39bcf2ad50ac2a8 | revision_id: 578f2387f4c0e5168d6cbf9ea904cad51cf160c4
visit_date: 2021-06-08T19:39:28.382926 | revision_date: 2021-04-10T08:18:01 | committer_date: 2021-04-10T08:18:01
github_id: 131,746,439 | star_events_count: 0 | fork_events_count: 1 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 5,764 | extension: py
content:
FFM_MESSAGE = "Vulcano File Format Exporter:\n "

# This is required to support reloading of modules in Blender with F8
if "bpy" in locals():
    import importlib
    importlib.reload(utils)
else:
    from .blender import utils

import os  # noqa
import bpy  # noqa
import bmesh  # noqa


def print_bl_collection_objects(bl_collection: bpy.types.Collection, tab_space: int):
    for bl_collection in bl_collection.children:
        collection_message = " > Collection: {}".format(bl_collection.name)
        # Add white space to the front of the string
        collection_message_length = len(collection_message) + tab_space
        output_message = collection_message.rjust(collection_message_length)
        print(output_message)
        for bl_object in bl_collection.objects:
            collection_message = " - {:10}: {}".format(
                bl_object.type, bl_object.name)
            # Add white space to the front of the string
            collection_message_length = len(collection_message) + tab_space
            output_message = collection_message.rjust(
                collection_message_length)
            print(output_message)
        print_bl_collection_objects(bl_collection, tab_space + 2)


def print_object_info(bl_object: bpy.types.Object):
    print(" > Found object \"{0}\" of type {1}"
          .format(bl_object.name, bl_object.type))
    location = bl_object.location
    print(" at location: {0:10f}, {1:10f}, {2:10f}"
          .format(location.x, location.y, location.z))
    if bl_object.data is not None:
        print(" of type: {0}".format(type(bl_object.data)))


def export_vffmsh(operator, context):
    if operator.clear_system_console:
        # Clear System Console
        os.system("cls")

    print("Blender version: {}\n".format(bpy.app.version_string))
    import sys  # noqa
    print("Python version: {}".format(sys.version))
    print(" info: {}".format(sys.version_info))

    # Begin export
    print("\n")
    print("====================================================================")
    export_utils = utils.get_utils()
    if export_utils is None:
        error_message = "Export failed: " + utils.get_last_error()
        operator.report({'ERROR'}, error_message)
        print(FFM_MESSAGE, error_message)
        print("====================================================================")
        return
    modifier_manager = export_utils.ModifierManager()
    print(FFM_MESSAGE, "Exporting mesh...")
    print("\n")
    # print("operator.exported_file_type",
    #       operator.exported_file_type,
    #       type(operator.exported_file_type))
    # print("operator.path_mode",
    #       operator.path_mode,
    #       type(operator.path_mode))
    # print("operator.use_selection",
    #       operator.use_selection,
    #       type(operator.use_selection))
    # print("operator.apply_modifiers",
    #       operator.apply_modifiers,
    #       type(operator.apply_modifiers))

    # Enumerate the collections in the scene's master collection
    print(" Collections:")
    print(" > Collection: ", context.scene.collection.name)
    print_bl_collection_objects(context.scene.collection, 2)

    # Enumerate the objects in the scene
    print("\n")
    for bl_object in context.scene.objects:
        if bl_object.type == "MESH":
            if operator.apply_modifiers:
                # Create a temporary mesh with applied modifiers
                mesh = modifier_manager.apply_modifiers(
                    bl_object, context, operator)
            else:
                mesh = bl_object.data
            # Print mesh data
            print(" ----------------------------------------------------------")
            print_object_info(bl_object)
            print("\n Vertex coordinates:\n")
            for vertex in mesh.vertices:
                print(" {0:10f}, {1:10f}, {2:10f}"
                      .format(vertex.co.x, vertex.co.y, vertex.co.z))
            print("\n Faces (indices):\n")
            for polygon in mesh.polygons:
                indices = " "
                for index in polygon.vertices:
                    indices += ("{0:4d},".format(index))
                print(indices[:-1])
            # Create a bmesh object from the mesh object
            bmesh_object = bmesh.new()
            bmesh_object.from_mesh(mesh)
            # Remove the temporary mesh with applied modifiers
            if operator.apply_modifiers:
                modifier_manager.clear_mesh()
            # Convert the bmesh object's faces to triangles
            bmesh.ops.triangulate(bmesh_object, faces=bmesh_object.faces)
            print("\n > Converting to:", type(bmesh_object))
            print("\n Vertex coordinates:\n")
            mesh = bl_object.data
            for vertex in bmesh_object.verts:
                print(" {0:10f}, {1:10f}, {2:10f}"
                      .format(vertex.co.x, vertex.co.y, vertex.co.z))
            print("\n Faces (indices):\n")
            for face in bmesh_object.faces:
                indices = " "
                for vertex in face.verts:
                    indices += ("{0:4d},".format(vertex.index))
                print(indices[:-1])
            bmesh_object.free()
            print(" ----------------------------------------------------------")
        if bl_object.type == "EMPTY":
            print(" ----------------------------------------------------------")
            print_object_info(bl_object)
            print(" ----------------------------------------------------------")
    print("\n")
    print("Mesh successfully exported to file:\n ", operator.filepath)
    print("====================================================================")
authors: ["[email protected]"] | author: ""

---

blob_id: 8a713a117dc1862bde8e05096dccf16502dfd405 | directory_id: 20cb93e6271bf2bac3bb66421a44f81219a4d256 | content_id: 09293a91d9328e71ba1036e9cf7f277f2ffd2d24
path: /p010-p019/p013/p13-01.py | repo_name: rojasreinold/Project-Euler | branch_name: refs/heads/master
detected_licenses: [] | license_type: no_license | snapshot_id: 0d43f04836458d1e77985da6e807e11fecb57b60 | revision_id: 8e3f9ab4ff125d32d5f8f6e8bd0f2fc297da0469
visit_date: 2021-01-15T09:38:41.142886 | revision_date: 2016-09-20T14:49:16 | committer_date: 2016-09-20T14:49:16
github_id: 35,523,428 | star_events_count: 0 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 169 | extension: py
content:
def findSum():
    with open('input.txt') as f:
        # split() yields one token per number; mapping int over the raw
        # string would try to convert single characters (and newlines)
        nums = f.read().split()
    nums = map(int, nums)
    numSum = sum(nums)
    return numSum


data = findSum()
print(data)
authors: ["[email protected]"] | author: ""

---

blob_id: 408ba4fead11ceafb960751bf256d4555bc84bae | directory_id: 03f9339d6eb2437abae55676b2003fe87d7c0891 | content_id: 821c9e198adbd89fc286f689ccc43fb3c4b5f24a
path: /abides/agent/ExchangeAgent.py | repo_name: rom42pla/ai4t_project | branch_name: refs/heads/main
detected_licenses: ["BSD-3-Clause"] | license_type: permissive | snapshot_id: e94a336ebc16f8006f4c7c0ecdc0aac6f011e8ac | revision_id: 6c51b57e54cd824594ff2bfa73c289e2b4898954
visit_date: 2023-02-03T21:58:37.155751 | revision_date: 2020-12-22T10:21:53 | committer_date: 2020-12-22T10:21:53
github_id: 311,788,690 | star_events_count: 1 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 24,003 | extension: py
content:
# The ExchangeAgent expects a numeric agent id, printable name, agent type, timestamp to open and close trading,
# a list of equity symbols for which it should create order books, a frequency at which to archive snapshots
# of its order books, a pipeline delay (in ns) for order activity, the exchange computation delay (in ns),
# the levels of order stream history to maintain per symbol (maintains all orders that led to the last N trades),
# whether to log all order activity to the agent log, and a random state object (already seeded) to use
# for stochasticity.
from agent.FinancialAgent import FinancialAgent
from message.Message import Message
from util.OrderBook import OrderBook
from util.util import log_print

import datetime as dt
import warnings

warnings.simplefilter(action='ignore', category=FutureWarning)
warnings.simplefilter(action='ignore', category=UserWarning)

import pandas as pd

pd.set_option('display.max_rows', 500)

from copy import deepcopy


class ExchangeAgent(FinancialAgent):

    def __init__(self, id, name, type, mkt_open, mkt_close, symbols, book_freq='S', wide_book=False,
                 pipeline_delay=40000,
                 computation_delay=1, stream_history=0, log_orders=False, random_state=None):

        super().__init__(id, name, type, random_state)

        # Do not request repeated wakeup calls.
        self.reschedule = False

        # Store this exchange's open and close times.
        self.mkt_open = mkt_open
        self.mkt_close = mkt_close

        # Right now, only the exchange agent has a parallel processing pipeline delay. This is an additional
        # delay added only to order activity (placing orders, etc) and not simple inquiries (market operating
        # hours, etc).
        self.pipeline_delay = pipeline_delay

        # Computation delay is applied on every wakeup call or message received.
        self.computation_delay = computation_delay

        # The exchange maintains an order stream of all orders leading to the last L trades
        # to support certain agents from the auction literature (GD, HBL, etc).
        self.stream_history = stream_history

        # Log all order activity?
        self.log_orders = log_orders

        # Create an order book for each symbol.
        self.order_books = {}
        for symbol in symbols:
            self.order_books[symbol] = OrderBook(self, symbol)

        # At what frequency will we archive the order books for visualization and analysis?
        self.book_freq = book_freq

        # Store orderbook in wide format? ONLY WORKS with book_freq == 0
        self.wide_book = wide_book

        # The subscription dict is a dictionary with the key = agent ID,
        # value = dict (key = symbol, value = list [levels (no of levels to receive updates for),
        # frequency (min number of ns between messages), last agent update timestamp]
        # e.g. {101 : {'AAPL' : [1, 10, pd.Timestamp(10:00:00)}}
        self.subscription_dict = {}

    # The exchange agent overrides this to obtain a reference to an oracle.
    # This is needed to establish a "last trade price" at open (i.e. an opening
    # price) in case agents query last trade before any simulated trades are made.
    # This can probably go away once we code the opening cross auction.
    def kernelInitializing(self, kernel):
        super().kernelInitializing(kernel)

        self.oracle = self.kernel.oracle

        # Obtain opening prices (in integer cents). These are not noisy right now.
        for symbol in self.order_books:
            try:
                self.order_books[symbol].last_trade = self.oracle.getDailyOpenPrice(symbol, self.mkt_open)
                log_print("Opening price for {} is {}", symbol, self.order_books[symbol].last_trade)
            except AttributeError as e:
                log_print(str(e))

    # The exchange agent overrides this to additionally log the full depth of its
    # order books for the entire day.
    def kernelTerminating(self):
        super().kernelTerminating()

        # If the oracle supports writing the fundamental value series for its
        # symbols, write them to disk.
        if hasattr(self.oracle, 'f_log'):
            for symbol in self.oracle.f_log:
                dfFund = pd.DataFrame(self.oracle.f_log[symbol])
                if not dfFund.empty:
                    dfFund.set_index('FundamentalTime', inplace=True)
                    self.writeLog(dfFund, filename='fundamental_{}'.format(symbol))
                    log_print("Fundamental archival complete.")
        if self.book_freq is None:
            return
        else:
            # Iterate over the order books controlled by this exchange.
            for symbol in self.order_books:
                start_time = dt.datetime.now()
                self.logOrderBookSnapshots(symbol)
                end_time = dt.datetime.now()
                print("Time taken to log the order book: {}".format(end_time - start_time))
                print("Order book archival complete.")

    def receiveMessage(self, currentTime, msg):
        super().receiveMessage(currentTime, msg)

        # Unless the intent of an experiment is to examine computational issues within an Exchange,
        # it will typically have either 1 ns delay (near instant but cannot process multiple orders
        # in the same atomic time unit) or 0 ns delay (can process any number of orders, always in
        # the atomic time unit in which they are received). This is separate from, and additional
        # to, any parallel pipeline delay imposed for order book activity.

        # Note that computation delay MUST be updated before any calls to sendMessage.
        self.setComputationDelay(self.computation_delay)

        # Is the exchange closed? (This block only affects post-close, not pre-open.)
        if currentTime > self.mkt_close:
            # Most messages after close will receive a 'MKT_CLOSED' message in response. A few things
            # might still be processed, like requests for final trade prices or such.
            if msg.body['msg'] in ['LIMIT_ORDER', 'MARKET_ORDER', 'CANCEL_ORDER', 'MODIFY_ORDER']:
                log_print("{} received {}: {}", self.name, msg.body['msg'], msg.body['order'])
                self.sendMessage(msg.body['sender'], Message({"msg": "MKT_CLOSED"}))
                # Don't do any further processing on these messages!
                return
            elif 'QUERY' in msg.body['msg']:
                # Specifically do allow querying after market close, so agents can get the
                # final trade of the day as their "daily close" price for a symbol.
                pass
            else:
                log_print("{} received {}, discarded: market is closed.", self.name, msg.body['msg'])
                self.sendMessage(msg.body['sender'], Message({"msg": "MKT_CLOSED"}))
                # Don't do any further processing on these messages!
                return

        # Log order messages only if that option is configured. Log all other messages.
        if msg.body['msg'] in ['LIMIT_ORDER', 'MARKET_ORDER', 'CANCEL_ORDER', 'MODIFY_ORDER']:
            if self.log_orders: self.logEvent(msg.body['msg'], msg.body['order'].to_dict())
        else:
            self.logEvent(msg.body['msg'], msg.body['sender'])

        # Handle the DATA SUBSCRIPTION request and cancellation messages from the agents.
        if msg.body['msg'] in ["MARKET_DATA_SUBSCRIPTION_REQUEST", "MARKET_DATA_SUBSCRIPTION_CANCELLATION"]:
            log_print("{} received {} request from agent {}", self.name, msg.body['msg'], msg.body['sender'])
            self.updateSubscriptionDict(msg, currentTime)

        # Handle all message types understood by this exchange.
        if msg.body['msg'] == "WHEN_MKT_OPEN":
            log_print("{} received WHEN_MKT_OPEN request from agent {}", self.name, msg.body['sender'])
            # The exchange is permitted to respond to requests for simple immutable data (like "what are your
            # hours?") instantly. This does NOT include anything that queries mutable data, like equity
            # quotes or trades.
            self.setComputationDelay(0)
            self.sendMessage(msg.body['sender'], Message({"msg": "WHEN_MKT_OPEN", "data": self.mkt_open}))
        elif msg.body['msg'] == "WHEN_MKT_CLOSE":
            log_print("{} received WHEN_MKT_CLOSE request from agent {}", self.name, msg.body['sender'])
            # The exchange is permitted to respond to requests for simple immutable data (like "what are your
            # hours?") instantly. This does NOT include anything that queries mutable data, like equity
            # quotes or trades.
            self.setComputationDelay(0)
            self.sendMessage(msg.body['sender'], Message({"msg": "WHEN_MKT_CLOSE", "data": self.mkt_close}))
        elif msg.body['msg'] == "QUERY_LAST_TRADE":
            symbol = msg.body['symbol']
            if symbol not in self.order_books:
                log_print("Last trade request discarded. Unknown symbol: {}", symbol)
            else:
                log_print("{} received QUERY_LAST_TRADE ({}) request from agent {}", self.name, symbol,
                          msg.body['sender'])
                # Return the single last executed trade price (currently not volume) for the requested symbol.
                # This will return the average share price if multiple executions resulted from a single order.
                self.sendMessage(msg.body['sender'], Message({"msg": "QUERY_LAST_TRADE", "symbol": symbol,
                                                              "data": self.order_books[symbol].last_trade,
                                                              "mkt_closed": True if currentTime > self.mkt_close else False}))
        elif msg.body['msg'] == "QUERY_SPREAD":
            symbol = msg.body['symbol']
            depth = msg.body['depth']
            if symbol not in self.order_books:
                log_print("Bid-ask spread request discarded. Unknown symbol: {}", symbol)
            else:
                log_print("{} received QUERY_SPREAD ({}:{}) request from agent {}", self.name, symbol, depth,
                          msg.body['sender'])
                # Return the requested depth on both sides of the order book for the requested symbol.
                # Returns price levels and aggregated volume at each level (not individual orders).
                self.sendMessage(msg.body['sender'], Message({"msg": "QUERY_SPREAD", "symbol": symbol, "depth": depth,
                                                              "bids": self.order_books[symbol].getInsideBids(depth),
                                                              "asks": self.order_books[symbol].getInsideAsks(depth),
                                                              "data": self.order_books[symbol].last_trade,
                                                              "mkt_closed": True if currentTime > self.mkt_close else False,
                                                              "book": ''}))
                # It is possible to also send the pretty-printed order book to the agent for logging, but forcing pretty-printing
                # of a large order book is very slow, so we should only do it with good reason. We don't currently
                # have a configurable option for it.
                # "book": self.order_books[symbol].prettyPrint(silent=True) }))
        elif msg.body['msg'] == "QUERY_ORDER_STREAM":
            symbol = msg.body['symbol']
            length = msg.body['length']
            if symbol not in self.order_books:
                log_print("Order stream request discarded. Unknown symbol: {}", symbol)
            else:
                log_print("{} received QUERY_ORDER_STREAM ({}:{}) request from agent {}", self.name, symbol, length,
                          msg.body['sender'])
                # We return indices [1:length] inclusive because the agent will want "orders leading up to the last
                # L trades", and the items under index 0 are more recent than the last trade.
                self.sendMessage(msg.body['sender'],
                                 Message({"msg": "QUERY_ORDER_STREAM", "symbol": symbol, "length": length,
                                          "mkt_closed": True if currentTime > self.mkt_close else False,
                                          "orders": self.order_books[symbol].history[1:length + 1]
                                          }))
        elif msg.body['msg'] == 'QUERY_TRANSACTED_VOLUME':
            symbol = msg.body['symbol']
            lookback_period = msg.body['lookback_period']
            if symbol not in self.order_books:
                log_print("Order stream request discarded. Unknown symbol: {}", symbol)
            else:
                log_print("{} received QUERY_TRANSACTED_VOLUME ({}:{}) request from agent {}", self.name, symbol,
                          lookback_period,
                          msg.body['sender'])
                self.sendMessage(msg.body['sender'], Message({"msg": "QUERY_TRANSACTED_VOLUME", "symbol": symbol,
                                                              "transacted_volume": self.order_books[
                                                                  symbol].get_transacted_volume(lookback_period),
                                                              "mkt_closed": True if currentTime > self.mkt_close else False
                                                              }))
        elif msg.body['msg'] == "LIMIT_ORDER":
            order = msg.body['order']
            log_print("{} received LIMIT_ORDER: {}", self.name, order)
            if order.symbol not in self.order_books:
                log_print("Limit Order discarded. Unknown symbol: {}", order.symbol)
            else:
                # Hand the order to the order book for processing.
                self.order_books[order.symbol].handleLimitOrder(deepcopy(order))
                self.publishOrderBookData()
        elif msg.body['msg'] == "MARKET_ORDER":
            order = msg.body['order']
            log_print("{} received MARKET_ORDER: {}", self.name, order)
            if order.symbol not in self.order_books:
                log_print("Market Order discarded. Unknown symbol: {}", order.symbol)
            else:
                # Hand the market order to the order book for processing.
                self.order_books[order.symbol].handleMarketOrder(deepcopy(order))
                self.publishOrderBookData()
        elif msg.body['msg'] == "CANCEL_ORDER":
            # Note: this is somewhat open to abuse, as in theory agents could cancel other agents' orders.
            # An agent could also become confused if they receive a (partial) execution on an order they
            # then successfully cancel, but receive the cancel confirmation first. Things to think about
            # for later...
            order = msg.body['order']
            log_print("{} received CANCEL_ORDER: {}", self.name, order)
            if order.symbol not in self.order_books:
                log_print("Cancellation request discarded. Unknown symbol: {}", order.symbol)
            else:
                # Hand the order to the order book for processing.
                self.order_books[order.symbol].cancelOrder(deepcopy(order))
                self.publishOrderBookData()
        elif msg.body['msg'] == 'MODIFY_ORDER':
            # Replace an existing order with a modified order. There could be some timing issues
            # here. What if an order is partially executed, but the submitting agent has not
            # yet received the notification, and submits a modification to the quantity of the
            # (already partially executed) order? I guess it is okay if we just think of this
            # as "delete and then add new" and make it the agent's problem if anything weird
            # happens.
            order = msg.body['order']
            new_order = msg.body['new_order']
            log_print("{} received MODIFY_ORDER: {}, new order: {}".format(self.name, order, new_order))
            if order.symbol not in self.order_books:
                log_print("Modification request discarded. Unknown symbol: {}".format(order.symbol))
            else:
                self.order_books[order.symbol].modifyOrder(deepcopy(order), deepcopy(new_order))
                self.publishOrderBookData()

    def updateSubscriptionDict(self, msg, currentTime):
        # The subscription dict is a dictionary with the key = agent ID,
        # value = dict (key = symbol, value = list [levels (no of levels to receive updates for),
        # frequency (min number of ns between messages), last agent update timestamp]
        # e.g. {101 : {'AAPL' : [1, 10, pd.Timestamp(10:00:00)}}
        if msg.body['msg'] == "MARKET_DATA_SUBSCRIPTION_REQUEST":
            agent_id, symbol, levels, freq = msg.body['sender'], msg.body['symbol'], msg.body['levels'], msg.body[
                'freq']
            self.subscription_dict[agent_id] = {symbol: [levels, freq, currentTime]}
        elif msg.body['msg'] == "MARKET_DATA_SUBSCRIPTION_CANCELLATION":
            agent_id, symbol = msg.body['sender'], msg.body['symbol']
            del self.subscription_dict[agent_id][symbol]

    def publishOrderBookData(self):
        '''
        The exchange agent sends an order book update to the agents using the subscription API if one of the
        following conditions is met:
        1) the agent requested ALL order book updates (freq == 0)
        2) the order book update timestamp is later than the last time the agent was updated, AND it exceeds
        the last agent update timestamp by at least the period specified in the freq parameter.
        '''
        for agent_id, params in self.subscription_dict.items():
            for symbol, values in params.items():
                levels, freq, last_agent_update = values[0], values[1], values[2]
                orderbook_last_update = self.order_books[symbol].last_update_ts
                if (freq == 0) or \
                        ((orderbook_last_update > last_agent_update) and (
                                (orderbook_last_update - last_agent_update).delta >= freq)):
                    self.sendMessage(agent_id, Message({"msg": "MARKET_DATA",
                                                        "symbol": symbol,
                                                        "bids": self.order_books[symbol].getInsideBids(levels),
                                                        "asks": self.order_books[symbol].getInsideAsks(levels),
                                                        "last_transaction": self.order_books[symbol].last_trade,
                                                        "exchange_ts": self.currentTime}))
                    self.subscription_dict[agent_id][symbol][2] = orderbook_last_update

    def logOrderBookSnapshots(self, symbol):
        """
        Log full depth quotes (price, volume) from this order book at some pre-determined frequency. Here we are looking at
        the actual log for this order book (i.e. are there snapshots to export, independent of the requested frequency).
        """

        def get_quote_range_iterator(s):
            """ Helper method for order book logging. Takes pandas Series and returns python range() from first to last
            element.
            """
            forbidden_values = [0, 19999900]  # TODO: Put constant value in more sensible place!
            quotes = sorted(s)
            for val in forbidden_values:
                try:
                    quotes.remove(val)
                except ValueError:
                    pass
            return quotes

        book = self.order_books[symbol]

        if book.book_log:
            print("Logging order book to file...")
            dfLog = book.book_log_to_df()
            dfLog.set_index('QuoteTime', inplace=True)
            dfLog = dfLog[~dfLog.index.duplicated(keep='last')]
            dfLog.sort_index(inplace=True)

            if str(self.book_freq).isdigit() and int(self.book_freq) == 0:  # Save all possible information
                # Get the full range of quotes at the finest possible resolution.
                quotes = get_quote_range_iterator(dfLog.columns.unique())

                # Restructure the log to have multi-level rows of all possible pairs of time and quote
                # with volume as the only column.
                if not self.wide_book:
                    filledIndex = pd.MultiIndex.from_product([dfLog.index, quotes], names=['time', 'quote'])
                    dfLog = dfLog.stack()
                    dfLog = dfLog.reindex(filledIndex)
                filename = f'ORDERBOOK_{symbol}_FULL'
            else:  # Sample at frequency self.book_freq
                # With multiple quotes in a nanosecond, use the last one, then resample to the requested freq.
                dfLog = dfLog.resample(self.book_freq).ffill()
                dfLog.sort_index(inplace=True)

                # Create a fully populated index at the desired frequency from market open to close.
                # Then project the logged data into this complete index.
                time_idx = pd.date_range(self.mkt_open, self.mkt_close, freq=self.book_freq, closed='right')
                dfLog = dfLog.reindex(time_idx, method='ffill')
                dfLog.sort_index(inplace=True)

                if not self.wide_book:
                    dfLog = dfLog.stack()
                    dfLog.sort_index(inplace=True)

                    # Get the full range of quotes at the finest possible resolution.
                    quotes = get_quote_range_iterator(dfLog.index.get_level_values(1).unique())

                    # Restructure the log to have multi-level rows of all possible pairs of time and quote
                    # with volume as the only column.
                    filledIndex = pd.MultiIndex.from_product([time_idx, quotes], names=['time', 'quote'])
                    dfLog = dfLog.reindex(filledIndex)
                filename = f'ORDERBOOK_{symbol}_FREQ_{self.book_freq}'

            # Final cleanup
            if not self.wide_book:
                dfLog = dfLog.rename('Volume')
                df = pd.DataFrame({"Volume": dfLog})
                df.index = dfLog.index
            else:
                df = dfLog
                df = df.reindex(sorted(df.columns), axis=1)

            # Archive the order book snapshots directly to a file named with the symbol, rather than
            # to the exchange agent log.
            self.writeLog(df, filename=filename)
            print("Order book logging complete!")

    def sendMessage(self, recipientID, msg):
        # The ExchangeAgent automatically applies appropriate parallel processing pipeline delay
        # to those message types which require it.
        # TODO: probably organize the order types into categories once there are more, so we can
        # take action by category (e.g. ORDER-related messages) instead of enumerating all message
        # types to be affected.
        if msg.body['msg'] in ['ORDER_ACCEPTED', 'ORDER_CANCELLED', 'ORDER_EXECUTED']:
            # Messages that require order book modification (not simple queries) incur the additional
            # parallel processing delay as configured.
            super().sendMessage(recipientID, msg, delay=self.pipeline_delay)
            if self.log_orders: self.logEvent(msg.body['msg'], msg.body['order'].to_dict())
        else:
            # Other message types incur only the currently-configured computation delay for this agent.
            super().sendMessage(recipientID, msg)

    # Simple accessor methods for the market open and close times.
    def getMarketOpen(self):
        return self.mkt_open

    def getMarketClose(self):
        return self.mkt_close
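# A minimal construction sketch inferred from the constructor signature above;
# the id, timestamps and symbol list are illustrative, not from the original
# project:
#     import pandas as pd
#     exchange = ExchangeAgent(id=0, name="EXCHANGE", type="ExchangeAgent",
#                              mkt_open=pd.Timestamp("2020-01-01 09:30:00"),
#                              mkt_close=pd.Timestamp("2020-01-01 16:00:00"),
#                              symbols=["ABM"], book_freq="S",
#                              log_orders=False, random_state=None)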
authors: ["[email protected]"] | author: ""

---

blob_id: 4b5efebc4389a93d8965252d6fc514aa5bf9f279 | directory_id: 636f71a073d3daccbd48901dbb0107178001d157 | content_id: 8eb2b22b4b3cb066f4d05fa0f0ea0b449cb0c553
path: /polls/migrations/0004_auto_20180529_1821.py | repo_name: sakten/DjangoPlay | branch_name: refs/heads/master
detected_licenses: [] | license_type: no_license | snapshot_id: 414a5a7d9830296488c5d2f2fab2f269f122ea57 | revision_id: 84d1f8107cefd7ddb9bcda4750cdaa9adee07e1e
visit_date: 2020-03-18T21:37:22.014156 | revision_date: 2018-05-29T14:57:19 | committer_date: 2018-05-29T14:57:19
github_id: 135,290,427 | star_events_count: 0 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 575 | extension: py
content:
# Generated by Django 2.0.5 on 2018-05-29 13:21

from django.db import migrations, models
import polls.models


class Migration(migrations.Migration):

    dependencies = [
        ('polls', '0003_auto_20180529_1649'),
    ]

    operations = [
        migrations.AlterField(
            model_name='player',
            name='playerClass',
            field=models.CharField(choices=[(polls.models.PlayerClassChoise('Rogue'), 'Rogue'), (polls.models.PlayerClassChoise('Warior'), 'Warior'), (polls.models.PlayerClassChoise('Mage'), 'Mage')], max_length=10),
        ),
    ]
authors: ["[email protected]"] | author: ""

---

blob_id: 072d03ff2ba39018825c54d08383c435c59fd1c1 | directory_id: a3f075cf0a563f5db4ee7d7523c8d73c33387f31 | content_id: 889c7dd4084dfb6c2b5268f5da94685d75fac441
path: /dev/arcadiy/users/admin.py | repo_name: Siragle/arcadiy | branch_name: refs/heads/master
detected_licenses: [] | license_type: no_license | snapshot_id: bd110d264e50e80021c3b8b009e9cf65859bd50b | revision_id: f12d5c9fce5f23ff244fb69a8dc48afec0927178
visit_date: 2020-06-19T11:58:54.385351 | revision_date: 2016-12-10T09:58:48 | committer_date: 2016-12-10T09:58:48
github_id: 74,906,455 | star_events_count: 1 | fork_events_count: 2 | gha_license_id: null | gha_event_created_at: 2016-11-28T01:50:49 | gha_created_at: 2016-11-27T18:33:19 | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 87 | extension: py
content:
from django.contrib import admin
from .models import Users
admin.site.register(Users)
|
[
"[email protected]"
] | |
48ad6ffb2dfecc64d85b489070c222650a5850db
|
d0fea7640a52077780689eda4e4b8ffde957bfc9
|
/common/models/food/WxShareHistory.py
|
4b50ecb9f01abd7dae250faa969e03b8c517b63a
|
[] |
no_license
|
AClearZhang/orderself
|
0367c7fee8d09885b6b0ec2fb0f07018d761c7b8
|
0ea57aa83ca99261af400c2fb500047763f4b80d
|
refs/heads/master
| 2020-05-24T15:44:24.465007 | 2019-06-16T16:58:29 | 2019-06-16T16:58:29 | 187,338,548 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 537 |
py
|
# coding: utf-8
from sqlalchemy import Column, DateTime, Integer, String
from sqlalchemy.schema import FetchedValue
from application import db
class WxShareHistory(db.Model):
__tablename__ = 'wx_share_history'
id = db.Column(db.Integer, primary_key=True)
member_id = db.Column(db.Integer, nullable=False, server_default=db.FetchedValue())
share_url = db.Column(db.String(200), nullable=False, server_default=db.FetchedValue())
created_time = db.Column(db.DateTime, nullable=False, server_default=db.FetchedValue())
|
[
"[email protected]"
] | |
992516ce3652fb5fdf7cfa11f3646e3c6acdf4b7
|
e1541afdd52726925f17e65ff72b6299524b2f46
|
/android/make_qr_code.py
|
847cc3075fa94856e8cbee747ebd0782e382fa29
|
[
"Apache-2.0"
] |
permissive
|
thedod/loplop
|
c4bcc9074d40ac6c909b5985a168f5af7481d12a
|
f73e7409531bcb4b27dfd1f2f97080b4703fdb59
|
refs/heads/master
| 2022-10-01T21:00:59.482434 | 2022-09-11T06:09:16 | 2022-09-11T06:09:16 | 6,298,872 | 0 | 0 | null | 2012-11-13T19:30:46 | 2012-10-19T17:32:56 |
Python
|
UTF-8
|
Python
| false | false | 965 |
py
|
# Based on the SConscript of oplop's SL4A implementation
# https://code.google.com/p/oplop/source/browse/SL4A/SConscript
from __future__ import with_statement
import urllib
import urllib2
def qr_code(target, source):
"""Generate the QR code for the SL4A script using the Google Chart API.
The docs on the chart API can be found at
http://code.google.com/apis/chart/docs/gallery/qr_codes.html
The zxing project's online QR code generator is at
http://zxing.appspot.com/generator/
"""
google_charts_api = 'http://chart.apis.google.com/chart'
args = {'cht': 'qr', 'chs': '391x391'}
with open(str(source), 'rb') as file:
args['chl'] = 'loplop.py\n' + file.read()
query = urllib.urlencode(args)
url = urllib2.urlopen('?'.join([google_charts_api, query]))
with open(str(target), 'wb') as file:
file.write(url.read())
if __name__=='__main__':
qr_code('qr_code_loplop_sl4a.png', 'loplop.sl4a.py')
|
[
"[email protected]"
] | |
effd970a9c63169de112e9a092fee62ef99c0409
|
10a9b06ae623c2ea6ecfc1b62d9e3ea60ed85cad
|
/route.py
|
702bab237bdec3c8d6ebc751c48e9b31a85028d0
|
[] |
no_license
|
giselezrossi/python_flask_crud
|
f9138a7001b2ba4b790d0e07fd335358fc77482a
|
84acd5b4e7931bec2a5cd7cff00680f8a2563014
|
refs/heads/master
| 2020-08-19T09:47:43.155317 | 2019-10-18T00:01:47 | 2019-10-18T00:01:47 | 215,906,638 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 525 |
py
|
from flask import Flask, request, jsonify
app = Flask(__name__)
@app.route("/")
def index():
return "Hello!"
@app.route("/minhaaplicacao", methods=['GET'])
def get():
return "teste"
@app.route("/minhaaplicacao", methods=['POST'])
def post():
data = request.json
return jsonify(data)
@app.route("/minhaaplicacao/<int:id>", methods=['PUT'])
def put(id):
pass
@app.route("/minhaaplicacao/<int:id>", methods=['DELETE'])
def delete(id):
print(id)
if __name__ == '__main__':
app.run(debug=True)
|
[
"[email protected]"
] | |
199693aef1523a92dec280c913acd26fa75b684e
|
e21e7623d99312dc8a4c0eedc0febb22d24c7918
|
/venv/bin/futurize
|
fa21932432d4e2703fbdc5920b7e4f2b8d2c7dd4
|
[] |
no_license
|
axelonet/E-voting-system-on-blockchain
|
49aa9b2b45f75e85ed9de4d113849c1f3d95dd1d
|
2651bab50f29a2b68ad17b2d2240279af2f24419
|
refs/heads/master
| 2023-01-04T04:03:44.817356 | 2020-04-15T06:06:36 | 2020-04-15T06:06:36 | 255,822,230 | 1 | 0 | null | 2020-10-25T11:52:19 | 2020-04-15T06:12:39 | null |
UTF-8
|
Python
| false | false | 446 |
#!"/Users/anmolpanwar/Documents/PycharmProjects/python practice/venv/bin/python"
# EASY-INSTALL-ENTRY-SCRIPT: 'future==0.18.2','console_scripts','futurize'
__requires__ = 'future==0.18.2'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('future==0.18.2', 'console_scripts', 'futurize')()
)
|
[
"[email protected]"
] | ||
8c2adeb23592943fc36a72d83ef45349225a505f
|
e8a9ae801883cb9e3801ce27cf108897f3ff51f0
|
/demo1.py
|
f2c3de5723ca0769be38d960e4179513dfde0ae9
|
[] |
no_license
|
vinay4goud/python_list
|
c12e6be861f704ee25bde7bf96e9706b4d1ae883
|
2f1cda1937d745c56679ea78cd6c3d31caff9f00
|
refs/heads/master
| 2020-04-19T11:38:15.573863 | 2019-04-02T11:25:02 | 2019-04-02T11:25:02 | 168,172,458 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,605 |
py
|
"""
reads a chat export file, extracting datetime, name and content from each line
"""
# importing regular expression to execute the string as per requirment
import re
import datetime
# Classes provide a means of bundling data and functionality together
class expresions:
# function to assign values
def __init__(self, filesinfo):
self.fi = filesinfo
    # property that reads the file line by line and parses each entry
@property
def dataf(self):
"""
        Opens the file in read mode and reads it line by line.
        :return: list of dicts with 'datetime', 'name' and 'content' per line
"""
with open(self.fi, 'r') as filez:
contentz = filez.readlines()
list_file = []
for i in contentz:
dctionary_file = {}
datez = re.findall("\d{2}/\d{2}/\d{2}|\d{1}/\d{2}/\d{2} \d{1}:\d{2} am| \d{2}:\d{2} am|\d{1}:\d{2} pm|\d{2}:\d{2} pm",i)
#datez = re.findall('(\d+/\d+/\d+),\s(\d{2}\:\d{2}\s(?:AM|PM|am|pm))')
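            # Assumed export line shape this parser targets, e.g.:
            #   "12/03/19, 10:15 am - Alice: see you at 5"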
# read line if it has date in it
if datez:
#date_obj1 = datetime.datetime.strptime(" ".join(datez), '%d/%m/%y %I:%M %p ')
# split the line
match = i.split("-", 1)
# read date from list
try :
date_obj1 = datetime.datetime.strptime(match[0], '%d/%m/%y, %H:%M ')
except:
date_obj1 = datetime.datetime.strptime(match[0], '%d/%m/%y, %I:%M %p ')
if ':' not in match[1]:
matchsecual = []
matchsecual.append('none')
matchsecual.append (match[1])
else:
# split list into two
matchsecual= match[1].split(":",1)
# add key and values to empty dictionary
dctionary_file['datetime']= date_obj1
dctionary_file['name'] = matchsecual[0]
dctionary_file['content'] = matchsecual[1].rstrip()
#print(dctionary_file)
# add dictionay to list
list_file.append(dctionary_file )
# print the list
return (list_file)
# passing value
wfile = expresions("Chat with.txt")
dataf = wfile.dataf
print(dataf)
w_file =expresions("WhatsApp Chat with Builders real estate.txt")
datafile = w_file.dataf
print(datafile)
# infrom = wfile.extractinformation()
# print (f'{infrom}')
|
[
"[email protected]"
] | |
2d4093ec9864e6829b7393915632ac707442a1c5
|
c53677b8a1c0827954dc9cc3143ea4cd6812d3d8
|
/avito_russia/phonenumbers.py
|
9b1ed8b7ac3005eb5dd056f32288c66c35d0c0b4
|
[
"Apache-2.0"
] |
permissive
|
kubikrubikvkube/python_avito_buddy
|
c69ca897fdd075901de85f1370ca176a04addc8f
|
3bae8dbc30820f2afd00abbb559d70c0e55e8dfd
|
refs/heads/master
| 2022-04-11T18:11:52.527030 | 2020-03-21T15:17:52 | 2020-03-21T15:17:52 | 189,567,037 | 4 | 1 |
Apache-2.0
| 2019-07-17T14:21:31 | 2019-05-31T09:27:11 |
Python
|
UTF-8
|
Python
| false | false | 237 |
py
|
class PhoneNumberValidator:
@staticmethod
def is_valid(phonenumber: str) -> bool:
if phonenumber and phonenumber.startswith("7") and len(phonenumber) == 11:
return True
else:
return False
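# Usage sketch (illustrative numbers, not from the original module):
#   PhoneNumberValidator.is_valid('79261234567')  # True: 11 digits, starts with 7
#   PhoneNumberValidator.is_valid('89261234567')  # False: does not start with 7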
|
[
"[email protected]"
] | |
d86ed4f17212a8f42d127913036943b12e29ab0c
|
e3cce830548e9f1f88df5cbf347df610b842fbca
|
/ass23.py
|
9d4d86dc12d88ef2de5c12353441411cb1060da2
|
[] |
no_license
|
TapaniAlastalo/python_practices
|
d666de675ffd1e524b436f8cc790557505d2c47b
|
54dc3eb7a1bd2a6e3ccf9d5b901d8d48c16b8eb9
|
refs/heads/master
| 2020-12-12T12:15:54.775583 | 2020-01-15T16:36:22 | 2020-01-15T16:36:22 | 234,125,543 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 388 |
py
|
def calculator(command, first, second):
if command == "add":
return first + second
elif command == "sub":
return first - second
elif command == "multiply":
return first * second
else:
return 0
print(calculator("add", 1, 2)) #should print 3
print(calculator("sub", 1, 2)) #should print -1
print(calculator("multiply", 1, 2)) #should print 2
|
[
"[email protected]"
] | |
14e8824bedd651f4e64c978ea76622167087b5e4
|
7c9dfab9ee71de58544190fcdb8c145fcc73be20
|
/keras_style_transfer/library/style_transfer.py
|
412031162ead989af4fe10510b4a7548f6218b10
|
[
"MIT"
] |
permissive
|
javad-sajady/keras-style-transfer
|
1b7b2258729d90fa9716b20aafa3a759ec64fb87
|
2cb755498bc64d26bedc2e660604eee48fa15aa3
|
refs/heads/master
| 2021-09-07T00:17:00.014549 | 2018-02-14T02:32:06 | 2018-02-14T02:32:06 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 9,004 |
py
|
from keras_style_transfer.library.nst_utils import *
from keras_style_transfer.library.download_utils import download_vgg19
import numpy as np
import tensorflow as tf
def compute_content_cost(a_C, a_G):
"""
Computes the content cost
Arguments:
a_C -- tensor of dimension (1, n_C, n_H, n_W), hidden layer activations representing content of the image C
a_G -- tensor of dimension (1, n_C, n_H, n_W), hidden layer activations representing content of the image G
Returns:
J_content -- scalar that you compute using equation 1 above.
"""
# Retrieve dimensions from a_G (≈1 line)
m, n_H, n_W, n_C = a_G.get_shape().as_list()
# Reshape a_C and a_G (≈2 lines)
a_C_unrolled = tf.reshape(tf.transpose(a_C, (3, 1, 2, 0)), shape=(n_H * n_W, n_C))
a_G_unrolled = tf.reshape(tf.transpose(a_G, (3, 1, 2, 0)), shape=(n_H * n_W, n_C))
# compute the cost with tensorflow (≈1 line)
J_content = tf.divide(tf.reduce_sum(tf.square(tf.subtract(a_C_unrolled, a_G_unrolled))), 4 * n_H * n_W * n_C)
return J_content
def compute_content_cost_test():
tf.reset_default_graph()
with tf.Session() as test:
tf.set_random_seed(1)
a_C = tf.random_normal([1, 4, 4, 3], mean=1, stddev=4)
a_G = tf.random_normal([1, 4, 4, 3], mean=1, stddev=4)
J_content = compute_content_cost(a_C, a_G)
print("J_content = " + str(J_content.eval()))
def gram_matrix(A):
"""
Argument:
A -- matrix of shape (n_C, n_H*n_W)
Returns:
GA -- Gram matrix of A, of shape (n_C, n_C)
"""
GA = tf.matmul(A, tf.transpose(A))
return GA
def gram_matrix_test():
tf.reset_default_graph()
with tf.Session() as test:
tf.set_random_seed(1)
A = tf.random_normal([3, 2 * 1], mean=1, stddev=4)
GA = gram_matrix(A)
print("GA = " + str(GA.eval()))
def compute_layer_style_cost(a_S, a_G):
"""
Arguments:
a_S -- tensor of dimension (1, n_C, n_H, n_W), hidden layer activations representing style of the image S
a_G -- tensor of dimension (1, n_C, n_H, n_W), hidden layer activations representing style of the image G
Returns:
J_style_layer -- tensor representing a scalar value, style cost defined above by equation (2)
"""
# Retrieve dimensions from a_G (≈1 line)
m, n_H, n_W, n_C = a_G.get_shape().as_list()
# Reshape the images to have them of shape (n_C, n_H*n_W) (≈2 lines)
a_S = tf.reshape(tf.transpose(a_S, (3, 1, 2, 0)), shape=(n_C, n_H * n_W))
a_G = tf.reshape(tf.transpose(a_G, (3, 1, 2, 0)), shape=(n_C, n_H * n_W))
# Computing gram_matrices for both images S and G (≈2 lines)
GS = gram_matrix(a_S)
GG = gram_matrix(a_G)
# Computing the loss (≈1 line)
J_style_layer = tf.divide(tf.reduce_sum(tf.square(tf.subtract(GS, GG))), 4 * n_C * n_C * (n_H * n_W) * (n_H * n_W))
return J_style_layer
def compute_layer_style_cost_test():
tf.reset_default_graph()
with tf.Session() as test:
tf.set_random_seed(1)
a_S = tf.random_normal([1, 4, 4, 3], mean=1, stddev=4)
a_G = tf.random_normal([1, 4, 4, 3], mean=1, stddev=4)
J_style_layer = compute_layer_style_cost(a_S, a_G)
print("J_style_layer = " + str(J_style_layer.eval()))
def compute_style_cost(sess, model, STYLE_LAYERS):
"""
Computes the overall style cost from several chosen layers
Arguments:
model -- our tensorflow model
STYLE_LAYERS -- A python list containing:
- the names of the layers we would like to extract style from
- a coefficient for each of them
Returns:
J_style -- tensor representing a scalar value, style cost defined above by equation (2)
"""
# initialize the overall style cost
J_style = 0
for layer_name, coeff in STYLE_LAYERS:
# Select the output tensor of the currently selected layer
out = model[layer_name]
# Set a_S to be the hidden layer activation from the layer we have selected, by running the session on out
a_S = sess.run(out)
# Set a_G to be the hidden layer activation from same layer. Here, a_G references model[layer_name]
# and isn't evaluated yet. Later in the code, we'll assign the image G as the model input, so that
# when we run the session, this will be the activations drawn from the appropriate layer, with G as input.
a_G = out
# Compute style_cost for the current layer
J_style_layer = compute_layer_style_cost(a_S, a_G)
# Add coeff * J_style_layer of this layer to overall style cost
J_style += coeff * J_style_layer
return J_style
def total_cost(J_content, J_style, alpha=10, beta=40):
"""
Computes the total cost function
Arguments:
J_content -- content cost coded above
J_style -- style cost coded above
alpha -- hyperparameter weighting the importance of the content cost
beta -- hyperparameter weighting the importance of the style cost
Returns:
J -- total cost as defined by the formula above.
"""
J = J_content * alpha + J_style * beta
return J
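# e.g. with the defaults alpha=10, beta=40: J_content = 1.5 and J_style = 2.0
# give J = 10 * 1.5 + 40 * 2.0 = 95.0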
def total_cost_test():
tf.reset_default_graph()
with tf.Session() as test:
np.random.seed(3)
J_content = np.random.randn()
J_style = np.random.randn()
J = total_cost(J_content, J_style)
print("J = " + str(J))
class StyleTransfer(object):
def __init__(self, vgg19_model_path):
self.model = None
self.vgg19_model_path = vgg19_model_path
def fit_and_transform(self, content_image, style_image, output_dir_path, num_iterations=200):
STYLE_LAYERS = [
('conv1_1', 0.2),
('conv2_1', 0.2),
('conv3_1', 0.2),
('conv4_1', 0.2),
('conv5_1', 0.2)]
content_image = reshape_and_normalize_image(content_image)
style_image = reshape_and_normalize_image(style_image)
input_image = generate_noise_image(content_image)
generated_image = input_image
# Reset the graph
tf.reset_default_graph()
sess = tf.InteractiveSession()
download_vgg19(self.vgg19_model_path)
self.model = load_vgg_model(self.vgg19_model_path)
print(self.model)
# Assign the content image to be the input of the VGG model.
sess.run(self.model['input'].assign(content_image))
# Select the output tensor of layer conv4_2
out = self.model['conv4_2']
# Set a_C to be the hidden layer activation from the layer we have selected
a_C = sess.run(out)
# Set a_G to be the hidden layer activation from same layer. Here, a_G references model['conv4_2']
# and isn't evaluated yet. Later in the code, we'll assign the image G as the model input, so that
# when we run the session, this will be the activations drawn from the appropriate layer, with G as input.
a_G = out
# Compute the content cost
J_content = compute_content_cost(a_C, a_G)
# Assign the input of the model to be the "style" image
sess.run(self.model['input'].assign(style_image))
# Compute the style cost
J_style = compute_style_cost(sess, self.model, STYLE_LAYERS)
J = total_cost(J_content, J_style)
# define optimizer (1 line)
optimizer = tf.train.AdamOptimizer(2.0)
# define train_step (1 line)
train_step = optimizer.minimize(J)
# Initialize global variables (you need to run the session on the initializer)
sess.run(tf.global_variables_initializer())
# Run the noisy input image (initial generated image) through the model. Use assign().
sess.run(self.model['input'].assign(input_image))
for i in range(num_iterations):
# Run the session on the train_step to minimize the total cost
sess.run(train_step)
# Compute the generated image by running the session on the current model['input']
generated_image = sess.run(self.model['input'])
# Print every 2 iteration.
if i % 2 == 0:
Jt, Jc, Js = sess.run([J, J_content, J_style])
print("Iteration " + str(i) + " :")
print("total cost = " + str(Jt))
print("content cost = " + str(Jc))
print("style cost = " + str(Js))
# Save every 20 iteration.
if i % 20 == 0:
# save current generated image in the "/output" directory
save_image(output_dir_path + "/" + str(i) + ".png", generated_image)
# save last generated image
save_image(output_dir_path + '/generated_image.jpg', generated_image)
return generated_image
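# Usage sketch (paths and variable names are assumptions, not part of the API):
#   st = StyleTransfer('models/imagenet-vgg-verydeep-19.mat')
#   out = st.fit_and_transform(content_img, style_img, 'output', num_iterations=200)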
def main():
compute_content_cost_test()
gram_matrix_test()
compute_layer_style_cost_test()
total_cost_test()
if __name__ == '__main__':
main()
|
[
"[email protected]"
] | |
151fd4393177bc88c5958eb560822b38b133d9cb
|
815ebbc235d2daf2f69e922998abcfd4a2cb5ae0
|
/omexml_write.py
|
8b2fad9b2cd2bf97aaa2031aa76ff8246570751d
|
[
"BSD-2-Clause",
"LicenseRef-scancode-warranty-disclaimer"
] |
permissive
|
sebi06/BioFormatsRead
|
f573638d58fefa73f78a495da85c44eb39aafc29
|
d8297fba2f76152ef9d2056c03fef226c8d35420
|
refs/heads/master
| 2021-06-10T00:17:30.083911 | 2021-05-21T14:58:28 | 2021-05-21T14:58:28 | 36,077,548 | 13 | 7 | null | 2017-10-04T14:42:35 | 2015-05-22T14:48:21 |
Python
|
UTF-8
|
Python
| false | false | 602 |
py
|
import numpy as np
import bioformats
import bioformats.omexml as ome
import javabridge as jv
filename = r'testdata\T=30_Z=23_C=2_x=217_Y=94.czi'
urlnamespace = 'http://www.openmicroscopy.org/Schemas/OME/2016-06'
# Start JVM for bioformats
bfpackage = r'c:\Users\m1srh\Documents\Software\Bioformats\5.9.2\bioformats_package.jar'
jars = jv.JARS + [bfpackage]
jv.start_vm(class_path=jars, run_headless=True, max_heap_size='4G')
xml_metadata = bioformats.get_omexml_metadata(path=filename)
metadata = bioformats.OMEXML(xml_metadata)
omexml = ome.OMEXML()
jv.kill_vm()
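# Sketch of a typical follow-up (an assumption, not in the original script):
# read basic dimensions from the parsed metadata before the VM is killed, e.g.
#   pixels = metadata.image(0).Pixels
#   print(pixels.SizeX, pixels.SizeY, pixels.SizeC, pixels.SizeZ, pixels.SizeT)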
|
[
"[email protected]"
] | |
f7b624c2386cf18bed60fd718766707f607adf30
|
8de952cde09fd46fb117b5337ce41fff3a2476bf
|
/Priyanka and Toys
|
8275cb3af0edba6eab4811a179c1b6de5587dbf4
|
[] |
no_license
|
saikiran-007/Hackerrank-coding-solutions
|
bdbeb23c0fe3fa90091e75a05859742a5aee511c
|
15496c96a9b9e2e198d73289adc68d03f14afa46
|
refs/heads/master
| 2023-06-27T03:56:27.831496 | 2021-07-28T09:56:12 | 2021-07-28T09:56:12 | 256,973,395 | 3 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 609 |
#!/bin/python3
import math
import os
import random
import re
import sys
# Complete the toys function below.
def toys(w):
    # Greedy strategy: sort the distinct weights, then repeatedly open a new
    # container at the smallest uncovered weight t and skip every weight <= t + 4.
    w = list(set(w))
    w.sort()
    c = 0
    i = 0
    while i < len(w):
        t = w[i]
        j = i + 1
        if j < len(w):
            while w[j] <= t + 4:
                j += 1
                if j == len(w):
                    break
        c += 1
        i = j
    return c
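# Worked example (assumed input): w = [1, 2, 3, 21, 7, 12, 14, 21]
# -> unique sorted: [1, 2, 3, 7, 12, 14, 21]; containers cover [1,5], [7,11],
#    [12,16] and [21,25], so toys(w) == 4.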
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
n = int(input())
w = list(map(int, input().rstrip().split()))
result = toys(w)
fptr.write(str(result) + '\n')
fptr.close()
|
[
"[email protected]"
] | ||
0ba59acfa8f0211a664bd88acd4882ccb1be8081
|
97def1949bca845f40a2fb99fe7496e698f51764
|
/pyomo/contrib/parmest/examples/semibatch/parmest_parallel_example.py
|
ddaa0d303c9eb40f0da17dcab009aebdbfd6c9f5
|
[
"BSD-3-Clause"
] |
permissive
|
flexciton/pyomo
|
e009e5d300d27d943408a1ee5e0e1770d772a7fe
|
817bebc9c10f527263b2b8402fb1c038f1b37cf1
|
refs/heads/master
| 2023-03-03T08:56:22.922613 | 2022-01-18T15:22:57 | 2022-01-18T15:22:57 | 241,679,253 | 1 | 1 |
NOASSERTION
| 2022-04-11T16:48:48 | 2020-02-19T17:24:37 |
Python
|
UTF-8
|
Python
| false | false | 1,845 |
py
|
# ___________________________________________________________________________
#
# Pyomo: Python Optimization Modeling Objects
# Copyright 2017 National Technology and Engineering Solutions of Sandia, LLC
# Under the terms of Contract DE-NA0003525 with National Technology and
# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain
# rights in this software.
# This software is distributed under the 3-clause BSD License.
# ___________________________________________________________________________
"""
The following script can be used to run semibatch parameter estimation in
parallel and save results to files for later analysis and graphics.
Example command: mpiexec -n 4 python semibatch_parmest_parallel.py
"""
import numpy as np
import pandas as pd
from itertools import product
import pyomo.contrib.parmest.parmest as parmest
from pyomo.contrib.parmest.examples.semibatch.semibatch import generate_model
### Parameter estimation
# Vars to estimate
theta_names = ['k1', 'k2', 'E1', 'E2']
# Data, list of json file names
data = []
for exp_num in range(10):
data.append('exp'+str(exp_num+1)+'.out')
# Note, the model already includes a 'SecondStageCost' expression
# for sum of squared error that will be used in parameter estimation
pest = parmest.Estimator(generate_model, data, theta_names)
### Parameter estimation with bootstrap resampling
bootstrap_theta = pest.theta_est_bootstrap(100)
bootstrap_theta.to_csv('bootstrap_theta.csv')
### Compute objective at theta for likelihood ratio test
k1 = np.arange(4, 24, 3)
k2 = np.arange(40, 160, 40)
E1 = np.arange(29000, 32000, 500)
E2 = np.arange(38000, 42000, 500)
theta_vals = pd.DataFrame(list(product(k1, k2, E1, E2)), columns=theta_names)
obj_at_theta = pest.objective_at_theta(theta_vals)
obj_at_theta.to_csv('obj_at_theta.csv')
|
[
"[email protected]"
] | |
e8f681e5343e00949fc0e08ea2a9238f224f882a
|
e8d927d593e2d8d08004e12ec11988062b9df8b3
|
/typeidea/comment/forms.py
|
91ba5d861325db5bce891dcee5d8f3d7fa0a14ba
|
[] |
no_license
|
choupihuan/typeidea
|
9c4f6a90dfb53f94dcaeb15e5e6e915f9f9e0ee6
|
f8e13b1e4afd0f67bf365bdbb04e35cc4f56a0da
|
refs/heads/master
| 2020-06-05T19:51:29.757681 | 2019-06-29T11:32:46 | 2019-06-29T11:32:46 | 192,530,809 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 373 |
py
|
import mistune
from django import forms
from .models import Comment
class CommentForm(forms.ModelForm):
def clean_content(self):
content = self.cleaned_data.get('content')
        if len(content) < 10:
            raise forms.ValidationError('Content must be longer than 10 characters')
return content
class Meta:
model = Comment
fields = ['content']
|
[
"[email protected]"
] | |
7ffa82f194c3ea745e4353afbfb80085484f5606
|
dd256415176fc8ab4b63ce06d616c153dffb729f
|
/aditya-works-feature-python_programming (1)/aditya-works-feature-python_programming/23-Jul-2019/method_examples/class_methods_2.py
|
05e1064eb9949454b9956604a1def6df3fba359e
|
[] |
no_license
|
adityapatel329/python_works
|
6d9c6b4a64cccbe2717231a7cfd07cb350553df3
|
6cb8b2e7f691401b1d2b980f6d1def848b0a71eb
|
refs/heads/master
| 2020-07-24T17:15:39.839826 | 2019-09-12T07:53:28 | 2019-09-12T07:53:28 | 207,993,516 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 318 |
py
|
class DecoratorExample:
def __init__(self):
print('Hello, world')
@classmethod
def example_function(cls):
print("In a class method ")
cls.some_other_function()
@staticmethod
def some_other_function():
print('Hello')
de = DecoratorExample()
de.example_function()
|
[
"[email protected]"
] | |
867a5f1f4eba9c7ccf06f57e34093b307e85c1bb
|
2b98ce9f3ae726824b88244790f00fc04e821b35
|
/odd.py
|
0ed512d0754d969d8ed18b9949a8734cf6642a7f
|
[] |
no_license
|
surajmalhotra/PythonTraining
|
4a055baab623a30fb4c410cb5426b8f1a2425611
|
47ab708e2f2023dbbf7e7637420994ff192f5932
|
refs/heads/master
| 2020-11-28T14:19:13.577724 | 2019-12-25T03:22:48 | 2019-12-25T03:22:48 | 229,845,613 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 455 |
py
|
from datetime import datetime
import time
import random
odds = {1, 3, 5, 7, 9, 11, 13, 15, 17, 19,
21, 23, 25, 27, 29, 31, 33, 35, 37, 39,
41, 43, 45, 47, 49, 51, 53, 55, 57, 59}
for i in range(5):
right_this_minute = datetime.today().minute
if right_this_minute in odds:
print("This minute seems a little odd.")
else:
print("Not an odd minute.")
wait_time = random.randint(1,5)
time.sleep(wait_time)
|
[
"[email protected]"
] | |
68931bdd3d8a5e5a40b0dcd263dddf62e75e7643
|
b90851dbf7eb42a74307b80dae00d4922bd3a739
|
/learn.py
|
57521041beecb91257640cd6e0159e1f66a0952b
|
[] |
no_license
|
divpypandey/Smart-Handy-Device
|
84b3c72ecaeb14f3a8c3b2e520c1b23b0bbf9e97
|
2bb1c2da79df78a0e22f3e8c99415643fea701cb
|
refs/heads/master
| 2022-09-19T08:49:58.589985 | 2020-06-05T13:59:29 | 2020-06-05T13:59:29 | 269,656,676 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,354 |
py
|
from sklearn import datasets
from sklearn import svm
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix
import scipy as sp
import os, signals
from sklearn.externals import joblib
from sklearn.model_selection import GridSearchCV
'''
This module trains the machine learning algorithm and saves the model
into "model.pkl".
It also saves the classes to a file "classes.pkl"
It analyze the dataset contained into the "data" directory.
The dataset will be composed of N files. Each of those files is
a recording of a specific gesture.
The name of the file rapresents the meaning of the recording.
For example, the file:
a_sample_0_1.txt
Is a recording for the "a" sign, recorded in the batch "0".
This is a work in progress....
'''
#Check if the module is executed as main, needed for parallel processing
if __name__ == '__main__':
#List of parameters
SHOW_CONFUSION_MATRIX = True
x_data = []
y_data = []
classes = {}
root="data" #Default directory containing the dataset
print "Loading the dataset from '{directory}'...".format(directory=root)
#Fetch all the data files from the root directory of the dataset
for path, subdirs, files in os.walk(root):
for name in files:
if not name.startswith('_'):
category = name.split("_")[0]
number = ord(category) - ord('a')
#Get the filename
filename = os.path.join(path, name)
#Load the sample from file
sample = signals.Sample.load_from_file(filename)
#Linearize the sample and then add it to the x_data list
x_data.append(sample.get_linearized())
                #The category and its number were extracted above from the file
                #name: e.g. "a_sample_0.txt" is considered an "a", and the number
                #is the offset from the 'a' char in ASCII
                #Add the category number to the y_data list
                y_data.append(number)
#Include the category and the corresponding number into a dictionary
#for easy access and referencing
classes[number] = category
print "DONE"
#Parameters used in the cross-validated training process
#The library automatically tries every possible combination to
#find the best scoring one.
params = {'C':[0.001,0.01,0.1,1], 'kernel':['linear']}
#Inizialize the model
svc = svm.SVC(probability = True)
#Inizialize the GridSearchCV with 8 processing cores and maximum verbosity
clf = GridSearchCV(svc, params,verbose =10, n_jobs=8)
#Split the dataset into two subset, one used for training and one for testing
X_train, X_test, Y_train, Y_test = train_test_split(x_data,
y_data, test_size=0.35, random_state=0)
print "Starting the training process..."
#Start the training process
clf.fit(X_train, Y_train)
#If SHOW_CONFUSION_MATRIX is true, prints the confusion matrix
if SHOW_CONFUSION_MATRIX:
print "Confusion Matrix:"
Y_predicted = clf.predict(X_test)
print confusion_matrix(Y_test, Y_predicted)
print "\nBest estimator parameters: "
print clf.best_estimator_
#Calculates the score of the best estimator found.
score = clf.score(X_test, Y_test)
print "\nSCORE: {score}\n".format(score = score)
print "Saving the model...",
#Saves the model to the "model.pkl" file
joblib.dump(clf, 'model.pkl')
#Saves the classes to the "classes.pkl" file
joblib.dump(classes, 'classes.pkl')
print "DONE"
|
[
"[email protected]"
] | |
8ca6365e72b0a16ebf03a9b59d69b49ddbce9120
|
cbb970c5adce5e3e13021aeab902382001455eff
|
/Calculator-D10.py
|
711f14b0a8b40e8a5bd46a0ef6d375c452775409
|
[] |
no_license
|
ezenielrios/Python-Calculator
|
e72aec9038f0ac32615703758d49e1cad97155da
|
6f4c051ab56e17f80ab9bc280c90b96e74659876
|
refs/heads/main
| 2023-06-13T23:38:33.054144 | 2021-07-08T02:39:51 | 2021-07-08T02:39:51 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,107 |
py
|
from replit import clear
from art import logo
#mathematical operation functions
def add(n1, n2):
return n1 + n2
def subtract(n1, n2):
return n1 - n2
def multiply(n1, n2):
return n1 * n2
def divide(n1, n2):
return n1 / n2
#Dictionary in order to call functions
operations = {
"+": add,
"-": subtract,
"*": multiply,
"/": divide
}
def calculator():
print(logo)
  #use float for more accurate calculations
num1 = float(input("What's the first number?: "))
for symbol in operations:
print(symbol)
should_continue = True
while should_continue:
operation_symbol = input("Pick an operation: ")
num2 = float(input("What's the next number?: "))
calculation_function = operations[operation_symbol]
answer = calculation_function(num1, num2)
print(f"{num1} {operation_symbol} {num2} = {answer}")
if input(f"Type 'y' to continue calculating with {answer}, or type 'n' to start a new calculation: ") == 'y':
num1 = answer
else:
should_continue = False
clear()
calculator()
calculator()
|
[
"[email protected]"
] | |
8b18de84b5f869c57b8f7c04bc955792fa178d4a
|
5926e6f9fa41ad986be4a3f9e7de5abbca868a54
|
/ITFClusterLib.py
|
6a3bda28f3b5f9099bf9ac54ff536e2a0dc53cd3
|
[] |
no_license
|
slowbrain/IsotopeFitPython
|
0c0beefacf93bf9025bd85c79d5969b77013ff43
|
6314a170237fdcb3269738c077107c83e8404616
|
refs/heads/master
| 2021-01-17T05:34:35.073261 | 2016-02-01T13:17:48 | 2016-02-01T13:17:48 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 7,828 |
py
|
# Copyright Stefan Ralser, Johannes Postler, Arntraud Bacher
# Institut für Ionenphysik und Angewandte Physik
# Universitaet Innsbruck
# Lib for Cluster calculations
import string
import numpy as np
import os
def parseFormula(mol):
#parses the molecule string (mol) and returns
#a dictionary with each element as key and the
#number of each element as value
i = 0
l = len(mol)
elements = {}
charge = '-1'
while i<l:
if mol[i] in string.digits: # i is digit? -> charge
startpos = i
i = i+1
while i<l and (mol[i] in string.digits):
i = i+1
charge = mol[startpos:i]
elif mol[i] in string.ascii_uppercase: #i is uppercase?
startpos = i
i = i+1
while i<l and (mol[i] in string.ascii_lowercase):
i = i+1
molName = mol[startpos:i]
startpos = i
while i<l and (mol[i] in string.digits):
i = i+1
if startpos == i: # no number found
n = 1
else:
n = int(mol[startpos:i])
if molName in elements.keys():
# element already in dictionary
temp = elements[molName]
elements[molName] = temp + n
else:
elements[molName] = n
elif mol[i] == '(': # starts with a bracket
#print('Index of brackets')
ix = mol.find(')',i+1) # find closing bracket
#print(ix)
ix2 = mol.find('(',i+1) # find next opening bracket
#print(ix2)
        while (ix2 > 0) and (ix2 < ix): # nested brackets
ix = mol.find(')',ix+1) #search for next closing bracket
#print(ix)
ix2 = mol.find('(',ix2+1) #search for next opening bracket
#print(ix2)
#i index of first opening bracket
molName = mol[i+1:ix]
i = ix+1
startpos = i
while i<l and (mol[i] in string.digits): # if digit is after molName
i = i+1
if startpos == i: # no number found
n = 1
else:
n = int(mol[startpos:i])
#print(n)
temp, notused = parseFormula(molName)
            #merge temp into elements, scaled by the multiplier n
            for temp1 in temp.keys():
                if temp1 in elements.keys():
                    elements[temp1] = elements[temp1] + temp[temp1] * n
                else:
                    elements[temp1] = temp[temp1] * n
elif mol[i] == '[': # starts with [
ix = mol.find(']',i+1) # find closing bracket
molName = mol[i+1:ix] # no nested [ ]
i = ix+1
startpos = i
while i<l and (mol[i] in string.digits): # if digit is after [molName]
i = i+1
if startpos == i: # no number found
n = 1
else:
n = int(mol[startpos:i])
temp, notused = parseFormula(molName)
            #merge temp into elements, scaled by the multiplier n
            for temp1 in temp.keys():
                if temp1 in elements.keys():
                    elements[temp1] = elements[temp1] + temp[temp1] * n
                else:
                    elements[temp1] = temp[temp1] * n
#print(elements)
return elements, charge
def oneString(elements,charge):
#print elements as one string
oneStr=charge
for atom in sorted(elements):
oneStr = oneStr+atom+str(elements[atom])
return oneStr
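# Illustrative example (assumed input), relying on the merge logic above:
#   parseFormula('2(CO2)3') -> ({'C': 3, 'O': 6}, '2')   # leading digits = charge
#   oneString({'C': 3, 'O': 6}, '2') -> '2C3O6'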
def parseMolecule(mol,mmd,th):
#parses the molecule string (mol) and returns
#the distribution, e.g. C60
distributions = []
i = 0
l = len(mol)
print(mol)
elements = {}
while i<l:
#print('Molecule to parse:'+mol)
if mol[i] in string.ascii_uppercase: #i is uppercase?
startpos = i
i = i+1
while i<l and (mol[i] in string.ascii_lowercase):
i = i+1
molName = mol[startpos:i] # startpos <= molName < i
startpos = i
while i<l and (mol[i] in string.digits):
i = i+1
if startpos == i: # no number found
n = 1
else:
n = int(mol[startpos:i])
if molName in elements:
                # element molName already present
                print(molName + ' already present!')
else:
elements[molName] = n
d = loadAtomicDist(molName)
temp = selfConvolute(d,n,mmd,th)
distributions.append(temp)
elif mol[i] == '(': # starts with a bracket
#print('Index of brackets')
ix = mol.find(')') # find closing bracket
#print(ix)
ix2 = mol.find('(',i+1) # find next opening bracket
#print(ix2)
        while (ix2 > 0) and (ix2 < ix): # nested brackets
ix = mol.find(')',ix+1) #search for next closing bracket
#print(ix)
ix2 = mol.find('(',ix2+1) #search for next opening bracket
#print(ix2)
#i index of first opening bracket
molName = mol[i+1:ix]
i = ix+1
startpos = i
while i<l and (mol[i] in string.digits): # if digit is after molName
i = i+1
if startpos == i: # no number found
n = 1
else:
n = int(mol[startpos:i])
#print(n)
temp = selfConvolute(parseMolecule(molName,mmd,th),n,mmd,th)
distributions.append(temp)
dFinal = distributions[0]
for mols in range(len(distributions)-1):
dFinal = convolute(dFinal,distributions[mols+1])
dFinal = combineMasses(dFinal, mmd)
dFinal = applyThresh(dFinal, th)
#print(dFinal)
return dFinal, elements
def loadAtomicDist(molName):
#Loads atomic distribution from file
file = os.path.join(os.path.abspath(os.curdir),*['Atoms',molName+'.txt'])
d = np.loadtxt(file)
d[:,1] = d[:,1]/d.sum(axis=0)[1] # renorm
return d
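# Assumed layout of the Atoms/<name>.txt files: one "mass abundance" pair per
# line, e.g. for carbon: "12.0 98.93" and "13.00335 1.07"; the second column
# is renormalized above so the abundances sum to 1.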
def selfConvolute(d,n,mmd,th):
#d... Distribution 2D-array, e.g. of C
#n... Cluster Size, e.g. 60
#dFinal.. Distribution 2D-array, e.g. C60
dFinal = d
for i in range(1,n):
dFinal = convolute(dFinal,d)
dFinal = combineMasses(dFinal, mmd)
dFinal = applyThresh(dFinal, th)
return dFinal
def convolute(m1,m2):
# Convolution of m1 and m2
# e.g. m1 = [12, 1; 13, 2], m2 = [10, 3; 11, 4]
# -> result = [12+10,1*3; 12+11,1*4; 13+10,2*3; 13+11, 2*4]
a1 = np.repeat(m1[:,0],m2.shape[0])
a2 = np.tile(m2[:,0],m1.shape[0])
a3 = np.repeat(m1[:,1],m2.shape[0])
a4 = np.tile(m2[:,1],m1.shape[0])
return np.column_stack(((a1+a2),(a3*a4)))
def combineMasses(dFinal,minDist):
ix = np.argsort(dFinal[:,0])
dSorted = dFinal[ix,:]
massDiff = np.diff(dSorted[:,0])
ix = np.argmin(massDiff)
minDiff = massDiff[ix]
while minDiff <= minDist:
# combine peaks
# sum 2nd column
pSum = dSorted[ix,1] + dSorted[ix+1,1]
# weighted Mass
pMassW = ((dSorted[ix,0]*dSorted[ix,1])+(dSorted[ix+1,0]*dSorted[ix+1,1]))/pSum
dSorted[ix,:] = np.array([pMassW, pSum])
dSorted = np.delete(dSorted,ix+1,0)
massDiff = np.delete(massDiff,ix,0)
ix = np.argmin(massDiff)
minDiff = massDiff[ix]
return dSorted
def applyThresh(dFinal,thresh):
ix = (dFinal[:,1]>=thresh*np.max(dFinal[:,1])).nonzero()
return dFinal[ix]
|
[
"[email protected]"
] | |
213c49a939d8dc410a406a59c6a7a01b58c87a56
|
a96c8060736be392bed0d4c7524a3964c88d2010
|
/python/renrenlogin/renrenlogin/spiders/renren.py
|
1a3b8b31e059b719c7e47c5941a8b13f07c6f023
|
[] |
no_license
|
yinjun123/practice
|
2f830907e8a103136585ad20bdd6eab0e8c5f149
|
ca086d4abaf8b2cba1ae0ccd456b7a7382dd6404
|
refs/heads/master
| 2021-01-01T19:37:26.742866 | 2017-07-28T08:49:04 | 2017-07-28T08:49:04 | 98,628,903 | 0 | 1 | null | 2017-10-13T12:02:05 | 2017-07-28T08:41:48 |
JavaScript
|
UTF-8
|
Python
| false | false | 1,819 |
py
|
# -*- coding: utf-8 -*-
import scrapy
class RenrenSpider(scrapy.Spider):
name = "renren"
allowed_domains = ["renren.com"]
start_urls = (
'http://www.renren.com/xxxxx',
'http://www.renren.com/11111',
'http://www.renren.com/xx',
)
cookies = {
"anonymid" : "ixrna3fysufnwv",
"_r01_" : "1",
"ap" : "327550029",
"JSESSIONID" : "abciwg61A_RvtaRS3GjOv",
"depovince" : "GW",
"springskin" : "set",
"jebe_key" : "f6fb270b-d06d-42e6-8b53-e67c3156aa7e%7Cc13c37f53bca9e1e7132d4b58ce00fa3%7C1484060607478%7C1%7C1486198628950",
"jebe_key" : "f6fb270b-d06d-42e6-8b53-e67c3156aa7e%7Cc13c37f53bca9e1e7132d4b58ce00fa3%7C1484060607478%7C1%7C1486198619601",
"ver" : "7.0",
"XNESSESSIONID" : "e703b11f8809",
"jebecookies" : "98c7c881-779f-4da8-a57c-7464175cd469|||||",
"ick_login" : "4b4a254a-9f25-4d4a-b686-a41fda73e173",
"_de" : "BF09EE3A28DED52E6B65F6A4705D973F1383380866D39FF5",
"p" : "ea5541736f993365a23d04c0946c10e29",
"first_login_flag" : "1",
"ln_uact" : "[email protected]",
"ln_hurl" : "http://hdn.xnimg.cn/photos/hdn521/20140529/1055/h_main_9A3Z_e0c300019f6a195a.jpg",
"t" : "691808127750a83d33704a565d8340ae9",
"societyguester" : "691808127750a83d33704a565d8340ae9",
"id" : "327550029",
"xnsid" : "f42b25cf",
"loginfrom" : "syshome"
}
def start_requests(self):
for url in self.start_urls:
#yield scrapy.Request(url, callback = self.parse)
#url = "http://www.renren.com/410043129/profile"
yield scrapy.FormRequest(url, cookies = self.cookies, callback = self.parse_page)
def parse_page(self, response):
print "===========" + response.url
with open("deng.html", "w") as filename:
filename.write(response.body)
|
[
"[email protected]"
] | |
6d74e9a1d2d05be56583d4e503afccee874d0f96
|
0f55ac63ce134630e76ce184111b5f3a050d0c8d
|
/Compilateur/Python/tp1/parser5.py
|
eb081def981c99bf2fc8ecdeaff84d5344a78d8a
|
[] |
no_license
|
roflolilolmao/thirdYear
|
e8091599483530a60909b17a940c9ac93cde0622
|
9a100b13485f033e12e93b57bded9369849bb36b
|
refs/heads/master
| 2020-05-17T13:02:25.710906 | 2016-11-24T12:03:01 | 2016-11-24T12:03:01 | 42,532,557 | 3 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,901 |
py
|
import ply.yacc as yacc
from lex5 import *
import pydot
from AST import *
__author__ = 'Quentin'
var = dict()
def p_program_statement(p):
'''program : statement
| statement SEMICOLON program'''
if len(p) > 2:
p[0] = ProgramNode([p[1]]+p[3].children)
else:
p[0] = ProgramNode(p[1])
def p_statement_exp(p):
'''statement : assignement
| structure'''
p[0] = p[1]
def p_print_statement(p):
'''statement : PRINT expression'''
p[0] = PrintNode(p[2])
def p_while_structure(p):
'''structure : WHILE expression BRA_OPEN program BRA_CLOSE'''
p[0] = WhileNode([p[2], p[4]])
def p_assignement(p):
'assignement : IDENTIFIER EQUAL expression'
p[0] = AssignNode([TokenNode(p[1]), p[3]])
def p_expression_par(p):
'expression : PAR_OPEN expression PAR_CLOSE'
p[0] = p[2]
def p_expression_num(p):
'''expression : NUMBER
| IDENTIFIER'''
p[0] = TokenNode(p[1])
def p_expression_unary_num(p):
'expression : ADD_OP NUMBER'
if p[1] == '-':
i = -1
else:
i = 1
p[0] = TokenNode(i * p[2])
operations = {
'+': lambda x, y: x+y,
'-': lambda x, y: x-y,
'*': lambda x, y: x*y,
'/': lambda x, y: x/y
}
def p_expression_op(p):
'''expression : expression ADD_OP expression
| expression MUL_OP expression'''
p[0] = OpNode(p[2], [p[1], p[3]])
def p_error(p):
    print("parser5: Syntax error in line %d" % p.lineno)
precedence = (
('left', 'ADD_OP'),
('left', 'MUL_OP')
)
yacc.yacc()
def parse(program):
return yacc.parse(program)
if __name__ == '__main__':
import sys
prog = open(sys.argv[1]).read()
result = parse(prog)
print(result)
import os
graph = result.makegraphicaltree()
name = os.path.splitext(sys.argv[1])[0]+'-ast.pdf'
graph.write_pdf(name)
print("wrote ast to ", name)
|
[
"[email protected]"
] | |
55d3f0c1f3212420bb8f4c5e024af4f7024cad1b
|
1186ba1cbdcd963e8d924b111e9c8b67c819df4a
|
/reviews/migrations/0001_initial.py
|
4ee5f0ff3201cb5758eae67812f85ac6b1f74db7
|
[] |
no_license
|
nakku4251/helloworld.com
|
69e481271727d0c03eb7ce78744f0849d9bdb2d7
|
418d303680df4b87689a8c1d8d66847fbfd11675
|
refs/heads/master
| 2023-03-14T00:38:41.946106 | 2021-02-27T08:18:04 | 2021-02-27T08:18:04 | 335,054,050 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,060 |
py
|
# Generated by Django 3.0 on 2021-02-05 11:31
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Review',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('store_name', models.CharField(max_length=255, verbose_name='店名')),
('title', models.CharField(max_length=255, verbose_name='タイトル')),
('text', models.TextField(blank=True, verbose_name='口コミテキスト')),
                ('stars', models.IntegerField(choices=[(1, '⭐️'), (2, '⭐️⭐️'), (3, '⭐️⭐️⭐️'), (4, '⭐️⭐️⭐️⭐️'), (5, '⭐️⭐️⭐️⭐️⭐️')], verbose_name='星の数')),
('created_at', models.DateTimeField(default=django.utils.timezone.now, verbose_name='作成日')),
],
),
]
|
[
"[email protected]"
] | |
d1e26da41ca066d9fb3d5b038568af586648cc20
|
84dfba4609a0fc880d34a5988b2027eda830e5d0
|
/billeterie_assos/event/migrations/0008_remove_profile_user_type.py
|
60ee9d2632de21e8e96e27d84748d99b71421e87
|
[] |
no_license
|
Jonathan2021/django_event_website
|
7a7888bddba3bf7b32bb5e4883cf0b290e117db5
|
a52936d459b82f606db83e9cbc40606e97a71fb6
|
refs/heads/master
| 2022-01-13T12:02:35.746387 | 2019-06-26T21:56:43 | 2019-06-26T21:56:43 | 184,823,654 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 330 |
py
|
# Generated by Django 2.2.1 on 2019-06-07 17:31
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('event', '0007_auto_20190607_1839'),
]
operations = [
migrations.RemoveField(
model_name='profile',
name='user_type',
),
]
|
[
"[email protected]"
] | |
f487928573a79250ae6d3f83431f0d7abcf93c8e
|
3f6fa5bbbaf07d56f29f0054b494b9dadcd26fc6
|
/backend/core/factories.py
|
d6fb727f180623b7fe693ecf8561450b9a8eb61c
|
[
"Unlicense"
] |
permissive
|
mashuq/academia
|
9a79b801b72217ab6a5935c433a9ea5704692474
|
571b3db58de4a70210ebd9d92c0f152016aec861
|
refs/heads/main
| 2023-05-20T04:05:41.950061 | 2021-06-14T07:47:00 | 2021-06-14T07:47:00 | 334,670,913 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,757 |
py
|
import factory
from factory.django import DjangoModelFactory
from .models import *
import factory.fuzzy
from django.utils import timezone
import random
class CourseCategoryFactory(DjangoModelFactory):
class Meta:
model = CourseCategory
name = factory.Faker("bs")
class CourseFactory(DjangoModelFactory):
class Meta:
model = Course
name = factory.Faker("bs")
code = factory.fuzzy.FuzzyText(length=10)
description = factory.Faker("paragraph")
curriculum = factory.Faker("paragraph")
image = 'https://picsum.photos/400/250'
visible = factory.Faker("boolean")
class SectionFactory(DjangoModelFactory):
class Meta:
model = Section
name = factory.Faker("bs")
start_date = factory.Faker(
"date_time", tzinfo=timezone.get_current_timezone())
end_date = factory.Faker(
"date_time", tzinfo=timezone.get_current_timezone())
visible = factory.Faker("boolean")
class SessionFactory(DjangoModelFactory):
class Meta:
model = Session
name = factory.Faker("bs")
serial = factory.Faker("random_int")
class McqFactory(DjangoModelFactory):
class Meta:
model = MultipleChoiceQuestion
question = factory.Faker("sentence")
choice1 = factory.Faker("word")
choice2 = factory.Faker("word")
choice3 = factory.Faker("word")
choice4 = factory.Faker("word")
    correct_choice = factory.LazyAttribute(
        lambda o: random.choice([o.choice1, o.choice2, o.choice3, o.choice4]))
mark = 1
class SqFactory(DjangoModelFactory):
class Meta:
model = ShortQuestion
question = factory.Faker("sentence")
mark = 5
class BqFactory(DjangoModelFactory):
class Meta:
model = BroadQuestion
question = factory.Faker("sentence")
mark = 10
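# Usage sketch (assumes Django is configured for this app, e.g. inside tests):
#   course = CourseFactory()                 # persists a Course with faked fields
#   questions = McqFactory.create_batch(5)   # five MultipleChoiceQuestions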
|
[
"[email protected]"
] | |
2c2420f8e40458e5e260b24fd0446752e66a7576
|
b4ee5a2986d7232630aadc284194b3af3c9ac504
|
/hw/hw01/hw01.py
|
8c0a7d1634824267c092d8502b283b0e1131968e
|
[] |
no_license
|
lijian12345/cs61a-sp20
|
d42c855eb06fe26f7e0d37d483d1f4fa81a0e118
|
3a9aa5e922c3f8a4d31b6f197340d4828e342530
|
refs/heads/master
| 2023-04-03T10:36:32.192096 | 2021-04-19T11:06:50 | 2021-04-19T11:06:50 | 254,659,566 | 6 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,645 |
py
|
""" Homework 1: Control """
from operator import add, sub
def a_plus_abs_b(a, b):
"""Return a+abs(b), but without calling abs.
>>> a_plus_abs_b(2, 3)
5
>>> a_plus_abs_b(2, -3)
5
>>> # a check that you didn't change the return statement!
>>> import inspect, re
>>> re.findall(r'^\s*(return .*)', inspect.getsource(a_plus_abs_b), re.M)
['return h(a, b)']
"""
if b >= 0:
h = add
else:
h = sub
return h(a, b)
def two_of_three(x, y, z):
"""Return a*a + b*b, where a and b are the two smallest members of the
positive numbers x, y, and z.
>>> two_of_three(1, 2, 3)
5
>>> two_of_three(5, 3, 1)
10
>>> two_of_three(10, 2, 8)
68
>>> two_of_three(5, 5, 5)
50
>>> # check that your code consists of nothing but an expression (this docstring)
>>> # a return statement
>>> import inspect, ast
>>> [type(x).__name__ for x in ast.parse(inspect.getsource(two_of_three)).body[0].body]
['Expr', 'Return']
"""
return x * x + y * y + z * z - max(x, y, z) * max(x, y, z)
def largest_factor(x):
"""Return the largest factor of x that is smaller than x.
>>> largest_factor(15) # factors are 1, 3, 5
5
>>> largest_factor(80) # factors are 1, 2, 4, 5, 8, 10, 16, 20, 40
40
>>> largest_factor(13) # factor is 1 since 13 is prime
1
"""
for i in range(x - 1, 0, -1):
if x % i == 0:
return i
def if_function(condition, true_result, false_result):
"""Return true_result if condition is a true value, and
false_result otherwise.
>>> if_function(True, 2, 3)
2
>>> if_function(False, 2, 3)
3
>>> if_function(3==2, 3+2, 3-2)
1
>>> if_function(3>2, 3+2, 3-2)
5
"""
if condition:
return true_result
else:
return false_result
def with_if_statement():
"""
>>> result = with_if_statement()
6
>>> print(result)
None
"""
if c():
return t()
else:
return f()
def with_if_function():
"""
>>> result = with_if_function()
5
6
>>> print(result)
None
"""
return if_function(c(), t(), f())
def c():
return False
def t():
print(5)
def f():
print(6)
def hailstone(x):
"""Print the hailstone sequence starting at x and return its
length.
>>> a = hailstone(10)
10
5
16
8
4
2
1
>>> a
7
"""
num = 0
while x != 1:
print(x)
if x % 2 == 0:
x //= 2
else:
x = x * 3 + 1
num += 1
print(x)
return num + 1
|
[
"[email protected]"
] | |
722e532abb9d183c9faeb239a798949f7cbb32e0
|
a75b7fd002a9f8b4823dcc9cd6c2c5291ea31fe8
|
/ir_datasets/datasets/wikir.py
|
cfa056b832e3aa089533038d543bd5ee028d47f4
|
[
"Apache-2.0"
] |
permissive
|
FRD898/ir_datasets
|
3edadc3859eb3c3c7a3f7c33c14aebe709aad2f2
|
e4bfec64d41cc09c84315f675f2af768ea26f5b4
|
refs/heads/master
| 2023-06-16T10:32:12.367257 | 2021-07-18T10:41:20 | 2021-07-18T10:41:20 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,173 |
py
|
import contextlib
from pathlib import Path
from typing import NamedTuple
import ir_datasets
from ir_datasets.util import ZipExtractCache, DownloadConfig
from ir_datasets.datasets.base import Dataset, YamlDocumentation
from ir_datasets.formats import CsvQueries, CsvDocs, TrecQrels, TrecScoredDocs
NAME = 'wikir'
_logger = ir_datasets.log.easy()
QRELS_DEFS = {
2: "Query is the article title",
1: "There is a link to the article with the query as its title in the first sentence",
0: "Otherwise",
}
class File:
def __init__(self, dlc, relative_path):
self.dlc = dlc
self.relative_path = relative_path
def path(self):
return str(next(Path(self.dlc.path()).glob(self.relative_path)))
@contextlib.contextmanager
def stream(self):
with open(self.path(), 'rb') as f:
yield f
def _init():
base_path = ir_datasets.util.home_path()/NAME
dlc = DownloadConfig.context(NAME, base_path)
documentation = YamlDocumentation(f'docs/{NAME}.yaml')
subsets = {}
sources = [
('en1k', 369721),
('en59k', 2454785),
('fr14k', 736616),
('es13k', 645901),
('it16k', 503012),
]
for source, count_hint in sources:
source_dlc = ZipExtractCache(dlc[source], base_path/source)
docs = CsvDocs(File(source_dlc, "*/documents.csv"), namespace=source, lang=source[:2], count_hint=count_hint)
subsets[source] = Dataset(docs, documentation(source))
for split in ['training', 'validation', 'test']:
subsets[f'{source}/{split}'] = Dataset(
docs,
CsvQueries(File(source_dlc, f"*/{split}/queries.csv"), lang=source[:2]),
TrecQrels(File(source_dlc, f"*/{split}/qrels"), qrels_defs=QRELS_DEFS),
TrecScoredDocs(File(source_dlc, f"*/{split}/BM25.res")),
documentation(f'{source}/{split}')
)
base = Dataset(documentation('_'))
ir_datasets.registry.register(NAME, base)
for s in sorted(subsets):
ir_datasets.registry.register(f'{NAME}/{s}', subsets[s])
return base, subsets
collection, subsets = _init()
|
[
"[email protected]"
] | |
902454374f52acc89deee8bf40eafa743714ac5b
|
918bbb286cacba8f3b595e4a5281d6dff8d24d32
|
/collections/twitter_downloader/run_twitter_task_followers.py
|
a44739ad98a1d2d4fd5162adc1c602e80e17fc10
|
[
"MIT"
] |
permissive
|
dgonzo/bmdc_skullcandy
|
a4e653a37b9167ee38c99a5f3f3d7374cf1add49
|
03a38065196b603d9e6cce64fe45bfc1bcce3717
|
refs/heads/master
| 2021-01-25T07:35:16.716086 | 2014-04-08T14:48:38 | 2014-04-08T14:48:38 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 6,380 |
py
|
import sys
script,screen_name,OAUTH_TOKEN,OAUTH_TOKEN_SECRET = sys.argv
import twitter
# XXX: Go to http://dev.twitter.com/apps/new to create an app and get values
# for these credentials, which you'll need to provide in place of these
# empty string values that are defined as placeholders.
# See https://dev.twitter.com/docs/auth/oauth for more information
# on Twitter's OAuth implementation.
CONSUMER_KEY = 'ePgF72q92wF5co7c0hRToQ'
CONSUMER_SECRET = 'ycTbFjdm9R8LR9tQZr5DkRTjPQWJJZ1N0rg6PLw'
auth = twitter.oauth.OAuth(OAUTH_TOKEN, OAUTH_TOKEN_SECRET,
CONSUMER_KEY, CONSUMER_SECRET)
twitter_api = twitter.Twitter(auth=auth)
import sys
import time
from urllib2 import URLError
from httplib import BadStatusLine
import json
import twitter
def make_twitter_request(twitter_api_func, max_errors=10, *args, **kw):
# A nested helper function that handles common HTTPErrors. Return an updated
# value for wait_period if the problem is a 500 level error. Block until the
# rate limit is reset if it's a rate limiting issue (429 error). Returns None
# for 401 and 404 errors, which requires special handling by the caller.
def handle_twitter_http_error(e, wait_period=2, sleep_when_rate_limited=True):
if wait_period > 3600: # Seconds
print >> sys.stderr, 'Too many retries. Quitting.'
raise e
# See https://dev.twitter.com/docs/error-codes-responses for common codes
if e.e.code == 401:
print >> sys.stderr, 'Encountered 401 Error (Not Authorized)'
return None
elif e.e.code == 404:
print >> sys.stderr, 'Encountered 404 Error (Not Found)'
return None
elif e.e.code == 429:
print >> sys.stderr, 'Encountered 429 Error (Rate Limit Exceeded)'
if sleep_when_rate_limited:
print >> sys.stderr, "Retrying in 15 minutes...ZzZ..."
sys.stderr.flush()
time.sleep(60*15 + 5)
print >> sys.stderr, '...ZzZ...Awake now and trying again.'
return 2
else:
raise e # Caller must handle the rate limiting issue
elif e.e.code in (500, 502, 503, 504):
print >> sys.stderr, 'Encountered %i Error. Retrying in %i seconds' % \
(e.e.code, wait_period)
time.sleep(wait_period)
wait_period *= 1.5
return wait_period
else:
raise e
# End of nested helper function
wait_period = 2
error_count = 0
while True:
try:
return twitter_api_func(*args, **kw)
except twitter.api.TwitterHTTPError, e:
error_count = 0
wait_period = handle_twitter_http_error(e, wait_period)
if wait_period is None:
return
except URLError, e:
error_count += 1
print >> sys.stderr, "URLError encountered. Continuing."
if error_count > max_errors:
print >> sys.stderr, "Too many consecutive errors...bailing out."
raise
except BadStatusLine, e:
error_count += 1
print >> sys.stderr, "BadStatusLine encountered. Continuing."
if error_count > max_errors:
print >> sys.stderr, "Too many consecutive errors...bailing out."
raise
# Sample usage
#twitter_api = oauth_login()
# See https://dev.twitter.com/docs/api/1.1/get/users/lookup for
# twitter_api.users.lookup
#response = make_twitter_request(twitter_api.users.lookup,
# screen_name="SocialWebMining")
#print json.dumps(response, indent=1)
from functools import partial
from sys import maxint
def get_friends_followers_ids(twitter_api, screen_name=None, user_id=None,
friends_limit=maxint, followers_limit=maxint):
# Must have either screen_name or user_id (logical xor)
assert (screen_name != None) != (user_id != None), \
"Must have screen_name or user_id, but not both"
# See https://dev.twitter.com/docs/api/1.1/get/friends/ids and
# https://dev.twitter.com/docs/api/1.1/get/followers/ids for details
# on API parameters
get_friends_ids = partial(make_twitter_request, twitter_api.friends.ids,
count=0)
get_followers_ids = partial(make_twitter_request, twitter_api.followers.ids,
count=100000)
friends_ids, followers_ids = [], []
for twitter_api_func, limit, ids, label in [
[get_friends_ids, friends_limit, friends_ids, "friends"],
[get_followers_ids, followers_limit, followers_ids, "followers"]
]:
if limit == 0: continue
cursor = -1
while cursor != 0:
# Use make_twitter_request via the partially bound callable...
if screen_name:
response = twitter_api_func(screen_name=screen_name, cursor=cursor)
else: # user_id
response = twitter_api_func(user_id=user_id, cursor=cursor)
if response is not None:
ids += response['ids']
cursor = response['next_cursor']
print >> sys.stderr, 'Fetched {0} total {1} ids for {2}'.format(len(ids),
label, (user_id or screen_name))
# XXX: You may want to store data during each iteration to provide an
# an additional layer of protection from exceptional circumstances
if len(ids) >= limit or response is None:
break
# Do something useful with the IDs, like store them to disk...
return friends_ids[:friends_limit], followers_ids[:followers_limit]
friends_ids, followers_ids = get_friends_followers_ids(twitter_api,
screen_name=screen_name,
friends_limit=0,
followers_limit=10000000)
# save list to a file
thefile = open("data/followers-"+screen_name+".csv", 'w')
for item in followers_ids:
thefile.write("%s\n" % item)
|
[
"[email protected]"
] | |
93d973806b72476402c087079c684e78920c1e44
|
ee8c4c954b7c1711899b6d2527bdb12b5c79c9be
|
/assessment2/amazon/run/core/controllers/letters.py
|
4d6a222a879f80298b4d6ad5f5d5743deb44e15d
|
[] |
no_license
|
sqlconsult/byte
|
02ac9899aebea4475614969b594bfe2992ffe29a
|
548f6cb5038e927b54adca29caf02c981fdcecfc
|
refs/heads/master
| 2021-01-25T14:45:42.120220 | 2018-08-11T23:45:31 | 2018-08-11T23:45:31 | 117,135,069 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 368 |
py
|
#!/usr/bin/env python3
from flask import Blueprint, Flask, render_template, request, url_for
controller = Blueprint('letters', __name__, url_prefix='/letters')
# @controller.route('/<string:title>', methods=['GET'])
# def lookup(title):
# if title == 'Republic': # TODO 2
# return render_template('republic.html') # TODO 2
# else:
# pass
|
[
"[email protected]"
] | |
c6ba53fe8fcf40b99acb006e8455679710417ccd
|
0fc2b7fc3220a6c5fec1aa2999847fa4442afb95
|
/accounts/forms.py
|
0c1d87781ba3efbda29e3d9d075c38508458238e
|
[] |
no_license
|
vinaykornapalli/wecan-hackathon
|
41a33455343a14262cc701650c4eeea5203bb547
|
d021ba4f932a79ae7d49b529ba7ed32b52318e93
|
refs/heads/master
| 2021-04-09T11:28:46.851316 | 2018-03-18T05:09:54 | 2018-03-18T05:09:54 | 125,592,912 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 255 |
py
|
from django.contrib.auth import get_user_model
from django.contrib.auth.forms import UserCreationForm
class CreateSignupForm(UserCreationForm):
class Meta:
fields=('username','email','password1','password2')
model = get_user_model()
|
[
"[email protected]"
] | |
137982ad4fabf053ac21d39abd872101e3ece56c
|
b124d99a5d7a139d31405aefdbfed09f6eb3d55b
|
/beebcn/spiders/beebcn.py
|
0ee6535f8b6015585dac04bef036e47860cb503b
|
[] |
no_license
|
daniel-kanchev/beebcn
|
26efaab276e525b919b4fbeb06251a2850573de4
|
d7e8142b41501b2586e0f8e01f8a690355701268
|
refs/heads/main
| 2023-04-04T05:08:37.732275 | 2021-04-15T11:13:28 | 2021-04-15T11:13:28 | 358,230,159 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,320 |
py
|
import scrapy
from scrapy.loader import ItemLoader
from itemloaders.processors import TakeFirst
from datetime import datetime
from beebcn.items import Article
import requests
import json
import re
class beebcnSpider(scrapy.Spider):
name = 'beebcn'
start_urls = ['http://www.beeb.com.cn/#/home/banknews']
def parse(self, response):
json_response = json.loads(requests.get(
"http://www.beeb.com.cn/beebPortal/data/content/banknews.json?MmEwMD=5RroZJL4EsQSA_im0lwzRvTmJYy8PJ4cOClXNiNribCHRHjumBO3uBMMxoJzIJ3r62_9HrN9.tr70HIghQ5aKUXz1cuP4ESFycL1xKjK_Na4.JFV_a8PKOxBOF0DcMGoWbpFpqiVpl2aZy2VGwcostDBYt9hUkpu3u7a7ICHNf_K32mxnn0_.wxIMLtrYIf7PM3bZt993kiMI8Nyen.9unNqhUhblx0ILi5cJrPveYNJPVtvuppJobjGdG6nFKcBtQ_nFPjWN0kounYjSEQWn0O.t.BuCKWKbuGZkMNlyziFmT02JgsR0BLc4tfTEvv36").text)
articles = json_response["articleList"]
for article in articles:
item = ItemLoader(Article())
item.default_output_processor = TakeFirst()
title = article["title"]
date = article["createTime"]
p = re.compile(r'<.*?>')
content = p.sub('', article["content"])
item.add_value('title', title)
item.add_value('date', date)
item.add_value('content', content)
yield item.load_item()
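
# A minimal sketch of the Article item imported above -- an assumption for
# illustration (the real definition lives in beebcn/items.py); only the three
# fields this spider populates are shown, under a hypothetical name.
class _ArticleSketch(scrapy.Item):
    title = scrapy.Field()
    date = scrapy.Field()
    content = scrapy.Field()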
|
[
"[email protected]"
] | |
08f43da276892e31c65ef74713f038e2737d7377
|
ad3e92102c252323c822714cb81bf1e1181d7270
|
/sd_files/.Trashes/501/reset.py
|
3ee76e7a35222810f1d0a591a561260c9cc9ff68
|
[] |
no_license
|
kevinwgrove/digital-foosball-revolution
|
8979824c24c7d59be4f569b5fb29e29546f0b376
|
ca6bb7dd27311da6deb9734641c19bead8628366
|
refs/heads/main
| 2023-06-05T03:47:45.626579 | 2021-06-17T01:49:52 | 2021-06-17T01:49:52 | 366,117,112 | 4 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 471 |
py
|
import time
import board
from digitalio import DigitalInOut, Direction, Pull
reset_button = DigitalInOut(board.D8)
reset_button.direction = Direction.INPUT
reset_button.pull = Pull.UP
# Poll until the reset button is pressed (the pull-up keeps the pin high,
# so a press reads False), then zero both scores and report them.
while True:
    time.sleep(0.1)
    if not reset_button.value:
        away_score = 0
        home_score = 0
        print("Home Score: ", home_score)
        print("Away Score: ", away_score)
        break
|
[
"[email protected]"
] | |
71a1d35afe3081aaa5e44192447c7494b4a5050e
|
0a2cc497665f2a14460577f129405f6e4f793791
|
/sdk/containerregistry/azure-containerregistry/azure/containerregistry/_generated/models/_container_registry_enums.py
|
8ca5cfea37c17dd1bd1b22ec0ca9d9f1a79ba8bd
|
[
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
] |
permissive
|
hivyas/azure-sdk-for-python
|
112158aa9e1dd6e30cf6b3dde19f5db6ea2a577b
|
8b3258fa45f5dc25236c22ad950e48aa4e1c181c
|
refs/heads/master
| 2023-06-17T12:01:26.392186 | 2021-05-18T19:56:01 | 2021-05-18T19:56:01 | 313,761,277 | 1 | 1 |
MIT
| 2020-12-02T17:48:22 | 2020-11-17T22:42:00 |
Python
|
UTF-8
|
Python
| false | false | 2,910 |
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.4.1, generator: @autorest/[email protected])
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from enum import Enum, EnumMeta
from six import with_metaclass
class _CaseInsensitiveEnumMeta(EnumMeta):
def __getitem__(self, name):
return super().__getitem__(name.upper())
def __getattr__(cls, name):
"""Return the enum member matching `name`
We use __getattr__ instead of descriptors or inserting into the enum
class' __dict__ in order to support `name` and `value` being both
properties for enum members (which live in the class' __dict__) and
enum members themselves.
"""
try:
return cls._member_map_[name.upper()]
except KeyError:
raise AttributeError(name)
class ArtifactArchitecture(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
I386 = "386"
AMD64 = "amd64"
ARM = "arm"
ARM64 = "arm64"
MIPS = "mips"
MIPS_LE = "mipsle"
MIPS64 = "mips64"
MIPS64_LE = "mips64le"
PPC64 = "ppc64"
PPC64_LE = "ppc64le"
RISC_V64 = "riscv64"
S390_X = "s390x"
WASM = "wasm"
class ArtifactOperatingSystem(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
AIX = "aix"
ANDROID = "android"
DARWIN = "darwin"
DRAGONFLY = "dragonfly"
FREE_BSD = "freebsd"
ILLUMOS = "illumos"
I_OS = "ios"
JS = "js"
LINUX = "linux"
NET_BSD = "netbsd"
OPEN_BSD = "openbsd"
PLAN9 = "plan9"
SOLARIS = "solaris"
WINDOWS = "windows"
class ManifestOrderBy(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Sort options for ordering manifests in a collection.
"""
#: Do not provide an orderby value in the request.
NONE = "none"
#: Order manifests by LastUpdatedOn field, from most recently updated to least recently updated.
LAST_UPDATED_ON_DESCENDING = "timedesc"
#: Order manifest by LastUpdatedOn field, from least recently updated to most recently updated.
LAST_UPDATED_ON_ASCENDING = "timeasc"
class TagOrderBy(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
#: Do not provide an orderby value in the request.
NONE = "none"
#: Order tags by LastUpdatedOn field, from most recently updated to least recently updated.
LAST_UPDATED_ON_DESCENDING = "timedesc"
#: Order tags by LastUpdatedOn field, from least recently updated to most recently updated.
LAST_UPDATED_ON_ASCENDING = "timeasc"
class TokenGrantType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Grant type is expected to be refresh_token
"""
REFRESH_TOKEN = "refresh_token"
PASSWORD = "password"
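
# A short usage sketch: thanks to the case-insensitive metaclass above,
# lookup by member name ignores case, and members compare equal to their
# string values because they also subclass str.
assert ArtifactArchitecture['amd64'] is ArtifactArchitecture.AMD64
assert TagOrderBy.LAST_UPDATED_ON_DESCENDING == "timedesc"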
|
[
"[email protected]"
] | |
10ef368d70d34858acddeed28d2af22b7d118230
|
9aa4faa97020ee989506b2131f3f0e6e455d62c1
|
/predict_cancer.py
|
0d914c36bf34fbcb8917e73d666038507e4ca018
|
[] |
no_license
|
dalugoSU/machine_learning_breast_cancer
|
f6fdc78b7ccbd68346a31eed8b527970f85d051e
|
66eb6607991b0f2adce95b235ef6da93a91a765f
|
refs/heads/main
| 2023-03-06T00:37:09.337796 | 2021-02-20T23:16:18 | 2021-02-20T23:16:18 | 340,506,505 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,917 |
py
|
import numpy as np


class BreastCancerPrediction:
    # The 30 measurements, in the column order expected by models trained on
    # scikit-learn's breast cancer dataset.
    FEATURE_NAMES = [
        "mean radius", "mean texture", "mean perimeter", "mean area",
        "mean smoothness", "mean compactness", "mean concavity",
        "mean concave points", "mean symmetry", "mean fractal dimension",
        "radius error", "texture error", "perimeter error", "area error",
        "smoothness error", "compactness error", "concavity error",
        "concave points error", "symmetry error", "fractal dimension error",
        "worst radius", "worst texture", "worst perimeter", "worst area",
        "worst smoothness", "worst compactness", "worst concavity",
        "worst concave points", "worst symmetry", "worst fractal dimension",
    ]

    def tumor_data(self):
        """
        Takes input data to predict if tumor is malignant or benign.
        Prompts for each of the 30 measurements and returns them as a
        float array in the order listed in FEATURE_NAMES.
        """
        values = [float(input("Enter %s: " % name)) for name in self.FEATURE_NAMES]
        return np.array(values)

    def prediction(self, model, cancer_data):
        data = cancer_data.reshape(-1, 30)
        y_prediction = model.predict(data)
        return "malignant" if y_prediction[0] == 1 else "benign"
new_prediction = BreastCancerPrediction()
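
# A minimal end-to-end sketch of how this class is meant to be wired up. The
# classifier choice here is an assumption; note that scikit-learn's built-in
# dataset encodes target 1 as *benign*, the opposite of the mapping in
# prediction() above, so this pairing illustrates the wiring only.
if __name__ == '__main__':
    from sklearn.datasets import load_breast_cancer
    from sklearn.linear_model import LogisticRegression
    _data = load_breast_cancer()
    _model = LogisticRegression(max_iter=10000).fit(_data.data, _data.target)
    print(new_prediction.prediction(_model, new_prediction.tumor_data()))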
|
[
"[email protected]"
] | |
43d78dbe15d595afb8899abbcba35e810910cb14
|
b1aa0e99bb6aac1f94223a827dd4640699796992
|
/Virtuoso/asgi.py
|
ffd921b1ea915518f68def40b453e756addd0d84
|
[] |
no_license
|
PabonSC/virtuosoIT
|
8bc10c3006896c6e78c310da0f674a6bafd50929
|
af078e733bea692f6ba390eaa3df461ec39cd6aa
|
refs/heads/master
| 2023-04-27T13:46:29.440583 | 2021-05-17T14:51:43 | 2021-05-17T14:51:43 | 365,081,339 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 393 |
py
|
"""
ASGI config for Virtuoso project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'Virtuoso.settings')
application = get_asgi_application()
|
[
"[email protected]"
] | |
da63f9d704b0a7fff5ca176497ea1f7a922ada1e
|
8fbb31e965b137d68cd7cdb6adc7b03002e48fc1
|
/chatRoomPage/serializer.py
|
eec1400a4367dbfa73101a83072450b982a108da
|
[] |
no_license
|
jainilparikh/django-Chat-app
|
a2342f02bc1318c18d2cb79ec2f3a94082d260db
|
b0b20098cac2e7809bd16b1c3bce31aab224b95b
|
refs/heads/main
| 2023-04-21T17:43:48.191642 | 2021-04-18T17:51:27 | 2021-04-18T17:51:27 | 359,038,522 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 366 |
py
|
from rest_framework import serializers
from .models import Language, Message
class LanguageSerializer(serializers.ModelSerializer):
class Meta:
model = Language
fields = ('id', 'lan1', 'name')
class MessageSerializer(serializers.ModelSerializer):
class Meta:
model = Message
fields = ('id', 'sender', 'receiver', 'message')
|
[
"[email protected]"
] | |
43bc88646bf35d940099c4319656feaba747bd11
|
5cf9c23f2dd042af96afc8748af4cea391b99521
|
/2DMinecraft.py
|
23acc9bfc6ac66625470e011d1435a7142f14dfd
|
[] |
no_license
|
wilsoncyap/pythonCoding
|
548ede11d0ddbdd85c07094b2ecd6de12ce053a2
|
0f916e421362ed213d46300f6e08087c18574ba7
|
refs/heads/master
| 2021-04-18T19:17:18.627650 | 2018-04-04T21:19:05 | 2018-04-04T21:19:05 | 126,550,549 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 8,442 |
py
|
import pygame, sys
from pygame.locals import *
import random
from cloud import Cloud
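
# A minimal sketch of the Cloud class imported above -- an assumption for
# illustration (the real definition lives in cloud.py); the game loop only
# relies on the cloudx / cloudy / cloudspd attributes, shown here under a
# hypothetical name.
class _CloudSketch:
    def __init__(self, cloudx, cloudy, cloudspd):
        self.cloudx = cloudx      # current x position in pixels
        self.cloudy = cloudy      # current y position in pixels
        self.cloudspd = cloudspd  # pixels moved right per frame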
#constants representing colors
BLACK = (0,0,0)
BROWN = (153,76,0)
GREEN = (0,255,0)
BLUE = (0,0,255)
WHITE = (255,255,255)
#number of clouds in the game
numcloud = 20
fpsClock = pygame.time.Clock()
#constants representing the different resources
DIRT = 0
GRASS = 1
WATER = 2
COAL = 3
CLOUD = 4
WOOD = 5
FIRE = 6
SAND = 7
GLASS = 8
ROCK = 9
STONE = 10
BRICK = 11
DIAMOND = 12
#a list of resources
resources = [DIRT, GRASS, WATER, COAL, WOOD, FIRE, SAND, GLASS, ROCK, STONE, BRICK, DIAMOND]
#a dictionary linking resources to texture
textures = {
DIRT : pygame.image.load('dirt.png'),
GRASS : pygame.image.load('grass.png'),
WATER : pygame.image.load('water.png'),
COAL : pygame.image.load('coal.png'),
CLOUD : pygame.image.load('cloud.png'),
WOOD : pygame.image.load('wood.png'),
FIRE : pygame.image.load('fire.png'),
SAND : pygame.image.load('sand.png'),
GLASS : pygame.image.load('glass.png'),
ROCK : pygame.image.load('rock.png'),
STONE : pygame.image.load('stone.png'),
BRICK : pygame.image.load('brick.png'),
DIAMOND : pygame.image.load('diamond.png')
}
inventory = {
DIRT : 0,
GRASS : 0,
WATER : 0,
COAL : 0,
WOOD : 0,
FIRE : 0,
SAND : 0,
GLASS : 0,
ROCK : 0,
STONE : 0,
BRICK : 0,
DIAMOND : 0
}
#map each resource to the EVENT key used to place/craft it
controls = {
DIRT : 49, #event 49 is the '1' key
GRASS : 50, #event 50 is the '2' key, etc.
WATER : 51, #3 key
COAL : 52, #4 key
WOOD : 53, #5 key
FIRE : 54, #6 key
SAND : 55, #7 key
GLASS : 56, #8 key
ROCK : 57, #9 key
STONE: 48, #0 key
BRICK: 45, #- key
DIAMOND : 61 #= key
}
craft = {
FIRE : {WOOD : 2, ROCK : 2},
STONE : {ROCK : 2},
GLASS : {FIRE : 1, SAND : 2},
DIAMOND : {WOOD : 2, COAL : 3},
BRICK : {ROCK : 2, FIRE : 1},
SAND : {ROCK : 2}
}
#the player image
PLAYER = pygame.image.load('player.png')
#the position of the player [x,y]
playerPos = [0,0]
#game dimensions
TILESIZE = 20
MAPWIDTH = 50
MAPHEIGHT = 30
tilemap = [[DIRT for w in range(MAPWIDTH)] for h in range(MAPHEIGHT)]
clouds = []
for i in range(numcloud):
clouds.append(Cloud(random.randint(-200, -50), random.randint(0, MAPHEIGHT*TILESIZE), random.randint(1, 3)))
#set up the display
pygame.init()
DISPLAYSURF = pygame.display.set_mode((MAPWIDTH*TILESIZE,MAPHEIGHT*TILESIZE + 50))
#add a font for our inventory
INVFONT = pygame.font.Font('FreeSansBold.ttf', 18)
#loop through each row
for rw in range(MAPHEIGHT):
#loop through each column in that row
for cl in range(MAPWIDTH):
#pick a random number between 0 and 100
randomNumber = random.randint(0,100)
if randomNumber == 0:
tile = DIAMOND
elif randomNumber >= 1 and randomNumber <= 5:
tile = ROCK
elif randomNumber >= 6 and randomNumber <= 15:
tile = COAL
elif randomNumber >= 16 and randomNumber <= 25:
tile = SAND
elif randomNumber >= 26 and randomNumber <= 40:
tile = WOOD
elif randomNumber >= 41 and randomNumber <= 55:
tile = WATER
elif randomNumber >= 56 and randomNumber <= 80:
tile = GRASS
else:
tile = DIRT
        #set the position in the tilemap to the randomly chosen tile
tilemap[rw][cl] = tile
while True:
DISPLAYSURF.fill(BLACK)
#get all the user events
for event in pygame.event.get():
#if the user wants to quit
if event.type == QUIT:
#end the game and close the window
pygame.quit()
sys.exit()
#if a key is pressed
elif event.type == KEYDOWN:
#if the right arrow is pressed
            if event.key == K_RIGHT and playerPos[0] < MAPWIDTH - 1:
#change the player's x position positive
playerPos[0] += 1
#if the left arrow key is pressed
if event.key == K_LEFT and playerPos[0] > 0:
#change the player's x position negative
playerPos[0] -= 1
#if the down key is pressed
            if event.key == K_DOWN and playerPos[1] < MAPHEIGHT - 1:
#change player's y position positive
playerPos[1] += 1
#if the up arrow is pressed
if event.key == K_UP and playerPos[1] > 0:
#change the player's y position negative
playerPos[1] -= 1
if event.key == K_SPACE:
#what resource is the player standing on?
currentTile = tilemap[playerPos[1]][playerPos[0]]
#player now has 1 more of this resource
inventory[currentTile] += 1
#the player is now standing on dirt
tilemap[playerPos[1]][playerPos[0]] = DIRT
for key in controls:
#if this key was pressed
if event.key == controls[key]:
#CRAFT if the mouse is also pressed
if pygame.mouse.get_pressed()[0]:
#if the item can be crafted
if key in craft:
#keeps track of whether we have the resources
#to craft this item
canBeMade = True
#for each item needed to craft...
for i in craft[key]:
#... if we don't have enough...
if craft[key][i] > inventory[i]:
#... we can't craft it!
canBeMade = False
break
#if we can craft it (we have all needed resources)
if canBeMade == True:
#take each item from the inventory
for i in craft[key]:
inventory[i] -= craft[key][i]
#add the crafted item to the inventory
inventory[key] += 1
#PLACE if the mouse wasn't pressed
else:
#get the tile the player is standing on
currentTile = tilemap[playerPos[1]][playerPos[0]]
#if we have the item to place
if inventory[key] > 0:
#take it from the inventory
inventory[key] -= 1
#swap it with the tile we are standing on
inventory[currentTile] += 1
#place the item
tilemap[playerPos[1]][playerPos[0]] = key
#loop through each row
for row in range(MAPHEIGHT):
#loop through each column in the row
for column in range(MAPWIDTH):
#draw the resource at that position in the tilemap, using the correct image
DISPLAYSURF.blit(textures[tilemap[row][column]], (column*TILESIZE,row*TILESIZE))
#display the player at the correct position
DISPLAYSURF.blit(PLAYER,(playerPos[0]*TILESIZE,playerPos[1]*TILESIZE))
#loop through each cloud
for c in range(numcloud):
#display the cloud
DISPLAYSURF.blit(textures[CLOUD].convert_alpha(), (clouds[c].cloudx,clouds[c].cloudy))
#move the cloud to the left slightly
clouds[c].cloudx += clouds[c].cloudspd
#if cloud has moved past the map
if clouds[c].cloudx > MAPWIDTH*TILESIZE:
#pick a new position to place the cloud
clouds[c].cloudy = random.randint(0,MAPHEIGHT*TILESIZE)
clouds[c].cloudx = -200
    #display the inventory, starting 10 pixels in
placePosition = 10
for item in resources:
#add the image
DISPLAYSURF.blit(textures[item],(placePosition, MAPHEIGHT*TILESIZE+20))
placePosition += 30
#add the text showing the amount in the inventory
textObj = INVFONT.render(str(inventory[item]), True, WHITE, BLACK)
DISPLAYSURF.blit(textObj,(placePosition,MAPHEIGHT*TILESIZE+20))
placePosition += 50
#update the display
pygame.display.update()
fpsClock.tick(24)
|
[
"[email protected]"
] | |
bce9236ba47161386e6d78fb6ab91d9f22cd2864
|
8cff9687a2833a370b9d9dee9176e1fbc6d01357
|
/Python3/uri 1080.py
|
6789e7256d8e37391ec919f4f6e85b99f0d20371
|
[] |
no_license
|
asadugalib/URI-Solution
|
aec9619c70347f331b62bcb569e4dcc426ed9cb6
|
2713ff5bd4c0bb55455d14baf05104b8b22d8e21
|
refs/heads/master
| 2023-03-26T21:12:24.649479 | 2021-03-30T14:43:27 | 2021-03-30T14:43:27 | 255,056,382 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 130 |
py
|
#1080
number = []
for i in range(100):
number.append(int(input()))
print(max(number))
print(number.index(max(number))+1)
|
[
"[email protected]"
] | |
0d8ecad2d93709388b54de364493ba3f9fbd140c
|
b30945eb14483621d0f198523bb268c0f4d4d080
|
/eventnodes/image/resize.py
|
aa512b0ba9ffe93eb9b3ae714956d8c298ff4d2d
|
[
"BSD-3-Clause"
] |
permissive
|
cgboffin/Subotai
|
bf38493dbdafb741918a596fa34857d226693910
|
607381ce41ea450bca094a5dc8990649dbaa0390
|
refs/heads/master
| 2023-08-14T08:58:40.609003 | 2021-10-03T17:40:28 | 2021-10-03T17:40:28 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,701 |
py
|
from PySide2 import QtCore
from PySide2.QtCore import Slot
from .baseimage import BaseImageNode
from eventnodes.base import ComputeNode
from eventnodes.params import StringParam, IntParam, PARAM
from eventnodes.signal import Signal, INPUT_PLUG, OUTPUT_PLUG
from .imageparam import ImageParam
from PIL import Image
class Resize(BaseImageNode):
type = 'ResizeImage'
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.signals.append(Signal(node=self, name='event', pluggable=INPUT_PLUG))
self.signals.append(Signal(node=self, name='event', pluggable=OUTPUT_PLUG))
self.params.append(ImageParam(name='image', value=None, pluggable=PARAM | INPUT_PLUG))
self.params.append(IntParam(name='width', value=0, pluggable=PARAM | INPUT_PLUG))
self.params.append(IntParam(name='height', value=0, pluggable=PARAM | INPUT_PLUG))
self.params.append(ImageParam(name='image', value=None, pluggable=PARAM | OUTPUT_PLUG))
@ComputeNode.Decorators.show_ui_computation
def compute(self):
self.start_spinner_signal.emit()
width = self.get_first_param('width', pluggable=INPUT_PLUG)
height = self.get_first_param('height', pluggable=INPUT_PLUG)
image_ = self.get_first_param('image', pluggable=INPUT_PLUG)
out_image_ = self.get_first_param('image', pluggable=OUTPUT_PLUG)
if image_():
img = image_()
img = img.resize((int(width()), int(height())))
out_image_.value = img
signal = self.get_first_signal('event', pluggable=OUTPUT_PLUG)
self.stop_spinner_signal.emit()
signal.emit_event()
super().compute()
|
[
"[email protected]"
] | |
623583644c0d02dc968de94f521369aee4a8764d
|
9ad099b2aa9dff96e1ce66b0726a580b039d285e
|
/auth/api/v1/models/users.py
|
9a2c84af6d4d879ed5979eb22415ad7d86892594
|
[] |
no_license
|
Ivan-Terex91/ugc_sprint_2
|
26528b6b4e9fcab82f44db6e9b33121bad6b388b
|
2fee2350a4cae4ac3b06f0b4ebb0d5cb0dda7e33
|
refs/heads/main
| 2023-06-20T16:25:12.154781 | 2021-07-26T13:28:56 | 2021-07-26T13:28:56 | 385,500,298 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 691 |
py
|
from api import api
from flask_restx import fields
UserModel = api.model(
"UserModel",
{
"id": fields.String(readonly=True, as_uuid=True),
"first_name": fields.String(),
"last_name": fields.String(),
"birthdate": fields.Date(),
"country": fields.String(),
"email": fields.String(),
},
)
LoginRequestModel = api.model(
"LoginRequestModel",
{
"email": fields.String(required=True),
"password": fields.String(required=True),
},
)
ChangePassword = api.model(
"ChangePassword",
{
"old_password": fields.String(required=True),
"new_password": fields.String(required=True),
},
)
|
[
"[email protected]"
] | |
c9a499e0b0d202e5ea52f5ef6a9c4580d811345f
|
15f321878face2af9317363c5f6de1e5ddd9b749
|
/solutions_python/Problem_45/72.py
|
8cf783ea15df40bf45a0fc4e0429b4f48fca706b
|
[] |
no_license
|
dr-dos-ok/Code_Jam_Webscraper
|
c06fd59870842664cd79c41eb460a09553e1c80a
|
26a35bf114a3aa30fc4c677ef069d95f41665cc0
|
refs/heads/master
| 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,556 |
py
|
#!/usr/bin/env python
#
# jam.py
#
# Copyright 2009 Denis <denis@denis-desktop>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
#import re
import itertools
tor = []
p = 0
q = 0
def solve(left, right):
global tor
if not tor:
return 0
if left > right:
return 0
i = 0
middle = float(right + left)/float(2)
'''
goon = True
l = len(tor)
while goon:
if i >= l:
goon = False
i -= 1
if tor[i] > middle:
goon = False
i += 1
i -= 1
if i > 0 and abs(middle - tor[i-1]) <= abs(middle - tor[i]) and tor[i-1] >= left:
i -= 1
'''
min = {'diff': 99999, 'pos': -1}
for i in xrange(0,len(tor)):
newdiff = abs(middle-tor[i])
if newdiff < min['diff']:
min['diff'] = newdiff
min['pos'] = i
released = tor[min['pos']]
if released < left or released > right:
return 0
#print left,' ',middle,' ',right
#print 'of',tor,'choose',released
del tor[min['pos']]
answer = right-left
answer += solve(left, released-1)
answer += solve(released+1, right)
return answer
def force(to, left, right):
aaa = 99999
if not to:
return 0
if left == right:
return 0
i = 0
#print 'Got',to,left,right
l = len(to)
while i < l and to[i] < left:
i += 1
#print 'Skipped to',i,'(',to[i],')'
while i < l and to[i] <= right:
answer = right-left
if i > 0:
answer += force(to[:i], left, to[i]-1)
if i < l:
answer += force(to[i+1:], to[i]+1, right)
aaa = min(aaa, answer)
i += 1
return aaa
def main():
global tor, p, q
with open("C-small-attempt5.in") as f:
n = f.readline()
n = int(n)
for case in xrange(1, n+1):
p, q = map(int, f.readline().strip().split(' '))
tor = map(int, f.readline().strip().split(' '))
#answer = solve(1, p)
answer = force(tor, 1, p)
print "Case #%d: %d" % (case, answer)
return 0
if __name__ == '__main__': main()
|
[
"[email protected]"
] | |
1faf49c702992a2c1a658d1ecaa560137a81edcd
|
d7f226a00a95b6dae842d4f8ab83ec892781a0e9
|
/src_xml/model/transformer.py
|
bc41cd5464ac83e6bfce455e4ceed11b969ef734
|
[] |
no_license
|
archfool/polysemy
|
7d2afddfae4e4f81cf84262c1f2465c86937d3fb
|
e4e7c6a22b506bc5b5cf424c15a7c5e75453e9c0
|
refs/heads/main
| 2023-05-05T21:33:06.146040 | 2021-05-22T10:11:12 | 2021-05-22T10:11:12 | 329,535,871 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 34,239 |
py
|
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
from logging import getLogger
import math
import itertools
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from .memory import HashingMemory
N_MAX_POSITIONS = 512 # maximum input sequence length
DECODER_ONLY_PARAMS = [
'layer_norm15.%i.weight', 'layer_norm15.%i.bias',
'encoder_attn.%i.q_lin.weight', 'encoder_attn.%i.q_lin.bias',
'encoder_attn.%i.k_lin.weight', 'encoder_attn.%i.k_lin.bias',
'encoder_attn.%i.v_lin.weight', 'encoder_attn.%i.v_lin.bias',
'encoder_attn.%i.out_lin.weight', 'encoder_attn.%i.out_lin.bias'
]
TRANSFORMER_LAYER_PARAMS = [
'attentions.%i.q_lin.weight', 'attentions.%i.q_lin.bias',
'attentions.%i.k_lin.weight', 'attentions.%i.k_lin.bias',
'attentions.%i.v_lin.weight', 'attentions.%i.v_lin.bias',
'attentions.%i.out_lin.weight', 'attentions.%i.out_lin.bias',
'layer_norm1.%i.weight', 'layer_norm1.%i.bias',
'ffns.%i.lin1.weight', 'ffns.%i.lin1.bias',
'ffns.%i.lin2.weight', 'ffns.%i.lin2.bias',
'layer_norm2.%i.weight', 'layer_norm2.%i.bias'
]
logger = getLogger()
def Embedding(num_embeddings, embedding_dim, padding_idx=None):
m = nn.Embedding(num_embeddings, embedding_dim, padding_idx=padding_idx)
nn.init.normal_(m.weight, mean=0, std=embedding_dim ** -0.5)
if padding_idx is not None:
nn.init.constant_(m.weight[padding_idx], 0)
return m
def Linear(in_features, out_features, bias=True):
m = nn.Linear(in_features, out_features, bias)
# nn.init.normal_(m.weight, mean=0, std=1)
# nn.init.xavier_uniform_(m.weight)
# nn.init.constant_(m.bias, 0.)
return m
def create_sinusoidal_embeddings(n_pos, dim, out):
position_enc = np.array([
[pos / np.power(10000, 2 * (j // 2) / dim) for j in range(dim)]
for pos in range(n_pos)
])
out[:, 0::2] = torch.FloatTensor(np.sin(position_enc[:, 0::2]))
out[:, 1::2] = torch.FloatTensor(np.cos(position_enc[:, 1::2]))
out.detach_()
out.requires_grad = False
def gelu(x):
"""
GELU activation
https://arxiv.org/abs/1606.08415
https://github.com/huggingface/pytorch-openai-transformer-lm/blob/master/model_pytorch.py#L14
https://github.com/huggingface/pytorch-pretrained-BERT/blob/master/modeling.py
"""
# return 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))
return 0.5 * x * (1.0 + torch.erf(x / math.sqrt(2.0)))
def get_masks(slen, lengths, causal):
"""
Generate hidden states mask, and optionally an attention mask.
"""
assert lengths.max().item() <= slen
bs = lengths.size(0)
alen = torch.arange(slen, dtype=torch.long, device=lengths.device)
mask = alen < lengths[:, None]
# attention mask is the same as mask, or triangular inferior attention (causal)
if causal:
attn_mask = alen[None, None, :].repeat(bs, slen, 1) <= alen[None, :, None]
else:
attn_mask = mask
# sanity check
assert mask.size() == (bs, slen)
assert causal is False or attn_mask.size() == (bs, slen, slen)
return mask, attn_mask
class PredLayer(nn.Module):
"""
Prediction layer (cross_entropy or adaptive_softmax).
"""
def __init__(self, params):
super().__init__()
self.asm = params.asm
self.n_words = params.n_words
self.pad_index = params.pad_index
dim = params.emb_dim
if params.asm is False:
self.proj = Linear(dim, params.n_words, bias=True)
else:
self.proj = nn.AdaptiveLogSoftmaxWithLoss(
in_features=dim,
n_classes=params.n_words,
cutoffs=params.asm_cutoffs,
div_value=params.asm_div_value,
head_bias=True, # default is False
)
def forward(self, x, y, get_scores=False):
"""
Compute the loss, and optionally the scores.
"""
assert (y == self.pad_index).sum().item() == 0
if self.asm is False:
scores = self.proj(x).view(-1, self.n_words)
loss = F.cross_entropy(scores, y, reduction='mean')
else:
_, loss = self.proj(x, y)
scores = self.proj.log_prob(x) if get_scores else None
return scores, loss
def get_scores(self, x):
"""
Compute scores.
"""
assert x.dim() == 2
return self.proj.log_prob(x) if self.asm else self.proj(x)
class MultiHeadAttention(nn.Module):
NEW_ID = itertools.count()
def __init__(self, n_heads, dim, dropout):
super().__init__()
self.layer_id = next(MultiHeadAttention.NEW_ID)
self.dim = dim
self.n_heads = n_heads
self.dropout = dropout
assert self.dim % self.n_heads == 0
self.q_lin = Linear(dim, dim)
self.k_lin = Linear(dim, dim)
self.v_lin = Linear(dim, dim)
self.out_lin = Linear(dim, dim)
def forward(self, input, mask, kv=None, cache=None):
"""
Self-attention (if kv is None) or attention over source sentence (provided by kv).
"""
# Input is (bs, qlen, dim)
# Mask is (bs, klen) (non-causal) or (bs, klen, klen)
bs, qlen, dim = input.size()
if kv is None:
klen = qlen if cache is None else cache['slen'] + qlen
else:
klen = kv.size(1)
assert dim == self.dim, 'Dimensions do not match: %s input vs %s configured' % (dim, self.dim)
n_heads = self.n_heads
dim_per_head = dim // n_heads
mask_reshape = (bs, 1, qlen, klen) if mask.dim() == 3 else (bs, 1, 1, klen)
def shape(x):
""" projection """
return x.view(bs, -1, self.n_heads, dim_per_head).transpose(1, 2)
def unshape(x):
""" compute context """
return x.transpose(1, 2).contiguous().view(bs, -1, self.n_heads * dim_per_head)
q = shape(self.q_lin(input)) # (bs, n_heads, qlen, dim_per_head)
if kv is None:
k = shape(self.k_lin(input)) # (bs, n_heads, qlen, dim_per_head)
v = shape(self.v_lin(input)) # (bs, n_heads, qlen, dim_per_head)
elif cache is None or self.layer_id not in cache:
k = v = kv
k = shape(self.k_lin(k)) # (bs, n_heads, qlen, dim_per_head)
v = shape(self.v_lin(v)) # (bs, n_heads, qlen, dim_per_head)
if cache is not None:
if self.layer_id in cache:
if kv is None:
k_, v_ = cache[self.layer_id]
k = torch.cat([k_, k], dim=2) # (bs, n_heads, klen, dim_per_head)
v = torch.cat([v_, v], dim=2) # (bs, n_heads, klen, dim_per_head)
else:
k, v = cache[self.layer_id]
cache[self.layer_id] = (k, v)
q = q / math.sqrt(dim_per_head) # (bs, n_heads, qlen, dim_per_head)
scores = torch.matmul(q, k.transpose(2, 3)) # (bs, n_heads, qlen, klen)
mask = (mask == 0).view(mask_reshape).expand_as(scores) # (bs, n_heads, qlen, klen)
scores.masked_fill_(mask, -float('inf')) # (bs, n_heads, qlen, klen)
weights = F.softmax(scores.float(), dim=-1).type_as(scores) # (bs, n_heads, qlen, klen)
weights = F.dropout(weights, p=self.dropout, training=self.training) # (bs, n_heads, qlen, klen)
context = torch.matmul(weights, v) # (bs, n_heads, qlen, dim_per_head)
context = unshape(context) # (bs, qlen, dim)
return self.out_lin(context)
class TransformerFFN(nn.Module):
def __init__(self, in_dim, dim_hidden, out_dim, dropout, gelu_activation):
super().__init__()
self.dropout = dropout
self.lin1 = Linear(in_dim, dim_hidden)
self.lin2 = Linear(dim_hidden, out_dim)
self.act = gelu if gelu_activation else F.relu
def forward(self, input):
x = self.lin1(input)
x = self.act(x)
x = self.lin2(x)
x = F.dropout(x, p=self.dropout, training=self.training)
return x
class TransformerModel(nn.Module):
ATTRIBUTES = ['encoder', 'with_output', 'eos_index', 'pad_index', 'n_langs', 'n_words', 'dim', 'n_layers',
'n_heads', 'hidden_dim', 'dropout', 'attention_dropout', 'asm', 'asm_cutoffs', 'asm_div_value']
def __init__(self, params, dico, is_encoder, with_output):
"""
Transformer model (encoder or decoder).
"""
super().__init__()
# encoder / decoder, output layer
self.is_encoder = is_encoder
self.is_decoder = not is_encoder
self.with_output = with_output
# dictionary / languages
self.n_langs = params.n_langs
self.n_words = params.n_words
self.eos_index = params.eos_index
self.pad_index = params.pad_index
self.dico = dico
self.id2lang = params.id2lang
self.lang2id = params.lang2id
self.use_lang_emb = getattr(params, 'use_lang_emb', True)
assert len(self.dico) == self.n_words
assert len(self.id2lang) == len(self.lang2id) == self.n_langs
# model parameters
self.dim = params.emb_dim # 512 by default
self.hidden_dim = self.dim * 4 # 2048 by default
self.n_heads = params.n_heads # 8 by default
self.n_layers = params.n_layers
self.dropout = params.dropout
self.attention_dropout = params.attention_dropout
assert self.dim % self.n_heads == 0, 'transformer dim must be a multiple of n_heads'
# embeddings
self.position_embeddings = Embedding(N_MAX_POSITIONS, self.dim)
if params.sinusoidal_embeddings:
create_sinusoidal_embeddings(N_MAX_POSITIONS, self.dim, out=self.position_embeddings.weight)
if params.n_langs > 1 and self.use_lang_emb:
self.lang_embeddings = Embedding(self.n_langs, self.dim)
self.embeddings = Embedding(self.n_words, self.dim, padding_idx=self.pad_index)
self.layer_norm_emb = nn.LayerNorm(self.dim, eps=1e-12)
# transformer layers
self.attentions = nn.ModuleList()
self.layer_norm1 = nn.ModuleList()
self.ffns = nn.ModuleList()
self.layer_norm2 = nn.ModuleList()
if self.is_decoder:
self.layer_norm15 = nn.ModuleList()
self.encoder_attn = nn.ModuleList()
# memories
self.memories = nn.ModuleDict()
if getattr(params, 'use_memory', False):
mem_positions = params.mem_enc_positions if is_encoder else params.mem_dec_positions
for layer_id, pos in mem_positions:
assert 0 <= layer_id <= params.n_layers - 1
assert pos in ['in', 'after']
self.memories['%i_%s' % (layer_id, pos)] = HashingMemory.build(self.dim, self.dim, params)
for layer_id in range(self.n_layers):
self.attentions.append(MultiHeadAttention(self.n_heads, self.dim, dropout=self.attention_dropout))
self.layer_norm1.append(nn.LayerNorm(self.dim, eps=1e-12))
if self.is_decoder:
self.layer_norm15.append(nn.LayerNorm(self.dim, eps=1e-12))
self.encoder_attn.append(MultiHeadAttention(self.n_heads, self.dim, dropout=self.attention_dropout))
if ('%i_in' % layer_id) in self.memories:
self.ffns.append(None)
else:
self.ffns.append(TransformerFFN(self.dim, self.hidden_dim, self.dim, dropout=self.dropout,
gelu_activation=params.gelu_activation))
self.layer_norm2.append(nn.LayerNorm(self.dim, eps=1e-12))
# output layer
if self.with_output:
self.pred_layer = PredLayer(params)
if params.share_inout_emb:
self.pred_layer.proj.weight = self.embeddings.weight
# polysemy
self.polysemy_dense = nn.Linear(self.dim * 6, 2)
def forward(self, mode, **kwargs):
"""
Forward function with different forward modes.
### Small hack to handle PyTorch distributed.
"""
if mode == 'fwd':
return self.fwd(**kwargs)
elif mode == 'predict':
return self.predict(**kwargs)
elif mode == 'polysemy':
return self.polysemy(**kwargs)
elif mode == 'polysemy_predict':
return self.polysemy_predict(**kwargs)
else:
raise Exception("Unknown mode: %s" % mode)
def fwd(self, x, lengths, causal, src_enc=None, src_len=None, positions=None, langs=None, cache=None):
"""
Inputs:
`x` LongTensor(slen, bs), containing word indices
`lengths` LongTensor(bs), containing the length of each sentence
`causal` Boolean, if True, the attention is only done over previous hidden states
`positions` LongTensor(slen, bs), containing word positions
`langs` LongTensor(slen, bs), containing language IDs
"""
# lengths = (x != self.pad_index).float().sum(dim=1)
# mask = x != self.pad_index
# check inputs
slen, bs = x.size()
assert lengths.size(0) == bs
assert lengths.max().item() <= slen
x = x.transpose(0, 1) # batch size as dimension 0
assert (src_enc is None) == (src_len is None)
if src_enc is not None:
assert self.is_decoder
assert src_enc.size(0) == bs
# generate masks
mask, attn_mask = get_masks(slen, lengths, causal)
if self.is_decoder and src_enc is not None:
src_mask = torch.arange(src_len.max(), dtype=torch.long, device=lengths.device) < src_len[:, None]
# positions
if positions is None:
positions = x.new(slen).long()
positions = torch.arange(slen, out=positions).unsqueeze(0)
else:
assert positions.size() == (slen, bs)
positions = positions.transpose(0, 1)
# langs
if langs is not None:
assert langs.size() == (slen, bs)
langs = langs.transpose(0, 1)
# do not recompute cached elements
if cache is not None:
_slen = slen - cache['slen']
x = x[:, -_slen:]
positions = positions[:, -_slen:]
if langs is not None:
langs = langs[:, -_slen:]
mask = mask[:, -_slen:]
attn_mask = attn_mask[:, -_slen:]
# embeddings
tensor = self.embeddings(x)
tensor = tensor + self.position_embeddings(positions).expand_as(tensor)
if langs is not None and self.use_lang_emb:
tensor = tensor + self.lang_embeddings(langs)
tensor = self.layer_norm_emb(tensor)
tensor = F.dropout(tensor, p=self.dropout, training=self.training)
tensor *= mask.unsqueeze(-1).to(tensor.dtype)
# transformer layers
for i in range(self.n_layers):
# self attention
attn = self.attentions[i](tensor, attn_mask, cache=cache)
attn = F.dropout(attn, p=self.dropout, training=self.training)
tensor = tensor + attn
tensor = self.layer_norm1[i](tensor)
# encoder attention (for decoder only)
if self.is_decoder and src_enc is not None:
attn = self.encoder_attn[i](tensor, src_mask, kv=src_enc, cache=cache)
attn = F.dropout(attn, p=self.dropout, training=self.training)
tensor = tensor + attn
tensor = self.layer_norm15[i](tensor)
# FFN
if ('%i_in' % i) in self.memories:
tensor = tensor + self.memories['%i_in' % i](tensor)
else:
tensor = tensor + self.ffns[i](tensor)
tensor = self.layer_norm2[i](tensor)
# memory
if ('%i_after' % i) in self.memories:
tensor = tensor + self.memories['%i_after' % i](tensor)
# TODO: add extra layer norm here?
tensor *= mask.unsqueeze(-1).to(tensor.dtype)
# update cache length
if cache is not None:
cache['slen'] += tensor.size(1)
# move back sequence length to dimension 0
tensor = tensor.transpose(0, 1)
return tensor
def predict(self, tensor, pred_mask, y, get_scores):
"""
Given the last hidden state, compute word scores and/or the loss.
`pred_mask` is a ByteTensor of shape (slen, bs), filled with 1 when
we need to predict a word
`y` is a LongTensor of shape (pred_mask.sum(),)
`get_scores` is a boolean specifying whether we need to return scores
"""
masked_tensor = tensor[pred_mask.unsqueeze(-1).expand_as(tensor)].view(-1, self.dim)
scores, loss = self.pred_layer(masked_tensor, y, get_scores)
return scores, loss
def polysemy(self, x):
data_sent1, data_sent2 = x
word_ids_1, lengths_1, langs_1, positions_1, key_word_idxs_1 = data_sent1
word_ids_2, lengths_2, langs_2, positions_2, key_word_idxs_2 = data_sent2
tensor_sent1 = self.fwd(x=word_ids_1, lengths=lengths_1, positions=positions_1, langs=langs_1, causal=False)
tensor_sent2 = self.fwd(x=word_ids_2, lengths=lengths_2, positions=positions_2, langs=langs_2, causal=False)
def cal_similarity(tensor_sent1_single, key_word_idx_1, tensor_sent2_single, key_word_idx_2):
if torch.cuda.is_available():
cos_sim_list = torch.empty(len(key_word_idx_1)*len(key_word_idx_2), device='cuda')
else:
cos_sim_list = torch.empty(len(key_word_idx_1)*len(key_word_idx_2), device='cpu')
count = 0
for id_1 in key_word_idx_1:
for id_2 in key_word_idx_2:
id_1_tensor = torch.tensor(id_1, dtype=torch.long)\
.unsqueeze(0).unsqueeze(1).expand([-1, tensor_sent1_single.size()[1]])
id_2_tensor = torch.tensor(id_2, dtype=torch.long)\
.unsqueeze(0).unsqueeze(1).expand([-1, tensor_sent2_single.size()[1]])
if torch.cuda.is_available():
id_1_tensor = id_1_tensor.cuda()
id_2_tensor = id_2_tensor.cuda()
key_word_tensor_1 = torch.gather(tensor_sent1_single, dim=0, index=id_1_tensor)
key_word_tensor_2 = torch.gather(tensor_sent2_single, dim=0, index=id_2_tensor)
cos_sim_list[count] = torch.cosine_similarity(key_word_tensor_1, key_word_tensor_2, dim=1)
count += 1
cos_sim = torch.cat((torch.max(cos_sim_list).reshape([1,1]),torch.mean(cos_sim_list).reshape([1,1])), dim=1)
return cos_sim
if torch.cuda.is_available():
cos_sim_batch = torch.empty(tensor_sent1.size()[1], 2, device='cuda')
else:
cos_sim_batch = torch.empty(tensor_sent1.size()[1], 2, device='cpu')
for i, (key_word_idx_1, key_word_idx_2) in enumerate(zip(key_word_idxs_1, key_word_idxs_2)):
cos_sim = cal_similarity(tensor_sent1[:, i, :], key_word_idx_1, tensor_sent2[:, i, :], key_word_idx_2)
cos_sim_batch[i, :] = cos_sim
def get_feature_tensor(tensor_sent, key_word_idxs):
if torch.cuda.is_available():
feature_tensor_batch = torch.empty(tensor_sent.size()[1], 2, tensor_sent.size()[2], device='cuda')
else:
feature_tensor_batch = torch.empty(tensor_sent.size()[1], 2, tensor_sent.size()[2], device='cpu')
for i, key_word in enumerate(key_word_idxs):
tensor_single = tensor_sent[:, i, :]
index = torch.tensor(key_word, dtype=torch.long).unsqueeze(1).expand([-1, tensor_single.size()[1]])
if torch.cuda.is_available():
index = index.cuda()
key_word_tensor = torch.gather(tensor_single, dim=0, index=index)
key_word_tensor_avg_pooling = torch.mean(key_word_tensor, dim=0)
key_word_tensor_max_pooling = torch.max(key_word_tensor, dim=0).values
# sent_tensor = tensor_single[0, :]
# feature_tensor_single = torch.cat((key_word_tensor_max_pooling,
# key_word_tensor_avg_pooling,
# sent_tensor
# ), dim=0)
feature_tensor_batch[i, 0, :] = key_word_tensor_avg_pooling
feature_tensor_batch[i, 1, :] = key_word_tensor_max_pooling
return feature_tensor_batch
feature_tensor_sent1 = get_feature_tensor(tensor_sent1, key_word_idxs_1)
feature_tensor_sent2 = get_feature_tensor(tensor_sent2, key_word_idxs_2)
feature_tensor = torch.cat((
torch.cosine_similarity(feature_tensor_sent1[:, 0, :], feature_tensor_sent2[:, 0, :], dim=1).unsqueeze(1),
torch.cosine_similarity(feature_tensor_sent1[:, 1, :], feature_tensor_sent2[:, 1, :], dim=1).unsqueeze(1),
torch.cosine_similarity(feature_tensor_sent1[:, 0, :], feature_tensor_sent2[:, 1, :], dim=1).unsqueeze(1),
torch.cosine_similarity(feature_tensor_sent1[:, 1, :], feature_tensor_sent2[:, 0, :], dim=1).unsqueeze(1)
), dim=1)
return torch.cat((cos_sim_batch, feature_tensor), dim=1)
def polysemy_predict(self, x):
tensor = self.polysemy(x)
result = F.softmax(self.polysemy_dense(tensor), dim=-1)
        return result  # softmax probabilities over the two classes
def generate(self, src_enc, src_len, tgt_lang_id, max_len=200, sample_temperature=None):
"""
Decode a sentence given initial start.
`x`:
- LongTensor(bs, slen)
<EOS> W1 W2 W3 <EOS> <PAD>
<EOS> W1 W2 W3 W4 <EOS>
`lengths`:
- LongTensor(bs) [5, 6]
`positions`:
- False, for regular "arange" positions (LM)
- True, to reset positions from the new generation (MT)
`langs`:
- must be None if the model only supports one language
- lang_id if only one language is involved (LM)
- (lang_id1, lang_id2) if two languages are involved (MT)
"""
# input batch
bs = len(src_len)
assert src_enc.size(0) == bs
# generated sentences
generated = src_len.new(max_len, bs) # upcoming output
        generated.fill_(self.pad_index)  # fill upcoming output with <PAD>
generated[0].fill_(self.eos_index) # we use <EOS> for <BOS> everywhere
# positions
positions = src_len.new(max_len).long()
positions = torch.arange(max_len, out=positions).unsqueeze(1).expand(max_len, bs)
# language IDs
langs = src_len.new(max_len).long().fill_(tgt_lang_id)
langs = langs.unsqueeze(1).expand(max_len, bs)
# current position / max lengths / length of generated sentences / unfinished sentences
cur_len = 1
gen_len = src_len.clone().fill_(1)
unfinished_sents = src_len.clone().fill_(1)
# cache compute states
cache = {'slen': 0}
while cur_len < max_len:
# compute word scores
tensor = self.forward(
'fwd',
x=generated[:cur_len],
lengths=gen_len,
positions=positions[:cur_len],
langs=langs[:cur_len],
causal=True,
src_enc=src_enc,
src_len=src_len,
cache=cache
)
assert tensor.size() == (1, bs, self.dim), (
cur_len, max_len, src_enc.size(), tensor.size(), (1, bs, self.dim))
tensor = tensor.data[-1, :, :].type_as(src_enc) # (bs, dim)
scores = self.pred_layer.get_scores(tensor) # (bs, n_words)
# select next words: sample or greedy
if sample_temperature is None:
next_words = torch.topk(scores, 1)[1].squeeze(1)
else:
next_words = torch.multinomial(F.softmax(scores / sample_temperature, dim=1), 1).squeeze(1)
assert next_words.size() == (bs,)
# update generations / lengths / finished sentences / current length
generated[cur_len] = next_words * unfinished_sents + self.pad_index * (1 - unfinished_sents)
gen_len.add_(unfinished_sents)
unfinished_sents.mul_(next_words.ne(self.eos_index).long())
cur_len = cur_len + 1
            # stop when there is a </s> in each sentence, or if we exceed the maximum length
if unfinished_sents.max() == 0:
break
# add <EOS> to unfinished sentences
if cur_len == max_len:
generated[-1].masked_fill_(unfinished_sents.byte(), self.eos_index)
# sanity check
assert (generated == self.eos_index).sum() == 2 * bs
return generated[:cur_len], gen_len
def generate_beam(self, src_enc, src_len, tgt_lang_id, beam_size, length_penalty, early_stopping, max_len=200):
"""
Decode a sentence given initial start.
`x`:
- LongTensor(bs, slen)
<EOS> W1 W2 W3 <EOS> <PAD>
<EOS> W1 W2 W3 W4 <EOS>
`lengths`:
- LongTensor(bs) [5, 6]
`positions`:
- False, for regular "arange" positions (LM)
- True, to reset positions from the new generation (MT)
`langs`:
- must be None if the model only supports one language
- lang_id if only one language is involved (LM)
- (lang_id1, lang_id2) if two languages are involved (MT)
"""
# check inputs
assert src_enc.size(0) == src_len.size(0)
assert beam_size >= 1
# batch size / number of words
bs = len(src_len)
n_words = self.n_words
# expand to beam size the source latent representations / source lengths
src_enc = src_enc.unsqueeze(1).expand((bs, beam_size) + src_enc.shape[1:]).contiguous().view(
(bs * beam_size,) + src_enc.shape[1:])
src_len = src_len.unsqueeze(1).expand(bs, beam_size).contiguous().view(-1)
# generated sentences (batch with beam current hypotheses)
generated = src_len.new(max_len, bs * beam_size) # upcoming output
        generated.fill_(self.pad_index)  # fill upcoming output with <PAD>
generated[0].fill_(self.eos_index) # we use <EOS> for <BOS> everywhere
# generated hypotheses
generated_hyps = [BeamHypotheses(beam_size, max_len, length_penalty, early_stopping) for _ in range(bs)]
# positions
positions = src_len.new(max_len).long()
positions = torch.arange(max_len, out=positions).unsqueeze(1).expand_as(generated)
# language IDs
langs = positions.clone().fill_(tgt_lang_id)
# scores for each sentence in the beam
beam_scores = src_enc.new(bs, beam_size).fill_(0)
beam_scores[:, 1:] = -1e9
beam_scores = beam_scores.view(-1)
# current position
cur_len = 1
# cache compute states
cache = {'slen': 0}
# done sentences
done = [False for _ in range(bs)]
while cur_len < max_len:
# compute word scores
tensor = self.forward(
'fwd',
x=generated[:cur_len],
lengths=src_len.new(bs * beam_size).fill_(cur_len),
positions=positions[:cur_len],
langs=langs[:cur_len],
causal=True,
src_enc=src_enc,
src_len=src_len,
cache=cache
)
assert tensor.size() == (1, bs * beam_size, self.dim)
tensor = tensor.data[-1, :, :] # (bs * beam_size, dim)
scores = self.pred_layer.get_scores(tensor) # (bs * beam_size, n_words)
scores = F.log_softmax(scores, dim=-1) # (bs * beam_size, n_words)
assert scores.size() == (bs * beam_size, n_words)
# select next words with scores
_scores = scores + beam_scores[:, None].expand_as(scores) # (bs * beam_size, n_words)
_scores = _scores.view(bs, beam_size * n_words) # (bs, beam_size * n_words)
next_scores, next_words = torch.topk(_scores, 2 * beam_size, dim=1, largest=True, sorted=True)
assert next_scores.size() == next_words.size() == (bs, 2 * beam_size)
# next batch beam content
# list of (bs * beam_size) tuple(next hypothesis score, next word, current position in the batch)
next_batch_beam = []
# for each sentence
for sent_id in range(bs):
# if we are done with this sentence
done[sent_id] = done[sent_id] or generated_hyps[sent_id].is_done(next_scores[sent_id].max().item())
if done[sent_id]:
next_batch_beam.extend([(0, self.pad_index, 0)] * beam_size) # pad the batch
continue
# next sentence beam content
next_sent_beam = []
# next words for this sentence
for idx, value in zip(next_words[sent_id], next_scores[sent_id]):
# get beam and word IDs
beam_id = idx // n_words
word_id = idx % n_words
# end of sentence, or next word
if word_id == self.eos_index or cur_len + 1 == max_len:
generated_hyps[sent_id].add(generated[:cur_len, sent_id * beam_size + beam_id].clone(),
value.item())
else:
next_sent_beam.append((value, word_id, sent_id * beam_size + beam_id))
# the beam for next step is full
if len(next_sent_beam) == beam_size:
break
# update next beam content
assert len(next_sent_beam) == 0 if cur_len + 1 == max_len else beam_size
if len(next_sent_beam) == 0:
next_sent_beam = [(0, self.pad_index, 0)] * beam_size # pad the batch
next_batch_beam.extend(next_sent_beam)
assert len(next_batch_beam) == beam_size * (sent_id + 1)
# sanity check / prepare next batch
assert len(next_batch_beam) == bs * beam_size
beam_scores = beam_scores.new([x[0] for x in next_batch_beam])
beam_words = generated.new([x[1] for x in next_batch_beam])
beam_idx = src_len.new([x[2] for x in next_batch_beam])
# re-order batch and internal states
generated = generated[:, beam_idx]
generated[cur_len] = beam_words
for k in cache.keys():
if k != 'slen':
cache[k] = (cache[k][0][beam_idx], cache[k][1][beam_idx])
# update current length
cur_len = cur_len + 1
# stop when we are done with each sentence
if all(done):
break
# visualize hypotheses
# print([len(x) for x in generated_hyps], cur_len)
# globals().update( locals() );
# !import code; code.interact(local=vars())
# for ii in range(bs):
# for ss, ww in sorted(generated_hyps[ii].hyp, key=lambda x: x[0], reverse=True):
# print("%.3f " % ss + " ".join(self.dico[x] for x in ww.tolist()))
# print("")
# select the best hypotheses
tgt_len = src_len.new(bs)
best = []
for i, hypotheses in enumerate(generated_hyps):
best_hyp = max(hypotheses.hyp, key=lambda x: x[0])[1]
tgt_len[i] = len(best_hyp) + 1 # +1 for the <EOS> symbol
best.append(best_hyp)
# generate target batch
decoded = src_len.new(tgt_len.max().item(), bs).fill_(self.pad_index)
for i, hypo in enumerate(best):
decoded[:tgt_len[i] - 1, i] = hypo
decoded[tgt_len[i] - 1, i] = self.eos_index
# sanity check
assert (decoded == self.eos_index).sum() == 2 * bs
return decoded, tgt_len
class BeamHypotheses(object):
def __init__(self, n_hyp, max_len, length_penalty, early_stopping):
"""
Initialize n-best list of hypotheses.
"""
self.max_len = max_len - 1 # ignoring <BOS>
self.length_penalty = length_penalty
self.early_stopping = early_stopping
self.n_hyp = n_hyp
self.hyp = []
self.worst_score = 1e9
def __len__(self):
"""
Number of hypotheses in the list.
"""
return len(self.hyp)
def add(self, hyp, sum_logprobs):
"""
Add a new hypothesis to the list.
"""
score = sum_logprobs / len(hyp) ** self.length_penalty
if len(self) < self.n_hyp or score > self.worst_score:
self.hyp.append((score, hyp))
if len(self) > self.n_hyp:
sorted_scores = sorted([(s, idx) for idx, (s, _) in enumerate(self.hyp)])
del self.hyp[sorted_scores[0][1]]
self.worst_score = sorted_scores[1][0]
else:
self.worst_score = min(score, self.worst_score)
def is_done(self, best_sum_logprobs):
"""
        If there are enough hypotheses and none of the hypotheses being generated
        can become better than the worst one in the heap, then we are done with this sentence.
"""
if len(self) < self.n_hyp:
return False
elif self.early_stopping:
return True
else:
return self.worst_score >= best_sum_logprobs / self.max_len ** self.length_penalty
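
# A short usage sketch for get_masks() above: for a padded batch with lengths
# [3, 5] and slen 5, `mask` flags the real (non-padding) positions and, with
# causal=True, the attention mask is additionally lower-triangular.
_mask, _attn_mask = get_masks(5, torch.tensor([3, 5]), causal=True)
assert _mask.size() == (2, 5) and _attn_mask.size() == (2, 5, 5)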
|
[
"[email protected]"
] | |
b00b909f46a01c1c9e3b70851c629b87b7869153
|
d69b84ee135aa772512f3adabe916dc790db6b68
|
/leaders/data/cjw_mysql.py
|
96ac832c227c2f6ef211d64d39061faed595bc8e
|
[] |
no_license
|
choi97201/DartFss
|
9605b617dc52c3ee5921b171f1325e4621b1f4b7
|
0d68b1dad23fb8fda9a4ed52c93a816e8433b151
|
refs/heads/master
| 2023-06-12T20:43:34.537026 | 2021-06-24T23:26:08 | 2021-06-24T23:26:08 | 340,304,042 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,994 |
py
|
import pymysql
import pandas as pd
class Maria:
def __init__(self):
self.is_chrome = False
self.path = None
self.connect = None
self.cur = None
def setMaria(self, host='localhost', user='root', password='sa1234', db='jaemoo', charset='utf8', port=3306):
self.connect = pymysql.connect(host=host, user=user, password=password, db=db, charset=charset, port=port)
self.cur = self.connect.cursor()
return
def mariaReplaceData(self, tablename, data, start):
df = self.mariaShowData(tablename)
cols = list(df.columns)
sql = "replace into {}{} ".format(tablename, tuple(cols[start:])).replace('\'', '')
sql += ' values {};'.format(data)
self.cur.execute(sql)
def mariaInsertData(self, tablename, data):
sql = "insert into {} values {};".format(tablename, data)
try:
self.cur.execute(sql)
except Exception as e:
print(sql)
print(e.args)
return
def mariaCommitDB(self):
self.connect.commit()
return
def mariaShowData(self, tablename, sql=None):
try:
if sql is None:
self.cur.execute('select * from ' + tablename)
df = self.cur.fetchall()
field_names = [i[0] for i in self.cur.description]
df = pd.DataFrame(df, columns=field_names)
return df
else:
self.cur.execute(sql)
df = self.cur.fetchall()
field_names = [i[0] for i in self.cur.description]
df = pd.DataFrame(df, columns=field_names)
return df
except Exception as e:
print(e)
return None
def mariaSql(self, sql):
self.cur.execute(sql)
self.cur.fetchall()
return
def mariaCreateTable(self, tablename, columns, columns_type):
sql = "CREATE TABLE {} ({} {} PRIMARY KEY".format(tablename, columns[0], columns_type[0])
try:
for i in range(1, len(columns)):
sql += ", {} {}".format(columns[i], columns_type[i])
sql += ');'
self.cur.execute(sql)
except Exception as e:
print(sql)
print(e)
pass
return
def mariaShowTables(self, sql=None):
if sql is None:
self.cur.execute('show tables')
df = self.cur.fetchall()
field_names = [i[0] for i in self.cur.description]
df = pd.DataFrame(df, columns=field_names)
else:
try:
self.cur.execute(sql)
df = self.cur.fetchall()
field_names = [i[0] for i in self.cur.description]
df = pd.DataFrame(df, columns=field_names)
except Exception as e:
df = None
return df
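
# A minimal usage sketch (commented out because it assumes a reachable
# MariaDB/MySQL server with the credentials defaulted in setMaria; the table
# name below is hypothetical):
# maria = Maria()
# maria.setMaria()
# df = maria.mariaShowData('stock_prices')
# print(df.head())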
|
[
"[email protected]"
] | |
53a9e7b485281b9e04dc2f024cb312a1c0bfe6fa
|
cd21d80241deeb96f4acf16e865cef439b3158d1
|
/manage.py
|
bba21b475398de4476bf5a91b9cbc71c682bea8d
|
[] |
no_license
|
sankha555/DBPortal
|
72cac7118334337fc653ce2c0c133598c4f783d1
|
6e8354df09f34e0a6708630524e10f6949301de7
|
refs/heads/master
| 2020-12-02T07:26:42.624213 | 2019-12-30T15:00:42 | 2019-12-30T15:00:42 | 230,722,294 | 0 | 0 | null | 2019-12-29T08:14:33 | 2019-12-29T08:14:33 | null |
UTF-8
|
Python
| false | false | 628 |
py
|
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'dbportal.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
|
[
"[email protected]"
] | |
b2279b3d38a8ce42700aa7f9de69200a0892029a
|
e76220466b05fb441fee39e2b37cfb2e38a3ba05
|
/mysql/mysql.py
|
3c9b8664f6a77a559ceb2da689e79baad4c5b7d9
|
[] |
no_license
|
slmao-oamls/pythonmode
|
c8a5c3b7d272ee0991d450b81bfcf3dbff4ca16b
|
c17c8270967a2ca45e9e2c14839a488671afef92
|
refs/heads/master
| 2021-09-10T09:12:52.778194 | 2018-03-23T10:17:30 | 2018-03-23T10:17:30 | 126,019,739 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,586 |
py
|
# # Import the MySQL driver:
# import pymysql
# # Note: set password to your root password:
# conn=pymysql.connect(user='root',password='', host='localhost',database='test')
# cursor=conn.cursor()
# #cursor.execute('create table user (id varchar(20) primary key,name varchar(20))')
# cursor.execute('insert into users(id,name) values (%s,%s)',['no2','jake'])
# conn.commit()
# print('Successfully inserted', cursor.rowcount, 'rows')
# cursor.close()
# cursor=conn.cursor()
# cursor.execute('select * from users where id=%s',('no1',))
# vs=cursor.fetchall()
# print(vs)
# cursor.close()
# conn.close()
from sqlalchemy import Column,String,create_engine,ForeignKey
from sqlalchemy.orm import sessionmaker,relationship
from sqlalchemy.ext.declarative import declarative_base
Base=declarative_base()
class User(Base):
__tablename__='users'
id=Column(String(20),primary_key=True)
name=Column(String(20))
books=relationship('Book')
class Book(Base):
__tablename__='books'
id=Column(String(20),primary_key=True)
name=Column(String(20))
    # the "many" side (the books table) is linked to the users table via a foreign key:
users_id=Column(String(20),ForeignKey('users.id'))
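# relationship('Book') on User together with the ForeignKey above lets
# SQLAlchemy populate user.books lazily, as the query at the bottom of this
# file demonstrates.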
engine=create_engine('mysql+pymysql://root:@localhost:3306/test')
DBSession=sessionmaker(bind=engine)
session=DBSession()
# insert data
# new_user=User(id='no3',name='Bob')
# session.add(new_user)
# session.commit()
# session.close()
# read data
user=session.query(User).filter(User.id=='no1').one()
print(user.name)
# print(user.books[0].users_id)
for i in user.books:
print(i.id,i.name,i.users_id)
session.close()
|
[
"[email protected]"
] | |
da1d15bc3e9266e82839cc5424e5467de8846c18
|
3841c7003128899f8afb6686c09adff408e07703
|
/answer_for_eval
|
00656cf04dba4e5a5a0b65744dc64dcaaa92ec67
|
[] |
no_license
|
WenyanH/Question-Answer-System
|
552bdd479ceaf0f8c4cda92fb5023252e6379a33
|
ebdcd2e1211601c73ce0e6b8906e8749e433c1d7
|
refs/heads/master
| 2021-01-11T04:55:01.466486 | 2016-04-18T20:36:06 | 2016-04-18T20:36:06 | 71,286,666 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 373 |
#!/usr/bin/python
import main, sys
from lib import question_evaluation
wiki_article = sys.argv[1]
question_text = sys.argv[2]
main.main()
docs = main.read_sentences_from_file(wiki_article)
docs_q = main.read_sentences_from_file(question_text, False, True)
sorted_question = question_evaluation.question_evalution(docs_q, docs, 3)
for q in sorted_question:
print q
|
[
"[email protected]"
] | ||
86a3651026e5979337761c147682c2c2c955d1a1
|
74ff2b631f1dd8c8a39246500174ee0317b04ac9
|
/P1HW2_BasicMath_DewsClarence.py
|
e8be084f49bec423e11dc3cc3b2eee039a5ddb00
|
[] |
no_license
|
dewsclarence/CTI110
|
dfd242cf26019bc583e54b5a189a73eb66050a7e
|
2e833710448e02b204fc09a833a2102939ec6671
|
refs/heads/master
| 2023-01-03T01:44:40.026678 | 2020-10-28T00:45:54 | 2020-10-28T00:45:54 | 295,258,002 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 885 |
py
|
# Using IDLE Math
# 9/13/2020
# CTI-110 P1HW2 - Basic Math
# Clarence Dews
print("Enter number 1-10:", end=' ')
var1 = int(input()) # User input number 1-10
print("Enter number 11-20:", end=' ')
var2 = int(input()) # User input number 11-20
print("First number entered: ",var1) # Shows first number entered
print("Second number entered: ",var2) # Shows second number entered
var3 = var1 + var2 #Adds var1 and var2
var4 = var1 * var2 #Multiplies var1 and var2
print("Sum of both numbers: ",var3) #Displays results of addition
print("Result of mutiplying both numbers: ",var4) #Displays results of multiplication
#Start
#Input number 1-10
#Input number 11-20
#Displays var1
#Displays var2
#Calculates addition of var1 and var2
#Calculates multiplication of var1 and var2
#Displays var3
#Displays var4
#End
|
[
"[email protected]"
] | |
047d839364b362aa6a76bfe9643bcb4b78963590
|
ab1c920583995f372748ff69d38a823edd9a06af
|
/shultais_courses/dictionaries/intro_to_dictionaries/digits_rus_eng.py
|
8c5217d96a9dfe04a252496ac2455eacff1ddcc8
|
[] |
no_license
|
adyadyat/pyprojects
|
5e15f4e33892f9581b8ebe518b82806f0cd019dc
|
c8f79c4249c22eb9e3e19998d5b504153faae31f
|
refs/heads/master
| 2022-11-12T16:59:17.482303 | 2020-07-04T09:08:18 | 2020-07-04T09:08:18 | 265,461,663 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,568 |
py
|
import sys
key = int(sys.argv[1])
value = sys.argv[2]
digits = {
1: {"ru": "один", "en": "one"},
2: {"ru": "два", "en": "two"},
3: {"ru": "три", "en": "three"},
4: {"ru": "четыре", "en": "four"},
5: {"ru": "пять", "en": "five"},
6: {"ru": "шесть", "en": "six"},
7: {"ru": "семь", "en": "seven"},
8: {"ru": "восемь", "en": "eight"},
9: {"ru": "девять", "en": "nine"},
0: {"ru": "ноль", "en": "zero"}
}
print(digits[key][value])
"""
ЧИСЛА НА РУССКОМ И АНГЛИЙСКОМ
Ниже в редакторе находится словарь digits,
который содержит набор чисел и их названия
на русском и английском языках.
Обратите внимание,
что ключами словаря выступают целые числа (так тоже можно),
а значениями вложенные словари.
Напишите программу,
которая принимает из аргументов командной строки два параметра:
цифру и язык, а затем выводит название цифры на этом языке.
Учитывайте, что если ключ словаря задан числом,
то при доступе по ключу,
в квадратных скобках нужно также указывать число.
Пример использования:
> python program.py 4 ru
> четыре
"""
|
[
"[email protected]"
] | |
aa2b95f7faa2fc42bef989a9baf53826273ef7e8
|
05504059ec84709a8b1cf3988cf71b0613fe9f5a
|
/parser.py
|
293d43240a7d41e2cbd56d908ce3a41ad6df24d9
|
[] |
no_license
|
Bsq-collab/8g
|
2511b0b76be908ceb3fae713ce91f66938f0602a
|
81d318837303ee7bd8aeb2dbcfc3ee6a2305255a
|
refs/heads/master
| 2022-03-20T04:40:57.966967 | 2018-04-18T00:06:37 | 2018-04-18T00:06:37 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 6,554 |
py
|
from display import *
from matrix import *
from draw import *
from copy import deepcopy
"""
Goes through the file named filename and performs all of the actions listed in that file.
The file follows the following format:
Every command is a single character that takes up a line
Any command that requires arguments must have those arguments in the second line.
The commands are as follows:
push: push a copy of the current top of the coordinate system stack to the stack
pop: pop off the current top of the coordinate system stack
All the shape commands work as follows:
1) Add the shape to a temporary matrix
2) Multiply that matrix by the current top of the coordinate system stack
3) Draw the shape to the screen
4) Clear the temporary matrix
     sphere: add a sphere -
         takes 4 arguments (cx, cy, cz, r)
     torus: add a torus -
         takes 5 arguments (cx, cy, cz, r1, r2)
     box: add a rectangular prism -
         takes 6 arguments (x, y, z, width, height, depth)
     circle: add a circle -
         takes 4 arguments (cx, cy, cz, r)
hermite: add a hermite curve -
takes 8 arguments (x0, y0, x1, y1, rx0, ry0, rx1, ry1)
bezier: add a bezier curve -
takes 8 arguments (x0, y0, x1, y1, x2, y2, x3, y3)
line: add a line -
         takes 6 arguments (x0, y0, z0, x1, y1, z1)
     All the transformation commands work as follows:
         1) Create the appropriate transformation matrix
         2) Multiply that matrix by the current top of the coordinate system stack
     scale: takes 3 arguments (sx, sy, sz)
move: takes 3 arguments (tx, ty, tz)
rotate: takes 2 arguments (axis, theta) axis should be x, y or z
display: display the screen
save: save the screen to a file -
takes 1 argument (file name)
quit: end parsing
See the file script for an example of the file format
"""
ARG_COMMANDS = [ 'line', 'scale', 'move', 'rotate', 'save', 'circle', 'bezier', 'hermite', 'box', 'sphere', 'torus']
def parse_file( fname, edges, polygons, transform, screen, color ):
f = open(fname)
lines = f.readlines()
m=new_matrix()
ident(m)
cstack=[m]
step = 100
step_3d = 20
c = 0
while c < len(lines):
line = lines[c].strip()
#print ':' + line + ':'
if line in ARG_COMMANDS:
c+= 1
args = lines[c].strip().split(' ')
#print 'args\t' + str(args)
if line =='push':
cstack.append(deepcopy(cstack[-1]))
elif line=='pop':
if len(cstack)>1:
cstack.pop()
if line == 'sphere':
#print 'SPHERE\t' + str(args)
add_sphere(polygons,
float(args[0]), float(args[1]), float(args[2]),
float(args[3]), step_3d)
matrix_mult(cstack[-1],polygons)
draw_polygons(polygons,screen,color)
polygons=[]
elif line == 'torus':
#print 'TORUS\t' + str(args)
add_torus(polygons,
float(args[0]), float(args[1]), float(args[2]),
float(args[3]), float(args[4]), step_3d)
matrix_mult(cstack[-1],polygons)
draw_polygons(polygons,screen,color)
polygons=[]
elif line == 'box':
#print 'BOX\t' + str(args)
add_box(polygons,
float(args[0]), float(args[1]), float(args[2]),
float(args[3]), float(args[4]), float(args[5]))
matrix_mult(cstack[-1],polygons)
draw_polygons(polygons,screen,color)
polygons=[]
elif line == 'circle':
#print 'CIRCLE\t' + str(args)
add_circle(edges,
float(args[0]), float(args[1]), float(args[2]),
float(args[3]), step)
matrix_mult(cstack[-1],edges)
draw_lines(edges,screen,color)
edges=[]
elif line == 'hermite' or line == 'bezier':
#print 'curve\t' + line + ": " + str(args)
add_curve(edges,
float(args[0]), float(args[1]),
float(args[2]), float(args[3]),
float(args[4]), float(args[5]),
float(args[6]), float(args[7]),
step, line)
matrix_mult(cstack[-1],edges)
draw_lines(edges,screen,color)
edges=[]
elif line == 'line':
#print 'LINE\t' + str(args)
add_edge( edges,
float(args[0]), float(args[1]), float(args[2]),
float(args[3]), float(args[4]), float(args[5]) )
matrix_mult(cstack[-1],edges)
draw_lines(edges,screen,color)
edges=[]
elif line == 'scale':
#print 'SCALE\t' + str(args)
t = make_scale(float(args[0]), float(args[1]), float(args[2]))
#matrix_mult(t, transform)
matrix_mult(cstack[-1], t)
cstack[-1]=t
elif line == 'move':
#print 'MOVE\t' + str(args)
t = make_translate(float(args[0]), float(args[1]), float(args[2]))
#matrix_mult(t, transform)
matrix_mult(cstack[-1], t)
cstack[-1]=t
elif line == 'rotate':
#print 'ROTATE\t' + str(args)
theta = float(args[1]) * (math.pi / 180)
if args[0] == 'x':
t = make_rotX(theta)
elif args[0] == 'y':
t = make_rotY(theta)
else:
t = make_rotZ(theta)
#matrix_mult(t, transform)
matrix_mult(cstack[-1], t)
cstack[-1]=t
elif line == 'clear':
edges = []
polygons = []
elif line == 'ident':
#ident(transform)
            ident(cstack[-1])
#elif line == 'apply':
# matrix_mult( transform, edges )
# matrix_mult( transform, polygons )
elif line == 'display' or line == 'save':
# clear_screen(screen)
#draw_lines(edges, screen, color)
#draw_polygons(polygons, screen, color)
if line == 'display':
display(screen)
else:
save_extension(screen, args[0])
c+= 1
|
[
"[email protected]"
] | |
dffc5a467855e49541f65ecbcb8177809bcd6184
|
1363dfe1aad7643d0faddc3d60d357267ffb7121
|
/src/segnet/loadbatches.py
|
fcf95a7d055688905ae424e74a70febb7319b1aa
|
[] |
no_license
|
cjlcarvalho/ulcer-image-segmentation
|
38eadcbe83badc7afb80556c97fb28068a1e54f1
|
23d95c69009e911822ca982941856c6e73dfc704
|
refs/heads/master
| 2020-03-27T16:55:33.283827 | 2018-09-05T16:03:07 | 2018-09-05T16:03:07 | 146,816,323 | 1 | 2 | null | 2018-08-31T19:47:08 | 2018-08-30T23:14:14 |
Python
|
UTF-8
|
Python
| false | false | 2,005 |
py
|
import cv2
import itertools
import numpy as np
import os
def imageArray(path, width, height, imgNorm="sub_mean", ordering='channels_first'):
try:
img = cv2.imread(path, 1)
if imgNorm == "sub_and_divide":
img = cv2.resize(img, (width, height)).astype(np.float32) / 127.5 - 1
elif imgNorm == "sub_mean":
img = cv2.resize(img, (width, height)).astype(np.float32)
img[:,:,0] -= 103.939
img[:,:,1] -= 116.779
img[:,:,2] -= 123.68
elif imgNorm == "divide":
img = cv2.resize(img, (width, height)).astype(np.float32) / 255.0
except Exception as e:
print(path, e)
img = np.zeros((height, width, 3))
if ordering == 'channels_first':
img = np.rollaxis(img, 2, 0)
return img
def segmentationArray(path, nClasses, width, height):
seg_labels = np.zeros((height, width, nClasses))
try:
img = cv2.imread(path, 1)
img = cv2.resize(img, (width, height))
img = img[:,:,0]
for c in range(nClasses):
seg_labels[:,:,c] = (img == c).astype(int)
except Exception as e:
print(e)
seg_labels = np.reshape(seg_labels, (width * height, nClasses))
return seg_labels
def segmentGenerator(images_path, segs_path, batch_size, n_classes, input_height, input_width, output_height, output_width):
assert images_path[-1] == '/' and segs_path[-1] == '/'
    # sort both listings so the image/segmentation pairs align deterministically
    images = sorted(images_path + f for f in os.listdir(images_path))
    segmentations = sorted(segs_path + f for f in os.listdir(images_path) if os.path.isfile(segs_path + f))
zipped = itertools.cycle(zip(images, segmentations))
while True:
X = []
Y = []
for _ in range(batch_size):
img, seg = next(zipped)
X.append(imageArray(img, input_width, input_height))
Y.append(segmentationArray(seg, n_classes, output_width, output_height))
yield np.array(X), np.array(Y)
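# Example usage (a sketch; directory names and sizes are assumptions):
#   gen = segmentGenerator('data/imgs/', 'data/segs/', batch_size=4,
#                          n_classes=2, input_height=360, input_width=480,
#                          output_height=360, output_width=480)
#   X, Y = next(gen)  # X: (4, 3, 360, 480) channels-first, Y: (4, 360*480, 2)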
|
[
"[email protected]"
] | |
cf53ef5ed08b07917f1bafebfd98837aa6df5e39
|
36957a9ce540846d08f151b6a2c2d582cff1df47
|
/VR/Python/Python36/Lib/site-packages/django/contrib/auth/migrations/0004_alter_user_username_opts.py
|
8f8441f88f5e0f3b2074e39c01c7ef863cb3c28a
|
[] |
no_license
|
aqp1234/gitVR
|
60fc952307ef413e396d31e0d136faffe087ed2b
|
e70bd82c451943c2966b8ad1bee620a0ee1080d2
|
refs/heads/master
| 2022-12-29T15:30:12.540947 | 2020-10-07T15:26:32 | 2020-10-07T15:26:32 | 290,163,043 | 0 | 1 | null | 2020-08-25T09:15:40 | 2020-08-25T08:47:36 |
C#
|
UTF-8
|
Python
| false | false | 128 |
py
|
version https://git-lfs.github.com/spec/v1
oid sha256:68dd281e8039ab66caa4937c4a723c4fd18db5304accb445a332fceed361f3f2
size 785
|
[
"[email protected]"
] | |
505b1782957e7bfa8c32e71fd63adbfa93d801cc
|
eab5cc398d562d7d26e08e92e9ef4a6a6f38990e
|
/blhnet.py
|
8a32b036096674f494202692319a709cd63b40e2
|
[] |
no_license
|
rmehrad/BLHNet
|
e582ba41f60b2ec532b3848d1ff2dd8193055bb1
|
04fa6b89b60538808b3c2031b913ac5dde254ca9
|
refs/heads/master
| 2022-09-23T06:30:16.898044 | 2020-05-25T21:07:10 | 2020-05-25T21:07:10 | 266,263,842 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 6,349 |
py
|
#!/usr/bin/python3.8
import sys
import socket
import getopt
import threading
import subprocess
# define some global variables
listen = False
command = False
upload = False
execute = ""
target = ""
upload_destination = ""
port = 0
# this runs a command and returns the output
def run_command(command):
# trim the newline
command = command.rstrip()
# run the command and get the output back
try:
output = subprocess.check_output(command,stderr=subprocess.STDOUT, shell=True)
except:
output = "Failed to execute command.\r\n"
# send the output back to the client
return output
# this handles incoming client connections
def client_handler(client_socket):
global upload
global execute
global command
# check for upload
if len(upload_destination):
# read in all of the bytes and write to our destination
file_buffer = ""
# keep reading data until none is available
while True:
data = client_socket.recv(1024)
if not data:
break
else:
file_buffer += data
# now we take these bytes and try to write them out
try:
file_descriptor = open(upload_destination,"wb")
file_descriptor.write(file_buffer)
file_descriptor.close()
# acknowledge that we wrote the file out
client_socket.send("Successfully saved file to %s\r\n" % upload_destination)
except:
client_socket.send("Failed to save file to %s\r\n" % upload_destination)
# check for command execution
if len(execute):
# run the command
output = run_command(execute)
client_socket.send(output)
# now we go into another loop if a command shell was requested
if command:
while True:
# show a simple prompt
client_socket.send(b"<BHP:#> ")
# now we receive until we see a linefeed (enter key)
cmd_buffer = ""
while "\n" not in cmd_buffer:
                cmd_buffer += client_socket.recv(1024).decode()
# we have a valid command so execute it and send back the results
response = run_command(cmd_buffer)
# send back the response
client_socket.send(response)
# this is for incoming connections
def server_loop():
global target
global port
# if no target is defined we listen on all interfaces
if not len(target):
target = "0.0.0.0"
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server.bind((target,port))
server.listen(5)
while True:
client_socket, addr = server.accept()
# spin off a thread to handle our new client
client_thread = threading.Thread(target=client_handler,args=(client_socket,))
client_thread.start()
# if we don't listen we are a client....make it so.
def client_sender(buffer):
client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
# connect to our target host
client.connect((target,port))
# if we detect input from stdin send it
# if not we are going to wait for the user to punch some in
if len(buffer):
            client.send(buffer.encode())
while True:
# now wait for data back
recv_len = 1
response = ""
while recv_len:
data = client.recv(4096)
recv_len = len(data)
                response += data.decode()
if recv_len < 4096:
break
print (response,)
# wait for more input
buffer = input("")
buffer += "\n"
# send it off
            client.send(buffer.encode())
except:
# just catch generic errors - you can do your homework to beef this up
print ("[*] Exception! Exiting.")
# teardown the connection
client.close()
def usage():
print ("Netcat Replacement")
print()
print ("Usage: bhpnet.py -t target_host -p port")
print ("-l --listen - listen on [host]:[port] for incoming connections")
print ("-e --execute=file_to_run - execute the given file upon receiving a connection")
print ("-c --command - initialize a command shell")
print ("-u --upload=destination - upon receiving connection upload a file and write to [destination]")
print()
print()
print ("Examples: ")
print ("bhpnet.py -t 192.168.0.1 -p 5555 -l -c")
print ("bhpnet.py -t 192.168.0.1 -p 5555 -l -u=c:\\target.exe")
print ("bhpnet.py -t 192.168.0.1 -p 5555 -l -e=\"cat /etc/passwd\"")
print ("echo 'ABCDEFGHI' | ./bhpnet.py -t 192.168.11.12 -p 135")
sys.exit(0)
def main():
global listen
global port
global execute
global command
global upload_destination
global target
if not len(sys.argv[1:]):
usage()
# read the commandline options
try:
        opts, args = getopt.getopt(sys.argv[1:],"hle:t:p:cu:",["help","listen","execute=","target=","port=","command","upload="])
except getopt.GetoptError as err:
print (str(err))
usage()
for o,a in opts:
if o in ("-h","--help"):
usage()
elif o in ("-l","--listen"):
listen = True
elif o in ("-e", "--execute"):
execute = a
elif o in ("-c", "--commandshell"):
command = True
elif o in ("-u", "--upload"):
upload_destination = a
elif o in ("-t", "--target"):
target = a
elif o in ("-p", "--port"):
port = int(a)
else:
assert False,"Unhandled Option"
# are we going to listen or just send data from stdin
if not listen and len(target) and port > 0:
# read in the buffer from the commandline
# this will block, so send CTRL-D if not sending input
# to stdin
buffer = sys.stdin.read()
# send data off
client_sender(buffer)
# we are going to listen and potentially
# upload things, execute commands and drop a shell back
# depending on our command line options above
if listen:
server_loop()
if __name__ == '__main__':
    main()
|
[
"[email protected]"
] | |
0d17fa63c3a9a2d0bf33421eedf521554eb962c1
|
6d383860086918f9f5d2dc9b8eec9f4e98295b3c
|
/bes.ekleme.py
|
d25691e4c12ce6eb06ae7323a08eefe7ec73944a
|
[] |
no_license
|
unalfaruk/Python-Temel-Ornekler
|
055f1e37fcd90371dfc719e5a621c012beb7cb13
|
86c10671c810c458f79f58c8b0094140cbccdb09
|
refs/heads/master
| 2021-06-14T11:08:42.221908 | 2017-01-08T21:54:48 | 2017-01-08T21:54:48 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 72 |
py
|
def f(x):
'''girdiye 5 ekler'''
return x+5
s=f(3)
print(s)
|
[
"[email protected]"
] | |
661213532518f79f4fbd1621693e6a80ee48a239
|
86206b05a6e0a425ba5401de50b8645bddf77780
|
/Oper Python/Oper Cobra/Sandbox/SFDCLib.py
|
774e5e469ac749c3fe50e5a661a4ac709b6b7eff
|
[] |
no_license
|
QuestTestAutomation/PersistentDesktop1python
|
2e626ea16ce0fd4c697b156fdc2f9b3ca85bbd7b
|
ece25957edb6f87b2777b261b31914d22ebd99ad
|
refs/heads/master
| 2021-03-10T21:55:25.450872 | 2020-03-27T09:45:14 | 2020-03-27T09:45:14 | 246,488,801 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 33,048 |
py
|
from selenium import webdriver
from selenium.webdriver.support.ui import Select
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
from robot.libraries.BuiltIn import BuiltIn
import time
import re
import CPQLib
# import Libs/CPQLib.py
# def get_webdriver_instance():
# se2lib = BuiltIn().get_library_instance('Selenium2Library')
# return se2lib._current_browser()
def Open_Browser_And_Launch_Application(brow,url):
    if brow == 'ff':
        driver = webdriver.Firefox()
    if brow == 'gc':
        # raw string so the backslashes in the Windows path are not treated as escapes
        driver = webdriver.Chrome(executable_path=r'C:\Selenium\Selenium 3.4\Drivers\chromedriver.exe')
        # driver = webdriver.Chrome()
        # driver = webdriver.Remote(command_executor='http://localhost:9515/',desired_capabilities=DesiredCapabilities.CHROME)
    if brow == 'ie':
        driver = webdriver.Ie()
    # navigate to the application URL for whichever browser was chosen
    driver.get(url)
    return driver
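# Example (a sketch; the URL is an assumption):
#   driver = Open_Browser_And_Launch_Application('gc', 'https://login.salesforce.com')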
def get_text_column_value(driver,label,Index):
id = "-1"
tempindex = 0
i = 0
tindex = 0
lists = driver.find_elements_by_tag_name("TD")
for list in lists:
i = int(i) + int(1)
# print "label : " + str(label)
if list.get_attribute("class") == "labelCol" and list.text == label:
# print "hurr : " + str(list.text)
tempindex = int(tempindex) + int(1)
if int(tempindex) == int(Index):
tindex = int(i) + int(1)
if tindex == i:
# idiv = list.find_element_by_tag_name("div")
id = list.text
# print "hurr hurr : " + str(list.text)
# id = idiv.text
# # print "hurr hurr : " + str(idiv.text)
break
return id
def get_Select_id(driver,title,Index,visibletext):
id = "-1"
tempindex = 0
i = 0
tindex = 0
lists = driver.find_elements_by_tag_name("SELECT")
for list in lists:
if list.get_attribute("title") == title:
tempindex = int(tempindex) + int(1)
if int(tempindex) == int(Index):
id = list.get_attribute("id")
select = Select(list)
select.select_by_visible_text(visibletext)
return id
def Select_value_from_list(driver,title,Index,visibletext):
id = get_Select_id(driver,title,Index,visibletext)
def set_input_column_value(driver,label,Index,fieldvalue):
id = "-1"
tempindex = 0
i = 0
tindex = 0
lists = driver.find_elements_by_tag_name("TD")
for list in lists:
i = int(i) + int(1)
# labelCol
# requiredInput
if list.get_attribute("class") == "labelCol" and list.text == label:
tempindex = int(tempindex) + int(1)
if int(tempindex) == int(Index):
tindex = int(i) + int(1)
        if tindex == i:
            # the value cell follows the label cell; type into its INPUT child
            inpelements = list.find_elements_by_tag_name("INPUT")
            if len(inpelements) == 1:
                webele = inpelements[0]
                id = webele.get_attribute("id")
                driver.execute_script("return arguments[0].scrollIntoView(true);", webele)
                webele.click()
                webele.send_keys(fieldvalue)
            break
print "id is " + str(id)
return id
def click_SFDCbutton(driver,btntitle,btnindex):
id = "-1"
tempindex = 0
i = 0
tindex = 0
lists = driver.find_elements_by_tag_name("INPUT")
# print len(lists)
for list in lists:
# print " class :" + str(list.get_attribute("class"))
print " Title :" + str(list.get_attribute("title"))
if list.get_attribute("class") is not None and list.get_attribute("title") is not None :
if list.get_attribute("class").upper() == ("btn").upper() and list.get_attribute("title").upper() == (btntitle).upper() :
tempindex = int(tempindex) + int(1)
if int(tempindex) == int(btnindex):
print " Title :" + str(list.get_attribute("title"))
id = list.get_attribute("value")
driver.execute_script("return arguments[0].scrollIntoView(true);", list)
list.click()
break
return id
def set_required_input_column_value(driver,label,Index,fieldvalue):
id = "-1"
tempindex = 0
i = 0
tindex = "-10000"
lists = driver.find_elements_by_tag_name("TD")
print "text " + str(len(lists))
for list in lists:
print str(i)
i = int(i) + int(1)
if tindex == i:
inpelements = list.find_elements_by_tag_name("INPUT")
for inpele in inpelements:
if len(inpelements) == int(1):
driver.execute_script("return arguments[0].scrollIntoView(true);", inpele)
inpele.click()
inpele.send_keys(fieldvalue)
break
if list.get_attribute("class") == "labelCol requiredInput":
lbllists = list.find_elements_by_tag_name("LABEL")
# print "label : " + str(len(lbllists))
for lbllist in lbllists:
if list.get_attribute("class") == "labelCol requiredInput" and int((lbllist.text).find(label)) != int('-1'):
tempindex = int(tempindex) + int(1)
if int(tempindex) == int(Index):
tindex = int(i) + int(1)
return id
def Search_Account_Country_Lookup(driver) :
handle = []
parent_h = driver.current_window_handle
print "The current page title : " + driver.title
Open_Required_Loopup_Window(driver,'Country','1')
# driver.find_element_by_xpath("/html/body/div/div[2]/table/tbody/tr/td[2]/form/div/div[2]/div[3]/table/tbody/tr[10]/td[2]/div/span/a/img").click()
handles = driver.window_handles # before the pop-up window closes
# driver.remove(parent_h)
handles.remove(parent_h)
driver.switch_to_window(handles.pop())
popup_h = driver.current_window_handle
driver.implicitly_wait(10) # seconds
print "The current popup page title : " + driver.title
time.sleep(20)
driver.switch_to.frame(0)
# ele = driver.find_element_by_id('searchFrame')
# ele.click()
# lists = driver.find_elements_by_tag_name("input")
# # print "*********---- " + str(len(lists))
# # print "*********----frame " + str(len(driver.find_elements_by_tag_name("frame")))
# # print "*********----form " + str(len(driver.find_elements_by_tag_name("form")))
# # print "*********----div " + str(len(driver.find_elements_by_tag_name("div")))
# # print "*********----input " + str(len(driver.find_elements_by_tag_name("input")))
# for list in lists:
# print "*********----framelistinput " + str(len(list.find_elements_by_tag_name("input")))
# print "*********----framelistdiv " + str(len(list.find_elements_by_tag_name("div")))
# print "*********----framelistform " + str(len(list.find_elements_by_tag_name("form")))
# print "*********----framelisthtml " + str(len(list.find_elements_by_tag_name("html")))
# print "*********" + list.get_attribute("id")
# print "*********" + list.get_attribute("class")
element = driver.find_element_by_id('lksrch')
driver.execute_script("return arguments[0].scrollIntoView(true);", element)
driver.find_element_by_id('lksrch').click()
driver.find_element_by_id('lksrch').send_keys('India')
lists = driver.find_elements_by_tag_name("input")
# driver.find_element_by_id(_tag_name("INPUT")'lksrch').click()
# driver.find_element_by_id('lksrch').sendkeys("India")
# driver.find_element_by_xpath('/html/body/form/div/div[2]/input[1]').click()
# driver.find_element_by_xpath('/html/body/form/div/div[2]/input[1]').sendkeys("India")
driver.find_element_by_xpath("/html/body/form/div/div[2]/input[2]").click()
time.sleep(10)
driver.switch_to_window(popup_h)
driver.implicitly_wait(10) # seconds
driver.switch_to.frame(1)
time.sleep(5)
driver.find_element_by_link_text('India').click()
time.sleep(10)
driver.switch_to_window(parent_h)
def Handle_Lookup_Frame(driver) :
element = driver.find_element_by_xpath("/html/body/div/div[2]/table/tbody/tr/td[2]/form/div/div[2]/div[3]/table/tbody/tr[10]/td[2]/div/span/a/img")
driver.execute_script("return arguments[0].scrollIntoView(true);", element)
driver.find_element_by_xpath("/html/body/div/div[2]/table/tbody/tr/td[2]/form/div/div[2]/div[3]/table/tbody/tr[10]/td[2]/div/span/a/img").click()
driver.switch_to_frame("searchFrame")
driver.implicitly_wait(10) # seconds
driver.find_element_by_xpath('/html/body/form/div/div[2]/input[1]').click()
    driver.find_element_by_xpath('/html/body/form/div/div[2]/input[1]').send_keys("India")
def set_text_input_column_value(driver,label,Index,fieldvalue):
id = "-1"
tempindex = 0
i = 0
tindex = "-10000"
lists = driver.find_elements_by_tag_name("TD")
# print "text " + str(len(lists))
for list in lists:
# print str(i)
i = int(i) + int(1)
# labelCol
# requiredInpu t
if tindex == i:
# id = list.get_attribute("id")
# webele = list.find_element_by_id(list.get_attribute("id"))
# webele.click()
# webele.send_keys(fieldvalue)
inpelements = list.find_elements_by_tag_name("INPUT")
for inpele in inpelements:
if len(inpelements) == int(1):
driver.execute_script("return arguments[0].scrollIntoView(true);", inpele)
inpele.click()
inpele.send_keys(fieldvalue)
break
if list.get_attribute("class") == "labelCol":
lbllists = list.find_elements_by_tag_name("LABEL")
for lbllist in lbllists:
if list.get_attribute("class") == "labelCol" and lbllist.text == label:
tempindex = int(tempindex) + int(1)
if int(tempindex) == int(Index):
tindex = int(i) + int(1)
return id
def Open_Required_Loopup_Window(driver,label,Index):
id = "-1"
tempindex = 0
i = 0
tindex = "-10000"
lists = driver.find_elements_by_tag_name("TD")
# print "text " + str(len(lists))
for list in lists:
# print str(i)
i = int(i) + int(1)
if tindex == i:
inpelements = list.find_elements_by_tag_name("A")
for inpele in inpelements:
if len(inpelements) == int(1):
driver.execute_script("return arguments[0].scrollIntoView(true);", inpele)
inpele.click()
break
if list.get_attribute("class") == "labelCol requiredInput":
lbllists = list.find_elements_by_tag_name("LABEL")
# print "label : " + str(len(lbllists))
for lbllist in lbllists:
if list.get_attribute("class") == "labelCol requiredInput" and int((lbllist.text).find(label)) != int('-1'):
tempindex = int(tempindex) + int(1)
if int(tempindex) == int(Index):
tindex = int(i) + int(1)
return id
def search_SFDC_Entity(driver,searchvalue):
print "searchvalue : " + searchvalue
element = driver.find_element_by_id("phSearchInput")
driver.execute_script("return arguments[0].scrollIntoView(true);", element)
driver.find_element_by_id("phSearchInput").click()
driver.find_element_by_id("phSearchInput").send_keys(searchvalue)
driver.find_element_by_id("phSearchButton").click()
def search_SFDC_Entity_Sidebar(driver,searchvalue) :
element = driver.find_element_by_id("sbstr")
driver.execute_script("return arguments[0].scrollIntoView(true);", element)
driver.find_element_by_id("sbstr").click()
driver.find_element_by_id("sbstr").send_keys(searchvalue)
# driver.find_element_by_name("search").click()
btntitle = 'Go!'
click_SFDCbutton(driver, btntitle, 1)
def Navigate_to_Header(driver,searchvalue,Index):
id = "-1"
tempindex = 0
i = 0
tindex = "-10000"
lists = driver.find_elements_by_tag_name("H3")
print "text " + str(len(lists))
for list in lists:
print list.text
        if (list.text).upper() == searchvalue.upper() :
            tempindex = int(tempindex) + int(1)
            if int(tempindex) == int(Index):
                driver.execute_script("return arguments[0].scrollIntoView(true);", list)
                list.click()
                break
def Click_Section_Button(driver,btntitle,Index) :
id = "-1"
tempindex = 0
lists = driver.find_elements_by_class_name("pbButton")
# print "pbbtn : " + str(len(lists))
for list in lists:
btnlists = list.find_elements_by_tag_name("INPUT")
# print "btnlists : " + str(len(btnlists))
if int(id) == int(1) :
break
for btnlist in btnlists :
print btnlist.get_attribute("title")
if btnlist.get_attribute("class") is not None and btnlist.get_attribute("title") is not None:
if btnlist.get_attribute("class").upper() == ("btn").upper() and btnlist.get_attribute("title").upper() == (btntitle).upper():
tempindex = int(tempindex) + int(1)
if int(tempindex) == int(Index):
id = "1"
# print btnlist.get_attribute("title")
driver.execute_script("return arguments[0].scrollIntoView(true);", btnlist)
btnlist.click()
break
def Click_Section_Button1(driver,btntitle,Index) :
id = "-1"
tempindex = 0
btnlists = driver.find_elements_by_tag_name("INPUT")
# print "btnlists : " + str(len(btnlists))
for btnlist in btnlists :
print btnlist.get_attribute("title")
if btnlist.get_attribute("class") is not None and btnlist.get_attribute("title") is not None:
if btnlist.get_attribute("class").upper() == ("btn").upper() and btnlist.get_attribute("title").upper() == (btntitle).upper():
tempindex = int(tempindex) + int(1)
if int(tempindex) == int(Index):
id = "1"
# print btnlist.get_attribute("title")
driver.execute_script("return arguments[0].scrollIntoView(true);", btnlist)
btnlist.click()
break
def Search_Required_Lookup(driver,label,Index,lookupvalue) :
handle = []
parent_h = driver.current_window_handle
print "The current page title : " + driver.title
# Open_Loopup_Window(driver,label,Index)
Open_Required_Loopup_Window(driver,label,Index)
# driver.find_element_by_xpath("/html/body/div/div[2]/table/tbody/tr/td[2]/form/div/div[2]/div[3]/table/tbody/tr[10]/td[2]/div/span/a/img").click()
handles = driver.window_handles # before the pop-up window closes
# driver.remove(parent_h)
handles.remove(parent_h)
driver.switch_to_window(handles.pop())
popup_h = driver.current_window_handle
driver.implicitly_wait(5) # seconds
print "The current popup page title : " + driver.title
time.sleep(5)
driver.switch_to.frame(0)
driver.find_element_by_id('lksrch').click()
driver.find_element_by_id('lksrch').send_keys(lookupvalue)
click_SFDCbutton(driver, 'Go!', 1)
driver.switch_to_window(popup_h)
driver.implicitly_wait(3) # seconds
driver.switch_to.frame(1)
time.sleep(2)
driver.find_element_by_link_text(lookupvalue).click()
time.sleep(3)
driver.switch_to_window(parent_h)
def Search_Lookup(driver,label,Index,lookupvalue) :
handle = []
parent_h = driver.current_window_handle
print "The current page title : " + driver.title
Open_Lookup_Window(driver,label,Index)
# Open_Required_Loopup_Window(driver,label,Index)
# driver.find_element_by_xpath("/html/body/div/div[2]/table/tbody/tr/td[2]/form/div/div[2]/div[3]/table/tbody/tr[10]/td[2]/div/span/a/img").click()
handles = driver.window_handles # before the pop-up window closes
handles.remove(parent_h)
driver.switch_to_window(handles.pop())
popup_h = driver.current_window_handle
driver.implicitly_wait(5) # seconds
print "The current popup page title : " + driver.title
time.sleep(5)
driver.switch_to.frame(0)
driver.find_element_by_id('lksrch').click()
driver.find_element_by_id('lksrch').send_keys(lookupvalue)
click_SFDCbutton(driver, 'Go!', 1)
driver.switch_to_window(popup_h)
driver.implicitly_wait(3) # seconds
driver.switch_to.frame(1)
time.sleep(2)
driver.find_element_by_link_text(lookupvalue).click()
time.sleep(3)
driver.switch_to_window(parent_h)
def Open_Lookup_Window(driver,label,Index):
id = "-1"
tempindex = 0
i = 0
tindex = "-10000"
lists = driver.find_elements_by_tag_name("TD")
print "text " + str(len(lists))
for list in lists:
print str(i)
i = int(i) + int(1)
if tindex == i:
inpelements = list.find_elements_by_tag_name("A")
for inpele in inpelements:
if len(inpelements) == int(1):
driver.execute_script("return arguments[0].scrollIntoView(true);", inpele)
inpele.click()
break
if list.get_attribute("class") == "labelCol":
lbllists = list.find_elements_by_tag_name("LABEL")
# print "label : " + str(len(lbllists))
for lbllist in lbllists:
if list.get_attribute("class") == "labelCol" and lbllist.text == label:
tempindex = int(tempindex) + int(1)
if int(tempindex) == int(Index):
tindex = int(i) + int(1)
return id
def Click_SFDCLink(driver,label,Index):
id = "-1"
tempindex = 0
lists = driver.find_elements_by_tag_name("A")
print "text " + str(len(lists))
print " label " + str(label)
for list in lists:
# if id != "-1" :
# break
# print "**********************************************"
# print "if : " + str(int((list.text).find(label)))
# print "if : " + str(((list.text).strip()).upper() == (label.strip()).upper())
# print "list.text : " + str(list.text)
# print " label " + str(label)
if (((list.text).strip()).upper() == (label.strip()).upper()):
tempindex = int(tempindex) + int(1)
if int(tempindex) == int(Index):
id = list.get_attribute("Id")
driver.execute_script("return arguments[0].scrollIntoView(true);", list)
list.click()
break
def select_required_input_column_value(driver, label, Index, fieldvalue):
id = "-1"
tempindex = 0
i = 0
tindex = "-10000"
lists = driver.find_elements_by_tag_name("TD")
print "text " + str(len(lists))
for list in lists:
print str(i)
i = int(i) + int(1)
if tindex == i:
inpelements = list.find_elements_by_tag_name("Select")
for inpele in inpelements:
if len(inpelements) == int(1):
driver.execute_script("return arguments[0].scrollIntoView(true);", inpele)
inpele.click()
select = Select(inpele)
select.select_by_visible_text(fieldvalue)
break
if list.get_attribute("class") == "labelCol requiredInput":
lbllists = list.find_elements_by_tag_name("LABEL")
# print "label : " + str(len(lbllists))
for lbllist in lbllists:
if list.get_attribute("class") == "labelCol requiredInput" and int((lbllist.text).find(label)) != int('-1'):
tempindex = int(tempindex) + int(1)
if int(tempindex) == int(Index):
tindex = int(i) + int(1)
return id
def select_input_column_value(driver,label,Index,fieldvalue):
id = "-1"
tempindex = 0
i = 0
tindex = "-10000"
lists = driver.find_elements_by_class_name("labelCol")
print "lenlists " + str(len(lists))
for list in lists:
if list is not None and list.get_attribute("class") is not None :
lbllists = list.find_elements_by_tag_name("LABEL")
print "Class : " + str(list.get_attribute("class"))
print "lenlists " + str(len(lbllists))
if int(len(lbllists)) > 0 :
for lbllist in lbllists:
if lbllist is not None and lbllist.get_attribute("for") is not None:
print "Class : " + str(list.get_attribute("class"))
print "lbllist.text : " + str(lbllist.text)
if list.get_attribute("class") == "labelCol" and lbllist.text == label:
tempindex = int(tempindex) + int(1)
if int(tempindex) == int(Index):
print "for : " + str(list.get_attribute("for"))
def Get_Input_FieldID(driver,label,Index) :
forid = "-1"
tempindex = 0
i = 0
tindex = "-10000"
lists = driver.find_elements_by_class_name("labelCol")
# print "lenlists " + str(len(lists))
for list in lists:
if list is not None and list.get_attribute("class") is not None :
lbllists = list.find_elements_by_tag_name("LABEL")
# print "lenlists " + str(len(lbllists))
if int(len(lbllists)) > 0 :
for lbllist in lbllists:
if lbllist is not None and lbllist.get_attribute("for") is not None and lbllist.text is not None :
if list.get_attribute("class") == "labelCol" and lbllist.text == label:
# print "Class : " + str(list.get_attribute("class"))
# print "lbllist.text : " + str(lbllist.text)
tempindex = int(tempindex) + int(1)
if int(tempindex) == int(Index):
# print "for : " + str(lbllist.get_attribute("for"))
forid = lbllist.get_attribute("for")
break
return forid
def Get_Required_Input_FieldID(driver,label,Index) :
forid = "-1"
tempindex = 0
i = 0
tindex = "-10000"
classname = 'labelCol requiredInput'
lists = driver.find_elements_by_css_selector('td[class="labelCol requiredInput"]')
# print "lenlists " + str(len(lists))
for list in lists:
# print "lenlclass " + str(list.get_attribute("class"))
if list is not None and list.get_attribute("class") is not None :
lbllists = list.find_elements_by_tag_name("LABEL")
# print "lenlists " + str(len(lbllists))
if int(len(lbllists)) > 0 :
for lbllist in lbllists:
# print "lbllist.text : " + str(lbllist.text)
if lbllist is not None and lbllist.get_attribute("for") is not None and lbllist.text is not None :
if list.get_attribute("class") == "labelCol requiredInput" and int((lbllist.text).find(label)) != int('-1'):
# print "Class : " + str(list.get_attribute("class"))
# print "lbllist.text : " + str(lbllist.text)
tempindex = int(tempindex) + int(1)
if int(tempindex) == int(Index):
# print "for : " + str(lbllist.get_attribute("for"))
forid = lbllist.get_attribute("for")
break
return forid
def select_Required_SFDC_Dropdown_Value(driver,label,Index,vFieldvalue):
myid = Get_Required_Input_FieldID(driver, label, Index)
# print "Hello : " + str(myid)
element = driver.find_element_by_id(myid)
driver.execute_script("return arguments[0].scrollIntoView(true);", element)
driver.find_element_by_id(myid).click()
select = Select(driver.find_element_by_id(myid))
# select by visible text
select.select_by_visible_text(vFieldvalue)
def select_SFDC_Dropdown_Value(driver,label,Index,vFieldvalue):
myid = Get_Input_FieldID(driver, label, Index)
print "Hello : " + str(myid)
element = driver.find_element_by_id(myid)
driver.execute_script("return arguments[0].scrollIntoView(true);", element)
driver.find_element_by_id(myid).click()
select = Select(driver.find_element_by_id(myid))
# select by visible text
select.select_by_visible_text(vFieldvalue)
def Select_Steel_Brick_Dropdown_Value(driver,label,Index) :
tempindex = 0
i = 0
tindex = "-10000"
# driver.find_element_by_id("tsidButton").click()
print "Hu " + str(driver.find_element_by_id("tsidLabel").text)
if driver.find_element_by_id("tsidLabel").text != label :
element = driver.find_element_by_id("tsidLabel")
driver.execute_script("return arguments[0].scrollIntoView(true);", element)
driver.find_element_by_id("tsidLabel").click()
driver.find_element_by_link_text(label).click()
def click_CPQ_Buttom(driver,btntitle,Index) :
bid = "-1"
tempindex = 0
i = 0
tindex = "-10000"
lists = driver.find_elements_by_tag_name("PAPER-BUTTON")
print "lenlists " + str(len(lists))
for list in lists:
print list.text
if list.text is not None :
if list.text == btntitle :
print list.text
print list.text == btntitle
tempindex = int(tempindex) + int(1)
if tempindex == Index:
list.click()
bid = "1"
break
return bid
def click_CPQ_Link(driver,label,Index) :
tempindex = 0
i = 0
tindex = "-10000"
lists = driver.find_elements_by_class_name("list")
print "lenlists " + str(len(lists))
for list in lists:
alists = list.find_elements_by_tag_name("A")
for alist in alists:
if alist.text is not None:
if alist.text == label:
tempindex = int(tempindex) + int(1)
if tempindex == Index:
list.click()
break
def wait_CPQ_button(driver,btntitle,Index) :
for x in range(0,20,1) :
btnid = click_CPQ_Buttom(driver,btntitle,Index)
if btnid == "1" :
break
elif btnid == "-1" :
time.sleep(10)
def wait_for_opty_sbl_rowid(driver,label,index) :
id = "-1"
val = get_text_column_value(driver,label,index)
val11 = isNotBlank (val)
# print "is none : " + val is None
# print "is none val : " + str(val)
# print "is none : " + str(val11)
for i in range(0,900,10):
driver.refresh()
time.sleep(10)
val = get_text_column_value(driver,label,index)
# print "iThe value of index : " + str(i)
# print "iThe value of val : " + str(val)
# if val == "-1":
if isBlank(val):
time.sleep(10)
val = get_text_column_value(driver, label, index)
else :
# print "tu nee amma true" + str(val)
id = val
break
# print "Siebel row id is : " + str(val)
return id
def isNotBlank (myString):
if myString and myString.strip():
#myString is not None AND myString is not empty or blank
return True
#myString is None OR myString is empty or blank
return False
def isBlank (myString):
if myString and myString.strip():
#myString is not None AND myString is not empty or blank
return False
#myString is None OR myString is empty or blank
return True
def Handle_CPQQuote_Popup(driver) :
qtext = "-1"
quoteno = "-1"
parent_h = driver.current_window_handle
for i in range(0, 600, 10):
handles = driver.window_handles
if len(handles) == 2 :
handles.remove(parent_h)
driver.switch_to_window(handles.pop())
popup_h = driver.current_window_handle
# driver.implicitly_wait(10) # seconds
print "The current popup page title : " + driver.title
# time.sleep(20)
driver.switch_to_window(popup_h)
qtext = driver.find_element_by_css_selector('html body span').text
print "qtext value is : " + str(qtext)
element = driver.find_element_by_css_selector('html body center input')
driver.execute_script("return arguments[0].scrollIntoView(true);", element)
driver.find_element_by_css_selector('html body center input').click()
time.sleep(5)
driver.switch_to_window(parent_h)
re_prodId = re.compile(r'Quote is successfully created.Quote Number is([^"]+).')
for m in re_prodId.findall(qtext):
# print(m)
quoteno = m
quoteno = quoteno.strip()
break
else :
time.sleep(10)
print " Handles Length : " + str(len(handles))
time.sleep(6)
driver.refresh()
return quoteno
def browser_navigate_back(driver) :
driver.execute_script("window.history.go(-1)")
time.sleep(5)
def click_SFDCbutton_new(driver,btntitle,btnindex):
id = "-1"
tempindex = 0
i = 0
tindex = 0
lists = driver.find_elements_by_class_name("btn")
# print len(lists)
for list in lists:
# print " class :" + str(list.get_attribute("class"))
print " Title :" + str(list.get_attribute("title"))
if list.get_attribute("class") is not None and list.get_attribute("title") is not None :
if list.get_attribute("class").upper() == ("btn").upper() and list.get_attribute("title").upper() == (btntitle).upper() :
tempindex = int(tempindex) + int(1)
if int(tempindex) == int(btnindex):
print " Title :" + str(list.get_attribute("title"))
id = list.get_attribute("value")
driver.execute_script("return arguments[0].scrollIntoView(true);", list)
list.click()
break
return id
def get_quote_number(driver,Quotelink) :
id = "-1"
vTarindex = 0
vSourceindex = 0
driver.refresh()
time.sleep(5)
elelink = driver.find_element_by_link_text(Quotelink)
ahref = elelink.get_attribute("href")
ele_splits = ahref.split("/")
vTarindex = len(ele_splits) - int(1)
tarstr = str((ele_splits[int(vTarindex)]))
print "tarstr " + str(tarstr)
elems = driver.find_elements_by_xpath("//a[@href]")
# print "length : " + str(len(elems))
for elem in elems:
print "href : " + str(elem.get_attribute("href"))
# # print "text : " + str(elem.get_attribute("text"))
print "text : " + str(elem.text)
ahref1 = elem.get_attribute("href")
ele_splits1 = ahref1.split("/")
vSourceindex = len(ele_splits1) - int(1)
sourcestr = str((ele_splits1[int(vSourceindex)]))
# print "sourcestr " + str(sourcestr)
if sourcestr == tarstr :
if elem.text == Quotelink:
id = "-1'"
else :
id = elem.text
# print "href ++ : " + str(elem.get_attribute("href"))
#
#
# print "ele len : " + str(len(ele_splits))
# print "ele len : " + str((ele_splits[3]))
# linkxpath = "//a[@href=/" \
# "" + ele_splits[3] + "]"
# print "linkxpath : " + str(linkxpath)
# elements = driver.find_elements_by_xpath(linkxpath)
# print "linlen : " + str(len(elements))
# for element in elements:
# if element.text == Quotelink:
# id = "-1'"
# else :
# id = element.text
return id
def scroll_to_bottom(driver) :
lastHeight = driver.execute_script("return document.body.scrollHeight")
while True:
driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")
time.sleep(5)
newHeight = driver.execute_script("return document.body.scrollHeight")
if newHeight == lastHeight:
break
lastHeight = newHeight
def switch_to_new_tab(driver) :
main_window = driver.current_window_handle
driver.switch_to.window(driver.window_handles[1])
return main_window
def CPQ_Add_Products_Handle_Errors(driver) :
id = "-1"
errors = CPQLib.pgselectproduct_checkfor_errors(driver)
print "errors : " + str(errors)
if errors == "-1" :
print "No Errors"
else :
browser_navigate_back(driver)
time.sleep(30)
CPQLib.pgconfigureproduts_click_add_products(driver)
# Wait Until Page Contains Product Selection
CPQLib.pgselectproduct_waitfor_Select_Button(driver)
|
[
"[email protected]"
] | |
a3c44ac3caef1772825491221d0e233ff15558dd
|
e009ea0876ad1c2d0c68a3740a6dff3aa51a4736
|
/movielist/models.py
|
d4872b7212bd43f822e5180c85e9e9b9c96154c8
|
[] |
no_license
|
janstawowy/rest
|
ec8c2d25fff496c5007d2c0b81996a1bf22875f1
|
3ad1f607e72d31cc53cfe2be7dca84fd5ff53ec7
|
refs/heads/master
| 2022-12-14T13:35:40.431669 | 2018-11-22T12:05:17 | 2018-11-22T12:05:17 | 153,265,165 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 484 |
py
|
from django.db import models
class Person(models.Model):
name = models.CharField(max_length=255)
def __str__(self):
return self.name
class Movie(models.Model):
title = models.CharField(max_length=255)
description = models.TextField()
director = models.ForeignKey(Person, related_name="movies_directed", null=True, on_delete=models.SET_NULL)
year = models.SmallIntegerField()
actors = models.ManyToManyField(Person, related_name="movies_cast")
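# Example queries showing the reverse accessors (a sketch; assumes the
# migrations have been applied):
#   nolan = Person.objects.create(name="Christopher Nolan")
#   movie = Movie.objects.create(title="Tenet", description="...",
#                                director=nolan, year=2020)
#   movie.actors.add(nolan)
#   nolan.movies_directed.all()  # reverse of the director ForeignKey
#   nolan.movies_cast.all()      # reverse of the actors ManyToManyField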
|
[
"[email protected]"
] | |
658522164516cb94f977d82b579d0358b953ca20
|
d0a0ae44942c31d2fd5221ee0b6391f9053e2deb
|
/network/socket/tcp_socket.py
|
7dff8480ba3c5f9559905217489972ab33e9efca
|
[] |
no_license
|
sfirmery/python-sandbox
|
216bc3e4470f10cf97477a5c733f2db45714ebb0
|
bfc501f423374a0fdffc26df237a5246f1c8b5a6
|
refs/heads/master
| 2016-09-05T16:23:30.838477 | 2014-02-25T22:10:21 | 2014-02-25T22:10:21 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 649 |
py
|
import socket
import time
TCP_PORT = 9000
# create tcp socket
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
# bind
sock.bind(('', TCP_PORT))
sock.listen(1)
while 1:
pair = sock.accept()
if pair is not None:
conn, addr = pair
print 'Incoming connection from %s' % repr(addr)
while 1:
data = conn.recv(8192)
if data:
print "received message:", repr(data), "from:", repr(addr)
conn.send("OK")
else:
break
print "connection closed."
conn.close()
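# A minimal Python 2 client for manual testing (a sketch):
#   import socket
#   c = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#   c.connect(('127.0.0.1', 9000))
#   c.send('hello')
#   print c.recv(8192)  # the server answers "OK"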
|
[
"[email protected]"
] | |
b3bcab9ef44391663e01b7141bd760910fec2d0c
|
0cf19ef1804be284fa086f6db29f3e694306edfa
|
/src/products/models.py
|
ed015d8ce6c9c531cfe593010258bf1b08b0a4c9
|
[] |
no_license
|
GiperScriper/Market
|
b6b8a177d90babffc39c39f9e2e3b8f48223181c
|
ac594328324c127e1237e6fcc5c24a050cb680a6
|
refs/heads/master
| 2016-09-10T19:50:19.161389 | 2014-03-17T20:24:22 | 2014-03-17T20:24:22 | 17,602,478 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,282 |
py
|
from django.db import models
from django.contrib.auth.models import User
class Product(models.Model):
user = models.ForeignKey(User, null=True, blank=True)
title = models.CharField(max_length=200)
description = models.CharField(max_length=700)
price = models.DecimalField(max_digits=15, decimal_places=2)
sale_price = models.DecimalField(max_digits=15, decimal_places=2, null=True, blank=True)
slug = models.SlugField()
timestamp = models.DateTimeField(auto_now_add=True)
updated = models.DateTimeField(auto_now=True)
order = models.IntegerField(default=0)
active = models.BooleanField(default=True)
def __unicode__(self):
return self.title
class Meta:
ordering = ['-order']
class ProductImage(models.Model):
product = models.ForeignKey(Product)
image = models.ImageField(upload_to="products/image/")
title = models.CharField(max_length=200, null=True, blank=True)
featured_image = models.BooleanField(default=False)
timestamp = models.DateTimeField(auto_now_add=True)
updated = models.DateTimeField(auto_now=True)
def __unicode__(self):
return self.title
class Tag(models.Model):
product = models.ForeignKey(Product)
tag = models.CharField(max_length=50)
slug = models.SlugField()
timestamp = models.DateTimeField(auto_now_add=True)
updated = models.DateTimeField(auto_now=True)
def __unicode__(self):
return self.tag
class Category(models.Model):
products = models.ManyToManyField(Product)
title = models.CharField(max_length=200)
description = models.CharField(max_length=700)
slug = models.SlugField()
timestamp = models.DateTimeField(auto_now_add=True)
updated = models.DateTimeField(auto_now=True)
def __unicode__(self):
return self.title
class Meta:
verbose_name = u'Category'
verbose_name_plural = u'Categories'
class CategoryImage(models.Model):
category = models.ForeignKey(Category)
image = models.ImageField(upload_to="category/image/")
title = models.CharField(max_length=200, null=True, blank=True)
featured_image = models.BooleanField(default=False)
timestamp = models.DateTimeField(auto_now_add=True)
updated = models.DateTimeField(auto_now=True)
def __unicode__(self):
return self.title
class Meta:
verbose_name = u'Category Image'
verbose_name_plural = u'Category Images'
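# Example reverse lookups (a sketch):
#   category = Category.objects.get(slug='books')
#   category.products.all()          # products in this category (ManyToMany)
#   some_product.category_set.all()  # its categories (default reverse name)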
|
[
"[email protected]"
] | |
9a55b7e079bf414689a764ae85de4867eb8c3763
|
739cb489d188acb7cc2aab8d2e5853901e113715
|
/10-setdiccionarios/diccionarios.py
|
33c29320a1958c6ac4b93f8bdee949ff94a9e90b
|
[] |
no_license
|
juancafernandez18/curso-udemy
|
2c83453b7dbcecb31ebb7b2897f02673306614bc
|
d1cc1b34f8caf113ca81bdba86089342992c08db
|
refs/heads/master
| 2022-09-11T03:39:19.659934 | 2020-06-01T01:47:47 | 2020-06-01T01:47:47 | 268,403,910 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 732 |
py
|
"""
un diccionario es un tipo de dato que almacena un conjunto de datos
en formato clave > valor
parecido a un array asociativo
"""
persona = {
"nombre":"victor",
"apellido": "robles",
}
print(persona)
contactos = [
{
'nombre': 'juan',
'apellido': 'fernandez',
'numero': '3794744867'
},
{
'nombre': 'zipora',
'apellido': 'fernandez',
'numero': '374654'
}
]
print(contactos)
print("LISTADO DE CONTACTOS")
print("---------------------")
for contacto in contactos:
print("Nombre del contacto",contacto['nombre'])
print("Apellido:",contacto['apellido'])
print("Celular:",contacto['numero'])
print("---------------------------------------")
|
[
"[email protected]"
] | |
4f840b089e1c72661a5d31db332f0f88712ba8a2
|
a3dff48bb6460d9130cffdef3ca8cefb6ca01f2e
|
/adrians_calculations/lib.py
|
1e7a04d70ebe8c7629278b978019a33f6ed8ad77
|
[] |
no_license
|
adrhish/adrians_calculations
|
e6887acf8af52734f1e785e719370327442ac590
|
0f277fe9a51cf3b0cc8fce6f4bbc96ea3fa1e2c5
|
refs/heads/master
| 2022-12-04T20:23:19.237367 | 2020-08-17T13:48:17 | 2020-08-17T13:48:17 | 288,146,166 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,268 |
py
|
# -*- coding: UTF-8 -*-
# Copyright (C) 2018 Jean Bizot <[email protected]>
""" Main lib for adrians_calculations Project
"""
from os.path import split
import pandas as pd
import datetime
pd.set_option('display.width', 200)
def try_me(name):
print('your name is', str(name))
def clean_data(data):
""" clean data
"""
# Remove columns starts with vote
cols = [x for x in data.columns if x.find('vote') >= 0]
data.drop(cols, axis=1, inplace=True)
# Remove special characteres from columns
data.loc[:, 'civility'] = data['civility'].replace('\.', '', regex=True)
# Calculate Age from day of birth
actual_year = datetime.datetime.now().year
data.loc[:, 'Year_Month'] = pd.to_datetime(data.birthdate)
data.loc[:, 'Age'] = actual_year - data['Year_Month'].dt.year
# Uppercase variable to avoid duplicates
data.loc[:, 'city'] = data['city'].str.upper()
# Take 2 first digits, 2700 -> 02700 so first two are region
data.loc[:, 'postal_code'] = data.postal_code.str.zfill(5).str[0:2]
# Remove columns with more than 50% of nans
cnans = data.shape[0] / 2
data = data.dropna(thresh=cnans, axis=1)
# Remove rows with more than 50% of nans
rnans = data.shape[1] / 2
data = data.dropna(thresh=rnans, axis=0)
# Discretize based on quantiles
data.loc[:, 'duration'] = pd.qcut(data['surveyduration'], 10)
# Discretize based on values
data.loc[:, 'Age'] = pd.cut(data['Age'], 10)
# Rename columns
data.rename(columns={'q1': 'Frequency'}, inplace=True)
# Transform type of columns
data.loc[:, 'Frequency'] = data['Frequency'].astype(int)
# Rename values in rows
drows = {1: 'Manytimes', 2: 'Onetimebyday', 3: '5/6timesforweek',
4: '4timesforweek', 5: '1/3timesforweek', 6: '1timeformonth',
7: '1/trimestre', 8: 'Less', 9: 'Never'}
data.loc[:, 'Frequency'] = data['Frequency'].map(drows)
return data
if __name__ == '__main__':
# For introspections purpose to quickly get this functions on ipython
import adrians_calculations
folder_source, _ = split(adrians_calculations.__file__)
df = pd.read_csv('{}/data/data.csv.gz'.format(folder_source))
    df_clean = clean_data(df)
print(' dataframe cleaned')
|
[
"[email protected]"
] | |
f8a701143ff317c117978afa224950eae849bdb4
|
9ff2cf4aa1075b1254269d95e0d0f4538f3c54c9
|
/testarduino3.py
|
bc409a252e1cadf37005c3771426368a2dc3da09
|
[] |
no_license
|
brew-it/dev01
|
d92b469892da1c4ec01ca4378064c29898461e3d
|
170d60efbab2f73fe36b00ef01b46fcb396fafe5
|
refs/heads/master
| 2021-01-12T11:55:15.020789 | 2016-11-04T00:25:12 | 2016-11-04T00:25:12 | 69,316,143 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 696 |
py
|
from nanpy import (ArduinoApi, SerialManager, Servo)
from time import sleep
import robot.device_info
try:
    connection = SerialManager()
    a = ArduinoApi(connection = connection)
except:
    print("Failed to connect to arduino")
    raise  # without a connection the rest of the script cannot run
for j in range(30,54, 1):
a.digitalWrite(j, a.HIGH)
for i in range(30,54):
a.pinMode(i, a.OUTPUT)
##
##
##for j in range(30,54, 1):
## a.digitalWrite(j, a.LOW)
##
## sleep(1)
oldrelay = 30
while(1):
    relay = int(input("Select a relay")) + 29
    if(relay >= 30 and relay <= 53):  # pins 30..53 were configured above
a.digitalWrite(relay, a.LOW)
if(relay != oldrelay):
a.digitalWrite(oldrelay,a.HIGH)
oldrelay = relay
|
[
"[email protected]"
] | |
4dafd2675375326d00071f92b91080bea9677ef3
|
1498148e5d0af365cd7fd16197174174a7fa9800
|
/t001481.py
|
4bca6b1596cd106695153b484bdcabd65c9b8121
|
[] |
no_license
|
feiyanshiren/myAcm
|
59a2b80fe7e02787defcb152eee3eae26135322a
|
00c7082d5143ddf87aeeafbdb6ce29da46dc8a12
|
refs/heads/master
| 2023-09-01T12:12:19.866447 | 2023-09-01T09:09:56 | 2023-09-01T09:09:56 | 148,560,672 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 269 |
py
|
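# For each of T inputs, clear the trailing run of 1-bits in the binary
# representation and print the result (e.g. 0b1011 -> 0b1000).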
for T in range(int(input())):
b = list(bin(int(input())).replace("0b", ""))
if b[-1] == "1":
for i in range(len(b) - 1, -1, -1):
if b[i] == "1":
b[i] = "0"
else:
break
print("".join(b))
|
[
"[email protected]"
] | |
ad65848ff0c8b91968e53caab17971ad3c7ee9ac
|
8bf1ce907800da0c22f646ffb4272d62accc7253
|
/DRFdemo/wsgi.py
|
a78626f81d9ef59f80f2cada719e1a835300cef6
|
[] |
no_license
|
lx0089/DRF_DEMO
|
c4654d3561fe8f4cd95eaf67a26fdaa097a6a0ef
|
00a8a33a998b1e8daeaec376614afa018cd3704c
|
refs/heads/master
| 2022-04-29T13:46:39.437462 | 2019-08-28T07:25:05 | 2019-08-28T07:25:05 | 204,871,604 | 0 | 0 | null | 2022-04-22T22:15:37 | 2019-08-28T07:17:39 |
Python
|
UTF-8
|
Python
| false | false | 392 |
py
|
"""
WSGI config for DRFdemo project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "DRFdemo.settings")
application = get_wsgi_application()
|
[
"[email protected]"
] | |
6f3f18539c8923851681793d40f4dcb3f50d3d64
|
60d2212eb2e287a0795d58c7f16165fd5315c441
|
/app01/migrations/0001_initial.py
|
3a9dc3818912587831f59c416cdcc28191857ff6
|
[] |
no_license
|
zhouf1234/mysite2
|
29145ceb470273f39fc11dd91945203db7fe0238
|
63747c789d39cf752f2b80509d8e3db9145b3492
|
refs/heads/master
| 2020-05-05T03:11:31.696639 | 2019-04-05T10:41:42 | 2019-04-05T10:41:42 | 179,663,615 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 445 |
py
|
# Generated by Django 2.1.2 on 2018-11-01 09:05
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(primary_key=True, serialize=False)),
('name', models.CharField(max_length=32)),
],
),
]
|
[
"="
] |
=
|
bfd676d773802be6dd45564ee4e0ce7c4f60f0f2
|
963e0002a4324978c2fadc49e056e12151cf882e
|
/2019/11/11.py
|
10b1d7c8994c086553a71b26cfdf0ae62a56c330
|
[] |
no_license
|
dionyziz/advent-of-code
|
5d7706f5e0c7d53569a97a80a2aaff14fca32ae7
|
1752201ae1c57d73c32dea2702ade595d6b1552c
|
refs/heads/master
| 2022-12-26T16:41:23.167521 | 2022-12-13T05:17:12 | 2022-12-13T05:17:12 | 225,867,904 | 10 | 5 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,076 |
py
|
from computer import Computer
from collections import defaultdict
computer = Computer()
computer.load_file("11.txt")
color_at = defaultdict(lambda: 0)
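
# Direction is tracked as a complex number: multiplying by 1j rotates the
# heading 90 degrees counter-clockwise, and the extra factor (turn * 2 - 1)
# is -1 or +1, flipping that into a clockwise turn when turn == 0.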
def paint(computer, color_at):
modified = set()
direction = 1j
location = 0
execution = computer.execute()
while True:
try:
computer.write_to(color_at[location])
color_at[location] = computer.read_from()
modified.add((int(location.real), int(location.imag)))
turn = computer.read_from()
direction *= 1j
direction *= turn * 2 - 1
location += direction
except StopIteration:
return modified
print(len(paint(computer, color_at)))
color_at = defaultdict(lambda: 0)
color_at[0] = 1
modified = paint(computer, color_at)
xs, ys = zip(*list(modified))
for y in reversed(range(min(ys), max(ys) + 1)):
row = []
for x in reversed(range(min(xs), max(xs) + 1)):
if color_at[x + y * 1j] == 0:
row.append('.')
else:
row.append('#')
print(''.join(row))
|
[
"[email protected]"
] | |
544d404ba3f8a75a5b1fcc8d563c0853c8c6ea7c
|
3bd40415aabba9ba705e8e20387d3521a48004eb
|
/Problem Solving/Data structures/2D Array - DS.py
|
39b1b724e0d98a4c39d2e96089f49ff8e622a409
|
[] |
no_license
|
Narendran36/HackerRank
|
7da6f4ffc8a21031d3776c82e8969ca79eca0b06
|
f58ce1cfaa383ed8aec8ec10467048f6f8465624
|
refs/heads/master
| 2022-12-04T04:25:19.062493 | 2020-08-19T19:13:24 | 2020-08-19T19:13:24 | 256,822,744 | 3 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 647 |
py
|
#!/bin/python3
import math
import os
import random
import re
import sys
# Complete the hourglassSum function below.
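
# An hourglass is this pattern of 7 cells inside the 6x6 grid:
#   a b c
#     d
#   e f g
# There are 4 x 4 = 16 possible top-left anchors; we keep the largest sum.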
def hourglassSum(arr):
h_glass = []
for i in range(0,4):
for j in range(0,4):
h_glass.append([arr[i][j], arr[i][j+1], arr[i][j+2], arr[i+1][j+1], arr[i+2][j], arr[i+2][j+1], arr[i+2][j+2]])
return sum(max(h_glass, key = sum))
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
arr = []
for _ in range(6):
arr.append(list(map(int, input().rstrip().split())))
result = hourglassSum(arr)
fptr.write(str(result) + '\n')
fptr.close()
|
[
"[email protected]"
] | |
1ca80286d0440f54504ffaea5212be6a2a8f9e32
|
5c83a49479d614c9604d09da4aa66ddb4f1ba4af
|
/productoLogic.py
|
adda98d6891e7ddcd5df54c30ce9182ecf7a4766
|
[] |
no_license
|
VanessaFlores26/PopUp_AYUDA
|
397e4a861c01283b9817c283a2e8b37d7727f03d
|
0b3d410f08d9fa594cdce8d79f6063ed6dfd3f9b
|
refs/heads/master
| 2022-12-02T10:33:44.842199 | 2020-08-15T05:54:05 | 2020-08-15T05:54:05 | 287,675,531 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 6,239 |
py
|
from logic import Logic
from productoObj import productoObj
import os
class productoLogic(Logic):
def __init__(self):
super().__init__()
self.keys = [
"id",
"nombre",
"nombre_foto",
"foto",
"descripcion",
"costo_unitario",
"precio_venta",
"patente",
"id_emprendimiento",
"likes",
]
def insertNewProducto(
self,
name,
foto,
descripcion,
costo_unitario,
precio_venta,
patente,
id_emprendimiento,
):
database = self.get_databaseXObj()
sql = (
"insert into fishingdb.productos (id, nombre, descripcion, costo_unitario, precio_venta, patente, id_emprendimiento) "
+ "values (0, %s, %s, %s, %s, %s, %s);"
)
data = (
name,
descripcion,
costo_unitario,
precio_venta,
patente,
id_emprendimiento,
)
rows = database.executeNonQueryRowsTuple(sql, data)
id_producto = self.getIdProductoByIdEmprendimiento(
id_emprendimiento, descripcion
)
nombre_foto = str(id_producto) + ".png"
sql2 = (
"update fishingdb.productos "
+ "set productos.nombre_foto = %s, productos.foto = %s "
+ "where productos.id = %s;"
)
data2 = (nombre_foto, foto, id_producto)
database.executeNonQueryRowsTuple(sql2, data2)
self.saveImagesProductos(id_producto)
return rows
def insertNewProductoWithoutPhoto(
self,
name,
nombre_foto,
descripcion,
costo_unitario,
precio_venta,
patente,
id_emprendimiento,
):
database = self.get_databaseXObj()
sql = (
"insert into fishingdb.productos (id, nombre, nombre_foto, descripcion, costo_unitario, precio_venta, patente, id_emprendimiento) "
+ "values (0, %s, %s, %s, %s, %s, %s, %s);"
)
data = (
name,
nombre_foto,
descripcion,
costo_unitario,
precio_venta,
patente,
id_emprendimiento,
)
rows = database.executeNonQueryRowsTuple(sql, data)
return rows
def getAllProductosByIdEmprendimiento(self, id_emprendimiento):
dataBase = self.get_databaseXObj()
sql = f"select * from fishingdb.productos where id_emprendimiento = {id_emprendimiento};"
data = dataBase.executeQuery(sql)
data = self.tupleToDictionaryList(data, self.keys)
return data
def saveImagesProductos(self, id_producto):
data = self.getProductoByIdDiccionary(id_producto)
for registro in data:
foto = registro["foto"]
nombre_foto = registro["nombre_foto"]
if nombre_foto != "products.jpg":
path = os.getcwd() + "\\static\\images\\productos\\" + nombre_foto
with open(path, "wb") as file:
file.write(foto)
def deleteProducto(self, id_producto):
database = self.get_databaseXObj()
sql = "delete from fishingdb.productos " + f"where id = {id_producto};"
rows = database.executeNonQueryRows(sql)
return rows
def updateProductoWithoutPhoto(
self, id_producto, name, descripcion, costo_unitario, precio_venta, patente,
):
database = self.get_databaseXObj()
sql = (
"update fishingdb.productos"
+ f" set nombre ='{name}', descripcion='{descripcion}', costo_unitario={costo_unitario}, precio_venta={precio_venta}, patente={patente}"
+ f" where id = {id_producto};"
)
rows = database.executeNonQueryRows(sql)
return rows
def updateProducto(
self,
id_producto,
name,
foto,
descripcion,
costo_unitario,
precio_venta,
patente,
):
database = self.get_databaseXObj()
sql = (
"update fishingdb.productos"
+ " set nombre = %s, foto = %s, descripcion = %s, costo_unitario = %s, precio_venta = %s, patente = %s, nombre_foto = %s"
+ " where id = %s;"
)
data = (
name,
foto,
descripcion,
costo_unitario,
precio_venta,
patente,
str(id_producto) + ".png",
id_producto,
)
rows = database.executeNonQueryRowsTuple(sql, data)
self.saveImagesProductos(id_producto)
return rows
def getProductoById(self, id):
dataBase = self.get_databaseXObj()
sql = "SELECT * FROM fishingdb.productos " + f"where productos.id = {id};"
print(sql)
data = dataBase.executeQuery(sql)
data = self.tupleToDictionaryList(data, self.keys)
if len(data) > 0:
data_dic = data[0]
prodObj = productoObj(
data_dic["id"],
data_dic["nombre"],
data_dic["nombre_foto"],
data_dic["foto"],
data_dic["descripcion"],
data_dic["costo_unitario"],
data_dic["precio_venta"],
data_dic["patente"],
data_dic["id_emprendimiento"],
)
return prodObj
else:
return None
def getIdProductoByIdEmprendimiento(self, id_emprendimiento, descripcion):
dataBase = self.get_databaseXObj()
sql = (
"SELECT productos.id FROM fishingdb.productos "
+ f"where productos.id_emprendimiento = {id_emprendimiento} and productos.descripcion = '{descripcion}';"
)
data = dataBase.executeQuery(sql)
id_producto = data[0][0]
return id_producto
def getProductoByIdDiccionary(self, id):
dataBase = self.get_databaseXObj()
sql = "SELECT * FROM fishingdb.productos " + f"where productos.id = {id};"
print(sql)
data = dataBase.executeQuery(sql)
data = self.tupleToDictionaryList(data, self.keys)
return data
|
[
"[email protected]"
] | |
6409ffff6a083b3c48d050cf0b0da4cd4e24c754
|
98811c0c149c1873c12322f20345dab1488a1870
|
/nnet/hue/split_data.py
|
421bc33d6fd8d8aa179440fa714ee4c730371b24
|
[] |
no_license
|
mverleg/kaggle_otto
|
682d5f83a070b7e88054401e6fba221d8e1b6227
|
b23beb58a1a0652e9eb98f5db31eae52303b6f85
|
refs/heads/main
| 2021-01-17T08:54:39.096781 | 2016-04-12T09:25:26 | 2016-04-12T09:25:26 | 37,781,556 | 0 | 1 | null | 2016-04-12T09:25:27 | 2015-06-20T18:47:45 |
Python
|
UTF-8
|
Python
| false | false | 226 |
py
|
def split_data(data, labels, test_frac = 0.1):
N = int(len(labels) * test_frac)
train = data[N:, :]
test = data[:N, :]
train_labels = labels[N:]
test_labels = labels[:N]
return train, train_labels, test, test_labels
|
[
"mark@rafiki"
] |
mark@rafiki
|
3e305636020fb1fbb5ebe8d62c8f592a38e0586e
|
f980929e94530c8276e24fb5be1b3d5fe2e4bc5a
|
/pages/hovers_page.py
|
7719a3c2464a5b50025ae11a55957af1ea0ebbcc
|
[] |
no_license
|
arokiaanburaj/python_UI_automation
|
ae9a55019b5f00339dce773d2cd95f3efc0ddb85
|
cb57c61aa90c2fcf3651531a3ff298aec08d514d
|
refs/heads/master
| 2020-08-01T12:36:14.254048 | 2019-09-26T06:12:06 | 2019-09-26T06:12:06 | 210,998,779 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,235 |
py
|
"""
@author: Prerna Pal
@email: [email protected]
@date: 20-May-2015
"""
import logging
from time import sleep

from selenium.webdriver.common.action_chains import ActionChains

from utility.services import Services
logging.basicConfig(format='%(asctime)s - %(levelname)s: %(message)s', datefmt='%m/%d/%Y %I:%M:%S %p',
level=logging.INFO)
class HoversPage:
def __init__(self, driver):
self.driver = driver
self.services = Services(self.driver)
self.header = "Hovers"
self.xpath_heading = "//h3"
self.xpath_image = "//div[@class='figure']"
self.xpath_image_1 = self.xpath_image + "[1]//img"
self.xpath_image_2 = self.xpath_image + "[2]//img"
self.xpath_image_3 = self.xpath_image + "[3]//img"
self.xpath_image_1_caption = self.xpath_image + "[1]//div[@class='figcaption']"
self.xpath_image_2_caption = self.xpath_image + "[2]//div[@class='figcaption']"
self.xpath_image_3_caption = self.xpath_image + "[3]//div[@class='figcaption']"
def verify_hovers_page(self):
"""
This method is to verify Hovers page.
return: instance of Hovers page
rtype: HoversPage instance
"""
logging.info("## Verifying Hovers page ##")
self.services.wait_for_element(self.xpath_heading)
actual_heading = self.services.get_text_by_xpath(self.xpath_heading)
logging.info("# Actual heading on Hovers page: %s" % actual_heading)
assert actual_heading == self.header, "Actual header (%s), should be same as expected header (%s)." % (
actual_heading, self.header)
def verify_hovers_functionality(self):
image_1 = self.driver.find_element_by_xpath(self.xpath_image_1)
image_2 = self.driver.find_element_by_xpath(self.xpath_image_2)
self.services.assert_element_visibility(self.xpath_image_2_caption, False)
        action_chain = ActionChains(self.driver)
        action_chain.move_to_element(image_1).perform()
        sleep(1)
        action_chain.move_to_element(image_2).perform()
        sleep(1)
self.services.is_element_visible(self.xpath_image_2_caption)
|
[
"[email protected]"
] | |
9c79484cbe1d4d5ce3ebb7823945e0a4fe6718b5
|
30d1c90057d9406d5741ea33390dd0058563bb32
|
/Testing/Testing.py
|
5b4d00c0ecc837209c69addec20f6a811a6c48e2
|
[] |
no_license
|
PolodeBliek/OTR---LaTeX
|
da3a2d8c532dda33d95b6b16b5a01c07ff6b870b
|
b6b21ba6cf84abafa3153237feb0df62acf69489
|
refs/heads/master
| 2020-06-08T09:48:47.190402 | 2019-06-22T20:30:05 | 2019-06-22T20:30:05 | 193,208,496 | 3 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 13,280 |
py
|
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches
import random
import copy
import itertools
from scipy.ndimage import gaussian_filter
from scipy import signal
from skimage import data, io
from skimage import img_as_float, img_as_ubyte
from skimage.morphology import reconstruction
from skimage.color import rgb2gray
from scipy.signal import find_peaks
from skimage.exposure import histogram
import more_itertools as mit
import math
from PIL import Image
import pytesseract
import os
def avg(l):
#Function Used to determine the average of a given list
return (sum(l)/len(l))
def Distance(Number, List):
#Function used to find the distance of a number to the nearest element in that list
ListTemp = copy.deepcopy(List)
ListTemp = [abs(x-Number) for x in ListTemp]
return (min(ListTemp))
def intersection(lst1, lst2):
#Intersection of 2 lists
lst3 = [value for value in lst1 if value in lst2]
return lst3
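
# Examples (illustrative):
#   avg([1, 2, 3])               -> 2.0
#   Distance(5, [1, 9])          -> 4
#   intersection([1, 2], [3, 2]) -> [2]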
#Specifying Tesseract location:
pytesseract.pytesseract.tesseract_cmd = r"C:\Program Files\Tesseract-OCR\tesseract.exe"
FileNumber = 7
if FileNumber == 1:
File = 'Example.png'
else:
File = 'Example' + str(FileNumber) + '.png'
print(File)
image = img_as_float(io.imread("C:\\Users\\Polo\\Documents\\GitHub\\OTR---LaTeX\\TestImages\\" + File))
(height,width, _) = image.shape
Table = img_as_float(rgb2gray(image)) #Convert Original Image to Grayscale
Complex = True
Demo = True
if Demo:
pass
#fig, (ax0, ax1) = plt.subplots(nrows=1, ncols=2, figsize=(16, 5))
else:
fig, (ax0, ax1, ax2) = plt.subplots(nrows=1, ncols=3, figsize=(8, 2.5))
HorSlices = []
VerSlices = []
EmptyHorSlices = []
EmptyVerSlices = []
AllPixels = []
for index in range(width):
VerSlices.append([])
for index in range(height):
HorSlices.append(Table[index].tolist())
AllPixels = AllPixels + Table[index].tolist()
for index2 in range(width):
VerSlices[index2].append(Table[index, index2])
for index in range(height):
if max(HorSlices[index])-min(HorSlices[index])<0.1:
EmptyHorSlices.append(index)
for index in range(width):
if max(VerSlices[index])-min(VerSlices[index])<0.1:
EmptyVerSlices.append(index)
NegColor = max(AllPixels)
VerSums = [1.0 - (1/height)*sum(x) for x in zip(*HorSlices)]
HorSums = [1.0 - (1/width)*sum(x) for x in zip(*VerSlices)]
PotRowPeakHeight = max(HorSums)*0.5 if max(HorSums)*0.5 > 0.10 else 0.10
PotColPeakHeight = max(VerSums)*0.5 if max(VerSums)*0.5 > 0.10 else 0.10
PotCol, _ = find_peaks(VerSums, height = PotColPeakHeight)
PotRow, _ = find_peaks(HorSums, height = PotRowPeakHeight)
PotCol2 = []
PotRow2 = []
for index in range(len(PotCol)):
PotColPeaks, _ = find_peaks(VerSlices[PotCol[index]], height = min(VerSlices[PotCol[index]]) + (max(VerSlices[PotCol[index]])-min(VerSlices[PotCol[index]]))*0.5)
PotColPeaks = list(set(PotColPeaks)-set(EmptyHorSlices)-set(PotRow))
if len(PotColPeaks) == 0:
PotCol2.append(PotCol[index])
else:
pass
for index in range(len(PotRow)):
PotRowPeaks, _ = find_peaks(HorSlices[PotRow[index]], height = min(HorSlices[PotRow[index]]) + (max(HorSlices[PotRow[index]])-min(HorSlices[PotRow[index]]))*0.5)
PotRowPeaks = list(set(PotRowPeaks)-set(EmptyVerSlices)-set(PotCol))
if len(PotRowPeaks) == 0:
PotRow2.append(PotRow[index])
else:
pass
Columns = PotCol2
Rows = PotRow2
DroppedCols = list(set(PotCol) - set(Columns))
DroppedRows = list(set(PotRow) - set(Rows))
EmptyHorSlices2 = []
EmptyVerSlices2 = []
if len(Rows) != 0:
for index in range(len(EmptyHorSlices)):
dist = [abs(i-EmptyHorSlices[index]) for i in Rows]
if min(dist) <= 10:
pass
else:
EmptyHorSlices2.append(EmptyHorSlices[index])
if len(Columns) != 0:
for index in range(len(EmptyVerSlices)):
dist = [abs(i-EmptyVerSlices[index]) for i in Columns]
if min(dist) <= 10:
pass
else:
EmptyVerSlices2.append(EmptyVerSlices[index])
NegRows = [list(group) for group in mit.consecutive_groups(EmptyHorSlices2)]
NegCols = [list(group) for group in mit.consecutive_groups(EmptyVerSlices2)]
NegCols = [int(sum(group)/len(group)) for group in NegCols]
NegRows = [int(sum(group)/len(group)) for group in NegRows]
if Complex:
VerAvg = avg(VerSums)
VerSeperators = list(set(Columns + NegCols + [width, 0]))
VerSeperators.sort()
TextColumns= []
VerSumsPieces = [None for x in range(len(VerSeperators))]
for index in range(len(VerSeperators)-1):
if VerSeperators[index+1]- 5 - (VerSeperators[index]+5) >= 1:
VerSumsPieces[index] = VerSums[VerSeperators[index]+5:VerSeperators[index+1]-5]
for element in VerSumsPieces:
if type(element)==list and len(element) != 0:
TextColumns.append(element)
for index in range(len(VerSums)):
if Distance(index, VerSeperators) <= 5:
VerSums[index] = 0
Boundaries = []
for index in range(len(VerSeperators)-1):
Boundaries.append([VerSeperators[index],VerSeperators[index + 1]])
Boundaries2 = []
for index in range(len(Boundaries)):
if max(VerSums[Boundaries[index][0]:Boundaries[index][1]]) >= 0.5*VerAvg:
Boundaries2.append(Boundaries[index])
SymmetryFactors = []
for index in range(len(Boundaries2)):
SymmetryList = []
for index2 in range(Boundaries2[index][1]-Boundaries2[index][0]):
SymmetryList.append(abs(sum(VerSums[Boundaries2[index][0]:index2+Boundaries2[index][0]])/sum(VerSums[Boundaries2[index][0]:Boundaries2[index][1]])-0.5))
MidPoint = SymmetryList.index(min(SymmetryList)) + Boundaries2[index][0]
SymmetryFactor = ((MidPoint-Boundaries2[index][0])/(Boundaries2[index][1]-Boundaries2[index][0])-0.5)
if abs(SymmetryFactor)<=0.10:
SymmetryFactors.append("C")
elif SymmetryFactor<=-0.10:
SymmetryFactors.append("L")
else:
SymmetryFactors.append("R")
HorAvg = avg(HorSums)
HorSeperators = list(set(Rows + NegRows + [height, 0]))
HorSeperators.sort()
TextColumns= []
HorSumsPieces = [None for x in range(len(HorSeperators))]
for index in range(len(HorSeperators)-1):
if HorSeperators[index+1]- 5 - (HorSeperators[index]+5) >= 1:
            HorSumsPieces[index] = HorSums[HorSeperators[index]+5:HorSeperators[index+1]-5]
for element in HorSumsPieces:
if type(element)==list and len(element) != 0:
TextColumns.append(element)
for index in range(len(HorSums)):
if Distance(index, HorSeperators) <= 5:
HorSums[index] = 0
HorBoundaries = []
for index in range(len(HorSeperators)-1):
HorBoundaries.append([HorSeperators[index],HorSeperators[index + 1]])
HorBoundaries2 = []
for index in range(len(HorBoundaries)):
if max(HorSums[HorBoundaries[index][0]:HorBoundaries[index][1]]) >= 0.5*HorAvg:
HorBoundaries2.append(HorBoundaries[index])
TextRows = [(group[1]-group[0])/2 + group[0] for group in HorBoundaries2]
TextLocs = [(group[1]-group[0])/2 + group[0] for group in Boundaries2]
HorPieces = Rows + NegRows + TextRows
HorPieces.sort()
for index in range(len(HorPieces)):
if HorPieces[index] in Rows:
HorPieces[index] = "R"
if HorPieces[index] in NegRows:
HorPieces[index] = "N"
if HorPieces[index] in TextRows:
HorPieces[index] = "T"
HorPieces = HorPieces[1:] if HorPieces[0] == "N" else HorPieces
HorPieces = HorPieces[:-1] if HorPieces[-1] == "N" else HorPieces
VerPieces = Columns + NegCols + TextLocs
VerPieces.sort()
for index in range(len(VerPieces)):
if VerPieces[index] in Columns:
VerPieces[index] = "C"
if VerPieces[index] in NegCols:
VerPieces[index] = "N"
if VerPieces[index] in TextLocs:
VerPieces[index] = "T"
VerPieces = VerPieces[1:] if VerPieces[0] == "N" else VerPieces
VerPieces = VerPieces[:-1] if VerPieces[-1] == "N" else VerPieces
    Row = [[] for index in range(len(HorBoundaries2))]
    Row2 = [[] for index in range(len(HorBoundaries2))]
Markers = []
Locations = []
Text = []
for index in range(len(Row)):
Row[index] = Table[HorBoundaries2[index][0]:HorBoundaries2[index][1],0:width]
HeightTemp = Row[index].shape[0]
Row2[index] = [[] for index3 in range(len(TextLocs))]
for index2 in range(len(Row2[index])):
Row2[index][index2] = Row[index][0:HeightTemp, Boundaries2[index2][0]:Boundaries2[index2][1]]
Location = [index, index2]
Locations.append(Location)
io.imsave("Pic" + str(index) + "_" + str(index2) + ".png", Row2[index][index2])
io.imsave("Text.png", img_as_ubyte(Row2[index][index2]))
TextTemp = pytesseract.image_to_string(Image.open("Text.png"), lang="nld")
Text.append(TextTemp)
Markers.append([[HorBoundaries2[index][0],Boundaries2[index2][1]],[HorBoundaries2[index][1],Boundaries2[index2][0]]])
Locations2 = [str(group[0])+'.'+str(group[1]) for group in Locations]
TableContents = dict(zip(Locations2, Text))
VerFill =[]
for index in range(len(Row2)):
for index2 in range(len(Row2[index])):
HeightTemp, WidthTemp = Row2[index][index2].shape
FillList = []
for index3 in range(WidthTemp):
index4 = 0
Filled = False
while (index4 < HeightTemp) and (not(Filled)):
if Row2[index][index2][index4, index3] != NegColor:
Filled = True
else:
index4 += 1
FillList.append(int(Filled))
VerFill.append(FillList)
for index in range(len(VerFill)):
Sensitivity = int(math.floor(len(VerFill[index])/20))
VerFill[index] = "".join(map(str, VerFill[index]))
keys = []
solution = []
Original = copy.deepcopy(VerFill[index])
for index2 in range(Sensitivity):
keys.append("1" + index2*"0" + "1")
solution.append("1"*(index2 + 2))
VerFill[index] = VerFill[index].replace(keys[index2], solution[index2])
VerFill[index]= list(map(int, list(VerFill[index])))
VerFill2 = copy.deepcopy(VerFill)
for key, group in itertools.groupby(VerFill[index]):
VerFill2[index].append(key)
Seperators = list(set(Rows + NegRows + [width, 0]))
Seperators.sort()
SumsPieces = [None for x in range(len(Seperators))]
for index in range(len(Seperators)-1):
if Seperators[index+1]- 5 - (Seperators[index]+5) >= 1:
SumsPieces[index] = HorSums[Seperators[index]+5:Seperators[index+1]-5]
TextRows = SumsPieces
    for index in range(len(HorSums)):
        if Distance(index, Seperators) <= 5:
            HorSums[index] = 0
os.system("cls")
print(TableContents)
for index in range(0, len(HorBoundaries2)):
for index2 in range(0, len(Boundaries2)):
print(TableContents[str(index)+"."+str(index2)], end = "\t")
print("\n")
if Demo:
pass
# ax0.imshow(Table, vmin=image.min(), vmax=image.max(), cmap='gray')
# for index in range(len(Markers)):
# ax0.Rectangle((Markers[index][0][0], Markers[index][0][1]), Markers[index][1][0]-Markers[0][0], Markers[index][1][1] - Markers[index][0][1])
# for index in range(0, len(Rows)):
# ax0.axhline(Rows[index], color="r")
# for index1 in range(0, len(Columns)):
# ax0.axvline(Columns[index1], color="r")
# for index in range(len(NegRows)):
# ax0.axhline(NegRows[index], color = "blue")
# for index in range(len(NegCols)):
# ax0.axvline(NegCols[index], color = "blue")
# ax0.set_title('Table')
# ax0.axis('off')
#
# ax1.plot(VerFill[0], color = "black", label = "Vertical Fill")
# ax1.set_ylim(-0.5, 1.5)
else:
ax0.plot(HorSums, color = "black", label="Horizontale Som")
# for index in range(len(PotColPeaks)):
# ax0.plot(PotColPeaks[index], VerSums[PotColPeaks[index]], "x", color = "red")
ax0.set_ylim(0, 2)
ax0.set_title('Som')
ax0.set_xticks([])
ax0.legend()
ax1.plot(VerSums, color = "black", label="Verticale Som")
for index in range(len(PotCol)):
ax1.plot(PotCol[index], VerSums[PotCol[index]], "x", color = "r")
ax1.set_ylim(0, 2)
ax1.set_title('Som')
ax1.set_xticks([])
ax1.legend()
ax2.imshow(Table, vmin=image.min(), vmax=image.max(), cmap='gray')
for index in range(0, len(Rows)):
ax2.axhline(Rows[index], color="r")
for index1 in range(0, len(Columns)):
ax2.axvline(Columns[index1], color="r")
for index in range(len(NegRows)):
ax2.axhline(NegRows[index], color = "blue")
for index in range(len(NegCols)):
ax2.axvline(NegCols[index], color = "blue")
ax2.set_title('Table')
ax2.axis('off')
#print("This is a table with Width: " + str(len(Boundaries2)) + " and Height: " + str(len(HorBoundaries2)))
if Demo:
pass
else:
fig.tight_layout()
plt.show()
|
[
"[email protected]"
] | |
5422d855e15010c82c1da5f453b099c5cbb404c1
|
310841013efe61f54347d834d6cecb10f297ec8a
|
/data-science-not/weeks/m05_structdata/p1/time_series_currency.py
|
7d012063a8c33d772ba961fee10678f45f4c1861
|
[
"MIT"
] |
permissive
|
nurseiit/comm-unist
|
ad3e158d41575c2a9b6ffbc9d2e52bc964de0026
|
e7a122c910bf12eddf5c0ffc2c666995b4989408
|
refs/heads/master
| 2021-08-20T04:43:49.939173 | 2020-06-14T12:51:59 | 2020-06-14T12:51:59 | 194,407,107 | 6 | 1 |
MIT
| 2019-10-19T17:42:43 | 2019-06-29T13:10:35 |
C++
|
UTF-8
|
Python
| false | false | 6,950 |
py
|
"""
Today's objective is to manipulate data in a time series and practice with the basics of matplotlib,
while working with data about currency values.
The files GBP2USD.txt and JPY2USD.txt contain historical daily values of GBP (pounds) and JPY (yen) against the US dollar.
For instance, the entry "2017-09-28 112.74" in JPY2USD.txt means that, on sept 28th 2017, it took 112.74 yen to buy 1 US dollar
(or, in other words, that 1 US dollar was worth 112.74)
Example of the plots to be printed as output of these functions are given (see .png files in this package)
file_management_example.py shows how to read data from a file
A complete tutorial about matplotlib: https://www.machinelearningplus.com/plots/matplotlib-tutorial-complete-guide-python-plot-examples/
"""
import matplotlib.pyplot as plt
import datetime as dt
# wealth calculator
def historical_wealth_calculator(amount, currency, year, month, day):
"""
This function calculates the value in US dollars of a certain "amount" (of a given "currency") on a specific date
(identified by the parameters year, month and day)
For instance, historical_wealth_calculator(145, "GBP", "2016", "06", "15") should return the value in USD of 145 GBP
on June 15 2016 (The answer is 394.66...)
Note that, given the data that you are provided, "GBP" and "JPY" are the only possible currencies
(and your program should raise an error if a different currency is requested)
"""
if currency == "GBP":
file = open("GBP2USD.txt", "r")
for line in file:
date = line.split('\t')
if date[0] == year + '-' + month + '-' + day:
return float(date[1])*amount
print("No data for " + year + '-' + month + '-' + day)
elif currency == "JPY":
file = open("JPY2USD.txt", "r")
for line in file:
date = line.split('\t')
if date[0] == year + '-' + month + '-' + day:
return float(date[1]) * amount
print("No data for " + year + '-' + month + '-' + day)
else:
print("Wrong Currency")
def from_string_to_date(dates):
"""
This function is GIVEN and can be used to format date values in your plots
:param dates:
:return:
"""
x = [dt.datetime.strptime(d, '%Y-%m-%d').date() for d in dates]
return x
def plot_currency(currency):
"""
This function should plot the value of a given currency ("GBP" or "JPY") on the y axis as a function of
the date on the x axis
In this function, you also need to properly set a title for the plot and labels for each axis
Note: once you have obtained a list of strings with the dates, e.g. ["2017-10-23", "2017-10-22",...], use
the given function from_string_to_date() to convert it into a list of "date" objects that can be
correctly handled by matplotlib
"""
if currency == "GBP":
file = open("GBP2USD.txt", "r")
dates = []
rate = []
for line in file:
line = line.strip()
tmp = line.split('\t')
dates.append(tmp[0])
rate.append(float(tmp[1]))
        dts = from_string_to_date(dates)
        plt.plot(dts, rate)
        plt.title('GBP to USD exchange rate')
        plt.xlabel('Date')
        plt.ylabel('GBP per USD')
        plt.show()
elif currency == "JPY":
file = open("JPY2USD.txt", "r")
dates = []
rate = []
for line in file:
line = line.strip()
tmp = line.split('\t')
dates.append(tmp[0])
rate.append(float(tmp[1]))
        dts = from_string_to_date(dates)
        plt.plot(dts, rate)
        plt.title('JPY to USD exchange rate')
        plt.xlabel('Date')
        plt.ylabel('JPY per USD')
        plt.show()
else:
print("Wrong currency")
def plot_relative_diff(currency):
"""
This function plots the relative daily increase/decrease of a currency value (JPY and GBP are the only allowed).
For instance, if on day X and day X+1 it takes 1.2 and 1.7 JPY to buy one USD, respectively, then
the y-value of the plot on day X+1 will be (1.7-1.2)/1.2
Note that you will not be able to calculate a value for the first day in which exchange rates are recorded
(because there is no previous value to consider)!
:param currency:
:return:
"""
if currency == "GBP":
file = open("GBP2USD.txt", "r")
dates = []
rate = []
dif = []
for line in file:
line = line.strip()
tmp = line.split('\t')
dates.append(tmp[0])
rate.append(float(tmp[1]))
for i in range(1, len(rate)):
dif.append((rate[i] - rate[i-1])/rate[i-1])
dts = from_string_to_date(dates)
plt.plot(dts[1:], dif)
plt.show()
elif currency == "JPY":
file = open("JPY2USD.txt", "r")
dates = []
rate = []
for line in file:
line = line.strip()
tmp = line.split('\t')
dates.append(tmp[0])
rate.append(float(tmp[1]))
for i in range(len(rate) - 1):
dif.append((rate[i] - rate[i-1]) / rate[i - 1])
dts = from_string_to_date(dates)
plt.plot(dts[1:], dif)
plt.show()
else:
print("Wrong currency")
def plot_currency_bars_fromdate(currency, fromdate):
"""
This function should work as plot_currency(), but:
(i) It plots a "bar chart" instead of a line
(ii) It plots only data from a certain date "fromdate" until the most recent data available
(see gbp_fromdate.png for an example outcome from "2016-01-04")
"""
if currency == "GBP":
file = open("GBP2USD.txt", "r")
dates = []
rate = []
dif = []
pnt = -1
for line in file:
line = line.strip()
tmp = line.split('\t')
dates.append(tmp[0])
rate.append(float(tmp[1]))
if pnt < 0:
if (tmp[0] != fromdate):
pnt -= 1
else:
pnt *= -1
dts = from_string_to_date(dates)
plt.bar(dts[:pnt], rate[:pnt], width=2, alpha=0.5)
plt.show()
elif currency == "JPY":
file = open("JPY2USD.txt", "r")
dates = []
rate = []
pnt = -1
for line in file:
line = line.strip()
tmp = line.split('\t')
dates.append(tmp[0])
rate.append(float(tmp[1]))
if pnt < 0:
if (tmp[0] != fromdate):
pnt -= 1
else:
pnt *= -1
        dts = from_string_to_date(dates)
        plt.bar(dts[:pnt], rate[:pnt], width=2, alpha=0.5)
        plt.show()
else:
print("Wrong currency")
if __name__ == "__main__":
# Test historical_wealth_calculator
print(historical_wealth_calculator(278, "GBP", "2016", "06", "15"))
# Test plot (see gbp.png)
plot_currency("JPY")
plot_relative_diff("GBP")
# Test plot from date
plot_currency_bars_fromdate("GBP", "2016-01-04")
|
[
"[email protected]"
] | |
b229faa371f95361509e086d0d3d6efdad889824
|
5045a0886fead7262f3e6ca81765e245c181e8fd
|
/reg_lin_hubble.py
|
abb25aa6e899a69f98d5c8a6c5534513a656d610
|
[] |
no_license
|
flo-collab/Reglin-hubble
|
dfaab9d26a9773c5c4b6940908a61bbada9d731b
|
afcf1eff1e3fd1f0a5745d1959808a90ec3d2f84
|
refs/heads/master
| 2023-05-07T22:56:33.393646 | 2021-05-22T21:31:37 | 2021-05-22T21:31:37 | 369,907,588 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,175 |
py
|
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from sklearn.metrics import r2_score
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error
# import seaborn as sns
plt.style.use('seaborn-darkgrid')
# Load the dataset
hubble_df = pd.read_csv('hubble_data.csv')
# Quick look at the dataset
print(hubble_df.head())
print(hubble_df.describe())
print(hubble_df.shape)
#
X = np.ones((len(hubble_df), 2))
X[:, 1] = hubble_df['distance']
y = hubble_df['recession_velocity']
# Split the dataset in two parts:
# 80% for training, the remaining 20% for testing.
X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=0.8)
# Train the model
model_regLin = LinearRegression()
model_regLin.fit(X_train, y_train)
# Inspect the results: the fitted coefficients
a = model_regLin.coef_
a = a[1]
b = model_regLin.intercept_
print('The fitted coefficients are: \n', 'a =', a, ' and b = ', b)
# Evaluate on the training set
y_train_predict = model_regLin.predict(X_train)
rmse = (np.sqrt(mean_squared_error(y_train, y_train_predict)))
r2 = r2_score(y_train, y_train_predict)
# Report the performance figures
print('Model performance on the training set')
print('--------------------------------------')
print('The root mean squared error is {}'.format(rmse))
print('The R2 score is {}'.format(r2))
plt.title("Relation entre distance et vitesse radiale des nebuleuses extra-galactiques")
plt.xlabel('Distance')
plt.ylabel('vitesse radiale')
# parametrage de l'affichage du nuage de points :
plt.plot(hubble_df['distance'], hubble_df['recession_velocity'],'ro', color = '#FF9933', markersize=7 )
# parametrage de l'affichage de la droite de regression linéaire de 0 à 2 :
plt.plot([0, 2], [ b, b + 2 * a], linestyle='--', c='#d00000' , label="y = {} * x + {}".format(a, b))
plt.legend(loc='lower right')
plt.show()
|
[
"[email protected]"
] | |
0897156b39c9be968a8fa19db785e50da3ab0175
|
a7de0dc33924b1e66c89a1669dca6dbb0c1195a3
|
/examples/str.startswith/ex2.py
|
2d2077dac1757d10002c02fa3b5664ea018ca6cc
|
[
"MIT"
] |
permissive
|
mcorne/python-by-example
|
b6fba25c91d764a600c35bfd53965d9c2d7de014
|
15339c0909c84b51075587a6a66391100971c033
|
refs/heads/master
| 2020-04-23T20:53:58.805326 | 2019-10-16T12:18:20 | 2019-10-16T12:18:20 | 171,453,992 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 48 |
py
|
print('uk.domain.net'.startswith(('fr', 'uk')))
|
[
"[email protected]"
] | |
3aac65dcd3e5246a678308d832621e51ec5e48fb
|
6faf3668a1d5f2bd7a5ef14b005554f43ec84da7
|
/assignment_1/code/modules.py
|
6ff44194318f5e5d43106ab47f7fe8e75d023ddd
|
[] |
no_license
|
maxbos/deep-learning-uva
|
0d1bce7dc9783c25c598070938c5ab3c659a1f83
|
6a0fdfe45c0055282bab794237ed8b51892e4764
|
refs/heads/master
| 2022-01-13T20:26:30.974495 | 2019-05-30T21:51:04 | 2019-05-30T21:51:04 | 182,390,100 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 6,345 |
py
|
"""
This module implements various modules of the network.
You should fill in code into indicated sections.
"""
import numpy as np
class LinearModule(object):
"""
Linear module. Applies a linear transformation to the input data.
"""
def __init__(self, in_features, out_features):
"""
Initializes the parameters of the module.
Args:
in_features: size of each input sample
out_features: size of each output sample
TODO:
Initialize weights self.params['weight'] using normal distribution with mean = 0 and
std = 0.0001. Initialize biases self.params['bias'] with 0.
Also, initialize gradients with zeros.
"""
self.params = {
'weight': np.random.normal(loc=0, scale=0.0001, size=(out_features, in_features)),
'bias': np.zeros((out_features, 1)),
}
self.grads = {
'weight': np.zeros((out_features, in_features)),
'bias': np.zeros((out_features, 1)),
}
def forward(self, x):
"""
Forward pass.
Args:
x: input to the module
Returns:
out: output of the module
TODO:
Implement forward pass of the module.
    Hint: You can store intermediate variables inside the object. They can be used in the backward pass computation.
"""
# Store the input for usage in the backward step
self.x = x
# We transpose the input `x` since it is a matrix of size `n_samples x n_features`
# and we want to calculate the weighted inputs.
out = np.matmul(self.params['weight'], x.T) + self.params['bias']
# We transpose the result back to the size `n_samples x n_features` for future forward
# calls.
return out.T
def backward(self, dout):
"""
Backward pass.
Args:
dout: gradients of the previous module
Returns:
dx: gradients with respect to the input of the module
TODO:
Implement backward pass of the module. Store gradient of the loss with respect to
layer parameters in self.grads['weight'] and self.grads['bias'].
"""
# Calculate the gradients of the current module
dx = np.matmul(dout, self.params['weight'])
# Calculate the gradients w.r.t. to the weights of the current layer
self.grads['weight'] = np.matmul(dout.T, self.x)
# Reshape the `dout` matrix, since this is a matrix of gradients per sample,
# we want to perform a matrix multiplication between one sample and one `ones` vector.
self.grads['bias'] = np.matmul(dout.T, np.ones((dout.shape[0], 1)))
return dx
class ReLUModule(object):
"""
ReLU activation module.
"""
def forward(self, x):
"""
Forward pass.
Args:
x: input to the module
Returns:
out: output of the module
TODO:
Implement forward pass of the module.
    Hint: You can store intermediate variables inside the object. They can be used in the backward pass computation.
"""
self.x_mask = x > 0
out = x.clip(min=0)
return out
def backward(self, dout):
"""
Backward pass.
Args:
      dout: gradients of the previous module
Returns:
dx: gradients with respect to the input of the module
TODO:
Implement backward pass of the module.
"""
dx = np.multiply(dout, self.x_mask)
return dx
class SoftMaxModule(object):
"""
Softmax activation module.
"""
def forward(self, x):
"""
Forward pass.
Args:
x: input to the module
Returns:
out: output of the module
TODO:
Implement forward pass of the module.
To stabilize computation you should use the so-called Max Trick - https://timvieira.github.io/blog/post/2014/02/11/exp-normalize-trick/
    Hint: You can store intermediate variables inside the object. They can be used in the backward pass computation.
"""
# Get the maximum value at each row (each sample)
b = x.max(axis=1)[:, None]
# Use the Exp-normalize trick, so from each value subtract the maximum value
# in its row (same sample) and calculate the exponential
y = np.exp(x - b)
# Sum the exponential values over each row (which is one sample),
# and divide each exponential value by the summation of its row
out = y / y.sum(axis=1)[:, None]
self.n_outputs = x.shape[1]
self.out = out
return out
def backward(self, dout):
"""
Backward pass.
Args:
dout: gradients of the previous modul
Returns:
dx: gradients with respect to the input of the module
TODO:
Implement backward pass of the module.
"""
# Create a 3d-tensor where the first dimension is the number of samples,
# and the value of sample is repeated on the x axis, to create a 2d matrix
# for each sample.
# softmax_grads_3d = np.repeat(self.out[:, :, np.newaxis], self.n_outputs, axis=2)
grads_map = np.dstack([self.out] * self.n_outputs)
eye_matrix = np.identity(self.n_outputs)
softmax_grads = np.einsum('ik, ijk -> ijk', self.out, np.subtract(eye_matrix, grads_map))
dx = np.einsum('ij, ijk -> ik', dout, softmax_grads)
return dx
class CrossEntropyModule(object):
"""
Cross entropy loss module.
"""
def forward(self, x, y):
"""
Forward pass.
Args:
x: input to the module
y: labels of the input
Returns:
out: cross entropy loss
TODO:
Implement forward pass of the module.
"""
# For every neuron in the input that corresponds to the neuron for
# the actual correct target, we calculate the negative log.
# Do this by first performing element-wise multiplication of x and y,
# this yields a matrix with only values at the positions where y = 1.
# Finally, calculate the average loss from all individual losses.
out = (-np.log(np.multiply(x, y).sum(axis=1))).mean()
return out
def backward(self, x, y):
"""
Backward pass.
Args:
x: input to the module
y: labels of the input
Returns:
dx: gradient of the loss with the respect to the input x.
TODO:
Implement backward pass of the module.
"""
dx = -np.divide(y, x)/len(x)
return dx
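
# Minimal smoke test (illustrative addition, not part of the assignment):
# run a forward/backward pass through each module and check that the
# gradient w.r.t. the input has the same shape as the input.
if __name__ == '__main__':
    np.random.seed(0)
    x = np.random.randn(4, 3)                  # 4 samples, 3 features
    y = np.eye(2)[np.random.randint(0, 2, 4)]  # one-hot labels, 2 classes
    lin = LinearModule(3, 2)
    relu = ReLUModule()
    soft = SoftMaxModule()
    loss = CrossEntropyModule()
    out = soft.forward(relu.forward(lin.forward(x)))
    print('loss:', loss.forward(out, y))
    dx = lin.backward(relu.backward(soft.backward(loss.backward(out, y))))
    assert dx.shape == x.shape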
|
[
"[email protected]"
] | |
fafcbf5da3006c2152cea7f3063f90fb21b14d37
|
31a96d3a1bc7ec1608d17e5f4038f0ffa86a4e68
|
/db.py
|
a8567fea55fef2dc93e2a670804fd4a7e51eb7b9
|
[] |
no_license
|
ammevissen/Udemy_Python_Primer_Heroku_One
|
b01e39e91592db472e786aea3650f1daea1aca20
|
b943ab4b02ef6e743bbb1d6052c29715154be94c
|
refs/heads/main
| 2023-05-03T14:02:07.893514 | 2021-05-21T19:02:54 | 2021-05-21T19:02:54 | 369,330,807 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 69 |
py
|
from flask_sqlalchemy import SQLAlchemy
db = SQLAlchemy()  # our ORM
|
[
"[email protected]"
] | |
daaa788ee7e30201727b6f6d4cc3ae438c5fca62
|
42337cdb839982d762667c38727a12b0de82423c
|
/bin/jamf_pro_addon_for_splunk/aob_py2/cloudconnectlib/core/ext.py
|
9773b94a410c8baa1d606f5767410a92c391b689
|
[
"MIT"
] |
permissive
|
jamf/SplunkBase
|
166722f764ca666b0421dd6c895c82d6123be022
|
5c0edaac0bc8b040fda638845f24e39b6c324888
|
refs/heads/master
| 2022-06-25T22:39:04.668321 | 2022-06-09T15:24:40 | 2022-06-09T15:24:40 | 232,366,538 | 5 | 4 |
MIT
| 2022-06-14T14:32:22 | 2020-01-07T16:25:51 |
Python
|
UTF-8
|
Python
| false | false | 12,529 |
py
|
from builtins import str
from builtins import range
import calendar
import json
import re
import traceback
from collections import Iterable
from datetime import datetime
import six
from jsonpath_rw import parse
from .exceptions import FuncException, StopCCEIteration, QuitJobError
from .pipemgr import PipeManager
from ..common import util, log
_logger = log.get_cc_logger()
def regex_search(pattern, source, flags=0):
"""Search substring in source through regex"""
if not isinstance(source, six.string_types):
_logger.warning('Cannot apply regex search on non-string: %s', type(source))
return {}
try:
matches = re.search(pattern=pattern, string=source, flags=flags)
except Exception:
_logger.warning('Unable to search pattern=%s and flags=%s in string, error=%s',
pattern, flags, traceback.format_exc())
return {}
else:
return matches.groupdict() if matches else {}
def regex_match(pattern, source, flags=0):
"""
Determine whether a string is match a regex pattern.
:param pattern: regex pattern
:param source: candidate to match regex
:param flags: flags for regex match
:return: `True` if candidate match pattern else `False`
"""
try:
return re.match(pattern, source, flags) is not None
except Exception:
_logger.warning(
'Unable to match source with pattern=%s, cause=%s',
pattern,
traceback.format_exc()
)
return False
def regex_not_match(pattern, source, flags=0):
"""
Determine whether a string is not match a regex pattern.
:param pattern: regex expression
:param source: candidate to match regex
:param flags: flags for regex match
:return: `True` if candidate not match pattern else `False`
"""
return not regex_match(pattern, source, flags)
def json_path(source, json_path_expr):
""" Extract value from string with JSONPATH expression.
:param json_path_expr: JSONPATH expression
:param source: string to extract value
:return: A `list` contains all values extracted
"""
if not source:
_logger.debug('source to apply JSONPATH is empty, return empty.')
return ''
if isinstance(source, six.string_types):
_logger.debug(
'source expected is a JSON, not %s. Attempt to'
' convert it to JSON',
type(source)
)
try:
source = json.loads(source)
except Exception as ex:
_logger.warning(
'Unable to load JSON from source: %s. '
'Attempt to apply JSONPATH "%s" on source directly.',
ex,
json_path_expr
)
try:
expression = parse(json_path_expr)
results = [match.value for match in expression.find(source)]
_logger.debug(
'Got %s elements extracted with JSONPATH expression "%s"',
len(results), json_path_expr
)
if not results:
return ''
return results[0] or '' if len(results) == 1 else results
except Exception as ex:
_logger.warning(
'Unable to apply JSONPATH expression "%s" on source,'
' message=%s cause=%s',
json_path_expr,
ex,
traceback.format_exc()
)
return ''
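
# Illustrative example:
#   json_path('{"a": {"b": 5}}', '$.a.b')  ->  5
#   json_path('{"a": {"b": 5}}', '$.a.c')  ->  ''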
def splunk_xml(candidates,
time=None,
index=None,
host=None,
source=None,
sourcetype=None):
""" Wrap a event with splunk xml format.
:param candidates: data used to wrap as event
:param time: timestamp which must be empty or a valid float
:param index: index name for event
:param host: host for event
:param source: source for event
:param sourcetype: sourcetype for event
:return: A wrapped event with splunk xml format.
"""
if not isinstance(candidates, (list, tuple)):
candidates = [candidates]
time = time or None
if time:
try:
time = float(time)
except ValueError:
_logger.warning(
'"time" %s is expected to be a float, set "time" to None',
time
)
time = None
xml_events = util.format_events(
candidates,
time=time,
index=index,
host=host,
source=source,
sourcetype=sourcetype
)
_logger.info(
"[%s] events are formated as splunk stream xml",
len(candidates)
)
return xml_events
def std_output(candidates):
""" Output a string to stdout.
:param candidates: List of string to output to stdout or a single string.
"""
if isinstance(candidates, six.string_types):
candidates = [candidates]
all_str = True
for candidate in candidates:
if all_str and not isinstance(candidate, six.string_types):
all_str = False
_logger.debug(
'The type of data needs to print is "%s" rather than %s',
type(candidate),
str(six.string_types)
)
try:
candidate = json.dumps(candidate)
        except Exception:
_logger.exception('The type of data needs to print is "%s"'
' rather than %s',
type(candidate),
str(six.string_types))
if not PipeManager().write_events(candidate):
raise FuncException('Fail to output data to stdout. The event'
' writer is stopped or encountered exception')
_logger.debug('Writing events to stdout finished.')
return True
def _parse_json(source, json_path_expr=None):
if not source:
_logger.debug('Unable to parse JSON from empty source, return empty.')
return {}
if json_path_expr:
_logger.debug(
'Try to extract JSON from source with JSONPATH expression: %s, ',
json_path_expr
)
source = json_path(source, json_path_expr)
elif isinstance(source, six.string_types):
source = json.loads(source)
return source
def json_empty(source, json_path_expr=None):
"""Check whether a JSON is empty, return True only if the JSON to
check is a valid JSON and is empty.
    :param json_path_expr: An optional JSONPATH expression
:param source: source to extract JSON
:return: `True` if the result JSON is empty
"""
try:
data = _parse_json(source, json_path_expr)
if isinstance(data, (list, tuple)):
return all(len(ele) == 0 for ele in data)
return len(data) == 0
except Exception as ex:
_logger.warning(
'Unable to determine whether source is json_empty, treat it as '
'not json_empty: %s', ex
)
return False
def json_not_empty(source, json_path_expr=None):
"""Check if a JSON object is not empty, return True only if the
source is a valid JSON object and the value leading by
json_path_expr is empty.
:param json_path_expr: A optional JSONPATH expression
:param source: source to extract JSON
:return: `True` if the result JSON is not empty
"""
try:
data = _parse_json(source, json_path_expr)
if isinstance(data, (list, tuple)):
return any(len(ele) > 0 for ele in data)
return len(data) > 0
except Exception as ex:
_logger.warning(
'Unable to determine whether source is json_not_empty, '
'treat it as not json_not_empty: %s',
ex
)
return False
def set_var(value):
"""Set a variable which name should be specified in `output` with value"""
return value
def _fix_microsecond_format(fmt, micros):
"""
    Implement %Nf so that the user can control the number of microsecond digits.
    If the number of '%' signs is even, don't do the replacement.
    If N is not in [1-6], don't do the replacement.
    If the microsecond value has fewer than N digits, zero-pad it to 6 digits
    and return the first N of them.
"""
micros = str(micros).zfill(6)
def do_replacement(x, micros):
if int(x.group(1)) in range(1, 7) and len(x.group()) % 2:
return x.group().replace('%' + x.group(1) + 'f',
micros[:min(int(x.group(1)), len(micros))])
return x.group()
return re.sub(r'%+([1-6])f', lambda x: do_replacement(x, micros), fmt)
def _fix_timestamp_format(fmt, timestamp):
"""Replace '%s' in time format with timestamp if the number
of '%' before 's' is odd."""
return re.sub(
r'%+s',
(
lambda x:
x.group() if len(x.group()) % 2 else x.group().replace('%s',
timestamp)
),
fmt
)
def time_str2str(date_string, from_format, to_format):
"""Convert a date string with given format to another format. Return
the original date string if it's type is not string or failed to parse or
convert it with format."""
if not isinstance(date_string, six.string_types):
_logger.warning(
'"date_string" must be a string type, found %s,'
' return the original date_string directly.',
type(date_string)
)
return date_string
try:
dt = datetime.strptime(date_string, from_format)
# Need to pre process '%s' in to_format here because '%s' is not
# available on all platforms. Even on supported platforms, the
# result may be different because it depends on implementation on each
# platform. Replace it with UTC timestamp here directly.
if to_format:
timestamp = calendar.timegm(dt.timetuple())
to_format = _fix_timestamp_format(to_format, str(timestamp))
to_format = _fix_microsecond_format(to_format, str(dt.microsecond))
return dt.strftime(to_format)
except Exception:
_logger.warning(
'Unable to convert date_string "%s" from format "%s" to "%s",'
' return the original date_string, cause=%s',
date_string,
from_format,
to_format,
traceback.format_exc()
)
return date_string
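
# Illustrative example:
#   time_str2str('2020-01-02', '%Y-%m-%d', '%d/%m/%Y')  ->  '02/01/2020'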
def is_true(value):
"""Determine whether value is True"""
return str(value).strip().lower() == 'true'
def exit_if_true(value):
"""Raise a StopCCEIteration exception if value is True"""
if is_true(value):
raise StopCCEIteration
def exit_job_if_true(value):
"""Raise a QuitJob exception if value is True"""
if is_true(value):
raise QuitJobError
def assert_true(value, message=None):
"""Assert value is True"""
if not is_true(value):
raise AssertionError(
message or '"{value}" is not true'.format(value=value)
)
def split_by(source, target, separator=None):
"""Split the source to multiple values by the separator"""
try:
if not source:
return []
elif isinstance(source, six.string_types) and separator:
values = source.split(separator)
return [{target: value.strip()} for value in values]
elif isinstance(source, six.string_types):
return [{target: source}]
elif isinstance(source, Iterable):
return [{target: value} for value in source]
else:
return [{target: source}]
except Exception as ex:
_logger.warning("split_by method encountered exception "
"source=%s message=%s cause=%s", source, ex,
traceback.format_exc())
return []
_extension_functions = {
'assert_true': assert_true,
'exit_if_true': exit_if_true,
'exit_job_if_true': exit_job_if_true,
'is_true': is_true,
'regex_match': regex_match,
'regex_not_match': regex_not_match,
'regex_search': regex_search,
'set_var': set_var,
'splunk_xml': splunk_xml,
'std_output': std_output,
'json_path': json_path,
'json_empty': json_empty,
'json_not_empty': json_not_empty,
'time_str2str': time_str2str,
'split_by': split_by
}
def lookup_method(name):
""" Find a predefined function with given function name.
:param name: function name.
:return: A function with given name.
"""
return _extension_functions.get(name)
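
# Illustrative usage:
#   func = lookup_method('is_true')
#   assert func('True') is True and func('no') is False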
|
[
"[email protected]"
] | |
b2818ea2a586e30984accd3b80dcb0ae4df1e15b
|
13b535a8b0a482c7451a3bd318fee414adbbf706
|
/Student/migrations/0007_auto_20201023_0005.py
|
3a0298f826d8cda08365a643508d8df4aa3a5731
|
[] |
no_license
|
sharathcng/Placement_Assistant
|
512856183a4cd0b1a697bcee7cd10187aa60661c
|
635fb2d8dd189f0147c4353cf83ab5794ebd202f
|
refs/heads/master
| 2023-01-31T00:01:26.789835 | 2020-12-10T08:32:21 | 2020-12-10T08:32:21 | 304,419,705 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 589 |
py
|
# Generated by Django 3.0.5 on 2020-10-22 18:35
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('Student', '0006_auto_20201022_2349'),
]
operations = [
migrations.AlterField(
model_name='academic_table',
name='username',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
]
|
[
"[email protected]"
] | |
4aa37c206926ab0fb5ae01fe412c1b992fcad8eb
|
8fd5d776531023705cd19388657506bf9447647b
|
/flask/bin/migrate-repository
|
0603fdd30e27343597f68c4b06d8b23f53a1a25b
|
[] |
no_license
|
Nevyn2345/flask_tutorial
|
4f89441f4a0c23334b8bb05dcc4ac50223f77889
|
f256327c2242f8958c535b3e333884ca050b2796
|
refs/heads/master
| 2021-01-20T04:36:05.972933 | 2015-05-04T08:40:12 | 2015-05-04T08:40:12 | 32,762,722 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 198 |
#!/Users/sam/Documents/flask/bin/python
# PBR Generated from u'console_scripts'
import sys
from migrate.versioning.migrate_repository import main
if __name__ == "__main__":
sys.exit(main())
|
[
"[email protected]"
] | ||
133c284bffa840c787391f0ed653b5e7111d752b
|
3a44d57e63d31317329a857ca2d22221f996f560
|
/public_html/pruebaformularios/urls.py
|
f715b0b907c6e68a4fcc0ad032a5434977a8c959
|
[] |
no_license
|
charlierosario/iappweb
|
fdfd8374ea7b249ad72f0a058383233e83f70180
|
b8142afea46aa4049a22370044debea939b224b5
|
refs/heads/master
| 2023-02-28T00:22:25.402091 | 2021-01-25T23:50:04 | 2021-01-25T23:50:04 | 331,477,071 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 290 |
py
|
from django.contrib.auth.decorators import login_required
from django.urls import path
from .views import PersonaList, add_persona
prueba_patterns = ([
path('', add_persona, name='plist'),
path('listar/', login_required(PersonaList.as_view()), name='padd'),
], "inscripcion")
|
[
"[email protected]"
] | |
4b8f8854febf79bf31d89cf22f09b88e462d80be
|
6467e1fbd1c25841d127aa72dddfa51391d7f15b
|
/mydjangoproject/settings.py
|
b3972d903682ea4585589e839f7d48795c381281
|
[] |
no_license
|
mcteach21/Django-Start
|
f64edd0b133d3c95e1e8894f3b5456bc9c6aa77c
|
c65e9d5b45bfaa8e3587028718cbc96db0d28444
|
refs/heads/main
| 2023-06-23T19:31:45.001956 | 2021-07-21T16:26:02 | 2021-07-21T16:26:02 | 388,179,014 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,294 |
py
|
"""
Django settings for mydjangoproject project.
Generated by 'django-admin startproject' using Django 3.2.5.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'django-insecure-x#irey-=v=fb9^f_^#*y2s9gyd#i7u^q2hd786%nkfmvl!_9-_'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'blog.apps.BlogConfig',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mydjangoproject.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'mydjangoproject.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
|
[
"[email protected]"
] | |
af9b1e7baf542d634560f2491bfec00ceb007998
|
a3d8e72f87136ee3298629827f00cd8729ce53ed
|
/tests/fs_test.py
|
4c40b77de3381518033956897e9efa8205585a1d
|
[
"MIT"
] |
permissive
|
ecotricity/databricks_test
|
b6f132bd0a46a056e5b4c62aaf91590d0e4c4e01
|
f0560c84bdd0dd3eefa976a1af901927a4f299be
|
refs/heads/master
| 2023-02-10T04:23:47.325585 | 2020-12-08T14:55:11 | 2020-12-08T14:55:11 | 315,333,628 | 0 | 1 |
MIT
| 2021-01-06T13:14:58 | 2020-11-23T14:03:10 |
Python
|
UTF-8
|
Python
| false | false | 167 |
py
|
import databricks_test
def test_fs():
with databricks_test.session() as dbrickstest:
# Run notebook
dbrickstest.run_notebook(".", "fs_notebook")
|
[
"[email protected]"
] | |
76d05e8822d5cec18723335e7abde30589e814c0
|
d4a5824ea46412ad37ed2ce08ccf35be4452afd9
|
/virtual/bin/django-admin.py
|
c71413e86455e5e73d48218ea9b17792bf820c9b
|
[] |
no_license
|
somoire/gram
|
b6c452b61628e4aee93c458a29dc54ba906676a5
|
b02d88470d98c1b9486a238ee63d4f699b237431
|
refs/heads/master
| 2021-09-09T06:21:23.912307 | 2019-05-23T07:49:20 | 2019-05-23T07:49:20 | 187,171,533 | 0 | 0 | null | 2021-09-08T01:00:38 | 2019-05-17T07:49:16 |
Python
|
UTF-8
|
Python
| false | false | 167 |
py
|
#!/home/rodney/Desktop/moringa-core/insta/virtual/bin/python
from django.core import management
if __name__ == "__main__":
management.execute_from_command_line()
|
[
"[email protected]"
] | |
dd01953c26c84e1f5fe62d164e4fe6e71c35e4a2
|
d4d3160eb066e9eacacad530669b5750b82358ee
|
/reactivemqtt/object_receiver.py
|
e9f5ef31a6597eda2434d8a027890f37c72f8c04
|
[
"MIT"
] |
permissive
|
jbarbadillo/reactivemqtt
|
bbffbf3371a9c8131edca41e36925395d3ca9ddf
|
f4f1d09b8d65d7ce9cc62de62f0908c4b4476a3f
|
refs/heads/master
| 2020-04-03T06:45:23.724831 | 2019-02-28T20:55:15 | 2019-02-28T20:55:15 | 155,083,249 | 2 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 970 |
py
|
import paho.mqtt.client as mqtt
class ObjectReceiver():
"""
Receives object identifications and position
"""
def __init__(self, observer):
self.observer = observer
client = mqtt.Client()
client.on_connect = self.on_connect
client.message_callback_add("data/position", self.on_position)
client.message_callback_add("data/object", self.on_object)
self.client = client
client.connect("iot.eclipse.org")
client.loop_start()
    def on_connect(self, client, userdata, flags, rc):
print("Connected with result code " + str(rc))
self.client.subscribe("data/position", qos=2)
self.client.subscribe("data/object", qos=2)
def on_position(self, client, userdata, msg):
self.observer.on_next([msg.topic, msg.payload.decode("utf-8")])
def on_object(self, client, userdata, msg):
self.observer.on_next([msg.topic, msg.payload.decode("utf-8")])
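
# Illustrative usage (sketch): any object exposing on_next() can serve as
# the observer, e.g. an RxPY Subject or the stub below.
if __name__ == "__main__":
    import time

    class PrintObserver:
        def on_next(self, value):
            print(value)

    receiver = ObjectReceiver(PrintObserver())
    time.sleep(60)  # keep the MQTT network loop alive while messages arrive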
|
[
"[email protected]"
] |