blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 5
283
| content_id
stringlengths 40
40
| detected_licenses
sequencelengths 0
41
| license_type
stringclasses 2
values | repo_name
stringlengths 7
96
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 58
values | visit_date
timestamp[us] | revision_date
timestamp[us] | committer_date
timestamp[us] | github_id
int64 12.7k
662M
⌀ | star_events_count
int64 0
35.5k
| fork_events_count
int64 0
20.6k
| gha_license_id
stringclasses 11
values | gha_event_created_at
timestamp[us] | gha_created_at
timestamp[us] | gha_language
stringclasses 43
values | src_encoding
stringclasses 9
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
5.88M
| extension
stringclasses 30
values | content
stringlengths 7
5.88M
| authors
sequencelengths 1
1
| author
stringlengths 0
73
|
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
33d6093618499e7fec08601c9e89c6294b4ab44c | 32eeb97dff5b1bf18cf5be2926b70bb322e5c1bd | /benchmark/materialistic/testcase/firstcases/testcase6_015.py | fd51f2cbcd30c5b02180cf32fadd398fc056fcad | [] | no_license | Prefest2018/Prefest | c374d0441d714fb90fca40226fe2875b41cf37fc | ac236987512889e822ea6686c5d2e5b66b295648 | refs/heads/master | 2021-12-09T19:36:24.554864 | 2021-12-06T12:46:14 | 2021-12-06T12:46:14 | 173,225,161 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,621 | py | #coding=utf-8
import os
import subprocess
import time
import traceback
from appium import webdriver
from appium.webdriver.common.touch_action import TouchAction
from selenium.common.exceptions import NoSuchElementException, WebDriverException
# Appium capabilities: drive the "materialistic" app on an Android 4.4
# emulator, with JaCoCo instrumentation so the run produces coverage data.
desired_caps = {
'platformName' : 'Android',
'deviceName' : 'Android Emulator',
'platformVersion' : '4.4',
'appPackage' : 'io.github.hidroh.materialistic',
'appActivity' : 'io.github.hidroh.materialistic.LauncherActivity',
'resetKeyboard' : True,
'androidCoverage' : 'io.github.hidroh.materialistic/io.github.hidroh.materialistic.JacocoInstrumentation',
'noReset' : True
}
def command(cmd, timeout=5):
    # Fire-and-forget shell command: start it, wait *timeout* seconds,
    # then terminate it regardless of whether it finished.
    p = subprocess.Popen(cmd, stderr=subprocess.STDOUT, stdout=subprocess.PIPE, shell=True)
    time.sleep(timeout)
    p.terminate()
    return
def getElememt(driver, str) :
    """Locate a UI element by uiautomator selector, retrying up to 5 times.

    Sleeps 1s between failed attempts; as a last resort taps the screen
    once (to dismiss overlays) and tries one final lookup, letting any
    NoSuchElementException from that attempt propagate.
    """
    attempts_left = 5
    while attempts_left > 0:
        try:
            return driver.find_element_by_android_uiautomator(str)
        except NoSuchElementException:
            time.sleep(1)
            attempts_left -= 1
    # Nudge the UI, then make one unguarded final attempt.
    os.popen("adb shell input tap 50 50")
    return driver.find_element_by_android_uiautomator(str)
def getElememtBack(driver, str1, str2) :
    """Locate an element by *str1* (2 tries) falling back to *str2* (5 tries).

    Sleeps 1s after each failed lookup.  If every retry fails, taps the
    screen once and performs a final unguarded lookup with *str2*.
    """
    for selector, tries in ((str1, 2), (str2, 5)):
        for _ in range(tries):
            try:
                return driver.find_element_by_android_uiautomator(selector)
            except NoSuchElementException:
                time.sleep(1)
    # Nudge the UI, then make one unguarded final attempt.
    os.popen("adb shell input tap 50 50")
    return driver.find_element_by_android_uiautomator(str2)
def swipe(driver, startxper, startyper, endxper, endyper) :
    """Swipe across the screen using fractional coordinates in [0, 1].

    Converts the fractions to pixels from the current window size and
    retries once, after a 1s pause, if the first swipe raises
    WebDriverException.
    """
    size = driver.get_window_size()
    x0 = int(size["width"] * startxper)
    y0 = int(size["height"] * startyper)
    x1 = int(size["width"] * endxper)
    y1 = int(size["height"] * endyper)
    try:
        driver.swipe(start_x=x0, start_y=y0, end_x=x1, end_y=y1,
                     duration=2000)
    except WebDriverException:
        time.sleep(1)
        driver.swipe(start_x=x0, start_y=y0, end_x=x1, end_y=y1,
                     duration=2000)
    return
# testcase015
# Scripted UI tour (Python 2): open the app, walk through the display/font
# settings screens via long-presses and taps, and report OK/FAIL plus timing
# and JaCoCo coverage at the end.
try :
    starttime = time.time()
    driver = webdriver.Remote('http://localhost:4723/wd/hub', desired_caps)
    element = getElememtBack(driver, "new UiSelector().text(\"...\")", "new UiSelector().className(\"android.widget.TextView\").instance(8)")
    TouchAction(driver).long_press(element).release().perform()
    element = getElememtBack(driver, "new UiSelector().text(\"...\")", "new UiSelector().className(\"android.widget.TextView\").instance(4)")
    TouchAction(driver).long_press(element).release().perform()
    # Open the overflow menu and the "List display options" screen.
    element = getElememt(driver, "new UiSelector().className(\"android.widget.ImageView\").description(\"More options\")")
    TouchAction(driver).tap(element).perform()
    element = getElememtBack(driver, "new UiSelector().text(\"List display options\")", "new UiSelector().className(\"android.widget.TextView\")")
    TouchAction(driver).tap(element).perform()
    element = getElememtBack(driver, "new UiSelector().text(\"Show as cards\")", "new UiSelector().className(\"android.widget.TextView\").instance(11)")
    TouchAction(driver).tap(element).perform()
    element = getElememtBack(driver, "new UiSelector().text(\"Medium\")", "new UiSelector().className(\"android.widget.TextView\").instance(3)")
    TouchAction(driver).tap(element).perform()
    element = getElememtBack(driver, "new UiSelector().text(\"Roboto Slab\")", "new UiSelector().className(\"android.widget.TextView\").instance(4)")
    TouchAction(driver).tap(element).perform()
    element = getElememtBack(driver, "new UiSelector().text(\"Extra small\")", "new UiSelector().className(\"android.widget.TextView\").instance(3)")
    TouchAction(driver).tap(element).perform()
    element = getElememtBack(driver, "new UiSelector().text(\"Libre Baskerville\")", "new UiSelector().className(\"android.widget.TextView\").instance(3)")
    TouchAction(driver).tap(element).perform()
    element = getElememtBack(driver, "new UiSelector().text(\"Extra large\")", "new UiSelector().className(\"android.widget.TextView\").instance(3)")
    TouchAction(driver).long_press(element).release().perform()
    element = getElememtBack(driver, "new UiSelector().text(\"Medium\")", "new UiSelector().className(\"android.widget.TextView\").instance(2)")
    TouchAction(driver).tap(element).perform()
    element = getElememtBack(driver, "new UiSelector().text(\"Comments\")", "new UiSelector().className(\"android.widget.TextView\").instance(7)")
    TouchAction(driver).tap(element).perform()
    element = getElememtBack(driver, "new UiSelector().text(\"Readability\")", "new UiSelector().className(\"android.widget.TextView\").instance(2)")
    TouchAction(driver).tap(element).perform()
    element = getElememtBack(driver, "new UiSelector().text(\"Other\")", "new UiSelector().className(\"android.widget.TextView\").instance(10)")
    TouchAction(driver).long_press(element).release().perform()
    element = getElememtBack(driver, "new UiSelector().text(\"Other\")", "new UiSelector().className(\"android.widget.TextView\").instance(1)")
    TouchAction(driver).tap(element).perform()
    element = getElememtBack(driver, "new UiSelector().text(\"Font\")", "new UiSelector().className(\"android.widget.TextView\").instance(4)")
    TouchAction(driver).tap(element).perform()
    element = getElememtBack(driver, "new UiSelector().text(\"Extra large\")", "new UiSelector().className(\"android.widget.TextView\").instance(4)")
    TouchAction(driver).tap(element).perform()
    element = getElememtBack(driver, "new UiSelector().text(\"Article\")", "new UiSelector().className(\"android.widget.TextView\").instance(7)")
    TouchAction(driver).long_press(element).release().perform()
    element = getElememtBack(driver, "new UiSelector().text(\"Readability\")", "new UiSelector().className(\"android.widget.TextView\").instance(2)")
    TouchAction(driver).tap(element).perform()
    element = getElememtBack(driver, "new UiSelector().text(\"Large\")", "new UiSelector().className(\"android.widget.TextView\").instance(3)")
    TouchAction(driver).tap(element).perform()
except Exception, e:
    print 'FAIL'
    print 'str(e):\t\t', str(e)
    print 'repr(e):\t', repr(e)
    print traceback.format_exc()
else:
    print 'OK'
finally:
    # NOTE(review): if webdriver.Remote() itself failed, ``driver`` and
    # ``starttime`` are unbound and this finally-block raises NameError.
    cpackage = driver.current_package
    endtime = time.time()
    print 'consumed time:', str(endtime - starttime), 's'
    # Broadcast so the instrumented app flushes its coverage file.
    command("adb shell am broadcast -a com.example.pkg.END_EMMA --es name \"6_015\"")
    jacocotime = time.time()
    print 'jacoco time:', str(jacocotime - endtime), 's'
    driver.quit()
    # Force-stop whatever foreground app we ended up in, unless it is the
    # app under test.
    if (cpackage != 'io.github.hidroh.materialistic'):
        cpackage = "adb shell am force-stop " + cpackage
os.popen(cpackage) | [
"[email protected]"
] | |
1bf10bf9ab943cb001ee4242ad9ccd3c70d30efe | 1514f1680eee6c08eed3d4e438e5e9a18e25a8c5 | /dw_service/core/job/jb_dim_dm.py | b6f47c9a004aad9e5ff5f0da3e7b2eac1de38ecb | [] | no_license | xiongjizhang/hive_dw | 8eaabec8b054534850aba174f8b8cd500a4c7c77 | d217974d4d2f347b9650ce9f1021c7a45809e282 | refs/heads/master | 2020-05-03T01:50:03.331222 | 2019-03-29T07:00:13 | 2019-03-29T07:00:13 | 178,349,833 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,012 | py | #coding=utf-8
import datetime
from util.base.read_conf import ReadConf
from util.mysql.mysql import MySql
from Crypto.Cipher import AES
import os,sys
import pymysql
import pyhs2
import ConfigParser
from io import BytesIO
import paramiko
import datetime
import logging
import time
class JobDimDm :
    """ETL job that loads a dimension table using MySQL metadata and Hive."""
    def __init__(self, job_info, batch_info):
        # job_info / batch_info: dicts describing the configured job row and
        # the current batch run (run_period etc.).
        self.job_info = job_info
        self.batch_info = batch_info
        # Connection settings for the ETL metadata database.
        self.etl_db_conf = ReadConf('dw_service/conf/etl_db_mysql.conf')
        self.mysql_db = MySql(self.etl_db_conf.get_conf())
        LOG_FORMAT = "%(asctime)s - %(levelname)s - %(message)s"
        # NOTE(review): basicConfig here mutates global logging state on every
        # instantiation - consider moving to module-level setup.
        logging.basicConfig(level=logging.DEBUG, format=LOG_FORMAT)
def run(self):
# 获取作业的相关配置信息
run_period = self.batch_info['run_period']
last_runperiod = self.batch_info['last_period']
job_id = self.job_info['id']
source_conn_id = self.job_info['source_conn_id']
check_file_name = self.job_info['source_info1'].replace("${RUNPERIOD}", run_period)
data_file_name = self.job_info['source_info2'].replace("${RUNPERIOD}", run_period)
target_conn_id = self.job_info['target_conn_id']
table_schema = self.job_info['target_info1']
table_name = self.job_info['target_info2']
retry_cnt = self.job_info['retry_cnt']
max_retry_cnt = self.job_info['max_retry_cnt']
target_conn_sql = "select a.* from hive_etl.etl_conf_conn_account a where a.id = " + str(target_conn_id)
self.mysql_db.query(target_conn_sql)
target_conn = self.mysql_db.fetchOneRow()
hive_host = target_conn['host']
hive_port = target_conn['port']
hive_user = target_conn['user_name']
hive_pw = target_conn['password']
hive_authmechanism = target_conn['attachment']
obj = AES.new('This is a key114'.encode('utf-8'), AES.MODE_CBC, 'This is an WZ456'.encode('utf-8'))
hive_pw = obj.decrypt(hive_pw.decode("string-escape")).rstrip('\0')
| [
"[email protected]"
] | |
74cd9576395c92418dfcdc48678e1d0f966022f4 | 1867d4895a597aa1a6725e5ae533a5a1a921d135 | /src/gradiente/gradiente.py | 4e8a435c4c9874bd582520ab5ca22c86e2986e1d | [] | no_license | adas-eye/TCC | e40a9e37e76cb3ef8ad0dcc90fd90a0b73c26317 | 11137a7d5ffa68eb0d0563ca9324ab909fa56140 | refs/heads/master | 2021-09-13T02:11:24.720927 | 2018-04-23T20:59:10 | 2018-04-23T20:59:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,880 | py | import sys
import numpy
import cv2
import math
def loadResource(source, out=None):
    """Compute a 4x3-grid gradient-orientation descriptor for every frame.

    Parameters
    ----------
    source : str or int
        Video file path, or webcam index, handed to ``cv2.VideoCapture``.
    out : str, optional
        When given, each frame's descriptor repr is also appended to this
        file.

    Returns
    -------
    numpy.ndarray
        One row per processed frame.  Each row starts with a 0.0 sentinel
        followed by one magnitude-weighted mean orientation per grid cell
        (cells with zero gradient energy are skipped, as before).
    """
    print("--------------Gradiente---------------")
    cap = cv2.VideoCapture(source)
    descritor_global = None
    counter = 0
    # Fix: ``saida`` was written to below, but its creation had been
    # commented out, raising NameError whenever ``out`` was supplied.
    saida = open(out, "w") if out is not None else None
    while cap.isOpened():
        if counter % 24 == 0:
            print("Segundo {}".format(counter / 24))
        counter += 1
        ret, frame = cap.read()
        if numpy.shape(frame) == ():
            # No frame returned: end of stream.
            break
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        gray = cv2.resize(gray, (320, 240), fx=0, fy=0)
        tamx = int(320 / 4)  # cell sizes both happen to be 80 px
        tamy = int(240 / 3)
        gradientex = getGradient(gray, 1, 0)
        gradientey = getGradient(gray, 0, 1)
        # Vectorized (was two per-pixel Python loops): elementwise
        # atan2/hypot over the whole gradient images.
        orientacao = numpy.arctan2(gradientex, gradientey)
        magnitude = numpy.hypot(gradientex, gradientey)
        descritor = numpy.array([0.0])
        for xquadro in range(3):
            for yquadro in range(4):
                rows = slice(xquadro * tamx, (xquadro + 1) * tamx)
                cols = slice(yquadro * tamy, (yquadro + 1) * tamy)
                denominador = magnitude[rows, cols].sum()
                dividendo = (magnitude[rows, cols] * orientacao[rows, cols]).sum()
                if denominador != 0:
                    descritor = numpy.append(descritor, dividendo / denominador)
                else:
                    # Zero gradient energy in this cell: skip it (mirrors the
                    # old ZeroDivisionError handler's prints).
                    print(dividendo)
                    print(denominador)
        if saida is not None:
            saida.write(str(descritor))
        if descritor_global is None:
            descritor_global = numpy.array([descritor])
        else:
            descritor_global = numpy.append(descritor_global, [descritor], axis=0)
    # Fix: the original returned from inside the loop, so only one frame was
    # processed and cap.release() was unreachable.
    cap.release()
    if saida is not None:
        saida.close()
    return descritor_global
def getGradient(frame, x=0, y=0):
    # 3x3 Sobel derivative of *frame*; x/y select the differentiation order.
    return cv2.Sobel(frame, cv2.CV_64F, x, y, ksize=3)
def run(video_path, filename=None):
    """Compute descriptors for *video_path*; optionally save them as CSV.

    When *filename* is given, writes ``<filename>_gradiente.csv``.
    Returns the descriptor array from :func:`loadResource`.
    """
    descriptors = loadResource(video_path)
    if filename is not None:
        numpy.savetxt("{}_gradiente.csv".format(filename), descriptors, delimiter=",")
    return descriptors
def main():
    # CLI entry: argv[1] = video path, argv[2] = output CSV basename.
    run(sys.argv[1], sys.argv[2])
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
50b4f7e014d52b8e64df0af1906cb98206770121 | 98555b88ed29aae3a8042ba6436ee293f60cbbfd | /store/models.py | 9a8214d20a110f7740a7a24af3fa976e8bac21c1 | [] | no_license | Shohruxshoh/ecom | d7585bca684d17c1ef6872fd8c5cf24b34bbb6c9 | 5c90773d8cda0cca9dca0a19043dabbe928acd05 | refs/heads/main | 2023-03-08T23:02:53.449481 | 2021-03-19T12:31:21 | 2021-03-19T12:31:21 | 348,427,924 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,799 | py | from django.db import models
from django.contrib.auth.models import User
# Create your models here.
class Customer(models.Model):
    """A shop customer, optionally linked one-to-one to a Django auth user."""
    # Nullable so customers can exist without a login (e.g. guest checkout).
    user = models.OneToOneField(User, on_delete=models.CASCADE, null=True, blank=True)
    name = models.CharField(max_length=200, null=True)
    email = models.CharField(max_length=200, null=True)
    def __str__(self):
        return self.name
class Product(models.Model):
    """A sellable item; ``digital`` products do not require shipping."""
    name = models.CharField(max_length=200, null=True)
    price = models.DecimalField(max_digits=7, decimal_places=2)
    digital = models.BooleanField(default=False, null=True, blank=False)
    image = models.ImageField(null=True, blank=True)
    def __str__(self):
        return self.name
    @property
    def imageURL(self):
        """URL of the product image, or '' when no image file is attached."""
        # ImageField's .url raises ValueError when no file is associated.
        # The previous bare ``except`` also swallowed unrelated errors.
        try:
            url = self.image.url
        except ValueError:
            url = ''
        return url
class Order(models.Model):
    """A customer's order; while ``complete`` is False it acts as the cart."""
    customer = models.ForeignKey(Customer, on_delete=models.SET_NULL, blank=True, null=True)
    date_orderd = models.DateTimeField(auto_now_add=True)
    complete = models.BooleanField(default=False, null=True, blank=False)
    transaction_id = models.CharField(max_length=200, null=True)
    def __str__(self):
        return str(self.id)
    @property
    def shipping(self):
        """True when any ordered product is explicitly non-digital."""
        # Note: compares against False exactly; a NULL ``digital`` flag does
        # not count as physical (matches the original loop's behavior).
        return any(item.product.digital == False
                   for item in self.orderitem_set.all())
    @property
    def get_cart_total(self):
        """Sum of every line item's total price."""
        return sum(item.get_total for item in self.orderitem_set.all())
    @property
    def get_cart_items(self):
        """Total number of units across all line items."""
        return sum(item.quantity for item in self.orderitem_set.all())
class OrderItem(models.Model):
    """One product line within an order, with its quantity."""
    product = models.ForeignKey(Product, on_delete=models.SET_NULL, blank=True, null=True)
    order = models.ForeignKey(Order, on_delete=models.SET_NULL, blank=True, null=True)
    quantity = models.IntegerField(default=0, null=True, blank=True)
    date_added = models.DateTimeField(auto_now_add=True)
    @property
    def get_total(self):
        # Line total = unit price x quantity.
        total = self.product.price * self.quantity
        return total
class ShippingAddress(models.Model):
    """Delivery address captured at checkout for a physical order."""
    customer = models.ForeignKey(Customer, on_delete=models.SET_NULL, blank=True, null=True)
    order = models.ForeignKey(Order, on_delete=models.SET_NULL, blank=True, null=True)
    address = models.CharField(max_length=200, null=True)
    city = models.CharField(max_length=200, null=True)
    state = models.CharField(max_length=200, null=True)
    zipcode = models.CharField(max_length=200, null=True)
    date_added = models.DateTimeField(auto_now_add=True)
def __str__(self):
return self.address | [
"[email protected]"
] | |
7bf5ba484bf135d72ac22cf1e202ed7a7095f9ff | 5bb616be2a0d22e5f2aa79a8587c5d8dc101e872 | /Product/借你花.py | a51ce0a29ea32ad283dd7156b2dd5a168beed344 | [] | no_license | Chihihiro/Backstage | 81f0ca452dc9f05723f4557e842b4f0a2581e999 | 5b4dec28ce2ba15591f85ca4af44b24ceae58989 | refs/heads/master | 2020-04-14T18:57:12.849203 | 2019-01-24T01:15:03 | 2019-01-24T01:15:03 | 164,039,198 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,850 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2019/1/9 0009 15:12
# @Author : Chihiro
# @Site :
# @File : 借你花.py
# @Software: PyCharm
from selenium import webdriver
from requests import Session
from BaseSpider import BaseSpider
from DealWithCookie import cookie_to_dict
import json
from time import sleep
import time
class BJZ(BaseSpider):
    """Scraper for the admin panel's daily per-channel registration stats."""
    def __init__(self, account):
        super(BJZ, self).__init__(account)
    def get_info(self):
        """Log in via reused cookies, fetch today's channel stats JSON,
        and persist a summary row through ``write_sql``."""
        xpath_info = {
            "username": '//*[@id="itemBox"]/div[1]/input',
            "password": '//*[@id="itemBox"]/div[2]/input',
            "login_button": '//*[@id="main-content"]/div/div/form/div[2]/button',
            "check_code": "",
            "code_image_url": "",
            "success_ele": ""
        }
        # Create a requests session.
        session = Session()
        # Obtain cookies via the (captcha-free) browser login.
        cookie = self.no_check_get_cookie(xpath_info)
        # Attach the browser cookies to the session.
        session.cookies.update(cookie_to_dict(cookie))
        print(cookie_to_dict(cookie))
        # Ajax endpoint returning the day's stats as JSON; the trailing "_"
        # parameter is a millisecond timestamp cache-buster.
        url = f"http://qd.jienihua100.com/Admin/Ditchs/indexAjax.html?start={self.today}&end={self.today}&url={self.channel}&sEcho=3&iColumns=3&sColumns=%2C%2C&iDisplayStart=0&iDisplayLength=10&mDataProp_0=date_text&sSearch_0=&bRegex_0=false&bSearchable_0=true&mDataProp_1=gl_nreg&sSearch_1=&bRegex_1=false&bSearchable_1=true&mDataProp_2=gl_login&sSearch_2=&bRegex_2=false&bSearchable_2=true&sSearch=&bRegex=false&_={str(int(time.time()*1000))}"#{str(int(time.time()*1000))}
        # url = f"http://qd.jienihua100.com/Admin/Ditchs/indexAjax.html?start=2019-01-09&end=2019-01-09&url=wyx03&sEcho=13&iColumns=3&sColumns=%2C%2C&iDisplayStart=0&iDisplayLength=10&mDataProp_0=date_text&sSearch_0=&bRegex_0=false&bSearchable_0=true&mDataProp_1=gl_nreg&sSearch_1=&bRegex_1=false&bSearchable_1=true&mDataProp_2=gl_login&sSearch_2=&bRegex_2=false&bSearchable_2=true&sSearch=&bRegex=false&_=1547018390672"
        # Request headers (browser-like User-Agent).
        headers = {
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.80 Safari/537.36",
        }
        # Fetch the stats.
        response = session.get(url, headers=headers)
        json_info = response.json()['data'][0]
        print(json_info)
        # Summary row; keys are the Chinese column names expected by the
        # reporting table ("registrations", "verified", "applied", "funded",
        # "remark") - do not translate these runtime strings.
        result = {
            "注册人数": json_info["gl_nreg"],
            "实名人数": "null",
            "申请人数": "null",
            "放款人数": "null",
            "备注": ""
        }
        self.write_sql(result)
self.write_sql(result)
# Channel/account configuration.
# SECURITY(review): credentials are committed in plain text - move to
# environment variables or a secrets store and rotate them.
SH = {
    "login_url": "http://qd.jienihua100.com/Admin/Public/login.html",
    "area": "",
    "product": "借你花",
    "username": "WYX",
    "password": "WYX123",
    "channel": "wyx03"
}
all_local = [SH]
# Run the scraper once per configured account.
for each_local in all_local:
    spider = BJZ(each_local)
    spider.get_info()
| [
"chihiro123"
] | chihiro123 |
dc227d9fa7bde815f238102c1e3140b10c28c18b | 3294208e214250df01df5b039d2bb4ba359e110a | /get_tweets.py | a611cab09f7e0f8eab532997ecd8d568f058a5ee | [] | no_license | TonyNguyen101/suicidecrowdsourcing | 064c0386bc9e53a41d331dd0a75f37c3a469ef0f | 27925d06c8b889a08f611da4ba1e8b88eb6bbf2b | refs/heads/master | 2021-01-18T11:25:08.828504 | 2016-04-20T02:40:28 | 2016-04-20T02:40:28 | 56,111,608 | 0 | 0 | null | 2016-04-13T01:41:10 | 2016-04-13T01:41:10 | null | UTF-8 | Python | false | false | 1,212 | py | from tweepy.streaming import StreamListener
from tweepy import OAuthHandler
from tweepy import Stream
import json
# consumer_key = 'NOT'
# consumer_secret = 'GETTING'
# access_token_key = 'MY'
# access_token_secret = 'KEYS'
# SECURITY(review): real API credentials are committed below - revoke them
# and load from environment variables instead.
consumer_key = 'viFcnMBuuVEEACYk8uHVJFxGl'
consumer_secret = 'EoDGnIUQ316VbzaMt3nfcvwj4qnhpz8RyWWgyO28LF4HMFbHTu'
access_token_key = '67738036-SM5J8ZMIqXTYkhLfcnrBphP4Y7QbUITZwZ6IWxsF0'
access_token_secret = 'FNaXS2d7JvdY4GLOMXHqWHR7iIMU10GR94GUmafBBClXn'
auth = OAuthHandler(consumer_key, consumer_secret) #OAuth object
auth.set_access_token(access_token_key, access_token_secret)
# Keywords to track on the streaming API.
setTerms = ['kill myself', 'suicide', 'want to die', 'self-harm']
# Bounding box (SW lon, SW lat, NE lon, NE lat).
# NOTE(review): San Francisco longitudes should be negative (~-122); as
# written this box is in East Asia - verify the intended coordinates.
SF_BOX = [121.4900, 37.2200, 122.3100,37.5200]
class StdOutListener(StreamListener):
    """Stream listener that appends raw tweet JSON to ``fhOut`` and echoes text."""
    def on_data(self, data):
        """Persist the raw payload, then print the tweet text if present."""
        fhOut.write(data)  # global file handle opened in __main__
        j=json.loads(data)
        # Not every stream message is a tweet (e.g. delete notices carry no
        # "text"); the old j["text"] lookup crashed the stream on those.
        text = j.get("text")
        if text is not None:
            print(text)
            print('\n')
        return True  # keep the stream alive
    def on_error(self, status):
        print("ERROR")
        print(status)
if __name__ == '__main__':
    try:
        # Append raw stream output to a local JSON-lines style file.
        fhOut = open("test_tweets.json","a")
        l = StdOutListener()
        stream = Stream(auth, l,timeout=30)
        # Blocks, streaming matching tweets until interrupted with Ctrl-C.
        stream.filter(track=setTerms, locations=SF_BOX)
    except KeyboardInterrupt:
        pass
    fhOut.close()
| [
"[email protected]"
] | |
b53a9728a1f736baf5966da80314d7b3ee00d637 | cc3cceb34d7b03203f7c0c2ef901ae0b576201ea | /app/content.py | d4db2b9a8e60929bb40f914357338085bc298d16 | [
"BSD-2-Clause"
] | permissive | mikelmaron/kibera-school-project | be01a40d71b9a67e9decd7fc6f2116bd521ef0b7 | 825fb863e7198c2ff166dfa678b8abe89ee3a79f | refs/heads/master | 2020-12-26T04:55:22.071111 | 2014-05-15T21:12:18 | 2014-05-15T21:12:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,041 | py | # -*- coding: utf-8 -*-
"""
app.content
~~~~~~~~~~~
Load raw content for generating pages
Content comes from folders in the `content` folder. It is expected that
these subfolders each represent a sort of type of content, with all data
contained being of the same type.
One function should be written for each folder, decorated by
`load(type_name)`. It should return some kind of object representing all
of the content of that type.
That object will be keyed by `type_name` into an importable dict in this
module, called `content`.
At present, all content is loaded at import time.
"""
import os
import re
import json
import codecs
import unicodedata
import datetime
from markdown import markdown, Markdown # quick-use, extensions-able
from flask import Markup
from app import app
content = {}  # type_name -> loaded content object; populated by @load at import time
content_path = os.path.join(app.root_path, app.config['CONTENT_FOLDER'])
# Converters applied to raw metadata value lists (as parsed by the Markdown
# ``meta`` extension) to normalize them into useful Python values.
meta_converters = {
    'noop': lambda x: x,
    'iso-date': lambda x: [datetime.date(*map(int, t.split('-'))) for t in x],
    # list(...) so Python 3 callers get a sequence, not a one-shot iterator.
    'float': lambda x: list(map(float, x)),
    # Fixed: str has no .lowercase() method (AttributeError); use .lower().
    'slugify': lambda x: [s.lower().replace(' ', '-') for s in x],
    'one': lambda x: x[0],
}
def slugify(value):
    """Lowercase *value*, strip non-word characters, and hyphenate spaces.

    The text is ASCII-folded first (accents dropped), mirroring Django's
    slugify helper.
    """
    ascii_text = (
        unicodedata.normalize('NFKD', value)
        .encode('ascii', 'ignore')
        .decode('ascii')
    )
    cleaned = re.sub('[^\w\s-]', '', ascii_text).strip().lower()
    return re.sub('[-\s]+', '-', cleaned)
class MetaError(ValueError):
    """Error raised while reading or converting post metadata.

    The message may contain ``str.format`` placeholders (``{field}``,
    ``{filename}``) that are filled in later via :meth:`apply_context`,
    once the caller knows which file/field was being processed.
    """
    def apply_context(self, **context):
        """Substitute *context* into the message's format placeholders."""
        # BaseException.message was removed in Python 3; args[0] is the
        # portable way to retrieve the original message.
        contextualized_message = self.args[0].format(**context)
        self.__init__(contextualized_message)
def apply_field_constraints(field_val, required, filters):
    """Validate and convert one metadata field.

    field_val: raw list of values from the Markdown ``meta`` extension, or
        None when the field was absent.
    required:  whether the field must be present.
    filters:   names of ``meta_converters`` entries, applied in *reverse*
        order (rightmost first).

    Returns the converted value, or None for a missing optional field.
    Raises MetaError on a missing required field or converter failure; the
    messages keep ``{field}``/``{filename}`` placeholders for
    ``apply_context`` to fill in.
    """
    if field_val is None:
        if required is True:
            # Fixed: was a KeyError with an unformattable message; MetaError
            # lets the caller's handler attach filename/field context.
            raise MetaError('The blog post {filename} is missing metadata: {field}')
        return None
    filtered = field_val
    for filter_name in reversed(filters):
        try:
            filtered = meta_converters[filter_name](filtered)
        except Exception as e:
            # Double braces survive this .format() call so apply_context can
            # fill them in later (the old '{(unknown)}' literal made this
            # .format raise KeyError itself).
            raise MetaError('Metadata {{field}} for blog post {{filename}}'
                            ' has issues:\n{0}'.format(e))
    return filtered
def get_file_pairs(folder_path):
    """Yield ``(open_file, filename)`` for every file in *folder_path*.

    Files are opened read-only as UTF-8.  Each handle is only valid inside
    the loop body consuming this generator; it is closed when the generator
    resumes.
    """
    for entry in os.listdir(folder_path):
        full_path = os.path.join(folder_path, entry)
        try:
            handle = codecs.open(full_path, 'r', encoding='utf-8')
        except IOError as e:
            raise e  # todo: handle these nicely and make nice error messages
        with handle:
            yield handle, entry
def load(type_name):
    """Wrap the loading procedure of each folder/type of content.
    This decorator handles file I/O and object storage for the decorated
    functions.
    The wrapped function should take a single parameter which must match the
    name of the folder.
    Its return value must represent all of the content for that content type.
    The decorated function will be called once, and will be passed an iterable
    of tuples of `(fileObj, filename)` pairs, one for each file present in its
    folder.
    """
    # Guard against two loaders claiming the same content type.
    assert type_name not in content, 'Content loader collision: {} already '\
        'has a loader'.format(type_name)
    def loader_decorator(loader_func):
        # Runs immediately at decoration (module import) time; the loader's
        # result is stashed in the module-level ``content`` dict.
        path = os.path.join(content_path, type_name)
        assert os.path.isdir(path), '{} does not seem to be a folder in {}'\
            .format(type_name, content_path)
        file_pairs = get_file_pairs(path)
        content[type_name] = loader_func(file_pairs)
        return loader_func # probably never used since it's called above
        # ...kind of an abuse of decorators, I know...
    return loader_decorator
@load('blog')
def load_blog(blogs):
    """Load a blog post and its metadata for each file.
    Blog posts are expected to be markdown files with metadata at the top. The
    following metadata is required:
    * Title
    * Date -- use iso format: yyyy-mm-dd)
    * Authors -- if more than one, add a line-break and indent for each. eg:
    Authors: Llanco Talamantes
    Mikel Maron
    These metadata fields are optional:
    * Modified -- iso format like Data
    The filename is used as the URL slug, minus the extension.
    Imported blog posts are thrown onto a big list in dictionaries that look
    like this:
    {
    'body': <html string>,
    'title': <string>,
    'slug': <string>,
    'authors': <list of strings>,
    'date': <datetime.Date>,
    'modified': <datetime.Date or None>,
    }
    """
    # field name -> (required?, converter names applied right-to-left).
    meta = {
        'title': (True, ('one',)),
        'authors': (True, ('noop',)),
        'date': (True, ('one', 'iso-date')),
        'modified': (False, ('one', 'iso-date')),
    }
    markdowner = Markdown(extensions=['meta'], output_format='html5')
    posts = []
    for blog_file, filename in blogs:
        post = {}
        html = markdowner.convert(blog_file.read()) # also loads metadata
        post['body'] = Markup(html)
        post['slug'] = os.path.splitext(filename)[0]
        for field, (required, filters) in meta.items():
            field_val = markdowner.Meta.get(field)
            try:
                val = apply_field_constraints(field_val, required, filters)
            except MetaError as e:
                # Fill the {filename}/{field} placeholders before re-raising.
                e.apply_context(filename=filename, field=field)
                raise e
            post[field] = val
        posts.append(post)
        # Clear the converter's per-file metadata before the next post.
        markdowner.reset()
    # Newest first.
    posts.sort(key=lambda post: post['date'], reverse=True)
    return posts
def validate_school_geo(school_geo, _seen=set()):
    """Validate the geojson data as it comes in.

    Also attaches a ``slug`` key ('<id>/<name-slug>') to the feature as a
    side effect.  ``_seen`` is a deliberately mutable default: it acts as a
    process-lifetime registry of feature ids so duplicates are rejected
    across calls (fragile if validation must ever be re-run in-process).
    """
    assert school_geo['type'] == 'Feature'
    assert school_geo['geometry']['type'] == 'MultiPoint'
    assert 'properties' in school_geo
    properties = school_geo['properties']
    assert 'id' in properties
    # The id looks like 'prefix/<numeric-id>'; keep only the last segment.
    _id = properties['id'].rsplit('/', 1)[1]
    assert _id not in _seen
    _seen.add(_id)
    assert 'name' in properties
    school_text_slug = slugify(properties['name'])
    school_geo['slug'] = '{}/{}'.format(_id, school_text_slug)
@load('schools')
def load_schools(school_stuff):
    """Load many schools' data from each (and likely only one) geojson file.

    Each file must be a GeoJSON FeatureCollection; every feature is
    validated (which also attaches its ``slug``) and collected into one
    flat list.
    """
    schools = []
    # (Removed an unused ``seen_slugs`` local; duplicate detection happens
    # inside validate_school_geo.)
    for school_file, filename in school_stuff:
        school_data = json.load(school_file)
        for school_geojson in school_data['features']:
            validate_school_geo(school_geojson)
            schools.append(school_geojson)
    return schools
| [
"[email protected]"
] | |
d2b13de53846f6b3f71b27072bf88509b5e08225 | 15f14040b01fe7646f128461f4d6afa548ea0a21 | /testprograms/unused_scripts/births_analysis.py | 1fd804c55c0a27a20cc338b96f6cd78390485819 | [] | no_license | emjun/infuser | 32a8ca153062ab4e489fd70469025a236ea6338c | a0691dd6f49db612237561bb005619ad403784ea | refs/heads/master | 2020-05-18T04:07:42.471383 | 2019-06-14T02:31:02 | 2019-06-14T02:31:02 | 184,164,694 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,211 | py | import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
from scipy.stats import ttest_1samp, normaltest, mannwhitneyu, norm
"""
https://github.com/kontrybutor/tsbd_sem2/blob/7e97b92900d092c2eb67a17167ae9f9df4ae0d0d/JPwAD/ex1/ex2.py
WORKS FINE WITH INFUSER
"""
"WRANGLING"
births_filename = '../data/births.csv'
def load_dataset(filename):
    """Read the births CSV and return it without the redundant 'index' column."""
    df = pd.read_csv(filename, sep=",")
    # Keyword form: df.drop('index', 1) relied on the positional ``axis``
    # argument, which was deprecated and removed in pandas 2.0.
    df = df.drop(columns='index')
    return df
def plot_histogram(data):
    """Show a histogram of *data* with a fitted normal curve overlaid."""
    plt.figure(figsize=(12, 10))
    sns.set_style('darkgrid')
    # Fit a normal distribution to the sample for the overlay curve.
    mu, std = norm.fit(data)
    ax = sns.distplot(data, kde=False, fit=norm)
    # Mark the density of the hypothesized mean (10000) on the fitted curve.
    plt.plot(10000, norm.pdf(10000, mu, std), marker='o', markersize=3, color="red")
    ax.set(ylabel='count')
    plt.legend(('Gauss approximation', 'Point', 'Births'))
    plt.title('Histogram')
    plt.xlabel('Births')
    plt.ylabel('Amount')
    plt.show()
# Module-level script: load the births series and display its histogram.
births = load_dataset(births_filename).get('births')
plot_histogram(births)
"ANALYSIS"
def check_normality(data):
    """D'Agostino-Pearson normality test at alpha=0.05; prints the verdict."""
    alpha = 0.05
    _, p_value = normaltest(data)  # H0: data comes from a normal distribution
    print("p-value = {}".format(p_value))
    verdict = ('The null hypothesis can be rejected'
               if p_value < alpha
               else 'The null hypothesis cannot be rejected')
    print(verdict)
def test_hypothesis(data, hypothesis):
print("Test t-Studenta:")
alpha = 0.05
stat, p = ttest_1samp(a=data, popmean=hypothesis)
print("p-value = {}".format(p))
if p < alpha:
print('The null hypothesis can be rejected')
else:
print('The null hypothesis cannot be rejected')
print("Test U-Manna-Whitneya:")
# random = np.random.normal(10000, 0.1, 10000)
# stat, p = mannwhitneyu(data, random)
stat, p = mannwhitneyu(data, [hypothesis])
print("p-value = {}".format(p))
if p < alpha:
print('The null hypothesis can be rejected')
else:
print('The null hypothesis cannot be rejected')
# Module-level script: normality check, then one-sample tests versus 10000.
print("Null hypothesis: data comes from a normal distribution")
check_normality(births)
print("--------------------------------")
print("Null hypothesis: Average of daily births is 10000")
test_hypothesis(births, 10000)
| [
"[email protected]"
] | |
bb77c5c2e36127f0a6780f264fae6d40e4ed2d37 | a436a3c22deff44179b4d66d1684a588f5bffd97 | /run_experiments.py | 8e339984e28b2becc08d32f6379da54b794a3eec | [
"Apache-2.0"
] | permissive | realityengines/post_hoc_debiasing | 1247d76d164a68aff84163ffd376e4d7e15d5dce | e0efc81e6e317b9c203e79106c3529e159bc3fa8 | refs/heads/master | 2022-11-30T03:44:33.064428 | 2020-07-14T01:22:09 | 2020-07-14T01:22:09 | 272,260,983 | 17 | 4 | Apache-2.0 | 2020-07-14T01:22:10 | 2020-06-14T18:31:59 | Jupyter Notebook | UTF-8 | Python | false | false | 507 | py | import argparse
import os
from pathlib import Path
def main(args):
    """Run posthoc.py once for every config file in args.config_directory,
    in sorted order, echoing each command before executing it."""
    for config_path in sorted(Path(args.config_directory).glob('*')):
        command = f"python posthoc.py {config_path}"
        print(command)
        os.system(command)
if __name__ == "__main__":
    """ This is executed when run from the command line """
    # Single positional argument: the directory containing experiment configs.
    parser = argparse.ArgumentParser()
    parser.add_argument("config_directory", help="directory with configs.")
    args = parser.parse_args()
    main(args)
"[email protected]"
] | |
0a6a56b46d6de2843b504bcbd9a582237369aedf | 39a82800092110f4bde0ee702670691ae7c23943 | /models/usercentermodel.py | 2d96f18a792310df764f031493c13406b0acca9c | [] | no_license | stallman-cui/log_analytics | eea50bad10c07626d91558964b8cdd9b2e5108ef | f4446bfd8bf995f8f4de2750489346f598cefa8c | refs/heads/master | 2020-12-30T14:56:31.246330 | 2015-05-26T04:50:19 | 2015-05-26T04:50:19 | 28,953,095 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 557 | py | from common.mongo import MongoModel
from configs.config import END_TOPO_SUCCESS
class UserCenterModel(MongoModel):
def get_db(self):
return 'usercenter'
def get_collection(self):
return 'user'
def get_keys(self):
return 'area', 'plat', 'uid'
def get_conf(self):
conf = {
'sub_conf' : ['syncuser', ],
'state' : 'usercenter'
}
return conf
def handle(self, recv_body):
if recv_body:
self.upsert(recv_body)
return END_TOPO_SUCCESS
| [
"[email protected]"
] | |
b26239e537f104c7472d1423aca55a7531c93338 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02381/s775970938.py | dc9fc2d44e446e67afb1c02cdf42c0b7747e41fb | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 339 | py | #
# 10c
#
import math
def main():
    """Read datasets from stdin until a 0 count is seen, printing each
    population standard deviation with four decimals (AOJ problem 10C)."""
    while True:
        count = int(input())
        if count == 0:
            break
        scores = [int(tok) for tok in input().split()]
        mean = sum(scores) / count
        variance = sum((s - mean) ** 2 for s in scores) / count
        print(f"{math.sqrt(variance):.4f}")


if __name__ == '__main__':
    main()
| [
"[email protected]"
] | |
5994d6fc52dba85ed50e7e547aae183308ef917f | 7694412cbb088f299302648723aabe0522d8cc9a | /cart/cart.py | 2be988412a2674b23b9533d9dafa86ca62d4dd26 | [] | no_license | brave-jang/shopping_mall | 2a60a7e553cd812f7cd87bb4936631394aacc155 | 41705e9a1c313179364ceeea02b99652eae780cb | refs/heads/master | 2023-04-06T00:40:46.306204 | 2021-05-03T16:10:50 | 2021-05-03T16:10:50 | 362,702,200 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,311 | py | from coupon.models import Coupon
from decimal import Decimal
from django.conf import settings
from shop.models import Product
class Cart(object):
    """Session-backed shopping cart.

    Cart state lives in ``request.session[settings.CART_ID]`` as a mapping of
    ``str(product_id) -> {'quantity': int, 'price': str}``.  Prices are kept
    as strings in the session because Decimal is not JSON-serializable.
    """

    def __init__(self, request):
        self.session = request.session
        cart = self.session.get(settings.CART_ID)
        if not cart:
            # First access for this session: start an empty cart.
            cart = self.session[settings.CART_ID] = {}
        self.cart = cart
        self.coupon_id = self.session.get('coupon_id')

    def __len__(self):
        """Total number of units (not distinct products) in the cart."""
        return sum(item['quantity'] for item in self.cart.values())

    def __iter__(self):
        """Yield one dict per line item, enriched with the Product instance,
        a Decimal price and the line total.

        Enrichment happens on *copies* of the session entries: the original
        code attached Product objects and Decimal prices directly to
        ``self.cart``, which corrupted the session on the next save because
        neither type is JSON-serializable.
        """
        product_ids = self.cart.keys()
        products = Product.objects.filter(id__in=product_ids)
        items = {pid: dict(fields) for pid, fields in self.cart.items()}
        for product in products:
            items[str(product.id)]['product'] = product
        for item in items.values():
            item['price'] = Decimal(item['price'])
            item['total_price'] = item['price'] * item['quantity']
            yield item

    def add(self, product, quantity=1, is_update=False):
        """Add *quantity* units of *product*; with ``is_update`` the stored
        quantity is replaced instead of incremented."""
        product_id = str(product.id)
        if product_id not in self.cart:
            self.cart[product_id] = {'quantity': 0, 'price': str(product.price)}
        if is_update:
            self.cart[product_id]['quantity'] = quantity
        else:
            self.cart[product_id]['quantity'] += quantity
        self.save()

    def save(self):
        # Reassign and flag the session so Django persists the change.
        self.session[settings.CART_ID] = self.cart
        self.session.modified = True

    def remove(self, product):
        """Drop *product* from the cart entirely (no-op if absent)."""
        product_id = str(product.id)
        if product_id in self.cart:
            del self.cart[product_id]
            self.save()

    def clear(self):
        """Empty the cart and detach any applied coupon."""
        self.session[settings.CART_ID] = {}
        self.session['coupon_id'] = None
        self.session.modified = True

    def get_product_total(self):
        """Cart total before any coupon discount."""
        return sum(Decimal(item['price']) * item['quantity'] for item in self.cart.values())

    @property
    def coupon(self):
        # Lazily resolve the coupon object from the id stored in the session.
        if self.coupon_id:
            return Coupon.objects.get(id=self.coupon_id)
        return None

    def get_discount_total(self):
        # Flat discount: applies only once the product total reaches the
        # coupon amount.
        if self.coupon:
            if self.get_product_total() >= self.coupon.amount:
                return self.coupon.amount
        return Decimal(0)

    def get_total_price(self):
        """Final amount: product total minus coupon discount."""
        return self.get_product_total() - self.get_discount_total()
"[email protected]"
] | |
edc180f84d2369b1f99536ab30a78308d7c9deac | b657b5bb616c1286795d7c1e6c59df3c31fac43e | /Personality test.py | f5b39441a4e475620d840095b8b816e860525b25 | [] | no_license | Just-in-cider-711/Juco | c54461580958225fe1659fdef73ef561cd26a5a2 | 4a84c7baff9bc1444ebeea4580a0c8a5f9052eb9 | refs/heads/master | 2020-03-21T07:12:48.285495 | 2018-06-22T07:05:19 | 2018-06-22T07:05:19 | 138,267,367 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,396 | py | import tkinter as tk
import matplotlib
import matplotlib.pyplot as plt
import Vars
from Vars import r, g, b, y, number
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
matplotlib.use('TkAgg')  # backend must be selected before any figure is created
# from matplotlib.figure import Figure
background_color = "#4ed0fc"  # window / chart background
button_color = "#abd897"      # answer-button background
# Personality Color counters
# Window Class
labels = ['Red', 'Green', 'Blue', 'Yellow']
# NOTE(review): sizes is ordered [r, b, g, y] while labels/colors are ordered
# Red, Green, Blue, Yellow -- the green and blue slices appear swapped; confirm.
sizes = [r, b, g, y]
colors = ["Red", "Green", "Blue", "Yellow"]
explode = (0, 0, 0, 0)  # no pie slice pulled out
class WindowMaker:
    """Splash window with Start/Quit buttons.

    Constructing an instance immediately builds the Tk window and blocks in
    ``mainloop()`` until Start destroys the window (Quit exits the process).
    """

    def __init__(self, name, width, height, title, color):
        # Each argument is coerced to a usable type in the *local* variable;
        # the attributes below keep the original, uncoerced values.
        # SELF NAME & Safety Net
        self.name = name
        if type(name) == str:
            pass
        elif type(name) != str:
            name = str(name)
        # SELF width & Safety Net
        self.width = width
        if type(width) == int:
            pass
        elif type(width) != int:
            width = len(width)
        # SELF height & Safety Net
        self.height = height
        if type(height) == int:
            pass
        elif type(height) != int:
            height = len(height)
        # SELF title & Safety Net
        self.title = title
        if type(title) == str:
            pass
        elif type(title) != str:
            title = str(title)
        # SELF Color & Safety Net
        self.color = color
        # 'name' is rebound to the Tk root window from here on.
        name = tk.Tk()
        name.title(title)
        name.geometry("{0}x{1}+0+0".format(width, height))
        name.maxsize(600, 600)
        # Applying Background color To Window, Setting up fail Net
        try:
            name.config(bg=color)
        except ValueError:
            name.config(bg='white')
            print("Color Unknown, Defaulting to white")
        # MAIN CODE ________ BELOW _______ BELOW ____________ BELOW __________
        def start():
            # Closing the splash lets the caller continue past mainloop().
            name.destroy()
        def qquit():
            exit()
        start_button = tk.Button(name, text='Start', command=start, bg='green')
        quit_button = tk.Button(name, text='Quit', command=qquit, bg='red')
        start_button.place(x=width/4, y=height/2.5)
        quit_button.place(x=width/2 * 1.1, y = height/2.5)
        # Blocks here until the window is destroyed.
        name.mainloop()
class MainWindowMaker:
    """Main quiz window: question label, four color answer buttons, and a
    matplotlib donut chart of the running color tallies.

    Each answer button increments its color counter (module globals r/g/b/y
    from Vars) plus the question counter ``number``, then swaps in the next
    question's text via the ``question_*`` closures below.
    """

    def __init__(self, name, width, height, title, color):
        # Each argument is coerced to a usable type in the *local* variable;
        # the attributes below keep the original, uncoerced values.
        # SELF NAME & Safety Net
        self.name = name
        if type(name) == str:
            pass
        elif type(name) != str:
            name = str(name)
        # SELF width & Safety Net
        self.width = width
        if type(width) == int:
            pass
        elif type(width) != int:
            width = len(width)
        # SELF height & Safety Net
        self.height = height
        if type(height) == int:
            pass
        elif type(height) != int:
            height = len(height)
        # SELF title & Safety Net
        self.title = title
        if type(title) == str:
            pass
        elif type(title) != str:
            title = str(title)
        # SELF Color & Safety Net
        self.color = color
        # 'name' is rebound to the Tk root window from here on.
        name = tk.Tk()
        name.title(title)
        name.geometry("{0}x{1}+0+0".format(width, height))
        name.maxsize(width, height)
        # Applying Background color To Window, Setting up fail Net
        try:
            name.config(bg=color)
        except ValueError:
            name.config(bg='white')
            print("Color Unknown, Defaulting to white")
        #
        #
        #
        #
        #
        # MAIN CODE ________ BELOW _______ BELOW ____________ BELOW __________
        #
        #
        #
        # SETS UPS AND STUFF FOR QUESTIONS WINDOWS
        def question_one():
            print("Question one")
            title_label.config(text=title+" Question: " + str(number))
            red_button.config(text='I am Red')
            blue_button.config(text='I Am Blue')
            yellow_button.config(text='I am Yellow')
            green_button.config(text='I am Green')
            question_label.config(text='What Color Are you?')
        # NOTE(review): questions 2-10 are placeholders -- they blank out the
        # buttons/label and their print statements still say "Question one".
        def question_two():
            print("Question one")
            title_label.config(text=title+" Question: " + str(number))
            red_button.config(text='')
            blue_button.config(text='')
            yellow_button.config(text='')
            green_button.config(text='')
            question_label.config(text='')
        def question_three():
            print("Question one")
            title_label.config(text=title+" Question: " + str(number))
            red_button.config(text='')
            blue_button.config(text='')
            yellow_button.config(text='')
            green_button.config(text='')
            question_label.config(text='')
        def question_four():
            print("Question one")
            title_label.config(text=title+" Question: " + str(number))
            red_button.config(text='')
            blue_button.config(text='')
            yellow_button.config(text='')
            green_button.config(text='')
            question_label.config(text='')
        def question_five():
            print("Question one")
            title_label.config(text=title+" Question: " + str(number))
            red_button.config(text='')
            blue_button.config(text='')
            yellow_button.config(text='')
            green_button.config(text='')
            question_label.config(text='')
        def question_six():
            print("Question one")
            title_label.config(text=title+" Question: " + str(number))
            red_button.config(text='')
            blue_button.config(text='')
            yellow_button.config(text='')
            green_button.config(text='')
            question_label.config(text='')
        def question_seven():
            print("Question one")
            title_label.config(text=title+" Question: " + str(number))
            red_button.config(text='')
            blue_button.config(text='')
            yellow_button.config(text='')
            green_button.config(text='')
            question_label.config(text='')
        def question_eight():
            print("Question one")
            title_label.config(text=title+" Question: " + str(number))
            red_button.config(text='')
            blue_button.config(text='')
            yellow_button.config(text='')
            green_button.config(text='')
            question_label.config(text='')
        def question_nine():
            print("Question one")
            title_label.config(text=title+" Question: " + str(number))
            red_button.config(text='')
            blue_button.config(text='')
            yellow_button.config(text='')
            green_button.config(text='')
            question_label.config(text='')
        def question_ten():
            print("Question one")
            title_label.config(text=title+" Question: " + str(number))
            red_button.config(text='')
            blue_button.config(text='')
            yellow_button.config(text='')
            green_button.config(text='')
            question_label.config(text='')
        # Ticker to keep count of what question were on!
        # NOTE(review): red() dispatches on number == 1 while blue/green/
        # yellow dispatch on number == 0 (and never on 1) -- looks
        # inconsistent; confirm the intended starting value of Vars.number.
        def red():
            global r, number
            r += 1
            number += 1
            print(r, number)
            if number == 1:
                question_one()
            elif number == 2:
                question_two()
            elif number == 3:
                question_three()
            elif number == 4:
                question_four()
            elif number == 5:
                question_five()
            elif number == 6:
                question_six()
            elif number == 7:
                question_seven()
            elif number == 8:
                question_eight()
            elif number == 9:
                question_nine()
            elif number == 10:
                question_ten()
        def blue():
            global b, number
            b += 1
            number += 1
            print(b, number)
            if number == 0:
                question_one()
            elif number == 2:
                question_two()
            elif number == 3:
                question_three()
            elif number == 4:
                question_four()
            elif number == 5:
                question_five()
            elif number == 6:
                question_six()
            elif number == 7:
                question_seven()
            elif number == 8:
                question_eight()
            elif number == 9:
                question_nine()
            elif number == 10:
                question_ten()
        def green():
            global g, number
            g += 1
            number += 1
            print(g, number)
            if number == 0:
                question_one()
            elif number == 2:
                question_two()
            elif number == 3:
                question_three()
            elif number == 4:
                question_four()
            elif number == 5:
                question_five()
            elif number == 6:
                question_six()
            elif number == 7:
                question_seven()
            elif number == 8:
                question_eight()
            elif number == 9:
                question_nine()
            elif number == 10:
                question_ten()
        def yellow():
            global y, number
            y += 1
            number += 1
            print(y, number)
            if number == 0:
                question_one()
            elif number == 2:
                question_two()
            elif number == 3:
                question_three()
            elif number == 4:
                question_four()
            elif number == 5:
                question_five()
            elif number == 6:
                question_six()
            elif number == 7:
                question_seven()
            elif number == 8:
                question_eight()
            elif number == 9:
                question_nine()
            elif number == 10:
                question_ten()
        # TKINTER gui's and etc
        title_label = tk.Label(name, text=title+" Question: " + str(number),fg='black', bg=background_color)
        question_label = tk.Label(name, text='Question', bg=background_color)
        red_button = tk.Button(name, text='Red', command=red, bg=button_color)
        green_button = tk.Button(name, text='Green', command=green, bg=button_color)
        blue_button = tk.Button(name, text='Blue', command=blue, bg=button_color)
        yellow_button = tk.Button(name, text='Yellow', command=yellow, bg=button_color)
        # TKINTER FRAME for macholib pie chart
        dimensions_frame = 2
        pie_chart_frame = tk.Frame(name, width=width/dimensions_frame, height=height/dimensions_frame - 20, bg="#d7d3ff")
        '''
        try:
            donut.get_tk_widget().place(x=1000, y=11110)
            del donut
        except NameError:
            pass
        '''
        # Donut chart of the current color tallies, embedded via TkAgg.
        plt.pie([r, b, g, y], explode=explode, labels=labels, autopct='%1.1f%%', shadow=True, colors=colors)
        # CENTURE CIRCLE
        centre_circle = plt.Circle((0, 0), 0.75, color='Black', fc=background_color, linewidth=1.25)
        fig = plt.gcf()
        fig.gca().add_artist(centre_circle)
        fig.set_facecolor(background_color)
        donut = FigureCanvasTkAgg(fig, pie_chart_frame)
        '''
        fig1, ax1 = plt.subplots()
        ax1.pie(sizes, explode=explode, labels=labels, autopct='%1.1f%%', shadow=True, startangle=90)
        ax1.axis('equal')
        pie2 = FigureCanvasTkAgg(fig1, pie_chart_frame)
        # plt.tight_layout()
        # plt.show()
        '''
        # PLACING TKINTER GUI's
        title_label.place(x=0, y=0)
        question_label.place(x=1/width + width/10, y=height/2)
        red_button.place(x=1/width, y=height/2 + height/4)
        green_button.place(x=1/width + width/4, y=height/2 + height/4)
        blue_button.place(x=1/width + width/4 * 2, y=height/2 + height/4)
        yellow_button.place(x=1/width + width/4 * 3, y=height/2 + height/4)
        pie_chart_frame.place(x=width - width/dimensions_frame, y=0)
        donut.get_tk_widget().place(x=0, y=0)
        # Show the first question immediately.
        # NOTE(review): no name.mainloop() call here -- the event loop is
        # never started by this class; confirm how the window is kept alive.
        question_one()
# start_window = WindowMaker("Window", 300, 300, "Personality Tester Start Up", "Blue")
# Builds the quiz window immediately when the script runs.
main_window = MainWindowMaker("Window", 1200, 1000, "Personality Tester", background_color)
| [
"[email protected]"
] | |
1ea987a0784b10986ba617874e2578c39decc4fe | f65ddf63eab3b983b98bd4ec09d9102e70c53216 | /sparkplug/__init__.py | cd4ee99d51da4e369393165189e180839341f3ae | [
"Apache-2.0"
] | permissive | Quva/sparkplug | 0c8bdac5a64e2180c06fd1f96e4488bf7b9627da | c6ec310ae1f53067fece6e690d7b10c1eb69516e | refs/heads/master | 2021-07-16T15:09:22.940206 | 2020-08-14T14:53:44 | 2020-08-14T14:53:44 | 45,177,401 | 0 | 0 | Apache-2.0 | 2020-08-14T14:53:45 | 2015-10-29T10:53:06 | Python | UTF-8 | Python | false | false | 35 | py |
from .spark_plug import SparkPlug
| [
"[email protected]"
] | |
154fd0e012a5aceb740dd16e4990a57f2ffd39a6 | 3222db8d68d596860d21ca63226b40a1177d7445 | /Exercicios/ex099.py | 482180eb3bf961ccfc91e78bc9e95c4bf774114e | [] | no_license | AyrtonDev/Curso-de-Python | e6c4f00c6b5e5320d382ff8ff45afb13baff1fb4 | d2ae259d21fa81351122053f7f96b07ac0d70bbd | refs/heads/master | 2023-01-31T01:42:07.375896 | 2020-12-09T20:41:38 | 2020-12-09T20:41:38 | 319,462,866 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,146 | py | from time import sleep
def maior(lst):
    """Print a banner, slowly echo each value, then report how many values
    were given and the largest one (0 for an empty list)."""
    print('-=' * 30)
    print('Analisando os valores passados...')
    if not lst:
        print('Foram informados 0 valores ao todo.')
        print('O maior valor informado foi 0.')
        return
    biggest = 0
    for value in lst:
        print(f'{value}', end=' ')
        sleep(0.5)
        if value > biggest:
            biggest = value
    print(f'Foram informados {len(lst)} valores ao todo.')
    sleep(0.5)
    print(f'O maior valor informado foi {biggest}.')
# Demo calls exercising typical, single-element and empty inputs.
maior([2, 9, 4, 5, 7, 1])
maior([4, 7, 0])
maior([1, 2])
maior([6])
maior([])
# Solução do Professor
'''def maior(* num):
cont = maior = 0
print('-=' * 30)
print('Analisando os valores passados... ')
for valor in num:
print(f'{valor} ', end='', flush=True)
sleep(0.3)
if cont == 0:
maior = valor
else:
if valor > maior:
maior = valor
cont += 1
print(f'Foram informados {cont} valores ao todo.')
print(f'O maor valor informado foi {maior}.')
maior(2, 9, 4, 5, 7, 1)
maior(4, 7, 0)
maior(1, 2)
maior(6)
maior()''' | [
"[email protected]"
] | |
e1a1eeb1c2e0921e59b2879d10eb41a8acfbd51f | 1186de8fb42e0bdf9f61708311c07a428aabadee | /python-scripts/xml2rdf.py | 47f4a4ac2ca8b9f2e31fac59b10627e827c1302a | [
"Apache-2.0"
] | permissive | zentim/knowledge-graph | adda2c464e405e6f895f13cb080595dfb1b743bf | b687bfefa233f43319b5906b81d81a1aa841893f | refs/heads/master | 2023-02-16T12:18:51.336457 | 2020-06-14T14:52:06 | 2020-06-14T14:52:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,322 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
#####################################################################################################
# Data ingestion script for the TBFY Knowledge Graph (https://theybuyforyou.eu/tbfy-knowledge-graph/)
#
# This file contains a script that runs the RML Mapper on XML files and produces N-triples files.
#
# Copyright: SINTEF 2017-2020
# Author : Brian Elvesæter ([email protected])
# License : Apache License 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
# Project : Developed as part of the TheyBuyForYou project (https://theybuyforyou.eu/)
# Funding : TheyBuyForYou has received funding from the European Union's Horizon 2020
# research and innovation programme under grant agreement No 780247
#####################################################################################################
import config
import tbfy.statistics
import logging
import requests
import json
import os
import shutil
import sys
import getopt
import time
import datetime
from datetime import datetime
from datetime import timedelta
# **********
# Statistics
# **********
stats_xml2rdf = tbfy.statistics.xml2rdf_statistics_count.copy()
def write_stats(output_folder):
    """Dump the current per-day counters to <output_folder>/STATISTICS.TXT,
    one ``key = value`` line per counter, creating the folder if needed."""
    global stats_xml2rdf

    # exist_ok avoids the check-then-create race of the old exists()/makedirs pair.
    os.makedirs(output_folder, exist_ok=True)
    # 'with' guarantees the handle is closed even if a write fails; the old
    # code leaked the handle on error and used an unnecessary 'w+' mode.
    with open(os.path.join(output_folder, 'STATISTICS.TXT'), 'w') as sfile:
        for key in stats_xml2rdf.keys():
            sfile.write(str(key) + " = " + str(stats_xml2rdf[key]) + "\n")
sfile.close()
def reset_stats():
    """Reset the per-day counters to a fresh copy of the statistics template
    (called after each day's folder has been processed)."""
    global stats_xml2rdf
    stats_xml2rdf = tbfy.statistics.xml2rdf_statistics_count.copy()
# ****************
# Helper functions
# ****************
def is_openopps_json(filename):
    """Return True when *filename* names an OpenOpps release file.

    Release files produced earlier in the pipeline carry '-release' in their
    name; the str() coercion lets path-like objects be passed as well.
    """
    return "-release" in str(filename)
def is_opencorporates_json(filename):
if "-supplier" in str(filename):
return True
else:
return False
# *************
# Main function
# *************
def main(argv):
    """Walk day-folders between -s and -e dates, running the RML Mapper jar
    on every release/company JSON file and writing N-triples plus a
    STATISTICS.TXT per day folder.

    Expected argv: -s <start_date> -e <end_date> -r <rml_folder>
    -i <input_folder> -o <output_folder> (dates as YYYY-MM-DD).
    """
    global stats_xml2rdf

    logging.basicConfig(level=config.logging["level"])

    start_date = ""
    end_date = ""
    rml_folder = ""
    input_folder = ""
    output_folder = ""

    try:
        opts, args = getopt.getopt(argv, "hs:e:r:i:o:")
    except getopt.GetoptError:
        print("xml2rdf.py -s <start_date> -e <end_date> -r <rml_folder> -i <input_folder> -o <output_folder>")
        sys.exit(2)
    for opt, arg in opts:
        if opt == "-h":
            print("xml2rdf.py -s <start_date> -e <end_date> -r <rml_folder> -i <input_folder> -o <output_folder>")
            sys.exit()
        # Note: ("-s") is a plain string, not a tuple; 'in' still matches.
        elif opt in ("-s"):
            start_date = arg
        elif opt in ("-e"):
            end_date = arg
        elif opt in ("-r"):
            rml_folder = arg
        elif opt in ("-i"):
            input_folder = arg
        elif opt in ("-o"):
            output_folder = arg

    logging.debug("xml2rdf.py: start_date = " + start_date)
    logging.debug("xml2rdf.py: end_date = " + end_date)
    logging.debug("xml2rdf.py: rml_folder = " + rml_folder)
    logging.debug("xml2rdf.py: input_folder = " + input_folder)
    logging.debug("xml2rdf.py: output_folder = " + output_folder)

    rml_filename = config.rml["rml_filename"]
    openopps_mapping_filename = config.rml["openopps_mapping_filename"]
    opencorporates_mapping_filename = config.rml["opencorporates_mapping_filename"]
    rml_input_filename = config.rml["rml_input_filename"]
    rml_output_filename = config.rml["rml_output_filename"]

    logging.debug("xml2rdf.py: rml_filename = " + rml_filename)
    logging.debug("xml2rdf.py: openopps_mapping_filename = " + openopps_mapping_filename)
    logging.debug("xml2rdf.py: opencorporates_mapping_filename = " + opencorporates_mapping_filename)
    logging.debug("xml2rdf.py: rml_input_filename = " + rml_input_filename)
    logging.debug("xml2rdf.py: rml_output_filename = " + rml_output_filename)

    # NOTE(review): both OpenOpps and OpenCorporates input paths are built
    # from the same rml_input_filename -- confirm this sharing is intended.
    rmlOpenOppsInputFilePath = os.path.join(rml_folder, rml_input_filename)
    rmlOpenCorporatesInputFilePath = os.path.join(rml_folder, rml_input_filename)
    rmlOutputFilePath = os.path.join(rml_folder, rml_output_filename)

    start = datetime.strptime(start_date, "%Y-%m-%d")
    stop = datetime.strptime(end_date, "%Y-%m-%d")
    # One iteration per calendar day; each day folder is processed in full.
    while start <= stop:
        process_start_time = datetime.now()

        created_date = datetime.strftime(start, "%Y-%m-%d")
        dirname = created_date
        dirPath = os.path.join(input_folder, dirname)
        outputDirPath = os.path.join(output_folder, dirname)

        if os.path.isdir(dirPath):
            if not os.path.exists(outputDirPath):
                os.makedirs(outputDirPath)

            for filename in os.listdir(dirPath):
                filePath = os.path.join(dirPath, filename)
                outputFilePath = os.path.join(outputDirPath, str(filename).replace(".xml", ".nt"))
                logging.info("xml2rdf.py: file = " + outputFilePath)

                rmlInputFilePath = os.path.join(rml_folder, filename)

                # Release files: copy into the RML working folder, run the
                # mapper jar, collect the N-triples output, then clean up.
                if is_openopps_json(filename):
                    release_start_time = datetime.now()
                    shutil.copy(filePath, rml_folder) # Copy release file to RML folder
                    shutil.copyfile(rmlInputFilePath, rmlOpenOppsInputFilePath) # Copy/rename relase file to input file for RML
                    os.chdir(rml_folder)
                    os.system('java -jar ' + rml_filename + ' -m ' + openopps_mapping_filename + ' -o ' + rml_output_filename)
                    shutil.copyfile(rmlOutputFilePath, outputFilePath) # Copy output file from RML to output folder
                    os.remove(rmlInputFilePath) # Remove release file in RML folder
                    os.remove(rmlOpenOppsInputFilePath) # Remove input file from RML
                    os.remove(rmlOutputFilePath) # Remove output file from RML
                    release_end_time = datetime.now()
                    release_duration_in_seconds = (release_end_time - release_start_time).total_seconds()
                    tbfy.statistics.update_stats_add(stats_xml2rdf, "release_files_processed_duration_in_seconds", release_duration_in_seconds)
                    tbfy.statistics.update_stats_count(stats_xml2rdf, "number_of_release_files")
                    tbfy.statistics.update_stats_count(stats_xml2rdf, "number_of_files")

                # Company (supplier) files: same flow with the
                # OpenCorporates mapping.
                if is_opencorporates_json(filename):
                    company_start_time = datetime.now()
                    shutil.copy(filePath, rml_folder) # Copy company file to RML folder
                    shutil.copyfile(rmlInputFilePath, rmlOpenCorporatesInputFilePath) # Copy/rename company file to input file for RML Mapper
                    os.chdir(rml_folder)
                    os.system('java -jar ' + rml_filename + ' -m ' + opencorporates_mapping_filename + ' -o ' + rml_output_filename)
                    shutil.copyfile(rmlOutputFilePath, outputFilePath) # Copy output file from RML to output folder
                    os.remove(rmlInputFilePath) # Remove company file in RML folder
                    os.remove(rmlOpenCorporatesInputFilePath) # Remove input file from RML
                    os.remove(rmlOutputFilePath) # Remove output file from RML
                    company_end_time = datetime.now()
                    company_duration_in_seconds = (company_end_time - company_start_time).total_seconds()
                    tbfy.statistics.update_stats_add(stats_xml2rdf, "company_files_processed_duration_in_seconds", company_duration_in_seconds)
                    tbfy.statistics.update_stats_count(stats_xml2rdf, "number_of_company_files")
                    tbfy.statistics.update_stats_count(stats_xml2rdf, "number_of_files")

            process_end_time = datetime.now()
            duration_in_seconds = (process_end_time - process_start_time).total_seconds()
            tbfy.statistics.update_stats_value(stats_xml2rdf, "files_processed_duration_in_seconds", duration_in_seconds)

            write_stats(outputDirPath) # Write statistics
            reset_stats() # Reset statistics for next folder date

        start = start + timedelta(days=1) # Increase date by one day


# *****************
# Run main function
# *****************
if __name__ == "__main__": main(sys.argv[1:])
| [
"[email protected]"
] | |
3dd964a79932cf2b519348ade89d3078ffdb6ad5 | 77ad7f88e5e732adb3f5a8ca28f52858395dba36 | /search.py | 1773f465e013687d58fab40487d55b5ecf2de1b1 | [] | no_license | yianjiajia/home_develop | 0ba841be4dab97ea4f48e405166197479884b914 | 794e1385aec3458b35dc6577b48675addba9fab1 | refs/heads/master | 2016-09-05T16:49:05.992504 | 2014-10-30T07:54:41 | 2014-10-30T07:54:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 151 | py | #!/usr/bin/env python
#coding=utf-8
def search(keyword='', top='.'):
    """Recursively search *top* (default: current directory) for files whose
    name contains *keyword*, returning the list of matching paths.

    Both parameters default, so the original zero-argument call form still
    works; an empty keyword matches every file.
    """
    import os  # local import: this module has no import section of its own

    matches = []
    for dirpath, _dirnames, filenames in os.walk(top):
        for fname in filenames:
            if keyword in fname:
                matches.append(os.path.join(dirpath, fname))
    return matches
| [
"[email protected]"
] | |
3cf752b2e0002653fa61d493e6f7133a4e84087c | 3c64f296c9e86830a1146d63ab01fa545fe64e86 | /script/Sumgenev1.py | 5242d150e67fa539493363e04e3e6d2a8f2a6a0b | [] | no_license | bioCKO/PlantPseudo | 7a00f9ebd6b1752ce40d94a2210eebff0ba57bd3 | 5d8e688157902804ec9b75d9b42b3cb7857a86fe | refs/heads/master | 2021-05-24T15:29:31.418926 | 2019-03-25T05:34:59 | 2019-03-25T05:34:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,909 | py | #!/usr/bin/env python
import sys,re
# Classify lncRNA/gene pairs by relative orientation and distance.
# argv[1]: tab-separated pairs file; argv[2]: summary output file.
# Coordinates: columns 3-4 are the lncRNA start/end (order encodes strand),
# columns 7-8 the gene start/end.
IN=open(sys.argv[1],'r')
OUT=open(sys.argv[2],'w')
OUT1=open("Gene.Classifcation.xls",'w')
#type	distance	lncRChr	lncRstart	lncRend	Chr	start	end
#genedist	193	Chr1	780106	780409	Chr1	780602	781285
#genedist	815	Chr1	2816	2497	At1NC000020	Chr1	3631	5899	AT1G01010
# Category counters: p = promoter-associated, b = body-associated,
# c = co-promoter, f = tail-to-tail; e and d appear unused.
p=0
b=0
c=0
f=0 #(head to head)
e=0
d=0
# Skip the header line.
fr=IN.readline()
distant=0
OUT1.write("\t".join(["Classification","Type","distance","lncRChr","lncRstart","lncRend","lnRNA","Chromosome","Start","End","Gene/Pseudogene"])+"\n")
for eachline in IN:
    split=eachline.rstrip().split("\t")
    la=int(split[3])
    lb=int(split[4])
    ga=int(split[7])
    gb=int(split[8])
    # Only pairs closer than 2 kb are classified; farther ones count as distant.
    if int(split[1])<2000 and split[0]=="genedist":
        # lncRNA on minus strand (lb<la), gene on plus strand (gb>ga).
        if (lb<la and gb>ga):
            if ga>lb:
                p+=1
                OUT1.write("%s\t%s\n"%("Promoter associated",eachline.rstrip()))
                #print eachline
            elif lb>gb:
                f+=1
                OUT1.write("%s\t%s\n"%("Tail to Tail",eachline.rstrip()))
                #print eachline
            elif lb<gb:
                b+=1
                OUT1.write("%s\t%s\n"%("Body associated",eachline.rstrip()))
                #print eachline
        # lncRNA on plus strand, gene on minus strand.
        elif (la<lb and ga>gb):
            if ga<la:
                p+=1
                OUT1.write("%s\t%s\n"%("Promoter associated",eachline.rstrip()))
                #print eachline
            elif lb>gb:
                b+=1
                OUT1.write("%s\t%s\n"%("Body associated",eachline.rstrip()))
                #print eachline
            elif gb>lb:
                f+=1
                OUT1.write("%s\t%s\n"%("Tail to Tail",eachline.rstrip()))
                #print eachline
        # Same strand cases fall through to co-promoter.
        elif (la<lb and ga<gb):
            c+=1
            OUT1.write("%s\t%s\n"%("Co-promoter associated",eachline.rstrip()))
            #print eachline
        else:
            c+=1
            OUT1.write("%s\t%s\n"%("Co-promoter associated",eachline.rstrip()))
            #print eachline
    elif int(split[1])>=2000 and split[0]=="genedist":
        distant+=1
# Summary counts; output handles are closed below (OUT1 is left to the
# interpreter to close at exit).
OUT.write("%s\t%s\n%s\t%s\n%s\t%s\n%s\t%s\n%s\t%s\n"%("promoter",p,"body",b,"Co",c,"f",f,"distant",distant))
IN.close()
OUT.close()
| [
"[email protected]"
] | |
bc3b75fa6e22f063f4237c8ad02fba63e6a5f5b9 | 7cf9af7035aac9fab7ccb50a328c527500245ad1 | /linkedlist/returnk2last.py | 029c850092a63741e38176e905c6e0a4ff1a84a2 | [] | no_license | karthikramesh36/Algorithms | 05ceab3b18d350ac5642a08f66439dba3602c3a1 | b47535ed37c41371792f5a7f6404239eedfa84ef | refs/heads/master | 2021-09-02T10:30:51.394407 | 2018-01-01T22:16:30 | 2018-01-01T22:16:30 | 115,951,390 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 310 | py | from linkedlistnode import linkedlist
def return_kth_to_last(ll, k):
    """Return the k-th node from the end of singly linked list *ll*.

    k=1 yields the last node, k=2 the second-to-last, and so on.  Returns
    None when the list holds fewer than k nodes.

    Fixes two defects in the original: the runner loop stopped one node
    early (returning the (k+1)-th from last), and when k equaled the list
    length the lead pointer was None so ``p1.next`` raised AttributeError.
    """
    lead = ll.head
    trail = ll.head
    # Advance the lead pointer k nodes ahead of trail; bail out early if the
    # list is shorter than k.
    for _ in range(k):
        if lead is None:
            return None
        lead = lead.next
    # Move both pointers until lead runs off the end; trail is then exactly
    # k nodes from the last one.
    while lead is not None:
        lead = lead.next
        trail = trail.next
    return trail
| [
"[email protected]"
] | |
5a92f6571be961a4c65a3575a55a2e80b20e4677 | c614b694607dc5c3271c29aff2f9f8b8e200e68e | /doc/source/_ext/CppTransDslLexer.py | 37fa0a0ec179973e543ee21f605faa8ea31c3066 | [
"MIT"
] | permissive | gettogetto/trans-dsl-2 | f3bd6e91ddf5578bd9af0084c77537deefa1453a | 49f235166cea5f511c2c41c56abe554e60c60e86 | refs/heads/master | 2023-06-28T19:23:47.773849 | 2021-07-27T08:07:59 | 2021-07-27T08:07:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,227 | py |
from pygments.lexers.c_cpp import CppLexer
from pygments.token import Name, Keyword
from pygments.lexers import get_lexer_by_name # refer LEXERS
from pygments.lexers._mapping import LEXERS
class CppTransDslLexer(CppLexer):
    """C++ lexer extended with trans-dsl keyword highlighting.

    On top of the stock Pygments C++ lexer it re-tags the DSL's ``__xxx``
    macros as keywords and selected framework identifiers as function,
    class and constant names.
    """

    # DSL macro vocabulary highlighted as language keywords.
    EXTRA_KEYWORDS = set(('__sync', '__asyn', '__sequential', '__concurrent', '__time_guard',
                          '__transaction', '__fork', '__join', '__optional', '__switch', '__case',
                          '__otherwise', '__wait', '__peek', '__with_id', '__void', '__safe',
                          '__procedure', '__throw', '__is_status', '__loop',
                          '__break_if', '__redo_if', '__while', '__until', '__loop_max',
                          '__forever', '__multi_thread', '__on_fail', '__on_succ', '__on_status',
                          '__req', '__rsp',
                          '__params', '__def', '__as', '__thread_id', '__timer_id', '__recover',
                          '__as_trans', '__apply', '__with', '__is_failed', '__is_succ', '__not',
                          '__bind_listener', '__listeners'))
    # Framework entry points highlighted as function names.
    # Fixed: a missing comma after 'onActionEventConsumed' made Python
    # concatenate it with 'onActionDone' into the single bogus entry
    # 'onActionEventConsumedonActionDone', so neither name was highlighted.
    FUNC_KEYWORDS = set(('exec', 'handleEvent', 'kill', 'WAIT_ON', 'onActionStarting', 'onActionEventConsumed',
                         'onActionDone', 'onActionStopped', 'onActionKilled'))
    # Framework types highlighted as class names.
    TYPE_KEYWORDS = set(('ActionThreadId', 'TimerId', 'ActionId', 'Status', 'TransactionInfo', 'Event', 'ObservedActionIdRegistry'))
    # Framework constants / helper macros highlighted as constants.
    CONST_KEYWORDS = set(('SUCCESS', 'CONTINUE', 'UNKNOWN_EVENT', 'FAILED',
                          'DEF_SIMPLE_ASYNC_ACTION'))

    def get_tokens_unprocessed(self, text):
        """Delegate to CppLexer, then retag plain Name tokens that match one
        of the keyword tables above."""
        for index, token, value in CppLexer.get_tokens_unprocessed(self, text):
            if token is Name and value in self.EXTRA_KEYWORDS:
                yield index, Keyword, value
            elif token is Name and value in self.FUNC_KEYWORDS:
                yield index, Name.Function, value
            elif token is Name and value in self.TYPE_KEYWORDS:
                yield index, Name.Class, value
            elif token is Name and value in self.CONST_KEYWORDS:
                yield index, Name.Constant, value
            else:
                yield index, token, value
def setup(app):
    # Sphinx extension hook: register the lexer so ``code-block:: c++``
    # directives get the DSL-aware highlighting.
    # NOTE(review): newer Sphinx versions expect a lexer *class* here while
    # older ones took an instance -- confirm against the project's Sphinx.
    app.add_lexer('c++', CppTransDslLexer)
| [
"[email protected]"
] | |
d365c4742c13b5044cdda04b7787d84d8cf15a47 | c161788ed19589700655d476f277d147039e49e3 | /setup.py | 9ab973cc8d993ff863534c3dcaebbd4a59843914 | [
"MIT"
] | permissive | kateshostak/expander-lib | cbd3e9e50a674cac4fc52a8c2ef52f28355c4d3b | c1c7c0bec977fdc6492e4e08a1d1757783462c76 | refs/heads/master | 2021-07-06T08:32:53.675122 | 2017-09-27T12:44:09 | 2017-09-27T12:44:09 | 105,016,555 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 398 | py | from setuptools import setup
# Package metadata for the 'expandergraphs' distribution.
setup(
    name='expandergraphs',
    version='1.2',
    description='Implementation of several expander construction algorithms',
    url='http://github.com/',  # NOTE(review): placeholder URL -- point at the real repository
    author='Kate Shostak',
    author_email='[email protected]',
    license='MIT',
    packages=[
        'expanders',
        'graphs',
        'expansion_constant',
    ],
    zip_safe=False
)
| [
"[email protected]"
] | |
f992459a0c1f31d2551f162596e9cf570b4332b5 | 4c2c1775b6b319ae07155f46e70a6726ab0980c2 | /algo/algo_code/naga-algo/naga_interactive/cvr_space_ecom/calibrate/script/PIDControl_plan.py | 09c52e553f3e94d142a8e90b20226180e9d266a5 | [] | no_license | kiminh/util | 8e4b204849a57941120e37c9330772f03c8892d0 | 763a71031d9c0ef207b87dc03ebc55208a2dd5ad | refs/heads/master | 2022-06-09T06:09:13.221754 | 2020-04-27T04:23:00 | 2020-04-27T04:23:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,627 | py | #coding:utf-8
import json
import sys
import base64
import time
import os
import math
# Python 2 script (bare ``print`` statements below): PID-style calibration of
# per-plan predicted CVR against observed conversions.
# argv[1]: JSON-lines file of hourly per-plan stats; argv[2]: version tag
# used in the output file names.
if len(sys.argv) < 3:
    print("Usage: python join_transform.py plan_clc_trans version")
    exit(1)
# plan_clc_trans[planid][hour] -> {'click', 'trans', 'adpcvr', 'pcvr_cal'}
plan_clc_trans = {}
for raw in open(sys.argv[1]):
    line_json = json.loads(raw.strip("\n\r "))
    planid = line_json['planid']
    adpcvr = float(line_json['adpcvr'])
    pcvr_cal = float(line_json['pcvr_cal'])
    plan_click = float(line_json['click'])
    plan_trans = float(line_json['trans'])
    hour = line_json['hour']
    if planid not in plan_clc_trans:
        plan_clc_trans[planid] = {}
    if hour not in plan_clc_trans[planid]:
        plan_clc_trans[planid][hour] = {'click': 0, 'trans': 0, 'adpcvr': 0, 'pcvr_cal': 0}
    plan_clc_trans[planid][hour]['click'] = plan_click
    plan_clc_trans[planid][hour]['trans'] = plan_trans
    plan_clc_trans[planid][hour]['adpcvr'] = adpcvr
    plan_clc_trans[planid][hour]['pcvr_cal'] = pcvr_cal
# Per-hour-slot error series and whole-plan aggregate error per plan.
slot_error = {}
global_plan_error = {}
for planid, value in plan_clc_trans.items():
    # Process hours in chronological order.
    value_sort = sorted(value.items(), key=lambda d: d[0])
    slot_error[planid] = []
    global_plan_error[planid] = 0
    plan_click = 0
    plan_trans = 0
    plan_pcvr = 0.
    plan_pcvr_cal = 0.
    for ts, item in value_sort:
        plan_click += item['click']
        plan_trans += item['trans']
        plan_pcvr += item['adpcvr']
        plan_pcvr_cal += item['pcvr_cal']
        pcvr = item['adpcvr']
        pcvr_cal = item['pcvr_cal']
        real_trans = item['trans']
        # Too little traffic to judge: record zero error for this slot.
        if item['click'] < 200 and item['trans'] == 0:
            slot_error[planid].append((ts, 0))
            continue
        if item['click'] < 200 and item['trans'] < 3:
            slot_error[planid].append((ts, 0))
            continue
        # Enough clicks but no conversions: +1 smoothing on the denominator.
        if item['click'] > 200 and item['trans'] == 0:
            error = 1.0 - (pcvr_cal / (real_trans+1))
            slot_error[planid].append((ts, error))
            continue
        # Normal case: +0.2 smoothing; error > 0 means under-prediction.
        diff = pcvr_cal / (real_trans+0.2)
        error = 1.0 - diff
        slot_error[planid].append((ts, error))
    if plan_click < 200:
        global_plan_diff = 0
    else:
        global_plan_diff = 1.0 - (plan_pcvr_cal / (plan_trans+0.2))
    global_plan_error[planid] = {}
    global_plan_error[planid]['diff'] = global_plan_diff
    global_plan_error[planid]['pcvr'] = plan_pcvr_cal
    global_plan_error[planid]['trans'] = plan_trans
    global_plan_error[planid]['click'] = plan_click
json.dump(global_plan_error, open('global_plan_error.json', 'w'), indent=4)
# PID gains: proportional on the latest slot error, integral on the
# whole-plan error, derivative on the change between the last two slots.
lamb_p = 0.04 #error
lamb_d = 0.02
lamb_i = 0.9
cali = {}
cali_exp = {}
for planid, value in slot_error.items():
    if len(value) == 0:
        cali_exp[planid] = 1
        continue
    cali[planid] = lamb_p * value[-1][1]
    cali[planid] += lamb_i * global_plan_error[planid]['diff']
    if len(value) > 1:
        cali[planid] += lamb_d * (value[-1][1] - value[-2][1])
    # Exponentiate and clamp the multiplier to [0.05, 1.3].
    cali_exp[planid] = math.exp(cali[planid])
    cali_exp[planid] = min(max(cali_exp[planid], 0.05), 1.3)
print "==================================global error=============================="
print global_plan_error
print "======================================cali================================="
print cali
print "===================================cali exp================================"
print cali_exp
print "============================================================================"
out_path='./data/plan_cali_%s.json' % sys.argv[2]
out_path1='./data/plan_result_%s.json' % sys.argv[2]
json.dump(cali_exp, open(out_path, 'w'), indent=4)
json.dump(plan_clc_trans, open(out_path1, 'w'), indent=4)
| [
"[email protected]"
] | |
60bda65835915d9981c013605885efb4ad09febb | 3c78f38c97d9ea0435f61981783af650f78bb155 | /Tests/Acteurs.py | e64e6501234ecfe5b97c1c4ba1774c4a8a11b53b | [] | no_license | sclaoud/RAC | abdabe8955ae2d2aea2198bd54be0cde940c589d | c078e0ba16a9399aaad96983fdd680cc7a89a8fc | refs/heads/master | 2023-06-04T08:28:32.250182 | 2021-06-15T20:13:06 | 2021-06-15T20:13:06 | 331,352,045 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,658 | py | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'Acteurs.ui'
#
# Created by: PyQt5 UI code generator 5.15.3
#
# WARNING: Any manual changes made to this file will be lost when pyuic5 is
# run again. Do not edit this file unless you know what you are doing.
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_Acteurs(object):
    """Generated PyQt5 form for the "personnages d'un acteur" dialog.

    NOTE: this class was produced by pyuic5 from Acteurs.ui; per the header
    warning, edit the .ui file and regenerate instead of changing code here.
    """
    def setupUi(self, Acteurs):
        """Build the widget tree on the *Acteurs* dialog."""
        Acteurs.setObjectName("Acteurs")
        Acteurs.resize(380, 269)
        # Bottom row of navigation/action buttons.
        self.btnCloseActeur = QtWidgets.QPushButton(Acteurs)
        self.btnCloseActeur.setGeometry(QtCore.QRect(250, 210, 75, 23))
        self.btnCloseActeur.setObjectName("btnCloseActeur")
        self.btnSaveActeur = QtWidgets.QPushButton(Acteurs)
        self.btnSaveActeur.setGeometry(QtCore.QRect(150, 210, 75, 23))
        self.btnSaveActeur.setObjectName("btnSaveActeur")
        self.btnPrecActeur = QtWidgets.QPushButton(Acteurs)
        self.btnPrecActeur.setGeometry(QtCore.QRect(50, 210, 31, 23))
        self.btnPrecActeur.setObjectName("btnPrecActeur")
        self.btnSvActeur = QtWidgets.QPushButton(Acteurs)
        self.btnSvActeur.setGeometry(QtCore.QRect(100, 210, 31, 23))
        self.btnSvActeur.setObjectName("btnSvActeur")
        # Vertical layout holding one labelled input row per field.
        self.verticalLayoutWidget_3 = QtWidgets.QWidget(Acteurs)
        self.verticalLayoutWidget_3.setGeometry(QtCore.QRect(17, 10, 351, 181))
        self.verticalLayoutWidget_3.setObjectName("verticalLayoutWidget_3")
        self.verticalLayout_3 = QtWidgets.QVBoxLayout(self.verticalLayoutWidget_3)
        self.verticalLayout_3.setContentsMargins(0, 0, 0, 0)
        self.verticalLayout_3.setObjectName("verticalLayout_3")
        # Row: film title.
        self.horizontalLayout_6 = QtWidgets.QHBoxLayout()
        self.horizontalLayout_6.setObjectName("horizontalLayout_6")
        self.lbltitreActeur = QtWidgets.QLabel(self.verticalLayoutWidget_3)
        self.lbltitreActeur.setObjectName("lbltitreActeur")
        self.horizontalLayout_6.addWidget(self.lbltitreActeur)
        self.TitreduFilm = QtWidgets.QLineEdit(self.verticalLayoutWidget_3)
        self.TitreduFilm.setObjectName("TitreduFilm")
        self.horizontalLayout_6.addWidget(self.TitreduFilm)
        self.verticalLayout_3.addLayout(self.horizontalLayout_6)
        # Row: character name.
        self.horizontalLayout_5 = QtWidgets.QHBoxLayout()
        self.horizontalLayout_5.setObjectName("horizontalLayout_5")
        self.lblnomPersActeur = QtWidgets.QLabel(self.verticalLayoutWidget_3)
        self.lblnomPersActeur.setObjectName("lblnomPersActeur")
        self.horizontalLayout_5.addWidget(self.lblnomPersActeur)
        self.NomPers = QtWidgets.QLineEdit(self.verticalLayoutWidget_3)
        self.NomPers.setObjectName("NomPers")
        self.horizontalLayout_5.addWidget(self.NomPers)
        self.verticalLayout_3.addLayout(self.horizontalLayout_5)
        # Row: employment start date.
        self.horizontalLayout_4 = QtWidgets.QHBoxLayout()
        self.horizontalLayout_4.setObjectName("horizontalLayout_4")
        self.lbldebutempActeur = QtWidgets.QLabel(self.verticalLayoutWidget_3)
        self.lbldebutempActeur.setObjectName("lbldebutempActeur")
        self.horizontalLayout_4.addWidget(self.lbldebutempActeur)
        self.dateDebut = QtWidgets.QDateEdit(self.verticalLayoutWidget_3)
        self.dateDebut.setObjectName("dateDebut")
        self.horizontalLayout_4.addWidget(self.dateDebut)
        self.verticalLayout_3.addLayout(self.horizontalLayout_4)
        # Row: employment end date.
        self.horizontalLayout_3 = QtWidgets.QHBoxLayout()
        self.horizontalLayout_3.setObjectName("horizontalLayout_3")
        self.lblfinempActeur = QtWidgets.QLabel(self.verticalLayoutWidget_3)
        self.lblfinempActeur.setObjectName("lblfinempActeur")
        self.horizontalLayout_3.addWidget(self.lblfinempActeur)
        self.DateFin = QtWidgets.QDateEdit(self.verticalLayoutWidget_3)
        self.DateFin.setObjectName("DateFin")
        self.horizontalLayout_3.addWidget(self.DateFin)
        self.verticalLayout_3.addLayout(self.horizontalLayout_3)
        # Row: fee ("cachet").
        self.horizontalLayout_2 = QtWidgets.QHBoxLayout()
        self.horizontalLayout_2.setObjectName("horizontalLayout_2")
        self.lblcachetActeur = QtWidgets.QLabel(self.verticalLayoutWidget_3)
        self.lblcachetActeur.setObjectName("lblcachetActeur")
        self.horizontalLayout_2.addWidget(self.lblcachetActeur)
        self.cachet = QtWidgets.QLineEdit(self.verticalLayoutWidget_3)
        self.cachet.setObjectName("cachet")
        self.horizontalLayout_2.addWidget(self.cachet)
        self.verticalLayout_3.addLayout(self.horizontalLayout_2)
        self.retranslateUi(Acteurs)
        QtCore.QMetaObject.connectSlotsByName(Acteurs)
    def retranslateUi(self, Acteurs):
        """Apply the (translatable) French captions to all widgets."""
        _translate = QtCore.QCoreApplication.translate
        Acteurs.setWindowTitle(_translate("Acteurs", "Liste des personnages d\'un acteur"))
        self.btnCloseActeur.setText(_translate("Acteurs", "Fermer"))
        self.btnSaveActeur.setText(_translate("Acteurs", "Sauvegarder"))
        self.btnPrecActeur.setText(_translate("Acteurs", "<"))
        self.btnSvActeur.setText(_translate("Acteurs", ">"))
        self.lbltitreActeur.setText(_translate("Acteurs", "Titre du film"))
        self.lblnomPersActeur.setText(_translate("Acteurs", "Nom du personnage"))
        self.lbldebutempActeur.setText(_translate("Acteurs", "Début d\'emploi"))
        self.lblfinempActeur.setText(_translate("Acteurs", "Fin d\'emploi"))
        self.lblcachetActeur.setText(_translate("Acteurs", "Cachet"))
# Manual test harness: run the module directly to preview the dialog.
if __name__ == "__main__":
    import sys
    app = QtWidgets.QApplication(sys.argv)
    Acteurs = QtWidgets.QDialog()
    ui = Ui_Acteurs()
    ui.setupUi(Acteurs)
    Acteurs.show()
    sys.exit(app.exec_())
| [
"[email protected]"
] | |
203e28811f3eb46a57a36d2f9c680ed5ebe8639c | 894551c32f23d96a3bb085b8bf9ff98a2c513349 | /data_structure/linked_list_singly.py | 56a8fb1bc5f82adf121e6b4be1f24dc0a5e14ded | [] | no_license | amirkhan1092/Oops_python | 8c1c4cf419beb94916f7a519d0e9e85b35484aaf | 5f285d74a03a214f6d0b599116330245d0af9d41 | refs/heads/master | 2020-04-25T12:13:06.438939 | 2019-07-19T05:26:16 | 2019-07-19T05:26:16 | 172,770,746 | 5 | 3 | null | null | null | null | UTF-8 | Python | false | false | 1,961 | py | class Node:
def __init__(self, data=None, next_node=None):
self.data = data
self.next_node = next_node
    def get_data(self):
        """Return the payload stored in this node."""
        return self.data
    def get_next(self):
        """Return the successor node (None at the end of the list)."""
        return self.next_node
    def set_next(self, new_next):
        """Replace this node's successor with *new_next*."""
        self.next_node = new_next
def __str__(self):
return ('(' + str(self.data) + ')')
# gh = Node(5)
# print(gh, id(gh))
class LinkedList:
    """Singly linked list of Node objects; add() prepends at the head."""

    def __init__(self, head=None):
        self.head = head

    def add(self, data):
        """Prepend a new node holding *data*."""
        node = Node(data)
        node.set_next(self.head)
        self.head = node

    def size(self):
        """Return the number of nodes in the list."""
        count = 0
        node = self.head
        while node:
            count += 1
            node = node.get_next()
        return count

    def find(self, data):
        """Return the first node holding *data*; raise ValueError if absent."""
        node = self.head
        while node is not None:
            if node.get_data() == data:
                return node
            node = node.get_next()
        raise ValueError("Data not in list")

    def remove(self, data):
        """Unlink the first node holding *data*; raise ValueError if absent."""
        previous = None
        node = self.head
        while node is not None:
            if node.get_data() == data:
                if previous is None:
                    # Removing the head: advance the head pointer.
                    self.head = node.get_next()
                else:
                    previous.set_next(node.get_next())
                return
            previous, node = node, node.get_next()
        raise ValueError("Data not in list")

    def print_list(self):
        """Print the list as '(a)->(b)->None'."""
        node = self.head
        while node is not None:
            print(node, end='->')
            node = node.get_next()
        print('None')
# Quick demo: each add() prepends, so the list ends up (6)->(8)->(5)->None.
mylist = LinkedList()
mylist.add(5)
mylist.add(8)
mylist.add(6)
mylist.print_list()
print(mylist.size())  # 3
| [
"[email protected]"
] | |
cca0a0ab434997892076d73b12f126520f45cf71 | d331587557ba867592786e258270f3f7055e1890 | /Unit 10 Advanced Topics in Python/01 Advanced Topics in Python/List Comprehentions/5-List Comprehension Syntax.py | 38c7fd6e55412281e6dd8b7ffcb796ba81ccf1c4 | [] | no_license | ranjit-manutd/Python3_CodeAcademy | 5813f4b61ee8208e1325850bf2367c501c47d1e7 | c183af42f9158daa7c0249e2ff7506d0997f7c55 | refs/heads/master | 2020-04-24T03:24:38.923752 | 2019-02-20T12:37:15 | 2019-02-20T12:37:15 | 171,669,119 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 196 | py | doubles_by_3 = [x*2 for x in range(1,6) if (x*2) % 3 == 0]
# Complete the following line. Use the line above for help.
# Squares of the even numbers 2..10: [4, 16, 36, 64, 100].
even_squares = [n * n for n in range(2, 11) if n % 2 == 0]
print(even_squares)
| [
"[email protected]"
] | |
e2f7224431878f3297dd2e2c98252bbe54c55e43 | c5eecd89a263efae5d2d0bda04bb25c2d3cfda72 | /HelloWorld/blog/urls.py | 8fd209f3b0fc9a8738f377c356aaa555c3caac56 | [] | no_license | otischan/Web-Project | 6b04a4b452f0800ee083617531cf75df10cbfbc2 | 2144b995b7298fe607b835481073ed3f94d58771 | refs/heads/master | 2022-12-23T04:42:16.746518 | 2022-12-17T14:50:21 | 2022-12-17T15:05:15 | 150,969,833 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 232 | py | from django.conf.urls import url
from blog.views import blog, response_date_time, response_user_info_submit
# Django resolves URL patterns in order, so ``urlpatterns`` must be a *list*;
# the original used a set literal, which has no defined iteration order.
urlpatterns = [
    url(r'^date_time/', response_date_time),
    url(r'^user_info_submit/', response_user_info_submit),
]
| [
"[email protected]"
] | |
4613553d208cd92a04ba92d9f5c5430d97e4529e | 6b001ecb3ee4d69cbec92a14edb4b3d7a6158581 | /summarization/bertabs/Utility/clean_directories.py | 60a0914dc31421413a86c221b90db78f84123c7a | [
"Apache-2.0"
] | permissive | sebbersk/Surmize | 287f996e48b4e0304eb89261f48562ac88507444 | 4ec3fc1a86b7a43935cd454745187b5612ada467 | refs/heads/master | 2022-12-31T18:02:07.046351 | 2020-10-20T12:13:13 | 2020-10-20T12:13:13 | 264,194,104 | 0 | 0 | Apache-2.0 | 2020-10-20T12:13:14 | 2020-05-15T12:58:40 | null | UTF-8 | Python | false | false | 173 | py | import glob
import os
def clean_directories(Directories):
    """Delete every entry matched by ``*`` directly inside each directory.

    Only top-level regular files are removable; os.remove would raise on a
    subdirectory (same as the original behaviour).  The directories
    themselves are kept.
    """
    for directory in Directories:
        # os.path.join is separator-safe, unlike the original "dir + '/*'",
        # and avoids shadowing the builtin ``dir``.
        for path in glob.glob(os.path.join(directory, "*")):
            os.remove(path)
| [
"[email protected]"
] | |
32a77e736720ddc838833b47fd0cbb182dbb2704 | 0a154beeec7d8f2f1b881698cb482011b051c6fe | /baekjoon/marathon2.py | 4f89159ca7a232f698baad3de9ae469c79191821 | [] | no_license | kimsungho97/Algorithms | 2326270cc4e6e3c1d704b5d00ab327f8ce56f197 | bdbd1e70949830024e2aaff8d8f78fdc720cb438 | refs/heads/main | 2023-08-18T16:58:11.121514 | 2021-10-23T10:55:21 | 2021-10-23T10:55:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 864 | py | #https://www.acmicpc.net/problem/10653
import math
import itertools
# First input line is "<n> <k>": n checkpoints in total, of which k may be
# skipped (BOJ 10653, linked at the top of the file).
line=input()
n=int(line.split(" ")[0])
k=int(line.split(" ")[1])
points=[]  # filled below with one [x, y] pair per checkpoint
def gettotal(tmp):
    """Return the total Manhattan distance along the ordered points in *tmp*."""
    pts = list(tmp)
    total = 0
    # Walk consecutive pairs; a single point (or empty input) totals 0.
    for (x1, y1), (x2, y2) in zip(pts, pts[1:]):
        total += abs(x1 - x2) + abs(y1 - y2)
    return total
for i in range(0,n):
line=input()
points.append([int(line.split(" ")[0]),int(line.split(" ")[1])])
# Enumerate every way of keeping n-k-2 interior checkpoints (i.e. skipping k)
# and take the cheapest route.  The combinations list is now built ONCE; the
# original re-materialised the entire list on every loop iteration, which is
# quadratic in the (potentially huge) number of combinations.
routes = list(itertools.combinations(points[1:-1], n - k - 2))
total = min(gettotal([points[0]] + list(route) + [points[-1]]) for route in routes)
print(total)
| [
"[email protected]"
] | |
4d610651d70a65c82c5240138392e346d9c43333 | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_155/683.py | 2a9f05d2e65f8fdf217ad4b89face85a2697eb29 | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 660 | py | import collections
def f7(seq):
    """Return the items of *seq* in first-seen order with duplicates removed."""
    seen = set()
    result = []
    for item in seq:
        if item not in seen:
            seen.add(item)
            result.append(item)
    return result
# Google Code Jam "Standing Ovation": read all cases from A-large.in.
f = [i for i in open('A-large.in')]
f[-1] +='\n'  # make sure the last line also ends with a newline before stripping
data = [i[:-1] for i in f]  # drop the trailing newline from every line
cases = []
for i in range(int(data[0])):
    # Each case line is "<Smax> <digit string of audience counts>".
    people = data[i+1].split(' ')
    print(people)
    standing = int(people[1][0])  # audience members with shyness 0 stand first
    added = 0
    for j in range(1, int(people[0])+1):
        print('variant', j, standing)
        if j>standing:
            # Not enough people standing to trigger shyness level j:
            # invite friends to fill the gap.
            added+= j-standing
            standing += j-standing
        standing += int(people[1][j])
    cases.append('Case #'+str(i+1)+': '+str(added))
print(cases)
# NOTE(review): file handle is never closed; consider a "with" block.
f1 = open('output.txt','w')
[f1.write(i+'\n') for i in cases] | [
"[email protected]"
] | |
62d3af97ac5caaed3a5faf3a13843e5b5bd328cb | 7278b31ebd6362bebf6986c2f3eca89d87201eb2 | /exp/sandbox/predictors/test/TreeCriterionPyTest.py | 1024a52d21957dae7be17e6fb3ba91771dd5a990 | [] | no_license | malcolmreynolds/APGL | c19827b1b834d3491d98a751c91838177aedc29e | 1703510cbb51ec6df0efe1de850cd48ef7004b00 | refs/heads/master | 2020-12-25T05:52:45.826947 | 2013-03-26T12:30:00 | 2013-03-26T12:30:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,375 | py | import numpy
import unittest
import numpy.testing as nptst
from exp.sandbox.predictors.TreeCriterionPy import findBestSplit2, findBestSplitRand, findBestSplitRisk
from apgl.data.ExamplesGenerator import ExamplesGenerator
class TreeCriterionPyTest(unittest.TestCase):
    """Unit tests for the split-finding helpers in
    exp.sandbox.predictors.TreeCriterionPy (findBestSplit2,
    findBestSplitRand, findBestSplitRisk)."""
    def setUp(self):
        # Fixed seed plus raising on numpy FP errors keeps runs deterministic.
        numpy.random.seed(21)
        numpy.seterr("raise")
    def testFindBestSplit(self):
        minSplit = 1
        # Feature 2 separates the two classes perfectly at 9.5.
        X = numpy.zeros((20, 10))
        y = numpy.ones(20)
        X[0:10, 2] = numpy.arange(10)
        X[10:, 2] = numpy.arange(10)+10
        y[0:10] = -1
        nodeInds = numpy.arange(X.shape[0])
        # NOTE(review): numpy.int was removed in NumPy >= 1.24; use int instead.
        argsortX = numpy.zeros(X.shape, numpy.int)
        for i in range(X.shape[1]):
            # Double argsort converts values to their ranks within the column.
            argsortX[:, i] = numpy.argsort(X[:, i])
            argsortX[:, i] = numpy.argsort(argsortX[:, i])
        bestError, bestFeatureInd, bestThreshold, bestLeftInds, bestRightInds = findBestSplit2(minSplit, X, y, nodeInds, argsortX)
        self.assertEquals(bestError, 0.0)
        self.assertEquals(bestFeatureInd, 2)
        self.assertEquals(bestThreshold, 9.5)
        self.assertTrue((bestLeftInds == numpy.arange(0, 10)).all())
        self.assertTrue((bestRightInds == numpy.arange(10, 20)).all())
        #Test case where all values are the same
        X = numpy.zeros((20, 10))
        argsortX = numpy.zeros(X.shape, numpy.int)
        for i in range(X.shape[1]):
            argsortX[:, i] = numpy.argsort(X[:, i])
            argsortX[:, i] = numpy.argsort(argsortX[:, i])
        bestError, bestFeatureInd, bestThreshold, bestLeftInds, bestRightInds = findBestSplit2(minSplit, X, y, nodeInds, argsortX)
        self.assertTrue(bestLeftInds.shape[0]==0)
        self.assertTrue(bestRightInds.shape[0]==X.shape[0])
        #Another simple example
        X = numpy.random.rand(20, 1)
        y = numpy.random.rand(20)
        inds = [1, 3, 7, 12, 14, 15]
        X[inds, 0] += 10
        y[inds] += 1
        argsortX = numpy.zeros(X.shape, numpy.int)
        for i in range(X.shape[1]):
            argsortX[:, i] = numpy.argsort(X[:, i])
            argsortX[:, i] = numpy.argsort(argsortX[:, i])
        bestError, bestFeatureInd, bestThreshold, bestLeftInds, bestRightInds = findBestSplit2(minSplit, X, y, nodeInds, argsortX)
        nptst.assert_array_equal(bestRightInds, numpy.array(inds))
        #Test minSplit
        minSplit = 10
        bestError, bestFeatureInd, bestThreshold, bestLeftInds, bestRightInds = findBestSplit2(minSplit, X, y, nodeInds, argsortX)
        self.assertTrue(bestLeftInds.shape[0] >= minSplit)
        self.assertTrue(bestRightInds.shape[0] >= minSplit)
        #Vary nodeInds
        minSplit = 1
        nodeInds = numpy.arange(16)
        bestError, bestFeatureInd, bestThreshold, bestLeftInds, bestRightInds = findBestSplit2(minSplit, X, y, nodeInds, argsortX)
        nptst.assert_array_equal(bestRightInds, numpy.array(inds))
        nptst.assert_array_equal(bestLeftInds, numpy.setdiff1d(nodeInds, numpy.array(inds)))
        nodeInds = numpy.arange(10)
        bestError, bestFeatureInd, bestThreshold, bestLeftInds, bestRightInds = findBestSplit2(minSplit, X, y, nodeInds, argsortX)
        nptst.assert_array_equal(bestRightInds, numpy.array([1,3,7]))
        nptst.assert_array_equal(bestLeftInds, numpy.setdiff1d(nodeInds, numpy.array([1,3,7])))
    @unittest.skip("")
    def testFindBestSplit2(self):
        # Skipped: compares findBestSplit2 against the old findBestSplit
        # signature, which no longer matches the imported helpers.
        minSplit = 1
        X = numpy.zeros((20, 10))
        y = numpy.ones(20)
        X[0:10, 2] = numpy.arange(10)
        X[10:, 2] = numpy.arange(10)+10
        y[0:10] = -1
        bestError, bestFeatureInd, bestThreshold, bestLeftInds, bestRightInds = findBestSplit2(minSplit, X, y)
        self.assertEquals(bestError, 0.0)
        self.assertEquals(bestFeatureInd, 2)
        self.assertEquals(bestThreshold, 9.5)
        self.assertTrue((bestLeftInds == numpy.arange(0, 10)).all())
        self.assertTrue((bestRightInds == numpy.arange(10, 20)).all())
        #Test case where all values are the same
        X = numpy.zeros((20, 10))
        bestError, bestFeatureInd, bestThreshold, bestLeftInds, bestRightInds = findBestSplit2(minSplit, X, y)
        self.assertTrue(bestRightInds.shape[0]==0)
        #Another simple example
        X = numpy.random.rand(20, 1)
        y = numpy.random.rand(20)
        inds = [1, 3, 7, 12, 14, 15]
        X[inds, 0] += 10
        y[inds] += 1
        bestError, bestFeatureInd, bestThreshold, bestLeftInds, bestRightInds = findBestSplit2(minSplit, X, y)
        for i in range(10):
            numExamples = numpy.random.randint(1, 200)
            numFeatures = numpy.random.randint(1, 10)
            X = numpy.random.rand(numExamples, numFeatures)
            y = numpy.random.rand(numExamples)
            # NOTE(review): findBestSplit is not imported at the top of this
            # module -- this would raise NameError if the skip were removed.
            bestError, bestFeatureInd, bestThreshold, bestLeftInds, bestRightInds = findBestSplit(minSplit, X, y)
            bestError2, bestFeatureInd2, bestThreshold2, bestLeftInds2, bestRightInds2 = findBestSplit2(minSplit, X, y)
            self.assertEquals(bestFeatureInd, bestFeatureInd2)
            self.assertAlmostEquals(bestThreshold, bestThreshold2)
            nptst.assert_array_equal(bestLeftInds, bestLeftInds2)
            nptst.assert_array_equal(bestRightInds, bestRightInds2)
    def testFindBestSplitRand(self):
        """Smoke test: the randomised splitter runs on a separable data set."""
        minSplit = 1
        numExamples = 20
        numFeatures = 10
        X = numpy.zeros((numExamples, numFeatures))
        y = numpy.ones(numExamples, numpy.int)
        X[0:10, 2] = numpy.arange(10)
        X[10:, 2] = numpy.arange(10)+10
        y[0:10] = -1
        y += 1
        nodeInds = numpy.arange(X.shape[0])
        argsortX = numpy.zeros(X.shape, numpy.int)
        for i in range(X.shape[1]):
            argsortX[:, i] = numpy.argsort(X[:, i])
            argsortX[:, i] = numpy.argsort(argsortX[:, i])
        errors, thresholds = findBestSplitRand(minSplit, X, y, nodeInds, argsortX)
        #print(errors, thresholds)
    def testFindBestSplitRisk(self):
        """Smoke test: the risk-based splitter runs on separable and random data."""
        minSplit = 1
        numExamples = 20
        numFeatures = 10
        X = numpy.zeros((numExamples, numFeatures))
        y = numpy.ones(numExamples, numpy.int)
        X[0:10, 2] = numpy.arange(10)
        X[10:, 2] = numpy.arange(10)+10
        y[0:10] = -1
        y += 1
        nodeInds = numpy.arange(X.shape[0])
        argsortX = numpy.zeros(X.shape, numpy.int)
        for i in range(X.shape[1]):
            argsortX[:, i] = numpy.argsort(X[:, i])
            argsortX[:, i] = numpy.argsort(argsortX[:, i])
        errors, thresholds = findBestSplitRisk(minSplit, X, y, nodeInds, argsortX)
        print(errors, thresholds)
        X = numpy.random.rand(numExamples, numFeatures)
        errors, thresholds = findBestSplitRisk(minSplit, X, y, nodeInds, argsortX)
        print(errors, thresholds)
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()
"[email protected]"
] | |
97e1e4e1847964f9c4778c78bead7e74358da060 | ae0182b61e0e875a2b1bddd58af02f86a580263b | /pwnpattern.py | 3ea02f634c151d6403b9222b79e33ad6fe853130 | [] | no_license | junk13/pwnpattern | cd92507053417339a133385ab052e0468208ad72 | 889dc45c1343657c5e2611682c21bfbcab691684 | refs/heads/master | 2020-04-07T19:29:14.116155 | 2016-05-15T03:21:17 | 2016-05-15T03:21:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,292 | py | """
Copyright 2016 Google Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Description:
Generate patterns for finding offsets, typically used in exploiting memory
corruption vulnerabilities. This is intended to replicate the patterns produced
by metasploit's pattern_create.rb and pattern_offset.rb.
"""
import struct
import sys
# Character classes cycled to build the pattern (matches metasploit's
# pattern_create.rb defaults: upper, lower, digits).
_default_sets = [
    "ABCDEFGHIJKLMNOPQRSTUVWXYZ",
    "abcdefghijklmnopqrstuvwxyz",
    "0123456789",
]
_default_length = 1024
_stderr = sys.stderr.write


def pattern_create(length=_default_length, sets=None):
    """Generate a cyclic pattern of *length* characters (e.g. "Aa0Aa1Aa2...").

    Position ``l`` picks its character class by ``l % len(sets)``, and the
    character within that class advances once per full cycle of the later
    classes, so short windows are unique within one period.

    Fix: indexing now uses floor division (``//``); the original ``/``
    produces a float under Python 3 and raised TypeError when used as a
    string index.
    """
    sets = sets or _default_sets
    assert len(sets) in (2, 3)
    output = []
    while len(output) < length:
        l = len(output)
        charset = sets[l % len(sets)]
        # Number of positions before this class's character advances.
        set_interval = len(sets)
        for s in sets[(l % len(sets)) + 1:]:
            set_interval *= len(s)
        output.append(charset[(l // set_interval) % len(charset)])
    return ''.join(output)
def interpret_target(target):
    """Interpret *target* as a packed little-endian value when it looks like hex.

    "0x"-prefixed values and bare 8/16-character hex strings are packed with
    struct (width chosen by hex-digit count: 4 -> H, 8 -> L, 16 -> Q,
    anything else -> L); everything else is returned unchanged.
    """
    width_codes = {4: 'H', 8: 'L', 16: 'Q'}
    if target.startswith('0x'):
        # Explicit hex prefix: almost certainly an address.
        try:
            value = int(target, 16)
            code = '<' + width_codes.get(len(target) - 2, 'L')
            return struct.pack(code, value)
        except ValueError:
            pass
    if len(target) in (8, 16):
        # Bare strings of these lengths are commonly hex dumps.
        try:
            value = int(target, 16)
            code = '<' + width_codes.get(len(target), 'L')
            return struct.pack(code, value)
        except ValueError:
            pass
    return target
def pattern_offset(target, length=_default_length, sets=None):
    """Return the offset of *target* within the generated pattern, else None.

    NOTE(review): under Python 3, interpret_target may return ``bytes``
    while the pattern is ``str``; str.index then raises TypeError rather
    than ValueError -- confirm intended Python version.
    """
    needle = interpret_target(target)
    haystack = pattern_create(length, sets)
    try:
        return haystack.index(needle)
    except ValueError:
        return None
def main(argv):
    """Interactive use: dispatch on executable name or first argument.

    Mimics metasploit's pattern_create.rb / pattern_offset.rb: the mode is
    inferred from the binary name when it contains "create"/"offset",
    otherwise taken from argv[1].
    """
    binary = argv[0]
    args = argv[1:]
    if "create" in binary:
        mode = "create"
    elif "offset" in binary:
        mode = "offset"
    else:
        # Mode must be given explicitly as the first argument.
        # NOTE(review): bare except also swallows SystemExit/KeyboardInterrupt;
        # except IndexError would be the precise guard here.
        try:
            mode = argv[1]
            args = args[1:]
        except:
            return usage()
    if mode == "create":
        length = int(args[0]) if args else _default_length
        sets = args[1:] if len(args) > 2 else None
        sys.stdout.write(pattern_create(length, sets))
        return
    if mode == "offset":
        target = args[0]
        length = int(args[1]) if len(args) > 1 else _default_length
        sets = args[2:] if len(args) > 3 else None
        sys.stdout.write('[*] Match at offset {}\n'.format(
            pattern_offset(target, length, sets)))
        return
    usage()
def usage():
    """Write a one-line usage summary to stderr."""
    _stderr("{} <mode> [length] [set_a] [set_b] [set_c]\n".format(sys.argv[0]))
# CLI entry point; mode handling lives in main().
if __name__ == "__main__":
    main(sys.argv)
| [
"[email protected]"
] | |
3346388ac3eaedb960a765084108296bd9e3ab73 | 8407a0d96a7122ef5c1bed2fae335b5c089ed6bf | /ggpy/cruft/autocode/GdlRenderer_Test.py | 4599b3da05b72ace5bdf88dbcb3333a7cc63b0be | [
"MIT"
] | permissive | hobson/ggpy | d90cdb4f545bab84e09a14cbf7a53c05ec573278 | 4e6e6e876c3a4294cd711647051da2d9c1836b60 | refs/heads/master | 2021-01-22T04:24:18.890860 | 2014-08-17T21:59:24 | 2014-08-17T21:59:24 | 18,973,392 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,033 | py | #!/usr/bin/env python
""" generated source for module GdlRenderer_Test """
# package: org.ggp.base.util.gdl.scrambler
import junit.framework.TestCase
import org.ggp.base.util.game.Game
import org.ggp.base.util.game.GameRepository
import org.ggp.base.util.gdl.grammar.Gdl
#
# * Unit tests for the GdlRenderer class, which provides a way
# * to render Gdl objects as Strings.
# *
# * @author Sam
#
class GdlRenderer_Test(TestCase):
    """ generated source for class GdlRenderer_Test """
    #
    # * One important property for GdlRenderer is that it should generate
    # * an identical rendering as if you had called the toString() method
    # * on a Gdl object.
    #
    def testSimpleRendering(self):
        """ generated source for method testSimpleRendering """
        renderer = GdlRenderer()
        repo = GameRepository.getDefaultRepository()
        for gameKey in repo.getGameKeys():
            # NOTE(review): ``game`` is never assigned -- a line such as
            # ``game = repo.getGame(gameKey)`` appears to have been lost in
            # the Java-to-Python translation; this raises NameError at runtime.
            for rule in game.getRules():
                # NOTE(review): bare ``assertEquals`` is undefined here; it
                # should presumably be ``self.assertEqual`` -- confirm.
                assertEquals(rule.__str__(), renderer.renderGdl(rule))
| [
"[email protected]"
] | |
57b951f3bc1c9049ca75ce3ea838e7c3c7f7b079 | 781e2692049e87a4256320c76e82a19be257a05d | /assignments/python/wc/src/761.py | a937fdbda8199778905f4e110d9ebe0db0ec8267 | [] | no_license | itsolutionscorp/AutoStyle-Clustering | 54bde86fe6dbad35b568b38cfcb14c5ffaab51b0 | be0e2f635a7558f56c61bc0b36c6146b01d1e6e6 | refs/heads/master | 2020-12-11T07:27:19.291038 | 2016-03-16T03:18:00 | 2016-03-16T03:18:42 | 59,454,921 | 4 | 0 | null | 2016-05-23T05:40:56 | 2016-05-23T05:40:56 | null | UTF-8 | Python | false | false | 246 | py | import pdb
def word_count(word):
    """Return a dict mapping each whitespace-separated token in *word* to
    the number of times it occurs.

    Fixes over the original: ``dict.has_key`` does not exist in Python 3;
    calling ``list.count`` once per token made the loop O(n^2) -- a single
    pass with a running tally is O(n); the unused ``return_string`` local
    is removed.
    """
    counts = {}
    for token in word.split():
        counts[token] = counts.get(token, 0) + 1
    return counts
| [
"[email protected]"
] | |
f2d83b02f04d91fd4dbf2257e53a179bd87bca8b | 1c64b8fef53b0ab8327fb8feae866c40c1e97fad | /examples/unit_tests/mock_examples/mock_urandom_2.py | 5e8380d1e75ecc20bdfc548377d40bf7c536931a | [] | no_license | idobleicher/pythonexamples | c9df5ee608dce248d18c254f379240a5a96f5801 | 3c2694f45f7937161a45d47cc20859b5944d1c36 | refs/heads/master | 2023-07-05T22:04:52.228188 | 2019-11-04T12:28:39 | 2019-11-04T12:28:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 457 | py | import unittest
from os import urandom
from unittest import mock
#if we imported the urandom function using a from statement (from os import urandom) is a special case where you can use __main__ to mock the function:
def simple_urandom(length):
    """Deterministic stand-in for os.urandom: *length* copies of 'f'."""
    return 'f' * length
class TestRandom(unittest.TestCase):
@mock.patch('__main__.urandom', side_effect=simple_urandom)
def test_urandom(self, urandom_function):
assert urandom(5) == 'fffff' | [
"[email protected]"
] | |
f268b7597b4c355ea7aba0157cd56d33ace9614a | d3f3fcce645f827fd0247e35d30ed5c2516a9568 | /Assignments/Asg5/models/tables.py | 6df4e348707620b3325227f4fe5f0adf628a5196 | [
"LicenseRef-scancode-public-domain"
] | permissive | Jvrionis/cmps183 | 757b11b6bcb033d07974321ac2ba825b74c5f9b5 | c18e985785808f7dc2f6a57b5c4156d0bf6d6d15 | refs/heads/master | 2020-03-18T07:32:42.368953 | 2018-06-25T20:48:26 | 2018-06-25T20:48:26 | 134,459,322 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,015 | py | # Define your tables below (or better in another model file) for example
#
# >>> db.define_table('mytable', Field('myfield', 'string'))
#
# Fields can be 'string','text','password','integer','double','boolean'
# 'date','time','datetime','blob','upload', 'reference TABLENAME'
# There is an implicit 'id integer autoincrement' field
# Consult manual for more options, validators, etc.
import datetime
def get_user_email():
    """Return the signed-in user's e-mail address, or None when anonymous."""
    if auth.user:
        return auth.user.email
    return None
# Images uploaded by users; created_on/created_by default to "now" and the
# currently signed-in auth user respectively.
db.define_table('user_images',
                Field('created_on','datetime', default=request.now),
                Field('created_by', 'reference auth_user', default=auth.user_id),
                Field('image_url'),
                Field('price', 'float')
                )
# Hide the audit columns from auto-generated forms and grids.
db.user_images.created_on.readable = db.user_images.created_on.writable = False
db.user_images.created_by.readable = db.user_images.created_by.writable = False
# after defining tables, uncomment below to enable auditing
#auth.enable_record_versioning(db)
| [
"[email protected]"
] | |
4bdcd3402005355f4eea8ea9a64df517fda4a710 | 5d5f931d4e097b1a8296596b38ad5e6e816033f4 | /registration.py | 46611cf95587734cbbc533e71b2768cdeaf13d09 | [] | no_license | aansong/Code2040-project | 7f6bd159f0e1fad15c4a30821509767a9716207e | f37465244c6df6de5374fab7675c1307b05f3f10 | refs/heads/master | 2020-07-28T21:10:12.744194 | 2016-09-08T14:14:10 | 2016-09-08T14:14:10 | 67,649,879 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 291 | py |
import requests
import json
api_key = '1a4b5af2e17859894a38860bf2d69de2'
git_url = 'https://github.com/aansong/Code2040-project'
payload = {'token':api_key, 'github':git_url}
endpoint = "http://challenge.code2040.org/api/register"
r = requests.post(endpoint, data =payload)
print (r.text) | [
"[email protected]"
] | |
c9cae9736e1ef7175335ee4d93a500cb343a2b1e | c3d0a0b6336a3ff73724fe1615eb1809dbdaaed8 | /NumPy Nit/NumPy_Reshape_Array.py | 8808f904a6489766a915915dac262c62b762853f | [] | no_license | Silentsoul04/FTSP_2020 | db0dae6cd9c371f3daa9219f86520dfa66348236 | 7e603af918da2bcfe4949a4cf5a33107c837894f | refs/heads/master | 2022-12-21T20:44:32.031640 | 2020-09-20T12:29:58 | 2020-09-20T12:29:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,084 | py | # -*- coding: utf-8 -*-
"""
Created on Fri Feb 21 13:45:30 2020
@author: Rajesh
"""
NumPy reshape :-
-------------
import numpy as np
my_list = [10,20,30,40,50,60]
arr = np.array(my_list)
print('The Array :', arr) # [10 20 30 40 50 60]
print('The size :', arr.size) # 6
print('The Dimensional :', arr.ndim) # 1
print('The dataType :', arr.dtype) # int32
print('******* After Reshaping the Array will ************')
res = arr.reshape(2,3)
print('The new Array :', res)
print('The Array :', res)
print('The size :', res.size) # 6
print('The Dimensional :', res.ndim) # 2
print('The dataType :', res.dtype) # int32
print('******* After Reshaping the Array will ************')
res = arr.reshape(3,2)
print('The new Array :', res)
print('The Array :', res)
print('The size :', res.size) # 6
print('The Dimensional :', res.ndim) # 2
print('The dataType :', res.dtype) # int32
----------------------------------- RESULT ---------------------------------------------------------
The Array : [10 20 30 40 50 60]
The size : 6
The Dimensional : 1
The dataType : int32
******* After Reshaping the Array will ************
The new Array : [[10 20 30]
[40 50 60]]
The Array : [[10 20 30]
[40 50 60]]
The size : 6
The Dimensional : 2
The dataType : int32
******* After Reshaping the Array will ************
The new Array : [[10 20]
[30 40]
[50 60]]
The Array : [[10 20]
[30 40]
[50 60]]
The size : 6
The Dimensional : 2
The dataType : int32
---------------------------------------------------------------------------------------------
import numpy as np
my_list = [10,20,30,40,50,60,70,80]
arr = np.array(my_list)
print('The Array :', arr) # [10 20 30 40 50 60]
print('The size :', arr.size) # 6
print('The Dimensional :', arr.ndim) # 1
print('The dataType :', arr.dtype) # int32
print('******* After Reshaping the Array will ************')
res = arr.reshape(2,2,2)
print('The new Array :', res)
print('The Array :', res)
print('The size :', res.size) # 6
print('The Dimensional :', res.ndim) # 2
print('The dataType :', res.dtype) # int32
| [
"[email protected]"
] | |
c3e3c720dc4958996d88101cf0e2ea5dcfcfb15f | 25f7db4a275bbfd71166b0d565b0c9d410e52bbd | /Quat/settings.py | 44e7538e609f8c35903b3d62b00bd5c9b6274ff5 | [] | no_license | mariovenegas/Experiencia3Backend_TapiaVenegas002D | 30bb6623f1624de6cc0366d1bc84ed02608c09f5 | 69eadd53e1dbd6a8f155fb48b48249633485b277 | refs/heads/main | 2023-06-02T08:06:52.117241 | 2021-06-20T23:05:53 | 2021-06-20T23:05:53 | 378,492,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,305 | py | """
Django settings for Quat project.
Generated by 'django-admin startproject' using Django 3.2.4.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to version control; rotate it and load
# it from the environment before any production deployment.
SECRET_KEY = 'django-insecure-7*)d7n%7-ac-gny-5mz-@wgst8q3-1_g0am+ghqo1bn4=k*s28'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True  # NOTE(review): must be False in production (leaks stack traces)
ALLOWED_HOSTS = []  # empty is fine only while DEBUG is True (localhost only)
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'appQuat'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'Quat.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'Quat.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
# NOTE(review): database credentials are hard-coded here; prefer reading them
# from environment variables so secrets stay out of version control.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.oracle',
        'NAME': '127.0.0.1:1521/xe',  # host:port/SID for a local Oracle XE
        'USER': 'c##pruebaquat',
        'PASSWORD': 'prueba',
    }
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
| [
"[email protected]"
] | |
af2943e9cfb20a4ec17909d740273fc71bd1fab4 | e02016a2fdca9073e917d241f2ccc6bdee10e855 | /tr1.py | 3013821faf7fd7761a1250d6452d08a2d944f971 | [] | no_license | Erynvorn/Training-Python-2019 | d9b47b00353dd8ea326d03486bb9196570b0d8ee | b00c52f9c6a83f143099da0d4e471377e49cfe03 | refs/heads/master | 2023-04-16T17:40:35.890063 | 2023-04-10T18:56:42 | 2023-04-10T18:56:42 | 167,865,189 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 553 | py | print('Hello world')
# Interactive console demo: greeting, age arithmetic, and a toy login check.
print('What is your name?')
myName = input()
print(myName)
print('It is good to meet you, '+ myName)
print('The length of your name is: ')
print(len(myName))
print('What is your age?')
myAge = input()
# int() raises ValueError when the age entered is not numeric.
print('You will be ' +str(int(myAge) +1) + ' in a year.')
print('Please enter password :')
password = input()
if myName == 'Daniel':
    print('Hello Daniel')
    # The password is only checked when the name is Daniel.
    if password == 'swordfish':
        print('Access granted')
    else:
        print('Wrong password')
spam = 0
while spam < 5:
    print('Hello, World')
    spam += 1
| [
"[email protected]"
] | |
2f4a3ce85d1d9838e603945a68f020568ee60069 | d60948666831a29d1f16efb87f2e9d9fd5c7f741 | /RomanNumeralConvertor.py | b9c5c1f5d2647f3391c9b32ec1a1b0be86af7dd2 | [] | no_license | ParthShinde2004/PythonChallenges | 874f7d54d15dfa046166915e47710b10376915ac | 6db731d7ecac003bb245f71aff700304ae0d5227 | refs/heads/master | 2023-07-17T05:29:04.041947 | 2021-09-10T08:56:37 | 2021-09-10T08:56:37 | 293,010,931 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,304 | py | """This is a Roman Numeral and Integer converter that can convert number from roman to denary and back from 0-3999"""
RomanValues = {
"I": 1,
"V": 5,
"X": 10,
"L": 50,
"C": 100,
"D": 500,
"M": 1000,
}
def roman_to_int(numeral):
    """Convert a Roman numeral string (e.g. ``'XIV'``) to its integer value.

    Applies the standard subtractive rule: a symbol written before a
    larger symbol is subtracted (``IV`` == 4), otherwise symbol values
    are added.

    Args:
        numeral: Roman numeral built from the symbols I, V, X, L, C, D, M.

    Returns:
        The integer value of ``numeral`` (0 for an empty string).

    Raises:
        KeyError: if ``numeral`` contains a character that is not a
            valid Roman symbol.
    """
    # Local symbol table keeps the function self-contained; the original
    # parameter name shadowed the builtin ``str`` and has been renamed.
    values = {'I': 1, 'V': 5, 'X': 10, 'L': 50,
              'C': 100, 'D': 500, 'M': 1000}
    total = 0
    for i, symbol in enumerate(numeral):
        value = values[symbol]
        # Subtract when a smaller symbol precedes a larger one (e.g. the
        # 'I' in 'IV'); otherwise add.
        if i + 1 < len(numeral) and value < values[numeral[i + 1]]:
            total -= value
        else:
            total += value
    return total
def int_to_roman(number):
    """Print (and return) the Roman numeral for ``number`` (0..3999).

    The numeral is printed without a trailing newline, exactly as the
    original implementation did; the assembled string is additionally
    returned so callers can use the value directly (the original
    returned None, so returning a value is backward-compatible).

    Args:
        number: non-negative integer, at most 3999.

    Returns:
        The Roman numeral string ('' for 0, which prints nothing).
    """
    # Value/symbol pairs from largest to smallest, including the
    # subtractive forms (CM, CD, XC, XL, IX, IV).
    pairs = [(1000, 'M'), (900, 'CM'), (500, 'D'), (400, 'CD'),
             (100, 'C'), (90, 'XC'), (50, 'L'), (40, 'XL'),
             (10, 'X'), (9, 'IX'), (5, 'V'), (4, 'IV'), (1, 'I')]
    parts = []
    for value, symbol in pairs:
        # divmod replaces the manual floor-divide / modulo / count-down loop.
        count, number = divmod(number, value)
        parts.append(symbol * count)
    result = ''.join(parts)
    print(result, end="")
    return result
# Interactive entry point: ask for a Roman numeral; the sentinel answer
# "No" switches direction and converts an integer to Roman instead.
RomanNum = str(input("What is the roman value you want to convert? (Enter No if you want to convert integer)"))
if RomanNum == "No":
    # NOTE(review): int(...) raises ValueError on non-numeric input — confirm
    # crash-on-bad-input is acceptable here.
    number = int(input("What is the integer value you want to convert?"))
    int_to_roman(number)
else:
    print(roman_to_int(RomanNum))
| [
"[email protected]"
] | |
02a68e29687ca3797d6fee75a4c95831ee725efb | 4c0f561bb14d9cbfd0d1abc0102e483fbe68c76c | /zaj1/zaj1/matrix_operations.py | 5c1cd37b43c4798b0597c6d555a2c7b00742bbdd | [] | no_license | kpodlaski/ProgramowanieZaawansowane2019 | 3330d125e01d07c97c4f29de022359136d2762c0 | dffcf31232456c824db7a13e8a3de333af338b6c | refs/heads/master | 2020-08-05T16:32:39.429269 | 2020-01-16T12:28:36 | 2020-01-16T12:28:36 | 212,615,995 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 286 | py | def mat_mul(A,B):
result = []
for i in range(0, len(A)):
result.append([])
for k in range(0,len(B[0])):
v = 0
for j in range(0, min(len(A[i]), len(B))):
v+=A[i][j]*B[j][k]
result[i].append(v)
return result
| [
"[email protected]"
] | |
ede882b0357518467e8fafd4d515bc3709aeefae | 8957f0b42ba945399a2eeb71f796c11c9eb35b06 | /stripped/unittest/test/test_case.py | 93c27dfa34122e6a665c21ec9259208123d54483 | [] | no_license | notro/tmp_CircuitPython_stdlib | 4de177cbb45b2209f07171c27f844c7d377dffc9 | 641727294039a9441c35ba1a1d22de403664b710 | refs/heads/master | 2020-03-27T18:26:33.544047 | 2019-02-15T20:49:34 | 2019-02-15T20:49:34 | 146,922,496 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 40,500 | py | import contextlib
import difflib
import re
import sys
from test import support
import unittest
from unittest.test.support import (
TestEquality, TestHashing, LoggingResult, LegacyLoggingResult,
ResultWithNoStartTestRunStopTestRun
)
class Test(object):
    "Keep these TestCase classes out of the main namespace"
    # Foo/Bar provide simple TestCase subclasses used by the equality and
    # hashing tests below; Bar extends Foo with an extra test method.
    class Foo(unittest.TestCase):
        def runTest(self): pass
        def test1(self): pass
    class Bar(Foo):
        def test2(self): pass
    class LoggingTestCase(unittest.TestCase):
        """A test case which logs its calls."""
        def __init__(self, events):
            # Always runs the single 'test' method; `events` is a shared
            # list that records the lifecycle calls in order.
            super(Test.LoggingTestCase, self).__init__('test')
            self.events = events
        def setUp(self):
            self.events.append('setUp')
        def test(self):
            self.events.append('test')
        def tearDown(self):
            self.events.append('tearDown')
class Test_TestCase(unittest.TestCase, TestEquality, TestHashing):
### Set up attributes used by inherited tests
################################################################
# Used by TestHashing.test_hash and TestEquality.test_eq
eq_pairs = [(Test.Foo('test1'), Test.Foo('test1'))]
# Used by TestEquality.test_ne
ne_pairs = [(Test.Foo('test1'), Test.Foo('runTest')),
(Test.Foo('test1'), Test.Bar('test1')),
(Test.Foo('test1'), Test.Bar('test2'))]
################################################################
### /Set up attributes used by inherited tests
# "class TestCase([methodName])"
# ...
# "Each instance of TestCase will run a single test method: the
# method named methodName."
# ...
# "methodName defaults to "runTest"."
#
# Make sure it really is optional, and that it defaults to the proper
# thing.
    def test_init__no_test_name(self):
        """methodName defaults to 'runTest' when omitted from TestCase()."""
        class Test(unittest.TestCase):
            # MyException is deliberately undefined: runTest is never
            # executed by this test, only located by name.
            def runTest(self): raise MyException()
            def test(self): pass
        self.assertEqual(Test().id()[-13:], '.Test.runTest')
        # test that TestCase can be instantiated with no args
        # primarily for use at the interactive interpreter
        test = unittest.TestCase()
        test.assertEqual(3, 3)
        with test.assertRaises(test.failureException):
            test.assertEqual(3, 2)
        # A bare TestCase has no runTest method, so run() must fail.
        with self.assertRaises(AttributeError):
            test.run()
# "class TestCase([methodName])"
# ...
# "Each instance of TestCase will run a single test method: the
# method named methodName."
def test_init__test_name__valid(self):
class Test(unittest.TestCase):
def runTest(self): raise MyException()
def test(self): pass
self.assertEqual(Test('test').id()[-10:], '.Test.test')
# "class TestCase([methodName])"
# ...
# "Each instance of TestCase will run a single test method: the
# method named methodName."
def test_init__test_name__invalid(self):
class Test(unittest.TestCase):
def runTest(self): raise MyException()
def test(self): pass
try:
Test('testfoo')
except ValueError:
pass
else:
self.fail("Failed to raise ValueError")
# "Return the number of tests represented by the this test object. For
# TestCase instances, this will always be 1"
def test_countTestCases(self):
class Foo(unittest.TestCase):
def test(self): pass
self.assertEqual(Foo('test').countTestCases(), 1)
# "Return the default type of test result object to be used to run this
# test. For TestCase instances, this will always be
# unittest.TestResult; subclasses of TestCase should
# override this as necessary."
def test_defaultTestResult(self):
class Foo(unittest.TestCase):
def runTest(self):
pass
result = Foo().defaultTestResult()
self.assertEqual(type(result), unittest.TestResult)
# "When a setUp() method is defined, the test runner will run that method
# prior to each test. Likewise, if a tearDown() method is defined, the
# test runner will invoke that method after each test. In the example,
# setUp() was used to create a fresh sequence for each test."
#
# Make sure the proper call order is maintained, even if setUp() raises
# an exception.
    def test_run_call_order__error_in_setUp(self):
        """An exception in setUp() skips test/tearDown but still reports
        addError and stopTest, in that order."""
        events = []
        result = LoggingResult(events)
        class Foo(Test.LoggingTestCase):
            def setUp(self):
                super(Foo, self).setUp()
                raise RuntimeError('raised by Foo.setUp')
        Foo(events).run(result)
        # 'test' and 'tearDown' must not appear: setUp failed first.
        expected = ['startTest', 'setUp', 'addError', 'stopTest']
        self.assertEqual(events, expected)
# "With a temporary result stopTestRun is called when setUp errors.
def test_run_call_order__error_in_setUp_default_result(self):
events = []
class Foo(Test.LoggingTestCase):
def defaultTestResult(self):
return LoggingResult(self.events)
def setUp(self):
super(Foo, self).setUp()
raise RuntimeError('raised by Foo.setUp')
Foo(events).run()
expected = ['startTestRun', 'startTest', 'setUp', 'addError',
'stopTest', 'stopTestRun']
self.assertEqual(events, expected)
# "When a setUp() method is defined, the test runner will run that method
# prior to each test. Likewise, if a tearDown() method is defined, the
# test runner will invoke that method after each test. In the example,
# setUp() was used to create a fresh sequence for each test."
#
# Make sure the proper call order is maintained, even if the test raises
# an error (as opposed to a failure).
def test_run_call_order__error_in_test(self):
events = []
result = LoggingResult(events)
class Foo(Test.LoggingTestCase):
def test(self):
super(Foo, self).test()
raise RuntimeError('raised by Foo.test')
expected = ['startTest', 'setUp', 'test', 'tearDown',
'addError', 'stopTest']
Foo(events).run(result)
self.assertEqual(events, expected)
# "With a default result, an error in the test still results in stopTestRun
# being called."
def test_run_call_order__error_in_test_default_result(self):
events = []
class Foo(Test.LoggingTestCase):
def defaultTestResult(self):
return LoggingResult(self.events)
def test(self):
super(Foo, self).test()
raise RuntimeError('raised by Foo.test')
expected = ['startTestRun', 'startTest', 'setUp', 'test',
'tearDown', 'addError', 'stopTest', 'stopTestRun']
Foo(events).run()
self.assertEqual(events, expected)
# "When a setUp() method is defined, the test runner will run that method
# prior to each test. Likewise, if a tearDown() method is defined, the
# test runner will invoke that method after each test. In the example,
# setUp() was used to create a fresh sequence for each test."
#
# Make sure the proper call order is maintained, even if the test signals
# a failure (as opposed to an error).
def test_run_call_order__failure_in_test(self):
events = []
result = LoggingResult(events)
class Foo(Test.LoggingTestCase):
def test(self):
super(Foo, self).test()
self.fail('raised by Foo.test')
expected = ['startTest', 'setUp', 'test', 'tearDown',
'addFailure', 'stopTest']
Foo(events).run(result)
self.assertEqual(events, expected)
# "When a test fails with a default result stopTestRun is still called."
def test_run_call_order__failure_in_test_default_result(self):
class Foo(Test.LoggingTestCase):
def defaultTestResult(self):
return LoggingResult(self.events)
def test(self):
super(Foo, self).test()
self.fail('raised by Foo.test')
expected = ['startTestRun', 'startTest', 'setUp', 'test',
'tearDown', 'addFailure', 'stopTest', 'stopTestRun']
events = []
Foo(events).run()
self.assertEqual(events, expected)
# "When a setUp() method is defined, the test runner will run that method
# prior to each test. Likewise, if a tearDown() method is defined, the
# test runner will invoke that method after each test. In the example,
# setUp() was used to create a fresh sequence for each test."
#
# Make sure the proper call order is maintained, even if tearDown() raises
# an exception.
def test_run_call_order__error_in_tearDown(self):
events = []
result = LoggingResult(events)
class Foo(Test.LoggingTestCase):
def tearDown(self):
super(Foo, self).tearDown()
raise RuntimeError('raised by Foo.tearDown')
Foo(events).run(result)
expected = ['startTest', 'setUp', 'test', 'tearDown', 'addError',
'stopTest']
self.assertEqual(events, expected)
# "When tearDown errors with a default result stopTestRun is still called."
def test_run_call_order__error_in_tearDown_default_result(self):
class Foo(Test.LoggingTestCase):
def defaultTestResult(self):
return LoggingResult(self.events)
def tearDown(self):
super(Foo, self).tearDown()
raise RuntimeError('raised by Foo.tearDown')
events = []
Foo(events).run()
expected = ['startTestRun', 'startTest', 'setUp', 'test', 'tearDown',
'addError', 'stopTest', 'stopTestRun']
self.assertEqual(events, expected)
# "TestCase.run() still works when the defaultTestResult is a TestResult
# that does not support startTestRun and stopTestRun.
def test_run_call_order_default_result(self):
class Foo(unittest.TestCase):
def defaultTestResult(self):
return ResultWithNoStartTestRunStopTestRun()
def test(self):
pass
Foo('test').run()
def _check_call_order__subtests(self, result, events, expected_events):
class Foo(Test.LoggingTestCase):
def test(self):
super(Foo, self).test()
for i in [1, 2, 3]:
with self.subTest(i=i):
if i == 1:
self.fail('failure')
for j in [2, 3]:
with self.subTest(j=j):
if i * j == 6:
raise RuntimeError('raised by Foo.test')
1 / 0
# Order is the following:
# i=1 => subtest failure
# i=2, j=2 => subtest success
# i=2, j=3 => subtest error
# i=3, j=2 => subtest error
# i=3, j=3 => subtest success
# toplevel => error
Foo(events).run(result)
self.assertEqual(events, expected_events)
def test_run_call_order__subtests(self):
events = []
result = LoggingResult(events)
expected = ['startTest', 'setUp', 'test', 'tearDown',
'addSubTestFailure', 'addSubTestSuccess',
'addSubTestFailure', 'addSubTestFailure',
'addSubTestSuccess', 'addError', 'stopTest']
self._check_call_order__subtests(result, events, expected)
def _check_call_order__subtests_success(self, result, events, expected_events):
class Foo(Test.LoggingTestCase):
def test(self):
super(Foo, self).test()
for i in [1, 2]:
with self.subTest(i=i):
for j in [2, 3]:
with self.subTest(j=j):
pass
Foo(events).run(result)
self.assertEqual(events, expected_events)
def test_run_call_order__subtests_success(self):
events = []
result = LoggingResult(events)
# The 6 subtest successes are individually recorded, in addition
# to the whole test success.
expected = (['startTest', 'setUp', 'test', 'tearDown']
+ 6 * ['addSubTestSuccess']
+ ['addSuccess', 'stopTest'])
self._check_call_order__subtests_success(result, events, expected)
def test_run_call_order__subtests_failfast(self):
events = []
result = LoggingResult(events)
result.failfast = True
class Foo(Test.LoggingTestCase):
def test(self):
super(Foo, self).test()
with self.subTest(i=1):
self.fail('failure')
with self.subTest(i=2):
self.fail('failure')
self.fail('failure')
expected = ['startTest', 'setUp', 'test', 'tearDown',
'addSubTestFailure', 'stopTest']
Foo(events).run(result)
self.assertEqual(events, expected)
def test_subtests_failfast(self):
# Ensure proper test flow with subtests and failfast (issue #22894)
events = []
class Foo(unittest.TestCase):
def test_a(self):
with self.subTest():
events.append('a1')
events.append('a2')
def test_b(self):
with self.subTest():
events.append('b1')
with self.subTest():
self.fail('failure')
events.append('b2')
def test_c(self):
events.append('c')
result = unittest.TestResult()
result.failfast = True
suite = unittest.makeSuite(Foo)
suite.run(result)
expected = ['a1', 'a2', 'b1']
self.assertEqual(events, expected)
# "This class attribute gives the exception raised by the test() method.
# If a test framework needs to use a specialized exception, possibly to
# carry additional information, it must subclass this exception in
# order to ``play fair'' with the framework. The initial value of this
# attribute is AssertionError"
def test_failureException__default(self):
class Foo(unittest.TestCase):
def test(self):
pass
self.assertIs(Foo('test').failureException, AssertionError)
# "This class attribute gives the exception raised by the test() method.
# If a test framework needs to use a specialized exception, possibly to
# carry additional information, it must subclass this exception in
# order to ``play fair'' with the framework."
#
# Make sure TestCase.run() respects the designated failureException
def test_failureException__subclassing__explicit_raise(self):
events = []
result = LoggingResult(events)
class Foo(unittest.TestCase):
def test(self):
raise RuntimeError()
failureException = RuntimeError
self.assertIs(Foo('test').failureException, RuntimeError)
Foo('test').run(result)
expected = ['startTest', 'addFailure', 'stopTest']
self.assertEqual(events, expected)
# "This class attribute gives the exception raised by the test() method.
# If a test framework needs to use a specialized exception, possibly to
# carry additional information, it must subclass this exception in
# order to ``play fair'' with the framework."
#
# Make sure TestCase.run() respects the designated failureException
def test_failureException__subclassing__implicit_raise(self):
events = []
result = LoggingResult(events)
class Foo(unittest.TestCase):
def test(self):
self.fail("foo")
failureException = RuntimeError
self.assertIs(Foo('test').failureException, RuntimeError)
Foo('test').run(result)
expected = ['startTest', 'addFailure', 'stopTest']
self.assertEqual(events, expected)
# "The default implementation does nothing."
def test_setUp(self):
class Foo(unittest.TestCase):
def runTest(self):
pass
# ... and nothing should happen
Foo().setUp()
# "The default implementation does nothing."
def test_tearDown(self):
class Foo(unittest.TestCase):
def runTest(self):
pass
# ... and nothing should happen
Foo().tearDown()
# "Return a string identifying the specific test case."
#
# Because of the vague nature of the docs, I'm not going to lock this
# test down too much. Really all that can be asserted is that the id()
# will be a string (either 8-byte or unicode -- again, because the docs
# just say "string")
def test_id(self):
class Foo(unittest.TestCase):
def runTest(self):
pass
self.assertIsInstance(Foo().id(), str)
# "If result is omitted or None, a temporary result object is created,
# used, and is made available to the caller. As TestCase owns the
# temporary result startTestRun and stopTestRun are called.
def test_run__uses_defaultTestResult(self):
events = []
defaultResult = LoggingResult(events)
class Foo(unittest.TestCase):
def test(self):
events.append('test')
def defaultTestResult(self):
return defaultResult
# Make run() find a result object on its own
result = Foo('test').run()
self.assertIs(result, defaultResult)
expected = ['startTestRun', 'startTest', 'test', 'addSuccess',
'stopTest', 'stopTestRun']
self.assertEqual(events, expected)
# "The result object is returned to run's caller"
def test_run__returns_given_result(self):
class Foo(unittest.TestCase):
def test(self):
pass
result = unittest.TestResult()
retval = Foo('test').run(result)
self.assertIs(retval, result)
# "The same effect [as method run] may be had by simply calling the
# TestCase instance."
def test_call__invoking_an_instance_delegates_to_run(self):
resultIn = unittest.TestResult()
resultOut = unittest.TestResult()
class Foo(unittest.TestCase):
def test(self):
pass
def run(self, result):
self.assertIs(result, resultIn)
return resultOut
retval = Foo('test')(resultIn)
self.assertIs(retval, resultOut)
def testShortDescriptionWithoutDocstring(self):
self.assertIsNone(self.shortDescription())
def testAddTypeEqualityFunc(self):
class SadSnake(object):
"""Dummy class for test_addTypeEqualityFunc."""
s1, s2 = SadSnake(), SadSnake()
self.assertFalse(s1 == s2)
def AllSnakesCreatedEqual(a, b, msg=None):
return type(a) == type(b) == SadSnake
self.addTypeEqualityFunc(SadSnake, AllSnakesCreatedEqual)
self.assertEqual(s1, s2)
# No this doesn't clean up and remove the SadSnake equality func
# from this TestCase instance but since its a local nothing else
# will ever notice that.
def testAssertIs(self):
thing = object()
self.assertIs(thing, thing)
self.assertRaises(self.failureException, self.assertIs, thing, object())
def testAssertIsNot(self):
thing = object()
self.assertIsNot(thing, object())
self.assertRaises(self.failureException, self.assertIsNot, thing, thing)
def testAssertIsInstance(self):
thing = []
self.assertIsInstance(thing, list)
self.assertRaises(self.failureException, self.assertIsInstance,
thing, dict)
def testAssertNotIsInstance(self):
thing = []
self.assertNotIsInstance(thing, dict)
self.assertRaises(self.failureException, self.assertNotIsInstance,
thing, list)
    def testAssertIn(self):
        """assertIn/assertNotIn work on strings, lists, and dicts (keys),
        and raise failureException on the opposite condition."""
        animals = {'monkey': 'banana', 'cow': 'grass', 'seal': 'fish'}
        self.assertIn('a', 'abc')
        self.assertIn(2, [1, 2, 3])
        # Dict membership tests keys, not values.
        self.assertIn('monkey', animals)
        self.assertNotIn('d', 'abc')
        self.assertNotIn(0, [1, 2, 3])
        self.assertNotIn('otter', animals)
        self.assertRaises(self.failureException, self.assertIn, 'x', 'abc')
        self.assertRaises(self.failureException, self.assertIn, 4, [1, 2, 3])
        self.assertRaises(self.failureException, self.assertIn, 'elephant',
                          animals)
        self.assertRaises(self.failureException, self.assertNotIn, 'c', 'abc')
        self.assertRaises(self.failureException, self.assertNotIn, 1, [1, 2, 3])
        self.assertRaises(self.failureException, self.assertNotIn, 'cow',
                          animals)
# Drop test of deprecated functions ###
def testAssertEqual(self):
equal_pairs = [
((), ()),
({}, {}),
([], []),
(set(), set()),
(frozenset(), frozenset())]
for a, b in equal_pairs:
# This mess of try excepts is to test the assertEqual behavior
# itself.
try:
self.assertEqual(a, b)
except self.failureException:
self.fail('assertEqual(%r, %r) failed' % (a, b))
try:
self.assertEqual(a, b, msg='foo')
except self.failureException:
self.fail('assertEqual(%r, %r) with msg= failed' % (a, b))
try:
self.assertEqual(a, b, 'foo')
except self.failureException:
self.fail('assertEqual(%r, %r) with third parameter failed' %
(a, b))
unequal_pairs = [
((), []),
({}, set()),
(set([4,1]), frozenset([4,2])),
(frozenset([4,5]), set([2,3])),
(set([3,4]), set([5,4]))]
for a, b in unequal_pairs:
self.assertRaises(self.failureException, self.assertEqual, a, b)
self.assertRaises(self.failureException, self.assertEqual, a, b,
'foo')
self.assertRaises(self.failureException, self.assertEqual, a, b,
msg='foo')
def testEquality(self):
self.assertListEqual([], [])
self.assertTupleEqual((), ())
self.assertSequenceEqual([], ())
a = [0, 'a', []]
b = []
self.assertRaises(unittest.TestCase.failureException,
self.assertListEqual, a, b)
self.assertRaises(unittest.TestCase.failureException,
self.assertListEqual, tuple(a), tuple(b))
self.assertRaises(unittest.TestCase.failureException,
self.assertSequenceEqual, a, tuple(b))
b.extend(a)
self.assertListEqual(a, b)
self.assertTupleEqual(tuple(a), tuple(b))
self.assertSequenceEqual(a, tuple(b))
self.assertSequenceEqual(tuple(a), b)
self.assertRaises(self.failureException, self.assertListEqual,
a, tuple(b))
self.assertRaises(self.failureException, self.assertTupleEqual,
tuple(a), b)
self.assertRaises(self.failureException, self.assertListEqual, None, b)
self.assertRaises(self.failureException, self.assertTupleEqual, None,
tuple(b))
self.assertRaises(self.failureException, self.assertSequenceEqual,
None, tuple(b))
self.assertRaises(self.failureException, self.assertListEqual, 1, 1)
self.assertRaises(self.failureException, self.assertTupleEqual, 1, 1)
self.assertRaises(self.failureException, self.assertSequenceEqual,
1, 1)
    def testTruncateMessage(self):
        """maxDiff controls truncation in TestCase._truncateMessage:
        diffs longer than maxDiff are replaced by a DIFF_OMITTED marker;
        maxDiff=None disables truncation entirely."""
        self.maxDiff = 1
        message = self._truncateMessage('foo', 'bar')
        omitted = unittest.case.DIFF_OMITTED % len('bar')
        self.assertEqual(message, 'foo' + omitted)
        self.maxDiff = None
        message = self._truncateMessage('foo', 'bar')
        self.assertEqual(message, 'foobar')
        # A diff that fits within maxDiff is appended untruncated.
        self.maxDiff = 4
        message = self._truncateMessage('foo', 'bar')
        self.assertEqual(message, 'foobar')
def testAssertMultiLineEqualTruncates(self):
test = unittest.TestCase('assertEqual')
def truncate(msg, diff):
return 'foo'
test._truncateMessage = truncate
try:
test.assertMultiLineEqual('foo', 'bar')
except self.failureException as e:
self.assertEqual(str(e), 'foo')
else:
self.fail('assertMultiLineEqual did not fail')
def testAssertEqual_diffThreshold(self):
# check threshold value
self.assertEqual(self._diffThreshold, 2**16)
# disable madDiff to get diff markers
self.maxDiff = None
# set a lower threshold value and add a cleanup to restore it
old_threshold = self._diffThreshold
self._diffThreshold = 2**5
self.addCleanup(lambda: setattr(self, '_diffThreshold', old_threshold))
# under the threshold: diff marker (^) in error message
s = 'x' * (2**4)
with self.assertRaises(self.failureException) as cm:
self.assertEqual(s + 'a', s + 'b')
self.assertIn('^', str(cm.exception))
self.assertEqual(s + 'a', s + 'a')
# over the threshold: diff not used and marker (^) not in error message
s = 'x' * (2**6)
# if the path that uses difflib is taken, _truncateMessage will be
# called -- replace it with explodingTruncation to verify that this
# doesn't happen
def explodingTruncation(message, diff):
raise SystemError('this should not be raised')
old_truncate = self._truncateMessage
self._truncateMessage = explodingTruncation
self.addCleanup(lambda: setattr(self, '_truncateMessage', old_truncate))
s1, s2 = s + 'a', s + 'b'
with self.assertRaises(self.failureException) as cm:
self.assertEqual(s1, s2)
self.assertNotIn('^', str(cm.exception))
self.assertEqual(str(cm.exception), '%r != %r' % (s1, s2))
self.assertEqual(s + 'a', s + 'a')
def testAssertEqual_shorten(self):
# set a lower threshold value and add a cleanup to restore it
old_threshold = self._diffThreshold
self._diffThreshold = 0
self.addCleanup(lambda: setattr(self, '_diffThreshold', old_threshold))
s = 'x' * 100
s1, s2 = s + 'a', s + 'b'
with self.assertRaises(self.failureException) as cm:
self.assertEqual(s1, s2)
c = 'xxxx[35 chars]' + 'x' * 61
self.assertEqual(str(cm.exception), "'%sa' != '%sb'" % (c, c))
self.assertEqual(s + 'a', s + 'a')
p = 'y' * 50
s1, s2 = s + 'a' + p, s + 'b' + p
with self.assertRaises(self.failureException) as cm:
self.assertEqual(s1, s2)
c = 'xxxx[85 chars]xxxxxxxxxxx'
self.assertEqual(str(cm.exception), "'%sa%s' != '%sb%s'" % (c, p, c, p))
p = 'y' * 100
s1, s2 = s + 'a' + p, s + 'b' + p
with self.assertRaises(self.failureException) as cm:
self.assertEqual(s1, s2)
c = 'xxxx[91 chars]xxxxx'
d = 'y' * 40 + '[56 chars]yyyy'
self.assertEqual(str(cm.exception), "'%sa%s' != '%sb%s'" % (c, d, c, d))
# assertCountEqual is not implemented yet ###
def testAssertSetEqual(self):
set1 = set()
set2 = set()
self.assertSetEqual(set1, set2)
self.assertRaises(self.failureException, self.assertSetEqual, None, set2)
self.assertRaises(self.failureException, self.assertSetEqual, [], set2)
self.assertRaises(self.failureException, self.assertSetEqual, set1, None)
self.assertRaises(self.failureException, self.assertSetEqual, set1, [])
set1 = set(['a'])
set2 = set()
self.assertRaises(self.failureException, self.assertSetEqual, set1, set2)
set1 = set(['a'])
set2 = set(['a'])
self.assertSetEqual(set1, set2)
set1 = set(['a'])
set2 = set(['a', 'b'])
self.assertRaises(self.failureException, self.assertSetEqual, set1, set2)
set1 = set(['a'])
set2 = frozenset(['a', 'b'])
self.assertRaises(self.failureException, self.assertSetEqual, set1, set2)
set1 = set(['a', 'b'])
set2 = frozenset(['a', 'b'])
self.assertSetEqual(set1, set2)
set1 = set()
set2 = "foo"
self.assertRaises(self.failureException, self.assertSetEqual, set1, set2)
self.assertRaises(self.failureException, self.assertSetEqual, set2, set1)
# make sure any string formatting is tuple-safe
set1 = set([(0, 1), (2, 3)])
set2 = set([(4, 5)])
self.assertRaises(self.failureException, self.assertSetEqual, set1, set2)
def testInequality(self):
# Try ints
self.assertGreater(2, 1)
self.assertGreaterEqual(2, 1)
self.assertGreaterEqual(1, 1)
self.assertLess(1, 2)
self.assertLessEqual(1, 2)
self.assertLessEqual(1, 1)
self.assertRaises(self.failureException, self.assertGreater, 1, 2)
self.assertRaises(self.failureException, self.assertGreater, 1, 1)
self.assertRaises(self.failureException, self.assertGreaterEqual, 1, 2)
self.assertRaises(self.failureException, self.assertLess, 2, 1)
self.assertRaises(self.failureException, self.assertLess, 1, 1)
self.assertRaises(self.failureException, self.assertLessEqual, 2, 1)
# Try Floats
self.assertGreater(1.1, 1.0)
self.assertGreaterEqual(1.1, 1.0)
self.assertGreaterEqual(1.0, 1.0)
self.assertLess(1.0, 1.1)
self.assertLessEqual(1.0, 1.1)
self.assertLessEqual(1.0, 1.0)
self.assertRaises(self.failureException, self.assertGreater, 1.0, 1.1)
self.assertRaises(self.failureException, self.assertGreater, 1.0, 1.0)
self.assertRaises(self.failureException, self.assertGreaterEqual, 1.0, 1.1)
self.assertRaises(self.failureException, self.assertLess, 1.1, 1.0)
self.assertRaises(self.failureException, self.assertLess, 1.0, 1.0)
self.assertRaises(self.failureException, self.assertLessEqual, 1.1, 1.0)
# Try Strings
self.assertGreater('bug', 'ant')
self.assertGreaterEqual('bug', 'ant')
self.assertGreaterEqual('ant', 'ant')
self.assertLess('ant', 'bug')
self.assertLessEqual('ant', 'bug')
self.assertLessEqual('ant', 'ant')
self.assertRaises(self.failureException, self.assertGreater, 'ant', 'bug')
self.assertRaises(self.failureException, self.assertGreater, 'ant', 'ant')
self.assertRaises(self.failureException, self.assertGreaterEqual, 'ant', 'bug')
self.assertRaises(self.failureException, self.assertLess, 'bug', 'ant')
self.assertRaises(self.failureException, self.assertLess, 'ant', 'ant')
self.assertRaises(self.failureException, self.assertLessEqual, 'bug', 'ant')
# Try bytes
self.assertGreater(b'bug', b'ant')
self.assertGreaterEqual(b'bug', b'ant')
self.assertGreaterEqual(b'ant', b'ant')
self.assertLess(b'ant', b'bug')
self.assertLessEqual(b'ant', b'bug')
self.assertLessEqual(b'ant', b'ant')
self.assertRaises(self.failureException, self.assertGreater, b'ant', b'bug')
self.assertRaises(self.failureException, self.assertGreater, b'ant', b'ant')
self.assertRaises(self.failureException, self.assertGreaterEqual, b'ant',
b'bug')
self.assertRaises(self.failureException, self.assertLess, b'bug', b'ant')
self.assertRaises(self.failureException, self.assertLess, b'ant', b'ant')
self.assertRaises(self.failureException, self.assertLessEqual, b'bug', b'ant')
def testAssertMultiLineEqual(self):
sample_text = """\
http://www.python.org/doc/2.3/lib/module-unittest.html
test case
A test case is the smallest unit of testing. [...]
"""
revised_sample_text = """\
http://www.python.org/doc/2.4.1/lib/module-unittest.html
test case
A test case is the smallest unit of testing. [...] You may provide your
own implementation that does not subclass from TestCase, of course.
"""
sample_text_error = """\
- http://www.python.org/doc/2.3/lib/module-unittest.html
? ^
+ http://www.python.org/doc/2.4.1/lib/module-unittest.html
? ^^^
test case
- A test case is the smallest unit of testing. [...]
+ A test case is the smallest unit of testing. [...] You may provide your
? +++++++++++++++++++++
+ own implementation that does not subclass from TestCase, of course.
"""
self.maxDiff = None
try:
self.assertMultiLineEqual(sample_text, revised_sample_text)
except self.failureException as e:
# need to remove the first line of the error message
error = str(e).split('\n', 1)[1]
# no fair testing ourself with ourself, and assertEqual is used for strings
# so can't use assertEqual either. Just use assertTrue.
self.assertTrue(sample_text_error == error)
def testAsertEqualSingleLine(self):
sample_text = "laden swallows fly slowly"
revised_sample_text = "unladen swallows fly quickly"
sample_text_error = """\
- laden swallows fly slowly
? ^^^^
+ unladen swallows fly quickly
? ++ ^^^^^
"""
try:
self.assertEqual(sample_text, revised_sample_text)
except self.failureException as e:
error = str(e).split('\n', 1)[1]
self.assertTrue(sample_text_error == error)
def testAssertIsNone(self):
self.assertIsNone(None)
self.assertRaises(self.failureException, self.assertIsNone, False)
self.assertIsNotNone('DjZoPloGears on Rails')
self.assertRaises(self.failureException, self.assertIsNotNone, None)
def testAssertRegex(self):
self.assertRegex('asdfabasdf', r'ab+')
self.assertRaises(self.failureException, self.assertRegex,
'saaas', r'aaaa')
def testAssertRaisesCallable(self):
class ExceptionMock(Exception):
pass
def Stub():
raise ExceptionMock('We expect')
self.assertRaises(ExceptionMock, Stub)
# A tuple of exception classes is accepted
self.assertRaises((ValueError, ExceptionMock), Stub)
# Failure when no exception is raised
with self.assertRaises(self.failureException):
self.assertRaises(ExceptionMock, lambda: 0)
# Failure when another exception is raised
with self.assertRaises(ExceptionMock):
self.assertRaises(ValueError, Stub)
def testAssertRaisesContext(self):
class ExceptionMock(Exception):
pass
def Stub():
raise ExceptionMock('We expect')
with self.assertRaises(ExceptionMock):
Stub()
# A tuple of exception classes is accepted
with self.assertRaises((ValueError, ExceptionMock)) as cm:
Stub()
# The context manager exposes caught exception
self.assertIsInstance(cm.exception, ExceptionMock)
self.assertEqual(cm.exception.args[0], 'We expect')
# Failure when no exception is raised
with self.assertRaises(self.failureException):
with self.assertRaises(ExceptionMock):
pass
# Failure when another exception is raised
with self.assertRaises(ExceptionMock):
self.assertRaises(ValueError, Stub)
def testAssertRaisesRegex(self):
class ExceptionMock(Exception):
pass
def Stub():
raise ExceptionMock('We expect')
self.assertRaisesRegex(ExceptionMock, re.compile('expect$'), Stub)
self.assertRaisesRegex(ExceptionMock, 'expect$', Stub)
def testAssertNotRaisesRegex(self):
self.assertRaisesRegex(
self.failureException, '^Exception not raised by <lambda>$',
self.assertRaisesRegex, Exception, re.compile('x'),
lambda: None)
self.assertRaisesRegex(
self.failureException, '^Exception not raised by <lambda>$',
self.assertRaisesRegex, Exception, 'x',
lambda: None)
def testAssertRaisesRegexInvalidRegex(self):
# Issue 20145.
class MyExc(Exception):
pass
self.assertRaises(TypeError, self.assertRaisesRegex, MyExc, lambda: True)
def testAssertRaisesRegexMismatch(self):
def Stub():
raise Exception('Unexpected')
self.assertRaisesRegex(
self.failureException,
r'"\^Expected\$" does not match "Unexpected"',
self.assertRaisesRegex, Exception, '^Expected$',
Stub)
    def testAssertRaisesExcValue(self):
        """The context object returned by assertRaises keeps the exact
        exception instance (and its args) that was raised."""
        class ExceptionMock(Exception):
            pass
        def Stub(foo):
            raise ExceptionMock(foo)
        v = "particular value"
        ctx = self.assertRaises(ExceptionMock)
        with ctx:
            Stub(v)
        e = ctx.exception
        self.assertIsInstance(e, ExceptionMock)
        self.assertEqual(e.args[0], v)
    def testKeyboardInterrupt(self):
        """KeyboardInterrupt propagates out of run() no matter which phase
        raises it: the test body, setUp, tearDown, or a cleanup function."""
        def _raise(self=None):
            raise KeyboardInterrupt
        def nothing(self):
            pass
        class Test1(unittest.TestCase):
            test_something = _raise
        class Test2(unittest.TestCase):
            setUp = _raise
            test_something = nothing
        class Test3(unittest.TestCase):
            test_something = nothing
            tearDown = _raise
        class Test4(unittest.TestCase):
            def test_something(self):
                self.addCleanup(_raise)
        for klass in (Test1, Test2, Test3, Test4):
            with self.assertRaises(KeyboardInterrupt):
                klass('test_something').run()
    def testSkippingEverywhere(self):
        """SkipTest raised from the body, setUp, tearDown, or a cleanup is
        recorded as exactly one skip, and the test still counts as run."""
        def _skip(self=None):
            raise unittest.SkipTest('some reason')
        def nothing(self):
            pass
        class Test1(unittest.TestCase):
            test_something = _skip
        class Test2(unittest.TestCase):
            setUp = _skip
            test_something = nothing
        class Test3(unittest.TestCase):
            test_something = nothing
            tearDown = _skip
        class Test4(unittest.TestCase):
            def test_something(self):
                self.addCleanup(_skip)
        for klass in (Test1, Test2, Test3, Test4):
            result = unittest.TestResult()
            klass('test_something').run(result)
            self.assertEqual(len(result.skipped), 1)
            self.assertEqual(result.testsRun, 1)
    def testSystemExit(self):
        """SystemExit raised from any phase is recorded as an error (not a
        crash), and the test still counts as run."""
        def _raise(self=None):
            raise SystemExit
        def nothing(self):
            pass
        class Test1(unittest.TestCase):
            test_something = _raise
        class Test2(unittest.TestCase):
            setUp = _raise
            test_something = nothing
        class Test3(unittest.TestCase):
            test_something = nothing
            tearDown = _raise
        class Test4(unittest.TestCase):
            def test_something(self):
                self.addCleanup(_raise)
        for klass in (Test1, Test2, Test3, Test4):
            result = unittest.TestResult()
            klass('test_something').run(result)
            self.assertEqual(len(result.errors), 1)
            self.assertEqual(result.testsRun, 1)
| [
"[email protected]"
] | |
7d1bb4cb885599e99d52f02888cc43d65ec1b5a8 | 6e47be4e22ab76a8ddd7e18c89f5dc4f18539744 | /venv/openshift/lib/python3.6/site-packages/kubernetes/client/models/v1beta1_self_subject_rules_review_spec.py | 157a9fb9f02a1815a9cb0255fd75f425a7aca9a6 | [] | no_license | georgi-mobi/redhat_ocp4.5_training | 21236bb19d04a469c95a8f135188d3d1ae473764 | 2ccaa90e40dbbf8a18f668a5a7b0d5bfaa1db225 | refs/heads/main | 2023-03-30T10:47:08.687074 | 2021-04-01T05:25:49 | 2021-04-01T05:25:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,294 | py | # coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.12.11
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class V1beta1SelfSubjectRulesReviewSpec(object):
    """Swagger-generated Kubernetes API model (SelfSubjectRulesReview spec).

    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.

    Attributes:
      swagger_types (dict): key is attribute name, value is attribute type.
      attribute_map (dict): key is attribute name, value is JSON key in the
                            API definition.
    """
    swagger_types = {
        'namespace': 'str'
    }
    attribute_map = {
        'namespace': 'namespace'
    }
    def __init__(self, namespace=None):
        """
        V1beta1SelfSubjectRulesReviewSpec - a model defined in Swagger

        :param namespace: namespace to evaluate rules for (required by the API).
        """
        self._namespace = None
        self.discriminator = None
        # Only assign through the property setter when a value was given.
        if namespace is not None:
          self.namespace = namespace
    @property
    def namespace(self):
        """
        Gets the namespace of this V1beta1SelfSubjectRulesReviewSpec.
        Namespace to evaluate rules for. Required.
        :return: The namespace of this V1beta1SelfSubjectRulesReviewSpec.
        :rtype: str
        """
        return self._namespace
    @namespace.setter
    def namespace(self, namespace):
        """
        Sets the namespace of this V1beta1SelfSubjectRulesReviewSpec.
        Namespace to evaluate rules for. Required.
        :param namespace: The namespace of this V1beta1SelfSubjectRulesReviewSpec.
        :type: str
        """
        self._namespace = namespace
    def to_dict(self):
        """
        Returns the model properties as a dict, recursively serializing any
        nested model objects (anything exposing a to_dict method).
        """
        result = {}
        # iteritems comes from the module-level six import.
        for attr, _ in iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        return result
    def to_str(self):
        """
        Returns the string representation of the model
        """
        return pformat(self.to_dict())
    def __repr__(self):
        """
        For `print` and `pprint`
        """
        return self.to_str()
    def __eq__(self, other):
        """
        Returns true if both objects are equal
        """
        if not isinstance(other, V1beta1SelfSubjectRulesReviewSpec):
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """
        Returns true if both objects are not equal
        """
        return not self == other
| [
"[email protected]"
] | |
25185a441791aa03ebc012b4240d85afbab4ee6f | 08ed86902b445bc5dbd4af72a9ace6f9af8efc55 | /d1d2.py | 5dcb2d16a4d2d409a5653e92a82010d7d7e48d11 | [] | no_license | lokeshkumar9600/python_train | 93e0a2713a1b939a07b3714b904681a274ce8364 | a5fddb167792d80f5e69b63e162650d287f8226a | refs/heads/master | 2023-02-04T15:38:30.626995 | 2020-12-27T17:25:28 | 2020-12-27T17:25:28 | 306,393,641 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 255 | py | a = int(input())
b = int(input())
c = int(input())
n = int(input())
arr = [a,b,c]
d1 = arr[1]-arr[0]
d2 = arr[2]-arr[1]
for x in range(4,n+1):
if x%2 == 0 :
arr.append(arr[-1]+d1)
else:
arr.append(arr[-1]+d2)
print(arr[-1])
| [
"[email protected]"
] | |
303395ed9eb61d60563d0f1d1fe5871fa1f8a13e | f70dd0cc11d7760852eaedcb037f91cf255838d3 | /datasets/Part 3 - Classification/Section 16 - Support Vector Machine (SVM)/svm_sbn.py | 5613eb69988d5eceb85c730113ca1e18c165ec06 | [
"MIT"
] | permissive | jsguerrero/machinelearning-az | 47aadb190e55ba2eea5d1617152e8528cbb3dc47 | dde29500db2df576c965d7e228a94d166ea31e5c | refs/heads/master | 2020-12-07T04:53:05.249776 | 2020-06-29T17:33:09 | 2020-06-29T17:33:09 | 275,039,017 | 0 | 0 | null | 2020-06-26T00:02:59 | 2020-06-26T00:02:58 | null | UTF-8 | Python | false | false | 3,982 | py | # -*- coding: utf-8 -*-
"""
Created on Fri Jan 17 09:42:08 2020
@author: jguerrero
"""
# Maquina de soporte vectorial SVM
# Como importar las librerias
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# Importar el dataset
dataset = pd.read_csv('Social_Network_ads.csv')
X = dataset.iloc[:, 2:4].values
y = dataset.iloc[:, 4].values
# Dividir el dataset en entrenamiento y prueba
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.25, random_state = 0) # random_state = 0 SOLO ES PARA QUE SIEMPRE SELECCIONE LOS MISMOS ELEMENTOS
# Escalado de variables
# Estandarizacion
from sklearn.preprocessing import StandardScaler
sc_X = StandardScaler()
# Escalar los datos de train y test con la misma formula
X_train = sc_X.fit_transform(X_train)
X_test = sc_X.transform(X_test)
# Ajustar la clasificacion con el conjunto de entrenamiento
from sklearn.svm import SVC
classifier = SVC(kernel = 'linear',
random_state = 0)
classifier.fit(X_train, y_train)
# Prediccion de la clasificacion con el conjunto de prueba
y_pred = classifier.predict(X_test)
# Evaluar resultados con matriz de confusion
from sklearn.metrics import confusion_matrix
conf_matrix = confusion_matrix(y_test, y_pred)
# Visualizacion de los resultados
from matplotlib.colors import ListedColormap
#X_set, y_set = X_train, y_train
X_set, y_set = X_test, y_test
X1, X2 = np.meshgrid(np.arange(start = X_set[:, 0].min() - 1, stop = X_set[:, 0].max() + 1, step = 0.01),
np.arange(start = X_set[:, 1].min() - 1, stop = X_set[:, 1].max() + 1, step = 0.01))
plt.contourf(X1, X2, classifier.predict(np.array([X1.ravel(), X2.ravel()]).T).reshape(X1.shape),
alpha = 0.75, cmap = ListedColormap(('red', 'green')))
plt.xlim(X1.min(), X1.max())
plt.ylim(X2.min(), X2.max())
for i, j in enumerate(np.unique(y_set)):
plt.scatter(X_set[y_set == j, 0], X_set[y_set == j, 1],
c = ListedColormap(('red', 'green'))(i), label = j)
plt.title('SVM (Conjunto de Entrenamiento)')
plt.xlabel('Edad')
plt.ylabel('Sueldo Estimado')
plt.legend()
plt.show()
from matplotlib.colors import ListedColormap
X_set, y_set = X_train, y_train
#X_set, y_set = X_test, y_test
X1_n, X2_n = np.meshgrid(np.arange(start = X_set[:, 0].min(),
stop = X_set[:, 0].max() + 1,
step = (abs(X_set[:, 0].min()) + abs(X_set[:, 0].max() + 1)) / 500),
#step = 1),
np.arange(start = X_set[:, 1].min(),
stop = X_set[:, 1].max() + 1,
step = (abs(X_set[:, 1].min()) + abs(X_set[:, 1].max() + 1)) / 500))
#step = 10000))
#X_set, y_set = sc_X.inverse_transform(X_train), y_train
X_set, y_set = sc_X.inverse_transform(X_test), y_test
X1, X2 = np.meshgrid(np.arange(start = X_set[:, 0].min(),
stop = X_set[:, 0].max() + 10,
step = (abs(X_set[:, 0].max() + 10 - abs(X_set[:, 0].min())) / 500)),
np.arange(start = X_set[:, 1].min(),
stop = X_set[:, 1].max() + 10000,
#step = 0.01))
step = (abs(X_set[:, 1].max() + 10000 - abs(X_set[:, 1].min())) / 500)))
plt.contourf(X1,
X2,
classifier.predict(np.array([X1_n.ravel(), X2_n.ravel()]).T).reshape(X1_n.shape),
alpha = 0.75,
cmap = ListedColormap(('red', 'green')))
plt.xlim(X1.min(), X1.max())
plt.ylim(X2.min(), X2.max())
for i, j in enumerate(np.unique(y_set)):
plt.scatter(X_set[y_set == j, 0],
X_set[y_set == j, 1],
c = ListedColormap(('red', 'green'))(i), label = j)
plt.title('SVM (Conjunto de Entrenamiento)')
plt.xlabel('Edad')
plt.ylabel('Sueldo Estimado')
plt.legend()
plt.show()
| [
"[email protected]"
] | |
e9e77b70071eb7fae620d97ad7ed41f589c9d9bb | abb535f209a6ecda90b7c12243e37b461c787650 | /engblog/venv/Scripts/django-admin.py | e1440d313b1ce9296caa925042d0f80a415f21d8 | [
"MIT"
] | permissive | eltonrobaina/SITE_EngColab_project | 2a20657a56ce8d42bb94c2231a0230632a87f684 | f83ade1b495e7e7df5cacd3cdc2faeb5a52d4260 | refs/heads/main | 2023-03-09T15:09:10.239549 | 2021-02-20T15:46:24 | 2021-02-20T15:46:24 | 340,639,781 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 718 | py | #!D:\REPOSITORIO\MeusProjetos\SITE_EngColab_project\engblog\venv\Scripts\python.exe
# When the django-admin.py deprecation ends, remove this script.
import warnings
from django.core import management
try:
from django.utils.deprecation import RemovedInDjango40Warning
except ImportError:
raise ImportError(
'django-admin.py was deprecated in Django 3.1 and removed in Django '
'4.0. Please manually remove this script from your virtual environment '
'and use django-admin instead.'
)
if __name__ == "__main__":
warnings.warn(
'django-admin.py is deprecated in favor of django-admin.',
RemovedInDjango40Warning,
)
management.execute_from_command_line()
| [
"[email protected]"
] | |
9b6ed4e3f2406725844e14e5b798d501958159a4 | ef5020bb149898dcc2142c5fdc10bbdb5ced477e | /event_extractor/train/eval.py | 269dfe0e6e099a80d0da26b2c7b86b0953126f94 | [
"MIT"
] | permissive | dhzy0425/event_extract_master | 3fbbbe0f667ef697a6a3e13c4b23428a241b5e97 | 6b8d470d2caa5ec6785eae07bca04e66fb3734b7 | refs/heads/main | 2023-03-19T17:29:49.388643 | 2021-01-15T03:20:24 | 2021-01-15T03:20:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,295 | py | import numpy as np
def evaluate(event_model, dev_manager):
    """Evaluate the trigger/argument extraction model on the dev set.

    Runs the trigger and argument forward passes over every dev batch,
    binarizes each predicted span tensor at its (tuned) threshold, and
    micro-averages the counts over all four outputs.

    :param event_model: model exposing ``trigger_model_forward`` and
        ``argument_model_forward`` (evaluated on CPU tensors).
    :param dev_manager: batch manager exposing ``iter_batch(shuffle=...)``.
    :return: tuple ``(f1, precision, recall)``.
    """

    def _accumulate(pred, gold, threshold, counts):
        """Binarize `pred` at `threshold` and add (tp, pred_pos, gold_pos) to counts."""
        pred_bin = np.where(pred.detach().numpy() > threshold, 1, 0)
        # astype(int) instead of the np.int alias, which was removed in NumPy >= 1.24.
        gold_bin = gold.astype(int)
        counts[0] += np.sum(gold_bin & pred_bin)
        counts[1] += np.sum(pred_bin)
        counts[2] += np.sum(gold_bin)

    event_model.eval()
    # counts = [true positives (A), predicted positives (B), gold positives (C)],
    # seeded with a tiny epsilon to avoid division by zero on empty predictions.
    counts = [1e-10, 1e-10, 1e-10]
    for batch in dev_manager.iter_batch(shuffle=True):
        text, t1, t2, s1, s2, k1, k2, o1, o2 = batch
        ps1_out, ps2_out, pn1_out, pn2_out, t_dgout, mask = event_model.trigger_model_forward(t1, t2)
        po1_out, po2_out = event_model.argument_model_forward(k1, k2, pn1_out, pn2_out, t_dgout, mask)
        # Per-tensor decision thresholds kept from the original implementation:
        # trigger start/end = 0.4/0.3, argument start/end = 0.3/0.2.
        _accumulate(ps1_out, s1, 0.4, counts)
        _accumulate(ps2_out, s2, 0.3, counts)
        _accumulate(po1_out, o1, 0.3, counts)
        _accumulate(po2_out, o2, 0.2, counts)
    A, B, C = counts
    return 2 * A / (B + C), A / B, A / C
| [
"[email protected]"
] | |
280e08aa5a71be49a6ff33f0eff15bc84e557215 | 42e4b4c175d343719391fcfd83e88a1a115472bb | /RNN/keras/tokenize_document.py | 9bd78a12526ccf0aad3a0bba4eaf4c36e8391f92 | [] | no_license | chensvm/FED | f78222490fa2d8d0bee64d06c032ea1283314e05 | c5b5576cac19d4179870efc8024b9cea26116068 | refs/heads/master | 2021-09-13T02:38:43.068661 | 2018-04-24T01:08:40 | 2018-04-24T01:08:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,683 | py | import numpy as np
import sys
import csv
import copy
from collections import Counter
from datetime import date, timedelta,datetime
import os.path
from keras.datasets import imdb
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM
from keras.layers.embeddings import Embedding
from keras.layers import Dropout
from keras.preprocessing import sequence
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.preprocessing import sequence
import pickle
# --- Configuration: data locations and tokenizer limits -------------------
# Alternative corpus paths kept for reference (different machines / year ranges).
#article_path = '/tmp2/finance/nytimes/'# for hp machine
#article_train_path = '/tmp2/finance2/nytimes/training_data/' # for hp, cuda3 machine
#article_train_path = '/tmp2/finance2/nytimes/temp/'# 2005
#article_train_path = '/tmp2/finance2/nytimes/temp2/'# 2005~2007
#article_train_path = '/tmp2/finance2/nytimes/temp_2008/'#2008 data has problem
#article_test_path = '/tmp2/finance2/nytimes/testing1998_2004/'# 1998~2004
#article_test_path = '/tmp2/finance2/nytimes/temp_2004/'# 2004
#article_all_path = '/tmp2/finance2/nytimes/1998_2007/'#1998~2007
#article_all_path = '/tmp2/finance2/nytimes/1998_2008/'#1998~2008
article_all_path = '/tmp2/finance2/nytimes/2000_2001/'#2001
#article_all_path = '/tmp2/finance2/nytimes/2001/'#2001
#article_all_path = '/tmp2/finance2/nytimes/temp_2008/'#temp_2008
# Output path for the pickled, fitted Tokenizer.
#token_file = 'token1998_2008_index.pkl'#1998~2008
token_path = './token_index/'
#token_file = token_path + 'token1998_2008_index.pkl'#1998~2008
token_file = token_path + 'token2000_2001_index.pkl'#2008
# Fed rate decision CSVs (date -> rate label).
rates_path = '../fed_rates/'
rates_train_file = rates_path + 'fed_date_rate_training.csv'
rates_test_file = rates_path + 'fed_date_rate_testing.csv'
rates_all_file = rates_path + 'fed_date_rate_all.csv'
#IMDB word_index 88584
#length of word index
MAX_NB_WORDS = 88584 # 100
# truncate and pad input sequences
max_review_length = 500 # 500
# load the dataset but only keep the top n words, zero the rest
top_words = 20000 # 5000 20000
np.random.seed(7)
def choose_top_words(sequences, top_words_number):
    """Keep only each sequence's `top_words_number` most frequent word ids.

    Every token that is not among the most common ids of its own sequence is
    replaced, in place, by 0 (the padding/OOV id).

    :param sequences: list of lists of int word ids; modified in place.
    :param top_words_number: number of distinct ids to keep per sequence.
    :return: the same `sequences` object, for call-chaining convenience.
    """
    for sequence in sequences:
        # Counter only reads the list, so the original copy.deepcopy was
        # unnecessary. A set makes the per-token membership test O(1)
        # instead of O(top_words_number) per token.
        top_ids = {word_id for word_id, _ in
                   Counter(sequence).most_common(top_words_number)}
        for j, word_id in enumerate(sequence):
            if word_id not in top_ids:
                sequence[j] = 0
    return sequences
def del_higher_top_words(sequences, top_words_number):
    """Zero out, in place, every word id >= top_words_number.

    Mirrors the IMDB convention where only ids below the vocabulary cap are
    kept; everything above the cap becomes the 0 (padding/OOV) id.

    :param sequences: list of lists of int word ids; modified in place.
    :param top_words_number: exclusive upper bound for ids that survive.
    :return: the same `sequences` object.
    """
    for sequence in sequences:
        # Slice assignment rewrites the list in place, preserving identity.
        sequence[:] = [0 if word_id >= top_words_number else word_id
                       for word_id in sequence]
    return sequences
def load_data(article_path, rates_file, tokenizer):
    """Fit `tokenizer` on every NYT article between consecutive Fed meetings.

    Reads the rate CSV into (prev_meeting, meeting, rate) windows, builds the
    list of daily .npy article filenames inside each window, and calls
    tokenizer.fit_on_texts on each day's articles. The X/y assembly code is
    currently commented out, so only the fitted tokenizer is returned.

    :param article_path: directory containing <year>/<YYYYMMDD>.npy files.
    :param rates_file: CSV with 'date' and 'rate' columns, in date order.
    :param tokenizer: keras Tokenizer to fit incrementally.
    :return: the fitted tokenizer.
    """
    start_end_rate = []
    prev_date = ''
    rates = {}
    # NOTE(review): this file handle is never closed and the name `f` is later
    # reused as a loop variable below -- consider a `with` block.
    f = open(rates_file, 'r')
    for row in csv.DictReader(f):
        rates[row['date']] = row['rate']
        t = []
        t.append(prev_date)
        t.append(row['date'])
        t.append(int(row['rate']))
        start_end_rate.append(t)
        prev_date = row['date']
    # The first window has an empty start date, so drop it.
    del start_end_rate[0]
    ## constructing array with all npy filenames for start_end_rate[]
    article_file = []
    for ser in start_end_rate:
        start_date = datetime.strptime(ser[0],'%Y-%m-%d')
        # Window is exclusive of both meeting days: (prev_meeting, meeting).
        start_date = start_date + timedelta(days=1)
        end_date = datetime.strptime(ser[1],'%Y-%m-%d')
        t = []
        count_date = start_date
        while count_date < end_date:
            t.append(str(count_date.year)+'/'+datetime.strftime(count_date,'%Y%m%d')+'.npy')
            count_date = count_date + timedelta(days=1)
        article_file.append(t)
    #print('start_end_rate[0]')
    #print('article_file[0]')
    ## getting all sentences and rate label for meeting_date
    ## data_for_date[date of meeting] = [array of sentences]
    ## X_data is composed of article
    data_for_date = {}
    rate_for_date = {}
    date_of_meeting_list = []
    #X_data = np.empty(0)#max_review_length
    X_data = []
    y_data = []
    #tokenizer = Tokenizer(nb_words=MAX_NB_WORDS)
    test_i = 0.0
    test_count = 0
    for ind,ser in enumerate(start_end_rate):
        #date_of_meeting = datetime.strptime(ser[1],'%Y-%m-%d')
        #date_of_meeting = datetime.strftime(date_of_meeting,'%Y%m%d')
        #date_of_meeting_list.append(date_of_meeting)
        #data_for_date[date_of_meeting] = []
        #rate_for_date[date_of_meeting] = ser[2]
        print(ser)
        for f in article_file[ind]:
            # Some days have no saved articles; skip missing files.
            if os.path.isfile(article_path+f):
                day = np.load(article_path+f)
                if(len(day) == 0):
                    continue
                # Incrementally extend the tokenizer's vocabulary.
                tokenizer.fit_on_texts(day)
                #counttt = 0
                #sequences = tokenizer.texts_to_sequences(day)
                #sequences = choose_top_words(sequences, top_words) # unfinished , hope it can get frequency order of words and give it ID
                #sequences =del_higher_top_words(sequences, top_words) # only delete id that higher than top_words
                #X_data = X_data + test
                #for i in range(len(day)):
                #y_data.append(rate_for_date[date_of_meeting])
        test_i = test_i +1
        # Progress report every 10 meeting windows.
        if(test_i %10 == 0):
            print(str('process:{}').format(test_i/len(article_file)))
    #X_data = X_data[1:]
    #y_data = np.asarray(y_data)
    return tokenizer # X_data, y_data, tokenize
# Build the tokenizer, fit it over the whole corpus, and pickle it to disk.
tokenizer = Tokenizer(nb_words=MAX_NB_WORDS, filters='!"#$%&()*+,-./:;<=>?@[\\]^_`{|}~\t\n',
                lower=True, split=" ", char_level=False)
#X_train , y_train, train_tokenizer = load_data(article_train_path, rates_train_file, top_words, tokenizer)
#X_test , y_test, test_tokenizer = load_data(article_test_path, rates_test_file, top_words, tokenizer)
tokenizer = load_data(article_all_path, rates_all_file, tokenizer)
print('tokenizer.word_index')
print(tokenizer.word_index)
print('tokenizer.word_index[good]')
print(tokenizer.word_index['good'])
# Persist the fitted tokenizer, then reload it as a sanity check.
with open(token_file, 'wb') as output:
    pickle.dump(tokenizer, output, pickle.HIGHEST_PROTOCOL)
# NOTE(review): `input` shadows the builtin of the same name inside this block.
with open(token_file, 'rb') as input:
    token_test = pickle.load(input)
print('token_test.word_index[good]')
print(token_test.word_index['good'])
"[email protected]"
] | |
eadc0f5424bbf0d72a7e881144c5af276bd26079 | b82a4db7d1df6879c9080997d8026edc50505fbb | /readNCFile_and_make_csv.py | 85d7e8068d08d42d9b3c701243c6082f911cbc05 | [] | no_license | hey-min/Python | a96b9c2f0ba3a9d9168ec92bd1e9b4aeaeeedd02 | 5dd55e615c947cc2c34c620a0551c6bdfd3da932 | refs/heads/main | 2023-07-12T05:40:25.352641 | 2021-08-10T01:33:07 | 2021-08-10T01:33:07 | 357,375,512 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,575 | py | from netCDF4 import Dataset
import pandas as pd
import numpy as np
import os
file_name = 'nc_name.nc'
fh = Dataset(file_name)
print('================= NC File Group List =================')
print(fh)
for item in fh.groups:
print('==================='+item+'===================')
print(fh[item])
# check fh['group_name']['variables'][...][...]
# check fh variables
# make file_list[] from folder_path
folder_path = '...'
file_list = []
if os.path.isdir(folder_path):
for file in os.listdir(folder_path):
# file name end with 'GEO.nc'
if file.endswith('GEO.nc'):
file_list.append(file)
# sample nc file name
nc_file = 'GK2B_GOCI2_L2_20210404_201630_FD_S000_G065_GEO.nc'
# make csv file from file_list[]'s value
csv_file_name= 'READ_GEO_NC.csv'
columns = ['nc_file', 'rslt']
df = pd.DataFrame(data=None, columns=columns)
for nc_file in file_list:
# fh groups: geophysical_data, navigation_data
fh = Dataset(folder_path+nc_file)
# ex) float SolZ_490(number_of_lines=3100, pixels_per_line=3100);
number_of_lines = 1553
pixels_per_line = 1551
# ex) group(geophysical_data)>variable(SolZ)>variable(SolZ_490)
rslt = fh['geophysical_data']['SolZ']['SolZ_490'][number_of_lines][pixels_per_line]
new_data = pd.DataFrame([{'nc_file': nc_file,
'rslt': rslt}])
df = df.append(new_data)
df.index = np.arange(1, len(file_list)+1)
print(df.tail(3))
# save csv file
csv_file = df.to_csv(csv_file_name)
| [
"[email protected]"
] | |
ba9e2aaeec56566e4c76fb23dedbe343dbe8a2f2 | 4be95feeacf0e76a30db2c147dd2bd924b6be1a9 | /torchfm/model/tpmn_afm.py | acbdc10b5fb38d1b1a00444b6839d3dcc83b6c53 | [
"MIT"
] | permissive | Moon-Seona/pytorch-fm | e8f4eda4830a22316d5002d30471feee871708f2 | f217d79de0317157a50b1b449ce98d5bf149d3f1 | refs/heads/master | 2022-12-18T10:03:01.744374 | 2020-09-23T09:35:49 | 2020-09-23T09:35:49 | 277,729,115 | 0 | 0 | null | 2020-07-07T05:52:08 | 2020-07-07T05:52:08 | null | UTF-8 | Python | false | false | 1,626 | py | import torch
import numpy as np
from torchfm.layer import FeaturesEmbedding, FeaturesLinear, AttentionalFactorizationMachine
class AttentionalFactorizationMachineModel(torch.nn.Module):
    """
    A pytorch implementation of Attentional Factorization Machine.
    Reference:
        J Xiao, et al. Attentional Factorization Machines: Learning the Weight of Feature Interactions via Attention Networks, 2017.
    """
    def __init__(self, field_dims, vocab_size, embed_dim, attn_size, dropouts):
        # NOTE(review): `vocab_size` is accepted but never used in this
        # class -- confirm whether it can be dropped at the call site.
        super().__init__()
        # Keep only a subset of the input feature fields. The commented
        # variant excludes the app_bundle field; the active one includes it.
        # except app_bundle
        # field_dims = np.hstack((field_dims[0:1],field_dims[2:3],field_dims[4:8],field_dims[10:15],field_dims[17:19], field_dims[21:24], field_dims[26:]))
        # include app_bundle
        field_dims = np.hstack((field_dims[:3],field_dims[4:8],field_dims[10:15],field_dims[17:19], field_dims[21:24], field_dims[26:]))
        self.num_fields = len(field_dims)
        self.embedding = FeaturesEmbedding(field_dims, embed_dim)
        self.linear = FeaturesLinear(field_dims)
        self.afm = AttentionalFactorizationMachine(embed_dim, attn_size, dropouts)
    def forward(self, x, additional):
        """
        :param x: Long tensor of size ``(batch_size, num_fields)``
        """
        # NOTE(review): `additional` is unused in the forward pass -- confirm.
        # Select the same column subset as in __init__ so x lines up with
        # the reduced field_dims.
        # except app_bundle
        # x = torch.cat((x[:,0:1],x[:,2:3],x[:,4:8],x[:,10:15],x[:,17:19],x[:,21:24], x[:,26:]), dim=1)
        # include app_bundle
        x = torch.cat((x[:,:3],x[:,4:8],x[:,10:15],x[:,17:19],x[:,21:24], x[:,26:]), dim=1)
        # Linear (first-order) term + attention-weighted pairwise interactions.
        x = self.linear(x) + self.afm(self.embedding(x))
        return torch.sigmoid(x.squeeze(1))
| [
"[email protected]"
] | |
8017224993b84def64da1fb1cb8120e19a9fceb9 | 8aa2ff0c33fad8a2c5c903d8b59c5c2146a17451 | /rpm_web/migrations/0035_auto_20210315_1034.py | 0dd9722438b06695559add90b3a1637d56f4a407 | [] | no_license | alex-filgueira/RPM_plus | b8e0ba94a589f4ae9af78739ae261f5da30f0f7a | 7675a9032647f0ef1646e5cc3ca14c4004c690aa | refs/heads/master | 2023-04-18T11:53:33.508915 | 2021-05-03T09:16:30 | 2021-05-03T09:16:30 | 345,411,667 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 606 | py | # Generated by Django 3.2.dev20200924115306 on 2021-03-15 09:34
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('rpm_web', '0034_alter_mtype_input2_fig1_name'),
]
operations = [
migrations.AlterField(
model_name='mplan2',
name='fig1_border_w',
field=models.FloatField(default=1),
),
migrations.AlterField(
model_name='mplan2',
name='fig1_color_2',
field=models.CharField(blank=True, default='#000000', max_length=10),
),
]
| [
"[email protected]"
] | |
8abf9d009530f947beb82d9428695d41f1da05e9 | 24a64a302c41cca18e559b0bc2522090934f76a4 | /3_greedy_algo_and_dp/huffmans_algo.py | 297a0a480862d035eab2679750b88a79624f9c8b | [] | no_license | shubham18121993/algorithms-specilaization | 0b987153ee2081ca28c9f94d028ef3e9ada1fdbb | 9c895d12fcf8ee2a276617ac1a61c031ed64d154 | refs/heads/master | 2023-01-05T06:05:00.203128 | 2020-11-02T19:06:58 | 2020-11-02T19:06:58 | 272,154,265 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,434 | py | """
huffman algo with heap data structure
"""
def heapify(lst):
    """Rearrange `lst` in place into a binary min-heap and return it.

    Elements are compared with `<`/`<=` directly, so (weight, length)
    tuples order primarily by weight. Ties between children go to the
    left child, matching standard sift-down behavior.
    """
    size = len(lst)
    last_parent = size // 2 - 1
    # Sift every internal node down, starting from the last parent node.
    for start in range(last_parent, -1, -1):
        node = start
        while node <= last_parent:
            left = 2 * node + 1
            right = left + 1
            # Pick the smaller child (left wins ties / when right is absent).
            child = left
            if right < size and lst[right] < lst[left]:
                child = right
            if lst[node] <= lst[child]:
                break
            lst[node], lst[child] = lst[child], lst[node]
            node = child
    return lst
def insert_elem(elem):
    """Append `elem` to the module-global heap `lst` and sift it up.

    (k+1)//2 - 1 is the parent index of node k; the new element bubbles
    up while it is smaller than its parent, preserving the min-heap.
    """
    lst.append(elem)
    l = len(lst)
    k = l-1
    while k > 0 and lst[k] < lst[(k+1)//2-1]:
        lst[k], lst[(k+1)//2-1] = lst[(k+1)//2-1], lst[k]
        k = (k+1)//2-1
    return lst
def pop_elem():
    """Remove and return the minimum element of the module-global heap `lst`.

    Swaps the root with the last element, pops it, then sifts the new root
    down. The `k <= l_2` guard short-circuits before min() is evaluated, so
    the child slice is never empty.
    """
    lst[0], lst[-1] = lst[-1], lst[0]
    elem = lst.pop()
    l = len(lst)
    l_2 = l//2-1
    k = 0
    while k<=l_2 and lst[k] > min(lst[k*2+1: k*2+3]):
        # Swap with the smaller child (ties go to the left child).
        if min(lst[k*2+1:k*2+3]) == lst[k*2+1]:
            lst[k*2+1], lst[k] = lst[k], lst[k*2+1]
            k = k*2+1
        else:
            lst[k*2+2], lst[k] = lst[k], lst[k*2+2]
            k = k*2+2
    return elem
def huffman_algo(lst):
    """Run Huffman merging over the heap and return the final tree depth.

    Repeatedly pops the two lightest (weight, depth) nodes, merges them,
    and reinserts the merged node. Relies on pop_elem/insert_elem, which
    operate on the module-global `lst`; the script below passes that same
    global in, so the parameter aliases it.
    """
    if len(lst) == 0:
        return 0
    while len(lst) > 1 :
        weight1, len1 = pop_elem()
        weight2, len2 = pop_elem()
        weight = weight1 + weight2
        # NOTE(review): the second assignment overwrites the first, so the
        # effective result is the MIN depth -- confirm which is intended.
        length = max(len1, len2) + 1 # for max depth
        length = min(len1, len2) + 1 # for min depth
        insert_elem((weight, length))
        print(lst)
    return lst[0][1]
with open("../../dataset/course3/huffman.txt", 'r') as f0:
lines = f0.readlines()
lst = []
for line in lines[1:]:
lst.append((int(line.strip()), 0))
heapify(lst)
max_len = huffman_algo(lst)
print(max_len)
| [
"[email protected]"
] | |
142f8fae0c98b8404d87fd204a29810f922b20a7 | 51a630e660f7013ed6f6cc74d5a0fb6276b67f9f | /18_modules.py | 8217ed08fd3f50d1fb2cca92606bd107ff9eb31c | [] | no_license | sev-94/introPy | 2e1f7a5ce6f1a182ecdb61dfc7ced76534c61762 | bac0f1e71ed657fa4fe08dc1968e2bae0f1e553d | refs/heads/master | 2022-06-17T21:13:22.671777 | 2020-05-16T18:27:44 | 2020-05-16T18:27:44 | 259,551,956 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 52 | py | import usefulStuff
print(usefulStuff.roll_dice(10)) | [
"[email protected]"
] | |
dba2334ad69ae1751f9d09ef8e122c744ac27af7 | 59082cec57ecae95339728223fde902ba93762e3 | /mainapp/views.py | f9cc491f1888658380a6743891cfe7614bb9c878 | [] | no_license | zaid-kamil/django_website_npgc | edfb2c0db82862f2c83f54e7f16bd86d4f46deb1 | e283a6f0823bf77390d83318aee2507000ccb5d1 | refs/heads/master | 2023-03-30T12:15:14.704500 | 2021-03-30T08:21:21 | 2021-03-30T08:21:21 | 352,924,358 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 298 | py | from django.shortcuts import render
# Create your views here.
def index(request):
    """Render the home page with its title in the template context."""
    ctx = {'title':'Home page'} # data for html page
    return render(request, 'index.html',context=ctx)
def show_data(request):
    """Render the data page with its title in the template context."""
    ctx = {'title':'Show Data'}
    return render(request, 'data.html', context=ctx)
"[email protected]"
] | |
0aae1e46ae4db10f380e7b2262fb78ccca1a59af | 9e988c0dfbea15cd23a3de860cb0c88c3dcdbd97 | /sdBs/AllRun/fbs_0324+379/sdB_FBS_0324+379_coadd.py | b7c16a493a38e073e0279cda675a539ce6841559 | [] | no_license | tboudreaux/SummerSTScICode | 73b2e5839b10c0bf733808f4316d34be91c5a3bd | 4dd1ffbb09e0a599257d21872f9d62b5420028b0 | refs/heads/master | 2021-01-20T18:07:44.723496 | 2016-08-08T16:49:53 | 2016-08-08T16:49:53 | 65,221,159 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 433 | py | from gPhoton.gMap import gMap
def main():
    """Run gPhoton gMap for sdB_FBS_0324+379: NUV count movie (30 s steps)
    plus a coadded count image at the given sky position."""
    gMap(band="NUV", skypos=[51.906917,38.136472], skyrange=[0.0333333333333,0.0333333333333], stepsz = 30., cntfile="/data2/fleming/GPHOTON_OUTPUT/LIGHTCURVES/sdBs/sdB_FBS_0324+379/sdB_FBS_0324+379_movie_count.fits", cntcoaddfile="/data2/fleming/GPHOTON_OUTPUT/LIGHTCURVES/sdB/sdB_FBS_0324+379/sdB_FBS_0324+379_count_coadd.fits", overwrite=True, verbose=3)
if __name__ == "__main__":
    main()
| [
"[email protected]"
] | |
6a888b76417ad7b700068ba4113851993d378431 | 680faa43b6c763438642e076099694c233122dd9 | /Chap06/for.py | 00fe1140ba97a18c312b7c954a0ee9256dbf75f4 | [] | no_license | OVUSA/PythonCourse | 8c1bd476a8fdc922d822cdfe8d11711fb6a78607 | 1a33a4f6616ee35c5ea70b03d880b6c8088fd3ac | refs/heads/main | 2023-04-26T06:26:05.557316 | 2021-05-19T00:29:12 | 2021-05-19T00:29:12 | 358,404,529 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 404 | py | #!/usr/bin/env python3
# Copyright 2009-2017 BHG http://bw.org/
animals = ('bear', 'bunny', 'dog', 'cat', 'velociraptor')
for pet in animals:
print(pet)
if pet == 'dog':
continue
if pet == 'bunny':
break
print(pet)
else:
print("that is all of the animals")
mylist = ("wine", "vodka", "beer", "blood", "cider")
for drink in range(4):
print(drink)
| [
"[email protected]"
] | |
7f84692e020f69435e34c845f4ac82c34a901ff1 | 54da7a4705d6fda98107e185e352138236f1a04a | /card/urls.py | bc476ade43438a941f41666e478d973af581fafc | [] | no_license | KasiaWrzalka/eMenu | f7611bf3baf380ae18150f58877eced0eb7b8d93 | 4ee08d99cc9387915b3fd67ab65bd7091d5a724d | refs/heads/master | 2020-03-27T20:23:44.002047 | 2018-09-04T19:39:12 | 2018-09-04T19:39:12 | 147,065,886 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 293 | py | from django.urls import path
from . import views
urlpatterns = [
path('', views.index, name='index'),
path('get_cards/', views.getCards, name='get_cards'),
path('card_list/', views.CardList, name='card_list'),
path('items/<int:card_id>', views.CardItems, name='card_items'),
] | [
"[email protected]"
] | |
8242cab2e8becb92f7a019b83e90b12d8f90c412 | 5303dd477fc2dd5f0ec90d1a60a6ce62ec2b8862 | /venv/bin/django-admin | 4b77d0ba3310d3959474a049371458dff10385ab | [] | no_license | danielFilin/billboard | 633bfb4083ccc24411fa7ea46e56568d163a6077 | 02fdbdb2357ffcbdd84eeaf5498582f49249e0ce | refs/heads/master | 2022-12-16T12:36:36.503841 | 2019-08-03T18:35:38 | 2019-08-03T18:35:38 | 200,408,859 | 0 | 0 | null | 2022-12-08T01:48:08 | 2019-08-03T18:33:59 | Python | UTF-8 | Python | false | false | 312 | #!/Users/jeremyskornik/DjangoProjects/Billboard/venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from django.core.management import execute_from_command_line
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(execute_from_command_line())
| [
"[email protected]"
] | ||
ca461012a0b6d63e4279c679121b59c086e6eb05 | 3adf04f33a2d09feddadcd8b6c7ae1f1c3ced2e9 | /tests/linter/test_name_checks.py | 2519e1be12477f13ea2358b7210d4a21681dee0f | [
"MIT"
] | permissive | F0rsaken/godot-gdscript-toolkit | d2f0cede170b1ef0ae935598e1008c70f090abb7 | fd7a985cb8d2c8d926f4ecb19b01b9c3155a3dff | refs/heads/master | 2022-12-17T03:20:08.155492 | 2020-09-14T17:48:02 | 2020-09-14T17:48:30 | 295,524,729 | 0 | 0 | MIT | 2020-09-14T19:59:11 | 2020-09-14T19:59:11 | null | UTF-8 | Python | false | false | 5,835 | py | import pytest
from .common import simple_ok_check, simple_nok_check
# fmt: off
# snake_case function names (with optional leading underscore, and the
# _on_Node_signal pattern) must pass the linter unchanged.
@pytest.mark.parametrize('code', [
"""
func foo():
    pass
""",
"""
func foo_bar():
    pass
""",
"""
func _foo():
    pass
""",
"""
func _foo_bar():
    pass
""",
"""
func _on_Button_pressed():
    pass
""",
])
def test_function_name_ok(code):
    simple_ok_check(code)
# Mixed-case or PascalCase function names must trigger 'function-name'.
@pytest.mark.parametrize('code', [
"""
func some_Button_pressed():
    pass
""",
"""
func SomeName():
    pass
""",
])
def test_function_name_nok(code):
    simple_nok_check(code, 'function-name')
# PascalCase class_name declarations are accepted.
@pytest.mark.parametrize('code', [
"""
class_name SomeClassName
""",
"""
class_name Some
""",
])
def test_class_name_ok(code):
    simple_ok_check(code)
# snake_case or underscore-prefixed class_name triggers 'class-name'.
@pytest.mark.parametrize('code', [
"""
class_name some_class_name
""",
"""
class_name _Some
""",
])
def test_class_name_nok(code):
    simple_nok_check(code, 'class-name')
# Inner-class names: PascalCase, optionally underscore-prefixed, are accepted.
@pytest.mark.parametrize('code', [
"""
class _SubClassName:
    tool
""",
"""
class SubClassName:
    tool
""",
])
def test_sub_class_name_ok(code):
    simple_ok_check(code)
# Trailing underscores or snake_case trigger 'sub-class-name'.
@pytest.mark.parametrize('code', [
"""
class SubClassName_:
    tool
""",
"""
class sub_class_name:
    tool
""",
])
def test_sub_class_name_nok(code):
    simple_nok_check(code, 'sub-class-name')
# Signals must be snake_case (with or without an argument list).
@pytest.mark.parametrize('code', [
"""
signal some_signal
""",
"""
signal signal(a, b, c)
""",
])
def test_signal_name_ok(code):
    simple_ok_check(code)
@pytest.mark.parametrize('code', [
"""
signal someSignal
""",
"""
signal Signal(a, b)
""",
])
def test_signal_name_nok(code):
    simple_nok_check(code, 'signal-name')
# Named enums must be PascalCase.
@pytest.mark.parametrize('code', [
"""
enum Name {}
""",
"""
enum PascalCase { XXX }
""",
"""
enum PascalXCase { XXX }
""",
])
def test_enum_name_ok(code):
    simple_ok_check(code)
@pytest.mark.parametrize('code', [
"""
enum some_name {}
""",
"""
enum camelCase { XXX }
""",
"""
enum PascalCase_ { XXX }
""",
])
def test_enum_name_nok(code):
    simple_nok_check(code, 'enum-name')
# Enum elements must be UPPER_SNAKE_CASE.
@pytest.mark.parametrize('code', [
"""
enum Name { XXX }
""",
"""
enum { XXX, Y_Y_Y }
""",
])
def test_enum_element_name_ok(code):
    simple_ok_check(code)
@pytest.mark.parametrize('code', [
"""
enum { X_, Y }
""",
"""
enum { _XXX }
""",
"""
enum { xx_xx }
""",
"""
enum { SomeStuff }
""",
])
def test_enum_element_name_nok(code):
    simple_nok_check(code, 'enum-element-name')
# Loop variables: snake_case; a leading underscore marks "intentionally unused".
@pytest.mark.parametrize('code', [
"""func foo():
    for _x in y:
        pass
""",
"""func foo():
    for xyz in y:
        pass
""",
"""func foo():
    for aaa_bbb in y:
        pass
""",
])
def test_loop_variable_name_ok(code):
    simple_ok_check(code)
@pytest.mark.parametrize('code', [
"""func foo():
    for x_ in y:
        pass
""",
"""func foo():
    for xX in y:
        pass
""",
"""func foo():
    for X_X in y:
        pass
""",
])
def test_loop_variable_name_nok(code):
    simple_nok_check(code, 'loop-variable-name')
# Function arguments: snake_case; 'unused-argument' is disabled so only the
# naming rule is exercised.
@pytest.mark.parametrize('code', [
"""
func foo(a, _b, c_d := 123, xxx : int):
    pass
""",
])
def test_function_argument_name_ok(code):
    simple_ok_check(code, disable=['unused-argument'])
@pytest.mark.parametrize('code', [
"""
func foo(a_):
    pass
""",
"""
func foo(xX):
    pass
""",
"""
func foo(X_X):
    pass
""",
])
def test_function_argument_name_nok(code):
    simple_nok_check(code, 'function-argument-name', disable=['unused-argument'])
# Local variables: snake_case, optional leading underscore, with or without
# type hints / inference.
@pytest.mark.parametrize('code', [
"""
func foo():
    var xxx
""",
"""
func foo():
    var x_y = 1
""",
"""
func foo():
    var y : int = 1
""",
"""
func foo():
    var y := 1
""",
"""
func foo():
    var y : int
""",
])
def test_function_variable_name_ok(code):
    simple_ok_check(code)
@pytest.mark.parametrize('code', [
"""func foo():
    var xxx_
""",
"""func foo():
    var _x_y = 1
""",
"""func foo():
    var X : int = 1
""",
"""func foo():
    var yY := 1
""",
])
def test_function_variable_name_nok(code):
    simple_nok_check(code, 'function-variable-name')
# Locals holding a preload() result are treated as constants: PascalCase.
@pytest.mark.parametrize('code', [
"""
func foo():
    var XxxYyy = preload()
""",
])
def test_function_preload_variable_name_ok(code):
    simple_ok_check(code)
@pytest.mark.parametrize('code', [
"""func foo():
    var x_y = preload()
""",
])
def test_function_preload_variable_name_nok(code):
    simple_nok_check(code, 'function-preload-variable-name')
# Plain constants: UPPER_SNAKE_CASE, no leading underscore.
@pytest.mark.parametrize('code', [
"""
const X = 1
""",
"""
const X_Y_Z = 2
""",
])
def test_constant_name_ok(code):
    simple_ok_check(code)
@pytest.mark.parametrize('code', [
"""
const Xx = 1
""",
"""
const _X = 2
""",
"""
const x_x = 2
""",
])
def test_constant_name_nok(code):
    simple_nok_check(code, 'constant-name')
# Constants holding load()/preload() results may also be PascalCase.
@pytest.mark.parametrize('code', [
"""
const Xx = load()
""",
"""
const XxYy = preload()
""",
"""
const X = load()
""",
"""
const X_Y_Z = preload()
""",
])
def test_load_constant_name_ok(code):
    simple_ok_check(code)
@pytest.mark.parametrize('code', [
"""
const _Xx = preload()
""",
"""
const x_x = load()
""",
])
def test_load_constant_name_nok(code):
    simple_nok_check(code, 'load-constant-name')
# Class-level variables: snake_case with optional leading underscore.
@pytest.mark.parametrize('code', [
"""
var x
""",
"""
var xx_yy : int
""",
"""
var _xx_yy := 1.3
""",
])
def test_class_variable_name_ok(code):
    simple_ok_check(code)
@pytest.mark.parametrize('code', [
"""
var X_Y
""",
"""
var x_
""",
"""
var Xx
""",
"""
var XY_Z
""",
])
def test_class_variable_name_nok(code):
    simple_nok_check(code, 'class-variable-name')
# Class-level variables assigned load()/preload() may be PascalCase too.
@pytest.mark.parametrize('code', [
"""
var x = load()
""",
"""
var xx_yy = preload()
""",
"""
var _xx_yy := load()
""",
"""
var XxYy := load()
""",
])
def test_class_load_variable_name_ok(code):
    simple_ok_check(code)
@pytest.mark.parametrize('code', [
"""
var X_Y = load()
""",
"""
var x_ = load()
""",
"""
var XY_Z = load()
""",
])
def test_class_load_variable_name_nok(code):
    simple_nok_check(code, 'class-load-variable-name')
| [
"[email protected]"
] | |
7ac749b1aab219ebdca48873f52c5834e5ed540e | 3fc01457951a956d62f5e8cc0a8067f6796ee200 | /misago/acl/tests/test_roleadmin_views.py | ad9551935eee6abcae3d0f1f90f7f6995fe2799b | [] | no_license | kinsney/education | 8bfa00d699a7e84701a8d49af06db22c384e0e8d | 48f832f17c2df7b64647b3db288abccf65868fe6 | refs/heads/master | 2021-05-04T01:15:03.078130 | 2016-12-04T03:18:20 | 2016-12-04T03:18:20 | 71,164,542 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,597 | py | from django.core.urlresolvers import reverse
from misago.admin.testutils import AdminTestCase
from ..models import Role
from ..testutils import fake_post_data
def fake_data(data_dict):
return fake_post_data(Role(), data_dict)
class RoleAdminViewsTests(AdminTestCase):
def test_link_registered(self):
"""admin nav contains user roles link"""
response = self.client.get(reverse('misago:admin:permissions:users:index'))
self.assertContains(response, reverse('misago:admin:permissions:users:index'))
def test_list_view(self):
"""roles list view returns 200"""
response = self.client.get(reverse('misago:admin:permissions:users:index'))
self.assertEqual(response.status_code, 200)
def test_new_view(self):
"""new role view has no showstoppers"""
response = self.client.get(reverse('misago:admin:permissions:users:new'))
self.assertEqual(response.status_code, 200)
response = self.client.post(
reverse('misago:admin:permissions:users:new'),
data=fake_data({'name': 'Test Role'})
)
self.assertEqual(response.status_code, 302)
test_role = Role.objects.get(name='Test Role')
response = self.client.get(reverse('misago:admin:permissions:users:index'))
self.assertEqual(response.status_code, 200)
self.assertContains(response, test_role.name)
def test_edit_view(self):
"""edit role view has no showstoppers"""
self.client.post(
reverse('misago:admin:permissions:users:new'),
data=fake_data({'name': 'Test Role'})
)
test_role = Role.objects.get(name='Test Role')
response = self.client.get(reverse('misago:admin:permissions:users:edit', kwargs={'pk': test_role.pk}))
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'Test Role')
response = self.client.post(
reverse('misago:admin:permissions:users:edit', kwargs={'pk': test_role.pk}),
data=fake_data({'name': 'Top Lel'})
)
self.assertEqual(response.status_code, 302)
test_role = Role.objects.get(name='Top Lel')
response = self.client.get(reverse('misago:admin:permissions:users:index'))
self.assertEqual(response.status_code, 200)
self.assertContains(response, test_role.name)
def test_users_view(self):
"""users with this role view has no showstoppers"""
response = self.client.post(
reverse('misago:admin:permissions:users:new'),
data=fake_data({'name': 'Test Role'})
)
test_role = Role.objects.get(name='Test Role')
response = self.client.get(reverse('misago:admin:permissions:users:users', kwargs={'pk': test_role.pk}))
self.assertEqual(response.status_code, 302)
def test_delete_view(self):
"""delete role view has no showstoppers"""
self.client.post(
reverse('misago:admin:permissions:users:new'),
data=fake_data({'name': 'Test Role'})
)
test_role = Role.objects.get(name='Test Role')
response = self.client.post(reverse('misago:admin:permissions:users:delete', kwargs={'pk': test_role.pk}))
self.assertEqual(response.status_code, 302)
# Get the page twice so no alert is renderered on second request
self.client.get(reverse('misago:admin:permissions:users:index'))
response = self.client.get(reverse('misago:admin:permissions:users:index'))
self.assertNotContains(response, test_role.name)
| [
"[email protected]"
] | |
f556ceb4101e22b775517d35c7cb495b3d3de9c8 | 41be196cdb24b8a6e2b62ea6f6852d77115a8e05 | /ops.py | 5bf941de0f7366ff3e7d26f0ab58cb9b3f56f32d | [
"MIT"
] | permissive | sananand007/genericImageClassification | c55cc31d6fa8f216b4c99f47b2448f44ee978b4a | 68d17ac1f34b32143260e8c2c72e4b70a2ad26b3 | refs/heads/master | 2020-04-17T15:50:02.945362 | 2019-01-30T15:20:47 | 2019-01-30T15:20:47 | 166,715,259 | 0 | 0 | MIT | 2019-01-30T15:12:50 | 2019-01-20T22:15:23 | Python | UTF-8 | Python | false | false | 1,508 | py | '''
Defining Group Normalization here
'''
import tensorflow as tf
import tensorflow.contrib.slim as slim
def norm(x, norm_type, is_train, G=32, esp=1e-5):
with tf.variable_scope('{}_norm'.format(norm_type)):
if norm_type == 'none':
output = x
elif norm_type == 'batch':
output = tf.contrib.layers.batch_norm(
x, center=True, scale=True, decay=0.999,
is_training=is_train, updates_collections=None
)
elif norm_type == 'group':
# normalize
# tranpose: [bs, h, w, c] to [bs, c, h, w] following the paper
x = tf.transpose(x, [0, 3, 1, 2])
N, C, H, W = x.get_shape().as_list()
G = min(G, C)
x = tf.reshape(x, [-1, G, C // G, H, W])
mean, var = tf.nn.moments(x, [2, 3, 4], keep_dims=True)
x = (x - mean) / tf.sqrt(var + esp)
# per channel gamma and beta
gamma = tf.Variable(tf.constant(1.0, shape=[C]), dtype=tf.float32, name='gamma')
beta = tf.Variable(tf.constant(0.0, shape=[C]), dtype=tf.float32, name='beta')
gamma = tf.reshape(gamma, [1, C, 1, 1])
beta = tf.reshape(beta, [1, C, 1, 1])
output = tf.reshape(x, [-1, C, H, W]) * gamma + beta
# tranpose: [bs, c, h, w, c] to [bs, h, w, c] following the paper
output = tf.transpose(output, [0, 2, 3, 1])
else:
raise NotImplementedError
return output | [
"[email protected]"
] | |
4d043aa331d154a5b007be6ce7499ac3f66ae85c | 524934f74db0a84be9965859ea789112752439b7 | /test_basium.py | 30911b5390ac9ebcbbd9708922d456412f4f5dc8 | [] | no_license | lowinger42/basium | 241f983a83aba2f465ca315ca8b8261780a20521 | 1f12474f005915fc9b8b191e60ad1f4f1676b4f1 | refs/heads/master | 2022-03-11T00:25:25.340228 | 2022-02-25T23:07:57 | 2022-02-25T23:07:57 | 5,907,835 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 13,712 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) 2012-2013, Anders Lowinger, Abundo AB
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the <organization> nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
Common code used for testing
To run this script without embeded http/json server, start with --noserver
Example
./test_basium --noserver
A suitable standalone server can be started with
export PYTHONPATH=/opt/basium
wsgi/handler --port 8051
Preparations needed in the database before testing:
sqlite3
No preparation is needed, included in python
mysql
create a database called basium_db
CREATE DATABASE basium_db;
create a user username basium_user with password secret, with full rights to the basium_db database
GRANT ALL PRIVILEGES ON basium_db.* To 'basium_user'@'localhost' IDENTIFIED BY 'secret';
psql
create a user username basium_user with password secret, with full rights to the basium_db database
sudo -u postgres createuser basium_user --pwprompt
create a database called basium_db
sudo -u postgres createdb basium_db --owner=basium_user
json
uses the psql driver on the server side, see psql
"""
import sys
import time
import decimal
import datetime
import unittest
import logging
import basium_common as bc
import basium
import basium_model
import wsgi.handler
import test_tables
# ----- Start of module globals
log = basium.log
drivers = [
"psql",
"mysql",
"sqlite",
"json",
]
# ----- End of module globals
log.info("Python version %s" % str(sys.version_info))
log.logger.setLevel(logging.ERROR) # Keep the basium logger quiet
class ObjectFactory:
    """
    Factory producing model objects pre-filled with deterministic test data.

    Values are derived from an integer seed so two objects built with the
    same seed compare equal, while different seeds give (mostly) distinct
    values for every supported column type.
    """
    def new(self, cls, p):
        """
        Return a new *cls* instance with every non-primary-key column
        populated from the integer seed *p*.

        Raises:
            ValueError: if the model declares a column type this factory
                does not know how to fill.  (The original implementation
                printed a message and called sys.exit(1), killing the whole
                test process instead of failing the single test.)
        """
        obj = cls()
        for colname, column in obj._iterNameColumn():
            if column.primary_key:
                continue
            if isinstance(column, basium_model.BooleanCol):
                val = (p & 1) == 0
            elif isinstance(column, basium_model.DateCol):
                # Month/day wrap so any seed yields a valid calendar date.
                val = datetime.date(2012, (p % 12) + 1, (p % 28) + 1)
            elif isinstance(column, basium_model.DateTimeCol):
                val = datetime.datetime(2012, (p % 12) + 1, (p % 28) + 1,
                                        p % 24, p % 60, p % 60)
            elif isinstance(column, basium_model.DecimalCol):
                val = decimal.Decimal("%d.%02d" % (p, p % 100))
            elif isinstance(column, basium_model.FloatCol):
                val = float(str(p) + '.' + str(p))
            elif isinstance(column, basium_model.IntegerCol):
                val = p
            elif isinstance(column, basium_model.VarcharCol):
                # Non-ASCII text deliberately included to exercise encodings.
                val = "text räksmörgås RÄKSMÖRGÅS" + str(p)
            else:
                raise ValueError("Unknown column type: %s" % column)
            obj._values[colname] = val
        return obj
objFactory = ObjectFactory()
class TestFunctions(unittest.TestCase):
    """
    TestFunctions, test the store, load, delete, filter orm functions

    Each instance expects two attributes injected by get_suite() before it
    runs: ``driver`` (database driver name) and ``Cls`` (the model class).
    """
    def setUp(self):
        # Based on driver, create a dbconf object
        self.dbconf = None
        if self.driver == 'psql':
            self.dbconf = basium.DbConf(host='localhost', port=5432, username='basium_user', password='secret', database='basium_db')
        elif self.driver == 'mysql':
            self.dbconf = basium.DbConf(host='localhost', port=3306, username='basium_user', password='secret', database='basium_db')
        elif self.driver == 'sqlite':
            self.dbconf = basium.DbConf(database='/tmp/basium_db.sqlite')
        elif self.driver == 'json':
            # The json driver talks HTTP to a basium WSGI server.
            self.dbconf = basium.DbConf(host='http://localhost:8051', username='basium_user',
                                        password='secret', database='basium_db')
        else:
            self.fail("Unknown driver %s" % self.driver)
        self.db = basium.Basium(driver=self.driver, dbconf=self.dbconf, checkTables=True) #, logger=logger)
        self.db.log.logger.setLevel(logging.ERROR)
        self.db.addClass(self.Cls)
        if not self.db.start():
            self.fail("Cannot start database driver")
    def runtest2(self, obj1, obj2):
        """
        Run test
        Create an object, store in db
        Read out an object with the _id from above
        Compare and see if the two are equal
        """
        log.info("Store object in table '%s'" % obj1._table)
        try:
            # data1 = self.db.store(obj1)
            self.db.store(obj1)
        except bc.Error as e:
            self.assertFalse(True, msg="Could not store object %s" % e)
        log.info("Load same object from table '%s'" % (obj1._table))
        obj2._id = obj1._id
        try:
            rows = self.db.load(obj2)
        except bc.Error as e:
            self.assertFalse(True, msg="Could not load object %s" % e)
        self.assertEqual( len(rows), 1, msg="Only expected one row in result, got %s" % len(rows))
        obj2 = rows[0]
        self.assertEqual(obj1, obj2, msg = "Stored and loaded object does not have same content")
        log.info(" There is a total of %i rows in the '%s' table" % (self.db.count(obj1), obj1._table ) )
    def testInsert(self):
        """
        Store an object, read it out again and compare if they are equal
        """
        obj1 = objFactory.new(self.Cls, 1)
        obj2 = self.Cls()
        self.runtest2(obj1, obj2)
        obj3 = objFactory.new(self.Cls, 2)
        obj4 = self.Cls()
        self.runtest2(obj3, obj4)
    def testUpdate(self):
        """
        Test the update functionality
        """
        test1 = objFactory.new(self.Cls, 1)
        try:
            data = self.db.store(test1)
        except bc.Error as e:
            self.assertFalse(True, msg="Can't store new object %s" % e)
        # Mutate one column; storing an object that already has an _id
        # performs an UPDATE rather than an INSERT.
        test1.varcharTest += " more text"
        try:
            _id = self.db.store(test1)
        except bc.Error as e:
            self.assertFalse(True, msg="Can't update object %s" % e)
        test2 = self.Cls(_id)
        try:
            data = self.db.load(test2)
        except bc.Error as e:
            self.assertFalse(True, msg="Can't load updated object %s" % e)
        test2 = data[0]
        self.assertEqual(test1.varcharTest, test2.varcharTest, msg=
            "Update failed, expected '%s' in field, got '%s'" % (test1.varcharTest, test2.varcharTest))
    def testQuery(self):
        """
        Test the query functionality
        """
        # first create the objects in the database
        first = None
        for rowid in range(100, 115):
            obj1 = objFactory.new(self.Cls, rowid)
            try:
                data = self.db.store(obj1)
            except bc.Error as e:
                self.assertFalse(True, msg="Could not store object %s" % e)
            if not first:
                first = obj1._id
        # Select the 10 rows strictly between first+2 and first+13.
        query = self.db.query()
        obj = self.Cls()
        query.filter(obj.q._id, '>', first + 2).filter(obj.q._id, '<', first + 13)
        try:
            data = self.db.load(query)
        except bc.Error as e:
            self.assertFalse(True, msg="Can't query objects %s" % e)
        self.assertEqual(len(data), 10, msg="Wrong number of objects returned, expected %s got %s" % (10, len(data)))
        if len(data) == 10:
            for i in range(0, 10):
                self.assertEqual(data[i].intTest, i+103)
    def testDelete(self):
        """
        Test the delete functionality
        """
        test1 = objFactory.new(self.Cls, 1)
        log.info("Store object in table '%s'" % test1._table)
        try:
            # _id = self.db.store(test1)
            self.db.store(test1)
        except bc.Error as e:
            self.assertFalse(True, msg="Can't store new object %s" % e)
        rowsaffected = None
        log.info("Delete object in table '%s'" % test1._table)
        try:
            rowsaffected = self.db.delete(test1)
        except bc.Error as e:
            self.assertFalse(True, msg="Can't delete object %s" % e)
        self.assertEqual(rowsaffected, 1)
        # Try to get the object we just deleted
        log.info("Trying to get deleted object in table '%s' (should fail)" % test1._table)
        test2 = self.Cls()
        try:
            # data = self.db.load(test2)
            self.db.load(test2)
        except bc.Error as e:
            self.assertTrue(True, msg="Expected error when loading deleted object %s" % e)
class TestModel(unittest.TestCase):
    """
    Test the ORM model class

    Verifies that column values default to None when no default is given,
    and that explicit defaults (including the special "NOW" marker for
    date/datetime columns) are applied on construction.
    """
    # Model with no column defaults: every attribute should read as None.
    class TestModel(basium_model.Model):
        booleanTest = basium_model.BooleanCol()
        dateTest = basium_model.DateCol()
        datetimeTest = basium_model.DateTimeCol()
        decimalTest = basium_model.DecimalCol()
        floatTest = basium_model.FloatCol()
        intTest = basium_model.IntegerCol()
        varcharTest = basium_model.VarcharCol()
    # Model with an explicit default for every column.
    class TestModelDefault(basium_model.Model):
        booleanTest = basium_model.BooleanCol(default=True)
        dateTest = basium_model.DateCol(default="NOW")
        datetimeTest = basium_model.DateTimeCol(default="NOW")
        decimalTest = basium_model.DecimalCol(default=decimal.Decimal("1.23"))
        floatTest = basium_model.FloatCol(default=2.78)
        intTest = basium_model.IntegerCol(default=42)
        varcharTest = basium_model.VarcharCol(default="default string")
    def setUp(self):
        pass
    def test(self):
        t = self.TestModel()
        self.assertEqual(t.booleanTest, None)
        self.assertEqual(t.dateTest, None)
        self.assertEqual(t.datetimeTest, None)
        self.assertEqual(t.decimalTest, None)
        self.assertEqual(t.floatTest, None)
        self.assertEqual(t.intTest, None)
        self.assertEqual(t.varcharTest, None)
    def testDefault(self):
        t = self.TestModelDefault()
        self.assertEqual(t.booleanTest, True)
        # "NOW" defaults resolve to the current date/time (seconds precision).
        self.assertEqual(t.dateTest, datetime.datetime.now().date())
        self.assertEqual(t.datetimeTest, datetime.datetime.now().replace(microsecond=0))
        self.assertEqual(t.decimalTest, decimal.Decimal("1.23"))
        self.assertEqual(t.floatTest, 2.78)
        self.assertEqual(t.intTest, 42)
        self.assertEqual(t.varcharTest, "default string")
def get_suite():
    """
    Build and return a suite containing every test in this module.

    TestModel is loaded as-is.  TestFunctions is instantiated once per
    configured database driver; each case gets the driver name and the
    model class attached before being added to the suite.
    """
    loader = unittest.TestLoader()
    suite = unittest.TestSuite()
    suite.addTests(loader.loadTestsFromTestCase(TestModel))
    for drv in drivers:
        for testname in loader.getTestCaseNames(TestFunctions):
            case = TestFunctions(testname)
            case.driver = drv
            case.Cls = test_tables.BasiumTest
            suite.addTest(case)
    return suite
def runServer():
    """
    Start an WSGI server as a separate thread,
    needed for the json driver test

    The server is backed by the psql driver and serves the BasiumTest
    table over HTTP.  Blocks until the server thread reports ready.
    """
    log.info("Starting embedded WSGI server")
    driver = "psql"
    dbconf = basium.DbConf(host='localhost', port=5432, username='basium_user', password='secret', database='basium_db')
    db = basium.Basium(driver=driver, dbconf=dbconf, checkTables=True)
    db.setDebug(bc.DEBUG_ALL)
    db.log.logger.setLevel(logging.ERROR)
    db.addClass(test_tables.BasiumTest)
    if not db.start():
        log.error("Cannot start database driver for wsgi server")
    # Daemon thread: dies automatically with the main process.
    server = wsgi.handler.Server(basium=db)
    server.daemon = True
    server.start() # run in thread
    # Busy-wait until the server signals it accepts connections.
    while not server.ready:
        time.sleep(0.1)
if __name__ == "__main__":
    # --noserver skips the embedded WSGI server; use it when a standalone
    # server (wsgi/handler --port 8051) is already running.
    embeddedServer = True
    if len(sys.argv) > 1:
        if sys.argv[1] == '--noserver':
            embeddedServer = False
    # Only the json driver needs an HTTP endpoint to talk to.
    if "json" in drivers and embeddedServer:
        runServer()
    suite = get_suite()
    runner = unittest.TextTestRunner()
    runner.run(suite)
| [
"[email protected]"
] | |
fc8b120b544b0a09985d1ede8655658e9c345fc9 | e3081df016b73719b51d6ae02a5d2af54aa21a8f | /VAMPIRE/clusterSM.py | a0c1d40381be451525e2804a31e7dc12a232817f | [] | no_license | samuramirez/cellmigration | 2b4008d9ffc030a8ae03edd5b503f2b8b8a8e78d | af64059da5f85f32e070a2dd679323ff12fef872 | refs/heads/master | 2022-05-04T19:17:01.843695 | 2022-03-23T15:49:17 | 2022-03-23T15:49:17 | 237,447,373 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,496 | py | #!/usr/bin/env python
# internal libraries
from __future__ import division
from copy import deepcopy
import time
import os
from tkinter import END
from datetime import datetime
# external libraries
import numpy as np
from scipy import stats, cluster, spatial, special
from sklearn.cluster import KMeans
from sklearn import preprocessing
import matplotlib as mpl
mpl.use("TkAgg")
from matplotlib import pyplot as plt
def clusterSM(outpth, score, bdpc, clnum, pcnum=None, VamModel=None, BuildModel=None,
              condition=None, setID=None, entries=None):
    """Cluster PCA shape-mode scores into `clnum` shape modes and plot results.

    When BuildModel is truthy the Box-Cox/z-score normalisation parameters,
    the k-means centroids and the dendrogram linkage are fitted and stored
    in the VamModel dict; otherwise the previously fitted model is applied
    to the new scores.  Figures (registered objects, shape-mode dendrogram,
    abundance histogram) are written under `outpth`.

    Returns (IDX, IDX_dist, VamModel, goodness): per-object cluster labels
    (1-based, reordered to dendrogram leaf order), distance to the assigned
    centroid, the (possibly updated) model dict, and a softmax over the
    centroid-distance matrix.
    """
    realtimedate = datetime.now().strftime('%Y-%m-%d_%H-%M-%S')
    start = time.time()
    print('# clusterSM')
    if not isinstance(condition, str):
        condition = str(condition)
    # Destination folder for figures depends on build vs. apply mode.
    if BuildModel:
        figdst = os.path.join(*[outpth, entries['Model name'].get(), 'Example model figures'])
    else:
        figdst = os.path.join(outpth, 'Result based on ' + os.path.splitext(os.path.basename(entries['Model to apply'].get()))[0])
    if not os.path.exists(figdst):
        try:
            os.makedirs(figdst)
        except:
            # Report folder problems through the GUI status entry.
            entries['Status'].delete(0, END)
            entries['Status'].insert(0, 'Please choose the right folder')
    NN = 10  # number of example outlines drawn per shape mode
    if pcnum is None:
        pcnum = 20
    if BuildModel:
        VamModel['clnum'] = clnum
        VamModel['pcnum'] = pcnum
    else:
        clnum = VamModel['clnum']
        pcnum = VamModel['pcnum']
    cms00 = score[:, 0:pcnum]
    cms = deepcopy(cms00)
    if BuildModel:
        mincms = np.amin(cms, axis=0)
        VamModel['mincms'] = mincms
        VamModel['boxcoxlambda'] = np.zeros(len(cms.T))
        VamModel['testmean'] = np.zeros(len(cms.T))
        VamModel['teststd'] = np.zeros(len(cms.T))
    else:
        mincms = VamModel['mincms']
    # Per-PC Box-Cox transform (shifted positive) followed by z-scoring;
    # in apply mode the stored lambda/mean/std from the model are reused.
    for k in range(len(cms.T)):
        test = cms.T[k]
        test = test - mincms[k] + 1
        if BuildModel:
            test[test < 0] = 0.000000000001
            test, maxlog = stats.boxcox(test)
            test = np.asarray(test)
            VamModel['boxcoxlambda'][k] = maxlog
            VamModel['testmean'][k] = np.mean(test)
            VamModel['teststd'][k] = np.std(test)
            cms.T[k] = (test - np.mean(test)) / np.std(test)
        else:
            test[test < 0] = 0.000000000001
            test = stats.boxcox(test, VamModel['boxcoxlambda'][k])
            cms.T[k] = (test - VamModel['testmean'][k]) / VamModel['teststd'][k]
    cmsn = deepcopy(cms)
    if BuildModel:
        # Fit k-means on unit-normalised rows; assign on the raw rows.
        cmsn_Norm = preprocessing.normalize(cmsn)
        if isinstance(clnum, str):
            clnum = int(clnum)
        kmeans = KMeans(n_clusters=clnum, init='k-means++', n_init=3, max_iter=300).fit(
            cmsn_Norm)  # init is plus,but orginally cluster, not available in sklearn
        C = kmeans.cluster_centers_
        VamModel['C'] = C
        D = spatial.distance.cdist(cmsn, C, metric='euclidean')
        IDX = np.argmin(D, axis=1)
        IDX_dist = np.amin(D, axis=1)
    else:
        if isinstance(clnum, str):
            clnum = int(clnum)
        C = VamModel['C']
        D = spatial.distance.cdist(cmsn, C, metric='euclidean')
        # why amin? D shows list of distance to cluster centers.
        IDX = np.argmin(D, axis=1)
        IDX_dist = np.around(np.amin(D, axis=1), decimals=2)
    goodness = special.softmax(D)
    # Per-cluster x/y plotting offsets for laying shapes out in a row.
    offx, offy = np.meshgrid(range(clnum), [0])
    offx = np.multiply(offx, 1) + 1
    offx = offx[0] * 1 - 0.5
    offy = np.subtract(np.multiply(offy, 1), 1.5) + 1
    offy = offy[0]
    # define normalized colormap
    # Mean boundary (first half x, second half y) for each cluster.
    bdst0 = np.empty(len(bdpc.T))
    bdst = deepcopy(bdst0)
    for kss in range(clnum):
        c88 = IDX == kss
        bdpcs = bdpc[c88, :]
        mbd = np.mean(bdpcs, axis=0)
        bdst0 = np.vstack((bdst0, mbd))
    bdst0 = bdst0[1:]  # drop the uninitialised first row
    # dendrogram of the difference between different shape
    mpl.rcParams['lines.linewidth'] = 2
    if BuildModel:
        Y = spatial.distance.pdist(bdst0, 'euclidean')
        Z = cluster.hierarchy.linkage(Y, method='complete')  # 4th row is not in matlab
        Z[:, 2] = Z[:, 2] * 5  # multiply distance manually 10times to plot better.
        VamModel['Z'] = Z
    else:
        Z = VamModel['Z']
    cluster.hierarchy.set_link_color_palette(['k'])
    fig289, ax289 = plt.subplots(figsize=(6, 2), linewidth=2.0, frameon=False)
    plt.yticks([])
    R = cluster.hierarchy.dendrogram(Z, p=0, truncate_mode='mlab', orientation='bottom', ax=None,
                                     above_threshold_color='k')
    leaflabel = np.array(R['ivl'])
    dendidx = leaflabel
    cluster.hierarchy.set_link_color_palette(None)
    mpl.rcParams['lines.linewidth'] = 1
    plt.axis('equal')
    plt.axis('off')
    # Relabel clusters so their order follows the dendrogram leaves.
    IDXsort = np.zeros(len(IDX))
    for kss in range(clnum):
        c88 = IDX == int(dendidx[kss])
        IDXsort[c88] = kss
    IDX = deepcopy(IDXsort)
    fig922, ax922 = plt.subplots(figsize=(17, 2))
    fig291, ax291 = plt.subplots(figsize=(6, 3))
    for kss in range(int(max(IDX)) + 1):
        c88 = IDXsort == kss
        fss = 4  # shrink factor for drawing shapes side by side
        bdpcs = bdpc[c88]
        mbd = np.mean(bdpcs, axis=0)
        bdNUM = int(round(len(mbd) / 2))
        bdst = np.vstack((bdst, mbd))
        xaxis = np.add(np.divide(np.append(mbd[0:bdNUM], mbd[0]), fss), offx[kss]) * 10
        yaxis = np.add(np.divide(np.append(mbd[bdNUM:], mbd[bdNUM]), fss), offy[kss]) * 10
        plt.clf()
        ax289.plot(xaxis, yaxis, '-', linewidth=2)  # this is the shape of the dendrogram
        plt.axis('equal')
        plt.axis('off')
        # Draw up to NN randomly chosen member outlines for this cluster.
        sid = np.argsort(np.random.rand(sum(c88), 1), axis=0)
        if len(sid) < NN:
            enum = len(sid)
        else:
            enum = NN
        for knn in range(enum):
            x99 = bdpcs[sid[knn], np.append(range(bdNUM), 0)]
            y99 = bdpcs[sid[knn], np.append(np.arange(bdNUM, (bdNUM * 2), 1), bdNUM)]
            xax = np.add(np.divide(x99, fss), offx[kss])
            yax = np.add(np.divide(y99, fss), offy[kss])
            ax922.plot(xax, yax, 'r-', linewidth=1)
            ax922.axis('equal')
            ax922.axis('off')
    if BuildModel:
        ax922.set_ylim(ax922.get_ylim()[::-1])
        # Avoid overwriting previous outputs: add a timestamp when the
        # default file names already exist.
        if os.path.exists(os.path.join(figdst, "Registered objects.png")):
            f1 = os.path.join(figdst, "Registered objects "+realtimedate+".png")
            f2 = os.path.join(figdst, "Shape mode dendrogram.png "+realtimedate+".png")
        else:
            f1 = os.path.join(figdst, "Registered objects.png")
            f2 = os.path.join(figdst, "Shape mode dendrogram.png")
        fig922.savefig(f1, format='png', transparent=True)
        fig289.savefig(f2, format='png', transparent=True)
    IDX = IDX + 1  # report clusters 1-based
    n, bins, patches = plt.hist(IDX, bins=range(clnum + 2)[1:])
    fig22, ax22 = plt.subplots(figsize=(10, 5))
    # Convert counts to percentage abundance per shape mode.
    n = np.divide(n, np.sum(n))
    n = np.multiply(n, 100)
    n = np.around(n, 2)
    height = n
    ax22.bar(x=(np.delete(bins, 0) - 1) / 2, height=height, width=0.4, align='center', color=(0.2, 0.4, 0.6, 1),
             edgecolor='black')
    ax22.set_ylabel('Abundance %', fontsize=15, fontweight='bold')
    ax22.set_xlabel('Shape mode', fontsize=15, fontweight='bold')
    # only for paper
    ax22.set_ylim([0, np.max(height)+5])
    ax22.set_title('Shape mode distribution (N=' + str(len(IDX_dist)) + ')', fontsize=18, fontweight='bold')
    bartick = map(str, np.arange(int(np.max(IDX) + 1))[1:])
    ax22.set_xticks((np.arange(np.max(IDX) + 1) / 2)[1:])
    ax22.set_xticklabels(tuple(bartick), fontsize=13, fontweight='bold')
    ax22.yaxis.set_tick_params(labelsize=13)
    plt.setp(ax22.get_yticklabels(), fontweight="bold")
    for i, v in enumerate(height):
        ax22.text((i - 0.25 + 1) / 2, v + 0.25, str(np.around(v, decimals=1)), color='black', fontweight='bold', fontsize=13)
    for axis in ['top', 'bottom', 'left', 'right']:
        ax22.spines[axis].set_linewidth(3)
    if not BuildModel:
        if os.path.exists(os.path.join(figdst, 'Shape mode distribution_' + setID + '_' + condition + '.png')):
            f3 = os.path.join(figdst, 'Shape mode distribution_' + setID + '_' + condition + '_'+realtimedate+'.png')
        else:
            f3 = os.path.join(figdst, 'Shape mode distribution_' + setID + '_' + condition + '.png')
        fig22.savefig(f3, format='png', transparent=True)
    plt.close('all')
    end = time.time()
    print('For cluster, elapsed time is ' + str(end - start) + 'seconds...')
    return IDX, IDX_dist, VamModel, goodness
| [
"[email protected]"
] | |
8cce6883eff65047f33f3b771610a6d86e01321f | 20936bf43b550ff8514164738529d0f78b8d8c14 | /Zadanie 3/zad3_10.py | 937218b415392093de7106e192f6fa4b197e9985 | [] | no_license | kamilck13/Python2019 | 125f993324df5c37c4307be6db14d478de032678 | 88bcc231c662e29efd7a164ad3a6cc3c4f75b4a2 | refs/heads/master | 2020-08-21T19:58:35.515530 | 2020-02-12T19:40:44 | 2020-02-12T19:40:44 | 216,235,174 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,147 | py | #3.9
# The program assumes well-formed numerals; it translates every valid Roman
# numeral (I..MMMCMXCIX) to its Arabic value.
#
# The previous implementation kept a lookup table of every multi-symbol
# combination and scanned with longest-match.  It worked for valid numerals,
# but on any character that could never form a table entry (e.g. 'Q') the
# else-branch reset the accumulator without advancing the index, so the
# while-loop span forever.  This version uses the standard subtractive-pair
# algorithm and raises ValueError on unknown symbols instead of hanging.
def roman2int(tekst):
    """Convert a well-formed Roman numeral string to an integer.

    Args:
        tekst: the numeral, uppercase Roman digits only.

    Returns:
        The Arabic integer value.

    Raises:
        ValueError: if tekst contains a character that is not a Roman digit.
    """
    values = {'I': 1, 'V': 5, 'X': 10, 'L': 50, 'C': 100, 'D': 500, 'M': 1000}
    wynik = 0
    for i, znak in enumerate(tekst):
        try:
            v = values[znak]
        except KeyError:
            raise ValueError("not a Roman numeral symbol: %r" % znak)
        # A smaller value directly before a larger one is subtractive (IV=4).
        if i + 1 < len(tekst) and values.get(tekst[i + 1], 0) > v:
            wynik -= v
        else:
            wynik += v
    return wynik
if __name__ == "__main__":
    # Simple REPL: convert numerals until the user types 'stop'.
    # (Prompts are in Polish: "enter a Roman-numeral value" /
    # "the number in the Arabic system".)
    while True:
        tekst = input("Podaj wartosc w systemie rzymskim: ")
        if tekst == 'stop': break
        x = roman2int(tekst)
        print("liczba w systemie arabskim: ", x)
| [
"[email protected]"
] | |
74aa84bea1f33f8caee9faeb497d439917615c92 | 689b072a82862ec079d3490d131533517d551b06 | /mcan/utils.py | 9f2f00260edf39ca068d91d48264cb61a4ca5af1 | [] | no_license | BarCodeReader/MCAN | bd67b70b1ca5ecd6dce8c16e69bd65d063524701 | adad619b92c63a0829c7f1314e5ffc728be8697d | refs/heads/master | 2022-01-24T04:15:23.442051 | 2019-06-03T03:28:28 | 2019-06-03T03:28:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 355 | py | import skimage
from skimage import measure
import numpy as np
def psnr(im1, im2):
def im2double(im):
min_val, max_val = 0, 255
out = (im.astype(np.float64)-min_val) / (max_val-min_val)
return out
im1 = im2double(im1)
im2 = im2double(im2)
psnr = skimage.measure.compare_psnr(im1, im2, data_range=1)
return psnr
| [
"[email protected]"
] | |
5117270c2355a3fed6020f437ca52e12b9b6122e | a543a24f1b5aebf500c2200cd1d139435948500d | /satory074/abc231/d/main.py | 3f5d5f0b8ce8511ac0c5d49cf2197084b908b845 | [] | no_license | HomeSox/AtCoder | 18c89660762c3e0979596f0bcc9918c8962e4abb | 93e5ffab02ae1f763682aecb032c4f6f4e4b5588 | refs/heads/main | 2023-09-05T03:39:34.591433 | 2023-09-04T13:53:36 | 2023-09-04T13:53:36 | 219,873,371 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 592 | py | import sys
sys.setrecursionlimit(10**6)
N, M = map(int, input().split())
par = [-1 for _ in range(10 ** 5 +1)]
def root(x):
if par[x] < 0:
return x
par[x] = root(par[x])
return par[x]
def unite(x, y):
x = root(x)
y = root(y)
if x == y:
return
par[x] += par[y]
par[y] = x
degree = [0] * (10 ** 5 + 1)
for _ in range(M):
a, b = map(int, input().split())
degree[a] += 1
degree[b] += 1
if root(a) == root(b):
print('No')
exit()
unite(a, b)
if max(degree) <= 2:
print('Yes')
else:
print('No')
| [
"[email protected]"
] | |
1b0112a85f56594fa5eaa5cb3dd1c128f188dbe5 | 1dc1ba80709ed2d2c9f9ff0598bca5ae2ae22d7b | /block.py | 911532bff618736b77d44806a8c95f4ad7ee3200 | [] | no_license | ashok133/KelleyChain | 62a8eab821ab5ef39cbe87e15153c86522381df9 | 920a9f131203661c1443f37c67a6a85ce326194c | refs/heads/master | 2020-03-19T10:42:24.295119 | 2018-06-06T22:58:46 | 2018-06-06T22:58:46 | 136,394,526 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,420 | py | import hashlib as hasher
import datetime as dt
KelleyChain = []
class KelleyCoin:
# init for creating a KelleyCoin object
def __init__(self, index, ts, value, prev_hash):
self.index = index
self.ts = ts
self.value = value
self.prev_hash = prev_hash
self.hash = self.hasher()
# method to return hash of value passed
def hasher(self):
sha = hasher.sha256()
value = str(self.index) + str(self.ts) + str(self.value) + str(self.prev_hash)
sha.update(value)
return sha.hexdigest()
# creating the genesis block
def genesis():
return KelleyCoin(0,dt.datetime.now(), 1, '0')
# creating a new block
def gen_new_block(prev_block):
new_index = prev_block.index + 1
new_ts = dt.datetime.now()
new_value = prev_block.value * 2
# the hash value here is the hash of previous block, to identify which block is where in the chain.
new_hash = prev_block.hash
return KelleyCoin(new_index, new_ts, new_value, new_hash)
# Initiating the blockchain with genesis block
KelleyChain.append(genesis())
previous_block = KelleyChain[0]
# number of blocks(coins) to be added after genesis
no_coins = input('How many coins would you like? ')
# add the coins in the chain
for i in range(0, no_coins):
new_coin = gen_new_block(previous_block)
KelleyChain.append(new_coin)
previous_block = new_coin
print "Added coin number", new_coin.index, "with value = $",new_coin.value
print "Hash value = ",new_coin.hash
| [
"[email protected]"
] | |
6bf3b982e3228dc763ab14b84530653fdead0ea9 | 5490b61d59191d3e256bde4ca9342eeff102d11a | /space/game_stats.py | e98f80e3ebc64ed9ae510c35e0be97b054c80d58 | [] | no_license | 91nickel/python | 9380b793792cf9ccf913fac356c8d046e5cedd8c | 7a42b23c49287df35bb8cb165b1ff6db65cfe718 | refs/heads/master | 2022-11-14T20:40:50.262905 | 2020-07-06T09:49:34 | 2020-07-06T09:49:34 | 277,503,277 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 621 | py | class GameStats:
def __init__(self, ai_game):
"""Инициация статистики"""
# Рекорд не должен сбрасываться
self.high_score = 0
self.settings = ai_game.settings
self.reset_stats()
# Игра запускается в неактивном состоянии
self.game_active = False
def reset_stats(self):
"""Инициализирует статистику, изменяющуюся в ходе игры"""
self.ships_left = self.settings.ship_limit
self.score = 0
self.level = 1
| [
"[email protected]"
] | |
4b2654e1a3a60d27ce231eafe8eef4f846d999a6 | 2d681847518351886bb0db850a2fb3db3a08a1ba | /Python/basics/conversion_m-cm.py | bbed48d02824b78454046b600b84c6f112922475 | [
"MIT"
] | permissive | mdoprog/Warehouse | 8b3fe2f98453422a1d2d57951cd7bec2cce431cc | 4afb88a32fc20b613d008b86371d505ddeb07e5a | refs/heads/master | 2023-01-03T03:50:06.069214 | 2020-10-17T00:18:11 | 2020-10-17T00:18:11 | 295,555,751 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 207 | py | # coding=utf8
# Entrada
metros = int (input("Informe a medida em metros: "))
# Processamento
centimetros = metros * 100
# Saída
print("{0} metro(s) em centímetros é: {1}cm ".format(metros, centimetros)) | [
"[email protected]"
] | |
fdc8d035a2eb1b715c747baa05916ccae0b8558e | 476415b07a8ab773ac240989b481464961119b6a | /Funcionalidades/Cuaderno_Notas/cuaderno.py | dfe118765fb22aafd2632615db582496d0639730 | [] | no_license | rtejada/Universidad-Python | c4d46a91eee2a15d3c72035aced9d28f1527fb69 | a2c8035f57f5ae269b1b968ef187180879d14163 | refs/heads/master | 2022-12-16T21:32:47.181638 | 2020-09-17T17:42:49 | 2020-09-17T17:42:49 | 284,633,477 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,701 | py | from nota import Nota
class Cuaderno:
'''Representa una coleccion de notas que pueden ser ertiquetadas,
modificadas, y se pueden buscar'''
def __init__(self):
'''Inicializa un cuader con una lista vacia'''
self.notas = []
def nueva_nota(self, memo, tags=''):
'''Crea una nueva nota y la añade a lista notas'''
self.notas.append(Nota(memo, tags))
def modificar_memo(self, nota_id, memo):
'''Encuentra la nota con la id dada y cambia
su memo al valor dado.'''
nota = self._encontrar_nota(nota_id)
if nota:
nota.memo = memo
return True
return False
def modificar_tags(self, nota_id, tags):
'''Encuentra la nota con la id dada y cambia
sus tags al valor dado'''
nota = self._encontrar_nota(nota_id)
if nota:
nota.tags = tags
return True
return False
#self._encontrar_nota(nota_id).tags = tags
def search(self, filter):
'''Encuentra todas las notas que concuerdan con el
filtro string dado'''
return [nota for nota in self.notas if nota.match(filter)]
def _encontrar_nota(self, nota_id):
'''Localiza la nota con la id dada.'''
for nota in self.notas:
if str(nota.id) == str(nota_id):
return nota
return None
'''
c = Cuaderno()
c.nueva_nota('Primera Nota')
c.nueva_nota('Segunda Nota')
print(c.notas)
print(c.notas[0].id)
print(c.notas[1].id)
print(c.notas[0].memo)
print(c.notas[1].memo)
print(c.search('Nota'))
print(c.modificar_memo(1, 'Tercera Nota'))
print(c.notas[0].memo)'''
| [
"[email protected]"
] | |
cd08f2014e74c3f1479fc22bc7f78c10e2213441 | d338baf86800e3096a9c3c6ab3b234dc3f2354d5 | /milena/milena/wsgi.py | 409f5be81f2852d0ba00d278835ebf3de6cf6c13 | [] | no_license | rafaelakiyoshi/mimi | 0810c627604df9f2dfb1fa7b179a22d4b96f6710 | 204e63a53e991a6c063a7c5b730cb9f3238481cd | refs/heads/master | 2021-01-18T19:04:42.408172 | 2017-04-01T05:22:47 | 2017-04-01T05:22:47 | 86,886,144 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 390 | py | """
WSGI config for milena project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "milena.settings")
application = get_wsgi_application()
| [
"[email protected]"
] | |
bb14bb04807e9cca2a3b9a6955dc349377e3c09e | 9e96855f3c4bab694e1f421283edba0a2d22e17d | /webapp/urls.py | 613aaf53b45c7f5d17f16d603ca23b3ba72d0c7b | [] | no_license | Gorira-Banana/web_app | 01bfadaa349110f82491d778539e869904fd5eee | 215d039ea23e4bb2454287f0feadf583c6c01eda | refs/heads/master | 2020-03-23T23:15:16.462196 | 2018-07-29T19:31:10 | 2018-07-29T19:31:10 | 141,983,665 | 0 | 0 | null | 2018-07-23T08:17:59 | 2018-07-23T08:17:59 | null | UTF-8 | Python | false | false | 810 | py | """webapp URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import include,url
from django.contrib import admin
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^apple/',include('apple.urls'))
]
| [
"[email protected]"
] | |
c8e3605deea6b08e522d95e4e4de9321515ea8f4 | e9539de5b8832e2a09365917fe201a945bf5d99b | /leetcode257.py | a337d1ce04c41835465bc7fa045d9b29077f0f17 | [] | no_license | JoshuaW1990/leetcode-session1 | 56d57df30b21ccade3fe54e3fd56a2b3383bd793 | 6fc170c04fadec6966fb7938a07474d4ee107b61 | refs/heads/master | 2021-09-20T16:18:15.640839 | 2018-08-12T09:40:51 | 2018-08-12T09:40:51 | 76,912,955 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 869 | py | # Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
def binaryTreePaths(self, root):
"""
:type root: TreeNode
:rtype: List[str]
"""
result = []
def helper(cur, node):
if node.left is None and node.right is None:
result.append(cur + [str(node.val)])
return
if node.left is not None:
helper(cur + [str(node.val)], node.left)
if node.right is not None:
helper(cur + [str(node.val)], node.right)
if root is None:
return result
helper([], root)
for i, item in enumerate(result):
result[i] = '->'.join(item)
return result
| [
"[email protected]"
] | |
7dee9e54db2f61507fa445a883e0e7cae74ee732 | b78d49994cdedc2bde154be69dc9133fba9760e4 | /schedule/model_managers.py | 9e5ebbd22c5a295e2fb91fea59160867514ca304 | [] | no_license | Marik28/univer_api | 40c8863e6568757f09be06fb853f74504f659adb | f9dfb82d795e4367b74dfd0d9f88a5fc082a5e53 | refs/heads/main | 2023-06-25T11:38:34.315989 | 2021-02-28T14:14:38 | 2021-02-28T14:14:38 | 342,927,896 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 735 | py | from django.db.models import Manager
class LessonManager(Manager):
def unarchived(self):
"""Пары, находящиеся в архиве"""
return self.filter(archived=False)
def archived(self):
"""Пары, не находящиеся в архиве"""
return self.filter(archived=True)
def numerator(self):
"""Пары, которые стоят по расписанию на числитель"""
return self.unarchived().exclude(parity='Знаменатель')
def denominator(self):
"""Пары, которые стоят по расписанию на числитель"""
return self.unarchived().exclude(parity='Числитель')
| [
"[email protected]"
] | |
b8cbb9dfd20b9ec3a2623bb6f7999a33b46231fe | 12555a9570d0b70ba4a95a53eeed0ab6f2961fe4 | /preprocess.py | 071a7c0e97056f42c6c9bad220b6aed4e5392339 | [] | no_license | terrypang/bupt-dr2 | 4f0428f8caa2291f36d3b001f51f94b4fb604f79 | 0762e527e6585d0aa81e390b2776c1b79cd02121 | refs/heads/master | 2021-01-01T04:48:15.595948 | 2017-03-19T13:50:07 | 2017-03-19T13:50:07 | 97,250,615 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,644 | py | from __future__ import division, print_function, absolute_import
import numpy as np
from glob import glob
from tqdm import tqdm
from keras.preprocessing import image
import os
import pandas as pd
from sklearn.utils.class_weight import compute_class_weight
from utils import utils
def main():
RAW_DIR = 'data/convert_1024/'
filenames = glob('{}/*'.format(RAW_DIR))
bs = 100
batches = [filenames[i * bs : (i + 1) * bs]
for i in range(int(len(filenames) / bs) + 1)]
STDs, MEANs = [], []
Us, EVs = [], []
for batch in tqdm(batches):
images = np.array([image.img_to_array(image.load_img(f, target_size=(512, 512))) for f in batch])
X = images.reshape(-1, 3)
STD = np.std(X, axis=0)
MEAN = np.mean(X, axis=0)
STDs.append(STD)
MEANs.append(MEAN)
X = np.subtract(X, MEAN)
X = np.divide(X, STD)
cov = np.dot(X.T, X) / X.shape[0]
U, S, V = np.linalg.svd(cov)
ev = np.sqrt(S)
Us.append(U)
EVs.append(ev)
print('STD')
print(np.mean(STDs, axis=0))
print('MEAN')
print(np.mean(MEANs, axis=0))
print('U')
print(np.mean(Us, axis=0))
print('EV')
print(np.mean(EVs, axis=0))
raw_images = [os.path.join(RAW_DIR, i) for i in os.listdir(RAW_DIR)]
names = [os.path.basename(x).split('.')[0] for x in raw_images]
labels = pd.read_csv('data/trainLabels.csv', index_col=0).loc[names].values.flatten()
cw = compute_class_weight('balanced', range(5), labels)
print(cw)
utils.data_split(datapath=RAW_DIR, test_size=0.1)
if __name__ == '__main__':
main() | [
"[email protected]"
] | |
6041f60f57429335fe494f76d88fe71aa0c4a946 | 8547ab61487580bbcbd82eca3700c256997116dd | /password_encrypter2.1/encrypt.py | 31d53359bdd54b58f5d18486c218665ef292d19f | [] | no_license | hishamanver/python_learn | ae933338018e8423c4924460d78ab2325234c8a2 | 71998e8a7237374fb59eed780a3dba749411246b | refs/heads/master | 2020-04-11T21:28:36.711517 | 2019-02-27T02:03:26 | 2019-02-27T02:03:26 | 162,106,874 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,134 | py | def generateEncryptionKey(key):
encryptionKey = []
for i in range(len(key)):
encryptionKey = encryptionKey + [ord(key[i])*((-1)**i)]
return encryptionKey
def generateEncryptionKeyOffset(key,start):
encryptionKeyOffset = start
for i in range(len(key)):
if encryptionKeyOffset<510:
encryptionKeyOffset += ord(key[i])
if encryptionKeyOffset < 255:
encryptionKeyOffset = generateEncryptionKeyOffset(key,encryptionKeyOffset)
return encryptionKeyOffset
key = input("Input key: ")
encryptionKey = generateEncryptionKey(key)
encryptionKeyOffset = generateEncryptionKeyOffset(key, 0)
# print(encryptionKey)
# print(encryptionKeyOffset)
fileIn = open("testdata.txt")
data = (fileIn.read())
fileIn.close()
dataOut = []
for j in range(0,len(data),len(encryptionKey)):
for k in range(len(encryptionKey)):
try:
dataOut = dataOut + [ord(data[j+k])+encryptionKey[k]+encryptionKeyOffset]
except:
continue
dataOut = ''.join(str(k) for k in dataOut)
fileOut =open("crypt.txt", "w")
fileOut.write(dataOut)
fileOut.close()
print(dataOut)
| [
"[email protected]"
] | |
467e9c9e3f405462c8b33857ec240a11f34ada30 | c5c63f1de81cc3776189a8bdace8eb97eb671b9b | /make_enums.py | a9d5859fe24d5a58f1ee69c397c70a4857ecc657 | [
"Artistic-2.0"
] | permissive | rec/make_pyx | e2034ac8fdc20b48ac8cd5c7598a35fb8769a95a | 61790d5bd2664604b9788cbed5b70a9edea0772c | refs/heads/master | 2020-05-25T19:54:28.651141 | 2016-06-11T14:58:52 | 2016-06-11T14:58:52 | 58,406,182 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,648 | py | def make_enums(enum_classes, header_file, namespace, classname):
enums, declarations = [], []
for ec in enum_classes:
enum_name, parts = (i.strip() for i in ec)
parts = [p.strip() for p in parts.split(',')]
if parts and parts[-1].startswith('last ='):
parts.pop()
enums.append((enum_name, parts))
main = ENUM_CLASS_TEMPLATE.format(**locals())
defs = (' cdef %s %s' % (enum_name, p) for p in parts)
declarations.append(main + '\n'.join(defs))
decl = '\n\n'.join(declarations)
if decl:
decl += '\n'
enum_pyx = []
enum_types = {}
for name, values in enums:
enum_types[name] = set()
values = ', '.join("'%s'" % v for v in values)
enum_pyx.append(' %s_NAMES = %s' % (
name.upper(), values))
enum_pyx = '\n'.join(enum_pyx)
if enum_pyx:
enum_pyx = '\n%s\n' % enum_pyx
return decl, enum_pyx, enum_types
ENUM_CLASS_HEADER_TEMPLATE = """\
cdef extern from "<{header_file}>" namespace "{namespace}::{classname}":
"""
ENUM_CLASS_ENUM_TEMPLATE = """\
cdef cppclass {enum_name}:
pass
"""
ENUM_CLASS_NAME_TEMPLATE = """\
cdef extern from "<{header_file}>" namespace "{namespace}::{classname}::{enum_name}":
"""
ENUM_CLASS_TEMPLATE = """\
cdef extern from "<{header_file}>" namespace "{namespace}::{classname}":
cdef cppclass {enum_name}:
pass
cdef extern from "<{header_file}>" namespace "{namespace}::{classname}::{enum_name}":
"""
ENUM_CLASS_TEMPLATE = (
ENUM_CLASS_HEADER_TEMPLATE +
ENUM_CLASS_ENUM_TEMPLATE +
'\n' +
ENUM_CLASS_NAME_TEMPLATE)
| [
"[email protected]"
] | |
c326fc55d5140ef5f6cb40aa33c3f520b8b1e01c | 3b126fe5380cf6974305e931be0a44af9d3af83c | /worldometersCrawler/spiders/co2_emissions.py | 5baa979b9c332889870e8c5e54f60c26c2f60ec4 | [] | no_license | Noeti/worldometers-crawler | 621145daba0b9c74e85b5e20df3f0457345e49fb | 23b6135fe51d78c0127cd02a876c53e4a6c735e0 | refs/heads/main | 2023-05-30T21:26:34.272806 | 2021-06-25T07:55:27 | 2021-06-25T07:55:27 | 379,367,189 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,133 | py | import scrapy
from ..utils import clean_commas
class Co2EmissionsSpider(scrapy.Spider):
name = 'co2_emissions'
allowed_domains = ['www.worldometers.info']
start_urls = ['https://www.worldometers.info/co2-emissions/co2-emissions-by-country']
def parse(self, response):
table = response.xpath('//table[@id="example2"]/tbody/tr')
for row in table:
country = row.xpath('.//td/a/text()').get()
co2_emissions = clean_commas(row.xpath('.//td[3]/text()').get())
one_year_change = row.xpath('.//td[4]/text()').get()
population_2016 = clean_commas(row.xpath('.//td[5]/text()').get())
per_capita = row.xpath('.//td[6]/text()').get()
share_of_world = row.xpath('.//td[7]/text()').get()
yield {
'name': country,
'co2_emissions_tons_2016': co2_emissions,
'co2_emiss_one_year_change': one_year_change,
'population_2016': population_2016,
'co2_emiss_per_capita': per_capita,
'country_share_of_world_co2': share_of_world
}
| [
"[email protected]"
] | |
eace0e45336cd4e4c5f4cb0395395d66f7ad620d | 4e369614a7435e7859600c2bd8fd95379a4d52d1 | /HelloWorld.py | 8d36c06abaf53df37e11096c0a869ed07e9ee9a3 | [] | no_license | MadanHar/python | 2fa8f647a985c6b23434edd8b39346560762e4bf | dc5ec3cc68c9c401a8c6c91ba698fa55c84f343c | refs/heads/master | 2020-04-19T07:51:20.706584 | 2019-01-29T00:19:39 | 2019-01-29T00:19:39 | 168,059,326 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 218 | py |
greeting = "Hello"
#name = input("Please enter your name ")
#print(greeting +" " + name)
splitString = "This string has been \n split over \n several\nlines"
print(splitString)
today = "friday"
print("fri" in today) | [
"[email protected]"
] | |
11f96f363a4c550e8ad97f1b6840ff3a79a12c05 | e5a1e680f0d0422c84e2affc17e9f9abe1a8a024 | /comments/migrations/0003_auto_20160917_1802.py | bd4b1d43ae49d9ff51fa8c168efe090d50803e0f | [] | no_license | rayiyaoyao/blog | 978f42cfbb4e9bf7f746ace5f74314bb56bfd79f | 2be576cc6f977ae974c31be00902b858a2c3d7ec | refs/heads/master | 2020-07-01T03:43:25.137966 | 2016-11-22T11:18:27 | 2016-11-22T11:18:27 | 74,100,052 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 687 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.5 on 2016-09-17 10:02
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('comments', '0002_auto_20160917_1226'),
]
operations = [
migrations.AlterModelOptions(
name='comment',
options={'ordering': ['-timestamp']},
),
migrations.AddField(
model_name='comment',
name='parent',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='comments.Comment'),
),
]
| [
"[email protected]"
] | |
3e008e77c888a0bd9dac49b61201295d4ca312f2 | 4633791da7676bdc98c67ba01a5608a2f09258bd | /ESP4T-2021/Templates/Day 4/Python/Digital Speedometer/Speedometer_Template.py | 32f88fa340ca0a8bdd247e42de84e2b733e9efe0 | [] | no_license | jlundholm/RaspPi | 63d7f452f736a92a978480d86a5876cbecc48f9f | af8e7305e244ff1265e1e502e66ed7d6f945e6a7 | refs/heads/main | 2023-06-29T10:18:06.416361 | 2021-07-22T21:23:26 | 2021-07-22T21:23:26 | 387,916,909 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,440 | py | #Code for Speedometer module
#Import warnings to ignore warnings
import warnings
#Import Adafruit_CharLCD library (Adafruit_CharLCD.py)
import Adafruit_CharLCD as LCD
#Import DistanceSensor, LED, OutputDevice from gpiozero library
from gpiozero import DistanceSensor, LED, OutputDevice
#Import time to pause or delay the program
from time import sleep
#LCD display pin setup:
lcd_rs = 25
lcd_en = 24
lcd_d4 = 23
lcd_d5 = 17
lcd_d6 = 18
lcd_d7 = 22
lcd_backlight = 2
# Define LCD column and row size for 16x2 LCD.
lcd_columns = 16
lcd_rows = 2
#Initialize the lcd object
lcd = LCD.Adafruit_CharLCD(lcd_rs,lcd_en,lcd_d4,lcd_d5,lcd_d6,lcd_d7,lcd_columns,lcd_rows,lcd_backlight)
if __name__ == '__main__':
#Ignore the warnings when echo doesn't receive the input
warnings.filterwarnings("ignore")
#Set the pin for Ultrasonic sensor where echo is in pin 6 and trigger is in pin 5
# -->
#Set the red LED to pin 4
# -->
#Set the red LED to pin 13
# -->
#Set the vibration module to pin 26
# -->
#Display the message on the LCD screen
lcd.message('Starting the\nSpeedometer')
#Pause the program for a while
sleep(1)
#Clear the message on the LCD screen
lcd.clear()
#Display the message on the LCD screen
lcd.message('CTRL + X -> Exit')
#Pause the program for a while
sleep(1)
#Clear the message on the LCD screen
lcd.clear()
try:
while True:
#Find the distance in cm by multiplying with 100
Distance1 = float(sensor.distance * 100)
#Time interval of 0.5 seconds
sleep(0.5)
#Again, find the next distance in cm by multiplying with 100
Distance2 = float(sensor.distance*100)
#Calculate the speed by subtracting two distance and dividing by time interval
# -->
#Display the message on LCD screen
lcd.message('Speed:')
#Round the speed to two decimal place and convert it to string for displaying
lcd.message(str(round(speed,2)))
#Display the SI unit of speed
lcd.message(' cm/s')
#Pause the program for a while
sleep(0.35)
#Condition when speed is less than 30.0 cm/s
if (speed < 30.0):
#Turn off warning red light
# -->
#Turn on safe green light
# -->
#Condition when speed is greater than 30.0 cm/s
else:
#Turn off safe green light
green.off()
#Turn on vibration
vibration.on()
#Clear the LCD and display warning message
lcd.clear()
lcd.message('LIMIT REACHED!\nREDUCE SPEED')
#Blinking the warning red LED
for i in range (4):
red.on()
sleep(0.2)
red.off()
sleep(0.2)
#Clear the LCD
lcd.clear()
#Turn off vibration
vibration.off()
#Completely turn off vibration
vibration.off()
#Clear the screen
lcd.clear()
#When CTRL + X is entered
except KeyboardInterrupt:
#Turn off everything
vibration.off()
lcd.clear()
red.off()
green.off()
print('Exiting') | [
"[email protected]"
] | |
ccfeb33b542d5a5f8190aee7931880440b221942 | 0e215b6cfcb5f5d3aefb6c12e2c91e6687609736 | /novice/04-01/todolist/task/models.py | c743334f04776d99cab556c0a8bd11559e65d0a1 | [] | no_license | Idfa001/praxis-academy | e266963db45d5b2c623f51afd9893861db7c357f | 13da3b696b8575f961ea787f21e59e17c802f920 | refs/heads/master | 2023-01-22T15:13:45.497082 | 2020-11-17T13:50:12 | 2020-11-17T13:50:12 | 287,203,217 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 601 | py | from django.db import models
# Create your models here.
class Task(models.Model):
name = models.CharField(max_length=255)
genre = models.CharField(max_length=255)
artis = models.CharField(max_length=255)
th = models.CharField(max_length=255)
lirik = models.CharField(max_length=255)
link = models.CharField(max_length=255)
class Movie(models.Model):
title = models.CharField(max_length=255)
gen = models.CharField(max_length=255)
rate = models.CharField(max_length=255)
years = models.CharField(max_length=255)
des = models.CharField(max_length=255) | [
"[email protected]"
] | |
2b83d02540acc758b0494239f5a7bff3d0febd56 | 3637ecfdd1d72e5d185c5750e8958726d209f408 | /bookaudi/settings.py | 838c2fc342fba284f4631575f24c0526242dee67 | [] | no_license | gaurav3030/auditorium | 9500adf7012d570e8891d36c5b8d30d962ee9287 | d34bbaf97b786a6549ccc63e3b3c13428951d1ec | refs/heads/master | 2020-07-31T06:30:58.619283 | 2019-09-24T06:33:15 | 2019-09-24T06:33:15 | 210,516,223 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,153 | py | """
Django settings for bookaudi project.
Generated by 'django-admin startproject' using Django 2.2.5.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '22w$srb4rtu7syy)ly!a6wa$uk8c2s_gi1-dc1&p*+g7jzdx!9'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'booksite.apps.BooksiteConfig',
]
STATIC_URL = '/static/'
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'bookaudi.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'bookaudi.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
| [
"[email protected]"
] | |
3888552ed410581f959b7b2a94c250f2d5a6c385 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02594/s218362051.py | 9bf990062fc8899082dd871108c2046d91e7709c | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 70 | py | x = input()
x=int(x)
if (x<30):
print("No")
else:
print("Yes") | [
"[email protected]"
] | |
fa7feba504552bf6887288c421f6b17eb24aae55 | 36837f70b26579f6f31bbc1757b4aa3e09d3ce0c | /test/test_jieba.py | c4e996d5d0449ee52d887eb13c8500cd662c1ef2 | [
"BSD-3-Clause"
] | permissive | dzc0d3r/flask-msearch | bee2cb920b1d9f26686c4f134ce4073ba7901142 | 8fb3fd441f251d42558e775cce5ac04a0a5a4e92 | refs/heads/master | 2021-06-13T02:29:26.811313 | 2020-03-18T15:58:43 | 2020-03-18T15:58:43 | 254,414,109 | 1 | 0 | BSD-3-Clause | 2020-04-09T15:48:24 | 2020-04-09T15:48:24 | null | UTF-8 | Python | false | false | 2,523 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# ********************************************************************************
# Copyright © 2018 jianglin
# File Name: test_jieba.py
# Author: jianglin
# Email: [email protected]
# Created: 2018-05-18 13:22:46 (CST)
# Last Update: Tuesday 2018-12-18 11:32:46 (CST)
# By:
# Description:
# ********************************************************************************
from test import (SearchTestBase, mkdtemp, Flask, SQLAlchemy, Search, unittest,
ModelSaveMixin)
from jieba.analyse import ChineseAnalyzer
titles = [
"买水果然后来世博园。",
"The second one 你 中文测试中文 is even more interesting! 吃水果",
"吃苹果",
"吃橘子",
]
class TestSearch(SearchTestBase):
def setUp(self):
class TestConfig(object):
SQLALCHEMY_TRACK_MODIFICATIONS = True
SQLALCHEMY_DATABASE_URI = 'sqlite://'
DEBUG = True
TESTING = True
MSEARCH_INDEX_NAME = mkdtemp()
MSEARCH_BACKEND = 'whoosh'
self.app = Flask(__name__)
self.app.config.from_object(TestConfig())
self.db = SQLAlchemy(self.app)
self.search = Search(self.app, db=self.db, analyzer=ChineseAnalyzer())
db = self.db
class Post(db.Model, ModelSaveMixin):
__tablename__ = 'basic_posts'
__searchable__ = ['title', 'content']
id = db.Column(db.Integer, primary_key=True)
title = db.Column(db.String(49))
content = db.Column(db.Text)
def __repr__(self):
return '<Post:{}>'.format(self.title)
self.Post = Post
with self.app.test_request_context():
self.db.create_all()
for (i, title) in enumerate(titles, 1):
post = self.Post(title=title, content='content%d' % i)
post.save(self.db)
def test_basic_search(self):
with self.app.test_request_context():
results = self.Post.query.msearch('水果').all()
self.assertEqual(len(results), 2)
results = self.Post.query.msearch('苹果').all()
self.assertEqual(len(results), 1)
results = self.Post.query.msearch('世博园').all()
self.assertEqual(len(results), 1)
if __name__ == '__main__':
suite = unittest.TestLoader().loadTestsFromNames([
'test_jieba.TestSearch',
])
unittest.TextTestRunner(verbosity=1).run(suite)
| [
"[email protected]"
] | |
4422e894f143c06a757ce613f676b66b6ab5b343 | 53dc2bac5b7eebd270f566d8a507be19f0dcb35a | /AI-work/sudoku.py | fbb5f805ee8887e5f8685e577c036934cb072c5d | [] | no_license | zetlan/dgz | eeaa08047032457b0273eb3a73aa97d9882c6a0f | 38fe658baab685114a0b2b818b5f52c7271ac011 | refs/heads/master | 2023-01-23T09:17:35.762713 | 2023-01-16T22:26:27 | 2023-01-16T22:26:27 | 91,821,079 | 3 | 2 | null | null | null | null | UTF-8 | Python | false | false | 41 | py | #Name: REDACTED
#Date: November-11-2021
| [
"[email protected]"
] | |
eb2155693700ae589cb897972606b032e528eb04 | 55e841a3df04476771622444959b4a140ca173ce | /ddpg/log-files/Hopper-v1/no-of-update_20/1/ddpg.py | 6883ee62cf35c0bd8bf4d395a2ff2f72cccd9145 | [] | no_license | Patel-Dhaval-M/MSC_project | e33fa5a083931841a602070c4967d116da117fd2 | 1ad02052e8f8cd2cb985d75cca19591dad9d54e4 | refs/heads/master | 2020-03-27T01:30:34.087131 | 2018-08-22T14:21:16 | 2018-08-22T14:21:16 | 145,717,600 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 21,806 | py | """
Implementation of DDPG - Deep Deterministic Policy Gradient
Algorithm and hyperparameter details can be found here:
http://arxiv.org/pdf/1509.02971v2.pdf
The algorithm is tested on the Pendulum-v0 OpenAI gym task
and developed with tflearn + Tensorflow
Author: Patrick Emami
"""
import tensorflow as tf
import numpy as np
import gym
from gym import wrappers
import tflearn
import argparse
import pprint as pp
import os
from datetime import datetime
from replay_buffer import ReplayBuffer
import csv
import glob
import shutil
# ===========================
# Actor and Critic DNNs
# ===========================
class ActorNetwork(object):
    """
    Deterministic policy network: maps a state to an action.

    A tanh output layer keeps every action component inside
    [-action_bound, action_bound]. A second, architecturally identical
    "target" network is maintained and slowly tracked toward the online
    network via soft updates controlled by tau, as prescribed by DDPG.
    """

    def __init__(self, sess, state_dim, action_dim, action_bound, learning_rate, tau, batch_size):
        self.sess = sess
        self.s_dim = state_dim
        self.a_dim = action_dim
        self.action_bound = action_bound
        self.learning_rate = learning_rate
        self.tau = tau
        self.batch_size = batch_size

        # Online actor network.
        self.inputs, self.out, self.scaled_out = self.create_actor_network()
        self.network_params = tf.trainable_variables()

        # Target actor network (same architecture, separate weights).
        # NOTE: relies on graph-creation order -- the target variables are
        # exactly those appended to tf.trainable_variables() after the
        # online network's variables, so these two lines must stay in this
        # order relative to the block above.
        self.target_inputs, self.target_out, self.target_scaled_out = self.create_actor_network()
        self.target_network_params = tf.trainable_variables()[
            len(self.network_params):]

        # Soft-update ops: target <- tau * online + (1 - tau) * target.
        self.update_target_network_params = [
            tgt.assign(tf.multiply(src, self.tau) +
                       tf.multiply(tgt, 1. - self.tau))
            for src, tgt in zip(self.network_params, self.target_network_params)
        ]

        # Gradient of Q with respect to the action, supplied by the critic.
        self.action_gradient = tf.placeholder(tf.float32, [None, self.a_dim])

        # Chain rule: d(objective)/d(theta) = dQ/da * da/dtheta.
        # The minus sign turns the optimizer's descent into ascent on Q.
        self.unnormalized_actor_gradients = tf.gradients(
            self.scaled_out, self.network_params, -self.action_gradient)
        # Average the summed batch gradient down to a per-sample scale.
        self.actor_gradients = [
            tf.div(grad, self.batch_size)
            for grad in self.unnormalized_actor_gradients
        ]

        # Apply the batch-averaged policy gradient with Adam.
        self.optimize = tf.train.AdamOptimizer(self.learning_rate).\
            apply_gradients(zip(self.actor_gradients, self.network_params))

        self.num_trainable_vars = len(
            self.network_params) + len(self.target_network_params)

    def create_actor_network(self):
        """Build the policy graph; return (input, raw_out, scaled_out)."""
        state = tflearn.input_data(shape=[None, self.s_dim])

        # Hidden widths scale with the problem dimensions (10x heuristic),
        # with the middle layer at the geometric mean of its neighbors.
        h1 = self.s_dim * 10
        h3 = self.a_dim * 10
        h2 = int(np.sqrt(h1 * h3))

        net = tflearn.fully_connected(state, h1)
        net = tflearn.activations.tanh(net)
        net = tflearn.fully_connected(net, h2)
        net = tflearn.activations.tanh(net)
        net = tflearn.fully_connected(net, h3)
        net = tflearn.activations.tanh(net)

        # Small uniform init on the final layer keeps initial actions near 0.
        final_init = tflearn.initializations.uniform(minval=-0.003, maxval=0.003)
        raw_out = tflearn.fully_connected(
            net, self.a_dim, activation='tanh', weights_init=final_init)
        # Rescale the tanh output from [-1, 1] to the action range.
        scaled_out = tf.multiply(raw_out, self.action_bound)
        return state, raw_out, scaled_out

    def train(self, inputs, a_gradient):
        """Run one policy-gradient step using the critic-supplied dQ/da."""
        feed = {self.inputs: inputs, self.action_gradient: a_gradient}
        self.sess.run(self.optimize, feed_dict=feed)

    def predict(self, inputs):
        """Return online-network actions for a batch of states."""
        return self.sess.run(self.scaled_out, feed_dict={self.inputs: inputs})

    def predict_target(self, inputs):
        """Return target-network actions for a batch of states."""
        return self.sess.run(self.target_scaled_out,
                             feed_dict={self.target_inputs: inputs})

    def update_target_network(self):
        """Soft-update target weights toward the online weights."""
        self.sess.run(self.update_target_network_params)

    def get_num_trainable_vars(self):
        """Total variable count across online and target networks."""
        return self.num_trainable_vars
class CriticNetwork(object):
    """
    Input to the network is the state and action, output is Q(s,a).
    The action must be obtained from the output of the Actor network.
    """
    def __init__(self, sess, state_dim, action_dim, learning_rate, tau, gamma, num_actor_vars):
        # num_actor_vars: number of trainable variables the actor created
        # first; used to slice this critic's variables out of the global list
        # (order-dependent: the actor must be built before this constructor).
        self.sess = sess
        self.s_dim = state_dim
        self.a_dim = action_dim
        self.learning_rate = learning_rate
        self.tau = tau
        self.gamma = gamma
        # Create the critic network
        self.inputs, self.action, self.out = self.create_critic_network()
        self.network_params = tf.trainable_variables()[num_actor_vars:]
        # Target Network
        self.target_inputs, self.target_action, self.target_out = self.create_critic_network()
        self.target_network_params = tf.trainable_variables()[(len(self.network_params) + num_actor_vars):]
        # Op for periodically updating target network with online network
        # weights: target <- tau * online + (1 - tau) * target
        self.update_target_network_params = \
            [self.target_network_params[i].assign(tf.multiply(self.network_params[i], self.tau) \
            + tf.multiply(self.target_network_params[i], 1. - self.tau))
                for i in range(len(self.target_network_params))]
        # Network target (y_i)
        self.predicted_q_value = tf.placeholder(tf.float32, [None, 1])
        # Define loss and optimization Op
        self.loss = tflearn.mean_square(self.predicted_q_value, self.out)
        self.optimize = tf.train.AdamOptimizer(
            self.learning_rate).minimize(self.loss)
        # Get the gradient of the net w.r.t. the action.
        # For each action in the minibatch (i.e., for each x in xs),
        # this will sum up the gradients of each critic output in the minibatch
        # w.r.t. that action. Each output is independent of all
        # actions except for one.
        self.action_grads = tf.gradients(self.out, self.action)
    def create_critic_network(self):
        # Build the Q-network; returns (state_inputs, action_inputs, q_out).
        hid1_size = self.s_dim * 10  # 10 empirically determined
        hid3_size = self.a_dim * 10  # 10 empirically determined
        inputs = tflearn.input_data(shape=[None, self.s_dim])
        action = tflearn.input_data(shape=[None, self.a_dim])
        net = tflearn.fully_connected(inputs, hid1_size)
        net = tflearn.activations.tanh(net)
        # Add the action tensor in the 2nd hidden layer
        # Use two temp layers to get the corresponding weights and biases
        t1 = tflearn.fully_connected(net, hid3_size)
        t2 = tflearn.fully_connected(action, hid3_size)
        net = tflearn.activation(
            tf.matmul(net, t1.W) + tf.matmul(action, t2.W) + t2.b, activation='tanh')
        # linear layer connected to 1 output representing Q(s,a)
        # Weights are init to Uniform[-3e-3, 3e-3]
        w_init = tflearn.initializations.uniform(minval=-0.003, maxval=0.003)
        out = tflearn.fully_connected(net, 1, weights_init=w_init)
        return inputs, action, out
    def train(self, inputs, action, predicted_q_value):
        """One TD-regression step toward predicted_q_value; returns (q_values, _)."""
        return self.sess.run([self.out, self.optimize], feed_dict={
            self.inputs: inputs,
            self.action: action,
            self.predicted_q_value: predicted_q_value
        })
    def predict(self, inputs, action):
        """Q(s, a) from the online network."""
        return self.sess.run(self.out, feed_dict={
            self.inputs: inputs,
            self.action: action
        })
    def predict_target(self, inputs, action):
        """Q(s, a) from the target network."""
        return self.sess.run(self.target_out, feed_dict={
            self.target_inputs: inputs,
            self.target_action: action
        })
    def action_gradients(self, inputs, actions):
        """dQ/da, consumed by the actor's policy-gradient step."""
        return self.sess.run(self.action_grads, feed_dict={
            self.inputs: inputs,
            self.action: actions
        })
    def update_target_network(self):
        """Soft-update target parameters toward the online parameters."""
        self.sess.run(self.update_target_network_params)
# Taken from https://github.com/openai/baselines/blob/master/baselines/ddpg/noise.py, which is
# based on http://math.stackexchange.com/questions/1287634/implementing-ornstein-uhlenbeck-in-matlab
class OrnsteinUhlenbeckActionNoise:
    """Temporally correlated (Ornstein-Uhlenbeck) exploration noise.

    Each call performs one Euler-Maruyama step of the OU process
    dx = theta * (mu - x) * dt + sigma * sqrt(dt) * N(0, 1),
    element-wise over the shape of ``mu``.
    """
    def __init__(self, mu, sigma=0.3, theta=.15, dt=1e-2, x0=None):
        self.theta = theta
        self.mu = mu
        self.sigma = sigma
        self.dt = dt
        self.x0 = x0
        self.reset()
    def __call__(self):
        # One discretized OU step: deterministic drift toward mu plus
        # Gaussian diffusion scaled by sqrt(dt).
        drift = self.theta * (self.mu - self.x_prev) * self.dt
        diffusion = self.sigma * np.sqrt(self.dt) * np.random.normal(size=self.mu.shape)
        self.x_prev = self.x_prev + drift + diffusion
        return self.x_prev
    def reset(self):
        # Restart the process at x0, or at zeros when no start point is given.
        if self.x0 is None:
            self.x_prev = np.zeros_like(self.mu)
        else:
            self.x_prev = self.x0
    def __repr__(self):
        return 'OrnsteinUhlenbeckActionNoise(mu={}, sigma={})'.format(self.mu, self.sigma)
# ===========================
# Tensorflow Summary Ops
# ===========================
def build_summaries():
    """Create TensorBoard scalar summaries for episode reward and max Q.

    Returns:
        (summary_ops, summary_vars): the merged summary op plus the two
        tf.Variables to assign before evaluating it.
    """
    episode_reward = tf.Variable(0.)
    tf.summary.scalar("Reward", episode_reward)
    episode_ave_max_q = tf.Variable(0.)
    tf.summary.scalar("Qmax Value", episode_ave_max_q)
    summary_vars = [episode_reward, episode_ave_max_q]
    # merge_all() picks up every summary registered in the default graph.
    summary_ops = tf.summary.merge_all()
    return summary_ops, summary_vars
class Logger(object):
    """ Simple training logger: saves to file and optionally prints to stdout """
    def __init__(self, logname, args, now):
        """
        Args:
            logname: name for log (e.g. 'Hopper-v1')
            args: dict of run arguments; 'no_of_updates' and 'random_seed'
                are used to build a unique log directory
            now: unique sub-directory name (e.g. date/time string); currently
                not used in the path but kept for interface compatibility
        """
        # str() guards against the argparse defaults, which are ints and
        # previously made the string concatenation raise TypeError.
        path = os.path.join('log-files', logname,
                            "no-of-update_" + str(args["no_of_updates"]),
                            str(args["random_seed"]))
        os.makedirs(path)  # intentionally fails if this run directory already exists
        filenames = glob.glob('*.py')  # put copy of all python files in log_dir
        for filename in filenames:     # for reference
            shutil.copy(filename, path)
        path = os.path.join(path, 'log.csv')
        self.write_header = True
        self.log_entry = {}
        self.f = open(path, 'w')
        self.writer = None  # DictWriter created with first call to write() method
    def write(self, display=True):
        """ Write 1 log entry to file, and optionally to stdout
        Log fields preceded by '_' will not be printed to stdout
        Args:
            display: boolean, print to stdout
        """
        if display:
            self.disp(self.log_entry)
        if self.write_header:
            # The first entry's keys fix the CSV column set for the whole run.
            fieldnames = [x for x in self.log_entry.keys()]
            self.writer = csv.DictWriter(self.f, fieldnames=fieldnames)
            self.writer.writeheader()
            self.write_header = False
        self.writer.writerow(self.log_entry)
        self.log_entry = {}
    @staticmethod
    def disp(log):
        """Print metrics to stdout (expects 'Total_Episode' and 'Target_Reward' keys)"""
        log_keys = [k for k in log.keys()]
        log_keys.sort()
        print('***** Episode {}, Mean R = {:.1f} *****'.format(log['Total_Episode'],
                                                               log['Target_Reward']))
        print('\n')
    def log(self, items):
        """ Update fields in log (does not write to file, used to collect updates.
        Args:
            items: dictionary of items to update
        """
        self.log_entry.update(items)
    def close(self):
        """ Close log file - log cannot be written after this """
        self.f.close()
def run_episode(env, actor, animate=False):
    """ Run single episode with option to animate
    Args:
        env: ai gym environment
        actor: actor network; its target policy is used greedily
            (no exploration noise) to pick actions
        animate: boolean, True uses env.render() method to animate episode
    Returns: 4-tuple of NumPy arrays
        observes: shape = (episode len, obs_dim)
        actions: shape = (episode len, act_dim)
        rewards: shape = (episode len,)
        unscaled_obs: useful for training scaler, shape = (episode len, obs_dim)
    """
    obs = env.reset()
    observes, actions, rewards, unscaled_obs = [], [], [], []
    done = False
    step = 0.0
    while not done:
        if animate:
            env.render()
        unscaled_obs.append(obs)
        observes.append(obs)
        # Deterministic (target-network) action, shaped (1, act_dim).
        action = actor.predict_target(np.reshape(obs, (1, actor.s_dim))).reshape((1, -1)).astype(np.float32)
        actions.append(action)
        obs, reward, done, _ = env.step(np.squeeze(action, axis=0))
        if not isinstance(reward, float):
            # np.asscalar() was removed from NumPy; .item() handles both
            # NumPy scalar types and size-1 arrays.
            reward = np.asarray(reward).item()
        rewards.append(reward)
        step += 1e-3  # increment time step feature
    return (np.concatenate(observes), np.concatenate(actions),
            np.array(rewards, dtype=np.float64), np.concatenate(unscaled_obs))
def run_policy(env, actor, logger, episodes):
    """ Evaluate the deterministic (target) policy over several episodes.
    Args:
        env: ai gym environment
        actor: actor network whose target policy is rolled out
        logger: logger object (unused here, kept for interface compatibility)
        episodes: total episodes to run
    Returns:
        Mean undiscounted episode return over all rollouts.
    """
    episode_returns = []
    for _ in range(episodes):
        _observes, _actions, rewards, _unscaled = run_episode(env, actor)
        episode_returns.append(rewards.sum())
    return np.mean(episode_returns)
# ===========================
# Agent Training
# ===========================
def train(sess, env, args, actor, critic, actor_noise, logger, monitor_env):
    """DDPG training loop.

    Args:
        sess: TF session; all variables are initialized here.
        env: training gym environment.
        args: dict of hyper-parameters (see the argparse setup below).
        actor, critic: network objects defined above.
        actor_noise: callable exploration-noise process (OU).
        logger: Logger receiving periodic evaluation results.
        monitor_env: separate environment used for evaluation rollouts.
    """
    sess.run(tf.global_variables_initializer())
    # Initialize target network weights
    actor.update_target_network()
    critic.update_target_network()
    # Initialize replay memory
    replay_buffer = ReplayBuffer(int(args['buffer_size']), int(args['random_seed']))
    env._max_episode_steps = int(args['max_episode_len'])
    for i in range(int(args['max_episodes'])):
        s = env.reset()
        ep_reward = 0
        ep_ave_max_q = 0
        while True:
            if args['render_env']:
                env.render()
            # Exploration: deterministic policy action plus OU noise.
            a = actor.predict(np.reshape(s, (1, actor.s_dim))) + actor_noise()
            s2, r, terminal, info = env.step(a[0])
            replay_buffer.add(np.reshape(s, (actor.s_dim,)), np.reshape(a, (actor.a_dim,)), r,
                              terminal, np.reshape(s2, (actor.s_dim,)))
            # Keep adding experience to the memory until
            # there are at least minibatch size samples
            if replay_buffer.size() > int(args['minibatch_size']):
                # Optionally run several gradient updates per environment step.
                for _ in range(int(args["no_of_updates"])):
                    s_batch, a_batch, r_batch, t_batch, s2_batch = \
                        replay_buffer.sample_batch(int(args['minibatch_size']))
                    # Calculate targets: y = r + lambda * gamma * Q'(s2, mu'(s2)),
                    # bootstrapping only on non-terminal transitions.
                    target_q = critic.predict_target(
                        s2_batch, actor.predict_target(s2_batch))
                    y_i = []
                    for k in range(int(args['minibatch_size'])):
                        if t_batch[k]:
                            y_i.append(r_batch[k])
                        else:
                            y_i.append(r_batch[k] + args['lambda'] * critic.gamma * target_q[k])
                    # Update the critic given the targets
                    predicted_q_value, _ = critic.train(
                        s_batch, a_batch, np.reshape(y_i, (int(args['minibatch_size']), 1)))
                    ep_ave_max_q += np.amax(predicted_q_value)
                    # Update the actor policy using the sampled gradient
                    a_outs = actor.predict(s_batch)
                    grads = critic.action_gradients(s_batch, a_outs)
                    actor.train(s_batch, grads[0])
                    # Update target networks
                    actor.update_target_network()
                    critic.update_target_network()
            s = s2
            ep_reward += r
            if terminal:
                # Every 20 episodes, evaluate the noise-free target policy
                # over 10 rollouts and log the mean return.
                if i % 20 == 0:
                    mean_reward = run_policy(monitor_env, actor, logger, 10)
                    logger.log({'Target_Reward': int(mean_reward),
                                'Total_Episode': i
                                })
                    logger.write(display=True)
                break
def main(args):
    """Wire up the environment, networks and noise process, then run DDPG.

    Args:
        args: dict of run arguments (see the argparse setup in __main__).
    """
    now = datetime.utcnow().strftime("%b_%d_%H_%M_%S")
    # str() guards against the argparse default, which is an int and would
    # otherwise make the "no-of-update_" concatenation raise TypeError.
    monitor_dir = os.path.join('videos', args['env'],
                               "no-of-update_" + str(args["no_of_updates"]),
                               "random_seed" + str(args["random_seed"]))
    logger = Logger(logname=args['env'], args=args, now=now)
    with tf.Session() as sess:
        env = gym.make(args['env'])
        monitor_env = gym.make(args['env'])  # separate env for evaluation/recording
        # Seed every RNG source for repeatability.
        np.random.seed(int(args['random_seed']))
        tf.set_random_seed(int(args['random_seed']))
        env.seed(int(args['random_seed']))
        monitor_env.seed(int(args['random_seed']))
        state_dim = env.observation_space.shape[0]
        action_dim = env.action_space.shape[0]
        action_bound = env.action_space.high
        print("****** state dimension", state_dim)
        print("****** actions dimension", action_dim)
        # Ensure action bound is symmetric
        assert (np.array_equal(env.action_space.high, -env.action_space.low))
        # Actor must be built first: the critic slices its variables out of
        # tf.trainable_variables() using the actor's variable count.
        actor = ActorNetwork(sess, state_dim, action_dim, action_bound,
                             float(args['actor_lr']), float(args['tau']),
                             int(args['minibatch_size']))
        critic = CriticNetwork(sess, state_dim, action_dim,
                               float(args['critic_lr']), float(args['tau']),
                               float(args['gamma']),
                               actor.get_num_trainable_vars())
        actor_noise = OrnsteinUhlenbeckActionNoise(mu=np.zeros(action_dim))
        if args['use_gym_monitor']:
            monitor_env = wrappers.Monitor(monitor_env, monitor_dir, force=True)
        train(sess, env, args, actor, critic, actor_noise, logger, monitor_env)
        logger.close()
        if args['use_gym_monitor']:
            env.monitor.close()
            monitor_env.monitor.close()
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='provide arguments for DDPG agent')
    # agent parameters
    # NOTE: defaults keep their Python types (int/float) while CLI-supplied
    # values arrive as strings; downstream code casts with int()/float().
    parser.add_argument('--actor-lr', help='actor network learning rate', default=0.0001)
    parser.add_argument('--critic-lr', help='critic network learning rate', default=0.001)
    parser.add_argument('--gamma', help='discount factor for critic updates', default=0.99)
    # 'lambda' is an extra multiplier on gamma in the critic target (see train()).
    parser.add_argument('--lambda', help='discount factor for critic updates', default=1)
    parser.add_argument('--tau', help='soft target update parameter', default=0.001)
    parser.add_argument('--buffer-size', help='max size of the replay buffer', default=1000000)
    parser.add_argument('--minibatch-size', help='size of minibatch for minibatch-SGD', default=256)
    parser.add_argument('--no_of_updates', help='no of inner updates', default=1)
    # run parameters
    parser.add_argument('--env', help='choose the gym env- tested on {Pendulum-v0}', default='Pendulum-v0')
    parser.add_argument('--random-seed', help='random seed for repeatability', default=1234)
    parser.add_argument('--max-episodes', help='max num of episodes to do while training', default=50000)
    parser.add_argument('--max-episode-len', help='max length of 1 episode', default=1000)
    parser.add_argument('--render-env', help='render the gym env', action='store_true')
    parser.add_argument('--use-gym-monitor', help='record gym results', action='store_true')
    parser.set_defaults(render_env=False)
    parser.set_defaults(use_gym_monitor=True)
    args = vars(parser.parse_args())
    pp.pprint(args)
    main(args)
| [
"[email protected]"
] | |
eba16916ccb2a1173dce4d26831a9e14a24a5706 | caede12c673bd4d5866e7084fc850f36920f6e2c | /keylogger/control.py | 88c19ffc0afbebad897e9bd435e9ef6c813ed25f | [] | no_license | abubakr223/keylogger | 2b600fda9346cbf8202a4e39f52493e919644ff9 | 89f43439d56c4248ff6472c28dacf10a2d5e4030 | refs/heads/master | 2022-04-24T17:03:31.088415 | 2020-04-24T10:16:35 | 2020-04-24T10:16:35 | 258,477,028 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 266 | py | from pynput.mouse import Controller
from pynput.keyboard import Controller
def controlMouse():
    # NOTE(review): both pynput imports bind the name `Controller`; the
    # keyboard import appears last, so this most likely instantiates a
    # keyboard Controller, not a mouse one -- verify the imports (alias
    # them, e.g. `MouseController` / `KeyboardController`).
    mouse = Controller()
    mouse.position = (10,20)
def controlKeyboard():
    """Type a fixed demo string via the pynput keyboard Controller."""
    keyboard = Controller()
    keyboard.type("i am hungry again")
# Runs the keyboard demo whenever this module is executed or imported.
controlKeyboard()
"[email protected]"
] | |
977a8cefbd1ec35f6ef185b651476da96f58857f | c6bc01937480d68a9a272405d40695590c25fc3e | /src/cutout.py | 96cafd2001a3fb530be7a5a0dd7e47e94d20a89d | [
"HPND-sell-variant",
"LicenseRef-scancode-public-domain",
"LicenseRef-scancode-unicode",
"MIT",
"MIT-Modern-Variant"
] | permissive | LegalizeAdulthood/fontconfig | a01a1934ad96c90f366eca7d45bd84b3001ce4c3 | 401d521c1f68ce16c1fa62caf307a1869ba63c48 | refs/heads/master | 2023-03-03T14:39:59.924462 | 2021-01-16T14:20:41 | 2021-01-30T10:21:33 | 338,185,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 920 | py | import argparse
import subprocess
import os
import re
if __name__== '__main__':
    # Usage: cutout.py <input> <output> [preprocessor command ...]
    # Runs the given preprocessor over <input> and writes a cleaned copy of
    # its stdout to <output>: leading whitespace stripped, spaces around
    # commas collapsed, and everything between CUT_OUT_BEGIN / CUT_OUT_END
    # markers dropped.
    parser = argparse.ArgumentParser()
    parser.add_argument('input')
    parser.add_argument('output')
    args = parser.parse_known_args()
    print (args[0].output)
    cpp = args[1]  # remaining argv tokens form the preprocessor command line
    ret = subprocess.run(cpp + [args[0].input], stdout=subprocess.PIPE, check=True)
    stdout = ret.stdout.decode('utf8')
    with open(args[0].output, 'w') as out:
        write = True
        for l in stdout.split('\n'):
            l = l.strip('\r')
            if l.startswith('CUT_OUT_BEGIN'):
                write = False
            if write and l:
                # Raw strings avoid invalid-escape-sequence warnings
                # (a future SyntaxError) in the regex patterns.
                stripped = re.sub(r'^\s+', '', l)
                stripped = re.sub(r'\s*,\s*', ',', stripped)
                if not stripped.isspace() and stripped:
                    out.write('%s\n' % stripped)
            if l.startswith('CUT_OUT_END'):
                write = True
| [
"[email protected]"
] | |
6c3e6eb4b5a783eaee91b0e08d792d179b4b47a1 | 3e49539cea2df0b22ed1c745f7a2ffa972ad1eea | /apps/profiles/migrations/0004_profile_occupation.py | e7636cc130bb981d21703ad1c10e17234614a9c8 | [] | no_license | nrzonline/portfolio_backend | 5c578c3de29634f71370a41f7ba4f9344a4cc00d | dfdfc35da299db11aef061b196d4d0e0795a95f3 | refs/heads/master | 2021-07-20T09:55:03.577312 | 2017-10-30T17:53:44 | 2017-10-30T17:53:44 | 107,800,859 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 503 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.1 on 2017-09-08 18:53
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated Django migration: adds an optional `occupation`
    # CharField to the `profile` model (follows 0003_profile_account).
    dependencies = [
        ('profiles', '0003_profile_account'),
    ]
    operations = [
        migrations.AddField(
            model_name='profile',
            name='occupation',
            field=models.CharField(blank=True, max_length=255, null=True, verbose_name='Occupation'),
        ),
    ]
| [
"[email protected]"
] | |
ce32ce9f88f4ffa2802b6e394ff85ff44f9043ce | 02aa7379cf612d717f4ac2de9033fa6d032440e3 | /MasterThesis/ReportGenerator/Utility.py | 04c3ad128324b442a2c9f54601d256939e1ac1fa | [] | no_license | HenryYen/ITRI_RemAnalysisMethod | 6ff0fb298f8875167eba84bd2d42a62939cd69aa | aa397cbf84745461ded1d5c2642a6142d3553cf5 | refs/heads/master | 2021-09-22T11:47:49.842102 | 2018-09-10T06:30:41 | 2018-09-10T06:30:41 | 109,849,357 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,930 | py | import Parameter as pr
from Cell import *
from User import *
import random as rd
from math import sqrt, pi, degrees, atan2, log, pow
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
import pickle as pk
import numpy as np
def get_dist(x1, y1, x2, y2):
    """Euclidean distance between points (x1, y1) and (x2, y2)."""
    dx = x1 - x2
    dy = y1 - y2
    return sqrt(dx * dx + dy * dy)
def get_pathloss(c, u):
    """Path loss in dB between cell `c` and user `u`, with random shadowing.

    Model refers to 'VTC2007_Path loss measurements and modeling for indoor
    office scenario at 28 and 38 GHz': PL = 29.3 + 10 * 6.05 * log10(d) + X,
    X ~ N(0, 4.9). Distances below pr.ref_dist are clamped to the reference
    distance before evaluating the model.
    """
    separation = get_dist(u.x, u.y, c.x, c.y)
    if separation < pr.ref_dist:
        separation = pr.ref_dist
    shadowing = float(np.random.normal(loc=0, scale=4.9, size=1))
    return 29.3 + 10*6.05*log(separation, 10) + shadowing
def getID(type):
    """Return sequential IDs (0, 1, 2, ...) per entity `type`.

    Supported types are 'cell' and 'user'; any other value returns None.
    Counters live as attributes on the function object, so they persist
    across calls within the process.
    """
    if type == 'cell':
        next_id = getattr(getID, 'cell_id', -1) + 1
        getID.cell_id = next_id
        return next_id
    if type == 'user':
        next_id = getattr(getID, 'user_id', -1) + 1
        getID.user_id = next_id
        return next_id
def write_csv(data):
    """Write user measurement reports to pr.fn_output as CSV.

    Each report in `data` is an iterable of values matching pr.header,
    e.g. [user id, x, y, serving id, serving rx, neighbor id, neighbor rx].
    """
    # A single open in 'w' mode truncates and writes in one pass; the
    # original truncate-then-reopen-for-append dance was redundant.
    with open(pr.fn_output, 'w') as f:
        f.write(','.join(pr.header) + '\n')
        for report in data:
            f.write(','.join([str(e) for e in report]) + '\n')
def generate_scenario(MAX_CELL = 20000, MAX_USER = 100000):
    """Generate random cell/user positions and pickle them.

    Writes [cm_pos, um_pos] to pr.fn_pos_random, where
    cm_pos = [(x, y, power), ...] and um_pos = [(x, y), ...], with
    coordinates uniform over pr.map_size and powers in [pr.Pmin, pr.Pmax).
    """
    cm_pos = []
    um_pos = []
    for _ in range(MAX_CELL):
        x = rd.uniform(0, pr.map_size[0])
        y = rd.uniform(0, pr.map_size[1])
        power = int(rd.uniform(pr.Pmin, pr.Pmax))
        cm_pos.append((x, y, power))
    for _ in range(MAX_USER):
        x = rd.uniform(0, pr.map_size[0])
        y = rd.uniform(0, pr.map_size[1])
        um_pos.append((x, y))
    with open(pr.fn_pos_random, 'wb') as f:
        pk.dump([cm_pos, um_pos], f)
    # print() form is valid on both Python 2 and 3; message typo fixed.
    print("### rd_pos.pkl is successfully created!")
def draw_system(cm, um):
    """Plot cells (triangles with ID labels) and users (dots) over the
    floor-plan image and save the figure to ./pic/overview.png."""
    color = ['b', 'g', 'r', 'c', 'k', 'y', 'm' ,'w', '#8172B2', '#56B4E9', '#D0BBFF', '#D65F5F', '#017517', '#e5ae38', '#001C7F', '#6ACC65', '#8EBA42', '#EAEAF2', '#7600A1', '#E8000B']
    for c in cm:
        color_no = 0  # cells drawn in blue
        plt.text(c.x, c.y, c.ID)
        plt.plot(c.x, c.y, color = color[color_no], marker='^')
    for u in um:
        color_no = 1  # users drawn in green
        plt.plot(u.x, u.y, color = color[color_no], marker='.')
    # Background floor plan, stretched to the extent [0, 104] x [0, 26].
    img = plt.imread("./pic/51_5F.jpg")
    plt.imshow(img, zorder=0, extent=[0, 104, 0, 26])
    plt.axis([0, pr.map_size[0], 0, pr.map_size[1]])
    plt.xlabel('x')
    plt.ylabel('y')
    plt.title('System Overview')
    plt.savefig('./pic/overview.png', dpi=200)
    plt.show()
#--------------------------------------------------------
"""
def load_pos_cell():
pos = []
with open(pr.fn_pos_bs, 'r') as f:
for line in f:
parts = line.split()
pos.append([float(e) for e in parts])
return pos
def load_pos_user():
pos = []
with open(pr.fn_pos_ue, 'r') as f:
for line in f:
parts = line.split(',')
pos.append([float(e) for e in parts])
return pos
def get_snr(cell, user, cm, isInterfere, isAddGain): # return power ratio not db
def dbm2mwatt(dbm):
return pow(10, dbm/10.)
def ratio2db(ratio): # SNR in db
return 10 * log(ratio, 10)
dist = get_dist(cell.pos_x, cell.pos_y, user.pos_x, user.pos_y)
gain = rp.beam_gain(cell.get_beam_pattern(), get_angle(cell, user)) if isAddGain else 0
signal_power = dbm2mwatt(cell.power - get_pathloss(dist) + gain) # in watt
interfere_power = 0. if len(cm)!=1 else 0.01 # in watt
for e in cm:
if e is cell:
continue
dist = get_dist(e.pos_x, e.pos_y, user.pos_x, user.pos_y)
interfere_power += dbm2mwatt(e.power - get_pathloss(dist))
#interfere_power += dbm2mwatt(pr.gaussian_noise)
interfere_power = interfere_power if isInterfere else 1.
return (signal_power / interfere_power) * pr.interfereReduction
def print_all_status(cm, um):
cover = print_cover_rate(um)
power = print_cm_power(cm)
capacity = print_capacity(cm ,um)
#draw_system(cm, um)
#print_cm_client(cm)
#print_power_reduce(cm)
#print_interfere_reduce(cm, um)
return (cover, power, capacity)
def get_cover_nb(um):
covered = 0.
for u in um:
if u.master != None:
covered += 1.
return covered
def print_capacity(cm, um):
capacity = 0.
for c in cm:
nb_client = c.get_client_no()
if nb_client == 0:
continue
Bk = pr.bandwidth / nb_client
capacity += sum([Bk * log(1 + get_snr(c, u, cm, pr.isInterfere, False), 2) for sec in c.client for u in sec])
avg_capacity = capacity / get_cover_nb(um) if get_cover_nb(um) != 0 else 0
#print('[Average user capacity] :', avg_capacity)
return avg_capacity
def print_cover_rate(um):
covered = get_cover_nb(um)
#print ('[Cover rate] :%.3f%% (%d/%d)' % (covered/pr.user_no*100, covered, pr.user_no))
return covered/pr.user_no
def print_cm_power(cm):
avg_power = sum([e.power for e in cm])/len(cm)
#print('[Average cell power] :', avg_power)
#print('[Cell power] :' , [e.power for e in cm])
return avg_power
def print_cm_client(cm):
print('[Cell client]:')
for c in cm:
print(' ', c.get_client_no(), '/', pr.max_client_no, [len(e) for e in c.client])
def print_power_reduce(cm):
nb_sector = pr.cell_no * pr.sector_no
opened = sum([int(len(sec) > 0) for c in cm for sec in c.client ])
print ('[Power saving] : from %d sectors opened to %d sectors' % (nb_sector, opened))
#print ('[Power saving] :%f (%d/%d)' % (opened/nb_sector*100, opened, nb_sector))
def print_interfere_reduce(cm, um):
covered = 0.;
intersect = 0.
tmp_p = [c.power for c in cm]
for c in cm:
c.power = pr.Pmax
for u in um:
counter = 0.
for c in cm:
snr = get_snr(c, u, cm, pr.isInterfere, False)
dist = get_dist(c.pos_x, c.pos_y, u.pos_x, u.pos_y)
if snr >= pr.snr_threshold and dist <= c.radius:
counter += 1 # if counter >= 2, means this user is in the intersection of two cell's coverage
if counter >= 2:
intersect += 1
if u.master != None:
covered += 1
for c in cm:
c.power = tmp_p[cm.index(c)]
rate = covered/intersect if intersect != 0 else 0
print ('[Interference reduction] :%.3f%% (%d/%d)' % (rate * 100, covered, intersect))
def scenario1():
cm = [sc.Cell(500, 500), sc.Cell(480, 500), sc.Cell(450, 500)]
um = [ur.User(i*5, 500) for i in range(80, 101)]
cell = cm[0]
import Simulate as si
um[0].master = cell
cell.client[0].append(um[0])
print (si.objective_func(cell, cm, um))
for i in range(-31, 20):
cell.power = i
print (si.objective_func(cell, cm, um))
"""
| [
"[email protected]"
] | |
88f69ea1d7520d5cab4e7bd9452368a1f0c34d3e | 991321d1a26a520dce563fa117e533787de1fc62 | /lib/environ_skew.py | c218ba7d760077615f18ba782074d7ee568c8afe | [] | no_license | Ryan-Ray-Martin/PPO_Stock_Trader | 6dd171e714db8c0eddd3ea1556b2d983878ab513 | 376759d66f68e26e8703fc5bd4e858e2a5be16c2 | refs/heads/main | 2023-04-06T17:34:30.863409 | 2021-03-18T04:41:59 | 2021-03-18T04:41:59 | 348,217,449 | 4 | 3 | null | null | null | null | UTF-8 | Python | false | false | 6,783 | py | import gym
import gym.spaces
from gym.utils import seeding
from gym.envs.registration import EnvSpec
import enum
import numpy as np
from . import data
DEFAULT_BARS_COUNT = 1200
DEFAULT_COMMISSION_PERC = 0.0
SKEW = .05
class Actions(enum.Enum):
    # Discrete action space: do nothing, open a long position, close it.
    Skip = 0
    Buy = 1
    Close = 2
class State:
    """Sliding-window observation/position state for the trading environment.

    Wraps relative OHLC(V) price arrays and tracks at most one long
    position; `encode()` flattens the last `bars_count` bars plus the
    position flag and unrealized profit into a float32 vector.
    """
    def __init__(self, bars_count, commission_perc,
                 reset_on_close, reward_on_close=True,
                 volumes=True):
        # Validate configuration up front; these are programming errors.
        assert isinstance(bars_count, int)
        assert bars_count > 0
        assert isinstance(commission_perc, float)
        assert commission_perc >= 0.0
        assert isinstance(reset_on_close, bool)
        assert isinstance(reward_on_close, bool)
        self.bars_count = bars_count            # window length in bars
        self.commission_perc = commission_perc  # charged on open and on close
        self.reset_on_close = reset_on_close    # end episode when position closes
        self.reward_on_close = reward_on_close  # reward at close vs. per-bar
        self.volumes = volumes                  # include the volume channel
    def reset(self, prices, offset):
        """Start a new episode on `prices` at bar index `offset`, flat."""
        assert offset >= self.bars_count-1
        self.have_position = False
        self.open_price = 0.0
        self._prices = prices
        self._offset = offset
    @property
    def shape(self):
        # [h, l, c] * bars + position_flag + rel_profit
        if self.volumes:
            return 4 * self.bars_count + 1 + 1,
        else:
            return 3 * self.bars_count + 1 + 1,
    def encode(self):
        """
        Convert current state into numpy array.
        """
        res = np.ndarray(shape=self.shape, dtype=np.float32)
        shift = 0
        # Interleave h/l/c(/v) values for each bar of the trailing window.
        for bar_idx in range(-self.bars_count+1, 1):
            ofs = self._offset + bar_idx
            res[shift] = self._prices.high[ofs]
            shift += 1
            res[shift] = self._prices.low[ofs]
            shift += 1
            res[shift] = self._prices.close[ofs]
            shift += 1
            if self.volumes:
                res[shift] = self._prices.volume[ofs]
                shift += 1
        res[shift] = float(self.have_position)
        shift += 1
        if not self.have_position:
            res[shift] = 0.0
        else:
            # Unrealized relative profit of the open position.
            res[shift] = self._cur_close() / self.open_price - 1.0
        return res
    def _cur_close(self):
        """
        Calculate real close price for the current bar
        """
        # NOTE: `open` shadows the builtin here (local to this method).
        open = self._prices.open[self._offset]
        rel_close = self._prices.close[self._offset]
        # Close values are stored relative to open; SKEW offsets the price.
        return (open * (1.0 + rel_close)) - SKEW
    def step(self, action):
        """
        Perform one step in our price, adjust offset, check for the end of prices
        and handle position change
        :param action:
        :return: reward, done
        """
        assert isinstance(action, Actions)
        reward = 0.0
        done = False
        close = self._cur_close()
        if action == Actions.Buy and not self.have_position:
            # Open a long position at the current close, paying commission.
            self.have_position = True
            self.open_price = close
            reward -= self.commission_perc
        elif action == Actions.Close and self.have_position:
            # Close the position; optionally reward the whole trade here.
            reward -= self.commission_perc
            done |= self.reset_on_close
            if self.reward_on_close:
                reward += 100.0 * (close / self.open_price - 1.0)
            self.have_position = False
            self.open_price = 0.0
        self._offset += 1
        prev_close = close
        close = self._cur_close()
        # The episode also ends when the price history is exhausted.
        done |= self._offset >= self._prices.close.shape[0]-1
        if self.have_position and not self.reward_on_close:
            # Per-bar reward: percentage price move while holding.
            reward += 100.0 * (close / prev_close - 1.0)
        return reward, done
class State1D(State):
    """
    State with shape suitable for 1D convolution
    """
    @property
    def shape(self):
        # Channels-first layout (channels, bars): high/low/close[/volume]
        # rows plus a position-flag row and a relative-profit row.
        if self.volumes:
            return (6, self.bars_count)
        else:
            return (5, self.bars_count)
    def encode(self):
        """Encode the trailing window as a (channels, bars) float32 array."""
        res = np.zeros(shape=self.shape, dtype=np.float32)
        start = self._offset-(self.bars_count-1)
        stop = self._offset+1
        res[0] = self._prices.high[start:stop]
        res[1] = self._prices.low[start:stop]
        res[2] = self._prices.close[start:stop]
        if self.volumes:
            res[3] = self._prices.volume[start:stop]
            dst = 4
        else:
            dst = 3
        if self.have_position:
            # Broadcast the flag and current relative profit across the row;
            # rows stay zero when no position is open.
            res[dst] = 1.0
            res[dst+1] = self._cur_close() / self.open_price - 1.0
        return res
class StocksEnv(gym.Env):
    """Gym trading environment over a dict of instrument price histories."""
    metadata = {'render.modes': ['human']}
    spec = EnvSpec("StocksEnv-v0")
    def __init__(self, prices, bars_count=DEFAULT_BARS_COUNT,
                 commission=DEFAULT_COMMISSION_PERC,
                 reset_on_close=True, state_1d=False,
                 random_ofs_on_reset=True, reward_on_close=False,
                 volumes=False):
        # prices: dict mapping instrument name -> relative price arrays.
        # state_1d selects the conv-friendly (channels, bars) encoding.
        assert isinstance(prices, dict)
        self._prices = prices
        if state_1d:
            self._state = State1D(
                bars_count, commission, reset_on_close,
                reward_on_close=reward_on_close, volumes=volumes)
        else:
            self._state = State(
                bars_count, commission, reset_on_close,
                reward_on_close=reward_on_close, volumes=volumes)
        self.action_space = gym.spaces.Discrete(n=len(Actions))
        self.observation_space = gym.spaces.Box(
            low=-np.inf, high=np.inf,
            shape=self._state.shape, dtype=np.float32)
        self.random_ofs_on_reset = random_ofs_on_reset
        self.seed()
    def reset(self):
        # make selection of the instrument and it's offset. Then reset the state
        self._instrument = self.np_random.choice(
            list(self._prices.keys()))
        prices = self._prices[self._instrument]
        bars = self._state.bars_count
        if self.random_ofs_on_reset:
            # Random start offset, always leaving a full window of history.
            offset = self.np_random.choice(abs(
                prices.high.shape[0]-bars*10)) + bars
        else:
            offset = bars
        self._state.reset(prices, offset)
        return self._state.encode()
    def step(self, action_idx):
        """Apply one Actions value; returns (obs, reward, done, info)."""
        action = Actions(action_idx)
        reward, done = self._state.step(action)
        obs = self._state.encode()
        info = {
            "instrument": self._instrument,
            "offset": self._state._offset
        }
        return obs, reward, done, info
    def render(self, mode='human', close=False):
        # No visualization implemented.
        pass
    def close(self):
        pass
    def seed(self, seed=None):
        # Two seeds per gym convention: one for this env's RNG, one derived.
        self.np_random, seed1 = seeding.np_random(seed)
        seed2 = seeding.hash_seed(seed1 + 1) % 2 ** 31
        return [seed1, seed2]
    @classmethod
    def from_dir(cls, data_dir, **kwargs):
        """Build an env from all price files found under `data_dir`."""
        prices = {
            file: data.load_relative(file)
            for file in data.price_files(data_dir)
        }
        return StocksEnv(prices, **kwargs)
"[email protected]"
] | |
7c3290c6d79e793891bac2788edc3d45daa2fdd6 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03796/s109452428.py | 7da43cd4b9f93444d016f1ca7372416bb9a2f552 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 111 | py | import math
n = int(input())
result = math.factorial(n)
dividor = 10 ** 9 + 7
print (result % dividor) | [
"[email protected]"
] | |
681c635d914e2b9de0e49bcab26a6ed36e8405a2 | 002df0ea1b3486456864b0c4f9c59db1dc9ae4b6 | /constants.py | 9f49a6850af08a21caa00b6d2d08d5f9cded32b5 | [
"MIT"
] | permissive | cosgais/Game | 998620ea16f982f6b12661f348808761c9f1d586 | 9b65600e7abde92aed22e08f9a3e18e637fdb475 | refs/heads/master | 2020-07-10T18:19:35.281762 | 2019-08-25T19:38:13 | 2019-08-25T19:38:13 | 204,333,945 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 104 | py | from pygame.locals import *
# game constants
SCREENRECT = Rect(0, 0, 0, 0)
FPS = 60.0
| [
"[email protected]"
] | |
3fb11e79d90e6b79d17c275936cf0226a510c486 | 3f40191edb4e876c3734e44bfd73fa294247d083 | /p3iv_modules/src/p3iv_modules/interfaces/perception.py | 55ac7729c90dd59707ac6453a3ca0b8024bbef18 | [
"BSD-3-Clause"
] | permissive | philippbrusius/P3IV | 8f85920cb69c45cf896a076393d371d4d7d36269 | 2b0e44b166d699ace2b103d064e45e8da997bdb5 | refs/heads/master | 2023-08-06T00:39:23.988422 | 2021-10-11T07:44:03 | 2021-10-11T07:44:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,452 | py | # This file is part of the P3IV Simulator (https://github.com/fzi-forschungszentrum-informatik/P3IV),
# copyright by FZI Forschungszentrum Informatik, licensed under the BSD-3 license (see LICENSE file in main directory)
import abc
import numpy as np
import p3iv_types
class PerceptInterface(object):
    """Abstract interface for perception modules.

    Concrete perception implementations must provide ``__init__`` and
    ``__call__`` with the signatures documented below; ``type_check`` is a
    shared helper for validating the ``__call__`` arguments.
    """

    # NOTE(review): assigning `__metaclass__` only has an effect on Python 2.
    # Under Python 3 this line is ignored, so the abstractmethod decorators
    # below are not enforced there; confirm whether py2 compatibility is
    # still required before changing to `class PerceptInterface(abc.ABC)`.
    __metaclass__ = abc.ABCMeta

    @abc.abstractmethod
    def __init__(self, *args, **kwargs):
        """
        Parameters
        ----------
        ego_v_id: int
            ID of the vehicle, which the perception module belongs to
        per_pos_sigma_x:
            Position perception variance in longitudinal direction
        per_pos_sigma_y:
            Position perception variance in lateral direction
        per_pos_cross_corr:
            Position perception cross-correlation factor
        per_vel_sigma_x:
            Velocity perception variance in longitudinal direction
        per_vel_sigma_y:
            Velocity perception variance in lateral direction
        per_vel_cross_corr:
            Velocity perception cross-correlation factor
        loc_pos_sigma_x:
            Position localization variance in longitudinal direction
        loc_pos_sigma_y:
            Position localization variance in lateral direction
        loc_pos_cross_corr:
            Position localization cross-correlation factor
        loc_vel_sigma_x:
            Velocity localization variance in longitudinal direction
        loc_vel_sigma_y:
            Velocity localization variance in lateral direction
        loc_vel_cross_corr:
            Velocity localization cross-correlation factor
        """
        pass

    @abc.abstractmethod
    def __call__(self, timestamp, ground_truth, *args, **kwargs):
        """
        Perform environment perception and return an environment model.

        Parameters
        ----------
        timestamp: int
            Current timestamp value.
        ground_truth: GroundTruth
            Ground truth data of the current timestamp.

        Returns
        -------
        environment_model: EnvironmentModel
            Environment model with visible areas, percepted objects.
        """
        # Subclasses overriding this method are expected to validate their
        # inputs the same way (or call super().__call__ first).
        self.type_check(timestamp, ground_truth)
        pass

    @staticmethod
    def type_check(timestamp, ground_truth):
        """
        Utility type check function.

        Raises AssertionError if `timestamp` is not an int or `ground_truth`
        is not a `p3iv_types.GroundTruth` instance.
        """
        assert isinstance(timestamp, int)
        assert isinstance(ground_truth, p3iv_types.GroundTruth)
| [
"[email protected]"
] | |
f8a20cb8a638e3e55609979a69fadad0ab2becf1 | b722cd9b294651ce8c1d6f25867f4d72853c00a3 | /chengdu_ershoufang_guapai.py | 4dfa5a2def0410f63cbda3bf8c1a07444a589550 | [] | no_license | QZ1220/lianjia_spider | 838e9e2079d01acf9695307e4406864f92405c23 | 9b5487adce2f0e1e6fdf7fa6aa86defeb2f80bc9 | refs/heads/master | 2023-07-01T15:29:07.132279 | 2021-05-15T08:11:18 | 2021-05-15T08:11:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,636 | py | import requests
import parsel
import time
import csv
import datetime
def parse_one_page(url, region, csv_writer):
    """Crawl every listing page of one region and write the rows to CSV.

    The entry ``url`` is fetched once to read the region's total listing
    count; every result page is then fetched in turn (the site shows 30
    listings per page and exposes at most 100 pages).

    Parameters
    ----------
    url : str
        Entry URL of the region, used only to read the total listing count.
    region : str
        Region slug used to build the per-page URLs.
    csv_writer : csv.DictWriter
        Writer whose ``fieldnames`` match the Chinese keys written below.
    """
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.138 Safari/537.36'
    }
    response = requests.get(url=url, headers=headers)
    selector = parsel.Selector(response.text)
    total = selector.css('.total span::text').get()
    # 30 listings per page; round UP so the final partial page is kept.
    # (The original `int(int(total) / 30)` floor plus `range(1, n)` dropped
    # the last one or two pages of every region.)
    total_pages = -(-int(total) // 30)
    for page in range(1, total_pages + 1):
        if page > 100:
            # The site only serves the first 100 result pages.
            break
        print('===========================正在下载第{}页数据================================'.format(page))
        time.sleep(1)  # be polite: throttle to one request per second
        page_url = 'https://cd.lianjia.com/ershoufang/' + region + '/pg' + str(page) + '/'
        response = requests.get(url=page_url, headers=headers)
        selector = parsel.Selector(response.text)
        lis = selector.css('.sellListContent li')
        for li in lis:
            # Fresh dict per listing so each CSV row is independent.
            dit = {}
            title = li.css('.title a::text').get()
            dit['标题'] = title
            positionInfo = li.css('.positionInfo a::text').getall()
            info = '-'.join(positionInfo)
            dit['开发商'] = info
            houseInfo = li.css('.houseInfo::text').get()
            dit['房子信息'] = houseInfo
            followInfo = li.css('.followInfo::text').get()
            dit['发布周期'] = followInfo
            Price = li.css('.totalPrice span::text').get()
            dit['售价/万'] = Price
            unitPrice = li.css('.unitPrice span::text').get()
            dit['单价'] = unitPrice
            print(dit)
            csv_writer.writerow(dit)
def main(offset, csv_writer):
    """Crawl each Chengdu district in turn and append the rows to CSV.

    For every region, `parse_one_page` is invoked `offset - 1` times with a
    per-region entry URL; the one-second sleep between invocations keeps the
    request rate polite.
    """
    regions = ['jinjiang', 'qingyang', 'wuhou', 'gaoxin7', 'chenghua', 'jinniu', 'tianfuxinqu', 'gaoxinxi1',
               'shuangliu', 'wenjiang', 'pidou', 'longquanyi', 'xindou', 'tianfuxinqunanqu', 'qingbaijiang',
               'doujiangyan', 'pengzhou', 'jianyang', 'xinjin', 'chongzhou1', 'dayi', 'jintang', 'pujiang',
               'qionglai']
    for region in regions:
        for page_index in range(1, offset):
            entry_url = 'https://cd.lianjia.com/ershoufang/' + region + '/pg' + str(page_index) + '/'
            parse_one_page(entry_url, region, csv_writer)
            time.sleep(1)
        print('{} has been writen.'.format(region))
# Output CSV is named after today's date; utf-8-sig (BOM) keeps the Chinese
# text readable when the file is opened in Excel.
file_name = './data/成都二手房信息_' + str(datetime.date.today()) + '.csv'
f = open(file_name, mode='a', encoding='utf-8-sig', newline='')
# Field names must match the dict keys written by parse_one_page().
csv_writer = csv.DictWriter(f, fieldnames=['标题', '开发商', '房子信息', '发布周期', '售价/万', '单价'])
csv_writer.writeheader()
# offset=2 -> a single crawl pass per region.
main(2, csv_writer)
| [
"[email protected]"
] | |
e297b38f6e2cdf429d8572fcf27d6ba7f9777a03 | dbbd518e46560bf7b26f6a494243d27f8d68709a | /core/base_test.py | 9dd2cd176213e03da4d3d2073e0b2ab082029407 | [] | no_license | b-kod/API-autotest | 063fa3d5a74056b03b229467e043778f1daa33f9 | 7bc9fb9550011632520f8277bd80357747c8301b | refs/heads/main | 2023-04-13T04:58:17.258852 | 2021-04-02T10:19:57 | 2021-04-02T10:19:57 | 353,649,253 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 968 | py | from datetime import datetime
from requests import Response
from core.logger import Logger
class BaseCase:
    """Common plumbing for API test cases.

    Provides a logging teardown plus helpers for extracting a single
    cookie/header from a response and for generating unique e-mail
    addresses.
    """

    def setup(self):
        # Nothing to prepare by default; subclasses may override.
        pass

    def teardown(self):
        # Flush the accumulated log entries to disk after each test.
        Logger.get_instance().write_log_to_file()

    @staticmethod
    def get_cookie(response: Response, cookie_name):
        """Return ``{cookie_name: value}`` from the response cookies or raise."""
        if cookie_name not in response.cookies:
            raise Exception(f"Cannot find cookie with the name {cookie_name} in the last response")
        return {cookie_name: response.cookies[cookie_name]}

    @staticmethod
    def get_header(response: Response, headers_name):
        """Return ``{headers_name: value}`` from the response headers or raise."""
        if headers_name not in response.headers:
            raise Exception(f"Cannot find header with the name {headers_name} in the last response")
        return {headers_name: response.headers[headers_name]}

    @staticmethod
    def create_unique_email(base: str, domain="example.com"):
        """Build a timestamp-suffixed address: ``base+MMDDYYYYHHMMSS@domain``."""
        stamp = datetime.now().strftime("%m%d%Y%H%M%S")
        return f'{base}+{stamp}@{domain}'
| [
"[email protected]"
] | |
69e20327eed61ccc8f6f46beb0c063a9bc435f12 | 094b77875a337de195e1df89cdb89a2acf9b60e8 | /app/templates/untitled.py | 274179fac1b12cb777e4816118e526a9e9abe67e | [] | no_license | jayarajsajjanar/linuxconfig | 0012bec91fccca0676b5a45c8ab2c7bb791383b5 | 9a271387f25c6e6ec59ac1c3f5584a9e3e37f95c | refs/heads/master | 2022-08-09T16:10:27.902279 | 2016-06-03T20:29:53 | 2016-06-03T20:29:53 | 60,275,977 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 767 | py | from flask import Flask
from flask_sqlalchemy import SQLAlchemy
app = Flask(__name__)
# SQLite database file backing this demo app.
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:////tmp/test1.db'
db = SQLAlchemy(app)
class Items(db.Model):
    """Catalog item persisted via Flask-SQLAlchemy, linked to a `Cat` category."""
    id = db.Column(db.Integer, primary_key=True)
    # Display name of the item.
    Naming = db.Column(db.String(80))
    Description = db.Column(db.String)
    # Foreign key into Cat; the backref exposes `category_items` on Cat.
    Cat_id = db.Column(db.Integer, db.ForeignKey('Cat.id'))
    Cat= db.relationship('Cat',
        backref=db.backref('category_items', lazy='dynamic'))

    def __init__(self, Naming, Description,Categoriess):
        self.Naming = Naming
        self.Description = Description
        # NOTE(review): `Categoriess` is assigned to an attribute that is not
        # a mapped column (the relationship attribute is `Cat`), so this value
        # is never persisted — presumably `self.Cat` was intended; confirm
        # against the callers before changing.
        self.Categoriess=Categoriess
class Cat(db.Model):
    """Item category; `category_items` (backref from Items) lists its items."""
    __tablename__ ='Cat'
    id = db.Column(db.Integer, primary_key=True)
    # Human-readable category name.
    name = db.Column(db.String(50))

    def __init__(self, name):
        self.name = name
"[email protected]"
] | |
cd78fd4a072a6b9d1afdc776829877011e52a27b | 7be31cf11c0277d988b081ed4047dbef221094cc | /canary/format.py | d73c51441d4e73610261c66952d3e5aca5e5730a | [
"BSD-3-Clause"
] | permissive | ryanpetrello/canary | a5f46b65febcaea889542421286de27f976f1998 | e9e327eec5eae992f3573e7ca4548d480da62a9c | refs/heads/master | 2020-12-25T18:18:13.010976 | 2014-06-10T11:54:21 | 2014-06-10T11:54:21 | 8,144,379 | 2 | 0 | null | 2014-04-03T02:18:16 | 2013-02-11T19:06:54 | Python | UTF-8 | Python | false | false | 1,466 | py | import json
import logging
import re
import socket
class LogstashFormatter(logging.Formatter):
"""
A custom JSON formatter intended for consumption by logstash
"""
DEFAULT_KEYS = ['fields']
def __init__(self, keys=DEFAULT_KEYS, *args, **kwargs):
log_format = ' '.join(['%({0})'] * len(keys))
custom_format = log_format.format(*keys)
logging.Formatter.__init__(
self,
fmt=custom_format,
*args,
**kwargs
)
def parse(self):
standard_formatters = re.compile(r'\((.*?)\)', re.IGNORECASE)
return standard_formatters.findall(self._fmt)
def serialize(self, record):
formatters = self.parse()
record.message = record.getMessage()
log_record = {
'@message': record.message,
'@source_host': socket.gethostname(),
}
if getattr(record, 'exc_info', None):
log_record['traceback'] = getattr(
record,
'filter_sensitive',
lambda x: x
)(self.formatException(record.exc_info))
for formatter in formatters:
if formatter in record.__dict__:
log_record[formatter] = record.__dict__[formatter]
return log_record
def format(self, record):
"""Formats a log record and serializes to JSON"""
record = self.serialize(record)
return json.dumps(record)
| [
"[email protected]"
] | |
ed3d5f9234b618fc176362712257fa446d816471 | a1f7fa39e4884c60223d1000670ed918f2ca4dc5 | /dm_control/locomotion/arenas/corridors.py | eb6c410ec05b1d5ba9521afb53ca5fcfb141284d | [
"Apache-2.0"
] | permissive | wilson1yan/dm_control | 7fb74e4b6a428a778d773230ec22d54f87d0ceef | 9b908bb501643a1152cc95b9dfa29b96fd3e7061 | refs/heads/master | 2021-07-12T04:56:09.951104 | 2020-04-10T23:40:00 | 2020-04-10T23:40:00 | 201,571,408 | 8 | 6 | Apache-2.0 | 2020-04-16T17:16:48 | 2019-08-10T03:01:12 | Python | UTF-8 | Python | false | false | 14,030 | py | # Copyright 2019 The dm_control Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Corridor-based arenas."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
from dm_control import composer
from dm_control.composer import variation
import six
_SIDE_WALLS_GEOM_GROUP = 3
_CORRIDOR_X_PADDING = 2.0
_WALL_THICKNESS = 0.16
_SIDE_WALL_HEIGHT = 4.0
_DEFAULT_ALPHA = 0.5
@six.add_metaclass(abc.ABCMeta)
class Corridor(composer.Arena):
  """Base class shared by all corridor-style arenas."""

  @abc.abstractmethod
  def regenerate(self, random_state):
    """Rebuilds the corridor; concrete subclasses must implement this."""
    raise NotImplementedError

  @abc.abstractproperty
  def corridor_length(self):
    """Current length of the corridor along the x axis."""
    raise NotImplementedError

  @abc.abstractproperty
  def corridor_width(self):
    """Current width of the corridor along the y axis."""
    raise NotImplementedError

  @abc.abstractproperty
  def ground_geoms(self):
    """Tuple of geoms that form the walkable ground."""
    raise NotImplementedError

  def is_at_target_position(self, position, tolerance=0.0):
    """Checks if a `position` is within `tolerance' of an end of the corridor.

    This can also be used to evaluate more complicated T-shaped or L-shaped
    corridors.

    Args:
      position: An iterable of 2 elements corresponding to the x and y location
        of the position to evaluate.
      tolerance: A `float` tolerance to use while evaluating the position.

    Returns:
      A `bool` indicating whether the `position` is within the `tolerance` of an
      end of the corridor.
    """
    x_coord, _ = position
    # Only the x coordinate matters: the "target" is the far end in +x.
    return x_coord > self.corridor_length - tolerance
class EmptyCorridor(Corridor):
  """An empty corridor with planes around the perimeter."""

  def _build(self,
             corridor_width=4,
             corridor_length=40,
             visible_side_planes=True,
             name='empty_corridor'):
    """Builds the corridor.

    Args:
      corridor_width: A number or a `composer.variation.Variation` object that
        specifies the width of the corridor.
      corridor_length: A number or a `composer.variation.Variation` object that
        specifies the length of the corridor.
      visible_side_planes: Whether the side planes that bound the corridor's
        perimeter should be rendered.
      name: The name of this arena.
    """
    super(EmptyCorridor, self)._build(name=name)

    # Width/length may be Variation objects; they are evaluated per-episode
    # in `regenerate`, not here.
    self._corridor_width = corridor_width
    self._corridor_length = corridor_length

    # Container body for obstacle geoms added by subclasses.
    self._walls_body = self._mjcf_root.worldbody.add('body', name='walls')

    self._mjcf_root.visual.map.znear = 0.0005
    self._mjcf_root.asset.add(
        'texture', type='skybox', builtin='gradient',
        rgb1=[0.4, 0.6, 0.8], rgb2=[0, 0, 0], width=100, height=600)
    self._mjcf_root.visual.headlight.set_attributes(
        ambient=[0.4, 0.4, 0.4], diffuse=[0.8, 0.8, 0.8],
        specular=[0.1, 0.1, 0.1])

    # Side planes are always present for collision; alpha only controls
    # whether they are rendered.
    alpha = _DEFAULT_ALPHA if visible_side_planes else 0.0
    self._ground_plane = self._mjcf_root.worldbody.add(
        'geom', type='plane', rgba=[0.5, 0.5, 0.5, 1], size=[1, 1, 1])
    self._left_plane = self._mjcf_root.worldbody.add(
        'geom', type='plane', xyaxes=[1, 0, 0, 0, 0, 1], size=[1, 1, 1],
        rgba=[1, 0, 0, alpha], group=_SIDE_WALLS_GEOM_GROUP)
    self._right_plane = self._mjcf_root.worldbody.add(
        'geom', type='plane', xyaxes=[-1, 0, 0, 0, 0, 1], size=[1, 1, 1],
        rgba=[1, 0, 0, alpha], group=_SIDE_WALLS_GEOM_GROUP)
    self._near_plane = self._mjcf_root.worldbody.add(
        'geom', type='plane', xyaxes=[0, 1, 0, 0, 0, 1], size=[1, 1, 1],
        rgba=[1, 0, 0, alpha], group=_SIDE_WALLS_GEOM_GROUP)
    self._far_plane = self._mjcf_root.worldbody.add(
        'geom', type='plane', xyaxes=[0, -1, 0, 0, 0, 1], size=[1, 1, 1],
        rgba=[1, 0, 0, alpha], group=_SIDE_WALLS_GEOM_GROUP)

    # Filled in by `regenerate`; None until the first regeneration.
    self._current_corridor_length = None
    self._current_corridor_width = None

  def regenerate(self, random_state):
    """Regenerates this corridor.

    New values are drawn from the `corridor_width` and `corridor_height`
    distributions specified in `_build`. The corridor is resized accordingly.

    Args:
      random_state: A `numpy.random.RandomState` object that is passed to the
        `Variation` objects.
    """
    # Drop obstacle geoms left over from the previous episode.
    self._walls_body.geom.clear()

    corridor_width = variation.evaluate(self._corridor_width,
                                        random_state=random_state)
    corridor_length = variation.evaluate(self._corridor_length,
                                         random_state=random_state)
    self._current_corridor_length = corridor_length
    self._current_corridor_width = corridor_width

    # The corridor runs from x=0 to x=corridor_length; the ground extends an
    # extra _CORRIDOR_X_PADDING on each end.
    self._ground_plane.pos = [corridor_length / 2, 0, 0]
    self._ground_plane.size = [
        corridor_length / 2 + _CORRIDOR_X_PADDING, corridor_width / 2, 1]

    self._left_plane.pos = [
        corridor_length / 2, corridor_width / 2, _SIDE_WALL_HEIGHT / 2]
    self._left_plane.size = [
        corridor_length / 2 + _CORRIDOR_X_PADDING, _SIDE_WALL_HEIGHT / 2, 1]

    self._right_plane.pos = [
        corridor_length / 2, -corridor_width / 2, _SIDE_WALL_HEIGHT / 2]
    self._right_plane.size = [
        corridor_length / 2 + _CORRIDOR_X_PADDING, _SIDE_WALL_HEIGHT / 2, 1]

    self._near_plane.pos = [
        -_CORRIDOR_X_PADDING, 0, _SIDE_WALL_HEIGHT / 2]
    self._near_plane.size = [corridor_width / 2, _SIDE_WALL_HEIGHT / 2, 1]

    self._far_plane.pos = [
        corridor_length + _CORRIDOR_X_PADDING, 0, _SIDE_WALL_HEIGHT / 2]
    self._far_plane.size = [corridor_width / 2, _SIDE_WALL_HEIGHT / 2, 1]

  @property
  def corridor_length(self):
    # Length drawn at the most recent `regenerate` (None before the first).
    return self._current_corridor_length

  @property
  def corridor_width(self):
    # Width drawn at the most recent `regenerate` (None before the first).
    return self._current_corridor_width

  @property
  def ground_geoms(self):
    return (self._ground_plane,)
class GapsCorridor(EmptyCorridor):
  """A corridor that consists of multiple platforms separated by gaps."""

  def _build(self,
             platform_length=1.,
             gap_length=2.5,
             corridor_width=4,
             corridor_length=40,
             ground_rgba=(0.5, 0.5, 0.5, 1),
             visible_side_planes=False,
             name='gaps_corridor'):
    """Builds the corridor.

    Args:
      platform_length: A number or a `composer.variation.Variation` object that
        specifies the size of the platforms along the corridor.
      gap_length: A number or a `composer.variation.Variation` object that
        specifies the size of the gaps along the corridor.
      corridor_width: A number or a `composer.variation.Variation` object that
        specifies the width of the corridor.
      corridor_length: A number or a `composer.variation.Variation` object that
        specifies the length of the corridor.
      ground_rgba: A sequence of 4 numbers or a `composer.variation.Variation`
        object specifying the color of the ground.
      visible_side_planes: Whether the side planes that bound the corridor's
        perimeter should be rendered.
      name: The name of this arena.
    """
    super(GapsCorridor, self)._build(
        corridor_width=corridor_width,
        corridor_length=corridor_length,
        visible_side_planes=visible_side_planes,
        name=name)

    self._platform_length = platform_length
    self._gap_length = gap_length
    self._ground_rgba = ground_rgba

    # Container body for the platform geoms built in `regenerate`.
    self._ground_body = self._mjcf_root.worldbody.add('body', name='ground')

  def regenerate(self, random_state):
    """Regenerates this corridor.

    New values are drawn from the `corridor_width` and `corridor_height`
    distributions specified in `_build`. The corridor resized accordingly, and
    new sets of platforms are created according to values drawn from the
    `platform_length`, `gap_length`, and `ground_rgba` distributions specified
    in `_build`.

    Args:
      random_state: A `numpy.random.RandomState` object that is passed to the
        `Variation` objects.
    """
    # Resize the entire corridor first.
    super(GapsCorridor, self).regenerate(random_state)

    # Move the clear ground plane down (so only the platforms are walkable).
    self._ground_plane.pos = [self._current_corridor_length / 2, 0, -10]

    # Clear the existing platform pieces.
    self._ground_body.geom.clear()

    # Make the first platform larger, giving the walker a safe start area.
    platform_length = 3. * _CORRIDOR_X_PADDING
    platform_pos = [
        platform_length / 2,
        0,
        -_WALL_THICKNESS,
    ]
    platform_size = [
        platform_length / 2,
        self._current_corridor_width / 2,
        _WALL_THICKNESS,
    ]
    self._ground_body.add(
        'geom',
        type='box',
        rgba=variation.evaluate(self._ground_rgba, random_state),
        name='start_floor',
        pos=platform_pos,
        size=platform_size)

    # Tile alternating platforms and gaps until the end of the corridor.
    current_x = platform_length
    platform_id = 0
    while current_x < self._current_corridor_length:
      platform_length = variation.evaluate(
          self._platform_length, random_state=random_state)
      platform_pos = [
          current_x + platform_length / 2.,
          0,
          -_WALL_THICKNESS,
      ]
      platform_size = [
          platform_length / 2,
          self._current_corridor_width / 2,
          _WALL_THICKNESS,
      ]
      self._ground_body.add(
          'geom',
          type='box',
          rgba=variation.evaluate(self._ground_rgba, random_state),
          name='floor_{}'.format(platform_id),
          pos=platform_pos,
          size=platform_size)

      platform_id += 1

      # Move x to start of the next platform.
      current_x += platform_length + variation.evaluate(
          self._gap_length, random_state=random_state)

  @property
  def ground_geoms(self):
    # Lowered base plane plus every platform geom created in `regenerate`.
    return (self._ground_plane,) + tuple(self._ground_body.find_all('geom'))
class WallsCorridor(EmptyCorridor):
  """A corridor obstructed by multiple walls aligned against the two sides."""

  def _build(self,
             wall_gap=2.5,
             wall_width=2.5,
             wall_height=2.0,
             swap_wall_side=True,
             wall_rgba=(1, 1, 1, 1),
             corridor_width=4,
             corridor_length=40,
             visible_side_planes=False,
             name='walls_corridor'):
    """Builds the corridor.

    Args:
      wall_gap: A number or a `composer.variation.Variation` object that
        specifies the gap between each consecutive pair obstructing walls.
      wall_width: A number or a `composer.variation.Variation` object that
        specifies the width that the obstructing walls extend into the corridor.
      wall_height: A number or a `composer.variation.Variation` object that
        specifies the height of the obstructing walls.
      swap_wall_side: A boolean or a `composer.variation.Variation` object that
        specifies whether the next obstructing wall should be aligned against
        the opposite side of the corridor compared to the previous one.
      wall_rgba: A sequence of 4 numbers or a `composer.variation.Variation`
        object specifying the color of the walls.
      corridor_width: A number or a `composer.variation.Variation` object that
        specifies the width of the corridor.
      corridor_length: A number or a `composer.variation.Variation` object that
        specifies the length of the corridor.
      visible_side_planes: Whether the side planes that bound the corridor's
        perimeter should be rendered.
      name: The name of this arena.
    """
    super(WallsCorridor, self)._build(
        corridor_width=corridor_width,
        corridor_length=corridor_length,
        visible_side_planes=visible_side_planes,
        name=name)

    self._wall_height = wall_height
    self._wall_rgba = wall_rgba
    self._wall_gap = wall_gap
    self._wall_width = wall_width
    self._swap_wall_side = swap_wall_side

  def regenerate(self, random_state):
    """Regenerates this corridor.

    New values are drawn from the `corridor_width` and `corridor_height`
    distributions specified in `_build`. The corridor resized accordingly, and
    new sets of obstructing walls are created according to values drawn from the
    `wall_gap`, `wall_width`, `wall_height`, and `wall_rgba` distributions
    specified in `_build`.

    Args:
      random_state: A `numpy.random.RandomState` object that is passed to the
        `Variation` objects.
    """
    # The parent clears `self._walls_body` and resizes the corridor.
    super(WallsCorridor, self).regenerate(random_state)

    # First wall starts one gap past the (padded) corridor entrance.
    wall_x = variation.evaluate(
        self._wall_gap, random_state=random_state) - _CORRIDOR_X_PADDING
    wall_side = 0
    wall_id = 0
    while wall_x < self._current_corridor_length:
      wall_width = variation.evaluate(
          self._wall_width, random_state=random_state)
      wall_height = variation.evaluate(
          self._wall_height, random_state=random_state)
      wall_rgba = variation.evaluate(self._wall_rgba, random_state=random_state)
      if variation.evaluate(self._swap_wall_side, random_state=random_state):
        # Alternate which side of the corridor the wall is attached to.
        wall_side = 1 - wall_side

      # wall_side 0/1 maps to the -y/+y side of the corridor respectively.
      wall_pos = [
          wall_x,
          (2 * wall_side - 1) * (self._current_corridor_width - wall_width) / 2,
          wall_height / 2
      ]
      wall_size = [_WALL_THICKNESS / 2, wall_width / 2, wall_height / 2]
      self._walls_body.add(
          'geom',
          type='box',
          name='wall_{}'.format(wall_id),
          pos=wall_pos,
          size=wall_size,
          rgba=wall_rgba)

      wall_id += 1
      wall_x += variation.evaluate(self._wall_gap, random_state=random_state)

  @property
  def ground_geoms(self):
    return (self._ground_plane,)
| [
"[email protected]"
] |
Subsets and Splits
No saved queries yet
Save your SQL queries to embed, download, and access them later. Queries will appear here once saved.