seq_id
string | text
string | repo_name
string | sub_path
string | file_name
string | file_ext
string | file_size_in_byte
int64 | program_lang
string | lang
string | doc_type
string | stars
int64 | dataset
string | pt
string | api
list |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|
8245327650
|
from pymongo import MongoClient
from fastapi import HTTPException
from datetime import datetime
class ModekenSystemManager:
    """Top-level entry point: owns the MongoDB connection and hands out
    per-item-type Manager instances."""

    def __init__(self, username, password):
        # Credentials are interpolated into the connection URI.
        uri = f'mongodb://{username}:{password}@db.modeken-system.com:27017'
        self.client = MongoClient(uri)
        self.db = self.client['modeken-system']
        self.collections = self.db['collections']
        self.manager = {}

    def new(self, item_type):
        """Create (and cache) a Manager for *item_type*, seeding the shared
        counter document with count=0 if this type has none yet."""
        tickets = self.db[f'tickets-{item_type}']
        manager = Manager(tickets, self.collections, item_type)
        self.manager[f'{item_type}'] = manager
        if self.collections.find_one({'item_type': item_type}) is None:
            self.collections.insert_one({'item_type': item_type, 'count': 0})
        return manager
class CollectionManager:
    """Wraps the shared 'collections' counter collection for one item type."""

    def __init__(self, collections, item_type):
        self.collections = collections
        self.item_type = item_type

    def add_collection(self):
        """Increment this item type's counter by one; 500 on failure."""
        result = self.collections.update_one(
            {'item_type': self.item_type}, {'$inc': {'count': 1}})
        if not result:
            raise HTTPException(status_code=500, detail='Internal Server Error')

    def get_collection(self):
        """Return the counter document stripped of Mongo bookkeeping keys."""
        doc = self.collections.find_one({'item_type': self.item_type})
        if not doc:
            raise HTTPException(status_code=500, detail='Internal Server Error')
        doc.pop('_id')
        doc.pop('item_type')
        return doc

    def reset_collection(self):
        """Reset this item type's counter back to zero; 500 on failure."""
        result = self.collections.update_one(
            {'item_type': self.item_type}, {'$set': {'count': 0}})
        if not result:
            raise HTTPException(status_code=500, detail='Internal Server Error')
class TicketManager:
    """CRUD-style operations on one item type's ticket collection.

    Ticket documents look like:
        {'item_number': int, 'status': 'wait'|'ready'|'cancel'|'delete',
         'created_time': 'HH:MM'}

    Improvements: the four near-identical status-listing/transition
    methods now share two private helpers, and add_ticket uses a loop
    instead of unbounded recursion.
    """

    def __init__(self, tickets):
        self.tickets = tickets
        # Highest item_number handed out so far (process-local counter).
        self.last_ticket = 0

    def _list_by_status(self, status, item_type):
        # Shared body of get_tickets_wait/get_tickets_ready: strip Mongo
        # bookkeeping and prefix the numeric ticket id with the item type.
        data = []
        for doc in self.tickets.find({'status': status}):
            del doc['_id']
            del doc['status']
            doc['item_number'] = item_type + str(doc['item_number'])
            data.append(doc)
        return data

    def get_tickets_wait(self, item_type):
        """Return all waiting tickets, item numbers prefixed with item_type."""
        return self._list_by_status('wait', item_type)

    def get_tickets_ready(self, item_type):
        """Return all ready tickets, item numbers prefixed with item_type."""
        return self._list_by_status('ready', item_type)

    def get_tickets(self):
        """Return every ticket (any status) without the Mongo '_id'."""
        data = []
        for doc in self.tickets.find():
            del doc['_id']
            data.append(doc)
        return data

    def _transition(self, item_number, from_status, to_status):
        # Move a ticket between statuses; 404 if no ticket with that number
        # currently has *from_status*. Returns (UpdateResult, ticket doc).
        query = {'$and': [{'item_number': item_number}, {'status': from_status}]}
        ticket = self.tickets.find_one(query)
        if not ticket:
            raise HTTPException(status_code=404, detail=f'Not Found: {item_number}')
        return self.tickets.update_one(query, {'$set': {'status': to_status}}), ticket

    def to_ready_ticket(self, item_number):
        """wait -> ready; returns (update result, created_time)."""
        result, ticket = self._transition(item_number, 'wait', 'ready')
        return result, ticket['created_time']

    def to_wait_ticket(self, item_number):
        """ready -> wait; returns (update result, created_time)."""
        result, ticket = self._transition(item_number, 'ready', 'wait')
        return result, ticket['created_time']

    def cancel_ticket(self, item_number):
        """wait -> cancel; returns the update result."""
        result, _ = self._transition(item_number, 'wait', 'cancel')
        return result

    def delete_ticket(self, item_number):
        """ready -> delete (soft delete); returns the update result."""
        result, _ = self._transition(item_number, 'ready', 'delete')
        return result

    def add_ticket(self):
        """Allocate the next free item_number, insert a 'wait' ticket and
        return (item_number, created_time)."""
        # Loop (rather than the original recursion) until a free number is
        # found, so a long-lived collection cannot blow the stack.
        while True:
            self.last_ticket += 1
            item_number = self.last_ticket
            if not self.tickets.find_one({'item_number': item_number}):
                break
        # NOTE: only hour:minute is stored, as in the original schema.
        now = datetime.now().strftime('%H:%M')
        data = {'item_number': item_number, 'status': 'wait', 'created_time': now}
        if not self.tickets.insert_one(data):
            raise HTTPException(status_code=500, detail='Internal Server Error')
        return item_number, now

    def reset_tickets(self):
        """Drop every ticket document and restart numbering from zero."""
        if not self.tickets.delete_many({}):
            raise HTTPException(status_code=500, detail='Internal Server Error')
        self.last_ticket = 0
class Manager(CollectionManager, TicketManager):
    """Per-item-type facade combining ticket CRUD with the counter.

    MRO is Manager -> CollectionManager -> TicketManager, so the super()
    chain below resolves delete_ticket on TicketManager and
    add_collection on CollectionManager.
    """

    def __init__(self, tickets, collections, item_type):
        # Initialise both bases explicitly (they have different signatures).
        TicketManager.__init__(self, tickets)
        CollectionManager.__init__(self, collections, item_type)

    def delete_ticket(self, item_number):
        # Handing a ticket over also bumps the collected count.
        super().delete_ticket(item_number)
        super().add_collection()
|
tochiman/modeken-ticket-system
|
backend/src/mongo.py
|
mongo.py
|
py
| 5,068 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "pymongo.MongoClient",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "fastapi.HTTPException",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "fastapi.HTTPException",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "fastapi.HTTPException",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "fastapi.HTTPException",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "fastapi.HTTPException",
"line_number": 82,
"usage_type": "call"
},
{
"api_name": "fastapi.HTTPException",
"line_number": 89,
"usage_type": "call"
},
{
"api_name": "fastapi.HTTPException",
"line_number": 95,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 101,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 101,
"usage_type": "name"
},
{
"api_name": "fastapi.HTTPException",
"line_number": 107,
"usage_type": "call"
},
{
"api_name": "fastapi.HTTPException",
"line_number": 112,
"usage_type": "call"
}
] |
8366348060
|
# /usr/bin/env python
# -*- coding:utf-8 -*-
# Standard library imports (grouped per PEP 8; `time` is required by retry()).
import math
import sys
import time

from dataclasses import dataclass
from functools import reduce
from functools import wraps
"""
迭代器
迭代是Python最强大的功能之一,是访问集合元素的一种方式。
迭代器是一个可以记住遍历的位置的对象。
迭代器对象从集合的第一个元素开始访问,直到所有的元素被访问完结束。迭代器只能往前不会后退
迭代器有两个基本的方法:
iter() 创建迭代器
next() 获取迭代器的下一个元素
字符串、列表、元组对象 都可用于创建迭代器
迭代器结束的异常
StopIteration
创建迭代器
把一个类作为一个迭代器使用需要在类中实现两个方法__iter__() 与 __next__()
__iter__()方法返回一个特殊的迭代器对象,这个迭代器对象实现了__next__()方法并通过StopIteration异常标识迭代的完成。
__next__()方法会返回下一个迭代器对象
只有实现了__iter__(),才能在使用iter()的时候创建一个迭代器
只有实现了__next__(),才能在使用next()的时候通过__next__()按照规则获取下一个值
注意
只有实现了__iter__()方法,才能使用iter(),对该类的对象创建迭代器。但是不是说实现了__iter__()方法,就已经创建了迭代器。这是单独的概念。
只有实现了__next__(),才能在使用next()的时候通过__next__()按照规则获取下一个值。但是不是说没有实现__next__(),就不能创建迭代器了。
"""
"""
实现了迭代器函数的类
"""
class iterationNum:
    """A minimal iterator class: counts 1, 2, 3, ... without end.

    NOTE: __next__ never raises StopIteration, so consumers must impose
    their own bound.
    """

    def __iter__(self):
        # iter() resets the counter and returns the object itself.
        self.num = 1
        return self

    def __next__(self):
        # Save the current value before advancing so the first next() is 1.
        current = self.num
        self.num += 1
        return current
"""
迭代器的多种使用方式
"""
def mapLearn():
    # NOTE(review): this function is DEAD CODE — a second `def mapLearn()`
    # later in this module rebinds the name at import time, so this version
    # can never be called. Rename one of the two to make both reachable.
    list = [1, 2, 3, 4]  # NOTE: shadows the built-in `list` inside this function
    print("迭代器使用方式01")
    mapL01 = iter(list)  # build an iterator from the list
    for mapl in mapL01:
        # print(mapl)
        # end=" " replaces the trailing newline with a space
        print(mapl, end=" ")
    print("迭代器使用方式02")
    # Once an iterator is exhausted, a further next() raises StopIteration.
    mapL02 = iter(list)
    print(next(mapL02))
    print(next(mapL02))
    print(next(mapL02))
    print(next(mapL02))
    # print(next(mapL02))
    print("迭代器使用方式03")
    mapL03 = iter(list)
    # i = True
    # while i:
    # try:
    # mapl03 = next(mapL03)
    # print(mapl03)
    # except StopIteration:
    # i = False
    # print("迭代器03已遍历结束")
    # Drain the iterator manually, using StopIteration as the stop signal.
    while True:
        try:
            print(next(mapL03))
        except StopIteration:
            print("迭代器03已遍历结束")
            break
    print("迭代器对象创建及使用")
    itClass = iterationNum()
    itN = iter(itClass)
    # while True:
    # try:
    # b = next(itN)
    # if b == 10:
    # break;
    # else:
    # print(b)
    # except StopIteration:
    # break
    # NOTE(review): iterationNum.__next__ never raises StopIteration, so
    # this loop never terminates — the commented-out variant above (which
    # breaks at b == 10) was the bounded approach. Confirm before running.
    while True:
        try:
            print(next(itN))
        except StopIteration:
            break
"""
生成器
用于生成迭代器
在Python中,使用yield的函数被称为生成器(generator)
跟普通函数不同的是,生成器是一个返回迭代器的函数,只能用于迭代操作。
简单理解:生成器就是一个迭代器
调用一个生成器函数,返回的是一个迭代器对象
在调用生成器运行的过程中,每次遇到yield时函数会暂停并保存当前所有的运行信息,返回yield的值,并在下一次执行next()方法时从当前位置继续运行。
使用yield生成迭代器需要配合循环使用
所谓生成器就是利用循环将某个变量或某个值整合到一起,作为一个迭代器返回
也可以理解为将想要的数整合到一起,作为一个迭代器返回
"""
"""
生成器函数
斐波那契数列
"""
def fibonacci(n):
    """Generator yielding the first n+1 Fibonacci numbers (F0 .. Fn)."""
    a, b = 0, 1
    for _ in range(n + 1):
        yield a
        a, b = b, a + b
"""
生成器函数练习
使用yield生成迭代器,需要配合循环使用
"""
def yieldTest(n):
    """Generator yielding 0, 1, ..., n+1 (n+2 values for n >= 0).

    NOTE: because the bound is checked *after* each yield, the last value
    produced is n+1, not n — preserved from the original.
    """
    i = 0
    yield i
    while i <= n:
        i += 1
        yield i
"""
生成器的使用练习
"""
def yieldLearn():
    """Drive both generators, printing every yielded value."""
    print("通过生成器实现:斐波那契数列")
    for value in fibonacci(10):
        print(value)
    print("自定义生成器实现")
    for value in yieldTest(10):
        print(value)
"""
map&filter&reduce
函数式编程的代表
内置函数map和filter是在列表(或类似的称为迭代的对象)上运行的非常有用的高阶函数
函数map接受一个函数和一个迭代器作为参数,并返回一个新的迭代器,该函数应用于每个参数
map()
使用迭代器按指定规则生成迭代器
根据提供的函数对指定序列做映射
语法:
map(function, iterable, ...)
参数:
function 函数
iterable 一个或多个序列
返回值:
Python2.x 返回列表
Python3.x 返回迭代器
解释:
第一个参数function以参数序列iterable中的每一个元素调用function函数
返回包含每次function函数返回值的新列表
"""
"""
函数式编程map()高阶函数的练习
"""
"""
定义一个函数用于测试map
计算平方数
"""
def square(x):
    """Return x squared (map() helper)."""
    return x * x
"""
map()使用练习
"""
def mapLearn():
    """map() practice: apply a function / lambda over one or two sequences."""
    nums = [1, 2, 3, 4, 5]
    print("使用函数实现map()")
    for value in map(square, nums):
        print(value)
    print("使用lambda表达式实现map()")
    for value in map(lambda x: x ** 2, nums):
        print(value)
    # map() also accepts several iterables, consumed in lockstep.
    print("使用lambda计算两个迭代器实现map()")
    xs = [1, 2, 3, 4, 5]
    ys = [-1, -2, -3, -4, -5]
    for total in map(lambda x, y: x + y, xs, ys):
        print(total)
"""
filter()
filter()函数用于过滤序列,过滤掉不符合条件的元素,返回由符合条件元素组成的新列表
语法:
filter(function, iterable)
参数:
function 判断函数
iterable 可迭代对象
返回值:
Python2.x 返回列表
Python3.x 返回可迭代对象
解释:
接收两个参数,第一个是函数,第二个是序列,序列的每个元素作为参数传递给函数进行判断。
然后返回True或False,最后将返回True的元素放到新列表中
"""
"""
函数式编程filter()高阶函数的练习
"""
"""
定义一个函数用于测试filter
判断是否单数
"""
def isOdd(n):
    """Return True when n is odd (filter() helper).

    Works for negatives too: Python's % yields 0 or 1 for modulus 2.
    """
    remainder = n % 2
    return remainder == 1
"""
filter使用练习
"""
def filterLearn():
    """filter() practice: keep only elements the predicate accepts."""
    nums = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
    print("使用函数实现filter")
    for kept in filter(isOdd, nums):
        print(kept)
    print("使用lambda实现filter")
    for kept in filter(lambda x: x % 2 == 1, nums):
        print(kept)
    print("使用filter过滤除1~100中平方根是整数的数")
    for kept in filter(lambda x: math.sqrt(x) % 1 == 0, range(1, 101)):
        print(kept)
"""
reduce()
reduce()函数会对参数序列中元素进行累积
语法:
reduce(function, iterable[, initializer])
参数:
function 函数,有两个参数
iterable 可迭代对象
initializer 可选,初始参数
返回值:
函数计算结果
解释:
函数将一个数据集合(链表,元组等)中的所有数据进行下列操作:
用传给reduce中的函数function(有两个参数)先对集合中的第1、2个元素进行操作
得到的结果再与第三个数据用function函数运算,最后得到一个结果
也就是说把iterable中的所有值进行function计算,得到一个计算结果
注意:
Python3.x reduce()已经被移动到functools模块里,如果要使用,需要引入functools模块来调用reduce()函数
from functools import reduce
"""
"""
函数式编程reduce()高阶函数的练习
"""
"""
定义一个函数用于测试reduce
加法
"""
def addRed(x, y):
    """Binary addition helper for reduce()."""
    total = x + y
    return total
"""
reduce使用练习
"""
def reduceLearn():
    """reduce() practice: fold a sequence down to a single value."""
    nums = [1, 2, 3, 4, 5]
    print("使用函数实现reduce")
    print(reduce(addRed, nums))
    print("使用lambda实现reduce")
    print(reduce(lambda x, y: x + y, nums))
    print("使用reduce计算1-100的和")
    print(reduce(lambda x, y: x + y, range(1, 101)))
    print("使用reduce计算1-100的积")
    print(reduce(lambda x, y: x * y, range(1, 101)))
"""
装饰器
装饰器(Decorators)是Python的一个重要部分。
简单的说:他们是修改其他函数的功能的函数。
有助于让代码更简单,更Pythonic(Python范儿)。
需要知道在哪里使用装饰器,以及如何开发装饰器。
装饰器是得在一个函数的前后执行代码
工厂模式?
装饰器助力用更少、更简单的代码来实现复杂的逻辑,并在其他地方实现重用。
装饰器就是定义一个嵌套函数,已一个函数作为参数,在嵌套函数中把参数函数前后加一下语句,然后把嵌套函数作为返回值。这样就相当于修改了参数函数的功能
注意:
装饰器可以使用类的方式实现。在类中用下面的函数和标记实现装饰器类
__call__
@wraps(func)
装饰器包装
functools.wraps 装饰器修饰函数
@wraps
接受一个函数来进行装饰,并加入了复制函数名称、注释文档、参数列表等功能。
这可以让我们在装饰器里面访问在装饰之前的函数的属性
@装饰器名称
在函数前面加@+装饰器名称,表示使用指定的装饰器对该函数进行装饰。
在后面使用该函数的是,就是已经经过装饰的功能了
装饰器常用实现
授权检查
日志实现
装饰器类
发送邮件
...
装饰器定义及使用标准语句
from functools import wraps
def decorator_name(f):
@wraps(f)
def decorated(*args, **kwargs):
if not can_run:
return "Function will not run"
return f(*args, **kwargs)
return decorated
@decorator_name
def func():
return("Function is running")
can_run = True
print(func())
# Output: Function is running
can_run = False
print(func())
# Output: Function will not run
常见装饰器
https://zhuanlan.zhihu.com/p/602457512
@classmethod: 声明一个类方法,可以通过类名直接调用。
python类中有三种方法类型:
Instance methods(实例方法):
绑定一个实例的方法,利用这种方法可以访问和修改实例数据。
通过类的实例调用实例方法,通过self参数访问实例数据。
第一个参数是自身。
Class methods(类方法):
@classmethod
绑定一个类的方法,无法利用该方法修改实例数据。
是调用类自身的一种方法,它将类作为第一个参数,通常将其命名为cls
Static methods(静态方法):
@staticmethod
不绑定实例或类的方法。仅仅因为他们在逻辑上属于那个类,才被包含进来。
静态方法通常用于执行一组相关任务的使用程序类中,如数学计算。通过将相关函数组织成类的静态方法,使代码变得更加有组织、更容易理解。
@staticmethod: 声明一个静态方法,可以通过类名直接调用。
@property: 为Python类设置处理程序和设置程序。
将一个方法转换为只读属性。也可以理解成,将一个方法改成了 __getter__方法。并且可以拿这个方法继续对后面的方法进行装饰。
用于对属性进行保护
Getters和Setters是面向对象编程(OOP)中的重要概念。
对于类中的每个实例变量,getter方法返回其值,而setter方法设置或更新其值。鉴于此,Getters和Setters又分别称为Accessors和Mutators。
它们用于保护数据不被直接意外访问或修改。
不同的OOP语言有不同的机制来定义获取器getters和setters。在Python中,可以简单地使用@property装饰器。
__getter__ __setter__
通过property装饰后,可以直接取变量,也可以通过函数取变量。函数不能加()
@abstractmethod: 声明一个抽象方法,子类必须实现它。
@wraps: 用于保留原始函数的元数据(如函数名、注释等)。
创建装饰器的时候使用。用于保留原始函数的元数据(如函数名、注释等)。
@lru_cache:利用缓存提速程序。是提速Python函数最简易的方法
此装饰器将函数的结果放入缓存,供后续具有相同参数的函数调用,无需再次执行具有相同参数的函数。
@total_ordering: 填充缺失排序方法的类装饰器
函数工具模块中的@total_sordeng装饰器为预定义Python类生成缺失比较方法。
在类中没有对__ge__、__gt__和__le__方法进行定义。对该类的对象进行比较是会有问题。这个装饰器会补充缺失的比较方法
一些旧的类可能未充分定义比较方法,将@total_ordering装饰器添加到其中之后,后续的使用更加安全。
@contextmanager:定制的语境管理器
可以使用with语句打开文件,在写入之后将自动关闭。无需显式地调用f.close()函数来关闭该文件。
@cached_property:将方法的结果作为属性放入缓存
Python 3.8的函数工具模块引入了一个新的功能强大的装饰器-@cached_property,它将类的方法转换为一个属性,计算出该属性的值之后,将其作为实例的普通属性放入缓存。
@dataclass:用更少的代码定义专用类
(在Python3.7中引入)可以自动为一个类生成几种专用的方法,如__init__、__repr__、__eq__、__lt__等。
@atexit.register:注册一个程序正常终止的函数
atexit模块的@register装饰器允许在Python解释器退出时执行一个函数。
@login_required: 用于限制需要用户登录才能访问的视图函数。
@cache: 缓存函数的结果,避免重复计算。
@retry: 在发生错误时自动重试代码块一定次数。
需要自己定义
函数
函数可以赋值
函数中可以定义函数(函数中定义的函数,在函数外无法访问)
函数可以返回函数
funcName() 执行函数
funcName()
funcName 把函数整体赋值给另外一个变量
a1 = funcName
a1()
"""
"""
一切皆对象
"""
"""
函数赋值使用实例
"""
def hi(name="renxw"):
    """Return a greeting for *name* (function-assignment demo)."""
    greeting = "hi " + name
    return greeting
"""
函数中定义函数
函数中定义的函数,在函数外不能被访问
"""
def hiFun01(name="renxw"):
    """Demonstrate nested functions: inner defs are invisible outside."""
    print("now you are inside the hiFun01() function")

    def inner02():
        return "now you are in the hiFun02() function"

    def inner03():
        return "now you are in the hiFun03() function"

    # The inner functions are only callable from inside this scope.
    print(inner02())
    print(inner03())
    print("now you are back in the hiFun01() function")
"""
从函数中返回函数
"""
def hiFun04(name="renxw"):
    """Return one of two inner functions (uncalled) depending on *name*."""
    print("now you are inside the hiFun04() function")

    def hiFun02():
        return "now you are in the hiFun02() function"

    def hiFun03():
        return "now you are in the hiFun03() function"

    # Functions are first-class objects: hand back the function itself.
    return hiFun02 if name == "renxw" else hiFun03
"""
函数练习
"""
def hiFunTest():
    """Exercise function assignment, nesting, and returning functions."""
    print("函数赋值")
    print(hi())
    # A function object can be bound to another name and called through it.
    alias = hi
    print(alias())
    print("在函数中定义函数")
    hiFun01()
    print("从函数中返回函数")
    returned = hiFun04()
    print(returned())
"""
装饰器练习
"""
"""
装饰器函数定义
functools.wraps 指明装饰的函数
@wraps(a01Fun)
接受一个函数来进行装饰,并加入了复制函数名称、注释文档、参数列表等功能。
这可以让我们在装饰器里面访问在装饰之前的函数的属性
"""
def a01Decorator(a01Fun):
    """Decorator that prints before and after calling the wrapped function.

    @wraps copies __name__/__doc__ etc. from the original function onto
    the wrapper, so introspection still sees the undecorated identity.
    """
    @wraps(a01Fun)
    def wrapped():
        print("I am doing some boring work before executing a01Fun()")
        a01Fun()
        print("I am doing some boring work after executing a01Fun()")
    return wrapped
"""
@指明装饰器
在指明装饰器后,在使用函数的时候就可以直接使用装饰后的功能,不需要再使用装饰器进行包装赋值了
"""
@a01Decorator
def a02Fun():
    """A plain function wrapped by a01Decorator at definition time."""
    message = "I am the function which needs some decoration to remove my foul smell"
    print(message)
"""
装饰器练习
"""
def decoratorLearn():
    """Call the decorated function and show that @wraps kept its name."""
    a02Fun()
    print(a02Fun.__name__)
"""
内置装饰器练习
"""
class IterClass:
    """Demonstrates @property getter/setter wiring (with a naming quirk).

    NOTE(review): the getter is named `score1` but the setter is declared
    as `@score1.setter def score(...)`. That creates TWO class attributes:
    `score` becomes a full property (score1's getter plus this setter),
    while `score1` stays read-only. Reads work through either name;
    writes only through `score`.
    """
    def __init__(self):
        # Backing field guarded by the property below.
        self._score = 0
    @property
    def score1(self):
        # Read-only access to the backing field.
        return self._score
    @score1.setter
    def score(self, s):
        # Accept only values in 0..100 inclusive.
        if 0 <= s <= 100:
            self._score = s
        else:
            # NOTE(review): the message says "too large" but this branch
            # is also reached for s < 0.
            raise Exception("参数太大,只允许0-100")
@dataclass
class Point:
    """A 2-D point; @dataclass generates __init__, __repr__, __eq__, etc."""
    x: float
    y: float
def point_func():
    """Print a Point to show the auto-generated dataclass __repr__."""
    print(Point(1.0, 2.0))
"""自定义装饰器retry"""
def retry(max_retries=3, timeout=1):
"""
定义装饰:用于函数重试,和等待重试时长。
这里面的参数时装饰器函数本身的参数
:param max_retries: 最大重试次数
:param timeout: 设置超时重试时长
:return:
"""
def decorator(func):
"""
定义装饰器和将装饰器返回
:param func: 以函数式编程的方式,使用参数接收被装饰函数的函数名,在装饰器中使用 func() 进行执行函数
:return: wrapper(被装饰之后的函数名,在函数被装饰之后,调用别装饰函数的时候,实际上调用的就是wrapper函数)
"""
@wraps(func)
def wrapper(*args, **kwargs):
"""
定义被装饰函数,被装饰之后的函数功能
这里的参数时被装饰函数的参数
@wraps(func): 这里可以使用wraps用来标识装饰器,并且接收func函数名。不写也可以
@wraps接受一个函数来进行装饰,并加入了复制函数名称、注释文档、参数列表等等的功能。这可以让我们在装饰器里面访问在装饰之前的函数的属性。
:param args: (形参)(元组)用于接收被装饰函数的所有非关键字参数
:param kwargs: (形参)(字典)用于接收被装饰函数的所有关键字参数
:return:
"""
"""使用循环的方式对被装饰函数进行重试"""
retries = 0
exce_type = 0
while retries < max_retries:
try:
"""
args: (实参)将通过wrapper函数接收到的被装饰函数的非关键字参数的参数集合(元组),使用*进行展开,将其中所有元素单独传递给被装饰函数
kwargs: (实参)将通过wrapper函数接收到的被装饰函数的关键字参数的参数集合(字典),使用**进行展开,将其中所有元素(键:值对)单独传递给被装饰函数
1、将被装饰函数所有参数传递给被装饰函数
2、执行被装饰函数
3、返回被装饰函数的return值(被装饰函数的返回值需要单独返回,要不然无法被引用)
注意点:
如果func执行没有异常,就会直接执行return语句将func结果返回,那就不会再进行循环了。而不会在func正常的情况下还将func循环执行三次
"""
return func(*args, **kwargs)
except BaseException as e:
"""重试次数+1"""
retries += 1
"""最后失败时,将失败原因进行保存,进行输出!"""
exce_type = e if retries == max_retries else None
"""输出失败日志"""
print(f"执行 {func.__name__} 失败,正在进行第 {retries} 次重试!")
"""失败重试,等待时间"""
time.sleep(timeout)
"""
最终失败后将异常抛出,并且将失败信息进行提示!
异常抛出的时候使用之前存储好的异常遍历,获取异常类型,准确的进行异常信息输出
"""
raise exce_type.__class__(f"执行 {func.__name__} 失败,已达到最大重试次数,最终的失败原因是 {exce_type}!")
"""函数式编程:将被装饰器装饰后的的函数名返回"""
return wrapper
"""函数式编程:将装饰器函数的函数名返回"""
return decorator
@retry(max_retries=3, timeout=1)
def retry_test():
    """Deliberately failing function used to exercise @retry.

    Bug fixed: ``raise(IOError, "...")`` raised a *tuple*, which is a
    TypeError in Python 3 — the exception is now constructed properly.
    (The original's trailing ``return 1`` was unreachable and removed.)
    """
    print("retry测试")
    raise IOError("主动抛出异常,用于装饰器测试!")
@retry(max_retries=10, timeout=3)
def sum_num(x, y):
    """Return x + y; decorated with @retry to show argument forwarding."""
    # raise (IOError, "主动抛出异常,用于装饰器测试!")
    return x + y
"""
递归练习
x! x的阶乘的实现
"""
def factorial(x):
    """Return x! computed recursively.

    Bug fixed: the original recursed forever (RecursionError) for
    x <= 0. The base case now covers 0! == 1 as well, and negative
    input raises ValueError instead of overflowing the stack.
    """
    if x < 0:
        raise ValueError("factorial() not defined for negative values")
    if x <= 1:
        return 1
    return x * factorial(x - 1)
"""
递归练习
"""
def recursionLearn():
    """Print 10! computed recursively."""
    print(factorial(10))
# Manual practice runner: uncomment a section to exercise that topic.
if __name__ == "__main__":
    # print("迭代器练习")
    # mapLearn()
    # print("生成器练习")
    # yieldLearn()
    # print("函数式编程高阶函数-map练习")
    # mapLearn()
    # print("函数式编程高阶函数-filter练习")
    # filterLearn()
    # print("函数式编程高阶函数-reduce练习")
    # reduceLearn()
    # print("函数练习")
    # hiFunTest()
    # print("装饰器练习")
    # decoratorLearn()
    print("内置装饰器练习")
    ic = IterClass()
    ic.score = 10  # goes through the validating `score` property setter
    print(ic.score)
    print(ic.score1)  # read-only alias over the same backing field
    ic.score = 100
    print(ic.score)
    print(ic.score1)
    point_func()
    # print("递归练习")
    # recursionLearn()
|
renxiaowei-1991/pythonLearn
|
a01PythonLearn/package/b01PythonLearn/c23ClassHighLearn.py
|
c23ClassHighLearn.py
|
py
| 24,298 |
python
|
zh
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "math.sqrt",
"line_number": 314,
"usage_type": "call"
},
{
"api_name": "functools.reduce",
"line_number": 361,
"usage_type": "call"
},
{
"api_name": "functools.reduce",
"line_number": 366,
"usage_type": "call"
},
{
"api_name": "functools.reduce",
"line_number": 370,
"usage_type": "call"
},
{
"api_name": "functools.reduce",
"line_number": 374,
"usage_type": "call"
},
{
"api_name": "functools.wraps",
"line_number": 593,
"usage_type": "call"
},
{
"api_name": "dataclasses.dataclass",
"line_number": 646,
"usage_type": "name"
},
{
"api_name": "functools.wraps",
"line_number": 673,
"usage_type": "call"
}
] |
20972559660
|
import torch
from tools.image import transforms, cv
from tools.image.index_map import default_map
from tools import tensor
def to_rgb(hex):
    """Split a 0xRRGGBB integer colour into an (r, g, b) tuple."""
    r = (hex >> 16) & 0xff
    g = (hex >> 8) & 0xff
    b = hex & 0xff
    return (r, g, b)
def draw_box(image, box, scale=1.0, name=None, confidence=None, thickness=2, color=(255, 0, 0), text_color=None):
    """Draw one bounding box (plus optional label/confidence text) on image.

    box is (x1, y1, x2, y2); text_color falls back to the box colour.
    """
    text_color = text_color or color
    image = cv.rectangle(image, box[:2], box[2:],
                         color=color, thickness=int(thickness * scale))
    text_scale = 0.7 * scale
    text_thickness = int(1 * scale)
    if name is not None:
        # Class name just inside the top-left corner of the box.
        image = cv.putText(image, name, (box[0], box[1] + int(8 * scale)),
                           scale=text_scale, color=text_color, thickness=text_thickness)
    if confidence is not None:
        # Confidence just above the bottom-left corner of the box.
        label = "{:.2f}".format(confidence)
        image = cv.putText(image, label, (box[0], box[3] - 2),
                           scale=text_scale, color=text_color, thickness=text_thickness)
    return image
def overlay(eval, mode='target', threshold = 0.5, scale=1.0, classes=None):
    """Render one evaluation result onto a copy of its image.

    mode picks what is drawn: 'prediction' (detections only), 'target'
    (ground truth only) or 'matches' (targets coloured by match status
    plus the detections). classes maps a label index to class metadata
    (.name, .colour). NOTE(review): the parameter name `eval` shadows
    the built-in.
    """
    image = eval.image.clone()
    def overlay_prediction():
        # Detections appear assumed sorted by descending confidence:
        # iteration stops at the first one below threshold.
        for detection in eval.detections._sequence():
            if detection.confidence < threshold:
                break
            label_class = classes[detection.label]
            draw_box(image, detection.bbox, scale=scale, confidence=detection.confidence, name=label_class.name, color=to_rgb(label_class.colour))
    def overlay_target():
        # Draw every ground-truth box in its class colour.
        for target in eval.target._sequence():
            label_class = classes[target.label]
            draw_box(image, target.bbox, scale=scale, name=label_class.name, color=to_rgb(label_class.colour))
    def overlay_anchors():
        # NOTE(review): defined but not wired into the `target` dispatch
        # table below, so it is currently unreachable through `mode`.
        overlay_target()
        for anchor in eval.anchors:
            label_class = classes[anchor.label]
            draw_box(image, anchor.bbox, scale=scale, color=to_rgb(label_class.colour), thickness=1)
    def overlay_matches():
        # Start with every target index marked unmatched, then remove the
        # ones some above-threshold detection claimed.
        unmatched = dict(enumerate(eval.target._sequence()))
        # print(unmatched)
        for m in eval.matches:
            if m.confidence < threshold: break
            if m.match is not None:
                k, _ = m.match
                del unmatched[k]
        # Targets: red if never matched, green otherwise.
        for (i, target) in enumerate(eval.target._sequence()):
            label_class = classes[target.label]
            color = (255, 0, 0) if i in unmatched else (0, 255, 0)
            draw_box(image, target.bbox, scale=scale, name=label_class.name, color=color)
        # Detections: green when matched to a target, red otherwise.
        for m in eval.matches:
            if m.confidence < threshold: break
            color = (255, 0, 0)
            if m.match is not None:
                color = (0, 255, 0)
            label_class = classes[m.label]
            draw_box(image, m.bbox, scale=scale, color=color, confidence=m.confidence, name=label_class.name, thickness=1)
    # Dispatch table: mode name -> drawing routine; unknown modes draw nothing.
    target = {
        'matches' : overlay_matches,
        'prediction' : overlay_prediction,
        'target' : overlay_target
    }
    if mode in target:
        target[mode]()
    # Blend an optional per-pixel debug layer over the rendered image.
    if eval.debug is not None:
        image = cv.blend_over(image, eval.debug)
    # Stamp the id and mAP score in the top-left corner.
    cv.putText(image, eval.id, (0, int(24 * scale)), scale = 2*scale, color=(64, 64, 192), thickness=int(2*scale))
    cv.putText(image, "[email protected] " + str(eval.mAP), (0, int(48 * scale)), scale = 2*scale, color=(64, 64, 192), thickness=int(2*scale))
    return image
def overlay_batch(batch, mode='target', scale=1.0, threshold = 0.5, cols=6, classes=None):
    """Overlay every evaluation in *batch* and tile the results into a grid."""
    rendered = [overlay(e, scale=scale, mode=mode, threshold=threshold, classes=classes)
                for e in batch]
    return tensor.tile_batch(torch.stack(rendered, 0), cols)
|
oliver-batchelor/detection
|
detection/display.py
|
display.py
|
py
| 3,548 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "tools.image.cv.rectangle",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "tools.image.cv",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "tools.image.cv.putText",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "tools.image.cv",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "tools.image.cv.putText",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "tools.image.cv",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "tools.image.cv.blend_over",
"line_number": 96,
"usage_type": "call"
},
{
"api_name": "tools.image.cv",
"line_number": 96,
"usage_type": "name"
},
{
"api_name": "tools.image.cv.putText",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "tools.image.cv",
"line_number": 98,
"usage_type": "name"
},
{
"api_name": "tools.image.cv.putText",
"line_number": 99,
"usage_type": "call"
},
{
"api_name": "tools.image.cv",
"line_number": 99,
"usage_type": "name"
},
{
"api_name": "tools.tensor.tile_batch",
"line_number": 112,
"usage_type": "call"
},
{
"api_name": "tools.tensor",
"line_number": 112,
"usage_type": "name"
},
{
"api_name": "torch.stack",
"line_number": 112,
"usage_type": "call"
}
] |
14242354466
|
#基类,将原生的方法都封装一遍,让继承的类去调用
import csv
import logging
import os
from time import sleep, strftime
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.common.by import By
from baseView.desired_caps import appium_desired
class BaseView(object):
    """Base page object: wraps the raw appium/selenium driver calls so
    page classes inherit one consistent API."""

    def __init__(self, driver):
        # Keep a reference to the appium webdriver for all helpers below.
        self.driver = driver

    def find_element(self, *loc):
        """Locate a single element; *loc is a (By, value) pair."""
        return self.driver.find_element(*loc)

    def find_elements(self, *loc):
        """Locate ALL matching elements.

        Bug fixed: this previously delegated to driver.find_element
        (singular), returning one element instead of a list.
        """
        return self.driver.find_elements(*loc)

    def get_window_size(self):
        """Return the screen size as {'width': ..., 'height': ...}."""
        return self.driver.get_window_size()

    def swipe(self, start_x, start_y, end_x, end_y, duration=None):
        """Swipe from (start_x, start_y) to (end_x, end_y) over *duration* ms.

        Bug fixed: duration was hard-coded to None, so every caller's
        duration (e.g. 1000 ms) was silently ignored.
        """
        return self.driver.swipe(start_x, start_y, end_x, end_y, duration=duration)

    def get_size(self):
        """Return the screen size as an (x, y) tuple."""
        size = self.get_window_size()
        return size['width'], size['height']

    # Locators used by the first-launch flow below.
    loginBtn = (By.ID, 'com.chan.iamabuyer:id/btn_login')  # login button
    jurBtn = (By.ID, 'com.android.packageinstaller:id/permission_allow_button')  # permission "allow" button

    def photoSalbum(self):
        """Tap two album photos by relative screen position."""
        logging.info('调用photosalbum,进行屏幕相对定位点击')
        x, y = self.get_size()
        self.driver.tap([((x * 0.5), (y * 0.14))], 500)
        sleep(2)
        self.driver.tap([((x * 0.1), (y * 0.14))], 500)

    def swipeLeft(self):
        """Swipe a guide page right-to-left across 80% of the screen width."""
        logging.info('调用swipeLeft滑动引导页')
        x, y = self.get_size()
        # Right edge to left edge at mid-height, 1 second.
        self.swipe(int(x * 0.9), int(y * 0.5), int(x * 0.1), int(y * 0.5), 1000)

    def addressLeft(self):
        """Swipe left on an address row to reveal the delete action."""
        logging.info('调用addressLeft滑动地址管理')
        x, y = self.get_size()
        self.swipe(x * 0.7, y * 0.12, x * 0.4, y * 0.12, 1000)

    def getTime(self):
        """Return the current local time formatted for use in file names."""
        self.now = strftime("%Y-%m-%d %H_%M_%S")
        return self.now

    def getScreenShot(self, module):
        """Save a screenshot named '<module>_<timestamp>.png' under ../screenshots."""
        time = self.getTime()
        image_file = os.path.dirname(os.path.dirname(__file__)) + '/screenshots/%s_%s.png' % (module, time)
        logging.info('获取 %s 截图' % module)
        self.driver.get_screenshot_as_file(image_file)

    def get_csv_data(self, csv_file, line):
        """Return the row at 1-based index *line* from a CSV account file.

        Returns None when *line* is past the end of the file.
        """
        with open(csv_file, 'r', encoding='utf-8-sig') as file:
            reader = csv.reader(file)
            for i, row in enumerate(reader, 1):
                if i == line:
                    return row

    def check_jurisdiction(self):
        """Handle the first-launch flow: permission dialog + guide pages.

        If the login button is already present this is not a first launch
        and nothing needs to be done.
        """
        # Implicit wait: poll up to 5 s for elements (covers the splash ad
        # whose duration varies between devices).
        self.driver.implicitly_wait(5)
        try:
            self.driver.find_element(*self.loginBtn)
        except NoSuchElementException:
            try:
                logging.info("第一次启动app")
                ok = self.driver.find_element(*self.jurBtn)
            except NoSuchElementException:
                # No permission dialog: go straight to the guide pages.
                logging.info("无需手动获取权限,直接滑动引导页")
                for i in range(0, 3):
                    self.swipeLeft()
                    logging.info('第%d次调用向左滑动', i)
            else:
                # Dismiss the two stacked permission prompts, then swipe
                # through the three guide pages.
                for i in range(0, 2):
                    ok.click()
                    sleep(0.5)
                for i in range(0, 3):
                    self.swipeLeft()
                    logging.info('第%d次调用向左滑动', i)
                    sleep(0.5)
        else:
            logging.info("获取到登录按钮元素,不是第一次启动")
# Manual smoke test: launch the app via desired capabilities and run the
# first-launch permission/guide-page flow.
if __name__=='__main__':
    driver=appium_desired()
    ba = BaseView(driver)
    ba.check_jurisdiction() # exercise the first-launch handling
    #file = '../data/account.csv'
    #ba.get_csv_data(file,1)
|
XiaoChang97/maishou_testProject
|
baseView/baseView.py
|
baseView.py
|
py
| 5,558 |
python
|
zh
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "selenium.webdriver.common.by.By.ID",
"line_number": 32,
"usage_type": "attribute"
},
{
"api_name": "selenium.webdriver.common.by.By",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "selenium.webdriver.common.by.By.ID",
"line_number": 33,
"usage_type": "attribute"
},
{
"api_name": "selenium.webdriver.common.by.By",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "logging.info",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "time.strftime",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "os.path.dirname",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 71,
"usage_type": "attribute"
},
{
"api_name": "logging.info",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "csv.reader",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "selenium.common.exceptions.NoSuchElementException",
"line_number": 92,
"usage_type": "name"
},
{
"api_name": "logging.info",
"line_number": 94,
"usage_type": "call"
},
{
"api_name": "selenium.common.exceptions.NoSuchElementException",
"line_number": 97,
"usage_type": "name"
},
{
"api_name": "logging.info",
"line_number": 99,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 102,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 107,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 112,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 113,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 115,
"usage_type": "call"
},
{
"api_name": "baseView.desired_caps.appium_desired",
"line_number": 119,
"usage_type": "call"
}
] |
32381382238
|
import string
import os
import random
import bs4
from urllib import request
# Scrape reddit's front page and download every <img> it references.
url = "http://reddit.com"
urlPage = request.urlopen(url).read()
soup = bs4.BeautifulSoup(urlPage, "html.parser")

# Collect image URLs with their scheme stripped (kept in host/path form).
img_list = []
for img in soup.find_all('img'):
    src = img.get('src')
    # Skip tags with no usable src (None, or nothing to split on '//') —
    # the original crashed with IndexError on those.
    if not src or '//' not in src:
        continue
    img_list.append(src.split('//', 1)[1])
#print(img_list)

r = 8  # length of the random file-name stem
all_chars = string.ascii_letters
# replace this folder_address variable to the directory which you wish to save
# the image files on your computer
folder_address = "C:\\Users\\User\\Documents\\Codes\\Python Scripts\\Reddit\\Pics\\"

for item in img_list:
    # Bugs fixed: the original had a stray unary '+' before ''.join(...)
    # (a TypeError on str), and never used folder_address, so downloads
    # would have landed in the current working directory.
    file_name = ''.join(random.choice(all_chars) for _ in range(r)) + ".jpg"
    request.urlretrieve("http://" + item, folder_address + file_name)
print("finished downloading images")
|
techreign/Webscraping
|
ImageScraper.py
|
ImageScraper.py
|
py
| 739 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "urllib.request.urlopen",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "urllib.request",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "string.ascii_letters",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "urllib.request.urlretrieve",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "urllib.request",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "random.choice",
"line_number": 26,
"usage_type": "call"
}
] |
10502966682
|
from rest_framework.test import APITestCase
from ...services.notification_service import NotificationService
from django.urls import reverse
class TestNotificationAPI(APITestCase):
    """API-level tests for the notification endpoint (CRUD happy paths)."""

    def setUp(self):
        # Seed one notification directly through the service layer so the
        # read/delete tests have a known id to work with.
        self.payload = {
            "title": "Winter discount sale started",
            "description": "Enter coupon-code to get flat 10% discount"
        }
        self.notification_id = NotificationService().create(self.payload).id
        self.url = reverse("notification")

    def test_create_notification(self):
        """POSTing a payload echoes its title back in the response."""
        data = {
            "title": "Summer discount sale started",
            "description": "Enter coupon-code to get flat 10% discount"
        }
        response = self.client.post(self.url, data=data, format='json')
        self.assertEqual(response.data.get('title'), data.get('title'))

    def test_get_all_notification(self):
        """GET returns at least the notification seeded in setUp."""
        response = self.client.get(self.url)
        self.assertNotEqual(len(response.data), 0)

    def test_get_notification_by_id(self):
        """The service layer can fetch the seeded notification by id."""
        found = NotificationService().get_by_id(self.notification_id)
        self.assertEqual(found.id, self.notification_id)

    def test_delete_notification_by_id(self):
        """DELETE with ?id=<seeded id> reports success."""
        delete_url = "{}?id={}".format(self.url, self.notification_id)
        response = self.client.delete(delete_url, data={}, format='json')
        self.assertEqual(response.data.get('success'), True)
|
anojkr/onboarding-project
|
push_notification/apps/notification/tests/unit/test_notification.py
|
test_notification.py
|
py
| 1,381 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "rest_framework.test.APITestCase",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "services.notification_service.NotificationService",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "django.urls.reverse",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "services.notification_service.NotificationService",
"line_number": 29,
"usage_type": "call"
}
] |
43447073770
|
import sys, re
from argparse import ArgumentParser


def nucleotide_percentages(seq):
    """Return the percentage of each nucleotide in *seq*.

    The sequence is upper-cased first, so lowercase input is accepted.
    :param seq: candidate DNA/RNA string
    :return: dict mapping 'U', 'A', 'C', 'T', 'G' to their percentage of the
             sequence, or None when seq contains characters outside ACGTU.
    """
    # Bug fix: the original read `seq = seq.upper()` before `seq` was ever
    # defined (NameError), and validated the raw (non-uppercased) input so
    # lowercase sequences were wrongly rejected.
    seq = seq.upper()
    if not re.search('^[ACGTU]+$', seq):
        return None
    return {base: seq.count(base) / len(seq) * 100 for base in "UACTG"}


def main():
    """Parse -s/--seq and print each nucleotide's percentage (or a rejection note)."""
    parser = ArgumentParser(description='Calculate the percentage of each nucleotide in the sequence')
    parser.add_argument("-s", "--seq", type=str, required=True, help="Input sequence")
    args = parser.parse_args()
    percentages = nucleotide_percentages(args.seq)
    if percentages is None:
        print('The sequence is not DNA or RNA')
    else:
        # Same output order as the original script: U, A, C, T, G.
        for base in "UACTG":
            print(f"The % of {base} in sequence is: {percentages[base]}")


if __name__ == '__main__':
    main()
|
stepsnap/git_HandsOn
|
Percentage.py
|
Percentage.py
|
py
| 1,350 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "argparse.ArgumentParser",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "re.search",
"line_number": 9,
"usage_type": "call"
}
] |
27520740533
|
import os
import datetime
import netCDF4
def handle_netcdf_scheme_wrf(var_names, file_name, run, model, st):
    """Read two WRF variables from a netCDF file, sum them per time step and
    return a list of tuples describing each step inside the ensemble window.

    :param var_names: sequence with the two variable names to read and add
    :param file_name: path to the netCDF file
    :param run: model run datetime (used in log naming and in every tuple)
    :param model: model identifier stored in every tuple
    :param st: settings dict; uses the first element of 'ensemble_log_directory',
               'run' and 'hours_ensemble'
    :return: list of tuples (the t == 0 tuple also carries "latlon" grids),
             or None when file_name does not exist
    """
    if not os.path.exists(file_name):
        return None
    data_list = []
    dataset = netCDF4.Dataset(file_name)  # renamed from 'file' (shadowed builtin)
    try:
        var1 = dataset.variables[var_names[0]][:]
        var2 = dataset.variables[var_names[1]][:]
        times = dataset.variables["Times"][:].astype("str")
        var_latlon = (dataset.variables["XLAT"][0, :, :],
                      dataset.variables["XLONG"][0, :, :])
    finally:
        # Always release the netCDF handle, even if a variable is missing.
        dataset.close()
    log_path = (st["ensemble_log_directory"][0] +
                "Loaded_%s_%s.log" % (model, run.strftime("%Y%m%d%H")))
    # 'with' guarantees the log file is closed even if parsing raises below
    # (the original leaked the handle on any exception).
    with open(log_path, 'w') as log:
        for t in range(times.shape[0]):
            var_date = datetime.datetime.strptime(
                "".join(list(times[t])), "%Y-%m-%d_%H:00:00")
            # Keep only time steps inside the configured ensemble window.
            if var_date <= (st['run'][0] +
                            datetime.timedelta(hours=int(st['hours_ensemble'][0]))):
                var_value = var1[t, :, :] + var2[t, :, :]
                accumulation = t
                if t == 0:
                    # The first step also carries the lat/lon grids.
                    data_list.append((model,
                                      run,
                                      var_date,
                                      (var_value, accumulation),
                                      "latlon", var_latlon))
                else:
                    data_list.append((model,
                                      run,
                                      var_date,
                                      (var_value, accumulation)))
            # Log every time step seen, matching the original behaviour.
            log.write("run: %s --- Data: %s --- file: %s \n" %
                      (run.strftime("%Y%m%d%H"),
                       var_date.strftime("%Y%m%d%H"), file_name))
    return data_list
|
RevertonLuis/Ensemble
|
lib/handle_netcdfs.py
|
handle_netcdfs.py
|
py
| 2,022 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "os.path.exists",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "netCDF4.Dataset",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.strptime",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 29,
"usage_type": "attribute"
},
{
"api_name": "datetime.timedelta",
"line_number": 33,
"usage_type": "call"
}
] |
200126759
|
import torch
import torchvision.transforms as transforms
from PIL import Image
from model import LeNet
# Run inference on the GPU when available, otherwise fall back to the CPU.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
def main():
    """Classify a single image (data/plane1.jpg) with a trained LeNet checkpoint."""
    # Preprocessing: 32x32 input, normalized to [-1, 1] per channel
    # (assumes this matches how the checkpoint was trained -- TODO confirm).
    transform = transforms.Compose(
        [transforms.Resize((32, 32)),
         transforms.ToTensor(),
         transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
    # Class labels indexed by the network's output neuron (CIFAR-10 ordering).
    classes = ('plane', 'car', 'bird', 'cat',
               'deer', 'dog', 'frog', 'horse', 'ship', 'truck')
    net = LeNet()
    net = net.to(device)
    net.load_state_dict(torch.load('Lenet.pth'))
    im = Image.open(r'data/plane1.jpg')
    im = transform(im)  # [C, H, W]
    im = torch.unsqueeze(im, dim=0).to(device)  # [N, C, H, W]
    with torch.no_grad():  # inference only; skip gradient bookkeeping
        outputs = net(im)
        # argmax over class scores -> predicted label index
        predict = torch.max(outputs, dim=1)[1].cpu().numpy()
        print(classes[int(predict)])
if __name__ == '__main__':
    main()
|
ChengZhangX/Deeplearning-for-cv
|
LetNet model/predict.py
|
predict.py
|
py
| 893 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "torch.device",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "torch.cuda.is_available",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "torch.cuda",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "torchvision.transforms.Compose",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "torchvision.transforms.Resize",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "torchvision.transforms.ToTensor",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "torchvision.transforms.Normalize",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "model.LeNet",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "torch.load",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "PIL.Image.open",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "torch.unsqueeze",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "torch.no_grad",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "torch.max",
"line_number": 29,
"usage_type": "call"
}
] |
20164701509
|
import cv2
import mediapipe as mp
from handLandmarksDefine import *
from objectCoords import *
import time
import numpy as np
import math
import serial
# Shared dwell timers used by HandTracker and ObjectAssembler hold-to-confirm checks.
start_time = 0
end_time = 0
# Best-effort serial link; the program keeps running without the Arduino.
try:
    arduino = serial.Serial('COM3', 9600)
except serial.serialutil.SerialException:
    print("Arduino not connected")
# lm = landmark
class HandLandmarkDetector:
    """Thin wrapper around MediaPipe Hands: draws landmarks and extracts pixel coordinates."""

    def __init__(self, static_image_mode, max_num_hands, model_complexity, min_detection_confidence,
                 min_tracking_confidence):
        self.static_image_mode = static_image_mode
        self.max_num_hands = max_num_hands
        self.model_complexity = model_complexity
        self.min_detection_confidence = min_detection_confidence
        self.min_tracking_confidence = min_tracking_confidence
        self.mpHands = mp.solutions.hands
        self.hands = self.mpHands.Hands(self.static_image_mode, self.max_num_hands, self.model_complexity,
                                        self.min_detection_confidence, self.min_tracking_confidence)
        self.mpDraw = mp.solutions.drawing_utils
        self.results = None
        # Last hand seen; stays None until a hand is detected (prevents AttributeError
        # when find_hand_landmark_coordinates runs before any detection).
        self.handLMS = None

    def draw_hand_landmarks(self, img):
        """Run hand detection on a BGR frame and draw the landmark skeleton onto it."""
        img_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)  # MediaPipe expects RGB input
        self.results = self.hands.process(img_rgb)
        if self.results.multi_hand_landmarks:
            for self.handLMS in self.results.multi_hand_landmarks:
                self.mpDraw.draw_landmarks(img, self.handLMS, self.mpHands.HAND_CONNECTIONS)
        return img

    def find_hand_landmark_coordinates(self, img):
        """Return [[landmark_id, x_px, y_px], ...] for the last detected hand.

        Call draw_hand_landmarks first -- it populates self.results/self.handLMS.
        Returns an empty list when no hand is currently detected.
        """
        landmark_list = []
        # Guard against being called before any hand was ever detected.
        if self.results and self.results.multi_hand_landmarks and self.handLMS:
            for lm_id, lm in enumerate(self.handLMS.landmark):
                # (debug print removed: it flooded stdout with every landmark of
                # every frame, and shadowed the builtin 'id')
                h, w, c = img.shape
                cx, cy = int(lm.x * w), int(lm.y * h)
                landmark_list.append([lm_id, cx, cy])
                cv2.circle(img, (cx, cy), 7, (255, 255, 255), cv2.FILLED)
        return landmark_list
class HandTracker:
    """Rectangle a hand must dwell in; tracks whether the current component is 'assembled'.

    Timing relies on the module-level start_time/end_time globals, which are
    shared with ObjectAssembler's dwell checks.
    """
    def __init__(self, width, height, top_left_x, top_left_y, color, thickness):
        self.width = width
        self.height = height
        self.top_left_x = top_left_x
        self.top_left_y = top_left_y
        self.color = color
        self.thickness = thickness
        self.object_assembled = False  # True once the hand has dwelt inside the area
    def reset_sizes(self, width, height, top_left_x, top_left_y, color, thickness):
        """Re-target the rectangle at another component's geometry."""
        self.width = width
        self.height = height
        self.top_left_x = top_left_x
        self.top_left_y = top_left_y
        self.color = color
        self.thickness = thickness
    def draw(self, img):
        """Draw the tracking rectangle onto the frame."""
        cv2.rectangle(img, (self.top_left_x, self.top_left_y), (self.top_left_x + self.width, self.top_left_y +
                                                                self.height), self.color, self.thickness)
    def set_green_color(self):
        # Green signals the component is correctly placed.
        self.color = (0, 255, 0)
        self.object_assembled = True
    def set_red_color(self):
        # Red signals the component still needs placing.
        self.color = (0, 0, 255)
        self.object_assembled = False
    def set_object_assembled_false(self):
        self.object_assembled = False
    def set_object_assembled_true(self):
        self.object_assembled = True
    def detect_hand_inside_area(self, landmark_list):
        """Return 1 once index fingertip AND thumb tip stay inside the area for ~3 s.

        Returns None otherwise; also recolors the rectangle green/red as feedback.
        Uses the shared module-level timers.
        """
        global start_time
        global end_time
        if len(landmark_list) != 0 and self.object_assembled is False:
            # Both the index fingertip and the thumb tip must lie inside the rectangle.
            if self.top_left_x <= landmark_list[INDEX_FINGER_TIP][1] <= self.top_left_x + self.width and self.top_left_y <= landmark_list[INDEX_FINGER_TIP][2] <= self.top_left_y + self.height\
                    and self.top_left_x <= landmark_list[THUMB_TIP][1] <= self.top_left_x + self.width and self.top_left_y <= landmark_list[THUMB_TIP][2] <= self.top_left_y + self.height:
                self.color = (0, 255, 0)
                if start_time == 0 and end_time == 0:
                    # First frame inside: start the 3-second dwell timer.
                    start_time = time.time()
                    end_time = start_time + 3
                else:
                    if time.time() > end_time:
                        # Dwell complete: reset timers and report success.
                        start_time = 0
                        end_time = 0
                        return 1
            else:
                self.color = (0, 0, 255)
    def gesture_control(self, landmark_list, resized_frame, arduino):
        """Map thumb-to-index pixel distance to a 0-250 value and write it to the Arduino.

        Draws feedback circles/line on the frame; the serial write is throttled
        to roughly once per second via the shared timers.
        """
        global start_time
        global end_time
        if len(landmark_list) != 0:
            x1, y1 = landmark_list[THUMB_TIP][1], landmark_list[THUMB_TIP][2]
            x2, y2 = landmark_list[INDEX_FINGER_TIP][1], landmark_list[INDEX_FINGER_TIP][2]
            cx, cy = (x1 + x2) // 2, (y1 + y2) // 2
            cv2.circle(resized_frame, (x1, y1), 15, (255, 0, 255), cv2.FILLED)
            cv2.circle(resized_frame, (x2, y2), 15, (255, 0, 255), cv2.FILLED)
            cv2.line(resized_frame, (x1, y1), (x2, y2), (255, 0, 255), 3)
            cv2.circle(resized_frame, (cx, cy), 15, (255, 0, 255), cv2.FILLED)
            length = math.hypot(x2 - x1, y2 - y1)
            # Map pixel distance 70..250 linearly onto 0..250.
            vol = np.interp(length, [70, 250], [0, 250])
            if start_time == 0 and end_time == 0:
                start_time = time.time()
                end_time = start_time + 1
            else:
                if time.time() > end_time:
                    start_time = 0
                    end_time = 0
                    arduino.write(str(vol).encode())
class ObjectAssembler:
    """Draws the assembly UI regions and detects finger dwell in the Next/Previous zones."""
    def draw_work_area(self, resized_frame, work_area_top_left, work_area_bottom_right, work_area_color,
                       work_area_thickness, text_content, text_font, text_font_scale, text_color, text_thickness):
        """Draw the labeled work-area rectangle."""
        cv2.rectangle(resized_frame, work_area_top_left, work_area_bottom_right, work_area_color, work_area_thickness)
        # Label just inside the rectangle's top-left corner.
        text_x = work_area_top_left[0] + 5
        text_y = work_area_top_left[1] + 30
        cv2.putText(resized_frame, text_content, (text_x, text_y), text_font, text_font_scale, text_color,
                    text_thickness)
    def draw_component_area(self, resized_frame,component_area_top_left, component_area_bottom_right,
                            component_area_color, component_area_thickness, text_content, text_font, text_font_scale,
                            text_color, text_thickness):
        """Draw the labeled component-area rectangle."""
        cv2.rectangle(resized_frame, component_area_top_left, component_area_bottom_right, component_area_color,
                      component_area_thickness)
        text_x = component_area_top_left[0] + 5
        text_y = component_area_top_left[1] + 30
        cv2.putText(resized_frame, text_content, (text_x, text_y), text_font, text_font_scale, text_color,
                    text_thickness)
    def draw_breadboard_outline(self, resized_frame, breadboard_top_left, breadboard_bottom_right,
                                breadboard_outline_color, breadboard_outline_thickness):
        """Draw the (unlabeled) breadboard outline rectangle."""
        cv2.rectangle(resized_frame, breadboard_top_left, breadboard_bottom_right, breadboard_outline_color,
                      breadboard_outline_thickness)
    def draw_next_component(self, resized_frame, next_top_left, next_bottom_right, next_color, next_thickness,
                            text_content, text_font, text_font_scale, text_color, text_thickness):
        """Draw the 'Next' navigation strip with its label."""
        cv2.rectangle(resized_frame, next_top_left, next_bottom_right, next_color, next_thickness)
        text_x = next_top_left[0] + 40
        text_y = next_top_left[1] + 500
        cv2.putText(resized_frame, text_content, (text_x, text_y), text_font, text_font_scale, text_color,
                    text_thickness)
    def draw_previous_component(self, resized_frame, previous_top_left, previous_bottom_right, previous_color,
                                previous_thickness, text_content, text_font, text_font_scale, text_color,
                                text_thickness):
        """Draw the 'Previous' navigation strip with its label."""
        cv2.rectangle(resized_frame, previous_top_left, previous_bottom_right, previous_color, previous_thickness)
        text_x = previous_top_left[0] + 10
        text_y = previous_top_left[1] + 500
        cv2.putText(resized_frame, text_content, (text_x, text_y), text_font, text_font_scale, text_color,
                    text_thickness)
    def detect_finger_inside_next_component(self, landmark_list, next_top_left, next_bottom_right):
        """Return 1 once the pinky tip dwells inside the 'Next' zone for ~3 s (shared timers)."""
        global start_time
        global end_time
        if len(landmark_list) != 0:
            if next_top_left[0] <= landmark_list[PINKY_TIP][1] <= next_bottom_right[0] and \
                    next_top_left[1] <= landmark_list[PINKY_TIP][2] <= next_bottom_right[1]:
                if start_time == 0 and end_time == 0:
                    start_time = time.time()
                    end_time = start_time + 3
                else:
                    if time.time() > end_time:
                        start_time = 0
                        end_time = 0
                        return 1
    def detect_finger_inside_previous_component(self, landmark_list, previous_top_left, previous_bottom_right):
        """Return 1 once the thumb tip dwells inside the 'Previous' zone for ~3 s (shared timers)."""
        global start_time
        global end_time
        if len(landmark_list) != 0:
            if previous_top_left[0] <= landmark_list[THUMB_TIP][1] <= previous_bottom_right[0] and \
                    previous_top_left[1] <= landmark_list[THUMB_TIP][2] <= previous_bottom_right[1]:
                if start_time == 0 and end_time == 0:
                    start_time = time.time()
                    end_time = start_time + 3
                else:
                    if time.time() > end_time:
                        start_time = 0
                        end_time = 0
                        return 1
def resize_window(img, max_width, max_height):
    """Return (width, height) that fits the frame inside max_width x max_height
    while preserving its aspect ratio."""
    h, w, _ = img.shape
    # The limiting dimension determines the single uniform scale factor.
    factor = min(max_width / w, max_height / h)
    return int(w * factor), int(h * factor)
def main():
    """Fullscreen webcam loop that guides the user through placing components.

    Components cycle via the on-screen Next/Previous strips; after the last
    one, the app switches to gesture control of an Arduino over serial.
    """
    cap = cv2.VideoCapture(0)
    success, img = cap.read()
    # Fit the fullscreen window to the first captured frame's aspect ratio.
    new_width, new_height = resize_window(img, max_width=1680, max_height=1050)
    cv2.namedWindow('Scaled Video', cv2.WINDOW_NORMAL)
    cv2.resizeWindow('Scaled Video', new_width, new_height)
    cv2.setWindowProperty('Scaled Video', cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)
    hand_detector = HandLandmarkDetector(static_image_mode=False, max_num_hands=1, model_complexity=1,
                                         min_detection_confidence=0.1, min_tracking_confidence=0.1)
    i = 0  # index of the component currently being placed
    gestureControl = False  # True after the last component: switch to LED dimming mode
    # Per-component target rectangles (constants come from objectCoords).
    rectangle_width = [ARDUINO_WIDTH, LED_WIDTH, LED_BAR_WIDTH]
    rectangle_height = [ARDUINO_HEIGHT, LED_HEIGHT, LED_BAR_HEIGHT]
    rectangle_top_left_x = [ARDUINO_X, LED_X, LED_BAR_X]
    rectangle_top_left_y = [ARDUINO_Y, LED_Y, LED_BAR_Y]
    rectangle_color = (0, 0, 255)
    rectangle_thickness = 2
    hand_tracker = HandTracker(rectangle_width[0], rectangle_height[0], rectangle_top_left_x[0],
                               rectangle_top_left_y[0], rectangle_color, rectangle_thickness)
    component_name = ["Arduino", "LED to PIN D9", "LED bar", "LED dimming"]
    object_assembler = ObjectAssembler()
    while True:
        success, img = cap.read()
        resized_frame = cv2.resize(img, (new_width, new_height))
        hand_tracker.draw(resized_frame)
        resized_frame = hand_detector.draw_hand_landmarks(resized_frame)
        landmark_list = hand_detector.find_hand_landmark_coordinates(resized_frame)
        # Hand held inside the target area for 3 s marks the component as placed.
        if hand_tracker.detect_hand_inside_area(landmark_list) == 1:
            hand_tracker.set_green_color()
            hand_tracker.set_object_assembled_true()
        # Static UI: navigation strips, work area, breadboard outline, component area.
        object_assembler.draw_previous_component(resized_frame=resized_frame, previous_top_left=(1, 1),
                                                 previous_bottom_right=(146, 1061), previous_color=(0, 0, 255),
                                                 previous_thickness = 20, text_content="Previous",
                                                 text_font=cv2.FONT_HERSHEY_SIMPLEX, text_font_scale=1.0,
                                                 text_color=(0, 0, 255), text_thickness=2)
        object_assembler.draw_next_component(resized_frame=resized_frame, next_top_left=(1250, 1),
                                             next_bottom_right=(1395, 1061), next_color=(0, 255, 0), next_thickness=20,
                                             text_content="Next", text_font=cv2.FONT_HERSHEY_SIMPLEX,
                                             text_font_scale=1.0, text_color=(0, 255, 0), text_thickness=2)
        object_assembler.draw_work_area(resized_frame=resized_frame, work_area_top_left=(145, 300),
                                        work_area_bottom_right=(1245, 1050), work_area_color=(255, 255, 255),
                                        work_area_thickness=3, text_content="Work Area",
                                        text_font=cv2.FONT_HERSHEY_SIMPLEX, text_font_scale=1.0,
                                        text_color=(255, 255, 255), text_thickness=2)
        object_assembler.draw_breadboard_outline(resized_frame=resized_frame, breadboard_top_left=(550, 400),
                                                 breadboard_bottom_right=(850, 850),
                                                 breadboard_outline_color=(0, 0, 255), breadboard_outline_thickness=2)
        object_assembler.draw_component_area(resized_frame=resized_frame, component_area_top_left=(145, 1),
                                             component_area_bottom_right=(1245, 295),
                                             component_area_color=(255, 255, 255), component_area_thickness=2,
                                             text_content="Component Area", text_font=cv2.FONT_HERSHEY_SIMPLEX,
                                             text_font_scale=1.0, text_color=(255, 255, 255), text_thickness=2)
        # Dwelling in the Next strip advances to the next component (or gesture mode).
        if object_assembler.detect_finger_inside_next_component(landmark_list, next_top_left=(1250, 1),
                                                                next_bottom_right=(1395, 1061)) == 1:
            hand_tracker.set_object_assembled_false()
            if i < 2:
                i = i + 1
                hand_tracker.reset_sizes(rectangle_width[i], rectangle_height[i], rectangle_top_left_x[i],
                                         rectangle_top_left_y[i], rectangle_color, rectangle_thickness)
            else:
                gestureControl = True
        if gestureControl == True:
            i=3
            # Gesture mode needs the serial link; every failure mode shows the same hint.
            try:
                hand_tracker.gesture_control(landmark_list, resized_frame, arduino)
            except UnboundLocalError:
                cv2.putText(resized_frame, "Connect the Arduino and restart!", (170, 400), cv2.FONT_HERSHEY_SIMPLEX,
                            2.0, (0, 0, 255), 6)
            except serial.serialutil.SerialException:
                cv2.putText(resized_frame, "Connect the Arduino and restart!", (170, 400), cv2.FONT_HERSHEY_SIMPLEX,
                            2.0, (0, 0, 255), 6)
            except NameError:
                cv2.putText(resized_frame, "Connect the Arduino and restart!", (170, 400), cv2.FONT_HERSHEY_SIMPLEX,
                            2.0, (0, 0, 255), 6)
        # Dwelling in the Previous strip steps back (wraps 0 -> 2) and leaves gesture mode.
        if object_assembler.detect_finger_inside_previous_component(landmark_list, previous_top_left=(1, 1),
                                                                    previous_bottom_right=(146, 1061)) == 1:
            hand_tracker.set_object_assembled_false()
            gestureControl = False
            if i == 0:
                i = 2
            else:
                i = i - 1
            hand_tracker.reset_sizes(rectangle_width[i], rectangle_height[i], rectangle_top_left_x[i],
                                     rectangle_top_left_y[i], rectangle_color, rectangle_thickness)
        # Component label: green once placed, red while pending.
        if hand_tracker.object_assembled == True:
            cv2.putText(resized_frame, component_name[i], (650, 280), cv2.FONT_HERSHEY_SIMPLEX, 2.0, (0, 255, 0), 4)
        else: cv2.putText(resized_frame, component_name[i], (650, 280), cv2.FONT_HERSHEY_SIMPLEX, 2.0, (0, 0, 255), 4)
        cv2.imshow('Scaled Video', resized_frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):  # 'q' quits the loop
            break
if __name__ == "__main__":
    main()
# counter = 1
# a = 0
# b = 1
# print(a)
# print(b)
#
# while counter < 10:
# c = a+b
# a = b
# b = c
# print(c)
# counter = counter + 1
|
vladpasat/HandFlow0
|
handTrackingModule.py
|
handTrackingModule.py
|
py
| 16,607 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "serial.Serial",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "serial.serialutil",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "mediapipe.solutions",
"line_number": 29,
"usage_type": "attribute"
},
{
"api_name": "mediapipe.solutions",
"line_number": 32,
"usage_type": "attribute"
},
{
"api_name": "cv2.cvtColor",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "cv2.COLOR_BGR2RGB",
"line_number": 36,
"usage_type": "attribute"
},
{
"api_name": "cv2.circle",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "cv2.FILLED",
"line_number": 51,
"usage_type": "attribute"
},
{
"api_name": "cv2.rectangle",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 99,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 106,
"usage_type": "call"
},
{
"api_name": "cv2.circle",
"line_number": 120,
"usage_type": "call"
},
{
"api_name": "cv2.FILLED",
"line_number": 120,
"usage_type": "attribute"
},
{
"api_name": "cv2.circle",
"line_number": 121,
"usage_type": "call"
},
{
"api_name": "cv2.FILLED",
"line_number": 121,
"usage_type": "attribute"
},
{
"api_name": "cv2.line",
"line_number": 122,
"usage_type": "call"
},
{
"api_name": "cv2.circle",
"line_number": 123,
"usage_type": "call"
},
{
"api_name": "cv2.FILLED",
"line_number": 123,
"usage_type": "attribute"
},
{
"api_name": "math.hypot",
"line_number": 124,
"usage_type": "call"
},
{
"api_name": "numpy.interp",
"line_number": 125,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 127,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 134,
"usage_type": "call"
},
{
"api_name": "cv2.rectangle",
"line_number": 143,
"usage_type": "call"
},
{
"api_name": "cv2.putText",
"line_number": 146,
"usage_type": "call"
},
{
"api_name": "cv2.rectangle",
"line_number": 152,
"usage_type": "call"
},
{
"api_name": "cv2.putText",
"line_number": 156,
"usage_type": "call"
},
{
"api_name": "cv2.rectangle",
"line_number": 161,
"usage_type": "call"
},
{
"api_name": "cv2.rectangle",
"line_number": 166,
"usage_type": "call"
},
{
"api_name": "cv2.putText",
"line_number": 169,
"usage_type": "call"
},
{
"api_name": "cv2.rectangle",
"line_number": 175,
"usage_type": "call"
},
{
"api_name": "cv2.putText",
"line_number": 178,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 188,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 191,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 203,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 206,
"usage_type": "call"
},
{
"api_name": "cv2.VideoCapture",
"line_number": 224,
"usage_type": "call"
},
{
"api_name": "cv2.namedWindow",
"line_number": 227,
"usage_type": "call"
},
{
"api_name": "cv2.WINDOW_NORMAL",
"line_number": 227,
"usage_type": "attribute"
},
{
"api_name": "cv2.resizeWindow",
"line_number": 228,
"usage_type": "call"
},
{
"api_name": "cv2.setWindowProperty",
"line_number": 229,
"usage_type": "call"
},
{
"api_name": "cv2.WND_PROP_FULLSCREEN",
"line_number": 229,
"usage_type": "attribute"
},
{
"api_name": "cv2.WINDOW_FULLSCREEN",
"line_number": 229,
"usage_type": "attribute"
},
{
"api_name": "cv2.resize",
"line_number": 252,
"usage_type": "call"
},
{
"api_name": "cv2.FONT_HERSHEY_SIMPLEX",
"line_number": 265,
"usage_type": "attribute"
},
{
"api_name": "cv2.FONT_HERSHEY_SIMPLEX",
"line_number": 270,
"usage_type": "attribute"
},
{
"api_name": "cv2.FONT_HERSHEY_SIMPLEX",
"line_number": 276,
"usage_type": "attribute"
},
{
"api_name": "cv2.FONT_HERSHEY_SIMPLEX",
"line_number": 286,
"usage_type": "attribute"
},
{
"api_name": "cv2.putText",
"line_number": 304,
"usage_type": "call"
},
{
"api_name": "cv2.FONT_HERSHEY_SIMPLEX",
"line_number": 304,
"usage_type": "attribute"
},
{
"api_name": "serial.serialutil",
"line_number": 306,
"usage_type": "attribute"
},
{
"api_name": "cv2.putText",
"line_number": 307,
"usage_type": "call"
},
{
"api_name": "cv2.FONT_HERSHEY_SIMPLEX",
"line_number": 307,
"usage_type": "attribute"
},
{
"api_name": "cv2.putText",
"line_number": 310,
"usage_type": "call"
},
{
"api_name": "cv2.FONT_HERSHEY_SIMPLEX",
"line_number": 310,
"usage_type": "attribute"
},
{
"api_name": "cv2.putText",
"line_number": 325,
"usage_type": "call"
},
{
"api_name": "cv2.FONT_HERSHEY_SIMPLEX",
"line_number": 325,
"usage_type": "attribute"
},
{
"api_name": "cv2.putText",
"line_number": 326,
"usage_type": "call"
},
{
"api_name": "cv2.FONT_HERSHEY_SIMPLEX",
"line_number": 326,
"usage_type": "attribute"
},
{
"api_name": "cv2.imshow",
"line_number": 328,
"usage_type": "call"
},
{
"api_name": "cv2.waitKey",
"line_number": 329,
"usage_type": "call"
}
] |
43243820127
|
from PyQt5.QtCore import QThread, pyqtSignal
from simplekml import Kml, Snippet, Types
from math import radians, cos, sin, asin, degrees, atan2
import os
class PlotFiles(QThread):
    """Background thread that renders one KML track file per ISSI."""
    progressSignal = pyqtSignal(int)  # percentage complete (0-100)
    threadMessage = pyqtSignal(str)  # user-facing status text
    def __init__(self, results, issilist, google, gps):
        QThread.__init__(self)
        self.gps = gps  # (lat, lon, distance) search circle; gps[0] == 0 disables it
        self.results = results  # rows keyed by ISSI
        self.issilist = issilist  # ISSIs to plot
        self.google = google  # when truthy, open each saved KML with the OS handler
        self.maxRange = len(self.issilist)
        self.stopped = 0  # set by stop() to abort the run loop
    def __del__(self):
        self.wait()
    def plot_the_files(self, results, issi, google, gps, firstplot):
        """
        Build and save a KML gx:Track for one ISSI; on the first plot also
        draw the circular search area (if gps is set).
        :param firstplot: truthy only for the first ISSI plotted in this run
        :param gps: (lat, lon, distance) of the search circle; gps[0] == 0 disables it
        :param google: open the saved KML via the shell when truthy
        :param results: dict of rows per ISSI; row[1] is a 'dd/mm/yyyy hh:mm:ss'
                        timestamp, rows also carry coords, speed and heading
                        (presumably row[2]=lat, row[3]=lon -- TODO confirm)
        :param issi: key into results
        :return: None (output is the saved .kml file)
        """
        when = []
        coord = []
        speeds = []
        headings = []
        times = []
        # Date parts are sliced out of the 'dd/mm/yyyy ...' timestamp of the first row.
        year = results[issi][0][1][6:10]
        month = results[issi][0][1][3:5]
        day = results[issi][0][1][0:2]
        kml = Kml(name="{}_{}-{}-{}".format(issi, year, month, day), open=1)
        doc = kml.newdocument(name="{}".format(issi),
                              snippet=Snippet('Created {}-{}-{}'.format(year, month, day)))
        for x in range(0, len(results[issi])):
            # KML coordinates are (lon, lat) order.
            tup = (results[issi][x][3], results[issi][x][2])
            theTime = results[issi][x][1][11:]
            when.append("{}-{}-{}T{}Z".format(year, month, day, theTime))
            coord.append(tup)
            speeds.append(int(results[issi][x][4]))
            headings.append(int(results[issi][x][5]))
            times.append(results[issi][x][1])
        # Create circle track
        if gps[0] != 0 and firstplot:
            R = 6378.1  # Earth radius in km
            d = float(gps[2])  # distance
            circle_coords = []
            lat1 = radians(float(gps[0]))
            lon1 = radians(float(gps[1]))
            # One point per degree of bearing: destination point at distance d
            # along each bearing from the center (great-circle formula).
            for b in range(1, 360):
                brng = radians(b)
                lat2 = asin(sin(lat1) * cos(d / R) + cos(lat1) * sin(d / R) * cos(brng))
                lon2 = lon1 + atan2(sin(brng) * sin(d / R) * cos(lat1), cos(d / R) - sin(lat1) * sin(lat2))
                lat2 = degrees(lat2)
                lon2 = degrees(lon2)
                circle_coords.append((lon2, lat2))
            doc2 = kml.newdocument(name="Search Area",
                                   snippet=Snippet('{}-{}-{}'.format(gps[0], gps[1], gps[2])))
            fol2 = doc2.newfolder(name='Search Area')
            trk2 = fol2.newgxtrack(name='search area')
            trk2.newgxcoord(circle_coords)
            trk2.stylemap.normalstyle.linestyle.color = '641400FF'
            trk2.stylemap.normalstyle.linestyle.width = 6
        # Folder
        fol = doc.newfolder(name='Tracks')
        # schema for extra data
        schema = kml.newschema()
        schema.newgxsimplearrayfield(name='speed', type=Types.int, displayname='Speed')
        schema.newgxsimplearrayfield(name='heading', type=Types.int, displayname='Heading')
        schema.newgxsimplearrayfield(name='time', type=Types.string, displayname='Time')
        # New Track
        trk = fol.newgxtrack(name=issi)
        # Apply Schema
        trk.extendeddata.schemadata.schemaurl = schema.id
        # add all info to track
        trk.newwhen(when)
        trk.newgxcoord(coord)
        trk.extendeddata.schemadata.newgxsimplearraydata('time', times)
        trk.extendeddata.schemadata.newgxsimplearraydata('speed', speeds)
        trk.extendeddata.schemadata.newgxsimplearraydata('heading', headings)
        # Styling
        trk.stylemap.normalstyle.iconstyle.icon.href = 'http://maps.google.com/mapfiles/kml/shapes/track.png'
        trk.stylemap.normalstyle.linestyle.color = '99ffac59'
        trk.stylemap.normalstyle.linestyle.width = 6
        trk.stylemap.highlightstyle.iconstyle.icon.href = 'http://maps.google.com/mapfiles/kml/shapes/track.png'
        trk.stylemap.highlightstyle.iconstyle.scale = 1.2
        trk.stylemap.highlightstyle.linestyle.color = '99ffac59'
        trk.stylemap.highlightstyle.linestyle.width = 8
        kml.save("results/{}_{}-{}-{}.kml".format(issi, year, month, day))
        if google:
            # Best effort: 'start' is Windows-specific; failures are deliberately ignored.
            try:
                os.system("start " + "results/{}_{}-{}-{}.kml".format(issi, year, month, day))
            except:
                pass
    def run(self):
        """Thread entry: plot each ISSI, emitting progress; honors stop()."""
        firstplot = 1
        maxPercent = len(self.issilist)
        for i in range(len(self.issilist)):
            if not self.stopped:
                self.plot_the_files(self.results, self.issilist[i], self.google, self.gps, firstplot)
                update = ((i + 1) / maxPercent) * 100
                # NOTE(review): update is a float but progressSignal is pyqtSignal(int)
                # -- confirm PyQt coerces it; emitting int(update) would be safer.
                self.progressSignal.emit(update)
                if firstplot:
                    self.sleep(4)  # QThread.sleep: pause after the first plot
                    firstplot = 0
            else:
                break
        self.threadMessage.emit('Plotting completed')
    def stop(self):
        """Request the run loop to stop before the next ISSI."""
        self.stopped = 1
        self.threadMessage.emit('Plotting stopped')
|
stephenmhall/Coordinator_parser
|
plotfile.py
|
plotfile.py
|
py
| 5,066 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "PyQt5.QtCore.QThread",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtCore.pyqtSignal",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtCore.pyqtSignal",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtCore.QThread.__init__",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtCore.QThread",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "simplekml.Kml",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "simplekml.Snippet",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "math.radians",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "math.radians",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "math.radians",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "math.asin",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "math.sin",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "math.cos",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "math.atan2",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "math.sin",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "math.cos",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "math.degrees",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "math.degrees",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "simplekml.Snippet",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "simplekml.Types.int",
"line_number": 87,
"usage_type": "attribute"
},
{
"api_name": "simplekml.Types",
"line_number": 87,
"usage_type": "name"
},
{
"api_name": "simplekml.Types.int",
"line_number": 88,
"usage_type": "attribute"
},
{
"api_name": "simplekml.Types",
"line_number": 88,
"usage_type": "name"
},
{
"api_name": "simplekml.Types.string",
"line_number": 89,
"usage_type": "attribute"
},
{
"api_name": "simplekml.Types",
"line_number": 89,
"usage_type": "name"
},
{
"api_name": "os.system",
"line_number": 117,
"usage_type": "call"
}
] |
26247964346
|
import six
import webob.exc
from oslo_log import log
from delfin.i18n import _
LOG = log.getLogger(__name__)
class ConvertedException(webob.exc.WSGIHTTPException):
    """Adapt a DelfinException into a WSGI HTTP error response."""
    def __init__(self, exception):
        # Mirror the domain exception's HTTP code and message onto the WSGI error.
        self.code = exception.code
        self.title = ''
        self.explanation = exception.msg
        self.error_code = exception.error_code
        self.error_args = exception.error_args
        super(ConvertedException, self).__init__()
class DelfinException(Exception):
    """Base Delfin Exception

    To correctly use this class, inherit from it and define
    a 'msg_fmt' property. That msg_fmt will get printf'd
    with the tuple arguments provided to the constructor.
    """
    msg_fmt = _("An unknown exception occurred.")
    code = 500  # default HTTP status; subclasses override
    def __init__(self, *args, **kwargs):
        # Positional args are kept so ConvertedException can expose them to clients.
        self.error_args = args
        message = kwargs.get('message')
        try:
            if not message:
                # No explicit message: substitute args into the class template.
                message = self.msg_fmt.format(*args)
            else:
                message = six.text_type(message)
        except Exception:
            # Formatting failed (e.g. wrong arg count): log and fall back to
            # the raw template, placeholders and all.
            LOG.error("Failed to format message: {0}".format(args))
            message = self.msg_fmt
        self.msg = message
        super(DelfinException, self).__init__(message)
    @property
    def error_code(self):
        # Machine-readable identifier: the concrete exception class name.
        return self.__class__.__name__
# 403: the caller is known but not permitted to perform the action.
class NotAuthorized(DelfinException):
    msg_fmt = _("Not authorized.")
    code = 403


# 400 family: the request itself is unacceptable.
class Invalid(DelfinException):
    msg_fmt = _("Unacceptable parameters.")
    code = 400


class BadRequest(Invalid):
    msg_fmt = _('The server could not comply with the request since\r\n'
                'it is either malformed or otherwise incorrect.\r\n')
    code = 400


class MalformedRequestBody(Invalid):
    msg_fmt = _("Malformed request body: {0}.")


class MalformedRequestUrl(Invalid):
    msg_fmt = _("Malformed request url.")


class InvalidCredential(Invalid):
    msg_fmt = _("The credentials are invalid.")


class InvalidResults(Invalid):
    msg_fmt = _("The results are invalid. {0}")


class InvalidInput(Invalid):
    msg_fmt = _("Invalid input received. {0}")


class InvalidName(Invalid):
    msg_fmt = _("An invalid 'name' value was provided. {0}")


class InvalidContentType(Invalid):
    msg_fmt = _("Invalid content type: {0}.")


class StorageSerialNumberMismatch(Invalid):
    msg_fmt = _("Storage serial number mismatch. {0}")


class StorageAlreadyExists(Invalid):
    msg_fmt = _("Storage already exists.")


class InvalidSNMPConfig(Invalid):
    msg_fmt = _("Invalid SNMP configuration: {0}")


# 404 family: a referenced resource does not exist.
class NotFound(DelfinException):
    msg_fmt = _("Resource could not be found.")
    code = 404


class NoSuchAction(NotFound):
    msg_fmt = _("There is no such action: {0}")


class AccessInfoNotFound(NotFound):
    msg_fmt = _("Access information for storage {0} could not be found.")


class AlertSourceNotFound(NotFound):
    msg_fmt = _("Alert source for storage {0} could not be found.")


class AlertSourceNotFoundWithHost(NotFound):
    msg_fmt = _("Alert source could not be found with host {0}.")


# Inherits BadRequest (400): a failed SNMP connection is treated as a
# client-configuration problem rather than a missing resource.
class SNMPConnectionFailed(BadRequest):
    msg_fmt = _("Connection to SNMP server failed: {0}")


class StorageNotFound(NotFound):
    msg_fmt = _("Storage {0} could not be found.")


class StorageBackendNotFound(NotFound):
    msg_fmt = _("Storage backend could not be found.")


class StoragePoolNotFound(NotFound):
    msg_fmt = _("Storage pool {0} could not be found.")


class VolumeNotFound(NotFound):
    msg_fmt = _("Volume {0} could not be found.")


class StorageHostInitiatorNotFound(NotFound):
    msg_fmt = _("Storage host initiator {0} could not be found.")


class StorageHostNotFound(NotFound):
    msg_fmt = _("Storage host {0} could not be found.")


class StorageHostGroupNotFound(NotFound):
    msg_fmt = _("Storage host group {0} could not be found.")


class PortGroupNotFound(NotFound):
    msg_fmt = _("Port group {0} could not be found.")


class VolumeGroupNotFound(NotFound):
    msg_fmt = _("Volume group {0} could not be found.")


class MaskingViewNotFound(NotFound):
    msg_fmt = _("Masking View {0} could not be found.")


# Relation lookups between the grouping resources above.
class StorageHostGrpHostRelNotFound(NotFound):
    msg_fmt = _("Storage Host Group Host Relation {0} could not be found.")


class PortGrpPortRelNotFound(NotFound):
    msg_fmt = _("Port Group Port Relation {0} could not be found.")


class VolGrpVolRelationNotFound(NotFound):
    msg_fmt = _("Volume Group Volume Relation {0} could not be found.")


class ControllerNotFound(NotFound):
    msg_fmt = _("Controller {0} could not be found.")


class ControllerListNotFound(NotFound):
    msg_fmt = _("Controller List for {0} could not be found.")


class PortNotFound(NotFound):
    msg_fmt = _("Port {0} could not be found.")


class PortListNotFound(NotFound):
    msg_fmt = _("Port List for {0} could not be found.")


class DiskNotFound(NotFound):
    msg_fmt = _("Disk {0} could not be found.")


class FilesystemNotFound(NotFound):
    msg_fmt = _("Filesystem {0} could not be found.")


class QtreeNotFound(NotFound):
    msg_fmt = _("Qtree {0} could not be found.")


class QuotaNotFound(NotFound):
    msg_fmt = _("Quota {0} could not be found.")


class ShareNotFound(NotFound):
    msg_fmt = _("Share {0} could not be found.")
class StorageDriverNotFound(NotFound):
    # BUG FIX: the message previously read "'{0}'could" -- missing the space
    # between the quoted driver name and "could".
    msg_fmt = _("Storage driver '{0}' could not be found.")
class TaskNotFound(NotFound):
    msg_fmt = _("Task {0} could not be found.")


class FailedTaskNotFound(NotFound):
    msg_fmt = _("Failed task {0} could not be found.")


class ConfigNotFound(NotFound):
    msg_fmt = _("Could not find config at {0}.")


class PasteAppNotFound(NotFound):
    msg_fmt = _("Could not load paste app '{0}' from {1}.")


# Backend / transport failures (default DelfinException code).
class StorageBackendException(DelfinException):
    msg_fmt = _("Exception from Storage Backend: {0}.")


class SSHException(DelfinException):
    msg_fmt = _("Exception in SSH protocol negotiation or logic. {0}")


class SSHInjectionThreat(DelfinException):
    msg_fmt = _("SSH command injection detected: {0}.")


# Tooz locking
class LockCreationFailed(DelfinException):
    msg_fmt = _('Unable to create lock. Coordination backend not started.')


class LockAcquisitionFailed(DelfinException):
    msg_fmt = _('Lock acquisition failed.')


class DuplicateExtension(DelfinException):
    msg_fmt = _('Found duplicate extension: {0}.')


class ImproperIPVersion(DelfinException):
    msg_fmt = _("Provided improper IP version {0}.")


class ConnectTimeout(DelfinException):
    msg_fmt = _("Connect timeout.")
    code = 500


class InvalidUsernameOrPassword(DelfinException):
    msg_fmt = _("Invalid username or password.")
    code = 400


# NOTE(review): subclasses Invalid (default 400) but overrides code to 500 --
# presumably a bad upstream response counts as a server-side fault; confirm.
class BadResponse(Invalid):
    msg_fmt = _('Bad response from server')
    code = 500


class InvalidPrivateKey(DelfinException):
    msg_fmt = _("not a valid RSA private key.")
    code = 400


class SSHConnectTimeout(DelfinException):
    msg_fmt = _("SSH connect timeout.")
    code = 500


# NOTE(review): NotFound subclass reported as 400 -- an unknown host key is
# treated as a client configuration error; confirm this is intentional.
class SSHNotFoundKnownHosts(NotFound):
    msg_fmt = _("{0} not found in known_hosts.")
    code = 400


class StorageClearAlertFailed(DelfinException):
    msg_fmt = _("Failed to clear alert. Reason: {0}.")


class StorageListAlertFailed(DelfinException):
    msg_fmt = _("Failed to list alerts. Reason: {0}.")


class HTTPConnectionTimeout(DelfinException):
    msg_fmt = _("HTTP connection timeout: {0}.")


class InvalidCAPath(DelfinException):
    msg_fmt = _("Invalid CA path: {0}.")


class StoragePerformanceCollectionFailed(DelfinException):
    msg_fmt = _("Failed to collect performance metrics. Reason: {0}.")


class SSLCertificateFailed(Invalid):
    msg_fmt = _("SSL Certificate Failed.")
    code = 400


class SSLHandshakeFailed(Invalid):
    msg_fmt = _("SSL handshake failure.")


class StorageIsSyncing(Invalid):
    msg_fmt = _("Storage {0} is syncing now, please try again later.")


class InvalidIpOrPort(DelfinException):
    msg_fmt = _("Invalid ip or port.")
    code = 400


class InvalidStorageCapability(Invalid):
    msg_fmt = _("Invalid capability response: {0}")
    code = 500


# 501: the backend driver does not implement the capability feature.
class StorageCapabilityNotSupported(Invalid):
    msg_fmt = _("Capability feature not supported by storage")
    code = 501


class EmptyResourceMetrics(DelfinException):
    msg_fmt = _("Empty resource metric in capabilities")
    code = 501


class TelemetryTaskExecError(DelfinException):
    msg_fmt = _("Failure in telemetry task execution")


class ComponentNotFound(NotFound):
    msg_fmt = _("Component {0} could not be found.")


# NOTE(review): the adjacent string literals concatenate with no space
# ("...information.Storage {0}...") -- runtime message, flagged only.
class IncompleteTrapInformation(DelfinException):
    msg_fmt = _("Incomplete trap information."
                "Storage {0} alert information needs to be synchronized.")


class StorageMaxUserCountException(DelfinException):
    msg_fmt = _(
        "Exception from storage of users has reached the upper limit: {0}.")
|
sodafoundation/delfin
|
delfin/exception.py
|
exception.py
|
py
| 8,827 |
python
|
en
|
code
| 201 |
github-code
|
6
|
[
{
"api_name": "oslo_log.log.getLogger",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "oslo_log.log",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "webob.exc.exc",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "webob.exc",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "delfin.i18n._",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "six.text_type",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "delfin.i18n._",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "delfin.i18n._",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "delfin.i18n._",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "delfin.i18n._",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "delfin.i18n._",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "delfin.i18n._",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "delfin.i18n._",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "delfin.i18n._",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "delfin.i18n._",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "delfin.i18n._",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "delfin.i18n._",
"line_number": 96,
"usage_type": "call"
},
{
"api_name": "delfin.i18n._",
"line_number": 100,
"usage_type": "call"
},
{
"api_name": "delfin.i18n._",
"line_number": 104,
"usage_type": "call"
},
{
"api_name": "delfin.i18n._",
"line_number": 108,
"usage_type": "call"
},
{
"api_name": "delfin.i18n._",
"line_number": 113,
"usage_type": "call"
},
{
"api_name": "delfin.i18n._",
"line_number": 117,
"usage_type": "call"
},
{
"api_name": "delfin.i18n._",
"line_number": 121,
"usage_type": "call"
},
{
"api_name": "delfin.i18n._",
"line_number": 125,
"usage_type": "call"
},
{
"api_name": "delfin.i18n._",
"line_number": 129,
"usage_type": "call"
},
{
"api_name": "delfin.i18n._",
"line_number": 133,
"usage_type": "call"
},
{
"api_name": "delfin.i18n._",
"line_number": 137,
"usage_type": "call"
},
{
"api_name": "delfin.i18n._",
"line_number": 141,
"usage_type": "call"
},
{
"api_name": "delfin.i18n._",
"line_number": 145,
"usage_type": "call"
},
{
"api_name": "delfin.i18n._",
"line_number": 149,
"usage_type": "call"
},
{
"api_name": "delfin.i18n._",
"line_number": 153,
"usage_type": "call"
},
{
"api_name": "delfin.i18n._",
"line_number": 157,
"usage_type": "call"
},
{
"api_name": "delfin.i18n._",
"line_number": 161,
"usage_type": "call"
},
{
"api_name": "delfin.i18n._",
"line_number": 165,
"usage_type": "call"
},
{
"api_name": "delfin.i18n._",
"line_number": 169,
"usage_type": "call"
},
{
"api_name": "delfin.i18n._",
"line_number": 173,
"usage_type": "call"
},
{
"api_name": "delfin.i18n._",
"line_number": 177,
"usage_type": "call"
},
{
"api_name": "delfin.i18n._",
"line_number": 181,
"usage_type": "call"
},
{
"api_name": "delfin.i18n._",
"line_number": 185,
"usage_type": "call"
},
{
"api_name": "delfin.i18n._",
"line_number": 189,
"usage_type": "call"
},
{
"api_name": "delfin.i18n._",
"line_number": 193,
"usage_type": "call"
},
{
"api_name": "delfin.i18n._",
"line_number": 197,
"usage_type": "call"
},
{
"api_name": "delfin.i18n._",
"line_number": 201,
"usage_type": "call"
},
{
"api_name": "delfin.i18n._",
"line_number": 205,
"usage_type": "call"
},
{
"api_name": "delfin.i18n._",
"line_number": 209,
"usage_type": "call"
},
{
"api_name": "delfin.i18n._",
"line_number": 213,
"usage_type": "call"
},
{
"api_name": "delfin.i18n._",
"line_number": 217,
"usage_type": "call"
},
{
"api_name": "delfin.i18n._",
"line_number": 221,
"usage_type": "call"
},
{
"api_name": "delfin.i18n._",
"line_number": 225,
"usage_type": "call"
},
{
"api_name": "delfin.i18n._",
"line_number": 229,
"usage_type": "call"
},
{
"api_name": "delfin.i18n._",
"line_number": 233,
"usage_type": "call"
},
{
"api_name": "delfin.i18n._",
"line_number": 237,
"usage_type": "call"
},
{
"api_name": "delfin.i18n._",
"line_number": 241,
"usage_type": "call"
},
{
"api_name": "delfin.i18n._",
"line_number": 245,
"usage_type": "call"
},
{
"api_name": "delfin.i18n._",
"line_number": 249,
"usage_type": "call"
},
{
"api_name": "delfin.i18n._",
"line_number": 254,
"usage_type": "call"
},
{
"api_name": "delfin.i18n._",
"line_number": 258,
"usage_type": "call"
},
{
"api_name": "delfin.i18n._",
"line_number": 262,
"usage_type": "call"
},
{
"api_name": "delfin.i18n._",
"line_number": 266,
"usage_type": "call"
},
{
"api_name": "delfin.i18n._",
"line_number": 270,
"usage_type": "call"
},
{
"api_name": "delfin.i18n._",
"line_number": 275,
"usage_type": "call"
},
{
"api_name": "delfin.i18n._",
"line_number": 280,
"usage_type": "call"
},
{
"api_name": "delfin.i18n._",
"line_number": 285,
"usage_type": "call"
},
{
"api_name": "delfin.i18n._",
"line_number": 290,
"usage_type": "call"
},
{
"api_name": "delfin.i18n._",
"line_number": 295,
"usage_type": "call"
},
{
"api_name": "delfin.i18n._",
"line_number": 300,
"usage_type": "call"
},
{
"api_name": "delfin.i18n._",
"line_number": 304,
"usage_type": "call"
},
{
"api_name": "delfin.i18n._",
"line_number": 308,
"usage_type": "call"
},
{
"api_name": "delfin.i18n._",
"line_number": 312,
"usage_type": "call"
},
{
"api_name": "delfin.i18n._",
"line_number": 316,
"usage_type": "call"
},
{
"api_name": "delfin.i18n._",
"line_number": 320,
"usage_type": "call"
},
{
"api_name": "delfin.i18n._",
"line_number": 325,
"usage_type": "call"
},
{
"api_name": "delfin.i18n._",
"line_number": 329,
"usage_type": "call"
},
{
"api_name": "delfin.i18n._",
"line_number": 333,
"usage_type": "call"
},
{
"api_name": "delfin.i18n._",
"line_number": 338,
"usage_type": "call"
},
{
"api_name": "delfin.i18n._",
"line_number": 343,
"usage_type": "call"
},
{
"api_name": "delfin.i18n._",
"line_number": 348,
"usage_type": "call"
},
{
"api_name": "delfin.i18n._",
"line_number": 353,
"usage_type": "call"
},
{
"api_name": "delfin.i18n._",
"line_number": 357,
"usage_type": "call"
},
{
"api_name": "delfin.i18n._",
"line_number": 361,
"usage_type": "call"
},
{
"api_name": "delfin.i18n._",
"line_number": 366,
"usage_type": "call"
}
] |
6250184169
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Mar 19 12:46:12 2018
@author: CTF Team
"""
from PyQt5 import uic,QtCore, QtGui, QtWidgets
from PyQt5.QtCore import QSize, Qt
import CompanyTaxUI
import sys
import pandas as pd
import csv
import numpy as np
class ApplicationWindow(QtWidgets.QMainWindow):
    """Main window for the company tax savings calculator.

    Reads a net income (SGD) and an industry category from the UI, computes
    four candidate tax amounts per country from ``countryTax.csv`` and shows
    the cheapest option(s) in the ``list_top10`` table, depending on which
    radio option (1-5) is selected.
    """

    def __init__(self):
        super(ApplicationWindow, self).__init__()
        self.ui = CompanyTaxUI.Ui_MainWindow()
        self.ui.setupUi(self)
        # Connects the calculate button in CompanyTaxUI to the handler below.
        self.ui.calculate.clicked.connect(self.taxCalculate)

    # ---- UI helpers -----------------------------------------------------

    def _show_message(self, text):
        """Show a single informational/error message in the output table."""
        table = self.ui.list_top10
        table.setColumnCount(1)
        table.setHorizontalHeaderLabels(["Output"])
        table.setRowCount(1)
        table.horizontalHeaderItem(0).setTextAlignment(Qt.AlignLeft)
        table.horizontalHeader().setSectionResizeMode(
            0, QtWidgets.QHeaderView.ResizeToContents)
        table.setItem(0, 0, QtWidgets.QTableWidgetItem(text))

    def _prepare_result_table(self, rows):
        """Set up the 3-column (country / amount / option) result table."""
        table = self.ui.list_top10
        table.setColumnCount(3)
        table.setHorizontalHeaderLabels(["Country", "Tax Amount", "Tax Option #"])
        table.setRowCount(rows)
        for col in range(3):
            table.horizontalHeaderItem(col).setTextAlignment(Qt.AlignLeft)

    def _show_single_result(self, country, amount, option_number):
        """Render one country / tax-amount / option row into the table."""
        self._prepare_result_table(1)
        table = self.ui.list_top10
        table.setItem(0, 0, QtWidgets.QTableWidgetItem(country))
        table.setItem(0, 1, QtWidgets.QTableWidgetItem('%.3f' % amount))
        table.setItem(0, 2, QtWidgets.QTableWidgetItem(
            "Tax Option %d" % option_number))

    # ---- Button handler -------------------------------------------------

    def taxCalculate(self):
        """Handle the Calculate click: compute taxes, display the result."""
        companySGDIncome = self.ui.company_netIncome.text()
        # Guard clause: nothing to compute without an income figure.
        if not companySGDIncome:
            self._show_message("You have not inputted any SGD Net Income !")
            return
        selectedCategoryData = self.ui.list_companyindustry.currentText()
        calCountriesTaxAmt = ApplicationWindow.taxComputation(
            companySGDIncome, selectedCategoryData)
        if self.ui.option1.isChecked():
            # Option 1: national + branch rate (index 0); skip 0% entries.
            filtered = {k: v for k, v in calCountriesTaxAmt.items() if v[0] > 0}
            country, taxes = min(filtered.items(), key=lambda x: x[1][0])
            self._show_single_result(country, taxes[0], 1)
        elif self.ui.option2.isChecked():
            # Option 2: minimum tax rate (index 1).
            filtered = {k: v for k, v in calCountriesTaxAmt.items() if v[1] > 0}
            country, taxes = min(filtered.items(), key=lambda x: x[1][1])
            self._show_single_result(country, taxes[1], 2)
        elif self.ui.option3.isChecked():
            # Option 3: progressive tax (index 2).
            filtered = {k: v for k, v in calCountriesTaxAmt.items() if v[2] > 0}
            country, taxes = min(filtered.items(), key=lambda x: x[1][2])
            self._show_single_result(country, taxes[2], 3)
        elif self.ui.option4.isChecked():
            # Option 4: industry-category tax (index 3).
            filtered = {k: v for k, v in calCountriesTaxAmt.items() if v[3] > 0}
            if not filtered:
                # No country has a special rate for the chosen category.
                self._show_message("You have not chosen any category !")
            else:
                country, taxes = min(filtered.items(), key=lambda x: x[1][3])
                # BUG FIX: this row was previously labelled "Tax Option 3".
                self._show_single_result(country, taxes[3], 4)
        elif self.ui.option5.isChecked():
            # Option 5: top-10 countries ranked by their cheapest non-zero
            # option; remember which option index produced the minimum.
            best = {}
            for country, taxes in calCountriesTaxAmt.items():
                val = min((x for x in taxes if x > 0), default=0)
                best[country] = (val, taxes.index(val))
            filtered = {k: v for k, v in best.items() if v[0] > 0}
            ranked = sorted(filtered.items(), key=lambda x: x[1])
            self._prepare_result_table(10)
            table = self.ui.list_top10
            for row in range(10):
                country, (amount, idx) = ranked[row]
                table.setItem(row, 0, QtWidgets.QTableWidgetItem(country))
                table.setItem(row, 1, QtWidgets.QTableWidgetItem('%.3f' % amount))
                table.setItem(row, 2, QtWidgets.QTableWidgetItem(
                    "Tax Option %d" % (idx + 1)))

    # ---- Pure computation (no UI) ---------------------------------------

    @staticmethod
    def convertSGDToUSD(companySGDIncome):
        """Convert an SGD amount to USD at a fixed 0.75 rate."""
        return companySGDIncome * 0.75

    @staticmethod
    def generateTaxForOptions(taxData, companyUSDIncome, companyCode):
        """Return ``{country: [tax1, tax2, tax3, tax4]}`` in USD.

        tax1: national + branch rate; tax2: minimum tax rate; tax3:
        progressive rate evaluated in the native currency; tax4: the
        industry-category rate matching ``companyCode`` (0 when absent).
        """
        countryTaxAmount = {}
        # BUG FIX: name must be None (not the string "None") -- namedtuple
        # rejects the keyword "None" as a type name.  Plain tuples are what
        # the positional row[...] indexing below expects anyway.
        for row in taxData.itertuples(index=False, name=None):
            differentTaxAmount = [0, 0, 0, 0]
            # 1st tax: normal rate + branch rate.
            differentTaxAmount[0] = round(companyUSDIncome * (row[1] + row[8]), 3)
            # 2nd tax: minimum tax rate.
            differentTaxAmount[1] = round(companyUSDIncome * (row[4]), 3)
            # 3rd tax: progressive rate, computed in the native currency.
            # row[3] is presumably the native->USD rate -- TODO confirm
            # against the CSV column layout.
            nativeCurrency = companyUSDIncome
            if row[2] != "USD":
                nativeCurrency = (1.0 / row[3]) * nativeCurrency
            if row[7]:
                # Each comma-separated entry is a whitespace-split bracket,
                # e.g. "0 <= 100000 : 0.17".
                conditionStatement = [c.split() for c in row[7].split(',')]
                roundedNativeCurrency = round(nativeCurrency, 3)
                strRoundedNativeCurrency = '%.3f' % roundedNativeCurrency
                for x in conditionStatement:
                    # SECURITY NOTE: eval() runs text taken from the CSV --
                    # only use this application with a trusted countryTax.csv.
                    if len(x) == 5:
                        # Bounded bracket: "lower <op> upper : rate".
                        lowerBound, evaluationCondition, upperBound = x[0], x[1], x[2]
                        taxPercentage = x[4]
                        lowerCondition = strRoundedNativeCurrency + " > " + lowerBound
                        upperCondition = (strRoundedNativeCurrency + " "
                                          + evaluationCondition + " " + upperBound)
                        if eval(lowerCondition) and eval(upperCondition):
                            nativeCalculatedTax = (roundedNativeCurrency
                                                   * float(taxPercentage))
                            # Convert back to USD.  (The old dead
                            # "if valuePresent: break" check was removed --
                            # it could never fire after this break.)
                            differentTaxAmount[2] = round(
                                nativeCalculatedTax * row[3], 3)
                            break
                    elif len(x) == 4:
                        # Final open-ended bracket: "<op> lower : rate".
                        lastCondition = (strRoundedNativeCurrency + " "
                                         + x[0] + " " + x[1])
                        if eval(lastCondition):
                            nativeCalculatedTax = roundedNativeCurrency * float(x[3])
                            differentTaxAmount[2] = round(
                                nativeCalculatedTax * row[3], 3)
                            break
            # 4th tax: industry-category special rate ("CODE : rate" entries).
            if row[9]:
                if "," in row[9]:
                    for cond in (c.split() for c in row[9].split(',')):
                        if companyCode == cond[0]:
                            differentTaxAmount[3] = companyUSDIncome * float(cond[2])
                            break
                else:
                    # Countries with only one category special rate.
                    cond = row[9].split()
                    if companyCode == cond[0]:
                        differentTaxAmount[3] = companyUSDIncome * float(cond[2])
            countryTaxAmount[row[0]] = differentTaxAmount
        return countryTaxAmount

    @staticmethod
    def generateCategoryData(categoryData):
        """Return ``{CategoryName: CategoryCode}`` from the category frame."""
        return {x['CategoryName']: x['CategoryCode']
                for x in categoryData.to_dict(orient="records")}

    @staticmethod
    def taxComputation(companySGDIncome, selectedCategoryData):
        """Load the CSV data and compute all per-country tax options."""
        # keep_default_na=False keeps empty CSV fields as '' instead of NaN.
        taxData = pd.read_csv('countryTax.csv', keep_default_na=False)
        # BUG FIX: fillna returns a new frame; the original call discarded
        # the result.  (With keep_default_na=False it is a belt-and-braces
        # normalisation of the two free-text columns.)
        taxData = taxData.fillna({'ProgressiveTaxRange': '', 'CategoryRate': ''})
        categoryData = pd.read_csv('categoryDict.csv', keep_default_na=False)
        categoryDict = ApplicationWindow.generateCategoryData(categoryData)
        companyCode = categoryDict.get(selectedCategoryData)
        companyUSDIncome = ApplicationWindow.convertSGDToUSD(
            float(companySGDIncome))
        return ApplicationWindow.generateTaxForOptions(
            taxData, companyUSDIncome, companyCode)
if __name__ == "__main__":
    # Standard Qt bootstrap: create the application, show the main window,
    # then block in the event loop; exit with Qt's return code.
    app = QtWidgets.QApplication(sys.argv)
    application = ApplicationWindow()
    application.show()
    sys.exit(app.exec_())
|
larry2967/academic-projects
|
taxcalculator/CompanyTaxSavingsApp.py
|
CompanyTaxSavingsApp.py
|
py
| 14,683 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "PyQt5.QtWidgets.QMainWindow",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "PyQt5.QtWidgets",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "CompanyTaxUI.Ui_MainWindow",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtCore.Qt.AlignLeft",
"line_number": 34,
"usage_type": "attribute"
},
{
"api_name": "PyQt5.QtCore.Qt",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtWidgets.QHeaderView",
"line_number": 35,
"usage_type": "attribute"
},
{
"api_name": "PyQt5.QtWidgets",
"line_number": 35,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtWidgets.QTableWidgetItem",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtWidgets",
"line_number": 36,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtCore.Qt.AlignLeft",
"line_number": 55,
"usage_type": "attribute"
},
{
"api_name": "PyQt5.QtCore.Qt",
"line_number": 55,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtCore.Qt.AlignLeft",
"line_number": 56,
"usage_type": "attribute"
},
{
"api_name": "PyQt5.QtCore.Qt",
"line_number": 56,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtCore.Qt.AlignLeft",
"line_number": 57,
"usage_type": "attribute"
},
{
"api_name": "PyQt5.QtCore.Qt",
"line_number": 57,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtWidgets.QTableWidgetItem",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtWidgets",
"line_number": 60,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtWidgets.QTableWidgetItem",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtWidgets",
"line_number": 62,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtWidgets.QTableWidgetItem",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtWidgets",
"line_number": 63,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtCore.Qt.AlignLeft",
"line_number": 73,
"usage_type": "attribute"
},
{
"api_name": "PyQt5.QtCore.Qt",
"line_number": 73,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtCore.Qt.AlignLeft",
"line_number": 74,
"usage_type": "attribute"
},
{
"api_name": "PyQt5.QtCore.Qt",
"line_number": 74,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtCore.Qt.AlignLeft",
"line_number": 75,
"usage_type": "attribute"
},
{
"api_name": "PyQt5.QtCore.Qt",
"line_number": 75,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtWidgets.QTableWidgetItem",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtWidgets",
"line_number": 78,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtWidgets.QTableWidgetItem",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtWidgets",
"line_number": 80,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtWidgets.QTableWidgetItem",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtWidgets",
"line_number": 81,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtCore.Qt.AlignLeft",
"line_number": 91,
"usage_type": "attribute"
},
{
"api_name": "PyQt5.QtCore.Qt",
"line_number": 91,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtCore.Qt.AlignLeft",
"line_number": 92,
"usage_type": "attribute"
},
{
"api_name": "PyQt5.QtCore.Qt",
"line_number": 92,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtCore.Qt.AlignLeft",
"line_number": 93,
"usage_type": "attribute"
},
{
"api_name": "PyQt5.QtCore.Qt",
"line_number": 93,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtWidgets.QTableWidgetItem",
"line_number": 96,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtWidgets",
"line_number": 96,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtWidgets.QTableWidgetItem",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtWidgets",
"line_number": 98,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtWidgets.QTableWidgetItem",
"line_number": 99,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtWidgets",
"line_number": 99,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtCore.Qt.AlignLeft",
"line_number": 110,
"usage_type": "attribute"
},
{
"api_name": "PyQt5.QtCore.Qt",
"line_number": 110,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtWidgets.QHeaderView",
"line_number": 111,
"usage_type": "attribute"
},
{
"api_name": "PyQt5.QtWidgets",
"line_number": 111,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtWidgets.QTableWidgetItem",
"line_number": 112,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtWidgets",
"line_number": 112,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtCore.Qt.AlignLeft",
"line_number": 120,
"usage_type": "attribute"
},
{
"api_name": "PyQt5.QtCore.Qt",
"line_number": 120,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtCore.Qt.AlignLeft",
"line_number": 121,
"usage_type": "attribute"
},
{
"api_name": "PyQt5.QtCore.Qt",
"line_number": 121,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtCore.Qt.AlignLeft",
"line_number": 122,
"usage_type": "attribute"
},
{
"api_name": "PyQt5.QtCore.Qt",
"line_number": 122,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtWidgets.QTableWidgetItem",
"line_number": 125,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtWidgets",
"line_number": 125,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtWidgets.QTableWidgetItem",
"line_number": 127,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtWidgets",
"line_number": 127,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtWidgets.QTableWidgetItem",
"line_number": 128,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtWidgets",
"line_number": 128,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtCore.Qt.AlignLeft",
"line_number": 145,
"usage_type": "attribute"
},
{
"api_name": "PyQt5.QtCore.Qt",
"line_number": 145,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtCore.Qt.AlignLeft",
"line_number": 146,
"usage_type": "attribute"
},
{
"api_name": "PyQt5.QtCore.Qt",
"line_number": 146,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtCore.Qt.AlignLeft",
"line_number": 147,
"usage_type": "attribute"
},
{
"api_name": "PyQt5.QtCore.Qt",
"line_number": 147,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtWidgets.QTableWidgetItem",
"line_number": 151,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtWidgets",
"line_number": 151,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtWidgets.QTableWidgetItem",
"line_number": 153,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtWidgets",
"line_number": 153,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtWidgets.QTableWidgetItem",
"line_number": 156,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtWidgets",
"line_number": 156,
"usage_type": "name"
},
{
"api_name": "pandas.read_csv",
"line_number": 305,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 311,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtWidgets.QApplication",
"line_number": 327,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtWidgets",
"line_number": 327,
"usage_type": "name"
},
{
"api_name": "sys.argv",
"line_number": 327,
"usage_type": "attribute"
},
{
"api_name": "sys.exit",
"line_number": 330,
"usage_type": "call"
}
] |
23722748410
|
from django.urls import path
from gisapp.views import HomeView, county_datasets, point_datasets
from gisapp.api.views import ProvincesListAPIView,ProvincesDetailAPIView
urlpatterns = [
path('', HomeView.as_view(), name='home'),
path('county_data/', county_datasets, name = 'county'),
path('incidence_data/', point_datasets, name = 'incidences'),
path("provinces/", ProvincesListAPIView.as_view(), name="pr"),
path("provinces/<int:pk>/", ProvincesDetailAPIView.as_view(), name=""),
]
|
shwky56/geo-django
|
gisapp/urls.py
|
urls.py
|
py
| 506 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "django.urls.path",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "gisapp.views.HomeView.as_view",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "gisapp.views.HomeView",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "django.urls.path",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "gisapp.views.county_datasets",
"line_number": 8,
"usage_type": "argument"
},
{
"api_name": "django.urls.path",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "gisapp.views.point_datasets",
"line_number": 9,
"usage_type": "argument"
},
{
"api_name": "django.urls.path",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "gisapp.api.views.ProvincesListAPIView.as_view",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "gisapp.api.views.ProvincesListAPIView",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "django.urls.path",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "gisapp.api.views.ProvincesDetailAPIView.as_view",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "gisapp.api.views.ProvincesDetailAPIView",
"line_number": 11,
"usage_type": "name"
}
] |
42543026750
|
"""Utility related functions.
"""
import sys
import os
import ctypes
import pygame
from .window import *
def quit():
"""Shuts down ag_py the correct way."""
destroy_window()
pygame.quit()
sys.exit()
def is_admin() -> bool:
"""Determines if the user is running your game as admin."""
try:
return os.getuid() == 0
except AttributeError:
return ctypes.windll.shell32.IsUserAnAdmin() != 0
def percent(n1: int, n2: int) -> int:
"""Finds the percentage of n1 out of n2.
For example, if n1 was 5, and n2 was 10, it would return 50.
"""
return (n1 / n2) * 100
def generate_string(length: int) -> str:
"""Generates a random string of a given lentgh."""
symbols: str = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890"
final_string: str = ""
for i in range(length):
final_string += symbols[random.randint(0, len(symbols) - 1)]
return final_string
|
trypolis464/ag_py
|
agpy/utils.py
|
utils.py
|
py
| 951 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "pygame.quit",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "sys.exit",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "os.getuid",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "ctypes.windll.shell32.IsUserAnAdmin",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "ctypes.windll",
"line_number": 25,
"usage_type": "attribute"
}
] |
19528118390
|
__author__ = 'zhaicao'
import sys
from PyQt5.QtWidgets import (QWidget, QPushButton, QLineEdit, QInputDialog, QApplication, QColorDialog,
QFrame, QVBoxLayout, QSizePolicy, QLabel, QFontDialog, QTextEdit, QAction, QFileDialog ,QMainWindow)
from PyQt5.QtGui import QColor, QIcon
#通过输入框改变文字
class simple_1(QWidget):
def __init__(self):
super().__init__()
self.initUI()
def initUI(self):
col = QColor()
self.btn = QPushButton('Dialog', self)
self.btn.move(20, 20)
#绑定按钮点击信号和槽函数
self.btn.clicked.connect(self.showDialog)
self.el = QLineEdit(self)
self.el.move(130, 22)
self.setGeometry(300, 300, 290, 150)
self.setWindowTitle('Dialog')
self.show()
def showDialog(self):
text, ok = QInputDialog.getText(self, 'Input Dialog', 'Enter your name:')
if ok:
self.el.setText(str(text))
#通过对话框改变颜色
class simple_2(QWidget):
def __init__(self):
super().__init__()
self.initUI()
def initUI(self):
col = QColor(0, 0, 0)
self.btn = QPushButton('Dialog', self)
self.btn.move(20, 20)
#绑定按钮点击信号和槽函数
self.btn.clicked.connect(self.showDialog)
self.frm = QFrame(self)
self.frm.setStyleSheet("QWidget { background-color: %s }" % col.name())
self.frm.setGeometry(130, 22, 100, 100)
self.setGeometry(300, 300, 250, 180)
self.setWindowTitle('Color Dialog')
self.show()
def showDialog(self):
col = QColorDialog.getColor()
if col.isValid():
self.frm.setStyleSheet("QWidget { background-color: %s }" % col.name())
#通过对话框改变字体
class simple_3(QWidget):
def __init__(self):
super().__init__()
self.initUI()
def initUI(self):
vbox = QVBoxLayout()
btn = QPushButton('Dialog', self)
btn.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed)
btn.move(20, 20)
vbox.addWidget(btn)
btn.clicked.connect(self.showDialog)
self.lbl = QLabel('Knowledge only matters', self)
self.lbl.move(130, 20)
vbox.addWidget(self.lbl)
self.setLayout(vbox)
self.setGeometry(300, 300, 250, 180)
self.setWindowTitle('Color Dialog')
self.show()
def showDialog(self):
font, ok = QFontDialog.getFont()
if ok:
self.lbl.setFont(font)
#通过对话框选择文件或目录
class simple_4(QMainWindow):
def __init__(self):
super().__init__()
self.initUI()
def initUI(self):
self.textEdit = QTextEdit()
self.setCentralWidget(self.textEdit)
self.statusBar()
openFile = QAction(QIcon('web.png'), 'OPen', self)
openFile.setShortcut('Ctrl+o')
openFile.setStatusTip('Open new file')
openFile.triggered.connect(self.showDialog)
menuBar = self.menuBar()
fileMenu = menuBar.addMenu('&File')
fileMenu.addAction(openFile)
self.setGeometry(300, 300, 250, 180)
self.setWindowTitle('Color Dialog')
self.show()
def showDialog(self):
fname = QFileDialog.getOpenFileName(self,
'Open File',
'C:/Users/slave/Desktop',
'Xml Files(*.xml);;Excel Files(*.xls *.xlsx);;Word Files(*.doc)')
print(fname)
if fname[0]:
f = open(fname[0], 'r')
with f:
data = f.read()
self.textEdit.setText(str(data))
if __name__=='__main__':
app = QApplication(sys.argv)
s = simple_4()
sys.exit(app.exec_())
|
zhaicao/pythonWorkspace
|
Pyqt5Practice/demo_8.py
|
demo_8.py
|
py
| 3,852 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "PyQt5.QtWidgets.QWidget",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtGui.QColor",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtWidgets.QPushButton",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtWidgets.QLineEdit",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtWidgets.QInputDialog.getText",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtWidgets.QInputDialog",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtWidgets.QWidget",
"line_number": 36,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtGui.QColor",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtWidgets.QPushButton",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtWidgets.QFrame",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtWidgets.QColorDialog.getColor",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtWidgets.QColorDialog",
"line_number": 58,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtWidgets.QWidget",
"line_number": 65,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtWidgets.QVBoxLayout",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtWidgets.QPushButton",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtWidgets.QSizePolicy.Fixed",
"line_number": 74,
"usage_type": "attribute"
},
{
"api_name": "PyQt5.QtWidgets.QSizePolicy",
"line_number": 74,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtWidgets.QLabel",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtWidgets.QFontDialog.getFont",
"line_number": 93,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtWidgets.QFontDialog",
"line_number": 93,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtWidgets.QMainWindow",
"line_number": 99,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtWidgets.QTextEdit",
"line_number": 105,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtWidgets.QAction",
"line_number": 109,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtGui.QIcon",
"line_number": 109,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtWidgets.QFileDialog.getOpenFileName",
"line_number": 123,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtWidgets.QFileDialog",
"line_number": 123,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtWidgets.QApplication",
"line_number": 137,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 137,
"usage_type": "attribute"
},
{
"api_name": "sys.exit",
"line_number": 139,
"usage_type": "call"
}
] |
9518060798
|
from ast import Lambda
from functools import reduce
l2=[1,2,3,4,5,6,7,8,9]
l=[1,2,3,4,5,6,7,8,9,10]
a=list(filter(lambda x : x>5,l))
print (a)
b=list(map(pow,a,l2))
print(b)
sum=(reduce(lambda x, y: x + y,b))
print(sum)
|
SouvikPaul2000/Souvik-paul-2
|
Basic code/MapFilterLamdaReduce.py
|
MapFilterLamdaReduce.py
|
py
| 222 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "functools.reduce",
"line_number": 11,
"usage_type": "call"
}
] |
41234449235
|
# create by andy at 2022/4/21
# reference:
import torchvision
from torch.utils.tensorboard import SummaryWriter
from torchvision import transforms
from torch.utils.data import DataLoader
dataset_transform = transforms.Compose([
transforms.ToTensor(),
])
train_set = torchvision.datasets.CIFAR10(root="./dataset",
train=True,
download=True,
transform=dataset_transform)
test_set = torchvision.datasets.CIFAR10(root="./dataset",
train=True,
download=False,
transform=dataset_transform)
test_loader = DataLoader(dataset=test_set,
batch_size=64,
shuffle=True,
num_workers=0,
drop_last=False)
writer = SummaryWriter("logs")
step = 0
for data in test_loader:
imgs, targets = data
print(imgs.shape)
print(targets)
writer.add_images("test_batch", imgs, step)
step += 1
# writer = SummaryWriter("logs")
# for i in range(10):
# img, target = test_set[i]
# writer.add_image("test_set", img, i)
#
# writer.close()
if __name__ == '__main__':
pass
|
beishangongzi/study_torch
|
p10_dataset/dataset_download.py
|
dataset_download.py
|
py
| 1,327 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "torchvision.transforms.Compose",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "torchvision.transforms.ToTensor",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "torchvision.datasets.CIFAR10",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "torchvision.datasets",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "torchvision.datasets.CIFAR10",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "torchvision.datasets",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "torch.utils.data.DataLoader",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "torch.utils.tensorboard.SummaryWriter",
"line_number": 29,
"usage_type": "call"
}
] |
17459989896
|
from flask import current_app, render_template, make_response, session
from ecom.datastore import db,redis_store
from ecom.exceptions import ServiceUnavailableException
from ecom.models import Item, Cart, Account
import json
import razorpay
from ecom.utils import general_util
class PaymentManager():
@staticmethod
def pay():
category_map = general_util.get_category_map()
print ("payment manager pay")
if 'email' in session:
query = Account.query.filter(Account.email == session['email'])
account = query.first()
query = Cart.query.filter_by(account_id=account.id, active=True)
cart = query.first()
print (cart.items)
value = 0
for item in cart.items:
value += item.price
amount = int(value*100)
print ("checcccc")
print (amount)
resp = make_response(render_template('payment.html',name=account.name,email=account.email,contact=account.mobile,amount=amount, category_map=category_map))
resp.headers['Content-type'] = 'text/html; charset=utf-8'
return resp
else:
status_message = "SignUp or Login to continue shopping"
resp = make_response(render_template('signup.html',category_map=category_map, status_message=status_message))
resp.headers['Content-type'] = 'text/html; charset=utf-8'
return resp
@staticmethod
def charge(data):
category_map = general_util.get_category_map()
print ("payment manager charge")
print (data)
client = razorpay.Client(auth=(current_app.config.get('RAZORPAY_KEY'), current_app.config.get('RAZORPAY_SECRET')))
client.set_app_details({"title" : "mmmkart", "version" : "1.0"})
#data = {"amount": 1000, "currency": "INR", "receipt": "654", "payment_capture": 1}
#client.order.create(data=data)
query = Account.query.filter(Account.email == session['email'])
account = query.first()
query = Cart.query.filter_by(account_id=account.id, active=True)
cart = query.first()
print (cart.items)
value = 0
for item in cart.items:
value += item.price
amount = int(value*100)
payment_id = data['razorpay_payment_id']
resp = client.payment.capture(payment_id, amount)
print (resp)
print (resp['status'])
if resp["status"] == "captured":
print ("sycccecece")
cart.active = False
try:
print ("inserrr")
db.session.add(cart)
db.session.commit()
except Exception as e:
print (e)
db.session.rollback()
message = "Congratulations !!! Your payment is successful"
resp = make_response(render_template('paymentresponse.html',message=message,success=1,category_map=category_map,name=session.get('name')))
resp.headers['Content-type'] = 'text/html; charset=utf-8'
return resp
else:
print ("fsasasas")
message = "Oops !!! Your payment got declined. Please retry payment"
resp = make_response(render_template('paymentresponse.html',message=message,category_map=category_map,name=session.get('name')))
resp.headers['Content-type'] = 'text/html; charset=utf-8'
return resp
|
ubamax/esocial-app
|
ecom/managers/payment_manager.py
|
payment_manager.py
|
py
| 3,473 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "ecom.utils.general_util.get_category_map",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "ecom.utils.general_util",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "flask.session",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "ecom.models.Account.query.filter",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "ecom.models.Account.query",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "ecom.models.Account",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "ecom.models.Account.email",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "flask.session",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "ecom.models.Cart.query.filter_by",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "ecom.models.Cart.query",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "ecom.models.Cart",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "flask.make_response",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "flask.make_response",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "ecom.utils.general_util.get_category_map",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "ecom.utils.general_util",
"line_number": 41,
"usage_type": "name"
},
{
"api_name": "razorpay.Client",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "flask.current_app.config.get",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "flask.current_app.config",
"line_number": 44,
"usage_type": "attribute"
},
{
"api_name": "flask.current_app",
"line_number": 44,
"usage_type": "name"
},
{
"api_name": "ecom.models.Account.query.filter",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "ecom.models.Account.query",
"line_number": 48,
"usage_type": "attribute"
},
{
"api_name": "ecom.models.Account",
"line_number": 48,
"usage_type": "name"
},
{
"api_name": "ecom.models.Account.email",
"line_number": 48,
"usage_type": "attribute"
},
{
"api_name": "flask.session",
"line_number": 48,
"usage_type": "name"
},
{
"api_name": "ecom.models.Cart.query.filter_by",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "ecom.models.Cart.query",
"line_number": 50,
"usage_type": "attribute"
},
{
"api_name": "ecom.models.Cart",
"line_number": 50,
"usage_type": "name"
},
{
"api_name": "ecom.datastore.db.session.add",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "ecom.datastore.db.session",
"line_number": 66,
"usage_type": "attribute"
},
{
"api_name": "ecom.datastore.db",
"line_number": 66,
"usage_type": "name"
},
{
"api_name": "ecom.datastore.db.session.commit",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "ecom.datastore.db.session",
"line_number": 67,
"usage_type": "attribute"
},
{
"api_name": "ecom.datastore.db",
"line_number": 67,
"usage_type": "name"
},
{
"api_name": "ecom.datastore.db.session.rollback",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "ecom.datastore.db.session",
"line_number": 70,
"usage_type": "attribute"
},
{
"api_name": "ecom.datastore.db",
"line_number": 70,
"usage_type": "name"
},
{
"api_name": "flask.make_response",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "flask.session.get",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "flask.session",
"line_number": 73,
"usage_type": "name"
},
{
"api_name": "flask.make_response",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "flask.session.get",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "flask.session",
"line_number": 79,
"usage_type": "name"
}
] |
11038610201
|
# -*- coding: utf-8 -*-
"""
Created on Wed Sep 19 14:24:54 2018
@author: henyd
"""
from sklearn import tree
from sklearn import svm
import numpy as np
from chatterbot import ChatBot
from chatterbot.trainers import ListTrainer
def get_response(usrText):
bot = ChatBot('Couns',
storage_adapter='chatterbot.storage.SQLStorageAdapter',
logic_adapters=[
'chatterbot.logic.BestMatch',
'chatterbot.logic.MathematicalEvaluation',
{
'import_path': 'chatterbot.logic.LowConfidenceAdapter',
'threshold': 0.70,
'default_response': "Sorry, I didn't understand."
}
],
trainer='chatterbot.trainers.ListTrainer')
bot.set_trainer(ListTrainer)
while True:
if usrText.strip()!= 'start':
result = bot.get_response(usrText)
reply = str(result)
return(reply)
if usrText.strip()== 'start':
z = []
with open('features1.txt', 'r') as xf:
for xline in xf:
x = xline.split(',')
for i in range(0,len(x)):
x[i] = int(x[i])
z.append(x)
w = []
with open('labels1.txt', 'r') as yf:
for yline in yf:
y = yline
w.append(y)
clf = tree.DecisionTreeClassifier()
clf = clf.fit(z, w)
abroad = input("Do you see yourself in some foreign country like USA, Canada, UK, Australia, Russia in about 5 years? [1/0]: ")
job = input("Are you satisfied with your bachelor's degree and want to do no more furthur studies ? [1/0]: ")
interest = input("Do you love doing coding and are you confident that you can solve the hardest problem if you are given plentiful of time ? [1/0]: ")
mba = input("Do you feel so as if you can't perform well in IT and you think that you can't compete well with others from our branch ?: [1/0]: ")
prediction = clf.predict([[abroad, job, interest, mba]])
print("You should think of doing: ",prediction)
return("Thats all")
|
henydave/career-counselling-chatbot
|
chatbot_final2.py
|
chatbot_final2.py
|
py
| 2,211 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "chatterbot.ChatBot",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "chatterbot.trainers.ListTrainer",
"line_number": 25,
"usage_type": "argument"
},
{
"api_name": "sklearn.tree.DecisionTreeClassifier",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "sklearn.tree",
"line_number": 44,
"usage_type": "name"
}
] |
10966117387
|
import os
import clip
import torch.nn as nn
from datasets import Action_DATASETS
from torch.utils.data import DataLoader
from tqdm import tqdm
import wandb
import argparse
import shutil
from pathlib import Path
import yaml
from dotmap import DotMap
import pprint
import numpy
from modules.Visual_Prompt import visual_prompt
from utils.Augmentation import get_augmentation
import torch
from utils.Text_Prompt import *
import pdb
from sklearn.metrics import f1_score
from sklearn.metrics import balanced_accuracy_score
import pandas as pd
import numpy as np
import logging
class TextCLIP(nn.Module):
def __init__(self, model):
super(TextCLIP, self).__init__()
self.model = model
def forward(self, text):
return self.model.encode_text(text)
class ImageCLIP(nn.Module):
def __init__(self, model):
super(ImageCLIP, self).__init__()
self.model = model
def forward(self, image):
return self.model.encode_image(image)
def val_metrics(pred, logger):
# pdb.set_trace()
test_num_each = [5464, 5373, 27014, 4239, 3936, 6258, 10474, 6273,
10512, 6667, 22131, 4661, 8855, 14047, 28896, 4209]
test_num_snippet = [43, 42, 212, 34, 31, 49, 82, 50, 83, 53, 173, 37, 70, 110, 226, 33]
# test_num_rem = [88, 125, 6, 15, 96, 114, 106, 1, 16, 11, 115, 53, 23, 95, 96, 113]
mean_weighted_f1 = 0.0
mean_unweighted_f1 = 0.0
mean_global_f1 = 0.0
mean_balanced_acc = 0.0
each_wf1 = []
each_unf1 = []
each_gf1 = []
each_bacc = []
test_labels_pth = ''
for i in range(16):
predi = pred[sum(test_num_snippet[:i]): sum(test_num_snippet[:i+1])]
predi = [p for p in predi for _ in range(128)]
predi = predi[:test_num_each[i]]
tl_pth = test_labels_pth + '/test_video_' + str(i).zfill(4) + '.csv'
ls = np.array(pd.read_csv(tl_pth, usecols=['frame_label']))
label = []
predict = []
for idx, l in enumerate(ls):
if not np.isnan(l):
label.append(int(l))
predict.append(predi[idx])
# pdb.set_trace()
mean_weighted_f1 += f1_score(label, predict, average='weighted')/16.0
mean_unweighted_f1 += f1_score(label, predict, average='macro') / 16.0
mean_global_f1 += f1_score(label, predict, average='micro') / 16.0
mean_balanced_acc += balanced_accuracy_score(label, predict) / 16.0
each_wf1.append(f1_score(label, predict, average='weighted'))
each_unf1.append(f1_score(label, predict, average='macro'))
each_gf1.append(f1_score(label, predict, average='micro'))
each_bacc.append(balanced_accuracy_score(label, predict))
# print('video: ', i, 'label: ', label, 'predict: ', predict)
logger.info('wf1: {}'.format(each_wf1))
logger.info('unf1:{}'.format(each_unf1))
logger.info('gf1:{}'.format(each_gf1))
logger.info('bacc:{}'.format(each_bacc))
return mean_weighted_f1, mean_unweighted_f1, mean_global_f1, mean_balanced_acc
def validate_val(epoch, val_loader, classes, device, model, fusion_model, config, num_text_aug):
model.eval()
fusion_model.eval()
num = 0
corr_1 = 0
corr_5 = 0
predict_list = []
label_list = []
label2 = []
pred2 = []
with torch.no_grad():
text_inputs = classes.to(device)
text_features = model.encode_text(text_inputs) # (bs*num_classes, 512)
for iii, (image, class_id) in enumerate(tqdm(val_loader)):
# image: (bs, 24, 224, 224)
image = image.view((-1, config.data.num_segments, 3) + image.size()[-2:])
# image: (16, 8, 3, 224, 224)
b, t, c, h, w = image.size()
class_id = class_id.to(device)
image_input = image.to(device).view(-1, c, h, w)
image_features = model.encode_image(image_input).view(b, t, -1)
image_features = fusion_model(image_features) # (bs, 512)
image_features /= image_features.norm(dim=-1, keepdim=True)
text_features /= text_features.norm(dim=-1, keepdim=True)
similarity = (100.0 * image_features @ text_features.T)
similarity = similarity.view(b, num_text_aug, -1)
# pdb.set_trace()
similarity = similarity.softmax(dim=-1)
similarity = similarity.mean(dim=1, keepdim=False)
values_1, indices_1 = similarity.topk(1, dim=-1)
# values_5, indices_5 = similarity.topk(5, dim=-1)
num += b
# print(indices_1)
# print(class_id)
# pdb.set_trace()
for i in range(b):
if values_1[i] < 0.5:
indices_1[i] = -1
# pdb.set_trace()
label_list.append(int(class_id[i].cpu().numpy()))
predict_list.append(indices_1[i].cpu().numpy()[0])
# if indices_1[i] == class_id[i]:
# corr_1 += 1
# if class_id[i] in indices_5[i]:
# corr_5 += 1
# pdb.set_trace()
# f1score = f1_score(label2, pred2, average='weighted')
# acc = accuracy_score(label2, pred2)
# pdb.set_trace()
bacc = balanced_accuracy_score(label_list, predict_list)
print('Epoch: [{}/{}]: bacc:{}'.format(epoch, config.solver.epochs, bacc))
return bacc
def validate(epoch, val_loader, classes, device, model, fusion_model, config, num_text_aug, logger):
model.eval()
fusion_model.eval()
num = 0
corr_1 = 0
corr_5 = 0
predict_list = []
label_list = []
label2 = []
pred2 = []
with torch.no_grad():
text_inputs = classes.to(device)
text_features = model.encode_text(text_inputs) # (bs*num_classes, 512)
for iii, (image, class_id) in enumerate(tqdm(val_loader)):
# image: (bs, 24, 224, 224)
image = image.view((-1, config.data.num_segments, 3) + image.size()[-2:])
# image: (16, 8, 3, 224, 224)
b, t, c, h, w = image.size()
class_id = class_id.to(device)
image_input = image.to(device).view(-1, c, h, w)
image_features = model.encode_image(image_input).view(b, t, -1)
image_features = fusion_model(image_features) # (bs, 512)
image_features /= image_features.norm(dim=-1, keepdim=True)
text_features /= text_features.norm(dim=-1, keepdim=True)
similarity = (100.0 * image_features @ text_features.T)
similarity = similarity.view(b, num_text_aug, -1)
# pdb.set_trace()
similarity = similarity.softmax(dim=-1)
similarity = similarity.mean(dim=1, keepdim=False)
values_1, indices_1 = similarity.topk(1, dim=-1)
# values_5, indices_5 = similarity.topk(5, dim=-1)
num += b
# print(indices_1)
# print(class_id)
# pdb.set_trace()
for i in range(b):
# if values_1[i] < 0.5:
# indices_1[i] = -1
# pdb.set_trace()
# label_list.append(int(class_id[i].cpu().numpy()))
predict_list.append(indices_1[i].cpu().numpy()[0])
# if indices_1[i] == class_id[i]:
# corr_1 += 1
# if class_id[i] in indices_5[i]:
# corr_5 += 1
# pdb.set_trace()
# f1score = f1_score(label2, pred2, average='weighted')
# acc = accuracy_score(label2, pred2)
wf1, unf1, gf1, bacc = val_metrics(predict_list, logger)
# top1 = f1score
# top5 = float(corr_5) / num * 100
# wandb.log({"top1": top1})
# wandb.log({"top5": top5})
# print('Epoch: [{}/{}]: Top1: {}, Top5: {}'.format(epoch, config.solver.epochs, top1, top5))
logger.info('Epoch: [{}/{}]: wf1:{:.3f} unf1:{:.3f} gf1:{:.3f} bacc:{:.3f}'.format(epoch, config.solver.epochs, wf1, unf1, gf1, bacc))
return wf1
def main():
global args, best_prec1
global global_step
parser = argparse.ArgumentParser()
parser.add_argument('--config', '-cfg', default='')
parser.add_argument('--log_time', default='')
args = parser.parse_args()
with open(args.config, 'r') as f:
config = yaml.load(f)
working_dir = os.path.join('./exp', config['network']['type'], config['network']['arch'], config['data']['dataset'],
args.log_time)
wandb.init(project=config['network']['type'],
name='{}_{}_{}_{}'.format(args.log_time, config['network']['type'], config['network']['arch'],
config['data']['dataset']))
print('-' * 80)
print(' ' * 20, "working dir: {}".format(working_dir))
print('-' * 80)
print('-' * 80)
print(' ' * 30, "Config")
pp = pprint.PrettyPrinter(indent=4)
pp.pprint(config)
print('-' * 80)
config = DotMap(config)
Path(working_dir).mkdir(parents=True, exist_ok=True)
shutil.copy(args.config, working_dir)
shutil.copy('test.py', working_dir)
device = "cuda" if torch.cuda.is_available() else "cpu" # If using GPU then use mixed precision training.
model, clip_state_dict = clip.load(config.network.arch, device=device, jit=False, tsm=config.network.tsm,
T=config.data.num_segments, dropout=config.network.drop_out,
emb_dropout=config.network.emb_dropout) # Must set jit=False for training ViT-B/32
transform_val = get_augmentation(False, config)
fusion_model = visual_prompt(config.network.sim_header, clip_state_dict, config.data.num_segments)
model_text = TextCLIP(model)
model_image = ImageCLIP(model)
model_text = torch.nn.DataParallel(model_text).cuda()
model_image = torch.nn.DataParallel(model_image).cuda()
fusion_model = torch.nn.DataParallel(fusion_model).cuda()
wandb.watch(model)
wandb.watch(fusion_model)
val_data = Action_DATASETS(config.data.val_list, config.data.label_list, num_segments=config.data.num_segments,
image_tmpl=config.data.image_tmpl,
transform=transform_val, random_shift=config.random_shift)
val_loader = DataLoader(val_data, batch_size=config.data.batch_size, num_workers=config.data.workers, shuffle=False,
pin_memory=True, drop_last=True)
if device == "cpu":
model_text.float()
model_image.float()
else:
clip.model.convert_weights(
model_text) # Actually this line is unnecessary since clip by default already on float16
clip.model.convert_weights(model_image)
start_epoch = config.solver.start_epoch
if config.pretrain:
if os.path.isfile(config.pretrain):
print(("=> loading checkpoint '{}'".format(config.pretrain)))
checkpoint = torch.load(config.pretrain)
model.load_state_dict(checkpoint['model_state_dict'])
fusion_model.load_state_dict(checkpoint['fusion_model_state_dict'])
del checkpoint
else:
print(("=> no checkpoint found at '{}'".format(config.pretrain)))
classes, num_text_aug, text_dict = text_prompt(val_data)
best_prec1 = 0.0
prec1 = validate(start_epoch, val_loader, classes, device, model, fusion_model, config, num_text_aug)
if __name__ == '__main__':
main()
|
Lycus99/SDA-CLIP
|
test.py
|
test.py
|
py
| 11,801 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "torch.nn.Module",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "torch.nn.Module",
"line_number": 36,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 36,
"usage_type": "name"
},
{
"api_name": "numpy.array",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "numpy.isnan",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "sklearn.metrics.f1_score",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "sklearn.metrics.f1_score",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "sklearn.metrics.f1_score",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "sklearn.metrics.balanced_accuracy_score",
"line_number": 82,
"usage_type": "call"
},
{
"api_name": "sklearn.metrics.f1_score",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "sklearn.metrics.f1_score",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "sklearn.metrics.f1_score",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "sklearn.metrics.balanced_accuracy_score",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "torch.no_grad",
"line_number": 111,
"usage_type": "call"
},
{
"api_name": "tqdm.tqdm",
"line_number": 114,
"usage_type": "call"
},
{
"api_name": "sklearn.metrics.balanced_accuracy_score",
"line_number": 157,
"usage_type": "call"
},
{
"api_name": "torch.no_grad",
"line_number": 177,
"usage_type": "call"
},
{
"api_name": "tqdm.tqdm",
"line_number": 180,
"usage_type": "call"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 236,
"usage_type": "call"
},
{
"api_name": "yaml.load",
"line_number": 242,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 243,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 243,
"usage_type": "attribute"
},
{
"api_name": "wandb.init",
"line_number": 245,
"usage_type": "call"
},
{
"api_name": "pprint.PrettyPrinter",
"line_number": 254,
"usage_type": "call"
},
{
"api_name": "dotmap.DotMap",
"line_number": 258,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 260,
"usage_type": "call"
},
{
"api_name": "shutil.copy",
"line_number": 261,
"usage_type": "call"
},
{
"api_name": "shutil.copy",
"line_number": 262,
"usage_type": "call"
},
{
"api_name": "torch.cuda.is_available",
"line_number": 264,
"usage_type": "call"
},
{
"api_name": "torch.cuda",
"line_number": 264,
"usage_type": "attribute"
},
{
"api_name": "clip.load",
"line_number": 266,
"usage_type": "call"
},
{
"api_name": "utils.Augmentation.get_augmentation",
"line_number": 270,
"usage_type": "call"
},
{
"api_name": "modules.Visual_Prompt.visual_prompt",
"line_number": 272,
"usage_type": "call"
},
{
"api_name": "torch.nn.DataParallel",
"line_number": 277,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 277,
"usage_type": "attribute"
},
{
"api_name": "torch.nn.DataParallel",
"line_number": 278,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 278,
"usage_type": "attribute"
},
{
"api_name": "torch.nn.DataParallel",
"line_number": 279,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 279,
"usage_type": "attribute"
},
{
"api_name": "wandb.watch",
"line_number": 280,
"usage_type": "call"
},
{
"api_name": "wandb.watch",
"line_number": 281,
"usage_type": "call"
},
{
"api_name": "datasets.Action_DATASETS",
"line_number": 283,
"usage_type": "call"
},
{
"api_name": "torch.utils.data.DataLoader",
"line_number": 286,
"usage_type": "call"
},
{
"api_name": "clip.model.convert_weights",
"line_number": 293,
"usage_type": "call"
},
{
"api_name": "clip.model",
"line_number": 293,
"usage_type": "attribute"
},
{
"api_name": "clip.model.convert_weights",
"line_number": 295,
"usage_type": "call"
},
{
"api_name": "clip.model",
"line_number": 295,
"usage_type": "attribute"
},
{
"api_name": "os.path.isfile",
"line_number": 300,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 300,
"usage_type": "attribute"
},
{
"api_name": "torch.load",
"line_number": 302,
"usage_type": "call"
}
] |
39480165735
|
# Cross-checks a 2-layer GCN forward pass computed three independent ways:
#   1. explicit sparse (SpMM) aggregation with scipy      -> feat2
#   2. DGL's GraphConv reference implementation            -> feat2_dgl
#   3. an IR-driven replay of mm/agg ops from a YAML file  -> ir_feat
# All inputs are pre-dumped .npy traces under ../trace/.
import numpy as np
import scipy.sparse as sp
# Layer-1 aggregation inputs: COO indices, edge weights, input node features.
agg1_index = np.load("../trace/agg1_index.npy")
agg1_adj = np.load("../trace/agg1_adj.npy")
input_feature = np.load("../trace/feat1.npy")
coo_row = agg1_index[0]
coo_col = agg1_index[1]
num_nodes = input_feature.shape[0]
"""
# Use adjacency matrix to generate norm
num_nodes = input_feature.shape[0]
agg1_coo_ones = sp.coo_matrix((np.ones(agg1_adj.shape), (coo_row, coo_col)), shape=(num_nodes, num_nodes))
agg1_coo_ones_T = sp.coo_matrix((np.ones(agg1_adj.shape), (coo_col, coo_row)), shape=(num_nodes, num_nodes))
temp_ones = np.ones((num_nodes, num_nodes))
right = agg1_coo_ones.multiply(agg1_coo_ones.dot(temp_ones))
right.data = 1 / right.data
left = (agg1_coo_ones_T.multiply(agg1_coo_ones_T.dot(temp_ones))).transpose()
left.data = 1 / left.data
both = left.multiply(right)
both.data = np.sqrt(both.data)
"""
# Layer 1: feat1 = A1 @ (X @ W1), followed by ReLU (mask-multiply trick).
agg1_coo = sp.coo_matrix((agg1_adj, (coo_row, coo_col)), shape=(num_nodes, num_nodes))
fc1_weight = np.load("../trace/fc1_weight.npy")
feat1 = agg1_coo.dot(input_feature.dot(fc1_weight))
feat1 = feat1 * (feat1 > 0)  # ReLU
# Layer 2: feat2 = A2 @ (feat1 @ W2), no activation.
agg2_index = np.load("../trace/agg2_index.npy")
agg2_adj = np.load("../trace/agg2_adj.npy")
coo_row = agg2_index[0]
coo_col = agg2_index[1]
num_nodes = feat1.shape[0]
agg2_coo = sp.coo_matrix((agg2_adj, (coo_row, coo_col)), shape=(num_nodes, num_nodes))
fc2_weight = np.load("../trace/fc2_weight.npy")
feat2 = agg2_coo.dot(feat1.dot(fc2_weight))
# Reference computation with DGL's GraphConv on the Pubmed graph.
from dgl.data import PubmedGraphDataset
from dgl import AddSelfLoop
from inference import dgl_GraphConv
import torch.nn.functional as F
import torch
raw_dir = "../data/dgl"
transform = AddSelfLoop()  # GCN normalization expects self-loops
data = PubmedGraphDataset(raw_dir=raw_dir, transform=transform)
g = data[0].int()
input_feature_t = torch.from_numpy(input_feature)
fc1_weight_t = torch.from_numpy(fc1_weight)
feat1_dgl = dgl_GraphConv(fc1_weight.shape[0], fc1_weight.shape[1], g, input_feature_t, fc1_weight_t, norm='both')
feat1_dgl = F.relu(feat1_dgl)
fc2_weight_t = torch.from_numpy(fc2_weight)
feat2_dgl = dgl_GraphConv(fc2_weight.shape[0], fc2_weight.shape[1], g, feat1_dgl, fc2_weight_t, norm='both')
feat2_dgl_np = np.array(feat2_dgl)
# Column-wise agreement check plus a dump of any mismatching entries.
print(f"GraphConv vs. MM-AGG: {np.all(np.isclose(feat2_dgl_np, feat2, rtol=1e-5, atol=1e-6), axis=0)}")
print((feat2_dgl_np-feat2)[np.nonzero(np.isclose(feat2_dgl_np, feat2, rtol=1e-5, atol=1e-7) == False)])
# Replay the generated IR: each op is a dense matmul ('mm') or a sparse
# sum-aggregation ('agg'), with optional fused bias add and ReLU.
import yaml
from os import path
root = "../trace/"
f = open(path.join(root,"ir_generated.yaml"), "r")
totinfo = yaml.safe_load(f)
bias = None
feat = 0
for info in totinfo:
    if info['op_type'] == 'mm':
        input_feat = np.load(path.join(root,info['op_input_data']['read_data_path']))
        weight = np.load(path.join(root,info['op_weight']['read_data_path']))
        feat_shape = info['op_weight']['data_shape']
        feat = input_feat.dot(weight)
    elif info['op_type'] == 'agg':
        if info['reduce_type'] == 'sum':
            index = np.load(path.join(root,info['op_adj']['read_index_path']))
            adj = np.load(path.join(root,info['op_adj']['read_data_path']))
            num_nodes = info['op_adj']['data_shape'][0]
            agg_coo = sp.coo_matrix((adj, (index[0], index[1])), shape=(num_nodes, num_nodes))
            input_feat = np.load(path.join(root,info['op_input_data']['read_data_path']))
            feat = agg_coo.dot(input_feat)
    if info['bias'] == True:
        bias = np.load(path.join(root,info['op_bias']['read_data_path']))
        feat = feat + bias
    if info['relu'] == True:
        feat = feat * (feat > 0)
    np.save(path.join(root,info['op_output_data']['write_data_path']), feat)
# feat5.npy is the final IR output written by the loop above.
ir_feat = np.load(path.join(root,"feat5.npy"))
print(f"MM-AGG vs. IR: {np.all(np.isclose(ir_feat, feat2, rtol=1e-5, atol=1e-6), axis=0)}")
print((ir_feat-feat2)[np.nonzero(np.isclose(ir_feat, feat2, rtol=1e-5, atol=1e-7) == False)])
# Finally compare the IR output against the saved ground-truth logits.
from utils import enlarge_and_save
enlarge_and_save(torch.from_numpy(np.load(path.join(root,"true_output.npy"))), 1, "enlarge_true_output")
true_output = np.load(path.join(root,"enlarge_true_output.npy"))
print(f"DGL vs. IR: {np.all(np.isclose(ir_feat, true_output, rtol=1e-2, atol=0), axis=0)}")
print((np.abs(ir_feat-true_output) / true_output)[np.nonzero(np.isclose(ir_feat, true_output, rtol=1e-2, atol=0) == False)])
|
zhang677/SparseAcc
|
train/dgl/check.py
|
check.py
|
py
| 4,271 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "numpy.load",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "numpy.load",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "numpy.load",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "scipy.sparse.coo_matrix",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "scipy.sparse",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "numpy.load",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "numpy.load",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "numpy.load",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "scipy.sparse.coo_matrix",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "scipy.sparse",
"line_number": 36,
"usage_type": "name"
},
{
"api_name": "numpy.load",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "dgl.AddSelfLoop",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "dgl.data.PubmedGraphDataset",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "torch.from_numpy",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "torch.from_numpy",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "inference.dgl_GraphConv",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional.relu",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional",
"line_number": 53,
"usage_type": "name"
},
{
"api_name": "torch.from_numpy",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "inference.dgl_GraphConv",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "numpy.all",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "numpy.isclose",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "numpy.nonzero",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "numpy.isclose",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 66,
"usage_type": "name"
},
{
"api_name": "yaml.safe_load",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "numpy.load",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 72,
"usage_type": "name"
},
{
"api_name": "numpy.load",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 73,
"usage_type": "name"
},
{
"api_name": "numpy.load",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 79,
"usage_type": "name"
},
{
"api_name": "numpy.load",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 80,
"usage_type": "name"
},
{
"api_name": "scipy.sparse.coo_matrix",
"line_number": 82,
"usage_type": "call"
},
{
"api_name": "scipy.sparse",
"line_number": 82,
"usage_type": "name"
},
{
"api_name": "numpy.load",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 83,
"usage_type": "name"
},
{
"api_name": "numpy.load",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 87,
"usage_type": "name"
},
{
"api_name": "numpy.save",
"line_number": 91,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 91,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 91,
"usage_type": "name"
},
{
"api_name": "numpy.load",
"line_number": 93,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 93,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 93,
"usage_type": "name"
},
{
"api_name": "numpy.all",
"line_number": 94,
"usage_type": "call"
},
{
"api_name": "numpy.isclose",
"line_number": 94,
"usage_type": "call"
},
{
"api_name": "numpy.nonzero",
"line_number": 95,
"usage_type": "call"
},
{
"api_name": "numpy.isclose",
"line_number": 95,
"usage_type": "call"
},
{
"api_name": "utils.enlarge_and_save",
"line_number": 99,
"usage_type": "call"
},
{
"api_name": "torch.from_numpy",
"line_number": 99,
"usage_type": "call"
},
{
"api_name": "numpy.load",
"line_number": 99,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 99,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 99,
"usage_type": "name"
},
{
"api_name": "numpy.load",
"line_number": 100,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 100,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 100,
"usage_type": "name"
},
{
"api_name": "numpy.all",
"line_number": 101,
"usage_type": "call"
},
{
"api_name": "numpy.isclose",
"line_number": 101,
"usage_type": "call"
},
{
"api_name": "numpy.abs",
"line_number": 102,
"usage_type": "call"
},
{
"api_name": "numpy.nonzero",
"line_number": 102,
"usage_type": "call"
},
{
"api_name": "numpy.isclose",
"line_number": 102,
"usage_type": "call"
}
] |
23589518050
|
# -*- coding: utf-8 -*-
"""
Spyder Editor
This is a temporary script file.

Cell-based MNE-Python walkthrough: load an oddball EEG recording, remove
artifact components with ICA, epoch around stimulus markers and apply a
baseline correction.
"""
import mne
fname ="oddball_example_small-fif.gz"
# Load lazily first, then reload with preload=True (filtering and ICA
# require the data in memory).
raw = mne.io.read_raw_fif(fname)
raw = mne.io.read_raw_fif(fname,preload=True)
raw.plot_psd();
raw.plot();
#%%data in 20 different ica components
ica = mne.preprocessing.ICA(n_components=20, random_state=0)
#%%
# Fit ICA on a 12-35 Hz (beta band) filtered copy of the data.
ica.fit(raw.copy().filter(12,35))
#%%
ica.plot_components(outlines="skirt");
#%%we store bad components in ica object
# Manually selected artifact components.
ica.exclude=[10,13,15,16,17,18]
#%%we could also use one of the automatic algorithms
# Automatic detection of EOG (eye) artifacts against the "SO2" channel.
bad_idx,scores = ica.find_bads_eog(raw,"SO2",threshold=1.5)
print(bad_idx)
#%%
ica.exclude=bad_idx
#%%
raw.plot();
#%%
# Remove the excluded components from the continuous data.
raw=ica.apply(raw.copy(),exclude=ica.exclude)
ica.apply(raw.copy(),exclude=ica.exclude).plot();
#%%for epoching the data we need event markers
events= mne.find_events(raw)
#%%
events
#%%
mne.viz.plot_events(events[:100]);
#%%create event ids
event_ids= {"standard/stimulus":200,
            "target/stimulus":100}
#%%epochs
epochs=mne.Epochs(raw,events,event_id=event_ids)
#%%
epochs.plot();
#%%
#%%
# Re-create epochs with preload=True so ICA can be applied to them as well.
epochs=mne.Epochs(raw,events,event_id=event_ids,preload=True)
epochs= ica.apply(epochs, exclude=ica.exclude)
#%%baseline
# Baseline-correct using the pre-stimulus interval (epoch start to t=0).
epochs.apply_baseline((None,0))
#%%
epochs["target"]
|
Sarah436/EEG_Control_Drone
|
EEG_Drone.py
|
EEG_Drone.py
|
py
| 1,304 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "mne.io.read_raw_fif",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "mne.io",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "mne.io.read_raw_fif",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "mne.io",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "mne.preprocessing.ICA",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "mne.preprocessing",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "mne.find_events",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "mne.viz.plot_events",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "mne.viz",
"line_number": 37,
"usage_type": "attribute"
},
{
"api_name": "mne.Epochs",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "mne.Epochs",
"line_number": 48,
"usage_type": "call"
}
] |
27591581690
|
import logging
from pymongo import MongoClient
from src.utils import Singleton
logger = logging.getLogger(__name__)
class DBConnection(metaclass=Singleton):
    """Process-wide (Singleton) holder of the MongoDB client connection."""

    def __init__(self, db_settings):
        """Open a MongoClient from a settings mapping.

        :param db_settings: dict with 'host', 'port', 'username', 'password'
        """
        self.db_settings = db_settings
        self.client = MongoClient(
            host=db_settings['host'],
            port=int(db_settings['port']),
            username=db_settings['username'],
            password=db_settings['password'],
            authSource='admin'
        )

    def _get_database(self):
        """Return the application's database handle.

        :return: database client object
        """
        return self.client['fashion-mnist']

    def save_request_info(self, request_id, raw, processed, predictions):
        """Save request_id, raw, processed and predictions on mongoDB.

        :param request_id: request id
        :param raw: raw data (input)
        :param processed: processed data
        :param predictions: predictions
        :return: None
        """
        database = self._get_database()
        logger.info(f'saving raw_data, processed_data and predictions for {len(raw)} predictions')
        # One document per prediction, aligned by position across the three lists.
        documents = []
        for position in range(len(raw)):
            documents.append({
                'request_id': request_id,
                'raw_data': raw[position],
                'processed_data': processed[position],
                'predictions': predictions[position]
            })
        database['predictions'].insert_many(documents)
|
andre1393/fashion-mnist
|
src/database/mongo_connection.py
|
mongo_connection.py
|
py
| 1,388 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "logging.getLogger",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "src.utils.Singleton",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "pymongo.MongoClient",
"line_number": 13,
"usage_type": "call"
}
] |
27278546383
|
from cProfile import label
from genericpath import exists
from math import ceil
import os
import random
import numpy as np
import argparse
import time
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import optim, autograd
from matplotlib import pyplot as plt
import seaborn as sns
from mpl_toolkits.axes_grid1 import make_axes_locatable
from tqdm import tqdm
from utils import *
from model import *
# ---- command-line hyper-parameters for the Deep-Ritz Poisson solver ----
parser = argparse.ArgumentParser()
parser.add_argument("-e", "--epochs", type=int, default=30000)
parser.add_argument("-lr", "--learningrate", type=float, default=3e-3)
parser.add_argument("-i", "--indim", type=int, default=2)
parser.add_argument("-o", "--outdim", type=int, default=1)
parser.add_argument("-hd", "--hiddim", type=int, default=10)
parser.add_argument("-hn", "--hidnum", type=int, default=5)
# NOTE(review): argparse's type=bool does not parse booleans — bool("False")
# is True, so any value supplied on the CLI sets these flags. Prefer
# action="store_true" if CLI toggling is intended.
parser.add_argument("-p", "--pretrained", type=bool, default=False)
parser.add_argument("-s", "--seed", type=int, default=2022)
parser.add_argument("-sp", "--sample", type=int, default=100)
parser.add_argument("-skip", "--skip", type=bool, default=True)
args = parser.parse_args()
# Unpack into module-level constants used throughout the script.
EPOCHS = args.epochs
LR = args.learningrate
IN_DIM = args.indim
OUT_DIM = args.outdim
HIDDEN_DIM = args.hiddim
NUM_HIDDENS = args.hidnum
PRETRAINED = args.pretrained
SEED = args.seed
SAMPLE = args.sample
SKIP = args.skip
sns.set_style("white")
# All outputs (checkpoint, figures) are written under ./results/<exp_name>/.
exp_name = "actSiLU"
path = os.path.join("./results/", exp_name)
os.makedirs(path, exist_ok=True)
def u_real(x):
    """Analytic solution u(x1, x2) = (1 - x1^2)(1 - x2^2) of the test Poisson
    problem, evaluated row-wise on a (N, 2) batch of points."""
    first_coord = x[:, 0]
    second_coord = x[:, 1]
    return (1 - first_coord ** 2) * (1 - second_coord ** 2)
def fetch_interior_points(N=128, d=2):
    """Draw N uniform collocation points from the cube (-1, 1)^d.

    :param N: number of points
    :param d: spatial dimension
    :return: tensor of shape (N, d)
    """
    unit_samples = torch.rand(N, d)   # uniform in [0, 1)
    return unit_samples * 2 - 1       # rescale to [-1, 1)
def fetch_boundary_points(N=33):
    """Sample 4*N points on the boundary of the square [-1, 1]^2.

    N points are placed on each edge (top y=+1, bottom y=-1, right x=+1,
    left x=-1); the free coordinate is uniform in [-1, 1].

    :param N: number of points per edge
    :return: tensor of shape (4*N, 2)
    """
    # Fix: the original drew an extra torch.rand(N, 1) into an unused local,
    # wasting an RNG draw; it is removed here.
    free = torch.rand(N, 1) * 2 - 1          # shared free coordinate
    ones = torch.ones_like(free)
    xb1 = torch.cat((free, ones), dim=1)     # top edge,    y = +1
    xb2 = torch.cat((free, -ones), dim=1)    # bottom edge, y = -1
    xb3 = torch.cat((ones, free), dim=1)     # right edge,  x = +1
    xb4 = torch.cat((-ones, free), dim=1)    # left edge,   x = -1
    return torch.cat((xb1, xb2, xb3, xb4), dim=0)
if __name__ == "__main__":
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print(f"Training Device: {device}")
    seed_everything(SEED)
    print(f"Random Seed: {SEED}")
    # Fully-connected network (optionally with skip connections), R^2 -> R.
    model = FullConnected_DNN(
        in_dim=IN_DIM,
        out_dim=OUT_DIM,
        hidden_dim=HIDDEN_DIM,
        num_blks=NUM_HIDDENS,
        skip=SKIP,
        act=nn.SiLU(),
    ).to(device)
    losses = []     # total loss per epoch
    losses_r = []   # variational (interior) loss per epoch
    losses_b = []   # boundary-penalty loss per epoch
    model.apply(initialize_weights)
    optimizer = optim.Adam(model.parameters(), lr=LR)
    scheduler = optim.lr_scheduler.CosineAnnealingWarmRestarts(optimizer,
                                                               T_0=500,
                                                               T_mult=2,
                                                               last_epoch=-1)
    # 0x3F3F3F is just a large sentinel ("infinity") for the running minima.
    best_loss, best_b, best_r, best_epoch = 0x3F3F3F, 0x3F3F3F, 0x3F3F3F, 0
    bar = tqdm(range(EPOCHS))
    model.train()
    t0 = time.time()
    for epoch in bar:
        bar.set_description("Training Epoch " + str(epoch))
        # generate the data set (fresh collocation points every epoch)
        xr = fetch_interior_points()
        xb = fetch_boundary_points()
        xr = xr.to(device)
        xb = xb.to(device)
        xr.requires_grad_()  # needed to differentiate u w.r.t. interior points
        output_r = model(xr)
        output_b = model(xb)
        # print(output_r.shape, output_b.shape)
        # du/dx at the interior points; graph kept so the loss can backprop.
        grads = autograd.grad(
            outputs=output_r,
            inputs=xr,
            grad_outputs=torch.ones_like(output_r),
            create_graph=True,
            retain_graph=True,
            only_inputs=True,
        )[0]
        # print(torch.sum(torch.pow(grads, 2), dim=1).shape, output_r.shape)
        # loss_r = 0.5 * torch.sum(torch.pow(grads, 2), dim=1) - output_r
        # loss_r = 0.5 * torch.sum(torch.pow(grads, 2), dim=1)
        # Deep-Ritz energy: 0.5*|grad u|^2 - f*u with source f = 4 - 2*|x|^2.
        loss_r = (0.5 * torch.sum(torch.pow(grads, 2), dim=1) -
                  (4 - 2 * torch.sum(torch.pow(xr, 2), dim=1)) * output_r)
        loss_r = torch.mean(loss_r)
        # Soft Dirichlet condition: penalize u != 0 on the boundary.
        loss_b = torch.mean(torch.pow(output_b, 2))
        # loss = 4 * loss_r + 9 * 500 * loss_b
        loss = loss_r + 100 * loss_b
        # loss = loss_r + 500 * loss_b
        bar.set_postfix({
            "Tol Loss": "{:.4f}".format(abs(loss)),
            "Var Loss": "{:.4f}".format(abs(loss_r)),
            "Bnd Loss": "{:.4f}".format(abs(loss_b)),
        })
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        scheduler.step()
        # NOTE(review): .numpy() on a CUDA tensor raises; needs .cpu() first
        # if this script is ever run on GPU — confirm intended device.
        loss_t = loss.detach().numpy()
        losses.append(abs(loss_t))
        loss_r = loss_r.detach().numpy()
        losses_r.append(abs(loss_r))
        loss_b = loss_b.detach().numpy()
        losses_b.append(abs(loss_b))
        saved = False
        # Track the best model over the last fifth of training only.
        if epoch > int(4 * EPOCHS / 5):
            if torch.abs(loss) < best_loss:
                best_loss = torch.abs(loss).item()
                best_b = loss_b
                best_r = loss_r
                best_epoch = epoch
                torch.save(model.state_dict(), os.path.join(path, "ckpt.bin"))
    # NOTE(review): `saved` is assigned False each epoch and never set True,
    # so this fallback always fires and overwrites ckpt.bin with the FINAL
    # weights, defeating the best-checkpoint logic above. Indentation here is
    # reconstructed from a whitespace-stripped dump — verify placement.
    if not saved:
        torch.save(model.state_dict(), os.path.join(path, "ckpt.bin"))
    print(
        "Best epoch:",
        best_epoch,
        "Best loss:",
        best_loss,
        "Best Var loss:",
        best_r,
        "Best Boundary loss",
        best_b,
    )
    elapse = time.time() - t0
    print(f"Training time: {elapse}")
    print(f"# of parameters: {get_param_num(model)}")
    # plot figure
    model.eval()
    model.load_state_dict(torch.load(os.path.join(path, "ckpt.bin")))
    print("Load weights from checkpoint!")
    with torch.no_grad():
        # Evaluate the trained network on a 1001x1001 grid over [-1, 1]^2.
        x1 = torch.linspace(-1, 1, 1001)
        x2 = torch.linspace(-1, 1, 1001)
        X, Y = torch.meshgrid(x1, x2)
        # NOTE(review): X is unused; the grid coords are rebuilt from Y and
        # Y.T — confirm this matches the intended (x, y) ordering.
        Z = torch.cat((Y.flatten()[:, None], Y.T.flatten()[:, None]), dim=1)
        Z = Z.to(device)
        pred = model(Z)
    plot_loss_and_save(
        EPOCHS=EPOCHS,
        losses=losses,
        losses_r=losses_r,
        losses_b=losses_b,
        SAMPLE=SAMPLE,
        path=path,
    )
    pred = pred.cpu().numpy()
    pred = pred.reshape(1001, 1001)
    plot_result_and_save(pred, path)
    # print(type(pred), type(u_real(Z)))
    # Discrete L2 error vs. the analytic solution, scaled by the cell area.
    l2_loss = (np.sqrt(
        np.sum(np.square(pred - u_real(Z).cpu().numpy().reshape(1001, 1001))))
        * (2 / 1000)**2)
    print(f"l2 Loss: {l2_loss}")
    print("Output figure saved!")
|
JiantingFeng/Deep-Ritz-PDE-Solver
|
new_train.py
|
new_train.py
|
py
| 6,408 |
python
|
en
|
code
| 3 |
github-code
|
6
|
[
{
"api_name": "argparse.ArgumentParser",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "seaborn.set_style",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 48,
"usage_type": "attribute"
},
{
"api_name": "os.makedirs",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "torch.rand",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "torch.rand",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "torch.rand",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "torch.cat",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "torch.ones_like",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "torch.cat",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "torch.full_like",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "torch.cat",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "torch.ones_like",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "torch.cat",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "torch.full_like",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "torch.cat",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "torch.device",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "torch.cuda.is_available",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "torch.cuda",
"line_number": 73,
"usage_type": "attribute"
},
{
"api_name": "torch.nn.SiLU",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 83,
"usage_type": "name"
},
{
"api_name": "model.apply",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "torch.optim.Adam",
"line_number": 89,
"usage_type": "call"
},
{
"api_name": "torch.optim",
"line_number": 89,
"usage_type": "name"
},
{
"api_name": "model.parameters",
"line_number": 89,
"usage_type": "call"
},
{
"api_name": "torch.optim.lr_scheduler.CosineAnnealingWarmRestarts",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "torch.optim.lr_scheduler",
"line_number": 90,
"usage_type": "attribute"
},
{
"api_name": "torch.optim",
"line_number": 90,
"usage_type": "name"
},
{
"api_name": "tqdm.tqdm",
"line_number": 95,
"usage_type": "call"
},
{
"api_name": "model.train",
"line_number": 96,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 97,
"usage_type": "call"
},
{
"api_name": "torch.autograd.grad",
"line_number": 111,
"usage_type": "call"
},
{
"api_name": "torch.autograd",
"line_number": 111,
"usage_type": "name"
},
{
"api_name": "torch.ones_like",
"line_number": 114,
"usage_type": "call"
},
{
"api_name": "torch.sum",
"line_number": 122,
"usage_type": "call"
},
{
"api_name": "torch.pow",
"line_number": 122,
"usage_type": "call"
},
{
"api_name": "torch.sum",
"line_number": 123,
"usage_type": "call"
},
{
"api_name": "torch.pow",
"line_number": 123,
"usage_type": "call"
},
{
"api_name": "torch.mean",
"line_number": 124,
"usage_type": "call"
},
{
"api_name": "torch.mean",
"line_number": 125,
"usage_type": "call"
},
{
"api_name": "torch.pow",
"line_number": 125,
"usage_type": "call"
},
{
"api_name": "torch.abs",
"line_number": 146,
"usage_type": "call"
},
{
"api_name": "torch.abs",
"line_number": 147,
"usage_type": "call"
},
{
"api_name": "torch.save",
"line_number": 151,
"usage_type": "call"
},
{
"api_name": "model.state_dict",
"line_number": 151,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 151,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 151,
"usage_type": "attribute"
},
{
"api_name": "torch.save",
"line_number": 153,
"usage_type": "call"
},
{
"api_name": "model.state_dict",
"line_number": 153,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 153,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 153,
"usage_type": "attribute"
},
{
"api_name": "time.time",
"line_number": 164,
"usage_type": "call"
},
{
"api_name": "model.eval",
"line_number": 168,
"usage_type": "call"
},
{
"api_name": "model.load_state_dict",
"line_number": 169,
"usage_type": "call"
},
{
"api_name": "torch.load",
"line_number": 169,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 169,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 169,
"usage_type": "attribute"
},
{
"api_name": "torch.no_grad",
"line_number": 171,
"usage_type": "call"
},
{
"api_name": "torch.linspace",
"line_number": 172,
"usage_type": "call"
},
{
"api_name": "torch.linspace",
"line_number": 173,
"usage_type": "call"
},
{
"api_name": "torch.meshgrid",
"line_number": 174,
"usage_type": "call"
},
{
"api_name": "torch.cat",
"line_number": 175,
"usage_type": "call"
},
{
"api_name": "numpy.sqrt",
"line_number": 191,
"usage_type": "call"
},
{
"api_name": "numpy.sum",
"line_number": 192,
"usage_type": "call"
},
{
"api_name": "numpy.square",
"line_number": 192,
"usage_type": "call"
}
] |
22989091213
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# utf-8 中文编码
u"""
>>> m=encrypt('123456789','1'*16)
>>> m
'34430c0e47da2207d0028e778c186d55ba4c1fb1528ee06b09a6856ddf8a9ced'
>>> decrypt('123456789',m)
'1111111111111111'
m = encrypt_verify('123','0123456789')
print m
print decrypt_verify('123',m)
"""
from Crypto.Cipher import AES
import hashlib
from binascii import b2a_hex, a2b_hex
import json
# Fixed 16-byte CFB initialization vector shared by all functions below.
# NOTE(review): a hard-coded, reused IV weakens CFB mode considerably; this
# module also targets Python 2 only (str keys fed to hashlib.md5, and the
# 32-char hex digest used directly as a str AES key).
IV = b'dsfgh478fdshg4gf'
def encrypt(key,text):
    """Encrypt *text* with AES (CFB mode) and return a hex-encoded ciphertext."""
    # AES keys must be 16 (AES-128), 24 (AES-192) or 32 (AES-256) bytes long,
    # so the user-supplied key is md5-hashed into a 32-char hex string first.
    key_md5 = hashlib.md5(key).hexdigest()
    # AES.MODE_CFB is the cipher-feedback block mode; IV is the 16-byte
    # initialization vector required by this mode (a second secret of sorts).
    cipher = AES.new(key_md5,AES.MODE_CFB,IV)
    # AES input length must be a multiple of 16; pad the tail with '\0'.
    ntext = text + ('\0' * (16-(len(text)%16)))
    # Hex-encode because the raw ciphertext contains many special characters.
    return b2a_hex(cipher.encrypt(ntext))
def decrypt(key,text):
    """Decrypt a hex-encoded ciphertext produced by encrypt(); strips padding."""
    # Same key derivation as encrypt(): md5 hex digest of the user key.
    key_md5 = hashlib.md5(key).hexdigest()
    cipher = AES.new(key_md5,AES.MODE_CFB,IV)
    t=cipher.decrypt(a2b_hex(text))
    # Remove the trailing '\0' padding added by encrypt().
    return t.rstrip('\0')
def encrypt_verify(key,text):
    """Encrypt data together with an integrity tag.

    key  encryption key
    text string to encrypt
    Returns the hex-encoded ciphertext.
    """
    # The tag is md5(md5(key + IV)); decrypt_verify recomputes and compares it.
    data_dict = {'value':text,'security':hashlib.md5(hashlib.md5(key + IV).hexdigest()).hexdigest()}
    # NOTE(review): json.dumps(..., encoding=...) exists only on Python 2.
    data_json = json.dumps(data_dict,encoding='utf8')
    return encrypt(key,data_json)
def decrypt_verify(key,aes_data):
    """Decrypt data and verify its embedded integrity tag.

    key      decryption key
    aes_data hex-encoded ciphertext produced by encrypt_verify()
    Returns the original value on success, otherwise None.
    """
    try:
        data_json = decrypt(key,aes_data)
        data = json.loads(data_json,encoding='utf8')
    # Fix: was a bare `except:`, which also swallowed SystemExit and
    # KeyboardInterrupt; narrow to Exception (wrong key / corrupt ciphertext
    # simply yields undecodable JSON here).
    except Exception:
        return None
    # Fix: use .get so a payload without a 'security' key returns None
    # instead of letting a KeyError escape.
    if data.get('security') == hashlib.md5(hashlib.md5(key + IV).hexdigest()).hexdigest():
        return data['value']
    return None
|
GameXG/shadowsocks_admin
|
mycrypto.py
|
mycrypto.py
|
py
| 2,044 |
python
|
zh
|
code
| 175 |
github-code
|
6
|
[
{
"api_name": "hashlib.md5",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "Crypto.Cipher.AES.new",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "Crypto.Cipher.AES",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "Crypto.Cipher.AES.MODE_CFB",
"line_number": 33,
"usage_type": "attribute"
},
{
"api_name": "binascii.b2a_hex",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "hashlib.md5",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "Crypto.Cipher.AES.new",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "Crypto.Cipher.AES",
"line_number": 41,
"usage_type": "name"
},
{
"api_name": "Crypto.Cipher.AES.MODE_CFB",
"line_number": 41,
"usage_type": "attribute"
},
{
"api_name": "binascii.a2b_hex",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "hashlib.md5",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "hashlib.md5",
"line_number": 70,
"usage_type": "call"
}
] |
74130707068
|
import datetime
def get_next_friday(some_date: datetime.datetime, if_friday_same=True):
    """Return the first Friday on or after *some_date*.

    When *some_date* is itself a Friday it is returned unchanged if
    if_friday_same is True, otherwise the Friday one week later.
    """
    weekday = some_date.weekday()  # Monday == 0 ... Friday == 4
    if if_friday_same:
        days_ahead = (4 - weekday) % 7
    else:
        days_ahead = (3 - weekday) % 7 + 1
    return some_date + datetime.timedelta(days_ahead)
if __name__ == "__main__":
    # Smoke-test the helper on today's date and on known fixed dates.
    today = datetime.date.today()
    print(get_next_friday(today))
    another_day = datetime.datetime(2022, 7, 28) # Thursday
    print(get_next_friday(another_day))
    a_friday = datetime.datetime(2022, 7, 29) # Friday
    print(get_next_friday(a_friday))
    a_friday = datetime.datetime(2022, 7, 29) # Friday
    print(get_next_friday(a_friday, False))   # expect the following Friday
|
alex-muci/small-projects
|
snippets/five_min_test.py
|
five_min_test.py
|
py
| 748 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "datetime.datetime",
"line_number": 4,
"usage_type": "attribute"
},
{
"api_name": "datetime.timedelta",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "datetime.timedelta",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "datetime.date.today",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "datetime.date",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "datetime.datetime",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 21,
"usage_type": "call"
}
] |
44091100190
|
import os
import os.path
import numpy as np
from PIL import Image
from PIL import ImageDraw
from PIL import ImageFont
# Pre-load the three logo fonts at a large base size (200 pt); letters are
# rendered once at full size, cached, then resized down on demand.
_logo_fonts = { "Arial" : ImageFont.truetype(os.path.abspath(os.path.dirname(__file__))+"/arial.ttf", 200),
                "ArialBold" : ImageFont.truetype(os.path.abspath(os.path.dirname(__file__))+"/arialbd.ttf", 200),
                "Courier" : ImageFont.truetype(os.path.abspath(os.path.dirname(__file__))+"/cour.ttf", 200) }
_lettercache_unsized = {}  # (letter, font) -> full-size cropped PIL image
_lettercache = {}          # (letter, width, height, font) -> RGBA ndarray
def _autocrop(I):
    """Trim all-white rows/columns from an RGBA letter image, then re-add a
    thin transparent white border (avoids edge artifacts when resizing)."""
    I = np.array(I)
    # Cut out all the rows/columns that are all white
    I = I[np.where(np.any(I[:,:,:3].min(axis=1)!=255,axis=1))[0],:,:] # Crop vertical
    I = I[:,np.where(np.any(I[:,:,:3].min(axis=0)!=255,axis=1))[0],:] # Crop horizontal
    # Add white border. Helps avoid edge artifacts when resizing down with anti-aliasing
    pad1 = 255*np.ones_like(I[:1,:,:]); pad1[:,:,3] = 0  # 1-px top/bottom rows
    I = np.vstack([pad1, I, pad1])
    pad2 = 255*np.ones_like(I[:,:3,:]); pad2[:,:,3] = 0  # 3-px left/right cols
    I = np.hstack([pad2, I, pad2])
    return Image.fromarray(I)
uparrow_chr = u'\u25B2'  # BLACK UP-POINTING TRIANGLE, rendered like a letter
def _get_letterimg_unsized(letter, font):
    """Render (and memoize) a full-size colored image of one logo letter."""
    global _lettercache_unsized
    global _logo_fonts
    # Conventional nucleotide colors; N and the arrow glyph render gray.
    colors = { "A" : (0,200,0),
               "C" : (0,0,200),
               "G" : (235,140,0),
               "T" : (200,0,0),
               "U" : (200,0,0),
               "N" : (128,128,128),
               uparrow_chr : (128,128,128) }
    assert letter in colors, "Unrecognized letter"
    assert font in _logo_fonts, "Unrecognized font"
    if (letter,font) not in _lettercache_unsized:
        # Draw full-sized versions of this letter
        letterimg = 255*np.ones((256,256,4), np.uint8)
        letterimg[:,:,3] = 0 # Transparent by default
        letterimg = Image.fromarray(letterimg)
        draw = ImageDraw.Draw(letterimg)
        draw.text((1,1), letter, colors[letter], font=_logo_fonts[font])
        letterimg = _autocrop(letterimg)  # tight crop + transparent border
        _lettercache_unsized[(letter,font)] = letterimg
    return _lettercache_unsized[(letter,font)]
def get_letterimg(letter, width, height, font="ArialBold"):
    """Return an RGBA ndarray of shape (height, width, 4) for *letter*,
    resized from the cached full-size rendering and memoized per size."""
    global _lettercache
    assert width and height
    # If we've never been asked for a letter of this width/zheight before,
    # then we use Image.resize to generate a new one.
    if (letter,width,height,font) not in _lettercache:
        letterimg = _get_letterimg_unsized(letter, font)
        # NOTE(review): Image.ANTIALIAS was removed in Pillow 10; newer
        # Pillow requires Image.LANCZOS / Image.Resampling.LANCZOS instead.
        letterimg = letterimg.resize((width, height), Image.ANTIALIAS)
        _lettercache[(letter,width,height,font)] = np.array(letterimg).reshape((height,width,4))
    return _lettercache[(letter,width,height,font)]
def tape2logo(tape, height=51, letterwidth=6, bufferzoom=4, refseq=None, vmax=None, style=None, rna=False, transparent=False, complement=False):
# Styles "stack" "grow" "growclip" "growfade" "bars" "bar"
tapedim,tapelen = tape.shape # n = number of filters
if tapedim != 4:
raise NotImplementedError("Expected tape with 4 rows")
if vmax is not None:
tape = np.maximum(-vmax, np.minimum(vmax, tape))
zheight = height*bufferzoom
zletterwidth = letterwidth*bufferzoom
mid1 = (zheight-bufferzoom)//2
mid2 = (zheight-bufferzoom)//2 + bufferzoom
if refseq:
assert len(refseq) == tapelen
refseq_height = int(letterwidth*bufferzoom*1.1)
# Create an up-arrow image
arrowheight = int(refseq_height*0.15)
uparrow_img = get_letterimg(uparrow_chr, zletterwidth//2, arrowheight, font="Arial")
pad1 = 255*np.ones((arrowheight, zletterwidth//4, 4))
pad1[:,:,3] = 0
uparrow_img = np.hstack([pad1, uparrow_img])
pad2 = 255*np.ones((arrowheight, zletterwidth-uparrow_img.shape[1], 4))
pad2[:,:,3] = 0
uparrow_img = np.hstack([uparrow_img, pad2])
mid1 -= refseq_height//2+2*bufferzoom
mid2 = mid1+refseq_height+4*bufferzoom
positive_only = bool(np.all(tape.ravel() >= 0)) or (style in ("grow", "growfade","bar"))
if positive_only:
mid1 = zheight
mid2 = zheight
translate = { "A":"A", "C":"C", "G":"G", "T":"T", "U":"U", "N":"N" }
if complement:
translate = { "A":"T", "C":"G", "G":"C", "T":"A", "U":"A", "N":"N" }
lettertable = ["A","C","G","U" if rna else "T"]
barcolors = { "A" : (128,220,128),
"C" : (128,128,220),
"G" : (245,200,90),
"T" : (220,128,128),
"U" : (220,128,128),
"N" : (192,192,192) }
    def make_lettercol(t, colheight, reverse):
        """Build a vertical stack of letter images for one tape column.

        t: length-4 vector of per-base coefficients for this column.
        colheight: target total height of the stack, in zoomed pixels.
        reverse: if True, the stack order is flipped before stacking.
        Returns an image array of shape (stack_height, zletterwidth, 4), or an
        empty (0, zletterwidth, 4) array when no coefficient is positive.
        Uses mid1, lettertable, zletterwidth and get_letterimg from the
        enclosing scope.
        """
        # Only show letters with a positive coefficient in t
        idx = [i for i in range(4) if t[i] > 0]
        # Sort by coefficient, ascending; the optional flip below decides
        # which end of the stack the largest letter lands on
        idx = sorted(idx, key=lambda i: t[i])
        # Height of each letter in pixels, proportional to its share of the
        # positive mass in this column
        zheights = [int(round(t[i]/sum(t[idx])*colheight)) for i in idx]
        idx = [i for i,h in zip(idx,zheights) if h > 0]
        zheights = [h for h in zheights if h > 0]
        # While the stack is too tall (rounding overshoot), shave pixel rows
        # off the last (smallest) entry, dropping it once it reaches zero
        while sum(zheights) > mid1:
            zheights[-1] -= 1
            if zheights[-1] == 0:
                zheights.pop()
                idx.pop()
        # Render the individual letter images, reversing the order if requested
        imgs = [get_letterimg(lettertable[i], zletterwidth, h) for i,h in zip(idx, zheights)]
        if reverse:
            imgs = [img for img in reversed(imgs)]
        return np.vstack(imgs) if imgs else np.empty((0, zletterwidth, 4))
if style == "seqlogo":
assert positive_only
L = 255*np.ones((zheight,tapelen*zletterwidth,4), np.uint8)
L[:,:,3] = 0 # Transparent
for j in range(tapelen):
bits = 2 + np.sum(tape[:,j] * np.log2(tape[:,j]))
letterimg = make_lettercol( tape[:,j], mid1 * bits/2., reverse=True)
L[mid1-letterimg.shape[0]:mid1,j*zletterwidth:(j+1)*zletterwidth,:] = letterimg
# Rescale it down to the original requested size
L = np.array(Image.fromarray(L).resize((tapelen*letterwidth, height), Image.ANTIALIAS))
if not transparent:
L[:,:,3] = 255 # full opacity
return L
pos_tape = np.maximum(1e-16, tape)
neg_tape = np.maximum(1e-16,-tape)
pos_colheights = pos_tape.max(axis=0)
neg_colheights = neg_tape.max(axis=0)
#max_colheight = np.maximum(pos_colheights, neg_colheights).max()
#max_colheight = (pos_colheights + neg_colheights).max()
max_colheight = neg_colheights.max()
#neg_colheights = np.minimum(max_colheight,neg_colheights)
pos_colheights /= max_colheight
neg_colheights /= max_colheight
# If we've been told to scale everything relative to a certain maximum, then adjust our scales accordinly
if vmax:
pos_colheights *= pos_tape.max() / vmax
neg_colheights *= neg_tape.max() / vmax
L = 255*np.ones((zheight,tapelen*zletterwidth,4), np.uint8)
L[:,:,3] = 0 # Start transparent
# For each column of the filter, generate a stack of letters for the logo
for j in range(tapelen):
if style in (None,"stack"):
# Generate the stack of letters that goes above, and below, the dividing ling
aboveimg = make_lettercol( tape[:,j], mid1 * pos_colheights[j], reverse=True)
belowimg = make_lettercol(-tape[:,j], mid1 * neg_colheights[j], reverse=False) if not positive_only else None
# Insert the stacked images into column j of the logo image
L[mid1-aboveimg.shape[0]:mid1,j*zletterwidth:(j+1)*zletterwidth,:] = aboveimg
if not positive_only:
L[mid2:mid2+belowimg.shape[0],j*zletterwidth:(j+1)*zletterwidth,:] = belowimg
if refseq:
letterimg = get_letterimg(refseq[j], zletterwidth, refseq_height, font="ArialBold")
L[mid1+2*bufferzoom:mid2-2*bufferzoom,j*zletterwidth:(j+1)*zletterwidth,:] = letterimg
elif style == "growclip":
# Grow the height of each letter based on binding
zletterheight = int(mid1 * neg_colheights[j])
if zletterheight:
letterimg = get_letterimg(refseq[j] if refseq else "N", zletterwidth, zletterheight, font="ArialBold")
L[mid1-letterimg.shape[0]:mid1,j*zletterwidth:(j+1)*zletterwidth,:] = letterimg
elif style == "refseq":
letterimg = get_letterimg(refseq[j], zletterwidth, refseq_height, font="Arial")
L[mid1-letterimg.shape[0]:mid1,j*zletterwidth:(j+1)*zletterwidth,:] = letterimg
elif style == "growfade" or style == "grow":
# Grow the height of each letter based on binding
arrowpad_top = 3*bufferzoom
arrowpad_btm = 4*bufferzoom
arrowheight_padded = 0#arrowheight+arrowpad_top+arrowpad_btm
growheight = int((mid1-arrowheight_padded-refseq_height) * neg_colheights[j])
fademin = refseq_height
fademax = refseq_height+0.333*(mid1-arrowheight_padded-refseq_height)
zletterheight = refseq_height + growheight
fade = max(0, min(0.85, (fademax-zletterheight)/(fademax-fademin)))
letterimg = get_letterimg(translate[refseq[j]] if refseq else "N", zletterwidth, zletterheight, font="ArialBold")
if style == "growfade":
letterimg = letterimg*(1-fade) + 255*fade
mid0 = mid1-letterimg.shape[0]
L[mid0:mid1,j*zletterwidth:(j+1)*zletterwidth,:] = letterimg[::-1,::] if complement else letterimg
"""
#aboveimg = make_lettercol(tape[:,j], (mid1-bufferzoom*2) * pos_colheights[j], reverse=True)
#intensity = max(0, min(1.0, (pos_colheights[j]-0.4*refseq_height/mid1)/(1.5*refseq_height/mid1)))
#aboveimg = aboveimg*intensity + 255*(1-intensity)
tapej = tape[:,j].copy()
tapej[tapej < 0.10*abs(tape).max()] = 0.0
#if pos_colheights[j] >= 0.15*max(pos_colheights.max(),neg_colheights[j].max()):
if np.any(tapej > 0):
aboveimg = make_lettercol(tapej, (mid1-bufferzoom*3) * pos_colheights[j], reverse=True)
aboveimg = np.minimum(255,aboveimg*0.61 + 255*0.4)
assert mid0-arrowheight-arrowpad_btm >= 0
assert mid0-arrowheight_padded-aboveimg.shape[0] >= 0
L[mid0-arrowheight-arrowpad_btm:mid0-arrowpad_btm,j*zletterwidth:(j+1)*zletterwidth,:] = uparrow_img
L[mid0-arrowheight_padded-aboveimg.shape[0]:mid0-arrowheight_padded,j*zletterwidth:(j+1)*zletterwidth,:] = aboveimg
#grey = aboveimg.mean(axis=2).reshape(aboveimg.shape[:2]+(1,))
#aboveimg[:,:,:] = np.minimum(255,grey.astype(np.float32)*160./grey.min())
#L[mid0-arrowpad_btm-aboveimg.shape[0]:mid0-arrowpad_btm,j*zletterwidth:(j+1)*zletterwidth,:] = aboveimg
"""
elif style == "bar":
assert refseq, "style topbar needs refseq"
# Put the refseq letter, with fixed height
letterimg = get_letterimg(refseq[j], zletterwidth, refseq_height, font="Arial")
L[mid1-letterimg.shape[0]:mid1,j*zletterwidth:(j+1)*zletterwidth,:] = letterimg
# Draw a bar plot along the top based on neg_colheights
barheight = int((mid1-refseq_height-2*bufferzoom) * neg_colheights[j])
L[mid1-letterimg.shape[0]-barheight-2*bufferzoom:mid1-letterimg.shape[0]-2*bufferzoom,j*zletterwidth:(j+1)*zletterwidth,:] = np.array(barcolors[refseq[j]]).reshape((1,1,4))
elif style == "bars":
assert refseq, "style topbar needs refseq"
# Put the refseq letter, with fixed height
letterimg = get_letterimg(refseq[j], zletterwidth, refseq_height, font="Arial")
L[mid1+2*bufferzoom:mid2-2*bufferzoom,j*zletterwidth:(j+1)*zletterwidth,:] = letterimg
# Draw a bar plot along the top based on neg_colheights
aboveheight = int(mid1 * neg_colheights[j])
belowheight = int(mid1 * pos_colheights[j])
L[mid1-aboveheight:mid1,j*zletterwidth:(j+1)*zletterwidth,:] = np.array(barcolors[refseq[j]]).reshape((1,1,4))
L[mid2:mid2+belowheight,j*zletterwidth:(j+1)*zletterwidth,:] = np.array(barcolors[refseq[j]]).reshape((1,1,4))
else:
raise NotImplementedError("Unrecognzied style type")
if style in (None, "stack") and not refseq:
# Put a horizontal line across the middle of this logo
L[mid1:mid1+bufferzoom,:,:] = 100
if not positive_only:
L[mid2-bufferzoom:mid2,:,:] = 100
if not transparent:
L[:,:,3] = 255 # full opacity
# Rescale it down to the original requested size
L = np.array(Image.fromarray(L).resize((tapelen*letterwidth, height), Image.ANTIALIAS))
if complement:
L = L[::-1,:,:] # vertical flip
return L
|
jisraeli/DeepBind
|
code/libs/deepity/deepity/tape2logo.py
|
tape2logo.py
|
py
| 13,154 |
python
|
en
|
code
| 85 |
github-code
|
6
|
[
{
"api_name": "PIL.ImageFont.truetype",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "PIL.ImageFont",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "os.path.abspath",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "os.path.dirname",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "PIL.ImageFont.truetype",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "PIL.ImageFont",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "os.path.abspath",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "os.path.dirname",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "PIL.ImageFont.truetype",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "PIL.ImageFont",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "os.path.abspath",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "os.path.dirname",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "numpy.where",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "numpy.any",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "numpy.where",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "numpy.any",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "numpy.ones_like",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "numpy.vstack",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "numpy.ones_like",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "numpy.hstack",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "PIL.Image.fromarray",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "numpy.ones",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "numpy.uint8",
"line_number": 48,
"usage_type": "attribute"
},
{
"api_name": "PIL.Image.fromarray",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 50,
"usage_type": "name"
},
{
"api_name": "PIL.ImageDraw.Draw",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "PIL.ImageDraw",
"line_number": 51,
"usage_type": "name"
},
{
"api_name": "PIL.Image.ANTIALIAS",
"line_number": 67,
"usage_type": "attribute"
},
{
"api_name": "PIL.Image",
"line_number": 67,
"usage_type": "name"
},
{
"api_name": "numpy.array",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "numpy.maximum",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "numpy.minimum",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "numpy.ones",
"line_number": 96,
"usage_type": "call"
},
{
"api_name": "numpy.hstack",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "numpy.ones",
"line_number": 99,
"usage_type": "call"
},
{
"api_name": "numpy.hstack",
"line_number": 101,
"usage_type": "call"
},
{
"api_name": "numpy.all",
"line_number": 106,
"usage_type": "call"
},
{
"api_name": "numpy.vstack",
"line_number": 149,
"usage_type": "call"
},
{
"api_name": "numpy.empty",
"line_number": 149,
"usage_type": "call"
},
{
"api_name": "numpy.ones",
"line_number": 153,
"usage_type": "call"
},
{
"api_name": "numpy.uint8",
"line_number": 153,
"usage_type": "attribute"
},
{
"api_name": "numpy.sum",
"line_number": 156,
"usage_type": "call"
},
{
"api_name": "numpy.log2",
"line_number": 156,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 161,
"usage_type": "call"
},
{
"api_name": "PIL.Image.fromarray",
"line_number": 161,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 161,
"usage_type": "name"
},
{
"api_name": "PIL.Image.ANTIALIAS",
"line_number": 161,
"usage_type": "attribute"
},
{
"api_name": "numpy.maximum",
"line_number": 166,
"usage_type": "call"
},
{
"api_name": "numpy.maximum",
"line_number": 167,
"usage_type": "call"
},
{
"api_name": "numpy.ones",
"line_number": 187,
"usage_type": "call"
},
{
"api_name": "numpy.uint8",
"line_number": 187,
"usage_type": "attribute"
},
{
"api_name": "numpy.array",
"line_number": 262,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 273,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 274,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 289,
"usage_type": "call"
},
{
"api_name": "PIL.Image.fromarray",
"line_number": 289,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 289,
"usage_type": "name"
},
{
"api_name": "PIL.Image.ANTIALIAS",
"line_number": 289,
"usage_type": "attribute"
}
] |
5897393360
|
from fastapi import FastAPI, HTTPException
from model import Todo
from fastapi.middleware.cors import CORSMiddleware
from database import(
fetch_all_todos,
fetch_one_todo,
create_todo,
remove_todo,
patch_todo
)
# FastAPI application instance; the route handlers below register against it.
app = FastAPI()

# Origins allowed by CORS: the local React dev server over https and http.
origins = [
    "https://localhost:3000",
    "http://localhost:3000",
]

# Allow the listed origins full access (all methods/headers, with credentials).
app.add_middleware(
    CORSMiddleware,
    allow_origins= origins,
    allow_credentials= True,
    allow_methods= ["*"],
    allow_headers= ["*"]
)
@app.get("/")
async def read_root():
    """Root endpoint returning a static greeting payload."""
    greeting = {"Hello": "Ahosan"}
    return greeting
@app.get("/api/todo")
async def get_todo():
    """Return every todo stored in the database."""
    return await fetch_all_todos()
@app.get("/api/todo/{id}", response_model=Todo)
async def get_todo_by_id(id):
    """Return the todo with the given id; 404 if it does not exist."""
    todo = await fetch_one_todo(id)
    if not todo:
        raise HTTPException(404, f"There is no todo with {id}")
    return todo
@app.post("/api/todo/", response_model=Todo)
async def post_todo(todo: Todo):
    """Create a new todo; 400 if the database rejects it."""
    created = await create_todo(todo.dict())
    if not created:
        raise HTTPException(400, "Something went wrong")
    return created
@app.patch("/api/todo/update/{id}/", response_model=Todo)
async def update_todo(id: int, todo: Todo):
    """Apply *todo* as an update to the entry with the given id."""
    return await patch_todo(id, todo)
@app.delete("/api/todo/{id}")
async def delete_todo(id):
    """Delete the todo with the given id; 404 if it does not exist."""
    removed = await remove_todo(id)
    if not removed:
        raise HTTPException(404, f"There is no todo with the id {id}")
    return "Successfully deleted Todo"
|
MdAhosanHabib/Redis-Kafka-FastAPI-React
|
main.py
|
main.py
|
py
| 1,538 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "fastapi.FastAPI",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "fastapi.middleware.cors.CORSMiddleware",
"line_number": 21,
"usage_type": "argument"
},
{
"api_name": "database.fetch_all_todos",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "database.fetch_one_todo",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "fastapi.HTTPException",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "model.Todo",
"line_number": 37,
"usage_type": "name"
},
{
"api_name": "model.Todo",
"line_number": 45,
"usage_type": "name"
},
{
"api_name": "database.create_todo",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "fastapi.HTTPException",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "model.Todo",
"line_number": 44,
"usage_type": "name"
},
{
"api_name": "model.Todo",
"line_number": 52,
"usage_type": "name"
},
{
"api_name": "database.patch_todo",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "model.Todo",
"line_number": 51,
"usage_type": "name"
},
{
"api_name": "database.remove_todo",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "fastapi.HTTPException",
"line_number": 61,
"usage_type": "call"
}
] |
15169928273
|
from fastapi import Depends, Request
from fastapi_utils.cbv import cbv
from fastapi_utils.inferring_router import InferringRouter
from sqlalchemy import select
from sqlalchemy.ext.asyncio import AsyncSession
from admins.models import TemplateField, Template
from permissions import manage_helpdesk
from admins.schemas import TemplateFieldSchemaCreate, TemplateFieldSchemaReturn
from crud_handler import BaseHandler
from database import get_async_session
# Router collecting all template-field endpoints; InferringRouter infers
# response models from the handlers' return annotations.
template_fields_router = InferringRouter(tags=["TemplateFields"])
# Common URL prefix shared by every route registered below.
ROUTE = "/api/template_fields"
@cbv(template_fields_router)
class TemplateFieldView(BaseHandler):
    """CRUD endpoints for TemplateField objects under /api/template_fields."""

    # Async DB session injected by FastAPI for every request.
    session: AsyncSession = Depends(get_async_session)

    def __init__(self):
        # BaseHandler provides the generic create/delete/update helpers,
        # parameterised here with the TemplateField model.
        super().__init__(TemplateField)

    @template_fields_router.post(f"{ROUTE}/", response_model=TemplateFieldSchemaReturn, status_code=201)
    async def create_item(self, template_field_object: TemplateFieldSchemaCreate, request: Request):
        """Create a template field, resolving its parent Template by id first."""
        payload = template_field_object.dict()
        template_id = payload.get("template").get("id")
        parent = await self.get_obj(select(Template), self.session, {"id": template_id})
        payload["template"] = parent
        return await self.create(self.session, payload)

    @template_fields_router.delete(f"{ROUTE}/" + "{template_field_id}", status_code=204)
    async def delete_template_field(self, template_field_id: int, request: Request):
        """Delete the template field with the given id."""
        return await self.delete(self.session, template_field_id)

    @template_fields_router.put(f"{ROUTE}/" + "{template_field_id}",
                                response_model=TemplateFieldSchemaReturn,
                                status_code=200)
    async def update_template_field(self, request: Request, template_field_id: int,
                                    template_field: TemplateFieldSchemaReturn):
        """Replace a template field; the template FK is written separately."""
        payload = template_field.dict()
        template_data = payload.pop("template")
        updated = await self.update(
            session=self.session,
            id=template_field_id,
            data=payload,
            fk_obj={"template_id": template_data["id"]},
            update_fk=True
        )
        await self.session.commit()
        return updated
|
AlexeyShakov/helpdesk_fast_api
|
src/admins/endpoints/template_fields.py
|
template_fields.py
|
py
| 2,294 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "fastapi_utils.inferring_router.InferringRouter",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "crud_handler.BaseHandler",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.ext.asyncio.AsyncSession",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "fastapi.Depends",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "database.get_async_session",
"line_number": 19,
"usage_type": "argument"
},
{
"api_name": "admins.models.TemplateField",
"line_number": 22,
"usage_type": "argument"
},
{
"api_name": "admins.schemas.TemplateFieldSchemaCreate",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "fastapi.Request",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.select",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "admins.models.Template",
"line_number": 27,
"usage_type": "argument"
},
{
"api_name": "admins.schemas.TemplateFieldSchemaReturn",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "fastapi.Request",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "fastapi.Request",
"line_number": 38,
"usage_type": "name"
},
{
"api_name": "admins.schemas.TemplateFieldSchemaReturn",
"line_number": 39,
"usage_type": "name"
},
{
"api_name": "admins.schemas.TemplateFieldSchemaReturn",
"line_number": 36,
"usage_type": "name"
},
{
"api_name": "fastapi_utils.cbv.cbv",
"line_number": 17,
"usage_type": "call"
}
] |
24354887610
|
import csv
import requests
import xml.etree.ElementTree as ET
def parseXML(xmlfile):
    """Parse an orders XML file and return a list of AddOrder dicts.

    Each <AddOrder> element becomes one dict mapping child tag to the
    child's UTF-8 encoded text.

    :param xmlfile: path or file object accepted by ElementTree.parse.
    :return: list of dicts, one per <AddOrder> element, in document order.

    Bug fixed: the original iterated './AddOrder' twice with identical loop
    bodies, so every order appeared twice in the result.
    """
    tree = ET.parse(xmlfile)
    root = tree.getroot()
    orderitems = []
    for item in root.findall('./AddOrder'):
        orders = {}
        for child in item:
            # NOTE(review): .encode('utf8') yields bytes, which csv.DictWriter
            # renders as "b'...'" on Python 3 -- looks like a Python 2
            # leftover; kept as-is to preserve downstream behaviour.
            orders[child.tag] = child.text.encode('utf8')
        orderitems.append(orders)
    return orderitems
def savetoCSV(newsitems, filename):
    """Write order dicts to *filename* as CSV with a fixed header row.

    :param newsitems: iterable of dicts keyed by the field names below.
    :param filename: destination CSV path (overwritten if it exists).
    """
    fields = ['book', 'operation', 'price', 'volume', 'orderId']
    # newline='' is required by the csv module; without it every row is
    # followed by a blank line on Windows.
    with open(filename, 'w', newline='') as csvfile:
        writer = csv.DictWriter(csvfile, fieldnames = fields)
        writer.writeheader()
        writer.writerows(newsitems)
def main():
    """Convert orders.xml in the working directory to orders.csv."""
    savetoCSV(parseXML('orders.xml'), 'orders.csv')


if __name__ == "__main__":
    # calling main function
    main()
|
actioncamen13/Orderbook-submission
|
csv_convert.py
|
csv_convert.py
|
py
| 1,032 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "xml.etree.ElementTree.parse",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "xml.etree.ElementTree",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "csv.DictWriter",
"line_number": 32,
"usage_type": "call"
}
] |
74849030268
|
"""
@author: Tobias Carryer
"""
import numpy as np
import pandas as pd
from pyts.image import GASF, GADF, MTF
from splitting_data import get_subject_data
from matplotlib import pyplot as plt
def create_gasf_gadf_mtf_compound_images(observations, image_size=128):
    """Encode time-series observations as 3-channel GASF/GADF/MTF images.

    Each observation window is transformed three ways (Gramian Angular
    Summation Field, Gramian Angular Difference Field, Markov Transition
    Field) and the results are stacked as image channels, following
    https://arxiv.org/pdf/1506.00327.pdf

    :param observations: 2D array, shape [n_observations, window_length].
    :param image_size: side length of the square images; must not exceed the
        length of each observation window.
    :raises ValueError: if observations is empty.
    :return: array of shape [n_observations, image_size, image_size, 3];
        the image origin is the top-left corner.
    """
    if len(observations) == 0:
        raise ValueError("Observations cannot be empty.")
    transformers = (GASF(image_size), GADF(image_size), MTF(image_size))
    channels = [t.fit_transform(observations) for t in transformers]
    return np.stack(channels, axis=3)
if __name__ == "__main__":
    subject_num_to_study = 4
    feature_to_study = "HR"
    # Generate a compound image and display it to the user
    all_subject_data = pd.read_csv("confocal_all_patient_phys_data.txt", sep="\t")
    subject_data = get_subject_data(all_subject_data, subject_num_to_study)
    # Slide a 128-sample window forward until one contains no NaNs.
    # NOTE(review): the first window (i=0) is re-evaluated once before i is
    # incremented, so the scan does one redundant slice -- harmless, but
    # confirm the intended start index.
    i = 0
    observation = subject_data[feature_to_study].iloc[i:i+128]
    while observation.isnull().values.any():
        observation = subject_data[feature_to_study].iloc[i:i+128]
        i += 1
    observation = observation.values
    observations = [observation]
    # One window in, one image with three channels (GASF, GADF, MTF) out.
    images = create_gasf_gadf_mtf_compound_images(observations, image_size=128)
    gasf = images[:,:,:,0]
    gadf = images[:,:,:,1]
    mtf = images[:,:,:,2]
    # 2x2 figure: the three fields plus the raw heart-rate window.
    plt.figure(figsize=(8, 8))
    plt.subplot(221)
    plt.imshow(gasf[0], cmap='rainbow')
    plt.title("Gramian Angular Summation Field", fontsize=8)
    # Fully transparent tick labels hide the axis numbers without
    # removing the tick marks themselves.
    plt.tick_params(axis='x', colors=(0, 0, 0, 0))
    plt.tick_params(axis='y', colors=(0, 0, 0, 0))
    plt.subplot(222)
    plt.imshow(gadf[0], cmap='rainbow')
    plt.title("Gramian Angular Difference Field", fontsize=8)
    plt.tick_params(axis='x', colors=(0, 0, 0, 0))
    plt.tick_params(axis='y', colors=(0, 0, 0, 0))
    plt.subplot(223)
    plt.imshow(mtf[0], cmap='rainbow')
    plt.title("Markov Transition Field", fontsize=8)
    plt.tick_params(axis='x', colors=(0, 0, 0, 0))
    plt.tick_params(axis='y', colors=(0, 0, 0, 0))
    plt.subplot(224)
    plt.plot(observation)
    plt.title("Heart Rate", fontsize=8)
    plt.suptitle("Fields generated for a window of heart rate data")
    plt.show()
|
TobCar/delirium
|
demo_compound_images.py
|
demo_compound_images.py
|
py
| 3,064 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "pyts.image.GASF",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "pyts.image.GADF",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "pyts.image.MTF",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "numpy.stack",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "splitting_data.get_subject_data",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 60,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.subplot",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 62,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.imshow",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 63,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.title",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 64,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.tick_params",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 65,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.tick_params",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 66,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.subplot",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 68,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.imshow",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 69,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.title",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 70,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.tick_params",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 71,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.tick_params",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 72,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.subplot",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 74,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.imshow",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 75,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.title",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 76,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.tick_params",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 77,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.tick_params",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 78,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.subplot",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 80,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 81,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.title",
"line_number": 82,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 82,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.suptitle",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 84,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 86,
"usage_type": "name"
}
] |
23121624875
|
from flask import render_template, flash
from app.giturl_class.url_form import UrlForm
from app.giturl_class import bp
import json
@bp.route('/index', methods = ['GET', 'POST'])
def urlPage():
    """Render the git-URL classification page.

    On a valid POST, load the precomputed classification results from
    data/output.json and pass each section to the template; on GET (or an
    invalid form) every section renders as None.
    """
    form = UrlForm()
    sections = {"citation": None, "installation": None,
                "invocation": None, "description": None}
    if form.validate_on_submit():
        flash("Classifying data")
        with open('data/output.json') as json_file:
            data = json.load(json_file)
        # The JSON keys carry a ".sk" suffix (e.g. "citation.sk").
        for key in sections:
            sections[key] = data[key + '.sk']
    return render_template('giturl_class/giturl.html',
                           form=form,
                           citation=sections["citation"],
                           installation=sections["installation"],
                           invocation=sections["invocation"],
                           description=sections["description"])
@bp.route('/about', methods = ['GET'])
def aboutPage():
    """Render the static about page."""
    page = render_template('aboutpage/aboutpage.html')
    return page
@bp.route('/help', methods = ['GET'])
def helpPage():
    """Render the static help page."""
    page = render_template('helppage/helppage.html')
    return page
|
quiteaniceguy/SM2KG-WebApp
|
app/giturl_class/routes.py
|
routes.py
|
py
| 1,182 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "app.giturl_class.url_form.UrlForm",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "flask.flash",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "json.load",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "app.giturl_class.bp.route",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "app.giturl_class.bp",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "flask.render_template",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "app.giturl_class.bp.route",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "app.giturl_class.bp",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "flask.render_template",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "app.giturl_class.bp.route",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "app.giturl_class.bp",
"line_number": 36,
"usage_type": "name"
}
] |
19990844115
|
from django.conf.urls import url
from . import views, verifycode
urlpatterns = [
    url(r'^$', views.index),
    # Generate a captcha image
    url(r'^verifycode/$', verifycode.Verifycode),
    # Captcha input demo: one view renders the form, the other checks the code
    url(r'^verifycodeinput/$', views.verifycodeinput),
    url(r'^verifycodecheck/$', views.checkcode),
    # Reverse URL resolution (this targets hyperlinks -- do not mix it up)
    # Working version:
    # url(r'^$', views.student),
    # url(r'^return/(\d+)/$', views.retu, name='return'),
    # This pair failed at first; it works once the pair above works
    url(r'^student/$', views.student),
    url(r'^student/return/(\d+)/$', views.retu, name='return'),
    # Template inheritance demo
    url(r"^main/$", views.main),
]
|
Evanavevan/Django_Project
|
Project3/myApp/urls.py
|
urls.py
|
py
| 700 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "django.conf.urls.url",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "django.conf.urls.url",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "django.conf.urls.url",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "django.conf.urls.url",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "django.conf.urls.url",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "django.conf.urls.url",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "django.conf.urls.url",
"line_number": 25,
"usage_type": "call"
}
] |
19065408732
|
from confluent_kafka import Consumer
import redis
import time
from datetime import datetime
################
# Redis connection used as the sink for consumed messages
# (keyed by a running message index; see main()).
r = redis.Redis(host='localhost', port=6379)
# Kafka consumer for the local broker; 'earliest' replays the topic from
# the beginning for a new consumer group.
c=Consumer({'bootstrap.servers':'localhost:9092','group.id':'python-consumer','auto.offset.reset':'earliest'})
print('Available topics to consume: ', c.list_topics().topics)
c.subscribe(['Tragos'])
################
def main():
    """Consume messages from the subscribed topic and mirror them into Redis.

    Runs forever; each message body is stored under an incrementing integer
    key. The consumer is closed when the loop exits (e.g. KeyboardInterrupt).
    """
    i = 0
    try:
        while True:
            msg = c.poll(1.0)  # poll timeout in seconds
            dt = datetime.now()
            time_p = datetime.timestamp(dt)
            if msg is None:
                continue
            if msg.error():
                print('Error: {}'.format(msg.error()))
                continue
            data = msg.value().decode('utf-8')
            print(data)
            r.set(i, data)
            i += 1
            print("tiempo mensaje: ", time_p)
    finally:
        # Bug fix: c.close() was unreachable after the infinite loop; closing
        # in a finally block lets the consumer leave the group cleanly.
        c.close()


if __name__ == '__main__':
    main()
|
Failedvixo/SD-Tarea2
|
kafka_consumer.py
|
kafka_consumer.py
|
py
| 857 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "redis.Redis",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "confluent_kafka.Consumer",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.timestamp",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 22,
"usage_type": "name"
}
] |
14205921043
|
import collections
class Node:
    """A vertex in the adjacency-list graph built by make_graph."""
    def __init__(self, value):
        # 1-based label of this node
        self.value = value
        # Node objects adjacent to this one
        self.neighbours = []
def make_graph(n_of_nodes, edges):
    """Build an undirected graph with 1-based node labels.

    :param n_of_nodes: number of vertices to create.
    :param edges: iterable of (u, v) pairs of 1-based node labels.
    :return: list of Node objects, where index value-1 holds the node.
    """
    nodes = [Node(label + 1) for label in range(n_of_nodes)]
    for u, v in edges:
        # Undirected: record the edge in both adjacency lists.
        nodes[u - 1].neighbours.append(nodes[v - 1])
        nodes[v - 1].neighbours.append(nodes[u - 1])
    return nodes
def calculate_distances_with_bfs(starting_node, n_of_nodes):
    """Breadth-first search where every edge has weight 6.

    :param starting_node: Node to start from.
    :param n_of_nodes: total number of nodes in the graph.
    :return: list where index value-1 holds the distance from the start,
        or -1 for unreachable nodes.
    """
    distances = [-1] * n_of_nodes
    seen = set()
    frontier = collections.deque([(starting_node, 0)])
    while frontier:
        node, dist = frontier.popleft()
        if node in seen:
            continue
        seen.add(node)
        distances[node.value - 1] = dist
        for neighbour in node.neighbours:
            frontier.append((neighbour, dist + 6))
    return distances
def get_distances(n_of_nodes, edges, starting_node_value):
    """BFS distances (edge weight 6) from a start node to all other nodes.

    The start node's own zero entry is removed, so the result has
    n_of_nodes - 1 values, with -1 marking unreachable nodes.
    """
    graph = make_graph(n_of_nodes, edges)
    start = graph[starting_node_value - 1]
    result = calculate_distances_with_bfs(start, n_of_nodes)
    result.remove(0)
    return result
|
jdalbosco/hackerrank
|
preparation-kits/one-week/d6-mock_test.py
|
d6-mock_test.py
|
py
| 1,287 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "collections.deque",
"line_number": 25,
"usage_type": "call"
}
] |
13922774332
|
import requests
from authid_agent_client.listeners.request_listener import RequestListener
DEFAULT_HOST = "localhost"
DEFAULT_PORT = 8080

# All endpoints hang off the agent's versioned API prefix.
API_PATH = "/api/v0.0.1/"
IDS_PATH = API_PATH + "ids/"
PROCESSOR_KEYS_PATH = API_PATH + "processorKeys/"
REQUESTS_PATH = API_PATH + "requests/"
ADDRESSES_PATH = API_PATH + "addresses/"
TRANSFER_PATH = API_PATH + "ids:transfer/"
CHALLENGES_PATH = API_PATH + "challenges/"
SIGN_CHALLENGE_PATH = API_PATH + "challenges:sign"
VERIFY_CHALLENGE_PATH = API_PATH + "challenges:verify"
VERIFY_CERTS_PATH = API_PATH + "certs:verify"
SIGN_CERT_PATH = API_PATH + "certs:sign"


class AuthIDAgentClient:
    """HTTP client for an authID agent's REST API.

    Every public method returns a ``(status_code, parsed_json_body)`` tuple.
    Methods that start an asynchronous agent-side operation (registration,
    transfer, key generation, signing) also attach a RequestListener that
    polls the returned ``requestID`` and reports through ``request_callback``.
    """

    def __init__(self, host: str = DEFAULT_HOST, port: int = DEFAULT_PORT,
                 request_callback=None):
        """Precompute all endpoint URLs for the agent at ``host:port``.

        Fix: ``port`` was annotated ``str`` although its default
        (``DEFAULT_PORT``) is an int; ``int`` matches actual usage and the
        ``str(port)`` conversion below still accepts either.
        """
        self.__host = host
        self.__port = port
        self.__base_url = "http://" + host + ":" + str(port)
        self.__ids_url = self.__base_url + IDS_PATH
        self.__requests_url = self.__base_url + REQUESTS_PATH
        self.__addresses_url = self.__base_url + ADDRESSES_PATH
        self.__transfer_url = self.__base_url + TRANSFER_PATH
        self.__processor_keys_path = self.__base_url + PROCESSOR_KEYS_PATH
        self.__challenges_path = self.__base_url + CHALLENGES_PATH
        self.__sign_challenge_path = self.__base_url + SIGN_CHALLENGE_PATH
        self.__verify_challenge_path = self.__base_url + VERIFY_CHALLENGE_PATH
        self.__verify_certs_path = self.__base_url + VERIFY_CERTS_PATH
        self.__sign_cert_path = self.__base_url + SIGN_CERT_PATH
        self.__request_callback = request_callback

    def get_authid(self, authid: str):
        """Fetch the record registered for ``authid``."""
        request_url = self.__ids_url + authid
        request = requests.get(request_url)
        return request.status_code, request.json()

    def register_authid(self, id: str, protocol: str, address: str, fee: str):
        """Register a new ID; on HTTP 200 a listener polls the async request."""
        request_url = self.__ids_url + id
        request = requests.post(request_url, {"protocol": protocol, "address": address, "fee": fee})
        if request.status_code == 200:
            self.add_request_listener(request.json()["requestID"])
        return request.status_code, request.json()

    def transfer_authid(self, id: str, protocol: str, address: str):
        """Transfer an existing ID to a new protocol/address pair."""
        request = requests.post(self.__transfer_url, {"id": id, "protocol": protocol, "address": address})
        if request.status_code == 200:
            self.add_request_listener(request.json()["requestID"])
        return request.status_code, request.json()

    def generate_processor_keys(self, id: str):
        """Ask the agent to generate processor keys for ``id``."""
        request_url = self.__processor_keys_path + id
        request = requests.post(request_url)
        if request.status_code == 200:
            self.add_request_listener(request.json()["requestID"])
        return request.status_code, request.json()

    def new_address(self, protocol: str):
        """Create a new address for ``protocol``."""
        # NOTE(review): ADDRESSES_PATH already ends in '/', so this builds a
        # double slash ("...addresses//<protocol>"); presumably the agent
        # tolerates it -- confirm before normalizing.
        request_url = self.__addresses_url + "/" + protocol
        request = requests.post(request_url)
        if request.status_code == 200:
            self.add_request_listener(request.json()["requestID"])
        return request.status_code, request.json()

    # --- the authentication functions -----------------------------------

    def create_challenge(self, challenger_id: str, receiver_id: str):
        """Create a challenge from challenger to receiver (synchronous)."""
        request = requests.post(self.__challenges_path,
                                params={"challengerID": challenger_id, "receiverID": receiver_id},
                                headers={"Content-Type": "application/json"})
        return request.status_code, request.json()

    def sign_challenge(self, challenge: dict):
        """Sign a challenge; on HTTP 200 a listener polls the async request."""
        request = requests.post(self.__sign_challenge_path, json=challenge)
        if request.status_code == 200:
            self.add_request_listener(request.json()["requestID"])
        return request.status_code, request.json()

    def verify_challenge(self, signed_challenge: dict):
        """Verify a signed challenge (synchronous)."""
        request = requests.post(self.__verify_challenge_path, json=signed_challenge)
        return request.status_code, request.json()

    def verify_cert(self, cert: dict):
        """Verify a certificate (synchronous)."""
        request = requests.post(self.__verify_certs_path, json=cert)
        return request.status_code, request.json()

    def sign_cert(self, authid: str, cert: dict):
        """Sign a certificate with ``authid``'s key (async on success)."""
        request = requests.post(self.__sign_cert_path, params={"id": authid}, json=cert)
        if request.status_code == 200:
            self.add_request_listener(request.json()["requestID"])
        return request.status_code, request.json()

    def add_request_listener(self, request_id: str):
        """Start a background RequestListener polling ``request_id``."""
        listener = RequestListener(request_id, self.__requests_url, self.__request_callback)
        listener.start()
|
OnePair/authid-agent-client-py
|
authid_agent_client/authid_agent_client.py
|
authid_agent_client.py
|
py
| 4,618 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "requests.get",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "requests.post",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "requests.post",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "requests.post",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "requests.post",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "requests.post",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "requests.post",
"line_number": 94,
"usage_type": "call"
},
{
"api_name": "requests.post",
"line_number": 102,
"usage_type": "call"
},
{
"api_name": "requests.post",
"line_number": 108,
"usage_type": "call"
},
{
"api_name": "requests.post",
"line_number": 113,
"usage_type": "call"
},
{
"api_name": "authid_agent_client.listeners.request_listener.RequestListener",
"line_number": 121,
"usage_type": "call"
}
] |
26624814566
|
#
# SERMEPA / ServiRed payments module for Satchmo
#
# Author: Michal Salaban <michal (at) salaban.info>
# with a great help of Fluendo S.A., Barcelona
#
# Based on "Guia de comercios TPV Virtual SIS" ver. 5.18, 15/11/2008, SERMEPA
# For more information about integration look at http://www.sermepa.es/
#
# TODO: SERMEPA interface provides possibility of recurring payments, which
# could be probably used for SubscriptionProducts. This module doesn't support it.
#
from datetime import datetime
from decimal import Decimal
from django.core import urlresolvers
from django.http import HttpResponse, HttpResponseRedirect, HttpResponseNotFound, HttpResponseBadRequest
from django.shortcuts import render_to_response
from django.template import RequestContext
from django.utils.translation import ugettext_lazy as _
from django.views.decorators.cache import never_cache
from livesettings import config_get_group, config_value
from payment.utils import get_processor_by_key
from payment.views import payship
from satchmo_store.shop.models import Order, Cart
from satchmo_store.shop.satchmo_settings import get_satchmo_setting
from satchmo_utils.dynamic import lookup_url, lookup_template
import logging
try:
from hashlib import sha1
except ImportError:
# python < 2.5
from sha import sha as sha1
log = logging.getLogger()
def pay_ship_info(request):
    """Payment/shipping checkout step for the SERMEPA module.

    Delegates to the generic ``payship`` view with this module's settings
    group and template; the form/flow logic lives in ``payship``.
    """
    return payship.base_pay_ship_info(
        request,
        config_get_group('PAYMENT_SERMEPA'), payship.simple_pay_ship_process_form,
        'shop/checkout/sermepa/pay_ship.html'
    )
# Checkout pages must never be cached; wrapping (rather than decorating)
# matches the style used elsewhere in this file.
pay_ship_info = never_cache(pay_ship_info)
def _resolve_local_url(payment_module, cfgval, ssl=False):
    """Resolve a configured URL setting to an absolute URL.

    The configured value is first treated as a named URL pattern; when it
    does not reverse to anything, it is returned verbatim on the assumption
    that it is already a literal URL.
    """
    raw_value = cfgval.value
    try:
        return lookup_url(payment_module, raw_value, include_server=True, ssl=ssl)
    except urlresolvers.NoReverseMatch:
        # Not a resolvable view name -- use the configured value as-is.
        return raw_value
def confirm_info(request):
    """Render the SERMEPA confirmation page with the signed payment form.

    Validates the order and cart, selects live/test credentials, builds the
    SHA-1 transaction signature required by the SERMEPA TPV, and renders the
    confirm template whose form posts directly to SERMEPA's gateway.
    """
    payment_module = config_get_group('PAYMENT_SERMEPA')
    try:
        order = Order.objects.from_request(request)
    except Order.DoesNotExist:
        # No order in the session: send the customer back to checkout step 1.
        url = lookup_url(payment_module, 'satchmo_checkout-step1')
        return HttpResponseRedirect(url)
    tempCart = Cart.objects.from_request(request)
    if tempCart.numItems == 0:
        template = lookup_template(payment_module, 'shop/checkout/empty_cart.html')
        return render_to_response(template,
            context_instance=RequestContext(request))
    # Check if the order is still valid
    if not order.validate(request):
        context = RequestContext(request, {'message': _('Your order is no longer valid.')})
        return render_to_response('shop/404.html', context_instance=context)
    # Check if we are in test or real mode
    live = payment_module.LIVE.value
    if live:
        post_url = payment_module.POST_URL.value
        signature_code = payment_module.MERCHANT_SIGNATURE_CODE.value
        terminal = payment_module.MERCHANT_TERMINAL.value
    else:
        post_url = payment_module.POST_TEST_URL.value
        signature_code = payment_module.MERCHANT_TEST_SIGNATURE_CODE.value
        terminal = payment_module.MERCHANT_TEST_TERMINAL.value
    # SERMEPA system does not accept multiple payment attempts with the same ID, even
    # if the previous one has never been finished. The worse is that it does not display
    # any message which could be understood by an end user.
    #
    # If user goes to SERMEPA page and clicks 'back' button (e.g. to correct contact data),
    # the next payment attempt will be rejected.
    #
    # To provide higher probability of ID uniqueness, we add mm:ss timestamp part
    # to the order id, separated by 'T' character in the following way:
    #
    #   ID: oooooooTmmss
    #   c:  123456789012
    #
    # The Satchmo's Order number is therefore limited to 10 million - 1.
    now = datetime.now()
    xchg_order_id = "%07dT%02d%02d" % (order.id, now.minute, now.second)
    amount = "%d" % (order.balance * 100,)    # in cents
    # Signature input is the concatenation mandated by the SERMEPA guide
    # (amount + order + FUC + currency + secret code).
    signature_data = ''.join(
            map(str, (
                    amount,
                    xchg_order_id,
                    payment_module.MERCHANT_FUC.value,
                    payment_module.MERCHANT_CURRENCY.value,
                    signature_code,
                    )
               )
            )
    signature = sha1(signature_data).hexdigest()
    template = lookup_template(payment_module, 'shop/checkout/sermepa/confirm.html')
    url_callback = _resolve_local_url(payment_module, payment_module.MERCHANT_URL_CALLBACK, ssl=get_satchmo_setting('SSL'))
    url_ok = _resolve_local_url(payment_module, payment_module.MERCHANT_URL_OK)
    url_ko = _resolve_local_url(payment_module, payment_module.MERCHANT_URL_KO)
    ctx = {
        'live': live,
        'post_url': post_url,
        'MERCHANT_CURRENCY': payment_module.MERCHANT_CURRENCY.value,
        'MERCHANT_FUC': payment_module.MERCHANT_FUC.value,
        'terminal': terminal,
        'MERCHANT_TITULAR': payment_module.MERCHANT_TITULAR.value,
        'url_callback': url_callback,
        'url_ok': url_ok,
        'url_ko': url_ko,
        'order': order,
        'xchg_order_id' : xchg_order_id,
        'amount': amount,
        'signature': signature,
        'default_view_tax': config_value('TAX', 'DEFAULT_VIEW_TAX'),
    }
    return render_to_response(template, ctx, context_instance=RequestContext(request))
# Never cache the confirmation page: the signature embeds a timestamped ID.
confirm_info = never_cache(confirm_info)
def notify_callback(request):
    """Server-to-server (IPN) notification callback from SERMEPA.

    Verifies the SHA-1 signature, merchant code (FUC) and terminal of the
    POSTed transaction, then records the payment, marks the order and
    empties the customer's carts.  Error responses (400/404) tell SERMEPA
    the notification was rejected; a plain 200 acknowledges it.
    """
    payment_module = config_get_group('PAYMENT_SERMEPA')
    if payment_module.LIVE.value:
        log.debug("Live IPN on %s", payment_module.KEY.value)
        signature_code = payment_module.MERCHANT_SIGNATURE_CODE.value
        terminal = payment_module.MERCHANT_TERMINAL.value
    else:
        log.debug("Test IPN on %s", payment_module.KEY.value)
        signature_code = payment_module.MERCHANT_TEST_SIGNATURE_CODE.value
        terminal = payment_module.MERCHANT_TEST_TERMINAL.value
    data = request.POST
    log.debug("Transaction data: " + repr(data))
    try:
        # Recompute the signature exactly as confirm_info built it
        # (amount + order + FUC + currency + response + secret code).
        sig_data = "%s%s%s%s%s%s" % (
                data['Ds_Amount'],
                data['Ds_Order'],
                data['Ds_MerchantCode'],
                data['Ds_Currency'],
                data['Ds_Response'],
                signature_code
                )
        sig_calc = sha1(sig_data).hexdigest()
        if sig_calc != data['Ds_Signature'].lower():
            log.error("Invalid signature. Received '%s', calculated '%s'." % (data['Ds_Signature'], sig_calc))
            return HttpResponseBadRequest("Checksum error")
        if data['Ds_MerchantCode'] != payment_module.MERCHANT_FUC.value:
            log.error("Invalid FUC code: %s" % data['Ds_MerchantCode'])
            return HttpResponseNotFound("Unknown FUC code")
        if int(data['Ds_Terminal']) != int(terminal):
            log.error("Invalid terminal number: %s" % data['Ds_Terminal'])
            return HttpResponseNotFound("Unknown terminal number")
        # TODO: fields Ds_Currency, Ds_SecurePayment may be worth checking
        # Strip the "Tmmss" uniqueness suffix added by confirm_info to
        # recover the real Satchmo order number.
        xchg_order_id = data['Ds_Order']
        try:
            order_id = xchg_order_id[:xchg_order_id.index('T')]
        except ValueError:
            log.error("Incompatible order ID: '%s'" % xchg_order_id)
            return HttpResponseNotFound("Order not found")
        try:
            order = Order.objects.get(id=order_id)
        except Order.DoesNotExist:
            log.error("Received data for nonexistent Order #%s" % order_id)
            return HttpResponseNotFound("Order not found")
        amount = Decimal(data['Ds_Amount']) / Decimal('100')    # is in cents, divide it
        # Per SERMEPA: response codes > 100 signal a declined/failed payment.
        if int(data['Ds_Response']) > 100:
            log.info("Response code is %s. Payment not accepted." % data['Ds_Response'])
            return HttpResponse()
    except KeyError:
        log.error("Received incomplete SERMEPA transaction data")
        return HttpResponseBadRequest("Incomplete data")
    # success
    order.add_status(status='New', notes=u"Paid through SERMEPA.")
    processor = get_processor_by_key('PAYMENT_SERMEPA')
    payment = processor.record_payment(
        order=order,
        amount=amount,
        transaction_id=data['Ds_AuthorisationCode'])
    # empty customer's carts
    for cart in Cart.objects.filter(customer=order.contact):
        cart.empty()
    return HttpResponse()
|
dokterbob/satchmo
|
satchmo/apps/payment/modules/sermepa/views.py
|
views.py
|
py
| 8,365 |
python
|
en
|
code
| 30 |
github-code
|
6
|
[
{
"api_name": "logging.getLogger",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "payment.views.payship.base_pay_ship_info",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "payment.views.payship",
"line_number": 37,
"usage_type": "name"
},
{
"api_name": "livesettings.config_get_group",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "payment.views.payship.simple_pay_ship_process_form",
"line_number": 39,
"usage_type": "attribute"
},
{
"api_name": "payment.views.payship",
"line_number": 39,
"usage_type": "name"
},
{
"api_name": "django.views.decorators.cache.never_cache",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "satchmo_utils.dynamic.lookup_url",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "django.core.urlresolvers.NoReverseMatch",
"line_number": 48,
"usage_type": "attribute"
},
{
"api_name": "django.core.urlresolvers",
"line_number": 48,
"usage_type": "name"
},
{
"api_name": "livesettings.config_get_group",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "satchmo_store.shop.models.Order.objects.from_request",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "satchmo_store.shop.models.Order.objects",
"line_number": 56,
"usage_type": "attribute"
},
{
"api_name": "satchmo_store.shop.models.Order",
"line_number": 56,
"usage_type": "name"
},
{
"api_name": "satchmo_store.shop.models.Order.DoesNotExist",
"line_number": 57,
"usage_type": "attribute"
},
{
"api_name": "satchmo_store.shop.models.Order",
"line_number": 57,
"usage_type": "name"
},
{
"api_name": "satchmo_utils.dynamic.lookup_url",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "django.http.HttpResponseRedirect",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "satchmo_store.shop.models.Cart.objects.from_request",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "satchmo_store.shop.models.Cart.objects",
"line_number": 61,
"usage_type": "attribute"
},
{
"api_name": "satchmo_store.shop.models.Cart",
"line_number": 61,
"usage_type": "name"
},
{
"api_name": "satchmo_utils.dynamic.lookup_template",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render_to_response",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "django.template.RequestContext",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "django.template.RequestContext",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "django.utils.translation.ugettext_lazy",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render_to_response",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 97,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 97,
"usage_type": "name"
},
{
"api_name": "sha.sha",
"line_number": 112,
"usage_type": "call"
},
{
"api_name": "satchmo_utils.dynamic.lookup_template",
"line_number": 114,
"usage_type": "call"
},
{
"api_name": "satchmo_store.shop.satchmo_settings.get_satchmo_setting",
"line_number": 116,
"usage_type": "call"
},
{
"api_name": "livesettings.config_value",
"line_number": 134,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render_to_response",
"line_number": 136,
"usage_type": "call"
},
{
"api_name": "django.template.RequestContext",
"line_number": 136,
"usage_type": "call"
},
{
"api_name": "django.views.decorators.cache.never_cache",
"line_number": 137,
"usage_type": "call"
},
{
"api_name": "livesettings.config_get_group",
"line_number": 140,
"usage_type": "call"
},
{
"api_name": "sha.sha",
"line_number": 160,
"usage_type": "call"
},
{
"api_name": "django.http.HttpResponseBadRequest",
"line_number": 163,
"usage_type": "call"
},
{
"api_name": "django.http.HttpResponseNotFound",
"line_number": 166,
"usage_type": "call"
},
{
"api_name": "django.http.HttpResponseNotFound",
"line_number": 169,
"usage_type": "call"
},
{
"api_name": "django.http.HttpResponseNotFound",
"line_number": 177,
"usage_type": "call"
},
{
"api_name": "satchmo_store.shop.models.Order.objects.get",
"line_number": 179,
"usage_type": "call"
},
{
"api_name": "satchmo_store.shop.models.Order.objects",
"line_number": 179,
"usage_type": "attribute"
},
{
"api_name": "satchmo_store.shop.models.Order",
"line_number": 179,
"usage_type": "name"
},
{
"api_name": "satchmo_store.shop.models.Order.DoesNotExist",
"line_number": 180,
"usage_type": "attribute"
},
{
"api_name": "satchmo_store.shop.models.Order",
"line_number": 180,
"usage_type": "name"
},
{
"api_name": "django.http.HttpResponseNotFound",
"line_number": 182,
"usage_type": "call"
},
{
"api_name": "decimal.Decimal",
"line_number": 183,
"usage_type": "call"
},
{
"api_name": "django.http.HttpResponse",
"line_number": 186,
"usage_type": "call"
},
{
"api_name": "django.http.HttpResponseBadRequest",
"line_number": 189,
"usage_type": "call"
},
{
"api_name": "payment.utils.get_processor_by_key",
"line_number": 192,
"usage_type": "call"
},
{
"api_name": "payment.utils",
"line_number": 193,
"usage_type": "name"
},
{
"api_name": "satchmo_store.shop.models.Cart.objects.filter",
"line_number": 198,
"usage_type": "call"
},
{
"api_name": "satchmo_store.shop.models.Cart.objects",
"line_number": 198,
"usage_type": "attribute"
},
{
"api_name": "satchmo_store.shop.models.Cart",
"line_number": 198,
"usage_type": "name"
},
{
"api_name": "django.http.HttpResponse",
"line_number": 200,
"usage_type": "call"
}
] |
39122527665
|
import yaml, tempfile, os, libUi
import PinshCmd, ConfigField
from bombardier_core.static_data import OK, FAIL
from SystemStateSingleton import SystemState
# Process-wide CLI session state (singleton) shared by command objects.
system_state = SystemState()
class CreateType(PinshCmd.PinshCmd):
    '''A thing that can be created.

    Base command: fetches a server-side template for the object type,
    opens it in vim for the user, and posts the edited YAML back on
    confirmation.
    '''
    def __init__(self, name, help_text):
        PinshCmd.PinshCmd.__init__(self, name, help_text)
        # Subclasses replace this with the ConfigField for their object type.
        self.config_field = None
        self.cmd_owner = 1
    def cmd(self, command_line):
        '''Create a thing that the user is interested in'''
        if command_line.no_flag:
            # A "no create ..." form makes no sense for this command.
            return FAIL, []
        if len(command_line) < 3:
            return FAIL, ["Incomplete command."]
        # Default object data (YAML string) from the server.
        conf_str = self.config_field.get_default_data()
        #conf_str = yaml.dump(current_dict, default_flow_style=False)
        # Write the template to a temp .yml file and let the user edit it.
        file_descriptor, file_name = tempfile.mkstemp(suffix=".yml", text=True)
        file_handle = os.fdopen(file_descriptor, 'w+b')
        file_handle.write(conf_str)
        file_handle.close()
        os.system("vim %s" % file_name)
        # NOTE(review): yaml.load without an explicit Loader can construct
        # arbitrary objects; acceptable for a locally edited file, unsafe on
        # untrusted input.
        post_data = yaml.load(open(file_name).read())
        submit = libUi.ask_yes_no("Commit changes to server", libUi.YES)
        if submit:
            _status, output = self.config_field.post_specific_data(command_line, 2, post_data)
            os.unlink(file_name)
            return OK, [output]
        else:
            # Keep the temp file around so the user's edits are not lost.
            msg = "Discarded changes. Edits can be found here: %s" % file_name
            return OK, [msg]
class Machine(CreateType):
    'Create a default machine configuration data from the server'
    def __init__(self):
        CreateType.__init__(self, "machine", "machine\tcreate a new machine configuration")
        # The machine template field doubles as the completion child.
        field = ConfigField.ConfigField(data_type=ConfigField.MACHINE, new=True)
        self.config_field = field
        self.children = [field]
class Include(CreateType):
    'Create an include file on the server'
    def __init__(self):
        CreateType.__init__(self, "include", "include\tcreate a shared include file")
        # The include template field doubles as the completion child.
        field = ConfigField.ConfigField(data_type=ConfigField.INCLUDE, new=True)
        self.config_field = field
        self.children = [field]
class User(CreateType):
    'Create a new Bombardier login user'
    def __init__(self):
        CreateType.__init__(self, "user", "user\tcreate a new user to log in to Bombardier")
        # The user template field doubles as the completion child.
        field = ConfigField.ConfigField(data_type=ConfigField.USER, new=True)
        self.config_field = field
        self.children = [field]
class Package(CreateType):
    'Create new package metadata on the server'
    def __init__(self):
        CreateType.__init__(self, "package", "package\tcreate new package metadata")
        # The package template field doubles as the completion child.
        field = ConfigField.ConfigField(data_type=ConfigField.PACKAGE, new=True)
        self.config_field = field
        self.children = [field]
class Bom(CreateType):
    'create new bill-of-materials ("bom") file from the server'
    def __init__(self):
        CreateType.__init__(self, "bom", "bom\tcreate a bill of materials")
        # The bom template field doubles as the completion child.
        field = ConfigField.ConfigField(data_type=ConfigField.BOM, new=True)
        self.config_field = field
        self.children = [field]
class Create(PinshCmd.PinshCmd):
    '''Top-level "create" command that dispatches to per-type creators.'''
    def __init__(self):
        PinshCmd.PinshCmd.__init__(self, "create")
        self.help_text = "create\tcreate a new system component"
        # Instantiate in the original order; the children list controls the
        # order types appear in completion/help.
        machine, include = Machine(), Include()
        package, user, bom = Package(), User(), Bom()
        self.children = [machine, include, bom, package, user]
        self.cmd_owner = 1
|
psbanka/bombardier
|
cli/lib/Create.py
|
Create.py
|
py
| 3,460 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "SystemStateSingleton.SystemState",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "PinshCmd.PinshCmd",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "PinshCmd.PinshCmd.__init__",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "PinshCmd.PinshCmd",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "bombardier_core.static_data.FAIL",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "bombardier_core.static_data.FAIL",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "tempfile.mkstemp",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "os.fdopen",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "os.system",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "yaml.load",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "libUi.ask_yes_no",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "libUi.YES",
"line_number": 29,
"usage_type": "attribute"
},
{
"api_name": "os.unlink",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "bombardier_core.static_data.OK",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "bombardier_core.static_data.OK",
"line_number": 36,
"usage_type": "name"
},
{
"api_name": "ConfigField.ConfigField",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "ConfigField.MACHINE",
"line_number": 42,
"usage_type": "attribute"
},
{
"api_name": "ConfigField.ConfigField",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "ConfigField.INCLUDE",
"line_number": 49,
"usage_type": "attribute"
},
{
"api_name": "ConfigField.ConfigField",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "ConfigField.USER",
"line_number": 56,
"usage_type": "attribute"
},
{
"api_name": "ConfigField.ConfigField",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "ConfigField.PACKAGE",
"line_number": 63,
"usage_type": "attribute"
},
{
"api_name": "ConfigField.ConfigField",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "ConfigField.BOM",
"line_number": 70,
"usage_type": "attribute"
},
{
"api_name": "PinshCmd.PinshCmd",
"line_number": 73,
"usage_type": "attribute"
},
{
"api_name": "PinshCmd.PinshCmd.__init__",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "PinshCmd.PinshCmd",
"line_number": 76,
"usage_type": "attribute"
}
] |
37108029587
|
"""2 question 6 sprint"""
import json
import logging
logging.basicConfig(filename='app.log', filemode='w', format='%(name)s - %(levelname)s - %(message)s')
def parse_user(output_file, *input_files):
    """Merge user records from several JSON files into ``output_file``.

    Every decoded JSON object (at any nesting level, via ``object_hook``)
    that has a "name" key is collected, deduplicated by name with the first
    occurrence winning.  Missing input files are logged and skipped.

    Fix: the original membership test rescanned ``users_list`` with
    ``list(map(...))`` for every object (O(n^2)); a seen-set makes it O(1)
    per object.  Assumes names are hashable (strings).
    """
    users_list = []
    seen_names = set()

    def get_name(dct):
        # json.load calls this for every decoded object, innermost first.
        if "name" in dct and dct["name"] not in seen_names:
            seen_names.add(dct["name"])
            users_list.append(dct)

    with open(output_file, "w") as res_file:
        for file in input_files:
            try:
                with open(file, 'r') as f:
                    json.load(f, object_hook=get_name)
            except FileNotFoundError:
                logging.error(f"File {file} doesn't exist")
        json.dump(users_list, res_file, indent=4)
if __name__ == '__main__':
    # Demo invocation: merge user1.json and user2.json into user3.json.
    parse_user("user3.json", "user1.json", "user2.json")
|
Misha86/python-online-marathon
|
6_sprint/6_2question.py
|
6_2question.py
|
py
| 803 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "logging.basicConfig",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "json.load",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "logging.error",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "json.dump",
"line_number": 20,
"usage_type": "call"
}
] |
910975330
|
import sys, argparse, os, numpy as np
from horton import __version__, IOData
# All, except underflows, is *not* fine.
# Promote numpy divide/overflow/invalid warnings to exceptions so numerical
# problems fail loudly instead of silently propagating NaN/Inf.
np.seterr(divide='raise', over='raise', invalid='raise')
def parse_args():
    """Parse the horton-convert command line.

    Returns an ``argparse.Namespace`` with ``input`` and ``output`` file
    paths; the format of each file is inferred from its name by IOData.
    """
    parser = argparse.ArgumentParser(prog='horton-convert.py',
        description='Convert between file formats supported in HORTON. This '
                    'only works of the input contains sufficient data for the '
                    'output')
    parser.add_argument('-V', '--version', action='version',
        version="%%(prog)s (HORTON version %s)" % __version__)

    parser.add_argument('input',
        help='The input file. Supported file types are: '
             '*.h5 (HORTON\'s native format), '
             '*.cif (Crystallographic Information File), '
             '*.cp2k.out (Output from a CP2K atom computation), '
             '*.cube (Gaussian cube file), '
             '*.log (Gaussian log file), '
             '*.fchk (Gaussian formatted checkpoint file), '
             '*.molden.input (Molden wavefunction file), '
             '*.mkl (Molekel wavefunction file), '
             '*.wfn (Gaussian/GAMESS wavefunction file), '
             'CHGCAR, LOCPOT or POSCAR (VASP files), '
             '*.xyz (The XYZ format).')
    parser.add_argument('output',
        help='The output file. Supported file types are: '
             '*.h5 (HORTON\'s native format), '
             '*.cif (Crystallographic Information File), '
             '*.cube (Gaussian cube file), '
             '*.molden.input (Molden wavefunction file), '
             'POSCAR (VASP files), '
             '*.xyz (The XYZ format).')

    return parser.parse_args()
def main():
    """Entry point: load the input file and re-write it in the output format."""
    options = parse_args()
    molecule = IOData.from_file(options.input)
    molecule.to_file(options.output)
# Allow use both as a script and as an importable module.
if __name__ == '__main__':
    main()
|
theochem/horton
|
scripts/horton-convert.py
|
horton-convert.py
|
py
| 1,810 |
python
|
en
|
code
| 83 |
github-code
|
6
|
[
{
"api_name": "numpy.seterr",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "horton.__version__",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "horton.IOData.from_file",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "horton.IOData",
"line_number": 45,
"usage_type": "name"
}
] |
4582056106
|
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
import seaborn as sns
from scipy import stats
import collections
import time
from sklearn import cluster
from sklearn.metrics import adjusted_rand_score
import scipy as sp
from tqdm import tqdm
from sklearn.manifold import MDS
from run_dist_mat import *
from scipy.cluster.hierarchy import dendrogram, linkage
import itertools
from mpl_toolkits.mplot3d import Axes3D
from multiprocessing import Pool
from itertools import repeat
def get_sex_of_cell(cell_data):
    """Infer the sex of a cell from its sex-chromosome reads.

    ``cell_data`` is a DataFrame with a ``chr`` column where code 20 is the
    X chromosome and 21 is the Y chromosome (presumably mouse numbering --
    confirm).  Returns 'm' when more than one Y read is present, else 'f'.

    Fix: the original wrote ``assert cond, print(...)`` -- ``print`` returns
    None, so the assertion message was always None; the message string is
    now passed directly.
    """
    assert cell_data.loc[cell_data.chr == 20].shape[0] > 1, \
        "data matrix must have sex chromosomes"
    if cell_data.loc[cell_data.chr == 21].shape[0] > 1:  # >1 Y read -> male
        return 'm'
    return 'f'
def make_groups_by_bins(cell_data, bin_size, cum_lens, include_sex_chromosomes = False):
    """Bin a cell's reads along the genome and average them per (bin, cluster).

    Returns a DataFrame indexed by (genomic bin, pckmeans cluster) with one
    row per expected bin/cluster pair (NaN-filled where a bin has no reads),
    giving every cell a fixed-size representation.  Chromosome codes: 1-19
    autosomes, 20 = X, 21 = Y (presumably mouse -- confirm); ``get_bins``
    comes from run_dist_mat.

    Fix: the final ``else`` raised a bare ValueError followed by an
    unreachable ``print``; the message is now carried by the exception.
    """
    if include_sex_chromosomes == False:
        bins, num_bins_per_chr = get_bins(bin_size, cum_lens, num_chrs = 19)
        num_bins = np.sum(list(num_bins_per_chr.values()))
        cell_data = cell_data.loc[cell_data.chr < 20].copy()
        cell_data['abs_pos'] = -1
        cell_data['abs_pos'] = cell_data.pos.copy() + [cum_lens[ch-1] for ch in cell_data.chr] #encodes the absolute position of the reads along the linear genome
        # Group by (genomic bin, haplotype cluster 0/1) and reindex so every
        # combination is present even when empty.
        groups = cell_data.groupby([pd.cut(cell_data.abs_pos, bins),pd.cut(cell_data.pckmeans_cluster, [-0.1,0.9,2])]).mean().reindex(pd.MultiIndex.from_product([bins[1:], [0,1]]), fill_value = np.nan)
        assert groups.shape[0] == 2 * num_bins
        return groups
    elif include_sex_chromosomes == True:
        cell_data = cell_data.loc[cell_data.chr < 22].copy()
        if get_sex_of_cell(cell_data) == 'f':
            print("female cell")
            bins, num_bins_per_chr = get_bins(bin_size, cum_lens, num_chrs = 20)
            autosome_num_bins = np.sum(list(num_bins_per_chr.values())[0:20]) #sum of all autosome chromosomes
            x_num_bins = num_bins_per_chr[20]
            cell_data = cell_data.loc[cell_data.chr != 21] #getting rid of the noisy y chromosome reads
            assert cell_data.loc[cell_data.chr == 20, 'pckmeans_cluster'].unique().shape[0] == 2, "x chromosome must have 2 clusters"
            assert cell_data.loc[cell_data.chr == 21, 'pckmeans_cluster'].unique().shape[0] == 0, "y chromosome must have no clusters"
            cell_data['abs_pos'] = -1
            cell_data['abs_pos'] = cell_data.pos.copy() + [cum_lens[ch-1] for ch in cell_data.chr] #encodes the absolute position of the reads along the linear genome
            groups = cell_data.groupby([pd.cut(cell_data.abs_pos, bins),pd.cut(cell_data.pckmeans_cluster, [-0.1,0.9,2])]).mean().reindex(pd.MultiIndex.from_product([bins[1:], [0,1]]), fill_value = np.nan)
            assert groups.shape[0] == 2 * autosome_num_bins + x_num_bins
            return groups
        else: #male cells
            assert cell_data.loc[cell_data.chr == 20, 'pckmeans_cluster'].unique().shape[0] == 1, "x chromosome must have 2 clusters in male embryo"
            assert cell_data.loc[cell_data.chr == 21, 'pckmeans_cluster'].unique().shape[0] == 1, "y chromosome must have 2 clusters in male embryo"
            cell_data['abs_pos'] = -1
            cell_data['abs_pos'] = cell_data.pos.copy() + [cum_lens[ch-1] for ch in cell_data.chr] #encodes the absolute position of the reads along the linear genome
            bins, num_bins_per_chr = get_bins(bin_size, cum_lens, num_chrs = 21)
            autosome_num_bins = np.sum(list(num_bins_per_chr.values())[0:20]) #sum of all autosome chromosomes
            x_num_bins = num_bins_per_chr[20]
            y_num_bins = num_bins_per_chr[21]
            # Split the bin edges into autosome / X / Y segments; the sex
            # chromosomes get a single cluster each in male cells.
            autosome_bins = bins[0:autosome_num_bins+1]
            x_bins = bins[autosome_num_bins: autosome_num_bins+x_num_bins+1]
            y_bins = bins[autosome_num_bins+x_num_bins:]
            autosome_chrs = cell_data.loc[cell_data.chr <= 19]
            x_chr = cell_data.loc[cell_data.chr == 20]
            y_chr = cell_data.loc[cell_data.chr == 21]
            autosome_chr_groups = autosome_chrs.groupby([pd.cut(autosome_chrs.abs_pos, autosome_bins),pd.cut(autosome_chrs.pckmeans_cluster, [-0.1,0.9,2])]).mean().reindex(pd.MultiIndex.from_product([autosome_bins[1:], [0,1]]), fill_value = np.nan)
            x_chr_groups = x_chr.groupby([pd.cut(x_chr.abs_pos, x_bins),pd.cut(x_chr.pckmeans_cluster, [-0.5,0.5])]).mean().reindex( pd.MultiIndex.from_product([x_bins[1:], [0]]), fill_value = np.nan)
            y_chr_groups = y_chr.groupby([pd.cut(y_chr.abs_pos, y_bins),pd.cut(y_chr.pckmeans_cluster, [-0.5,0.5])]).mean().reindex(pd.MultiIndex.from_product([y_bins[1:], [0]]), fill_value = np.nan)
            groups = pd.concat([autosome_chr_groups,x_chr_groups, y_chr_groups], axis = 0)
            assert groups.shape[0] == 2 * autosome_num_bins + x_num_bins + y_num_bins
            return groups
    else:
        # Reached only when a non-boolean is passed; previously the message
        # was on an unreachable print after the raise.
        raise ValueError("please indicate whether sex chromosomes should be included or not")
def get_inter_cell_dist(m0, m1):
    """Correlation-based distance between two cells' distance matrices.

    Compares the strict upper triangles (k=1: the zero diagonal would
    artificially inflate the correlation), keeping only entries that are
    non-NaN in *both* matrices.  Returns ``(1 - pearson_r, n_shared)``;
    the distance is NaN when one or fewer entries are shared.
    """
    size = m0.shape[0]
    upper = np.triu_indices(size, 1)  # strict upper triangle, k=1
    assert upper[0].shape[0] == size * (size - 1) / 2
    flat0 = m0[upper]
    flat1 = m1[upper]
    # positions observed in both matrices
    shared = (np.isnan(flat0) + np.isnan(flat1)) == 0
    vals0 = flat0[shared]
    vals1 = flat1[shared]
    # With <= 1 shared entry a correlation is undefined.
    if sum(~np.isnan(vals0)) <= 1:
        corr = np.nan
    else:
        corr = sp.stats.pearsonr(vals0, vals1)[0]
    return 1 - corr, np.sum(shared)
"""wrapper (utility) function. using this to do data parallelism"""
def align_cell_i(cell_id_i, bin_size, sample_from_bin):
# random_state = 500
num_samples = 50
print("aligning cell {}".format(cell_id_i))
bins, num_bins_per_chr = get_bins(bin_size, cum_lens, num_chrs)
cell_i = data.loc[(data.cell_index==cell_id_i) & (data.chr <= num_chrs)].copy()
#encodes the absolute position of the reads along the linear genome--> used for binning
cell_i['abs_pos'] = -1
cell_i['abs_pos'] = cell_i.pos.copy() + [cum_lens[ch-1] for ch in cell_i.chr]
cell_i_dist_mat, _ = pckmeans_get_dist_mat_binned(cell_i, bins, num_bins_per_chr, sample_from_bin)
cell_i_dists = []
cell_i_intersection_sizes = []
cids_after_i = data.loc[data.cell_index >= cell_id_i, 'cell_index'].unique()
for cell_id_j in cids_after_i:
cell_j = data.loc[(data.cell_index==cell_id_j) & (data.chr <= num_chrs)].copy()
cell_j['abs_pos'] = -1
cell_j['abs_pos'] = cell_j.pos.copy() + [cum_lens[ch-1] for ch in cell_j.chr]
cell_j_dist_mat, _ = pckmeans_get_dist_mat_binned(cell_j, bins, num_bins_per_chr, sample_from_bin)
cell_j_dists = []
cell_j_intersection_sizes = []
for sample in range(num_samples): #in order to align cell j with cell i, we run the sequential algorithm on 50 random sequences
order = np.arange(1,20)
np.random.shuffle(order)
#bit_seq is something like x = [0,1,1,1,0,...] of length 19 where x[i]=0 means that in cell j we don't swap the copies of chromosome i. #bin_seq is something like [23,24,12,11,...] which has the actual sequnce of the aligned bins
(dist, intersection_size), bit_seq, bin_seq, _ = get_aligned_inter_cell_dist(cell_i_dist_mat, cell_j_dist_mat, num_bins_per_chr, chr_seq = order) #np.arange(19,0,-1)
cell_j_dists.append(dist)
cell_j_intersection_sizes.append(intersection_size)
cell_i_dists.append(np.min(cell_j_dists))
cell_i_intersection_sizes.append(cell_j_intersection_sizes[np.argmin(cell_j_dists)])
np.save("data/temp/aligned_dist_{}_bin_size_{}_{}_numchrs_{}_cell{}.npy".format(reads_to_include, int(bin_size/1e6), sample_from_bin, num_chrs, cell_id_i),np.array(cell_i_dists))
np.save("data/temp/aligned_dist_{}_intersection_size_bin_size_{}_{}_numchrs_{}_cell{}.npy".format(reads_to_include, int(bin_size/1e6), sample_from_bin, num_chrs, cell_id_i),np.array(cell_i_intersection_sizes))
return
def read_data(clustering_method, reads_to_inlcude):
if clustering_method == "igs":
data = pd.read_csv('data/embryo_data.csv')
data = data.loc[~data.cell_index.isin([ 80., 84., 105., 113.])] #getting rid of cells with less than 150 reads
if reads_to_inlcude == "inliers":
data = data.loc[data.inlier == 1]
elif clustering_method == "pckmeans":
data = pd.read_csv('data/pckmeans_embryo_data.csv')
data = data.loc[~data.cell_index.isin([ 80., 84., 105., 113.])]
if reads_to_inlcude == "inliers":
data = data.loc[data.outlier == 0]
return data
#the order of chromosomes to consider is 0,1,2,3...
"""
finds the best chromosome alignment sequentially, for now considering the chromosomes in the order chr_seq
num_bins_per_chr: dictionary holding the number or bins for each chromosome (first element is 0:0)
{0: 0,
1: 2,
2: 2,
3: 2,
4: 2,
5: 2,...}
num_chrs: the number of chromosomes to align
assumes the distance matrices to have the following order:
chr 1 cluster 0 bin 1
chr 1 cluster 0 bin 2
chr 1 cluster 1 bin 1
chr 1 cluster 1 bin 2
...
chr 19 cluster 0 bin 1
chr 19 cluster 0 bin 2
chr 19 cluster 1 bin 1
chr 19 cluster 1 bin 2
"""
def get_aligned_inter_cell_dist(cell_i_dist, cell_j_dist, num_bins_per_chr, num_chrs= 19,
chr_seq = None, visualize = False):
if chr_seq is None:
print("default chromosome sequence")
chr_seq = np.arange(1,20)
if visualize: fig, axes = plt.subplots(num_chrs,2, figsize = (7,15))
total_haploid_bins = np.sum([val for key,val in num_bins_per_chr.items()][:num_chrs+1]) #total number of bins for the first num_chrschromosomes
cum_num_bins = np.cumsum([val for key,val in num_bins_per_chr.items()]) #[0,bins_chr1, bins_chr1+chr2,...] HAPLOID number of bins
cell_i_seq = []
cell_j_seq = []
bit_wise_seq = {} # i: 0 --> chromosome i hasn't been switched, 1 means it has been switched
for i in chr_seq:
i = int(i)
if visualize:
sns.heatmap(cell_i_dist_subset, square = True, ax = axes[i,0], vmin = 0, vmax = 22, cbar = False)
sns.heatmap(cell_j_dist_subset, square = True, ax = axes[i,1], vmin = 0, vmax = 22, cbar = False)
cell_i_seq = cell_i_seq + list(np.arange(2*cum_num_bins[i-1], 2*cum_num_bins[i-1] + 2*num_bins_per_chr[i])) #this is the default sequence where we don't touch the order of copies
seq1 = cell_j_seq + list(np.arange(2*cum_num_bins[i-1], 2*cum_num_bins[i-1] + 2*num_bins_per_chr[i]))
seq2 = cell_j_seq + list(np.arange(2*cum_num_bins[i-1] + num_bins_per_chr[i], 2*cum_num_bins[i-1] + 2*num_bins_per_chr[i])) +\
list(np.arange(2*cum_num_bins[i-1] , 2*cum_num_bins[i-1] + num_bins_per_chr[i]))
dist1, inter_size1 = get_inter_cell_dist(cell_i_dist[np.ix_(cell_i_seq, cell_i_seq)], cell_j_dist[np.ix_(seq1, seq1)])
dist2, inter_size2 = get_inter_cell_dist(cell_i_dist[np.ix_(cell_i_seq, cell_i_seq)], cell_j_dist[np.ix_(seq2, seq2)])
# print(seq1, seq2)
if dist1 <= dist2:
bit_wise_seq[i] = 0
cell_j_seq = seq1
elif dist2 < dist1:
bit_wise_seq[i] = 1
cell_j_seq = seq2
else: #dists will be nan when we only have one value in each distance matrix
cell_j_seq = seq1
bit_wise_seq[i] = 0
bit_wise_seq_list = [bit_wise_seq[i] for i in np.arange(1, 20)]
return get_inter_cell_dist(cell_i_dist[np.ix_(cell_i_seq, cell_i_seq)], cell_j_dist[np.ix_(cell_j_seq, cell_j_seq)]), bit_wise_seq_list, cell_j_seq, cell_i_seq ##############EXTEA OUTPUT
def main():
global cum_lens
global num_chrs
global data
global reads_to_include
num_chrs = 19
cum_lens = get_chr_cumulative_lengths()
clustering_method = "pckmeans"
reads_to_include = "inliers"
print("clustering method: ", clustering_method)
print("including {} reads".format(reads_to_include))
data = read_data(clustering_method, reads_to_include) #global variables
data = data.loc[data.stage == "4cell"]
cids_4cell = data.cell_index.unique()
for bin_size in [30e6]:#200e6, 100e6, 50e6,
for sample in ["mean", "first", "last"]:
print("bin size: {}, sample {}, Number of chromosomes: {}".format(int(bin_size/1e6), sample, num_chrs))
with Pool(6) as p:
p.starmap(align_cell_i, zip(cids_4cell, repeat(bin_size), repeat(sample)))
def consistency_analysis():
reads_to_inlcude = "inliers" #"all"
clustering_method = "pckmeans" # "igs"
num_chrs = 19
data = read_data(clustering_method, reads_to_inlcude) #cells with less than 150 reads are deleted: 80., 84., 105., 113.
cum_lens = get_chr_cumulative_lengths()
fig, axes = plt.subplots(4,4, figsize = (20,20))
for i, bin_size in tqdm(enumerate([50e6, 25e6, 10e6, 1e6])):
for j, num_samples in tqdm(enumerate([5, 25, 50, 75])):
print("\n bin size: ", bin_size)
print("\n num samples: ", num_samples)
proportion_matching = []
variances = []
cell_i_index = 91
cell_j_index = 93
# for cell_i_index in tqdm(data.loc[data.stage == '4cell', 'cell_index'].unique()[0:2]):
# cids_after_i = data.loc[data.cell_index >= cell_i_index, 'cell_index'].unique()[1:3]
# for cell_j_index in cids_after_i:
cell_i = data.loc[(data.cell_index==cell_i_index) & (data.chr < 20)].copy()
cell_i['abs_pos'] = -1
cell_i['abs_pos'] = cell_i.pos.copy() + [cum_lens[ch-1] for ch in cell_i.chr] #encodes the absolute position of the reads along the linear genome
cell_j = data.loc[(data.cell_index==cell_j_index) & (data.chr < 20)].copy()
cell_j['abs_pos'] = -1
cell_j['abs_pos'] = cell_j.pos.copy() + [cum_lens[ch-1] for ch in cell_j.chr] #encodes the absolute position of the reads along the linear genome
bins, num_bins_per_chr = get_bins(bin_size, cum_lens, num_chrs)
cell_i_dist,_ = pckmeans_get_dist_mat_binned(cell_i, bins, num_bins_per_chr)
cell_j_dist,_ = pckmeans_get_dist_mat_binned(cell_j, bins, num_bins_per_chr)
# print("intra cell distance matrix shape: ", cell_i_dist.shape)
min_dists = []
num_trials = 100
for trial in range(num_trials):
dists = []
for sample in range(num_samples):
if sample == 0:
order = np.arange(1,20)
elif sample == 1:
order = np.arange(19,0,-1)
else:
order = np.arange(1,20)
np.random.shuffle(order)
d, bit_seq, bin_seq, _ = get_aligned_inter_cell_dist(cell_i_dist, cell_j_dist, num_bins_per_chr, chr_seq = order) #np.arange(19,0,-1)
dists.append(d[0])
min_dists.append(np.round(np.min(dists), 4))
# proportion_matching.append(np.mean(dists < np.min(dists) +0.05))
# variances.append(np.var(dists))
print(min_dists)
axes[j,i].hist(min_dists, bins = 8)
axes[j,i].set_title("bin size {}".format(bin_size/1e6))
axes[j,i].set_ylabel("sample size: {}".format(num_samples))
# axes[1,i].hist(variances, bins = 20)
# axes[1,i].set_xlabel("variances")
plt.suptitle("cell indeces {} and {}".format(cell_i_index, cell_j_index))
plt.savefig("figures/sequential_algorithm_consistency_min_distance_distribution_cells{}_{}.png".format(cell_i_index, cell_j_index))
if __name__ == "__main__":
main()
# consistency_analysis()
|
pdavar/Analysis-of-3D-Mouse-Genome-Organization
|
chromosome_alignment.py
|
chromosome_alignment.py
|
py
| 16,670 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "numpy.sum",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "pandas.cut",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "pandas.MultiIndex.from_product",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "pandas.MultiIndex",
"line_number": 38,
"usage_type": "attribute"
},
{
"api_name": "numpy.nan",
"line_number": 38,
"usage_type": "attribute"
},
{
"api_name": "numpy.sum",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "pandas.cut",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "pandas.MultiIndex.from_product",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "pandas.MultiIndex",
"line_number": 55,
"usage_type": "attribute"
},
{
"api_name": "numpy.nan",
"line_number": 55,
"usage_type": "attribute"
},
{
"api_name": "numpy.sum",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "pandas.cut",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "pandas.MultiIndex.from_product",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "pandas.MultiIndex",
"line_number": 79,
"usage_type": "attribute"
},
{
"api_name": "numpy.nan",
"line_number": 79,
"usage_type": "attribute"
},
{
"api_name": "pandas.cut",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "pandas.MultiIndex.from_product",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "pandas.MultiIndex",
"line_number": 80,
"usage_type": "attribute"
},
{
"api_name": "numpy.nan",
"line_number": 80,
"usage_type": "attribute"
},
{
"api_name": "pandas.cut",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "pandas.MultiIndex.from_product",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "pandas.MultiIndex",
"line_number": 81,
"usage_type": "attribute"
},
{
"api_name": "numpy.nan",
"line_number": 81,
"usage_type": "attribute"
},
{
"api_name": "pandas.concat",
"line_number": 82,
"usage_type": "call"
},
{
"api_name": "numpy.triu_indices",
"line_number": 97,
"usage_type": "call"
},
{
"api_name": "numpy.isnan",
"line_number": 105,
"usage_type": "call"
},
{
"api_name": "numpy.isnan",
"line_number": 112,
"usage_type": "call"
},
{
"api_name": "numpy.nan",
"line_number": 113,
"usage_type": "attribute"
},
{
"api_name": "scipy.stats.pearsonr",
"line_number": 116,
"usage_type": "call"
},
{
"api_name": "scipy.stats",
"line_number": 116,
"usage_type": "attribute"
},
{
"api_name": "numpy.sum",
"line_number": 118,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"line_number": 161,
"usage_type": "call"
},
{
"api_name": "numpy.random.shuffle",
"line_number": 162,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 162,
"usage_type": "attribute"
},
{
"api_name": "numpy.min",
"line_number": 170,
"usage_type": "call"
},
{
"api_name": "numpy.argmin",
"line_number": 171,
"usage_type": "call"
},
{
"api_name": "numpy.save",
"line_number": 174,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 174,
"usage_type": "call"
},
{
"api_name": "numpy.save",
"line_number": 175,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 175,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 182,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 187,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"line_number": 222,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.subplots",
"line_number": 225,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 225,
"usage_type": "name"
},
{
"api_name": "numpy.sum",
"line_number": 227,
"usage_type": "call"
},
{
"api_name": "numpy.cumsum",
"line_number": 228,
"usage_type": "call"
},
{
"api_name": "seaborn.heatmap",
"line_number": 236,
"usage_type": "call"
},
{
"api_name": "seaborn.heatmap",
"line_number": 237,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"line_number": 238,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"line_number": 239,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"line_number": 240,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"line_number": 241,
"usage_type": "call"
},
{
"api_name": "numpy.ix_",
"line_number": 243,
"usage_type": "call"
},
{
"api_name": "numpy.ix_",
"line_number": 244,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"line_number": 257,
"usage_type": "call"
},
{
"api_name": "numpy.ix_",
"line_number": 258,
"usage_type": "call"
},
{
"api_name": "multiprocessing.Pool",
"line_number": 288,
"usage_type": "call"
},
{
"api_name": "itertools.repeat",
"line_number": 289,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.subplots",
"line_number": 302,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 302,
"usage_type": "name"
},
{
"api_name": "tqdm.tqdm",
"line_number": 303,
"usage_type": "call"
},
{
"api_name": "tqdm.tqdm",
"line_number": 304,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"line_number": 339,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"line_number": 341,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"line_number": 343,
"usage_type": "call"
},
{
"api_name": "numpy.random.shuffle",
"line_number": 344,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 344,
"usage_type": "attribute"
},
{
"api_name": "numpy.round",
"line_number": 347,
"usage_type": "call"
},
{
"api_name": "numpy.min",
"line_number": 347,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.suptitle",
"line_number": 358,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 358,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.savefig",
"line_number": 359,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 359,
"usage_type": "name"
}
] |
19492440920
|
# -*- coding: utf-8 -*-
from flask import Flask,request,abort # 引用flask库
import os
import time
import sys, getopt
from dd.mylog import TNLog
import dd
logger = TNLog()
print(__name__)
app= Flask(__name__)
app.config.update(DEBUG=True)
# 定义路由
@app.route('/')
def hello_world():
out = os.popen('docker info').read()
return out
@app.route('/docker/Info')
def getDockerInfo():
out = os.popen('docker info').read()
return out
@app.route('/docker/deploy',methods=['POST'])
def deploy():
if request.method == 'POST':
image_name = request.form['image_name']
run_name = request.form['run_name']
port = request.form['port']
other = request.form['other']
loginfo = ''
cmd = 'docker pull %s'%(image_name)
os.popen(cmd)
loginfo+=cmd+'\n'
cmd = 'docker stop %s'%(run_name)
loginfo+=cmd+'\n'
print(os.popen(cmd).read())
cmd = 'docker rm %s'%(run_name)
loginfo+=cmd+'\n'
print(os.popen(cmd).read())
cmd = 'docker run -d --restart=always --name=%s -p %s %s %s'%(run_name,port,other,image_name)
print(os.popen(cmd).read())
loginfo+=cmd+'\n'
logger.info(loginfo)
cmd = "docker ps |grep '%s'"%(run_name)
ret = os.popen(cmd).read()
print(ret)
if ret == '':
return '0'
else:
return '1'
@app.route('/docker/login',methods=['POST'])
def dockerLogin():
if request.method == 'POST':
user = request.form['u']
pwd = request.form['p']
host = request.form['host']
outs = os.popen('docker login -u %s -p %s %s' %(user,pwd,host)).readlines()
is_successful = False
for l in outs:
print(l)
if l.startswith('Login Succeeded'):
is_successful=True
return '1'
if not is_successful:
return '0'
def getPort():
usage = '''
usage:
python3 -m dd [-v | -h | -p <port>]
ddService [-v | -h | -p <port>]
'''
port = 8866
argv = sys.argv[1:]
try:
opts, argvs = getopt.getopt(argv,"p:hv",["port=","help","version"])
except getopt.GetoptError:
print("parameter format error")
print(usage)
sys.exit(2)
for opt, arg in opts:
if opt == '-h':
print(usage)
sys.exit()
elif opt in ("-p", "--port"):
port = arg
elif opt == '-v':
print(dd.__version__)
sys.exit()
return port
def main():
app.run(host='0.0.0.0',port=getPort(),threaded=True,debug=False) # 开启调试模式,程序访问端口为8080
if __name__=="__main__":
#main()
if(not os.path.exists('log')):
os.mkdir('log')
#logger.info("info")
app.run(host='0.0.0.0',debug=True,port=getPort(),threaded=True) # 开启调试模式,程序访问端口为8080
#http_server = WSGIServer(('', 5001), app)
#http_server.serve_forever()
|
stosc/dockerDeployer
|
dd/run.py
|
run.py
|
py
| 3,114 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "dd.mylog.TNLog",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "flask.Flask",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "os.popen",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "os.popen",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "flask.request.method",
"line_number": 30,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "flask.request.form",
"line_number": 31,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "flask.request.form",
"line_number": 32,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "flask.request.form",
"line_number": 33,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "flask.request.form",
"line_number": 34,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "os.popen",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "os.popen",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "os.popen",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "os.popen",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "os.popen",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "flask.request.method",
"line_number": 65,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 65,
"usage_type": "name"
},
{
"api_name": "flask.request.form",
"line_number": 66,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 66,
"usage_type": "name"
},
{
"api_name": "flask.request.form",
"line_number": 67,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 67,
"usage_type": "name"
},
{
"api_name": "flask.request.form",
"line_number": 68,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 68,
"usage_type": "name"
},
{
"api_name": "os.popen",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 88,
"usage_type": "attribute"
},
{
"api_name": "getopt.getopt",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "getopt.GetoptError",
"line_number": 91,
"usage_type": "attribute"
},
{
"api_name": "sys.exit",
"line_number": 94,
"usage_type": "call"
},
{
"api_name": "sys.exit",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "dd.__version__",
"line_number": 102,
"usage_type": "attribute"
},
{
"api_name": "sys.exit",
"line_number": 103,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_number": 114,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 114,
"usage_type": "attribute"
},
{
"api_name": "os.mkdir",
"line_number": 115,
"usage_type": "call"
}
] |
72995183868
|
import os
import sys
import time
import struct
import debug
import eosapi
from eosapi import N, push_transactions
from common import prepare, Sync
from tools import cpp2wast
def init_debug(wasm=True):
def init_decorator(func):
def func_wrapper(*args, **kwargs):
if wasm:
_src_dir = os.path.dirname(os.path.abspath(__file__))
cpp2wast.set_src_path(_src_dir)
cpp2wast.build_native('lab.cpp', 'lab', debug=False)
lib_file = os.path.join(_src_dir, 'liblab.dylib')
# debug.set_debug_contract('lab', lib_file)
return func(*args, **kwargs)
return func_wrapper
return init_decorator
def init(wasm=True):
def init_decorator(func):
def func_wrapper(*args, **kwargs):
if wasm:
prepare('lab', 'lab.wast', 'lab.abi', __file__)
return func(*args, **kwargs)
else:
prepare('lab', 'lab.py', 'lab.abi', __file__)
return func(*args, **kwargs)
return func_wrapper
return init_decorator
_dir = os.path.dirname(os.path.abspath(__file__))
sync = Sync(_account = 'lab', _dir = _dir, _ignore = ['lab.py'])
@init(True)
def test(msg='hello,world'):
r = eosapi.push_action('lab', 'sayhello', msg, {'lab':'active'})
assert r
@init()
def deploy():
sync.deploy_all()
@init()
def deploy_mpy():
sync.deploy_all_mpy()
@init()
def test2(count=100):
import time
import json
actions = []
for i in range(count):
action = ['lab', 'sayhello', str(i), {'lab':'active'}]
actions.append(action)
ret, cost = eosapi.push_actions(actions)
assert ret
print(ret['elapsed'])
print(cost)
print('total cost time:%.3f s, cost per action: %.3f ms, actions per second: %.3f'%(cost/1e6, cost/count/1000, 1*1e6/(cost/count)))
def set_contract(account, src_file, abi_file, vmtype=1, sign=True):
'''Set code and abi for the account
Args:
account (str) : account name
src_file (str) : source file path
abi_file (str) : abi file path
vmtype : virtual machine type, 0 for wasm, 1 for micropython, 2 for evm
sign (bool) : True to sign transaction
Returns:
JsonStruct|None:
'''
account = eosapi.N(account)
code = struct.pack('QBB', account, vmtype, 0)
if vmtype == 0:
with open(src_file, 'rb') as f:
wasm = eosapi.wast2wasm(f.read())
code += eosapi.pack_bytes(wasm)
setcode = [N('eosio'), N('setcode'), [[account, N('active')]], code]
return push_transactions([[setcode]], sign, compress = True)
def build_native():
_src_dir = os.path.dirname(os.path.abspath(__file__))
cpp2wast.set_src_path(_src_dir)
cpp2wast.build_native('lab.cpp', 'lab', debug=False)
lib_file = os.path.join(_src_dir, 'liblab.dylib')
debug.set_debug_contract('lab', lib_file)
@init()
#@init_debug()
def test3(count=200):
actions = []
for i in range(count):
action = ['counter', 'count', str(i), {'counter':'active'}]
actions.append([action])
ret, cost = eosapi.push_transactions(actions)
assert ret
print('total cost time:%.3f s, cost per action: %.3f ms, transaction per second: %.3f'%(cost/1e6, cost/count/1000, 1*1e6/(cost/count)))
|
learnforpractice/pyeos
|
programs/pyeos/tests/wasm/lab/t.py
|
t.py
|
py
| 3,368 |
python
|
en
|
code
| 131 |
github-code
|
6
|
[
{
"api_name": "os.path.dirname",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "os.path.abspath",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "tools.cpp2wast.set_src_path",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "tools.cpp2wast",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "tools.cpp2wast.build_native",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "tools.cpp2wast",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "os.path.join",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "common.prepare",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "common.prepare",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "os.path.dirname",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 37,
"usage_type": "attribute"
},
{
"api_name": "os.path.abspath",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "common.Sync",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "eosapi.push_action",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "eosapi.push_actions",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "eosapi.N",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "struct.pack",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "eosapi.wast2wasm",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "eosapi.pack_bytes",
"line_number": 89,
"usage_type": "call"
},
{
"api_name": "eosapi.N",
"line_number": 91,
"usage_type": "call"
},
{
"api_name": "eosapi.push_transactions",
"line_number": 93,
"usage_type": "call"
},
{
"api_name": "os.path.dirname",
"line_number": 96,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 96,
"usage_type": "attribute"
},
{
"api_name": "os.path.abspath",
"line_number": 96,
"usage_type": "call"
},
{
"api_name": "tools.cpp2wast.set_src_path",
"line_number": 97,
"usage_type": "call"
},
{
"api_name": "tools.cpp2wast",
"line_number": 97,
"usage_type": "name"
},
{
"api_name": "tools.cpp2wast.build_native",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "tools.cpp2wast",
"line_number": 98,
"usage_type": "name"
},
{
"api_name": "os.path.join",
"line_number": 99,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 99,
"usage_type": "attribute"
},
{
"api_name": "debug.set_debug_contract",
"line_number": 100,
"usage_type": "call"
},
{
"api_name": "eosapi.push_transactions",
"line_number": 110,
"usage_type": "call"
}
] |
24998585781
|
import wizard
import pooler
import datetime
import time
from copy import deepcopy
import netsvc
from tools.translate import _
_schedule_form = '''<?xml version="1.0"?>
<form string="Interview Scheduling Of Candidate">
<field name="start_interview"/>
<field name="end_interview"/>
<field name="interval_time"/>
</form>'''
_schedule_fields = {
'start_interview' : {'string' : 'Start Interview Time', 'type' : 'datetime','required':True },
'end_interview' : {'string' : 'End Interview Time', 'type' : 'datetime','required':True },
'interval_time' : {'string' : 'Interval(Approximate Evaluation Time) ', 'type' : 'integer','required':True },
}
form = """<?xml version="1.0"?>
<form string="Use Model">
<separator string="Scheduled Candidate List " colspan="4"/>
<field name="list_all" nolabel="1"/>
<separator string="Some Candidate Still Remaining " colspan="4"/>
<field name="list" nolabel="1"/>
</form>
"""
fields = {
'list' : {'string': "",'type':'text','readonly':True},
'list_all' : {'string': "",'type':'text','readonly':True}
}
class wiz_schedule(wizard.interface):
def _scheduling(self, cr, uid, data, context):
pool = pooler.get_pool(cr.dbname)
hr_int_obj = pool.get("hr.interview")
if time.strptime(str(data['form']['start_interview']),"%Y-%m-%d %H:%M:%S") < time.strptime(data['form']['end_interview'],"%Y-%m-%d %H:%M:%S") and time.strptime(str(data['form']['start_interview']),"%Y-%m-%d %H:%M:%S")[:3] ==time.strptime(str(data['form']['end_interview']),"%Y-%m-%d %H:%M:%S")[:3] :
if datetime.datetime(*time.strptime(str(data['form']['end_interview']),"%Y-%m-%d %H:%M:%S")[:6]) >= datetime.datetime(*time.strptime(str(data['form']['start_interview']),"%Y-%m-%d %H:%M:%S")[:6]) + datetime.timedelta(minutes=int(data['form']['interval_time'])):
cur_time = data['form']['start_interview']
re_id = deepcopy(data['ids'])
list_all="Interview ID \t Name "
for rec in data['ids']:
wf_service = netsvc.LocalService('workflow')
wf_service.trg_validate(uid, 'hr.interview', rec, 'state_scheduled', cr)
record = hr_int_obj.read(cr,uid,rec,['hr_id','name'])
list_all +="\n" + record['hr_id']+"\t\t" + record['name']
id = hr_int_obj.write(cr,uid,rec,{'date':cur_time,'state':'scheduled'})
cur_time = datetime.datetime(*time.strptime(str(cur_time),"%Y-%m-%d %H:%M:%S")[:6]) + datetime.timedelta(minutes=int(data['form']['interval_time']))
re_id.remove(rec)
end_time = datetime.datetime(*time.strptime(str(cur_time),"%Y-%m-%d %H:%M:%S")[:6]) + datetime.timedelta(minutes=int(data['form']['interval_time']))
if len(re_id) > 0 and time.strptime(str(end_time),"%Y-%m-%d %H:%M:%S") > time.strptime(data['form']['end_interview'],"%Y-%m-%d %H:%M:%S") :
remain="Interview ID \t Name "
for record in hr_int_obj.read(cr,uid,re_id,['hr_id','name']):
remain +="\n" + record['hr_id']+"\t\t" + record['name']
data['form']['list']=remain
data['form']['list_all']=list_all
return data['form']
else :
raise wizard.except_wizard(_('UserError'),_('Insert appropriate interval time!!!'))
return {}
else :
raise wizard.except_wizard(_('UserError'),_('The Scheduling is not Appropriate. Enter appropriate date and time '))
return {}
data['form']['list_all']= list_all
data['form']['list']= "None"
return data['form']
states = {
'init': {
'actions': [],
'result': {'type': 'form', 'arch':_schedule_form, 'fields':_schedule_fields, 'state':[('schedule','Schedule','gtk-ok'),('end','Cancel','gtk-cancel')]}
},
'schedule': {
'actions': [_scheduling],
'result': {'type': 'form','arch':form, 'fields':fields, 'state':[('end','Ok')]}
},
}
wiz_schedule('wiz_interview_scheduling')
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
factorlibre/openerp-extra-6.1
|
hr_interview/wizard/wiz_schedule.py
|
wiz_schedule.py
|
py
| 4,374 |
python
|
en
|
code
| 9 |
github-code
|
6
|
[
{
"api_name": "wizard.interface",
"line_number": 37,
"usage_type": "attribute"
},
{
"api_name": "pooler.get_pool",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "time.strptime",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "time.strptime",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "datetime.timedelta",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "copy.deepcopy",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "netsvc.LocalService",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "time.strptime",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "datetime.timedelta",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "time.strptime",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "datetime.timedelta",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "time.strptime",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "wizard.except_wizard",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "tools.translate._",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "wizard.except_wizard",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "tools.translate._",
"line_number": 66,
"usage_type": "call"
}
] |
10696495998
|
# -*- coding: utf-8 -*-
import os
import uuid
import json
import requests
import re
from datetime import datetime
import urllib
import hmac
import base64
from threading import Timer
REQUEST_URL = 'https://alidns.aliyuncs.com/'
LOCAL_FILE = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'ip.txt')
ALIYUN_SETTINGS = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'settings.json')
def get_common_params(settings):
"""
获取公共参数
参考文档:https://help.aliyun.com/document_detail/29745.html?spm=5176.doc29776.6.588.sYhLJ0
"""
return {
'Format': 'json',
'Version': '2015-01-09',
'AccessKeyId': settings['access_key'],
'SignatureMethod': 'HMAC-SHA1',
'Timestamp': datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%SZ'),
'SignatureVersion': '1.0',
'SignatureNonce': uuid.uuid4()
}
def get_signed_params(http_method, params, settings):
"""
参考文档:https://help.aliyun.com/document_detail/29747.html?spm=5176.doc29745.2.1.V2tmbU
"""
# 1、合并参数,不包括Signature
params.update(get_common_params(settings))
# 2、按照参数的字典顺序排序
sorted_params = sorted(params.items())
# 3、encode 参数
query_params = urllib.parse.urlencode(sorted_params)
# 4、构造需要签名的字符串
str_to_sign = http_method + "&" + urllib.parse.quote_plus("/") + "&" + urllib.parse.quote_plus(query_params)
# 5、计算签名
signature = base64.b64encode(hmac.new((settings['access_secret']+'&').encode('utf-8'), str_to_sign.encode('utf-8'),
digestmod='sha1').digest())
# 6、将签名加入参数中
params['Signature'] = signature
return params
def update_yun(ip):
"""
修改云解析
参考文档:
获取解析记录:https://help.aliyun.com/document_detail/29776.html?spm=5176.doc29774.6.618.fkB0qE
修改解析记录:https://help.aliyun.com/document_detail/29774.html?spm=5176.doc29774.6.616.qFehCg
"""
with open(ALIYUN_SETTINGS, 'r') as f:
settings = json.loads(f.read())
# 首先获取解析列表
get_params = get_signed_params('GET', {
'Action': 'DescribeDomainRecords',
'DomainName': settings['domain'],
'TypeKeyWord': 'A'
}, settings)
get_resp = requests.get(REQUEST_URL, get_params)
records = get_resp.json()
print('get_records============')
print(records)
for record in records['DomainRecords']['Record']:
post_params = get_signed_params('POST', {
'Action': 'UpdateDomainRecord',
'RecordId': record['RecordId'],
'RR': record['RR'],
'Type': record['Type'],
'Value': ip
}, settings)
post_resp = requests.post(REQUEST_URL, post_params)
result = post_resp.json()
print('update_record============')
print(result)
def get_curr_ip():
    """
    Fetch this machine's current public IP from ip138.com.

    :return: the IP string, or '' when the lookup fails — the caller
             (task_update) treats a falsy value as "could not determine IP".
    """
    headers = {
        'content-type': 'text/html',
        'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:22.0) Gecko/20100101 Firefox/22.0'
    }
    try:
        resp = requests.get('http://2018.ip138.com/ic.asp', headers=headers)
        # The page embeds the IP in square brackets: "... [1.2.3.4] ...".
        parts = re.split(r"[\[\]]", resp.text)
        return parts[1] if len(parts) > 1 else ''
    except requests.RequestException:
        # Network failure: return falsy so task_update() retries on the next timer tick
        # instead of crashing the Timer thread.
        return ''
def get_lastest_local_ip():
    """Return the IP most recently cached in the local file (LOCAL_FILE)."""
    print('ip local path', LOCAL_FILE)
    with open(LOCAL_FILE, 'r') as f:
        # The file holds a single line: the last IP written by task_update().
        return f.readline()
def task_update():
    """
    Periodic DDNS task: compare the current public IP against the locally
    cached one; when it changed, persist it and push it to Aliyun DNS.
    Reschedules itself via a Timer every 300 seconds.
    """
    ip = get_curr_ip()
    if not ip:
        print('get ip failed')
    else:
        last_ip = get_lastest_local_ip()
        print('curr_ip:', ip, ' last_ip:', last_ip)
        if ip != last_ip:
            # Cache the new IP first, then update the remote DNS records.
            print('save ip to {}...'.format(LOCAL_FILE))
            with open(LOCAL_FILE, 'w') as f:
                f.write(ip)
            print('update remote record...')
            update_yun(ip)
    # Re-arm the timer regardless of success so the loop keeps running.
    Timer(300, task_update).start()
if __name__ == '__main__':
    # Start the DDNS loop immediately; task_update() reschedules itself
    # every 5 minutes from then on.
    print('启动ddns服务,每5分钟执行一次...')
    Timer(0, task_update).start()
|
mikuh/aliyun_ddns
|
ddns.py
|
ddns.py
|
py
| 4,069 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "os.path.join",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "os.path.dirname",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "os.path.abspath",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "os.path.dirname",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "os.path.abspath",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.utcnow",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "uuid.uuid4",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "urllib.parse.urlencode",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "urllib.parse",
"line_number": 45,
"usage_type": "attribute"
},
{
"api_name": "urllib.parse.quote_plus",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "urllib.parse",
"line_number": 47,
"usage_type": "attribute"
},
{
"api_name": "base64.b64encode",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "hmac.new",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "requests.post",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "re.split",
"line_number": 99,
"usage_type": "call"
},
{
"api_name": "threading.Timer",
"line_number": 126,
"usage_type": "call"
},
{
"api_name": "threading.Timer",
"line_number": 131,
"usage_type": "call"
}
] |
72683668667
|
import sys
from PyQt5.QtWidgets import QApplication, QDialog, QVBoxLayout, QPushButton, QLabel, QHBoxLayout, QLineEdit
button_y_position = 0.1  # Initial Y position for the buttons; appears unused in this module — TODO confirm before removing
class Addsubject(QDialog):
    """Modal dialog collecting a subject name, teacher name and student count."""

    def __init__(self):
        super().__init__()
        self.setWindowTitle('Custom Pop-up Block')
        self.setGeometry(300, 200, 400, 200)

        # Root vertical layout plus a horizontal row for the action button.
        outer = QVBoxLayout()
        actions = QHBoxLayout()

        # One labelled line-edit per input field, added top-to-bottom.
        self.subject_edit = QLineEdit(self)
        self.teacher_edit = QLineEdit(self)
        self.students_edit = QLineEdit(self)
        rows = (
            (QLabel("Subject Name:"), self.subject_edit),
            (QLabel("Teacher Name:"), self.teacher_edit),
            (QLabel("Number of Students:"), self.students_edit),
        )
        for caption, editor in rows:
            outer.addWidget(caption)
            outer.addWidget(editor)

        # The "Create" button confirms the dialog.
        create_btn = QPushButton("Create")
        create_btn.clicked.connect(self.on_button1_click)
        actions.addWidget(create_btn)

        outer.addLayout(actions)
        self.setLayout(outer)

    def on_button1_click(self):
        """Log the entered values, then accept (close) the dialog."""
        subject_name = self.subject_edit.text()
        teacher_name = self.teacher_edit.text()
        num_students = self.students_edit.text()
        print(f"Subject Name: {subject_name}, Teacher Name: {teacher_name}, Students: {num_students}")
        self.accept()

    def create_class_data(self):
        """Return the current (subject, teacher, number-of-students) entries."""
        subject_name = self.subject_edit.text()
        teacher_name = self.teacher_edit.text()
        num_students = self.students_edit.text()
        return subject_name , teacher_name , num_students
|
Rush-154/DBMS
|
Login/flexcards.py
|
flexcards.py
|
py
| 2,119 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "PyQt5.QtWidgets.QDialog",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtWidgets.QVBoxLayout",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtWidgets.QHBoxLayout",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtWidgets.QLabel",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtWidgets.QLineEdit",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtWidgets.QLabel",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtWidgets.QLineEdit",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtWidgets.QLabel",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtWidgets.QLineEdit",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtWidgets.QPushButton",
"line_number": 27,
"usage_type": "call"
}
] |
36411448777
|
#import logging; logging.basicConfig(level=logging.INFO)
import asyncio, os, json, time, base64
from datetime import datetime
from aiohttp import web
from jinja2 import Environment, FileSystemLoader
from log import Log,create_logger
import config as conf
import common.orm as orm
from common.webmiddlewares import logger_factory, data_factory,response_factory,auth_factory
from common.coroweb import add_routes, add_static
from cryptography import fernet
def init_jinja2(app, **kw):
    """
    Build the Jinja2 environment for *app* and store it as app['__templating__'].

    Keyword options override the defaults below; 'path' sets the template
    directory (defaults to ./templates next to this file) and 'filters'
    is a mapping of extra template filters to register.
    """
    Log.info('init jinja2...')
    defaults = (
        ('autoescape', True),
        ('block_start_string', '{%'),
        ('block_end_string', '%}'),
        ('variable_start_string', '{{'),
        ('variable_end_string', '}}'),
        ('auto_reload', True),
    )
    options = {key: kw.get(key, fallback) for key, fallback in defaults}
    path = kw.get('path', None)
    if path is None:
        path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'templates')
    Log.info('set jinja2 template path: %s' % path)
    env = Environment(loader=FileSystemLoader(path), **options)
    # Register any caller-supplied template filters.
    for name, fn in (kw.get('filters') or {}).items():
        env.filters[name] = fn
    app['__templating__'] = env
def datetime_filter(t):
    """Render unix timestamp *t* as a human-friendly Chinese relative time."""
    delta = int(time.time() - t)
    # Buckets: under a minute, minutes, hours, days (within a week).
    buckets = (
        (60, None, u'1分钟前'),
        (3600, 60, u'%s分钟前'),
        (86400, 3600, u'%s小时前'),
        (604800, 86400, u'%s天前'),
    )
    for limit, unit, label in buckets:
        if delta < limit:
            return label if unit is None else label % (delta // unit)
    # Older than a week: absolute date.
    dt = datetime.fromtimestamp(t)
    return u'%s年%s月%s日' % (dt.year, dt.month, dt.day)
def index(request):
    """Minimal handler: return a static hello page."""
    body = b'<h1>Hello</h1>'
    return web.Response(body=body, headers={'content-type': 'text/html'})
async def init(loop):
    """
    Build the web application, open the DB connection pool and start
    listening on localhost:8050.

    Review fixes: dropped the redundant ``@asyncio.coroutine`` decorator
    (a no-op on ``async def`` functions and removed in Python 3.11),
    removed the unused fernet key / secret_key dead code, and deleted
    commented-out legacy bootstrap code.

    :param loop: asyncio event loop used for both the DB pool and the server
    :return: the asyncio Server on success, None if startup failed
    """
    try:
        create_logger()
        Log.info("server init...")
        db = conf.configs['db']
        Log.info("init configs...")
        await orm.create_pool(loop=loop, host=db['host'], port=db['port'], user=db['user'], password=db['password'], db=db['database'])
        # Middleware order matters: logging first, auth before response rendering.
        app = web.Application(loop=loop, middlewares=[
            logger_factory,
            data_factory,
            auth_factory,
            response_factory,
        ])
        init_jinja2(app, filters=dict(datetime=datetime_filter))
        add_routes(app, 'routers')
        add_static(app)
        url = 'localhost'
        port = 8050
        srv = await loop.create_server(app.make_handler(), url, port)
        Log.info("server started at http://" + url + ":" + str(port))
        return srv
    except Exception as ex:
        # Startup failure is reported and swallowed; the caller gets None.
        # NOTE(review): consider logging the traceback and re-raising instead.
        print('服务启动失败')
        print(ex)
# Bootstrap: run init() to create the pool and server, then serve forever.
loop = asyncio.get_event_loop()
loop.run_until_complete(init(loop))
loop.run_forever()
|
jkilili/python_web
|
www/app.py
|
app.py
|
py
| 3,208 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "log.Log.info",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "log.Log",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "os.path.join",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 30,
"usage_type": "attribute"
},
{
"api_name": "os.path.dirname",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "os.path.abspath",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "log.Log.info",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "log.Log",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "jinja2.Environment",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "jinja2.FileSystemLoader",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.fromtimestamp",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 49,
"usage_type": "name"
},
{
"api_name": "aiohttp.web.Response",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "aiohttp.web",
"line_number": 53,
"usage_type": "name"
},
{
"api_name": "log.create_logger",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "log.Log.info",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "log.Log",
"line_number": 60,
"usage_type": "name"
},
{
"api_name": "config.configs",
"line_number": 61,
"usage_type": "attribute"
},
{
"api_name": "log.Log.info",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "log.Log",
"line_number": 63,
"usage_type": "name"
},
{
"api_name": "cryptography.fernet.Fernet.generate_key",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "cryptography.fernet.Fernet",
"line_number": 64,
"usage_type": "attribute"
},
{
"api_name": "cryptography.fernet",
"line_number": 64,
"usage_type": "name"
},
{
"api_name": "base64.urlsafe_b64decode",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "common.orm.create_pool",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "common.orm",
"line_number": 67,
"usage_type": "name"
},
{
"api_name": "aiohttp.web.Application",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "aiohttp.web",
"line_number": 68,
"usage_type": "name"
},
{
"api_name": "common.webmiddlewares.logger_factory",
"line_number": 69,
"usage_type": "name"
},
{
"api_name": "common.webmiddlewares.data_factory",
"line_number": 70,
"usage_type": "name"
},
{
"api_name": "common.webmiddlewares.auth_factory",
"line_number": 71,
"usage_type": "name"
},
{
"api_name": "common.webmiddlewares.response_factory",
"line_number": 72,
"usage_type": "name"
},
{
"api_name": "common.coroweb.add_routes",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "common.coroweb.add_static",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "log.Log.info",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "log.Log",
"line_number": 81,
"usage_type": "name"
},
{
"api_name": "asyncio.coroutine",
"line_number": 55,
"usage_type": "attribute"
},
{
"api_name": "asyncio.get_event_loop",
"line_number": 93,
"usage_type": "call"
}
] |
18527949488
|
import pytest
from selene.support.shared import browser
from selene import be, have
@pytest.fixture(scope='session', autouse=True)
def browser_size():
    """Session-wide, auto-applied fixture: fix the browser window at 1280x720."""
    browser.config.window_height = 720
    browser.config.window_width = 1280
def test_search(browser_size):
    """Searching 'yashaka/selene' on Google should surface the selene repo."""
    browser.open('https://google.com')
    search_box = browser.element('[name="q"]')
    search_box.should(be.blank).type('yashaka/selene').press_enter()
    results = browser.element('[id="search"]')
    results.should(have.text('yashaka/selene: User-oriented Web UI browser tests in'))
def test_search_no_result(browser_size):
    """A nonsense query should produce Google's zero-results counter."""
    browser.open('https://google.com')
    search_box = browser.element('[name="q"]')
    search_box.should(be.blank).type('fdfdfdfdfdh141cnjmmmmmm').press_enter()
    stats = browser.element('[id="result-stats"]')
    stats.should(have.text('Результатов: примерно 0'))
|
ekat-barkova/qa_guru_python_6_2_homework
|
tests/test_google_should_find_selene.py
|
test_google_should_find_selene.py
|
py
| 788 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "selene.support.shared.browser.config",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "selene.support.shared.browser",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "selene.support.shared.browser.config",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "selene.support.shared.browser",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "pytest.fixture",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "selene.support.shared.browser.open",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "selene.support.shared.browser",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "selene.support.shared.browser.element",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "selene.support.shared.browser",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "selene.be.blank",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "selene.be",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "selene.support.shared.browser.element",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "selene.support.shared.browser",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "selene.have.text",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "selene.have",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "selene.support.shared.browser.open",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "selene.support.shared.browser",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "selene.support.shared.browser.element",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "selene.support.shared.browser",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "selene.be.blank",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "selene.be",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "selene.support.shared.browser.element",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "selene.support.shared.browser",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "selene.have.text",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "selene.have",
"line_number": 21,
"usage_type": "name"
}
] |
42483141861
|
import os
import json
import requests
from flask import Flask, jsonify, request, Response
from faker import Factory
from twilio.access_token import AccessToken, IpMessagingGrant
# Flask application; serves files from ./static for the landing page.
app = Flask(__name__)
# Faker factory; appears unused in this chunk — TODO confirm before removing.
fake = Factory.create()
@app.route('/')
def index():
    """Serve the static landing page."""
    return app.send_static_file('index.html')
@app.route('/tone', methods=['POST'])
def tone():
    """
    Proxy the posted {"text": ...} JSON to IBM Watson Tone Analyzer and
    relay Watson's JSON response back to the client.

    Review fix: the failure path previously fell through returning None
    (an opaque 500); it now returns an explicit 502 JSON error.
    """
    try:
        response = requests.post(
            url="https://gateway.watsonplatform.net/tone-analyzer/api/v3/tone",
            params={
                "version": "2016-05-19",
            },
            headers={
                # SECURITY(review): hard-coded Basic-auth credential committed
                # to source control — move to an environment variable/secret store.
                "Authorization": "Basic MDQ1MjE5ZDUtYzVjNC00ZTE0LTk0MDItMWY1OWJmOTY5OWE3Olk3S1h1bTBNMWY2bw==",
                "Content-Type": "application/json",
            },
            data=json.dumps({
                "text": json.loads(request.data)['text']
            })
        )
        print('Response HTTP Status Code: {status_code}'.format(
            status_code=response.status_code))
        print('Response HTTP Response Body: {content}'.format(
            content=response.content))
        return Response(response.content, mimetype='application/json')
    except requests.exceptions.RequestException:
        print('HTTP Request failed')
        # Report the upstream failure instead of returning None.
        return Response('{"error": "tone analyzer request failed"}',
                        status=502, mimetype='application/json')
@app.route('/token')
def token():
    """
    Issue a Twilio IP Messaging access token for the requesting client.

    Identity and device id come from the query string; the endpoint id is
    unique per (identity, device) pair.
    """
    # Credentials: environment variables when set, falling back to the
    # historical hard-coded values for backward compatibility.
    # SECURITY(review): these secrets are committed to source control — rotate
    # them and drop the fallbacks.
    account_sid = os.getenv('TWILIO_ACCOUNT_SID', "ACcdbab0f13e08eb8b19b6d3025a9ad6f7")
    api_key = os.getenv('TWILIO_API_KEY', "SK2f52e17a9ca74d4714d28a7c575e1e21")
    api_secret = os.getenv('TWILIO_API_SECRET', "6XYHaD6O5zPKDpM4wU34NknCQj7L1d6C")
    service_sid = os.getenv('TWILIO_SERVICE_SID', "IS27b6d9077d6c48838881fc41b4748bb2")

    # Client identity and device id from the query string.
    identity = request.args.get('identity')
    device_id = request.args.get('device')
    endpoint = "TwilioChatDemo:{0}:{1}".format(identity, device_id)

    # Build the access token and attach the IP Messaging grant.
    token = AccessToken(account_sid, api_key, api_secret, identity)
    ipm_grant = IpMessagingGrant(endpoint_id=endpoint, service_sid=service_sid)
    token.add_grant(ipm_grant)

    # Return token info as JSON.
    return jsonify(identity=identity, token=token.to_jwt())
if __name__ == '__main__':
    #app.run(debug=True)
    # Bind on all interfaces; the PORT env var overrides the default 5000.
    port = os.getenv('PORT', '5000')
    app.run(host="0.0.0.0", port=int(port))
|
AvonGenesis/jwms
|
app.py
|
app.py
|
py
| 2,338 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "flask.Flask",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "faker.Factory.create",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "faker.Factory",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "requests.post",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "flask.request.data",
"line_number": 30,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "flask.Response",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "requests.exceptions",
"line_number": 38,
"usage_type": "attribute"
},
{
"api_name": "flask.request.args.get",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "flask.request.args",
"line_number": 53,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 53,
"usage_type": "name"
},
{
"api_name": "flask.request.args.get",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "flask.request.args",
"line_number": 56,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 56,
"usage_type": "name"
},
{
"api_name": "twilio.access_token.AccessToken",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "twilio.access_token.IpMessagingGrant",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "flask.jsonify",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "os.getenv",
"line_number": 71,
"usage_type": "call"
}
] |
11300309585
|
from django.contrib import admin
from django.urls import path
from firstapp import views as v1
# Route table: Django admin plus the four firstapp views.
urlpatterns = [
    path('admin/', admin.site.urls),
    path('home/',v1.home),
    path('gm/',v1.gm_),
    path('ga/',v1.ga_),
    path('gn/',v1.gn_),
]
|
Ranjith8796/Demo
|
firstapp/urls.py
|
urls.py
|
py
| 262 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "django.urls.path",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "django.contrib.admin.site",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "django.contrib.admin",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "django.urls.path",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "firstapp.views.home",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "firstapp.views",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "django.urls.path",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "firstapp.views.gm_",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "firstapp.views",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "django.urls.path",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "firstapp.views.ga_",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "firstapp.views",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "django.urls.path",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "firstapp.views.gn_",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "firstapp.views",
"line_number": 11,
"usage_type": "name"
}
] |
5898520470
|
import os
from datetime import datetime, timezone
import tweepy
def scrape_user_tweets(username: str, num_tweets: int = 10) -> list:
    """
    Scrapes Twitter user's original tweets (i.e., no retweets or replies) and returns them as a list of dictionaries.
    Each dictionary has three fields: "time_posted" (relative or now), "text", and "url".
    :param username: Twitter account username
    :param num_tweets: number of tweets to scrape
    :return: list
    """
    # Credentials are read from the environment.
    auth = tweepy.OAuthHandler(
        os.environ["TWITTER_API_KEY"], os.environ["TWITTER_API_SECRET"]
    )
    auth.set_access_token(
        os.environ["TWITTER_ACCESS_TOKEN"], os.environ["TWITTER_ACCESS_SECRET"]
    )
    api = tweepy.API(auth)
    timeline = api.user_timeline(screen_name=username, count=num_tweets)

    scraped = []
    for tw in timeline:
        # Skip retweets and replies; keep only original tweets.
        if "RT @" in tw.text or tw.text.startswith("@"):
            continue
        scraped.append({
            "time_posted": str(datetime.now(timezone.utc) - tw.created_at),
            "text": tw.text,
            "url": f"https://twitter.com/{tw.user.screen_name}/status/{tw.id}",
        })
    return scraped
|
mdalvi/langchain-with-milind
|
third_parties/twitter.py
|
twitter.py
|
py
| 1,318 |
python
|
en
|
code
| 2 |
github-code
|
6
|
[
{
"api_name": "tweepy.OAuthHandler",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "os.environ",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "tweepy.API",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "datetime.timezone.utc",
"line_number": 28,
"usage_type": "attribute"
},
{
"api_name": "datetime.timezone",
"line_number": 28,
"usage_type": "name"
}
] |
16062801257
|
import pygame
import init
from gui_config import Config
from widget import Widget
class Label(Widget):
    """Static text widget: renders one line of text inside its box.

    Labels never take keyboard focus and ignore all input events.
    """
    def __init__(self,
            text,
            x,y,width,height,
            config=Config.default_drawing_conf):
        Widget.__init__(self,x,y,width,height,config)
        self.__set_text(text)
    def init(self):
        # Re-render the current text after (re)initialisation, e.g. when the
        # configuration was changed between construction and init().
        Widget.init(self)
        self.__set_text(self.__text)
    def __set_text(self,text):
        # Render *text* with the configured font, position it according to
        # config.label_align ('left' / 'center' / anything else = right),
        # and mark the widget for redraw. Raises if the text does not fit
        # inside the widget's inner rectangle.
        self.__text=text
        config=self._config
        font=self.fonts.get_font(config.font_name,config.font_size)
        (w,h)=font.size(text)
        inner=self.inner_rect
        if w > inner.width or h > inner.height:
            raise Exception(u'Box too small for label "'+text+'"'+\
                u'required: '+str((w,h))+u' available: '+\
                str((inner.width,inner.height)))
        if config.label_align=='left':
            left=inner.x
        elif config.label_align=='center':
            left=inner.x+(inner.width-w)/2.0
        else:
            left=inner.right-w
        # Vertically centre the text within the inner rect.
        self.pos=(left,inner.y+(inner.height-h)/2.0)
        # NOTE(review): self.fonts is a project-level font manager; this
        # render(text, fg, bg) signature differs from pygame.font.Font.render
        # (text, antialias, color, background) — confirm the wrapper's contract.
        self.img=font.render(text,config.font_color,config.bckg_color)
        self.needs_update=True
    @property
    def text(self):
        # Current label text.
        return self.__text
    @text.setter
    def text(self,t):
        # Re-render only when the text actually changed.
        if t==self.__text: return
        else: self.__set_text(t)
    # Draw thyself
    # Return updated rectangle if there was an update, None otherwise
    # NOTE(review): despite the comment above, this always returns self.rect.
    def update(self,surface):
        surface.blit(self.img,self.pos)
        self.needs_update=False
        return self.rect
    # Label doesn't receive input.
    def focus_queue(self):
        return []
    # Label doesn't receive input.
    def focus(self):
        return
    def unfocus(self):
        return
    # Handle event, return True if handled
    # If some other widget handled it already 'handled' is True
    def handle(self, event, handled=False):
        # Labels never consume events.
        return False
if __name__ == "__main__":
    # Manual smoke test: four labels exercising the three alignments.
    l1=Label(u'Label One',10,10,150,42)
    l1.configuration.label_align='right'
    l1.init()
    l2=Label(u'label two',10,70,150,40)
    l3=Label(u'label Three',10,130,150,45)
    l3.configuration.label_align='center'
    l3.init()
    l4=Label(u'label Four',10,190,150,50)
    from widgets import Widgets
    scr = pygame.display.set_mode((300,600))
    scr.fill(Config.default_drawing_conf.bckg_color)
    widgets=Widgets();
    widgets.add((l1,l2,l3,l4))
    # Assigning .text after construction triggers a re-render via the setter.
    l4.text='LABEL FOUR'
    widgets.run(scr)
|
unusualcomputers/unusualgui
|
code/label.py
|
label.py
|
py
| 2,516 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "widget.Widget",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "gui_config.Config.default_drawing_conf",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "gui_config.Config",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "widget.Widget.__init__",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "widget.Widget",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "widget.Widget.init",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "widget.Widget",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "pygame.display.set_mode",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "pygame.display",
"line_number": 83,
"usage_type": "attribute"
},
{
"api_name": "gui_config.Config.default_drawing_conf",
"line_number": 84,
"usage_type": "attribute"
},
{
"api_name": "gui_config.Config",
"line_number": 84,
"usage_type": "name"
},
{
"api_name": "widgets.Widgets",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "widgets.add",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "widgets.run",
"line_number": 88,
"usage_type": "call"
}
] |
71700390588
|
from django.shortcuts import render
from django.shortcuts import render, redirect
from .forms import NewUserForm
from django.contrib.auth import login, authenticate
from django.contrib import messages
from django.contrib.auth.forms import AuthenticationForm
from django.core.mail import send_mail, BadHeaderError
from django.http import HttpResponse
from django.contrib.auth.forms import PasswordResetForm
from django.contrib.auth import get_user_model
# The project's active user model (resolved via Django's get_user_model()).
User = get_user_model()
from datetime import datetime
import re
from gardenGame.models import buildingOfTheDay
from pathlib import Path
import os
from datetime import date
from django.http import HttpResponse
# Project root: two levels above this file.
DIR = Path(__file__).resolve().parent.parent
# Per-user tracking files are stored under static/Users/<username>.txt.
directory = "static/Users/"
BASE_DIR = os.path.join(DIR, directory)
# Campus buildings tracked in each user's file; rows are written in this order.
building_list = ["Harrison Building","Amory Building","The Forum",
                 "Business School Building One","Cornwall House",
                 "Northcott Theatre","Geoffrey Pope","Great Hall","Hatherly",
                 "Henry Wellcome | Biocatalysis",
                 "Innovation One SWIoT",
                 "Institute of AIS","INTO Study Centre",
                 "Laver","Living Systems","Mary Harris","Old Library",
                 "Peter Chalk Centre","Physics","Queens","Reed Hall","Reed Mews Wellbeing Centre",
                 "Sir Henry Wellcome Building","Sports Park",
                 "Streatham Court","Student Health Centre","Washington Singer","Xfi"]
# Create your views here.
def homepage(request):
    """Render the main landing page."""
    return render(request, 'homepage.html')
def login_error(request):
    """Render the page shown after a failed login or registration."""
    return render(request, 'loginError.html')
def login_request(request):
    """
    Authenticate a user and, on success, update the daily login streak
    stored in the per-user tracking file, then redirect to /main.

    GET (or invalid POST fall-through) renders the login form.
    """
    if request.method == "POST":
        form = AuthenticationForm(request, data=request.POST)
        if form.is_valid():
            username = form.cleaned_data.get('username')
            password = form.cleaned_data.get('password')
            user = authenticate(username=username, password=password)
            if user is not None:
                login(request, user)
                messages.info(request, f"You are now logged in as {username}.")
                today = date.today()
                d1 = today.strftime("%d/%m/%Y")
                current_date = d1.split("/")
                #read in that users file and store it as array
                #increment the login one, update the date
                #then write the whole file back
                file_contents_array = []
                with open(os.path.join(BASE_DIR, username + ".txt")) as my_file:
                    for line in my_file:
                        file_contents_array.append(line)
                my_file.close()
                # First line holds "login_streak,<count>,<dd/mm/yyyy>".
                login_holder = file_contents_array[0]
                login_data = login_holder.split(",")
                last_logged_date = login_data[2]
                last_logged_date = last_logged_date.split("/")
                # NOTE(review): the branches below compare zero-padded date
                # *strings* field by field — the streak increments when any of
                # day/month/year grew. A datetime.date comparison would be
                # clearer and more robust; confirm intended semantics.
                if (last_logged_date[0] < current_date[0]):
                    login_data[1] = str(int(login_data[1]) + 1)
                    login_data[2] = d1
                elif (last_logged_date[1] < current_date[1]):
                    login_data[1] = str(int(login_data[1]) + 1)
                    login_data[2] = d1
                elif (last_logged_date[2] < current_date[2]):
                    login_data[1] = str(int(login_data[1]) + 1)
                    login_data[2] = d1
                login_holder = ','.join(login_data)
                file_contents_array[0] = login_holder+"\n"
                # Rewrite the whole tracking file with the updated first line.
                fileOverwrite = open(os.path.join(BASE_DIR, form.cleaned_data['username'] + ".txt"), "w")
                for line in file_contents_array:
                    fileOverwrite.write(line)
                fileOverwrite.close()
                return redirect('/main/')
            else:
                messages.error(request, "Invalid username or password.")
                return redirect('/loginError')
        else:
            messages.error(request, "Invalid username or password.")
            return redirect('/loginError')
    form = AuthenticationForm()
    return render(request=request, template_name="login.html", context={"login_form":form})
def register_request(request):
    """
    Handle user sign-up: validate the NewUserForm, optionally grant staff
    rights via the verification code, create the per-user tracking file,
    and log the new user in.

    Review fix: the original opened a second append handle and then called
    ``fileCreate.close()`` instead of ``fileAppend.close()``, leaking the
    append handle — both writes now use context managers.
    """
    if request.method == "POST":
        form = NewUserForm(request.POST)
        if form.is_valid():
            code = form.cleaned_data.get('staffVerification')
            user = User(username=form.cleaned_data.get('username'), email=form.cleaned_data.get('email'))
            user.set_password(form.cleaned_data.get('password1'))
            if code != "":
                # SECURITY(review): staff code is hard-coded — move to settings.
                if code == "54321":
                    user.is_superuser = True
                    user.is_staff = True
                else:
                    messages.error(request, "Unsuccessful registration, Invalid Staff Code")
                    return redirect('/loginError')
            user.save()
            login(request, user)
            messages.success(request, "Registration successful.")
            today = date.today()
            d1 = today.strftime("%d/%m/%Y")
            current_date = d1.split("/")
            # Back-date the "last login" day by one so the first real login
            # counts toward the streak. NOTE(review): day "01" becomes "0" and
            # month/year are not adjusted — confirm this edge case is intended.
            current_date[0] = str(int(current_date[0]) - 1)
            tracking_path = os.path.join(BASE_DIR, form.cleaned_data['username'] + ".txt")
            # "x" mode fails loudly if a tracking file already exists.
            with open(tracking_path, "x") as tracking_file:
                tracking_file.write("login_streak,0," + current_date[0] + "/" + current_date[1] + "/" + current_date[2])
            # One "building,visits,last-visit" row per known building.
            with open(tracking_path, "a") as tracking_file:
                for build in building_list:
                    tracking_file.write(build + ",0,00/00/0000\n")
            return redirect('/main')
        messages.error(request, "Unsuccessful registration. Invalid information.")
        return redirect('/loginError')
    form = NewUserForm()
    return render(request=request, template_name="register.html", context={"register_form":form})
def loginStreak(request, user_id):
    """
    Increment the user's login streak once per day and send them to /main.

    Review fixes: the original referenced ``datatime.today()`` (a NameError
    at runtime; also a datetime-vs-date comparison that would never match),
    and discarded the redirect instead of returning it.
    """
    user = User.objects.get(id=user_id)
    if user.LastLogin != datetime.now().date():
        user.LoginStreak = user.LoginStreak + 1
        user.LastLogin = datetime.now().date()
    user.save()
    return redirect("/main")
def simple_function(request):
    """Record a building check-in for the current user.

    Parses the building name out of the request's URL-encoded query string,
    bumps that building's streak at most once per calendar day, grants the
    building-of-the-day reward when applicable, and redirects to /main.
    """
    # The building name arrives URL-encoded inside the query string, e.g.
    # "...?You%20have%20checked%20in%20at%20:%20<name>'>%20&%20...".
    listTemp = str(request).split("?")
    TempUsername = request.user.username
    Temp = listTemp[1].split("%20&%20")[0]
    building = re.sub(r'%20', ' ', Temp)
    building = re.sub(r"'>", '', building)
    building = re.sub(r'You%20have%20checked%20in%20at%20:%20', '', building)
    ##May need to correct string to be int and then put in error handing
    user = User.objects.get(username=TempUsername)
    # Maps each building name to its (streak field, last-login field) pair on
    # the User model. A few pairs are intentionally irregular to match the
    # existing model fields (e.g. Queens_Streak/Queen_lastLogin,
    # Into_Streak/into_lastLogin) — preserved exactly from the original chain.
    building_fields = {
        "Harrison Building": ("Harrison_Streak", "Harrison_lastLogin"),
        "Amory Building": ("Amory_Streak", "Amory_lastLogin"),
        "The Forum": ("Forum_Streak", "Forum_lastLogin"),
        "Business School Building One": ("Business_Streak", "Business_lastLogin"),
        "Cornwall House Swimming Pool": ("Cornwall_Streak", "Cornwall_lastLogin"),
        "Northcott Theatre": ("Northcott_Streak", "Northcott_lastLogin"),
        "Geoffrey Pope": ("Geoffrey_Streak", "Geoffrey_lastLogin"),
        "Great Hall": ("GreatHall_Streak", "GreatHall_lastLogin"),
        "Hatherly": ("Hatherly_Streak", "Hatherly_lastLogin"),
        "Henry Welcome Building for Biocatalysis": ("Henry_Streak", "Henry_lastLogin"),
        "Innovation One | South West Institute of Technology": ("Innovation_One_Streak", "Innovation_One_lastLogin"),
        "Institute of Arab and Islamic Studies": ("Iais_Streak", "Iais_lastLogin"),
        "INTO International Study Centre": ("Into_Streak", "into_lastLogin"),
        "Laver": ("Laver_Streak", "Laver_lastLogin"),
        "Library": ("Library_Streak", "Library_lastLogin"),
        "Living Systems": ("Living_Streak", "Living_lastLogin"),
        "Mary Harris Memorial Chapel": ("Mary_Streak", "Mary_lastLogin"),
        "Old Library": ("Old_Library_Streak", "Old_Library_lastLogin"),
        "Peter Chalk Centre": ("Peter_Streak", "Peter_lastLogin"),
        "Physics": ("Physics_Streak", "Physics_lastLogin"),
        "Queens": ("Queens_Streak", "Queen_lastLogin"),
        "Reed Hall": ("Reed_Streak", "Reed_lastLogin"),
        "Reed Mews Wellbeing Centre": ("Wellbeing_Streak", "Wellbeing_lastLogin"),
        "Sir Henry Welcome Building for Mood Disorders Research": ("Mood_Streak", "Mood_lastLogin"),
        "Sports Park": ("Sports_Streak", "Sports_lastLogin"),
        "Streatham Court": ("Streatham_Streak", "Streatham_lastLogin"),
        "Student Health Centre": ("Health_Streak", "Health_lastLogin"),
        "Washington Singer": ("Washington_Streak", "Washington_lastLogin"),
        "Xfi": ("Xfi_Streak", "Xfi_lastLogin"),
    }
    today_str = datetime.today().strftime('%Y-%m-%d')
    fields = building_fields.get(building)
    if fields is not None:
        streak_attr, last_attr = fields
        # Only count one check-in per building per day.
        if str(getattr(user, last_attr)) != today_str:
            setattr(user, streak_attr, getattr(user, streak_attr) + 1)
            setattr(user, last_attr, datetime.now().date())
    # Find today's building of the day, if one is configured.
    buildingsOTDList = buildingOfTheDay.objects.all()
    buildingOTD = None
    for i in buildingsOTDList:
        if str(i.date) == today_str:
            buildingOTD = i
    # Fix: the original dereferenced buildingOTD unconditionally and crashed
    # with AttributeError whenever no building-of-the-day row exists today.
    if buildingOTD is not None and buildingOTD.name == building:
        reward = buildingOTD.reward
        # Rewards are stored as a single "*"-separated string on the user.
        if user.UserRewards != "":
            user.UserRewards = user.UserRewards + "*" + reward
        else:
            user.UserRewards = reward
    user.save()
    return redirect("/main")
def test(request):
    """Debug endpoint: log when the named button was clicked, then bounce home."""
    clicked = request.GET.get('NameOfYourButton') == 'YourValue'
    if clicked:
        print('\nuser')
        print('\nuser clicked button')
    return HttpResponse("""<html><script>window.location.replace('/')</script></html>""")
|
KeanDelly/GrowExplore
|
worldBuilder/Login/views.py
|
views.py
|
py
| 13,994 |
python
|
en
|
code
| 6 |
github-code
|
6
|
[
{
"api_name": "django.contrib.auth.get_user_model",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 28,
"usage_type": "attribute"
},
{
"api_name": "django.shortcuts.render",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.forms.AuthenticationForm",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.authenticate",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.login",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "django.contrib.messages.info",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "django.contrib.messages",
"line_number": 62,
"usage_type": "name"
},
{
"api_name": "datetime.date.today",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "datetime.date",
"line_number": 64,
"usage_type": "name"
},
{
"api_name": "os.path.join",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 72,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 99,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 99,
"usage_type": "attribute"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 106,
"usage_type": "call"
},
{
"api_name": "django.contrib.messages.error",
"line_number": 108,
"usage_type": "call"
},
{
"api_name": "django.contrib.messages",
"line_number": 108,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 109,
"usage_type": "call"
},
{
"api_name": "django.contrib.messages.error",
"line_number": 111,
"usage_type": "call"
},
{
"api_name": "django.contrib.messages",
"line_number": 111,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 112,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.forms.AuthenticationForm",
"line_number": 113,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 114,
"usage_type": "call"
},
{
"api_name": "forms.NewUserForm",
"line_number": 119,
"usage_type": "call"
},
{
"api_name": "django.contrib.messages.error",
"line_number": 129,
"usage_type": "call"
},
{
"api_name": "django.contrib.messages",
"line_number": 129,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 130,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.login",
"line_number": 133,
"usage_type": "call"
},
{
"api_name": "django.contrib.messages.success",
"line_number": 134,
"usage_type": "call"
},
{
"api_name": "django.contrib.messages",
"line_number": 134,
"usage_type": "name"
},
{
"api_name": "datetime.date.today",
"line_number": 135,
"usage_type": "call"
},
{
"api_name": "datetime.date",
"line_number": 135,
"usage_type": "name"
},
{
"api_name": "os.path.join",
"line_number": 139,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 139,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 142,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 142,
"usage_type": "attribute"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 146,
"usage_type": "call"
},
{
"api_name": "django.contrib.messages.error",
"line_number": 147,
"usage_type": "call"
},
{
"api_name": "django.contrib.messages",
"line_number": 147,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 148,
"usage_type": "call"
},
{
"api_name": "forms.NewUserForm",
"line_number": 149,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 150,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 156,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 156,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 158,
"usage_type": "call"
},
{
"api_name": "re.sub",
"line_number": 166,
"usage_type": "call"
},
{
"api_name": "re.sub",
"line_number": 167,
"usage_type": "call"
},
{
"api_name": "re.sub",
"line_number": 168,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.today",
"line_number": 174,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 174,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.now",
"line_number": 176,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 176,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.today",
"line_number": 179,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 179,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.now",
"line_number": 181,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 181,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.today",
"line_number": 184,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 184,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.now",
"line_number": 186,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 186,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.today",
"line_number": 189,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 189,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.now",
"line_number": 191,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 191,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.today",
"line_number": 194,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 194,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.now",
"line_number": 196,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 196,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.today",
"line_number": 199,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 199,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.now",
"line_number": 201,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 201,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.today",
"line_number": 204,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 204,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.now",
"line_number": 206,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 206,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.today",
"line_number": 209,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 209,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.now",
"line_number": 211,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 211,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.today",
"line_number": 214,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 214,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.now",
"line_number": 216,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 216,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.today",
"line_number": 219,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 219,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.now",
"line_number": 221,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 221,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.today",
"line_number": 224,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 224,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.now",
"line_number": 226,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 226,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.today",
"line_number": 229,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 229,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.now",
"line_number": 231,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 231,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.today",
"line_number": 234,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 234,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.now",
"line_number": 236,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 236,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.today",
"line_number": 239,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 239,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.now",
"line_number": 241,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 241,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.today",
"line_number": 244,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 244,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.now",
"line_number": 246,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 246,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.today",
"line_number": 249,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 249,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.now",
"line_number": 251,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 251,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.today",
"line_number": 254,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 254,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.now",
"line_number": 256,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 256,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.today",
"line_number": 259,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 259,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.now",
"line_number": 261,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 261,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.today",
"line_number": 264,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 264,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.now",
"line_number": 266,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 266,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.today",
"line_number": 269,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 269,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.now",
"line_number": 271,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 271,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.today",
"line_number": 274,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 274,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.now",
"line_number": 276,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 276,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.today",
"line_number": 279,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 279,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.now",
"line_number": 281,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 281,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.today",
"line_number": 284,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 284,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.now",
"line_number": 286,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 286,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.today",
"line_number": 289,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 289,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.now",
"line_number": 291,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 291,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.today",
"line_number": 294,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 294,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.now",
"line_number": 296,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 296,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.today",
"line_number": 299,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 299,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.now",
"line_number": 301,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 301,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.today",
"line_number": 304,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 304,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.now",
"line_number": 306,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 306,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.today",
"line_number": 309,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 309,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.now",
"line_number": 311,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 311,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.today",
"line_number": 314,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 314,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.now",
"line_number": 316,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 316,
"usage_type": "name"
},
{
"api_name": "gardenGame.models.buildingOfTheDay.objects.all",
"line_number": 319,
"usage_type": "call"
},
{
"api_name": "gardenGame.models.buildingOfTheDay.objects",
"line_number": 319,
"usage_type": "attribute"
},
{
"api_name": "gardenGame.models.buildingOfTheDay",
"line_number": 319,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.today",
"line_number": 322,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 322,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 331,
"usage_type": "call"
},
{
"api_name": "django.http.HttpResponse",
"line_number": 338,
"usage_type": "call"
}
] |
6922603466
|
import os
# python -m pip install --upgrade pip
# python -m pip install --upgrade Pillow
from PIL import Image
# pip install numpy
import numpy as np
#######################################################################
# Approximate RGB values for the 16 Minecraft dye colours as they render on
# an in-game map; ``closest`` snaps arbitrary pixel colours to this palette.
black=[25,25,25]
blue=[50,75,175]
brown=[100,75,50]
cyan=[75,125,151]
gray=[75,75,75]
green=[100,125,50]
light_blue=[100,151,213]
light_gray=[151,151,151]
lime=[125,201,25]
magenta=[175,75,213]
orange=[213,125,50]
pink=[238,125,162]
purple=[125,62,175]
red=[151,50,50]
white=[251,251,251]
yellow=[225,225,50]
# Palette order is relied upon by the *_art lookup functions below.
basic_colors = [black, blue, brown, cyan, gray, green, light_blue, light_gray, lime, magenta, orange, pink, purple, red, white, yellow]
#######################################################################
def closest(color):
    """Return the entry of ``basic_colors`` nearest to *color*.

    Distance is Euclidean in RGB space; ties resolve to the first
    palette entry, matching the original implementation.
    (Adapted from https://stackoverflow.com/a/54244301/1106708)
    """
    palette = np.array(basic_colors)
    target = np.array(color)
    dists = np.sqrt(((palette - target) ** 2).sum(axis=1))
    return palette[int(np.argmin(dists))]
def concrete_art(closest_color):
    """Map a palette colour (as returned by ``closest``) to a concrete block id.

    Returns None when the colour matches no palette entry.
    """
    lookup = [
        (black, "black_concrete"), (blue, "blue_concrete"),
        (brown, "brown_concrete"), (cyan, "cyan_concrete"),
        (gray, "gray_concrete"), (green, "green_concrete"),
        (light_blue, "light_blue_concrete"), (light_gray, "light_gray_concrete"),
        (lime, "lime_concrete"), (magenta, "magenta_concrete"),
        (orange, "orange_concrete"), (pink, "pink_concrete"),
        (purple, "purple_concrete"), (red, "red_concrete"),
        (white, "white_concrete"), (yellow, "yellow_concrete"),
    ]
    for rgb, block_id in lookup:
        if (closest_color == rgb).all():
            return block_id
def glass_art(closest_color):
    """Map a palette colour (as returned by ``closest``) to a stained-glass id.

    Returns None when the colour matches no palette entry (mirrors
    ``concrete_art``).
    """
    # Fix: the original returned "cyan_stained_glass " with a trailing
    # space, producing an invalid block id in the generated setblock
    # command. Deriving every id from the colour name prevents a recurrence.
    # Order must match ``basic_colors``.
    base_names = ["black", "blue", "brown", "cyan", "gray", "green",
                  "light_blue", "light_gray", "lime", "magenta", "orange",
                  "pink", "purple", "red", "white", "yellow"]
    for rgb, name in zip(basic_colors, base_names):
        if (closest_color == rgb).all():
            return name + "_stained_glass"
def create_mcfunction(image_file_name, img_type):
    """Generate a .mcfunction that draws *image_file_name* out of blocks.

    img_type "player" draws concrete art at the caller's feet; "sky" draws
    stained glass at y=319. Fully transparent RGBA pixels become air.

    Raises ValueError for an unknown img_type.
    """
    if img_type not in ("player", "sky"):
        # Fix: the original left ``func`` unbound for unknown types and
        # crashed later with a NameError; fail fast with a clear message.
        raise ValueError(f"unknown img_type: {img_type!r}")
    im = Image.open(image_file_name)
    pix = im.load()
    h = im.size[0]  # image width, used as the x extent
    w = im.size[1]  # image height, used as the z extent
    offset = (h + w) // 4  # roughly centres the art on the caller
    fileName = f"datapacks/img/data/img/functions/items/{img_type}/{image_file_name.split('.')[0]}.mcfunction"
    # Fix: the original os.remove() raised FileNotFoundError on the first
    # run; opening in "w" truncates an existing file and creates a missing
    # one, and "with" guarantees the handle is closed.
    with open(fileName, 'w') as mcfunction:
        for x in range(h):
            for y in range(w):
                rgb = pix[x, y]
                color = [rgb[0], rgb[1], rgb[2]]
                closest_color = closest(color)
                if img_type == "player":
                    func = f"setblock ~{x-offset} ~ ~{y-offset} "
                else:
                    func = f"setblock ~{x-offset} 319 ~{y-offset} "
                # Fully transparent pixels (RGBA all zero) are placed as air.
                if str(rgb) == "(0, 0, 0, 0)":
                    func += "air"
                elif img_type == "player":
                    func += concrete_art(closest_color)
                else:
                    func += glass_art(closest_color)
                mcfunction.write(func + '\n')
# Generate both variants for the sample image: ground-level concrete art
# and sky-level stained glass.
create_mcfunction("apple.png", "player")
create_mcfunction("apple.png", "sky")
|
kirbycope/map-markers-java
|
img.py
|
img.py
|
py
| 4,689 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "numpy.array",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "numpy.sqrt",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "numpy.sum",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "numpy.where",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "numpy.amin",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "PIL.Image.open",
"line_number": 111,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 111,
"usage_type": "name"
},
{
"api_name": "os.remove",
"line_number": 117,
"usage_type": "call"
}
] |
71483943548
|
# SPDX-License-Identifier: MIT
# (c) 2023 knuxify and Ear Tag contributors
from gi.repository import GObject, GLib
import threading
import time
class EartagBackgroundTask(GObject.Object):
    """
    Convenience class for creating tasks that run in the background
    without freezing the UI.
    Provides a "progress" property that can be used by target functions
    to signify a progress change. This is a float from 0 to 1 and is
    passed directly to GtkProgressBar.
    Also provides an optional "halt" property; target functions can
    check for this property to stop an operation early.
    Remember to pass all code that interacts with GTK through
    GLib.idle_add().
    """
    def __init__(self, target, *args, **kwargs):
        super().__init__()
        self._progress = 0
        self.target = target
        self.reset(args, kwargs)
    def wait_for_completion(self):
        """Block the calling thread until the worker thread exits."""
        # Poll rather than join so callers keep the original 0.25 s cadence.
        while self.thread.is_alive():
            time.sleep(0.25)
    def stop(self):
        """Request an early halt, wait for the worker, then clear the flag."""
        self.halt = True
        self.wait_for_completion()
        self.halt = False
    def run(self):
        """Start the worker thread (one-shot; call reset() to rearm)."""
        self.thread.start()
    def reset(self, args=None, kwargs=None):
        """Re-creates the inner thread with new args and kwargs.

        Fix: the original used mutable default arguments (args=[],
        kwargs=[]) — and kwargs even defaulted to a *list*, the wrong
        type. Empty args/kwargs are equivalent to omitting them, so the
        original four-way branch collapses into one Thread() call.
        """
        self._is_done = False
        self.halt = False
        self.failed = False
        self.thread = threading.Thread(
            target=self.target, daemon=True,
            args=tuple(args) if args else (),
            kwargs=dict(kwargs) if kwargs else {},
        )
    @GObject.Property(type=float, minimum=0, maximum=1)
    def progress(self):
        """
        Float from 0 to 1 signifying the current progress of the operation.
        When the task is done, this automatically resets to 0.
        This value is set by the target function.
        """
        return self._progress
    @progress.setter
    def progress(self, value):
        self._progress = value
    @GObject.Signal
    def task_done(self):
        # Default handler for the 'task-done' signal: clear progress and
        # mark the task as finished.
        self.reset_progress()
        self._is_done = True
    @GObject.Property(type=bool, default=False)
    def is_running(self):
        # True while the worker thread is alive; False before any thread
        # has been created.
        if not self.thread:
            return False
        return self.thread.is_alive()
    def reset_progress(self):
        """Reset the progress property to 0 (call from the main thread)."""
        self.props.progress = 0
    def set_progress_threadsafe(self, value):
        """
        Wrapper around self.props.progress that updates the progress, wrapped
        around GLib.idle_add. This is the preferred way for users to set the
        progress variable.
        """
        GLib.idle_add(self.set_property, 'progress', value)
    def increment_progress(self, value):
        """
        Wrapper around self.props.progress that increments the progress, wrapped
        around GLib.idle_add. This is the preferred way for users to increment the
        progress variable.
        """
        self.set_progress_threadsafe(self.props.progress + value)
    def emit_task_done(self):
        """
        Wrapper around self.emit('task-done') that is wrapped around
        GLib.idle_add. This is the preferred way for users to emit the
        task-done signal.
        """
        GLib.idle_add(self.emit, 'task-done')
|
knuxify/eartag
|
src/utils/bgtask.py
|
bgtask.py
|
py
| 3,589 |
python
|
en
|
code
| 67 |
github-code
|
6
|
[
{
"api_name": "gi.repository.GObject.Object",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "gi.repository.GObject",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "time.sleep",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "threading.Thread",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "threading.Thread",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "threading.Thread",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "threading.Thread",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "gi.repository.GObject.Property",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "gi.repository.GObject",
"line_number": 67,
"usage_type": "name"
},
{
"api_name": "gi.repository.GObject.Signal",
"line_number": 81,
"usage_type": "attribute"
},
{
"api_name": "gi.repository.GObject",
"line_number": 81,
"usage_type": "name"
},
{
"api_name": "gi.repository.GObject.Property",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "gi.repository.GObject",
"line_number": 86,
"usage_type": "name"
},
{
"api_name": "gi.repository.GLib.idle_add",
"line_number": 101,
"usage_type": "call"
},
{
"api_name": "gi.repository.GLib",
"line_number": 101,
"usage_type": "name"
},
{
"api_name": "gi.repository.GLib.idle_add",
"line_number": 117,
"usage_type": "call"
},
{
"api_name": "gi.repository.GLib",
"line_number": 117,
"usage_type": "name"
}
] |
28108895632
|
import mat73
import matplotlib.pyplot as plt

FORMAT = 'pdf'
plt.rcParams.update({'font.size': 22})

# Datasets and deletion budgets whose result files are loaded below.
datasets = ["Brain", "MAG-10", "Cooking", "DAWN", "Walmart-Trips", "Trivago"]
budgets = [0, .01, .05, .1, .15, .2, .25]
budget_strings = ['0.0', '0.01', '0.05', '0.1', '0.15', '0.2', '0.25']
n = [638, 80198, 6714, 2109, 88837, 207974]

# Collect the three metrics for every (dataset, budget) pair.
bicrit_apx = []
bicrit_beta = []
greedy_satisfaction = []
for idx, dataset in enumerate(datasets):
    print(idx)
    bicrit_apx.append([])
    bicrit_beta.append([])
    greedy_satisfaction.append([])
    for budget_str in budget_strings:
        data = mat73.loadmat("Output/RECC/" + dataset + "_b" + budget_str + "_results.mat")
        bicrit_apx[idx].append(data["ratio"])
        bicrit_beta[idx].append(data["budget_ratio"])
        greedy_satisfaction[idx].append(100 * data["greedy_satisfaction"])

# Okabe-Ito colorblind-safe palette.
orange = [x / 255.0 for x in [230, 159, 0]]
skyblue = [x / 255.0 for x in [86, 180, 233]]
bluegreen = [x / 255.0 for x in [0, 158, 115]]
blue = [x / 255.0 for x in [0, 114, 178]]
vermillion = [x / 255.0 for x in [213, 94, 0]]
redpurple = [x / 255.0 for x in [204, 121, 167]]
colors = [orange, skyblue, bluegreen, redpurple, vermillion, blue]
legend_text = ["Brain", "MAG-10", "Cooking", "DAWN", "Walmart", "Trivago"]
markers = ['^', 'v', 'o', 's', '<', '>']

# Budgets as percentages for the x axis.
x = [budg * 100 for budg in budgets]

# One figure per metric: (series, y label, output path).
figure_specs = [
    (bicrit_apx, r"Observed $\alpha$", f'Plots/r_alphas.{FORMAT}'),
    (bicrit_beta, r"Observed $\beta$", f'Plots/r_betas.{FORMAT}'),
    (greedy_satisfaction, r"Edge Satisfaction (% of $E$)", f'Plots/r_greedy_satisfactions.{FORMAT}'),
]
for series, ylabel, outfile in figure_specs:
    fig, ax = plt.subplots()
    for idx in range(len(datasets)):
        ax.plot(x, series[idx], label=legend_text[idx], color=colors[idx], marker=markers[idx])
    ax.set_xlabel(r"$b$ = Deletion Budget (% of $V$)")
    ax.set_ylabel(ylabel)
    ax.legend(fontsize=16)
    fig.savefig(outfile, format=FORMAT, bbox_inches='tight')
|
TheoryInPractice/overlapping-ecc
|
Exp1-Algorithm-Evaluation/R_Plots.py
|
R_Plots.py
|
py
| 2,373 |
python
|
en
|
code
| null |
github-code
|
6
|
[
{
"api_name": "matplotlib.pyplot.rcParams.update",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.rcParams",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "mat73.loadmat",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.subplots",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 40,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.subplots",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 49,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.subplots",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 57,
"usage_type": "name"
}
] |
72761928188
|
import nussl
import torch
from torch import nn
from torch.nn.utils import weight_norm
from nussl.ml.networks.modules import (
Embedding, DualPath, DualPathBlock, STFT,
LearnedFilterBank, AmplitudeToDB, RecurrentStack,
MelProjection, BatchNorm, InstanceNorm, ShiftAndScale
)
import numpy as np
from . import utils, argbind
from typing import Dict, List
# ----------------------------------------------------
# --------------------- SEPARATORS -------------------
# ----------------------------------------------------
def dummy_signal():
    """Return a tiny placeholder AudioSignal (1 channel, 100 random samples).

    Used only to satisfy the nussl separator constructors, which require an
    AudioSignal at build time.
    """
    audio = np.random.rand(1, 100)
    return nussl.AudioSignal(audio_data_array=audio, sample_rate=100)
@argbind.bind_to_parser()
def deep_mask_estimation(
    device : torch.device,
    model_path : str = 'checkpoints/best.model.pth',
    mask_type : str = 'soft',
):
    """Build a nussl DeepMaskEstimation separation object.

    Parameters
    ----------
    device : str
        Either 'cuda' (needs GPU) or 'cpu'.
    model_path : str, optional
        Path to the model, by default 'checkpoints/best.model.pth'
    mask_type : str, optional
        Type of mask to use, either 'soft' or 'binary', by
        default 'soft'.
    """
    return nussl.separation.deep.DeepMaskEstimation(
        dummy_signal(),
        model_path=model_path,
        device=device,
        mask_type=mask_type,
    )
@argbind.bind_to_parser()
def deep_audio_estimation(
    device : torch.device,
    model_path : str = 'checkpoints/best.model.pth',
):
    """
    Creates a DeepAudioEstimation Separation object.

    Parameters
    ----------
    device : str
        Either 'cuda' (needs GPU) or 'cpu'.
    model_path : str, optional
        Path to the model, by default 'checkpoints/best.model.pth'
    """
    separator = nussl.separation.deep.DeepAudioEstimation(
        dummy_signal(), model_path=model_path, device=device,
    )
    return separator
# ----------------------------------------------------
# --------------- MASK ESTIMATION MODELS -------------
# ----------------------------------------------------
class MaskInference(nn.Module):
    """Mask-inference source-separation network.

    Pipeline: mixture magnitude spectrogram -> dB scale -> batch norm ->
    (bi)LSTM stack -> per-source mask embedding; the masks are multiplied
    back onto the mixture magnitude to produce per-source estimates.
    """

    def __init__(self, num_features, num_audio_channels, hidden_size,
                 num_layers, bidirectional, dropout, num_sources,
                activation='sigmoid'):
        super().__init__()

        self.amplitude_to_db = AmplitudeToDB()
        self.input_normalization = BatchNorm(num_features)
        self.recurrent_stack = RecurrentStack(
            num_features * num_audio_channels, hidden_size,
            num_layers, bool(bidirectional), dropout
        )
        # A bidirectional RNN concatenates both directions, doubling the
        # feature size seen by the embedding layer.
        hidden_size = hidden_size * (int(bidirectional) + 1)
        self.embedding = Embedding(num_features, hidden_size,
                                   num_sources, activation,
                                   num_audio_channels)

    def forward(self, data):
        """Map a mix magnitude spectrogram to {'mask', 'estimates'}."""
        mix_magnitude = data # save for masking

        data = self.amplitude_to_db(mix_magnitude)
        data = self.input_normalization(data)
        data = self.recurrent_stack(data)
        mask = self.embedding(data)
        # unsqueeze(-1) broadcasts the mixture over the source dimension.
        estimates = mix_magnitude.unsqueeze(-1) * mask

        output = {
            'mask': mask,
            'estimates': estimates
        }
        return output

    # Added function
    @staticmethod
    @argbind.bind_to_parser()
    def build(num_features, num_audio_channels, hidden_size,
              num_layers, bidirectional, dropout, num_sources,
              activation='sigmoid'):
        """Build a nussl SeparationModel that wraps MaskInference with the
        given hyperparameters."""
        # Step 1. Register our model with nussl
        nussl.ml.register_module(MaskInference)

        # Step 2a: Define the building blocks.
        modules = {
            'model': {
                'class': 'MaskInference',
                'args': {
                    'num_features': num_features,
                    'num_audio_channels': num_audio_channels,
                    'hidden_size': hidden_size,
                    'num_layers': num_layers,
                    'bidirectional': bidirectional,
                    'dropout': dropout,
                    'num_sources': num_sources,
                    'activation': activation
                }
            }
        }

        # Step 2b: Define the connections between input and output.
        # Here, the mix_magnitude key is the only input to the model.
        connections = [
            ['model', ['mix_magnitude']]
        ]

        # Step 2c. The model outputs a dictionary, which SeparationModel will
        # change the keys to model:mask, model:estimates. The lines below
        # alias model:mask to just mask, and model:estimates to estimates.
        # This will be important later when we actually deploy our model.
        for key in ['mask', 'estimates']:
            modules[key] = {'class': 'Alias'}
            connections.append([key, [f'model:{key}']])

        # Step 2d. There are two outputs from our SeparationModel: estimates and mask.
        # Then put it all together.
        output = ['estimates', 'mask',]
        config = {
            'name': 'MaskInference',
            'modules': modules,
            'connections': connections,
            'output': output
        }
        # Step 3. Instantiate the model as a SeparationModel.
        return nussl.ml.SeparationModel(config)
# ----------------------------------------------------
# --------------- AUDIO ESTIMATION MODELS ------------
# ----------------------------------------------------
class BaseAudioModel(nn.Module):
    """Common base class for audio-estimation models: provides a config()
    helper that registers the subclass with nussl and builds the
    SeparationModel wiring (mix_audio -> audio)."""

    def __init__(self, *args, **kwargs):
        super().__init__()

    @classmethod
    def config(cls, **kwargs):
        """Register `cls` with nussl and return its SeparationModel config.

        `kwargs` are forwarded verbatim as the module's constructor args.
        """
        nussl.ml.register_module(cls)
        audio_module = {
            'class': cls.__name__,
            'args': kwargs
        }
        return {
            'modules': {'audio': audio_module},
            'connections': [
                ['audio', ['mix_audio']]
            ],
            'output': ['audio'],
        }
# ----------------------------------------------------
# ------------- REGISTER MODELS WITH NUSSL -----------
# ----------------------------------------------------
nussl.ml.register_module(MaskInference)
|
bfredl/tutorial
|
common/models.py
|
models.py
|
py
| 6,359 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "nussl.AudioSignal",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "numpy.random.rand",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "torch.device",
"line_number": 26,
"usage_type": "attribute"
},
{
"api_name": "nussl.separation.deep.DeepMaskEstimation",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "nussl.separation",
"line_number": 43,
"usage_type": "attribute"
},
{
"api_name": "torch.device",
"line_number": 51,
"usage_type": "attribute"
},
{
"api_name": "nussl.separation.deep.DeepAudioEstimation",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "nussl.separation",
"line_number": 67,
"usage_type": "attribute"
},
{
"api_name": "torch.nn.Module",
"line_number": 76,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 76,
"usage_type": "name"
},
{
"api_name": "nussl.ml.networks.modules.AmplitudeToDB",
"line_number": 82,
"usage_type": "call"
},
{
"api_name": "nussl.ml.networks.modules.BatchNorm",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "nussl.ml.networks.modules.RecurrentStack",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "nussl.ml.networks.modules.Embedding",
"line_number": 89,
"usage_type": "call"
},
{
"api_name": "nussl.ml.register_module",
"line_number": 115,
"usage_type": "call"
},
{
"api_name": "nussl.ml",
"line_number": 115,
"usage_type": "attribute"
},
{
"api_name": "nussl.ml.SeparationModel",
"line_number": 158,
"usage_type": "call"
},
{
"api_name": "nussl.ml",
"line_number": 158,
"usage_type": "attribute"
},
{
"api_name": "torch.nn.Module",
"line_number": 164,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 164,
"usage_type": "name"
},
{
"api_name": "nussl.ml.register_module",
"line_number": 170,
"usage_type": "call"
},
{
"api_name": "nussl.ml",
"line_number": 170,
"usage_type": "attribute"
},
{
"api_name": "nussl.ml.register_module",
"line_number": 189,
"usage_type": "call"
},
{
"api_name": "nussl.ml",
"line_number": 189,
"usage_type": "attribute"
}
] |
23060222816
|
import math
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches

# Computed model data: column 0 is the age label (t/t_ms), column 1 the value.
compdata = np.loadtxt('compdata.txt')
agelabel = compdata[:, 0]
compdata = compdata[:, 1]

# Observational data: column 1 is the density, column 3 its uncertainty.
obsdata = np.loadtxt('obsdata.txt')
error = obsdata[:, 3]
obsdata = obsdata[:, 1]

#plt.plot(agelabel, compdata, 'r',label='Computed data')
plt.errorbar(agelabel, obsdata, yerr=error, fmt='', label='Observational data')
#legend = plt.legend(loc='upper left', shadow=True)

# `ymin` was removed from plt.ylim in Matplotlib 3.3; `bottom` is the
# supported keyword.
plt.ylim(bottom=0.1)
# Raw strings so the LaTeX backslashes are not parsed as escape sequences
# (plain '$\mathrm{...}$' triggers invalid-escape warnings).
plt.xlabel(r'$\mathrm{t}/\mathrm{t}_{ms}$')
plt.ylabel('Probability Density')
plt.show()
|
AndreasMu/Bachelor
|
Graphs and Tex/obsdataplot.py
|
obsdataplot.py
|
py
| 554 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "numpy.loadtxt",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "numpy.loadtxt",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.errorbar",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.ylim",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xlabel",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.ylabel",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 20,
"usage_type": "name"
}
] |
8774738477
|
import gym
import numpy as np
from cartpole_deep_net import CartpoleDeepNet
class CartPole:
    """Gym CartPole-v1 wrapper exposing the minimal interface the DQN
    trainer needs (reset / get_state / play_step)."""

    def __init__(self, render_mode='None'):
        # NOTE(review): the default render_mode is the *string* 'None', not
        # the None object — confirm this is what gym.make expects here.
        self.env_title = 'CartPole-v1'
        self.env=gym.make(self.env_title, render_mode=render_mode)
        self.single_state_shape_len = 1
        # Mean score at which training is considered good enough.
        self.desired_mean_score = 130
        self.observation_space_shape = self.env.observation_space.shape
        self.no_of_actions = self.env.action_space.n
        self.hidden_layers = [128]
        self.model = CartpoleDeepNet(self.observation_space_shape[0], self.hidden_layers, self.no_of_actions)
        self.model_file_name = 'cartpole_model.pth'
        self.reset()

    def reset(self):
        """Start a fresh episode: zero the score and reset the environment."""
        self.score = 0
        # Newer gym versions return an (observation, info) tuple here;
        # get_state() below unwraps it.
        self.state = self.env.reset()

    def get_state(self):
        """Return the raw observation vector, unwrapping gym's (obs, info)
        reset tuple when necessary."""
        if len(self.state) != self.observation_space_shape[0]:
            return self.state[0]
        return self.state

    def play_step(self, action):
        """Apply a one-hot `action` list; return (reward, done, score)."""
        move = action.index(1)  # index of the 1 in the one-hot action
        (state, reward, terminated, truncated , _) = self.env.step(move)
        done = truncated or terminated
        self.state = state
        if not done:
            self.score += 1
        return reward, done, self.score
|
asharali001/Deep-Q-Learning
|
cartpole.py
|
cartpole.py
|
py
| 1,231 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "gym.make",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "cartpole_deep_net.CartpoleDeepNet",
"line_number": 16,
"usage_type": "call"
}
] |
70939657148
|
from selenium import webdriver
import requests
from bs4 import BeautifulSoup
from pymongo import MongoClient
import time
import datetime
def get_page(url):
    """Fetch `url` with a desktop-browser User-Agent and return the
    Response, forced to utf-8."""
    headers = {
        "user-agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_0) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.56 Safari/535.11"
    }
    response = requests.get(url, headers=headers)
    response.encoding = 'utf-8'
    return response
def parse_page(html, addr):
    """Extract title/content/date/url from an article page and persist it.

    Parameters
    ----------
    html : str
        Page source, as returned by get_page(...).text.
    addr : selenium WebElement
        The article's anchor element; its href is stored with the record.
    """
    doc = BeautifulSoup(html, 'lxml')
    title = doc.select('h1')
    # Pages without an <h1> are not article pages; skip them silently.
    if not title:
        return
    articles = doc.select('#artibody p')
    # ''.join avoids the quadratic `content += ...` loop; renamed the record
    # so it no longer shadows the builtin `dict`.
    content = ''.join(article.get_text() for article in articles)
    record = {
        'date': time.strftime('%Y.%m.%d', time.localtime(time.time())),
        'title': title[0].get_text().strip(),
        'content': content,
        'url': addr.get_attribute('href'),
    }
    write_in_database(record)
def write_in_database(record):
    """Insert the article record into MongoDB (xinlang.articles) unless an
    article with the same title is already stored.

    NOTE(review): a new MongoClient is opened on every call; a module-level
    client would avoid reconnecting, but this matches the crawler's volume.
    """
    client = MongoClient('mongodb://localhost:27017/')
    database = client.xinlang
    collection = database.articles
    # count_documents({}) replaces the deprecated find().count().
    record['Id'] = collection.count_documents({})
    print(record)
    if collection.find_one({'title': record['title']}) is None:
        # insert_one replaces the deprecated Collection.insert.
        collection.insert_one(record)
def main():
    """Open the Sina mobile front page and crawl every feed-card article."""
    url = 'https://mobile.sina.com.cn/'
    browser = webdriver.Chrome()
    browser.get(url)
    # Selenium 4 removed find_elements_by_css_selector; the By API below
    # also works on Selenium 3.x.
    from selenium.webdriver.common.by import By
    addrs = browser.find_elements(By.CSS_SELECTOR, '#feedCard #feedCardContent .feed-card-item h2 a')
    # Each article is fetched with requests; the browser stays on the index
    # page, so the collected elements remain attached while we iterate.
    for addr in addrs:
        html = get_page(addr.get_attribute('href')).text
        parse_page(html, addr)
if __name__ == '__main__':
    while(True):  # poll once a minute; run the crawl at 09:00 and 21:00
        now = datetime.datetime.now()
        if (now.hour == 9 or now.hour == 21) and now.minute == 0 :
            main()
        time.sleep(60)
    ##ok
|
lzzandsx/lizhengzhao_python_homework
|
xinlang.py
|
xinlang.py
|
py
| 1,935 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "requests.get",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "time.strftime",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "time.localtime",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "pymongo.MongoClient",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver.Chrome",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver",
"line_number": 49,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.now",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 59,
"usage_type": "attribute"
},
{
"api_name": "time.sleep",
"line_number": 62,
"usage_type": "call"
}
] |
74197622589
|
from rest_framework import serializers
from des.models import Exposure, SkybotJob, SkybotJobResult
class SkybotJobResultSerializer(serializers.ModelSerializer):
    """Read-only serializer for SkybotJobResult rows, enriched with fields
    derived from the related exposure (ticket, band, date_obs, ccd count).

    The get_* helpers deliberately fall back to a default when the related
    exposure (or its attributes) is missing, so listing never 500s on
    incomplete rows.
    """

    job = serializers.PrimaryKeyRelatedField(
        read_only=True
    )
    exposure = serializers.PrimaryKeyRelatedField(
        read_only=True
    )
    ticket = serializers.SerializerMethodField()
    band = serializers.SerializerMethodField()
    date_obs = serializers.SerializerMethodField()
    ccds = serializers.SerializerMethodField()

    class Meta:
        model = SkybotJobResult
        fields = (
            "id",
            "job",
            "exposure",
            "ticket",
            "success",
            "error",
            "execution_time",
            "ccds",
            "ccds_with_asteroids",
            "positions",
            "inside_ccd",
            "outside_ccd",
            "filename",
            "band",
            "date_obs",
        )

    def get_ticket(self, obj):
        """Ticket as a string; None when unavailable."""
        # `except Exception` (not a bare except) so Ctrl-C / SystemExit
        # are not swallowed.
        try:
            return str(obj.ticket)
        except Exception:
            return None

    def get_band(self, obj):
        """Band of the related exposure; None when unavailable."""
        try:
            return obj.exposure.band
        except Exception:
            return None

    def get_date_obs(self, obj):
        """Observation date of the related exposure; None when unavailable."""
        try:
            return obj.exposure.date_obs
        except Exception:
            return None

    def get_ccds(self, obj):
        """Number of CCDs linked to the related exposure; 0 when unavailable."""
        try:
            return obj.exposure.ccd_set.count()
        except Exception:
            return 0
|
linea-it/tno
|
backend/des/serializers/skybot_job_result.py
|
skybot_job_result.py
|
py
| 1,678 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "rest_framework.serializers.ModelSerializer",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.serializers",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "rest_framework.serializers.PrimaryKeyRelatedField",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "rest_framework.serializers",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "rest_framework.serializers.PrimaryKeyRelatedField",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "rest_framework.serializers",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "rest_framework.serializers.SerializerMethodField",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "rest_framework.serializers",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "rest_framework.serializers.SerializerMethodField",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "rest_framework.serializers",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "rest_framework.serializers.SerializerMethodField",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "rest_framework.serializers",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "rest_framework.serializers.SerializerMethodField",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "rest_framework.serializers",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "des.models.SkybotJobResult",
"line_number": 33,
"usage_type": "name"
}
] |
22134160802
|
from django.urls import path
from . import views
# URL routes for the task-manager app.
urlpatterns = [
    # Static / function-based pages.
    path('', views.index, name = 'home'),
    path('about', views.about, name = 'about'),
    path('create', views.create, name = 'create'),
    path('review', views.review, name = 'review'),
    path('test1', views.test1, name = 'test1'),
    # Class-based detail/update/delete views keyed by task primary key.
    path('<int:pk>', views.TaskDetailView.as_view(), name='detail_task'),
    path('<int:pk>/update_task', views.TaskUpdateView.as_view(), name='update_task'),
    path('<int:pk>/delete_task', views.TaskDeleteView.as_view(), name='delete_task'),
]
|
Voron4ikhin/Web_lab
|
taskmanager/main/urls.py
|
urls.py
|
py
| 553 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "django.urls.path",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 12,
"usage_type": "call"
}
] |
22963907434
|
import urllib3
# 忽略警告:InsecureRequestWarning: Unverified HTTPS request is being made. Adding certificate verification is strongly advised.
urllib3.disable_warnings()
import json
import os
# http://www.meituan.com/meishi/api/poi/getMerchantComment?id=42913246&offset=0&pageSize=50&sortType=1
COMMENT_URL = 'http://www.meituan.com/meishi/api/poi/getMerchantComment?id={}&offset={}&pageSize=50&sortType=1'
# DIVIDER = '\n' + '=' * 200 + '\n'
DIVIDER = '\n\n\n'
def repeat(func, retry=10):
    """Call `func` and return its response once it reports HTTP 200.

    Retries up to `retry` additional times when the call raises or returns a
    non-200 status. Returns the last response obtained, or None when every
    attempt raised.
    """
    response = None
    try:
        response = func()
        # urllib3 responses expose the HTTP code as `.status`
        # (the original checked `.code`, which does not exist).
        if response.status == 200:
            return response
    except Exception:
        pass
    if retry > 0:
        # Propagate the retried result (the original discarded it).
        retried = repeat(func, retry - 1)
        if retried is not None:
            return retried
    return response
def get_comment(id, offset=0, result=None, save_dir=None, sub_dir=False):
    """Fetch one 50-comment page for merchant `id` and recurse through the rest.

    Parameters
    ----------
    id : int
        Meituan merchant id.
    offset : int
        Pagination offset (multiples of 50).
    result : list or None
        When given, every fetched comment dict is appended to it.
    save_dir, sub_dir :
        Control where save_2_json writes the text dump: with both set, files
        go under save_dir/<id>/<page>_<count>/data.txt, otherwise to
        ./comment_<id>.txt.

    Returns the `result` list (unchanged/None when it was not supplied).
    """
    url = COMMENT_URL.format(id, offset)
    response = urllib3.PoolManager().request('GET', url, headers={
        'Host': 'www.meituan.com',
        'Connection': 'keep-alive',
        'Cache-Control': 'max-age=0',
        'Upgrade-Insecure-Requests': '1',
        'User-Agent': 'Mozilla/5.0 (Linux; Android 6.0; Nexus 5 Build/MRA58N) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3325.181 Mobile Safari/537.36',
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
        # 'Accept-Encoding': 'gzip, deflate',
        'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8',
        'Cookie': 'uuid=d9f34299c1aa4700b57b.1533529950.1.0.0; _lx_utm=utm_source%3Dgoogle%26utm_medium%3Dorganic; _lxsdk_cuid=1650d8210c2c8-01cc48fa57f124-336b7b05-13c680-1650d8210c3c8; __mta=147110536.1533529952707.1533529952707.1533529952707.1; ci=50; client-id=4fb967a2-deb0-4c90-8408-2def9dc61c9a; oc=-3SKHsosP2d57O95hCYTmTexMhVqo4FIr5jQcztD5J5u_GXn3LjVWWou3uvfqHm4cPOGIMmgH3hNpYXXqbtqA66xGDYgxq8SWnCYIRpidQP13Wxum7XxTcrTNbJC_8r_5xlRsKULCrAWTz-CPQfr6HgZM1gLCuOpCxBnDwi_9JQ; lat=30.207471; lng=120.208933',
    })
    if not response:
        return result
    comments = None
    try:
        text = response.data.decode()
        # json.loads lost its `encoding` kwarg in Python 3.9; the decode()
        # above already yields str, so no kwarg is needed.
        json_data = json.loads(text)
        data = json_data['data']
        comments = data['comments']
    except Exception:
        # Malformed/blocked response: stop paginating, hand back what we have.
        return result
    if comments and result:
        result.extend(comments)
    n_comments = len(comments) if comments else 0
    if sub_dir and save_dir:
        filename = 'data.txt'
        final_save_dir = os.path.join(save_dir, str(id), '%05d_%02d' % (offset // 50, n_comments))
    else:
        final_save_dir = './'
        filename = 'comment_%d.txt' % id
    filepath = os.path.join(final_save_dir, filename)
    save_2_json(comments, filepath)
    total = data['total']
    offset += n_comments
    if offset < total - 1:
        get_comment(id, offset, result, save_dir, sub_dir)
    return result
def save_2_json(comments, save_dir):
    """Append human-readable entries for `comments` to the file at
    `save_dir`, creating parent directories as needed. Entries without
    comment text are skipped."""
    if not comments:
        return
    parent = os.path.abspath(os.path.dirname(save_dir))
    if not os.path.isdir(parent):
        os.makedirs(parent)
    with open(save_dir, 'a', encoding='utf-8') as out:
        for entry in comments:
            menu = entry['menu'] if entry['menu'] else ''
            text = entry['comment'] if entry['comment'] else ''
            if not text:
                continue
            star = entry['star']
            # The star value is stored divided by 5.
            record = 'menu = %s\nstar = %s\ntext = %s' % (menu, star / 5, text)
            out.write(record + DIVIDER)
            print(text)
def dump_comment_data(id, save_dir=None, sub_dir=False):
    """Crawl every comment page for merchant `id` and dump them to disk.

    Defaults to <project root>/data/comment when save_dir is not given.
    """
    target_dir = save_dir if save_dir else get_root_path('data/comment')
    get_comment(id, save_dir=target_dir, sub_dir=sub_dir)
def get_root_path(relative=None):
    """Return the project root (parent of this file's directory) joined
    with `relative`; the bare root when `relative` is falsy."""
    suffix = relative if relative else ''
    root_dir = os.path.dirname(os.path.dirname(__file__))
    return os.path.join(root_dir, suffix)
if __name__ == '__main__':
    # Crawl a single hard-coded merchant id when run as a script.
    # dump_comment_data(42913246, save_dir='data/comment')
    dump_comment_data(42913246)
    # print(get_root_path())
    # print(get_root_path('data'))
|
puke3615/comment_spider
|
comment_spider/util.py
|
util.py
|
py
| 4,172 |
python
|
en
|
code
| 5 |
github-code
|
6
|
[
{
"api_name": "urllib3.disable_warnings",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "urllib3.PoolManager",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 61,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 65,
"usage_type": "attribute"
},
{
"api_name": "os.path.abspath",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 78,
"usage_type": "attribute"
},
{
"api_name": "os.path.dirname",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "os.path.isdir",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 79,
"usage_type": "attribute"
},
{
"api_name": "os.makedirs",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "os.path.dirname",
"line_number": 103,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 103,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 104,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 104,
"usage_type": "attribute"
}
] |
31534048424
|
import telebot;
import os
import subprocess
import re
# Read the bot token at startup (kept in the "Token" file, out of the source).
file = open("Token", "r")
token = file.read()
file.close()
bot = telebot.TeleBot(token)
os.system("echo запущено")
@bot.message_handler(content_types=['text'])
def get_text_messages(message):
    """Handle an incoming text message: answer /start and /help, otherwise
    execute the text as a shell command and send back its stdout."""
    if message.text == "/start":
        bot.send_message(message.from_user.id, "bashbot для Телеграмм список основных команд /help")
    elif message.text == "/help":
        bot.send_message(message.from_user.id, "здесь пока что пусто")  # help text is still a stub
    else:
        # Default stdout tuple so the final send check below is safe even
        # when no branch produced output.
        comm_out=(b'',)
        lst = message.text.split(' ')
        print(lst[0])
        if (lst[0] == 'cd'):
            del lst[0]
            if not lst:
                lst = ['/root']  # bare `cd` goes to /root
            path="".join(lst)
            try:
                os.chdir(path)
            except FileNotFoundError:
                bot.send_message(message.from_user.id, "No such file or directory")
            else:
                # Echo the new working directory back to the user.
                comm = subprocess.Popen('pwd', stdout=subprocess.PIPE)
                comm_out = comm.communicate()
        elif(lst[0] == 'echo'):
            comm = subprocess.Popen(lst, stdout=subprocess.PIPE)
            comm_out = comm.communicate()
            print(comm_out)
        elif(lst[0] == 'export'):
            c = "".join(lst)
            print(message.text)
            # NOTE(review): os.system runs in a child shell, so the exported
            # variable does not persist — this feature is unfinished.
            os.system(message.text)
            bot.send_message(message.from_user.id, "не готово")
        else:
            try:
                # SECURITY NOTE: executes arbitrary chat text as a command
                # (shell=False, but still full command execution).
                comm = subprocess.Popen(message.text.split(' '), stdout=subprocess.PIPE)
                comm_out = comm.communicate()
                print(comm_out)
            except FileNotFoundError:
                bot.send_message(message.from_user.id, "No such file or directory")
        if (comm_out[0] != b''):
            bot.send_message(message.from_user.id, comm_out)
bot.polling(none_stop=True, interval=0)
# TODO: keep per-user-ID logs
# TODO: add a clear command
|
romazanovma/probable-octo-potato-telegram-bot
|
bot.py
|
bot.py
|
py
| 1,759 |
python
|
ru
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "telebot.TeleBot",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "os.system",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "os.chdir",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "subprocess.Popen",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "subprocess.PIPE",
"line_number": 31,
"usage_type": "attribute"
},
{
"api_name": "subprocess.Popen",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "subprocess.PIPE",
"line_number": 34,
"usage_type": "attribute"
},
{
"api_name": "os.system",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "subprocess.Popen",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "subprocess.PIPE",
"line_number": 44,
"usage_type": "attribute"
}
] |
71200356027
|
from moviepy.editor import *
from moviepy.video.tools.subtitles import SubtitlesClip
import moviepy.video.fx as vfx
def create_srt(line):
    """Write result.srt: a copy of the "your next line" subtitle template
    with `line` substituted into the two caption slots.

    Context managers replace the manual open/close pairs so the handles are
    released even if an index/IO error occurs mid-way.
    """
    with open(r"D:\Final Renders\your next line.srt", "r") as subs_obj:
        orig_subs = subs_obj.read().split("\n")
    print(orig_subs)
    # Lines 6 and 10 of the template hold the caption text (template-specific).
    orig_subs[6] = f"\"{line}\""
    orig_subs[10] = line
    with open(r"D:\Final Renders\result.srt", "w") as new_srt:
        for x in orig_subs:
            new_srt.write(x + "\n")
def composite_gif(line):
    """Render the "your next line" meme GIF with `line` burned in as a
    subtitle, writing the result to D:\\Final Renders\\result.gif."""
    video = VideoFileClip(r"D:\Final Renders\tsuginiomaewa.mp4")
    # Halve the resolution to keep the GIF small.
    video = vfx.resize.resize(video,0.5)
    generator = lambda txt: TextClip(txt, font='Arial',fontsize=16, color='white')
    create_srt(line)
    sub = SubtitlesClip(r"D:\Final Renders\result.srt", generator)
    result = CompositeVideoClip([video, sub.set_position(('center','bottom'))])
    # Aggressive fuzz/colors settings trade image quality for file size.
    result.write_gif(r"D:\Final Renders\result.gif",fps=10,program="ffmpeg", fuzz=100,colors=2)
    result.close()
    video.close()
|
alwynwan/Messenger-Bot
|
srtgen.py
|
srtgen.py
|
py
| 1,002 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "moviepy.video.fx.resize.resize",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "moviepy.video.fx.resize",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "moviepy.video.fx",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "moviepy.video.tools.subtitles.SubtitlesClip",
"line_number": 28,
"usage_type": "call"
}
] |
33645109204
|
from __future__ import print_function
import os.path
from googleapiclient.discovery import build
from google_auth_oauthlib.flow import InstalledAppFlow
from google.auth.transport.requests import Request
from google.oauth2.credentials import Credentials
from datetime import datetime, timedelta
calendar_id = os.environ.get('calendar_id')
SCOPES = ['https://www.googleapis.com/auth/calendar']
def connect():
    """Authenticate against the Google Calendar API and return a service client.

    Uses cached credentials from token.json when available; otherwise runs
    the local OAuth flow (credentials.json) and caches the result.
    """
    creds = None
    # The file token.json stores the user's access and refresh tokens, and is
    # created automatically when the authorization flow completes for the first
    # time.
    if os.path.exists("token.json"):
        creds = Credentials.from_authorized_user_file("token.json", SCOPES)
    # If there are no (valid) credentials available, let the user log in.
    if not creds or not creds.valid:
        if creds and creds.expired and creds.refresh_token:
            creds.refresh(Request())
        else:
            flow = InstalledAppFlow.from_client_secrets_file(
                'credentials.json', SCOPES)
            # Fixed port so it can be whitelisted in the OAuth client config.
            creds = flow.run_local_server(port=8080)
        # Save the credentials for the next run
        with open("token.json", 'w') as token:
            token.write(creds.to_json())
    return build('calendar', 'v3', credentials=creds)
def create_event(service, event):
    """Insert `event` into the configured calendar; failures are logged to
    errors.log via save_error."""
    try:
        # The API returns the created resource, but nothing downstream used
        # it, so the unused `eventAdded` binding is dropped.
        service.events().insert(calendarId=calendar_id, body=generate_json(event)).execute()
        print(f'added {event.name} {event.date} {event.eventId}')
    except Exception as e:
        save_error("creating", event.name, e)
def delete_event(service, event):
    """Remove `event` from the calendar; a no-op when it was never synced
    (no eventId)."""
    if event.eventId is None:
        return
    try:
        service.events().delete(calendarId=calendar_id, eventId=event.eventId).execute()
    except Exception as e:
        save_error("deleting", event.name, e)
def update_event(service, event):
    """Push the current state of `event` to its existing calendar entry."""
    try:
        body = generate_json(event)
        service.events().update(calendarId=calendar_id, eventId=event.eventId, body=body).execute()
    except Exception as e:
        save_error("updating", event.name, e)
def generate_json(event):
    """Build the Google Calendar request body for a Notion task `event`.

    Side effects: may overwrite event.icon, and for all-day events shifts
    event.endDate one day forward (Notion treats the end date as inclusive,
    Google Calendar as exclusive).
    """
    if event.status == 'Urgente':
        event.icon = "⭐"
    elif event.progress == 100 or event.status == 'Completado':
        event.icon = "✔️"

    # Event color by task type.
    if event.type == "Exam":
        color = 11
    elif event.type == "Assignment":
        color = 4
    else:
        color = 6

    # Dates longer than 11 chars are RFC3339 timestamps; otherwise they are
    # yyyy-mm-dd all-day events, whose (exclusive) end date is pushed to the
    # next day. The original also bound a never-used `date` variable here.
    if len(event.date) > 11:
        datetype = "dateTime"
    else:
        datetype = "date"
        event.endDate = datetime.strftime(
            datetime.strptime(event.endDate, '%Y-%m-%d') + timedelta(1), '%Y-%m-%d')

    event_to_add = {
        'id': event.eventId,
        'colorId': color,
        'summary': f'{event.icon} {event.type[:1]}. {event.subject} {event.name} ',
        'description': f'{event.type}. of {event.subject}: {event.name} \n{event.comments} \n {f"Progreso: {str(event.progress)}" if event.progress >=0 else ""} Status: {event.status}',
        'start': {
            datetype: event.date,
        },
        'end': {
            datetype: event.endDate,
        },
        'reminders': {
            'useDefault': False,
            'overrides': [
                {'method': 'popup', 'minutes': 10080},  # 1 week before
            ],
        },
    }
    return event_to_add
def save_error(type, name, msg = ""):
    """Append a timestamped error line to errors.log and echo it (with *msg*) to stdout.

    :param type: gerund describing the failed action ('creating', 'deleting', ...)
                 (parameter name kept for backward compatibility even though it
                 shadows the builtin)
    :param name: name of the event involved
    :param msg: optional exception/detail, printed to the console only
    """
    text = f"{datetime.now()} --> Error while {type} the following event: {name} \n"
    # Context manager guarantees the log handle is closed even if the write fails.
    with open('errors.log', 'a') as file_object:
        file_object.write(text)
    print (f"{text} \n {msg}")
|
Santixs/NotionGSync
|
gcalendarsync.py
|
gcalendarsync.py
|
py
| 3,727 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "os.path.environ.get",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "os.path.environ",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "os.path",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "os.path.path.exists",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "os.path.path",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "os.path",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "google.oauth2.credentials.Credentials.from_authorized_user_file",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "google.oauth2.credentials.Credentials",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "google.auth.transport.requests.Request",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "google_auth_oauthlib.flow.InstalledAppFlow.from_client_secrets_file",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "google_auth_oauthlib.flow.InstalledAppFlow",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "googleapiclient.discovery.build",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.strftime",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 69,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.strptime",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "datetime.timedelta",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 95,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 95,
"usage_type": "name"
}
] |
71913849148
|
import requests
import os

API_ENDPOINT = 'https://api.openai.com/v1/chat/completions'
# SECURITY: the API key was previously hard-coded here (and therefore leaked in
# the repository — it should be revoked). Read it from the environment instead;
# never commit secrets to source control.
API_KEY = os.environ.get('OPENAI_API_KEY', '')
MODEL_ID = 'gpt-3.5-turbo'
def test_chatgpt_api():
    """Send a minimal chat-completion request to verify the key/endpoint work.

    Prints the outcome to stdout; returns nothing and does not raise on an
    HTTP error status.
    """
    headers = {
        'Authorization': f'Bearer {API_KEY}',
        'Content-Type': 'application/json'
    }
    data = {
        'model': MODEL_ID,
        'messages': [{'role': 'system', 'content': 'Test message'}]
    }
    response = requests.post(API_ENDPOINT, headers=headers, json=data)
    if response.status_code == 200:
        print("API is working normally.")
    else:
        print("API request failed. Status code:", response.status_code)
        print("Response:", response.json())
# Call the function to test the API
test_chatgpt_api()
|
Ali-Jabowr/TelegramWordBot
|
commands/chatgpt_testing.py
|
chatgpt_testing.py
|
py
| 777 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "requests.post",
"line_number": 16,
"usage_type": "call"
}
] |
9837393322
|
import networkx as nx
# import pulp
# Build a small directed graph for a minimum-cost-flow demonstration.
G = nx.DiGraph()
G.add_nodes_from(['A', 'B', 'C', 'D', 'E', 'F'])
G.add_edges_from([('A', 'B'), ('A', 'D'), ('B', 'C'), ('B', 'E'), ('C', 'F'), ('D', 'C'), ('E', 'C'), ('E', 'D'), ('E', 'F')])
# Per-edge capacity and cost, in the same order the edges were added above.
capacities = [4,5,5,4,4,3,2,2,1]
costs = [1,7,7,2,3,2,1,1,4]
for i, edge in enumerate(G.edges()):
    G.edges[edge]['capacity'] = capacities[i]
    G.edges[edge]['cost'] = costs[i]
# Node demands (networkx convention: negative = supply, positive = demand;
# the values must sum to zero for a feasible flow).
demands = [-2,-5,-1,3,2,3]
for i, node in enumerate(G.nodes()):
    G.nodes[node]['demand'] = demands[i]
# Solve and report the optimal flow dict and its total cost.
myflow = nx.min_cost_flow(G, weight='cost')
mycost = nx.cost_of_flow(G, myflow, weight='cost')
print(mycost, myflow)
|
havarpan/verkkomallit-k21-glitchtest
|
python/luentoesim.py
|
luentoesim.py
|
py
| 639 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "networkx.DiGraph",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "networkx.min_cost_flow",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "networkx.cost_of_flow",
"line_number": 22,
"usage_type": "call"
}
] |
17335047402
|
import sesame
import numpy as np
######################################################
## define the system
######################################################
# dimensions of the system
# (all lengths in this script are in cm)
Lx = 3e-4 #[cm]
Ly = 3e-4 #[cm]
# extent of the junction from the left contact [cm]
junction = .1e-4 # [cm]
# Mesh
x = np.concatenate((np.linspace(0,.2e-4, 30, endpoint=False),
np.linspace(0.2e-4, 1.4e-4, 60, endpoint=False),
np.linspace(1.4e-4, 2.9e-4, 70, endpoint=False),
np.linspace(2.9e-4, 3e-4, 10)))
y = np.concatenate((np.linspace(0, 1.75e-4, 50, endpoint=False),
np.linspace(1.75e-4, 2.75e-4, 50, endpoint=False),
np.linspace(2.75e-4, Ly, 50)))
# Create a system
# NOTE(review): the name 'sys' shadows the stdlib sys module for the rest of
# the script; kept for compatibility with the lines below.
sys = sesame.Builder(x, y, periodic=False)
# Dictionary with the material parameters
mat = {'Nc':8e17, 'Nv':1.8e19, 'Eg':1.5, 'epsilon':9.4, 'Et': 0,
'mu_e':320, 'mu_h':40, 'tau_e':10*1e-9, 'tau_h':10*1e-9, 'B': 1e-10}
# Add the material to the system
sys.add_material(mat)
# define a function specifiying the n-type region
def region(pos):
    x, y = pos
    return x < junction
# define a function specifiying the p-type region
# (complement of the n-type region: 1 - bool acts as a truthy/falsy flag)
region2 = lambda pos: 1 - region(pos)
# Add the donors
nD = 1e17 # [cm^-3]
sys.add_donor(nD, region)
# Add the acceptors
nA = 1e15 # [cm^-3]
sys.add_acceptor(nA, region2)
# Use Ohmic contacts
Sn_left, Sp_left, Sn_right, Sp_right = 1e7, 1e7, 1e7, 1e7
sys.contact_S(Sn_left, Sp_left, Sn_right, Sp_right)
sys.contact_type('Ohmic','Ohmic')
# gap state characteristics
E = 0 # energy of gap state (eV) from midgap
rhoGB = 1e14 # density of defect states
s = 1e-14 # defect capture cross section
# this implies a surface recombination velocity S = rhoGB*s*vthermal = 1e5 [cm/s]
# Specify the two points that make the line containing additional recombination centers
p1 = (0, Ly)
p2 = (Lx, Ly)
# add neutral defect along surface (surface recombination boundary condition)
sys.add_defects([p1, p2], rhoGB, s, E=E, transition=(0,0))
# find equilibrium solution with GB. Note we provide the GB-free equilibrium solution as a starting guess
solution = sesame.solve(sys, compute='Poisson', periodic_bcs=False)
######################################################
## EBIC generation profile parameters
######################################################
q = 1.6e-19 # C
ibeam = 10e-12 # A
Ebeam = 15e3 # eV
eg = 1.5 # eV
density = 5.85 # g/cm^3
kev = 1e3 # eV
# rough approximation for total carrier generation rate from electron beam
Gtot = ibeam/q * Ebeam / (3*eg)
# length scale of generation volume
Rbulb = 0.043 / density * (Ebeam/kev)**1.75 # given in micron
Rbulb = Rbulb * 1e-4 # converting to cm
# Gaussian spread
sigma = Rbulb / np.sqrt(15)
# penetration depth
y0 = 0.3 * Rbulb
# get diffusion length to scale generation density
vt = .0258
Ld = np.sqrt(sys.mu_e[0] * sys.tau_e[0]) * sys.scaling.length
######################################################
## vary position of the electron beam
######################################################
x0list = np.linspace(.1e-4, 2.5e-4, 11)
# Array in which to store results
jset = np.zeros(len(x0list))
jratio = np.zeros(len(x0list))
rset = np.zeros(len(x0list))
rad_ratio = np.zeros(len(x0list))
# Cycle over beam positions
for idx, x0 in enumerate(x0list):
    # define a function for generation profile
    # (Gaussian in x about the beam position x0, and in y about depth y0
    # below the top surface; amplitude scaled by Gtot and Ld)
    def excitation(x,y):
        return Gtot/(2*np.pi*sigma**2*Ld) * np.exp(-(x-x0)**2/(2*sigma**2)) * np.exp(-(y-Ly+y0)**2/(2*sigma**2))
    # add generation to the system at new beam position
    sys.generation(excitation)
    # solve the system
    solution = sesame.solve(sys, periodic_bcs=False, tol=1e-8)
    # get analyzer object to evaluate current and radiative recombination
    az = sesame.Analyzer(sys, solution)
    # compute (dimensionless) current and convert to to dimension-ful form
    tj = az.full_current() * sys.scaling.current * sys.scaling.length
    # save the current
    jset[idx] = tj
    # obtain total generation from sys object
    gtot = sys.gtot * sys.scaling.generation * sys.scaling.length**2
    jratio[idx] = tj/(q * gtot)
    # compute (dimensionless) total radiative recombination and convert to to dimension-ful form
    cl = az.integrated_radiative_recombination() * sys.scaling.generation * sys.scaling.length**2
    # save the CL
    rset[idx] = cl
    rad_ratio[idx] = cl/gtot
# display result
for counter in range(len(jset)):
    print('x = {0:2.1e} [cm], J = {1:5.3e} [mA/cm], CL = {2:5.3e}'.format(x0list[counter],jset[counter]*1e3,rset[counter]))
for counter in range(len(jset)):
    print('x = {0:2.1e} {1:5.3e} {2:5.3e}'.format(x0list[counter],jratio[counter],rad_ratio[counter]))
# NOTE(review): matplotlib is imported late so the solve loop can run headless.
import matplotlib.pyplot as plt
plt.plot(x0list,jset)
plt.show()
|
usnistgov/sesame
|
examples/tutorial5/2d_EBIC.py
|
2d_EBIC.py
|
py
| 4,876 |
python
|
en
|
code
| 16 |
github-code
|
6
|
[
{
"api_name": "numpy.concatenate",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "numpy.linspace",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "numpy.linspace",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "numpy.linspace",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "numpy.linspace",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "numpy.concatenate",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "numpy.linspace",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "numpy.linspace",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "numpy.linspace",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "sesame.Builder",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "sesame.solve",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "numpy.sqrt",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "numpy.sqrt",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "numpy.linspace",
"line_number": 97,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 99,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 100,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 101,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 102,
"usage_type": "call"
},
{
"api_name": "numpy.pi",
"line_number": 109,
"usage_type": "attribute"
},
{
"api_name": "numpy.exp",
"line_number": 109,
"usage_type": "call"
},
{
"api_name": "sesame.solve",
"line_number": 115,
"usage_type": "call"
},
{
"api_name": "sesame.Analyzer",
"line_number": 118,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 141,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 141,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 142,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 142,
"usage_type": "name"
}
] |
42649131070
|
""" some tools """
import logging
import re
import sys
from rancon import settings
tag_matcher = re.compile("%([A-Z0-9]+)%")
def fail(message):
    """Print a FATAL message and terminate the process with exit code -1.

    Accepts a plain string or a list/tuple of strings; two or more messages
    are rendered as a bulleted list, a single one inline.

    XXX: why is this using print not log?
    """
    if isinstance(message, (list, tuple)):
        if len(message) > 1:
            rendered = "\n - " + "\n - ".join(message)
        else:
            rendered = " " + message[0]
    else:
        rendered = " " + message
    print("FATAL:%s" % rendered)
    sys.exit(-1)
def is_true(something):
    """Truthiness check.

    Strings count as true only when (lowercased) they are one of
    "true", "1", "yes", "on"; everything else falls back to bool().
    """
    if not isinstance(something, str):
        return bool(something)
    return something.lower() in ("true", "1", "yes", "on")
def tag_replace(line, replacement_dict, default="UNDEFINED"):
    """
    Replace every %TAG% placeholder in *line* with the entry for the
    lower-cased tag in *replacement_dict*, falling back to *default*
    for unknown tags.

    :param line: The string containing %TAG% placeholders
    :param replacement_dict: The hash to use for the replacements
    :param default: Value substituted when a tag has no entry
    :return: The processed string
    """
    for tag in re.findall(r"%([A-Z0-9]+)%", line):
        value = str(replacement_dict.get(tag.lower(), default))
        line = line.replace("%{}%".format(tag), value)
    return line
def getLogger(*args, **kwargs):
    """Return a stdlib logger with its level forced to the rancon-wide setting.

    XXX: why not define this in settings?
    """
    logger = logging.getLogger(*args, **kwargs)
    # Force the level from rancon.settings so every module logs consistently.
    logger.setLevel(settings.loglevel)
    return logger
|
flypenguin/python-rancon
|
rancon/tools.py
|
tools.py
|
py
| 1,596 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "re.compile",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "sys.exit",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "logging.getLogger",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "rancon.settings.loglevel",
"line_number": 57,
"usage_type": "attribute"
},
{
"api_name": "rancon.settings",
"line_number": 57,
"usage_type": "name"
}
] |
40369189284
|
from multiprocessing.spawn import old_main_modules
from os import listdir
from os.path import isfile, join
from wordcloud import WordCloud
import codecs
import jieba
import matplotlib.pyplot as plt
# read data
mypath = 'lab/contexts/'
font_filename = 'fonts/STFangSong.ttf'
# Every regular file in the corpus directory becomes input text.
files = [f for f in listdir(mypath) if isfile(join(mypath, f))]
text_list=[]
text=''
# Concatenate the contents of all corpus files into one string.
# NOTE(review): opened with the platform default encoding — confirm the corpus
# is UTF-8 (the stopword file below is read explicitly as utf-8).
for file in files:
    with open(mypath+file) as f:
        lines = f.readlines()
        text = text + ' '.join(lines)
# Strip spaces, then segment with jieba (Chinese word segmentation), re-joining
# the tokens with spaces so WordCloud can tokenise them.
text = ' '.join(jieba.cut(text.replace(" ", "")))
stopwords_filename = 'lab/stopwords.txt'
stopwords = set([line.strip()
for line in codecs.open(stopwords_filename, 'r', 'utf-8')])
# build wordcloud
wordcloud = WordCloud(font_path=font_filename,
prefer_horizontal=1,stopwords=stopwords,
max_font_size=260, width=1000, height=860, max_words=200).generate(text)
# Render the generated word cloud without axes.
plt.figure()
plt.imshow(wordcloud, interpolation="bilinear")
plt.axis("off")
plt.show()
|
NATaaLiAAK/PPE1
|
cloud_build.py
|
cloud_build.py
|
py
| 1,000 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "os.listdir",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "os.path.isfile",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "jieba.cut",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "codecs.open",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "wordcloud.WordCloud",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.imshow",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 35,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.axis",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 36,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 37,
"usage_type": "name"
}
] |
24362182930
|
from datetime import date, timedelta
from .downloaders import Downloader
from .parsers import Parser
class RequiredValue():
    """Callable validator: a value passes when *child_of* is None (anything
    accepted) or the value is an instance of *child_of*."""
    def __init__(self, child_of=None):
        self.child_of = child_of
    def __call__(self, value):
        if self.child_of is None:
            return True
        return isinstance(value, self.child_of)
class DefaultValue():
    """Marker object that carries a default value for a field."""
    def __init__(self, default):
        # The wrapped default, exposed as a plain attribute.
        self.default = default
class Supplier():
    """Base class for partner suppliers: downloads remote files, parses them,
    and merges the per-file results into a single list of items.

    Subclasses are expected to provide ``self.downloader`` and ``self.parser``
    (neither is created here) plus the ``files`` descriptors to pull.
    """
    # File descriptors ({'name', 'mapping', optional 'join'}) processed by pull().
    files = []
    categories = ()
    def __init__(self, partner=None):
        self.partner = partner
    def retrieve(self, files):
        """Download and parse every (location, mapping) pair of one descriptor.

        Location strings may contain %(today)s / %(yesterday)s placeholders
        (YYYYMMDD). Each parsed item is tagged with the descriptor name under
        '_file'. Returns a list of parsed dicts, one per location.
        """
        data = []
        parameters = {
            'today': date.today().strftime('%Y%m%d'),
            'yesterday': (date.today() - timedelta(days=1)).strftime('%Y%m%d'),
        }
        join = files.get('join', None)
        for location, mapping in files['mapping']:
            source = self.downloader.download(location % parameters)
            parsed = self.parser.parse(source, mapping, join)
            for k, v in parsed.items():
                parsed[k]['_file'] = files['name']
            data.append(parsed)
        return data
    def merge(self, files):
        """Merge the per-file dicts produced by retrieve(), key by key.

        Only keys present in the FIRST file create entries; later files can
        only fill values that are still None. Every value dict is padded with
        None for any key observed in the first item of each non-empty file.
        """
        merged = {}
        value_keys = set([])
        # NOTE(review): value_keys is sampled from the first item of each
        # non-empty file only — assumes homogeneous value dicts per file.
        for f in files:
            if len(f):
                value_keys.update(f[list(f.keys())[0]].keys())
        for i, f in enumerate(files):
            for key, value in f.items():
                for value_key in value_keys:
                    if value_key not in value:
                        value[value_key] = None
                if key not in merged:
                    # Items absent from the first file are skipped entirely.
                    if i > 0:
                        continue
                    merged[key] = {}
                for k, v in value.items():
                    if merged[key].get(k) is None:
                        merged[key][k] = v
        return merged.values()
    def pull(self):
        """Retrieve and merge all configured descriptors into one flat list."""
        data = []
        for files in self.files:
            file_data = self.retrieve(files)
            file_data = self.merge(file_data)
            data += file_data
        return data
    def validate(self, item):
        # Hook for subclasses; the base implementation accepts everything.
        assert True
    def group(self, item):
        # Hook for subclasses; the base implementation defines no grouping.
        return None
|
suningwz/netaddiction_addons
|
netaddiction_octopus/base/supplier.py
|
supplier.py
|
py
| 2,132 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "datetime.date.today",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "datetime.date",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "datetime.date.today",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "datetime.date",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "datetime.timedelta",
"line_number": 34,
"usage_type": "call"
}
] |
30293188969
|
from django.urls import path
from . import views
# URL routing table for the tasks app: index page, task creation, task
# listing, and per-task editing (task_id captured from the path).
urlpatterns = [
    path('', views.index, name='index'),
    path("new", views.create, name='new'),
    path('list', views.list, name='list'),
    path('edit/<int:task_id>', views.edit, name='edit'),
]
|
drazisil/task-zero
|
tasks/urls.py
|
urls.py
|
py
| 252 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "django.urls.path",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 9,
"usage_type": "call"
}
] |
4473417766
|
import datetime
import logging
import re
from estropadakparser.parsers.parser import Parser
from estropadakparser.estropada.estropada import Estropada, TaldeEmaitza
class ArcParserLegacy(Parser):
    '''Base class to parse an ARC legacy(2006-2008) race result'''
    def __init__(self, **kwargs):
        pass
    def parse(self, *args):
        '''Parse a result and return an estropada object.

        args: (url, <unused>, date 'YYYY-MM-DD', league name).
        '''
        document = self.get_content(*args)
        urla = args[0]
        estropadaDate = args[2]
        liga = args[3]
        d = datetime.datetime.strptime(estropadaDate, '%Y-%m-%d')
        (estropadaName) = self.parse_headings(document)
        opts = {'urla': urla, 'data': estropadaDate, 'liga': liga}
        self.estropada = Estropada(estropadaName, **opts)
        self.parse_tandas(document, d.year)
        if d.year <= 2008:
            # Legacy pages have no resume table: derive the positions ourselves.
            self.calculate_tanda_posizioa()
        else:
            self.parse_resume(document)
        return self.estropada
    def parse_headings(self, document):
        '''Parse headings table and return the race name.'''
        heading_one = document.cssselect('#contenido h1')
        estropada = heading_one[0].text.strip()
        estropada = estropada.replace("Resultados de: ", "")
        return (estropada)
    def parse_date(self, date):
        '''Normalise a legacy 'dd-Mon-yyyy' date string into ISO 'yyyy-mm-dd'.'''
        new_date = date.replace('Jun', '06')
        new_date = new_date.replace('Jul', '07')
        new_date = new_date.replace('Ago', '08')
        new_date = new_date.replace('Sept', '09')
        date_list = re.split('-', new_date)
        if len(date_list) == 3:
            new_date = date_list[2] + "-" + date_list[1] + "-" + date_list[0]
        return new_date
    def parse_tandas(self, document, urtea):
        '''Parse every heat ("tanda") table and register one TaldeEmaitza per crew.'''
        tandas = document.cssselect('table.resultados')
        for num, text in enumerate(tandas):
            rows = text.findall('.//tr')
            for kalea, row in enumerate(rows):
                if kalea == 0:
                    # First row is the table header.
                    continue
                data = [x.text for x in row.findall('.//td')]
                try:
                    if not data[1] is None:
                        if urtea < 2008:
                            # Position is embedded in column 6; default to 12
                            # (last) when it cannot be parsed.
                            pos = 12
                            aux = re.sub('[^0-9]', '', data[6])
                            try:
                                pos = int(aux)
                            except ValueError:
                                pos = 12
                        else:
                            pos = 0
                        emaitza = TaldeEmaitza(talde_izena=data[1],
                                               kalea=kalea, ziabogak=data[2:5],
                                               denbora=data[5], tanda=num + 1, posizioa=pos, tanda_postua=4)
                        self.estropada.taldeak_add(emaitza)
                except TypeError as e:
                    print(e)
    def calculate_tanda_posizioa(self):
        '''Assign overall and per-heat positions from the sorted classification.'''
        tanda_posizioak = [0] + [1] * 7
        for pos, taldea in enumerate(sorted(self.estropada.sailkapena)):
            taldea.posizioa = pos + 1
            taldea.tanda_postua = tanda_posizioak[taldea.tanda]
            tanda_posizioak[taldea.tanda] = tanda_posizioak[taldea.tanda] + 1
    def parse_resume(self, document):
        '''Parse the final classification table and copy positions/points onto crews.'''
        sailkapenak = document.cssselect('#resultado table')
        tandaKopurua = len(sailkapenak)
        rows = sailkapenak[tandaKopurua-1].findall('.//tr')
        tanda_posizioak = [0] + [1] * 7
        for pos, row in enumerate(rows):
            if pos == 0:
                continue
            try:
                taldea = row.find('.//td[2]').text.strip()
                posizioa = pos
                print(posizioa)
                puntuak = row.find('.//td[4]').text.strip()
                for t in self.estropada.sailkapena:
                    if re.match(t.talde_izena, taldea, re.I):
                        try:
                            t.posizioa = posizioa
                            t.tanda_postua = tanda_posizioak[t.tanda]
                            t.puntuazioa = int(puntuak)
                        except Exception as e:
                            print(e)
                            t.posizioa = 1
                            t.tanda_postua = tanda_posizioak[t.tanda]
                            t.puntuazioa = 0
                        tanda_posizioak[t.tanda] = tanda_posizioak[t.tanda] + 1
            except Exception as e:
                # Fixed: logging.warn is deprecated, and 'exec_info' was a typo
                # for 'exc_info' that raised TypeError inside this handler.
                logging.warning(self.estropada.izena)
                logging.info("Error parsing results", exc_info=True)
|
ander2/estropadak-lxml
|
estropadakparser/parsers/arc_legacy_parser.py
|
arc_legacy_parser.py
|
py
| 4,486 |
python
|
eu
|
code
| 2 |
github-code
|
6
|
[
{
"api_name": "estropadakparser.parsers.parser.Parser",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.strptime",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "estropadakparser.estropada.estropada.Estropada",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "re.split",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "re.sub",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "estropadakparser.estropada.estropada.TaldeEmaitza",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "re.match",
"line_number": 95,
"usage_type": "call"
},
{
"api_name": "re.I",
"line_number": 95,
"usage_type": "attribute"
},
{
"api_name": "logging.warn",
"line_number": 107,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 108,
"usage_type": "call"
}
] |
17615533766
|
import web
import os
urls = ('/upload', 'Upload')
class Upload:
    """web.py handler: serves the upload form (GET) and grades an uploaded C file (POST)."""
    def GET(self):
        """Return the upload form page as UTF-8 HTML."""
        web.header("Content-Type","text/html; charset=utf-8")
        return open(r'upload.html', 'r').read()
    def POST(self):
        """Store the uploaded file, run the grader on .c files, render a result page.

        NOTE(review): the client-supplied filename is interpolated into shell
        commands below (os.system('rm ' + ...)) — a crafted filename allows
        shell injection. NOTE(review): the bare 'except' swallows every error,
        and the final 'raise web.seeother' is unreachable after the returns.
        """
        try:
            x = web.input(myfile={})
            filedir = 'submit' # change this to the directory you want to store the file in.
            if 'myfile' in x: # to check if the file-object is created
                filepath = x.myfile.filename.replace('\\','/') # replaces the windows-style slashes with linux ones.
                filename = filepath.split('/')[-1] # splits the and chooses the last part (the filename with extension)
                fout = open(filedir +'/'+ filename,'w+') # creates the file where the uploaded file should be stored
                fout.write(x.myfile.file.read().decode('utf-8')) # writes the uploaded file to the newly created file.
                fout.close() # closes the file, upload complete.
                if filename[-2:] == '.c':
                    # start analyze
                    os.system('echo check')
                    os.system('python3 grade.py > result.txt')
                    os.system('rm ' + filedir + '/' + filename)
                    print('[file deleted]')
                    resultWeb = open(r'result.html', 'r').read()
                    result = open('result.txt', 'r').read()
                    if(result.find('Failed.') != -1):
                        resultWeb = resultWeb[: resultWeb.find('<!--result-->')] + '\n<h3><strong style="color: red"> Test Failed</strong></h3><br>\n' + result + '\n' + resultWeb[resultWeb.find('<!--result-->') + len('<!--result-->'):]
                    else:
                        # add result to website
                        resultWeb = resultWeb[: resultWeb.find('<!--result-->')] + '\n<h3><strong style="color: green"> Test Passed</strong></h3><br>\n' + result + '\n' + resultWeb[resultWeb.find('<!--result-->') + len('<!--result-->'):]
                else:
                    # Not a .c file: show the error page naming the rejected file.
                    resultWeb = open(r'error.html', 'r').read()
                    resultWeb = resultWeb[:resultWeb.find('<!--file-->')] + filename +resultWeb[resultWeb.find('<!--file-->'):]
                    os.system('rm ' + filedir + '/' + filename)
                    print('[file deleted]')
                return resultWeb
        except:
            resultWeb = open(r'error.html', 'r').read()
            resultWeb = resultWeb[:resultWeb.find('<!--file-->')] + 'ERROR:\n No file uploaded or internal error occured while uploading the file.' + resultWeb[resultWeb.find('<!--file-->'):]
            return resultWeb
        raise web.seeother('/upload')
raise web.seeother('/upload')
if __name__ == "__main__":
    # Start the web.py application with the routes declared above.
    app = web.application(urls, globals())
    app.run()
|
Louis-He/simpleOj
|
webmain.py
|
webmain.py
|
py
| 2,766 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "web.header",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "web.input",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "os.system",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "os.system",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "os.system",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "os.system",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "web.seeother",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "web.application",
"line_number": 57,
"usage_type": "call"
}
] |
31261537971
|
from collections import namedtuple
from itertools import izip
from operator import attrgetter
from tilequeue.log import LogCategory
from tilequeue.log import LogLevel
from tilequeue.log import MsgType
from tilequeue.metatile import common_parent
from tilequeue.metatile import make_metatiles
from tilequeue.process import convert_source_data_to_feature_layers
from tilequeue.process import process_coord
from tilequeue.queue import JobProgressException
from tilequeue.queue.message import QueueHandle
from tilequeue.store import write_tile_if_changed
from tilequeue.tile import coord_children_subrange
from tilequeue.tile import coord_to_mercator_bounds
from tilequeue.tile import serialize_coord
from tilequeue.utils import convert_seconds_to_millis
from tilequeue.utils import format_stacktrace_one_line
import Queue
import signal
import sys
import time
# long enough to not fight with other threads, but not long enough
# that it prevents a timely stop
timeout_seconds = 5
def _non_blocking_put(q, data):
    """Attempt q.put with a bounded timeout.

    Returns True on success and False when the queue stayed full for the
    whole timeout — a bounded put prevents deadlock when the destination
    queue is full and the consumer has already stopped.
    """
    try:
        q.put(data, timeout=timeout_seconds)
        return True
    except Queue.Full:
        return False
def _force_empty_queue(q):
# expects a sentinel None value to get enqueued
# throws out all messages until we receive the sentinel
# with no sentinel this will block indefinitely
while q.get() is not None:
continue
# OutputQueue wraps the process of sending data to a multiprocessing queue
# so that we can simultaneously check for the "stop" signal when it's time
# to shut down.
class OutputQueue(object):
    """Wraps sends to a multiprocessing queue so they can be interleaved with
    checks of the shared 'stop' event."""
    def __init__(self, output_queue, tile_proc_logger, stop):
        self.output_queue = output_queue
        self.tile_proc_logger = tile_proc_logger
        self.stop = stop
    def __call__(self, coord, data):
        """
        Send data, associated with coordinate coord, to the queue. While also
        watching for a signal to stop. If the data is too large to send, then
        trap the MemoryError and exit the program.
        Note that `coord` may be a Coordinate instance or a string. It is only
        used for printing out a message if there's a MemoryError, so for
        requests which have no meaningful single coordinate, something else
        can be used.
        Returns True if the "stop signal" has been set and the thread should
        shut down. False if normal operations should continue.
        """
        try:
            # Keep retrying the bounded put so a stop request is noticed even
            # while the destination queue is full.
            while not _non_blocking_put(self.output_queue, data):
                if self.stop.is_set():
                    return True
        except MemoryError as e:
            stacktrace = format_stacktrace_one_line()
            self.tile_proc_logger.error(
                'MemoryError sending to queue', e, stacktrace, coord)
            # memory error might not leave the malloc subsystem in a usable
            # state, so better to exit the whole worker here than crash this
            # thread, which would lock up the whole worker.
            sys.exit(1)
        return False
def _ack_coord_handle(
        coord, coord_handle, queue_mapper, msg_tracker, timing_state,
        tile_proc_logger, stats_handler):
    """share code for acknowledging a coordinate

    Marks coord_handle done with the tracker; when the whole pyramid is
    finished, acks the queue job and logs the pyramid timing, otherwise
    reports progress on the job.
    """
    # returns tuple of (handle, error), either of which can be None
    track_result = msg_tracker.done(coord_handle)
    queue_handle = track_result.queue_handle
    if not queue_handle:
        return None, None
    tile_queue = queue_mapper.get_queue(queue_handle.queue_id)
    assert tile_queue, \
        'Missing tile_queue: %s' % queue_handle.queue_id
    parent_tile = None
    if track_result.all_done:
        parent_tile = track_result.parent_tile
        try:
            tile_queue.job_done(queue_handle.handle)
        except Exception as e:
            stacktrace = format_stacktrace_one_line()
            tile_proc_logger.error_job_done(
                'tile_queue.job_done', e, stacktrace,
                coord, parent_tile,
            )
            return queue_handle, e
    if parent_tile is not None:
        # we completed a tile pyramid and should log appropriately
        start_time = timing_state['start']
        stop_time = convert_seconds_to_millis(time.time())
        tile_proc_logger.log_processed_pyramid(
            parent_tile, start_time, stop_time)
        stats_handler.processed_pyramid(
            parent_tile, start_time, stop_time)
    else:
        # Pyramid not finished yet: report progress so the job is not retried.
        try:
            tile_queue.job_progress(queue_handle.handle)
        except Exception as e:
            stacktrace = format_stacktrace_one_line()
            err_details = {"queue_handle": queue_handle.handle}
            if isinstance(e, JobProgressException):
                err_details = e.err_details
            tile_proc_logger.error_job_progress(
                'tile_queue.job_progress', e, stacktrace,
                coord, parent_tile, err_details,
            )
            return queue_handle, e
    return queue_handle, None
# The strategy with each worker is to loop on a thread event. When the
# main thread/process receives a kill signal, it will issue stops to
# each worker to signal that work should end.
# Additionally, all workers that receive work from a python queue will
# also wait for a sentinel value, None, before terminating. They will
# discard all messages until receiving this sentinel value. Special
# care is also given to the scenario where a None value is received
# before the stop event is checked. The sentinel value here counts as
# a hard stop as well.
# Furthermore, all queue gets and puts are done with timeouts. This is
# to prevent race conditions where a worker is blocked waiting to read
# from a queue that upstream will no longer write to, or try to put to
# a queue that downstream will no longer read from. After any timeout,
# the stop event is checked before any processing to see whether a
# stop event has been received in the interim.
class TileQueueReader(object):
    """Reads tile jobs from the queue(s), tracks them, and forwards the
    coordinates (grouped under a common parent tile) to the output queue.

    Runs until the shared stop event is set. Note: this module is Python 2
    code (izip, Queue, reduce).
    """
    def __init__(
            self, queue_mapper, msg_marshaller, msg_tracker, output_queue,
            tile_proc_logger, stats_handler, stop, max_zoom, group_by_zoom):
        self.queue_mapper = queue_mapper
        self.msg_marshaller = msg_marshaller
        self.msg_tracker = msg_tracker
        self.output = OutputQueue(output_queue, tile_proc_logger, stop)
        self.tile_proc_logger = tile_proc_logger
        self.stats_handler = stats_handler
        self.stop = stop
        self.max_zoom = max_zoom
        self.group_by_zoom = group_by_zoom
    def __call__(self):
        while not self.stop.is_set():
            msg_handles = ()
            # Poll queues in priority order, taking the first batch available.
            for queue_id, tile_queue in (
                    self.queue_mapper.queues_in_priority_order()):
                try:
                    msg_handles = tile_queue.read()
                except Exception as e:
                    stacktrace = format_stacktrace_one_line()
                    self.tile_proc_logger.error(
                        'Queue read error', e, stacktrace)
                    continue
                if msg_handles:
                    break
            if not msg_handles:
                continue
            for msg_handle in msg_handles:
                # if asked to stop, break as soon as possible
                if self.stop.is_set():
                    break
                now = convert_seconds_to_millis(time.time())
                msg_timestamp = None
                if msg_handle.metadata:
                    msg_timestamp = msg_handle.metadata.get('timestamp')
                timing_state = dict(
                    msg_timestamp=msg_timestamp,
                    start=now,
                )
                coords = self.msg_marshaller.unmarshall(msg_handle.payload)
                # it seems unlikely, but just in case there are no coordinates
                # in the payload, there's nothing to do, so skip to the next
                # payload.
                if not coords:
                    continue
                # check for duplicate coordinates - for the message tracking to
                # work, we assume that coordinates are unique, as we use them
                # as keys in a dict. (plus, it doesn't make a lot of sense to
                # render the coordinate twice in the same job anyway).
                coords = list(set(coords))
                parent_tile = self._parent(coords)
                queue_handle = QueueHandle(queue_id, msg_handle.handle)
                coord_handles = self.msg_tracker.track(
                    queue_handle, coords, parent_tile)
                all_coords_data = []
                for coord, coord_handle in izip(coords, coord_handles):
                    if coord.zoom > self.max_zoom:
                        self._reject_coord(coord, coord_handle, timing_state)
                        continue
                    metadata = dict(
                        # the timing is just what will be filled out later
                        timing=dict(
                            fetch=None,
                            process=None,
                            s3=None,
                            ack=None,
                        ),
                        # this is temporary state that is used later on to
                        # determine timing information
                        timing_state=timing_state,
                        coord_handle=coord_handle,
                    )
                    data = dict(
                        metadata=metadata,
                        coord=coord,
                    )
                    all_coords_data.append(data)
                # we might have no coordinates if we rejected all the
                # coordinates. in which case, there's nothing to do anyway, as
                # the _reject_coord method will have marked the job as done.
                if all_coords_data:
                    coord_input_spec = all_coords_data, parent_tile
                    msg = "group of %d tiles below %s" \
                        % (len(all_coords_data),
                           serialize_coord(parent_tile))
                    if self.output(msg, coord_input_spec):
                        break
        for _, tile_queue in self.queue_mapper.queues_in_priority_order():
            tile_queue.close()
        self.tile_proc_logger.lifecycle('tile queue reader stopped')
    def _reject_coord(self, coord, coord_handle, timing_state):
        """Log and immediately acknowledge a coordinate above max_zoom."""
        self.tile_proc_logger.log(
            LogLevel.WARNING,
            LogCategory.PROCESS,
            MsgType.INDIVIDUAL,
            'Job coordinates above max zoom are not '
            'supported, skipping %d > %d' % (
                coord.zoom, self.max_zoom),
            None, # exception
            None, # stacktrace
            coord,
        )
        # delete jobs that we can't handle from the
        # queue, otherwise we'll get stuck in a cycle
        # of timed-out jobs being re-added to the
        # queue until they overflow max-retries.
        _ack_coord_handle(
            coord, coord_handle, self.queue_mapper, self.msg_tracker,
            timing_state, self.tile_proc_logger, self.stats_handler)
    def _parent(self, coords):
        """Return the common parent tile of *coords* (clamped to group_by_zoom)."""
        if len(coords) == 0:
            return None
        parent = reduce(common_parent, coords)
        if self.group_by_zoom is not None:
            if parent.zoom < self.group_by_zoom:
                assert len(coords) == 1, "Expect either a single tile or a " \
                    "pyramid at or below group zoom."
            else:
                parent = parent.zoomTo(self.group_by_zoom).container()
        return parent
class DataFetch(object):
    """Pipeline stage that fetches source data for each coordinate.

    Reads ``(all_coords_data, parent)`` specs from ``input_queue``, runs
    the fetcher over them, and pushes one fetch-result dict per
    coordinate onto ``output_queue``.  A ``None`` item on the input
    queue is a sentinel that stops the stage.
    """

    def __init__(
            self, fetcher, input_queue, output_queue, io_pool,
            tile_proc_logger, stats_handler, metatile_zoom, max_zoom,
            metatile_start_zoom=0):
        self.fetcher = fetcher
        self.input_queue = input_queue
        self.output_queue = output_queue
        self.io_pool = io_pool
        self.tile_proc_logger = tile_proc_logger
        self.stats_handler = stats_handler
        self.metatile_zoom = metatile_zoom
        self.max_zoom = max_zoom
        self.metatile_start_zoom = metatile_start_zoom

    def __call__(self, stop):
        """Run the fetch loop until `stop` is set or a sentinel arrives."""
        saw_sentinel = False
        output = OutputQueue(self.output_queue, self.tile_proc_logger, stop)
        while not stop.is_set():
            try:
                coord_input_spec = self.input_queue.get(
                    timeout=timeout_seconds)
            except Queue.Empty:
                # timed out waiting for work; re-check the stop flag
                continue
            if coord_input_spec is None:
                # sentinel: upstream stage has finished
                saw_sentinel = True
                break
            # initialised before the try so the except clause can log
            # them even if unpacking/fetching fails part-way through
            coord = None
            parent = None
            try:
                all_data, parent = coord_input_spec
                for fetch, data in self.fetcher.fetch_tiles(all_data):
                    metadata = data['metadata']
                    coord = data['coord']
                    # a truthy return means the output queue wants us to
                    # stop (e.g. shutdown requested)
                    if self._fetch_and_output(fetch, coord, metadata, output):
                        break
            except Exception as e:
                # log and keep going - crashing here would stall the
                # whole pipeline
                stacktrace = format_stacktrace_one_line()
                self.tile_proc_logger.fetch_error(e, stacktrace, coord, parent)
                self.stats_handler.fetch_error()
        if not saw_sentinel:
            # stopped early: drain the queue so producers don't block
            _force_empty_queue(self.input_queue)
        self.tile_proc_logger.lifecycle('data fetch stopped')

    def _fetch_and_output(self, fetch, coord, metadata, output):
        """Fetch one coordinate and enqueue it; returns the output queue's
        stop signal (truthy when the caller should stop)."""
        data = self._fetch(fetch, coord, metadata)
        return output(coord, data)

    def _fetch(self, fetch, coord, metadata):
        """Run `fetch` for `coord` and return the dict consumed by the
        processing stage; records fetch time (millis) in metadata."""
        nominal_zoom = coord.zoom + self.metatile_zoom
        start_zoom = coord.zoom + self.metatile_start_zoom
        unpadded_bounds = coord_to_mercator_bounds(coord)
        start = time.time()
        source_rows = fetch(nominal_zoom, unpadded_bounds)
        metadata['timing']['fetch'] = convert_seconds_to_millis(
            time.time() - start)
        # every tile job that we get from the queue is a "parent" tile
        # and its four children to cut from it. at zoom 15, this may
        # also include a whole bunch of other children below the max
        # zoom.
        cut_coords = list(
            coord_children_subrange(coord, start_zoom, nominal_zoom))
        return dict(
            metadata=metadata,
            coord=coord,
            source_rows=source_rows,
            unpadded_bounds=unpadded_bounds,
            cut_coords=cut_coords,
            nominal_zoom=nominal_zoom,
        )
class ProcessAndFormatData(object):
    """Pipeline stage that converts fetched rows into formatted tiles.

    Reads fetch-result dicts from ``input_queue``, runs layer processing
    and all configured output formats, and pushes the formatted tiles
    onto ``output_queue``.  A ``None`` item on the input queue is a
    sentinel that stops the stage.
    """

    # tile extent used by the processing code (in tile-local units)
    scale = 4096

    def __init__(self, post_process_data, formats, input_queue,
                 output_queue, buffer_cfg, output_calc_mapping, layer_data,
                 tile_proc_logger, stats_handler):
        # NOTE: sorts the caller's list in place, so formats are always
        # applied in sort_key order
        formats.sort(key=attrgetter('sort_key'))
        self.post_process_data = post_process_data
        self.formats = formats
        self.input_queue = input_queue
        self.output_queue = output_queue
        self.buffer_cfg = buffer_cfg
        self.output_calc_mapping = output_calc_mapping
        self.layer_data = layer_data
        self.tile_proc_logger = tile_proc_logger
        self.stats_handler = stats_handler

    def __call__(self, stop):
        """Run the processing loop until `stop` is set or a sentinel arrives."""
        # ignore ctrl-c interrupts when run from terminal
        signal.signal(signal.SIGINT, signal.SIG_IGN)
        output = OutputQueue(self.output_queue, self.tile_proc_logger, stop)
        saw_sentinel = False
        while not stop.is_set():
            try:
                data = self.input_queue.get(timeout=timeout_seconds)
            except Queue.Empty:
                # timed out waiting for work; re-check the stop flag
                continue
            if data is None:
                # sentinel: upstream stage has finished
                saw_sentinel = True
                break
            coord = data['coord']
            unpadded_bounds = data['unpadded_bounds']
            cut_coords = data['cut_coords']
            nominal_zoom = data['nominal_zoom']
            source_rows = data['source_rows']
            start = time.time()
            try:
                feature_layers = convert_source_data_to_feature_layers(
                    source_rows, self.layer_data, unpadded_bounds,
                    nominal_zoom)
                formatted_tiles, extra_data = process_coord(
                    coord, nominal_zoom, feature_layers,
                    self.post_process_data, self.formats, unpadded_bounds,
                    cut_coords, self.buffer_cfg, self.output_calc_mapping)
            except Exception as e:
                # log and skip this coordinate - crashing here would
                # stall the whole pipeline
                stacktrace = format_stacktrace_one_line()
                self.tile_proc_logger.error(
                    'Processing error', e, stacktrace, coord)
                self.stats_handler.proc_error()
                continue
            metadata = data['metadata']
            metadata['timing']['process'] = convert_seconds_to_millis(
                time.time() - start)
            metadata['layers'] = extra_data
            data = dict(
                metadata=metadata,
                coord=coord,
                formatted_tiles=formatted_tiles,
            )
            # a truthy return means the output queue wants us to stop
            if output(coord, data):
                break
        if not saw_sentinel:
            # stopped early: drain the queue so producers don't block
            _force_empty_queue(self.input_queue)
        self.tile_proc_logger.lifecycle('processor stopped')
class S3Storage(object):
    """Pipeline stage that uploads formatted tiles to the tile store.

    Reads processed-tile dicts from ``input_queue``, writes the
    formatted tiles through ``io_pool``, records store counts and
    timing in the metadata, and forwards the result to ``output_queue``.
    A ``None`` item on the input queue is a sentinel that stops the
    stage.
    """

    def __init__(self, input_queue, output_queue, io_pool, store,
                 tile_proc_logger, metatile_size):
        self.input_queue = input_queue
        self.output_queue = output_queue
        self.io_pool = io_pool
        self.store = store
        self.tile_proc_logger = tile_proc_logger
        self.metatile_size = metatile_size

    def __call__(self, stop):
        """Run the storage loop until `stop` is set or a sentinel arrives."""
        saw_sentinel = False
        queue_output = OutputQueue(
            self.output_queue, self.tile_proc_logger, stop)
        while not stop.is_set():
            try:
                data = self.input_queue.get(timeout=timeout_seconds)
            except Queue.Empty:
                # timed out waiting for work; re-check the stop flag
                continue
            if data is None:
                # sentinel: upstream stage has finished
                saw_sentinel = True
                break
            coord = data['coord']
            start = time.time()
            try:
                async_jobs = self.save_tiles(data['formatted_tiles'])
            except Exception as e:
                # cannot propagate this error - it crashes the thread and
                # blocks up the whole queue!
                stacktrace = format_stacktrace_one_line()
                self.tile_proc_logger.error('Save error', e, stacktrace, coord)
                continue
            async_exc_info = None
            n_stored = 0
            n_not_stored = 0
            for async_job in async_jobs:
                try:
                    did_store = async_job.get()
                    if did_store:
                        n_stored += 1
                    else:
                        n_not_stored += 1
                except Exception:
                    # it's important to wait for all async jobs to
                    # complete but we just keep a reference to the last
                    # exception it's unlikely that we would receive multiple
                    # different exceptions when uploading to s3
                    #
                    # BUGFIX: keep the full exc_info rather than the bound
                    # ``except ... as e`` name - Python 3 unbinds that name
                    # when the handler exits, so referencing it after the
                    # loop raised NameError whenever an upload failed.
                    async_exc_info = sys.exc_info()
            if async_exc_info:
                stacktrace = format_stacktrace_one_line(async_exc_info)
                self.tile_proc_logger.error(
                    'Store error', async_exc_info[1], stacktrace, coord)
                continue
            metadata = data['metadata']
            metadata['timing']['s3'] = convert_seconds_to_millis(
                time.time() - start)
            metadata['store'] = dict(
                stored=n_stored,
                not_stored=n_not_stored,
            )
            data = dict(
                coord=coord,
                metadata=metadata,
            )
            # a truthy return means the output queue wants us to stop
            if queue_output(coord, data):
                break
        if not saw_sentinel:
            # stopped early: drain the queue so producers don't block
            _force_empty_queue(self.input_queue)
        self.tile_proc_logger.lifecycle('s3 storage stopped')

    def save_tiles(self, tiles):
        """Kick off async writes for each tile (metatiled if configured)
        and return the list of async result handles."""
        async_jobs = []
        if self.metatile_size:
            tiles = make_metatiles(self.metatile_size, tiles)
        for tile in tiles:
            async_result = self.io_pool.apply_async(
                write_tile_if_changed, (
                    self.store,
                    tile['tile'],
                    # important to use the coord from the
                    # formatted tile here, because we could have
                    # cut children tiles that have separate zooms
                    # too
                    tile['coord'],
                    tile['format']))
            async_jobs.append(async_result)
        return async_jobs
# Bundle of per-coordinate results handed to the logger and stats
# handler once a tile has made it through the whole pipeline.
CoordProcessData = namedtuple(
    'CoordProcessData', 'coord timing size store_info')
class TileQueueWriter(object):
    """Final pipeline stage: acks finished jobs back to the tile queue.

    Reads store-stage results from ``input_queue``, unmarks each
    coordinate as in-flight, acknowledges the originating queue message,
    and emits per-coordinate timing/size stats.  A ``None`` item on the
    input queue is a sentinel that stops the stage.
    """

    def __init__(
            self, queue_mapper, input_queue, inflight_mgr, msg_tracker,
            tile_proc_logger, stats_handler, stop):
        self.queue_mapper = queue_mapper
        self.input_queue = input_queue
        self.inflight_mgr = inflight_mgr
        self.msg_tracker = msg_tracker
        self.tile_proc_logger = tile_proc_logger
        self.stats_handler = stats_handler
        self.stop = stop

    def __call__(self):
        """Run the ack loop until `self.stop` is set or a sentinel arrives."""
        saw_sentinel = False
        while not self.stop.is_set():
            try:
                data = self.input_queue.get(timeout=timeout_seconds)
            except Queue.Empty:
                # timed out waiting for work; re-check the stop flag
                continue
            if data is None:
                # sentinel: upstream stage has finished
                saw_sentinel = True
                break
            metadata = data['metadata']
            coord_handle = metadata['coord_handle']
            coord = data['coord']
            timing_state = metadata['timing_state']
            start = time.time()
            try:
                self.inflight_mgr.unmark_inflight(coord)
            except Exception as e:
                # log and skip - crashing here would stall the pipeline
                stacktrace = format_stacktrace_one_line()
                self.tile_proc_logger.error(
                    'Unmarking in-flight error', e, stacktrace, coord)
                continue
            queue_handle, err = _ack_coord_handle(
                coord, coord_handle, self.queue_mapper, self.msg_tracker,
                timing_state, self.tile_proc_logger, self.stats_handler)
            if err is not None:
                # ack failed; _ack_coord_handle has already logged it
                continue
            timing = metadata['timing']
            now = time.time()
            timing['ack'] = convert_seconds_to_millis(now - start)
            # total time the message spent on the queue, when the
            # original message carried a timestamp (millis)
            time_in_queue = 0
            msg_timestamp = timing_state['msg_timestamp']
            if msg_timestamp:
                time_in_queue = convert_seconds_to_millis(now) - msg_timestamp
            timing['queue'] = time_in_queue
            layers = metadata['layers']
            size = layers['size']
            store_info = metadata['store']
            coord_proc_data = CoordProcessData(
                coord,
                timing,
                size,
                store_info,
            )
            self.tile_proc_logger.log_processed_coord(coord_proc_data)
            self.stats_handler.processed_coord(coord_proc_data)
        if not saw_sentinel:
            # stopped early: drain the queue so producers don't block
            _force_empty_queue(self.input_queue)
        self.tile_proc_logger.lifecycle('tile queue writer stopped')
class QueuePrint(object):
    """Periodically logs the sizes of the inter-stage queues until the
    stop event is set."""

    def __init__(self, interval_seconds, queue_info, tile_proc_logger, stop):
        self.interval_seconds = interval_seconds
        self.queue_info = queue_info
        self.tile_proc_logger = tile_proc_logger
        self.stop = stop

    def __call__(self):
        # nap in short slices so a stop request is noticed promptly
        # instead of being caught mid-sleep on a long interval
        nap = min(timeout_seconds, self.interval_seconds)
        while not self.stop.is_set():
            elapsed = 0.0
            while elapsed < self.interval_seconds and not self.stop.is_set():
                time.sleep(nap)
                elapsed += nap
            # to prevent the final empty queue log message
            if self.stop.is_set():
                break
            self.tile_proc_logger.log_queue_sizes(self.queue_info)
        self.tile_proc_logger.lifecycle('queue printer stopped')
|
thanhnghiacntt/tilequeue
|
tilequeue/worker.py
|
worker.py
|
py
| 24,574 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "Queue.Full",
"line_number": 36,
"usage_type": "attribute"
},
{
"api_name": "tilequeue.utils.format_stacktrace_one_line",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "sys.exit",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "tilequeue.utils.format_stacktrace_one_line",
"line_number": 114,
"usage_type": "call"
},
{
"api_name": "tilequeue.utils.convert_seconds_to_millis",
"line_number": 125,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 125,
"usage_type": "call"
},
{
"api_name": "tilequeue.utils.format_stacktrace_one_line",
"line_number": 134,
"usage_type": "call"
},
{
"api_name": "tilequeue.queue.JobProgressException",
"line_number": 136,
"usage_type": "argument"
},
{
"api_name": "tilequeue.utils.format_stacktrace_one_line",
"line_number": 189,
"usage_type": "call"
},
{
"api_name": "tilequeue.utils.convert_seconds_to_millis",
"line_number": 204,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 204,
"usage_type": "call"
},
{
"api_name": "tilequeue.queue.message.QueueHandle",
"line_number": 228,
"usage_type": "call"
},
{
"api_name": "itertools.izip",
"line_number": 233,
"usage_type": "call"
},
{
"api_name": "tilequeue.tile.serialize_coord",
"line_number": 265,
"usage_type": "call"
},
{
"api_name": "tilequeue.log.LogLevel.WARNING",
"line_number": 275,
"usage_type": "attribute"
},
{
"api_name": "tilequeue.log.LogLevel",
"line_number": 275,
"usage_type": "name"
},
{
"api_name": "tilequeue.log.LogCategory.PROCESS",
"line_number": 276,
"usage_type": "attribute"
},
{
"api_name": "tilequeue.log.LogCategory",
"line_number": 276,
"usage_type": "name"
},
{
"api_name": "tilequeue.log.MsgType.INDIVIDUAL",
"line_number": 277,
"usage_type": "attribute"
},
{
"api_name": "tilequeue.log.MsgType",
"line_number": 277,
"usage_type": "name"
},
{
"api_name": "tilequeue.metatile.common_parent",
"line_number": 296,
"usage_type": "argument"
},
{
"api_name": "Queue.Empty",
"line_number": 330,
"usage_type": "attribute"
},
{
"api_name": "tilequeue.utils.format_stacktrace_one_line",
"line_number": 346,
"usage_type": "call"
},
{
"api_name": "tilequeue.tile.coord_to_mercator_bounds",
"line_number": 362,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 364,
"usage_type": "call"
},
{
"api_name": "tilequeue.utils.convert_seconds_to_millis",
"line_number": 368,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 369,
"usage_type": "call"
},
{
"api_name": "tilequeue.tile.coord_children_subrange",
"line_number": 376,
"usage_type": "call"
},
{
"api_name": "operator.attrgetter",
"line_number": 395,
"usage_type": "call"
},
{
"api_name": "signal.signal",
"line_number": 408,
"usage_type": "call"
},
{
"api_name": "signal.SIGINT",
"line_number": 408,
"usage_type": "attribute"
},
{
"api_name": "signal.SIG_IGN",
"line_number": 408,
"usage_type": "attribute"
},
{
"api_name": "Queue.Empty",
"line_number": 416,
"usage_type": "attribute"
},
{
"api_name": "time.time",
"line_number": 428,
"usage_type": "call"
},
{
"api_name": "tilequeue.process.convert_source_data_to_feature_layers",
"line_number": 431,
"usage_type": "call"
},
{
"api_name": "tilequeue.process.process_coord",
"line_number": 434,
"usage_type": "call"
},
{
"api_name": "tilequeue.utils.format_stacktrace_one_line",
"line_number": 439,
"usage_type": "call"
},
{
"api_name": "tilequeue.utils.convert_seconds_to_millis",
"line_number": 446,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 447,
"usage_type": "call"
},
{
"api_name": "Queue.Empty",
"line_number": 484,
"usage_type": "attribute"
},
{
"api_name": "time.time",
"line_number": 492,
"usage_type": "call"
},
{
"api_name": "tilequeue.utils.format_stacktrace_one_line",
"line_number": 499,
"usage_type": "call"
},
{
"api_name": "sys.exc_info",
"line_number": 519,
"usage_type": "call"
},
{
"api_name": "tilequeue.utils.format_stacktrace_one_line",
"line_number": 522,
"usage_type": "call"
},
{
"api_name": "tilequeue.utils.convert_seconds_to_millis",
"line_number": 528,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 529,
"usage_type": "call"
},
{
"api_name": "tilequeue.metatile.make_metatiles",
"line_number": 551,
"usage_type": "call"
},
{
"api_name": "tilequeue.store.write_tile_if_changed",
"line_number": 556,
"usage_type": "argument"
},
{
"api_name": "collections.namedtuple",
"line_number": 570,
"usage_type": "call"
},
{
"api_name": "Queue.Empty",
"line_number": 594,
"usage_type": "attribute"
},
{
"api_name": "time.time",
"line_number": 605,
"usage_type": "call"
},
{
"api_name": "tilequeue.utils.format_stacktrace_one_line",
"line_number": 610,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 622,
"usage_type": "call"
},
{
"api_name": "tilequeue.utils.convert_seconds_to_millis",
"line_number": 623,
"usage_type": "call"
},
{
"api_name": "tilequeue.utils.convert_seconds_to_millis",
"line_number": 628,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 667,
"usage_type": "call"
}
] |
7125623220
|
######################################################
### SQL Database Script for PokeDex and TrainerDex ###
######################################################

import numpy as np
import pandas as pd
from sqlalchemy import create_engine
from config import password
import pprint as pp

# Locations of the cleaned CSV extracts, each loaded into a dataframe.
csv_file1 = "pokedex_clean.csv"
pokedex_df = pd.read_csv(csv_file1)
csv_file2 = "trainer_clean.csv"
trainer_df = pd.read_csv(csv_file2)
csv_file3 = "trainer_junction_clean.csv"
trainer_junction_df = pd.read_csv(csv_file3)

# Build the PostgreSQL connection URL (password comes from config.py)
# and create the SQLAlchemy engine.
rds_connection_string = f"postgres:{password}@localhost:5432/pokemon_db"
engine = create_engine("postgresql://" + rds_connection_string)

# Write each dataframe to its table, replacing any existing table.
pokedex_df.to_sql(name='pokedex', con=engine, if_exists='replace', index=False)
trainer_df.to_sql(name='trainer', con=engine, if_exists='replace', index=False)
trainer_junction_df.to_sql(name='trainer_junction', con=engine, if_exists='replace', index=False)
#################################################
############ Search Function Script ############
#################################################
# Resume_Search is a nested inner function that allows the user to choose whether or not they want to search again.
def search():
    """Interactively query the pokemon_db tables from the console.

    Presents a numbered menu, runs the matching SQL query against the
    module-level ``engine``, prints the result, and offers to search
    again.  Recurses into itself on repeat or invalid selections.

    All user-supplied values are passed to the database as bound query
    parameters (``params=``) rather than concatenated into the SQL
    string, which prevents SQL injection.
    """
    # resume_search is a nested inner function that lets the user choose
    # whether or not they want to search again.
    def resume_search():
        resume = input("Would you like to search again? (Yes or No) ")
        if resume.lower() in ["yes", "y"]:
            search()
        else:
            print('Search Canceled. Closing Script.')

    # input() always returns a string, so the menu choice is compared
    # against string literals.
    request = input("What would you like to search for? (Select the number of the menu option)\
    \n1: Trainer Name Containing:\
    \n2: Trainers that own (name of pokemon):\
    \n3: Pokedex Entry for (pokemon name):\
    \n4: Pokedex Entry for (dex number):\
    \n0: Cancel search and end program.:\
    \nMenu Number: ")
    if request == "1":
        trainer_search = input('Search for a trainer using their name:\nTrainer Name: ')
        search_return1 = pd.read_sql_query(
            "SELECT tr.trainer_id, tr.trainername, t_j.pokelevel, pk.pokemon_name "
            "FROM trainer_junction as t_j "
            "LEFT JOIN trainer AS tr ON tr.trainer_id = t_j.trainer_id "
            "LEFT JOIN pokedex as pk ON t_j.pokedex_number = pk.pokedex_number "
            "WHERE tr.trainername LIKE %(pattern)s",
            con=engine,
            params={"pattern": "%" + trainer_search.title() + "%"})
        print(search_return1)
        resume_search()
    elif request == "2":
        poke_search = input('For a list of all trainers owning this pokemon: \nPokemon Name: ')
        search_return2 = pd.read_sql_query(
            "SELECT tr.trainername "
            "FROM trainer_junction as t_j "
            "LEFT JOIN trainer AS tr ON tr.trainer_id = t_j.trainer_id "
            "LEFT JOIN pokedex as pk ON t_j.pokedex_number = pk.pokedex_number "
            "WHERE pk.pokemon_name = %(name)s",
            con=engine,
            params={"name": poke_search.title()})
        # flatten the single-column result into a plain list of names
        search_result2 = [val[0] for val in search_return2.values]
        pp.pprint(search_result2)
        resume_search()
    elif request == "3":
        poke_name = input('What is the name of the pokemon whose pokedex entry you wish to search for? \nPokemon Name: ')
        search_return3 = pd.read_sql_query(
            "SELECT * FROM pokedex WHERE pokemon_name = %(name)s",
            con=engine, params={"name": poke_name.title()})
        # transpose so the single entry prints as one column of fields
        search_result3 = search_return3.transpose()
        print(search_result3)
        resume_search()
    elif request == "4":
        poke_num = input('What is the pokedex number of the pokemon whose pokedex entry you wish to search for? \
    \nPlease note that we have our own pokedex numbering system. \
    \nPokedex Number: ')
        search_return4 = pd.read_sql_query(
            "SELECT * FROM pokedex WHERE pokedex_number = %(num)s",
            con=engine, params={"num": poke_num})
        search_result4 = search_return4.transpose()
        print(search_result4)
        resume_search()
    elif request == "0":
        print('Search Canceled. Ending Prompt.')
    else:
        print("That isn't a menu option number. Please try again.")
        search()
##########################################
######## Call The Search Function ########
##########################################
# Run the interactive search only when executed as a script, not when
# this module is imported (the table loads above still run on import).
if __name__ == '__main__':
    search()
##########################################
############### END SCRIPT ###############
##########################################
|
ASooklall/ETL_Project
|
pokemon_db/pokemon_db.py
|
pokemon_db.py
|
py
| 4,633 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "pandas.read_csv",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "config.password",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.create_engine",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "pandas.read_sql_query",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "pandas.read_sql_query",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "pprint.pprint",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "pandas.read_sql_query",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "pandas.read_sql_query",
"line_number": 88,
"usage_type": "call"
}
] |
35743736914
|
import sys
import os
import numpy as np
import scipy.sparse
from scipy.sparse import csr_matrix, find
import read_write as rw
import matlab.engine
'''
finput_iu_rating_matrix_train = "Data/iu_sparse_matrix_train.npz"
finput_title_sim_matrix = "Data/title_similarity_matrix"
finput_description_sim_matrix = "Data/description_similarity_matrix"
finput_user_cluster_set = "Data/user_cluster_set"
finput_train_item_id = "Data/train_item_id"
finput_test_item_id = "Data/test_item_id"
finput_nonlinreg = "Data/nonlinreg"
finput_init_tp = 1.0
finput_init_dp = 1.0
foutput_iuclst_rating_matrix = "Data/iuclst_rating_matrix"
foutput_item_sim_matrix = "Data/item_sim_matrix"
'''
if (__name__ == '__main__'):
    #### data path
    # All paths and parameters arrive on the command line, in the order
    # documented by the commented-out sample block above.
    finput_iu_rating_matrix_train = sys.argv[1]
    finput_iu_rating_matrix_test = sys.argv[2]
    finput_title_sim_matrix = sys.argv[3]
    finput_description_sim_matrix = sys.argv[4]
    finput_user_cluster_set = sys.argv[5]
    finput_train_item_id = sys.argv[6]
    finput_test_item_id = sys.argv[7]
    finput_nonlinreg = sys.argv[8]
    finput_init_tp = float(sys.argv[9])    # initial title parameter
    finput_init_dp = float(sys.argv[10])   # initial description parameter
    foutput_iuclst_rating_matrix_train = sys.argv[11]
    foutput_iuclst_rating_matrix_test = sys.argv[12]
    foutput_item_sim_matrix = sys.argv[13]

    # load data
    # sparse item x user rating matrices; the rest are pickled objects
    # read back via the project's read_write helper
    iu_rating_matrix_train = scipy.sparse.load_npz(finput_iu_rating_matrix_train)
    iu_rating_matrix_test = scipy.sparse.load_npz(finput_iu_rating_matrix_test)
    title_sim_matrix = rw.readffile(finput_title_sim_matrix)
    description_sim_matrix = rw.readffile(finput_description_sim_matrix)
    user_cluster_set = rw.readffile(finput_user_cluster_set)
    train_item_id = rw.readffile(finput_train_item_id)
    test_item_id = rw.readffile(finput_test_item_id)

    # run matlab script and get parameters for title and description
    # NOTE(review): hard-coded Windows path; the unescaped backslashes
    # happen to survive because none form a recognized escape sequence,
    # but this only works on that one machine.
    print("call matlab script....")
    cur_path = os.getcwd()
    os.chdir("D:\GitCode\Dissertation\Step1-Preprocessing")
    eng = matlab.engine.start_matlab()
    # my_fitnlm fits the nonlinear regression and returns the two
    # weights plus the fit RMSE
    x = eng.my_fitnlm(finput_nonlinreg, finput_init_tp, finput_init_dp, nargout=3)
    theta1, theta2, RMSE = x[0], x[1], x[2]
    eng.quit()
    # combined item similarity: weighted sum of title and description
    # similarity matrices
    sim_matrix = theta1*title_sim_matrix + theta2*description_sim_matrix
    os.chdir(cur_path)
    rw.write2file(sim_matrix, foutput_item_sim_matrix)
    print("theta1 = ", theta1)
    print("theta2 = ", theta2)
    print("RMSE = ", RMSE)
    print("matlab finished")

    # extract similarity matrix for training and test item
    # (sim_matrix is indexed by item id; .loc reorders rows/columns to
    # match the train/test id ordering, presumably a pandas DataFrame)
    # resort_id = list(train_item_id.keys()) + list(test_item_id.keys())
    sim_matrix_train = sim_matrix.loc[list(train_item_id.keys()), list(train_item_id.keys())].values
    sim_matrix_test = sim_matrix.loc[list(test_item_id.keys()), list(test_item_id.keys())].values

    # user cluster - item rating matrix
    # one column per user cluster, one row per item
    iuclst_rating_matrix_train = np.zeros((len(train_item_id), len(user_cluster_set)))
    iuclst_rating_matrix_test = np.zeros((len(test_item_id), len(user_cluster_set)))
    item_in_node_train = list(range(iu_rating_matrix_train.shape[0]))
    item_in_node_test = list(range(iu_rating_matrix_test.shape[0]))
    for ind, user_cluster in zip(range(len(user_cluster_set)), user_cluster_set):
        print("user cluster: %d / %d"%(ind+1, len(user_cluster_set)), end="\r")
        user_cluster_size = len(user_cluster)
        # predicted ratings = similarity-weighted average over rated
        # items (1e-9 guards the division when nothing was rated);
        # observed ratings count with weight 1, predictions with 0.01
        sub_rating_matrix = iu_rating_matrix_train[np.ix_(item_in_node_train, user_cluster)].T.toarray() # user number * training item number
        sub_rating_matrix_pred = (np.dot(sub_rating_matrix, sim_matrix_train) / (1e-9+np.dot(sub_rating_matrix != 0, sim_matrix_train)))
        iuclst_rating_matrix_train[:, ind] = np.sum(sub_rating_matrix + 0.01*(sub_rating_matrix == 0) * sub_rating_matrix_pred, axis=0) / np.sum((sub_rating_matrix == 0)*0.01 + (sub_rating_matrix != 0)*1, axis=0)
        # same computation for the test items
        sub_rating_matrix = iu_rating_matrix_test[np.ix_(item_in_node_test, user_cluster)].T.toarray() # user number * test item number
        sub_rating_matrix_pred = (np.dot(sub_rating_matrix, sim_matrix_test) / (1e-9+np.dot(sub_rating_matrix != 0, sim_matrix_test)))
        iuclst_rating_matrix_test[:, ind] = np.sum(sub_rating_matrix + 0.01*(sub_rating_matrix == 0) * sub_rating_matrix_pred, axis=0) / np.sum((sub_rating_matrix == 0)*0.01 + (sub_rating_matrix != 0)*1, axis=0)
    print("\nuser cluster/item rating matrix generated done!")
    rw.write2file(iuclst_rating_matrix_train, foutput_iuclst_rating_matrix_train)
    rw.write2file(iuclst_rating_matrix_test, foutput_iuclst_rating_matrix_test)
    print("file saved done!")
|
clamli/Dissertation
|
Step1-Preprocessing/buildtree_preparation.py
|
buildtree_preparation.py
|
py
| 4,436 |
python
|
en
|
code
| 28 |
github-code
|
6
|
[
{
"api_name": "sys.argv",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 26,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 28,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 29,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 30,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 31,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 32,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 33,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 34,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 35,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 36,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 37,
"usage_type": "attribute"
},
{
"api_name": "scipy.sparse.sparse.load_npz",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "scipy.sparse.sparse",
"line_number": 40,
"usage_type": "attribute"
},
{
"api_name": "scipy.sparse",
"line_number": 40,
"usage_type": "name"
},
{
"api_name": "scipy.sparse.sparse.load_npz",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "scipy.sparse.sparse",
"line_number": 41,
"usage_type": "attribute"
},
{
"api_name": "scipy.sparse",
"line_number": 41,
"usage_type": "name"
},
{
"api_name": "read_write.readffile",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "read_write.readffile",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "read_write.readffile",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "read_write.readffile",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "read_write.readffile",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "os.getcwd",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "os.chdir",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "matlab.engine.engine.start_matlab",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "matlab.engine.engine",
"line_number": 52,
"usage_type": "attribute"
},
{
"api_name": "matlab.engine",
"line_number": 52,
"usage_type": "name"
},
{
"api_name": "os.chdir",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "read_write.write2file",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "numpy.ix_",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "numpy.dot",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "numpy.sum",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "numpy.ix_",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "numpy.dot",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "numpy.sum",
"line_number": 82,
"usage_type": "call"
},
{
"api_name": "read_write.write2file",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "read_write.write2file",
"line_number": 86,
"usage_type": "call"
}
] |
25033983488
|
from django.urls import path
from . import views
# URL routes for the network app; page-numbered routes serve both the
# rendered UI and paginated JSON, per the "hybrid" note below.
urlpatterns = [
    # UI & API hybrid routes
    path("", views.index, name="index"),
    path("posts/<int:page>", views.posts, name="posts"),
    path("following/<int:page>", views.following, name="following"),
    path("profile/<str:username>/<int:page>", views.profile, name="profile"),
    path("login", views.login_view, name="login"),
    path("logout", views.logout_view, name="logout"),
    path("register", views.register, name="register"),

    # API routes
    path("post-edit/<int:post_id>", views.post_edit, name="post_edit"),
    path("toggle-like/<int:post_id>",
         views.toggle_like, name="toggle_like"),
    path("toggle-follow/<int:user_id>",
         views.toggle_follow, name="toggle_follow"),
]
|
csloan29/HES-e-33a-web-django
|
network/network/urls.py
|
urls.py
|
py
| 773 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "django.urls.path",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 19,
"usage_type": "call"
}
] |
8410504339
|
#! /usr/bin/python
import logging
import os
from pathlib import Path
import coloredlogs
from dotenv import load_dotenv
# Directory containing this settings module; used to anchor all paths.
PROJECT_ROOT = Path(__file__).parent.resolve()

#####################
# CONFIGURE LOGGING #
#####################

# File log lives next to this module.
LOG_PATH = str(PROJECT_ROOT / "worker.log")

logging.basicConfig(
    filename=LOG_PATH,
    filemode="a+",  # append; create the file if it does not exist
    format="%(asctime)s,%(msecs)d [%(name)s] %(levelname)s %(message)s",
    datefmt="%Y-%m-%d %H:%M:%S",
)
# coloredlogs adds a colorized console handler alongside the file log.
coloredlogs.install(fmt="%(asctime)s [%(programname)s] %(levelname)s %(message)s")

#################
# ENV VARIABLES #
#################

ENV_PATH = str(PROJECT_ROOT / ".env")
ENV_LOCAL_PATH = str(PROJECT_ROOT / ".env.local")

# load default variables
load_dotenv(ENV_PATH)

# overide variables with .env.local
load_dotenv(ENV_LOCAL_PATH, override=True)

#######
# AWS #
#######

# NOTE: os.getenv returns None when a variable is unset; downstream
# code must tolerate or validate missing values.
AWS_ACCESS_KEY_ID = os.getenv("AWS_ACCESS_KEY_ID")
AWS_SECRET_ACCESS_KEY = os.getenv("AWS_SECRET_ACCESS_KEY")
AWS_REGION_NAME = os.getenv("AWS_REGION_NAME")
AWS_BUCKET_NAME = os.getenv("AWS_BUCKET_NAME")

#########
# MYSQL #
#########

MYSQL_HOST = os.getenv("MYSQL_HOST")
MYSQL_USER = os.getenv("MYSQL_USER")
MYSQL_PASSWORD = os.getenv("MYSQL_PASSWORD")
MYSQL_DB = os.getenv("MYSQL_DB")
|
darwin403/translate-transcribe-videos
|
settings.py
|
settings.py
|
py
| 1,222 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "pathlib.Path",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "logging.basicConfig",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "coloredlogs.install",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "dotenv.load_dotenv",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "dotenv.load_dotenv",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "os.getenv",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "os.getenv",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "os.getenv",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "os.getenv",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "os.getenv",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "os.getenv",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "os.getenv",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "os.getenv",
"line_number": 55,
"usage_type": "call"
}
] |
6148825082
|
from selenium import webdriver
from time import sleep
import selenium
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.select import Select
import pandas as pd
if __name__ == '__main__':
    # Earlier approach (kept for reference): drive an existing Chrome
    # profile instead of Internet Explorer.
    # option=webdriver.ChromeOptions()
    # option.add_argument("--user-data-dir="+r"C:\\Users\\20142266\\AppData\\Local\\Google\\Chrome\\User Data")
    # driver = webdriver.Chrome(chrome_options=option)
    # sleep(2)
    # Launch Internet Explorer and open the CQCC filing site.
    driver = webdriver.Ie()
    #driver.get('chrome-extension://hehijbfgiekmjfkfjpbkbammjbdenadd/nhc.htm#url=http://tec.cqccms.com.cn/')
    driver.get('http://tec.cqccms.com.cn/')
    sleep(1)
    # Click the login submit button via JS (more reliable in IE than a
    # direct element click here).
    js = 'document.getElementById("submitButton").click()'
    driver.execute_script(js)
    sleep(1)
    #driver.find_element_by_id("submitButton").send_keys(Keys.ENTER)
    # (commented out: prompted to verify the digital certificate passphrase)
    #testck.input_windows("核对数字证书口令","")
    # Select the account radio button by its value, then submit the form.
    ratio = driver.find_elements_by_tag_name("input")
    for a in ratio:
        if a.get_attribute('value') == '4005919':
            a.click()
        if a.get_attribute("type") == "submit":
            a.click()
    sleep(2)
    #str = driver.get_cookies()
    #print(str)
    #cookie1 = str[0]['value']
    #driver.add_cookie({'name': 'JSESSIONID', 'value': cookie1})
    # Open the COC completion form for one specific vehicle record
    # (ids/collection are hard-coded for this particular filing).
    URL = "http://tec.cqccms.com.cn/cocComplete!cocCompleteCreate.action?" \
          "id=201709291432251U8205&carType=HFC5181XXYP3K1A57S2QV&carCellCode" \
          "=2017011101011956&carTypeSeqCode=N36N&carCellId=5636338&collection" \
          "=A013551N36NZM95ZZEZ420000901@null@20201105173042786997%3B1@@1@0@5254506@1"
    driver.get(URL)
    # Fill the f3 and f7 dropdowns: prefer the "5700" option, falling
    # back to the second entry when "5700" is not present.
    myselect=driver.find_elements_by_tag_name("select")
    for i in myselect:
        if i.get_property("name")=="f3":
            try:
                Select(i).select_by_visible_text("5700")
            except selenium.common.exceptions.NoSuchElementException:
                Select(i).select_by_index(1)
        if i.get_property("name")=="f7":
            try:
                Select(i).select_by_visible_text("5700")
            except selenium.common.exceptions.NoSuchElementException:
                Select(i).select_by_index(1)
|
YN3359/runoob-git-test
|
PythonScripts/自动备案COC.py
|
自动备案COC.py
|
py
| 2,122 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "selenium.webdriver.Ie",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "time.sleep",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver.support.select.Select",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "selenium.common",
"line_number": 43,
"usage_type": "attribute"
},
{
"api_name": "selenium.webdriver.support.select.Select",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver.support.select.Select",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "selenium.common",
"line_number": 48,
"usage_type": "attribute"
},
{
"api_name": "selenium.webdriver.support.select.Select",
"line_number": 49,
"usage_type": "call"
}
] |
16543689747
|
import csv
import sys
from nuitka.__past__ import StringIO
from nuitka.Tracing import my_print
from nuitka.utils.Execution import check_output
def main():
    """Query the openSUSE build service status of the Nuitka packages.

    Reports build targets in a bad state for the three monitored packages
    and exits with status 1 if any problems were found, 0 otherwise.
    """
    my_print("Querying openSUSE build service status of Nuitka packages.")

    # spell-checker: ignore kayhayen
    osc_cmd = ["osc", "pr", "-c", "home:kayhayen"]
    stdout_osc = check_output(args=osc_cmd)

    # On Python3 the subprocess output is bytes and must be decoded.
    if str is not bytes:
        stdout_osc = stdout_osc.decode("utf8")

    # Response is really a CSV file, so use that for parsing.
    csv_file = StringIO(stdout_osc)
    osc_reader = iter(csv.reader(csv_file, delimiter=";"))

    # Build states considered real problems.
    bad = ("failed", "unresolvable", "broken", "blocked")

    titles = next(osc_reader)[1:]

    # One CSV row per monitored package, in this order:
    # Nuitka (follows git main branch),
    # Nuitka-Unstable (follows git develop branch),
    # Nuitka-Experimental (follows git factory branch).
    package_rows = [next(osc_reader), next(osc_reader), next(osc_reader)]

    problems = []

    def decideConsideration(title, status):
        """Return True if this build target/status combination should be reported."""
        # Ignore other arch builds, they might to not even boot at times.
        # spell-checker: ignore aarch
        if "ppc" in title or "aarch" in title or "arm" in title:
            return False

        # This fails for other reasons often, and is not critical to Nuitka.
        if "openSUSE_Tumbleweed" in title:
            return False

        # Ignore old Fedora and RHEL6 32 bit being blocked.
        if status == "blocked":
            if (
                "Fedora_2" in title
                or "RedHat_RHEL-6/i586" in title
                or "CentOS_CentOS-6/i586" in title
            ):
                return False

        # It makes building visible now, that's not an error of course.
        if status == "building":
            return False

        # It makes need to build visible as well, that too is not an error
        # really.
        if status == "outdated":
            return False

        return True

    # This scan used to be copy/pasted once per package row; iterate over
    # the rows instead, which is equivalent and keeps it in one place.
    for row in package_rows:
        for count, title in enumerate(titles):
            status = row[count + 1]

            if not decideConsideration(title, status):
                continue

            if status in bad:
                problems.append((row[0], title, status))

    if problems:
        my_print("There are problems with:", style="yellow")
        my_print(
            "\n".join("%s: %s (%s)" % problem for problem in problems), style="yellow"
        )

        if any(problem[0] == "Nuitka" for problem in problems):
            my_print(
                "Check here: https://build.opensuse.org/package/show/home:kayhayen/Nuitka"
            )
        if any(problem[0] == "Nuitka-Unstable" for problem in problems):
            my_print(
                "Check here: https://build.opensuse.org/package/show/home:kayhayen/Nuitka-Unstable"
            )
        if any(problem[0] == "Nuitka-experimental" for problem in problems):
            my_print(
                "Check here: https://build.opensuse.org/package/show/home:kayhayen/Nuitka-experimental"
            )

        sys.exit(1)
    else:
        my_print("Looks good.", style="blue")
        sys.exit(0)


if __name__ == "__main__":
    main()
|
Nuitka/Nuitka
|
nuitka/tools/release/osc_check/__main__.py
|
__main__.py
|
py
| 3,651 |
python
|
en
|
code
| 10,019 |
github-code
|
6
|
[
{
"api_name": "nuitka.Tracing.my_print",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "nuitka.utils.Execution.check_output",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "nuitka.__past__.StringIO",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "csv.reader",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "nuitka.Tracing.my_print",
"line_number": 99,
"usage_type": "call"
},
{
"api_name": "nuitka.Tracing.my_print",
"line_number": 100,
"usage_type": "call"
},
{
"api_name": "nuitka.Tracing.my_print",
"line_number": 105,
"usage_type": "call"
},
{
"api_name": "nuitka.Tracing.my_print",
"line_number": 110,
"usage_type": "call"
},
{
"api_name": "nuitka.Tracing.my_print",
"line_number": 115,
"usage_type": "call"
},
{
"api_name": "sys.exit",
"line_number": 119,
"usage_type": "call"
},
{
"api_name": "nuitka.Tracing.my_print",
"line_number": 121,
"usage_type": "call"
},
{
"api_name": "sys.exit",
"line_number": 122,
"usage_type": "call"
}
] |
40077076852
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import sys
import re
import logging
from .pushbullet import *
class Messenger(object):
    """Parses benchmark log lines and pushes notable events via PushBullet.

    Feed lines one at a time to :meth:`message_buffer`; recognised
    milestones (load/power/throughput phases finishing, final results,
    error reports) are buffered in ``self.message`` and pushed to the
    configured PushBullet device.
    """

    def __init__(self):
        # True once a message is buffered and should be pushed.
        self.ready = False
        self.message = ''
        # Accumulated error lines, flushed when the final error line arrives.
        self.error_bag = ''
        # Accumulated final-result ("VALID BBQpm") lines.
        self.result_bag = ''
        # Elapsed time such as "1h:23m:45s:678ms".
        self.pattern_time = re.compile(r'\d{1,2}h:\d{1,2}m:\d{1,2}s:\d{1,4}ms')
        # Progress marker such as "( 3/22)".
        self.pattern_process = re.compile(r'\([0-9\ ]{0,2}/\w{2}\)')
        # Throughput stream id such as "stream 3".
        self.pattern_stream = re.compile(r'stream\s+\d{1,2}')
        # Collapses runs of whitespace into a single space.
        self.pattern_long_space = re.compile(r'\s+')

    def _message_chopper(self, line):
        """Return *line* if it reports a finished step, else None."""
        if 'Finished' in line:
            return line
        else:
            return

    def bb_message_chopper(self, line):
        """Extract a push-worthy summary from one benchmark log *line*.

        Returns a short status string for recognised milestones,
        accumulates error context into ``self.error_bag``, and returns
        None for lines that should not trigger a push.
        """
        if 'Load test' in line:
            if 'finished' in line:
                try:
                    time = re.search(self.pattern_time, line)[0]
                except Exception:
                    logging.info('re seach time failed')
                    time = 'TIME SEARH FAILED'
                message = 'Load finished. Time: ' + time
                return message
        elif 'Power test' in line:
            if 'finished' in line:
                try:
                    time = re.search(self.pattern_time, line)[0]
                except Exception:
                    time = ' TIME SEARH FAILED '
                try:
                    process = re.search(self.pattern_process, line)[0]
                except Exception:
                    # `time` is always truthy here (match or fallback text).
                    if time:
                        process = ' finished.'
                    else:
                        process = ' PROCESS SEARCH FAILED '
                message = 'Power' + process + ' Time: ' + time
                # Only push when the whole phase finished, not per step.
                if process == ' finished.':
                    return message
        elif 'throughput' in line:
            if 'finished' in line:
                try:
                    self.pattern_long_space = re.compile(r'\s+')
                    stream_buff = re.search(self.pattern_stream, line)[0]
                    stream = self.pattern_long_space.sub(' ', stream_buff)
                except Exception:
                    stream = ''
                try:
                    time = re.search(self.pattern_time, line)[0]
                except Exception:
                    time = ' TIME SEARH FAILED '
                try:
                    process = re.search(self.pattern_process, line)[0]
                except Exception:
                    if time:
                        process = ' finished.'
                    else:
                        process = ' PROCESS SEARCH FAILED '
                message = 'Throughput ' + stream + process + ' Time: ' + time
                if process == ' finished.':
                    return message
        elif 'benchmark: Stop' in line:
            if 'finished' in line:
                try:
                    time = re.search(self.pattern_time, line)[0]
                except Exception:
                    time = ' TIME SEARH FAILED '
                message = 'Benchmark Stop. ' + 'Time: ' + time
                return message
        elif 'VALID BBQpm' in line:
            # Final score line: remember it and push it (minus the newline).
            self.result_bag += line
            message = line[:-1]
            return message
        elif 'Benchmark run terminated' in line:
            self.error_bag += line
        elif 'Reason:' in line:
            self.error_bag += line
        elif 'No final result available.' in line:
            # Last line of an error report: push the accumulated errors.
            self.error_bag += line
            message = self.error_bag
            return message

    def message_buffer(self, line):
        """Echo and log *line*, then push any milestone extracted from it."""
        # Strip a single trailing newline for console/log output.
        # (endswith also avoids the IndexError `line[-1]` raised for '')
        if line.endswith('\n'):
            line_tmp = line[:-1]
        else:
            line_tmp = line
        print(line_tmp)
        sys.stdout.flush()
        logging.info(line_tmp)
        if line != '':
            message2push = self.bb_message_chopper(line)
            if message2push:
                self.message += message2push
                self.ready = True
        if self.ready == True:
            logging.info('Pushing message...(%s)' % self.message)
            self.send()

    def test(self, message='Your network seems good.'):
        """Send a test note to verify PushBullet connectivity."""
        p = PushBullet(USER_API_KEY)
        try:
            # Get a list of devices
            devices = p.getDevices()
        except Exception:
            print('You may have a network connection probelem to connect pushbullet.com.')
            sys.stdout.flush()
            logging.info('You may have a network connection probelem to connect pushbullet.com.')
        else:
            if len(devices) != 0:
                print_devices(devices)
                sys.stdout.flush()
                # Send a note
                p.pushNote(USER_DEVICE_IDEN, 'Alfred is with you.', message)

    def send(self):
        """Push the buffered message, then clear the buffer."""
        p = PushBullet(USER_API_KEY)
        try:
            # Get a list of devices
            devices = p.getDevices()
        except Exception:
            print('You may have a network connection probelem to connect pushbullet.com.')
            sys.stdout.flush()
            logging.info('You may have a network connection probelem to connect pushbullet.com.')
        else:
            if len(devices) != 0:
                # Send a note
                p.pushNote(USER_DEVICE_IDEN, 'News from Alfred', self.message)
            # Buffer is cleared even when no device accepted the note
            # (matches the original control flow).
            self.message = ''
            self.ready = False

    def send_all(self, retry=20):
        """Retry :meth:`send` until the buffer is empty or retries run out."""
        while retry > 0 and self.message != '':
            # BUGFIX: this called M.send() on an undefined global `M`,
            # raising NameError; send on this instance instead.
            self.send()
            retry -= 1
            logging.info('Remaining Attempts:%d' % retry)
|
lucy9215/jobNotification
|
pushbullet/messenger.py
|
messenger.py
|
py
| 5,743 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "re.compile",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "re.search",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "re.search",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "re.search",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "re.search",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "re.search",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "re.search",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "re.search",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "sys.stdout.flush",
"line_number": 103,
"usage_type": "call"
},
{
"api_name": "sys.stdout",
"line_number": 103,
"usage_type": "attribute"
},
{
"api_name": "logging.info",
"line_number": 104,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 111,
"usage_type": "call"
},
{
"api_name": "sys.stdout.flush",
"line_number": 121,
"usage_type": "call"
},
{
"api_name": "sys.stdout",
"line_number": 121,
"usage_type": "attribute"
},
{
"api_name": "logging.info",
"line_number": 122,
"usage_type": "call"
},
{
"api_name": "sys.stdout.flush",
"line_number": 126,
"usage_type": "call"
},
{
"api_name": "sys.stdout",
"line_number": 126,
"usage_type": "attribute"
},
{
"api_name": "sys.stdout.flush",
"line_number": 138,
"usage_type": "call"
},
{
"api_name": "sys.stdout",
"line_number": 138,
"usage_type": "attribute"
},
{
"api_name": "logging.info",
"line_number": 139,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 151,
"usage_type": "call"
}
] |
23307657200
|
from __future__ import print_function
try:
# For Python 3.0 and later
from urllib.request import urlopen
from urllib.request import urlretrieve
from urllib import request
except ImportError:
# Fall back to Python 2's urllib2
from urllib2 import urlopen
from urllib import urlretrieve
import sys
import numpy as np
import matplotlib.pyplot as plt
import pylab
from matplotlib.ticker import MaxNLocator
import argparse
import os
import astropy.wcs
from astropy.io import fits
from astropy.table import Table
from astropy.coordinates import SkyCoord
from astropy import units as u
from astroquery.gaia import Gaia
from astropy.wcs import WCS
from astropy.visualization.wcsaxes import SphericalCircle
from scipy.ndimage.filters import gaussian_filter
import warnings
warnings.filterwarnings("ignore")
def deg2hour(ra, dec, sep=":"):
    """Convert degree coordinates into HH:MM:SS / DD:MM:SS strings.

    String inputs are assumed to already be sexagesimal and are
    returned unchanged.
    """
    if isinstance(ra, str) and isinstance(dec, str):
        return ra, dec
    coord = SkyCoord(ra, dec, frame='icrs', unit='deg')
    ra_str = coord.ra.to_string(unit=u.hourangle, sep=sep, precision=2, pad=True)
    dec_str = coord.dec.to_string(sep=sep, precision=2, alwayssign=True, pad=True)
    return str(ra_str), str(dec_str)
def hour2deg(ra, dec):
    """Convert HH:MM:SS / DD:MM:SS coordinate strings into degrees (floats).

    Inputs that already parse as floats are passed through as floats.
    """
    try:
        ra = float(ra)
        dec = float(dec)
    except (TypeError, ValueError):
        coord = SkyCoord(ra, dec, frame='icrs', unit=(u.hourangle, u.deg))
        ra = coord.ra.deg
        dec = coord.dec.deg
    return ra, dec
def get_offset(ra1, dec1, ra2, dec2):
    """Compute the offset in arcsec between two sky coordinates.

    The offset runs from (ra1, dec1) - generally a bright offset star -
    to (ra2, dec2) - the fainter target. Returns (east, north) arcsec.
    """
    from astropy.coordinates import SkyCoord
    reference = SkyCoord(ra1, dec1, frame='icrs', unit=(u.deg, u.deg))
    faint_target = SkyCoord(ra2, dec2, frame='icrs', unit=(u.deg, u.deg))
    delta_ra, delta_dec = reference.spherical_offsets_to(faint_target)
    return delta_ra.to(u.arcsec).value, delta_dec.to(u.arcsec).value
def query_sky_mapper_catalogue(ra, dec, radius_deg, minmag=15, maxmag=18.5):
    '''
    Sends a VO cone-search query to the SkyMapper catalogue.

    Returns a structured numpy array with fields "ra", "dec", "mag" for
    star-like sources (class_star > 0.7, ngood > 5) whose r_psf magnitude
    lies between minmag and maxmag.
    '''
    url = "http://skymapper.anu.edu.au/sm-cone/query?RA=%.6f&DEC=%.6f&SR=%.4f&RESPONSEFORMAT=CSV"%(ra, dec, radius_deg)

    # Download the cone-search result to a temporary CSV file; the context
    # manager guarantees the file handle is closed.
    page = urlopen(url)
    content = page.read()
    with open("/tmp/skymapper_cat.csv", "wb") as f:
        f.write(content)

    # Read RA, Dec and magnitude from CSV and keep star-like, well-measured
    # sources within the requested magnitude range.
    catalog = Table.read("/tmp/skymapper_cat.csv", format="ascii.csv")
    mask = (catalog["class_star"]>0.7) * (catalog["ngood"] >5)  * (catalog['r_psf']>minmag) * (catalog['r_psf']<maxmag)
    catalog = catalog[mask]

    # BUGFIX: np.float was removed in NumPy 1.24; the builtin float is the
    # documented replacement (same float64 dtype).
    newcat = np.zeros(len(catalog), dtype=[("ra", np.double), ("dec", np.double), ("mag", float)])
    newcat["ra"] = catalog["raj2000"]
    newcat["dec"] = catalog["dej2000"]
    newcat["mag"] = catalog["r_psf"]

    return newcat
def query_ps1_catalogue(ra, dec, radius_deg, minmag=15, maxmag=18.5):
    '''
    Sends a VO query to the PS1 catalogue.

    Filters the result by magnitude (between minmag and maxmag)
    and by the PSF-like shape of the sources.

    Returns a structured numpy array with fields "ra", "dec", "mag".
    '''
    url = "http://gsss.stsci.edu/webservices/vo/CatalogSearch.aspx?CAT=PS1V3OBJECTS&RA=%.5f&DEC=%.5f&SR=%.5f&FORMAT=csv"%(ra, dec, radius_deg)

    # Prefer the Python3 urllib interface; fall back to the Python2-style
    # urlopen if `request` is unavailable or fails.
    try:
        page = request.urlopen(url)
    except Exception:
        page = urlopen(url)

    # Fetch first, then write: avoids leaving an open, empty file behind
    # when the download fails.
    content = page.read()
    with open("/tmp/ps1_cat.csv", "wb") as f:
        f.write(content)

    # Read RA, Dec and magnitude from CSV. The PSF-Kron colour cut
    # (iMeanPSFMag - iMeanKronMag < 0.1) selects point-like sources.
    catalog = Table.read("/tmp/ps1_cat.csv", format="ascii.csv", header_start=1)
    mask = (catalog["nDetections"]>3) * (catalog["rMeanPSFMag"] > minmag) * (catalog["rMeanPSFMag"] < maxmag) *\
    (catalog["iMeanPSFMag"] - catalog["iMeanKronMag"] < 0.1)
    catalog = catalog[mask]

    # BUGFIX: np.float was removed in NumPy 1.24; use the builtin float.
    newcat = np.zeros(len(catalog), dtype=[("ra", np.double), ("dec", np.double), ("mag", float)])
    newcat["ra"] = catalog["RaMean"]
    newcat["dec"] = catalog["DecMean"]
    newcat["mag"] = catalog["rMeanPSFMag"]

    return newcat
def query_gaia_catalogue(ra, dec, radius_deg, minmag=15, maxmag=18.5):
    '''
    Sends an ADQL cone-search query to the Gaia (EDR3) catalogue.

    Filters the result by G magnitude (between minmag and maxmag).
    Returns a structured numpy array with fields "ra", "dec", "mag".
    '''
    # BUGFIX: minmag was formatted with "%.2d" (integer formatting), which
    # silently truncated fractional magnitude limits; "%.2f" keeps them.
    query = '''SELECT ra, dec, phot_g_mean_mag
    FROM gaiaedr3.gaia_source
    WHERE 1=CONTAINS(
    POINT('ICRS', %.6f, %.6f),
    CIRCLE('ICRS',ra, dec, %.6f))
    AND phot_g_mean_mag>=%.2f AND phot_g_mean_mag<%.2f'''%(ra, dec, radius_deg, minmag, maxmag)

    print (query)

    # Asynchronous TAP job against the Gaia archive.
    job = Gaia.launch_job_async(query)
    catalog = job.get_results()

    # BUGFIX: np.float was removed in NumPy 1.24; use the builtin float.
    newcat = np.zeros(len(catalog), dtype=[("ra", np.double), ("dec", np.double), ("mag", float)])
    newcat["ra"] = catalog["ra"]
    newcat["dec"] = catalog["dec"]
    newcat["mag"] = catalog["phot_g_mean_mag"]

    return newcat
def get_fits_image(ra, dec, rad, debug=True):
    '''
    Connects to the PS1 or SkyMapper image service to retrieve the fits file to be used as a bse for the finder chart.

    Returns the path of the downloaded image ('/tmp/tmp.fits'), or None if
    no usable image could be retrieved from PS1/SkyMapper/DSS.
    '''
    #If dec> -30, we have Pan-STARRS
    if dec > -30:
        # Construct URL to download Pan-STARRS image cutout, and save to tmp.fits
        # First find the index of images and retrieve the file of the image that we want to use.
        image_index_url = 'http://ps1images.stsci.edu/cgi-bin/ps1filenames.py?ra={0}&dec={1}&filters=r'.format(ra, dec)
        urlretrieve(image_index_url, '/tmp/ps1_image_index.txt')
        ix = Table.read('/tmp/ps1_image_index.txt', format="ascii")
        f = ix['filename'].data[0]
        image_url = "http://ps1images.stsci.edu/cgi-bin/fitscut.cgi?red={0}&format=fits&size={1}&ra={2}&dec={3}".format(f, int(np.round(rad*3600*8, 0)), ra, dec)
        if (debug):
            print ("URL:", image_url)
            print ("Downloading PS1 r-band image...")
        #Store the object to a fits file.
        urlretrieve(image_url, '/tmp/tmp.fits')
    #Otherwise, we have SkyMapper
    else:
        url="http://skymappersiap.asvo.nci.org.au/dr1_cutout/query?POS=%.6f,%.6f&SIZE=%.3f&FORMAT=image/fits&INTERSECT=center&RESPONSEFORMAT=CSV"%(ra, dec, rad*2)
        page = urlopen(url)
        content = page.read()
        with open("/tmp/skymapper_image_index.csv", "wb") as f:
            f.write(content)
        ix = Table.read('/tmp/skymapper_image_index.csv', format="ascii.csv")
        mask = ((ix['band']=='r')|(ix['band']=='g'))
        ix = ix[mask]
        # Prefer the deepest (longest exposure) available cutout.
        ix.sort(keys='exptime')
        image_url = ix['get_image'][-1]
        urlretrieve(image_url, '/tmp/tmp.fits')

    #Finally, once we have Pan-STARRS or SkyMapper images, we try to open them.
    #If there has been any problem with that, we will just go to the DSS image service.
    try:
        image = fits.open("/tmp/tmp.fits")
        #If everything went well, it shall be a fits image and opening it shall cause no issue.
        return '/tmp/tmp.fits'
    #If there was an error with the fits, we shall go for the DSS image
    except IOError:
        #One of the services may fail, so we need to account for that and provide a backup DSS image service.
        try:
            image_url = 'http://archive.eso.org/dss/dss/image?ra=%.5f&dec=%.5f&x=%.2f&y=%.2f&Sky-Survey=DSS1&mime-type=download-fits' % \
            ((ra), (dec), (rad*60), (rad*60))
            if debug: print ("Downloading DSS image...")
            urlretrieve(image_url, '/tmp/tmp.fits')
        except Exception:
            image_url = 'http://archive.stsci.edu/cgi-bin/dss_search?ra=%.6f&dec=%.6f&generation=DSS2r&equinox=J2000&height=%.4f&width=%.4f&format=FITS' % \
            (ra, dec, rad*60, rad*60)
            urlretrieve(image_url, '/tmp/tmp.fits')

        #We try one more time to open it. If not successful, we return None as the image filename.
        try:
            fits.open("/tmp/tmp.fits")
        except IOError:
            print ("Your fits image could not be retrieved.")
            return None
        # BUGFIX: the DSS fallback previously fell off the end and returned
        # None even when the retry download succeeded; return the path.
        return '/tmp/tmp.fits'
def get_cutout(ra, dec, name, rad, debug=True):
    '''
    Obtains the color composite cutout from the PS1 images.

    Downloads a 256x256 colour JPEG (y/i/g bands as R/G/B) centred on
    (ra, dec) and saves it to /tmp/tmp_<name>.jpg. `ra`/`dec` may be
    degrees (float) or sexagesimal strings; `rad` is in degrees.
    '''
    try:
        ra=float(ra)
        dec=float(dec)
    except:
        # Inputs were sexagesimal strings; convert them to degrees.
        ra, dec = hour2deg(ra, dec)
    catalog = query_ps1_catalogue(ra, dec, rad)
    if (debug):
        print (catalog)
    # Construct URL to download DSS image cutout, and save to tmp.fits
    # One image-index query per band: y (red), i (green), g (blue).
    image_index_url_red = 'http://ps1images.stsci.edu/cgi-bin/ps1filenames.py?ra={0}&dec={1}&filters=y'.format(ra, dec)
    image_index_url_green = 'http://ps1images.stsci.edu/cgi-bin/ps1filenames.py?ra={0}&dec={1}&filters=i'.format(ra, dec)
    image_index_url_blue = 'http://ps1images.stsci.edu/cgi-bin/ps1filenames.py?ra={0}&dec={1}&filters=g'.format(ra, dec)
    urlretrieve(image_index_url_red, '/tmp/image_index_red.txt')
    urlretrieve(image_index_url_green, '/tmp/image_index_green.txt')
    urlretrieve(image_index_url_blue, '/tmp/image_index_blue.txt')
    # Parse the returned index tables to get the stacked-image filenames.
    ix_red = np.genfromtxt('/tmp/image_index_red.txt', names=True, dtype=None)
    ix_green = np.genfromtxt('/tmp/image_index_green.txt', names=True, dtype=None)
    ix_blue = np.genfromtxt('/tmp/image_index_blue.txt', names=True, dtype=None)
    image_url = "http://ps1images.stsci.edu/cgi-bin/fitscut.cgi?red=%s&green=%s&blue=%s&filetypes=stack&auxiliary=data&size=%d&ra=%.6f&dec=%.6f&output_size=256"%\
    (ix_red["filename"], ix_green["filename"], ix_blue["filename"], rad*3600*4, ra, dec)
    if (debug):
        print (image_url)
        print ("Downloading PS1 r-band image...")
    urlretrieve(image_url, '/tmp/tmp_%s.jpg'%name)
def get_finder(ra, dec, name, rad, debug=False, starlist=None, print_starlist=True, \
    telescope="P200", directory=".", minmag=15, maxmag=18.5, mag=np.nan, image_file=None):
    '''
    Creates a PDF with the finder chart for the object with the specified name and coordinates.
    It queries the PS1 catalogue to obtain nearby offset stars and get an R-band image as background.
    Parameters
    ----------
    ra : float
        RA of our target in degrees.
    dec : float
        DEC of our target in degrees.
    name : str
        The name of your target
    rad : float
        Search radius for the finder in degrees.
    debug : bool (optional)
        Option to activate/ deactivate additional output.
    starlist : str (optional)
        Path/name of the file where the coordinates for the object are going to be saved.
        If the file exists, the content will be appended at the end.
        If no value is provided, the output just writes to the standard output (in case print_starlist is True).
    print_starlist : boolean (optional)
        Indicates if the starlist shall be printed in the standard output.
    telescope : str (optional)
        The current accepted values are "Keck", "P200", "Calar".
    directory : str (optional)
        The directory where the PDF with the finder chart shall be stored.
        If no value given, the file will be store in the current directory where the script is run.
    minmag : float (optional)
        The minimum magnitud (brightest in this case) star that we would like to use as an offset star.
    maxmag : float (optional)
        The maximum magnitude (faintest) star that we would like to use as an offset star.
    mag : float or `None` (optional)
        The magnitude of our target.
    image_file : str (optional)
        The name of the fits file that you want to use as a background to your finder chart. If none, provided,
        the script will automatically look for imaging catalogues: PS1 (North), SkyMapper (South), or DSS
    '''
    print ("Got it")
    # Accept either degree floats or sexagesimal strings for the target.
    try:
        ra=float(ra)
        dec=float(dec)
    except:
        ra, dec = hour2deg(ra, dec)
    print ('Image file:', image_file)
    #If no custom fits image is provided, we query for one from PS1/DSS/SkyMapper
    if image_file is None:
        image_file = get_fits_image(ra, dec, rad, debug=debug)
        image = fits.open(image_file)
        wcs = astropy.wcs.WCS(image[0].header)
    else:
        print ('Reading custom fits')
        image = fits.open(image_file)
        # Get pixel coordinates of SN, reference stars in DSS image
        wcs = astropy.wcs.WCS(fits.open(image_file)[0].header)
    # NOTE(review): the double rot90 flips custom images 180 degrees;
    # confirm this orientation is intended for all input surveys.
    try:
        image[0].data = np.rot90(np.rot90(image[0].data))
    except ValueError:
        print ('Rotation failed')
    if image_file is None or image is None:
        print ("FATAL ERROR! Your FITS image could not be retrieved.")
        return
    # Plot finder chart
    #Adjust some of the counts to make easier the plotting.
    image[0].data[image[0].data>20000] = 20000
    image[0].data[np.isnan(image[0].data)] = 0
    plt.figure(figsize=(8, 6))
    plt.set_cmap('gray_r')
    # Light smoothing makes faint sources easier to spot on the chart.
    smoothedimage = gaussian_filter(image[0].data, 1.1)
    hdu = fits.open(image_file)[0]
    wcs = WCS(hdu.header)
    plt.subplot(projection=wcs)
    #plt.imshow(hdu.data, vmin=-2.e-5, vmax=2.e-4, origin='lower')
    plt.grid(color='white', ls='solid')
    # Display with a 1-99 percentile stretch.
    plt.imshow(smoothedimage, origin='lower',vmin=np.percentile(smoothedimage.flatten(), 1), \
    vmax=np.percentile(smoothedimage.flatten(), 99))
    #Mark target in green
    ax = plt.gca()
    r = SphericalCircle((ra * u.deg, dec * u.deg), 2 * u.arcsec,
                         edgecolor='green', facecolor='none',
                         transform=ax.get_transform('fk5'))
    ax.add_patch(r)
    #Write the name of the target
    plt.annotate(name, xy=(ra, dec), xycoords='data', xytext=(0.55, 0.5), textcoords='axes fraction', color="g")
    # Plot compass
    plt.plot([(image[0].data.shape[0])-10,(image[0].data.shape[0]-40)],[10,10], 'k-', lw=2)
    plt.plot([(image[0].data.shape[0])-10,(image[0].data.shape[0])-10],[10,40], 'k-', lw=2)
    plt.annotate("N", xy=((image[0].data.shape[0])-20, 40),  xycoords='data', xytext=(-4,5), textcoords='offset points')
    plt.annotate("E", xy=((image[0].data.shape[0])-40, 20),  xycoords='data', xytext=(-12,-5), textcoords='offset points')
    ax.set_xlabel('%.1f\''%(rad*60*2))
    ax.set_ylabel('%.1f\''%(rad*60*2))
    #Queries a catalogue to get the offset stars
    # Gaia first; fall back to SkyMapper (South) or PS1 (North) on failure.
    try:
        catalog = query_gaia_catalogue(ra, dec, rad, minmag=minmag, maxmag=maxmag)
    except Exception as e:
        print(e)
        catalog = []
    if len(catalog)==0:
        try:
            if dec < -30:
                catalog = query_sky_mapper_catalogue(ra, dec, (rad)*0.95, minmag=minmag, maxmag=maxmag)
            else:
                catalog = query_ps1_catalogue(ra, dec, (rad)*0.95, minmag=minmag, maxmag=maxmag)
        except Exception as e:
            print(e)
            catalog = []
    if (debug):
        print ("Catalog of %d stars retrieved"%len(catalog), catalog)
    #Possible ways to get more stars if we find nothing in the specified range.
    '''if (len(catalog)<3):
        if debug: print ("Looking for a bit fainter stars up to mag: %.2f"%(maxmag+0.25))
        catalog = query_ps1_catalogue(ra, dec, (rad/2.)*0.95, minmag=minmag, maxmag=maxmag+0.5)
    if (len(catalog)<3):
        print ("Restarting with larger radius %.2f arcmin"%(rad*60+0.5))
        get_finder(ra, dec, name, rad+0.5/60, directory=directory, minmag=minmag, \
        maxmag=maxmag+0.5, mag=mag, starlist=starlist, telescope=telescope, image_file=image_file)
        return'''
    # Shuffle candidates and drop any entry within ~2" of the target itself.
    if (not catalog is None and len(catalog)>0):
        np.random.shuffle(catalog)
        no_self_object = (np.abs(catalog["ra"]-ra)*np.cos(np.deg2rad(dec))>2./3600)*(np.abs(catalog["dec"]-dec)>2./3600)
        catalog = catalog[no_self_object]
        #catalog.sort(order='mag')
    if (debug): print ("Once removed the object", catalog)
    # Pixel positions of up to two offset stars.
    # NOTE(review): np.float_ was removed in NumPy 2.0 -- confirm the pinned
    # numpy version or migrate to np.float64.
    if (len(catalog)>0):
        ref1_pix = wcs.wcs_world2pix(np.array([[catalog["ra"][0], catalog["dec"][0]]], np.float_), 1)
    if (len(catalog)>1):
        ref2_pix = wcs.wcs_world2pix(np.array([[catalog["ra"][1], catalog["dec"][1]]], np.float_), 1)
    # Mark and label reference stars
    #If we have 1, we mark it
    if (len(catalog)>0):
        s1ra = catalog[0]["ra"]
        s1dec = catalog[0]["dec"]
        s1= SphericalCircle(( s1ra* u.deg, s1dec* u.deg), 2 * u.arcsec,
                        edgecolor='b', facecolor='none',
                        transform=ax.get_transform('fk5'))
        ax.add_patch(s1)
        plt.annotate("S1", xy=(ref1_pix[0][0]+3, ref1_pix[0][1]+3), xycoords='data', color="b")
    #IFfwe have 2, we mark them as well
    if (len(catalog)>1):
        s2ra = catalog[1]["ra"]
        s2dec = catalog[1]["dec"]
        s2= SphericalCircle(( s2ra* u.deg, s2dec* u.deg), 2 * u.arcsec,
                        edgecolor='r', facecolor='none',
                        transform=ax.get_transform('fk5'))
        ax.add_patch(s2)
        plt.annotate("S2", xy=(ref2_pix[0][0]+3, ref2_pix[0][1]+3), xycoords='data', color="r")
    # Set size of window (leaving space to right for ref star coords)
    plt.subplots_adjust(right=0.65,left=0.1, top=0.99, bottom=0.01)
    #Write the starlist
    #If no magnitude was supplied, just do not put it on the chart.
    if not np.isnan(mag):
        target_mag = "mag=%.2f"%mag
    else:
        target_mag = ""
    # List name, coords, mag of references etc
    # NOTE(review): the next two lines draw the same text twice at the same
    # position -- one of them looks redundant.
    plt.text(1.02, 0.85, name, fontweight='bold', transform=ax.transAxes)
    plt.text(1.02, 0.85, name, transform=ax.transAxes, fontweight='bold')
    plt.text(1.02, 0.80, "%s"%target_mag, transform=ax.transAxes, fontweight='bold')
    plt.text(1.02, 0.75, "%.5f %.5f"%(ra, dec),transform=ax.transAxes)
    rah, dech = deg2hour(ra, dec)
    plt.text(1.02, 0.7,rah+"  "+dech, transform=ax.transAxes)
    #Put the text for the offset stars.
    if (len(catalog)>0):
        ofR1 = get_offset(catalog["ra"][0], catalog["dec"][0], ra, dec)
        S1 = deg2hour(catalog["ra"][0], catalog["dec"][0], sep=":")
        plt.text(1.02, 0.60,'S1, mag=%.2f'%catalog["mag"][0], transform=ax.transAxes, color="b")
        plt.text(1.02, 0.55,'%s %s'%(S1[0], S1[1]), transform=ax.transAxes, color="b")
        plt.text(1.02, 0.5,"E: %.2f N: %.2f"%(ofR1[0], ofR1[1]),transform=ax.transAxes, color="b")
    if (len(catalog)>1):
        ofR2 = get_offset(catalog["ra"][1], catalog["dec"][1], ra, dec)
        S2 = deg2hour(catalog["ra"][1], catalog["dec"][1], sep=":")
        plt.text(1.02, 0.4,'RS, mag=%.2f'%catalog["mag"][1], transform=ax.transAxes, color="r")
        plt.text(1.02, 0.35,'%s %s'%(S2[0], S2[1]), transform=ax.transAxes, color="r")
        plt.text(1.02, 0.3,"E: %.2f N: %.2f"%(ofR2[0], ofR2[1]),transform=ax.transAxes, color="r")
    #Print starlist in the right format for each telescope
    if telescope == "Keck":
        commentchar = "#"
        separator = ""
    if telescope == "P200":
        commentchar = "!"
        separator = "!"
    if telescope == "Calar":
        commentchar = "|"
        separator = "|"
    if (len(catalog)>0 and (print_starlist or not starlist is None)):
        ra_h, dec_h = deg2hour(ra, dec, sep=":")
        print ( "%s %s %s %s %s %s %s %s %s"%("# Object".ljust(20), separator, "alpha".ljust(11), separator, "delta".ljust(12), separator, "eq".ljust(6), separator, "camp de format lliure") )
        print ( "%s %s %s %s %s %s 2000.0 %s"%(name.ljust(20), separator, ra_h, separator, dec_h, separator, commentchar) )
        S1 = deg2hour(catalog["ra"][0], catalog["dec"][0], sep=":")
        print ( "{:s} {:s} {:s} {:s} {:s} {:s} 2000.0 {:s} raoffset={:.2f} decoffset={:.2f} r={:.1f} ".format((name+"_S1").ljust(20),
        separator, S1[0], separator, S1[1], separator, commentchar, ofR1[0], ofR1[1], catalog["mag"][0]) )
    if (len(catalog)>1 and (print_starlist or not starlist is None)):
        S2 = deg2hour(catalog["ra"][1], catalog["dec"][1], sep=":")
        print ( "{:s} {:s} {:s} {:s} {:s} {:s} 2000.0 {:s} raoffset={:.2f} decoffset={:.2f} r={:.1f} ".format((name+"_S2").ljust(20),
        separator, S2[0], separator, S2[1], separator, commentchar, ofR2[0], ofR2[1], catalog["mag"][1]) )
    r, d = deg2hour(ra, dec, sep=" ")
    #Write to the starlist if the name of the starlist was provided.
    if (not starlist is None) and (telescope =="Keck"):
        with open(starlist, "a") as f:
            f.write( "{0} {1} {2} 2000.0 # {3} \n".format(name.ljust(17), r, d, target_mag) )
            if (len(catalog)>0):
                f.write ( "{:s} {:s} {:s} 2000.0 raoffset={:.2f} decoffset={:.2f} r={:.1f} # \n".format( (name+"_S1").ljust(17), S1[0], S1[1], ofR1[0], ofR1[1], catalog["mag"][0]))
            if (len(catalog)>1):
                f.write ( "{:s} {:s} {:s} 2000.0 raoffset={:.2f} decoffset={:.2f} r={:.1f} # \n".format( (name+"_S2").ljust(17), S2[0], S2[1], ofR2[0], ofR2[1], catalog["mag"][1]))
            f.write('\n')
    if (not starlist is None) and (telescope =="P200"):
        with open(starlist, "a") as f:
            f.write( "{0} {1} {2} 2000.0 ! {3}\n".format(name.ljust(19), r, d, target_mag) )
            if (len(catalog)>0):
                f.write ( "{:s} {:s} {:s} 2000.0 ! raoffset={:.2f} decoffset={:.2f} r={:.1f} \n".format( (name+"_S1").ljust(19), S1[0], S1[1], ofR1[0], ofR1[1], catalog["mag"][0]))
            if (len(catalog)>1):
                f.write ( "{:s} {:s} {:s} 2000.0 ! raoffset={:.2f} decoffset={:.2f} r={:.1f} \n".format( (name+"_S2").ljust(19), S2[0], S2[1], ofR2[0], ofR2[1], catalog["mag"][1]))
            f.write('\n')
    if (not starlist is None) and (telescope =="Calar"):
        with open(starlist, "a") as f:
            # NOTE(review): these Calar format strings mix literal "%s" with
            # str.format placeholders, and the first write passes `separator`
            # as {0}/{1} while name/r/d land in the wrong slots -- the emitted
            # lines look malformed; confirm the intended Calar format.
            f.write( "{0} {1} {2} %s 2000.0 %s {3}\n".format(separator, separator, name.ljust(19), r, d, target_mag) )
            if (len(catalog)>0):
                f.write ( "{:s} {:s} {:s} %s 2000.0 %s raoffset={:.2f} decoffset={:.2f} r={:.1f} \n".format( (name+"_S1").ljust(19), separator, S1[0], separator, S1[1], ofR1[0], ofR1[1], catalog["mag"][0]))
            if (len(catalog)>1):
                f.write ( "{:s} {:s} {:s} %s 2000.0 %s raoffset={:.2f} decoffset={:.2f} r={:.1f} \n".format( (name+"_S2").ljust(19), separator, S2[0], separator, S2[1], ofR2[0], ofR2[1], catalog["mag"][1]))
            f.write('\n')
    # Save to pdf
    pylab.savefig(os.path.join(directory, str(name+'_finder.pdf')))
    if debug: print ("Saved to %s"%os.path.join(directory, str(name+'_finder.pdf')))
    pylab.close("all")
if __name__ == '__main__':
    # Command-line entry point:
    #   finder_chart.py <RA> <Dec> <Name> [rad_deg] [telescope]
    parser = argparse.ArgumentParser(description=\
    '''
    Creates the finder chart for the given RA, DEC and NAME.
    Usage: finder.py <RA> <Dec> <Name> <rad [deg]> <telescope [P200|Keck]>
    ''', formatter_class=argparse.RawTextHelpFormatter)
    # NOTE(review): the parser above is built but never used to parse argv;
    # arguments are read manually from sys.argv below.
    print ("Usage: finder_chart.py <RA> <Dec> <Name> <rad [deg]> <telescope [P200|Keck]>")
    #Check if correct number of arguments are given
    if len(sys.argv) < 4:
        print ("Not enough parameters given. Please, provide at least: finder_chart.py <RA> <Dec> <Name>")
        sys.exit()
    ra = float(sys.argv[1])
    dec = float(sys.argv[2])
    name = str(sys.argv[3])
    if (len(sys.argv)>=5):
        rad = float(sys.argv[4])
        # Cap oversized search radii at 10 arcmin.
        if (rad > 15./60):
            print ('Requested search radius of %.2f arcmin is larger than 15 arcmin. Not sure why you need such a large finder chart... reducing to 10 armin for smoother operations...'%(rad * 60))
            rad = 10./60
    else:
        # Default search radius: 2 arcmin.
        rad = 2./60
    print ('Using search radius of %.1f arcsec.'%(rad*3600))
    if (len(sys.argv)>5):
        telescope = sys.argv[5]
    else:
        telescope = "P200"
        print ('Assuming that the telescope you observe will be P200. If it is "Keck", please specify otherwise.')
    get_finder(ra, dec, name, rad, telescope=telescope, debug=False, minmag=7, maxmag=15)
|
nblago/utils
|
src/utils/finder_chart.py
|
finder_chart.py
|
py
| 24,759 |
python
|
en
|
code
| 2 |
github-code
|
6
|
[
{
"api_name": "warnings.filterwarnings",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "astropy.coordinates.SkyCoord",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "astropy.units.hourangle",
"line_number": 43,
"usage_type": "attribute"
},
{
"api_name": "astropy.units",
"line_number": 43,
"usage_type": "name"
},
{
"api_name": "astropy.coordinates.SkyCoord",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "astropy.units.hourangle",
"line_number": 58,
"usage_type": "attribute"
},
{
"api_name": "astropy.units",
"line_number": 58,
"usage_type": "name"
},
{
"api_name": "astropy.units.deg",
"line_number": 58,
"usage_type": "attribute"
},
{
"api_name": "astropy.coordinates.SkyCoord",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "astropy.units.deg",
"line_number": 73,
"usage_type": "attribute"
},
{
"api_name": "astropy.units",
"line_number": 73,
"usage_type": "name"
},
{
"api_name": "astropy.coordinates.SkyCoord",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "astropy.units.deg",
"line_number": 74,
"usage_type": "attribute"
},
{
"api_name": "astropy.units",
"line_number": 74,
"usage_type": "name"
},
{
"api_name": "astropy.units.arcsec",
"line_number": 77,
"usage_type": "attribute"
},
{
"api_name": "astropy.units",
"line_number": 77,
"usage_type": "name"
},
{
"api_name": "urllib2.urlopen",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "astropy.table.Table.read",
"line_number": 93,
"usage_type": "call"
},
{
"api_name": "astropy.table.Table",
"line_number": 93,
"usage_type": "name"
},
{
"api_name": "numpy.zeros",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "numpy.double",
"line_number": 98,
"usage_type": "attribute"
},
{
"api_name": "numpy.float",
"line_number": 98,
"usage_type": "attribute"
},
{
"api_name": "urllib.request.urlopen",
"line_number": 119,
"usage_type": "call"
},
{
"api_name": "urllib.request",
"line_number": 119,
"usage_type": "name"
},
{
"api_name": "urllib2.urlopen",
"line_number": 121,
"usage_type": "call"
},
{
"api_name": "astropy.table.Table.read",
"line_number": 128,
"usage_type": "call"
},
{
"api_name": "astropy.table.Table",
"line_number": 128,
"usage_type": "name"
},
{
"api_name": "numpy.zeros",
"line_number": 137,
"usage_type": "call"
},
{
"api_name": "numpy.double",
"line_number": 137,
"usage_type": "attribute"
},
{
"api_name": "numpy.float",
"line_number": 137,
"usage_type": "attribute"
},
{
"api_name": "astroquery.gaia.Gaia.launch_job_async",
"line_number": 158,
"usage_type": "call"
},
{
"api_name": "astroquery.gaia.Gaia",
"line_number": 158,
"usage_type": "name"
},
{
"api_name": "numpy.zeros",
"line_number": 163,
"usage_type": "call"
},
{
"api_name": "numpy.double",
"line_number": 163,
"usage_type": "attribute"
},
{
"api_name": "numpy.float",
"line_number": 163,
"usage_type": "attribute"
},
{
"api_name": "urllib.urlretrieve",
"line_number": 180,
"usage_type": "call"
},
{
"api_name": "astropy.table.Table.read",
"line_number": 181,
"usage_type": "call"
},
{
"api_name": "astropy.table.Table",
"line_number": 181,
"usage_type": "name"
},
{
"api_name": "numpy.round",
"line_number": 184,
"usage_type": "call"
},
{
"api_name": "urllib.urlretrieve",
"line_number": 190,
"usage_type": "call"
},
{
"api_name": "urllib2.urlopen",
"line_number": 196,
"usage_type": "call"
},
{
"api_name": "astropy.table.Table.read",
"line_number": 202,
"usage_type": "call"
},
{
"api_name": "astropy.table.Table",
"line_number": 202,
"usage_type": "name"
},
{
"api_name": "urllib.urlretrieve",
"line_number": 210,
"usage_type": "call"
},
{
"api_name": "astropy.io.fits.open",
"line_number": 217,
"usage_type": "call"
},
{
"api_name": "astropy.io.fits",
"line_number": 217,
"usage_type": "name"
},
{
"api_name": "urllib.urlretrieve",
"line_number": 228,
"usage_type": "call"
},
{
"api_name": "urllib.urlretrieve",
"line_number": 232,
"usage_type": "call"
},
{
"api_name": "astropy.io.fits.open",
"line_number": 236,
"usage_type": "call"
},
{
"api_name": "astropy.io.fits",
"line_number": 236,
"usage_type": "name"
},
{
"api_name": "urllib.urlretrieve",
"line_number": 264,
"usage_type": "call"
},
{
"api_name": "urllib.urlretrieve",
"line_number": 265,
"usage_type": "call"
},
{
"api_name": "urllib.urlretrieve",
"line_number": 266,
"usage_type": "call"
},
{
"api_name": "numpy.genfromtxt",
"line_number": 268,
"usage_type": "call"
},
{
"api_name": "numpy.genfromtxt",
"line_number": 269,
"usage_type": "call"
},
{
"api_name": "numpy.genfromtxt",
"line_number": 270,
"usage_type": "call"
},
{
"api_name": "urllib.urlretrieve",
"line_number": 278,
"usage_type": "call"
},
{
"api_name": "numpy.nan",
"line_number": 282,
"usage_type": "attribute"
},
{
"api_name": "astropy.io.fits.open",
"line_number": 334,
"usage_type": "call"
},
{
"api_name": "astropy.io.fits",
"line_number": 334,
"usage_type": "name"
},
{
"api_name": "astropy.wcs.wcs.WCS",
"line_number": 335,
"usage_type": "call"
},
{
"api_name": "astropy.wcs.wcs",
"line_number": 335,
"usage_type": "attribute"
},
{
"api_name": "astropy.wcs",
"line_number": 335,
"usage_type": "name"
},
{
"api_name": "astropy.io.fits.open",
"line_number": 338,
"usage_type": "call"
},
{
"api_name": "astropy.io.fits",
"line_number": 338,
"usage_type": "name"
},
{
"api_name": "astropy.wcs.wcs.WCS",
"line_number": 340,
"usage_type": "call"
},
{
"api_name": "astropy.wcs.wcs",
"line_number": 340,
"usage_type": "attribute"
},
{
"api_name": "astropy.wcs",
"line_number": 340,
"usage_type": "name"
},
{
"api_name": "astropy.io.fits.open",
"line_number": 340,
"usage_type": "call"
},
{
"api_name": "astropy.io.fits",
"line_number": 340,
"usage_type": "name"
},
{
"api_name": "numpy.rot90",
"line_number": 343,
"usage_type": "call"
},
{
"api_name": "numpy.isnan",
"line_number": 357,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 360,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 360,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.set_cmap",
"line_number": 361,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 361,
"usage_type": "name"
},
{
"api_name": "scipy.ndimage.filters.gaussian_filter",
"line_number": 362,
"usage_type": "call"
},
{
"api_name": "astropy.io.fits.open",
"line_number": 364,
"usage_type": "call"
},
{
"api_name": "astropy.io.fits",
"line_number": 364,
"usage_type": "name"
},
{
"api_name": "astropy.wcs.WCS",
"line_number": 365,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.subplot",
"line_number": 367,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 367,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.grid",
"line_number": 369,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 369,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.imshow",
"line_number": 370,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 370,
"usage_type": "name"
},
{
"api_name": "numpy.percentile",
"line_number": 370,
"usage_type": "call"
},
{
"api_name": "numpy.percentile",
"line_number": 371,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.gca",
"line_number": 374,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 374,
"usage_type": "name"
},
{
"api_name": "astropy.visualization.wcsaxes.SphericalCircle",
"line_number": 375,
"usage_type": "call"
},
{
"api_name": "astropy.units.deg",
"line_number": 375,
"usage_type": "attribute"
},
{
"api_name": "astropy.units",
"line_number": 375,
"usage_type": "name"
},
{
"api_name": "astropy.units.arcsec",
"line_number": 375,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot.annotate",
"line_number": 380,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 380,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 383,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 383,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 384,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 384,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.annotate",
"line_number": 385,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 385,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.annotate",
"line_number": 386,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 386,
"usage_type": "name"
},
{
"api_name": "numpy.random.shuffle",
"line_number": 425,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 425,
"usage_type": "attribute"
},
{
"api_name": "numpy.abs",
"line_number": 427,
"usage_type": "call"
},
{
"api_name": "numpy.cos",
"line_number": 427,
"usage_type": "call"
},
{
"api_name": "numpy.deg2rad",
"line_number": 427,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 436,
"usage_type": "call"
},
{
"api_name": "numpy.float_",
"line_number": 436,
"usage_type": "attribute"
},
{
"api_name": "numpy.array",
"line_number": 438,
"usage_type": "call"
},
{
"api_name": "numpy.float_",
"line_number": 438,
"usage_type": "attribute"
},
{
"api_name": "astropy.visualization.wcsaxes.SphericalCircle",
"line_number": 445,
"usage_type": "call"
},
{
"api_name": "astropy.units.deg",
"line_number": 445,
"usage_type": "attribute"
},
{
"api_name": "astropy.units",
"line_number": 445,
"usage_type": "name"
},
{
"api_name": "astropy.units.arcsec",
"line_number": 445,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot.annotate",
"line_number": 449,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 449,
"usage_type": "name"
},
{
"api_name": "astropy.visualization.wcsaxes.SphericalCircle",
"line_number": 455,
"usage_type": "call"
},
{
"api_name": "astropy.units.deg",
"line_number": 455,
"usage_type": "attribute"
},
{
"api_name": "astropy.units",
"line_number": 455,
"usage_type": "name"
},
{
"api_name": "astropy.units.arcsec",
"line_number": 455,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot.annotate",
"line_number": 460,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 460,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.subplots_adjust",
"line_number": 463,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 463,
"usage_type": "name"
},
{
"api_name": "numpy.isnan",
"line_number": 468,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.text",
"line_number": 475,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 475,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.text",
"line_number": 477,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 477,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.text",
"line_number": 478,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 478,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.text",
"line_number": 479,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 479,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.text",
"line_number": 481,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 481,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.text",
"line_number": 489,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 489,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.text",
"line_number": 490,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 490,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.text",
"line_number": 491,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 491,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.text",
"line_number": 497,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 497,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.text",
"line_number": 498,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 498,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.text",
"line_number": 499,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 499,
"usage_type": "name"
},
{
"api_name": "pylab.savefig",
"line_number": 561,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 561,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 561,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 562,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 562,
"usage_type": "attribute"
},
{
"api_name": "pylab.close",
"line_number": 563,
"usage_type": "call"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 568,
"usage_type": "call"
},
{
"api_name": "argparse.RawTextHelpFormatter",
"line_number": 575,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 581,
"usage_type": "attribute"
},
{
"api_name": "sys.exit",
"line_number": 583,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 585,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 586,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 587,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 588,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 589,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 599,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 600,
"usage_type": "attribute"
}
] |
14927451670
|
import tkinter as tk
from tkinter import filedialog, simpledialog, messagebox, Scale, HORIZONTAL
from tkinter.ttk import Notebook
from PIL import Image, ImageTk
import random
import os
import shutil
import zipfile
import json
class TraitBubble:
    """A draggable, labelled circle on a Tk canvas representing one trait category.

    Dragging with the left button moves the bubble (vertical position decides
    layer order in the generator); double-clicking deletes the bubble and its
    associated probability slider.
    """

    def __init__(self, canvas, trait_name, x, y, prob_scale):
        self.canvas = canvas
        self.trait_name = trait_name
        self.x = x
        self.y = y
        self.prob_scale = prob_scale
        self.radius = 25
        r = self.radius
        # Circle body.
        self.bubble_id = canvas.create_oval(
            x - r, y - r, x + r, y + r,
            fill="white", outline="black"
        )
        # Category label floating just above the circle.
        self.text_id = canvas.create_text(
            x, y - r - 15,
            text=trait_name, fill="black",
            font=("Arial", 10, "bold")
        )
        # Mouse bindings: drag to move, double-click to delete.
        bindings = (
            ("<Button-1>", self.start_drag),
            ("<B1-Motion>", self.move_drag),
            ("<ButtonRelease-1>", self.stop_drag),
            ("<Double-Button-1>", self.delete_bubble),
        )
        for sequence, handler in bindings:
            canvas.tag_bind(self.bubble_id, sequence, handler)

    def start_drag(self, event):
        # Remember the drag origin so motion events can be turned into deltas.
        self.drag_data = {"x": event.x, "y": event.y}

    def move_drag(self, event):
        dx = event.x - self.drag_data["x"]
        dy = event.y - self.drag_data["y"]
        # Move both the circle and its label together.
        for item in (self.bubble_id, self.text_id):
            self.canvas.move(item, dx, dy)
        self.x += dx
        self.y += dy
        self.drag_data = {"x": event.x, "y": event.y}

    def stop_drag(self, event):
        # Clear the drag origin once the button is released.
        self.drag_data = None

    def delete_bubble(self, event):
        # Remove the canvas items and the slider that belongs to this bubble.
        self.canvas.delete(self.bubble_id)
        self.canvas.delete(self.text_id)
        self.prob_scale.destroy()
class NFTGeneratorApp:
    """Tk application that composites layered trait images into NFTs.

    Each trait category is a folder of images plus a probability slider; an
    NFT is built by stacking one randomly chosen image per included category,
    ordered by the vertical position of the category's bubble on the canvas.

    Fix: trait combinations are tracked by (category, file path) pairs instead
    of (category, PIL.Image) pairs. Image objects hash by identity, so every
    freshly opened image looked "unique" and the duplicate check in
    generate_nft could never fire; it also made the JSON metadata dump useless
    object reprs instead of file names.
    """

    def __init__(self, root):
        self.root = root
        self.root.title("NFT Generator")
        self.trait_categories = {}       # category name -> list of image file paths
        self.trait_bubbles = []          # one TraitBubble per category
        self.generated_images = []       # list of (PIL.Image, traits_used) tuples
        self.num_nfts = 1
        self.unique_combinations = set() # frozensets of (category, path) pairs
        # Create and configure the UI elements
        self.trait_label = tk.Label(self.root, text="Trait Categories:")
        self.trait_label.pack()
        self.trait_buttons_frame = tk.Frame(self.root)
        self.trait_buttons_frame.pack()
        self.add_trait_button = tk.Button(self.root, text="Add Trait Category", command=self.add_trait_category)
        self.add_trait_button.pack()
        self.generate_label = tk.Label(self.root, text="Number of NFTs to Generate:")
        self.generate_label.pack()
        self.generate_entry = tk.Entry(self.root)
        self.generate_entry.insert(tk.END, "1")
        self.generate_entry.pack()
        self.generate_button = tk.Button(self.root, text="Generate NFT", command=self.generate_nft)
        self.generate_button.pack()
        self.download_button = tk.Button(self.root, text="Download All NFTs", command=self.download_nfts)
        self.download_button.pack()
        self.notebook = Notebook(self.root)
        self.notebook.pack()
        self.canvas = tk.Canvas(self.root, width=500, height=500)
        self.canvas.pack()

    def add_trait_category(self):
        """Prompt for a category name and its image folder, then register it."""
        trait_category = simpledialog.askstring("Add Trait Category", "Enter the name of the trait category:")
        if trait_category:
            folder_path = filedialog.askdirectory(title="Select Folder for {}".format(trait_category))
            if folder_path:
                self.trait_categories[trait_category] = self.get_valid_image_files(folder_path)
                self.create_trait_bubbles(trait_category)

    def create_trait_bubbles(self, trait_category):
        """Create the draggable bubble and probability slider for a category."""
        x = 60
        # New bubbles stack vertically; vertical order defines layer order.
        y = len(self.trait_bubbles) * 60 + 60
        prob_scale = Scale(self.root, from_=0, to=100, orient=HORIZONTAL, length=100)
        prob_scale.set(50)
        prob_scale.pack()
        bubble = TraitBubble(self.canvas, trait_category, x, y, prob_scale)
        self.trait_bubbles.append(bubble)

    def generate_nft(self):
        """Generate up to num_nfts NFTs, skipping duplicate trait combinations."""
        self.num_nfts = int(self.generate_entry.get())
        if self.trait_bubbles and self.num_nfts > 0:
            self.generated_images = []
            for _ in range(self.num_nfts):
                traits_used = []  # (category, image path) pairs, in layer order
                # Bubbles lower on the canvas are composited later (on top).
                layers = sorted(self.trait_bubbles, key=lambda bubble: bubble.y)
                base_image = Image.new("RGBA", (400, 400))
                for bubble in layers:
                    trait_category = bubble.trait_name
                    # Include this category with the probability on its slider.
                    if random.randint(1, 100) <= bubble.prob_scale.get():
                        trait_path = random.choice(self.trait_categories[trait_category])
                        traits_used.append((trait_category, trait_path))
                        trait_image = Image.open(trait_path).convert("RGBA").resize((400, 400))
                        base_image = Image.alpha_composite(base_image, trait_image)
                # Hashable, value-based key: detects genuinely repeated combos.
                trait_combination = frozenset(traits_used)
                if trait_combination not in self.unique_combinations:
                    self.generated_images.append((base_image, traits_used))
                    self.unique_combinations.add(trait_combination)
                    self.add_preview(base_image, traits_used)

    def get_valid_image_files(self, folder_path):
        """Return the paths of all image files under folder_path (recursive)."""
        valid_extensions = [".png", ".jpg", ".jpeg", ".gif"]
        image_files = []
        for root, dirs, files in os.walk(folder_path):
            for file in files:
                _, ext = os.path.splitext(file)
                if ext.lower() in valid_extensions:
                    image_files.append(os.path.join(root, file))
        return image_files

    def get_random_image(self, image_list):
        """Open a random image from image_list as RGBA (kept for compatibility)."""
        return Image.open(random.choice(image_list)).convert("RGBA")

    def add_preview(self, image, traits_used):
        """Add a notebook tab showing a thumbnail and the trait metadata."""
        image = image.resize((200, 200))
        photo = ImageTk.PhotoImage(image)
        frame = tk.Frame(self.notebook)
        label = tk.Label(frame, image=photo)
        label.image = photo  # keep a reference so Tk doesn't garbage-collect it
        label.pack()
        text = tk.Text(frame, height=5, width=30)
        text.pack()
        # traits_used maps category -> source file path, so this is readable.
        text.insert(tk.END, json.dumps({trait: str(image_path) for trait, image_path in traits_used}, indent=4))
        self.notebook.add(frame, text=f"NFT #{len(self.generated_images)}")

    def download_nfts(self):
        """Save every generated NFT plus its metadata into a single zip file."""
        if self.generated_images:
            folder_path = filedialog.askdirectory(title="Select Folder to Save NFTs")
            if folder_path:
                zip_path = os.path.join(folder_path, "nfts.zip")
                with zipfile.ZipFile(zip_path, "w", compression=zipfile.ZIP_DEFLATED, compresslevel=9) as zip_file:
                    for index, (image, traits) in enumerate(self.generated_images):
                        # Write the PNG to disk, move it into the zip, clean up.
                        image_path = os.path.join(folder_path, f"nft_{index + 1}.png")
                        image.save(image_path, "PNG", compress_level=9)
                        zip_file.write(image_path, f"nft_{index + 1}.png")
                        os.remove(image_path)
                        # Metadata now records the source file of each trait.
                        metadata = {trait: str(img) for trait, img in traits}
                        metadata_path = os.path.join(folder_path, f"metadata_{index + 1}.json")
                        with open(metadata_path, "w") as f:
                            json.dump(metadata, f, indent=4)
                        zip_file.write(metadata_path, f"metadata_{index + 1}.json")
                        os.remove(metadata_path)
                messagebox.showinfo("Download Complete", "All NFTs and metadata have been downloaded as a zip file.")
# Script entry point: build the Tk root window, attach the generator UI,
# and hand control to the Tk event loop.
if __name__ == "__main__":
    root = tk.Tk()
    app = NFTGeneratorApp(root)
    root.mainloop()
|
net1ife/NFT-generation
|
v0.py
|
v0.py
|
py
| 7,914 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "tkinter.Label",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "tkinter.Frame",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "tkinter.Button",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "tkinter.Label",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "tkinter.Entry",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "tkinter.END",
"line_number": 80,
"usage_type": "attribute"
},
{
"api_name": "tkinter.Button",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "tkinter.Button",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "tkinter.ttk.Notebook",
"line_number": 89,
"usage_type": "call"
},
{
"api_name": "tkinter.Canvas",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "tkinter.simpledialog.askstring",
"line_number": 96,
"usage_type": "call"
},
{
"api_name": "tkinter.simpledialog",
"line_number": 96,
"usage_type": "name"
},
{
"api_name": "tkinter.filedialog.askdirectory",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "tkinter.filedialog",
"line_number": 98,
"usage_type": "name"
},
{
"api_name": "tkinter.Scale",
"line_number": 106,
"usage_type": "call"
},
{
"api_name": "tkinter.HORIZONTAL",
"line_number": 106,
"usage_type": "name"
},
{
"api_name": "PIL.Image.new",
"line_number": 122,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 122,
"usage_type": "name"
},
{
"api_name": "random.randint",
"line_number": 126,
"usage_type": "call"
},
{
"api_name": "PIL.Image.alpha_composite",
"line_number": 130,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 130,
"usage_type": "name"
},
{
"api_name": "os.walk",
"line_number": 142,
"usage_type": "call"
},
{
"api_name": "os.path.splitext",
"line_number": 144,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 144,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 146,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 146,
"usage_type": "attribute"
},
{
"api_name": "PIL.Image.open",
"line_number": 151,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 151,
"usage_type": "name"
},
{
"api_name": "random.choice",
"line_number": 151,
"usage_type": "call"
},
{
"api_name": "PIL.ImageTk.PhotoImage",
"line_number": 155,
"usage_type": "call"
},
{
"api_name": "PIL.ImageTk",
"line_number": 155,
"usage_type": "name"
},
{
"api_name": "tkinter.Frame",
"line_number": 156,
"usage_type": "call"
},
{
"api_name": "tkinter.Label",
"line_number": 157,
"usage_type": "call"
},
{
"api_name": "tkinter.Text",
"line_number": 160,
"usage_type": "call"
},
{
"api_name": "tkinter.END",
"line_number": 162,
"usage_type": "attribute"
},
{
"api_name": "json.dumps",
"line_number": 162,
"usage_type": "call"
},
{
"api_name": "tkinter.filedialog.askdirectory",
"line_number": 167,
"usage_type": "call"
},
{
"api_name": "tkinter.filedialog",
"line_number": 167,
"usage_type": "name"
},
{
"api_name": "os.path.join",
"line_number": 169,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 169,
"usage_type": "attribute"
},
{
"api_name": "zipfile.ZipFile",
"line_number": 171,
"usage_type": "call"
},
{
"api_name": "zipfile.ZIP_DEFLATED",
"line_number": 171,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 173,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 173,
"usage_type": "attribute"
},
{
"api_name": "os.remove",
"line_number": 176,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 178,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 178,
"usage_type": "attribute"
},
{
"api_name": "json.dump",
"line_number": 180,
"usage_type": "call"
},
{
"api_name": "os.remove",
"line_number": 182,
"usage_type": "call"
},
{
"api_name": "tkinter.messagebox.showinfo",
"line_number": 184,
"usage_type": "call"
},
{
"api_name": "tkinter.messagebox",
"line_number": 184,
"usage_type": "name"
},
{
"api_name": "tkinter.Tk",
"line_number": 187,
"usage_type": "call"
}
] |
2690012282
|
import boto3
import os
class WasabiUploader:
    """Uploads product images from a local directory to the Wasabi 'mpdb' bucket.

    A plain-text log file (Wasabi_UploadedPNS.txt) records already-uploaded
    part numbers so repeat runs skip files that were uploaded before.
    Credentials come from the local AWS "default" profile.
    """

    def __init__(self, directory):
        # Directory containing the image files to upload.
        self.directory = directory
        self.session = boto3.Session(profile_name="default")
        self.credentials = self.session.get_credentials()
        self.aws_access_key_id = self.credentials.access_key
        self.aws_secret_access_key = self.credentials.secret_key
        self.s3 = boto3.resource(
            's3',
            endpoint_url='https://s3.US-central-1.wasabisys.com',
            aws_access_key_id=self.aws_access_key_id,
            aws_secret_access_key=self.aws_secret_access_key
        )
        self.mpdb = self.s3.Bucket('mpdb')

    def create_list_of_uploaded_parts(self, directory):
        """Append every bucket object key (minus its top-level prefix) that is
        not already present in the log file next to *directory*'s parent.
        """
        print("Working...")
        # Log lives in the parent of the given directory.
        UploadedPNsFileLocation = "/".join(directory.split("/", )[:-1])
        # 'a+' both creates the file if missing and lets us read it from the top.
        with open(f'{UploadedPNsFileLocation}/Wasabi_UploadedPNS.txt', 'a+') as f:
            f.seek(0)
            existing_contents = f.read()
            for obj in self.mpdb.objects.all():
                # Drop the leading "<prefix>/" from the object key.
                item = obj.key.split("/", 1)[1]
                # NOTE: substring (not exact-line) membership test, as before.
                if item not in existing_contents:
                    f.write(f"{item}\n")
        print(f"Wasabi Data processed, PN file created. "
              f"Available at: {UploadedPNsFileLocation}/Wasabi_UploadedPNS.txt'")

    def upload_photos(self):
        """Upload every not-yet-logged file in self.directory to the bucket.

        Fix: the log file is now opened and read once, instead of being
        reopened and fully re-read for every directory entry (O(n^2) I/O).
        Entries written during this run are appended to the in-memory copy so
        the within-run dedup behaviour of the original is preserved.
        """
        recordNumber = 0
        recordsAdded = 0
        # NOTE(review): this log path is inside self.directory, while
        # create_list_of_uploaded_parts writes to the parent directory —
        # confirm which location is intended.
        with open(f'{self.directory}/Wasabi_UploadedPNS.txt', 'a+') as f:
            f.seek(0)
            existing_contents = f.read()
            for filename in os.listdir(self.directory):
                recordNumber += 1
                file = os.path.join(self.directory, filename)
                PN = filename.split(".")[0]
                if os.path.isfile(file):
                    # Substring membership test against the log, as before.
                    if PN not in existing_contents:
                        try:
                            self.mpdb.upload_file(file, f"productimages/(unknown)")
                            f.write(f"(unknown)\n")
                            # Mirror the write in memory to keep in-run dedup.
                            existing_contents += f"(unknown)\n"
                            recordsAdded += 1
                            if recordNumber % 20 == 0:  # only printing every 20th record for confirmation of upload
                                print(f"{PN} successfully uploaded to Wasabi ({recordsAdded} images uploaded)")
                        except Exception as e:
                            print(f"failed to upload {PN} to Wasabi. Error: {e}")
        print(f"Complete! Records Added: {recordsAdded}")

    def count_uploads(self):
        """Print the number of objects currently stored in the bucket."""
        counting_mpdb = self.s3.Bucket('mpdb')
        print("Counting...")
        count = sum(1 for _ in counting_mpdb.objects.all())
        print(f"{count} objects found in the library's bucket")

    def count_items_in_part_list(self):
        """
        Counts the number of items inside the part number upload log created by the Image Uploader.
        Assumes the log file is located at 'Wasabi_UploadedPNS.txt' in the specified directory.
        """
        directory_parts = self.directory.split("/")[:-1]  # Remove the last part (file name) from the path
        directory = "/".join(directory_parts)
        with open(f'{directory}/Wasabi_UploadedPNS.txt', 'r') as f:
            x = len(f.readlines())
        print(f"{x} items in the part number log")
|
evanwmeeks/PersonalProjects
|
wasabi_interface/wasabi.py
|
wasabi.py
|
py
| 3,414 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "boto3.Session",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "boto3.resource",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "os.listdir",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 50,
"usage_type": "attribute"
},
{
"api_name": "os.path.isfile",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 53,
"usage_type": "attribute"
}
] |
73017766588
|
import json
from typing import Dict, List, Tuple
from flask import Flask, jsonify, request
from rb.complexity.complexity_index import compute_indices
from rb.complexity.index_category import IndexCategory
from rb.core.document import Document
from rb.core.lang import Lang
from rb.core.text_element import TextElement
from rb.core.word import Word
from rb.processings.keywords.keywords_extractor import KeywordExtractor
from rb.similarity.vector_model import (CorporaEnum, VectorModel,
VectorModelType)
from rb.similarity.vector_model_factory import VECTOR_MODELS, get_default_model
from rb.utils.utils import str_to_lang
from nltk.corpus import wordnet as wn
import networkx as nx
import os
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import logging
app = Flask(__name__)
def keywordsOption():
    """Handle the CORS/OPTIONS preflight for the keywords endpoint by
    returning an empty body."""
    return ""
def transform_for_visualization(dataName, JsonName, textType, keywords: List[Tuple[int, Word]], keywordsWithmax: List[Tuple[int, Word]], lang: Lang) -> Dict:
    """Build node/edge lists for keyword visualisation and render a graph image.

    Each keyword is a (score, word) tuple. Two node/edge sets are built: one
    from the full `keywords` list (returned to the caller) and one from the
    capped `keywordsWithmax` list (plotted with networkx/matplotlib and saved
    as a PNG under rb_api/pandoc_filters/images/). The PNG path and the full
    node list are also merged into the JSON file named by `JsonName`.

    Returns a dict with "data" (edgeList/nodeList), "success" and "errorMsg".
    """
    log = logging.getLogger("my-logger")
    # Word-embedding model used to score pairwise keyword similarity.
    vector_model: VectorModel = get_default_model(lang)
    edge_list, node_list = [], []
    edge_list2, node_list2 = [], []
    #sort the keywords
    G = nx.Graph()
    edge_labels={}
    from_node = []
    to_node = []
    value= []
    node_size = []
    # Nodes for the full keyword list (these are what the caller receives).
    for kw in keywords:
        node_list.append({
            "type": "Word",
            "uri": kw[1],
            "displayName": kw[1],
            "active": True,
            "degree": str(max(0, float(kw[0])))
        })
    # Nodes for the capped list; these also populate the networkx graph that
    # gets plotted (node size scales with the keyword score).
    for kw in keywordsWithmax:
        node_list2.append({
            "type": "Word",
            "uri": kw[1],
            "displayName": kw[1],
            "active": True,
            "degree": str(max(0, float(kw[0])))
        })
        G.add_node(kw[1],weight=max(0, float(kw[0])))
        node_size.append(int(max(0, float(kw[0]))*1000))
    # Edges between every ordered pair of keywords with similarity >= 0.3.
    # NOTE(review): pairs are visited in both orders, so each edge appears
    # twice in edge_list — presumably intentional for the consumer; confirm.
    for i, kw1 in enumerate(keywords):
        for j, kw2 in enumerate(keywords):
            try:
                sim = vector_model.similarity(vector_model.get_vector(kw1[1]), vector_model.get_vector(kw2[1]))
                if i != j and sim >= 0.3:
                    edge_list.append({
                        "edgeType": "SemanticDistance",
                        "score": str(max(sim, 0)),
                        "sourceUri": kw1[1],
                        "targetUri": kw2[1]
                    })
            except:
                print("Problem with " + kw1[1] + " or " + kw2[1])
    # Same pass over the capped list; also records edges in G for plotting.
    for i, kw1 in enumerate(keywordsWithmax):
        for j, kw2 in enumerate(keywordsWithmax):
            try:
                sim = vector_model.similarity(vector_model.get_vector(kw1[1]), vector_model.get_vector(kw2[1]))
                if i != j and sim >= 0.3:
                    edge_list2.append({
                        "edgeType": "SemanticDistance",
                        "score": str(max(sim, 0)),
                        "sourceUri": kw1[1],
                        "targetUri": kw2[1]
                    })
                    # NOTE(review): this print fires on every accepted edge —
                    # looks like leftover debugging output; confirm.
                    print("Problem with ****************************************")
                    from_node.append(kw1[1])
                    to_node.append(kw2[1])
                    if not G.has_edge(str(kw1[1]), str(kw2[1])):
                        G.add_edge(str(kw1[1]), str(kw2[1]))
                    value.append(int(max(sim, 0)*10))
            except:
                print("Problem with " + kw1[1] + " or " + kw2[1])
    #pos = nx.nx_agraph.graphviz_layout(G, prog="twopi")
    #nx.draw(G, with_labels = True, node_size=1500, node_color="skyblue", pos=pos)
    #nx.draw_networkx_edge_labels(G, pos, edge_labels=edge_labels)
    # Build a dataframe with your connections
    #df = pd.DataFrame({ 'from':from_node, 'to':to_node, 'value':value})
    # Build your graph
    #G=nx.from_pandas_edgelist(df, 'from', 'to', create_using=nx.Graph() )
    #G = nx.star_graph(30)
    # Render the capped-keyword graph and save it as a PNG.
    plt.clf()
    pos = nx.spring_layout(G, k=1, iterations=20, scale=6)
    options = {
        "node_color": "#A0CBE2",
        "edge_color": value,       # edge colour intensity tracks similarity
        "width": 2,
        "edge_cmap": plt.cm.Blues,
        "with_labels": True,
        "node_size":node_size      # node size tracks keyword score
    }
    plt.figure(figsize=(8, 5))
    nx.draw(G, pos, **options)
    # Custom the nodes:
    #nx.draw(G, with_labels=True, node_color='skyblue', node_size=1500, edge_color=df['value'], width=10.0, edge_cmap=plt.cm.Blues)
    plt.savefig('rb_api/pandoc_filters/images/'+ dataName +'.png', dpi=300)
    plt.clf()
    # Merge the image path and node list into the topic's JSON file.
    data = getJson('rb_api/pandoc_filters/'+JsonName+'.json')
    data.update({textType : 'rb_api/pandoc_filters/images/'+dataName+'.png'})
    data.update({dataName : node_list})
    with open('rb_api/pandoc_filters/'+JsonName+'.json', 'w', encoding='utf-8') as f:
        json.dump(data, f, ensure_ascii=False, indent=4)
    return {
        "data": {
            "edgeList": edge_list,
            "nodeList": node_list
        },
        "success": True,
        "errorMsg": ""
    }
def getJson(url):
    """Load a JSON file, returning an empty dict when the file is missing."""
    if not os.path.isfile(url):
        return {}
    # checks if file exists
    print ("File exists ")
    with open(url, encoding='UTF-8') as handle:
        return json.load(handle)
def keywordsPost():
    """Extract keywords from the posted text and return visualization data (TODO, not working)."""
    body = json.loads(request.get_data())
    posTagging = body.get('pos-tagging')
    bigrams = body.get('bigrams')
    text = body.get('text')
    languageString = body.get('language')
    lang = str_to_lang(languageString)
    threshold = body.get('threshold')
    plotName = "wordnet"
    # NOTE: an earlier revision built a language-specific word2vec model here
    # (RO/EN/ES) and scored the Document directly; that path is disabled.
    dataName = body.get('saveAs')
    textType = body.get('type')
    JsonName = body.get('topicName')
    # Extract both the full keyword set and a capped (15) set for display.
    keywords = KeywordExtractor.extract_keywords(True, text=text, lang=lang)
    keywordsWithmax = KeywordExtractor.extract_keywords(True, text=text, lang=lang, max_keywords=15)
    return jsonify(transform_for_visualization(dataName, JsonName, textType, keywords=keywords, keywordsWithmax=keywordsWithmax, lang=lang))
|
rwth-acis/readerbenchpyapi
|
rb_api/keywords/keywords.py
|
keywords.py
|
py
| 6,881 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "flask.Flask",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "typing.List",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "typing.Tuple",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "rb.core.word.Word",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "rb.core.lang.Lang",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "logging.getLogger",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "rb.similarity.vector_model.VectorModel",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "rb.similarity.vector_model_factory.get_default_model",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "networkx.Graph",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.clf",
"line_number": 118,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 118,
"usage_type": "name"
},
{
"api_name": "networkx.spring_layout",
"line_number": 119,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.cm",
"line_number": 124,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 124,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 128,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 128,
"usage_type": "name"
},
{
"api_name": "networkx.draw",
"line_number": 129,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.savefig",
"line_number": 132,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 132,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.clf",
"line_number": 133,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 133,
"usage_type": "name"
},
{
"api_name": "json.dump",
"line_number": 138,
"usage_type": "call"
},
{
"api_name": "typing.Dict",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "os.path.isfile",
"line_number": 150,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 150,
"usage_type": "attribute"
},
{
"api_name": "json.load",
"line_number": 154,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 159,
"usage_type": "call"
},
{
"api_name": "flask.request.get_data",
"line_number": 159,
"usage_type": "call"
},
{
"api_name": "flask.request",
"line_number": 159,
"usage_type": "name"
},
{
"api_name": "rb.utils.utils.str_to_lang",
"line_number": 164,
"usage_type": "call"
},
{
"api_name": "rb.processings.keywords.keywords_extractor.KeywordExtractor.extract_keywords",
"line_number": 189,
"usage_type": "call"
},
{
"api_name": "rb.processings.keywords.keywords_extractor.KeywordExtractor",
"line_number": 189,
"usage_type": "name"
},
{
"api_name": "rb.processings.keywords.keywords_extractor.KeywordExtractor.extract_keywords",
"line_number": 190,
"usage_type": "call"
},
{
"api_name": "rb.processings.keywords.keywords_extractor.KeywordExtractor",
"line_number": 190,
"usage_type": "name"
},
{
"api_name": "flask.jsonify",
"line_number": 191,
"usage_type": "call"
}
] |
40299141620
|
import numpy as np
from typing import List, Optional, Tuple
from collections import defaultdict
from kaggle_environments.envs.halite.helpers import Ship
from .board import MyBoard, ALL_SHIP_ACTIONS
from .logger import logger
def ship_converts(board: MyBoard):
    """ Convert our ships into shipyards """
    # Nothing to do on the opening move or in the very last turns.
    if board.step == 0 or board.moves_left < 20:
        return
    if not board.num_my_shipyards:
        _create_shipyard(board, is_final_part=board.moves_left <= 40)
    # Emergency conversions: a loaded ship with no safe move left is worth
    # more converted than captured.
    for ship in board.free_ships:
        if ship.halite <= board.configuration.convert_cost:
            continue
        if ALL_SHIP_ACTIONS - board.avoid_moves(ship):
            # at least one escape move exists
            continue
        logger.warning(
            f"Ship {ship.id} at {ship.position}: Can't run away, converting."
        )
        board.create_shipyard(ship)
    # Planned expansion: convert the best-placed free ship once the fleet can
    # support another shipyard.
    min_score = 400 if board.num_my_shipyards < 2 else 1000
    if (
        board.num_my_shipyards <= 3
        and board.num_my_ships > 10 + board.num_my_shipyards * 5
        and board.moves_left > 100
    ):
        candidates = [s for s in board.free_ships if _can_convert_ship(board, s)]
        if candidates:
            ship, score = _choice_ship_to_convert(board, candidates)
            if ship is not None and score > min_score:
                logger.info(
                    f"Ship {ship.id} at {ship.position}: Create a shipyard, cell score = {score}."
                )
                board.create_shipyard(ship)
def _can_convert_ship(board: MyBoard, ship: Ship) -> bool:
""" Is this the good place for a shipyard? """
pos = ship.position
if pos in board.position_to_shipyard:
return False
if (
ship.halite + board.my_halite < board.configuration.convert_cost
or board.is_danger_position(pos, ship)
):
return False
num_my_shipyards = sum(
1 for x in board.my_shipyards if board.distance(x.position, pos) <= 2
)
if num_my_shipyards > 0:
return False
num_my_ships = sum(
1 for x in board.my_ships if board.distance(x.position, pos) <= 1
)
if num_my_ships < 1:
return False
min_distance_to_enemy_ship = min(
board.distance(x.position, pos)
for x in board.ships.values()
if x.player_id != board.me.id
)
if min_distance_to_enemy_ship <= 2:
return False
return True
def _create_shipyard(board: MyBoard, is_final_part: bool = False):
""" What we do if we haven't shipyards """
if is_final_part:
# the end of the game, convert one ship if it makes sense
ship_to_halite = defaultdict(int)
available_ships = [
x
for x in board.my_ships
if x.halite + board.my_halite >= board.configuration.convert_cost
]
for ship in available_ships:
distance_to_enemy_ship = board.distance_to_enemy_ship(ship.position, board.me)
distance_to_enemy_ship = distance_to_enemy_ship or board.size
if distance_to_enemy_ship < 3:
# an enemy vessel nearby, can't convert
continue
max_my_ship_distance = min(distance_to_enemy_ship, board.moves_left)
for other_ship in board.my_ships:
if board.distance(ship.position, other_ship.position) < max_my_ship_distance:
ship_to_halite[ship] += other_ship.halite
if not ship_to_halite:
return
max_halite = max(ship_to_halite.values())
if max_halite > board.configuration.convert_cost:
# it makes sense to convert, choose one
ship = [s for s, h in ship_to_halite.items() if h == max_halite][0]
board.create_shipyard(ship)
else:
# meddle of the game, we have to create a shipyard
logger.warning("No shipyards! We must create at least one!")
available_ships = [
x
for x in board.my_ships
if x.halite + board.my_halite >= board.configuration.convert_cost
]
if not available_ships:
logger.warning("Can't create a shipyard, not enough halite! Keep mining.")
return
if (
len(available_ships) == 1
and board.my_halite + available_ships[0].halite
< board.configuration.convert_cost + board.configuration.spawn_cost
):
logger.warning("Can't create a shipyard, not enough halite! Keep mining.")
return
ship, _ = _choice_ship_to_convert(board, available_ships)
if ship:
board.create_shipyard(ship)
def _choice_ship_to_convert(
board: MyBoard, ships: List[Ship]
) -> Tuple[Optional[Ship], float]:
assert len(ships) > 0
ship, score = None, -np.inf
for _ship in ships:
pos = _ship.position
if pos in board.position_to_shipyard:
# shipyard here
continue
_score = board.environment_reserves(pos)
_score -= board.position_to_halite[pos]
if _score > score:
ship, score = _ship, _score
return ship, score
|
w9PcJLyb/HaliteIV-bot
|
halite/ship_converts.py
|
ship_converts.py
|
py
| 5,297 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "board.MyBoard",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "board.step",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "board.moves_left",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "board.num_my_shipyards",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "board.moves_left",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "board.free_ships",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "board.configuration",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "board.avoid_moves",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "board.ALL_SHIP_ACTIONS",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "logger.logger.warning",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "logger.logger",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "board.create_shipyard",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "board.num_my_shipyards",
"line_number": 34,
"usage_type": "attribute"
},
{
"api_name": "board.num_my_shipyards",
"line_number": 37,
"usage_type": "attribute"
},
{
"api_name": "board.num_my_ships",
"line_number": 38,
"usage_type": "attribute"
},
{
"api_name": "board.num_my_shipyards",
"line_number": 38,
"usage_type": "attribute"
},
{
"api_name": "board.moves_left",
"line_number": 39,
"usage_type": "attribute"
},
{
"api_name": "board.free_ships",
"line_number": 41,
"usage_type": "attribute"
},
{
"api_name": "logger.logger.info",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "logger.logger",
"line_number": 45,
"usage_type": "name"
},
{
"api_name": "board.create_shipyard",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "board.MyBoard",
"line_number": 51,
"usage_type": "name"
},
{
"api_name": "kaggle_environments.envs.halite.helpers.Ship",
"line_number": 51,
"usage_type": "name"
},
{
"api_name": "board.position_to_shipyard",
"line_number": 54,
"usage_type": "attribute"
},
{
"api_name": "board.my_halite",
"line_number": 58,
"usage_type": "attribute"
},
{
"api_name": "board.configuration",
"line_number": 58,
"usage_type": "attribute"
},
{
"api_name": "board.is_danger_position",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "board.my_shipyards",
"line_number": 64,
"usage_type": "attribute"
},
{
"api_name": "board.distance",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "board.my_ships",
"line_number": 70,
"usage_type": "attribute"
},
{
"api_name": "board.distance",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "board.distance",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "board.ships.values",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "board.ships",
"line_number": 77,
"usage_type": "attribute"
},
{
"api_name": "board.me",
"line_number": 78,
"usage_type": "attribute"
},
{
"api_name": "board.MyBoard",
"line_number": 86,
"usage_type": "name"
},
{
"api_name": "collections.defaultdict",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "board.my_ships",
"line_number": 94,
"usage_type": "attribute"
},
{
"api_name": "board.my_halite",
"line_number": 95,
"usage_type": "attribute"
},
{
"api_name": "board.configuration",
"line_number": 95,
"usage_type": "attribute"
},
{
"api_name": "board.distance_to_enemy_ship",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "board.me",
"line_number": 98,
"usage_type": "attribute"
},
{
"api_name": "board.size",
"line_number": 99,
"usage_type": "attribute"
},
{
"api_name": "board.moves_left",
"line_number": 104,
"usage_type": "attribute"
},
{
"api_name": "board.my_ships",
"line_number": 105,
"usage_type": "attribute"
},
{
"api_name": "board.distance",
"line_number": 106,
"usage_type": "call"
},
{
"api_name": "board.configuration",
"line_number": 113,
"usage_type": "attribute"
},
{
"api_name": "board.create_shipyard",
"line_number": 116,
"usage_type": "call"
},
{
"api_name": "logger.logger.warning",
"line_number": 120,
"usage_type": "call"
},
{
"api_name": "logger.logger",
"line_number": 120,
"usage_type": "name"
},
{
"api_name": "board.my_ships",
"line_number": 124,
"usage_type": "attribute"
},
{
"api_name": "board.my_halite",
"line_number": 125,
"usage_type": "attribute"
},
{
"api_name": "board.configuration",
"line_number": 125,
"usage_type": "attribute"
},
{
"api_name": "logger.logger.warning",
"line_number": 128,
"usage_type": "call"
},
{
"api_name": "logger.logger",
"line_number": 128,
"usage_type": "name"
},
{
"api_name": "board.my_halite",
"line_number": 133,
"usage_type": "attribute"
},
{
"api_name": "board.configuration",
"line_number": 134,
"usage_type": "attribute"
},
{
"api_name": "logger.logger.warning",
"line_number": 136,
"usage_type": "call"
},
{
"api_name": "logger.logger",
"line_number": 136,
"usage_type": "name"
},
{
"api_name": "board.create_shipyard",
"line_number": 141,
"usage_type": "call"
},
{
"api_name": "board.MyBoard",
"line_number": 145,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 145,
"usage_type": "name"
},
{
"api_name": "kaggle_environments.envs.halite.helpers.Ship",
"line_number": 145,
"usage_type": "name"
},
{
"api_name": "numpy.inf",
"line_number": 149,
"usage_type": "attribute"
},
{
"api_name": "board.position_to_shipyard",
"line_number": 152,
"usage_type": "attribute"
},
{
"api_name": "board.environment_reserves",
"line_number": 156,
"usage_type": "call"
},
{
"api_name": "board.position_to_halite",
"line_number": 158,
"usage_type": "attribute"
},
{
"api_name": "typing.Tuple",
"line_number": 146,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 146,
"usage_type": "name"
},
{
"api_name": "kaggle_environments.envs.halite.helpers.Ship",
"line_number": 146,
"usage_type": "name"
}
] |
73790991549
|
from typing import List, Tuple
from abstract_puzzles import AbstractPuzzles
DATA_TYPE = List[Tuple[Tuple[int, int], Tuple[int, int]]]
class Puzzles(AbstractPuzzles):
    """Advent of Code 2022, day 4: overlapping section-assignment ranges."""

    def __init__(self, method_name):
        super().__init__(
            method_name,
            day=4,
            puzzle_1_example_answer=2,
            puzzle_1_answer=487,
            puzzle_2_example_answer=4,
            puzzle_2_answer=849,
        )

    def read(self, file_path: str) -> Tuple[DATA_TYPE]:
        """Parse lines like '2-4,6-8' into ((2, 4), (6, 8)) range pairs."""
        pairs = []
        with open(file_path, 'r') as f:
            for line in f.read().splitlines():
                first, second = line.split(',')
                lo1, hi1 = first.split('-')
                lo2, hi2 = second.split('-')
                pairs.append(((int(lo1), int(hi1)), (int(lo2), int(hi2))))
        return pairs,

    def puzzle_1(self, schedules: DATA_TYPE) -> int:
        """Count pairs where one range fully contains the other."""
        count = 0
        for (a_lo, a_hi), (b_lo, b_hi) in schedules:
            if (a_lo >= b_lo and a_hi <= b_hi) or (b_lo >= a_lo and b_hi <= a_hi):
                count += 1
        return count

    def puzzle_2(self, schedules: DATA_TYPE) -> int:
        """Count pairs whose ranges overlap at all (an endpoint of one range
        falls inside the other)."""
        count = 0
        for (a_lo, a_hi), (b_lo, b_hi) in schedules:
            if (b_lo <= a_lo <= b_hi or b_lo <= a_hi <= b_hi
                    or a_lo <= b_lo <= a_hi or a_lo <= b_hi <= a_hi):
                count += 1
        return count
|
Lynxens/AdventOfCode2022
|
advent_of_code/day4.py
|
day4.py
|
py
| 1,600 |
python
|
en
|
code
| 4 |
github-code
|
6
|
[
{
"api_name": "typing.List",
"line_number": 4,
"usage_type": "name"
},
{
"api_name": "typing.Tuple",
"line_number": 4,
"usage_type": "name"
},
{
"api_name": "abstract_puzzles.AbstractPuzzles",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "typing.Tuple",
"line_number": 18,
"usage_type": "name"
}
] |
519381337
|
from openmdao.core.driver import Driver, RecordingDebugging
from openmdao.api import SimpleGADriver, Problem, LatinHypercubeGenerator, DOEDriver
from dataclasses import dataclass
from copy import deepcopy
import random
import numpy as np
from itertools import chain
from deap import algorithms, base, tools
from deap.benchmarks import rosenbrock
class DeapDriver(Driver):
    """OpenMDAO driver that hands the optimization loop to a DEAP container."""

    def _declare_options(self):
        # All algorithm-specific behaviour lives in the container class.
        self.options.declare("container_class")

    def _get_name(self):
        return "DeapDriver"

    def _setup_driver(self, problem):
        super()._setup_driver(problem)
        container_class = self.options["container_class"]
        self.container = container_class(driver=self)

    def run(self):
        self.container.run_algorithm()
        # Evaluates a point in the middle of the pareto front to have one of
        # the optimal points as the final values in the model
        # self.container.evaluate(pareto_front[len(pareto_front) // 2])
        # print(pareto_front)
        # False signals "no failure" to OpenMDAO.
        return False
class Individual(list):
    """A list-based DEAP individual that carries its own fitness instance."""

    def __init__(self, *args, fitness_class, **kwargs):
        super().__init__(*args, **kwargs)
        # every individual owns a fresh fitness object of the supplied class
        self.fitness = fitness_class()

    def __repr__(self):
        return "Individual(" + super().__repr__() + ")"
@dataclass(frozen=True)
class DeapContainer:
    """
    An abstract class for containing the algorithm-specific logic. This is
    instantiated in the Driver's _setup_driver() function with the driver
    itself passed in as an argument.
    This object in itself should be fully stateless.
    The motivation for having this in a dedicated object is mainly that the
    Driver class is already heavily bloated.
    """
    driver: DeapDriver

    def __post_init__(self):
        # FIXME: this API is inflexible
        # BUG FIX: the dataclass is frozen, so plain `self.x = ...` raises
        # FrozenInstanceError; derived attributes must be installed with
        # object.__setattr__ (the documented dataclasses workaround).
        # One fitness weight of -1 (minimization) per model objective.
        object.__setattr__(
            self,
            "fitness_class",
            type(
                "Fitness",
                (base.Fitness,),
                {"weights": (-1,) * len(self.problem.model.get_objectives())},
            ),
        )
        object.__setattr__(
            self,
            "design_var_shapes",
            {
                name: np.shape(value)
                for (name, value) in self.driver.get_design_var_values().items()
            },
        )
        object.__setattr__(
            self,
            "objective_shapes",
            {
                name: np.shape(value)
                for (name, value) in self.driver.get_objective_values().items()
            },
        )
        object.__setattr__(
            self,
            "constraint_shapes",
            {
                name: np.shape(value)
                for (name, value) in self.driver.get_constraint_values().items()
            },
        )
        object.__setattr__(self, "individual_bounds", self._individual_bounds())

    @property
    def problem(self):
        """The OpenMDAO problem owned by the driver."""
        return self.driver._problem

    def individual_factory(self, *args, **kwargs):
        """Create an individual wired up with this container's fitness class.

        NOTE(review): `individual_class` is not defined on this base class --
        presumably concrete subclasses provide it; confirm.
        """
        individual = self.individual_class(fitness_class=self.fitness_class, *args, **kwargs)
        return individual

    def _individual_bounds(self):
        """Return (lower, upper) bound tuples flattened over the design vars.

        NOTE(review): the two-way unpack only works when the chain yields
        exactly two items, i.e. a single design variable -- confirm intent.
        """
        design_vars = self.problem.model.get_design_vars()
        lower, upper = chain.from_iterable(
            (design_vars[key]["lower"].flat, design_vars[key]["upper"].flat)
            for key in self.design_var_shapes.keys()
        )
        return tuple(lower), tuple(upper)

    def convert_design_vars_to_individual(self, design_vars):
        """
        Converts a dict of OpenMDAO design variables into a DEAP individual.
        """
        individual = Individual(
            chain.from_iterable(
                design_vars[key].flat for key in self.design_var_shapes.keys()
            ),
            fitness_class=self.fitness_class,
        )
        return individual

    def convert_individual_to_design_vars(self, individual):
        """
        Converts a DEAP individual into a dict of OpenMDAO design variables.
        """
        ind = deepcopy(individual)
        design_vars = {}
        for name, shape in self.design_var_shapes.items():
            # np.product was removed in NumPy 2.0; int() also guards against
            # the float 1.0 returned for scalar shapes (), which would break
            # the slice below.
            ind_items = int(np.prod(shape))
            design_vars[name] = np.reshape(ind[:ind_items], shape)
            ind = ind[ind_items:]
        return design_vars

    def get_population_generator(self, count):
        """DOE generator used to seed the initial population."""
        return LatinHypercubeGenerator(
            samples=count, criterion="correlation", iterations=count // 10
        )

    def init_population(self, count):
        """Build `count` individuals sampled from the design space."""
        return [
            self.convert_design_vars_to_individual(dict(case))
            for case in self.get_population_generator(count)(
                self.problem.model.get_design_vars()
            )
        ]

    def evaluate(self, individual):
        """Run the model at the individual's design point; return the tuple of
        flattened objective values."""
        pre = id(individual.fitness)
        for (name, value) in self.convert_individual_to_design_vars(individual).items():
            self.driver.set_design_var(name, value)
        # the fitness object must survive the design-var round trip unchanged
        assert id(individual.fitness) == pre
        with RecordingDebugging(
            self.driver._get_name(), self.driver.iter_count, self.driver
        ):
            failure_flag, abs_error, rel_error = self.problem.model._solve_nonlinear()
            self.driver.iter_count += 1
        # print(tuple(float(x) for x in self.driver.get_objective_values().values()))
        return tuple(
            chain.from_iterable(
                x.flat for x in self.driver.get_objective_values().values()
            )
        )

    def run_algorithm(self):
        # BUG FIX: `raise NotImplemented(...)` fails with a TypeError because
        # NotImplemented is not callable; NotImplementedError is the correct
        # abstract-method signal.
        raise NotImplementedError("run_algorithm() method not implemented.")
|
ovidner/openmdao-deap
|
openmdao_deap/__init__.py
|
__init__.py
|
py
| 5,155 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "openmdao.core.driver.Driver",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "deap.base.Fitness",
"line_number": 61,
"usage_type": "attribute"
},
{
"api_name": "deap.base",
"line_number": 61,
"usage_type": "name"
},
{
"api_name": "numpy.shape",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "numpy.shape",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "numpy.shape",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "itertools.chain.from_iterable",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "itertools.chain",
"line_number": 90,
"usage_type": "name"
},
{
"api_name": "itertools.chain.from_iterable",
"line_number": 102,
"usage_type": "call"
},
{
"api_name": "itertools.chain",
"line_number": 102,
"usage_type": "name"
},
{
"api_name": "copy.deepcopy",
"line_number": 113,
"usage_type": "call"
},
{
"api_name": "numpy.product",
"line_number": 117,
"usage_type": "call"
},
{
"api_name": "numpy.reshape",
"line_number": 118,
"usage_type": "call"
},
{
"api_name": "openmdao.api.LatinHypercubeGenerator",
"line_number": 123,
"usage_type": "call"
},
{
"api_name": "openmdao.core.driver.RecordingDebugging",
"line_number": 140,
"usage_type": "call"
},
{
"api_name": "itertools.chain.from_iterable",
"line_number": 148,
"usage_type": "call"
},
{
"api_name": "itertools.chain",
"line_number": 148,
"usage_type": "name"
},
{
"api_name": "dataclasses.dataclass",
"line_number": 42,
"usage_type": "call"
}
] |
33914485796
|
from django.conf.urls import patterns, url
from ventas import viewsInforme,viewsPedido
# URL routes for the sales ("ventas") app.
# NOTE(review): django.conf.urls.patterns() only exists in legacy Django
# (removed in 1.10) -- this module targets an old Django version.
urlpatterns = patterns('',
    # desktop and mobile sales entry points
    url(r'^$', viewsPedido.venta_desktop, name='venta_desktop'),
    url(r'^fac/', viewsPedido.venta_desktop, name='venta_desktop1'),
    url(r'^mobile/$', viewsPedido.venta_mobile, name='venta_mobile'),
    # order listing / reports
    url(r'^listar/$', viewsInforme.listar, name='listar'),
    # lookup endpoints: clients, sellers, product by code / by name
    url(r'^clientes/', viewsPedido.clientes, name='clientes'),
    url(r'^vendedores/', viewsPedido.vendedores, name='vendedores'),
    url(r'^codproducto/', viewsPedido.codproducto, name='codproducto'),
    url(r'^nomproducto/', viewsPedido.nomproducto, name='nomproducto'),
    # order persistence
    # NOTE(review): both save routes share name='save'; reversing that name is
    # ambiguous -- confirm this is intended.
    url(r'^save/$', viewsPedido.savePedido, name='save'),
    url(r'^save/(?P<anyway>\w+)/$', viewsPedido.savePedido, name='save'),
    url(r'^saveDetalle/$', viewsPedido.saveDetalle, name='saveDetalle'),
    url(r'^deleteDetalle/(?P<id>\d+)/$', viewsPedido.deleteDetalle, name='deleteDetalle'),
    # payment of an order
    url(r'^pagar/$', viewsPedido.pagarPedido, name='pagarPedido'),
)
|
wilmandx/ipos
|
ventas/urls.py
|
urls.py
|
py
| 1,020 |
python
|
es
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "django.conf.urls.patterns",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "django.conf.urls.url",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "ventas.viewsPedido.venta_desktop",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "ventas.viewsPedido",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "django.conf.urls.url",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "ventas.viewsPedido.venta_desktop",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "ventas.viewsPedido",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "django.conf.urls.url",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "ventas.viewsPedido.venta_mobile",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "ventas.viewsPedido",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "django.conf.urls.url",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "ventas.viewsInforme.listar",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "ventas.viewsInforme",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "django.conf.urls.url",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "ventas.viewsPedido.clientes",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "ventas.viewsPedido",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "django.conf.urls.url",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "ventas.viewsPedido.vendedores",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "ventas.viewsPedido",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "django.conf.urls.url",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "ventas.viewsPedido.codproducto",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "ventas.viewsPedido",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "django.conf.urls.url",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "ventas.viewsPedido.nomproducto",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "ventas.viewsPedido",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "django.conf.urls.url",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "ventas.viewsPedido.savePedido",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "ventas.viewsPedido",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "django.conf.urls.url",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "ventas.viewsPedido.savePedido",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "ventas.viewsPedido",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "django.conf.urls.url",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "ventas.viewsPedido.saveDetalle",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "ventas.viewsPedido",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "django.conf.urls.url",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "ventas.viewsPedido.deleteDetalle",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "ventas.viewsPedido",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "django.conf.urls.url",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "ventas.viewsPedido.pagarPedido",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "ventas.viewsPedido",
"line_number": 18,
"usage_type": "name"
}
] |
13538653206
|
import pygame
import numpy as np
from util.helpers import *
from physics.colliding_object import Colliding
class EyeBeam(Colliding):
    """A line-of-sight segment between two points, collidable as a line."""

    def __init__(self, start, end):
        self.start = np.array(start)
        super(EyeBeam, self).__init__(self.start)
        self.end = np.array(end)
        self.collide_type = 'line'

    def unobstructed(self, list_of_game_objects):
        """True when no wall among the given objects blocks this beam."""
        edge = np.array((self.start, self.end))
        walls = walls_vector_from_game_objects(list_of_game_objects)
        return unobstructed_edges(edge, walls)[0]
class Eyes:
    """Vision component: visibility and collision-probe queries against the
    currently cached set of game objects."""

    def __init__(self, view_distance=200):
        self.view_distance = view_distance  # max distance at which objects are searched for
        self.look_ahead = 10  # forward collision-probe length multiplier
        self.list_of_game_objects = []

    def update(self, list_of_game_objects):
        """Refresh the cached world state used by all subsequent queries."""
        self.list_of_game_objects = list_of_game_objects

    def direct_path_to_goal(self, current_position, goal, exclude=None):
        """Return truthy when the straight segment to `goal` is unobstructed.

        `exclude` lists objects to ignore as obstructions (e.g. the goal
        object itself).
        """
        # BUG FIX: the original used a mutable default (`exclude=[]`), which
        # is shared across calls; use the None-sentinel idiom instead.
        if exclude is None:
            exclude = []
        obstructions = [i for i in self.list_of_game_objects if i not in exclude]
        walls_vector = walls_vector_from_game_objects(obstructions)
        if len(walls_vector) == 0:
            # no walls at all -> nothing can obstruct
            return True
        goal_edge = np.array([[current_position[0], current_position[1], goal[0], goal[1]]])
        return unobstructed_edges(goal_edge, walls_vector)

    def get_mouse_position(self):
        """Current mouse position as a float numpy array."""
        return np.array(pygame.mouse.get_pos()).astype(float)

    def look_for_collisions(self, coords, vector, radius):
        """Probe ahead along `vector` from three parallel offsets (both edges
        and the center of a body of `radius`); return the first collision
        found, or None."""
        for sign in [1.0, 0.0, -1.0]:
            adjustment = normalise_vector(perpendicular_vector(vector)) * (sign * radius)
            adjusted_coords = coords + adjustment
            ahead_end = adjusted_coords + (vector * self.look_ahead)
            ahead = EyeBeam(adjusted_coords, ahead_end)
            collision = ahead.get_closest_collision(self.list_of_game_objects)
            if collision is not None:
                return collision
        return None

    def look_at_object(self, coords, screen_object):
        """Return `screen_object` if it is directly visible from `coords`,
        otherwise None."""
        if self.direct_path_to_goal(coords, screen_object.coords(), exclude=[screen_object]):
            return screen_object
        else:
            return None

    def visible_objects(self, coords):
        """All cached game objects directly visible from `coords`."""
        visibles = []
        for screen_object in self.list_of_game_objects:
            eyes_see = self.look_at_object(coords,
                                           screen_object)
            if eyes_see is not None:
                visibles.append(eyes_see)
        return visibles

    def look_for_object(self,
                        coords,
                        object_description):
        """Find the closest non-wall object within view distance whose image
        dict contains all key/value pairs of `object_description`, and return
        it only if it is directly visible.

        NOTE(review): dict.viewitems() is Python 2 only; under Python 3 this
        needs .items() -- confirm the target runtime before changing.
        """
        matching_objects_in_range = [screen_object for screen_object in \
                                     self.list_of_game_objects \
                                     if screen_object.image['kind'] != 'wall'
                                     and distance_between_points(coords, screen_object.coords()) < self.view_distance \
                                     and object_description.viewitems() <= screen_object.image.viewitems()]
        if len(matching_objects_in_range) > 0:
            closest_index = find_closest_point_index(coords, [screen_object.coords() for screen_object in matching_objects_in_range])
            target_object = matching_objects_in_range[closest_index]
            return self.look_at_object(coords, target_object)
        return None
|
SimonCarryer/video_game_ai
|
brains/eyes.py
|
eyes.py
|
py
| 3,313 |
python
|
en
|
code
| 2 |
github-code
|
6
|
[
{
"api_name": "physics.colliding_object.Colliding",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "numpy.array",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "pygame.mouse.get_pos",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "pygame.mouse",
"line_number": 38,
"usage_type": "attribute"
}
] |
74451964986
|
from core.visualization import PlotGraphics
from core.relation_extraction import SpacyRelationExtraction
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.pipeline import Pipeline
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import classification_report
from sklearn.metrics import roc_auc_score
from sklearn.metrics import f1_score
import numpy as np
import joblib
import os
import sys
import warnings
warnings.filterwarnings('ignore')
class TextPipeline:
    """End-to-end text classification pipeline.

    Builds sklearn vectorization pipelines for features (TF-IDF) and
    targets (CountVectorizer or spacy relation extraction), trains a
    multi-label classifier, and can pickle/unpickle the fitted label
    vectorizer and model under ``resources_folder``.
    """
    def __init__(self, predictor_name, data_tuple, classifier_name='decision_tree', resources_path=None):
        """
        Args:
            predictor_name: base name used for pickled artifact files.
            data_tuple: (x_train, x_test, y_train, y_test) split.
            classifier_name: key into the classifier dict below —
                'decision_tree' (default) or 'k_neighbors'.
            resources_path: artifact folder; defaults to a 'resources'
                folder next to the script directory (sys.path[0]).
        """
        # root path
        self.resources_folder = os.path.join(os.path.dirname(sys.path[0]), 'resources') \
            if resources_path is None else resources_path
        # initializes the classifier dict (fixed seed for the tree; KeyError on unknown name)
        classifiers = {'decision_tree': DecisionTreeClassifier(random_state=0),
                       'k_neighbors': KNeighborsClassifier(n_neighbors=15)}
        # save the predictor name
        self.predictor_name = predictor_name
        # receives the data
        self.x_train, self.x_test, self.y_train, self.y_test = data_tuple
        # text extraction pipelines, filled by create_*_pipeline / unpickle_predictor
        self.text_extraction_pipes = {}
        # prediction model to use
        self.prediction_model = classifiers[classifier_name]
        # init the visualizer; it shares this dict object, so pipes added
        # later are visible to it as well
        self.plt_graphics = PlotGraphics(data_tuple, self.text_extraction_pipes)
    def create_features_pipeline(self, n_features=150, pipeline_obj=None):
        """Create (or adopt) the feature pipeline and register it.

        Args:
            n_features: max_features for the default TfidfVectorizer.
            pipeline_obj: pre-built pipeline to use instead; if given,
                n_features is ignored.

        Returns:
            The pipeline stored under ``text_extraction_pipes['feature']``
            (unfitted — callers fit/transform it themselves).
        """
        # checks if the transformer
        if pipeline_obj is None:
            # features vectorization
            transformer_obj = TfidfVectorizer(strip_accents='unicode',
                                              stop_words='english',
                                              lowercase=True,
                                              max_features=n_features,
                                              ngram_range=(1, 2),
                                              min_df=0.1, max_df=0.7)
            # creates the pipeline obj
            pipeline_obj = Pipeline([('vectorizer', transformer_obj)])
        # pipeline mapping
        self.text_extraction_pipes['feature'] = pipeline_obj
        # returns the pipeline obj
        return self.text_extraction_pipes['feature']
    def create_label_pipeline(self, relation_extraction=False, n_targets=20, n_jobs=8):
        """Create the target/label pipeline under ``text_extraction_pipes['target']``.

        Args:
            relation_extraction: use SpacyRelationExtraction instead of a
                plain CountVectorizer.
            n_targets: number of target terms/relations to keep.
            n_jobs: parallelism for the spacy extractor only.
        """
        # target vectorization
        if relation_extraction:
            # uses the spacy relation extraction
            vectorizer = SpacyRelationExtraction(n_relation=n_targets, n_jobs=n_jobs)
        else:
            # otherwise uses a normal vectorizer
            vectorizer = CountVectorizer(strip_accents='unicode',
                                         stop_words='english',
                                         lowercase=True,
                                         max_features=n_targets,
                                         ngram_range=(1, 2),
                                         min_df=0.1, max_df=0.7)
        # pipeline creation
        self.text_extraction_pipes['target'] = Pipeline([('vectorizer', vectorizer)])
    def pickle_predictor(self):
        """Dump the fitted label vectorizer and the model to
        ``<resources_folder>/<predictor_name>_labels_vectorizer.pkl`` and
        ``<resources_folder>/<predictor_name>_predictor.pkl``."""
        # save the labels pipeline (only the vectorizer step is persisted)
        labels_extractor = self.text_extraction_pipes['target']['vectorizer']
        obj_name = os.path.join(self.resources_folder, '_'.join([self.predictor_name, 'labels', 'vectorizer']))
        joblib.dump(labels_extractor, obj_name + '.pkl')
        # saves the model
        obj_name = os.path.join(self.resources_folder, '_'.join([self.predictor_name, 'predictor']))
        joblib.dump(self.prediction_model, obj_name + '.pkl')
    def unpickle_predictor(self):
        """Inverse of pickle_predictor: restore the target pipeline and
        the trained model from the resources folder.

        NOTE(review): the 'feature' pipeline is NOT persisted; callers
        must rebuild it before predict() — confirm against usage.
        """
        # loads the object
        obj_name = os.path.join(self.resources_folder, '_'.join([self.predictor_name, 'labels', 'vectorizer']))
        labels_extractor = joblib.load(obj_name + '.pkl')
        self.text_extraction_pipes['target'] = Pipeline([('vectorizer', labels_extractor)])
        # unpickle the model
        obj_name = os.path.join(self.resources_folder, '_'.join([self.predictor_name, 'predictor']))
        self.prediction_model = joblib.load(obj_name + '.pkl')
    def fit(self, x_vector):
        """Fit the classifier on x_vector vs the vectorized y_train.

        Args:
            x_vector: already-vectorized training features — presumably
                produced by the 'feature' pipeline; confirm with caller.
        """
        # fit the label pipeline on y_train and densify the result
        y_vector = self.text_extraction_pipes['target'].fit_transform(self.y_train).toarray()
        # clip counts to a binary multi-label indicator matrix
        y_vector[y_vector > 1] = 1
        # print some information data
        print('\ninput array, shape:', x_vector.shape)
        print('output array, shape:', y_vector.shape, '\n')
        # fit the model
        self.prediction_model.fit(x_vector, y_vector)
    def predict(self, x_test):
        """Vectorize raw text x_test with the (already fitted) feature
        pipeline and return the model's predictions."""
        # convert using the pipeline
        x_test_vector = self.text_extraction_pipes['feature'].transform(x_test)
        # convert the y_test
        predictions = self.prediction_model.predict(x_test_vector)
        # returns the predictions
        return predictions
    def score(self):
        """Evaluate on the held-out split; returns the classification
        report dict (also plotted via plt_graphics)."""
        # vectorize the test labels with the fitted target pipeline
        y_test_vector = self.text_extraction_pipes['target'].transform(self.y_test).toarray()
        # predict the output for the test set
        predictions = self.predict(self.x_test)
        # class names from the vectorizer vocabulary
        # NOTE(review): get_feature_names() exists only in older sklearn
        # (<1.2; newer versions use get_feature_names_out) — confirm pin
        class_labels = self.text_extraction_pipes['target']['vectorizer'].get_feature_names()
        class_report = self.calculate_metrics(y_test_vector, predictions, class_labels)
        # plot the data
        self.plt_graphics.plot_bag_words(class_report)
        # return the classification report
        return class_report
    @staticmethod
    def calculate_metrics(y_test, predictions, class_labels):
        """Print report + ROC-AUC and return the report as a dict.

        NOTE: clips y_test to binary IN PLACE (mutates the caller's array).
        """
        # print the results
        y_test[y_test > 1] = 1
        class_report = classification_report(y_test, predictions, target_names=class_labels, output_dict=True)
        print("Classification report: \n", classification_report(y_test, predictions, target_names=class_labels))
        # print("F1 micro averaging:", f1_score(y_test, predictions, average='micro', labels=np.unique(predictions)))
        print("ROC: ", roc_auc_score(y_test, predictions), '\n')
        # return the classification results
        return class_report
|
eliseu31/MSDS-Analyser
|
core/text_pipeline.py
|
text_pipeline.py
|
py
| 6,284 |
python
|
en
|
code
| 8 |
github-code
|
6
|
[
{
"api_name": "warnings.filterwarnings",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "os.path.dirname",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "sklearn.tree.DecisionTreeClassifier",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "sklearn.neighbors.KNeighborsClassifier",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "core.visualization.PlotGraphics",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "sklearn.feature_extraction.text.TfidfVectorizer",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "sklearn.pipeline.Pipeline",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "core.relation_extraction.SpacyRelationExtraction",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "sklearn.feature_extraction.text.CountVectorizer",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "sklearn.pipeline.Pipeline",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 77,
"usage_type": "attribute"
},
{
"api_name": "joblib.dump",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 81,
"usage_type": "attribute"
},
{
"api_name": "joblib.dump",
"line_number": 82,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 86,
"usage_type": "attribute"
},
{
"api_name": "joblib.load",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "sklearn.pipeline.Pipeline",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 91,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 91,
"usage_type": "attribute"
},
{
"api_name": "joblib.load",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "sklearn.metrics.classification_report",
"line_number": 133,
"usage_type": "call"
},
{
"api_name": "sklearn.metrics.classification_report",
"line_number": 134,
"usage_type": "call"
},
{
"api_name": "sklearn.metrics.roc_auc_score",
"line_number": 136,
"usage_type": "call"
}
] |
21932276295
|
from discord.ext import commands
class ErrorHandeler(commands.Cog):
    """A cog for global error handling"""

    def __init__(self, bot: commands.Bot):
        self.bot = bot

    @commands.Cog.listener()
    async def on_command_error(self, ctx: commands.Context, error: commands.CommandError):
        """Reply with a human-readable hint for expected argument errors."""
        # (error type, reply) pairs; at most one can match a given error
        replies = (
            (commands.MemberNotFound, "Please input a valid user"),
            (commands.UnexpectedQuoteError, "Your message must be surrounded by quotes."),
        )
        for exc_type, message in replies:
            if isinstance(error, exc_type):
                await ctx.send(message)
def setup(bot: commands.Bot):
    # discord.py extension hook: called by load_extension to register the cog
    bot.add_cog(ErrorHandeler(bot))
|
Jarkyc/Franklin-The-Undying
|
errorhandler.py
|
errorhandler.py
|
py
| 599 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "discord.ext.commands.Cog",
"line_number": 4,
"usage_type": "attribute"
},
{
"api_name": "discord.ext.commands",
"line_number": 4,
"usage_type": "name"
},
{
"api_name": "discord.ext.commands.Bot",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "discord.ext.commands",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "discord.ext.commands.Context",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "discord.ext.commands",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "discord.ext.commands.CommandError",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "discord.ext.commands.MemberNotFound",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "discord.ext.commands",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "discord.ext.commands.UnexpectedQuoteError",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "discord.ext.commands",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "discord.ext.commands.Cog.listener",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands.Cog",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "discord.ext.commands",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "discord.ext.commands.Bot",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "discord.ext.commands",
"line_number": 17,
"usage_type": "name"
}
] |
36697027175
|
from jinja2 import Environment, PackageLoader
import os
from typing import Dict
import re
class SQLTemplate:
    """Renders SQL statements from Jinja2 templates packaged with ``tips``."""

    # Relative template folder. (A previous version first computed a path
    # from __file__ and immediately overwrote it with this value; the dead
    # computation has been removed — the final value is unchanged.)
    _templatePath = os.path.join("templates")

    def getTemplate(self, sqlAction: str, parameters: Dict, **kwargs) -> str:
        """Render ``<sqlaction>.j2`` and return it as one whitespace-collapsed line.

        Args:
            sqlAction: template base name; lowercased and stripped before lookup.
            parameters: mapping exposed to the template as ``parameters``.
            **kwargs: extra values exposed to the template as ``kwargs``.

        Returns:
            The rendered SQL with newlines replaced by spaces and runs of
            spaces collapsed to one.
        """
        templateName = f"{sqlAction.lower().strip()}.j2"
        # templates are loaded from the installed 'tips' package, not the
        # filesystem path above
        templateEnv = Environment(
            loader=PackageLoader(
                package_name="tips", package_path="framework/templates"
            ),
            trim_blocks=True,
        )
        cmd = (
            templateEnv.get_template(templateName)
            .render(parameters=parameters, kwargs=kwargs)
            .strip()
            .replace("\n", " ")
        )
        # collapse multiple spaces left behind by template logic
        return re.sub(" +", " ", cmd)
|
ProjectiveGroupUK/tips-snowpark
|
tips/framework/utils/sql_template.py
|
sql_template.py
|
py
| 1,001 |
python
|
en
|
code
| 2 |
github-code
|
6
|
[
{
"api_name": "os.path.join",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "os.path.dirname",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "os.path.relpath",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "typing.Dict",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "jinja2.Environment",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "jinja2.PackageLoader",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "re.sub",
"line_number": 30,
"usage_type": "call"
}
] |
72784214587
|
# https://www.codewars.com/kata/5b76a34ff71e5de9db0000f2
from itertools import starmap
def solve(arr):
    """Return the longest span between consecutive distinct clock times.

    Times are "HH:MM" strings; the schedule wraps past midnight and the
    span excludes the end minute (i.e. gap minus one minute). A single
    distinct time yields "23:59".
    """
    def to_minutes(stamp):
        # "HH:MM" -> minutes since midnight
        hours, minutes = stamp.split(":")
        return int(hours) * 60 + int(minutes)

    # dedupe and order the schedule
    times = sorted({to_minutes(s) for s in arr})
    # wrap-around pairs: each time with its successor (last pairs with first)
    best = max((b - a - 1) % 1440 for a, b in zip(times, times[1:] + times[:1]))
    return "{:02d}:{:02d}".format(*divmod(best, 60))
|
blzzua/codewars
|
6-kyu/simple_time_difference.py
|
simple_time_difference.py
|
py
| 487 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "itertools.starmap",
"line_number": 13,
"usage_type": "call"
}
] |
21347456845
|
class IP():
    """Look up the physical location of an IP address via m.ip138.com."""

    def __init__(self, ipaddress):
        # query endpoint; the address is appended as the ``ip`` parameter
        url = 'http://m.ip138.com/ip.asp?ip='
        self.IP = ipaddress
        self.site = url + self.IP
        # browser-like User-Agent so the site does not reject the request
        self.header = {'User-Agent' :'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/52.0.2743.116 Safari/537.36 Edge/15.15063'}

    def get_phy(self):
        """Fetch the lookup page and extract the location.

        Returns a Chinese description string on success, or 'sth wrong'
        on any failure (network error, HTTP error, or pattern miss).
        """
        import requests as RQ
        import re
        try:
            # send the UA header built in __init__ (it was previously
            # constructed but never passed to the request)
            r = RQ.get(self.site, headers=self.header)
            r.raise_for_status()
            r.encoding = r.apparent_encoding
            # the answer sits near the end of the page; keep only the tail
            html = r.text[-1000:]
            answer = re.findall('本站主数据:(.*?)</p><p', html, re.S)
            answer = answer[0]
            return '您查询的IP:%s物理地址应该在:%s '%(self.IP,answer)
        except Exception:
            # was a bare ``except:`` which also swallowed KeyboardInterrupt/
            # SystemExit; keep the original best-effort fallback value
            return 'sth wrong'
#素质裤衩分析
'''<h1 class="query">您查询的IP:1.1.1.1</h1><p class="result">
本站主数据:澳大利亚 </p><p class="result">
参考数据一:澳大利亚</p>
'''
'''
while True:
point='.'
for I in range(7,100,7):
for j in range(1,100,7):
for k in range(1,100,70):
for L in range(1,100,20):
add=str(I)+point+str(j)+point+str(k)+point+str(L)
print(add)
#ip=input()
i=IP(add)
ans=i.get_phy()
print(ans)
'''
#第一个利用接口写的东西
'''num=input()
num_list=list(num)
num_list.remove(' ')
num_list.remove(' ')
new_num_list=[]
print(num_list)
for i in range(6):
if num_list[i]=='-':
new_num_list.append(int(num_list[i]+num_list[i+1]))
'''
'''
sentinel ='' # 遇到这个就结束
lines = []
for line in iter(input, sentinel):
lines.append(line)
init=list(str(input()))
try:
init.remove(' ')
except:
pass
print(int(init[0])+int(init[1]))
string=input()
str_list=list(string)
str_list=str_list.reverse()
new_str=str(str_list)
print(new_str)
'''
|
Alex-Beng/CubingQQBot
|
IP.py
|
IP.py
|
py
| 1,869 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "requests.get",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "re.findall",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "re.S",
"line_number": 17,
"usage_type": "attribute"
}
] |
5312412579
|
import pygame
from flame import Flame
class Firework:
    """A rocket that rises from the bottom of the screen, then bursts
    into many Flame particles."""

    def __init__(self):
        # start near the bottom centre; a 25x50 white rectangle sprite
        self.rect = pygame.Rect(640, 720, 25, 50)
        self.image = pygame.Surface( (25, 50) )
        self.image.fill( (255, 255, 255) )
        self.exploded = False
        self.flames = []

    def update(self):
        """Rise until the burst height, then animate the particles."""
        if self.exploded:
            for particle in self.flames:
                particle.update()
            return
        self.rect.y -= 2
        # burst once the rocket reaches y = 200
        if self.rect.y <= 200:
            self.explode()

    def draw(self, screen):
        """Blit the rocket, or every particle once it has burst."""
        if self.exploded:
            for particle in self.flames:
                particle.draw(screen)
        else:
            screen.blit(self.image, self.rect)

    def explode(self):
        """Switch to the burst state and spawn the particle shower."""
        self.exploded = True
        self.flames.extend(Flame() for _ in range(1000))
|
jbedu1024/fajerwerki
|
firework.py
|
firework.py
|
py
| 822 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "pygame.Rect",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "pygame.Surface",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "flame.update",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "flame.draw",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "flame.Flame",
"line_number": 31,
"usage_type": "call"
}
] |
33105484438
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
import logging; logger = logging.getLogger("main")
FORMAT = '%(asctime)s - %(levelname)s: %(message)s'
logging.basicConfig(format=FORMAT, level=logging.WARNING)
import time
from flask import Flask, escape, url_for,render_template, g, request, redirect, jsonify, session
from werkzeug import secure_filename
import sys, os
from jinja2 import Environment, PackageLoader
import json
app = Flask(__name__, static_folder='static')
maze = []
width = 0
height = 0
STARTPOS=[1,1]
MAXLIFE = 10
MIN_TIME_BETWEEN_INTERACTIONS=0.2 #seconds
robots = {}
def store_map(rawmaze):
    """Decode a tile map dict into the global walkability grid.

    Args:
        rawmaze: dict with "width", "height" and a flat "data" list of
            tile ids (row-major) — presumably exported by Tiled; confirm.

    A map can only be loaded once per process; later calls are ignored.
    """
    global maze, width, height
    if is_map_loaded():
        logger.warning("Map already loaded. Ignoring it. Restart the backend if you want to update the map.")
        return
    width = rawmaze["width"]
    height = rawmaze["height"]
    # tile ids that count as walkable floor; set lookup instead of the
    # original per-element list scan
    walkable_tiles = {399, 431, 463, 492, 493, 494, 495}
    maze = [tile in walkable_tiles for tile in rawmaze["data"]]
    # ASCII dump of the maze so it can be eyeballed in the console
    for j in range(height):
        for i in range(width):
            if maze[i + j * width]:
                sys.stdout.write('.')
            else:
                sys.stdout.write(' ')
        sys.stdout.write('\n')
    logger.info("Maze successfully loaded!")
def is_map_loaded():
    # truthy once store_map() has set both dimensions (0 means no map yet);
    # returns the int chain itself, callers use it in boolean context only
    return width and height
def get_obstacles(x, y):
    """Return obstacle flags around cell (x, y).

    Returns:
        [centre, north, south, east, west] — True means blocked (out of
        bounds or a wall cell), False means walkable.
    """
    def blocked(cx, cy):
        # out-of-grid coordinates and wall cells both count as obstacles
        return not (0 <= cx < width and 0 <= cy < height and maze[cx + cy * width])

    # same five probes as the original copy-pasted bounds checks
    obstacles = [blocked(x, y),          # centre
                 blocked(x, y - 1),      # north
                 blocked(x, y + 1),      # south
                 blocked(x + 1, y),      # east
                 blocked(x - 1, y)]      # west
    logger.info(str(obstacles))
    return obstacles
@app.route("/set/<name>/<x>/<y>")
def set_robot(name, x, y):
logger.info("Placing robot %s to %s,%s" % (name,x,y))
x = int(x)
y=int(y)
c,_,_,_,_ = get_obstacles(x,y)
if c:
logger.info("Can not place robot there!")
return json.dumps(False)
robots[name]["pos"] = [x,y]
return json.dumps(True)
def get_robot(name):
    """Return the JSON-encoded [x, y] of *name*, or [-1, -1] if unknown."""
    entry = robots.get(name)
    return json.dumps([-1, -1] if entry is None else entry["pos"])
@app.route("/")
def main():
return render_template('index.html')
@app.route("/live")
def map():
return render_template('map.html')
@app.route("/get_robots")
def get_robots():
now = time.time()
complete_robots = dict(robots)
for k in list(robots.keys()):
if robots[k]["life"] <= 0:
logger.warning("Robot %s has no life left! killing it!" % k)
del robots[k]
del complete_robots[k]
continue
if now - robots[k]["lastinteraction"] > 60 * 10:
logger.warning("Robot %s has not being used for 10 min. Removing it." % k)
del robots[k]
del complete_robots[k]
continue
complete_robots[k]["age"] = now - robots[k]["created"]
return json.dumps(complete_robots)
def create_new_robot(name):
    """Register robot *name* at the start position with full life."""
    logger.info("Placing new robot %s at start position" % name)
    robots[name] = {"pos": list(STARTPOS),  # copy: don't alias the shared start cell
                    "created": time.time(),
                    "lastinteraction": 0,
                    "life": MAXLIFE
                    }
@app.route('/move/<name>/<direction>')
def move(name, direction):
    """Move robot *name* one cell in direction N/S/E/W.

    Returns:
        JSON [moved, sensors] where sensors is [n, s, e, w] obstacle
        flags from the new cell, or [False, []] on any failure
        (no map, rate limit, blocked cell, unknown direction).
    """
    if not is_map_loaded():
        logger.error("Map not loaded yet! Reload webpage.")
        return json.dumps([False,[]])
    if name not in robots:
        create_new_robot(name)
    logger.info("Moving robot %s to %s" % (name,direction))
    now = time.time()
    # simple per-robot rate limit
    if now - robots[name]["lastinteraction"] < MIN_TIME_BETWEEN_INTERACTIONS:
        logger.error("Too many interactions with %s. Wait a bit." % name)
        return json.dumps([False,[]])
    robots[name]["lastinteraction"] = now
    x, y = robots[name]["pos"]
    deltas = {'N': (0, -1), 'S': (0, 1), 'E': (1, 0), 'W': (-1, 0)}
    if direction not in deltas:
        # unknown direction used to leave nx/ny unbound -> UnboundLocalError
        # (HTTP 500); report a failed move instead
        logger.error("Unknown direction %s" % direction)
        return json.dumps([False,[]])
    dx, dy = deltas[direction]
    nx, ny = x + dx, y + dy
    c, n, s, e, w = get_obstacles(nx, ny)
    if c:
        logger.info("...can not move there!")
        # bumping into a wall costs one life point
        robots[name]["life"] -= 1
        return json.dumps([False,[]])
    else:
        robots[name]["pos"] = [nx,ny]
        return json.dumps([True,[n,s,e,w]])
@app.route("/map", methods=['POST'])
def load_map():
logger.info("Retrieving the map data...")
store_map(json.loads([k for k in request.form.keys()][0]))
return ""
@app.route("/life/<name>")
def life(name):
return json.dumps(robots[name]["life"] if name in robots else 0)
|
severin-lemaignan/robomaze
|
backend/backend.py
|
backend.py
|
py
| 4,956 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "logging.getLogger",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "logging.basicConfig",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "logging.WARNING",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "flask.Flask",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "sys.stdout.write",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "sys.stdout",
"line_number": 48,
"usage_type": "attribute"
},
{
"api_name": "sys.stdout.write",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "sys.stdout",
"line_number": 50,
"usage_type": "attribute"
},
{
"api_name": "sys.stdout.write",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "sys.stdout",
"line_number": 51,
"usage_type": "attribute"
},
{
"api_name": "json.dumps",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 93,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 97,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 102,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 106,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 111,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 132,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 138,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 147,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 154,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 157,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 177,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 181,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 188,
"usage_type": "call"
},
{
"api_name": "flask.request.form.keys",
"line_number": 188,
"usage_type": "call"
},
{
"api_name": "flask.request.form",
"line_number": 188,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 188,
"usage_type": "name"
},
{
"api_name": "json.dumps",
"line_number": 193,
"usage_type": "call"
}
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.