id | content
---|---|
31603
|
from os import environ
from pathlib import Path
from appdirs import user_cache_dir
from ._version import version as __version__ # noqa: F401
from .bridge import Transform # noqa: F401
from .core import combine # noqa: F401
from .geodesic import BBox, line, panel, wedge # noqa: F401
from .geometry import get_coastlines # noqa: F401
from .geoplotter import GeoBackgroundPlotter, GeoMultiPlotter, GeoPlotter # noqa: F401
from .log import get_logger
# Configure the top-level logger.
logger = get_logger(__name__)
# https://specifications.freedesktop.org/basedir-spec/basedir-spec-latest.html
_cache_dir = Path(environ.get("XDG_CACHE_HOME", user_cache_dir())) / __package__
#: GeoVista configuration dictionary.
config = dict(cache_dir=_cache_dir)
try:
from .siteconfig import update_config as _update_config
_update_config(config)
del _update_config
except ImportError:
pass
try:
from geovista_config import update_config as _update_config
_update_config(config)
del _update_config
except ImportError:
pass
del _cache_dir
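# Hedged sketch (not part of this package): the optional ``siteconfig`` / ``geovista_config``
# hooks imported above only need to expose ``update_config(config)`` and mutate the dict in
# place. A minimal, hypothetical ``geovista_config.py`` could look like:
#
#   from pathlib import Path
#
#   def update_config(config: dict) -> None:
#       # Redirect the cache to a shared location (path is illustrative only).
#       config["cache_dir"] = Path("/opt/shared/geovista-cache")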
|
31611
|
class ControlStyles(Enum, IComparable, IFormattable, IConvertible):
"""
Specifies the style and behavior of a control.
enum (flags) ControlStyles, values:
AllPaintingInWmPaint (8192), CacheText (16384), ContainerControl (1),
DoubleBuffer (65536), EnableNotifyMessage (32768), FixedHeight (64),
FixedWidth (32), Opaque (4), OptimizedDoubleBuffer (131072),
ResizeRedraw (16), Selectable (512), StandardClick (256),
StandardDoubleClick (4096), SupportsTransparentBackColor (2048),
UserMouse (1024), UserPaint (2), UseTextForAccessibility (262144)
"""
def __eq__(self,*args):
""" x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y """
pass
def __format__(self,*args):
""" __format__(formattable: IFormattable,format: str) -> str """
pass
def __ge__(self,*args):
pass
def __gt__(self,*args):
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
def __le__(self,*args):
pass
def __lt__(self,*args):
pass
def __ne__(self,*args):
pass
def __reduce_ex__(self,*args):
pass
def __str__(self,*args):
pass
AllPaintingInWmPaint=None
CacheText=None
ContainerControl=None
DoubleBuffer=None
EnableNotifyMessage=None
FixedHeight=None
FixedWidth=None
Opaque=None
OptimizedDoubleBuffer=None
ResizeRedraw=None
Selectable=None
StandardClick=None
StandardDoubleClick=None
SupportsTransparentBackColor=None
UserMouse=None
UserPaint=None
UseTextForAccessibility=None
value__=None
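# Hedged usage sketch (not from the stub above): ControlStyles values are bit flags, so
# they are combined with "|". SetStyle is the standard WinForms entry point, but it is
# protected, so it is normally called from inside a Control subclass:
#
#   self.SetStyle(ControlStyles.UserPaint
#                 | ControlStyles.AllPaintingInWmPaint
#                 | ControlStyles.OptimizedDoubleBuffer, True)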
|
31617
|
from Crypto.Hash import HMAC, SHA256
import base64
def hmac256Calculation(keyHmac, data):
h = HMAC.new(keyHmac.encode("ascii"), digestmod=SHA256)
h.update(data.encode("ascii"))
return h.digest()
def base64Encoding(input):
dataBase64 = base64.b64encode(input)
dataBase64P = dataBase64.decode("UTF-8")
return dataBase64P
print("HMAC 256 calculation")
hmac256KeyString = "hmac256ForAesEncryption"
plaintext = "The quick brown fox jumps over the lazy dog"
print ("hmac256Key: " + hmac256KeyString)
print("plaintext: " + plaintext)
hmac256 = hmac256Calculation(hmac256KeyString, plaintext)
hmacBase64 = base64Encoding(hmac256)
print ("hmac256 length: " + str(len(hmac256)) + " (Base64) data: " + base64Encoding(hmac256))
|
31645
|
from django.shortcuts import render,redirect,HttpResponse
from repository import models
def trouble_list(request):
# user_info = request.session.get('user_info') # {id:'',}
current_user_id = 1
result = models.Trouble.objects.filter(user_id=current_user_id).order_by('status').\
only('title','status','ctime','processer')
return render(request,'backend_trouble_list.html',{'result': result})
from django.forms import Form
from django.forms import fields
from django.forms import widgets
class TroubleMaker(Form):
title = fields.CharField(
max_length=32,
widget=widgets.TextInput(attrs={'class': 'form-control'})
)
detail = fields.CharField(
widget=widgets.Textarea(attrs={'id':'detail','class':'kind-content'})
)
import datetime
def trouble_create(request):
if request.method == 'GET':
form = TroubleMaker()
else:
form = TroubleMaker(request.POST)
if form.is_valid():
# title,content
# form.cleaned_data
dic = {}
dic['user_id'] = 1  # taken from the session in a real deployment
dic['ctime'] = datetime.datetime.now()
dic['status'] = 1
dic.update(form.cleaned_data)
models.Trouble.objects.create(**dic)
return redirect('/backend/trouble-list.html')
return render(request, 'backend_trouble_create.html',{'form':form})
def trouble_edit(request,nid):
if request.method == "GET":
obj = models.Trouble.objects.filter(id=nid, status=1).only('id', 'title', 'detail').first()
if not obj:
return HttpResponse('已处理中的保单章无法修改..')
# initial= only pre-populates the form fields
form = TroubleMaker(initial={'title': obj.title,'detail': obj.detail})
# accessing form errors triggers validation
return render(request,'backend_trouble_edit.html',{'form':form,'nid':nid})
else:
form = TroubleMaker(data=request.POST)
if form.is_valid():
# number of rows affected by the update
v = models.Trouble.objects.filter(id=nid, status=1).update(**form.cleaned_data)
if not v:
return HttpResponse('已经被处理')
else:
return redirect('/backend/trouble-list.html')
return render(request, 'backend_trouble_edit.html', {'form': form, 'nid': nid})
def trouble_kill_list(request):
from django.db.models import Q
current_user_id = 1
result = models.Trouble.objects.filter(Q(processer_id=current_user_id)|Q(status=1)).order_by('status')
return render(request,'backend_trouble_kill_list.html',{'result':result})
class TroubleKill(Form):
solution = fields.CharField(
widget=widgets.Textarea(attrs={'id':'solution','class':'kind-content'})
)
def trouble_kill(request,nid):
current_user_id = 1
if request.method == 'GET':
ret = models.Trouble.objects.filter(id=nid, processer=current_user_id).count()
# the ticket has not been claimed by this user before
if not ret:
v = models.Trouble.objects.filter(id=nid,status=1).update(processer=current_user_id,status=2)
if not v:
return HttpResponse('手速太慢...')
obj = models.Trouble.objects.filter(id=nid).first()
form = TroubleKill(initial={'title': obj.title,'solution': obj.solution})
return render(request,'backend_trouble_kill.html',{'obj':obj,'form': form,'nid':nid})
else:
ret = models.Trouble.objects.filter(id=nid, processer=current_user_id,status=2).count()
if not ret:
return HttpResponse('去你妈的')
form = TroubleKill(request.POST)
if form.is_valid():
dic = {}
dic['status'] = 3
dic['solution'] = form.cleaned_data['solution']
dic['ptime'] = datetime.datetime.now()
models.Trouble.objects.filter(id=nid, processer=current_user_id,status=2).update(**dic)
return redirect('/backend/trouble-kill-list.html')
obj = models.Trouble.objects.filter(id=nid).first()
return render(request, 'backend_trouble_kill.html', {'obj': obj, 'form': form, 'nid': nid})
def trouble_report(request):
return render(request,'backend_trouble_report.html')
def trouble_json_report(request):
# fetch the data from the database
user_list = models.UserInfo.objects.filter()
response = []
for user in user_list:
from django.db import connection, connections
cursor = connection.cursor()
cursor.execute("""select strftime('%%s',strftime("%%Y-%%m-01",ctime)) * 1000,count(id) from repository_trouble where processer_id = %s group by strftime("%%Y-%%m",ctime)""", [user.nid,])
result = cursor.fetchall()
print(user.username,result)
temp = {
'name': user.username,
'data':result
}
response.append(temp)
import json
return HttpResponse(json.dumps(response))
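# Hedged sketch of the repository.models.Trouble model these views assume. Field names are
# inferred from the queries above; the exact types and options are assumptions, not the
# project's actual models.py.
#
#   class Trouble(models.Model):
#       title = models.CharField(max_length=32)
#       detail = models.TextField()
#       solution = models.TextField(null=True, blank=True)
#       status = models.IntegerField()  # assumed: 1=open, 2=in progress, 3=resolved
#       user = models.ForeignKey('UserInfo', related_name='created_troubles', on_delete=models.CASCADE)
#       processer = models.ForeignKey('UserInfo', null=True, related_name='handled_troubles', on_delete=models.CASCADE)
#       ctime = models.DateTimeField()
#       ptime = models.DateTimeField(null=True)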
|
31648
|
from datetime import datetime
from typing import Optional
from src.utils.config import config
from tortoise import fields
from tortoise.models import Model
defaule_nickname: str = config.get('default').get('nickname')
class BotInfo(Model):
'''QQ bot table'''
bot_id = fields.IntField(pk=True)
'''Bot QQ id'''
owner_id = fields.IntField(null=True)
'''Owner (admin) account'''
nickname = fields.CharField(max_length=255, default=defaule_nickname)
'''Bot nickname'''
last_sign = fields.DatetimeField(null=True)
'''Last sign-in time'''
last_left = fields.DatetimeField(null=True)
'''Last offline time'''
online = fields.BooleanField(default=True)
'''Current online status'''
class Meta:
table = "bot_info"
table_description = "管理QQ机器人账号信息"
@classmethod
async def bot_connect(cls, bot_id):
'''
:Description
Bot connected
:Args
* bot_id: the bot's QQ id
'''
record, _ = await cls.get_or_create(bot_id=bot_id)
now_time = datetime.now()
record.last_sign = now_time
record.online = True
await record.save(update_fields=["last_sign", "online"])
@classmethod
async def bot_disconnect(cls, bot_id):
'''
:Description
Bot disconnected
:Args
* bot_id: the bot's QQ id
'''
record = await cls.get_or_none(bot_id=bot_id)
if record is not None:
now_time = datetime.now()
record.last_left = now_time
record.online = False
await record.save(update_fields=["last_left", "online"])
@classmethod
async def set_owner(cls, bot_id, owner_id) -> bool:
'''
:Description
Set the bot's owner (admin)
:Args
* bot_id: the bot's QQ id
* owner_id: the owner's QQ id
:Returns
* bool: whether the operation succeeded
'''
record = await cls.get_or_none(bot_id=bot_id)
if record is None:
return False
record.owner_id = owner_id
await record.save(update_fields=["owner_id"])
return True
@classmethod
async def get_owner(cls, bot_id) -> Optional[int]:
'''
:Description
Get the bot's owner (admin)
:Args
* bot_id: the bot's QQ id
:Returns
* int: the owner's QQ id
* None
'''
record = await cls.get_or_none(bot_id=bot_id)
owner_id = None
if record is not None:
owner_id = record.owner_id
return owner_id
@classmethod
async def clean_owner(cls, bot_id) -> bool:
'''
:Description
Clear the bot's owner
:Args
* bot_id: the bot's QQ id
:Returns
* bool: whether the owner was cleared
'''
record = await cls.get_or_none(bot_id=bot_id)
if record is None:
return False
record.owner_id = None
await record.save(update_fields=["owner_id"])
return True
@classmethod
async def get_online(cls, bot_id) -> Optional[bool]:
'''
:Description
Get the bot's online status
:Args
* bot_id: the bot's QQ id
:Returns
* bool: whether the bot is online
* None: the bot does not exist
'''
record = await cls.get_or_none(bot_id=bot_id)
return None if record is None else record.online
@classmethod
async def set_nickname(cls, bot_id: int, nickname: str) -> bool:
'''
:Description
Set the bot's nickname
:Args
* bot_id: the bot's QQ id
* nickname: the nickname
'''
record = await cls.get_or_none(bot_id=bot_id)
if record is None:
return False
record.nickname = nickname
await record.save(update_fields=["nickname"])
return True
@classmethod
async def get_nickname(cls, bot_id: int) -> Optional[str]:
'''
:Description
Get the bot's nickname
:Args
* bot_id: the bot's QQ id
:Returns
* str: the nickname
'''
record = await cls.get_or_none(bot_id=bot_id)
return None if record is None else record.nickname
@classmethod
async def detele_bot(cls, bot_id) -> bool:
'''
:Description
Delete the bot record
:Args
* bot_id: the bot's QQ id
:Returns
* bool: whether the deletion succeeded; False means the record does not exist
'''
record = await cls.get_or_none(bot_id=bot_id)
if record is not None:
await record.delete()
return True
return False
@classmethod
async def get_disconnect_bot(cls) -> list[dict]:
'''
Get the list of offline bots as dicts with keys "bot_id" and "last_left"
'''
record_list = await cls.filter(online=False).values("bot_id", "last_left")
return record_list
@classmethod
async def get_all_bot(cls) -> list[dict]:
'''
Get all bot records
'''
record_list = await cls.all().values("bot_id", "owner_id", "nickname", "last_sign", "last_left", "online")
return record_list
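# Hedged usage sketch (assumes Tortoise ORM has been initialised elsewhere with this model
# registered; the QQ id is illustrative):
#
#   await BotInfo.bot_connect(bot_id=123456)
#   owner = await BotInfo.get_owner(bot_id=123456)     # -> Optional[int]
#   await BotInfo.set_nickname(123456, "my-bot")
#   offline = await BotInfo.get_disconnect_bot()       # -> list[dict]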
|
31671
|
import datetime
import json
import logging
import operator
import os
from collections import defaultdict
from datetime import date
import vk_api
import vk_api.exceptions
from vk_api import execute
#from .TimeActivityAnalysis import VKOnlineGraph
from .VKFilesUtils import check_and_create_path, DIR_PREFIX
class VKActivityAnalysis:
"""
Модуль, связанный с исследованием активности пользователей
"""
def __init__(self, vk_session):
"""
Конструктор
:param vk_session: объект сессии класса VK
"""
self.api = vk_session.get_api()
self.tools = vk_api.VkTools(vk_session)
self.logger = logging.getLogger("ActivityAnalysis")
# VK execute function: fetch like info for up to 25 items per call
vk_get_all_likes_info = vk_api.execute.VkFunction(
args=('user_id', 'owner_id', 'item_ids', 'type'),
code='''
var item_ids = %(item_ids)s;
var result = [];
var i = 0;
while(i <= 25 && item_ids.length > i){
var params = {"user_id":%(user_id)s,
"owner_id": %(owner_id)s,
"item_id": item_ids[i],
"type": %(type)s
};
result = result + [API.likes.isLiked(params) + {"owner_id": params["owner_id"],
"user_id": params["user_id"],
"type": params["type"],
"item_id": params["item_id"]} ];
i = i+1;
}
return {result: result, count: item_ids.length};
''')
# VK execute function: fetch mutual friends, checking up to 25 batches of 100 ids per call
vk_get_all_common_friends = vk_api.execute.VkFunction(
args=('source_uid', 'target_uids'),
code='''
var source_uid = %(source_uid)s;
var target_uids = %(target_uids)s;
var result = [];
var i = 0;
while(i <= 25 && target_uids.length > i*100){
var sliced = 0;
if ( (i+1)*100 > target_uids.length) {
sliced = target_uids.slice(i*100,target_uids.length);
} else {
sliced = target_uids.slice(i*100,(i+1)*100);
}
var params = {"source_uid":%(source_uid)s,
"target_uids": sliced,
};
result = result + API.friends.getMutual(params);
i = i+1;
}
return {result:result};
''')
def is_online(self, uid):
"""
Проверяет онлайн пользователя
:param uid: id пользователя
"""
resp = self.api.users.get(user_id=uid, fields='online')
self.logger.debug("is_online: " + str(uid) + '; ' + str(resp))
if len(resp) > 0 and 'online' in resp[0]:
return resp[0]['online']
else:
return None
def likes_iter(self, uid, friend_uid, count, method, max_count, values, type='post', limit=100):
"""
Генератор инфомации о лайках
:param uid: id пользователя которого проверяем
:param friend_uid: id друга пользователя
:param count: количество ??? TODO: че я тут написал, фигня какая-то
:param method: метод VKApi
:param max_count: Максимальное количество элментов, которое можно загрузить 1м методом за раз
:param values: Параметры метода
:param type: Тип записей (пост, фото)
:param limit: максимальное количство записей
"""
self.logger.debug("likes_iter: " + str(uid) + '; ' + str(friend_uid))
item_ids = []
entries = []
iterations = count // 25
tail = count % 25
iterations_count = 0
for key, entry in enumerate(self.tools.get_all_iter(method, max_count, values=values,
limit=limit)
):
if key > limit:
break
if iterations_count < iterations:
if key != 0 and key % 25 != 0:
item_ids += [entry['id']]
entries += [entry]
else:
for i, like_info in enumerate(self.vk_get_all_likes_info(self.api, user_id=uid,
owner_id=friend_uid,
item_ids=item_ids,
type=type).get('result')):
entries[i].update(like_info)
yield entries[i]
item_ids = []
entries = []
iterations_count += 1
else:
if key % 25 != tail - 1:
item_ids += [entry['id']]
entries += [entry]
else:
for i, like_info in enumerate(self.vk_get_all_likes_info(self.api, user_id=uid,
owner_id=friend_uid,
item_ids=item_ids,
type=type).get('result')):
entries[i].update(like_info)
yield entries[i]
item_ids = []
entries = []
def likes_friend_photos(self, uid, friend_uid, limit=100):
"""
Генератор лайков на фотографиях
:param uid: id пользователя, которого проверяем
:param friend_uid: id друга
:param limit: максимальное количество загруженных записей
"""
self.logger.debug("likes_friend_photos: " + str(uid) + '; ' + str(friend_uid))
count = self.api.photos.getAll(owner_id=friend_uid, count=1)['count']
values = {'owner_id': friend_uid, 'extended': 1, 'no_service_albums': 0}
for like_info in self.likes_iter(uid=uid,
friend_uid=friend_uid,
count=count,
method='photos.getAll',
max_count=200,
values=values,
type='photo',
limit=limit):
yield like_info
def likes_friend_wall(self, uid, friend_uid, limit=100):
"""
Генератор лайков на стене TODO: может, совместить фото и стену? А то код почти одинковый
:param uid: id пользователя, которого проверяем
:param friend_uid: id друга
:param limit: максимально число записей для загрузки
"""
self.logger.debug("likes_friend_wall: " + str(uid) + '; ' + str(friend_uid))
count = self.api.wall.get(owner_id=friend_uid, count=1)['count']
values = {'owner_id': friend_uid, 'filter': 'all'}
for like_info in self.likes_iter(uid=uid,
friend_uid=friend_uid,
count=count,
method='wall.get',
max_count=100,
values=values,
type='post',
limit=limit):
yield like_info
def likes_group_wall(self, uid, group_id, limit=100):
"""
Генератор лайков на стене СООБЩЕСТВА
:param uid: id пользователя
:param group_id: id группы
:param limit: максимальное число записей для обработки
"""
self.logger.debug("likes_group_wall: " + str(uid) + '; ' + str(group_id))
return self.likes_friend_wall(uid, -abs(group_id), limit)
def friends_common_iter(self, uid, friends_ids):
"""
Генератор информации об общих друзьях
:param uid: id пользователя, которого проверяем
:param friends_ids: массив id друзей
"""
self.logger.debug("friends_common_iter: " + str(uid) + '; ' + str(friends_ids))
steps = len(friends_ids) // 2500 + 1
for i in range(steps):
commmon_friends = self.vk_get_all_common_friends(self.api,
source_uid=uid,
target_uids=friends_ids[
i * 2500: min(
(i + 1) * 2500,
len(friends_ids)
)
]).get('result')
if not commmon_friends:
continue
for friend in commmon_friends:
yield friend
def friends_all_ids(self, uid, friends_full=None):
"""
Получить id всех АКТИВНЫХ (не собачек) друзей пользователя
:param uid: id пользователя
:param friends_full: массив полной информации о друзьях
"""
self.logger.debug("friends_all_ids: " + str(uid))
if friends_full is None:
friends_full = self.friends_all_full(uid=uid)
return [el['id'] for el in friends_full]
def friends_all_full(self, uid, friends_full=None):
"""
Получает подробную информацию по всем АКТИВНЫМ (не собачкам) друзьям пользователя
:param uid: id пользователя
:param friends_full: массив полной информации о друзьях
"""
self.logger.debug("friends_all_full: " + str(uid))
if friends_full is not None:
return friends_full
# TODO: check whether there is a bitmask for the friends field scope
scope = 'nickname, domain, sex, bdate, city, country, timezone, photo_50, photo_100, photo_200_orig, has_mobile, contacts, education, online, relation, last_seen, status, can_write_private_message, can_see_all_posts, can_post, universities';
return [el for el in self.tools.get_all('friends.get', 5000, values={'user_id': uid, 'fields': scope})['items']
if 'deactivated' not in el]
def common_city_score(self, uid, friends_full=None, result_type='first'):
"""
Возвращает очки за общий город.
Если пользователь совпадает городом с другом, то +3 очка
Если количество людей с таким городом максимально, то +3 очка первым 10%, +2 -- првым 20%
:param uid: id пользователя, которого проверяем
:param friends_full: массив полной информации о друзьях
:param result_type: Тип позвращаемого результата. 'count' - все результаты
:type result_type: any('first', 'count')
:return: все результаты или первые 20%
"""
self.logger.debug("common_city_score: " + str(uid))
res = {}
friends_full = self.friends_all_full(uid=uid, friends_full=friends_full)
for friend in friends_full:
if 'city' in friend:
if friend['city']['title'] in res:
res[friend['city']['title']] += 1
else:
res.update({friend['city']['title']: 1})
res = sorted(res.items(), key=operator.itemgetter(1), reverse=True)
if result_type == 'count':
return dict(res)
first_10p = {city[0]: 3 for city in res[:int(len(res) * 0.1)]}
first_30p = {city[0]: 2 for city in res[int(len(res) * 0.1):int(len(res) * 0.3)]}
first_10p.update(first_30p)
return first_10p
def score_common_age(self, uid, friends_full=None, result_type='first'):
"""
Очки за общий возраст
:param uid: id пользователя
:param friends_full: массив полной информации о друзьях
:param result_type: Тип позвращаемого результата. 'count' - все результаты
:type result_type: any('first', 'count')
:return: все результаты или первые 20%
"""
self.logger.debug("score_common_age: " + str(uid))
res = defaultdict(lambda: 0)
friends_full = self.friends_all_full(uid=uid, friends_full=friends_full)
for friend in friends_full:
if 'bdate' in friend:
bdate = friend['bdate'].split('.')
if len(bdate) > 2:
res[int(bdate[2])] += 1
res = sorted(res.items(), key=operator.itemgetter(1), reverse=True)
if result_type == 'count':
return dict(res)
first_10p = {city[0]: 3 for city in res[:int(len(res) * 0.1)]}
first_30p = {city[0]: 2 for city in res[int(len(res) * 0.1):int(len(res) * 0.3)]}
first_10p.update(first_30p)
if len(first_10p) == 0:
first_10p = {res[0][0]: 1}
return first_10p
def search_user_by_age(self, user_info, group_id, age=(1, 100)):
"""
Вычислить год рождения пользователя через группу
:param user_info: информация о пользователе, которого проверяем
:param group_id: id любой группы у пользователя
:param age: диапазон предполагаемых возрастов
:return: точный год рождения, который указал пользователь
"""
info = self.api.users.search(q=user_info['first_name'] + ' ' + user_info['last_name'],
group_id=group_id,
age_from=age[0],
age_to=age[1],
count=1000)['items']
for user in info:
if user['id'] == user_info['id']:
if age[0] == age[1]:
return date.today().year - age[0]
return self.search_user_by_age(user_info=user_info,
group_id=group_id,
age=(age[0], (age[1] - age[0]) // 2 + age[0]))
if age[0] == age[1]:
return date.today().year - age[0] - 1
return self.search_user_by_age(user_info=user_info,
group_id=group_id,
age=(age[1], (age[1] - age[0]) * 2 + age[0]))
def user_age(self, uid, friends_full=None):
"""
Вычислить предполагаемый возраст пользователя 2мя способами:
-максимальное кол-во по друзьям (для <25 лет вполне точный рез-т)
-по поиску в группе (точный результат указанного пользователем)
:param uid: id пользователя, которого проверяем
:param friends_full: массив полной информации о друзьях
:return: словарь с результатами
"""
res = {'user_defined': -1, 'friends_predicted': -1}
user_info = self.api.users.get(user_ids=uid, fields='bdate')[0]
if 'bdate' in user_info:
bdate = user_info['bdate'].split('.')
if len(bdate) > 2:
res['user_defined'] = bdate[2]
else:
user_group = self.api.groups.get(user_id=uid, count=1)['items']
if 0 in user_group:
user_group = user_group[0]
res['user_defined'] = self.search_user_by_age(user_info=user_info,
group_id=user_group)
else:
user_group = self.api.groups.get(user_id=uid, count=1)['items']
if 0 in user_group:
user_group = user_group[0]
res['user_defined'] = self.search_user_by_age(user_info=user_info,
group_id=user_group)
common_age = int(list(self.score_common_age(uid=uid).items())[0][0])
res['friends_predicted'] = common_age
return res
def check_friends_online(self, uid):
"""
Проверяет онлайн всех друзей пользователя
:param uid: id пользователя, которого проверяем
:return: результат friends.getOnline
"""
return self.api.friends.getOnline(user_id=uid)
def likes_friends(self, uid, limit_entries=100, friends_full=None):
"""
Генератор информации о лайках у друзей на фото и стене
:param uid: id пользователя, которого проверяем
:param limit_entries: максимальное кол-во записей на каждом друге
:param friends_full: массив полной информации о друзьях
"""
friends_full = self.friends_all_full(uid=uid, friends_full=friends_full)
friends = self.friends_all_ids(uid=uid, friends_full=friends_full)
count = len(friends)
for i, friend in enumerate(friends, 1):
for like in self.likes_friend_wall(uid=uid, friend_uid=friend, limit=limit_entries):
if like['liked'] or like['copied']:
r = like
r.update({"count": count,
"current": i,
"name": friends_full[i-1]['first_name'] + ' ' + friends_full[i-1]['last_name']})
yield r
for like in self.likes_friend_photos(uid=uid, friend_uid=friend, limit=limit_entries):
if like['liked'] or like['copied']:
r = like
r.update({"count": count,
"current": i,
"name": friends_full[i-1]['first_name'] + ' ' + friends_full[i-1]['last_name']})
yield r
yield {"count": len(friends), "current": i, "inf": 0}
def likes_groups(self, uid, limit=100, groups=None):
"""
Генератор информации о лайках в сообществах
:param uid: id пользователя, которого проверяем
:param limit: максимальное число записей с каждой группы
:param groups: массив id групп
"""
# TODO: здесь бы хорошо убрать повторное использование кода из likes_friends
if groups is None:
groups = self.tools.get_all('users.getSubscriptions', 200, values={"extended": 1, "user_id": uid})
for i, group in enumerate(groups['items'], 1):
try:
for like in self.likes_group_wall(uid=uid, group_id=group['id'], limit=limit):
if like['liked'] or like['copied']:
r = like
r.update({"count": groups['count'],
"current": i,
"name": groups['items'][i-1]['name']})
yield r
except vk_api.exceptions.ApiError as error:
# TODO: handle this properly
if error.code == 13:
self.logger.error("Size is too big, skipping group_id=" + str(group['id']))
elif error.code == 15:
self.logger.warning("Wall is disabled, skipping group_id=" + str(group['id']))
else:
raise error
except vk_api.exceptions.ApiHttpError as error:
# TODO: unclear failure, needs investigation
self.logger.error("Server 500 error, skipping group_id=" + str(group['id']))
yield {"count": groups['count'], "current": i, "inf": 0}
def likes_friends_and_groups(self, uid, limit=100, friends_need=False, groups_need=False, friends_full=None, groups=None):
"""
Генератор информации о лайках в группах и сообществах
:param uid: id пользователя, которого проверяем
:param limit: количество записей, которые нужно загружать на каждом элементе
:param friends_need: необходима проверка у друзй
:param groups_need: необходима проверка у групп
:param friends_full: массив полной информации о друзьях
:param groups: массив подписок
:return:
"""
friends_full = self.friends_all_full(uid, friends_full)
if groups is None:
# TODO: subscriptions may also contain users, needs rework, possible bugs
groups = self.tools.get_all('users.getSubscriptions', 200, values={"extended": 1, "user_id": uid})
friends_count = friends_need*len(friends_full)
groups_count = groups_need*groups['count']
count = friends_count + groups_need*groups['count']
if friends_need:
for like in self.likes_friends(uid=uid, limit_entries=limit, friends_full=friends_full):
r = like
r.update({"count": count})
yield r
if groups_need:
for like in self.likes_groups(uid=uid, limit=limit, groups=groups):
r = like
r.update({"count": count, "current": like['current'] + friends_count})
yield r
def score_likes_friends(self, uid, limit=100, friends_full=None):
"""
Возвращает баллы за лайки друзьям
:param uid: id пользователя, которого проверяем
:param limit: количество записей загружаемых на каждой странице
:param friends_full: массив полной информации о друзтях
"""
score = 0
for post_info in self.likes_friends(uid=uid,
limit_entries=limit,
friends_full=friends_full):
if 'liked' in post_info:
if post_info['liked'] == 1:
score += 1
if 'copied' in post_info:
if post_info['copied'] == 1:
score += 10
if 'inf' in post_info:
temp = score
score = 0
yield 'likes_friends', post_info['current']-1, temp
def score_likes_self(self, uid, limit=100, friends_full=None):
"""
Возвращает очки за лайки друзей у пользователя на странице
:param uid: id пользователя, которого проверяем
:param limit: максимальное число записей
:param friends_full: массив полной информации о друзьях
"""
friends = self.friends_all_ids(uid=uid, friends_full=friends_full)
res = [0]*len(friends)
for key, post in enumerate(self.tools.get_all_iter(method='wall.get', max_count=100, values={'owner_id': uid},
limit=limit)):
if key > limit:
break
post_likes = self.tools.get_all(method='likes.getList', max_count=100, values={'type': 'post',
'skip_own':1,
'owner_id': uid,
'item_id': post['id']})['items']
post_reposts = self.tools.get_all(method='likes.getList', max_count=100, values={'type': 'post',
'skip_own': 1,
'owner_id': uid,
'filter': 'copies',
'item_id': post['id']})['items']
for user in post_likes:
if user in friends:
res[friends.index(user)] += 1
for user in post_reposts:
if user in friends:
if user in friends:
res[friends.index(user)] += 10
for key, photo in enumerate(self.tools.get_all_iter(method='photos.getAll',
max_count=200,
values={'owner_id': uid, 'extended': 1, 'no_service_albums': 0})):
if key>limit:
break
photo_likes = self.tools.get_all(method='likes.getList', max_count=100, values={'type': 'photo',
'skip_own':1,
'owner_id': uid,
'item_id': photo['id']})['items']
for user in photo_likes:
if user in friends:
if user in friends:
res[friends.index(user)] += 1
for i, friend in enumerate(res):
yield 'likes_self', i, friend
def score_mutual_friends(self, uid, friends_full=None):
"""
Возвращает очки за общих друзей
:param uid: id пользователя, которого проверяем
:param friends_full: массив полной информации о друзьях
"""
res = []
friends = self.friends_all_ids(uid=uid, friends_full=friends_full)
for mutual in self.friends_common_iter(uid=uid, friends_ids=friends):
res.append(mutual['common_count'])
res_sorted = sorted(list(set(res)))
count = len(res_sorted)
for i, friend in enumerate(res):
yield 'friends', i, res_sorted.index(friend)*10//count
def score_all_common_age(self, uid, friends_full=None):
"""
Возвращает очки за общий возраст
:param uid: id пользователя, которого проверяем
:param friends_full: массив полной информации о друзьях
"""
friends_full = self.friends_all_full(uid=uid, friends_full=friends_full)
user_age = self.user_age(uid=uid, friends_full=friends_full)
def get_user_real_age(age):
if age[0] == age[1]:
return age[0],1,2
elif age[0] == -1:
return age[1],2,3
elif age[1] == -1:
return age[0],2,3
else:
return (int(age[0])+int(age[1]))//2, -1, abs(int(age[0])-int(age[1]))
user_real_age = get_user_real_age((user_age['user_defined'], user_age['friends_predicted']))
for i, friend in enumerate(friends_full):
score = 0
if 'bdate' in friend:
date = friend['bdate'].split('.')
if len(date)>2:
if int(date[2]) - user_real_age[1] <= user_real_age[0] <= int(date[2]) + user_real_age[1]:
score = 3
elif int(date[2]) - user_real_age[2] <= user_real_age[0] <= int(date[2]) + user_real_age[2]:
score = 1
yield 'age', i, score
def score_all_common_city(self, uid, friends_full=None):
"""
Возвращает очки за общий город
:param uid: id пользователя, которого проверяем
:param friends_full: массив полной информации о друзьях
"""
friends_full = self.friends_all_full(uid=uid, friends_full=friends_full)
common_city_score = self.common_city_score(uid=uid, friends_full=friends_full, result_type='first')
user = self.api.users.get(user_id=uid,fields='city')[0]
user_city = ''
if 'city' in user:
user_city = user['city']['title']
for i, friend in enumerate(friends_full):
score = 0
if 'city' in friend:
friend_city = friend['city']['title']
if friend_city in common_city_score:
score = common_city_score[friend_city]
score += (friend_city==user_city)*3
yield 'city', i, score
def score_all(self,
uid,
limit=100,
likes_friends_need=False,
likes_self_need=False,
common_friends_need=False,
common_age_need=False,
common_city_need=False,
friends_full=None):
"""
Генератор информации о круге общения
:param uid: id пользователя, которого проверяем
:param limit: максимальное количество загружаемых каждый раз записей
:param likes_friends_need: необходимо проверять лайки друзьям
:param likes_self_need: необходимо проверять лайки друзей
:param common_friends_need: проверять общих друзей
:param common_age_need: проверять общий возраст
:param common_city_need: проверять общий город
:param friends_full: массив полной информации о друзьях
"""
friends_full = self.friends_all_full(uid=uid, friends_full=friends_full)
if common_age_need:
for element in self.score_all_common_age(uid=uid, friends_full=friends_full):
yield element
if common_city_need:
for element in self.score_all_common_city(uid=uid, friends_full=friends_full):
yield element
if common_friends_need:
for element in self.score_mutual_friends(uid=uid, friends_full=friends_full):
yield element
if likes_self_need:
for element in self.score_likes_self(uid=uid, limit=limit, friends_full=friends_full):
yield element
if likes_friends_need:
for element in self.score_likes_friends(uid=uid, limit=limit, friends_full=friends_full):
yield element
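# Hedged usage sketch (the token is a placeholder; vk_api.VkApi is the library's documented
# entry point and the methods below are defined in this class):
#
#   session = vk_api.VkApi(token="<access token>")
#   analysis = VKActivityAnalysis(session)
#   print(analysis.is_online(uid=1))
#   for kind, friend_index, score in analysis.score_all(uid=1, common_city_need=True):
#       print(kind, friend_index, score)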
|
31707
|
import numpy as np
from nlpaug.model.audio import Audio
class Normalization(Audio):
def manipulate(self, data, method, start_pos, end_pos):
aug_data = data.copy()
if method == 'minmax':
new_data = self._min_max(aug_data[start_pos:end_pos])
elif method == 'max':
new_data = self._max(aug_data[start_pos:end_pos])
elif method == 'standard':
new_data = self._standard(aug_data[start_pos:end_pos])
aug_data[start_pos:end_pos] = new_data
return aug_data
def get_support_methods(self):
return ['minmax', 'max', 'standard']
def _standard(self, data):
return (data - np.mean(data)) / np.std(data)
def _max(self, data):
return data / np.amax(np.abs(data))
def _min_max(self, data):
lower = np.amin(np.abs(data))
return (data - lower) / (np.amax(np.abs(data)) - lower)
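# Minimal standalone demo of the 'standard' method's arithmetic on a toy signal
# (mirrors _standard above without instantiating the nlpaug class):
if __name__ == "__main__":
    demo = np.array([0.5, -1.0, 2.0, 0.0])
    standardized = (demo - np.mean(demo)) / np.std(demo)
    print(standardized.mean(), standardized.std())  # approximately 0.0 and 1.0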
|
31708
|
from __future__ import print_function, division
import numpy as np
weights = np.transpose(np.load('w0.npy'))
print(weights.shape)
feature_names = ["" for i in range(125)]
prev = 0
prev_name = ''
for line in open('feature_names.txt'):
if line.startswith('#'):
continue
words = line.split()
index = int(words[0])
feature_name = words[1][:-1]
feature_type = words[2]
if prev_name != '':
for i in range(prev, index + 1):
if prev + 1 < index:
feature_names[i] = prev_name + '_' + str(i - prev)
else:
feature_names[i] = prev_name
prev = index
prev_name = feature_name
feature_names[-1] = prev_name
print(feature_names, len(feature_names))
sorted_indices = np.argsort(np.absolute(weights), axis=1)
print(sorted_indices[:, 120:124])
|
31762
|
try:
import ujson as json
except ModuleNotFoundError:
# https://github.com/python/mypy/issues/1153 (mypy bug with try/except conditional imports)
import json # type: ignore
try:
import msgpack
except ModuleNotFoundError:
pass
class Serializer:
pass
class StringSerializer(Serializer):
def serialize(self, item):
return str(item).encode("utf-8")
def deserialize(self, data):
return data.decode("utf-8")
class JsonSerializer(Serializer):
def serialize(self, item):
return json.dumps(item).encode("utf-8")
def deserialize(self, data):
return json.loads(data.decode("utf-8"))
class MsgpackSerializer(Serializer):
def serialize(self, item):
result = msgpack.packb(item, use_bin_type=True)
return result
def deserialize(self, data):
return msgpack.unpackb(data, raw=False)
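# Hedged usage sketch: round-trip a payload through JsonSerializer; MsgpackSerializer
# behaves the same way when msgpack is installed.
if __name__ == "__main__":
    payload = {"id": 1, "tags": ["a", "b"]}
    raw = JsonSerializer().serialize(payload)   # bytes
    assert JsonSerializer().deserialize(raw) == payload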
|
31766
|
import numpy as np
from numpy.testing import assert_allclose
from robogym.envs.rearrange.common.utils import (
get_mesh_bounding_box,
make_block,
make_blocks_and_targets,
)
from robogym.envs.rearrange.simulation.composer import RandomMeshComposer
from robogym.mujoco.mujoco_xml import MujocoXML
def _get_default_xml():
xml_source = """
<mujoco>
<asset>
<material name="block_mat" specular="0" shininess="0.5" reflectance="0" rgba="1 0 0 1"></material>
</asset>
</mujoco>
"""
xml = MujocoXML.from_string(xml_source)
return xml
def test_mesh_composer():
for path in [
None,
RandomMeshComposer.GEOM_ASSET_PATH,
RandomMeshComposer.GEOM_ASSET_PATH,
]:
composer = RandomMeshComposer(mesh_path=path)
for num_geoms in range(1, 6):
xml = _get_default_xml()
composer.reset()
xml.append(composer.sample("object0", num_geoms, object_size=0.05))
sim = xml.build()
assert len(sim.model.geom_names) == num_geoms
pos, size = get_mesh_bounding_box(sim, "object0")
assert np.isclose(np.max(size), 0.05)
pos2, size2 = composer.get_bounding_box(sim, "object0")
assert np.allclose(pos, pos2)
assert np.allclose(size, size2)
def test_block_object():
xml = _get_default_xml()
xml.append(make_block("object0", object_size=np.ones(3) * 0.05))
sim = xml.build()
assert len(sim.model.geom_size) == 1
assert_allclose(sim.model.geom_size, 0.05)
def test_blocks_and_targets():
xml = _get_default_xml()
for obj_xml, target_xml in make_blocks_and_targets(num_objects=5, block_size=0.05):
xml.append(obj_xml)
xml.append(target_xml)
sim = xml.build()
assert len(sim.model.geom_size) == 10
assert_allclose(sim.model.geom_size, 0.05)
|
31784
|
import logging
import pytest
from selenium.webdriver.remote.remote_connection import LOGGER
from stere.areas import Area, Areas
LOGGER.setLevel(logging.WARNING)
def test_areas_append_wrong_type():
"""Ensure a TypeError is raised when non-Area objects are appended
to an Areas.
"""
a = Areas()
with pytest.raises(TypeError) as e:
a.append('1')
assert str(e.value) == (
'1 is not an Area. Only Area objects can be inside Areas.'
)
def test_areas_append():
"""Ensure Area objects can be appended to an Areas."""
a = Areas()
area = Area()
a.append(area)
assert 1 == len(a)
def test_areas_remove():
"""Ensure Areas.remove() behaves like list.remove()."""
a = Areas()
area = Area()
a.append(area)
a.remove(area)
assert 0 == len(a)
def test_areas_len():
"""Ensure Areas reports length correctly."""
a = Areas(['1', '2', '3'])
assert 3 == len(a)
def test_areas_containing_type(test_page):
"""Ensure Areas.containing() returns an Areas object."""
test_page.navigate()
found_areas = test_page.repeating_area.areas.containing(
'link', 'Repeating Link 2',
)
assert isinstance(found_areas, Areas)
def test_areas_containing(test_page):
"""Ensure Areas.containing() returns valid results."""
test_page.navigate()
found_areas = test_page.repeating_area.areas.containing(
'link', 'Repeating Link 2',
)
assert found_areas[0].text.value == 'Repeating Area 2'
def test_areas_containing_nested_attr(test_page):
"""Ensure Areas.containing() handles dot attrs."""
test_page.navigate()
found_areas = test_page.repeating_area.areas.containing(
'nested.ax', 'AX1',
)
assert found_areas[0].nested.ax.value == 'AX1'
def test_areas_containing_invalid_field_name(test_page):
test_page.navigate()
with pytest.raises(AttributeError) as e:
test_page.repeating_area.areas.containing(
'lunk', 'Repeating Link 2')
assert str(e.value) == "'Area' object has no attribute 'lunk'"
def test_areas_containing_nested_attr_invalid_field_name(test_page):
test_page.navigate()
with pytest.raises(AttributeError) as e:
test_page.repeating_area.areas.containing(
'nested.cx', 'CX1')
assert str(e.value) == "'Area' object has no attribute 'cx'"
def test_areas_contain(test_page):
"""Ensure Areas.contain() returns True when a result is found."""
test_page.navigate()
assert test_page.repeating_area.areas.contain("link", "Repeating Link 1")
def test_areas_contain_not_found(test_page):
"""Ensure Areas.contain() returns False when a result is not found."""
test_page.navigate()
assert not test_page.repeating_area.areas.contain(
"link", "Repeating Link 666",
)
|
31810
|
class ATMOS(object):
'''
class ATMOS
- attributes:
- self defined
- methods:
- None
'''
def __init__(self,info):
self.info = info
for key in info.keys():
setattr(self, key, info[key])
def __repr__(self):
return 'Instance of class ATMOS'
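# Usage sketch: attributes are set directly from the info dict (keys below are illustrative).
if __name__ == "__main__":
    atmos = ATMOS({'temperature': 288.15, 'pressure': 101325.0})
    print(atmos.temperature, atmos.pressure)  # 288.15 101325.0
    print(repr(atmos))                        # Instance of class ATMOS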
|
31904
|
import re
from .utils import validator
regex = (
r'^[A-Z]{2}[0-9]{2}[A-Z0-9]{13,30}$'
)
pattern = re.compile(regex)
def char_value(char):
"""A=10, B=11, ..., Z=35
"""
if char.isdigit():
return int(char)
else:
return 10 + ord(char) - ord('A')
def modcheck(value):
"""Check if the value string passes the mod97-test.
"""
# move country code and check numbers to end
rearranged = value[4:] + value[:4]
# convert letters to numbers
converted = [char_value(char) for char in rearranged]
# interpret as integer
integerized = int(''.join([str(i) for i in converted]))
return (integerized % 97 == 1)
@validator
def iban(value):
"""
Return whether or not given value is a valid IBAN code.
If the value is a valid IBAN this function returns ``True``, otherwise
:class:`~validators.utils.ValidationFailure`.
Examples::
>>> iban('DE29100500001061045672')
True
>>> iban('123456')
ValidationFailure(func=iban, ...)
.. versionadded:: 0.8
:param value: IBAN string to validate
"""
return pattern.match(value) and modcheck(value)
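# Worked example using the IBAN from the doctest above: the rearrangement moves the country
# code and check digits to the end before the mod-97 test (shown as a comment because this
# module relies on a relative import).
#
#   >>> 'DE29100500001061045672'[4:] + 'DE29100500001061045672'[:4]
#   '100500001061045672DE29'
#   >>> modcheck('DE29100500001061045672')
#   True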
|
31969
|
import math
def get_dist_bins(num_bins, interval=0.5):
bins = [(interval * i, interval * (i + 1)) for i in range(num_bins - 1)]
bins.append((bins[-1][1], float('Inf')))
return bins
def get_dihedral_bins(num_bins, rad=False):
first_bin = -180
bin_width = 2 * 180 / num_bins
bins = [(first_bin + bin_width * i, first_bin + bin_width * (i + 1))
for i in range(num_bins)]
if rad:
bins = deg_bins_to_rad(bins)
return bins
def get_planar_bins(num_bins, rad=False):
first_bin = 0
bin_width = 180 / num_bins
bins = [(first_bin + bin_width * i, first_bin + bin_width * (i + 1))
for i in range(num_bins)]
if rad:
bins = deg_bins_to_rad(bins)
return bins
def deg_bins_to_rad(bins):
return [(v[0] * math.pi / 180, v[1] * math.pi / 180) for v in bins]
def get_bin_values(bins):
bin_values = [t[0] for t in bins]
bin_width = (bin_values[2] - bin_values[1]) / 2
bin_values = [v + bin_width for v in bin_values]
bin_values[0] = bin_values[1] - 2 * bin_width
return bin_values
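# Quick example of the distance binning: the final bin is open-ended.
if __name__ == "__main__":
    print(get_dist_bins(3))  # [(0.0, 0.5), (0.5, 1.0), (1.0, inf)]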
|
31999
|
import logging
from fastapi import APIRouter
from starlette import status
from api.endpoints.dependencies.tenant_security import get_from_context
from api.endpoints.models.v1.tenant import TenantGetResponse
from api.services.v1 import tenant_service
router = APIRouter()
logger = logging.getLogger(__name__)
@router.post(
"/make-issuer", status_code=status.HTTP_200_OK, response_model=TenantGetResponse
)
async def initialize_issuer() -> TenantGetResponse:
"""
If the innkeeper has authorized your tenant to become an issuer, initialize
here to write a endorsed public did the configured Hyperledger-Indy service
"""
wallet_id = get_from_context("TENANT_WALLET_ID")
tenant_id = get_from_context("TENANT_ID")
item = await tenant_service.make_issuer(
tenant_id,
wallet_id,
)
links = [] # TODO: determine useful links for /make-issuer
return TenantGetResponse(item=item, links=links)
|
32062
|
import SpiceInterface
import TestUtilities
# create the test utility object
test_utilities_obj = TestUtilities.TestUtilities()
test_utilities_obj.netlist_generation('bandgap_opamp_test_op.sch', 'rundir')
# create the spice interface
spice_interface_obj = SpiceInterface.SpiceInterface(netlist_path="rundir/bandgap_opamp_test_op.spice")
spice_interface_obj.config['simulator']['shared'] = True
# add the op save parameters
devices = ['xbmr.XMcurr', 'xbmr.XMcurr1', 'xbmr.XM2', 'xbmr.XM3']
spice_interface_obj.insert_op_save(devices, ['vsat_marg'])
# run the simulation
spice_interface_obj.run_simulation()
# analyse the results
spice_interface_obj.plot_op_save(devices, ['vsat_marg'], 'temp-sweep')
|
32101
|
class GraphLearner:
"""Base class for causal discovery methods.
Subclasses implement different discovery methods. All discovery methods are in the package "dowhy.causal_discoverers"
"""
def __init__(self, data, library_class, *args, **kwargs):
self._data = data
self._labels = list(self._data.columns)
self._adjacency_matrix = None
self._graph_dot = None
def learn_graph(self):
'''
Discover causal graph and the graph in DOT format.
'''
raise NotImplementedError
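# Hedged sketch of a concrete subclass; ``_run_discovery`` and
# ``adjacency_matrix_to_graph_dot`` are hypothetical stand-ins for whatever library calls a
# real implementation would wrap.
#
#   class ExampleLearner(GraphLearner):
#       def learn_graph(self):
#           self._adjacency_matrix = self._run_discovery(self._data)
#           self._graph_dot = adjacency_matrix_to_graph_dot(self._adjacency_matrix, self._labels)
#           return self._graph_dot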
|
32135
|
import pdfkit
import boto3
s3 = boto3.client('s3')
def lambda_handler(event, context):
pdfkit.from_url('http://google.com', '/tmp/out.pdf')
with open('/tmp/out.pdf', 'rb') as f:
response = s3.put_object(
Bucket='temp-awseabsgddev',
Key='juni/google.pdf',
Body=f.read()
)
return {'response': response}
|
32142
|
from typing import List
class Solution:
def canThreePartsEqualSum(self, A: List[int]) -> bool:
# Since all three parts are equal, the sum of the whole array must be a multiple of 3,
# so the sum of each part must equal the total sum divided by 3
quotient, remainder = divmod(sum(A), 3)
if remainder != 0:
return False
subarray = 0
partitions = 0
for num in A:
subarray += num
if subarray == quotient:
partitions += 1
subarray = 0
# Check that the array splits into at least 3 partitions
return partitions >= 3
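# Quick standalone check of the partition logic (sample inputs):
if __name__ == "__main__":
    print(Solution().canThreePartsEqualSum([0, 2, 1, -6, 6, -7, 9, 1, 2, 0, 1]))  # True
    print(Solution().canThreePartsEqualSum([0, 2, 1, -6, 6, 7, 9, -1, 2, 0, 1]))  # False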
|
32144
|
import os
import sys
sys.path.append('.')
import argparse
import numpy as np
import os.path as osp
from multiprocessing import Process, Pool
from glob import glob
from tqdm import tqdm
import tensorflow as tf
from PIL import Image
from lib.core.config import INSTA_DIR, INSTA_IMG_DIR
def process_single_record(fname, outdir, split):
sess = tf.Session()
#print(fname)
record_name = fname.split('/')[-1]
for vid_idx, serialized_ex in enumerate(tf.python_io.tf_record_iterator(fname)):
#print(vid_idx)
os.makedirs(osp.join(outdir, split, record_name, str(vid_idx)), exist_ok=True)
example = tf.train.Example()
example.ParseFromString(serialized_ex)
N = int(example.features.feature['meta/N'].int64_list.value[0])
images_data = example.features.feature[
'image/encoded'].bytes_list.value
for i in range(N):
image = np.expand_dims(sess.run(tf.image.decode_jpeg(images_data[i], channels=3)), axis=0)
#video.append(image)
image = Image.fromarray(np.squeeze(image, axis=0))
image.save(osp.join(outdir, split, record_name, str(vid_idx), str(i)+".jpg"))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--inp_dir', type=str, help='tfrecords file path', default=INSTA_DIR)
parser.add_argument('--n', type=int, help='total num of workers')
parser.add_argument('--i', type=int, help='current index of worker (from 0 to n-1)')
parser.add_argument('--split', type=str, help='train or test')
parser.add_argument('--out_dir', type=str, help='output images path', default=INSTA_IMG_DIR)
args = parser.parse_args()
fpaths = glob(f'{args.inp_dir}/{args.split}/*.tfrecord')
fpaths = sorted(fpaths)
total = len(fpaths)
fpaths = fpaths[args.i*total//args.n : (args.i+1)*total//args.n]
#print(fpaths)
#print(len(fpaths))
os.makedirs(args.out_dir, exist_ok=True)
for idx, fp in enumerate(fpaths):
process_single_record(fp, args.out_dir, args.split)
|
32150
|
STATE_CITY = "fluids_state_city"
OBS_QLIDAR = "fluids_obs_qlidar"
OBS_GRID = "fluids_obs_grid"
OBS_BIRDSEYE = "fluids_obs_birdseye"
OBS_NONE = "fluids_obs_none"
BACKGROUND_CSP = "fluids_background_csp"
BACKGROUND_NULL = "fluids_background_null"
REWARD_PATH = "fluids_reward_path"
REWARD_NONE = "fluids_reward_none"
RIGHT = "RIGHT"
LEFT = "LEFT"
STRAIGHT = "STRAIGHT"
RED = (0xf6, 0x11, 0x46)
YELLOW = (0xfc, 0xef, 0x5e)
GREEN = (0, 0xc6, 0x44)
|
32173
|
import numpy as np
from JacobiPolynomials import *
import math
# 1D - LINE
#------------------------------------------------------------------------------------------------------------------#
#------------------------------------------------------------------------------------------------------------------#
#------------------------------------------------------------------------------------------------------------------#
def NormalisedJacobi1D(C,x):
p = np.zeros(C+2)
for i in range(0,C+2):
p[i] = JacobiPolynomials(i,x,0,0)[-1]*np.sqrt((2.*i+1.)/2.)
return p
# 2D - TRI
#------------------------------------------------------------------------------------------------------------------#
#------------------------------------------------------------------------------------------------------------------#
#------------------------------------------------------------------------------------------------------------------#
def NormalisedJacobi2D(C,x):
""" Computes the orthogonal base of 2D polynomials of degree less
or equal to C+1 at the point x=(r,s) in [-1,1]^2 (i.e. on the reference quad)
"""
N = int( (C+2.)*(C+3.)/2. )
p = np.zeros(N)
r = x[0]; s = x[1]
# Ordering: first by increasing degree, then lexicographic
ncount = 0 # counter for the polynomials order
# Loop on degree
for nDeg in range(0,C+2):
# Loop by increasing i
for i in range(0,nDeg+1):
if i==0:
p_i = 1.; q_i = 1.
else:
p_i = JacobiPolynomials(i,r,0.,0.)[-1]; q_i = q_i*(1.-s)/2.
# Value for j
j = nDeg-i
if j==0:
p_j = 1.
else:
p_j = JacobiPolynomials(j,s,2.*i+1.,0.)[-1]
# factor = np.sqrt( (2.*i+1.)*(i+j+1.)/2. )
factor = math.sqrt( (2.*i+1.)*(i+j+1.)/2. )
p[ncount] = ( p_i*q_i*p_j )*factor
ncount += 1
return p
def NormalisedJacobiTri(C,x):
""" Computes the orthogonal base of 2D polynomials of degree less
or equal to n at the point x=(xi,eta) in the reference triangle
"""
xi = x[0]; eta = x[1]
if eta==1:
r = -1.; s=1.;
else:
r = 2.*(1+xi)/(1.-eta)-1.
s = eta
return NormalisedJacobi2D(C,np.array([r,s]))
def GradNormalisedJacobiTri(C,x,EvalOpt=0):
""" Computes the orthogonal base of 2D polynomials of degree less
or equal to n at the point x=(xi,eta) in the reference triangle
"""
N = int((C+2.)*(C+3.)/2.)
p = np.zeros(N);
dp_dxi = np.zeros(N)
dp_deta = np.zeros(N)
r = x[0]; s = x[1]
# THIS MAY RUIN THE CONVERGENCE, BUT FOR POST PROCESSING ITS FINE
if EvalOpt==1:
if s==1:
s=0.99999999999999
xi = (1.+r)*(1.-s)/2.-1
eta = s
dr_dxi = 2./(1.-eta)
dr_deta = 2.*(1.+xi)/(1.-eta)**2
# Derivative of s is not needed because s=eta
# Ordering: first by increasing degree, then lexicographic
ncount = 0
# Loop on degree
for nDeg in range(0,C+2):
# Loop increasing i
for i in range(0,nDeg+1):
if i==0:
p_i = 1; q_i = 1; dp_i = 0; dq_i = 0
else:
p_i = JacobiPolynomials(i,r,0.,0.)[-1]; dp_i = JacobiPolynomials(i-1,r,1.,1.)[-1]*(i+1.)/2.
q_i = q_i*(1.-s)/2.; dq_i = 1.*q_i*(-i)/(1-s)
# Value for j
j = nDeg-i
if j==0:
p_j = 1; dp_j = 0
else:
p_j = JacobiPolynomials(j,s,2.*i+1.,0.)[-1]; dp_j = JacobiPolynomials(j-1,s,2.*i+2.,1.)[-1]*(j+2.*i+2.)/2.
factor = math.sqrt( (2.*i+1.)*(i+j+1.)/2. )
# Normalized polynomial
p[ncount] = ( p_i*q_i*p_j )*factor
# Derivatives with respect to (r,s)
dp_dr = ( (dp_i)*q_i*p_j )*factor
dp_ds = ( p_i*(dq_i*p_j+q_i*dp_j) )*factor
# Derivatives with respect to (xi,eta)
dp_dxi[ncount] = dp_dr*dr_dxi
dp_deta[ncount] = dp_dr*dr_deta + dp_ds
ncount += 1
return p,dp_dxi,dp_deta
# 3D - TET
#------------------------------------------------------------------------------------------------------------------#
#------------------------------------------------------------------------------------------------------------------#
#------------------------------------------------------------------------------------------------------------------#
def NormalisedJacobi3D(C,x):
"""Computes the orthogonal base of 3D polynomials of degree less
or equal to n at the point x=(r,s,t) in [-1,1]^3
"""
N = int((C+2)*(C+3)*(C+4)/6.)
p = np.zeros(N)
r = x[0]; s = x[1]; t = x[2]
# Ordering: first by increasing degree, then lexicographic
ncount = 0
# Loop on degree
for nDeg in range(0,C+2):
# Loop increasing i
for i in range(0,nDeg+1):
if i==0:
p_i = 1; q_i = 1
else:
p_i = JacobiPolynomials(i,r,0.,0.)[-1]; q_i = q_i*(1.-s)/2.
# Loop increasing j
for j in range(0,nDeg-i+1):
if j==0:
p_j = 1; q_j = ((1.-t)/2.)**i
else:
p_j = JacobiPolynomials(j,s,2.*i+1.,0.)[-1]; q_j = q_j*(1.-t)/2.
# Value for k
k = nDeg-(i+j)
if k==0:
p_k = 1.
else:
p_k = JacobiPolynomials(k,t,2.*(i+j)+2.,0.)[-1]
factor = math.sqrt( (2.*i+1.)*(i+j+1.)*(2.*(i+j+k)+3.)/4. )
p[ncount] = ( p_i*q_i*p_j*q_j*p_k )*factor
ncount += 1
return p
def NormalisedJacobiTet(C,x):
"""Computes the orthogonal base of 3D polynomials of degree less
or equal to n at the point x=(r,s,t) in [-1,1]^3
"""
xi = x[0]; eta = x[1]; zeta = x[2]
if (eta+zeta)==0:
r = -1; s=1
elif zeta==1:
r = -1; s=1 # or s=-1 (check that nothing changes)
else:
r = -2.*(1+xi)/(eta+zeta)-1.;
s = 2.*(1+eta)/(1-zeta)-1.;
t = zeta
return NormalisedJacobi3D(C,[r,s,t])
# return NormalisedJacobi3D_Native(C,[r,s,t])
def GradNormalisedJacobiTet(C,x,EvalOpt=0):
"""Computes the orthogonal base of 3D polynomials of degree less
or equal to n at the point x=(r,s,t) in [-1,1]^3
"""
N = int((C+2)*(C+3)*(C+4)/6.)
p = np.zeros(N)
dp_dxi = np.zeros(N)
dp_deta = np.zeros(N)
dp_dzeta = np.zeros(N)
r = x[0]; s = x[1]; t = x[2]
# THIS MAY RUIN THE CONVERGENCE, BUT FOR POST PROCESSING ITS FINE
if EvalOpt==1:
if t==1.:
t=0.999999999999
if np.isclose(s,1.):
s=0.999999999999
if np.isclose(s,1.):
s=0.99999999999999
eta = (1./2.)*(s-s*t-1.-t)
xi = -(1./2.)*(r+1)*(eta+t)-1.
zeta = 1.0*t
# THIS MAY RUIN THE CONVERGENCE, BUT FOR POST PROCESSING ITS FINE
if eta == 0. and zeta == 0.:
eta = 1.0e-14
zeta = 1e-14
eta_zeta = eta+zeta
if np.isclose(eta_zeta,0.):
eta_zeta = 0.000000001
dr_dxi = -2./eta_zeta
dr_deta = 2.*(1.+xi)/eta_zeta**2
dr_dzeta = dr_deta
ds_deta = 2./(1.-zeta)
ds_dzeta = 2.*(1.+eta)/(1.-zeta)**2
# Derivative of t is not needed because t=zeta
#--------------------------------------------------------
# if np.allclose(eta+zeta,0):
# dr_dxi = -2./(0.001)**2
# dr_deta = 2.*(1.+xi)/(0.001)**2
# else:
# dr_dxi = -2./(eta+zeta)
# dr_deta = 2.*(1.+xi)/(eta+zeta)**2
# dr_dzeta = dr_deta
# if np.allclose(eta+zeta,0):
# ds_deta = 2./(0.001)
# ds_dzeta = 2.*(1.+eta)/(0.001)**2
# else:
# ds_deta = 2./(1.-zeta)
# ds_dzeta = 2.*(1.+eta)/(1.-zeta)**2
#--------------------------------------------------------
# Ordering: first by increasing degree, then lexicographic
ncount = 0
# Loop on degree
for nDeg in range(0,C+2):
# Loop increasing i
for i in range(0,nDeg+1):
if i==0:
p_i = 1.; q_i = 1.; dp_i = 0.; dq_i = 0.
else:
p_i = JacobiPolynomials(i,r,0.,0.)[-1]; dp_i = JacobiPolynomials(i-1,r,1.,1.)[-1]*(i+1.)/2.
q_i = q_i*(1.-s)/2.; dq_i = q_i*(-i)/(1.-s)
# Loop increasing j
for j in range(0,nDeg-i+1):
if j==0:
p_j = 1; q_j = ((1.-t)/2.)**i; dp_j = 0; dq_j = q_j*(-(i+j))/(1.-t);
else:
p_j = JacobiPolynomials(j,s,2.*i+1.,0.)[-1]; dp_j = JacobiPolynomials(j-1,s,2.*i+2.,1.)[-1]*(j+2.*i+2.)/2.
q_j = q_j*(1.-t)/2.; dq_j = q_j*(-(i+j))/(1.-t)
# Value for k
k = nDeg-(i+j);
if k==0:
p_k = 1.; dp_k = 0.;
else:
p_k = JacobiPolynomials(k,t,2.*(i+j)+2.,0.)[-1]; dp_k = JacobiPolynomials(k-1,t,2.*(i+j)+3.,1.)[-1]*(k+2.*i+2.*j+3.)/2.
factor = math.sqrt( (2.*i+1.)*(i+j+1.)*(2.*(i+j+k)+3.)/4. )
# Normalized polynomial
p[ncount] = ( p_i*q_i*p_j*q_j*p_k )*factor
# Derivatives with respect to (r,s,t)
dp_dr = ( (dp_i)*q_i*p_j*q_j*p_k )*factor
dp_ds = ( p_i*(dq_i*p_j+q_i*dp_j)*q_j*p_k )*factor
dp_dt = ( p_i*q_i*p_j*(dq_j*p_k+q_j*dp_k) )*factor
# Derivatives with respect to (xi,eta,zeta)
dp_dxi[ncount] = dp_dr*dr_dxi
dp_deta[ncount] = dp_dr*dr_deta + dp_ds*ds_deta
dp_dzeta[ncount] = dp_dr*dr_dzeta + dp_ds*ds_dzeta + dp_dt
ncount += 1
return p,dp_dxi,dp_deta,dp_dzeta
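# Hedged sanity check (assumes JacobiPolynomials(n, x, a, b) returns the values up to degree n,
# as it is used above): the first 1D basis function should be the constant mode sqrt(1/2) ~ 0.7071.
if __name__ == "__main__":
    print(NormalisedJacobi1D(0, 0.0))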
|
32192
|
from conans import ConanFile, tools
class HapplyConan(ConanFile):
name = "happly"
url = "https://github.com/conan-io/conan-center-index"
homepage = "https://github.com/nmwsharp/happly"
topics = ("conan", "happly", "ply", "3D")
license = "MIT"
description = "A C++ header-only parser for the PLY file format. Parse .ply happily!"
settings = "compiler"
no_copy_source = True
@property
def _source_subfolder(self):
return "source_subfolder"
def validate(self):
if self.settings.compiler.cppstd:
tools.check_min_cppstd(self, 11)
def source(self):
tools.get(**self.conan_data["sources"][self.version], destination=self._source_subfolder, strip_root=True)
def package(self):
self.copy("LICENSE", src=self._source_subfolder, dst="licenses")
self.copy("happly.h", src=self._source_subfolder, dst="include")
def package_id(self):
self.info.header_only()
|
32206
|
import pytest
import stweet as st
from tests.test_util import get_temp_test_file_name, get_tweets_to_tweet_output_test, \
two_lists_assert_equal
def test_csv_serialization():
csv_filename = get_temp_test_file_name('csv')
tweets_collector = st.CollectorTweetOutput()
get_tweets_to_tweet_output_test([
st.CsvTweetOutput(csv_filename),
tweets_collector
])
tweets_from_csv = st.read_tweets_from_csv_file(csv_filename)
two_lists_assert_equal(tweets_from_csv, tweets_collector.get_raw_list())
def test_file_json_lines_serialization():
jl_filename = get_temp_test_file_name('jl')
tweets_collector = st.CollectorTweetOutput()
get_tweets_to_tweet_output_test([
st.JsonLineFileTweetOutput(jl_filename),
tweets_collector
])
tweets_from_jl = st.read_tweets_from_json_lines_file(jl_filename)
two_lists_assert_equal(tweets_from_jl, tweets_collector.get_raw_list())
|
32311
|
import os
os.system("pip install tqsdk -i https://pypi.tuna.tsinghua.edu.cn/simple")
os.system("pip install numba -i https://pypi.tuna.tsinghua.edu.cn/simple")
os.system("pip install janus -i https://pypi.tuna.tsinghua.edu.cn/simple")
os.system("pip install redis -i https://pypi.tuna.tsinghua.edu.cn/simple")
os.system("pip install aioredis -i https://pypi.tuna.tsinghua.edu.cn/simple")
os.system("pip install schedule -i https://pypi.tuna.tsinghua.edu.cn/simple")
os.system("pip install pyqt5 -i https://pypi.tuna.tsinghua.edu.cn/simple")
os.system("pip install PyQt5-tools -i http://pypi.douban.com/simple --trusted-host=pypi.douban.com")
|
32322
|
from opyoid.bindings.binding import Binding
from opyoid.bindings.binding_to_provider_adapter import BindingToProviderAdapter
from opyoid.bindings.registered_binding import RegisteredBinding
from opyoid.injection_context import InjectionContext
from opyoid.provider import Provider
from opyoid.utils import InjectedT
from .from_instance_provider import FromInstanceProvider
from .instance_binding import InstanceBinding
class InstanceBindingToProviderAdapter(BindingToProviderAdapter[InstanceBinding]):
"""Creates a Provider from an InstanceBinding."""
def accept(self, binding: Binding[InjectedT], context: InjectionContext[InjectedT]) -> bool:
return isinstance(binding, InstanceBinding)
def create(self,
binding: RegisteredBinding[InstanceBinding[InjectedT]],
context: InjectionContext[InjectedT]) -> Provider[InjectedT]:
return FromInstanceProvider(binding.raw_binding.bound_instance)
|
32359
|
from abc import ABCMeta, abstractmethod
from functools import partial
from typing import Tuple, Union
import numexpr
import numpy as np
from scipy import sparse, special
from tabmat import MatrixBase, StandardizedMatrix
from ._functions import (
binomial_logit_eta_mu_deviance,
binomial_logit_rowwise_gradient_hessian,
gamma_deviance,
gamma_log_eta_mu_deviance,
gamma_log_likelihood,
gamma_log_rowwise_gradient_hessian,
normal_deviance,
normal_identity_eta_mu_deviance,
normal_identity_rowwise_gradient_hessian,
normal_log_likelihood,
poisson_deviance,
poisson_log_eta_mu_deviance,
poisson_log_likelihood,
poisson_log_rowwise_gradient_hessian,
tweedie_deviance,
tweedie_log_eta_mu_deviance,
tweedie_log_likelihood,
tweedie_log_rowwise_gradient_hessian,
)
from ._link import IdentityLink, Link, LogitLink, LogLink
from ._util import _safe_lin_pred, _safe_sandwich_dot
class ExponentialDispersionModel(metaclass=ABCMeta):
r"""Base class for reproductive Exponential Dispersion Models (EDM).
The PDF of :math:`Y \sim \mathrm{EDM}(\mu, \phi)` is given by
.. math::
p(y \mid \theta, \phi)
        &= c(y, \phi) \exp((\theta y - A(\theta)) / \phi) \\
&= \tilde{c}(y, \phi) \exp(-d(y, \mu) / (2\phi))
with mean :math:`\mathrm{E}(Y) = A'(\theta) = \mu`, variance
:math:`\mathrm{var}(Y) = \phi \cdot v(\mu)`, unit variance
:math:`v(\mu)` and unit deviance :math:`d(y, \mu)`.
Properties
----------
lower_bound
upper_bound
include_lower_bound
include_upper_bound
Methods
-------
in_y_range
unit_variance
unit_variance_derivative
variance
variance_derivative
unit_deviance
unit_deviance_derivative
deviance
deviance_derivative
starting_mu
_mu_deviance_derivative
eta_mu_deviance
gradient_hessian
References
----------
https://en.wikipedia.org/wiki/Exponential_dispersion_model.
"""
@property
@abstractmethod
def lower_bound(self) -> float:
"""Get the lower bound of values for the EDM."""
pass
@property
@abstractmethod
def upper_bound(self) -> float:
"""Get the upper bound of values for the EDM."""
pass
@property
def include_lower_bound(self) -> bool:
"""Return whether ``lower_bound`` is allowed as a value of ``y``."""
pass
@property
def include_upper_bound(self) -> bool:
"""Return whether ``upper_bound`` is allowed as a value of ``y``."""
pass
def in_y_range(self, x) -> np.ndarray:
"""Return ``True`` if ``x`` is in the valid range of the EDM.
Parameters
----------
x : array-like, shape (n_samples,)
Target values.
Returns
-------
np.ndarray
"""
if self.include_lower_bound:
if self.include_upper_bound:
return np.logical_and(
np.greater_equal(x, self.lower_bound),
np.less_equal(x, self.upper_bound),
)
else:
return np.logical_and(
np.greater_equal(x, self.lower_bound), np.less(x, self.upper_bound)
)
else:
if self.include_upper_bound:
return np.logical_and(
np.greater(x, self.lower_bound), np.less_equal(x, self.upper_bound)
)
else:
return np.logical_and(
np.greater(x, self.lower_bound), np.less(x, self.upper_bound)
)
@abstractmethod
def unit_variance(self, mu):
r"""Compute the unit variance function.
The unit variance :math:`v(\mu)` determines the variance as a function
of the mean :math:`\mu` by
:math:`\mathrm{var}(y_i) = (\phi / s_i) \times v(\mu_i)`. It can
also be derived from the unit deviance :math:`d(y, \mu)` as
.. math::
v(\mu) = \frac{2}{\frac{\partial^2 d(y, \mu)}{\partial\mu^2}}\big|_{y=\mu}.
See also :func:`variance`.
Parameters
----------
mu : array-like, shape (n_samples,)
Predicted mean.
"""
pass
@abstractmethod
def unit_variance_derivative(self, mu):
r"""Compute the derivative of the unit variance with respect to ``mu``.
Return :math:`v'(\mu)`.
Parameters
----------
mu : array-like, shape (n_samples,)
Predicted mean.
"""
pass
def variance(self, mu: np.ndarray, dispersion=1, sample_weight=1) -> np.ndarray:
r"""Compute the variance function.
The variance of :math:`Y_i \sim \mathrm{EDM}(\mu_i, \phi / s_i)` is
:math:`\mathrm{var}(Y_i) = (\phi / s_i) * v(\mu_i)`, with unit variance
:math:`v(\mu)` and weights :math:`s_i`.
Parameters
----------
mu : array-like, shape (n_samples,)
Predicted mean.
dispersion : float, optional (default=1)
Dispersion parameter :math:`\phi`.
sample_weight : array-like, shape (n_samples,), optional (default=1)
            Weights or exposure to which variance is inversely proportional.
Returns
-------
array-like, shape (n_samples,)
"""
return self.unit_variance(mu) * dispersion / sample_weight
def variance_derivative(self, mu, dispersion=1, sample_weight=1):
r"""Compute the derivative of the variance with respect to ``mu``.
The derivative of the variance is equal to
:math:`(\phi / s_i) * v'(\mu_i)`, where :math:`v(\mu)` is the unit
variance and :math:`s_i` are weights.
Parameters
----------
mu : array-like, shape (n_samples,)
Predicted mean.
dispersion : float, optional (default=1)
Dispersion parameter :math:`\phi`.
sample_weight : array-like, shape (n_samples,), optional (default=1)
            Weights or exposure to which variance is inversely proportional.
Returns
-------
array-like, shape (n_samples,)
"""
return self.unit_variance_derivative(mu) * dispersion / sample_weight
@abstractmethod
def unit_deviance(self, y, mu):
r"""Compute the unit deviance.
In terms of the log likelihood :math:`L`, the unit deviance is
:math:`-2\phi\times [L(y, \mu, \phi) - L(y, y, \phi)].`
Parameters
----------
y : array-like, shape (n_samples,)
Target values.
mu : array-like, shape (n_samples,)
Predicted mean.
"""
pass
def unit_deviance_derivative(self, y, mu):
r"""Compute the derivative of the unit deviance with respect to ``mu``.
The derivative of the unit deviance is given by
:math:`-2 \times (y - \mu) / v(\mu)`, where :math:`v(\mu)` is the unit
variance.
Parameters
----------
y : array-like, shape (n_samples,)
Target values.
mu : array-like, shape (n_samples,)
Predicted mean.
Returns
-------
array-like, shape (n_samples,)
"""
return -2 * (y - mu) / self.unit_variance(mu)
def deviance(self, y, mu, sample_weight=1):
r"""Compute the deviance.
The deviance is a weighted sum of the unit deviances,
:math:`\sum_i s_i \times d(y_i, \mu_i)`, where :math:`d(y, \mu)` is the
unit deviance and :math:`s` are weights. In terms of the log likelihood,
it is :math:`-2\phi \times [L(y, \mu, \phi / s) - L(y, y, \phi / s)]`.
Parameters
----------
y : array-like, shape (n_samples,)
Target values.
mu : array-like, shape (n_samples,)
Predicted mean.
sample_weight : array-like, shape (n_samples,), optional (default=1)
Weights or exposure to which variance is inversely proportional.
Returns
-------
float
"""
if sample_weight is None:
return np.sum(self.unit_deviance(y, mu))
else:
return np.sum(self.unit_deviance(y, mu) * sample_weight)
def deviance_derivative(self, y, mu, sample_weight=1):
r"""Compute the derivative of the deviance with respect to ``mu``.
Parameters
----------
y : array-like, shape (n_samples,)
Target values.
mu : array-like, shape (n_samples,)
Predicted mean.
sample_weight : array-like, shape (n_samples,) (default=1)
            Weights or exposure to which variance is inversely proportional.
Returns
-------
array-like, shape (n_samples,)
"""
return sample_weight * self.unit_deviance_derivative(y, mu)
def _mu_deviance_derivative(
self,
coef: np.ndarray,
X,
y: np.ndarray,
sample_weight: np.ndarray,
link: Link,
offset: np.ndarray = None,
) -> Tuple[np.ndarray, np.ndarray]:
"""Compute ``mu`` and the derivative of the deviance \
with respect to coefficients."""
lin_pred = _safe_lin_pred(X, coef, offset)
mu = link.inverse(lin_pred)
d1 = link.inverse_derivative(lin_pred)
temp = d1 * self.deviance_derivative(y, mu, sample_weight)
if coef.size == X.shape[1] + 1:
devp = np.concatenate(([temp.sum()], temp @ X))
else:
devp = temp @ X # same as X.T @ temp
return mu, devp
def eta_mu_deviance(
self,
link: Link,
factor: float,
cur_eta: np.ndarray,
X_dot_d: np.ndarray,
y: np.ndarray,
sample_weight: np.ndarray,
):
"""
Compute ``eta``, ``mu`` and the deviance.
Compute:
* the linear predictor, ``eta``, as ``cur_eta + factor * X_dot_d``;
* the link-function-transformed prediction, ``mu``;
* the deviance.
Returns
-------
numpy.ndarray, shape (X.shape[0],)
The linear predictor, ``eta``.
numpy.ndarray, shape (X.shape[0],)
The link-function-transformed prediction, ``mu``.
float
The deviance.
"""
# eta_out and mu_out are filled inside self._eta_mu_deviance,
# avoiding allocating new arrays for every line search loop
eta_out = np.empty_like(cur_eta)
mu_out = np.empty_like(cur_eta)
deviance = self._eta_mu_deviance(
link, factor, cur_eta, X_dot_d, y, sample_weight, eta_out, mu_out
)
return eta_out, mu_out, deviance
def _eta_mu_deviance(
self,
link: Link,
factor: float,
cur_eta: np.ndarray,
X_dot_d: np.ndarray,
y: np.ndarray,
sample_weight: np.ndarray,
eta_out: np.ndarray,
mu_out: np.ndarray,
):
"""
Update ``eta`` and ``mu`` and compute the deviance.
This is a default implementation that should work for all valid
distributions and link functions. To implement a custom optimized
version for a specific distribution and link function, please override
this function in the subclass.
Returns
-------
float
"""
eta_out[:] = cur_eta + factor * X_dot_d
mu_out[:] = link.inverse(eta_out)
return self.deviance(y, mu_out, sample_weight=sample_weight)
def rowwise_gradient_hessian(
self,
link: Link,
coef: np.ndarray,
dispersion,
X: Union[MatrixBase, StandardizedMatrix],
y: np.ndarray,
sample_weight: np.ndarray,
eta: np.ndarray,
mu: np.ndarray,
offset: np.ndarray = None,
):
"""
Compute the gradient and negative Hessian of the log likelihood row-wise.
Returns
-------
numpy.ndarray, shape (X.shape[0],)
The gradient of the log likelihood, row-wise.
numpy.ndarray, shape (X.shape[0],)
The negative Hessian of the log likelihood, row-wise.
"""
gradient_rows = np.empty_like(mu)
hessian_rows = np.empty_like(mu)
self._rowwise_gradient_hessian(
link, y, sample_weight, eta, mu, gradient_rows, hessian_rows
)
# To form the full Hessian matrix from the IRLS sample_weight:
# hessian_matrix = _safe_sandwich_dot(X, hessian_rows, intercept=intercept)
return gradient_rows, hessian_rows
def _rowwise_gradient_hessian(
self, link, y, sample_weight, eta, mu, gradient_rows, hessian_rows
):
"""
Update ``gradient_rows`` and ``hessian_rows`` in place.
This is a default implementation that should work for all valid
distributions and link functions. To implement a custom optimized
version for a specific distribution and link function, please override
this function in the subclass.
"""
# FOR TWEEDIE: sigma_inv = weights / (mu ** p) during optimization bc phi = 1
sigma_inv = get_one_over_variance(self, link, mu, eta, 1.0, sample_weight)
d1 = link.inverse_derivative(eta) # = h'(eta)
# Alternatively:
# h'(eta) = h'(g(mu)) = 1/g'(mu), note that h is inverse of g
# d1 = 1./link.derivative(mu)
d1_sigma_inv = d1 * sigma_inv
gradient_rows[:] = d1_sigma_inv * (y - mu)
hessian_rows[:] = d1 * d1_sigma_inv
def _fisher_information(
self, link, X, y, mu, sample_weight, dispersion, fit_intercept
):
"""Compute the expected information matrix.
Parameters
----------
link : Link
A link function (i.e. an instance of :class:`~glum._link.Link`).
X : array-like
Training data.
y : array-like
Target values.
mu : array-like
Predicted mean.
sample_weight : array-like
Weights or exposure to which variance is inversely proportional.
dispersion : float
The dispersion parameter.
fit_intercept : bool
Whether the model has an intercept.
"""
W = (link.inverse_derivative(link.link(mu)) ** 2) * get_one_over_variance(
self, link, mu, link.inverse(mu), dispersion, sample_weight
)
return _safe_sandwich_dot(X, W, intercept=fit_intercept)
def _observed_information(
self, link, X, y, mu, sample_weight, dispersion, fit_intercept
):
"""Compute the observed information matrix.
Parameters
----------
X : array-like
Training data.
y : array-like
Target values.
mu : array-like
Predicted mean.
sample_weight : array-like
Weights or exposure to which variance is inversely proportional.
dispersion : float
The dispersion parameter.
fit_intercept : bool
Whether the model has an intercept.
"""
linpred = link.link(mu)
W = (
-link.inverse_derivative2(linpred) * (y - mu)
+ (link.inverse_derivative(linpred) ** 2)
* (
1
+ (y - mu) * self.unit_variance_derivative(mu) / self.unit_variance(mu)
)
) * get_one_over_variance(self, link, mu, linpred, dispersion, sample_weight)
return _safe_sandwich_dot(X, W, intercept=fit_intercept)
def _score_matrix(self, link, X, y, mu, sample_weight, dispersion, fit_intercept):
"""Compute the score.
Parameters
----------
X : array-like
Training data.
y : array-like
Target values.
mu : array-like
Predicted mean.
sample_weight : array-like
Weights or exposure to which variance is inversely proportional.
dispersion : float
The dispersion parameter.
fit_intercept : bool
Whether the model has an intercept.
"""
linpred = link.link(mu)
W = (
get_one_over_variance(self, link, mu, linpred, dispersion, sample_weight)
* link.inverse_derivative(linpred)
* (y - mu)
).reshape(-1, 1)
if fit_intercept:
if sparse.issparse(X):
return sparse.hstack((W, X.multiply(W)))
else:
return np.hstack((W, np.multiply(X, W)))
else:
if sparse.issparse(X):
return X.multiply(W)
else:
return np.multiply(X, W)
def dispersion(self, y, mu, sample_weight=None, ddof=1, method="pearson") -> float:
r"""Estimate the dispersion parameter :math:`\phi`.
Parameters
----------
y : array-like, shape (n_samples,)
Target values.
mu : array-like, shape (n_samples,)
Predicted mean.
sample_weight : array-like, shape (n_samples,), optional (default=1)
Weights or exposure to which variance is inversely proportional.
ddof : int, optional (default=1)
Degrees of freedom consumed by the model for ``mu``.
        method : {'pearson', 'deviance'}, optional (default='pearson')
Whether to base the estimate on the Pearson residuals or the deviance.
Returns
-------
float
"""
y, mu, sample_weight = _as_float_arrays(y, mu, sample_weight)
if method == "pearson":
pearson_residuals = ((y - mu) ** 2) / self.unit_variance(mu)
if sample_weight is None:
numerator = pearson_residuals.sum()
else:
numerator = np.dot(pearson_residuals, sample_weight)
elif method == "deviance":
numerator = self.deviance(y, mu, sample_weight)
else:
raise NotImplementedError(f"Method {method} hasn't been implemented.")
if sample_weight is None:
return numerator / (len(y) - ddof)
else:
return numerator / (sample_weight.sum() - ddof)
class TweedieDistribution(ExponentialDispersionModel):
r"""A class for the Tweedie distribution.
A Tweedie distribution with mean :math:`\mu = \mathrm{E}(Y)` is uniquely
defined by its mean-variance relationship
:math:`\mathrm{var}(Y) \propto \mu^{\mathrm{power}}`.
Special cases are:
====== ================
Power Distribution
====== ================
0 Normal
1 Poisson
(1, 2) Compound Poisson
2 Gamma
3 Inverse Gaussian
====== ================
Parameters
----------
power : float, optional (default=0)
The variance power of the `unit_variance`
:math:`v(\mu) = \mu^{\mathrm{power}}`. For
:math:`0 < \mathrm{power} < 1`, no distribution exists.
"""
upper_bound = np.Inf
include_upper_bound = False
def __init__(self, power=0):
# validate power and set _upper_bound, _include_upper_bound attrs
self.power = power
@property
def lower_bound(self) -> Union[float, int]:
"""Return the lowest value of ``y`` allowed."""
if self.power <= 0:
return -np.Inf
if self.power >= 1:
return 0
raise ValueError
@property
def include_lower_bound(self) -> bool:
"""Return whether ``lower_bound`` is allowed as a value of ``y``."""
if self.power <= 0:
return False
if (self.power >= 1) and (self.power < 2):
return True
if self.power >= 2:
return False
raise ValueError
@property
def power(self) -> float:
"""Return the Tweedie power parameter."""
return self._power
@power.setter
def power(self, power):
if not isinstance(power, (int, float)):
raise TypeError(f"power must be an int or float, input was {power}")
if (power > 0) and (power < 1):
raise ValueError("For 0<power<1, no distribution exists.")
# Prevents upcasting when working with 32-bit data
self._power = power if isinstance(power, int) else np.float32(power)
def unit_variance(self, mu: np.ndarray) -> np.ndarray:
"""Compute the unit variance of a Tweedie distribution ``v(mu) = mu^power``.
Parameters
----------
mu : array-like, shape (n_samples,)
Predicted mean.
Returns
-------
numpy.ndarray, shape (n_samples,)
"""
p = self.power # noqa: F841
return numexpr.evaluate("mu ** p")
def unit_variance_derivative(self, mu: np.ndarray) -> np.ndarray:
r"""Compute the derivative of the unit variance of a Tweedie distribution.
Equation: :math:`v(\mu) = p \times \mu^{(p-1)}`.
Parameters
----------
mu : array-like, shape (n_samples,)
Predicted mean.
Returns
-------
numpy.ndarray, shape (n_samples,)
"""
p = self.power # noqa: F841
return numexpr.evaluate("p * mu ** (p - 1)")
def deviance(self, y, mu, sample_weight=None) -> float:
"""Compute the deviance.
Parameters
----------
y : array-like, shape (n_samples,)
Target values.
mu : array-like, shape (n_samples,)
Predicted mean.
sample_weight : array-like, shape (n_samples,), optional (default=1)
Sample weights.
"""
p = self.power
y, mu, sample_weight = _as_float_arrays(y, mu, sample_weight)
sample_weight = np.ones_like(y) if sample_weight is None else sample_weight
# NOTE: the dispersion parameter is only necessary to convey
# type information on account of a bug in Cython
if p == 0:
return normal_deviance(y, sample_weight, mu, dispersion=1.0)
if p == 1:
return poisson_deviance(y, sample_weight, mu, dispersion=1.0)
elif p == 2:
return gamma_deviance(y, sample_weight, mu, dispersion=1.0)
else:
return tweedie_deviance(y, sample_weight, mu, p=float(p))
def unit_deviance(self, y, mu):
"""Get the deviance of each observation."""
p = self.power
if p == 0: # Normal distribution
return (y - mu) ** 2
if p == 1: # Poisson distribution
return 2 * (special.xlogy(y, y / mu) - y + mu)
elif p == 2: # Gamma distribution
return 2 * (np.log(mu / y) + y / mu - 1)
else:
mu1mp = mu ** (1 - p)
return 2 * (
(np.maximum(y, 0) ** (2 - p)) / ((1 - p) * (2 - p))
- y * mu1mp / (1 - p)
+ mu * mu1mp / (2 - p)
)
def _rowwise_gradient_hessian(
self, link, y, sample_weight, eta, mu, gradient_rows, hessian_rows
):
f = None
if self.power == 0 and isinstance(link, IdentityLink):
f = normal_identity_rowwise_gradient_hessian
elif self.power == 1 and isinstance(link, LogLink):
f = poisson_log_rowwise_gradient_hessian
elif self.power == 2 and isinstance(link, LogLink):
f = gamma_log_rowwise_gradient_hessian
elif 1 < self.power < 2 and isinstance(link, LogLink):
f = partial(tweedie_log_rowwise_gradient_hessian, p=self.power)
if f is not None:
return f(y, sample_weight, eta, mu, gradient_rows, hessian_rows)
return super()._rowwise_gradient_hessian(
link, y, sample_weight, eta, mu, gradient_rows, hessian_rows
)
def _eta_mu_deviance(
self,
link: Link,
factor: float,
cur_eta: np.ndarray,
X_dot_d: np.ndarray,
y: np.ndarray,
sample_weight: np.ndarray,
eta_out: np.ndarray,
mu_out: np.ndarray,
):
f = None
if self.power == 0 and isinstance(link, IdentityLink):
f = normal_identity_eta_mu_deviance
elif self.power == 1 and isinstance(link, LogLink):
f = poisson_log_eta_mu_deviance
elif self.power == 2 and isinstance(link, LogLink):
f = gamma_log_eta_mu_deviance
elif 1 < self.power < 2 and isinstance(link, LogLink):
f = partial(tweedie_log_eta_mu_deviance, p=self.power)
if f is not None:
return f(cur_eta, X_dot_d, y, sample_weight, eta_out, mu_out, factor)
return super()._eta_mu_deviance(
link, factor, cur_eta, X_dot_d, y, sample_weight, eta_out, mu_out
)
def log_likelihood(self, y, mu, sample_weight=None, dispersion=None) -> float:
r"""Compute the log likelihood.
For ``1 < power < 2``, we use the series approximation by Dunn and Smyth
(2005) to compute the normalization term.
Parameters
----------
y : array-like, shape (n_samples,)
Target values.
mu : array-like, shape (n_samples,)
Predicted mean.
sample_weight : array-like, shape (n_samples,), optional (default=1)
Sample weights.
dispersion : float, optional (default=None)
Dispersion parameter :math:`\phi`. Estimated if ``None``.
"""
p = self.power
y, mu, sample_weight = _as_float_arrays(y, mu, sample_weight)
sample_weight = np.ones_like(y) if sample_weight is None else sample_weight
if (p != 1) and (dispersion is None):
dispersion = self.dispersion(y, mu, sample_weight)
if p == 0:
return normal_log_likelihood(y, sample_weight, mu, float(dispersion))
if p == 1:
# NOTE: the dispersion parameter is only necessary to convey
# type information on account of a bug in Cython
return poisson_log_likelihood(y, sample_weight, mu, 1.0)
elif p == 2:
return gamma_log_likelihood(y, sample_weight, mu, float(dispersion))
elif p < 2:
return tweedie_log_likelihood(
y, sample_weight, mu, float(p), float(dispersion)
)
else:
raise NotImplementedError
def dispersion(self, y, mu, sample_weight=None, ddof=1, method="pearson") -> float:
r"""Estimate the dispersion parameter :math:`\phi`.
Parameters
----------
y : array-like, shape (n_samples,)
Target values.
mu : array-like, shape (n_samples,)
Predicted mean.
sample_weight : array-like, shape (n_samples,), optional (default=None)
Weights or exposure to which variance is inversely proportional.
ddof : int, optional (default=1)
Degrees of freedom consumed by the model for ``mu``.
        method : {'pearson', 'deviance'}, optional (default='pearson')
Whether to base the estimate on the Pearson residuals or the deviance.
Returns
-------
float
"""
p = self.power # noqa: F841
y, mu, sample_weight = _as_float_arrays(y, mu, sample_weight)
if method == "pearson":
formula = "((y - mu) ** 2) / (mu ** p)"
if sample_weight is None:
return numexpr.evaluate(formula).sum() / (len(y) - ddof)
else:
formula = f"sample_weight * {formula}"
return numexpr.evaluate(formula).sum() / (sample_weight.sum() - ddof)
return super().dispersion(
y, mu, sample_weight=sample_weight, ddof=ddof, method=method
)
class NormalDistribution(TweedieDistribution):
"""Class for the Normal (a.k.a. Gaussian) distribution."""
def __init__(self):
super().__init__(power=0)
class PoissonDistribution(TweedieDistribution):
"""Class for the scaled Poisson distribution."""
def __init__(self):
super().__init__(power=1)
class GammaDistribution(TweedieDistribution):
"""Class for the Gamma distribution."""
def __init__(self):
super().__init__(power=2)
class InverseGaussianDistribution(TweedieDistribution):
"""Class for the scaled Inverse Gaussian distribution."""
def __init__(self):
super().__init__(power=3)
class GeneralizedHyperbolicSecant(ExponentialDispersionModel):
"""A class for the Generalized Hyperbolic Secant (GHS) distribution.
The GHS distribution is for targets ``y`` in ``(-∞, +∞)``.
"""
lower_bound = -np.Inf
upper_bound = np.Inf
include_lower_bound = False
include_upper_bound = False
def unit_variance(self, mu: np.ndarray) -> np.ndarray:
"""Get the unit-level expected variance.
See superclass documentation.
Parameters
----------
mu : array-like or float
Returns
-------
array-like
"""
return 1 + mu**2
def unit_variance_derivative(self, mu: np.ndarray) -> np.ndarray:
"""Get the derivative of the unit variance.
See superclass documentation.
Parameters
----------
mu : array-like or float
Returns
-------
array-like
"""
return 2 * mu
def unit_deviance(self, y: np.ndarray, mu: np.ndarray) -> np.ndarray:
"""Get the unit-level deviance.
See superclass documentation.
Parameters
----------
y : array-like
mu : array-like
Returns
-------
array-like
"""
return 2 * y * (np.arctan(y) - np.arctan(mu)) + np.log(
(1 + mu**2) / (1 + y**2)
)
class BinomialDistribution(ExponentialDispersionModel):
"""A class for the Binomial distribution.
The Binomial distribution is for targets ``y`` in ``[0, 1]``.
"""
lower_bound = 0
upper_bound = 1
include_lower_bound = True
include_upper_bound = True
def __init__(self):
return
def unit_variance(self, mu: np.ndarray) -> np.ndarray:
"""Get the unit-level expected variance.
See superclass documentation.
Parameters
----------
mu : array-like
Returns
-------
array-like
"""
return mu * (1 - mu)
def unit_variance_derivative(self, mu):
"""Get the derivative of the unit variance.
See superclass documentation.
Parameters
----------
mu : array-like or float
Returns
-------
array-like
"""
return 1 - 2 * mu
def unit_deviance(self, y: np.ndarray, mu: np.ndarray) -> np.ndarray:
"""Get the unit-level deviance.
See superclass documentation.
Parameters
----------
y : array-like
mu : array-like
Returns
-------
array-like
"""
# see Wooldridge and Papke (1996) for the fractional case
return -2 * (special.xlogy(y, mu) + special.xlogy(1 - y, 1 - mu))
def _rowwise_gradient_hessian(
self, link, y, sample_weight, eta, mu, gradient_rows, hessian_rows
):
if isinstance(link, LogitLink):
return binomial_logit_rowwise_gradient_hessian(
y, sample_weight, eta, mu, gradient_rows, hessian_rows
)
return super()._rowwise_gradient_hessian(
link, y, sample_weight, eta, mu, gradient_rows, hessian_rows
)
def _eta_mu_deviance(
self,
link: Link,
factor: float,
cur_eta: np.ndarray,
X_dot_d: np.ndarray,
y: np.ndarray,
sample_weight: np.ndarray,
eta_out: np.ndarray,
mu_out: np.ndarray,
):
if isinstance(link, LogitLink):
return binomial_logit_eta_mu_deviance(
cur_eta, X_dot_d, y, sample_weight, eta_out, mu_out, factor
)
return super()._eta_mu_deviance(
link, factor, cur_eta, X_dot_d, y, sample_weight, eta_out, mu_out
)
def log_likelihood(self, y, mu, sample_weight=None, dispersion=1) -> float:
"""Compute the log likelihood.
Parameters
----------
y : array-like, shape (n_samples,)
Target values.
mu : array-like, shape (n_samples,)
Predicted mean.
sample_weight : array-like, shape (n_samples,), optional (default=1)
Sample weights.
dispersion : float, optional (default=1)
Ignored.
"""
ll = special.xlogy(y, mu) + special.xlogy(1 - y, 1 - mu)
return np.sum(ll) if sample_weight is None else np.dot(ll, sample_weight)
def dispersion(self, y, mu, sample_weight=None, ddof=1, method="pearson") -> float:
r"""Estimate the dispersion parameter :math:`\phi`.
Parameters
----------
y : array-like, shape (n_samples,)
Target values.
mu : array-like, shape (n_samples,)
Predicted mean.
sample_weight : array-like, shape (n_samples,), optional (default=None)
Weights or exposure to which variance is inversely proportional.
ddof : int, optional (default=1)
Degrees of freedom consumed by the model for ``mu``.
        method : {'pearson', 'deviance'}, optional (default='pearson')
Whether to base the estimate on the Pearson residuals or the deviance.
Returns
-------
float
"""
y, mu, sample_weight = _as_float_arrays(y, mu, sample_weight)
if method == "pearson":
formula = "((y - mu) ** 2) / (mu * (1 - mu))"
if sample_weight is None:
return numexpr.evaluate(formula).sum() / (len(y) - ddof)
else:
formula = f"sample_weight * {formula}"
return numexpr.evaluate(formula).sum() / (sample_weight.sum() - ddof)
return super().dispersion(
y, mu, sample_weight=sample_weight, ddof=ddof, method=method
)
def guess_intercept(
y: np.ndarray,
sample_weight: np.ndarray,
link: Link,
distribution: ExponentialDispersionModel,
eta: Union[np.ndarray, float] = None,
):
"""
Say we want to find the scalar `b` that minimizes ``LL(eta + b)``, with \
``eta`` fixed.
An exact solution exists for Tweedie distributions with a log link and for
the normal distribution with identity link. An exact solution also exists
for the case of logit with no offset.
If the distribution and corresponding link are something else, we use the
Tweedie or normal solution, depending on the link function.
"""
avg_y = np.average(y, weights=sample_weight)
if isinstance(link, IdentityLink):
# This is only correct for normal. For other distributions, answer is unknown,
# but assume that we want sum(y) = sum(mu)
if eta is None:
return avg_y
avg_eta = eta if np.isscalar(eta) else np.average(eta, weights=sample_weight)
return avg_y - avg_eta
elif isinstance(link, LogLink):
# This is only correct for Tweedie
log_avg_y = np.log(avg_y)
assert np.isfinite(log_avg_y).all()
if eta is None:
return log_avg_y
mu = np.exp(eta)
if isinstance(distribution, TweedieDistribution):
p = distribution.power
else:
p = 1 # Like Poisson
if np.isscalar(mu):
first = np.log(y.dot(sample_weight) * mu ** (1 - p))
second = np.log(sample_weight.sum() * mu ** (2 - p))
else:
first = np.log((y * mu ** (1 - p)).dot(sample_weight))
second = np.log((mu ** (2 - p)).dot(sample_weight))
return first - second
elif isinstance(link, LogitLink):
log_odds = np.log(avg_y) - np.log(np.average(1 - y, weights=sample_weight))
if eta is None:
return log_odds
avg_eta = eta if np.isscalar(eta) else np.average(eta, weights=sample_weight)
return log_odds - avg_eta
else:
return link.link(y.dot(sample_weight))
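# A worked illustration of guess_intercept (added for clarity; only names
# defined or imported in this module are used). With a log link and no offset
# (eta=None), the guess is simply the log of the weighted mean of y:
#
#     y = np.array([1.0, 2.0, 3.0])
#     w = np.ones(3)
#     guess_intercept(y, w, LogLink(), PoissonDistribution())  # == np.log(2.0)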
def get_one_over_variance(
distribution: ExponentialDispersionModel,
link: Link,
mu: np.ndarray,
eta: np.ndarray,
dispersion,
sample_weight: np.ndarray,
):
"""
Get one over the variance.
For Tweedie: ``sigma_inv = sample_weight / (mu ** p)`` during optimization,
because ``phi = 1``.
For Binomial with Logit link: Simplifies to
``variance = phi / ( sample_weight * (exp(eta) + 2 + exp(-eta)))``.
More numerically accurate.
"""
if isinstance(distribution, BinomialDistribution) and isinstance(link, LogitLink):
max_float_for_exp = np.log(np.finfo(eta.dtype).max / 10)
if np.any(np.abs(eta) > max_float_for_exp):
eta = np.clip(eta, -max_float_for_exp, max_float_for_exp) # type: ignore
return sample_weight * (np.exp(eta) + 2 + np.exp(-eta)) / dispersion
return 1.0 / distribution.variance(
mu, dispersion=dispersion, sample_weight=sample_weight
)
def _as_float_arrays(*args):
"""Convert to a float array, passing ``None`` through, and broadcast."""
never_broadcast = {} # type: ignore
maybe_broadcast = {}
always_broadcast = {}
for ix, arg in enumerate(args):
if isinstance(arg, (int, float)):
maybe_broadcast[ix] = np.array([arg], dtype="float")
elif arg is None:
never_broadcast[ix] = None
else:
always_broadcast[ix] = np.asanyarray(arg, dtype="float")
if always_broadcast and maybe_broadcast:
to_broadcast = {**always_broadcast, **maybe_broadcast}
_broadcast = np.broadcast_arrays(*to_broadcast.values())
broadcast = dict(zip(to_broadcast.keys(), _broadcast))
elif always_broadcast:
_broadcast = np.broadcast_arrays(*always_broadcast.values())
broadcast = dict(zip(always_broadcast.keys(), _broadcast))
else:
broadcast = maybe_broadcast # possibly `{}`
out = {**never_broadcast, **broadcast}
return [out[ix] for ix in range(len(args))]
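# Minimal usage sketch (illustrative, not part of the upstream module). Only
# pure-Python/NumPy methods are exercised; the import path is an assumption
# based on this module's relative imports and may need adjusting:
#
#     import numpy as np
#     from glum._distribution import TweedieDistribution
#
#     dist = TweedieDistribution(power=1.5)   # compound Poisson regime
#     y = np.array([0.0, 1.2, 3.4])
#     mu = np.array([0.5, 1.0, 2.0])
#     dist.in_y_range(y)         # all True here: y >= 0 is valid for 1 <= power < 2
#     dist.unit_variance(mu)     # mu ** 1.5, evaluated with numexpr
#     dist.unit_deviance(y, mu)  # per-observation deviance for 1 < power < 2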
|
32370
|
from app.factory import create_app, celery_app
app = create_app(config_name="DEVELOPMENT")
app.app_context().push()
if __name__ == "__main__":
app.run()
|
32392
|
import json
def get_qtypes(dataset_name, part):
"""Return list of question-types for a particular TriviaQA-CP dataset"""
if dataset_name not in {"location", "person"}:
raise ValueError("Unknown dataset %s" % dataset_name)
if part not in {"train", "dev", "test"}:
raise ValueError("Unknown part %s" % part)
is_biased = part in {"train", "dev"}
is_location = dataset_name == "location"
if is_biased and is_location:
return ["person", "other"]
elif not is_biased and is_location:
return ["location"]
elif is_biased and not is_location:
return ["location", "other"]
elif not is_biased and not is_location:
return ["person"]
else:
raise RuntimeError()
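# Illustrative outputs of get_qtypes (derived from the branches above):
#   get_qtypes("location", "train") -> ["person", "other"]
#   get_qtypes("location", "test")  -> ["location"]
#   get_qtypes("person", "dev")     -> ["location", "other"]
#   get_qtypes("person", "test")    -> ["person"]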
def load_triviaqa_cp(filename, dataset_name, part, expected_version=None):
"""Load a TriviaQA-CP dataset
    :param filename: The TriviaQA-CP train or dev json file, must be the train file
                     if `part`=="train" and the dev file otherwise
:param dataset_name: dataset to load, must be in ["person", "location"]
    :param part: which part, must be in ["test", "dev", "train"]
:param expected_version: Optional version to require the data to match
:return: List of question in dictionary form
"""
target_qtypes = get_qtypes(dataset_name, part)
with open(filename, "r") as f:
data = json.load(f)
if expected_version is not None:
if expected_version != data["Version"]:
raise ValueError("Expected version %s, but data was version %s" % (
expected_version, data["Version"]))
if part == "train":
if data["Split"] != "Train":
raise ValueError("Expected train file, but split is %s" % data["Split"])
else:
if data["Split"] != "Dev":
raise ValueError("Expected dev file, but split is %s" % data["Split"])
out = []
for question in data["Data"]:
if question["QuestionType"] in target_qtypes:
out.append(question)
return out
|
32395
|
from cssdbpy import Connection
from time import time
from hashlib import md5
if __name__ == '__main__':
    conn = Connection('127.0.0.1', 8888)
    for i in range(0, 10000):
        md5word = md5('word{}'.format(i).encode('utf-8')).hexdigest()
        create = conn.execute('hset','words', md5word, int(time()))
        value = conn.execute('hget','words', md5word)
        exists = conn.execute('hexists','words', md5word)
        delete = conn.execute('hdel','words', md5word)
        print(md5word, value, create, exists, delete)
    print(conn.execute('hscan', 'words', '', '', 100))
conn.execute('hclear','words')
|
32397
|
import datetime
import pytz
from tws_async import *
stocks = [
Stock('TSLA'),
Stock('AAPL'),
Stock('GOOG'),
Stock('INTC', primaryExchange='NASDAQ')
]
forexs = [
Forex('EURUSD'),
Forex('GBPUSD'),
Forex('USDJPY')
]
endDate = datetime.date.today()
startDate = endDate - datetime.timedelta(days=7)
histReqs = []
for date in util.dateRange(startDate, endDate):
histReqs += [HistRequest(stock, date) for stock in stocks]
histReqs += [HistRequest(forex, date, whatToShow='MIDPOINT',
durationStr='30 D', barSizeSetting='1 day') for forex in forexs]
timezone = datetime.timezone.utc
# timezone = pytz.timezone('Europe/Amsterdam')
# timezone = pytz.timezone('US/Eastern')
util.logToConsole()
tws = HistRequester()
tws.connect('127.0.0.1', 7497, clientId=1)
task = tws.download(histReqs, rootDir='data', timezone=timezone)
tws.run(task)
|
32403
|
import numpy as np
def mean_or_nan(xs):
"""Return its mean a non-empty sequence, numpy.nan for a empty one."""
return np.mean(xs) if xs else np.nan
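# Example (illustrative): mean_or_nan([1, 2, 3]) returns 2.0, while
# mean_or_nan([]) returns nan because an empty sequence is falsy.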
|
32414
|
import pytest
from playwright.sync_api import Page
from pages.main_page.main_page import MainPage
from test.test_base import *
import logging
import re
logger = logging.getLogger("test")
@pytest.mark.only_browser("chromium")
def test_find_element_list(page: Page):
main_page = MainPage(base_url, page)
main_page.delete_cookies()
main_page.open()
# Wait articles and page to be loaded
main_page.loader().should_be_visible()
main_page.loader().should_be_hidden()
assert main_page.register_button().is_visible()
pattern = re.compile(".*")
# Check articles
assert main_page.articles().size() == 10
assert main_page.articles().get(1).is_visible()
assert pattern.match(main_page.articles().get(1).title().inner_text())
assert pattern.match(main_page.articles().get(1).body().inner_text())
logger.info(main_page.articles().get(2).title().inner_text())
# Check nav panel
assert main_page.nav_bar().is_visible()
assert main_page.nav_bar().login_button().is_visible()
logger.info(main_page.nav_bar().login_button().inner_text())
logger.info(main_page.nav_bar().register_button().inner_text())
# articles = page.querySelectorAll(".article-preview")
# assert len(articles) == 10
# texts = page.evalOnSelectorAll(".article-preview h1", '''
# (elems, min) => {
# return elems.map(function(el) {
# return el.textContent //.toUpperCase()
# }); //.join(", ");
# }''')
# assert len(texts) == 10
# assert not texts == []
# assert articles[0].querySelector("h1").innerText() == "Python Playwright Demo"
# assert articles[0].querySelector("p").innerText() == "Playwright Demo"
|
32444
|
import sys
import os
import timeit
# use local python package rather than the system install
sys.path.insert(0, os.path.join(os.path.dirname(__file__), "../python"))
from bitboost import BitBoostRegressor
import numpy as np
import sklearn.metrics
nfeatures = 5
nexamples = 10000
data = np.random.choice(np.array([0.0, 1.0, 2.0], dtype=BitBoostRegressor.numt),
size=(nexamples * 2, nfeatures))
target = (1.22 * (data[:, 0] > 1.0)
+ 0.65 * (data[:, 1] > 1.0)
+ 0.94 * (data[:, 2] != 2.0)
+ 0.13 * (data[:, 3] == 1.0)).astype(BitBoostRegressor.numt)
dtrain, ytrain = data[0:nexamples, :], target[0:nexamples]
dtest, ytest = data[nexamples:, :], target[nexamples:]
bit = BitBoostRegressor()
bit.objective = "l2"
bit.discr_nbits = 4
bit.max_tree_depth = 5
bit.learning_rate = 0.5
bit.niterations = 50
bit.categorical_features = list(range(nfeatures))
bit.fit(dtrain, ytrain)
train_pred = bit.predict(dtrain)
test_pred = bit.predict(dtest)
train_mae = sklearn.metrics.mean_absolute_error(ytrain, train_pred)
test_mae = sklearn.metrics.mean_absolute_error(ytest, test_pred)
print(f"bit train mean absolute error: {train_mae}")
print(f"bit test mean absolute error: {test_mae}")
|
32446
|
import unittest
import os
import logging
import time
import re
import splunklib.client as client
import splunklib.results as results
from splunklib.binding import HTTPError
from . import dltk_api
from . import splunk_api
from . import dltk_environment
level_prog = re.compile(r'level=\"([^\"]*)\"')
msg_prog = re.compile(r'msg=\"((?:\n|.)*)\"')
def run_job(algorithm_name):
environment_name = dltk_environment.get_name()
# raise Exception("| savedsearch job:deploy:%s:%s | %s" % (
# algorithm_name,
# environment_name,
# 'rex field=_raw "level=\\"(?<level>[^\\"]*)\\", msg=\\"(?<msg>[^[\\"|\\\\"]*)\\"" | table level msg',
# ))
for event in splunk_api.search("| savedsearch job:deploy:%s:%s | %s" % (
algorithm_name,
environment_name,
#'rex field=_raw "level=\\"(?<level>[^\\"]*)\\", msg=\\"(?<msg>.*)\\"" | table _raw level msg',
#'rex field=_raw "level=\\"(?<level>[^\\"]*)\\", msg=\\"(?<msg>(?:\\n|.)*)\\"" | table _raw level msg',
'table _raw',
)):
raw = event["_raw"]
if "level" not in event:
m = level_prog.search(raw)
if m:
event["level"] = m.group(1)
if "msg" not in event:
m = msg_prog.search(raw)
if m:
event["msg"] = m.group(1)
if "level" in event:
level = event["level"]
else:
#logging.error("missing 'level' field in deploy result: %s" % (event))
raise Exception("missing 'level' field in deploy result: %s" % raw)
# continue
msg = event["msg"]
if level == "DEBUG":
log = logging.debug
elif level == "WARNING":
log = logging.warning
elif level == "ERROR":
log = logging.error
elif level == "INFO":
log = logging.info
else:
log = logging.warning
msg = "UNEXPECTED LEVEL (%s): %s" % (level, msg)
log(" %s" % msg)
def list_deployments(algorithm_name):
return dltk_api.call(
"GET",
"deployments",
data={
"algorithm": algorithm_name,
}
)
def get_deployment(algorithm_name, environment_name, raise_if_not_exists=True):
deployments = dltk_api.call(
"GET",
"deployments",
data={
"algorithm": algorithm_name,
"environment": environment_name,
}
)
if not len(deployments):
if raise_if_not_exists:
raise Exception("could not find deployment")
return None
return deployments[0]
def deploy(algorithm_name, params={}):
undeploy(algorithm_name)
splunk = splunk_api.connect()
environment_name = dltk_environment.get_name()
dltk_api.call("POST", "deployments", data={
**{
"algorithm": algorithm_name,
"environment": environment_name,
"enable_schedule": False,
},
**params,
}, return_entries=False)
try:
while True:
deployment = get_deployment(algorithm_name, environment_name, raise_if_not_exists=False)
if deployment:
deployment = get_deployment(algorithm_name, environment_name)
status = deployment["status"]
if status == "deploying":
logging.info("still deploying...")
run_job(algorithm_name)
continue
if status == "deployed":
break
status_message = deployment["status_message"]
raise Exception("unexpected deployment status: %s: %s" % (status, status_message))
logging.info("successfully deployed algo \"%s\"" % algorithm_name)
except:
logging.warning("error deploying '%s' to '%s' -> undeploying ..." % (algorithm_name, environment_name))
# while True:
# import time
# time.sleep(10)
undeploy(algorithm_name)
logging.warning("finished undeploying")
raise
def undeploy(algorithm_name):
splunk = splunk_api.connect()
environment_name = dltk_environment.get_name()
while True:
try:
dltk_api.call("DELETE", "deployments", data={
"algorithm": algorithm_name,
"environment": environment_name,
"enable_schedule": False,
}, return_entries=False)
except HTTPError as e:
logging.error("error calling API: %s" % e)
if e.status == 404:
break
raise
run_job(algorithm_name)
|
32490
|
from factory import Faker
from .network_node import NetworkNodeFactory
from ..constants.network import ACCOUNT_FILE_HASH_LENGTH, BLOCK_IDENTIFIER_LENGTH, MAX_POINT_VALUE, MIN_POINT_VALUE
from ..models.network_validator import NetworkValidator
class NetworkValidatorFactory(NetworkNodeFactory):
daily_confirmation_rate = Faker('pyint', max_value=MAX_POINT_VALUE, min_value=MIN_POINT_VALUE)
root_account_file = Faker('url')
root_account_file_hash = Faker('text', max_nb_chars=ACCOUNT_FILE_HASH_LENGTH)
seed_block_identifier = Faker('text', max_nb_chars=BLOCK_IDENTIFIER_LENGTH)
class Meta:
model = NetworkValidator
abstract = True
|
32491
|
from menu.models import Menu
from products.models import Product, Category
def get_dashboard_data_summary():
cardapios = Menu.objects.all()
produtos = Product.objects.all()
categorias = Category.objects.all()
return {'total_cardapios': len(cardapios),
'total_produtos': len(produtos),
'total_categorias': len(categorias)}
|
32548
|
import torch
from torch.nn import Module, Parameter
from torch.autograd import Function
class Forward_Warp_Python:
@staticmethod
def forward(im0, flow, interpolation_mode):
im1 = torch.zeros_like(im0)
B = im0.shape[0]
H = im0.shape[2]
W = im0.shape[3]
if interpolation_mode == 0:
for b in range(B):
for h in range(H):
for w in range(W):
x = w + flow[b, h, w, 0]
y = h + flow[b, h, w, 1]
nw = (int(torch.floor(x)), int(torch.floor(y)))
ne = (nw[0]+1, nw[1])
sw = (nw[0], nw[1]+1)
se = (nw[0]+1, nw[1]+1)
p = im0[b, :, h, w]
if nw[0] >= 0 and se[0] < W and nw[1] >= 0 and se[1] < H:
nw_k = (se[0]-x)*(se[1]-y)
ne_k = (x-sw[0])*(sw[1]-y)
sw_k = (ne[0]-x)*(y-ne[1])
se_k = (x-nw[0])*(y-nw[1])
im1[b, :, nw[1], nw[0]] += nw_k*p
im1[b, :, ne[1], ne[0]] += ne_k*p
im1[b, :, sw[1], sw[0]] += sw_k*p
im1[b, :, se[1], se[0]] += se_k*p
else:
round_flow = torch.round(flow)
for b in range(B):
for h in range(H):
for w in range(W):
x = w + int(round_flow[b, h, w, 0])
y = h + int(round_flow[b, h, w, 1])
if x >= 0 and x < W and y >= 0 and y < H:
im1[b, :, y, x] = im0[b, :, h, w]
return im1
@staticmethod
def backward(grad_output, im0, flow, interpolation_mode):
B = grad_output.shape[0]
C = grad_output.shape[1]
H = grad_output.shape[2]
W = grad_output.shape[3]
im0_grad = torch.zeros_like(grad_output)
flow_grad = torch.empty([B, H, W, 2])
if interpolation_mode == 0:
for b in range(B):
for h in range(H):
for w in range(W):
x = w + flow[b, h, w, 0]
y = h + flow[b, h, w, 1]
x_f = int(torch.floor(x))
y_f = int(torch.floor(y))
x_c = x_f+1
y_c = y_f+1
nw = (x_f, y_f)
ne = (x_c, y_f)
sw = (x_f, y_c)
se = (x_c, y_c)
p = im0[b, :, h, w]
if nw[0] >= 0 and se[0] < W and nw[1] >= 0 and se[1] < H:
nw_k = (se[0]-x)*(se[1]-y)
ne_k = (x-sw[0])*(sw[1]-y)
sw_k = (ne[0]-x)*(y-ne[1])
se_k = (x-nw[0])*(y-nw[1])
nw_grad = grad_output[b, :, nw[1], nw[0]]
ne_grad = grad_output[b, :, ne[1], ne[0]]
sw_grad = grad_output[b, :, sw[1], sw[0]]
se_grad = grad_output[b, :, se[1], se[0]]
im0_grad[b, :, h, w] += nw_k*nw_grad
im0_grad[b, :, h, w] += ne_k*ne_grad
im0_grad[b, :, h, w] += sw_k*sw_grad
im0_grad[b, :, h, w] += se_k*se_grad
flow_grad_x = torch.zeros(C)
flow_grad_y = torch.zeros(C)
flow_grad_x -= (y_c-y)*p*nw_grad
flow_grad_y -= (x_c-x)*p*nw_grad
flow_grad_x += (y_c-y)*p*ne_grad
flow_grad_y -= (x-x_f)*p*ne_grad
flow_grad_x -= (y-y_f)*p*sw_grad
flow_grad_y += (x_c-x)*p*sw_grad
flow_grad_x += (y-y_f)*p*se_grad
flow_grad_y += (x-x_f)*p*se_grad
flow_grad[b, h, w, 0] = torch.sum(flow_grad_x)
flow_grad[b, h, w, 1] = torch.sum(flow_grad_y)
else:
round_flow = torch.round(flow)
for b in range(B):
for h in range(H):
for w in range(W):
x = w + int(round_flow[b, h, w, 0])
y = h + int(round_flow[b, h, w, 1])
if x >= 0 and x < W and y >= 0 and y < H:
im0_grad[b, :, h, w] = grad_output[b, :, y, x]
return im0_grad, flow_grad
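# Minimal sanity check (illustrative addition): with an all-zero flow field,
# nearest-neighbour forward warping (interpolation_mode != 0) should return
# the input unchanged.
if __name__ == "__main__":
    im0 = torch.arange(16.0).reshape(1, 1, 4, 4)
    flow = torch.zeros(1, 4, 4, 2)
    im1 = Forward_Warp_Python.forward(im0, flow, interpolation_mode=1)
    assert torch.equal(im0, im1)
    print("zero-flow nearest-neighbour warp is the identity")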
|
32549
|
import itertools
import Partitioning
class Algorithm( object ):
def __init__( self, linv, variant, init, repart, contwith, before, after, updates ):
self.linv = linv
self.variant = variant
if init:
#assert( len(init) == 1 )
self.init = init[0]
else:
self.init = None
self.repart = repart
self.contwith = contwith
self.before = before
self.after = after
self.updates = updates
# To be filled up for code generation
self.name = None
self.partition = None
self.partition_size = None
self.guard = None
self.repartition = None
self.repartition_size = None
self.basic_repart = None
self.cont_with = None
def prepare_for_code_generation( self ):
self.set_name()
self.set_partition()
self.set_partition_size()
self.set_guard()
self.set_repartition()
self.set_repartition_size()
self.set_basic_repart()
self.set_cont_with()
def set_name( self ):
self.name = "%s_blk_var%d" % (self.linv.operation.name, self.variant)
def set_partition( self ):
self.partition = dict()
traversals = self.linv.traversals[0][0]
#for op in self.linv.operation.operands: # [FIX] linv_operands?
for op in self.linv.linv_operands: # [FIX] linv_operands?
#part_size = self.linv.pme.part_shape[ op.get_name() ]
part_size = self.linv.linv_operands_part_shape[ op.get_name() ]
#part_flat = list(itertools.chain( *self.linv.pme.partitionings[ op.get_name() ] ))
part_flat = list(itertools.chain( *self.linv.linv_operands_basic_part[ op.get_name() ] ))
trav = traversals[op.get_name()]
if part_size == (1, 1):
continue
elif part_size == (1, 2):
if trav == (0, 1):
part_quad = "L"
else: # (0, -1)
part_quad = "R"
elif part_size == (2, 1):
if trav == (1, 0):
part_quad = "T"
else: # (-1, 0)
part_quad = "B"
elif part_size == (2, 2):
if trav == (1, 1):
part_quad = "TL"
elif trav == (1, -1):
part_quad = "TR"
elif trav == (-1, 1):
part_quad = "BL"
else: #(-1, -1):
part_quad = "BR"
else:
raise Exception
self.partition[ op.get_name() ] = (part_size, part_flat, part_quad)
def set_partition_size( self ):
self.partition_size = dict()
traversals = self.linv.traversals[0][0]
#for op in self.linv.operation.operands:
for op in self.linv.linv_operands:
name = op.get_name()
traversal = traversals[op.get_name()]
if traversal == (0, 0):
continue
elif traversal in ( (0, 1), (0, -1) ): # L|R (the specific quadrant can be retrieved from self.partition)
self.partition_size[ name ] = ( op.size[0], 0 )
elif traversal in ( (1, 0), (-1, 0) ): # T/B
self.partition_size[ name ] = ( 0, op.size[1] )
elif traversal in ( (1, 1), (1, -1), (-1, 1), (-1, -1) ): # 2x2
self.partition_size[ name ] = ( 0, 0 )
else:
print( name, traversal )
raise Exception
def set_guard( self ):
self.guard = []
traversals = self.linv.traversals[0][0]
#guard_dims = [bd[0] for bd in self.linv.linv_bound_dimensions[1:]]
guard_dims = []
#for bd in self.linv.linv_bound_dimensions[1:]:
for bd in self.linv.operation.bound_dimensions[1:]:
for d in bd:
op_name, dim = d.split("_")
op = [ o for o in self.linv.operation.operands if o.name == op_name ][0]
if op.st_info[1] != op:
continue
if dim == "r":
idx = 0
else:
idx = 1
if ( traversals[op_name][idx] == 0 ):
continue
self.guard.append( (op.get_size()[idx], guard(op, traversals[op_name])) )
break
def set_repartition( self ):
self.repartition = dict()
traversals = self.linv.traversals[0][0]
#for op in self.linv.operation.operands:
for op in self.linv.linv_operands:
part_size = self.linv.linv_operands_part_shape[ op.get_name() ]
#part_size = self.linv.pme.part_shape[ op.get_name() ]
repart = self.repart[ op.get_name() ]
traversal = traversals[op.get_name()]
if part_size == (1, 1):
continue
elif part_size == (1, 2):
repart_size = (1, 3)
if traversal == (0, 1): # ( 0 || 1 | 2 )
repart_quadrant = "R"
else: # ( 0 | 1 || 2 )
repart_quadrant = "L"
elif part_size == (2, 1):
repart_size = (3, 1)
if traversal == (1, 0): # ( 0 // 1 / 2 )
repart_quadrant = "B"
else: # ( 0 / 1 // 2 )
repart_quadrant = "T"
elif part_size == (2, 2):
repart_size = (3, 3)
if traversal == (1, 1): # BR becomes 2x2
repart_quadrant = "BR"
elif traversal == (1, -1): # BL becomes 2x2
repart_quadrant = "BL"
elif traversal == (-1, 1): # TR becomes 2x2
repart_quadrant = "TR"
else: #if traversal == (-1, -1): # TL becomes 2x2
repart_quadrant = "TL"
else:
raise Exception
repart_flat = list(flatten_repart(repart))
self.repartition[ op.get_name() ] = (repart_size, repart_flat, repart_quadrant)
def set_repartition_size( self ):
self.repartition_size = dict()
traversals = self.linv.traversals[0][0]
#for op in self.linv.operation.operands:
for op in self.linv.linv_operands:
name = op.get_name()
traversal = traversals[op.get_name()]
if traversal == (0, 0):
continue
elif traversal in ( (0, 1), (0, -1) ): # Quadrant is 1
self.repartition_size[ name ] = ( "1", op.size[0], "bs" )
elif traversal in ( (1, 0), (-1, 0) ): # Quadrant is 1
self.repartition_size[ name ] = ( "1", "bs", op.size[1] )
elif traversal in ( (1, 1), (1, -1), (-1, 1), (-1, -1) ): # Quadrant is 11
self.repartition_size[ name ] = ( "11", "bs", "bs" )
else:
print( name, traversal )
raise Exception
def set_basic_repart( self ):
self.basic_repart = dict()
traversals = self.linv.traversals[0][0]
for op in self.linv.linv_operands:
part_size = self.linv.linv_operands_part_shape[ op.get_name() ]
if part_size == (1, 1):
repart_size = (1, 1)
elif part_size == (1, 2):
repart_size = (1, 3)
elif part_size == (2, 1):
repart_size = (3, 1)
elif part_size == (2, 2):
repart_size = (3, 3)
else:
raise Exception
self.basic_repart[ op.get_name() ] = Partitioning.repartition_shape( op, repart_size )
def set_cont_with( self ):
self.cont_with = dict()
traversals = self.linv.traversals[0][0]
#for op in self.linv.operation.operands:
for op in self.linv.linv_operands:
part_size = self.linv.linv_operands_part_shape[ op.get_name() ]
#part_size = self.linv.pme.part_shape[ op.get_name() ]
traversal = traversals[op.get_name()]
if part_size == (1, 1):
continue
elif part_size == (1, 2):
if traversal == (0, 1): # ( 0 | 1 || 2 ) 1 appended to L
cont_with_quadrant = "L"
else: # ( 0 || 1 | 2 ) 1 appended to R
cont_with_quadrant = "R"
elif part_size == (2, 1):
if traversal == (1, 0): # ( 0 / 1 // 2 ) 1 appended to T
cont_with_quadrant = "T"
else: # ( 0 // 1 / 2 ) 1 appended to B
cont_with_quadrant = "B"
elif part_size == (2, 2):
if traversal == (1, 1): # TL grows
cont_with_quadrant = "TL"
elif traversal == (1, -1): # TR grows
cont_with_quadrant = "TR"
elif traversal == (-1, 1): # BL grows
cont_with_quadrant = "BL"
else: #if traversal == (-1, -1): # BR grows
cont_with_quadrant = "BR"
else:
raise Exception
self.cont_with[ op.get_name() ] = cont_with_quadrant
def guard( op, traversal ):
name = op.get_name()
#op = [ o for o in self.operations.operands if o.name == op_name ][0]
if traversal == (0, 1): # L -> R
return ("L", op)
elif traversal == (0, -1): # R -> L
return ("R", op)
elif traversal == (1, 0): # T -> B
return ("T", op)
elif traversal == (-1, 0): # B -> T
return ("B", op)
elif traversal == (1, 1): # TL -> BR
return ("TL", op)
elif traversal == (1, -1): # TR -> BL
return ("TR", op)
elif traversal == (-1, 1): # BL -> TR
return ("BL", op)
elif traversal == (-1, -1): # BR -> TL
return ("BR", op)
else:
        print( name, traversal )
raise Exception
# Flattens a matrix of matrices resulting from a repartitioning
def flatten_repart( repart ):
r, c = 0, 0
chained = []
for row in repart:
for cell in row:
_r = r
_c = c
for _row in cell:
_c = c
for _cell in _row:
chained.append( (_r, _c, _cell) )
_c += 1
_r += 1
c += len( cell.children[0] )
r = len( cell.children )
c = 0
chained.sort()
for _, _, quadrant in chained:
yield quadrant
|
32603
|
from .defines import MsgLv, UnknownFieldValue, ValidateResult, get_msg_level
from .validators import SpecValidator
def _wrap_error_with_field_info(failure):
if get_msg_level() == MsgLv.VAGUE:
return RuntimeError(f'field: {failure.field} not well-formatted')
if isinstance(failure.value, UnknownFieldValue):
return LookupError(f'field: {failure.field} missing')
msg = f'field: {failure.field}, reason: {failure.error}'
return type(failure.error)(msg)
def _flatten_results(failures, errors=None):
if type(errors) != list:
raise RuntimeError(f'{errors} not a list')
if type(failures) == tuple:
_flatten_results(failures[1], errors)
elif type(failures) == list:
for item in failures:
_flatten_results(item, errors)
elif isinstance(failures, ValidateResult):
if issubclass(type(failures.error), Exception):
error = _wrap_error_with_field_info(failures)
errors.append(error)
return
_flatten_results(failures.error, errors)
def _find_most_significant_error(failures):
errors = []
_flatten_results(failures, errors)
# Build error list by error types
err_map = {}
for err in errors:
if isinstance(err, ValueError):
err_key = 'ValueError'
elif isinstance(err, PermissionError):
err_key = 'PermissionError'
elif isinstance(err, TypeError):
err_key = 'TypeError'
elif isinstance(err, LookupError):
err_key = 'LookupError'
else:
err_key = 'RuntimeError'
err_map.setdefault(err_key, []).append(err)
# Severity, PermissionError > LookupError > TypeError > ValueError > RuntimeError.
errors = (
err_map.get('PermissionError', [])
or err_map.get('LookupError', [])
or err_map.get('TypeError', [])
or err_map.get('ValueError', [])
or err_map.get('RuntimeError', [])
)
# TODO: For better information, we can raise an error with all error messages at one shot
main_error = errors[0]
return main_error
def validate_data_spec(data, spec, **kwargs):
# SPEC validator as the root validator
ok, failures = SpecValidator.validate(data, {SpecValidator.name: spec}, None)
nothrow = kwargs.get('nothrow', False)
if not ok and not nothrow:
error = _find_most_significant_error(failures)
raise error
return ok
|
32604
|
import sys
__all__ = ['IntegerTypes', 'StringTypes']
if sys.version_info < (3,):
IntegerTypes = (int, long)
StringTypes = (str, unicode)
long = long
import __builtin__ as builtins
else:
IntegerTypes = (int,)
StringTypes = (str,)
long = int
import builtins
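# Example (illustrative): after importing this shim,
#   isinstance("abc", StringTypes) and isinstance(1, IntegerTypes)
# both evaluate to True on either Python 2 or Python 3.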
|
32615
|
try:
import tensorflow
except ModuleNotFoundError:
pkg_name = 'tensorflow'
import os
import sys
import subprocess
from cellacdc import myutils
cancel = myutils.install_package_msg(pkg_name)
if cancel:
raise ModuleNotFoundError(
f'User aborted {pkg_name} installation'
)
subprocess.check_call(
[sys.executable, '-m', 'pip', 'install', 'tensorflow']
)
# numba requires numpy<1.22 but tensorflow might install higher
# so install numpy less than 1.22 if needed
import numpy
np_version = numpy.__version__.split('.')
np_major, np_minor = [int(v) for v in np_version][:2]
    if (np_major, np_minor) >= (1, 22):
subprocess.check_call(
[sys.executable, '-m', 'pip', 'install', '--upgrade', 'numpy<1.22']
)
|
32616
|
import aioredis
import trafaret as t
import yaml
from aiohttp import web
CONFIG_TRAFARET = t.Dict(
{
t.Key('redis'): t.Dict(
{
'port': t.Int(),
'host': t.String(),
'db': t.Int(),
'minsize': t.Int(),
'maxsize': t.Int(),
}
),
'host': t.IP,
'port': t.Int(),
}
)
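# A YAML file accepted by CONFIG_TRAFARET would look roughly like this
# (illustrative values only):
#
#   redis:
#     port: 6379
#     host: 127.0.0.1
#     db: 0
#     minsize: 1
#     maxsize: 10
#   host: 127.0.0.1
#   port: 8080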
def load_config(fname):
with open(fname, 'rt') as f:
        data = yaml.safe_load(f)
return CONFIG_TRAFARET.check(data)
async def init_redis(conf, loop):
pool = await aioredis.create_redis_pool(
(conf['host'], conf['port']),
minsize=conf['minsize'],
maxsize=conf['maxsize'],
loop=loop,
)
return pool
CHARS = "abcdefghijkmnpqrstuvwxyzABCDEFGHJKLMNPQRSTUVWXYZ23456789"
def encode(num, alphabet=CHARS):
if num == 0:
return alphabet[0]
arr = []
base = len(alphabet)
while num:
num, rem = divmod(num, base)
arr.append(alphabet[rem])
arr.reverse()
return ''.join(arr)
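# Illustrative behaviour of encode with the 56-character alphabet above:
#   encode(0)  -> 'a'        (alphabet[0])
#   encode(1)  -> 'b'
#   encode(56) -> 'ba'       (one "digit" carry in base 56)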
ShortifyRequest = t.Dict({t.Key('url'): t.URL})
def fetch_url(data):
try:
data = ShortifyRequest(data)
except t.DataError:
        raise web.HTTPBadRequest(text='URL is not valid')
return data['url']
|
32633
|
import sys
from antlr4 import *
from ChatParser import ChatParser
from ChatListener import ChatListener
from antlr4.error.ErrorListener import *
import io
class ChatErrorListener(ErrorListener):
def __init__(self, output):
self.output = output
self._symbol = ''
def syntaxError(self, recognizer, offendingSymbol, line, column, msg, e):
self.output.write(msg)
if offendingSymbol is not None:
self._symbol = offendingSymbol.text
else:
            self._symbol = recognizer.getTokenErrorDisplay(offendingSymbol)
@property
def symbol(self):
return self._symbol
|
32634
|
from django.urls import path
from . import views
urlpatterns = [
path("", views.home, name="home"),
path("faq/", views.faq, name="faq"),
path("plagiarism_policy/", views.plagiarism_policy,
name="plagiarism_policy"),
path("privacy_policy/", views.privacy_policy, name="privacy_policy"),
path("post_login/", views.index, name="post_login"),
path("save_partnership_contact_form/", views.save_partnership_contact_form,
name="save_partnership_contact_form"),
path("500/", views.test_500),
path("404/", views.test_404),
]
|
32648
|
from utils import *
import torch
import sys
import numpy as np
import time
import torchvision
from torch.autograd import Variable
import torchvision.transforms as transforms
import torchvision.datasets as datasets
def validate_pgd(val_loader, model, criterion, K, step, configs, logger, save_image=False, HE=False):
# Mean/Std for normalization
mean = torch.Tensor(np.array(configs.TRAIN.mean)[:, np.newaxis, np.newaxis])
mean = mean.expand(3,configs.DATA.crop_size, configs.DATA.crop_size).cuda()
std = torch.Tensor(np.array(configs.TRAIN.std)[:, np.newaxis, np.newaxis])
std = std.expand(3, configs.DATA.crop_size, configs.DATA.crop_size).cuda()
# Initiate the meters
batch_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
eps = configs.ADV.clip_eps
model.eval()
end = time.time()
logger.info(pad_str(' PGD eps: {}, K: {}, step: {} '.format(eps, K, step)))
if HE == True:
is_HE = '_HE'
else:
is_HE = ''
if configs.pretrained:
is_HE = '_pretrained'
for i, (input, target) in enumerate(val_loader):
input = input.cuda(non_blocking=True)
target = target.cuda(non_blocking=True)
#save original images
if save_image == True and i < 2:
original_images_save = input.clone()
for o in range(input.size(0)):
torchvision.utils.save_image(original_images_save[o, :, :, :], 'saved_images/original_images'+is_HE+'/{}.png'.format(o + configs.DATA.batch_size*i))
randn = torch.FloatTensor(input.size()).uniform_(-eps, eps).cuda()
input += randn
input.clamp_(0, 1.0)
orig_input = input.clone()
for _ in range(K):
invar = Variable(input, requires_grad=True)
in1 = invar - mean
in1.div_(std)
output = model(in1)
ascend_loss = criterion(output, target)
ascend_grad = torch.autograd.grad(ascend_loss, invar)[0]
pert = fgsm(ascend_grad, step)
            # Apply perturbation and project back into the eps-ball around the original input
input += pert.data
input = torch.max(orig_input-eps, input)
input = torch.min(orig_input+eps, input)
input.clamp_(0, 1.0)
#save adv images
if save_image == True and i < 2:
adv_images_save = input.clone()
for o in range(input.size(0)):
torchvision.utils.save_image(adv_images_save[o, :, :, :], 'saved_images/adv_images'+is_HE+'/{}.png'.format(o + configs.DATA.batch_size*i))
#save scaled perturbation
perturbation = input - orig_input
perturbation.clamp_(-eps,eps)
scaled_perturbation = (perturbation.clone() + eps) / (2 * eps)
scaled_perturbation.clamp_(0, 1.0)
if save_image == True and i < 2:
for o in range(input.size(0)):
torchvision.utils.save_image(scaled_perturbation[o, :, :, :], 'saved_images/scaled_perturbation'+is_HE+'/{}.png'.format(o + configs.DATA.batch_size*i))
input.sub_(mean).div_(std)
with torch.no_grad():
# compute output
output = model(input)
loss = criterion(output, target)
# measure accuracy and record loss
prec1, prec5 = accuracy(output, target, topk=(1, 5))
losses.update(loss.item(), input.size(0))
top1.update(prec1[0], input.size(0))
top5.update(prec5[0], input.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % configs.TRAIN.print_freq == 0:
print('PGD Test: [{0}/{1}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
i, len(val_loader), batch_time=batch_time, loss=losses,
top1=top1, top5=top5))
sys.stdout.flush()
print(' PGD Final Prec@1 {top1.avg:.3f} Prec@5 {top5.avg:.3f}'
.format(top1=top1, top5=top5))
return top1.avg
def validate(val_loader, model, criterion, configs, logger):
# Mean/Std for normalization
mean = torch.Tensor(np.array(configs.TRAIN.mean)[:, np.newaxis, np.newaxis])
mean = mean.expand(3,configs.DATA.crop_size, configs.DATA.crop_size).cuda()
std = torch.Tensor(np.array(configs.TRAIN.std)[:, np.newaxis, np.newaxis])
std = std.expand(3, configs.DATA.crop_size, configs.DATA.crop_size).cuda()
# Initiate the meters
batch_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
# switch to evaluate mode
model.eval()
end = time.time()
for i, (input, target) in enumerate(val_loader):
with torch.no_grad():
input = input.cuda(non_blocking=True)
target = target.cuda(non_blocking=True)
# compute output
input = input - mean
input.div_(std)
output = model(input)
loss = criterion(output, target)
# measure accuracy and record loss
prec1, prec5 = accuracy(output, target, topk=(1, 5))
losses.update(loss.item(), input.size(0))
top1.update(prec1[0], input.size(0))
top5.update(prec5[0], input.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % configs.TRAIN.print_freq == 0:
print('Test: [{0}/{1}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
i, len(val_loader), batch_time=batch_time, loss=losses,
top1=top1, top5=top5))
sys.stdout.flush()
print(' Final Prec@1 {top1.avg:.3f} Prec@5 {top5.avg:.3f}'
.format(top1=top1, top5=top5))
return top1.avg
def validate_ImagetNet_C(val_loader_name, model, criterion, configs, logger):
# Mean/Std for normalization
mean = torch.Tensor(np.array(configs.TRAIN.mean)[:, np.newaxis, np.newaxis])
mean = mean.expand(3,configs.DATA.crop_size, configs.DATA.crop_size).cuda()
std = torch.Tensor(np.array(configs.TRAIN.std)[:, np.newaxis, np.newaxis])
std = std.expand(3, configs.DATA.crop_size, configs.DATA.crop_size).cuda()
# switch to evaluate mode
model.eval()
fil_index = ['/1','/2','/3','/4','/5']
avg_return = 0
for f in fil_index:
valdir = os.path.join(configs.data, val_loader_name+f)
print(' File: ', valdir)
val_loader = torch.utils.data.DataLoader(
datasets.ImageFolder(valdir, transforms.Compose([
transforms.Resize(configs.DATA.img_size),
transforms.CenterCrop(configs.DATA.crop_size),
transforms.ToTensor(),
])),
batch_size=configs.DATA.batch_size, shuffle=False,
num_workers=configs.DATA.workers, pin_memory=True)
# Initiate the meters
top1 = AverageMeter()
end = time.time()
for i, (input, target) in enumerate(val_loader):
with torch.no_grad():
input = input.cuda(non_blocking=True)
target = target.cuda(non_blocking=True)
# compute output
input = input - mean
input.div_(std)
output = model(input)
# measure accuracy and record loss
prec1,_ = accuracy(output, target, topk=(1,2))
top1.update(prec1[0], input.size(0))
# if i % configs.TRAIN.print_freq == 0:
# print('PGD Test: [{0}/{1}]\t'
# 'Prec@1 {top1.val:.3f} ({top1.avg:.3f})'.format(
# i, len(val_loader),top1=top1))
# print('Time: ', time.time() - end)
# sys.stdout.flush()
print('Prec: ',top1.avg.cpu().item())
avg_return += top1.avg.cpu().item()
    print('Average Classification Accuracy is: ', avg_return / 5.)
return
|
32661
|
from tool.runners.python import SubmissionPy
class DavidSubmission(SubmissionPy):
def bucket_key(self, w, i):
return w[:i] + w[i+1:]
def run(self, s):
words = s.split("\n")
n = len(words[0])
buckets = [set() for i in range(n)]
for w in words:
for i in range(n):
k = self.bucket_key(w, i)
if k in buckets[i]:
return k
buckets[i].add(k)
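# Illustration (hypothetical inputs): for the words "fghij" and "fguij", dropping
# the character at index 2 from each yields the same bucket key "fgij", so the
# two ids differ in exactly one position and "fgij" is returned.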
|
32666
|
import pytest
@pytest.mark.parametrize("cli_options", [
('-k', 'notestdeselect',),
])
def test_autoexecute_yml_keywords_skipped(testdir, cli_options):
yml_file = testdir.makefile(".yml", """
---
markers:
- marker1
- marker2
---
- provider: python
type: assert
expression: "1"
""")
assert yml_file.basename.startswith('test_')
assert yml_file.basename.endswith('.yml')
result = testdir.runpytest(*cli_options)
result.assert_outcomes(passed=0, failed=0, error=0)
# Deselected, not skipped. See #3427
# result.assert_outcomes(skipped=1)
|
32670
|
import solana_rpc as rpc
def get_apr_from_rewards(rewards_data):
result = []
if rewards_data is not None:
if 'epochRewards' in rewards_data:
epoch_rewards = rewards_data['epochRewards']
for reward in epoch_rewards:
result.append({
'percent_change': reward['percentChange'],
'apr': reward['apr']
})
return result
def calc_single_apy(apr, percent_change):
epoch_count = apr / percent_change
result = ((1 + percent_change / 100) ** epoch_count - 1) * 100
return result
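# Worked example (illustrative numbers, not real chain data): with apr=6.5 and
# percent_change=0.05, epoch_count = 6.5 / 0.05 = 130 epochs, so
# APY = ((1 + 0.05 / 100) ** 130 - 1) * 100 ≈ 6.71.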
def calc_apy_list_from_apr(apr_per_epoch):
l_apy = []
for item in apr_per_epoch:
apy = calc_single_apy(item['apr'], item['percent_change'])
l_apy.append(apy)
return l_apy
def process(validators):
data = []
for validator in validators:
rewards_data = rpc.load_stake_account_rewards(validator['stake_account'])
apr_per_epoch = get_apr_from_rewards(rewards_data)
apy_per_epoch = calc_apy_list_from_apr(apr_per_epoch)
data.append(apy_per_epoch)
return data
|
32675
|
import re
class Solution:
    def helper(self, expression: str) -> set:
        s = re.search(r"\{([^}{]+)\}", expression)
if not s: return {expression}
g = s.group(1)
result = set()
for c in g.split(','):
result |= self.helper(expression.replace('{' + g + '}', c, 1))
return result
def braceExpansionII(self, expression: str) -> List[str]:
return sorted(list(self.helper(expression)))
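# Illustrative usage (LeetCode-style invocation):
# Solution().braceExpansionII("{a,b}{c,{d,e}}")
# -> ['ac', 'ad', 'ae', 'bc', 'bd', 'be']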
|
32680
|
from unittest.mock import patch
from dependent import parameter_dependent
@patch('math.sqrt')
def test_negative(mock_sqrt):
assert parameter_dependent(-1) == 0
mock_sqrt.assert_not_called()
@patch('math.sqrt')
def test_zero(mock_sqrt):
mock_sqrt.return_value = 0
assert parameter_dependent(0) == 0
mock_sqrt.assert_called_once_with(0)
@patch('math.sqrt')
def test_twenty_five(mock_sqrt):
mock_sqrt.return_value = 5
assert parameter_dependent(25) == 5
mock_sqrt.assert_called_with(25)
@patch('math.sqrt')
def test_hundred(mock_sqrt):
mock_sqrt.return_value = 10
assert parameter_dependent(100) == 10
mock_sqrt.assert_called_with(100)
@patch('math.sqrt')
def test_hundred_and_one(mock_sqrt):
assert parameter_dependent(101) == 10
mock_sqrt.assert_not_called()
|
32683
|
import os
_lab_components = """from api2db.ingest import *
CACHE=True # Caches API data so that only a single API call is made if True
def import_target():
return None
def pre_process():
return None
def data_features():
return None
def post_process():
return None
if __name__ == "__main__":
api_form = ApiForm(name="lab",
pre_process=pre_process(),
data_features=data_features(),
post_process=post_process()
)
api_form.experiment(CACHE, import_target)
"""
def mlab():
"""
This shell command is used for creation of a lab. Labs offer an easier way to design an ApiForm.
Given a project directory
::
project_dir-----/
|
apis-----/
| |- __init__.py
| |- FooCollector.py
| |- BarCollector.py
|
AUTH-----/
| |- bigquery_auth_template.json
| |- omnisci_auth_template.json
| |- sql_auth_template.json
|
CACHE/
|
STORE/
|
helpers.py
|
main.py
**Shell Command:** ``path/to/project_dir> mlab``
::
project_dir-----/
|
apis-------/
| |- __init__.py
| |- FooCollector.py
| |- BarCollector.py
|
AUTH-------/
| |- bigquery_auth_template.json
| |- omnisci_auth_template.json
| |- sql_auth_template.json
|
CACHE/
|
STORE/
|
laboratory-/
| |- lab.py EDIT THIS FILE!
|
helpers.py
|
main.py
Returns:
None
"""
lab_dir_path = os.path.join(os.getcwd(), "laboratory")
if not os.path.isdir(lab_dir_path):
os.makedirs(lab_dir_path)
with open(os.path.join(lab_dir_path, "lab.py"), "w") as f:
            f.write(_lab_components)
print("Lab has been created. Edit the file found in laboratory/lab.py")
else:
print("Lab already exists!")
|
32711
|
from .client import Client
class Stats(Client):
def __init__(self, api_key='YourApiKeyToken'):
Client.__init__(self, address='', api_key=api_key)
self.url_dict[self.MODULE] = 'stats'
def get_total_ether_supply(self):
self.url_dict[self.ACTION] = 'ethsupply'
self.build_url()
req = self.connect()
return req['result']
def get_ether_last_price(self):
self.url_dict[self.ACTION] = 'ethprice'
self.build_url()
req = self.connect()
return req['result']
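# Illustrative usage (requires a real Etherscan API key; the placeholder default
# will not return live data):
# stats = Stats(api_key='YourApiKeyToken')
# total_supply = stats.get_total_ether_supply()
# last_price = stats.get_ether_last_price()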
|
32719
|
import MiniNero
import ed25519
import binascii
import PaperWallet
import cherrypy
import os
import time
import bitmonerod
import SimpleXMR2
import SimpleServer
message = "send0d000114545737471em2WCg9QKxRxbo6S3xKF2K4UDvdu6hMc"
message = "send0d0114545747771em2WCg9QKxRxbo6S3xKF2K4UDvdu6hMc"
sec = input("sec?")
print(SimpleServer.Signature(message, sec))
|
32730
|
import datetime
import remi
import core.globals
connected_clients = {} # Dict with key=session id of App Instance and value=ws_client.client_address of App Instance
connected_clients['number'] = 0 # Special Dict Field for amount of active connections
client_route_url_to_view = {} # Dict to store URL extensions related to session. This is used to switch a view based on url
def handle_connections(AppInst=None):
    # Take care of the connection. It is only alive if the websocket is still active.
# Check, if there is a new websocket connection for this App session (= Instance)
if AppInst.connection_established == False and len(AppInst.websockets) == 1:
for session_id, app_inst in remi.server.clients.items():
if session_id == AppInst.session:
for ws_client in app_inst.websockets:
AppInst.logger.info(f'New Session with ID <{AppInst.session}> from host {ws_client.client_address}') # Host Information for direct connection
connected_clients[AppInst.session] = ws_client.client_address
AppInst.logger.info(f'Session <{AppInst.session}> host headers: {ws_client.headers}')
connected_clients['number'] = connected_clients['number'] + 1
AppInst.logger.info(f'Connected clients ({connected_clients["number"]} in total): {connected_clients}')
AppInst.connect_time = datetime.datetime.now()
AppInst.connection_established = True # Set Flag. This can be used by other threads as end signal.
# Check, if the websocket connection is still alive. REMI removes the Websocket from the List if dead.
if len(remi.server.clients[AppInst.session].websockets) == 0 and AppInst.connection_established == True:
AppInst.disconnect_time = datetime.datetime.now() # Store the disconnect time
connection_duration = f'{(AppInst.disconnect_time - AppInst.connect_time).seconds} sec'
AppInst.logger.info(f'Session <{AppInst.session}> from host {connected_clients[AppInst.session]} has disconnected. Connection duration: {connection_duration}')
AppInst.connection_established = False # Set Flag. This can be used by other threads as end signal.
del connected_clients[AppInst.session]
connected_clients['number'] = connected_clients['number'] - 1
AppInst.logger.info(f'Still connected clients: {connected_clients}')
|
32756
|
from diofant.utilities.decorator import no_attrs_in_subclass
__all__ = ()
def test_no_attrs_in_subclass():
class A:
x = 'test'
A.x = no_attrs_in_subclass(A, A.x)
class B(A):
pass
assert hasattr(A, 'x') is True
assert hasattr(B, 'x') is False
|
32763
|
from ckan_cloud_operator import kubectl
def get(what, *args, required=True, namespace=None, get_cmd=None, **kwargs):
return kubectl.get(what, *args, required=required, namespace=namespace, get_cmd=get_cmd, **kwargs)
|
32773
|
import os
import tarfile
import time
import pickle
import numpy as np
from Bio.Seq import Seq
from scipy.special import expit
from scipy.special import logit
import torch
import torch.nn.functional as F
""" Get directories for model and seengenes """
module_dir = os.path.dirname(os.path.realpath(__file__))
model_dir = os.path.join(module_dir, "balrog_models")
""" Print what the program is doing."""
verbose = True
""" Use kmer prefilter to increase gene sensitivity.
May not play nice with very high GC genomes."""
protein_kmer_filter = False
""" Nucleotide to amino acid translation table. 11 for most bacteria/archaea.
4 for Mycoplasma/Spiroplasma."""
translation_table = 11
# translation_table = 4
""" Batch size for the temporal convolutional network used to score genes.
Small batches and big batches slow down the model. Very big batches may crash the
GPU. """
gene_batch_size = 200
TIS_batch_size = 1000
""" All following are internal parameters. Change at your own risk."""
weight_gene_prob = 0.9746869839852076
weight_TIS_prob = 0.25380288790532707
score_threshold = 0.47256101519707244
weight_ATG = 0.84249804151264
weight_GTG = 0.7083689705744909
weight_TTG = 0.7512400826652517
unidirectional_penalty_per_base = 3.895921717182765 # 3' 5' overlap
convergent_penalty_per_base = 4.603432608883688 # 3' 3' overlap
divergent_penalty_per_base = 3.3830814940689975 # 5' 5' overlap
k_seengene = 10
multimer_threshold = 2
nuc_encode = {"A": 0,
"T": 1,
"G": 2,
"C": 3,
"N": 0,
"M": 0,
"R": 0,
"Y": 0,
"W": 0,
"K": 0}
start_enc = {"ATG": 0,
"GTG": 1,
"TTG": 2}
aa_table = {"L": 1,
"V": 2,
"I": 3,
"M": 4,
"C": 5,
"A": 6,
"G": 7,
"S": 8,
"T": 9,
"P": 10,
"F": 11,
"Y": 12,
"W": 13,
"E": 14,
"D": 15,
"N": 16,
"Q": 17,
"K": 18,
"R": 19,
"H": 20,
"*": 0,
"X": 0}
# generate ORF sequences from coordinates
# @profile
def generate_sequence(graph_vector, nodelist, node_coords, overlap):
sequence = ""
for i in range(0, len(nodelist)):
id = nodelist[i]
coords = node_coords[i]
# calculate strand based on value of node (if negative, strand is false)
strand = True if id >= 0 else False
if strand:
unitig_seq = graph_vector[abs(id) - 1].seq
else:
unitig_seq = str(Seq(graph_vector[abs(id) - 1].seq).reverse_complement())
        if len(sequence) == 0:
            substring = unitig_seq[coords[0]:(coords[1] + 1)]
        elif coords[1] >= overlap:
            substring = unitig_seq[overlap:(coords[1] + 1)]
        else:
            # node lies entirely within the overlap, so it adds no new sequence
            substring = ""
        sequence += substring
return sequence
#@profile
def tokenize_aa_seq(aa_seq):
""" Convert amino acid letters to integers."""
tokenized = torch.tensor([aa_table[aa] for aa in aa_seq])
return tokenized
#@profile
def get_ORF_info(ORF_vector, graph, overlap):
ORF_seq_list = []
TIS_seqs = []
# iterate over list of ORFs
for ORFNodeVector in ORF_vector:
# need to determine ORF sequences from paths
ORF_nodelist = ORFNodeVector[0]
ORF_node_coords = ORFNodeVector[1]
TIS_nodelist = ORFNodeVector[3]
TIS_node_coords = ORFNodeVector[4]
# generate ORF_seq, as well as upstream and downstream TIS seq
ORF_seq = graph.generate_sequence(ORF_nodelist, ORF_node_coords, overlap)
upstream_TIS_seq = graph.generate_sequence(TIS_nodelist, TIS_node_coords, overlap)
downstream_TIS_seq = ORF_seq[0:19]
# generate Seq class for translation
seq = Seq(ORF_seq)
# translate once per frame, then slice. Note, do not include start or stop codons
aa = str(seq[3:-3].translate(table=translation_table, to_stop=False))
ORF_seq_list.append(aa)
TIS_seqs.append((upstream_TIS_seq, downstream_TIS_seq))
# convert amino acids into integers
ORF_seq_enc = [tokenize_aa_seq(x) for x in ORF_seq_list]
return ORF_seq_enc, TIS_seqs
#@profile
def predict(model, X):
model.eval()
with torch.no_grad():
if torch.cuda.device_count() > 0:
X_enc = F.one_hot(X, 21).permute(0, 2, 1).float().cuda()
probs = expit(model(X_enc).cpu())
del X_enc
torch.cuda.empty_cache()
else:
X_enc = F.one_hot(X, 21).permute(0, 2, 1).float()
probs = expit(model(X_enc).cpu())
return probs
#@profile
def predict_tis(model_tis, X):
model_tis.eval()
with torch.no_grad():
if torch.cuda.device_count() > 0:
X_enc = F.one_hot(X, 4).permute(0, 2, 1).float().cuda()
else:
X_enc = F.one_hot(X, 4).permute(0, 2, 1).float()
probs = expit(model_tis(X_enc).cpu())
return probs
#@profile
def kmerize(seq, k):
kmerset = set()
for i in range(len(seq) - k + 1):
kmer = tuple(seq[i: i + k].tolist())
kmerset.add(kmer)
return kmerset
def load_kmer_model():
# check if directory exists. If not, unzip file
if not os.path.exists(model_dir):
tar = tarfile.open(model_dir + ".tar.gz", mode="r:gz")
tar.extractall(module_dir)
tar.close()
"""Load k-mer filters"""
genexa_kmer_path = os.path.join(model_dir, "10mer_thresh2_minusARF_all.pkl")
with open(genexa_kmer_path, "rb") as f:
aa_kmer_set = pickle.load(f)
return aa_kmer_set
def load_gene_models():
# check if directory exists. If not, unzip file
if not os.path.exists(model_dir):
tar = tarfile.open(model_dir + ".tar.gz", mode="r:gz")
tar.extractall(module_dir)
tar.close()
torch.hub.set_dir(model_dir)
# print("Loading convolutional model...")
if torch.cuda.device_count() > 0:
# print("GPU detected...")
model = torch.hub.load(model_dir, "geneTCN", source='local').cuda()
model_tis = torch.hub.load(model_dir, "tisTCN", source='local').cuda()
time.sleep(0.5)
else:
# print("No GPU detected, using CPU...")
model = torch.hub.load(model_dir, "geneTCN", source='local')
model_tis = torch.hub.load(model_dir, "tisTCN", source='local')
time.sleep(0.5)
return (model, model_tis)
#@profile
def score_genes(ORF_vector, graph_vector, minimum_ORF_score, overlap, model, model_tis, aa_kmer_set):
# get sequences and coordinates of ORFs
# print("Finding and translating open reading frames...")
ORF_seq_enc, TIS_seqs = get_ORF_info(ORF_vector, graph_vector, overlap)
# seengene check
if protein_kmer_filter:
seengene = []
for s in ORF_seq_enc:
kmerset = kmerize(s, k_seengene)
# s = [x in aa_kmer_set for x in kmerset]
s = np.isin(list(kmerset), aa_kmer_set)
seen = np.count_nonzero(s) >= multimer_threshold
seengene.append(seen)
# score
# print("Scoring ORFs with temporal convolutional network...")
# sort by length to minimize impact of batch padding
ORF_lengths = np.asarray([len(x) for x in ORF_seq_enc])
length_idx = np.argsort(ORF_lengths)
ORF_seq_sorted = [ORF_seq_enc[i] for i in length_idx]
# pad to allow creation of batch matrix
prob_list = []
for i in range(0, len(ORF_seq_sorted), gene_batch_size):
batch = ORF_seq_sorted[i:i + gene_batch_size]
seq_lengths = torch.LongTensor(list(map(len, batch)))
seq_tensor = torch.zeros((len(batch), seq_lengths.max())).long()
for idx, (seq, seqlen) in enumerate(zip(batch, seq_lengths)):
seq_tensor[idx, :seqlen] = torch.LongTensor(seq)
pred_all = predict(model, seq_tensor)
pred = []
for j, length in enumerate(seq_lengths):
subseq = pred_all[j, 0, 0:int(length)]
predprob = float(expit(torch.mean(logit(subseq))))
pred.append(predprob)
prob_list.extend(pred)
prob_arr = np.asarray(prob_list, dtype=float)
# unsort
unsort_idx = np.argsort(length_idx)
ORF_prob = prob_arr[unsort_idx]
# recombine ORFs
idx = 0
ORF_gene_score = [None] * len(ORF_seq_enc)
for k, coord in enumerate(ORF_gene_score):
ORF_gene_score[k] = float(ORF_prob[idx])
idx += 1
# print("Scoring translation initiation sites...")
# extract nucleotide sequence surrounding potential start codons
ORF_TIS_seq_flat = []
ORF_TIS_seq_idx = []
ORF_TIS_prob = [None] * len(TIS_seqs)
ORF_start_codon = [None] * len(ORF_seq_enc)
for i, TIS in enumerate(TIS_seqs):
# unpack tuple. Note, downsteam includes start codon, which needs to be removed
upstream, downstream = TIS
if len(upstream) == 16:
TIS_seq = torch.tensor([nuc_encode[c] for c in (upstream + downstream[3:])[::-1]],
dtype=int) # model scores 3' to 5' direction
ORF_TIS_seq_flat.append(TIS_seq)
ORF_TIS_seq_idx.append(i)
else:
ORF_TIS_prob[i] = 0.5
# encode start codon
start_codon = start_enc[downstream[0:3]]
ORF_start_codon[i] = start_codon
# batch score TIS
TIS_prob_list = []
for i in range(0, len(ORF_TIS_seq_flat), TIS_batch_size):
batch = ORF_TIS_seq_flat[i:i + TIS_batch_size]
TIS_stacked = torch.stack(batch)
pred = predict_tis(model_tis, TIS_stacked)
TIS_prob_list.extend(pred)
y_pred_TIS = np.asarray(TIS_prob_list, dtype=float)
# reindex batched scores
for i, prob in enumerate(y_pred_TIS):
idx = ORF_TIS_seq_idx[i]
ORF_TIS_prob[idx] = float(prob)
# combine all info into single score for each ORF
if protein_kmer_filter:
ORF_score_flat = []
for i, geneprob in enumerate(ORF_gene_score):
if not geneprob:
ORF_score_flat.append(None)
continue
# calculate length by multiplying number of amino acids by 3, then adding 6 for start and stop
length = (len(ORF_seq_enc[i]) * 3) + 6
TIS_prob = ORF_TIS_prob[i]
start_codon = ORF_start_codon[i]
ATG = start_codon == 0
GTG = start_codon == 1
TTG = start_codon == 2
combprob = geneprob * weight_gene_prob \
+ TIS_prob * weight_TIS_prob \
+ ATG * weight_ATG \
+ GTG * weight_GTG \
+ TTG * weight_TTG
maxprob = weight_gene_prob + weight_TIS_prob + max(weight_ATG, weight_TTG, weight_GTG)
probthresh = score_threshold * maxprob
                score = (combprob - probthresh) * length + 1e6 * seengene[i]
ORF_score_flat.append(score)
else:
ORF_score_flat = []
for i, geneprob in enumerate(ORF_gene_score):
if not geneprob:
ORF_score_flat.append(None)
continue
            # calculate length by multiplying number of amino acids by 3
length = len(ORF_seq_enc[i]) * 3
TIS_prob = ORF_TIS_prob[i]
start_codon = ORF_start_codon[i]
ATG = start_codon == 0
GTG = start_codon == 1
TTG = start_codon == 2
combprob = geneprob * weight_gene_prob \
+ TIS_prob * weight_TIS_prob \
+ ATG * weight_ATG \
+ GTG * weight_GTG \
+ TTG * weight_TTG
maxprob = weight_gene_prob + weight_TIS_prob + max(weight_ATG, weight_TTG, weight_GTG)
probthresh = score_threshold * maxprob
score = (combprob - probthresh) * length
ORF_score_flat.append(score)
# update initial dictionary, removing low scoring ORFs and create score mapping score within a tuple
ORF_score_dict = {}
for i, score in enumerate(ORF_score_flat):
# if score greater than minimum, add to the ORF_score_dict
        if score is not None and score >= minimum_ORF_score:
ORF_score_dict[i] = score
return ORF_score_dict
|
32787
|
import inspect
from unittest.mock import Mock
from _pytest.monkeypatch import MonkeyPatch
from rasa.core.policies.ted_policy import TEDPolicy
from rasa.engine.training import fingerprinting
from rasa.nlu.classifiers.diet_classifier import DIETClassifier
from rasa.nlu.selectors.response_selector import ResponseSelector
from tests.engine.training.test_components import FingerprintableText
def test_fingerprint_stays_same():
key1 = fingerprinting.calculate_fingerprint_key(
TEDPolicy, TEDPolicy.get_default_config(), {"input": FingerprintableText("Hi")},
)
key2 = fingerprinting.calculate_fingerprint_key(
TEDPolicy, TEDPolicy.get_default_config(), {"input": FingerprintableText("Hi")},
)
assert key1 == key2
def test_fingerprint_changes_due_to_class():
key1 = fingerprinting.calculate_fingerprint_key(
DIETClassifier,
TEDPolicy.get_default_config(),
{"input": FingerprintableText("Hi")},
)
key2 = fingerprinting.calculate_fingerprint_key(
ResponseSelector,
TEDPolicy.get_default_config(),
{"input": FingerprintableText("Hi")},
)
assert key1 != key2
def test_fingerprint_changes_due_to_config():
key1 = fingerprinting.calculate_fingerprint_key(
TEDPolicy, {}, {"input": FingerprintableText("Hi")},
)
key2 = fingerprinting.calculate_fingerprint_key(
ResponseSelector,
TEDPolicy.get_default_config(),
{"input": FingerprintableText("Hi")},
)
assert key1 != key2
def test_fingerprint_changes_due_to_inputs():
key1 = fingerprinting.calculate_fingerprint_key(
TEDPolicy, {}, {"input": FingerprintableText("Hi")},
)
key2 = fingerprinting.calculate_fingerprint_key(
ResponseSelector,
TEDPolicy.get_default_config(),
{"input": FingerprintableText("bye")},
)
assert key1 != key2
def test_fingerprint_changes_due_to_changed_source(monkeypatch: MonkeyPatch):
key1 = fingerprinting.calculate_fingerprint_key(
TEDPolicy, {}, {"input": FingerprintableText("Hi")},
)
get_source_mock = Mock(return_value="other implementation")
monkeypatch.setattr(inspect, inspect.getsource.__name__, get_source_mock)
key2 = fingerprinting.calculate_fingerprint_key(
TEDPolicy, {}, {"input": FingerprintableText("Hi")},
)
assert key1 != key2
get_source_mock.assert_called_once_with(TEDPolicy)
|
32818
|
import torch
import torch.nn as nn
import torchvision.models as models
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
from .build import MODEL_REGISTRY
@MODEL_REGISTRY.register()
class CNNRNN(nn.Module):
def __init__(self, cfg):
super().__init__()
input_dim = 512
hidden_dim = 128
num_layers = 1
self.cnn = models.resnet50(pretrained=True)
out_features = self.cnn.fc.in_features
self.fc1 = nn.Linear(out_features, input_dim)
self.fc2 = nn.Linear(hidden_dim, 1)
self.rnn = nn.RNN(input_dim, hidden_dim, num_layers, batch_first=True)
def forward(self, vid, lengths):
B, T, *a = vid.shape
vid = vid.permute(0, 1, 4, 2, 3)
outs = []
def hook(module, input, output):
outs.append(input)
        # keep the hook handle so repeated forward passes do not accumulate hooks
        handle = self.cnn.fc.register_forward_hook(hook)
        for t in range(T):
            # print(t)
            frame = vid[:, t, :, :, :]
            out = self.cnn(frame)
        handle.remove()
if outs[0][0].ndim == 2:
outs = [ten[0].unsqueeze(0) for ten in outs]
else:
outs = [ten[0] for ten in outs]
outs = torch.cat(outs, dim=1)
outs = self.fc1(outs)
packed_seq = pack_padded_sequence(outs, lengths, batch_first=True, enforce_sorted=False)
out, hn = self.rnn(packed_seq)
padded_seq, lengths = pad_packed_sequence(out, batch_first=True)
out = self.fc2(padded_seq)
return out
|
32820
|
from os import environ as env
import json
import utils
import utils.aws as aws
import utils.handlers as handlers
def put_record_to_logstream(event: utils.LambdaEvent) -> str:
"""Put a record of source Lambda execution in LogWatch Logs."""
log_group_name = env["REPORT_LOG_GROUP_NAME"]
utils.Log.info("Fetching requestPayload and responsePayload")
req, res = event["requestPayload"], event["responsePayload"]
utils.Log.info("Fetching requestPayload content")
sns_payload = req["Records"][0]["Sns"]
message_id = sns_payload["MessageId"]
message = json.loads(sns_payload["Message"])
url, title = message["url"], message["title"]
try:
body = json.loads(res["body"])
except json.JSONDecodeError as error:
raise utils.HandledError("Failed decoding payload: %s" % error)
name, timestamp = body["name"], body["timestamp"]
if res["statusCode"] != 200:
raise utils.HandledError("Source lambda '%s' failed with status code %d, "
"ignoring report" % (name, res["statusCode"]))
return aws.send_event_to_logstream(log_group=log_group_name,
log_stream=name,
message={
"url": url,
"MessageId": message_id,
"title": title,
"timestamp": timestamp,
})
def handler(event, context) -> utils.Response:
"""Lambda entry point."""
return handlers.EventHandler(
name="send_report",
event=utils.LambdaEvent(event),
context=utils.LambdaContext(context),
action=put_record_to_logstream,
).response
|
32823
|
import os
from bc import Imitator
import numpy as np
from dataset import Example, Dataset
import utils
#from ale_wrapper import ALEInterfaceWrapper
from evaluator import Evaluator
from pdb import set_trace
import matplotlib.pyplot as plt
#try bmh
plt.style.use('bmh')
def smooth(losses, run=10):
new_losses = []
for i in range(len(losses)):
        new_losses.append(np.mean(losses[max(0, i - run):i+1]))
return new_losses
def plot(losses, checkpoint_dir, env_name):
print("Plotting losses to ", os.path.join(checkpoint_dir, env_name + "_loss.png"))
p=plt.plot(smooth(losses, 25))
plt.xlabel("Update")
plt.ylabel("Loss")
plt.legend(loc='lower center')
    plt.savefig(os.path.join(checkpoint_dir, env_name + "_loss.png"))
def train(env_name,
minimal_action_set,
learning_rate,
alpha,
l2_penalty,
minibatch_size,
hist_len,
discount,
checkpoint_dir,
updates,
dataset,
validation_dataset,
num_eval_episodes,
epsilon_greedy,
extra_info):
import tracemalloc
# create DQN agent
agent = Imitator(list(minimal_action_set),
learning_rate,
alpha,
checkpoint_dir,
hist_len,
l2_penalty)
print("Beginning training...")
log_frequency = 500
log_num = log_frequency
update = 1
running_loss = 0.
    best_v_loss = float('inf')
count = 0
while update < updates:
# snapshot = tracemalloc.take_snapshot()
# top_stats = snapshot.statistics('lineno')
# import gc
# for obj in gc.get_objects():
# try:
# if torch.is_tensor(obj) or (hasattr(obj, 'data') and torch.is_tensor(obj.data)):
# print(type(obj), obj.size())
# except:
# pass
#
# print("[ Top 10 ]")
# for stat in top_stats[:10]:
# print(stat)
if update > log_num:
print(str(update) + " updates completed. Loss {}".format(running_loss / log_frequency))
log_num += log_frequency
running_loss = 0
#run validation loss test
v_loss = agent.validate(validation_dataset, 10)
print("Validation accuracy = {}".format(v_loss / validation_dataset.size))
if v_loss > best_v_loss:
count += 1
if count > 5:
print("validation not improing for {} steps. Stopping to prevent overfitting".format(count))
break
else:
best_v_loss = v_loss
print("updating best vloss", best_v_loss)
count = 0
l = agent.train(dataset, minibatch_size)
running_loss += l
update += 1
print("Training completed.")
agent.checkpoint_network(env_name, extra_info)
#Plot losses
#Evaluation
print("beginning evaluation")
evaluator = Evaluator(env_name, num_eval_episodes, checkpoint_dir, epsilon_greedy)
evaluator.evaluate(agent)
return agent
def train_transitions(env_name,
minimal_action_set,
learning_rate,
alpha,
l2_penalty,
minibatch_size,
hist_len,
discount,
checkpoint_dir,
updates,
dataset,
num_eval_episodes):
# create DQN agent
agent = Imitator(list(minimal_action_set),
learning_rate,
alpha,
checkpoint_dir,
hist_len,
l2_penalty)
print("Beginning training...")
log_frequency = 1000
log_num = log_frequency
update = 1
running_loss = 0.
while update < updates:
if update > log_num:
print(str(update) + " updates completed. Loss {}".format(running_loss / log_frequency))
log_num += log_frequency
running_loss = 0
l = agent.train(dataset, minibatch_size)
running_loss += l
update += 1
print("Training completed.")
agent.checkpoint_network(env_name + "_transitions")
    #calculate accuracy
#Evaluation
#evaluator = Evaluator(env_name, num_eval_episodes)
#evaluator.evaluate(agent)
return agent
if __name__ == '__main__':
train()
|
32824
|
from IPython import get_ipython
from IPython.display import display
def is_ipynb():
return type(get_ipython()).__module__.startswith('ipykernel.')
|
32857
|
import pytest
@pytest.mark.order(4)
def test_four():
pass
@pytest.mark.order(3)
def test_three():
pass
|
32868
|
import math
import random
from typing import Tuple
import cv2
import numpy as np
def np_free_form_mask(
max_vertex: int, max_length: int, max_brush_width: int, max_angle: int, height: int, width: int
) -> np.ndarray:
mask = np.zeros((height, width), np.float32)
num_vertex = random.randint(0, max_vertex)
start_y = random.randint(0, height - 1)
start_x = random.randint(0, width - 1)
brush_width = 0
for i in range(num_vertex):
angle = random.random() * max_angle
angle = math.radians(angle)
if i % 2 == 0:
angle = 2 * math.pi - angle
length = random.randint(0, max_length)
brush_width = random.randint(10, max_brush_width) // 2 * 2
next_y = start_y + length * np.cos(angle)
next_x = start_x + length * np.sin(angle)
        next_y = np.maximum(np.minimum(next_y, height - 1), 0).astype(int)
        next_x = np.maximum(np.minimum(next_x, width - 1), 0).astype(int)
cv2.line(mask, (start_y, start_x), (next_y, next_x), 1, brush_width)
cv2.circle(mask, (start_y, start_x), brush_width // 2, 2)
start_y, start_x = next_y, next_x
cv2.circle(mask, (start_y, start_x), brush_width // 2, 2)
return mask
def generate_stroke_mask(
image_size: Tuple[int, int],
parts: int = 7,
max_vertex: int = 25,
max_length: int = 80,
max_brush_width: int = 80,
max_angle: int = 360,
) -> np.ndarray:
mask = np.zeros(image_size, dtype=np.float32)
for _ in range(parts):
mask = mask + np_free_form_mask(
max_vertex, max_length, max_brush_width, max_angle, image_size[0], image_size[1]
)
return np.minimum(mask, 1.0)
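# Minimal usage sketch (assumed invocation with default parameters): draw a random
# free-form stroke mask and report the fraction of pixels it covers.
# mask = generate_stroke_mask((256, 256))
# print("masked fraction:", float(mask.mean()))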
|
32892
|
import json
from pathlib import Path
from typing import Any, Dict
from git import Repo
from cruft.exceptions import CruftAlreadyPresent, NoCruftFound
CruftState = Dict[str, Any]
#######################
# Cruft related utils #
#######################
def get_cruft_file(project_dir_path: Path, exists: bool = True) -> Path:
cruft_file = project_dir_path / ".cruft.json"
if not exists and cruft_file.is_file():
raise CruftAlreadyPresent(cruft_file)
if exists and not cruft_file.is_file():
raise NoCruftFound(project_dir_path.resolve())
return cruft_file
def is_project_updated(repo: Repo, current_commit: str, latest_commit: str, strict: bool) -> bool:
return (
# If the latest commit exactly matches the current commit
latest_commit == current_commit
# Or if there have been no changes to the cookiecutter
or not repo.index.diff(current_commit)
# or if the strict flag is off, we allow for newer commits to count as up to date
or (
repo.is_ancestor(repo.commit(latest_commit), repo.commit(current_commit)) and not strict
)
)
def json_dumps(cruft_state: Dict[str, Any]) -> str:
text = json.dumps(cruft_state, ensure_ascii=False, indent=2, separators=(",", ": "))
return text + "\n"
|
32898
|
import enum
from django.db import models
from care.facility.models import FacilityBaseModel
from care.users.models import User
from django.contrib.postgres.fields import JSONField
class Notification(FacilityBaseModel):
class EventType(enum.Enum):
SYSTEM_GENERATED = 50
CUSTOM_MESSAGE = 100
EventTypeChoices = [(e.value, e.name) for e in EventType]
class Medium(enum.Enum):
SYSTEM = 0
SMS = 100
WHATSAPP = 200
MediumChoices = [(e.value, e.name) for e in Medium]
class Event(enum.Enum):
MESSAGE = 0
PATIENT_CREATED = 20
PATIENT_UPDATED = 30
PATIENT_DELETED = 40
PATIENT_CONSULTATION_CREATED = 50
PATIENT_CONSULTATION_UPDATED = 60
PATIENT_CONSULTATION_DELETED = 70
INVESTIGATION_SESSION_CREATED = 80
INVESTIGATION_UPDATED = 90
PATIENT_FILE_UPLOAD_CREATED = 100
CONSULTATION_FILE_UPLOAD_CREATED = 110
PATIENT_CONSULTATION_UPDATE_CREATED = 120
PATIENT_CONSULTATION_UPDATE_UPDATED = 130
PATIENT_CONSULTATION_ASSIGNMENT = 140
SHIFTING_UPDATED = 200
EventChoices = [(e.value, e.name) for e in Event]
intended_for = models.ForeignKey(
User, on_delete=models.SET_NULL, null=True, related_name="notification_intended_for",
)
medium_sent = models.IntegerField(choices=MediumChoices, default=Medium.SYSTEM.value)
caused_by = models.ForeignKey(User, on_delete=models.SET_NULL, null=True, related_name="notification_caused_by",)
read_at = models.DateTimeField(null=True, blank=True)
event_type = models.IntegerField(choices=EventTypeChoices, default=EventType.SYSTEM_GENERATED.value)
event = models.IntegerField(choices=EventChoices, default=Event.MESSAGE.value)
message = models.TextField(max_length=2000, null=True, default=None)
caused_objects = JSONField(null=True, blank=True, default=dict)
|
32954
|
import pickle
import numpy as np
from numpy.testing import assert_array_almost_equal, assert_array_equal
import pytest
from oddt.scoring.models import classifiers, regressors
@pytest.mark.filterwarnings('ignore:Stochastic Optimizer')
@pytest.mark.parametrize('cls',
[classifiers.svm(probability=True),
classifiers.neuralnetwork(random_state=42)])
def test_classifiers(cls):
# toy data
X = np.concatenate((np.zeros((5, 2)), np.ones((5, 2))))
Y = np.concatenate((np.ones(5), np.zeros(5)))
np.random.seed(42)
cls.fit(X, Y)
assert_array_equal(cls.predict(X), Y)
assert cls.score(X, Y) == 1.0
prob = cls.predict_proba(X)
assert_array_almost_equal(prob, [[0, 1]] * 5 + [[1, 0]] * 5, decimal=1)
log_prob = cls.predict_log_proba(X)
assert_array_almost_equal(np.log(prob), log_prob)
pickled = pickle.dumps(cls)
reloaded = pickle.loads(pickled)
prob_reloaded = reloaded.predict_proba(X)
assert_array_almost_equal(prob, prob_reloaded)
@pytest.mark.parametrize('reg',
[regressors.svm(C=10),
regressors.randomforest(random_state=42),
regressors.neuralnetwork(solver='lbfgs',
random_state=42,
hidden_layer_sizes=(20, 20)),
regressors.mlr()])
def test_regressors(reg):
X = np.vstack((np.arange(30, 10, -2, dtype='float64'),
np.arange(100, 90, -1, dtype='float64'))).T
Y = np.arange(10, dtype='float64')
np.random.seed(42)
reg.fit(X, Y)
pred = reg.predict(X)
assert (np.abs(pred.flatten() - Y) < 1).all()
assert reg.score(X, Y) > 0.9
pickled = pickle.dumps(reg)
reloaded = pickle.loads(pickled)
pred_reloaded = reloaded.predict(X)
assert_array_almost_equal(pred, pred_reloaded)
|
32991
|
from bitmovin_api_sdk.account.organizations.groups.groups_api import GroupsApi
from bitmovin_api_sdk.account.organizations.groups.tenants.tenants_api import TenantsApi
from bitmovin_api_sdk.account.organizations.groups.invitations.invitations_api import InvitationsApi
from bitmovin_api_sdk.account.organizations.groups.permissions.permissions_api import PermissionsApi
|
32993
|
import numpy as np
import random
N = 10
def null(a, rtol=1e-5):
u, s, v = np.linalg.svd(a)
rank = (s > rtol*s[0]).sum()
return rank, v[rank:].T.copy()
def gen_data(N, noisy=False):
lower = -1
upper = 1
dim = 2
X = np.random.rand(dim, N)*(upper-lower)+lower
while True:
Xsample = np.concatenate(
(np.ones((1, dim)), np.random.rand(dim, dim)*(upper-lower)+lower))
k, w = null(Xsample.T)
y = np.sign(np.dot(w.T, np.concatenate((np.ones((1, N)), X))))
if np.all(y):
break
return (X, y, w)
def change_label(y):
    idx = random.sample(range(1, N), N // 10)
y[idx] = -y[idx]
return y
if __name__ == '__main__':
X, y, w = gen_data(10)
print(X)
|
33004
|
import logging
def create_app(config=None, testing=False):
from airflow.www_rbac import app as airflow_app
app, appbuilder = airflow_app.create_app(config=config, testing=testing)
# only now we can load view..
    # this import might cause a circular dependency if placed above
from dbnd_airflow.airflow_override.dbnd_aiflow_webserver import (
use_databand_airflow_dagbag,
)
use_databand_airflow_dagbag()
logging.info("Airflow applications has been created")
return app, appbuilder
def cached_appbuilder(config=None, testing=False):
_, appbuilder = create_app(config, testing)
return appbuilder
|
33015
|
import argparse
import json
from easydict import EasyDict
def get_args():
argparser = argparse.ArgumentParser(description=__doc__)
argparser.add_argument(
'-c', '--config',
metavar='C',
default=None,
help='The Configuration file')
argparser.add_argument(
'-i', '--id',
metavar='I',
default='',
        help='The commit id')
argparser.add_argument(
'-t', '--ts',
metavar='T',
default='',
        help='The time stamp')
argparser.add_argument(
'-d', '--dir',
metavar='D',
default='',
        help='The output directory')
args = argparser.parse_args()
return args
def get_config_from_json(json_file):
# parse the configurations from the configs json file provided
with open(json_file, 'r') as config_file:
config_dict = json.load(config_file)
# convert the dictionary to a namespace using bunch lib
config = EasyDict(config_dict)
return config
def process_config(args):
config = get_config_from_json(args.config)
config.commit_id = args.id
config.time_stamp = args.ts
config.directory = args.dir
return config
if __name__ == '__main__':
config = get_config_from_json('../configs/MUTAG.json')
sub_configurations = config.configurations
print(sub_configurations['pooling'])
|
33017
|
from typing import List
from typing import Optional
from pydantic import BaseModel
class WebFingerRequest(BaseModel):
rel: Optional[str] = 'http://openid.net/specs/connect/1.0/issuer'
resource: str
class AuthorizationRequest(BaseModel):
acr_values: Optional[List[str]]
claims: Optional[dict]
claims_locales: Optional[List[str]]
client_id: str
display: Optional[str]
id_token_hint: Optional[str]
login_hint: Optional[str]
max_age: Optional[int]
nonce: Optional[str]
prompt: Optional[List[str]]
redirect_uri: str
registration: Optional[dict]
request: Optional[str]
request_uri: Optional[str]
response_mode: Optional[str]
response_type: List[str]
scope: List[str]
state: Optional[str]
ui_locales: Optional[List[str]]
|
33029
|
from . import configure, core, draw, io, interp, retrieve, qc
__all__ = ["configure", "core", "draw", "io", "interp", "qc", "retrieve"]
|
33074
|
from avatar2 import *
import sys
import os
import logging
import serial
import time
import argparse
import pyudev
import struct
import ctypes
from random import randint
# For profiling
import pstats
logging.basicConfig(filename='/tmp/inception-tests.log', level=logging.INFO)
# ****************************************************************************
def single_step(target, nb_test):
print("[*] Single step target %d times" % nb_test)
for i in range(nb_test):
pc = target.protocols.execution.read_pc()
print(pc)
target.step()
print('stepped')
next_pc = target.protocols.execution.read_pc()
print(next_pc)
# ****************************************************************************
def read_full_mem(target, nb_test, raw=True, summary=True):
print(" - Read the full memory")
nb_test = 1
average_read = 0
for i in range(nb_test):
t0 = time.time()
target.read_memory(ram.address, 1, ram.size, raw=raw)
t1 = time.time()
average_read += t1 - t0
if summary:
average_read = average_read / nb_test
speed_read = ram.size / average_read / 1024
print(" -> On average raw read of %s bytes takes %.2f sec, speed: %.2f KB/sec" % (ram.size, average_read, speed_read))
# ****************************************************************************
def write_full_mem(target, nb_test, raw=True, summary=True):
print(" - Write the full memory")
nb_test = 1
average_write = 0
buf = ctypes.create_string_buffer(ram.size)
for i in range(int(ram.size / 4)):
struct.pack_into(">I", buf, i * 4, randint(0, 0xffffffff))
for i in range(nb_test):
t0 = time.time()
target.write_memory(ram.address, 1, buf, raw=raw)
t1 = time.time()
average_write += t1 - t0
if summary:
average_write = average_write / nb_test
speed_write = ram.size / average_write / 1024
print(" -> On average raw write of %s bytes takes %.2f sec, speed: %.2f KB/sec" % (ram.size, average_write, speed_write))
# ****************************************************************************
def read_write_full_mem(target, nb_test, raw=True, summary=True):
print(" - Read and write the full memory")
reads = []
average_read_write = 0
for i in range(nb_test):
if raw:
t0 = time.time()
reads.append(target.read_memory(ram.address, 1, ram.size, raw=raw))
target.write_memory(ram.address, 1, reads[i], raw=raw)
t1 = time.time()
else:
t0 = time.time()
reads.append(target.read_memory(ram.address, 1, ram.size, raw=raw))
target.write_memory(ram.address, 1, reads[i], len(reads[i]), raw=raw)
t1 = time.time()
average_read_write += t1 - t0
if summary:
average_read_write = average_read_write / nb_test
speed_read_write = ram.size / average_read_write / 1024
print(" -> On average raw read&write of %s bytes takes %.2f sec, speed: %.2f KB/sec" % (ram.size, average_read_write, speed_read_write))
# Verify all reads are identical
for i in range(len(reads) - 1):
assert(reads[i] == reads[i+1])
#print("[!] Multiple reads produce different values !")
# ****************************************************************************
def random_read_write(target, nb_test, raw=True):
print(" - Random read / writes of random size in the ram")
for i in range(0, nb_test):
size = randint(0, int(ram.size / 8)) * 8
#size = 2**4
# Reset the board and wait to reach the breakpoint
target.reset()
target.wait()
if raw:
m1 = ctypes.create_string_buffer(size)
for j in range(int(size / 4)):
struct.pack_into(">I", m1, j * 4, randint(0, 0xFFFFFFFF))
target.write_memory(ram.address, 1, m1, raw=True)
m2 = target.read_memory(ram.address, 1, size, raw=True)
n1, n2 = ([] for i in range(2))
for j in range(int(size / 4)):
                n1.append(struct.unpack_from(">I", m1, j * 4)[0])
                n2.append(struct.unpack_from(">I", m2, j * 4)[0])
assert(n1 == n2)
#print("i=%s m1: %s m2: %s" % (i, m1.raw, m2))
#print("[!] Multiple random reads produce different values !")
else:
m1 = []
for j in range(int(size / 4)):
m1.append(randint(0, 0xFFFFFFFF))
target.write_memory(ram.address, 1, m1, size, raw=False)
m2 = target.read_memory(ram.address, 1, size, raw=False)
for j in range(int(size / 4)):
assert(m1[j] == m2[j])
#print("[!] Multiple random reads produce different values !")
#print("i=%s j=%s m1[j]: %s m2[j]: %s" % (i, j, m1[j], m2[j]))
# ****************************************************************************
def random_4bytes_read_write(target, nb_test):
print(" - Random read / writes of 4 bytes in the ram")
for i in range(nb_test):
written_word = randint(0, 0xFFFFFFFF)
address = randint(ram.address, ram.address + ram.size - 4)
target.write_memory(address, 4, written_word, 1, raw=False)
read_word = target.read_memory(address, 4, 1, raw=False)
assert(written_word == read_word)
# ****************************************************************************
def read_write_registers(target, nb_test):
print(" - Read / write registers")
regs = ['R0', 'R1', 'R2', 'R3', 'R4', 'R5', 'R6', 'R7', 'R8', 'R9', 'R10',
'R11', 'R12', 'SP', 'LR', 'PC', 'CPSR']
for i in range(nb_test):
for j in range(17):
written_reg = randint(0, 0xFFFFFFFF)
saved_reg = target.read_register(regs[j])
target.write_register(regs[j], written_reg)
read_reg = target.read_register(regs[j])
'''
if read_reg != written_reg:
print(i)
print(j)
print(hex(read_reg))
print(hex(written_reg))
'''
target.write_register(regs[j], saved_reg)
# ****************************************************************************
def transfer_state(av, target_from, target_to, nb_test, summary=True):
print(" - Transfer state")
average = 0
for i in range(nb_test):
t0 = time.time()
av.transfer_state(target_from, target_to, synced_ranges=[ram])
t1 = time.time()
average += t1 - t0
if summary:
average = average / nb_test
speed = ram.size / average / 1024
print(" -> On average transfer state from %s to %s of %s bytes takes %.2f sec, speed: %.2f KB/sec" % (target_from.name, target_to.name, ram.size, average, speed))
if __name__ == '__main__':
# Number each test is repeated
n = 2
avatar = Avatar(arch=ARMV7M, output_directory='/tmp/inception-tests')
nucleo = avatar.add_target(InceptionTarget, name='nucleo')
dum = avatar.add_target(DummyTarget, name='dum')
#qemu = avatar.add_target(QemuTarget, gdb_port=1236)
# Memory mapping of NUCLEO-L152RE
rom = avatar.add_memory_range(0x08000000, 0x1000000, 'rom',
file=firmware)
ram = avatar.add_memory_range(0x20000000, 0x14000, 'ram')
mmio = avatar.add_memory_range(0x40000000, 0x1000000,
forwarded=True, forwarded_to=nucleo)
ram = avatar.get_memory_range(0x20000000)
avatar.init_targets()
print("Targets initialized")
nucleo.reset()
nucleo.cont()
nucleo.stop()
print("Targets stopped, start tests for n = %s" % n)
print("[*] Raw read / writes tests")
read_full_mem(nucleo, n)
write_full_mem(nucleo, n)
read_write_full_mem(nucleo, n)
random_read_write(nucleo, n)
print("[*] !raw read / writes tests")
read_full_mem(nucleo, n, raw=False, summary=False)
write_full_mem(nucleo, n, raw=False, summary=False)
read_write_full_mem(nucleo, n, raw=False, summary=False)
random_read_write(nucleo, n, raw=False)
random_4bytes_read_write(nucleo, 100 * n)
print("[*] Read / Write registers")
read_write_registers(nucleo, n)
print("[*] Transfer state to dummy target")
transfer_state(avatar, nucleo, dum, n)
#Stop all threads for the profiler
print("[*] Test completed")
avatar.stop()
|
33103
|
import torch
def magic_box(x):
"""DiCE operation that saves computation graph inside tensor
See ``Implementation of DiCE'' section in the DiCE Paper for details
Args:
x (tensor): Input tensor
Returns:
1 (tensor): Tensor that has computation graph saved
References:
https://github.com/alshedivat/lola/blob/master/lola_dice/rpg.py
https://github.com/alexis-jacq/LOLA_DiCE/blob/master/ipd_DiCE.py
"""
return torch.exp(x - x.detach())
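# Minimal sketch (not part of the original module) illustrating the property that
# DiCE relies on: magic_box evaluates to exactly 1 in the forward pass, while the
# gradient of exp(x - x.detach()) with respect to x is 1, so log-probability
# dependencies still receive gradient.
def _magic_box_demo():
    logp = torch.tensor([0.3, -0.2], requires_grad=True)
    boxed = magic_box(logp.sum())
    assert torch.allclose(boxed, torch.ones(()))              # forward value is 1
    boxed.backward()
    assert torch.allclose(logp.grad, torch.ones_like(logp))   # gradient flows with weight 1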
def get_dice_loss(logprobs, reward, value, args, i_agent, is_train):
"""Compute DiCE loss
In our code, we use DiCE in the inner loop to be able to keep the dependency in the
adapted parameters. This is required in order to compute the opponent shaping term.
Args:
logprobs (list): Contains log probability of all agents
reward (list): Contains rewards across trajectories for specific agent
value (tensor): Contains value for advantage computed via linear baseline
args (argparse): Python argparse that contains arguments
i_agent (int): Agent to compute DiCE loss for
is_train (bool): Flag to identify whether in meta-train or not
Returns:
dice loss (tensor): DiCE loss with baseline reduction
References:
https://github.com/alshedivat/lola/blob/master/lola_dice/rpg.py
https://github.com/alexis-jacq/LOLA_DiCE/blob/master/ipd_DiCE.py
"""
# Get discounted_reward
reward = torch.stack(reward, dim=1)
cum_discount = torch.cumprod(args.discount * torch.ones(*reward.size()), dim=1) / args.discount
discounted_reward = reward * cum_discount
# Compute stochastic nodes involved in reward dependencies
if args.opponent_shaping and is_train:
logprob_sum, stochastic_nodes = 0., 0.
for logprob in logprobs:
logprob = torch.stack(logprob, dim=1)
logprob_sum += logprob
stochastic_nodes += logprob
dependencies = torch.cumsum(logprob_sum, dim=1)
else:
logprob = torch.stack(logprobs[i_agent], dim=1)
dependencies = torch.cumsum(logprob, dim=1)
stochastic_nodes = logprob
# Get DiCE loss
dice_loss = torch.mean(torch.sum(magic_box(dependencies) * discounted_reward, dim=1))
# Apply variance_reduction if value is provided
baseline_term = 0.
if value is not None:
discounted_value = value.detach() * cum_discount
baseline_term = torch.mean(torch.sum((1 - magic_box(stochastic_nodes)) * discounted_value, dim=1))
return -(dice_loss + baseline_term)
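# Minimal usage sketch (hypothetical shapes and settings, single agent, no
# opponent shaping): logprobs[i_agent] and reward are per-step lists of
# (batch,)-shaped tensors, as produced by a rollout of length T.
# import argparse
# args = argparse.Namespace(discount=0.96, opponent_shaping=False)
# T, batch = 5, 4
# logprobs = [[torch.randn(batch, requires_grad=True) for _ in range(T)]]
# reward = [torch.randn(batch) for _ in range(T)]
# loss = get_dice_loss(logprobs, reward, value=None, args=args, i_agent=0, is_train=False)
# loss.backward()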
|
33140
|
from openem.models import ImageModel
from openem.models import Preprocessor
import cv2
import numpy as np
import tensorflow as tf
from collections import namedtuple
import csv
Detection=namedtuple('Detection', ['location',
'confidence',
'species',
'frame',
'video_id'])
# Bring in SSD detector to top-level
from openem.Detect.SSD import SSDDetector
class IO:
    @staticmethod
    def from_csv(filepath_like):
detections=[]
with open(filepath_like, 'r') as csv_file:
reader = csv.DictReader(csv_file)
last_idx = -1
for row in reader:
location=np.array([float(row['x']),
float(row['y']),
float(row['w']),
float(row['h'])])
item = Detection(location=location,
confidence=float(row['detection_conf']),
species=int(float(row['detection_species'])),
frame=int(row['frame']),
video_id=row['video_id'])
frame_num = int(float(row['frame']))
if last_idx == frame_num:
detections[last_idx].append(item)
else:
# Add empties
for _ in range(frame_num-1-last_idx):
detections.append([])
detections.append([item])
last_idx = frame_num
return detections
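# Illustrative usage (hypothetical file name): detections[i] is the list of
# Detection tuples found in frame i, with empty lists for frames that had none.
# detections = IO.from_csv("video_detections.csv")
# frames_with_detections = sum(1 for frame_dets in detections if frame_dets)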
|
33144
|
import pytest
grblas = pytest.importorskip("grblas")
from metagraph.tests.util import default_plugin_resolver
from . import RoundTripper
from metagraph.plugins.numpy.types import NumpyMatrixType
from metagraph.plugins.graphblas.types import GrblasMatrixType
import numpy as np
def test_matrix_roundtrip_dense_square(default_plugin_resolver):
rt = RoundTripper(default_plugin_resolver)
mat = np.array([[1.1, 2.2, 3.3], [3.3, 3.3, 9.9], [3.3, 0.0, -3.3]])
rt.verify_round_trip(mat)
rt.verify_round_trip(mat.astype(int))
rt.verify_round_trip(mat.astype(bool))
def test_matrix_roundtrip_dense_rect(default_plugin_resolver):
rt = RoundTripper(default_plugin_resolver)
mat = np.array(
[[1.1, 2.2, 3.3], [3.3, 3.3, 9.9], [3.3, 0.0, -3.3], [-1.1, 2.7, 3.3]]
)
rt.verify_round_trip(mat)
rt.verify_round_trip(mat.astype(int))
rt.verify_round_trip(mat.astype(bool))
def test_numpy_2_grblas(default_plugin_resolver):
dpr = default_plugin_resolver
x = np.array([[1, 2, 3], [3, 3, 9], [3, 0, 3], [4, 2, 2]])
assert x.shape == (4, 3)
# Convert numpy -> grblas.Matrix
intermediate = grblas.Matrix.from_values(
[0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3],
[0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2],
[1, 2, 3, 3, 3, 9, 3, 0, 3, 4, 2, 2],
nrows=4,
ncols=3,
dtype=grblas.dtypes.INT64,
)
y = dpr.translate(x, grblas.Matrix)
dpr.assert_equal(y, intermediate)
# Convert numpy <- grblas.Matrix
x2 = dpr.translate(y, NumpyMatrixType)
dpr.assert_equal(x, x2)
|
33166
|
import os
import sys
import torch
from torch import nn
from torch.nn import functional as F, init
from src.utils import bernoulli_log_pdf
from src.objectives.elbo import \
log_bernoulli_marginal_estimate_sets
class Statistician(nn.Module):
def __init__(self, c_dim, z_dim, hidden_dim_statistic=3, hidden_dim=400):
super(Statistician, self).__init__()
self.c_dim = c_dim
self.z_dim = z_dim
self.hidden_dim_statistic = hidden_dim_statistic
self.hidden_dim = hidden_dim
self.input_dim = 784
self.statistic_net = LinearStatisticNetwork(
self.input_dim, self.c_dim, hidden_dim=self.hidden_dim_statistic)
self.inference_net = LinearInferenceNetwork(
self.input_dim, self.c_dim, self.z_dim,
hidden_dim=self.hidden_dim)
self.latent_decoder = LinearLatentDecoder(
self.input_dim, self.c_dim, self.z_dim,
hidden_dim=self.hidden_dim)
self.observation_decoder = LinearObservationDecoder(
self.input_dim, self.c_dim, self.z_dim,
hidden_dim=self.hidden_dim)
# initialize weights
self.apply(self.weights_init)
def forward(self, x):
batch_size, n_samples = x.size(0), x.size(1)
x = x.view(batch_size, n_samples, self.input_dim)
c_mean, c_logvar = self.statistic_net(x)
c = self.reparameterize_gaussian(c_mean, c_logvar)
qz_mu, qz_logvar = self.inference_net(x, c)
qz_mu = qz_mu.view(batch_size, -1, self.z_dim)
qz_logvar = qz_logvar.view(batch_size, -1, self.z_dim)
z = self.reparameterize_gaussian(qz_mu, qz_logvar)
qz_params = [qz_mu, qz_logvar]
cz_mu, cz_logvar = self.latent_decoder(c)
pz_params = [cz_mu, cz_logvar]
x_mu = self.observation_decoder(z, c)
outputs = (
(c_mean, c_logvar),
(qz_params, pz_params),
(x, x_mu),
)
return outputs
def bernoulli_elbo_loss_sets(self, outputs, reduce=True):
c_outputs, z_outputs, x_outputs = outputs
# 1. reconstruction loss
x, x_mu = x_outputs
recon_loss = bernoulli_log_pdf(x, x_mu) # (n_datasets, batch_size)
# a) Context divergence: this is the positive D_KL
c_mu, c_logvar = c_outputs
kl_c = -0.5 * (1 + c_logvar - c_mu.pow(2) - c_logvar.exp())
kl_c = torch.sum(kl_c, dim=-1) # (n_datasets)
# b) Latent divergence: this is also the positive D_KL
qz_params, pz_params = z_outputs
# this is kl(q_z||p_z)
p_mu, p_logvar = pz_params
q_mu, q_logvar = qz_params
# the dimensions won't line up, so you'll need to broadcast!
p_mu = p_mu.unsqueeze(1).expand_as(q_mu)
p_logvar = p_logvar.unsqueeze(1).expand_as(q_logvar)
kl_z = 0.5 * (p_logvar - q_logvar + ((q_mu - p_mu)**2 + q_logvar.exp())/p_logvar.exp() - 1)
kl_z = torch.sum(kl_z, dim=-1) # (n_datasets, batch_size)
# THESE ARE ALSO UNNORMALIZED!!!
ELBO = -recon_loss + kl_z # these will both be (n_datasets, batch_size)
ELBO = ELBO.sum(-1) / x.size()[1] # averaging over (batch_size == self.sample_size)
ELBO = ELBO + kl_c # now this is (n_datasets,)
if reduce:
return torch.mean(ELBO) # averaging over (n_datasets)
else:
return ELBO # (n_datasets)
def estimate_marginal(self, x, n_samples=100):
# need to compute a bunch of outputs
with torch.no_grad():
elbo_list = []
for i in range(n_samples):
outputs = self.forward(x)
elbo = self.bernoulli_elbo_loss_sets(outputs, reduce=False)
elbo_list.append(elbo)
# bernoulli decoder
log_p_x = log_bernoulli_marginal_estimate_sets(elbo_list)
return log_p_x
@staticmethod
def reparameterize_gaussian(mean, logvar):
std = torch.exp(0.5 * logvar)
eps = torch.randn_like(std)
return eps.mul(std).add_(mean)
@staticmethod
def weights_init(m):
if isinstance(m, (nn.Linear, nn.Conv2d)):
            # use the in-place initialisers (the non-underscore variants are deprecated)
            init.xavier_normal_(m.weight.data, gain=init.calculate_gain('relu'))
            init.constant_(m.bias.data, 0)
elif isinstance(m, nn.BatchNorm1d):
pass
def extract_codes(self, x):
batch_size, n_samples = x.size(0), x.size(1)
x = x.view(batch_size, n_samples, self.input_dim)
c_mean, c_logvar = self.statistic_net(x)
c = self.reparameterize_gaussian(c_mean, c_logvar)
z_mu, _ = self.inference_net(x, c)
return z_mu
class LinearStatisticNetwork(nn.Module):
def __init__(self, n_features, c_dim, hidden_dim=128):
super(LinearStatisticNetwork, self).__init__()
self.n_features = n_features
self.hidden_dim = hidden_dim
self.c_dim = c_dim
self.prepool = PrePool(self.n_features, self.hidden_dim)
self.postpool = PostPool(self.hidden_dim, self.c_dim)
def forward(self, h):
batch_size = h.size(0)
e = self.prepool(h)
e = e.view(batch_size, -1, self.hidden_dim)
e = self.pool(e)
e = self.postpool(e)
return e
def pool(self, e):
"""
average pooling WITHIN each dataset!
"""
e = e.mean(1).view(-1, self.hidden_dim)
return e
class LinearInferenceNetwork(nn.Module):
def __init__(self, n_features, c_dim, z_dim, hidden_dim=128):
super(LinearInferenceNetwork, self).__init__()
self.n_features = n_features
self.hidden_dim = hidden_dim
self.c_dim = c_dim
self.z_dim = z_dim
self.fc_h = nn.Linear(self.n_features, self.hidden_dim)
self.fc_c = nn.Linear(self.c_dim, self.hidden_dim)
self.fc1 = nn.Linear(2 * self.hidden_dim, self.hidden_dim)
self.fc_params = nn.Linear(self.hidden_dim, 2 * self.z_dim)
def forward(self, h, c):
batch_size = h.size(0)
eh = h.view(-1, self.n_features) # embed h
eh = self.fc_h(eh)
eh = eh.view(batch_size, -1, self.hidden_dim)
ec = self.fc_c(c)
ec = ec.view(batch_size, -1, self.hidden_dim).expand_as(eh)
e = torch.cat([eh, ec], dim=2)
e = F.elu(e.view(-1, 2 * self.hidden_dim))
e = F.elu(self.fc1(e))
e = self.fc_params(e)
mean, logvar = torch.chunk(e, 2, dim=1)
return mean, logvar
class LinearLatentDecoder(nn.Module):
def __init__(self, n_features, c_dim, z_dim, hidden_dim=128):
super(LinearLatentDecoder, self).__init__()
self.n_features = n_features
self.hidden_dim = hidden_dim
self.c_dim = c_dim
self.z_dim = z_dim
self.fc_c = nn.Linear(self.c_dim, self.hidden_dim)
self.fc1 = nn.Linear(self.hidden_dim, self.hidden_dim)
self.fc_params = nn.Linear(self.hidden_dim, 2 * self.z_dim)
def forward(self, c):
batch_size = c.size(0)
ec = self.fc_c(c)
ec = ec.view(batch_size, -1, self.hidden_dim)
e = F.elu(ec.view(-1, self.hidden_dim))
e = F.elu(self.fc1(e))
e = self.fc_params(e)
mean, logvar = torch.chunk(e, 2, dim=1)
return mean, logvar
class LinearObservationDecoder(nn.Module):
def __init__(self, n_features, c_dim, z_dim, hidden_dim=128):
super(LinearObservationDecoder, self).__init__()
self.n_features = n_features
self.hidden_dim = hidden_dim
self.c_dim = c_dim
self.z_dim = z_dim
self.fc_z = nn.Linear(self.z_dim, self.hidden_dim)
self.fc_c = nn.Linear(self.c_dim, self.hidden_dim)
self.fc_initial = nn.Linear(2 * self.hidden_dim, 256 * 4 * 4)
self.fc3 = nn.Linear(256 * 4 * 4, 784)
def forward(self, z, c):
batch_size = z.size(0)
ez = self.fc_z(z)
ez = ez.view(batch_size, -1, self.hidden_dim)
ec = self.fc_c(c)
ec = ec.view(batch_size, -1, self.hidden_dim).expand_as(ez)
e = torch.cat([ez, ec], dim=2)
e = F.elu(e)
e = e.view(-1, 2 * self.hidden_dim)
e = F.elu(self.fc_initial(e))
e = self.fc3(e)
e = e.view(batch_size, -1, 784)
e = torch.sigmoid(e)
return e
class PrePool(nn.Module):
def __init__(self, n_features, hidden_dim):
super(PrePool, self).__init__()
self.n_features = n_features
self.hidden_dim = hidden_dim
# modules: 1 fc layer
self.fc = nn.Linear(self.n_features, self.hidden_dim)
def forward(self, h):
# reshape and affine
e = h.view(-1, self.n_features) # batch_size * sample_size
e = F.elu(self.fc(e))
return e
class PostPool(nn.Module):
def __init__(self, hidden_dim, c_dim):
super(PostPool, self).__init__()
self.hidden_dim = hidden_dim
self.c_dim = c_dim
self.fc_params = nn.Linear(self.hidden_dim, 2 * self.c_dim)
def forward(self, e):
e = self.fc_params(e)
mean, logvar = torch.chunk(e, 2, dim=1)
return mean, logvar
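# --- usage sketch (not part of the original module; shapes are assumptions) ---
# A minimal smoke test: 8 datasets of 5 binarised 28x28 images each.
if __name__ == '__main__':
    model = Statistician(c_dim=64, z_dim=32)
    x = torch.rand(8, 5, 784).round()
    outputs = model(x)
    loss = model.bernoulli_elbo_loss_sets(outputs)
    print('ELBO loss:', loss.item())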
|
33192
|
import numpy as np
import pandas as pd
from pandas.util import testing as pdt
import pytest
from spandex import TableFrame
from spandex.io import db_to_df, df_to_db
def test_tableframe(loader):
table = loader.tables.sample.hf_bg
for cache in [False, True]:
tf = TableFrame(table, index_col='gid', cache=cache)
assert isinstance(tf.index, pd.Index)
num_rows = len(tf)
assert num_rows > 1
assert set(tf.columns) == set(table.__table__.columns.keys())
for column_name in tf.columns:
if column_name != 'gid':
if cache:
assert column_name not in tf._cached.keys()
assert isinstance(tf[column_name], pd.Series)
if cache:
assert column_name in tf._cached.keys()
assert isinstance(getattr(tf, column_name), pd.Series)
df = tf[['objectid']]
assert isinstance(df, pd.DataFrame)
assert len(df) == num_rows
assert set(df.columns) == set(['objectid'])
assert np.issubdtype(df.objectid.dtype, int)
def test_sim_export(loader):
# Try importing the UrbanSim simulation framework, otherwise skip test.
sim = pytest.importorskip('urbansim.sim.simulation')
# Register input parcels table.
parcels = loader.tables.sample.heather_farms
parcels_in = TableFrame(parcels, index_col='gid')
sim.add_table('parcels_in', parcels_in, copy_col=False)
# Register output parcels table.
@sim.table()
def parcels_out(parcels_in):
return pd.DataFrame(index=parcels_in.parcel_id)
# Specify default table for output columns as decorator.
out = sim.column('parcels_out')
# Specify some output columns.
@out
def apn(apn='parcels_in.puid'):
return apn.groupby(parcels_in.parcel_id).first().astype(str)
@out
def county_id():
return 13
@out
def area(acr='parcels_in.parcel_acr'):
return 4047. * acr.groupby(parcels_in.parcel_id).median()
# Register model to export output table to database.
@sim.model()
def export(parcels_out):
schema = loader.tables.sample
df_to_db(parcels_out.to_frame(), 'parcels_out', schema=schema)
# Inspect output table.
column_names = ['apn', 'county_id', 'area']
parcels_out_df1 = sim.get_table('parcels_out').to_frame()
assert set(parcels_out_df1.columns) == set(column_names)
assert parcels_out_df1.county_id.unique() == [13]
# Export table to database and import back to compare.
sim.run(['export'])
parcels_out_table = loader.tables.sample.parcels_out
parcels_out_df2 = db_to_df(parcels_out_table, index_col='parcel_id')
pdt.assert_frame_equal(parcels_out_df1[column_names],
parcels_out_df2[column_names])
|
33200
|
print("linear search")
si = int(input("\nEnter the size:"))
data = list()
for i in range(0, si):
    n = int(input())
    data.append(n)
cot = 0
print("\nEnter the number you want to search:")
val = int(input())
# linear search: count how many elements are passed over before the value is found
for i in range(0, len(data)):
    if data[i] == val:
        break
    else:
        cot = cot + 1
print(cot)  # linear search comparison count
# binary search (requires sorted data, so sort the input first)
print("\nBinary Search")
data.sort()
cot = 0
beg = 0
end = len(data) - 1
mid = int((beg + end) / 2)
while beg <= end and val != data[mid]:
    if val > data[mid]:
        beg = mid + 1
    else:
        end = mid - 1
    mid = int((beg + end) / 2)
    cot = cot + 1
if data and val == data[mid]:
    print("\nDATA FOUND")
print(cot)  # binary search comparison count
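# --- optional refactor sketch (not part of the original exercise) ---
# The same two searches written as reusable functions that return the index
# of the target (or -1) together with the number of comparisons made.
def linear_search(items, target):
    comparisons = 0
    for index, item in enumerate(items):
        comparisons += 1
        if item == target:
            return index, comparisons
    return -1, comparisons

def binary_search(sorted_items, target):
    beg, end, comparisons = 0, len(sorted_items) - 1, 0
    while beg <= end:
        mid = (beg + end) // 2
        comparisons += 1
        if sorted_items[mid] == target:
            return mid, comparisons
        if target > sorted_items[mid]:
            beg = mid + 1
        else:
            end = mid - 1
    return -1, comparisons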
|
33206
|
from dataclasses import dataclass
from app import crud
from app.schemas import UserCreate, SlackEventHook
from app.settings import REACTION_LIST, DAY_MAX_REACTION
# about reaction
REMOVED_REACTION = 'reaction_removed'
ADDED_REACTION = 'reaction_added'
APP_MENTION_REACTION = 'app_mention'
# about command
CREATE_USER_COMMAND = 'create_user'
@dataclass
class EventDto:
type: str # ex: reaction_added
    user: str  # slack_id of the user who gave the reaction
    item: dict  # type, channel, ts
    reaction: str  # the reaction (emoji)
    item_user: str  # slack_id of the user who received the reaction
event_ts: str
text: str # app mention text
def __init__(self, event_data):
self.type = event_data.get('type')
self.user = event_data.get('user')
self.item = event_data.get('item')
self.reaction = event_data.get('reaction')
self.item_user = event_data.get('item_user')
self.event_ts = event_data.get('event_ts')
self.text = event_data.get('text')
@dataclass
class AddUserCommandDto:
name: str
slack_id: str
avatar_url: str
def __init__(self, name: str, slack_id: str, avatar_url: str):
        # str.strip() removes a character *set* and can eat the value itself,
        # so take everything after the first '=' instead
        self.name = name.split('=', 1)[-1]
        self.slack_id = slack_id.split('=', 1)[-1]
        self.avatar_url = avatar_url.split('=', 1)[-1]
class SlackService(object):
def check_challenge(self, event: SlackEventHook, db) -> dict:
# slack Enable Events
if 'challenge' in event:
return {"challenge": event['challenge']}
# check slack event
if "event" in event:
event_dto = EventDto(event['event'])
if event_dto.type in [ADDED_REACTION, REMOVED_REACTION]:
                # a reaction can only be given to someone else, not to yourself
if event_dto.item_user != event_dto.user:
self.assign_emoji(event_dto, db)
elif event_dto.type == APP_MENTION_REACTION:
self.manage_app_mention(event_dto, db)
return {}
def assign_emoji(self, event: EventDto, db):
"""
reaction process
"""
if event.reaction not in REACTION_LIST:
return
if event.type == ADDED_REACTION:
user = crud.get_user(db, event.user)
            # check that this user still has reactions left to give today
if user.my_reaction > 0:
crud.update_my_reaction(db, user, False)
crud.update_added_reaction(db=db, type=event.reaction, item_user=event.item_user,
user=event.user, is_increase=True)
elif event.type == REMOVED_REACTION:
user = crud.get_user(db, event.user)
            # a reaction previously given to a member is being removed (do not restore beyond the daily maximum)
if user.my_reaction < DAY_MAX_REACTION:
crud.update_my_reaction(db, user, True)
crud.update_added_reaction(db=db, type=event.reaction, item_user=event.item_user,
user=event.user, is_increase=False)
def manage_app_mention(self, event: EventDto, db):
"""
        Dispatch an app-mention command to the matching handler.
ex: <@ABCDEFG> --create_user --name=JAY --slack_id=ABCDEFG --avatar_url=https://blablac.com/abcd
"""
event_command = event.text.split('--')
        event_command.pop(0)  # the first element is the mentioning user's slack_id
if not event_command:
return
_type = event_command.pop(0).strip(' ')
if _type == CREATE_USER_COMMAND:
if len(event_command) == 3:
add_user_cmd_dto = AddUserCommandDto(event_command[0], event_command[1], event_command[2])
self.add_user(add_user_cmd_dto, db)
def add_user(self, add_user_cmd_dto: AddUserCommandDto, db):
"""
        Handle the add-user command.
"""
db_user = crud.get_user(db, item_user=add_user_cmd_dto.slack_id)
if db_user:
return
user = UserCreate(username=add_user_cmd_dto.name, slack_id=add_user_cmd_dto.slack_id,
using_emoji_count=DAY_MAX_REACTION, get_emoji_count=0,
avatar_url=add_user_cmd_dto.avatar_url)
crud.create_user(db=db, user=user)
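# --- illustrative payload (assumption, mirrors Slack's documented event format) ---
# SlackService.check_challenge expects a body shaped roughly like:
#   {
#       "event": {
#           "type": "reaction_added",
#           "user": "U_GIVER",          # slack_id of the member giving the emoji
#           "item_user": "U_RECEIVER",  # slack_id of the member receiving it
#           "reaction": "heart",
#           "item": {"type": "message", "channel": "C123", "ts": "1620000000.000100"},
#           "event_ts": "1620000000.000100",
#       }
#   }
# plus a database session as `db`; Slack URL-verification requests instead carry a
# top-level "challenge" key, which is echoed straight back.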
|
33249
|
import os
import re
import yaml
for root, dirs, files in os.walk("."):
for file in files:
njk = os.path.join(root, file)
if njk.endswith(".njk"):
            # use a distinct name for the file handle so it does not shadow the loop variable
            with open(njk, "r") as handle:
                lines = handle.read().split("\n")
if not(lines[0].startswith("---")):
continue
end = 1
while not(lines[end].startswith("---")):
end += 1
meta = yaml.safe_load("\n".join(lines[1:end]))
field = "ogDescription"
if not(field in meta) or not("shortTitle" in meta):
continue
meta[field] = (meta["shortTitle"] +
" is a famous and most played DOS game that now is available to play in browser. With virtual" +
" mobile controls you also can play in " + meta["shortTitle"] +
" on mobile. On DOS.Zone " + meta["shortTitle"] + " available to play for free without registration.")
meta = yaml.dump(meta, default_flow_style=False, allow_unicode=True)
lines = [lines[0]] + meta.split("\n") + lines[end:]
            with open(njk, "w") as handle:
                handle.write("\n".join(lines))
|
33285
|
import sys
try:
import uos as os
except ImportError:
import os
if not hasattr(os, "unlink"):
print("SKIP")
sys.exit()
# cleanup in case testfile exists
try:
os.unlink("testfile")
except OSError:
pass
try:
f = open("testfile", "r+b")
print("Unexpectedly opened non-existing file")
except OSError:
print("Expected OSError")
pass
f = open("testfile", "w+b")
f.write(b"1234567890")
f.seek(0)
print(f.read())
f.close()
# Open with truncation
f = open("testfile", "w+b")
f.write(b"abcdefg")
f.seek(0)
print(f.read())
f.close()
# Open without truncation
f = open("testfile", "r+b")
f.write(b"1234")
f.seek(0)
print(f.read())
f.close()
# cleanup
try:
os.unlink("testfile")
except OSError:
pass
|
33364
|
def f(x):
if x:
x = 1
else:
x = 'zero'
y = x
return y
f(1)
|
33379
|
import autograd.numpy as anp
import numpy as np
from autograd import value_and_grad
from pymoo.factory import normalize
from pymoo.util.ref_dirs.energy import squared_dist
from pymoo.util.ref_dirs.optimizer import Adam
from pymoo.util.reference_direction import ReferenceDirectionFactory, scale_reference_directions
class LayerwiseRieszEnergyReferenceDirectionFactory(ReferenceDirectionFactory):
def __init__(self,
n_dim,
partitions,
return_as_tuple=False,
n_max_iter=1000,
verbose=False,
X=None,
**kwargs):
super().__init__(n_dim, **kwargs)
self.scalings = None
self.n_max_iter = n_max_iter
self.verbose = verbose
self.return_as_tuple = return_as_tuple
self.X = X
self.partitions = partitions
def _step(self, optimizer, X, scalings):
obj, grad = value_and_grad(calc_potential_energy)(scalings, X)
scalings = optimizer.next(scalings, np.array(grad))
scalings = normalize(scalings, xl=0, xu=scalings.max())
return scalings, obj
def _solve(self, X, scalings):
# initialize the optimizer for the run
optimizer = Adam()
# for each iteration of gradient descent
for i in range(self.n_max_iter):
# execute one optimization step
_scalings, _obj = self._step(optimizer, X, scalings)
# evaluate how much the points have moved
delta = np.abs(_scalings - scalings).sum()
if self.verbose:
print(i, "objective", _obj, "delta", delta)
# if there was only a little delta during the last iteration -> terminate
if delta < 1e-5:
scalings = _scalings
break
# otherwise use the new points for the next iteration
scalings = _scalings
self.scalings = scalings
return get_points(X, scalings)
def do(self):
X = []
scalings = []
for k, p in enumerate(self.partitions):
if p > 1:
val = np.linspace(0, 1, p + 1)[1:-1]
_X = []
for i in range(self.n_dim):
for j in range(i + 1, self.n_dim):
x = np.zeros((len(val), self.n_dim))
x[:, i] = val
x[:, j] = 1 - val
_X.append(x)
X.append(np.row_stack(_X + [np.eye(self.n_dim)]))
elif p == 1:
X.append(np.eye(self.n_dim))
else:
X.append(np.full(self.n_dim, 1 / self.n_dim)[None, :])
scalings.append(1 - k / len(self.partitions))
scalings = np.array(scalings)
X = self._solve(X, scalings)
return X
# ---------------------------------------------------------------------------------------------------------
# Energy Functions
# ---------------------------------------------------------------------------------------------------------
def get_points(X, scalings):
vals = []
for i in range(len(X)):
vals.append(scale_reference_directions(X[i], scalings[i]))
X = anp.row_stack(vals)
return X
def calc_potential_energy(scalings, X):
X = get_points(X, scalings)
i, j = anp.triu_indices(len(X), 1)
D = squared_dist(X, X)[i, j]
if np.any(D < 1e-12):
return np.nan, np.nan
return (1 / D).mean()
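# --- usage sketch (assumed entry point, not part of the original module) ---
# Build layered reference directions for a 3-objective problem; do() runs the
# gradient-based optimisation of the per-layer scalings.
if __name__ == "__main__":
    factory = LayerwiseRieszEnergyReferenceDirectionFactory(
        n_dim=3, partitions=[12, 6, 2], verbose=True)
    ref_dirs = factory.do()
    print(ref_dirs.shape)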
|
33406
|
import torch
from torch import Tensor
from torch.utils.data import Dataset
from torchvision import io
from pathlib import Path
from typing import Tuple
from torchvision import transforms as T
class CelebAMaskHQ(Dataset):
CLASSES = [
'background', 'skin', 'nose', 'eye_g', 'l_eye', 'r_eye', 'l_brow', 'r_brow', 'l_ear',
'r_ear', 'mouth', 'u_lip', 'l_lip', 'hair', 'hat', 'ear_r', 'neck_l', 'neck', 'cloth'
]
PALETTE = torch.tensor([
[0, 0, 0], [204, 0, 0], [76, 153, 0], [204, 204, 0], [51, 51, 255], [204, 0, 204], [0, 255, 255], [255, 204, 204], [102, 51, 0], [255, 0, 0],
[102, 204, 0], [255, 255, 0], [0, 0, 153], [0, 0, 204], [255, 51, 153], [0, 204, 204], [0, 51, 0], [255, 153, 51], [0, 204, 0]
])
def __init__(self, root: str, split: str = 'train', transform = None) -> None:
super().__init__()
assert split in ['train', 'val', 'test']
self.root = Path(root)
self.transform = transform
self.n_classes = len(self.CLASSES)
self.ignore_label = 255
self.resize = T.Resize((512, 512))
with open(self.root / f'{split}_list.txt') as f:
self.files = f.read().splitlines()
if not self.files:
raise Exception(f"No images found in {root}")
print(f"Found {len(self.files)} {split} images.")
def __len__(self) -> int:
return len(self.files)
def __getitem__(self, index: int) -> Tuple[Tensor, Tensor]:
img_path = self.root / 'CelebA-HQ-img' / f"{self.files[index]}.jpg"
lbl_path = self.root / 'CelebAMask-HQ-label' / f"{self.files[index]}.png"
image = io.read_image(str(img_path))
image = self.resize(image)
label = io.read_image(str(lbl_path))
if self.transform:
image, label = self.transform(image, label)
return image, label.squeeze().long()
if __name__ == '__main__':
from semseg.utils.visualize import visualize_dataset_sample
visualize_dataset_sample(CelebAMaskHQ, '/home/sithu/datasets/CelebAMask-HQ')
|
33459
|
import _config
import _utils
CONFIG_PATH = "config.toml"
def main():
config = _config.read(CONFIG_PATH)
for path_name in [x.in_ for x in config.directorios]:
_utils.list_jpg_files_in_dir(path_name)
if __name__ == "__main__":
main()
|
33501
|
from pyfasta import Fasta
def writebed(probelist, outbedfile):
'''probe list format:
chr\tstart\tend
'''
outio = open(outbedfile, 'w')
for pbnow in probelist:
print(pbnow, file=outio)
outio.close()
def writefa(genomefile, bedfile, outfile):
fastafile = Fasta(genomefile)
bedio = open(bedfile, 'r')
outio = open(outfile, 'w')
for lin in bedio.readlines():
lin = lin.rstrip()
chrnow, start, end = lin.split('\t')
seqid = '>' + chrnow + ':' + start + '-' + end
nowseq = fastafile[chrnow][int(start):int(end)]
print(seqid, file=outio)
print(nowseq, file=outio)
bedio.close()
outio.close()
# return True
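# --- usage sketch (file names below are placeholders, not from the source) ---
if __name__ == '__main__':
    probes = ['chr1\t100\t220', 'chr2\t5000\t5120']
    writebed(probes, 'probes.bed')
    writefa('genome.fa', 'probes.bed', 'probes.fa')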
|
33503
|
import rclpy
from rclpy.action import ActionClient
from rclpy.node import Node
from action_tutorials_interfaces.action import Fibonacci
class FibonacciActionClient(Node):
def __init__(self):
super().__init__('fibonacci_action_client')
self._action_client = ActionClient(self, Fibonacci, 'fibonacci')
def send_goal(self, order):
goal_msg = Fibonacci.Goal()
goal_msg.order = order
self._action_client.wait_for_server()
return self._action_client.send_goal_async(goal_msg)
def main(args=None):
rclpy.init(args=args)
action_client = FibonacciActionClient()
future = action_client.send_goal(10)
rclpy.spin_until_future_complete(action_client, future)
if __name__ == '__main__':
main()
|
33545
|
from winrt.windows.media.control import GlobalSystemMediaTransportControlsSessionManager
from winrt.windows.storage.streams import DataReader, Buffer, InputStreamOptions
async def get_current_session():
"""
current_session.try_play_async()
current_session.try_pause_async()
current_session.try_toggle_play_pause()
current_session.try_change_shuffle_active()
current_session.try_skip_next()
current_session.try_skip_previous()
current_session.try_stop()
"""
sessions = await GlobalSystemMediaTransportControlsSessionManager.request_async()
return sessions.get_current_session()
async def get_media_info():
current_session = await get_current_session()
if current_session:
media_props = await current_session.try_get_media_properties_async()
return {
song_attr: media_props.__getattribute__(song_attr)
for song_attr in dir(media_props)
if song_attr[0] != '_'
}
async def read_stream_into_buffer(thumbnail_ref) -> bytearray:
buffer = Buffer(5000000)
readable_stream = await thumbnail_ref.open_read_async()
    # the read is asynchronous, so it must be awaited before the buffer is consumed
    await readable_stream.read_async(buffer, buffer.capacity, InputStreamOptions.READ_AHEAD)
buffer_reader = DataReader.from_buffer(buffer)
thumbnail_buffer = buffer_reader.read_bytes(buffer.length)
return bytearray(thumbnail_buffer)
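# --- usage sketch (assumed driver code, not part of the original module) ---
if __name__ == '__main__':
    import asyncio
    info = asyncio.run(get_media_info())
    print(info.get('title') if info else 'No active media session')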
|
33564
|
from collections import namedtuple
import json, logging, socket, re, struct, time
from typing import Tuple, Iterator
from urllib.parse import urlparse, parse_qs
from backend import Backend, Change
from protocol import PacketType, recvall, PKT_CHANGE_TYPES, change_from_packet, packet_from_change, send_packet, recv_packet
# Total number of reconnection tries
RECONNECT_TRIES=5
# Delay in seconds between reconnections (initial)
RECONNECT_DELAY=5
# Scale delay factor after each failure
RECONNECT_DELAY_BACKOFF=1.5
HostPortInfo = namedtuple('HostPortInfo', ['host', 'port', 'addrtype'])
SocketURLInfo = namedtuple('SocketURLInfo', ['target', 'proxytype', 'proxytarget'])
# Network address type.
class AddrType:
IPv4 = 0
IPv6 = 1
NAME = 2
# Proxy type. Only SOCKS5 supported at the moment as this is sufficient for Tor.
class ProxyType:
DIRECT = 0
SOCKS5 = 1
def parse_host_port(path: str) -> HostPortInfo:
'''Parse a host:port pair.'''
if path.startswith('['): # bracketed IPv6 address
eidx = path.find(']')
if eidx == -1:
raise ValueError('Unterminated bracketed host address.')
host = path[1:eidx]
addrtype = AddrType.IPv6
eidx += 1
if eidx >= len(path) or path[eidx] != ':':
raise ValueError('Port number missing.')
eidx += 1
else:
eidx = path.find(':')
if eidx == -1:
raise ValueError('Port number missing.')
host = path[0:eidx]
        if re.match(r'\d+\.\d+\.\d+\.\d+$', host):  # matches IPv4 address format
addrtype = AddrType.IPv4
else:
addrtype = AddrType.NAME
eidx += 1
try:
port = int(path[eidx:])
except ValueError:
raise ValueError('Invalid port number')
return HostPortInfo(host=host, port=port, addrtype=addrtype)
def parse_socket_url(destination: str) -> SocketURLInfo:
'''Parse a socket: URL to extract the information contained in it.'''
url = urlparse(destination)
if url.scheme != 'socket':
raise ValueError('Scheme for socket backend must be socket:...')
target = parse_host_port(url.path)
proxytype = ProxyType.DIRECT
proxytarget = None
# parse query parameters
# reject unknown parameters (currently all of them)
qs = parse_qs(url.query)
for (key, values) in qs.items():
if key == 'proxy': # proxy=socks5:127.0.0.1:9050
if len(values) != 1:
raise ValueError('Proxy can only have one value')
(ptype, ptarget) = values[0].split(':', 1)
if ptype != 'socks5':
raise ValueError('Unknown proxy type ' + ptype)
proxytype = ProxyType.SOCKS5
proxytarget = parse_host_port(ptarget)
else:
raise ValueError('Unknown query string parameter ' + key)
return SocketURLInfo(target=target, proxytype=proxytype, proxytarget=proxytarget)
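# Illustrative destination URLs (examples only, not from the source):
#   socket:127.0.0.1:1234                                       -> direct IPv4 connection
#   socket:[::1]:1234                                           -> direct IPv6 connection (bracketed host)
#   socket:backup.example.org:1234?proxy=socks5:127.0.0.1:9050  -> hostname reached via a SOCKS5 proxy (e.g. Tor)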
class SocketBackend(Backend):
def __init__(self, destination: str, create: bool):
self.version = None
self.prev_version = None
self.destination = destination
self.url = parse_socket_url(destination)
self.connect()
def connect(self):
if self.url.proxytype == ProxyType.DIRECT:
if self.url.target.addrtype == AddrType.IPv6:
self.sock = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
else: # TODO NAME is assumed to be IPv4 for now
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
else:
assert(self.url.proxytype == ProxyType.SOCKS5)
import socks
self.sock = socks.socksocket()
self.sock.set_proxy(socks.SOCKS5, self.url.proxytarget.host, self.url.proxytarget.port)
logging.info('Connecting to {}:{} (addrtype {}, proxytype {}, proxytarget {})...'.format(
self.url.target.host, self.url.target.port, self.url.target.addrtype,
self.url.proxytype, self.url.proxytarget))
self.sock.connect((self.url.target.host, self.url.target.port))
logging.info('Connected to {}'.format(self.destination))
def _send_packet(self, typ: int, payload: bytes) -> None:
send_packet(self.sock, typ, payload)
def _recv_packet(self) -> Tuple[int, bytes]:
return recv_packet(self.sock)
def initialize(self) -> bool:
'''
        Initialize the socket backend by requesting current metadata from the server.
'''
logging.info('Initializing backend')
self._request_metadata()
logging.info('Initialized SocketBackend: protocol={}, version={}, prev_version={}, version_count={}'.format(
self.protocol, self.version, self.prev_version, self.version_count
))
return True
def _request_metadata(self) -> None:
self._send_packet(PacketType.REQ_METADATA, b'')
(typ, payload) = self._recv_packet()
assert(typ == PacketType.METADATA)
self.protocol, self.version, self.prev_version, self.version_count = struct.unpack("!IIIQ", payload)
def add_change(self, entry: Change) -> bool:
typ, payload = packet_from_change(entry)
base_version = self.version
retry = 0
retry_delay = RECONNECT_DELAY
need_connect = False
while True: # Retry loop
try:
if need_connect:
self.connect()
# Request metadata, to know where we stand
self._request_metadata()
if self.version == entry.version:
# If the current version at the server side matches the version of the
                        # entry, the packet was successfully sent and processed and the error
# happened afterward. Nothing left to do.
return True
elif base_version == self.version:
# The other acceptable option is that the current version still matches
# that on the server side. Then we retry.
pass
else:
raise Exception('Unexpected backup version {} after reconnect'.format(self.version))
self._send_packet(typ, payload)
# Wait for change to be acknowledged before continuing.
(typ, _) = self._recv_packet()
assert(typ == PacketType.ACK)
except (BrokenPipeError, OSError):
pass
else:
break
if retry == RECONNECT_TRIES:
logging.error('Connection was lost while sending change (giving up after {} retries)'.format(retry))
raise IOError('Connection was lost while sending change')
retry += 1
logging.warning('Connection was lost while sending change (retry {} of {}, will try again after {} seconds)'.format(retry, RECONNECT_TRIES, retry_delay))
time.sleep(retry_delay)
retry_delay *= RECONNECT_DELAY_BACKOFF
need_connect = True
self.prev_version = self.version
self.version = entry.version
return True
def rewind(self) -> bool:
'''Rewind to previous version.'''
version = struct.pack("!I", self.prev_version)
self._send_packet(PacketType.REWIND, version)
# Wait for change to be acknowledged before continuing.
(typ, _) = self._recv_packet()
assert(typ == PacketType.ACK)
return True
def stream_changes(self) -> Iterator[Change]:
self._send_packet(PacketType.RESTORE, b'')
version = -1
while True:
(typ, payload) = self._recv_packet()
if typ in PKT_CHANGE_TYPES:
change = change_from_packet(typ, payload)
version = change.version
yield change
elif typ == PacketType.DONE:
break
else:
raise ValueError("Unknown entry type {}".format(typ))
if version != self.version:
raise ValueError("Versions do not match up: restored version {}, backend version {}".format(version, self.version))
assert(version == self.version)
def compact(self):
self._send_packet(PacketType.COMPACT, b'')
(typ, payload) = self._recv_packet()
assert(typ == PacketType.COMPACT_RES)
return json.loads(payload.decode())
|
33569
|
from ..factory import Method
class setBotUpdatesStatus(Method):
pending_update_count = None # type: "int32"
error_message = None # type: "string"
|
33589
|
from abc import ABCMeta, abstractmethod
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from argparse import ArgumentParser, Namespace
class CtlCommand(metaclass=ABCMeta):
"""Implements a subcommand of grouper-ctl."""
@staticmethod
@abstractmethod
def add_arguments(parser):
# type: (ArgumentParser) -> None
"""Add the arguments for this command to the provided parser."""
pass
@abstractmethod
def run(self, args):
# type: (Namespace) -> None
"""Run a command with some arguments."""
pass
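# --- illustrative subclass (hypothetical command, not part of grouper-ctl) ---
# Sketch of how a concrete command fills in the two abstract hooks.
class EchoCommand(CtlCommand):
    """Echo a message back to the caller (example only)."""

    @staticmethod
    def add_arguments(parser):
        # type: (ArgumentParser) -> None
        parser.add_argument("message", help="Text to echo")

    def run(self, args):
        # type: (Namespace) -> None
        print(args.message)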
|
33602
|
import time
from time import sleep
def print_elapsed_time(exit_event):
start_time = time.time()
while True:
if exit_event.is_set():
break
print(f'Running for {round(time.time() - start_time)} s', end='\r')
sleep(1)
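# --- usage sketch (assumed driver code, not part of the original module) ---
if __name__ == '__main__':
    import threading
    exit_event = threading.Event()
    reporter = threading.Thread(target=print_elapsed_time, args=(exit_event,))
    reporter.start()
    sleep(5)          # stand-in for the real work being timed
    exit_event.set()  # signals the reporting loop to stop
    reporter.join()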
|
33609
|
import os
import tensorflow as tf
from BertLibrary.bert_predictor import BertPredictor
from BertLibrary.bert_trainer import BertTrainer
from BertLibrary.bert_evaluator import BertEvaluator
from tensorflow.estimator import Estimator
from tensorflow.estimator import RunConfig
from BertLibrary.bert.run_classifier import *
import BertLibrary.bert.modeling as modeling
import BertLibrary.bert.tokenization as tokenization
class BertModel:
def __init__(self,
model_dir,
ckpt_name,
do_lower_case,
max_seq_len,
batch_size,
labels,
trainable=True,
keep_checkpoint_max=5,
config=None):
self.model_dir = model_dir
self.bert_config, self.vocab_file, \
self.init_checkpoint = self.get_model_configs(model_dir, ckpt_name)
self.do_lower_case = do_lower_case
self.max_seq_len = max_seq_len
self.batch_size = batch_size
self.processer = None
self.keep_checkpoint_max = keep_checkpoint_max
self.labels = labels
self.config = config if config else None
self.predictor = None
self.trainable = trainable
def build(self, model_fn_args, config_args):
config = self.get_config(**config_args)
model_fn = self.get_model_fn(**model_fn_args)
self.estimator = Estimator(
model_fn=model_fn,
config=config,
params={'batch_size': self.batch_size})
self.tokenizer = tokenization.FullTokenizer(
vocab_file=self.vocab_file, do_lower_case=self.do_lower_case)
def get_model_configs(self, base_dir, ckpt_name):
bert_config_file = os.path.join(base_dir, 'bert_config.json')
vocab_file = os.path.join(base_dir, 'vocab.txt')
init_checkpoint = os.path.join(base_dir, ckpt_name)
bert_config = modeling.BertConfig.from_json_file(bert_config_file)
return bert_config, vocab_file, init_checkpoint
def get_config(self, ckpt_output_dir='./output', save_check_steps=1000):
if not self.config:
self.config = tf.ConfigProto(device_count={'GPU': 1})
self.config.gpu_options.allow_growth = True
self.config.gpu_options.per_process_gpu_memory_fraction = 0.5
run_config = RunConfig(
model_dir=ckpt_output_dir,
session_config=self.config,
keep_checkpoint_max=self.keep_checkpoint_max,
save_checkpoints_steps=save_check_steps)
return run_config
def get_predictor(self):
return BertPredictor(self.estimator, self.processer, self.config)
def get_trainer(self):
assert self.trainable, 'This model cannot be trained'
return BertTrainer(self)
def get_evaluator(self, iter_steps=1000):
return BertEvaluator(self, iter_steps=iter_steps)
    def get_model_fn(self, *args):
        # subclasses must build and return the estimator model_fn
        raise NotImplementedError()
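# --- illustrative subclass sketch (hypothetical, not part of this library) ---
# BertModel leaves get_model_fn() abstract; a classification subclass would
# typically build it from the BERT run_classifier helpers, roughly:
#
#   class BertClassifierModel(BertModel):
#       def get_model_fn(self, learning_rate, num_train_steps, num_warmup_steps):
#           return model_fn_builder(
#               bert_config=self.bert_config,
#               num_labels=len(self.labels),
#               init_checkpoint=self.init_checkpoint,
#               learning_rate=learning_rate,
#               num_train_steps=num_train_steps,
#               num_warmup_steps=num_warmup_steps,
#               use_tpu=False,
#               use_one_hot_embeddings=False)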
|
33631
|
import itertools
import logging
import netCDF4
import numpy
from .. import core
from ..constants import masked as cfdm_masked
from ..decorators import (
_inplace_enabled,
_inplace_enabled_define_and_cleanup,
_manage_log_level_via_verbosity,
)
from ..functions import abspath
from ..mixin.container import Container
from ..mixin.netcdf import NetCDFHDF5
from . import NumpyArray, abstract
logger = logging.getLogger(__name__)
class Data(Container, NetCDFHDF5, core.Data):
"""An orthogonal multidimensional array with masking and units.
.. versionadded:: (cfdm) 1.7.0
"""
def __init__(
self,
array=None,
units=None,
calendar=None,
fill_value=None,
source=None,
copy=True,
dtype=None,
mask=None,
_use_array=True,
**kwargs,
):
"""**Initialisation**
:Parameters:
array: data_like, optional
The array of values.
{{data_like}}
Ignored if the *source* parameter is set.
*Parameter example:*
``array=[34.6]``
*Parameter example:*
``array=[[1, 2], [3, 4]]``
*Parameter example:*
``array=numpy.ma.arange(10).reshape(2, 1, 5)``
units: `str`, optional
The physical units of the data. Ignored if the *source*
parameter is set.
The units may also be set after initialisation with the
`set_units` method.
*Parameter example:*
``units='km hr-1'``
*Parameter example:*
``units='days since 2018-12-01'``
calendar: `str`, optional
The calendar for reference time units. Ignored if the
*source* parameter is set.
The calendar may also be set after initialisation with the
`set_calendar` method.
*Parameter example:*
``calendar='360_day'``
fill_value: optional
The fill value of the data. By default, or if set to
`None`, the `numpy` fill value appropriate to the array's
data type will be used (see
`numpy.ma.default_fill_value`). Ignored if the *source*
parameter is set.
The fill value may also be set after initialisation with
the `set_fill_value` method.
*Parameter example:*
``fill_value=-999.``
dtype: data-type, optional
The desired data-type for the data. By default the
data-type will be inferred form the *array* parameter.
The data-type may also be set after initialisation
with the `dtype` attribute.
*Parameter example:*
``dtype=float``
*Parameter example:*
``dtype='float32'``
*Parameter example:*
``dtype=numpy.dtype('i2')``
mask: data_like, optional
Apply this mask to the data given by the *array*
parameter. By default, or if *mask* is `None`, no mask
is applied. May be any data_like object that
broadcasts to *array*. Masking will be carried out
where mask elements evaluate to `True`.
{{data_like}}
This mask will applied in addition to any mask already
defined by the *array* parameter.
source: optional
Initialise the array, units, calendar and fill value
from those of *source*.
{{init source}}
copy: `bool`, optional
If False then do not deep copy input parameters prior
to initialisation. By default arguments are deep
copied.
kwargs: ignored
Not used. Present to facilitate subclassing.
"""
if dtype is not None:
if isinstance(array, abstract.Array):
array = array.array
elif not isinstance(array, numpy.ndarray):
array = numpy.asanyarray(array)
array = array.astype(dtype)
array = NumpyArray(array)
if mask is not None:
if isinstance(array, abstract.Array):
array = array.array
elif not isinstance(array, numpy.ndarray):
array = numpy.asanyarray(array)
array = numpy.ma.array(array, mask=mask)
array = NumpyArray(array)
super().__init__(
array=array,
units=units,
calendar=calendar,
fill_value=fill_value,
source=source,
copy=copy,
_use_array=_use_array,
)
self._initialise_netcdf(source)
def __array__(self, *dtype):
"""The numpy array interface.
.. versionadded:: (cfdm) 1.7.0
:Parameters:
dtype: optional
Typecode or data-type to which the array is cast.
:Returns:
`numpy.ndarray`
An independent numpy array of the data.
**Examples:**
>>> d = {{package}}.{{class}}([1, 2, 3])
>>> a = numpy.array(d)
>>> print(type(a))
<class 'numpy.ndarray'>
>>> a[0] = -99
>>> d
<{{repr}}{{class}}(3): [1, 2, 3]>
>>> b = numpy.array(d, float)
>>> print(b)
[1. 2. 3.]
"""
array = self.array
if not dtype:
return array
else:
return array.astype(dtype[0], copy=False)
def __repr__(self):
"""Called by the `repr` built-in function.
x.__repr__() <==> repr(x)
"""
try:
shape = self.shape
except AttributeError:
shape = ""
else:
shape = str(shape)
shape = shape.replace(",)", ")")
        return f"<{self.__class__.__name__}{shape}: {self}>"
def __format__(self, format_spec):
"""Interpret format specifiers for size 1 arrays.
**Examples:**
>>> d = {{package}}.{{class}}(9, 'metres')
>>> f"{d}"
'9 metres'
>>> f"{d!s}"
'9 metres'
>>> f"{d!r}"
'<{{repr}}{{class}}(): 9 metres>'
>>> f"{d:.3f}"
'9.000'
>>> d = {{package}}.{{class}}([[9]], 'metres')
>>> f"{d}"
'[[9]] metres'
>>> f"{d!s}"
'[[9]] metres'
>>> f"{d!r}"
'<{{repr}}{{class}}(1, 1): [[9]] metres>'
>>> f"{d:.3f}"
'9.000'
>>> d = {{package}}.{{class}}([9, 10], 'metres')
        >>> f"{d}"
        '[9, 10] metres'
        >>> f"{d!s}"
        '[9, 10] metres'
>>> f"{d!r}"
'<{{repr}}{{class}}(2): [9, 10] metres>'
>>> f"{d:.3f}"
Traceback (most recent call last):
...
ValueError: Can't format Data array of size 2 with format code .3f
"""
if not format_spec:
return super().__format__("")
n = self.size
if n == 1:
return "{x:{f}}".format(x=self.first_element(), f=format_spec)
raise ValueError(
f"Can't format Data array of size {n} with "
f"format code {format_spec}"
)
def __getitem__(self, indices):
"""Return a subspace of the data defined by indices.
d.__getitem__(indices) <==> d[indices]
Indexing follows rules that are very similar to the numpy indexing
rules, the only differences being:
* An integer index i takes the i-th element but does not reduce
the rank by one.
* When two or more dimensions' indices are sequences of integers
then these indices work independently along each dimension
(similar to the way vector subscripts work in Fortran). This is
the same behaviour as indexing on a Variable object of the
netCDF4 package.
.. versionadded:: (cfdm) 1.7.0
.. seealso:: `__setitem__`, `_parse_indices`
:Returns:
`{{class}}`
The subspace of the data.
**Examples:**
>>> d = {{package}}.{{class}}(numpy.arange(100, 190).reshape(1, 10, 9))
>>> d.shape
(1, 10, 9)
>>> d[:, :, 1].shape
(1, 10, 1)
>>> d[:, 0].shape
(1, 1, 9)
>>> d[..., 6:3:-1, 3:6].shape
(1, 3, 3)
>>> d[0, [2, 9], [4, 8]].shape
(1, 2, 2)
>>> d[0, :, -2].shape
(1, 10, 1)
"""
indices = self._parse_indices(indices)
array = self._get_Array(None)
if array is None:
raise ValueError("No array!!")
array = array[tuple(indices)]
out = self.copy(array=False)
out._set_Array(array, copy=False)
if out.shape != self.shape:
# Delete hdf5 chunksizes
out.nc_clear_hdf5_chunksizes()
return out
def __int__(self):
"""Called by the `int` built-in function.
x.__int__() <==> int(x)
"""
if self.size != 1:
raise TypeError(
"only length-1 arrays can be converted to "
f"Python scalars. Got {self}"
)
return int(self.array)
def __iter__(self):
"""Called when an iterator is required.
x.__iter__() <==> iter(x)
**Examples:**
>>> d = {{package}}.{{class}}([1, 2, 3], 'metres')
>>> for e in d:
... print(repr(e))
...
1
2
3
>>> d = {{package}}.{{class}}([[1, 2], [4, 5]], 'metres')
>>> for e in d:
... print(repr(e))
...
<{{repr}}Data(2): [1, 2] metres>
<{{repr}}Data(2): [4, 5] metres>
>>> d = {{package}}.{{class}}(34, 'metres')
>>> for e in d:
... print(repr(e))
Traceback (most recent call last):
...
TypeError: Iteration over 0-d Data
"""
ndim = self.ndim
if not ndim:
raise TypeError(f"Iteration over 0-d {self.__class__.__name__}")
if ndim == 1:
i = iter(self.array)
while 1:
try:
yield next(i)
except StopIteration:
return
else:
# ndim > 1
for n in range(self.shape[0]):
out = self[n, ...]
out.squeeze(0, inplace=True)
yield out
def __setitem__(self, indices, value):
"""Assign to data elements defined by indices.
d.__setitem__(indices, x) <==> d[indices]=x
Indexing follows rules that are very similar to the numpy indexing
rules, the only differences being:
* An integer index i takes the i-th element but does not reduce
the rank by one.
* When two or more dimensions' indices are sequences of integers
then these indices work independently along each dimension
(similar to the way vector subscripts work in Fortran). This is
the same behaviour as indexing on a Variable object of the
netCDF4 package.
**Broadcasting**
The value, or values, being assigned must be broadcastable to the
shape defined by the indices, using the numpy broadcasting rules.
**Missing data**
Data array elements may be set to missing values by assigning them
to `masked`. Missing values may be unmasked by assigning them to
any other value.
.. versionadded:: (cfdm) 1.7.0
.. seealso:: `__getitem__`, `_parse_indices`
:Returns:
`None`
**Examples:**
>>> d = {{package}}.{{class}}(numpy.arange(100, 190).reshape(1, 10, 9))
>>> d.shape
(1, 10, 9)
>>> d[:, :, 1] = -10
>>> d[:, 0] = range(9)
>>> d[..., 6:3:-1, 3:6] = numpy.arange(-18, -9).reshape(3, 3)
>>> d[0, [2, 9], [4, 8]] = {{package}}.{{class}}([[-2, -3]])
>>> d[0, :, -2] = {{package}}.masked
"""
indices = self._parse_indices(indices)
array = self.array
if value is cfdm_masked or numpy.ma.isMA(value):
# The data is not masked but the assignment is masking
# elements, so turn the non-masked array into a masked
# one.
array = array.view(numpy.ma.MaskedArray)
self._set_subspace(array, indices, numpy.asanyarray(value))
self._set_Array(array, copy=False)
def __str__(self):
"""Called by the `str` built-in function.
x.__str__() <==> str(x)
"""
units = self.get_units(None)
calendar = self.get_calendar(None)
isreftime = False
if units is not None:
if isinstance(units, str):
isreftime = "since" in units
else:
units = "??"
try:
first = self.first_element()
except Exception:
out = ""
if units and not isreftime:
out += f" {units}"
if calendar:
out += f" {calendar}"
return out
size = self.size
shape = self.shape
ndim = self.ndim
open_brackets = "[" * ndim
close_brackets = "]" * ndim
mask = [False, False, False]
if size == 1:
if isreftime:
# Convert reference time to date-time
if first is numpy.ma.masked:
first = 0
mask[0] = True
try:
first = type(self)(
numpy.ma.array(first, mask=mask[0]), units, calendar
).datetime_array
except (ValueError, OverflowError):
first = "??"
out = f"{open_brackets}{first}{close_brackets}"
else:
last = self.last_element()
if isreftime:
if last is numpy.ma.masked:
last = 0
mask[-1] = True
# Convert reference times to date-times
try:
first, last = type(self)(
numpy.ma.array(
[first, last], mask=(mask[0], mask[-1])
),
units,
calendar,
).datetime_array
except (ValueError, OverflowError):
first, last = ("??", "??")
if size > 3:
out = f"{open_brackets}{first}, ..., {last}{close_brackets}"
elif shape[-1:] == (3,):
middle = self.second_element()
if isreftime:
# Convert reference time to date-time
if middle is numpy.ma.masked:
middle = 0
mask[1] = True
try:
middle = type(self)(
numpy.ma.array(middle, mask=mask[1]),
units,
calendar,
).datetime_array
except (ValueError, OverflowError):
middle = "??"
out = (
f"{open_brackets}{first}, {middle}, {last}{close_brackets}"
)
elif size == 3:
out = f"{open_brackets}{first}, ..., {last}{close_brackets}"
else:
out = f"{open_brackets}{first}, {last}{close_brackets}"
if isreftime:
if calendar:
out += f" {calendar}"
elif units:
out += f" {units}"
return out
# ----------------------------------------------------------------
# Private methods
# ----------------------------------------------------------------
def _item(self, index):
"""Return an element of the data as a scalar.
It is assumed, but not checked, that the given index selects
exactly one element.
:Parameters:
index:
:Returns:
The selected element of the data.
**Examples:**
>>> d = {{package}}.{{class}}([[1, 2, 3]], 'km')
>>> x = d._item((0, -1))
>>> print(x, type(x))
3 <class 'int'>
>>> x = d._item((0, 1))
>>> print(x, type(x))
2 <class 'int'>
>>> d[0, 1] = {{package}}.masked
>>> d._item((slice(None), slice(1, 2)))
masked
"""
array = self[index].array
if not numpy.ma.isMA(array):
return array.item()
mask = array.mask
if mask is numpy.ma.nomask or not mask.item():
return array.item()
return numpy.ma.masked
def _parse_axes(self, axes):
"""Parses the data axes and returns valid non-duplicate axes.
:Parameters:
axes: (sequence of) `int`
The axes of the data.
{{axes int examples}}
:Returns:
`tuple`
**Examples:**
>>> d._parse_axes(1)
(1,)
>>> e._parse_axes([0, 2])
(0, 2)
"""
if axes is None:
return axes
ndim = self.ndim
if isinstance(axes, int):
axes = (axes,)
axes2 = []
for axis in axes:
if 0 <= axis < ndim:
axes2.append(axis)
elif -ndim <= axis < 0:
axes2.append(axis + ndim)
else:
raise ValueError(f"Invalid axis: {axis!r}")
# Check for duplicate axes
n = len(axes2)
if n > len(set(axes2)) >= 1:
raise ValueError(f"Duplicate axis: {axes2}")
return tuple(axes2)
def _set_Array(self, array, copy=True):
"""Set the array.
.. seealso:: `_set_CompressedArray`
:Parameters:
array: `numpy` array_like or `Array`, optional
The array to be inserted.
:Returns:
`None`
**Examples:**
>>> d._set_Array(a)
"""
if not isinstance(array, abstract.Array):
if not isinstance(array, numpy.ndarray):
array = numpy.asanyarray(array)
array = NumpyArray(array)
super()._set_Array(array, copy=copy)
def _set_CompressedArray(self, array, copy=True):
"""Set the compressed array.
.. versionadded:: (cfdm) 1.7.11
.. seealso:: `_set_Array`
:Parameters:
array: subclass of `CompressedArray`
The compressed array to be inserted.
:Returns:
`None`
**Examples:**
>>> d._set_CompressedArray(a)
"""
self._set_Array(array, copy=copy)
@classmethod
def _set_subspace(cls, array, indices, value):
"""Set a subspace of the data array defined by indices."""
axes_with_list_indices = [
i for i, x in enumerate(indices) if not isinstance(x, slice)
]
if len(axes_with_list_indices) < 2:
# --------------------------------------------------------
# At most one axis has a list-of-integers index so we can
# do a normal numpy assignment
# --------------------------------------------------------
array[tuple(indices)] = value
else:
# --------------------------------------------------------
# At least two axes have list-of-integers indices so we
# can't do a normal numpy assignment
# --------------------------------------------------------
indices1 = indices[:]
for i, x in enumerate(indices):
if i in axes_with_list_indices:
# This index is a list of integers
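                    # Pair up the integer indices and turn each pair into a
                    # two-element slice (start, stop + 1, stop - start), with a
                    # lone trailing index becoming a one-element slice, e.g.
                    # [1, 4, 9] -> [slice(1, 5, 3), slice(9, 10)]. The
                    # cross-product of these slices below reproduces
                    # orthogonal (Fortran-style) indexing.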
y = []
args = [iter(x)] * 2
for start, stop in itertools.zip_longest(*args):
if not stop:
y.append(slice(start, start + 1))
else:
step = stop - start
stop += 1
y.append(slice(start, stop, step))
indices1[i] = y
else:
indices1[i] = (x,)
if numpy.size(value) == 1:
for i in itertools.product(*indices1):
array[i] = value
else:
indices2 = []
ndim_difference = array.ndim - numpy.ndim(value)
for i, n in enumerate(numpy.shape(value)):
if n == 1:
indices2.append((slice(None),))
elif i + ndim_difference in axes_with_list_indices:
y = []
start = 0
while start < n:
stop = start + 2
y.append(slice(start, stop))
start = stop
indices2.append(y)
else:
indices2.append((slice(None),))
for i, j in zip(
itertools.product(*indices1), itertools.product(*indices2)
):
array[i] = value[j]
# ----------------------------------------------------------------
# Attributes
# ----------------------------------------------------------------
@property
def compressed_array(self):
"""Returns an independent numpy array of the compressed data.
.. versionadded:: (cfdm) 1.7.0
.. seealso:: `get_compressed_axes`, `get_compressed_dimension`,
`get_compression_type`
:Returns:
`numpy.ndarray`
An independent numpy array of the compressed data.
**Examples:**
>>> a = d.compressed_array
"""
ca = self._get_Array(None)
if not ca.get_compression_type():
raise ValueError("not compressed: can't get compressed array")
return ca.compressed_array
@property
def datetime_array(self):
"""Returns an independent numpy array of datetimes.
Specifically, returns an independent numpy array containing
the date-time objects corresponding to times since a reference
date.
Only applicable for reference time units.
If the calendar has not been set then the CF default calendar of
'standard' (i.e. the mixed Gregorian/Julian calendar as defined by
Udunits) will be used.
Conversions are carried out with the `netCDF4.num2date` function.
.. versionadded:: (cfdm) 1.7.0
.. seealso:: `array`, `datetime_as_string`
:Returns:
`numpy.ndarray`
An independent numpy array of the date-time objects.
**Examples:**
>>> d = {{package}}.{{class}}([31, 62, 90], units='days since 2018-12-01')
>>> a = d.datetime_array
>>> print(a)
[cftime.DatetimeGregorian(2019, 1, 1, 0, 0, 0, 0)
cftime.DatetimeGregorian(2019, 2, 1, 0, 0, 0, 0)
cftime.DatetimeGregorian(2019, 3, 1, 0, 0, 0, 0)]
>>> print(a[1])
2019-02-01 00:00:00
>>> d = {{package}}.{{class}}(
... [31, 62, 90], units='days since 2018-12-01', calendar='360_day')
>>> a = d.datetime_array
>>> print(a)
[cftime.Datetime360Day(2019, 1, 2, 0, 0, 0, 0)
cftime.Datetime360Day(2019, 2, 3, 0, 0, 0, 0)
cftime.Datetime360Day(2019, 3, 1, 0, 0, 0, 0)]
>>> print(a[1])
2019-02-03 00:00:00
"""
array = self.array
mask = None
if numpy.ma.isMA(array):
# num2date has issues if the mask is nomask
mask = array.mask
if mask is numpy.ma.nomask or not numpy.ma.is_masked(array):
mask = None
array = array.view(numpy.ndarray)
if mask is not None and not array.ndim:
            # Fix until num2date copes with scalar arrays containing
# missing data
return array
array = netCDF4.num2date(
array,
units=self.get_units(None),
calendar=self.get_calendar("standard"),
only_use_cftime_datetimes=True,
)
if mask is None:
# There is no missing data
array = numpy.array(array, dtype=object)
else:
# There is missing data
array = numpy.ma.masked_where(mask, array)
if not numpy.ndim(array):
array = numpy.ma.masked_all((), dtype=object)
return array
@property
def datetime_as_string(self):
"""Returns an independent numpy array with datetimes as strings.
Specifically, returns an independent numpy array containing
string representations of times since a reference date.
Only applicable for reference time units.
If the calendar has not been set then the CF default calendar of
"standard" (i.e. the mixed Gregorian/Julian calendar as defined by
Udunits) will be used.
Conversions are carried out with the `netCDF4.num2date` function.
.. versionadded:: (cfdm) 1.8.0
.. seealso:: `array`, `datetime_array`
:Returns:
`numpy.ndarray`
An independent numpy array of the date-time strings.
**Examples:**
>>> d = {{package}}.{{class}}([31, 62, 90], units='days since 2018-12-01')
>>> print(d.datetime_as_string)
['2019-01-01 00:00:00' '2019-02-01 00:00:00' '2019-03-01 00:00:00']
>>> d = {{package}}.{{class}}(
... [31, 62, 90], units='days since 2018-12-01', calendar='360_day')
>>> print(d.datetime_as_string)
['2019-01-02 00:00:00' '2019-02-03 00:00:00' '2019-03-01 00:00:00']
"""
return self.datetime_array.astype(str)
@property
def mask(self):
"""The Boolean missing data mask of the data array.
The Boolean mask has True where the data array has missing data
and False otherwise.
:Returns:
`{{class}}`
The Boolean mask as data.
**Examples:**
>>> d = {{package}}.{{class}}(numpy.ma.array(
... [[280.0, -99, -99, -99],
... [281.0, 279.0, 278.0, 279.5]],
... mask=[[0, 1, 1, 1], [0, 0, 0, 0]]
... ))
>>> d
<{{repr}}Data(2, 4): [[280.0, ..., 279.5]]>
>>> print(d.array)
[[280.0 -- -- --]
[281.0 279.0 278.0 279.5]]
>>> d.mask
<{{repr}}Data(2, 4): [[False, ..., False]]>
>>> print(d.mask.array)
[[False True True True]
[False False False False]]
"""
return type(self)(numpy.ma.getmaskarray(self.array))
# ----------------------------------------------------------------
# Methods
# ----------------------------------------------------------------
def any(self):
"""Test whether any data array elements evaluate to True.
Performs a logical or over the data array and returns the
result. Masked values are considered as False during computation.
:Returns:
`bool`
`True` if any data array elements evaluate to True,
otherwise `False`.
**Examples:**
>>> d = {{package}}.{{class}}([[0, 0, 0]])
>>> d.any()
False
>>> d[0, 0] = {{package}}.masked
>>> print(d.array)
[[-- 0 0]]
>>> d.any()
False
>>> d[0, 1] = 3
>>> print(d.array)
[[-- 3 0]]
>>> d.any()
True
>>> d[...] = {{package}}.masked
>>> print(d.array)
[[-- -- --]]
>>> d.any()
False
"""
masked = self.array.any()
if masked is numpy.ma.masked:
masked = False
return masked
@_inplace_enabled(default=False)
def apply_masking(
self,
fill_values=None,
valid_min=None,
valid_max=None,
valid_range=None,
inplace=False,
):
"""Apply masking.
Masking is applied according to the values of the keyword
parameters.
Elements that are already masked remain so.
.. versionadded:: (cfdm) 1.8.2
.. seealso:: `get_fill_value`, `mask`
:Parameters:
fill_values: `bool` or sequence of scalars, optional
Specify values that will be set to missing data. Data
elements exactly equal to any of the values are set to
missing data.
If True then the value returned by the `get_fill_value`
method, if such a value exists, is used.
Zero or more values may be provided in a sequence of
scalars.
*Parameter example:*
Specify a fill value of 999: ``fill_values=[999]``
*Parameter example:*
Specify fill values of 999 and -1.0e30:
``fill_values=[999, -1.0e30]``
*Parameter example:*
Use the fill value already set for the data:
``fill_values=True``
*Parameter example:*
Use no fill values: ``fill_values=False`` or
``fill_value=[]``
valid_min: number, optional
A scalar specifying the minimum valid value. Data elements
strictly less than this number will be set to missing
data.
valid_max: number, optional
A scalar specifying the maximum valid value. Data elements
strictly greater than this number will be set to missing
data.
valid_range: (number, number), optional
A vector of two numbers specifying the minimum and maximum
valid values, equivalent to specifying values for both
*valid_min* and *valid_max* parameters. The *valid_range*
parameter must not be set if either *valid_min* or
*valid_max* is defined.
*Parameter example:*
``valid_range=[-999, 10000]`` is equivalent to setting
``valid_min=-999, valid_max=10000``
inplace: `bool`, optional
If True then do the operation in-place and return `None`.
:Returns:
`{{class}}` or `None`
The data with masked values. If the operation was in-place
then `None` is returned.
**Examples:**
>>> d = {{package}}.{{class}}(numpy.arange(12).reshape(3, 4), 'm')
>>> d[1, 1] = {{package}}.masked
>>> print(d.array)
[[0 1 2 3]
[4 -- 6 7]
[8 9 10 11]]
>>> print(d.apply_masking().array)
[[0 1 2 3]
[4 -- 6 7]
[8 9 10 11]]
>>> print(d.apply_masking(fill_values=[0]).array)
[[-- 1 2 3]
[4 -- 6 7]
[8 9 10 11]]
>>> print(d.apply_masking(fill_values=[0, 11]).array)
[[-- 1 2 3]
[4 -- 6 7]
[8 9 10 --]]
>>> print(d.apply_masking(valid_min=3).array)
[[-- -- -- 3]
[4 -- 6 7]
[8 9 10 11]]
>>> print(d.apply_masking(valid_max=6).array)
[[0 1 2 3]
[4 -- 6 --]
[-- -- -- --]]
>>> print(d.apply_masking(valid_range=[2, 8]).array)
[[-- -- 2 3]
[4 -- 6 7]
[8 -- -- --]]
>>> d.set_fill_value(7)
>>> print(d.apply_masking(fill_values=True).array)
[[0 1 2 3]
[4 -- 6 --]
[8 9 10 11]]
>>> print(d.apply_masking(fill_values=True,
... valid_range=[2, 8]).array)
[[-- -- 2 3]
[4 -- 6 --]
[8 -- -- --]]
"""
if valid_range is not None:
if valid_min is not None or valid_max is not None:
                raise ValueError(
                    "Can't set the 'valid_range' parameter when either the "
                    "'valid_min' or 'valid_max' parameter is also set"
                )
try:
if len(valid_range) != 2:
raise ValueError(
"'valid_range' parameter must be a vector of "
"two elements"
)
except TypeError:
raise ValueError(
"'valid_range' parameter must be a vector of "
"two elements"
)
valid_min, valid_max = valid_range
d = _inplace_enabled_define_and_cleanup(self)
if fill_values is None:
fill_values = False
if isinstance(fill_values, bool):
if fill_values:
fill_value = self.get_fill_value(None)
if fill_value is not None:
fill_values = (fill_value,)
else:
fill_values = ()
else:
fill_values = ()
else:
try:
_ = iter(fill_values)
except TypeError:
raise TypeError(
"'fill_values' parameter must be a sequence or "
f"of type bool. Got type {type(fill_values)}"
)
else:
if isinstance(fill_values, str):
raise TypeError(
"'fill_values' parameter must be a sequence or "
f"of type bool. Got type {type(fill_values)}"
)
mask = None
if fill_values:
array = self.array
mask = array == fill_values[0]
for fill_value in fill_values[1:]:
mask |= array == fill_value
if valid_min is not None:
if mask is None:
array = self.array
mask = array < valid_min
else:
mask |= array < valid_min
if valid_max is not None:
if mask is None:
array = self.array
mask = array > valid_max
else:
mask |= array > valid_max
if mask is not None:
array = numpy.ma.where(mask, cfdm_masked, array)
d._set_Array(array, copy=False)
return d
def copy(self, array=True):
"""Return a deep copy.
``d.copy()`` is equivalent to ``copy.deepcopy(d)``.
:Parameters:
array: `bool`, optional
If False then do not copy the array. By default the array
is copied.
:Returns:
`{{class}}`
The deep copy.
**Examples:**
>>> e = d.copy()
>>> e = d.copy(array=False)
"""
return super().copy(array=array)
def creation_commands(
self, name="data", namespace=None, indent=0, string=True
):
"""Return the commands that would create the data object.
.. versionadded:: (cfdm) 1.8.7.0
:Parameters:
name: `str` or `None`, optional
Set the variable name of `Data` object that the commands
create.
{{namespace: `str`, optional}}
{{indent: `int`, optional}}
{{string: `bool`, optional}}
:Returns:
{{returns creation_commands}}
**Examples:**
>>> d = {{package}}.{{class}}([[0.0, 45.0], [45.0, 90.0]],
... units='degrees_east')
>>> print(d.creation_commands())
data = {{package}}.{{class}}([[0.0, 45.0], [45.0, 90.0]], units='degrees_east', dtype='f8')
>>> d = {{package}}.{{class}}(['alpha', 'beta', 'gamma', 'delta'],
... mask = [1, 0, 0, 0])
>>> d.creation_commands(name='d', namespace='', string=False)
["d = Data(['', 'beta', 'gamma', 'delta'], dtype='U5', mask=Data([True, False, False, False], dtype='b1'))"]
"""
namespace0 = namespace
if namespace is None:
namespace = self._package() + "."
elif namespace and not namespace.endswith("."):
namespace += "."
mask = self.mask
if mask.any():
if name == "mask":
raise ValueError(
"When the data is masked, the 'name' parameter "
"can not have the value 'mask'"
)
masked = True
array = self.filled().array.tolist()
else:
masked = False
array = self.array.tolist()
units = self.get_units(None)
if units is None:
units = ""
else:
units = f", units={units!r}"
calendar = self.get_calendar(None)
if calendar is None:
calendar = ""
else:
calendar = f", calendar={calendar!r}"
fill_value = self.get_fill_value(None)
if fill_value is None:
fill_value = ""
else:
fill_value = f", fill_value={fill_value}"
dtype = self.dtype.descr[0][1][1:]
if masked:
mask = mask.creation_commands(
name="mask", namespace=namespace0, indent=0, string=True
)
mask = mask.replace("mask = ", "mask=", 1)
mask = f", {mask}"
else:
mask = ""
if name is None:
name = ""
else:
name = name + " = "
out = []
out.append(
f"{name}{namespace}{self.__class__.__name__}({array}{units}"
f"{calendar}, dtype={dtype!r}{mask}{fill_value})"
)
if string:
indent = " " * indent
out[0] = indent + out[0]
out = ("\n" + indent).join(out)
return out
@_inplace_enabled(default=False)
def filled(self, fill_value=None, inplace=False):
"""Replace masked elements with the fill value.
.. versionadded:: (cfdm) 1.8.7.0
:Parameters:
fill_value: scalar, optional
The fill value. By default the fill returned by
`get_fill_value` is used, or if this is not set then the
netCDF default fill value for the data type is used (as
defined by `netCDF4.default_fillvals`).
{{inplace: `bool`, optional}}
:Returns:
`Data` or `None`
The filled data, or `None` if the operation was in-place.
**Examples:**
>>> d = {{package}}.{{class}}([[1, 2, 3]])
>>> print(d.filled().array)
[[1 2 3]]
>>> d[0, 0] = {{package}}.masked
>>> print(d.filled().array)
[[-9223372036854775806 2 3]]
>>> d.set_fill_value(-99)
>>> print(d.filled().array)
[[-99 2 3]]
>>> print(d.filled(1e10).array)
[[10000000000 2 3]]
"""
d = _inplace_enabled_define_and_cleanup(self)
if fill_value is None:
fill_value = d.get_fill_value(None)
if fill_value is None:
default_fillvals = netCDF4.default_fillvals
fill_value = default_fillvals.get(d.dtype.str[1:], None)
if fill_value is None and d.dtype.kind in ("S", "U"):
fill_value = default_fillvals.get("S1", None)
if fill_value is None: # should not be None by this stage
raise ValueError(
"Can't determine fill value for "
f"data type {d.dtype.str!r}"
) # pragma: no cover
array = self.array
if numpy.ma.isMA(array):
array = array.filled(fill_value)
d._set_Array(array, copy=False)
return d
@_inplace_enabled(default=False)
def insert_dimension(self, position=0, inplace=False):
"""Expand the shape of the data array.
Inserts a new size 1 axis, corresponding to a given position in
the data array shape.
.. versionadded:: (cfdm) 1.7.0
.. seealso:: `flatten`, `squeeze`, `transpose`
:Parameters:
position: `int`, optional
Specify the position that the new axis will have in the
data array. By default the new axis has position 0, the
slowest varying position. Negative integers counting from
the last position are allowed.
*Parameter example:*
``position=2``
*Parameter example:*
``position=-1``
inplace: `bool`, optional
If True then do the operation in-place and return `None`.
:Returns:
`{{class}}` or `None`
The data with expanded axes. If the operation was in-place
then `None` is returned.
**Examples:**
>>> d.shape
(19, 73, 96)
>>> d.insert_dimension().shape
(1, 19, 73, 96)
>>> d.insert_dimension(position=3).shape
(19, 73, 96, 1)
>>> d.insert_dimension(position=-1, inplace=True)
>>> d.shape
(19, 73, 1, 96)
"""
d = _inplace_enabled_define_and_cleanup(self)
# Parse position
ndim = d.ndim
if -ndim - 1 <= position < 0:
position += ndim + 1
elif not 0 <= position <= ndim:
raise ValueError(
f"Can't insert dimension: Invalid position: {position!r}"
)
array = numpy.expand_dims(self.array, position)
d._set_Array(array, copy=False)
# Delete hdf5 chunksizes
d.nc_clear_hdf5_chunksizes()
return d
def get_count(self, default=ValueError()):
"""Return the count variable for a compressed array.
.. versionadded:: (cfdm) 1.7.0
.. seealso:: `get_index`, `get_list`
:Parameters:
default: optional
Return the value of the *default* parameter if a count
variable has not been set. If set to an `Exception`
instance then it will be raised instead.
:Returns:
The count variable.
**Examples:**
>>> c = d.get_count()
"""
try:
return self._get_Array().get_count()
except (AttributeError, ValueError):
return self._default(
default, f"{self.__class__.__name__!r} has no count variable"
)
def get_index(self, default=ValueError()):
"""Return the index variable for a compressed array.
.. versionadded:: (cfdm) 1.7.0
.. seealso:: `get_count`, `get_list`
:Parameters:
default: optional
Return the value of the *default* parameter if an index
variable has not been set. If set to an `Exception`
instance then it will be raised instead.
:Returns:
The index variable.
**Examples:**
>>> i = d.get_index()
"""
try:
return self._get_Array().get_index()
except (AttributeError, ValueError):
return self._default(
default, f"{self.__class__.__name__!r} has no index variable"
)
def get_list(self, default=ValueError()):
"""Return the list variable for a compressed array.
.. versionadded:: (cfdm) 1.7.0
.. seealso:: `get_count`, `get_index`
:Parameters:
default: optional
Return the value of the *default* parameter if an index
variable has not been set. If set to an `Exception`
instance then it will be raised instead.
:Returns:
The list variable.
**Examples:**
>>> l = d.get_list()
"""
try:
return self._get_Array().get_list()
except (AttributeError, ValueError):
return self._default(
default, f"{self.__class__.__name__!r} has no list variable"
)
def get_compressed_dimension(self, default=ValueError()):
"""Returns the compressed dimension's array position.
That is, returns the position of the compressed dimension
in the compressed array.
.. versionadded:: (cfdm) 1.7.0
.. seealso:: `compressed_array`, `get_compressed_axes`,
`get_compression_type`
:Parameters:
default: optional
Return the value of the *default* parameter if there is no
compressed dimension. If set to an `Exception` instance
then it will be raised instead.
:Returns:
`int`
The position of the compressed dimension in the compressed
array.
**Examples:**
>>> d.get_compressed_dimension()
2
"""
try:
return self._get_Array().get_compressed_dimension()
except (AttributeError, ValueError):
return self._default(
default,
f"{ self.__class__.__name__!r} has no compressed dimension",
)
def _parse_indices(self, indices):
"""Parse indices of the data and return valid indices in a list.
:Parameters:
indices: `tuple` (not a `list`!)
:Returns:
`list`
**Examples:**
>>> d = {{package}}.{{class}}(numpy.arange(100, 190).reshape(1, 10, 9))
>>> d._parse_indices((slice(None, None, None), 1, 2))
[slice(None, None, None), slice(1, 2, 1), slice(2, 3, 1)]
>>> d._parse_indices((1,))
[slice(1, 2, 1), slice(None, None, None), slice(None, None, None)]
"""
shape = self.shape
parsed_indices = []
if not isinstance(indices, tuple):
indices = (indices,)
# Initialise the list of parsed indices as the input indices
# with any Ellipsis objects expanded
length = len(indices)
n = len(shape)
ndim = n
for index in indices:
if index is Ellipsis:
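# Expand the Ellipsis into enough full slices to cover the
# dimensions not addressed by the remaining indices.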
m = n - length + 1
parsed_indices.extend([slice(None)] * m)
n -= m
else:
parsed_indices.append(index)
n -= 1
length -= 1
len_parsed_indices = len(parsed_indices)
if ndim and len_parsed_indices > ndim:
raise IndexError(
f"Invalid indices for data with shape {shape}: "
f"{parsed_indices}"
)
if len_parsed_indices < ndim:
parsed_indices.extend([slice(None)] * (ndim - len_parsed_indices))
if not ndim and parsed_indices:
raise IndexError(
"Scalar data can only be indexed with () or Ellipsis"
)
for i, (index, size) in enumerate(zip(parsed_indices, shape)):
if isinstance(index, slice):
continue
if isinstance(index, int):
# E.g. 43 -> slice(43, 44, 1)
if index < 0:
index += size
index = slice(index, index + 1, 1)
else:
if getattr(getattr(index, "dtype", None), "kind", None) == "b":
# E.g. index is [True, False, True] -> [0, 2]
#
# Convert Booleans to non-negative integers. We're
# assuming that anything with a dtype attribute also
# has a size attribute.
if index.size != size:
raise IndexError(
"Invalid indices for data "
f"with shape {shape}: {parsed_indices}"
)
index = numpy.where(index)[0]
if not numpy.ndim(index):
if index < 0:
index += size
index = slice(index, index + 1, 1)
else:
len_index = len(index)
if len_index == 1:
# E.g. [3] -> slice(3, 4, 1)
index = index[0]
if index < 0:
index += size
index = slice(index, index + 1, 1)
else:
# E.g. [1, 3, 4] -> [1, 3, 4]
pass
parsed_indices[i] = index
return parsed_indices
def maximum(self, axes=None):
"""Return the maximum of an array or the maximum along axes.
Missing data array elements are omitted from the calculation.
.. versionadded:: (cfdm) 1.8.0
.. seealso:: `minimum`
:Parameters:
axes: (sequence of) `int`, optional
The axes over which to take the maximum. By default the
maximum over all axes is returned.
{{axes int examples}}
:Returns:
`{{class}}`
Maximum of the data along the specified axes.
**Examples:**
>>> d = {{package}}.{{class}}(numpy.arange(24).reshape(1, 2, 3, 4))
>>> d
<{{repr}}Data(1, 2, 3, 4): [[[[0, ..., 23]]]]>
>>> print(d.array)
[[[[ 0 1 2 3]
[ 4 5 6 7]
[ 8 9 10 11]]
[[12 13 14 15]
[16 17 18 19]
[20 21 22 23]]]]
>>> e = d.max()
>>> e
<{{repr}}Data(1, 1, 1, 1): [[[[23]]]]>
>>> print(e.array)
[[[[23]]]]
>>> e = d.max(2)
>>> e
<{{repr}}Data(1, 2, 1, 4): [[[[8, ..., 23]]]]>
>>> print(e.array)
[[[[ 8 9 10 11]]
[[20 21 22 23]]]]
>>> e = d.max([-2, -1])
>>> e
<{{repr}}Data(1, 2, 1, 1): [[[[11, 23]]]]>
>>> print(e.array)
[[[[11]]
[[23]]]]
"""
# Parse the axes. By default flattened input is used.
try:
axes = self._parse_axes(axes)
except ValueError as error:
raise ValueError(f"Can't find maximum of data: {error}")
array = self.array
array = numpy.amax(array, axis=axes, keepdims=True)
out = self.copy(array=False)
out._set_Array(array, copy=False)
if out.shape != self.shape:
# Delete hdf5 chunksizes
out.nc_clear_hdf5_chunksizes()
return out
def minimum(self, axes=None):
"""Return the minimum of an array or minimum along axes.
Missing data array elements are omitted from the calculation.
.. versionadded:: (cfdm) 1.8.0
.. seealso:: `maximum`
:Parameters:
axes: (sequence of) `int`, optional
The axes over which to take the minimum. By default the
minimum over all axes is returned.
{{axes int examples}}
:Returns:
`{{class}}`
Minimum of the data along the specified axes.
**Examples:**
>>> d = {{package}}.{{class}}(numpy.arange(24).reshape(1, 2, 3, 4))
>>> d
<{{repr}}Data(1, 2, 3, 4): [[[[0, ..., 23]]]]>
>>> print(d.array)
[[[[ 0 1 2 3]
[ 4 5 6 7]
[ 8 9 10 11]]
[[12 13 14 15]
[16 17 18 19]
[20 21 22 23]]]]
>>> e = d.min()
>>> e
<{{repr}}Data(1, 1, 1, 1): [[[[0]]]]>
>>> print(e.array)
[[[[0]]]]
>>> e = d.min(2)
>>> e
<{{repr}}Data(1, 2, 1, 4): [[[[0, ..., 15]]]]>
>>> print(e.array)
[[[[ 0 1 2 3]]
[[12 13 14 15]]]]
>>> e = d.min([-2, -1])
>>> e
<{{repr}}Data(1, 2, 1, 1): [[[[0, 12]]]]>
>>> print(e.array)
[[[[ 0]]
[[12]]]]
"""
# Parse the axes. By default flattened input is used.
try:
axes = self._parse_axes(axes)
except ValueError as error:
raise ValueError(f"Can't find minimum of data: {error}")
array = self.array
array = numpy.amin(array, axis=axes, keepdims=True)
out = self.copy(array=False)
out._set_Array(array, copy=False)
if out.shape != self.shape:
# Delete hdf5 chunksizes
out.nc_clear_hdf5_chunksizes()
return out
@_inplace_enabled(default=False)
def squeeze(self, axes=None, inplace=False):
"""Remove size 1 axes from the data.
By default all size 1 axes are removed, but particular axes may be
selected with the keyword arguments.
.. versionadded:: (cfdm) 1.7.0
.. seealso:: `flatten`, `insert_dimension`, `transpose`
:Parameters:
axes: (sequence of) `int`, optional
The positions of the size one axes to be removed. By
default all size one axes are removed.
{{axes int examples}}
inplace: `bool`, optional
If True then do the operation in-place and return `None`.
:Returns:
`Data` or `None`
The data with removed data axes. If the operation was
in-place then `None` is returned.
**Examples:**
>>> d.shape
(1, 73, 1, 96)
>>> d.squeeze().shape
(73, 96)
>>> d.squeeze(0).shape
(73, 1, 96)
>>> d.squeeze([-3, 2]).shape
(73, 96)
>>> d.squeeze(2, inplace=True)
>>> d.shape
(1, 73, 96)
"""
d = _inplace_enabled_define_and_cleanup(self)
try:
axes = d._parse_axes(axes)
except ValueError as error:
raise ValueError(f"Can't squeeze data: {error}")
shape = d.shape
if axes is None:
axes = tuple([i for i, n in enumerate(shape) if n == 1])
else:
# Check the squeeze axes
for i in axes:
if shape[i] > 1:
raise ValueError(
"Can't squeeze data: "
f"Can't remove axis of size {shape[i]}"
)
if not axes:
return d
array = self.array
array = numpy.squeeze(array, axes)
d._set_Array(array, copy=False)
# Delete hdf5 chunksizes
d.nc_clear_hdf5_chunksizes()
return d
def sum(self, axes=None):
"""Return the sum of an array or the sum along axes.
Missing data array elements are omitted from the calculation.
.. seealso:: `max`, `min`
:Parameters:
axes: (sequence of) `int`, optional
The axes over which to calculate the sum. By default the
sum over all axes is returned.
{{axes int examples}}
:Returns:
`{{class}}`
The sum of the data along the specified axes.
**Examples:**
>>> d = {{package}}.{{class}}(numpy.arange(24).reshape(1, 2, 3, 4))
>>> d
<{{repr}}Data(1, 2, 3, 4): [[[[0, ..., 23]]]]>
>>> print(d.array)
[[[[ 0 1 2 3]
[ 4 5 6 7]
[ 8 9 10 11]]
[[12 13 14 15]
[16 17 18 19]
[20 21 22 23]]]]
>>> e = d.sum()
>>> e
<{{repr}}Data(1, 1, 1, 1): [[[[276]]]]>
>>> print(e.array)
[[[[276]]]]
>>> e = d.sum(2)
>>> e
<{{repr}}Data(1, 2, 1, 4): [[[[12, ..., 57]]]]>
>>> print(e.array)
[[[[12 15 18 21]]
[[48 51 54 57]]]]
>>> e = d.sum([-2, -1])
>>> e
<{{repr}}Data(1, 2, 1, 1): [[[[66, 210]]]]>
>>> print(e.array)
[[[[ 66]]
[[210]]]]
"""
# Parse the axes. By default flattened input is used.
try:
axes = self._parse_axes(axes)
except ValueError as error:
raise ValueError(f"Can't sum data: {error}")
array = self.array
array = numpy.sum(array, axis=axes, keepdims=True)
d = self.copy(array=False)
d._set_Array(array, copy=False)
if d.shape != self.shape:
# Delete hdf5 chunksizes
d.nc_clear_hdf5_chunksizes()
return d
@_inplace_enabled(default=False)
def transpose(self, axes=None, inplace=False):
"""Permute the axes of the data array.
.. versionadded:: (cfdm) 1.7.0
.. seealso:: `flatten`, `insert_dimension`, `squeeze`
:Parameters:
axes: (sequence of) `int`
The new axis order. By default the order is reversed.
{{axes int examples}}
inplace: `bool`, optional
If True then do the operation in-place and return `None`.
:Returns:
`{{class}}` or `None`
The data with permuted data axes. If the operation was
in-place then `None` is returned.
**Examples:**
>>> d.shape
(19, 73, 96)
>>> d.transpose().shape
(96, 73, 19)
>>> d.transpose([1, 0, 2]).shape
(73, 19, 96)
>>> d.transpose([-1, 0, 1], inplace=True)
>>> d.shape
(96, 19, 73)
"""
d = _inplace_enabled_define_and_cleanup(self)
ndim = d.ndim
# Parse the axes. By default, reverse the order of the axes.
try:
axes = d._parse_axes(axes)
except ValueError as error:
raise ValueError(f"Can't transpose data: {error}")
if axes is None:
if ndim <= 1:
return d
axes = tuple(range(ndim - 1, -1, -1))
elif len(axes) != ndim:
raise ValueError(
f"Can't transpose data: Axes don't match array: {axes}"
)
# Return unchanged if axes are in the same order as the data
if axes == tuple(range(ndim)):
return d
array = self.array
array = numpy.transpose(array, axes=axes)
d._set_Array(array, copy=False)
return d
def get_compressed_axes(self):
"""Returns the dimensions that are compressed in the array.
.. versionadded:: (cfdm) 1.7.0
.. seealso:: `compressed_array`, `get_compressed_dimension`,
`get_compression_type`
:Returns:
`list`
The dimensions of the data that are compressed to a single
dimension in the underlying array. If the data are not
compressed then an empty list is returned.
**Examples:**
>>> d.shape
(2, 3, 4, 5, 6)
>>> d.compressed_array.shape
(2, 14, 6)
>>> d.get_compressed_axes()
[1, 2, 3]
>>> d.get_compression_type()
''
>>> d.get_compressed_axes()
[]
"""
ca = self._get_Array(None)
if ca is None:
return []
return ca.get_compressed_axes()
def get_compression_type(self):
"""Returns the type of compression applied to the array.
.. versionadded:: (cfdm) 1.7.0
.. seealso:: `compressed_array`, `compression_axes`,
`get_compressed_dimension`
:Returns:
`str`
The compression type. An empty string means that no
compression has been applied.
**Examples:**
>>> d.get_compression_type()
''
>>> d.get_compression_type()
'gathered'
>>> d.get_compression_type()
'ragged contiguous'
"""
ma = self._get_Array(None)
if ma is None:
return ""
return ma.get_compression_type()
@classmethod
def empty(cls, shape, dtype=None, units=None, calendar=None):
"""Create a new data array without initialising the elements.
Note that the mask of the returned empty data is hard.
.. seealso:: `full`, `ones`, `zeros`
:Parameters:
shape: `int` or `tuple` of `int`
The shape of the new array.
dtype: `numpy.dtype` or any object convertible to `numpy.dtype`
The data-type of the new array. By default the
data-type is ``float``.
units: `str` or `Units`
The units for the empty data array.
calendar: `str`, optional
The calendar for reference time units.
:Returns:
`{{class}}`
**Examples:**
>>> d = {{package}}.{{class}}.empty((96, 73))
"""
return cls(
numpy.empty(shape=shape, dtype=dtype),
units=units,
calendar=calendar,
)
@_manage_log_level_via_verbosity
def equals(
self,
other,
rtol=None,
atol=None,
verbose=None,
ignore_data_type=False,
ignore_fill_value=False,
ignore_compression=True,
ignore_type=False,
_check_values=True,
):
"""Whether two data arrays are the same.
Equality is strict by default. This means that for data arrays to
be considered equal:
* the units and calendar must be the same,
..
* the fill value must be the same (see the *ignore_fill_value*
parameter), and
..
* the arrays must have same shape and data type, the same missing
data mask, and be element-wise equal (see the *ignore_data_type*
parameter).
{{equals tolerance}}
Any compression is ignored by default, with only the arrays in
their uncompressed forms being compared. See the
*ignore_compression* parameter.
Any type of object may be tested but, in general, equality is only
possible with another data array, or a subclass of one. See the
*ignore_type* parameter.
.. versionadded:: (cfdm) 1.7.0
:Parameters:
other:
The object to compare for equality.
{{atol: number, optional}}
{{rtol: number, optional}}
ignore_fill_value: `bool`, optional
If True then the fill value is omitted from the
comparison.
{{ignore_data_type: `bool`, optional}}
{{ignore_compression: `bool`, optional}}
{{ignore_type: `bool`, optional}}
{{verbose: `int` or `str` or `None`, optional}}
:Returns:
`bool`
Whether the two data arrays are equal.
**Examples:**
>>> d.equals(d)
True
>>> d.equals(d.copy())
True
>>> d.equals('not a data array')
False
"""
pp = super()._equals_preprocess(
other, verbose=verbose, ignore_type=ignore_type
)
if pp is True or pp is False:
return pp
other = pp
# Check that each instance has the same shape
if self.shape != other.shape:
logger.info(
f"{self.__class__.__name__}: Different shapes: "
f"{self.shape} != {other.shape}"
) # pragma: no cover
return False
# Check that each instance has the same fill value
if not ignore_fill_value and self.get_fill_value(
None
) != other.get_fill_value(None):
logger.info(
f"{self.__class__.__name__}: Different fill value: "
f"{self.get_fill_value(None)} != {other.get_fill_value(None)}"
) # pragma: no cover
return False
# Check that each instance has the same data type
if not ignore_data_type and self.dtype != other.dtype:
logger.info(
f"{self.__class__.__name__}: Different data types: "
f"{self.dtype} != {other.dtype}"
) # pragma: no cover
return False
# Return now if we have been asked to not check the array
# values
if not _check_values:
return True
# Check that each instance has the same units
for attr in ("units", "calendar"):
x = getattr(self, "get_" + attr)(None)
y = getattr(other, "get_" + attr)(None)
if x != y:
logger.info(
f"{self.__class__.__name__}: Different {attr}: "
f"{x!r} != {y!r}"
) # pragma: no cover
return False
if not ignore_compression:
# --------------------------------------------------------
# Check for equal compression types
# --------------------------------------------------------
compression_type = self.get_compression_type()
if compression_type != other.get_compression_type():
logger.info(
f"{self.__class__.__name__}: Different compression types: "
f"{compression_type} != {other.get_compression_type()}"
) # pragma: no cover
return False
# --------------------------------------------------------
# Check for equal compressed array values
# --------------------------------------------------------
if compression_type:
if not self._equals(
self.compressed_array,
other.compressed_array,
rtol=rtol,
atol=atol,
):
logger.info(
f"{self.__class__.__name__}: Different compressed "
"array values"
) # pragma: no cover
return False
# ------------------------------------------------------------
# Check for equal (uncompressed) array values
# ------------------------------------------------------------
if not self._equals(self.array, other.array, rtol=rtol, atol=atol):
logger.info(
f"{self.__class__.__name__}: Different array values "
f"(atol={atol}, rtol={rtol})"
) # pragma: no cover
return False
# ------------------------------------------------------------
# Still here? Then the two data arrays are equal.
# ------------------------------------------------------------
return True
def get_filenames(self):
"""Return the name of the file containing the data array.
:Returns:
`set`
The file name in normalised, absolute form. If the
data are in memory then an empty `set` is returned.
**Examples:**
>>> f = {{package}}.example_field(0)
>>> {{package}}.write(f, 'temp_file.nc')
>>> g = {{package}}.read('temp_file.nc')[0]
>>> d = g.data
>>> d.get_filenames()
{'/data/user/temp_file.nc'}
>>> d[...] = -99
>>> d.get_filenames()
set()
"""
source = self.source(None)
if source is None:
return set()
try:
filename = source.get_filename()
except AttributeError:
return set()
else:
return set((abspath(filename),))
def first_element(self):
"""Return the first element of the data as a scalar.
.. versionadded:: (cfdm) 1.7.0
.. seealso:: `last_element`, `second_element`
:Returns:
The first element of the data.
**Examples:**
>>> d = {{package}}.{{class}}(9.0)
>>> x = d.first_element()
>>> print(x, type(x))
9.0 <class 'float'>
>>> d = {{package}}.{{class}}([[1, 2], [3, 4]])
>>> x = d.first_element()
>>> print(x, type(x))
1 <class 'int'>
>>> d[0, 0] = {{package}}.masked
>>> y = d.first_element()
>>> print(y, type(y))
-- <class 'numpy.ma.core.MaskedConstant'>
>>> d = {{package}}.{{class}}(['foo', 'bar'])
>>> x = d.first_element()
>>> print(x, type(x))
foo <class 'str'>
"""
return self._item((slice(0, 1),) * self.ndim)
@_inplace_enabled(default=False)
def flatten(self, axes=None, inplace=False):
"""Flatten axes of the data.
Any subset of the axes may be flattened.
The shape of the data may change, but the size will not.
The flattening is executed in row-major (C-style) order. For
example, the array ``[[1, 2], [3, 4]]`` would be flattened across
both dimensions to ``[1 2 3 4]``.
.. versionadded:: (cfdm) 1.7.11
.. seealso:: `insert_dimension`, `squeeze`, `transpose`
:Parameters:
axes: (sequence of) `int`, optional
Select the axes. By default all axes are flattened. No
axes are flattened if *axes* is an empty sequence.
{{axes int examples}}
inplace: `bool`, optional
If True then do the operation in-place and return `None`.
:Returns:
`Data` or `None`
The flattened data, or `None` if the operation was
in-place.
**Examples**
>>> d = {{package}}.{{class}}(numpy.arange(24).reshape(1, 2, 3, 4))
>>> d
<{{repr}}Data(1, 2, 3, 4): [[[[0, ..., 23]]]]>
>>> print(d.array)
[[[[ 0 1 2 3]
[ 4 5 6 7]
[ 8 9 10 11]]
[[12 13 14 15]
[16 17 18 19]
[20 21 22 23]]]]
>>> e = d.flatten()
>>> e
<{{repr}}Data(24): [0, ..., 23]>
>>> print(e.array)
[ 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23]
>>> e = d.flatten([])
>>> e
<{{repr}}Data(1, 2, 3, 4): [[[[0, ..., 23]]]]>
>>> e = d.flatten([1, 3])
>>> e
<{{repr}}Data(1, 8, 3): [[[0, ..., 23]]]>
>>> print(e.array)
[[[ 0 4 8]
[ 1 5 9]
[ 2 6 10]
[ 3 7 11]
[12 16 20]
[13 17 21]
[14 18 22]
[15 19 23]]]
>>> d.flatten([0, -1], inplace=True)
>>> d
<{{repr}}Data(4, 2, 3): [[[0, ..., 23]]]>
>>> print(d.array)
[[[ 0 4 8]
[12 16 20]]
[[ 1 5 9]
[13 17 21]]
[[ 2 6 10]
[14 18 22]]
[[ 3 7 11]
[15 19 23]]]
"""
d = _inplace_enabled_define_and_cleanup(self)
try:
axes = d._parse_axes(axes)
except ValueError as error:
raise ValueError(f"Can't flatten data: {error}")
ndim = d.ndim
if ndim <= 1:
return d
if axes is None:
# By default flatten all axes
axes = tuple(range(ndim))
else:
if len(axes) <= 1:
return d
# Note that it is important that the first axis in the
# list is the left-most flattened axis
axes = sorted(axes)
# Save the shape before we transpose
shape = list(d.shape)
order = [i for i in range(ndim) if i not in axes]
order[axes[0] : axes[0]] = axes
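# e.g. ndim=4, axes=[1, 3] gives order=[0, 1, 3, 2]: the axes to
# be flattened are made adjacent, starting at position axes[0]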
d.transpose(order, inplace=True)
new_shape = [n for i, n in enumerate(shape) if i not in axes]
new_shape.insert(axes[0], numpy.prod([shape[i] for i in axes]))
array = d.array.reshape(new_shape)
out = type(self)(
array,
units=d.get_units(None),
calendar=d.get_calendar(None),
fill_value=d.get_fill_value(None),
)
if inplace:
d.__dict__ = out.__dict__
return out
def last_element(self):
"""Return the last element of the data as a scalar.
.. versionadded:: (cfdm) 1.7.0
.. seealso:: `first_element`, `second_element`
:Returns:
The last element of the data.
**Examples:**
>>> d = {{package}}.{{class}}(9.0)
>>> x = d.last_element()
>>> print(x, type(x))
9.0 <class 'float'>
>>> d = {{package}}.{{class}}([[1, 2], [3, 4]])
>>> x = d.last_element()
>>> print(x, type(x))
4 <class 'int'>
>>> d[-1, -1] = {{package}}.masked
>>> y = d.last_element()
>>> print(y, type(y))
-- <class 'numpy.ma.core.MaskedConstant'>
>>> d = {{package}}.{{class}}(['foo', 'bar'])
>>> x = d.last_element()
>>> print(x, type(x))
bar <class 'str'>
"""
return self._item((slice(-1, None),) * self.ndim)
def second_element(self):
"""Return the second element of the data as a scalar.
.. versionadded:: (cfdm) 1.7.0
.. seealso:: `first_element`, `last_element`
:Returns:
The second element of the data.
**Examples:**
>>> d = {{package}}.{{class}}([[1, 2], [3, 4]])
>>> x = d.second_element()
>>> print(x, type(x))
2 <class 'int'>
>>> d[0, 1] = {{package}}.masked
>>> y = d.second_element()
>>> print(y, type(y))
-- <class 'numpy.ma.core.MaskedConstant'>
>>> d = {{package}}.{{class}}(['foo', 'bar'])
>>> x = d.second_element()
>>> print(x, type(x))
bar <class 'str'>
"""
return self._item((slice(0, 1),) * (self.ndim - 1) + (slice(1, 2),))
def to_memory(self):
"""Bring data on disk into memory and retain it there.
There is no change to data that is already in memory.
:Returns:
`None`
**Examples:**
>>> f = {{package}}.example_field(4)
>>> f.data
<{{repr}}Data(3, 26, 4): [[[290.0, ..., --]]] K>
>>> f.data.to_memory()
"""
self._set_Array(self.source().to_memory())
@_inplace_enabled(default=False)
def uncompress(self, inplace=False):
"""Uncompress the underlying array.
.. versionadded:: (cfdm) 1.7.3
.. seealso:: `array`, `compressed_array`, `source`
:Parameters:
inplace: `bool`, optional
If True then do the operation in-place and return `None`.
:Returns:
`{{class}}` or `None`
The uncompressed data, or `None` if the operation was
in-place.
**Examples:**
>>> d.get_compression_type()
'ragged contiguous'
>>> d.source()
<RaggedContiguousArray(4, 9): >
>>> d.uncompress(inplace=True)
>>> d.get_compression_type()
''
>>> d.source()
<NumpyArray(4, 9): >
"""
d = _inplace_enabled_define_and_cleanup(self)
if d.get_compression_type():
d._set_Array(d.array, copy=False)
return d
def unique(self):
"""The unique elements of the data.
The unique elements are sorted into a one dimensional array with
no missing values.
.. versionadded:: (cfdm) 1.7.0
:Returns:
`{{class}}`
The unique elements.
**Examples:**
>>> d = {{package}}.{{class}}([[4, 2, 1], [1, 2, 3]], 'metre')
>>> d.unique()
<{{repr}}Data(4): [1, ..., 4] metre>
>>> d[1, -1] = {{package}}.masked
>>> d.unique()
<{{repr}}Data(3): [1, 2, 4] metre>
"""
array = self.array
array = numpy.unique(array)
if numpy.ma.is_masked(array):
array = array.compressed()
d = self.copy(array=False)
d._set_Array(array, copy=False)
if d.shape != self.shape:
# Delete hdf5 chunksizes
d.nc_clear_hdf5_chunksizes()
return d
# ----------------------------------------------------------------
# Aliases
# ----------------------------------------------------------------
def max(self, axes=None):
"""Alias for `maximum`."""
return self.maximum(axes=axes)
def min(self, axes=None):
"""Alias for `minimum`."""
return self.minimum(axes=axes)
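# ---------------------------------------------------------------------
# Illustrative sketch (added, not part of the original class): the
# masking logic at the top of this fragment ORs fill-value matches and
# valid_min/valid_max violations into a single Boolean mask before
# masking the array. A minimal standalone numpy equivalent, assuming a
# plain ndarray input and that ``numpy`` is already imported by this
# module (it is used throughout the class above):
def _sketch_apply_masking(array, fill_values=(), valid_min=None, valid_max=None):
    """Return a masked copy of ``array`` (illustration only)."""
    mask = numpy.zeros(array.shape, dtype=bool)
    for fill_value in fill_values:
        mask |= array == fill_value
    if valid_min is not None:
        mask |= array < valid_min
    if valid_max is not None:
        mask |= array > valid_max
    return numpy.ma.masked_where(mask, array)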
|
33658
|
import pytest
import yaml
from nequip.utils import instantiate
simple_default = {"b": 1, "d": 31}
class SimpleExample:
def __init__(self, a, b=simple_default["b"], d=simple_default["d"]):
self.a = a
self.b = b
self.d = d
nested_default = {"d": 37}
class NestedExample:
def __init__(self, cls_c, a, cls_c_kwargs={}, d=nested_default["d"]):
self.c_obj = cls_c(**cls_c_kwargs)
self.a = a
self.d = d
def assert_dict(d):
for k, v in d.items():
if isinstance(v, dict):
assert_dict(v)
elif isinstance(v, str):
assert k == v
@pytest.mark.parametrize("positional_args", [dict(a=3, b=4), dict(a=5), dict()])
@pytest.mark.parametrize("optional_args", [dict(a=3, b=4), dict(a=5), dict()])
@pytest.mark.parametrize("all_args", [dict(a=6, b=7), dict(a=8), dict()])
@pytest.mark.parametrize("prefix", [True, False])
def test_simple_init(positional_args, optional_args, all_args, prefix):
union = {}
union.update(all_args)
union.update(optional_args)
union.update(positional_args)
if "a" not in union:
return
# decorate test with prefix
_all_args = (
{"simple_example_" + k: v for k, v in all_args.items()} if prefix else all_args
)
# check key mapping is correct
km, params = instantiate(
builder=SimpleExample,
prefix="simple_example",
positional_args=positional_args,
optional_args=optional_args,
all_args=_all_args,
return_args_only=True,
)
for t in km:
for k, v in km[t].items():
assert k in locals()[t + "_args"]
if prefix and t == "all":
assert v == "simple_example_" + k
else:
assert v == k
km, _ = instantiate(
builder=SimpleExample,
prefix="simple_example",
positional_args=positional_args,
all_args=params,
return_args_only=True,
)
assert_dict(km)
# check whether it gets the priority right
a1, params = instantiate(
builder=SimpleExample,
prefix="simple_example",
positional_args=positional_args,
optional_args=optional_args,
all_args=_all_args,
)
assert a1.a == union["a"]
if "b" in union:
assert a1.b == union["b"]
else:
assert a1.b == simple_default["b"]
for k in params:
if k in simple_default:
assert params[k] == union.get(k, simple_default[k])
# check whether the return value is right
a2 = SimpleExample(**positional_args, **params)
assert a1.a == a2.a
assert a1.b == a2.b
def test_prefix_priority():
args = {"prefix_a": 3, "a": 4}
a, params = instantiate(
builder=SimpleExample,
prefix="prefix",
all_args=args,
)
assert a.a == 3
@pytest.mark.parametrize("optional_args", [dict(a=3, b=4), dict(a=5), dict()])
@pytest.mark.parametrize("all_args", [dict(a=6, b=7), dict(a=8), dict()])
@pytest.mark.parametrize("prefix", [True, False])
def test_nested_kwargs(optional_args, all_args, prefix):
union = {}
union.update(all_args)
union.update(optional_args)
if "a" not in union:
return
c, params = instantiate(
builder=NestedExample,
prefix="prefix",
positional_args={"cls_c": SimpleExample},
optional_args=optional_args,
all_args=all_args,
)
def test_default():
"""
check the default value will not contaminate the other class
"""
c, params = instantiate(
builder=NestedExample,
prefix="prefix",
positional_args={"cls_c": SimpleExample},
optional_args={"a": 11},
)
assert c.d == nested_default["d"]
assert c.c_obj.d == simple_default["d"]
class A:
def __init__(self, cls_a, cls_a_kwargs):
self.a_obj = cls_a(**cls_a_kwargs)
class B:
def __init__(self, cls_b, cls_b_kwargs):
self.b_obj = cls_b(**cls_b_kwargs)
class C:
def __init__(self, cls_c, cls_c_kwargs): # noqa
self.c_obj = c_cls(**c_cls_kwargs) # noqa
def test_deep_nests():
all_args = {"a": 101, "b": 103, "c": 107}
obj, params = instantiate(
builder=NestedExample,
optional_args={"cls_c": A, "cls_a": B, "cls_b": SimpleExample},
all_args=all_args,
)
print(yaml.dump(params))
assert obj.c_obj.a_obj.b_obj.a == all_args["a"]
assert obj.c_obj.a_obj.b_obj.b == all_args["b"]
assert obj.c_obj.a_obj.b_obj.d == simple_default["d"]
assert obj.d == nested_default["d"]
obj = NestedExample(**params)
assert obj.c_obj.a_obj.b_obj.a == all_args["a"]
assert obj.c_obj.a_obj.b_obj.b == all_args["b"]
assert obj.c_obj.a_obj.b_obj.d == simple_default["d"]
assert obj.d == nested_default["d"]
km, params = instantiate(
builder=NestedExample,
optional_args={"cls_c": A, "cls_a": B, "cls_b": SimpleExample},
all_args=all_args,
return_args_only=True,
)
print(yaml.dump(km))
# check the key mapping is unique for
km, _ = instantiate(
builder=NestedExample, optional_args=params, return_args_only=True
)
assert_dict(km)
def test_recursion_nests():
with pytest.raises(RuntimeError) as excinfo:
b, params = instantiate(
builder=A,
positional_args={"cls_a": B},
optional_args={"cls_b": A},
)
assert "cyclic" in str(excinfo.value)
print(excinfo)
def test_cyclic_nests():
with pytest.raises(RuntimeError) as excinfo:
c, params = instantiate(
builder=A,
positional_args={"cls_a": B},
optional_args={"cls_b": C},
all_args={"cls_c": A},
)
assert "cyclic" in str(excinfo.value)
print(excinfo, "hello")
class BadKwargs1:
def __init__(self, thing_kwargs={}):
pass
class BadKwargs2:
def __init__(self, thing="a string", thing_kwargs={}):
pass
def test_bad_kwargs():
with pytest.raises(KeyError):
_ = instantiate(BadKwargs1)
with pytest.raises(ValueError):
_ = instantiate(BadKwargs2)
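# Illustrative usage sketch (added): a plain example of the
# prefix-based keyword routing exercised by the tests above. Keys in
# ``all_args`` may be given bare ("b") or prefixed ("simple_example_b");
# the prefixed form takes priority, as test_prefix_priority checks.
def _sketch_instantiate_usage():
    obj, params = instantiate(
        builder=SimpleExample,
        prefix="simple_example",
        positional_args={"a": 1},
        all_args={"simple_example_b": 2, "b": 7},
    )
    assert obj.a == 1
    assert obj.b == 2  # the prefixed key wins over the bare "b"
    return params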
|
33692
|
from pathlib import Path
from tkinter import Frame, Canvas, Entry, Text, Button, PhotoImage, messagebox
import controller as db_controller
OUTPUT_PATH = Path(__file__).parent
ASSETS_PATH = OUTPUT_PATH / Path("./assets")
def relative_to_assets(path: str) -> Path:
return ASSETS_PATH / Path(path)
def add_reservations():
AddReservations()
class AddReservations(Frame):
def __init__(self, parent, controller=None, *args, **kwargs):
Frame.__init__(self, parent, *args, **kwargs)
self.parent = parent
self.data = {"g_id": "", "check_in": "", "meal": "", "r_id": ""}
self.configure(bg="#FFFFFF")
self.canvas = Canvas(
self,
bg="#FFFFFF",
height=432,
width=797,
bd=0,
highlightthickness=0,
relief="ridge",
)
self.canvas.place(x=0, y=0)
self.entry_image_1 = PhotoImage(file=relative_to_assets("entry_1.png"))
entry_bg_1 = self.canvas.create_image(137.5, 153.0, image=self.entry_image_1)
self.canvas.create_text(
52.0,
128.0,
anchor="nw",
text="Guest Id",
fill="#5E95FF",
font=("Montserrat Bold", 14 * -1),
)
self.entry_image_2 = PhotoImage(file=relative_to_assets("entry_2.png"))
entry_bg_2 = self.canvas.create_image(141.5, 165.0, image=self.entry_image_2)
entry_2 = Entry(
self,
bd=0,
bg="#EFEFEF",
highlightthickness=0,
font=("Montserrat Bold", 18 * -1),
foreground="#777777",
)
entry_2.place(x=52.0, y=153.0, width=179.0, height=22.0)
self.data["g_id"] = entry_2
self.entry_image_3 = PhotoImage(file=relative_to_assets("entry_3.png"))
entry_bg_3 = self.canvas.create_image(137.5, 259.0, image=self.entry_image_3)
self.canvas.create_text(
52.0,
234.0,
anchor="nw",
text="Is Taking Meal",
fill="#5E95FF",
font=("Montserrat Bold", 14 * -1),
)
self.entry_image_4 = PhotoImage(file=relative_to_assets("entry_4.png"))
entry_bg_4 = self.canvas.create_image(141.5, 271.0, image=self.entry_image_4)
entry_4 = Entry(
self,
bd=0,
bg="#EFEFEF",
highlightthickness=0,
font=("Montserrat Bold", 18 * -1),
foreground="#777777",
)
entry_4.place(x=52.0, y=259.0, width=179.0, height=22.0)
self.data["r_id"] = entry_4
self.entry_image_5 = PhotoImage(file=relative_to_assets("entry_5.png"))
entry_bg_5 = self.canvas.create_image(378.5, 153.0, image=self.entry_image_5)
self.canvas.create_text(
293.0,
128.0,
anchor="nw",
text="Room Id",
fill="#5E95FF",
font=("Montserrat Bold", 14 * -1),
)
self.entry_image_6 = PhotoImage(file=relative_to_assets("entry_6.png"))
entry_bg_6 = self.canvas.create_image(382.5, 165.0, image=self.entry_image_6)
entry_6 = Entry(
self,
bd=0,
bg="#EFEFEF",
highlightthickness=0,
foreground="#777777",
font=("Montserrat Bold", 18 * -1),
)
entry_6.place(x=293.0, y=153.0, width=179.0, height=22.0)
self.data["meal"] = entry_6
self.entry_image_7 = PhotoImage(file=relative_to_assets("entry_7.png"))
entry_bg_7 = self.canvas.create_image(378.5, 259.0, image=self.entry_image_7)
self.canvas.create_text(
293.0,
234.0,
anchor="nw",
text="Check-in Time",
fill="#5E95FF",
font=("Montserrat Bold", 14 * -1),
)
self.entry_image_8 = PhotoImage(file=relative_to_assets("entry_8.png"))
entry_bg_8 = self.canvas.create_image(382.5, 271.0, image=self.entry_image_8)
entry_8 = Entry(
self,
bd=0,
bg="#EFEFEF",
highlightthickness=0,
foreground="#777777",
font=("Montserrat Bold", 18 * -1),
)
entry_8.place(x=293.0, y=259.0, width=179.0, height=22.0)
self.data["check_in"] = entry_8
self.button_image_1 = PhotoImage(file=relative_to_assets("button_1.png"))
button_1 = Button(
self,
image=self.button_image_1,
borderwidth=0,
highlightthickness=0,
command=self.save,
relief="flat",
)
button_1.place(x=164.0, y=322.0, width=190.0, height=48.0)
self.canvas.create_text(
139.0,
59.0,
anchor="nw",
text="Add a Reservation",
fill="#5E95FF",
font=("Montserrat Bold", 26 * -1),
)
self.canvas.create_text(
549.0,
59.0,
anchor="nw",
text="Operations",
fill="#5E95FF",
font=("Montserrat Bold", 26 * -1),
)
self.canvas.create_rectangle(
515.0, 59.0, 517.0, 370.0, fill="#EFEFEF", outline=""
)
self.button_image_2 = PhotoImage(file=relative_to_assets("button_2.png"))
button_2 = Button(
self,
image=self.button_image_2,
borderwidth=0,
highlightthickness=0,
command=lambda: self.parent.navigate("view"),
relief="flat",
)
button_2.place(x=547.0, y=116.0, width=209.0, height=74.0)
self.button_image_3 = PhotoImage(file=relative_to_assets("button_3.png"))
button_3 = Button(
self,
image=self.button_image_3,
borderwidth=0,
highlightthickness=0,
command=lambda: self.parent.navigate("edit"),
relief="flat",
)
button_3.place(x=547.0, y=210.0, width=209.0, height=74.0)
# Set default value for entry
self.data["check_in"].insert(0, "now")
# Save the data to the database
def save(self):
# check if any fields are empty
for label in self.data.keys():
if self.data[label].get() == "":
messagebox.showinfo("Error", "Please fill in all the fields")
return
# Save the reservation
result = db_controller.add_reservation(
*[self.data[label].get() for label in ("g_id", "meal", "r_id", "check_in")]
)
if result:
messagebox.showinfo("Success", "Reservation added successfully")
self.parent.navigate("view")
self.parent.refresh_entries()
# clear all fields
for label in self.data.keys():
self.data[label].delete(0, "end")
else:
messagebox.showerror(
"Error",
"Unable to add reservation. Please make sure the data is validated",
)
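# ---------------------------------------------------------------------
# Illustrative harness (added sketch, not part of the original app):
# AddReservations expects its parent widget to provide navigate() and
# refresh_entries(), which the real application supplies elsewhere.
# The stand-in parent below is an assumption for manual testing only;
# it still requires the image assets and the db controller to be present.
if __name__ == "__main__":
    from tkinter import Tk

    class _SketchParent(Tk):
        def navigate(self, page_name):
            print("navigate to", page_name)

        def refresh_entries(self):
            print("refresh entries")

    _root = _SketchParent()
    _root.geometry("797x432")
    AddReservations(_root).pack(fill="both", expand=True)
    _root.mainloop()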
|
33727
|
from com.huawei.iotplatform.client.dto.DeviceCommandCancelTaskRespV4 import DeviceCommandCancelTaskRespV4
from com.huawei.iotplatform.client.dto.Pagination import Pagination
class QueryDeviceCmdCancelTaskOutDTO(object):
pagination = Pagination()
data = DeviceCommandCancelTaskRespV4()
def __init__(self):
pass
def getPagination(self):
return self.pagination
def setPagination(self, pagination):
self.pagination = pagination
def getData(self):
return self.data
def setData(self, data):
self.data = data
|