seq_id (string) | text (string) | repo_name (string) | sub_path (string) | file_name (string) | file_ext (string) | file_size_in_byte (int64) | program_lang (string) | lang (string) | doc_type (string) | stars (int64) | dataset (string) | pt (string) | api (list)
---|---|---|---|---|---|---|---|---|---|---|---|---|---
516642055
|
r"""Test `lmp.tokenizer.CharListTokenizer.detokenize`.
Usage:
python -m unittest \
test/lmp/tokenizer/_char_list_tokenizer/test_detokenize.py
"""
# built-in modules
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import inspect
import gc
import math
import unittest
from typing import Iterable
# self-made modules
from lmp.tokenizer import CharListTokenizer
class TestDetokenize(unittest.TestCase):
r"""Test Case for `lmp.tokenizer.CharListTokenizer.detokenize`."""
def setUp(self):
r"""Setup both cased and uncased tokenizer instances."""
self.cased_tokenizer = CharListTokenizer()
self.uncased_tokenizer = CharListTokenizer(is_uncased=True)
self.tokenizers = [self.cased_tokenizer, self.uncased_tokenizer]
def tearDown(self):
r"""Delete both cased and uncased tokenizer instances."""
del self.tokenizers
del self.cased_tokenizer
del self.uncased_tokenizer
gc.collect()
def test_signature(self):
r"""Ensure signature consistency."""
msg = 'Inconsistent method signature.'
self.assertEqual(
inspect.signature(CharListTokenizer.detokenize),
inspect.Signature(
parameters=[
inspect.Parameter(
name='self',
kind=inspect.Parameter.POSITIONAL_OR_KEYWORD,
default=inspect.Parameter.empty
),
inspect.Parameter(
name='tokens',
kind=inspect.Parameter.POSITIONAL_OR_KEYWORD,
annotation=Iterable[str],
default=inspect.Parameter.empty
)
],
return_annotation=str
),
msg=msg
)
def test_invalid_input(self):
r"""Raise `TypeError` when input is invalid."""
msg1 = 'Must raise `TypeError` when input is invalid.'
msg2 = 'Inconsistent error message.'
examples = (
0, 1, -1, 0.0, 1.0, math.nan, math.inf, True, False,
(1, 2, 3), [1, 2, 3], {1, 2, 3}, None,
)
for invalid_input in examples:
for tokenizer in self.tokenizers:
with self.assertRaises(TypeError, msg=msg1) as ctx_man:
tokenizer.detokenize(invalid_input)
self.assertEqual(
ctx_man.exception.args[0],
'`tokens` must be instance of `Iterable[str]`.',
msg=msg2
)
def test_expected_return(self):
r"""Return expected strings."""
msg = 'Inconsistent detokenization result.'
examples = (
(
['H', 'e', 'l', 'l', 'o', ' ', 'w', 'o', 'r', 'l', 'd', '!'],
'Hello world!'
),
(
[],
''
)
)
for tokens, ans_sequence in examples:
for tokenizer in self.tokenizers:
out_sequence = tokenizer.detokenize(tokens)
self.assertIsInstance(out_sequence, str, msg=msg)
self.assertEqual(out_sequence, ans_sequence, msg=msg)
def test_case_insensitive(self):
r"""Detokenize does not consider cases."""
msg = 'Inconsistent detokenization result.'
examples = (
['H', 'e', 'L', 'l', 'O', ' ', 'W', 'o', 'R', 'l', 'D', '!'],
['h', 'e', 'l', 'l', 'o', ' ', 'w', 'o', 'r', 'l', 'd', '!'],
)
for tokens in examples:
self.assertEqual(
self.cased_tokenizer.detokenize(tokens),
self.uncased_tokenizer.detokenize(tokens),
msg=msg
)
if __name__ == '__main__':
unittest.main()
| null |
test/lmp/tokenizer/_char_list_tokenizer/test_detokenize.py
|
test_detokenize.py
|
py
| 3,951 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "unittest.TestCase",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_name": "lmp.tokenizer.CharListTokenizer",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "lmp.tokenizer.CharListTokenizer",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "gc.collect",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "inspect.signature",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "lmp.tokenizer.CharListTokenizer.detokenize",
"line_number": 48,
"usage_type": "attribute"
},
{
"api_name": "lmp.tokenizer.CharListTokenizer",
"line_number": 48,
"usage_type": "name"
},
{
"api_name": "inspect.Signature",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "inspect.Parameter",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "inspect.Parameter",
"line_number": 53,
"usage_type": "attribute"
},
{
"api_name": "inspect.Parameter",
"line_number": 54,
"usage_type": "attribute"
},
{
"api_name": "inspect.Parameter",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "inspect.Parameter",
"line_number": 58,
"usage_type": "attribute"
},
{
"api_name": "typing.Iterable",
"line_number": 59,
"usage_type": "name"
},
{
"api_name": "inspect.Parameter",
"line_number": 60,
"usage_type": "attribute"
},
{
"api_name": "math.nan",
"line_number": 73,
"usage_type": "attribute"
},
{
"api_name": "math.inf",
"line_number": 73,
"usage_type": "attribute"
},
{
"api_name": "unittest.main",
"line_number": 125,
"usage_type": "call"
}
] |
307704489
|
import string
from hashlib import sha1
from re import compile
from json import dump
from csv import reader
from unicodedata import normalize
from model import restaurant_types, remove_from_place
from utils import calculate_distance
class ItemParser():
def __init__(self, filename, item_types):
self.filename = filename
self.item_types = item_types
def read_items_from_file(self, has_headers=True):
with open(self.filename) as csvfile:
csvreader = reader(csvfile)
if has_headers:
next(csvreader, None)
for row in csvreader:
item = {}
for index, elem in enumerate(row):
item[self.item_types[index][0]] = self.item_types[index][1](elem)
yield item
def process(self):
raise NotImplementedError
class RestaurantParser(ItemParser):
""" doctring """
restaurants = {}
closest = {}
furthest = {}
plus10 = []
def parse_item(self, item):
""" docstring """
justthename = compile(r"(?<!^)\s" + r'|(?<!^)\s'.join(remove_from_place) +r"|\s\(.*\)|^.*\sat\s|[pP]o-?[bB]oys?|['’]s.*")
nameandkind = compile(r"\s\(.*\)|^.*\sat\s")
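# Note (illustrative, not in the original): `justthename` appears to strip the
# noise terms from remove_from_place (when preceded by whitespace, not at the
# start), parenthesised suffixes, any leading "... at " prefix, "po-boy"
# variants, and possessive 's tails; `nameandkind` strips only the
# parenthesised suffix and the "... at " prefix.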
print(justthename.pattern)
nplace = normalize('NFKC', item['Place'])
dirty = justthename.sub("", nplace)
cleaner = dirty.maketrans('', '', string.punctuation + string.whitespace)
name = dirty.translate(cleaner).lower()
fullname = nameandkind.sub("", nplace)
self.restaurants[name] = {'fullname' : fullname,
'address' : item['Address'],
'latitude' : item['Latitude'],
'longitude': item['Longitude'],
'tips' : item['Tips']
}
def min_max_distance(self):
""" docstring """
mindist, maxdist = 99999,-1
for idx1, rest1 in enumerate(self.restaurants.keys()):
point1 = (self.restaurants[rest1]['latitude'],
self.restaurants[rest1]['longitude'])
for idx2, rest2 in enumerate(self.restaurants.keys()):
point2 = (self.restaurants[rest2]['latitude'],
self.restaurants[rest2]['longitude'])
if idx2 > idx1:
distance = calculate_distance((point1[0],point1[1]), (point2[0],point2[1]))
if distance > maxdist:
maxdist = distance
self.furthest = {'rest1': self.restaurants[rest1]['fullname'],
'rest2': self.restaurants[rest2]['fullname'],
'distance' : distance }
if distance < mindist and distance > 0:
mindist = distance
self.closest = {'rest1': self.restaurants[rest1]['fullname'],
'rest2': self.restaurants[rest2]['fullname'],
'distance' : distance }
def menu_gt10(self):
""" docstring """
price = compile(r"\$(\d+(\.\d{2})?)")
for rest in self.restaurants.keys():
value = price.search(self.restaurants[rest]["tips"])
if value and float(value.group(1)) > 10:
self.plus10.append(self.restaurants[rest]['fullname'])
def process(self):
for item in self.read_items_from_file():
self.parse_item(item)
self.min_max_distance()
self.menu_gt10()
if __name__ == '__main__':
restparser = RestaurantParser('restaurants.csv', restaurant_types)
restparser.process()
print("\nUnique restaurants: {}".format(len(restparser.restaurants)))
print("Furthest: {} and {} at {} km".format(restparser.furthest["rest1"],
restparser.furthest["rest2"],
restparser.furthest["distance"]))
print("Closest: {} and {} at {} km".format(restparser.closest["rest1"],
restparser.closest["rest2"],
restparser.closest["distance"]))
print("Restaurants with items that cost more than $10: {}\n".format(", ".join(restparser.plus10)))
| null |
xtras/poc.py
|
poc.py
|
py
| 4,565 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "csv.reader",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "model.remove_from_place",
"line_number": 50,
"usage_type": "argument"
},
{
"api_name": "re.compile",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "unicodedata.normalize",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "string.punctuation",
"line_number": 57,
"usage_type": "attribute"
},
{
"api_name": "string.whitespace",
"line_number": 57,
"usage_type": "attribute"
},
{
"api_name": "utils.calculate_distance",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_number": 102,
"usage_type": "call"
},
{
"api_name": "model.restaurant_types",
"line_number": 121,
"usage_type": "argument"
}
] |
301590972
|
"""
This module contains the implementations of all three SniffNet models
"""
from keras.layers import Conv2D
from keras.models import Sequential
from keras.layers import AveragePooling2D
from keras.layers import MaxPooling2D
from keras.layers import BatchNormalization
from keras.layers import Flatten, Dropout, Add
from keras.layers import Dense
from keras.layers import Input
from keras.models import Model
from keras.layers.merge import concatenate
from sklearn.svm import SVC
from sklearn.neighbors import KNeighborsClassifier as KNN
def sniffnet(input_shape, n_classes):
kernel = (20, input_shape[1] // 2 - 1)
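# Note (illustrative): the kernel spans 20 samples along the first input axis
# and just under half of the second axis (presumably time steps x sensor
# channels), so the first convolution sees wide cross-sensor context.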
multiplier = 10
out_channels = 5 * multiplier
# convolutional components
model = Sequential()
model.add(Conv2D(out_channels, kernel, input_shape=input_shape, use_bias=True,
activation='relu', name='first_conv'))
model.add(BatchNormalization())
model.add(Dropout(0.25))
kernel = ((input_shape[0] - kernel[0] + 1) // 2, kernel[1])
model.add(Conv2D(out_channels, kernel, use_bias=True,
activation='relu', name='second_conv'))
model.add(BatchNormalization())
model.add(Dropout(0.25))
model.add(AveragePooling2D())
model.add(Flatten())
model.add(Dense(out_channels, use_bias=True, activation='relu', name="camada_fc1"))
model.add(BatchNormalization())
model.add(Dense(n_classes, use_bias=True, activation='softmax', name="classification"))
return model
def sniffresnet(input_shape, n_classes):
multiplier = 4
kernel = (8, input_shape[1] // 2 - 1)
out_channels = 5 * multiplier
# First Part of the convolution
x_input = Input(input_shape)
x_skip = Conv2D(out_channels, kernel,
activation='relu', name='first_conv1')(x_input)
layer_x = Conv2D(out_channels, kernel,
padding='same', activation='relu', name='first_conv2')(x_skip)
layer_x = BatchNormalization()(layer_x)
layer_x = Add()([layer_x, x_skip])
layer_x = MaxPooling2D((2, 1), padding='same', name="max_pool1")(layer_x)
# Second Part of the convolution
out_channels = out_channels * multiplier
x_skip = Conv2D(out_channels, kernel, activation='relu', name='second_conv1')(layer_x)
layer_x = Conv2D(out_channels, kernel, padding='same', use_bias=True,
activation='relu', name='second_conv2')(x_skip)
layer_x = BatchNormalization()(layer_x)
layer_x = Add()([layer_x, x_skip])
layer_x = MaxPooling2D((2, 1), name="max_pool2")(layer_x)
# Fully Connected Part
layer_x = Flatten()(layer_x)
layer_x = Dense(100, use_bias=True, activation="relu", name="fc1")(layer_x)
layer_x = Dropout(.25)(layer_x)
layer_x = Dense(n_classes, use_bias=True, activation="softmax", name="class")(layer_x)
model = Model(inputs=x_input, outputs=layer_x, name="SniffResnet")
return model
def sniffmultinose(input_shape, n_classes):
inputs_list = []
multinose_out = []
for i in range(input_shape[1]):
x_input = Input((input_shape[0],), name=("input_nose_" + str(i)))
inputs_list.append(x_input)
layer_x = Dense(input_shape[0], input_shape=(input_shape[0],),
use_bias=True, activation='relu',
name=("fc1_nose_" + str(i)))(x_input)
layer_x = Dense(input_shape[0] // 4, use_bias=True,
activation='tanh',
name=("fc2_nose_" + str(i)))(layer_x)
layer_x = Dense(input_shape[0] // 8, use_bias=True,
activation='tanh',
name=("fc3_nose_" + str(i)))(layer_x)
multinose_out.append(layer_x)
concat = concatenate(multinose_out)
layer_x = Dense(100, activation='tanh', use_bias=True)(concat)
layer_x = Dense(100, activation='relu', use_bias=True)(layer_x)
x_out = Dense(n_classes, activation='softmax', name="class")(layer_x)
model = Model(inputs=inputs_list, outputs=x_out, name="SniffNetMultiNose")
return model
def get_knn_classifier(n_neighbors):
return KNN(n_neighbors=n_neighbors)
def get_svm(m_gamma=8.3):
return SVC(gamma=m_gamma, C=10, kernel='rbf')
def get_mlp(input_shape, n_classes):
# Functional-API MLP: an explicit Input tensor feeding stacked tanh layers.
x_input = Input(input_shape)
x = Dense(100, activation='tanh')(x_input)
x = Dense(30, activation='tanh')(x)
x = Dense(30, activation='tanh')(x)
x = Dense(30, activation='tanh')(x)
x = Dense(30, activation='tanh')(x)
x = Dense(30, activation='tanh')(x)
x = Dense(30, activation='tanh')(x)
x_out = Dense(n_classes, activation='softmax')(x)
model = Model(inputs=x_input, outputs=x_out, name='simple_mlp')
return model
| null |
models.py
|
models.py
|
py
| 4,694 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "keras.models.Sequential",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "keras.layers.Conv2D",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "keras.layers.BatchNormalization",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "keras.layers.Dropout",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "keras.layers.Conv2D",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "keras.layers.BatchNormalization",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "keras.layers.Dropout",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "keras.layers.AveragePooling2D",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "keras.layers.Flatten",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "keras.layers.Dense",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "keras.layers.BatchNormalization",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "keras.layers.Dense",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "keras.layers.Input",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "keras.layers.Conv2D",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "keras.layers.Conv2D",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "keras.layers.BatchNormalization",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "keras.layers.Add",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "keras.layers.MaxPooling2D",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "keras.layers.Conv2D",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "keras.layers.Conv2D",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "keras.layers.BatchNormalization",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "keras.layers.Add",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "keras.layers.MaxPooling2D",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "keras.layers.Flatten",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "keras.layers.Dense",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "keras.layers.Dropout",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "keras.layers.Dense",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "keras.models.Model",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "keras.layers.Input",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "keras.layers.Dense",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "keras.layers.Dense",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "keras.layers.Dense",
"line_number": 89,
"usage_type": "call"
},
{
"api_name": "keras.layers.merge.concatenate",
"line_number": 94,
"usage_type": "call"
},
{
"api_name": "keras.layers.Dense",
"line_number": 96,
"usage_type": "call"
},
{
"api_name": "keras.layers.Dense",
"line_number": 97,
"usage_type": "call"
},
{
"api_name": "keras.layers.Dense",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "keras.models.Model",
"line_number": 100,
"usage_type": "call"
},
{
"api_name": "sklearn.neighbors.KNeighborsClassifier",
"line_number": 105,
"usage_type": "call"
},
{
"api_name": "sklearn.svm.SVC",
"line_number": 109,
"usage_type": "call"
},
{
"api_name": "keras.layers.Dense",
"line_number": 113,
"usage_type": "call"
},
{
"api_name": "keras.layers.Dense",
"line_number": 114,
"usage_type": "call"
},
{
"api_name": "keras.layers.Dense",
"line_number": 115,
"usage_type": "call"
},
{
"api_name": "keras.layers.Dense",
"line_number": 116,
"usage_type": "call"
},
{
"api_name": "keras.layers.Dense",
"line_number": 117,
"usage_type": "call"
},
{
"api_name": "keras.layers.Dense",
"line_number": 118,
"usage_type": "call"
},
{
"api_name": "keras.layers.Dense",
"line_number": 119,
"usage_type": "call"
},
{
"api_name": "keras.layers.Dense",
"line_number": 120,
"usage_type": "call"
},
{
"api_name": "keras.models.Model",
"line_number": 121,
"usage_type": "call"
}
] |
143130756
|
# -*- coding: utf-8 -*-
"""
Created on Sun Apr 5 18:09:10 2020
Event ranking
"""
from bs4 import BeautifulSoup
import AbsHtmlPage
import ProfilePage
from datetime import datetime
import csv
import pandas as pd
import json
class EventPage(AbsHtmlPage.AbsHtmlPage):
eventPageName = 'event'
pageDatetime = 'a'
contribution_dfs = pd.DataFrame()
ForDebug = False
"""
Fetch the event page
"""
def getPage(self, eventName, eventId):
self.eventPageName = eventName
self.eventId = eventId
print(self.eventPageName)
# For debugging: read from a local file instead of over the network.
if(self.ForDebug == True):
with open(self.eventPageName + ".html", mode='r', encoding='utf-8') as fileObj:
text = fileObj.read()
else:
text = super().getHtmlPage("/event/" + eventName)
self.pageDatetime = datetime.now().strftime('%Y/%m/%d %H:%M:%S')
return text
"""
Fetch additional participant data for the event page
(an additional page exists when there are 30 or more participants)
"""
def getNextPage(self, nextPage):
print(nextPage)
if(self.ForDebug == True):
with open(self.eventPageName + str(nextPage) + ".json", mode='r', encoding='utf-8') as fileObj:
text = fileObj.read()
else:
text = super().getHtmlPage("/event/room_list?event_id=" + str(self.eventId) + "&p=" + str(nextPage))
return text
"""
Fetch the contribution-user list page
"""
def getContributionPage(self, roomId):
print(roomId)
# For debugging: read from a local file instead of over the network.
if(self.ForDebug == True):
htmlfile = 'Contribution.html'
if roomId == 268535 :
htmlfile = 'Contribution2.html'
with open(htmlfile, mode='r', encoding='utf-8') as f:
text = f.read()
else:
# Fetch the HTML over the network.
text = super().getHtmlPage("/event/contribution/" + self.eventPageName + "?room_id=" + str(roomId) )
# Keep the retrieval datetime.
self.pageDatetime = datetime.now().strftime('%Y/%m/%d %H:%M:%S')
return text
"""
Fetch a single member's current event points
"""
def getMemberTotalPoint(self, roomId):
print(roomId)
# Fetch the page data as JSON.
# For debugging: read from a local file instead of over the network.
if(self.ForDebug == True):
htmlfile = 'event_and_support.html'
if roomId == 268535 :
htmlfile = 'Contribution2.html'
with open(htmlfile, mode='r', encoding='utf-8') as f:
text = f.read()
else:
# Fetch the HTML over the network.
text = super().getHtmlPage("/api/room/event_and_support?room_id=" + str(roomId) )
# Keep the retrieval datetime.
self.pageDatetime = datetime.now().strftime('%Y/%m/%d %H:%M:%S')
#print text
"""
Extract information from the event page
"""
def extractData(self, text) :
soup = BeautifulSoup(text, 'lxml')
# Extract the ranking members.
#RankingMembers = soup.find("ul", attrs={"class": "contentlist-rowlist", "id": "list-ranking"}).find_all("li")
RankingMembers = soup.find_all("li", class_="js-follow-li")
seeMore = soup.find_all("a", attrs={"class":"see-more", "data-type":"ranking"})
nextPage = None
if 0 < len(seeMore):
nextPage = seeMore[0]['data-page']
while nextPage != None:
print(seeMore)
if seeMore[0].text == "もっと見る":  # "See more"
jsondata = self.getNextPage(nextPage)
data = json.loads(jsondata)
nextPage = data['next_page']
soupdata = BeautifulSoup(data['html'], 'lxml')
sublist = soupdata.find_all("li")
RankingMembers.extend(sublist)
else:
print('nothing')
break
currentPointList = []
with open(self.eventPageName + '.csv', mode='a', encoding='utf8') as fileObj:
writer = csv.writer(fileObj, delimiter=',', lineterminator='\n', skipinitialspace=True)
for member in RankingMembers:
# Current rank.
roomRankingNum = member.find("div", class_="label-ranking")
if roomRankingNum != None:
roomRankingNum = roomRankingNum.text.split()[0]
# Room name.
roomName = member.find("h4", class_="listcardinfo-main-text").text
# Relative link to the event contribution ranking (/event/contribution/<event name>?room_id=XXXXX).
roomContributeLink = member.find("a", class_="room-ranking-link")["href"]
# Relative link to the profile page (/).
roomProfileLink = member.find("a", class_="profile-link")["href"]
roomId = member.find("a", class_="js-follow-btn")["data-room-id"]
singleData = [roomRankingNum, self.pageDatetime, roomId, roomName]
print(singleData)
writer.writerow(singleData)
# Fetch the contribution-user list.
dfs_new = self.extractContribution(roomId, roomName)
self.contribution_dfs = pd.concat([self.contribution_dfs, dfs_new], axis=1)
print(type(self.contribution_dfs), type(dfs_new))
print(self.contribution_dfs)
# Parse the profile page.
self.getSingleProfle(roomId)
# Fetch the current point total.
#self.getMemberTotalPoint(roomId)
#currentPointList =
savetime = datetime.now().strftime('%Y%m%d_%H%M')
self.contribution_dfs.to_csv('contributor_' + self.eventPageName + savetime + '.csv')
return "aa"
"""
Fetch and parse the profile page
"""
def getSingleProfle(self, roomId):
profile = ProfilePage.ProfilePage()
text = profile.getPage(roomId)
profile.extractData(text)
profile.saveData()
"""
Fetch the contribution-user list
"""
def extractContribution(self, roomId, roomName):
html = self.getContributionPage(roomId)
dfs = pd.read_html(html)
#print(len(dfs))
idx = 0
if 2 == len(dfs):
idx = 1
dfs_new = dfs[idx].add_prefix(str(roomId) + '_')
#print(dfs_new)
return dfs_new
# def getSingleContribute(self, )
if __name__ == "__main__":
eventPage = EventPage()
events = [{'name': "spinnsfmodel_sa_final", 'id': 18460}, \
{'name': "popentertainment_sr4_semif_b", 'id': 18578}]
text = eventPage.getPage(events[0]['name'], events[0]['id'])
eventPage.extractData(text)
| null |
getsrdata/EventPage.py
|
EventPage.py
|
py
| 7,432 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "AbsHtmlPage.AbsHtmlPage",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "pandas.DataFrame",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 36,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.now",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 71,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.now",
"line_number": 93,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 93,
"usage_type": "name"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 101,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 117,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 119,
"usage_type": "call"
},
{
"api_name": "csv.writer",
"line_number": 129,
"usage_type": "call"
},
{
"api_name": "pandas.concat",
"line_number": 149,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 161,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 161,
"usage_type": "name"
},
{
"api_name": "ProfilePage.ProfilePage",
"line_number": 170,
"usage_type": "call"
},
{
"api_name": "pandas.read_html",
"line_number": 180,
"usage_type": "call"
}
] |
173670654
|
# Class for serial port open/read/write
import re, logging, sys, time, os, signal, platform
import pexpect
from framework.globalconfig.gc import *
from framework.connection import ConnectionInterface
DEFAULT_LOGIN_PATTERN = ".*login:.*"
DEFAULT_PWD_PATTERN = "Password:"
class SerialPort(ConnectionInterface.ConnectionInterface):
def __init__(self, portname, baudrate=DEFAULT_SERIAL_BAUDRATE, uname=DEFAULT_ROVER_USER, passwd=DEFAULT_ROVER_PWD,
prompt=DEFAULT_PROMPT, uprompt=DEFAULT_UBOOT_PROMPT):
super(SerialPort, self).__init__(uname, passwd, prompt, uprompt)
self.portname = portname
self.baudrate = int(baudrate)
self.conn_cmd = "cu -l %s -s %s" % (self.portname, self.baudrate)
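# `cu -l <line> -s <speed>` dials the serial line through the POSIX `cu`
# utility; pexpect then drives that interactive session like a terminal.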
if not self.openConnection():
raise IOError('Serial connectivity could not be established')
def openConnection(self):
status = False
if self.handle:
return True
self.handle = pexpect.spawn(command='/bin/bash', args=['-c', self.conn_cmd])
self.handle.logfile_read = sys.stdout
try:
result = self.handle.expect(".*Connected.*")
if result == 0:
status = True
except pexpect.EOF as e:
logging.error("Board is in use (connection refused).")
finally:
return status
def __str__(self):
return ("handle[%s]" % (self.handle))
def __del__(self):
self.close()
def close(self):
if self.handle:
self.handle.sendline("~.")
self.handle.close()
self.handle = None
def get_handle(self):
return self.handle
def waitForReboot(self, waittime=100):
'''Verify Linux starts up.'''
self.handle.expect(['Booting Linux', 'Starting kernel ...'], timeout=45)
i = self.handle.expect(['Please press Enter to activate this console', 'U-Boot'], timeout=150)
if i == 1:
raise Exception('U-Boot came back when booting kernel')
# Give things time to start or crash on their own.
# Some things, like wifi, take a while.
colorBlue("Waiting for %s seconds for linux prompt" % waittime)
time.sleep(waittime)
self.handle.sendline('\r')
self.handle.expect(self.prompt)
self.handle.sendline('uname -a')
self.handle.expect('Linux ')
self.handle.expect(self.prompt)
def wait_for_message(self, expected_message, timeout=100):
"""
Waits for expected message to show in serial output
:param expected_message: the message to look for in the serial output
:param timeout: the time to allow for message to appear in the serial output
:return: the time in seconds it took for message to appear in the serial output.
-1 if something went wrong
"""
duration = -1
try:
if not expected_message:
raise ValueError("Invalid value for input expected_message. Expected not None or not empty message.")
if timeout < 0:
raise ValueError("Invalid value for input timeout. Expected a positive timeout.")
start_time = time.time()
case_matched = self.handle.expect([expected_message, pexpect.EOF, pexpect.TIMEOUT], timeout=timeout)
if case_matched == 0:
end_time = time.time()
duration = end_time - start_time
logging.info("Found message [%s]. Took %s seconds for message to come up." % (expected_message, duration))
elif case_matched == 1:
error_msg = "Found End of File before finding expected message %s" % expected_message
raise ValueError(error_msg)
elif case_matched == 2:
error_msg = "Got Timeout after %s seconds before finding expected message %s" % (time, expected_message)
raise ValueError(error_msg)
except Exception as e:
logging.exception("There was an exception while trying to wait for message. Exception is %s" % e)
finally:
return duration
def rebootPi(self):
"""
Executes reboot command on prompt. Logs in if needed.
:return: None
"""
self.handle.sendline('\r')
rc = self.handle.expect([DEFAULT_LOGIN_PATTERN, self.prompt], timeout=10)
if rc == 0:
self.handle.sendline(self.uname)
self.handle.expect(DEFAULT_PWD_PATTERN)
self.handle.sendline(self.passwd)
self.handle.expect(self.prompt)
self.handle.sendline('\r')
self.write(cmd="reboot")
def waitForPiReboot(self, waittime=100):
"""
Waits for PI to reboot into Raspbian
:param waittime: time to wait for the prompt
:return: None.
"""
'''Verify Linux starts up.'''
self.handle.expect(['Booting Linux', 'Starting kernel ...', 'U-Boot'], timeout=45)
colorBlue("Waiting for %s seconds for linux prompt" % str(waittime))
self.handle.expect('Raspbian ', timeout=waittime)
rc = self.handle.expect([DEFAULT_LOGIN_PATTERN, self.prompt], timeout=10)
if rc == 0:
self.handle.sendline(self.uname)
self.handle.expect(DEFAULT_PWD_PATTERN)
self.handle.sendline(self.passwd)
self.handle.expect(self.prompt)
self.handle.sendline('\r\n')
self.handle.sendline('uname -a')
self.handle.expect('Linux ')
self.handle.expect(self.prompt)
def check_for_reset(self, timeout=60):
colorBlue("Waiting %s seconds for Rover reset" % timeout)
index = self.handle.expect(['reboot: Restarting system', 'U-Boot'], timeout=timeout)
if index == 0:
colorBlue("\"Restarting system\" string found")
self.waitForReboot()
def enterUboot(self, uprompt=None, upattern=None):
time.sleep(2)
status = False
try:
self.handle.expect(u"U-Boot", timeout=30)
self.handle.expect(u'Hit any key ')
self.handle.sendline(u'\n\n\n\n\n\n\n') # try really hard
self.handle.expect(unicode(self.uprompt), timeout=4)
self.handle.sendline(u'echo FOO')
time.sleep(1)
self.handle.expect(u'echo FOO', timeout=4)
self.handle.expect(u'FOO')
self.handle.expect(unicode(self.uprompt), timeout=4)
status = True
except pexpect.EOF as eof:
logging.error(eof)
raise
except pexpect.TIMEOUT as to:
logging.error(to)
raise
except Exception as e:
logging.error(e)
raise
finally:
return status
| null |
framework/serial/serialD.py
|
serialD.py
|
py
| 6,823 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "framework.connection.ConnectionInterface.ConnectionInterface",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "framework.connection.ConnectionInterface",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "pexpect.spawn",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "sys.stdout",
"line_number": 28,
"usage_type": "attribute"
},
{
"api_name": "pexpect.EOF",
"line_number": 33,
"usage_type": "attribute"
},
{
"api_name": "logging.error",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "pexpect.EOF",
"line_number": 86,
"usage_type": "attribute"
},
{
"api_name": "pexpect.TIMEOUT",
"line_number": 86,
"usage_type": "attribute"
},
{
"api_name": "time.time",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "logging.exception",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 149,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 157,
"usage_type": "call"
},
{
"api_name": "pexpect.EOF",
"line_number": 163,
"usage_type": "attribute"
},
{
"api_name": "logging.error",
"line_number": 164,
"usage_type": "call"
},
{
"api_name": "pexpect.TIMEOUT",
"line_number": 166,
"usage_type": "attribute"
},
{
"api_name": "logging.error",
"line_number": 167,
"usage_type": "call"
},
{
"api_name": "logging.error",
"line_number": 170,
"usage_type": "call"
}
] |
361742187
|
import mistune
import latex2mathml.converter
import os
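# Note (illustrative, not in the original): py_states below is a hand-rolled
# character-level state machine over the pattern '```{}```'. It appears to
# rewrite Rmd-style fences of the form ```{...} body ``` into
# <div class="codeblock"><pre class="prettyprint"> body </pre></div>,
# discarding the {...} tag, and appends the code-prettify loader script once
# at the end of the document.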
def py_states(content):
states = '```{}```'
state = 0
new_content = ''
word = ''
for c in content:
if states[state] == c:
if state == 7:
new_content = new_content + '</pre></div>'
word = ''
state = 0
else:
if (state >= 0 and state <= 3) or (state == 5 or state == 6):
word = word + c
elif state == 4:
new_content = new_content + '<div class="codeblock"><pre class="prettyprint">'
word = ''
state = state + 1
else:
if (state >= 1 and state <= 3) or (state == 6 or state == 7):
new_content = new_content + word + c
elif state != 4:
new_content = new_content + c
new_content = new_content + '<script src="https://cdn.jsdelivr.net/gh/google/code-prettify@master/loader/run_prettify.js"></script>'
return new_content
def html_states(content):
html = '<script src="https://fred-wang.github.io/mathml.css/mspace.js"></script>\n'
state = 0
states = [[1 ,2 ,0], [0 ,1 ,1], [0 ,0 ,0]]
math = ''
for c in content:
if state == 1 and c != '$':
math = math + c
elif '$' == c:
if state == 1:
html = html + latex2mathml.converter.convert(math)
math = ''
state = states[state][0]
elif '\\' == c:
state = states[state][1]
else:
if state == 2:
html = html + '\\' + c
elif state == 0:
html = html + c
state = states[state][2]
return html
def math_states(content):
new_content = ''
word = ''
state = 0
states = '<math>'
for c in content:
if states[state] == c:
word = word + c
if state == len(states) - 1:
new_content = new_content + '<math xmlns="http://www.w3.org/1998/Math/MathML">'
state = 0
else:
state = state + 1
else:
new_content = new_content + word + c
word = ''
state = 0
return new_content
def table_states(content):
new_content = ''
word = ''
state = 0
states = '<table>>ad>'
for c in content:
if states[state] == c:
if state == 6:
new_content = new_content + word + ' class="table table-striped table-bordered">'
state = 0
word = ''
elif state == 7:
new_content = new_content + word + ' scope="col">'
state = 0
word = ''
elif state == 10:
new_content = new_content + word + ' class="thead-light">'
state = 0
word = ''
else:
state = state + 1
word = word + c
else:
if state == 2 and c == 'h':
state = 7
word = word + c
elif state == 7 and c == 'e':
state = state + 1
word = word + c
else:
new_content = new_content + word + c
word = ''
state = 0
return new_content
def insert_at_id(html, identity, content):
states = '<div id="{}">'.format(identity)
state = 0
new_html = ''
word = ''
for c in html:
if state == len(states)-2:
if c == '>':
new_html += word + c + content
state = 0
word = ''
else:
word += c
elif states[state] == c:
state += 1
word += c
else:
state = 0
new_html += word + c
word = ''
return new_html
def insert_content(input_template, new_content, title):
template = open(input_template, 'r')
if template.mode == 'r':
print('Inserting HTML')
html_template = template.read()
html_template = insert_at_id(html_template, 'inject', new_content)
new_html = insert_at_id(html_template, 'title', title)
template.close()
return new_html
else:
print('Input template file not found.')
def create_page_content(input_file, title, subject):
print(input_file)
file = open(input_file, 'r')
if file.mode == 'r':
print('Creating HTML content')
md_content = file.read()
md_parser = mistune.Markdown()
parsed_code = py_states(md_content)
html = md_parser(parsed_code)
html = math_states(html)
html = table_states(html)
template = 'subtemplate_updated.html' if subject != None else 'template_updated.html'
final_output = insert_content(template, html, title)
return final_output
else:
print('Input file not found.')
def test2():
output = open('md_output2.html', 'w+')
output.write(create_page_content('example_sheet.Rmd', 'Example Sheet', None))  # 'Example Sheet' is an illustrative title; subject=None selects the default template
output.close()
#test2()
| null |
MDgen.py
|
MDgen.py
|
py
| 5,318 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "latex2mathml.converter.converter.convert",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "latex2mathml.converter.converter",
"line_number": 43,
"usage_type": "attribute"
},
{
"api_name": "latex2mathml.converter",
"line_number": 43,
"usage_type": "name"
},
{
"api_name": "mistune.Markdown",
"line_number": 155,
"usage_type": "call"
}
] |
653333658
|
import numpy as np
from scipy import linalg as lin
from matplotlib import pyplot as plt
from matplotlib import animation
import mpl_toolkits.mplot3d.axes3d as p3
def project_3d(arr):
return [*arr.real[view_dims], arr.imag[0]]
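# project_3d maps one complex point to three plot coordinates: the real parts
# of the two components selected by view_dims, plus the imaginary part of the
# first component on the vertical ("i") axis.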
view_dims = [0, 1]
matrix = np.matrix([
[0, 1, 0],
[0, 0, 1],
[-1, 0, 0],
])
print(lin.det(matrix))
num_points = 200
line_res = 10
anim_speed = 10
bounds = 1
res_steps = 2 * np.pi / line_res
data = np.array([[np.cos(t) * np.sin(u) + np.cos(u) * 1j, np.sin(t) * np.sin(u) + np.cos(u) * 1j, v]
for t in np.arange(line_res) * res_steps
for u in np.arange(line_res) * res_steps
for v in [-1, 0, 1]]).T
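# Each column holds a point whose first two components sweep a parametric
# sphere (radius sin(u) in the real plane, cos(u) on the imaginary part),
# with the third component fixed at one of the three planes v in {-1, 0, 1}.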
# data = np.random.randn(matrix.shape[0], num_points) / 2
fig = plt.figure()
ax = p3.Axes3D(fig)
ax.set_xlabel('X')
ax.set_xlim3d([-bounds, bounds])
ax.set_ylabel('Y')
ax.set_ylim3d([-bounds, bounds])
ax.set_zlabel('i')
ax.set_zlim3d([-bounds, bounds])
ax.plot(*np.array([[np.cos(t), np.sin(t)] for t in np.arange(line_res + 1) * res_steps]).T, c='lightgrey')
points = ax.scatter(*project_3d(data))
for v, vec in zip(*lin.eig(matrix)):
print(v, vec)
ax.plot(*np.array([[0, 0, 0], project_3d(vec)]).T)
inc_matrix = lin.fractional_matrix_power(matrix, anim_speed / 1000)
def display(t):
global data
data = np.matmul(inc_matrix, data)
points._offsets3d = project_3d(data)
return [points]
ani = animation.FuncAnimation(fig, display, interval=30)
try:
plt.show()
except:
pass
| null |
matvis2ni.py
|
matvis2ni.py
|
py
| 1,562 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "numpy.matrix",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "scipy.linalg.det",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "scipy.linalg",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "numpy.pi",
"line_number": 28,
"usage_type": "attribute"
},
{
"api_name": "numpy.array",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "numpy.cos",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "numpy.sin",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "numpy.random.randn",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 35,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 37,
"usage_type": "name"
},
{
"api_name": "mpl_toolkits.mplot3d.axes3d.Axes3D",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "mpl_toolkits.mplot3d.axes3d",
"line_number": 38,
"usage_type": "name"
},
{
"api_name": "numpy.array",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "numpy.cos",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "numpy.sin",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "scipy.linalg.eig",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "scipy.linalg",
"line_number": 50,
"usage_type": "name"
},
{
"api_name": "numpy.array",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "scipy.linalg.fractional_matrix_power",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "scipy.linalg",
"line_number": 54,
"usage_type": "name"
},
{
"api_name": "numpy.matmul",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "matplotlib.animation.FuncAnimation",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "matplotlib.animation",
"line_number": 64,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 66,
"usage_type": "name"
}
] |
306880342
|
# pylint: disable=R0913,R0903
"""
dayong.configs
~~~~~~~~~~~~~~
Initial setup and configuration logic.
"""
import json
import os
from pydantic import BaseModel
from dayong.settings import CONFIG_FILE
class DayongConfig(BaseModel):
"""Data model for Dayong's configuration."""
bot_prefix: str
bot_token: str
database_uri: str
embeddings: dict
guild_id: int
@classmethod
def load(
cls,
bot_prefix: str,
bot_token: str,
database_uri: str,
embeddings: dict,
guild_id: int,
) -> "DayongConfig":
"""Construct an instance of `dayong.configs.DayongConfig`.
Returns:
An instance of `dayong.configs.DayongConfig`.
"""
return cls(
bot_prefix=bot_prefix,
bot_token=bot_token,
database_uri=database_uri,
guild_id=guild_id,
embeddings=embeddings,
)
class DayongConfigLoader:
"""Configuration loader for Dayong."""
def __init__(self) -> None:
self.load_cfg()
self.load_env()
def load_cfg(self) -> None:
"""Load comments, flags, settings, and paths from config file."""
with open(CONFIG_FILE, encoding="utf-8") as cfp:
config = dict(json.load(cfp))
self.bot_prefix = config["bot_prefix"]
self.guild_id = config["guild_id"]
self.embeddings = config["embeddings"]
def load_env(self) -> None:
"""Load environment variables."""
self.bot_token = os.environ["BOT_TOKEN"]
self.database_uri = os.environ["DATABASE_URI"]
@staticmethod
def load() -> DayongConfig:
"""Load configs into `dayong.configs.DayongConfig`.
Returns:
DayongConfig: An instance of `dayong.configs.DayongConfig`.
"""
loader = DayongConfigLoader().__dict__
return DayongConfig.load(*tuple(loader[key] for key in sorted(loader.keys())))
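# Minimal usage sketch (illustrative; assumes BOT_TOKEN and DATABASE_URI are
# set in the environment and CONFIG_FILE names a JSON file with bot_prefix,
# guild_id, and embeddings keys):
#
#     config = DayongConfigLoader.load()
#     print(config.bot_prefix, config.guild_id)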
| null |
dayong/configs.py
|
configs.py
|
py
| 1,960 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "pydantic.BaseModel",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "dayong.settings.CONFIG_FILE",
"line_number": 57,
"usage_type": "argument"
},
{
"api_name": "json.load",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 65,
"usage_type": "attribute"
},
{
"api_name": "os.environ",
"line_number": 66,
"usage_type": "attribute"
}
] |
58083649
|
import pygame
from plane_sprites import *
class PlaneGame(object):
def __init__(self):
print('Initializing game')
# 1. Create the game window.
self.screen=pygame.display.set_mode(SCREEN_RECT.size)
# 2. Create the game clock.
self.clock=pygame.time.Clock()
# 3. Call the private method that creates the sprites and sprite groups.
self.__create_sprites()
# 4. Set a timer event to spawn an enemy every 1 s (interval in milliseconds).
pygame.time.set_timer(CREATE_ENEMY_EVENT,1000)
def __create_sprites(self):
# Create the background sprites and their sprite group.
bg1=Background()
bg2=Background(True)
self.back_group=pygame.sprite.Group(bg1,bg2)
# Create the enemy sprite group.
self.enemy_group=pygame.sprite.Group()
def start_game(self):
print('Game is about to start...')
# Game loop.
while 1:
# 1. Set the refresh rate.
self.clock.tick(FRAME_PER_SEC)
# 2. Listen for events.
self.__event_handler()
# 3. Collision detection.
self.__check__collide()
# 4. Update/draw the sprite groups.
self.__update_sprites()
# 5. Update the display.
pygame.display.update()
def __event_handler(self):
# This method returns the pending input events.
for event in pygame.event.get():
# Check whether to quit the game (static method invoked via the class name).
if event.type==pygame.QUIT:
PlaneGame.__game_over()
elif event.type==CREATE_ENEMY_EVENT:
print('Enemy incoming...')
# Create an enemy sprite.
enemy= Enemy()
# Add the enemy sprite to the enemy sprite group.
self.enemy_group.add(enemy)
def __check__collide(self):
pass
def __update_sprites(self):
self.back_group.update()
self.back_group.draw(self.screen)
self.enemy_group.update()
self.enemy_group.draw(self.screen)
# Static method.
@staticmethod
def __game_over():
print('Game over...')
pygame.quit()
exit()
if __name__ == "__main__":
# Create the game object.
game=PlaneGame()
# Start the game.
game.start_game()
| null |
飞机大战/plane_main.py
|
plane_main.py
|
py
| 2,239 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "pygame.display.set_mode",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "pygame.display",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "pygame.time.Clock",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "pygame.time",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "pygame.time.set_timer",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "pygame.time",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "pygame.sprite.Group",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "pygame.sprite",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "pygame.sprite.Group",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "pygame.sprite",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "pygame.display.update",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "pygame.display",
"line_number": 40,
"usage_type": "attribute"
},
{
"api_name": "pygame.event.get",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "pygame.event",
"line_number": 45,
"usage_type": "attribute"
},
{
"api_name": "pygame.QUIT",
"line_number": 47,
"usage_type": "attribute"
},
{
"api_name": "pygame.quit",
"line_number": 70,
"usage_type": "call"
}
] |
514215494
|
import json
import unittest
import boto3
from pygglz.dynamodb import DynamodbRepository
from pygglz.feature_state import FeatureState
class LocalDynamodb(object):
def __init__(self):
self.endpoint_url = "http://localhost:4569"
self.resource = boto3.resource('dynamodb', endpoint_url=self.endpoint_url)
self.client = boto3.client('dynamodb', endpoint_url=self.endpoint_url)
def create_schema(self, schema=None):
table_names = self.client.list_tables()["TableNames"]
for table_name, table in schema["Tables"].items():
if table_name in table_names:
self.client.delete_table(TableName=table_name)
create_table_args = {
"TableName": table_name,
"AttributeDefinitions": table["AttributeDefinitions"],
"KeySchema": table["KeySchema"],
"BillingMode": table.get("BillingMode", "PAY_PER_REQUEST")
}
self.client.create_table(**create_table_args)
def load_items(self, items=None):
for table_name, table_items in items.items():
table = self.resource.Table(table_name)
for item in table_items:
table.put_item(Item=item)
def assert_contains_item(self, table_name=None, key=None):
table = self.resource.Table(table_name)
response = table.get_item(TableName=table_name, Key=key)
if not "Item" in response:
raise AssertionError("Item with key={} not found in {}.".format(json.dumps(key), table_name))
SCHEMA = {"Tables": {
"features": {
"AttributeDefinitions": [
{
"AttributeName": "featureName",
"AttributeType": "S"
}
],
"KeySchema": [
{
"AttributeName": "featureName",
"KeyType": "HASH"
}
]
}
}}
class DynamodbRepositoryIntegrationTest(unittest.TestCase):
def setUp(self) -> None:
self.local_dynamodb = LocalDynamodb()
self.local_dynamodb.create_schema(SCHEMA)
def test_get_feature_state(self):
self.local_dynamodb.load_items(items={"features": [{"featureName": "F1", "featureState": {"enabled": True}}]})
self.repo = DynamodbRepository(self.local_dynamodb.resource)
self.assertTrue(self.repo.get_feature_state("F1").enabled)
def test_set_feature_state(self):
self.repo = DynamodbRepository(self.local_dynamodb.resource)
self.repo.set_feature_state(FeatureState("F1", True))
self.local_dynamodb.assert_contains_item(table_name="features", key={"featureName": "F1"})
| null |
integration_tests/dynamodb_repository_integration_test.py
|
dynamodb_repository_integration_test.py
|
py
| 2,651 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "boto3.resource",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "boto3.client",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "unittest.TestCase",
"line_number": 63,
"usage_type": "attribute"
},
{
"api_name": "pygglz.dynamodb.DynamodbRepository",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "pygglz.dynamodb.DynamodbRepository",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "pygglz.feature_state.FeatureState",
"line_number": 75,
"usage_type": "call"
}
] |
552043694
|
#-*- coding: utf-8 -*-
##############################################################################
#
# NUMA Extreme Systems (www.numaes.com)
# Copyright (C) 2013
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import models, fields, api
from openerp.osv.osv import except_osv
from openerp.tools.translate import _
import logging
_logger = logging.getLogger(__name__)
class account_bank_printing_model(models.Model):
_name = 'account.bank_printing_model'
name = fields.Char("Model's name", size=128, required=True)
notes = fields.Text('Notes')
report = fields.Many2one('ir.actions.report.xml',
'Current printing model',
domain="[('model','=','account.payable_document')]")
class res_bank(models.Model):
_inherit = "res.bank"
current_printing_model = fields.Many2one('account.bank_printing_model',
'Current printing model')
class account_payable_document(models.Model):
_inherit = "account.payable_document"
auto_printed = fields.Boolean('Already printed', default=False)
_defaults = {
'name': '********',
}
class res_partner_bank(models.Model):
_inherit = "res.partner.bank"
auto_cheque_printing = fields.Boolean('Auto cheque writing')
auto_mask = fields.Char('Mask for numbering generation',
help="Use the following expresions in mask for proper number generation\n"
"- %(y4)d for the 4 digits year, %(y2) for 2 digits year (issued date)\n"
"- %(m)d for the month number\n"
"- %(d)d for the day number\n"
"- %(n)d for the number. Normal modifiers, like %(n)08d could be used\n",
default="%(n)08d", required=True)
def action_print_checks(self, cr, uid, ids, context=None):
assert ids and len(ids)==1, 'One at a time'
rpd_obj = self.pool['account.payable_document']
rpb = self.browse(cr, uid, ids[0], context=context)
if not rpb.auto_cheque_printing:
raise except_osv(_("Error"),
_("This account is not configured for automatic check printing!"))
if not rpb.bank.current_printing_model:
raise except_osv(_("Error"),
_("Bank %s has currently no printing model assigned! Please check") % rpb.bank.name)
to_print = rpd_obj.search(cr, uid,
[('issuer_account','=',rpb.id),
('auto_printed','=',False)],
context=context)
if len(to_print):
return {
'name':_("Print cheques for bank %s") % rpb.name,
'view_mode': 'form',
'view_type': 'form',
'res_model': 'account.cheque_writing',
'type': 'ir.actions.act_window',
'nodestroy': True,
'target': 'new',
'context': context or {},
}
else:
raise except_osv(_("Warning"),
_("This account has no pending cheque to print!"))
class account_cheque_writing(models.TransientModel):
_name = 'account.cheque_writing'
first_number = fields.Integer('First number to assign', default=1)
bank_account = fields.Many2one('res.partner.bank', 'Bank account')
cheque_count = fields.Integer('# of cheques to print',
compute='getCheques')
@api.depends('bank_account')
def getCheques(self):
rpd_obj = self.env['account.payable_document']
for rec in self:
cheques = rpd_obj.search([('issuer_account','=',rec.bank_account.id),
('auto_printed','=',False)])
rec.cheque_count = len(cheques)
@api.model
def default_get(self, fields):
rpb_obj = self.env['res.partner.bank']
rpd_obj = self.env['account.payable_document']
res = super(account_cheque_writing, self).default_get(fields)
active_id = self.env.context['active_id']
rpb = rpb_obj.browse(active_id)
if 'bank_account' in fields:
res['bank_account'] = rpb.id
if 'first_number' in fields:
last_docs = rpd_obj.search([('issuer_account','=',rpb.id),
('auto_printed','=',True)],
order="name desc",
limit=1)
if last_docs:
last_doc = last_docs[0]
first_number = 1
try:
last_number = int(last_doc.name)
first_number = last_number + 1
except Exception:
pass
res['first_number'] = first_number
return res
def action_print(self, cr, uid, ids, context=None):
assert ids and len(ids)==1, 'One at a time'
acw = self.browse(cr, uid, ids[0], context=context)
today = fields.Date.context_today(acw)
rpd_obj = self.pool['account.payable_document']
cheque_ids = rpd_obj.search(cr, uid,
[('issuer_account','=',acw.bank_account.id),
('auto_printed','=',False)],
context=context)
cheques = rpd_obj.browse(cr, uid, cheque_ids, context=context)
if len(cheques):
# Number assignation
n = acw.first_number
for cheque in cheques:
cheque.name = cheque.issuer_account.auto_mask % {
'y4': int(cheque.issued_date[0:4]),
'y2': int(cheque.issued_date[2:4]),
'm': int(cheque.issued_date[5:7]),
'd': int(cheque.issued_date[8:10]),
'n': n,
}
cheque.auto_printed = True
cheque.issued_date = today
n += 1
report_ids = [ch.id for ch in cheques]
return self.pool['report'].get_action(
cr, uid,
report_ids,
acw.bank_account.bank.current_printing_model.report.report_name,
context=dict(context, active_model='account.payable_document'),
)
else:
return {'type': 'ir.actions.act_window_close'}
| null |
numa_cheque_printing/payable_cheque.py
|
payable_cheque.py
|
py
| 7,524 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "logging.getLogger",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "openerp.models.Model",
"line_number": 31,
"usage_type": "attribute"
},
{
"api_name": "openerp.models",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "openerp.fields.Char",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "openerp.fields",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "openerp.fields.Text",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "openerp.fields",
"line_number": 35,
"usage_type": "name"
},
{
"api_name": "openerp.fields.Many2one",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "openerp.fields",
"line_number": 36,
"usage_type": "name"
},
{
"api_name": "openerp.models.Model",
"line_number": 40,
"usage_type": "attribute"
},
{
"api_name": "openerp.models",
"line_number": 40,
"usage_type": "name"
},
{
"api_name": "openerp.fields.Many2one",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "openerp.fields",
"line_number": 43,
"usage_type": "name"
},
{
"api_name": "openerp.models.Model",
"line_number": 46,
"usage_type": "attribute"
},
{
"api_name": "openerp.models",
"line_number": 46,
"usage_type": "name"
},
{
"api_name": "openerp.fields.Boolean",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "openerp.fields",
"line_number": 49,
"usage_type": "name"
},
{
"api_name": "openerp.models.Model",
"line_number": 54,
"usage_type": "attribute"
},
{
"api_name": "openerp.models",
"line_number": 54,
"usage_type": "name"
},
{
"api_name": "openerp.fields.Boolean",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "openerp.fields",
"line_number": 57,
"usage_type": "name"
},
{
"api_name": "openerp.fields.Char",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "openerp.fields",
"line_number": 58,
"usage_type": "name"
},
{
"api_name": "openerp.osv.osv.except_osv",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "openerp.tools.translate._",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "openerp.tools.translate._",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "openerp.osv.osv.except_osv",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "openerp.tools.translate._",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "openerp.tools.translate._",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "openerp.tools.translate._",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "openerp.osv.osv.except_osv",
"line_number": 97,
"usage_type": "call"
},
{
"api_name": "openerp.tools.translate._",
"line_number": 97,
"usage_type": "call"
},
{
"api_name": "openerp.tools.translate._",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "openerp.models.TransientModel",
"line_number": 100,
"usage_type": "attribute"
},
{
"api_name": "openerp.models",
"line_number": 100,
"usage_type": "name"
},
{
"api_name": "openerp.fields.Integer",
"line_number": 103,
"usage_type": "call"
},
{
"api_name": "openerp.fields",
"line_number": 103,
"usage_type": "name"
},
{
"api_name": "openerp.fields.Many2one",
"line_number": 104,
"usage_type": "call"
},
{
"api_name": "openerp.fields",
"line_number": 104,
"usage_type": "name"
},
{
"api_name": "openerp.fields.Integer",
"line_number": 105,
"usage_type": "call"
},
{
"api_name": "openerp.fields",
"line_number": 105,
"usage_type": "name"
},
{
"api_name": "openerp.api.depends",
"line_number": 108,
"usage_type": "call"
},
{
"api_name": "openerp.api",
"line_number": 108,
"usage_type": "name"
},
{
"api_name": "openerp.fields",
"line_number": 122,
"usage_type": "argument"
},
{
"api_name": "openerp.fields",
"line_number": 127,
"usage_type": "name"
},
{
"api_name": "openerp.fields",
"line_number": 130,
"usage_type": "name"
},
{
"api_name": "openerp.api.model",
"line_number": 117,
"usage_type": "attribute"
},
{
"api_name": "openerp.api",
"line_number": 117,
"usage_type": "name"
},
{
"api_name": "openerp.fields.Date.context_today",
"line_number": 152,
"usage_type": "call"
},
{
"api_name": "openerp.fields.Date",
"line_number": 152,
"usage_type": "attribute"
},
{
"api_name": "openerp.fields",
"line_number": 152,
"usage_type": "name"
}
] |
32107906
|
import json
import logging
from string import Template
from flask_restful import Resource, request
from flask import jsonify
from accessors.s3_accessor import S3Accessor
from resource_helpers.abort_logger import AbortLogger
MESSAGE_500 = Template("File failed to download: $error")
class VariantListReturn(Resource):
def __init__(self):
self.logger = logging.getLogger(__name__)
def get(self):
specified_gene_nm = request.args["gene_nm"]
download_file_url = ""
file_path = "variant.txt"
path = ""
# --------------download file from s3--------------
try:
download_file_url = S3Accessor().get_download_url(file_path)
except Exception as e:
AbortLogger.log_and_abort(500, self.logger.error, MESSAGE_500.substitute(error=e))
try:
if download_file_url != "":
path = S3Accessor().download(file_path)
except Exception as e:
AbortLogger.log_and_abort(500, self.logger.error, MESSAGE_500.substitute(error=e))
if str(specified_gene_nm) != "":
return jsonify({
"keys": json.loads(open(path).read())[specified_gene_nm],
})
else:
return jsonify({
"error": "Query must have a specified gene_nm."
})
| null |
resources/variant_list_return.py
|
variant_list_return.py
|
py
| 1,343 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "string.Template",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "flask_restful.Resource",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "logging.getLogger",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "flask_restful.request.args",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "flask_restful.request",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "accessors.s3_accessor.S3Accessor",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "resource_helpers.abort_logger.AbortLogger.log_and_abort",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "resource_helpers.abort_logger.AbortLogger",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "accessors.s3_accessor.S3Accessor",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "resource_helpers.abort_logger.AbortLogger.log_and_abort",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "resource_helpers.abort_logger.AbortLogger",
"line_number": 35,
"usage_type": "name"
},
{
"api_name": "flask.jsonify",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "flask.jsonify",
"line_number": 42,
"usage_type": "call"
}
] |
631254577
|
"""
Irenaeus Chan
11/27/2015
Ramachandran Plot Generator
"""
import sys
from itertools import tee, islice, chain, izip
import vector
def previousAndNext(some_iterable):
#http://stackoverflow.com/questions/1011938/python-previous-and-next-values-inside-a-loop
prevs, items, nexts = tee(some_iterable, 3)
prevs = chain([None], prevs)
nexts = chain(islice(nexts, 1, None), [None])
return izip(prevs, items, nexts)
def calculatePhiPsi(protein, center, filename):
write = 'w'
if (len(sys.argv) > 2 and (sys.argv[1] == "Helix" or sys.argv[1] == "helix")):
write = 'a'
elif (len(sys.argv) > 2 and (sys.argv[1] == "Sheets" or sys.argv[1] == "sheets")):
write = 'a'
elif (len(sys.argv) > 2 and (sys.argv[1] == "Coil" or sys.argv[1] == "coil")):
write = 'a'
elif (sys.argv[1] == "all" and len(sys.argv) > 2):
write = 'a'
with open('{0}.txt'.format(filename), write) as output:
#Sets an iterator to examine the previous, current, and next values
for prev, AA, nxt in previousAndNext(protein.amino_acids):
#Due to how Phi/Psi angles are calculated we can't calculate them at the beginning and end of residues
# The first condition ensures it's not the first in the sequence, the second ensures it's not the beginning of the residue
if (prev is None or prev.seqres != AA.seqres):
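#Cache this residue's carbonyl carbon C: the phi of the following residue is defined using the previous residue's C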
C = [AA.backbone[2].x, AA.backbone[2].y, AA.backbone[2].z]
continue
#This checks if it's the end of the ENTIRE sequence
elif (nxt is None):
continue
#This checks whether or not it is at the end of the residue
elif (AA.seqres != nxt.seqres):
continue
aa = AA.amino_acid
pos = AA.position
N = [AA.backbone[0].x, AA.backbone[0].y, AA.backbone[0].z]
Ca = [AA.backbone[1].x, AA.backbone[1].y, AA.backbone[1].z]
vectorCN = vector.vectorCalculation(C, N)
vectorNCa = vector.vectorCalculation(N, Ca)
normalVector1 = vector.crossProduct(vectorCN, vectorNCa)
C = [AA.backbone[2].x, AA.backbone[2].y, AA.backbone[2].z]
vectorCaC = vector.vectorCalculation(Ca, C)
normalVector2 = vector.crossProduct(vectorNCa, vectorCaC)
phi = vector.dihedralAngle(normalVector1, normalVector2)
#The cross product vectors are both normal to the axis vectorNCa (central vector),
# so the angle between them is the dihedral angle that we are looking for.
# However, since "angle" only returns values between 0 and pi, we need to make
# sure we get the right sign relative to the rotation axis
if vector.dotProduct(vector.crossProduct(normalVector1, normalVector2), vectorNCa) < 0:
phi = -phi
normalVector1 = vector.crossProduct(vectorNCa,vectorCaC)
N = [nxt.backbone[0].x, nxt.backbone[0].y, nxt.backbone[0].z]
vectorCN = vector.vectorCalculation(C, N)
normalVector2 = vector.crossProduct(vectorCaC, vectorCN)
psi = vector.dihedralAngle(normalVector1, normalVector2)
#The cross product vectors are both normal to the axis vectorNCa (central vector),
# so the angle between them is the dihedral angle that we are looking for.
# However, since "angle" only returns values between 0 and pi, we need to make
# sure we get the right sign relative to the rotation axis
if vector.dotProduct(vector.crossProduct(normalVector1, normalVector2), vectorCaC) < 0:
psi = -psi
aminoacid = [AA.avgx, AA.avgy, AA.avgz]
d = vector.vectorMagnitude(vector.vectorCalculation(center, aminoacid))
#Writes the Phi, Psi, and Distances for the specific Amino Acid
output.write(str(pos) + ' ' + aa + ' ' + str(phi) + ' ' + str(psi) + ' ' + str(d) + '\n')
| null |
Library/phipsi.py
|
phipsi.py
|
py
| 3,531 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "itertools.tee",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "itertools.chain",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "itertools.chain",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "itertools.islice",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "itertools.izip",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_name": "vector.vectorCalculation",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "vector.vectorCalculation",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "vector.crossProduct",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "vector.vectorCalculation",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "vector.crossProduct",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "vector.dihedralAngle",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "vector.dotProduct",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "vector.crossProduct",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "vector.crossProduct",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "vector.vectorCalculation",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "vector.crossProduct",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "vector.dihedralAngle",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "vector.dotProduct",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "vector.crossProduct",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "vector.vectorMagnitude",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "vector.vectorCalculation",
"line_number": 80,
"usage_type": "call"
}
] |
179775440
|
# -*- coding: utf-8 -*-
"""
Created on Tue Oct 6 13:38:57 2020
@author: nkraj
"""
import numpy as np
import pandas as pd
import datetime
import matplotlib.pyplot as plt
# model libraries
from statsmodels.tsa.arima_model import ARIMA
from statsmodels.tsa.statespace.sarimax import SARIMAX
from pandas.plotting import autocorrelation_plot
from statsmodels.tsa.stattools import adfuller, acf, pacf,arma_order_select_ic
import statsmodels.formula.api as smf
import statsmodels.tsa.api as smt
import statsmodels.api as sm
import scipy.stats as scs
from data_preprocessing import sales
# establish time series for whole company
ts = sales.groupby(['date_block_num'])['item_cnt_day'].sum()
ts = ts.astype('float')
# multiplicative
res = sm.tsa.seasonal_decompose(ts.values,period=12, model='multiplicative')
fig = res.plot()
# additive
res = sm.tsa.seasonal_decompose(ts.values,period=12, model='additive')
fig = res.plot()
# stationarity tests
def test_stationarity(timeseries):
# perform dickey fuller
print('Results of Dickey-Fuller Test:')
dftest = adfuller(timeseries, autolag='AIC')
    dfoutput = pd.Series(dftest[0:4], index=['Test Statistic', 'p-value','#Lags used', 'Number of Obs used'])
for key, value in dftest[4].items():
dfoutput['Critical Value (%s)'%key] = value
print(dfoutput)
test_stationarity(ts)
from pandas import Series as Series
# remove trend
# create a differenced series
def difference(dataset, interval=1):
diff = list()
for i in range(interval, len(dataset)):
value = dataset[i] - dataset[i - interval]
diff.append(value)
return Series(diff)
# invert differenced forecast
def inverse_difference(last_ob, value):
return value + last_ob
# plot old time series then ts without trend and without seasonality
plt.figure(figsize=(16,16))
plt.subplot(311)
plt.title('Original')
plt.xlabel('Time')
plt.ylabel('Sales')
plt.plot(ts)
plt.subplot(312)
plt.title('After De-trend')
plt.xlabel('Time')
plt.ylabel('Sales')
new_ts=difference(ts)
plt.plot(new_ts)
plt.plot()
plt.subplot(313)
plt.title('After De-seasonalization')
plt.xlabel('Time')
plt.ylabel('Sales')
new_ts=difference(new_ts,12) # assuming the seasonality is 12 months long
plt.plot(new_ts)
plt.plot()
# test stationarity again after removing seasonality
test_stationarity(new_ts)
# use ARMA model
best_aic = np.inf
best_order = None
best_mdl = None
rng = range(5)
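# grid-search ARMA(p,q) orders over 0..4, keeping the fit with the lowest AIC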
for i in rng:
for j in rng:
try:
tmp_mdl = smt.ARMA(new_ts.values, order=(i,j)).fit(method='css-mle',trend='nc', solver='nm')
tmp_aic = tmp_mdl.aic
if tmp_aic < best_aic:
best_aic = tmp_aic
best_order = (i,j)
best_mdl = tmp_mdl
except Exception: continue  # skip (p,q) orders that fail to converge
print('AIC: {:6.5f} | order: {}'.format(best_aic, best_order))
# add dates
ts.index=pd.date_range(start = '2013-01-01', end='2015-10-01', freq='MS')
ts=ts.reset_index()
ts.head()
best_mdl.predict()
| null |
model_building.py
|
model_building.py
|
py
| 2,976 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "data_preprocessing.sales.groupby",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "data_preprocessing.sales",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "statsmodels.api.tsa.seasonal_decompose",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "statsmodels.api.tsa",
"line_number": 28,
"usage_type": "attribute"
},
{
"api_name": "statsmodels.api",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "statsmodels.api.tsa.seasonal_decompose",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "statsmodels.api.tsa",
"line_number": 32,
"usage_type": "attribute"
},
{
"api_name": "statsmodels.api",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "statsmodels.tsa.stattools.adfuller",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "pandas.Series",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "pandas.Series",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 64,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.subplot",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 65,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.title",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 66,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xlabel",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 67,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.ylabel",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 68,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 69,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.subplot",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 70,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.title",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 71,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xlabel",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 72,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.ylabel",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 73,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 75,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 76,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.subplot",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 77,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.title",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 78,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xlabel",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 79,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.ylabel",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 80,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 82,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 82,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 83,
"usage_type": "name"
},
{
"api_name": "numpy.inf",
"line_number": 89,
"usage_type": "attribute"
},
{
"api_name": "statsmodels.tsa.api.ARMA",
"line_number": 97,
"usage_type": "call"
},
{
"api_name": "statsmodels.tsa.api",
"line_number": 97,
"usage_type": "name"
},
{
"api_name": "pandas.date_range",
"line_number": 108,
"usage_type": "call"
}
] |
156017325
|
from scatter_net_convolution_train import forwardprop
from scatter_net_convolution_train import init_weights
from scatter_net_convolution_train import init_bias
import tensorflow as tf
import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split
import os
import time
import argparse
import random
def get_spect(data,singular = False):
y_file = data+"_val.csv"
x_file = data+".csv"
X = np.transpose(np.genfromtxt(x_file,delimiter=','))
Y = np.genfromtxt(y_file,delimiter=',')
x = (list(X.shape))
x.append(1)
X = np.reshape(X,x)
if singular == False:
index = random.choice(list(range(len(Y))))
return np.array([X[index]]), np.array([Y[index]])
else:
return np.array([X]), np.array([Y])
def main(data,reuse_weights,output_folder,weight_name_save,weight_name_load,n_batch,numEpochs,lr_rate,lr_decay,num_layers,n_hidden,percent_val,kernel_size,kernel_no):
if not os.path.exists(output_folder):
os.makedirs(output_folder)
test_X, test_Y = get_spect(data,singular=True)
x_size = test_X.shape[1]
y_size = test_Y.shape[1]
# Symbols
X = tf.placeholder("float", shape=[None, x_size, 1])
y = tf.placeholder("float", shape=[None, y_size])
weights = []
biases = []
# Weight initializations
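# Layer 0 appears to be a 1-D convolution (kernel_size x 1 x kernel_no filters);
# layer 1 maps the flattened feature map to n_hidden dense units -- the 0.5 factor
# suggests a stride-2 pooling step inside forwardprop (an assumption, since
# forwardprop is defined in scatter_net_convolution_train); remaining layers are dense.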
for i in range(0,num_layers):
if i == 0:
weights.append(init_weights((kernel_size,1,kernel_no)))
biases.append(init_bias(kernel_no))
elif i==1:
weights.append(init_weights((int(0.5*(x_size-kernel_size+1))*kernel_no,n_hidden)))
biases.append(init_bias(n_hidden))
else:
weights.append(init_weights((n_hidden,n_hidden)))
biases.append(init_bias(n_hidden))
weights.append(init_weights((n_hidden,y_size)))
biases.append(init_bias(y_size))
# Forward propagation
yhat = forwardprop(X, weights,biases,num_layers)
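# Restore the checkpoint written by the training script and run a single forward pass on one spectrum.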
with tf.Session() as sess:
saver = tf.train.Saver()
saver.restore(sess,output_folder+weight_name_save+".ckpt")
out = sess.run(yhat,feed_dict = {X:test_X,y:test_Y})
print("Computed: "+str(out))
print("Expected: "+str(test_Y))
sess.close()
if __name__=="__main__":
parser = argparse.ArgumentParser(
description="Physics Net Training")
parser.add_argument("--data",type=str,default='data/test')
#parser.add_argument("--data",type=str,default='data/CompleteDataFiles/3_layer_tio2_fixed_06_21_1')
parser.add_argument("--reuse_weights",type=str,default='False')
parser.add_argument("--output_folder",type=str,default='results/3_Layer_TiO2_20Kernel_Convolution_5layers_650per_Positive/')
#Generate the loss file/val file name by looking to see if there is a previous one, then creating/running it.
parser.add_argument("--weight_name_load",type=str,default="")#This would be something that goes infront of w_1.txt. This would be used in saving the weights
parser.add_argument("--weight_name_save",type=str,default="Weights_and_Biases")
parser.add_argument("--n_batch",type=int,default=100)
parser.add_argument("--numEpochs",type=int,default=100)
parser.add_argument("--lr_rate",default=0.000001)
parser.add_argument("--lr_decay",default=.9)
parser.add_argument("--num_layers",default=5)
parser.add_argument("--n_hidden",default=650)
parser.add_argument("--percent_val",default=.2)
parser.add_argument("--kernel_size",default=5)
parser.add_argument("--kernel_no",default=20)
args = parser.parse_args()
args_dict = vars(args)  # avoid shadowing the built-in dict
for i in args_dict:
if (args_dict[i]=="False"):
args_dict[i] = False
elif args_dict[i]=="True":
args_dict[i] = True
kwargs = {
'data':args_dict['data'],
'reuse_weights':args_dict['reuse_weights'],
'output_folder':args_dict['output_folder'],
'weight_name_save':args_dict['weight_name_save'],
'weight_name_load':args_dict['weight_name_load'],
'n_batch':args_dict['n_batch'],
'numEpochs':args_dict['numEpochs'],
'lr_rate':args_dict['lr_rate'],
'lr_decay':args_dict['lr_decay'],
'num_layers':args_dict['num_layers'],
'n_hidden':args_dict['n_hidden'],
'percent_val':args_dict['percent_val'],
'kernel_size':args_dict['kernel_size'],
'kernel_no':args_dict['kernel_no']}
main(**kwargs)
| null |
scatter_net_convolution_match.py
|
scatter_net_convolution_match.py
|
py
| 4,419 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "numpy.transpose",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "numpy.genfromtxt",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "numpy.genfromtxt",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "numpy.reshape",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "random.choice",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 30,
"usage_type": "attribute"
},
{
"api_name": "os.makedirs",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "tensorflow.placeholder",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "tensorflow.placeholder",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "scatter_net_convolution_train.init_weights",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "scatter_net_convolution_train.init_bias",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "scatter_net_convolution_train.init_weights",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "scatter_net_convolution_train.init_bias",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "scatter_net_convolution_train.init_weights",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "scatter_net_convolution_train.init_bias",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "scatter_net_convolution_train.init_weights",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "scatter_net_convolution_train.init_bias",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "scatter_net_convolution_train.forwardprop",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "tensorflow.Session",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "tensorflow.train.Saver",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "tensorflow.train",
"line_number": 60,
"usage_type": "attribute"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 68,
"usage_type": "call"
}
] |
364370968
|
import pandas as pd
import tushare as ts
import time,datetime
from easyutils import timeutils
from hq.HqUtils import *
from easytrader import log
sleepInterval = 1
peLow=0 # P/E ratio lower bound
peHigh=30 # P/E ratio upper bound
jiejinCount=30 # lock-up expiry share count threshold
jiejinRatio=3 # lock-up expiry ratio threshold
yuyingLow=0.1 # forecast earnings-per-share lower bound
engine = get_engine()
# update the latest data for the current trading day
def get_day_hq():
tradeDay = timeutils.get_last_trade_day()
log.info("Start processing market data for %s", tradeDay)
get_stock_basics()
get_stock_jiejin()
get_stock_yuying()
compute_today_monitor()
log.info("Finished processing market data for %s", tradeDay)
# compute today's watch-list stocks
def compute_today_monitor():
sql = "select distinct * from stock_k_d where date = (select date from stock_k_d order by date desc limit 1) and close>ma5 and ma5>ma10 and ma10 > ma20 and code in (select code from stock_yuying) and code not in (select code from stock_jiejin)"
#rs = engine.execute(sql)
#df = pd.DataFrame(rs.fetchall())
df = pd.read_sql(sql, con=engine)
#del df["index"]
df.set_index(["date", "code"], inplace=True)
log.info("今日监控总数:%s", str(len(df)))
save_table(engine, 'stock_monitor', df, "append")
#全量股票信息
def get_stock_basics():
df = ts.get_stock_basics()
df = df[(df['pe'] > peLow) & (df['pe'] < peHigh)]
df["date"] = timeutils.get_last_trade_day()
save_table(engine, 'stock_basic', df)
for i in range(0, len(df)):
code = df.index[i]
get_k_today(code)
time.sleep(sleepInterval)
log.info("今日更新基本股票总数:%s", str(len(df)))
#解禁股票
def get_stock_jiejin():
curMonth = timeutils.get_month_cur()
nextMonth = timeutils.get_month_next()
df = ts.xsg_data(year= curMonth[0], month=curMonth[1])
df = df.append(ts.xsg_data(year= nextMonth[0], month=nextMonth[1]))
if len(df) < 1 : return
df['count'] = df['count'].astype("float")
df['ratio'] = df['ratio'].astype("float")
df = df[(df['count'] > jiejinCount) | (df['ratio'] > jiejinRatio)]
save_table(engine, 'stock_jiejin', df)
log.info("更新上一交易日解禁数据总数:%s", str(len(df)))
#预赢股票
def get_stock_yuying():
q = timeutils.get_quarter_cur()
df = ts.forecast_data(timeutils.get_year_cur(), q)
if q > 1:
df = df.append(ts.forecast_data(timeutils.get_year_cur(), q - 1))
if len(df) < 1 : return
df = df[(df['pre_eps'] > yuyingLow)]
save_table(engine, 'stock_yuying', df)
log.info("更新上一交易日预赢数据总数:%s", str(len(df)))
#取得当日K线
def get_k_today(code):
lastday = timeutils.get_last_trade_day()
df = ts.get_hist_data(code, start= lastday, end= lastday, ktype="D")
if df is None: return
df["code"] = code
df.reset_index(level=0, inplace=True)
df.set_index(["date","code"],inplace=True)
save_table(engine, 'stock_k_d', df, "append")
log.info(">>>>更新[%s]K线数据:%s" , lastday, code)
| null |
hq/GetDayHQ.py
|
GetDayHQ.py
|
py
| 3,055 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "easyutils.timeutils.get_last_trade_day",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "easyutils.timeutils",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "easytrader.log.info",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "easytrader.log",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "easytrader.log.info",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "easytrader.log",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "pandas.read_sql",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "easytrader.log.info",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "easytrader.log",
"line_number": 36,
"usage_type": "name"
},
{
"api_name": "tushare.get_stock_basics",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "easyutils.timeutils.get_last_trade_day",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "easyutils.timeutils",
"line_number": 44,
"usage_type": "name"
},
{
"api_name": "time.sleep",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "easytrader.log.info",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "easytrader.log",
"line_number": 53,
"usage_type": "name"
},
{
"api_name": "easyutils.timeutils.get_month_cur",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "easyutils.timeutils",
"line_number": 58,
"usage_type": "name"
},
{
"api_name": "easyutils.timeutils.get_month_next",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "easyutils.timeutils",
"line_number": 59,
"usage_type": "name"
},
{
"api_name": "tushare.xsg_data",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "tushare.xsg_data",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "easytrader.log.info",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "easytrader.log",
"line_number": 69,
"usage_type": "name"
},
{
"api_name": "easyutils.timeutils.get_quarter_cur",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "easyutils.timeutils",
"line_number": 73,
"usage_type": "name"
},
{
"api_name": "tushare.forecast_data",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "easyutils.timeutils.get_year_cur",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "easyutils.timeutils",
"line_number": 74,
"usage_type": "name"
},
{
"api_name": "tushare.forecast_data",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "easyutils.timeutils.get_year_cur",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "easyutils.timeutils",
"line_number": 76,
"usage_type": "name"
},
{
"api_name": "easytrader.log.info",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "easytrader.log",
"line_number": 81,
"usage_type": "name"
},
{
"api_name": "easyutils.timeutils.get_last_trade_day",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "easyutils.timeutils",
"line_number": 85,
"usage_type": "name"
},
{
"api_name": "tushare.get_hist_data",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "easytrader.log.info",
"line_number": 93,
"usage_type": "call"
},
{
"api_name": "easytrader.log",
"line_number": 93,
"usage_type": "name"
}
] |
374554750
|
from tools import models
"""
Use this script like this
python manage.py shell < script.py
"""
def delete_object(object_list):
for item in object_list:
item.delete()
# Reference
Reference = models.Reference
delete_object(Reference.objects.all())
url_list = ["https://www.eia.gov/energyexplained/units-and-calculators/energy-conversion-calculators.php",
"",
"https://en.wikipedia.org/wiki/Energy_density",
"https://neutrium.net/properties/specific-energy-and-energy-density-of-fuels/",
]
book_list =["",
"BP Statistical review of world energy",
"",
"",
]
for k in range(0, len(url_list)):
item = Reference(url=url_list[k], book=book_list[k])
item.save()
print("\n", Reference.objects.all())
# Physical state
PhysicalState = models.PhysicalState
delete_object(PhysicalState.objects.all())
state_list = ["solid", "liquid", "gas"]
for state in state_list:
item = PhysicalState(state=state)
item.save()
print("\n", PhysicalState.objects.all())
# Physical quantity
PhysicalQuantity = models.PhysicalQuantity
delete_object(PhysicalQuantity.objects.all())
quantitys = ["energy",
"mass",
"volume",
"density",
"specific volume",
"power",
"energy density",
"specific energy",
"speed",
"acceleration",
"distance",
"energy consumption per distance",
"time",
]
for quantity in quantitys:
item = PhysicalQuantity(physical_quantity=quantity)
item.save()
print("\n", PhysicalQuantity.objects.all())
# Unit
Unit = models.Unit
delete_object(Unit.objects.all())
unit_list = ["joule",
"kilogram",
"cubic metre",
"litre",
"kilogram per cubic metre",
"cubic metre per kilogram",
"watt",
"kilo-watt",
"cheval-vapeur",
"joule per cubic metre",
"joule per kilogram",
"metre per second",
"metre per second squared",
"metre",
"kilometre",
"litre per 100km",
"seconde",
"hour",
]
symbol_list = ["J",
"kg",
"m^3",
"l",
"kg/m^3",
"m^3/kg",
"W",
"kW",
"ch",
"J/m^3",
"J/kg",
"m/s",
"m/s^(-2)",
"m",
"km",
"l/100km",
"s",
"h",
]
physical_quantity_list = [
PhysicalQuantity.objects.get(physical_quantity="energy"),
PhysicalQuantity.objects.get(physical_quantity="mass"),
PhysicalQuantity.objects.get(physical_quantity="volume"),
PhysicalQuantity.objects.get(physical_quantity="volume"),
PhysicalQuantity.objects.get(physical_quantity="density"),
PhysicalQuantity.objects.get(physical_quantity="specific volume"),
PhysicalQuantity.objects.get(physical_quantity="power"),
PhysicalQuantity.objects.get(physical_quantity="power"),
PhysicalQuantity.objects.get(physical_quantity="power"),
PhysicalQuantity.objects.get(physical_quantity="energy density"),
PhysicalQuantity.objects.get(physical_quantity="specific energy"),
PhysicalQuantity.objects.get(physical_quantity="speed"),
PhysicalQuantity.objects.get(physical_quantity="acceleration"),
PhysicalQuantity.objects.get(physical_quantity="distance"),
PhysicalQuantity.objects.get(physical_quantity="distance"),
PhysicalQuantity.objects.get(physical_quantity="energy consumption per distance"),
PhysicalQuantity.objects.get(physical_quantity="time"),
PhysicalQuantity.objects.get(physical_quantity="time"),
]
for k in range(0, len(unit_list)):
item = Unit(unit=unit_list[k],
symbol=symbol_list[k],
physical_quantity=physical_quantity_list[k])
item.save()
print("\n", Unit.objects.all())
# To populate PhysicalConstant
PhysicalConstant = models.PhysicalConstant
name_list = [
"Earth's gravity",
]
value_list = [
9.81,
]
unit_list = [
models.Unit.objects.get(symbol="m/s^(-2)")
]
for k in range(0, len(name_list)):
item = PhysicalConstant(
name=name_list[k],
value=value_list[k],
unit=unit_list[k]
)
item.save()
print("\n", PhysicalConstant.objects.all())
# Energy type
EnergyType = models.EnergyType
delete_object(EnergyType.objects.all())
energy_type_list = ["thermal"]
for k in range(0, len(energy_type_list)):
item = EnergyType(energy_type=energy_type_list[k])
item.save()
print("\n", EnergyType.objects.all())
# Resource
Resource = models.Resource
delete_object(Resource.objects.all())
name_list = ["oil",
"liquid petroleum gas (LPG)",
"gasoline",
"kerosene",
"diesel",
]
weight_list = [1000,
1000,
1000,
1000,
1000,
]
weight_unit_list = [Unit.objects.get(symbol="kg"),
Unit.objects.get(symbol="kg"),
Unit.objects.get(symbol="kg"),
Unit.objects.get(symbol="kg"),
Unit.objects.get(symbol="kg"),
]
volume_list = [1.165,
1.844,
1.328,
1.253,
1.186,
]
volume_unit_list = [Unit.objects.get(symbol="m^3"),
Unit.objects.get(symbol="m^3"),
Unit.objects.get(symbol="m^3"),
Unit.objects.get(symbol="m^3"),
Unit.objects.get(symbol="m^3"),
]
density_list = [1000/1.165,
1000/1.844,
1000/1.328,
1000/1.253,
1000/1.186,
]
density_unit_list = [Unit.objects.get(symbol="kg/m^3"),
Unit.objects.get(symbol="kg/m^3"),
Unit.objects.get(symbol="kg/m^3"),
Unit.objects.get(symbol="kg/m^3"),
Unit.objects.get(symbol="kg/m^3"),
]
density_ref_list = [
Reference.objects.get(book="BP Statistical review of world energy"),
Reference.objects.get(book="BP Statistical review of world energy"),
Reference.objects.get(book="BP Statistical review of world energy"),
Reference.objects.get(book="BP Statistical review of world energy"),
Reference.objects.get(book="BP Statistical review of world energy"),
]
state_list = [PhysicalState.objects.get(state="liquid"),
PhysicalState.objects.get(state="liquid"),
PhysicalState.objects.get(state="liquid"),
PhysicalState.objects.get(state="liquid"),
PhysicalState.objects.get(state="liquid"),
]
price_list = [2,
2,
2,
2,
2,
]
for k in range(0,len(name_list)):
item = Resource(name = name_list[k],
weight = weight_list[k],
weight_unit = weight_unit_list[k],
volume = volume_list[k],
volume_unit = volume_unit_list[k],
density = density_list[k],
density_unit = density_unit_list[k],
density_ref = density_ref_list[k],
state=state_list[k],
price=price_list[k])
item.save()
print("\n", Resource.objects.all())
# Energy
# IMPORTANT : see the eia.gov https://www.eia.gov/energyexplained/units-and-calculators/energy-conversion-calculators.php to see a conversion calculator
Energy = models.Energy
delete_object(Energy.objects.all())
resource_list = [Resource.objects.get(name="oil"),
Resource.objects.get(name="liquid petroleum gas (LPG)"),
Resource.objects.get(name="gasoline"),
Resource.objects.get(name="kerosene"),
Resource.objects.get(name="diesel"),
]
unit_list = [Unit.objects.get(unit="joule"),
Unit.objects.get(unit="joule"),
Unit.objects.get(unit="joule"),
Unit.objects.get(unit="joule"),
Unit.objects.get(unit="joule"),
]
#value_list = [42E9,
# 0.542*42E9,
#
# ]
primary_list = [True,
False,
False,
False,
False,
]
final_list = [False,
True,
True,
True,
True,
]
# example: J/m^3
energy_density_list = [
37.859E9,
27.7E9,
33.539E9,
38.346E9,
38.290E9,
]
energy_density_unit_list = [Unit.objects.get(symbol="J/m^3"),
Unit.objects.get(symbol="J/m^3"),
Unit.objects.get(symbol="J/m^3"),
Unit.objects.get(symbol="J/m^3"),
Unit.objects.get(symbol="J/m^3"),
]
energy_density_ref_list = [Reference.objects.all()[0],
Reference.objects.all()[2],
Reference.objects.all()[0],
Reference.objects.all()[3],
Reference.objects.all()[0],
]
# example: J/kg
specific_energy_list = [41.868E9,
49.1E9,
46.4E9,
46.2E9,
45.6E9,
]
specific_energy_unit_list = [Unit.objects.get(symbol="J/kg"),
Unit.objects.get(symbol="J/kg"),
Unit.objects.get(symbol="J/kg"),
Unit.objects.get(symbol="J/kg"),
Unit.objects.get(symbol="J/kg"),
]
specific_energy_ref_list = [Reference.objects.all()[2],
Reference.objects.all()[2],
Reference.objects.all()[2],
Reference.objects.all()[3],
Reference.objects.all()[2],
]
energy_type_list = [EnergyType.objects.get(energy_type="thermal"),
EnergyType.objects.get(energy_type="thermal"),
EnergyType.objects.get(energy_type="thermal"),
EnergyType.objects.get(energy_type="thermal"),
EnergyType.objects.get(energy_type="thermal"),
]
for k in range(0, len(resource_list)):
item = Energy(resource=resource_list[k],
unit=unit_list[k],
# value=value_list[k],
primary=primary_list[k],
final=final_list[k],
energy_density=energy_density_list[k],
energy_density_unit=energy_density_unit_list[k],
energy_density_ref=energy_density_ref_list[k],
specific_energy=specific_energy_list[k],
specific_energy_unit=specific_energy_unit_list[k],
specific_energy_ref=specific_energy_ref_list[k],
energy_type=energy_type_list[k])
item.save()
print("\n", Energy.objects.all())
## Power
#Power = models.Power
#delete_object(Power.objects.all())
#unit_list = [
# Unit.objects.get(symbol="W"),
#]
##value_list = [1]
#for k in range(0, len(value_list)):
# item = Power(
# unit=unit_list[k],
## value=value_list[k],
# )
# item.save()
#print("\n", Power.objects.all())
# Machine
Machine = models.Machine
delete_object(Machine.objects.all())
name_list = ["car"]
resource_input_list = [Resource.objects.get(name="oil")]
resource_output_list = [Resource.objects.get(name="oil")]
energy_input_list = [Energy.objects.filter(resource__name__contains="oil")]
energy_output_list = [Energy.objects.filter(resource__name__contains="oil")]
efficiency_list = [0.3]
price_list = [10E3]
power_list = [75,
]
power_unit_list = [Unit.objects.get(symbol="ch"),
]
consumption_list = [6,]
consumption_unit_list = [
Unit.objects.get(symbol="l/100km"),
]
for k in range(0, len(name_list)):
item = Machine(name=name_list[k],
resource_input=resource_input_list[k],
resource_output=resource_input_list[k],
energy_input=energy_input_list[k][0],
energy_output=energy_output_list[k][0],
efficiency=efficiency_list[k],
price=price_list[k],
power=power_list[k],
power_unit=power_unit_list[k],
consumption=consumption_list[k],
consumption_unit=consumption_unit_list[k],
)
item.save()
print("\n", Machine.objects.all())
# To populate the Human class
Human = models.Human
delete_object(Human.objects.all())
power_unit = models.Unit.objects.get(symbol="W")
weight_unit = models.Unit.objects.get(symbol="kg")
human = Human(arms_power=10,
arms_power_unit=power_unit,
legs_power=100,
legs_power_unit=power_unit,
weight=100,
weight_unit=weight_unit)
human.save()
print("\n", Human.objects.all())
# To populate HeightScale
HeightScale = models.HeightScale
delete_object(HeightScale.objects.all())
name_list = ["Tour Eiffel"]
height_list = [324]
height_unit_list = [
models.Unit.objects.get(symbol="m"),
]
for k in range(0, len(name_list)):
item = HeightScale(name=name_list[k],
height=height_list[k],
height_unit=height_unit_list[k])
item.save()
print("\n", HeightScale.objects.all())
# To populate ConversionCoefficient
ConversionCoefficient = models.ConversionCoefficient
delete_object(ConversionCoefficient.objects.all())
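# Presumably each row converts a quantity from unit_from to unit_to by multiplying by value (e.g. 1 ch = 735.5 W, 1 h = 3600 s).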
unit_from_list = [
models.Unit.objects.get(symbol="W"),
models.Unit.objects.get(symbol="ch"),
models.Unit.objects.get(symbol="kW"),
models.Unit.objects.get(symbol="h"),
models.Unit.objects.get(symbol="m"),
models.Unit.objects.get(symbol="km"),
models.Unit.objects.get(symbol="km"),
models.Unit.objects.get(symbol="l/100km"),
models.Unit.objects.get(symbol="m^3"),
models.Unit.objects.get(symbol="l"),
models.Unit.objects.get(symbol="J"),
]
unit_to_list = [
models.Unit.objects.get(symbol="W"),
models.Unit.objects.get(symbol="W"),
models.Unit.objects.get(symbol="W"),
models.Unit.objects.get(symbol="s"),
models.Unit.objects.get(symbol="m"),
models.Unit.objects.get(symbol="km"),
models.Unit.objects.get(symbol="m"),
models.Unit.objects.get(symbol="l/100km"),
models.Unit.objects.get(symbol="l"),
models.Unit.objects.get(symbol="m^3"),
models.Unit.objects.get(symbol="J"),
]
value_list = [
1,
735.5,
1000,
3600,
1,
1,
1000,
1,
1000,
1/1000,
1,
]
for k in range(0, len(unit_from_list)):
item = ConversionCoefficient(unit_from=unit_from_list[k],
unit_to=unit_to_list[k],
value=value_list[k])
item.save()
print("\n", ConversionCoefficient.objects.all())
| null |
exsys/script_db.py
|
script_db.py
|
py
| 15,394 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "tools.models.Reference",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "tools.models",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "tools.models.PhysicalState",
"line_number": 33,
"usage_type": "attribute"
},
{
"api_name": "tools.models",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "tools.models.PhysicalQuantity",
"line_number": 43,
"usage_type": "attribute"
},
{
"api_name": "tools.models",
"line_number": 43,
"usage_type": "name"
},
{
"api_name": "tools.models.Unit",
"line_number": 66,
"usage_type": "attribute"
},
{
"api_name": "tools.models",
"line_number": 66,
"usage_type": "name"
},
{
"api_name": "tools.models.PhysicalConstant",
"line_number": 135,
"usage_type": "attribute"
},
{
"api_name": "tools.models",
"line_number": 135,
"usage_type": "name"
},
{
"api_name": "tools.models.Unit.objects.get",
"line_number": 143,
"usage_type": "call"
},
{
"api_name": "tools.models.Unit",
"line_number": 143,
"usage_type": "attribute"
},
{
"api_name": "tools.models",
"line_number": 143,
"usage_type": "name"
},
{
"api_name": "tools.models.EnergyType",
"line_number": 156,
"usage_type": "attribute"
},
{
"api_name": "tools.models",
"line_number": 156,
"usage_type": "name"
},
{
"api_name": "tools.models.Resource",
"line_number": 166,
"usage_type": "attribute"
},
{
"api_name": "tools.models",
"line_number": 166,
"usage_type": "name"
},
{
"api_name": "tools.models.Energy",
"line_number": 247,
"usage_type": "attribute"
},
{
"api_name": "tools.models",
"line_number": 247,
"usage_type": "name"
},
{
"api_name": "tools.models.Machine",
"line_number": 356,
"usage_type": "attribute"
},
{
"api_name": "tools.models",
"line_number": 356,
"usage_type": "name"
},
{
"api_name": "tools.models.Human",
"line_number": 391,
"usage_type": "attribute"
},
{
"api_name": "tools.models",
"line_number": 391,
"usage_type": "name"
},
{
"api_name": "tools.models.Unit.objects.get",
"line_number": 393,
"usage_type": "call"
},
{
"api_name": "tools.models.Unit",
"line_number": 393,
"usage_type": "attribute"
},
{
"api_name": "tools.models",
"line_number": 393,
"usage_type": "name"
},
{
"api_name": "tools.models.Unit.objects.get",
"line_number": 394,
"usage_type": "call"
},
{
"api_name": "tools.models.Unit",
"line_number": 394,
"usage_type": "attribute"
},
{
"api_name": "tools.models",
"line_number": 394,
"usage_type": "name"
},
{
"api_name": "tools.models.HeightScale",
"line_number": 406,
"usage_type": "attribute"
},
{
"api_name": "tools.models",
"line_number": 406,
"usage_type": "name"
},
{
"api_name": "tools.models.Unit.objects.get",
"line_number": 411,
"usage_type": "call"
},
{
"api_name": "tools.models.Unit",
"line_number": 411,
"usage_type": "attribute"
},
{
"api_name": "tools.models",
"line_number": 411,
"usage_type": "name"
},
{
"api_name": "tools.models.ConversionCoefficient",
"line_number": 422,
"usage_type": "attribute"
},
{
"api_name": "tools.models",
"line_number": 422,
"usage_type": "name"
},
{
"api_name": "tools.models.Unit.objects.get",
"line_number": 425,
"usage_type": "call"
},
{
"api_name": "tools.models.Unit",
"line_number": 425,
"usage_type": "attribute"
},
{
"api_name": "tools.models",
"line_number": 425,
"usage_type": "name"
},
{
"api_name": "tools.models.Unit.objects.get",
"line_number": 426,
"usage_type": "call"
},
{
"api_name": "tools.models.Unit",
"line_number": 426,
"usage_type": "attribute"
},
{
"api_name": "tools.models",
"line_number": 426,
"usage_type": "name"
},
{
"api_name": "tools.models.Unit.objects.get",
"line_number": 427,
"usage_type": "call"
},
{
"api_name": "tools.models.Unit",
"line_number": 427,
"usage_type": "attribute"
},
{
"api_name": "tools.models",
"line_number": 427,
"usage_type": "name"
},
{
"api_name": "tools.models.Unit.objects.get",
"line_number": 428,
"usage_type": "call"
},
{
"api_name": "tools.models.Unit",
"line_number": 428,
"usage_type": "attribute"
},
{
"api_name": "tools.models",
"line_number": 428,
"usage_type": "name"
},
{
"api_name": "tools.models.Unit.objects.get",
"line_number": 429,
"usage_type": "call"
},
{
"api_name": "tools.models.Unit",
"line_number": 429,
"usage_type": "attribute"
},
{
"api_name": "tools.models",
"line_number": 429,
"usage_type": "name"
},
{
"api_name": "tools.models.Unit.objects.get",
"line_number": 430,
"usage_type": "call"
},
{
"api_name": "tools.models.Unit",
"line_number": 430,
"usage_type": "attribute"
},
{
"api_name": "tools.models",
"line_number": 430,
"usage_type": "name"
},
{
"api_name": "tools.models.Unit.objects.get",
"line_number": 431,
"usage_type": "call"
},
{
"api_name": "tools.models.Unit",
"line_number": 431,
"usage_type": "attribute"
},
{
"api_name": "tools.models",
"line_number": 431,
"usage_type": "name"
},
{
"api_name": "tools.models.Unit.objects.get",
"line_number": 432,
"usage_type": "call"
},
{
"api_name": "tools.models.Unit",
"line_number": 432,
"usage_type": "attribute"
},
{
"api_name": "tools.models",
"line_number": 432,
"usage_type": "name"
},
{
"api_name": "tools.models.Unit.objects.get",
"line_number": 433,
"usage_type": "call"
},
{
"api_name": "tools.models.Unit",
"line_number": 433,
"usage_type": "attribute"
},
{
"api_name": "tools.models",
"line_number": 433,
"usage_type": "name"
},
{
"api_name": "tools.models.Unit.objects.get",
"line_number": 434,
"usage_type": "call"
},
{
"api_name": "tools.models.Unit",
"line_number": 434,
"usage_type": "attribute"
},
{
"api_name": "tools.models",
"line_number": 434,
"usage_type": "name"
},
{
"api_name": "tools.models.Unit.objects.get",
"line_number": 435,
"usage_type": "call"
},
{
"api_name": "tools.models.Unit",
"line_number": 435,
"usage_type": "attribute"
},
{
"api_name": "tools.models",
"line_number": 435,
"usage_type": "name"
},
{
"api_name": "tools.models.Unit.objects.get",
"line_number": 438,
"usage_type": "call"
},
{
"api_name": "tools.models.Unit",
"line_number": 438,
"usage_type": "attribute"
},
{
"api_name": "tools.models",
"line_number": 438,
"usage_type": "name"
},
{
"api_name": "tools.models.Unit.objects.get",
"line_number": 439,
"usage_type": "call"
},
{
"api_name": "tools.models.Unit",
"line_number": 439,
"usage_type": "attribute"
},
{
"api_name": "tools.models",
"line_number": 439,
"usage_type": "name"
},
{
"api_name": "tools.models.Unit.objects.get",
"line_number": 440,
"usage_type": "call"
},
{
"api_name": "tools.models.Unit",
"line_number": 440,
"usage_type": "attribute"
},
{
"api_name": "tools.models",
"line_number": 440,
"usage_type": "name"
},
{
"api_name": "tools.models.Unit.objects.get",
"line_number": 441,
"usage_type": "call"
},
{
"api_name": "tools.models.Unit",
"line_number": 441,
"usage_type": "attribute"
},
{
"api_name": "tools.models",
"line_number": 441,
"usage_type": "name"
},
{
"api_name": "tools.models.Unit.objects.get",
"line_number": 442,
"usage_type": "call"
},
{
"api_name": "tools.models.Unit",
"line_number": 442,
"usage_type": "attribute"
},
{
"api_name": "tools.models",
"line_number": 442,
"usage_type": "name"
},
{
"api_name": "tools.models.Unit.objects.get",
"line_number": 443,
"usage_type": "call"
},
{
"api_name": "tools.models.Unit",
"line_number": 443,
"usage_type": "attribute"
},
{
"api_name": "tools.models",
"line_number": 443,
"usage_type": "name"
},
{
"api_name": "tools.models.Unit.objects.get",
"line_number": 444,
"usage_type": "call"
},
{
"api_name": "tools.models.Unit",
"line_number": 444,
"usage_type": "attribute"
},
{
"api_name": "tools.models",
"line_number": 444,
"usage_type": "name"
},
{
"api_name": "tools.models.Unit.objects.get",
"line_number": 445,
"usage_type": "call"
},
{
"api_name": "tools.models.Unit",
"line_number": 445,
"usage_type": "attribute"
},
{
"api_name": "tools.models",
"line_number": 445,
"usage_type": "name"
},
{
"api_name": "tools.models.Unit.objects.get",
"line_number": 446,
"usage_type": "call"
},
{
"api_name": "tools.models.Unit",
"line_number": 446,
"usage_type": "attribute"
},
{
"api_name": "tools.models",
"line_number": 446,
"usage_type": "name"
},
{
"api_name": "tools.models.Unit.objects.get",
"line_number": 447,
"usage_type": "call"
},
{
"api_name": "tools.models.Unit",
"line_number": 447,
"usage_type": "attribute"
},
{
"api_name": "tools.models",
"line_number": 447,
"usage_type": "name"
},
{
"api_name": "tools.models.Unit.objects.get",
"line_number": 448,
"usage_type": "call"
},
{
"api_name": "tools.models.Unit",
"line_number": 448,
"usage_type": "attribute"
},
{
"api_name": "tools.models",
"line_number": 448,
"usage_type": "name"
}
] |
185095412
|
import sys
import unittest
import numpy as np
import torch
from metal.label_model.label_model import LabelModel
from metal.label_model.baselines import (
RandomVoter,
MajorityClassVoter,
MajorityLabelVoter,
)
sys.path.append("../synthetics")
from synthetics.generate import (
SingleTaskTreeDepsGenerator,
HierarchicalMultiTaskTreeDepsGenerator
)
# TODO: Put in tests for LabelModel baseline again!
class LabelModelTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.n_iters = 3
cls.n = 10000
cls.m = 10
cls.k = 2
def _test_label_model(self, data, test_acc=True, mts=False):
if mts:
label_model = LabelModel(data.m, task_graph=data.task_graph,
p=data.p, deps=data.E)
else:
label_model = LabelModel(data.m, k=data.k, p=data.p, deps=data.E)
label_model.train(data.L, n_epochs=1000, print_every=200)
# Test parameter estimation error
c_probs_est = label_model.get_conditional_probs()
err = np.mean(np.abs(data.c_probs - c_probs_est))
print(f"Parameter Estimation Error={err}")
self.assertLess(err, 0.015)
# Test label prediction accuracy
if test_acc:
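# argmax over class probabilities, +1 because labels are 1-indexed (0 denotes an abstain, as in the augmented-matrix test below)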
Y_pred = label_model.get_label_probs(data.L).argmax(axis=1) + 1
acc = np.where(data.Y == Y_pred, 1, 0).sum() / data.n
print(f"Label Prediction Accuracy={acc}")
self.assertGreater(acc, 0.95)
def test_no_deps(self):
for seed in range(self.n_iters):
np.random.seed(seed)
print(f">>> Testing for seed={seed}")
data = SingleTaskTreeDepsGenerator(self.n, self.m, k=self.k,
edge_prob=0.0)
self._test_label_model(data)
def test_augmented_L_construction(self):
# 5 LFs: a triangle, a connected edge to it, and a singleton source
n = 3
m = 5
k = 2
E = [(0,1), (1,2), (2,0), (0,3)]
L = np.array([
[1, 1, 1, 2, 1],
[1, 2, 2, 1, 0],
[1, 1, 1, 1, 0]
])
lm = LabelModel(m, k=k, deps=E)
L_aug = lm._get_augmented_label_matrix(L, offset=1, higher_order=True)
# Should have 22 columns:
# - 5 * 2 = 10 for the sources
# - 8 + 4 for the 3- and 2-clique resp. --> = 22
self.assertEqual(L_aug.shape, (3,22))
# Same as above but minus 2 abstains = 19 total nonzero entries
self.assertEqual(L_aug.sum(), 19)
# Next, check the singleton entries
for i in range(n):
for j in range(m):
if L[i,j] > 0:
self.assertEqual(L_aug[i, j * k + L[i,j] - 1], 1)
# Finally, check the clique entries
# Triangle clique
self.assertEqual(len(lm.c_tree.node[1]['members']), 3)
j = lm.c_tree.node[1]['start_index']
self.assertEqual(L_aug[0, j], 1)
self.assertEqual(L_aug[1, j + 3], 1)
self.assertEqual(L_aug[2, j], 1)
# Binary clique
self.assertEqual(len(lm.c_tree.node[2]['members']), 2)
j = lm.c_tree.node[2]['start_index']
self.assertEqual(L_aug[0, j+1], 1)
self.assertEqual(L_aug[1, j], 1)
self.assertEqual(L_aug[2, j], 1)
def test_with_deps(self):
for seed in range(self.n_iters):
np.random.seed(seed)
print(f">>> Testing for seed={seed}")
data = SingleTaskTreeDepsGenerator(self.n, self.m, k=self.k,
edge_prob=1.0)
self._test_label_model(data, test_acc=False)
def test_mts(self):
for seed in range(self.n_iters):
np.random.seed(seed)
print(f">>> Testing for seed={seed}")
data = HierarchicalMultiTaskTreeDepsGenerator(self.n, self.m,
edge_prob=0.0)
self._test_label_model(data, test_acc=False, mts=True)
if __name__ == '__main__':
unittest.main()
| null |
tests/metal/label_model/test_label_model.py
|
test_label_model.py
|
py
| 4,002 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "sys.path.append",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "unittest.TestCase",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "metal.label_model.label_model.LabelModel",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "metal.label_model.label_model.LabelModel",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "numpy.mean",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "numpy.abs",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "numpy.where",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "numpy.random.seed",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 54,
"usage_type": "attribute"
},
{
"api_name": "synthetics.generate.SingleTaskTreeDepsGenerator",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "metal.label_model.label_model.LabelModel",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "numpy.random.seed",
"line_number": 104,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 104,
"usage_type": "attribute"
},
{
"api_name": "synthetics.generate.SingleTaskTreeDepsGenerator",
"line_number": 106,
"usage_type": "call"
},
{
"api_name": "numpy.random.seed",
"line_number": 112,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 112,
"usage_type": "attribute"
},
{
"api_name": "synthetics.generate.HierarchicalMultiTaskTreeDepsGenerator",
"line_number": 114,
"usage_type": "call"
},
{
"api_name": "unittest.main",
"line_number": 120,
"usage_type": "call"
}
] |
476898207
|
from pymongo import MongoClient
import json
client = MongoClient('proximus.modulusmongo.net:27017')
client.tepO9seb.authenticate('nasahack', 'hacking4nasa')
db = client.tepO9seb
if __name__ == '__main__':
data = json.load(open('data/defense_ngram_np.json'))
db.datasets.insert(data)
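# e.g. to verify the insert afterwards (hypothetical check):
# print(db.datasets.find_one())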
| null |
mongoWork/insert_defense_ngram_kwds.py
|
insert_defense_ngram_kwds.py
|
py
| 293 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "pymongo.MongoClient",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "json.load",
"line_number": 9,
"usage_type": "call"
}
] |
535483665
|
#!/usr/bin/env python3
"""
bayesian test for periodicity in equally spaced spatial/time data (ti,yi) etc
following Jaynes, 'Probability Theory: The Logic of Science' section 17.6
#======================================
MODEL: y(t) = A.cos wt + B.sin wt + mu + Gaussian_noise
n data points
PRIORS: Jeffreys priors, p(mu) = const., p(sigma) = 1/sigma
p(w) = const. over w=0 to Nyquist limit pi/dt
p(A,B) = const.exp(-(A^2 + B^2)/2.delta^2)
delta is width for coefficient priors, on the order of several times range of y
with 'radial' symmetry for R^2 = A^2 + B^2, i.e. uniform in phase 0-2pi of periodicity
LIKELIHOOD
after marginalizing over offset mu, and magnitude of noise sigma
the likelihood is p(Data|A,B,w) = 1/s^(n-1)
where
s^2 = (<d^2> - <d>^2) is variance of derived 'data'
d(t) = y(t) - A.cos wt - B.sin wt
POSTERIOR
p(A,B,w|Data) = const.exp(-(A^2 + B^2)/2.delta^2)/s^(n-1)
finally we need to marginalize over A, B to get
p(w|Data)
maximum in this gives frequency, and then we can back calculate A,B
#======================================
"""
import math
import numpy as np
import matplotlib.pyplot as plt
from SciInf_utilities import *
import sys
def sum_sq(Acoeff,Bcoeff,freq,y,ndata):
d_sum = 0.
d_sum_sq = 0.
for i in range(ndata):
d_i = y[i] - Acoeff*math.cos(freq*i) - Bcoeff*math.sin(freq*i)
d_sum = d_sum + d_i
d_sum_sq = d_sum_sq + d_i**2
s2 = d_sum_sq/ndata - (d_sum/ndata)**2
return s2
""" main
"""
#
print("bayesian test for periodicity in equally spaced spatial/time data (ti,yi) etc")
print("following Jaynes, 'Probability Theory: The Logic of Science' section 17.6 \n")
# get data
#
if(len(sys.argv) == 2):
input_file = sys.argv[1]
else:
input_file = input("file with one t, y data pair per line> ")
#input_file = 'CparkT_1930.dat' # average january temp in Central Park NY
#input_file = 'cos4.dat'
print('input file: ',input_file)
t = []
y = []
ndata = read_xy(t,y,input_file)
#
# basic averages, etc
#
av_t = average_x(t)
av_y = average_x(y)
print('av t %12.5f y %12.5f ' % (av_t,av_y))
min_t = min(t)
min_y = min(y)
print('min t %12.5f y %12.5f ' % (min_t,min_y))
max_t = max(t)
max_y = max(y)
print('max t %12.5f y %12.5f ' % (max_t,max_y))
#
t_span = t[ndata-1] - t[0]
dt = t_span/(ndata -1)
print('t span %11.5f dt %11.5f ' % (t_span,dt))
#
## shift y data so <y> = 0
#for i in range(ndata):
# y[i] = y[i] - av_y
#
# set up frequency range from 0 to Nyquist upper limit =
# = minimum period of 2 time intervals
# use unitless time intervals dt = 1 for frequency,
# and convert to 'real' time only for period axis for output
#
freq_axis = np.zeros(ndata+1,'float')
period_axis = np.zeros(ndata+1,'float')
freq_pdf = np.zeros(ndata+1,'float')
for i in range(ndata+1):
freq_axis[i] = math.pi*i/ndata
for i in range(1,ndata+1):
period_axis[i] = dt*2.*math.pi/freq_axis[i]
period_axis[0] = period_axis[1] + 1. # dummy value for infinite period, i.e. a constant signal
#
delta = 2.*(max_y - min_y) # width of gaussian prior for A,B
#ngrid = 51 # grid for marginalization over coefficient magnitudes
ngrid = 37 # grid for marginalization over coefficient magnitudes
r_up = 2.*delta
dr = r_up/(ngrid - 1)
#print(r_up,dr)
dtheta = 2.*math.pi/(ngrid-1)
expnt = -0.5*(float(ndata) - 1.)
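# exponent for the marginal likelihood: 1/s^(n-1) = (s2)**expnt since s2 = s^2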
#print(expnt)
#
# find posterior p(freq|Data)
#
print("doing marginalization integrals, please wait...")
for k in range(0,ndata+1):
freq_pdf[k] = 0.
freq = freq_axis[k]
# for each frequency marginalize over A,B
for i in range(ngrid):
r_val = i*dr
#print(" {:12.5f}".format(r_val))
for j in range(ngrid):
theta = j*dtheta
Acoeff = r_val*math.cos(theta)
Bcoeff = r_val*math.sin(theta)
probAB = math.exp(-0.5*(r_val/delta)**2)
#print(" {:12.5f} {:12.5f} {:12.5f} {:12.5f}".format(theta,Acoeff,Bcoeff,probAB))
s2 = sum_sq(Acoeff,Bcoeff,freq,y,ndata)
probABw = probAB*s2**expnt
freq_pdf[k] += probABw*dtheta*dr
pdf_max = max(freq_pdf)
freq_pdf /= pdf_max
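# normalize so the peak is 1 (for plotting; not a proper probability normalization)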
#
# find most probable frequency
# and get best A,B
#
pdf_max = 0.
for k in range(1,ndata+1):
if(freq_pdf[k] > pdf_max):
pdf_max = freq_pdf[k]
freq_max = freq_axis[k]
period_max = dt*2.*math.pi/freq_max
print("max pdf {:12.5f} for frequency {:12.5f}, period{:12.5f} ".format(pdf_max,freq_max,period_max))
pdf_max = 0.
for i in range(ngrid):
r_val = i*dr
for j in range(ngrid):
theta = j*dtheta
Acoeff = r_val*math.cos(theta)
Bcoeff = r_val*math.sin(theta)
probAB = math.exp(-0.5*(r_val/delta)**2)
#print(" {:12.5f} {:12.5f} {:12.5f} {:12.5f}".format(theta,Acoeff,Bcoeff,probAB))
s2 = sum_sq(Acoeff,Bcoeff,freq_max,y,ndata)
probABw = probAB*s2**expnt
if(probABw > pdf_max):
pdf_max = probABw
Acoeff_best = Acoeff
Bcoeff_best = Bcoeff
# print('new max: ',pdf_max,Acoeff_best,Bcoeff_best)
print("best p(ABw) {:12.5f} best parameters {:12.5f} {:12.5f}".format(pdf_max,Acoeff_best,Bcoeff_best))
y_calc = np.zeros(ndata,'float')
for i in range(ndata):
y_calc[i] = Acoeff_best*math.cos(freq_max*i) + Bcoeff_best*math.sin(freq_max*i) + av_y
#
# plot original data
#
#for i in range(ndata):
# y[i] = y[i] + av_y
freq_pdf[0] = freq_pdf[1]
MAKEPLOT = True
if(MAKEPLOT):
plt.figure(1)
plt.subplot(211)
plt.scatter(t,y,color='red',marker='o')
plt.plot(t,y_calc,color='blue')
plt.xlabel('t')
plt.ylabel('y')
#plt.ylim(ymin=0.)
#plt.title('T Series ')
plt.grid(True)
#
# plot posterior pdf of frequency/period
#
plt.subplot(212)
#plt.plot(freq_axis,freq_pdf,color='red')
#plt.xlabel('frequency')
plt.plot(period_axis,freq_pdf,color='red')
plt.xlabel('period')
plt.ylabel('pdf(period)')
#plt.title('PDF ')
plt.grid(True)
plt.show()
| null |
src/PeriodicSeries.py
|
PeriodicSeries.py
|
py
| 5,801 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "math.cos",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "math.sin",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 48,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 49,
"usage_type": "attribute"
},
{
"api_name": "numpy.zeros",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "math.pi",
"line_number": 88,
"usage_type": "attribute"
},
{
"api_name": "math.pi",
"line_number": 90,
"usage_type": "attribute"
},
{
"api_name": "math.pi",
"line_number": 99,
"usage_type": "attribute"
},
{
"api_name": "math.cos",
"line_number": 115,
"usage_type": "call"
},
{
"api_name": "math.sin",
"line_number": 116,
"usage_type": "call"
},
{
"api_name": "math.exp",
"line_number": 117,
"usage_type": "call"
},
{
"api_name": "math.pi",
"line_number": 133,
"usage_type": "attribute"
},
{
"api_name": "math.cos",
"line_number": 140,
"usage_type": "call"
},
{
"api_name": "math.sin",
"line_number": 141,
"usage_type": "call"
},
{
"api_name": "math.exp",
"line_number": 142,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 152,
"usage_type": "call"
},
{
"api_name": "math.cos",
"line_number": 154,
"usage_type": "call"
},
{
"api_name": "math.sin",
"line_number": 154,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 163,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 163,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.subplot",
"line_number": 164,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 164,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.scatter",
"line_number": 165,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 165,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 166,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 166,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xlabel",
"line_number": 167,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 167,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.ylabel",
"line_number": 168,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 168,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.grid",
"line_number": 171,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 171,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.subplot",
"line_number": 175,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 175,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 178,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 178,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xlabel",
"line_number": 179,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 179,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.ylabel",
"line_number": 180,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 180,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.grid",
"line_number": 182,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 182,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 183,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 183,
"usage_type": "name"
}
] |
4509549
|
# -*- coding: utf-8 -*-
"""
Signals and receivers for Course Access Groups.
"""
import logging
from .models import Membership
log = logging.getLogger(__name__)
def on_learner_account_activated(sender, user, **kwargs):
"""
Receive the `USER_ACCOUNT_ACTIVATED` signal to apply MembershipRule.
:param sender: The sender class.
:param user: The activated learner.
:param kwargs: Extra keyword args.
"""
try:
Membership.create_from_rules(user)
except Exception:
log.exception('Error receiving USER_ACCOUNT_ACTIVATED signal for user %s pk=%s, is_active=%s, sender=%s',
user.email, user.pk, user.is_active, sender)
raise
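# Note: this receiver only fires once it is connected to the signal, typically
# from the app's ready() hook. A hedged wiring sketch (the signal's import
# path is an assumption, not taken from this file):
#
#     USER_ACCOUNT_ACTIVATED.connect(on_learner_account_activated)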
| null |
course_access_groups/signals.py
|
signals.py
|
py
| 698 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "logging.getLogger",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "models.Membership.create_from_rules",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "models.Membership",
"line_number": 22,
"usage_type": "name"
}
] |
65990093
|
'''
Serves a (super) simplified version of the HTTP protocol.
Warning: Running this may expose your computer to attacks.
Don't run this.
'''
from threading import Thread
from queue import Queue, Empty
from socket import socket as Socket, timeout, gethostname, gethostbyname
import logging
from mythread import Safe
__all__ = ['BadRequest', 'ClientShutdown', 'Request',
'OneServer', 'Server', 'Intent', 'log', 'logging',
'respond', 'myLogger',
]
class BadRequest(BaseException):
pass
class ClientShutdown(BaseException):
pass
logging.basicConfig(format='%(asctime)s %(message)s',
filename = 'log.log')
logging.root.setLevel(logging.NOTSET)
class MyLogger:
def __init__(self):
self.verbose = True
def log(self, *args, sep = ' ', end = '\n', flush = False, level = logging.INFO):
text = sep.join([str(x) for x in args]) + end
if self.verbose:
print(text, flush = flush, end = '')
logging.log(level, text)
myLogger = MyLogger()
log = myLogger.log
class Intent:
pass
class DeregisterOneServer(Intent):
def __init__(self, oneServer):
self.oneServer = oneServer
class Request:
def __init__(self, command, target, http_version):
self.command = command
self.target = target
self.http_version = http_version
self.options = {}
self.body = ''
def add(self, kw, value):
self.options[kw] = value
def get(self, kw):
return self.options[kw]
def __str__(self):
if self.command == 'POST':
return self.command + ' ' + self.target + ' ' + self.body
else:
return self.command + ' ' + self.target
def parseHead(text):
whats_bad = ''
try:
lines = text.split('\r\n')
whats_bad = lines[0]
request = Request(*lines[0].split(' '))
for line in lines[1:]:
whats_bad = line
kw, value = line.split(':', 1)
kw = kw.strip(' ')
value = value.strip(' ')
request.add(kw, value)
return request
except Exception as e:
log('Bad line:', whats_bad, level = logging.ERROR)
raise BadRequest
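# Usage sketch for parseHead (illustrative input, not from the original file):
#
#     req = parseHead('GET /index.html HTTP/1.1\r\nHost: example.com')
#     req.command        # -> 'GET'
#     req.get('Host')    # -> 'example.com'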
def respond(socket, data):
response = '''HTTP/1.1 200 OK\r
Content-Length: %d\r
Content-Type: text/html\r\n\r\n''' % len(data)
socket.send(response.encode())
socket.send(data)
class OneServer(Thread):
'''
Subclass this class and override:
handle(request) where request is a Request object
        request_filter is a list of target substrings whose requests you don't want to log
`close()`
'''
request_filter = []
def __init__(self, addr, socket, parentQueue):
'''
You shouldn't override this. OneServer doesn't need any
runtime state. Keep-alive should not be abused.
'''
Thread.__init__(self)
self.addr = addr
self.socket = socket
socket.settimeout(.4)
self.parentQueue = parentQueue
self.queue = Queue()
self._go_on = Safe(True)
def close(self):
self._go_on.set(False)
def respond(self, data, do_log = True):
respond(self.socket, data)
if do_log:
if len(data) < 50:
log(self, data.decode())
def handle(self, request):
# Override this
respond(self.socket, b'''<html>What a shame.
The programmer didn't override the request handler. </html>''')
def __str__(self):
return self.addr.__str__()
def run(self):
log(self, 'service begin. ')
chunk = b''
try:
while self._go_on.get():
try:
recved = self.socket.recv(4096)
if recved == b'':
raise ClientShutdown
else:
chunk += recved
while b'\r\n\r\n' in chunk:
bytes_head, chunk = chunk.split(b'\r\n\r\n', 1)
request = parseHead(bytes_head.decode())
if request.command == 'POST':
content_len = int(request.get('Content-Length'))
if len(chunk) >= content_len:
bytes_body = chunk[:content_len]
chunk = chunk[content_len:]
request.body = bytes_body.decode()
else:
chunk = b'\r\n\r\n'.join([bytes_head, chunk])
break
do_log = True
for filter in self.request_filter:
if filter in request.target:
do_log = False
break
if do_log:
log(self, 'Request', request)
self.handle(request)
except timeout:
pass
# self.close() called
except (ClientShutdown, ConnectionAbortedError, ConnectionResetError):
log(self, 'client shutdown')
finally:
self.parentQueue.put(DeregisterOneServer(self))
self.socket.close()
log(self, 'Thread has stopped. ')
class Server(Thread):
'''
Subclass this class and override:
handleQueue()
interval()
`close()`
'''
def __init__(self, my_OneServer = OneServer, port = 80,
listen = 1, accept_timeout = .5):
# Pass in your subclassed OneServer
Thread.__init__(self)
self.queue = Queue()
self.OneServer = my_OneServer
self.listen = listen
self.socket = Socket()
self.socket.bind(('', port))
self.socket.settimeout(accept_timeout)
self._go_on = Safe(True)
self.oneServers = []
self.max_connection = Safe(4 * 32)
self.showing_max_waring = False
def setMaxConnection(self, number):
self.max_connection.set(number)
def getMaxConnection(self):
return self.max_connection.get()
def interval(self):
'''
Override this.
'''
pass
def handleQueue(self, intent):
'''
Override this.
'''
pass
def __handleQueue(self, intent):
if type(intent) is DeregisterOneServer:
self.oneServers.remove(intent.oneServer)
else:
self.handleQueue(intent)
def close(self):
        if self.is_alive():  # Thread.isAlive was removed in Python 3.9
with self._go_on:
self._go_on.value = False
#self.join() public method
def onConnect(self, addr):
pass # to override.
def run(self):
self.socket.listen(self.listen)
log('listening at', gethostbyname(gethostname()), '...')
while self._go_on.get():
if len(self.oneServers) >= self.getMaxConnection():
if not self.showing_max_waring:
log('Max connection reached. ')
self.showing_max_waring = True
else:
if self.showing_max_waring:
log("Max connection isn't reached anymore. ")
self.showing_max_waring = False
try:
socket, addr = self.socket.accept()
log(addr, 'Accepted. ')
self.onConnect(addr)
oneServer = self.OneServer(addr, socket, self.queue)
self.oneServers.append(oneServer)
oneServer.start()
except timeout:
pass
try:
while self._go_on.get():
self.__handleQueue(self.queue.get_nowait())
except Empty:
pass
self.interval()
self.socket.close()
log('Closing', len(self.oneServers), 'oneServers.')
for oneServer in self.oneServers:
oneServer.close()
log('Server thread has stopped. ')
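# Minimal usage sketch (Echo is a hypothetical handler, not part of this file):
#
#     class Echo(OneServer):
#         def handle(self, request):
#             self.respond(str(request).encode())
#
#     server = Server(my_OneServer=Echo, port=8080)
#     server.start()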
| null |
myhttp.py
|
myhttp.py
|
py
| 8,182 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "logging.basicConfig",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "logging.root.setLevel",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "logging.root",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "logging.NOTSET",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "logging.INFO",
"line_number": 31,
"usage_type": "attribute"
},
{
"api_name": "logging.log",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "logging.ERROR",
"line_number": 81,
"usage_type": "attribute"
},
{
"api_name": "socket.send",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "socket.send",
"line_number": 89,
"usage_type": "call"
},
{
"api_name": "threading.Thread",
"line_number": 91,
"usage_type": "name"
},
{
"api_name": "threading.Thread.__init__",
"line_number": 105,
"usage_type": "call"
},
{
"api_name": "threading.Thread",
"line_number": 105,
"usage_type": "name"
},
{
"api_name": "socket.settimeout",
"line_number": 108,
"usage_type": "call"
},
{
"api_name": "queue.Queue",
"line_number": 110,
"usage_type": "call"
},
{
"api_name": "mythread.Safe",
"line_number": 111,
"usage_type": "call"
},
{
"api_name": "socket.timeout",
"line_number": 161,
"usage_type": "name"
},
{
"api_name": "threading.Thread",
"line_number": 171,
"usage_type": "name"
},
{
"api_name": "threading.Thread.__init__",
"line_number": 181,
"usage_type": "call"
},
{
"api_name": "threading.Thread",
"line_number": 181,
"usage_type": "name"
},
{
"api_name": "queue.Queue",
"line_number": 182,
"usage_type": "call"
},
{
"api_name": "socket.socket",
"line_number": 185,
"usage_type": "call"
},
{
"api_name": "mythread.Safe",
"line_number": 188,
"usage_type": "call"
},
{
"api_name": "mythread.Safe",
"line_number": 190,
"usage_type": "call"
},
{
"api_name": "socket.gethostbyname",
"line_number": 228,
"usage_type": "call"
},
{
"api_name": "socket.gethostname",
"line_number": 228,
"usage_type": "call"
},
{
"api_name": "socket.timeout",
"line_number": 245,
"usage_type": "name"
},
{
"api_name": "queue.Empty",
"line_number": 250,
"usage_type": "name"
}
] |
31727530
|
# The glob module provides a function for generating file lists from directory wildcard searches:
import glob
print(glob.glob('*.py'))
#['primes.py', 'random.py', 'quote.py']
# Command-line arguments
# Utility scripts often need command-line arguments. They are stored as a list in the sys module's argv variable. For example, running "python demo.py one two three" on the command line gives the output below:
import sys
print(sys.argv)
# ['demo.py', 'one', 'two', 'three']   (expected output when run as above)
# Error output redirection and program termination
# sys also has stdin, stdout and stderr attributes; the last one can display warnings and errors even when stdout has been redirected.
sys.stderr.write('Warning, log file not found starting a new one\n')
# Warning, log file not found starting a new one
# Most scripts terminate by calling "sys.exit()".
# The secrets module builds on os.urandom() and random.SystemRandom(), which interface to the operating system's best source of cryptographic randomness.
import secrets
num = secrets.randbelow(10)
randomnum = secrets.SystemRandom()
randomnum.choice(range(9))
randomnum.choices(range(9),k=3)
randomnum.sample(range(9),3)
randomnum.uniform(2.5,25.5)
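# Also handy (assuming Python 3.6+): secrets.token_hex(16) gives a random
# 32-character hex string, and secrets.token_urlsafe(16) a URL-safe token.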
# operator : built-in operators as functions
# collections : simplifies working with container types
# itertools : tools for iterables
# functools : function tools, especially decorators
import operator
operator.add(1,2)
operator.contains("1234","2")
# Python classmethod decorator
# A function decorated with classmethod can be called without instantiation and takes no self parameter,
# but its first parameter must be cls, representing the class itself; through it you can access class attributes and methods, and instantiate objects.
class A(object):
bar = 1
def func1(self):
print ('foo')
@classmethod
def func2(cls):
print ('func2')
print (cls.bar)
        cls().func1()   # call the foo method
A.func2()               # no instantiation needed
# python staticmethod returns a static method for the given function.
# The method does not require any parameters to be passed.
class C(object):
@staticmethod
def f():
        print('runoob')
C.f()           # static methods need no instantiation
cobj = C()
cobj.f()        # they can also be called on an instance
| null |
note-taking/stdlib.py
|
stdlib.py
|
py
| 2,221 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "glob.glob",
"line_number": 3,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "sys.stderr.write",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "sys.stderr",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "secrets.randbelow",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "secrets.SystemRandom",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "operator.add",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "operator.contains",
"line_number": 45,
"usage_type": "call"
}
] |
21500883
|
# See LICENSE file for full copyright and licensing details
from odoo import models, fields, api
from datetime import datetime
from odoo.exceptions import ValidationError
from odoo.tools import DEFAULT_SERVER_DATE_FORMAT
class CommissionReport(models.TransientModel):
_name = 'commission.report'
start_date = fields.Date(
string='Start date',
required=True)
end_date = fields.Date(
string='End date',
required=True)
@api.constrains('start_date', 'end_date')
def check_date_overlap(self):
"""
        Constraint method that checks that the start date is
        not later than the end date.
-------------------------------------------------------
@param self : object pointer
"""
for ver in self:
if ver.start_date and ver.end_date:
dt_from = datetime.strptime(
ver.start_date, DEFAULT_SERVER_DATE_FORMAT)
dt_to = datetime.strptime(
ver.end_date, DEFAULT_SERVER_DATE_FORMAT)
if dt_to < dt_from:
raise ValidationError(
'End date should be greater than Start date.')
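    # Note: @api.constrains('start_date', 'end_date') makes Odoo re-run this
    # check whenever either field is created or written; raising blocks the save.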
def print_report(self):
if self._context is None:
self._context = {}
data = {
'ids': self.ids,
'model': 'commission.report',
'form': self.read(['start_date', 'end_date'])[0]
}
return self.env.ref('property_commission_ee.commission_for_invoice_report').report_action([], data=data)
| null |
property_commission_ee/wizard/commission_report.py
|
commission_report.py
|
py
| 1,584 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "odoo.models.TransientModel",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "odoo.models",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "odoo.fields.Date",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "odoo.fields",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "odoo.fields.Date",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "odoo.fields",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.strptime",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "odoo.tools.DEFAULT_SERVER_DATE_FORMAT",
"line_number": 30,
"usage_type": "argument"
},
{
"api_name": "datetime.datetime",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.strptime",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "odoo.tools.DEFAULT_SERVER_DATE_FORMAT",
"line_number": 32,
"usage_type": "argument"
},
{
"api_name": "datetime.datetime",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "odoo.exceptions.ValidationError",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "odoo.api.constrains",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "odoo.api",
"line_number": 19,
"usage_type": "name"
}
] |
19470117
|
import xlrd
import xlwt
# from xlutils.copy import copy  # not needed for now
import os
l_p = []  # define two global lists holding the source and target data to compare
l_t = []
def read_excel():
    wb_pri = xlrd.open_workbook('test_modify.xlsx')  # open the source file
    wb_tar = xlrd.open_workbook('test_origin.xlsx')  # open the target file
    wb_result = xlwt.Workbook()  # create a new workbook to save the results
sheet_result = wb_result.add_sheet('result', cell_overwrite_ok=True)
result_i = 0
result_j = 0
for sheet_i in range(1):
        sheet_pri = wb_pri.sheet_by_index(sheet_i)  # get each sheet by index; to keep things simple I limited this to the sheets I need (originally sheets 2-21)
sheet_tar = wb_tar.sheet_by_index(sheet_i)
#sheet_backup = wb_backup.get_sheet(sheet_i)
print(sheet_pri.name, sheet_tar.name)
        # why this column? because it holds exactly the data that needs comparing
l_p = sheet_pri.col_values(2)
l_t = sheet_tar.col_values(2)
        # tmp = [val for val in a if val in b]  # this would compute the intersection; the boss didn't ask for it, so it's unused
        # find values present in pri (source data) but absent from tar (target)
        tmp_pd = list(set(l_p).difference(set(l_t)))
        # find values present in tar but absent from pri
tmp_td = list(set(l_t).difference(set(l_p)))
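        # worked example of the set difference used above (illustrative):
        #   set(['a', 'b']).difference(set(['b', 'c'])) -> {'a'}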
if result_i < result_j:
result_i = result_j
else:
result_j = result_i
for pd_i in tmp_pd:
result_i = result_i + 1
sheet_result.write(result_i, 0, sheet_pri.name)
sheet_result.write(result_i, 2, pd_i)
for td_i in tmp_td:
result_j = result_j + 1
sheet_result.write(result_j, 1, sheet_tar.name)
sheet_result.write(result_j, 3, td_i)
    # done - check the results in the excel file named result
wb_result.save('result.xls')
if __name__ == '__main__':
read_excel()
| null |
excel-in-python/test_compare.py
|
test_compare.py
|
py
| 1,968 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "xlrd.open_workbook",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "xlrd.open_workbook",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "xlwt.Workbook",
"line_number": 13,
"usage_type": "call"
}
] |
577276761
|
#!/usr/bin/env python3
"""The setup script."""
from setuptools import setup, find_packages
import versioneer
with open('README.rst') as readme_file:
readme = readme_file.read()
with open('HISTORY.rst') as history_file:
history = history_file.read()
name = 'pycookiecutter'
packages = find_packages()
url = 'https://github.com/mqlab-dev/pycookiecutter'
version = versioneer.get_version()
cmdclass = versioneer.get_cmdclass()
author = "Qiong X. Michaels"
author_email = '[email protected]'
description = "Starting template for creating a Python package."
keywords = ['cookiecutter', 'template', 'package']
requirements = ['versioneer==0.18', ]
setup_requirements = ['pytest-runner', ]
test_requirements = ['pytest', ]
setup(
name=name,
packages=packages,
url=url,
version=version,
cmdclass=cmdclass,
author=author,
author_email=author_email,
license="BSD license",
description=description,
setup_requires=setup_requirements,
install_requires=requirements,
tests_require=test_requirements,
long_description=readme + '\n\n' + history,
keywords=keywords,
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Environment :: Console',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Natural Language :: English',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Topic :: Software Development',
]
)
| null |
setup.py
|
setup.py
|
py
| 1,594 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "setuptools.find_packages",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "versioneer.get_version",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "versioneer.get_cmdclass",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "setuptools.setup",
"line_number": 29,
"usage_type": "call"
}
] |
125765735
|
from selenium import webdriver
from webdriver_manager.chrome import ChromeDriverManager
from selenium.webdriver.chrome.options import Options
import time
from fbchat import Client, log
from fbchat.models import *
options = webdriver.ChromeOptions()
#options.add_argument('headless')
#driver = webdriver.Chrome(ChromeDriverManager().install())
#driver = webdriver.Chrome(options=options)
class MessengerBot(Client):
def onMessage(self, author_id, message_object, thread_id, thread_type, **kwargs):
self.markAsDelivered(thread_id, message_object.uid)
self.markAsRead(thread_id)
users = client.fetchAllUsers()
threads = client.fetchThreadList()
for thread in threads:
recentMessages=client.fetchThreadMessages(thread.uid,1)
msg=recentMessages[0].text.lower()
print(msg)
trigger1="msgtrigger"
if(msg==trigger1):
client.sendMessage("triggeranswer",thread.uid,ThreadType.USER)
client = MessengerBot("username","password")
client.listen()
| null |
msgReader.py
|
msgReader.py
|
py
| 1,091 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "selenium.webdriver.ChromeOptions",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "fbchat.Client",
"line_number": 14,
"usage_type": "name"
}
] |
265063260
|
from keras.models import Sequential
from keras.layers import Concatenate,Input, Dense, concatenate
from keras.models import Model
from keras.optimizers import Adam, RMSprop
from keras.models import model_from_json
from keras.layers import Dropout
from keras.layers import Flatten
from keras.constraints import maxnorm
from keras.optimizers import SGD
from keras.layers.convolutional import Conv2D
from keras.layers.convolutional import MaxPooling2D
from keras import backend as K
K.set_image_dim_ordering('th')
import numpy as np
seed = 7
np.random.seed(seed)
import pickle
#import gym
from sawyer import sawyer
import random
from collections import deque
import copy
import rospy
import csv
import sys
def writelog(reward,done,p1,p2,values1,values2,message):
fname = 'rewards.csv'
file1 = open(fname, 'a')
writer = csv.writer(file1)
fields1=[reward,done,p1,p2,values1,values2,message]
writer.writerow(fields1)
file1.close()
def report_stats(e, episodes, time_t,agent_num_head_train,agent_memory_head,agent_head_history,agent_num_hand_train,agent_memory_hand,agent_hand_history):
print("episode: {}/{}, score: {}"
.format(e, episodes, time_t))
print("Number of time head trained ",agent_num_head_train)
print("Stored memory length for head images ",len(agent_memory_head))
try:
print("Head training history : ",agent_head_history.history['loss'])
except:
print("Model not trained yet ")
print("Number of times hand trained ",agent_num_hand_train)
print("Stored memory length for hand images ",len(agent_memory_hand))
try:
print("Hand training history : ",agent_hand_history.history['loss'])
except:
print("Model not trained yet ")
def model_pred_to_robot_acts(action,flag):
actions=[]
if(flag==False):
for i in action[:-1]: #Last one for the switch
if(i%3==0):
actions.append(0.0)
if(i%3==1):
actions.append(0.05)
if(i%3==2):
actions.append(-0.05)
if(flag==True):
for i in action[:-1]: #Last one for the switch
if(i%3==0):
actions.append(0.0)
if(i%3==1):
actions.append(0.05)
if(i%3==2):
actions.append(-0.05)
return actions
class DQNAgent:
def __init__(self, env, action_size, switches):
obs = env.reset()
#env.render()
#print('initial observation:', obs)
#action = env.action_space.sample()
#obs, r, done = env.step(action)
#print('next observation:', obs)
#print('reward:', r)
#print('done:', done)
#print('info:', info)
self.state_size = (obs["image"].shape[0])**2
self.imsize=obs["image"].shape[0]
print("state size ",self.state_size)
self.switches=switches
self.action_size = action_size+switches
self.memory_head = deque(maxlen=3000)
self.memory_hand = deque(maxlen=3000)
self.gamma = 0.95 # discount rate
self.epsilon = 0.1 # exploration rate
self.epsilon_min = 0.01
self.epsilon_decay = 0.995
self.learning_rate = 0.001
self.numruns=0
self.ver=0
self.cur_mem_head=0
self.prev_mem_head=0
self.cur_mem_hand=0
self.prev_mem_hand=0
self.view_state="head"
self.evaluate=True
self.model_head = self._build_model()
self.target_model_head = self._build_model()
self.model_hand = self._build_model()
self.target_model_hand = self._build_model()
#self.model_head.load_weights("model_weights_head.h5")
#self.model_hand.load_weights("model_weights_hand.h5")
self.continuation=True
if(self.continuation==True):
self.model_head.load_weights("saved_weights/model_weights_head.h5")
self.model_hand.load_weights("saved_weights/model_weights_hand.h5")
if(self.evaluate==False):
print("Loading the past experience ")
try:
                file = open('Experience/experience2.obj', 'rb')  # pickle files must be opened in binary mode
                self.memory_head = pickle.load(file)
                file = open('Experience/experience2.obj', 'rb')
                self.memory_hand = pickle.load(file)
except:
print("Failed to load past experience. Make sure you have it")
#self.memory_head = []
#self.memory_hand = []
#sample=self.model_head.predict([np.zeros((1,1,80,80)),np.zeros((1,7))])
#print("sample prediction ",sample)
#print("sample prediction shape ",sample.shape)
#sys.exit(0)
self.num_head_train=0
self.num_hand_train=0
self.head_history=[]
self.hand_history=[]
self.TAU=0.01
# serialize model to JSON
model_json = self.model_head.to_json() #Model_head and model_hand have the same architecture
with open("model.json", "w") as json_file:
json_file.write(model_json)
print("initialized agent and built model")
def target_train(self,view):
actor_weights=[]
actor_target_weights=[]
if(view=="hand"):
actor_weights = self.model_hand.get_weights()
actor_target_weights = self.target_model_hand.get_weights()
if(view=="head"):
actor_weights = self.model_head.get_weights()
actor_target_weights = self.target_model_head.get_weights()
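        # Polyak / soft target-network update: for every weight tensor,
        # theta_target <- TAU*theta + (1 - TAU)*theta_target, with TAU = 0.01.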
        for i in range(len(actor_weights)):  # xrange is Python 2 only
actor_target_weights[i] = self.TAU * actor_weights[i] + (1 - self.TAU)* actor_target_weights[i]
if(view=="hand"):
self.target_model_hand.set_weights(actor_target_weights)
if(view=="head"):
self.target_model_head.set_weights(actor_target_weights)
def _build_model(self):
# Neural Net for Deep-Q learning Model
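        # Architecture sketch: a shared conv trunk over the 1x80x80 image,
        # followed by eight parallel heads. Heads 1-7 each concatenate the
        # trunk features with the 7 joint angles and emit 3 Q-values for one
        # joint (hold / +0.05 / -0.05); head 8 emits 2 values for the camera
        # switch decision.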
model = Sequential()
image_input= Input(shape=(1, 80, 80))
joint_input= Input(shape=(7,))
x=Conv2D(32, (3, 3), input_shape=(1, 80, 80), padding='same', activation='relu', kernel_constraint=maxnorm(3))(image_input)
x=Conv2D(32, (3, 3), activation='relu', padding='same', kernel_constraint=maxnorm(3))(x)
x=MaxPooling2D(pool_size=(2, 2))(x)
x=Conv2D(64, (3, 3), activation='relu', padding='same', kernel_constraint=maxnorm(3))(x)
x=Conv2D(64, (3, 3), activation='relu', padding='same', kernel_constraint=maxnorm(3))(x)
x=MaxPooling2D(pool_size=(2, 2))(x)
x=Conv2D(128, (3, 3), activation='relu', padding='same', kernel_constraint=maxnorm(3))(x)
x=Conv2D(128, (3, 3), activation='relu', padding='same', kernel_constraint=maxnorm(3))(x)
x=MaxPooling2D(pool_size=(2, 2))(x)
x=Flatten()(x)
x=Dense(512, activation='relu', kernel_constraint=maxnorm(3))(x)
x=Dense(100, activation='relu', kernel_constraint=maxnorm(3))(x)
x1 = Dense(30, activation='relu')(x)
y1= concatenate([x1,joint_input])
y1 = Dense(10, activation='relu')(y1)
x12 = Dense(3, activation='linear')(y1)
x2 = Dense(30, activation='relu')(x)
y2= concatenate([x2,joint_input])
y2 = Dense(10, activation='relu')(y2)
x22 = Dense(3, activation='linear')(y2)
x3 = Dense(30, activation='relu')(x)
y3= concatenate([x3,joint_input])
y3 = Dense(10, activation='relu')(y3)
x32 = Dense(3, activation='linear')(y3)
x4 = Dense(30, activation='relu')(x)
y4= concatenate([x4,joint_input])
y4 = Dense(10, activation='relu')(y4)
x42 = Dense(3, activation='linear')(y4)
x5 = Dense(30, activation='relu')(x)
y5= concatenate([x5,joint_input])
y5 = Dense(10, activation='relu')(y5)
x52 = Dense(3, activation='linear')(y5)
x6 = Dense(30, activation='relu')(x)
y6= concatenate([x6,joint_input])
y6 = Dense(10, activation='relu')(y6)
x62 = Dense(3, activation='linear')(y6)
x7 = Dense(30, activation='relu')(x)
y7= concatenate([x7,joint_input])
y7 = Dense(10, activation='relu')(y7)
x72 = Dense(3, activation='linear')(y7)
x8 = Dense(10, activation='relu')(x)
y8= concatenate([x8,joint_input])
y8 = Dense(10, activation='relu')(y8)
x82 = Dense(2, activation='sigmoid')(y8)
combined_action = concatenate([x12, x22, x32, x42, x52, x62, x72, x82])
model = Model(inputs=[image_input,joint_input], outputs=combined_action)
def huber_loss(a, b, in_keras=True):
error = a - b
quadratic_term = error*error / 2
            linear_term = abs(error) - 0.5  # use 0.5, not 1/2, which is 0 under Python 2 integer division
use_linear_term = (abs(error) > 1.0)
if in_keras:
# Keras won't let us multiply floats by booleans, so we explicitly cast the booleans to floats
use_linear_term = K.cast(use_linear_term, 'float32')
return use_linear_term * linear_term + (1-use_linear_term) * quadratic_term
#model.add(Dense(200, activation='relu', kernel_constraint=maxnorm(3)))
#model.add(Dense(100, activation='relu', kernel_constraint=maxnorm(3)))
#model.add(Dense(self.action_size, activation='linear'))
#model.add(Dense(self.action_size, activation='tanh'))
#model.add(Dense(200, input_dim=self.state_size, activation='relu'))
#model.add(Dense(50, activation='relu'))
#model.add(Dense(self.action_size, activation='linear'))
#model.load_weights("model_weights.h5")
#print("Successfully loaded model_weights")
#model.compile(loss='mse',optimizer=Adam(lr=self.learning_rate))
#model.compile(loss='mse',optimizer=RMSprop(lr=0.001, rho=0.9, epsilon=None, decay=0.0))
model.compile(loss=huber_loss,optimizer=RMSprop(lr=0.001, rho=0.9, epsilon=None, decay=0.0))
return model
def remember(self, state, action, reward, next_state, done, switched):
if(self.view_state=="head"):
self.memory_head.append((state, action, reward, next_state, done, switched))
self.cur_mem_head+=1
if(self.view_state=="hand"):
self.memory_hand.append((state, action, reward, next_state, done, switched))
self.cur_mem_hand+=1
self.numruns+=1
save_file_name_head='Experience/experience_head'+str(self.ver)+'.obj'
save_file_name_hand='Experience/experience_hand'+str(self.ver)+'.obj'
if(self.numruns%2000==0 and self.ver<3):#Adds a new experience file for every 2000 runs
self.ver+=1
if(self.numruns%100==0): #update memory every 100 runs
print("Saving experience head")
            exp_file = open(save_file_name_head, 'wb')  # binary mode for pickle
pickle.dump(self.memory_head, exp_file)
exp_file.close()
print("Saving experience hand")
            exp_file = open(save_file_name_hand, 'wb')  # binary mode for pickle
pickle.dump(self.memory_hand, exp_file)
exp_file.close()
def act(self, state):
act_values=[]
if(self.evaluate==False):
if ((np.random.rand() <= self.epsilon)or len(self.memory_head)<100 or len(self.memory_hand)<100):
#if (np.random.rand() <= self.epsilon):
#act_values= np.random.rand(1,self.action_size)
print("Random action taken")
acts=np.random.randint(3,size=7)
for a in range(len(acts)): #Major bug fix..random actions were always been wrongly taken
add=3*a
acts[a]+=add
switches=np.random.randint(2,size=1)
return np.concatenate((acts,switches)),acts,acts,"random"
if(self.view_state=="head"):
act_values = self.model_head.predict([state["image"],state["joints"]])
if(self.view_state=="hand"):
act_values = self.model_hand.predict([state["image"],state["joints"]])
#print("got act values ",act_values)
a_v=act_values[0][:-self.switches]
acts=[]
for i in range(0,len(a_v),3):#3 possible actions for each joint: increase, decrease or remain same
j=a_v[i:i+3]
acts.append(np.argmax(j)+i) #Major bug fix
s_v=act_values[0][-self.switches:]
for i in range(0,len(s_v),2):#2 possible actions for each switch: change to the other state or remain same
j=s_v[i:i+2]
acts.append(np.argmax(j))
return acts,self.model_head.predict([state["image"],state["joints"]]),self.model_hand.predict([state["image"],state["joints"]]),self.view_state
#j1,j2,j3,j4,j5,j6,j7=a_v[0:3],a_v[3:6],a_v[6:9],a_v[9:12],a_v[12:15],a_v[15:18],a_v[18:21]
#return [np.argmax(j1),np.argmax(j2),np.argmax(j3),np.argmax(j4),np.argmax(j5),np.argmax(j6),np.argmax(j7)] # returns action
def replay(self, batch_size):
minibatch=[]
model=[]
model_switch=[]
model_target=[]
if(self.view_state=="head"):
minibatch = random.sample(self.memory_head, batch_size)
model=self.model_head
model_switch=self.model_hand
model_target=self.target_model_head
if(self.view_state=="hand"):
minibatch = random.sample(self.memory_hand, batch_size)
model=self.model_hand
model_switch=self.model_head
model_target=self.target_model_hand
states_images=np.zeros((1,1,80,80))
states_joints=np.zeros((1,7))
target_fs=np.zeros((1,23)) #7*3 for actions and 2 for switching
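        # Per-head Q-learning target (what the loop below computes): for each
        # joint's 3-way action group and for the 2-way switch group,
        #     target = reward + gamma * max_a' Q_target(next_state, a')
        # with the target network supplying the bootstrap term for stability.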
for state, action, reward, next_state, done, switched in minibatch:
target1 = reward
target2 = reward
target3 = reward
target4 = reward
target5 = reward
target6 = reward
target7 = reward
target8 = reward
if not done:
model_pred=model_target.predict([next_state["image"],next_state["joints"]])[0]
target1 = reward + self.gamma * np.amax(model_pred[0:3])
target2 = reward + self.gamma * np.amax(model_pred[3:6])
target3 = reward + self.gamma * np.amax(model_pred[6:9])
target4 = reward + self.gamma * np.amax(model_pred[9:12])
target5 = reward + self.gamma * np.amax(model_pred[12:15])
target6 = reward + self.gamma * np.amax(model_pred[15:18])
target7 = reward + self.gamma * np.amax(model_pred[18:21])
target8 = reward + self.gamma * np.amax(model_pred[21:23])
target_f =[]
if(switched==True):
target_f = model_switch.predict([state["image"],state["joints"]])
if(switched==False):
target_f = model.predict([state["image"],state["joints"]])
target_f[0][action[0]] = target1
target_f[0][action[1]] = target2
target_f[0][action[2]] = target3
target_f[0][action[3]] = target4
target_f[0][action[4]] = target5
target_f[0][action[5]] = target6
target_f[0][action[6]] = target7
target_f[0][action[7]+21] = target8 #was making a major mistake for this variable responsible for switching
states_images=np.vstack((states_images,state["image"]))
states_joints=np.vstack((states_joints,state["joints"]))
target_fs=np.vstack((target_fs,target_f))
print("#####################")
print("Please wait, training model "+self.view_state)
print("#####################")
if(self.view_state=="head"):
self.head_history=self.model_head.fit([states_images[1:],states_joints[1:]], target_fs[1:], epochs=1, verbose=2)#One minibatch update
self.model_head.save_weights("model_weights_head.h5")
self.target_model_head.save_weights("target_model_weights_head.h5")
if(self.view_state=="hand"):
self.hand_history=self.model_hand.fit([states_images[1:],states_joints[1:]], target_fs[1:], epochs=1, verbose=2)#One minibatch update
self.model_hand.save_weights("model_weights_hand.h5")
self.target_model_hand.save_weights("target_model_weights_hand.h5")
# serialize weights to HDF5
print("Saved model to disk")
if self.epsilon > self.epsilon_min:
self.epsilon *= self.epsilon_decay
if __name__ == "__main__":
env= sawyer()
#rospy.spin()
num_joints=7
outputs=7*3#Increase, decrease by 0.1 or remain same for each of the joints
agent = DQNAgent(env,outputs,2)
agent.evaluate=True
episodes=10000
# Iterate the game
for e in range(episodes):
# reset state in the beginning of each game
state = env.reset()
        for time_t in range(20): # gives the robot 20 tries to keep moving the arm towards the goal
state["image"]=np.reshape(state["image"], [1, 1,agent.imsize,agent.imsize])
action,values1,values2,message = agent.act(state) #batch_size=1, num_channels=1
actions=model_pred_to_robot_acts(action,agent.evaluate)
switch=action[-1]
actions.append(switch)
switched=False
if(switch==0):
print("Using head camera ")
if(agent.view_state=="hand"):
switched=True #No experience is stored for switching events
agent.view_state="head"
if(switch==1):
print("Using hand camera ")
if(agent.view_state=="head"):
switched=True
agent.view_state="hand"
#switch self.model here
print("Sending joint inc. actions to robot ",actions)
next_state, reward, done, performance1, performance2 = env.step(actions)
#writing the reward history
writelog(reward,done,performance1,performance2,values1,values2,message)
state["image"]=np.reshape(state["image"], [1, 1, agent.imsize,agent.imsize])
next_state["image"]=np.reshape(next_state["image"], [1, 1, agent.imsize,agent.imsize])
if(agent.evaluate==False):
agent.remember(state, action, reward, next_state, done, switched)
# make next_state the new current state for the next frame.
state = copy.copy(next_state)
# done becomes True when the game ends
# ex) The agent drops the pole
if done:
# print the score and break out of the loop
report_stats(e, episodes, time_t,agent.num_head_train,agent.memory_head,agent.head_history,agent.num_hand_train,agent.memory_hand,agent.hand_history)
break
# train the agent with the experience of the episode
if(agent.evaluate==False):
if(agent.cur_mem_head-agent.prev_mem_head>32):
agent.prev_mem_head=copy.copy(agent.cur_mem_head)
cur_state=copy.copy(agent.view_state)
agent.view_state="head"
agent.replay(32)
agent.target_train("head")
agent.view_state=copy.copy(cur_state)
agent.num_head_train+=1
if(agent.cur_mem_hand-agent.prev_mem_hand>32):
agent.prev_mem_hand=copy.copy(agent.cur_mem_hand)
cur_state=copy.copy(agent.view_state)
agent.view_state="hand"
agent.replay(32)
agent.target_train("hand")
agent.view_state=copy.copy(cur_state)
agent.num_hand_train+=1
| null |
switching_dqn/dqn_adv.py
|
dqn_adv.py
|
py
| 20,250 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "keras.backend.set_image_dim_ordering",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "keras.backend",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "numpy.random.seed",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "csv.writer",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "collections.deque",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "collections.deque",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "pickle.load",
"line_number": 118,
"usage_type": "call"
},
{
"api_name": "pickle.load",
"line_number": 120,
"usage_type": "call"
},
{
"api_name": "keras.models.Sequential",
"line_number": 161,
"usage_type": "call"
},
{
"api_name": "keras.layers.Input",
"line_number": 162,
"usage_type": "call"
},
{
"api_name": "keras.layers.Input",
"line_number": 163,
"usage_type": "call"
},
{
"api_name": "keras.layers.convolutional.Conv2D",
"line_number": 164,
"usage_type": "call"
},
{
"api_name": "keras.constraints.maxnorm",
"line_number": 164,
"usage_type": "call"
},
{
"api_name": "keras.layers.convolutional.Conv2D",
"line_number": 165,
"usage_type": "call"
},
{
"api_name": "keras.constraints.maxnorm",
"line_number": 165,
"usage_type": "call"
},
{
"api_name": "keras.layers.convolutional.MaxPooling2D",
"line_number": 166,
"usage_type": "call"
},
{
"api_name": "keras.layers.convolutional.Conv2D",
"line_number": 167,
"usage_type": "call"
},
{
"api_name": "keras.constraints.maxnorm",
"line_number": 167,
"usage_type": "call"
},
{
"api_name": "keras.layers.convolutional.Conv2D",
"line_number": 168,
"usage_type": "call"
},
{
"api_name": "keras.constraints.maxnorm",
"line_number": 168,
"usage_type": "call"
},
{
"api_name": "keras.layers.convolutional.MaxPooling2D",
"line_number": 169,
"usage_type": "call"
},
{
"api_name": "keras.layers.convolutional.Conv2D",
"line_number": 170,
"usage_type": "call"
},
{
"api_name": "keras.constraints.maxnorm",
"line_number": 170,
"usage_type": "call"
},
{
"api_name": "keras.layers.convolutional.Conv2D",
"line_number": 171,
"usage_type": "call"
},
{
"api_name": "keras.constraints.maxnorm",
"line_number": 171,
"usage_type": "call"
},
{
"api_name": "keras.layers.convolutional.MaxPooling2D",
"line_number": 172,
"usage_type": "call"
},
{
"api_name": "keras.layers.Flatten",
"line_number": 173,
"usage_type": "call"
},
{
"api_name": "keras.layers.Dense",
"line_number": 174,
"usage_type": "call"
},
{
"api_name": "keras.constraints.maxnorm",
"line_number": 174,
"usage_type": "call"
},
{
"api_name": "keras.layers.Dense",
"line_number": 175,
"usage_type": "call"
},
{
"api_name": "keras.constraints.maxnorm",
"line_number": 175,
"usage_type": "call"
},
{
"api_name": "keras.layers.Dense",
"line_number": 178,
"usage_type": "call"
},
{
"api_name": "keras.layers.concatenate",
"line_number": 179,
"usage_type": "call"
},
{
"api_name": "keras.layers.Dense",
"line_number": 180,
"usage_type": "call"
},
{
"api_name": "keras.layers.Dense",
"line_number": 181,
"usage_type": "call"
},
{
"api_name": "keras.layers.Dense",
"line_number": 183,
"usage_type": "call"
},
{
"api_name": "keras.layers.concatenate",
"line_number": 184,
"usage_type": "call"
},
{
"api_name": "keras.layers.Dense",
"line_number": 185,
"usage_type": "call"
},
{
"api_name": "keras.layers.Dense",
"line_number": 186,
"usage_type": "call"
},
{
"api_name": "keras.layers.Dense",
"line_number": 188,
"usage_type": "call"
},
{
"api_name": "keras.layers.concatenate",
"line_number": 189,
"usage_type": "call"
},
{
"api_name": "keras.layers.Dense",
"line_number": 190,
"usage_type": "call"
},
{
"api_name": "keras.layers.Dense",
"line_number": 191,
"usage_type": "call"
},
{
"api_name": "keras.layers.Dense",
"line_number": 193,
"usage_type": "call"
},
{
"api_name": "keras.layers.concatenate",
"line_number": 194,
"usage_type": "call"
},
{
"api_name": "keras.layers.Dense",
"line_number": 195,
"usage_type": "call"
},
{
"api_name": "keras.layers.Dense",
"line_number": 196,
"usage_type": "call"
},
{
"api_name": "keras.layers.Dense",
"line_number": 198,
"usage_type": "call"
},
{
"api_name": "keras.layers.concatenate",
"line_number": 199,
"usage_type": "call"
},
{
"api_name": "keras.layers.Dense",
"line_number": 200,
"usage_type": "call"
},
{
"api_name": "keras.layers.Dense",
"line_number": 201,
"usage_type": "call"
},
{
"api_name": "keras.layers.Dense",
"line_number": 203,
"usage_type": "call"
},
{
"api_name": "keras.layers.concatenate",
"line_number": 204,
"usage_type": "call"
},
{
"api_name": "keras.layers.Dense",
"line_number": 205,
"usage_type": "call"
},
{
"api_name": "keras.layers.Dense",
"line_number": 206,
"usage_type": "call"
},
{
"api_name": "keras.layers.Dense",
"line_number": 208,
"usage_type": "call"
},
{
"api_name": "keras.layers.concatenate",
"line_number": 209,
"usage_type": "call"
},
{
"api_name": "keras.layers.Dense",
"line_number": 210,
"usage_type": "call"
},
{
"api_name": "keras.layers.Dense",
"line_number": 211,
"usage_type": "call"
},
{
"api_name": "keras.layers.Dense",
"line_number": 213,
"usage_type": "call"
},
{
"api_name": "keras.layers.concatenate",
"line_number": 214,
"usage_type": "call"
},
{
"api_name": "keras.layers.Dense",
"line_number": 215,
"usage_type": "call"
},
{
"api_name": "keras.layers.Dense",
"line_number": 216,
"usage_type": "call"
},
{
"api_name": "keras.layers.concatenate",
"line_number": 218,
"usage_type": "call"
},
{
"api_name": "keras.models.Model",
"line_number": 219,
"usage_type": "call"
},
{
"api_name": "keras.backend.cast",
"line_number": 227,
"usage_type": "call"
},
{
"api_name": "keras.backend",
"line_number": 227,
"usage_type": "name"
},
{
"api_name": "keras.optimizers.RMSprop",
"line_number": 241,
"usage_type": "call"
},
{
"api_name": "pickle.dump",
"line_number": 258,
"usage_type": "call"
},
{
"api_name": "pickle.dump",
"line_number": 262,
"usage_type": "call"
},
{
"api_name": "numpy.random.rand",
"line_number": 267,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 267,
"usage_type": "attribute"
},
{
"api_name": "numpy.random.randint",
"line_number": 271,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 271,
"usage_type": "attribute"
},
{
"api_name": "numpy.random.randint",
"line_number": 275,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 275,
"usage_type": "attribute"
},
{
"api_name": "numpy.concatenate",
"line_number": 276,
"usage_type": "call"
},
{
"api_name": "numpy.argmax",
"line_number": 286,
"usage_type": "call"
},
{
"api_name": "numpy.argmax",
"line_number": 290,
"usage_type": "call"
},
{
"api_name": "random.sample",
"line_number": 300,
"usage_type": "call"
},
{
"api_name": "random.sample",
"line_number": 306,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 311,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 312,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 313,
"usage_type": "call"
},
{
"api_name": "numpy.amax",
"line_number": 325,
"usage_type": "call"
},
{
"api_name": "numpy.amax",
"line_number": 326,
"usage_type": "call"
},
{
"api_name": "numpy.amax",
"line_number": 327,
"usage_type": "call"
},
{
"api_name": "numpy.amax",
"line_number": 328,
"usage_type": "call"
},
{
"api_name": "numpy.amax",
"line_number": 329,
"usage_type": "call"
},
{
"api_name": "numpy.amax",
"line_number": 330,
"usage_type": "call"
},
{
"api_name": "numpy.amax",
"line_number": 331,
"usage_type": "call"
},
{
"api_name": "numpy.amax",
"line_number": 332,
"usage_type": "call"
},
{
"api_name": "numpy.vstack",
"line_number": 346,
"usage_type": "call"
},
{
"api_name": "numpy.vstack",
"line_number": 347,
"usage_type": "call"
},
{
"api_name": "numpy.vstack",
"line_number": 348,
"usage_type": "call"
},
{
"api_name": "sawyer.sawyer",
"line_number": 365,
"usage_type": "call"
},
{
"api_name": "numpy.reshape",
"line_number": 377,
"usage_type": "call"
},
{
"api_name": "numpy.reshape",
"line_number": 400,
"usage_type": "call"
},
{
"api_name": "numpy.reshape",
"line_number": 401,
"usage_type": "call"
},
{
"api_name": "copy.copy",
"line_number": 405,
"usage_type": "call"
},
{
"api_name": "copy.copy",
"line_number": 415,
"usage_type": "call"
},
{
"api_name": "copy.copy",
"line_number": 416,
"usage_type": "call"
},
{
"api_name": "copy.copy",
"line_number": 420,
"usage_type": "call"
},
{
"api_name": "copy.copy",
"line_number": 423,
"usage_type": "call"
},
{
"api_name": "copy.copy",
"line_number": 424,
"usage_type": "call"
},
{
"api_name": "copy.copy",
"line_number": 428,
"usage_type": "call"
}
] |
218689858
|
# Miro Community - Easiest way to make a video website
#
# Copyright (C) 2011, 2012 Participatory Culture Foundation
#
# Miro Community is free software: you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or (at your
# option) any later version.
#
# Miro Community is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Miro Community. If not, see <http://www.gnu.org/licenses/>.
import sys
import time
from django.core.management.base import BaseCommand
from mirocommunity_saas import models, tiers
class Command(BaseCommand):
def handle(self, *args, **options):
self.handle_check_for_invalid_ipn_state()
self.handle_site_settings_emails()
def handle_site_settings_emails(self):
column2template = {
'video_allotment_warning_sent': (
'mirocommunity_saas/tiers_emails/video_allotment.txt', 'Upgrade your Miro Community site to store more video'),
'free_trial_warning_sent': (
'mirocommunity_saas/tiers_emails/free_trial_warning_sent.txt', 'Only five more days left in your Miro Community free trial'),
'inactive_site_warning_sent': (
'mirocommunity_saas/tiers_emails/inactive_site_warning_sent.txt', 'Your Miro Community site has been inactive, come back!')
}
tier_info = models.TierInfo.objects.get_current()
for tier_info_column in tiers.nightly_warnings():
# Save a note saying we sent the notice
setattr(tier_info, tier_info_column, True)
tier_info.save()
template_name, subject = column2template[tier_info_column]
tiers.send_tiers_related_email(subject, template_name, tier_info)
def handle_check_for_invalid_ipn_state(self):
# Is the site in a paid tier?
tier_info = models.TierInfo.objects.get_current()
# First of all: If the site is 'subsidized', then we skip the
# rest of these checks.
if tier_info.current_paypal_profile_id == 'subsidized':
return
        # Okay. Well, the point of this is to check if the site is in a
# paid tier but should not be.
in_paid_tier = (tier_info.tier_name and
tier_info.tier_name != 'basic')
# Is the free trial used up?
# Note that premium sites have *not* used up their free trial.
if (in_paid_tier and
tier_info.free_trial_available and
tier_info.tier_name == 'max'):
print >> sys.stderr, (
"UM YIKES, I THOUGHT THE SITE SHOULD BE SUBSIDIZED",
tier_info.site_settings.site.domain)
return
# Is there something stored in the
# tier_info.current_paypal_profile_id? If so, great.
if (in_paid_tier and
not tier_info.current_paypal_profile_id and
not tier_info.free_trial_available):
# So, one reason this could happen is that PayPal is being really
# slow to send us data over PDT.
#
# Maybe that's what's happening. Let's sleep for a few seconds.
time.sleep(10)
# Then re-do the check. If it still looks bad, then print a warning.
if (in_paid_tier and
not tier_info.current_paypal_profile_id and
not tier_info.free_trial_available):
print >> sys.stderr, ('This site looks delinquent: ',
tier_info.site_settings.site.domain)
| null |
mirocommunity_saas/management/commands/nightly_tiers_events.py
|
nightly_tiers_events.py
|
py
| 3,905 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "django.core.management.base.BaseCommand",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "mirocommunity_saas.models.TierInfo.objects.get_current",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "mirocommunity_saas.models.TierInfo",
"line_number": 40,
"usage_type": "attribute"
},
{
"api_name": "mirocommunity_saas.models",
"line_number": 40,
"usage_type": "name"
},
{
"api_name": "mirocommunity_saas.tiers.nightly_warnings",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "mirocommunity_saas.tiers",
"line_number": 42,
"usage_type": "name"
},
{
"api_name": "mirocommunity_saas.tiers.send_tiers_related_email",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "mirocommunity_saas.tiers",
"line_number": 48,
"usage_type": "name"
},
{
"api_name": "mirocommunity_saas.models.TierInfo.objects.get_current",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "mirocommunity_saas.models.TierInfo",
"line_number": 52,
"usage_type": "attribute"
},
{
"api_name": "mirocommunity_saas.models",
"line_number": 52,
"usage_type": "name"
},
{
"api_name": "sys.stderr",
"line_number": 70,
"usage_type": "attribute"
},
{
"api_name": "time.sleep",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "sys.stderr",
"line_number": 90,
"usage_type": "attribute"
}
] |
173122570
|
'''
Created on 28 Oct 2020
@author: 612563313
'''
import pytest
import time
from selenium.webdriver.common.by import By
from AppTests.test_InitWebdriver import BaseTest
from Config.config import SetupConfiguration as scd
from AppPages.aFPLaunch import LaunchFP
from AppTestData.LoginPage_TestData import LoginPage_TestData as lptd
from AppTestData.HomePage_TestData import HomePage_TestData as hptd
class XTest_LaunchFP(BaseTest):
def test_ValidateUserLogin(self):
self.LaunchFP = LaunchFP(self.driver)
self.LaunchFP.LoginToFP(scd.UsrName,scd.UsrPwd)
time.sleep(3)
HmePgTitle = self.LaunchFP.getCurrentPgTitle()
self.LaunchFP.GetScreenShot(lptd.SSpath, hptd.HP_ssName)
exp_LogInUsr = scd.UsrName[:-7].replace('.', ' ').title()
locVal = "//a[contains(text(),'"+exp_LogInUsr+"')]"
LoggedInUsr = self.driver.find_element(By.XPATH,locVal).text
assert HmePgTitle == hptd.HP_title
assert LoggedInUsr == exp_LogInUsr
print('LoggedInUsr:{} matches exp_LogInUsr:{} '.format(LoggedInUsr,exp_LogInUsr))
@pytest.mark.parametrize("Usr,Pwd",
[ pytest.param(scd.UsrName,scd.UsrPwd, id = "valid creds"),
pytest.param("Usrabc","Pwdqdfe",marks = pytest.mark.xfail,id="invalid creds")]
)
def test_ValidateUserLogin_withMultipleInput(self,Usr,Pwd):
self.LaunchFP = LaunchFP(self.driver)
self.LaunchFP.LoginToFP(Usr,Pwd)
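# Note: the second pytest.param above is marked xfail, so the invalid-credentials
# case is expected to fail the login; pytest reports it as XFAIL, not an error.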
| null |
com.myportaltest.rivusfleet/AppTests/test_LaunchFP.py
|
test_LaunchFP.py
|
py
| 1,626 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "AppTests.test_InitWebdriver.BaseTest",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "AppPages.aFPLaunch.LaunchFP",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "Config.config.SetupConfiguration.UsrName",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "Config.config.SetupConfiguration",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "Config.config.SetupConfiguration.UsrPwd",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "time.sleep",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "AppTestData.LoginPage_TestData.LoginPage_TestData.SSpath",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "AppTestData.LoginPage_TestData.LoginPage_TestData",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "AppTestData.HomePage_TestData.HomePage_TestData.HP_ssName",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "AppTestData.HomePage_TestData.HomePage_TestData",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "Config.config.SetupConfiguration.UsrName",
"line_number": 26,
"usage_type": "attribute"
},
{
"api_name": "Config.config.SetupConfiguration",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "selenium.webdriver.common.by.By.XPATH",
"line_number": 28,
"usage_type": "attribute"
},
{
"api_name": "selenium.webdriver.common.by.By",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "AppTestData.HomePage_TestData.HomePage_TestData.HP_title",
"line_number": 29,
"usage_type": "attribute"
},
{
"api_name": "AppTestData.HomePage_TestData.HomePage_TestData",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "AppPages.aFPLaunch.LaunchFP",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "pytest.mark.parametrize",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "pytest.mark",
"line_number": 33,
"usage_type": "attribute"
},
{
"api_name": "pytest.param",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "Config.config.SetupConfiguration.UsrName",
"line_number": 34,
"usage_type": "attribute"
},
{
"api_name": "Config.config.SetupConfiguration",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "Config.config.SetupConfiguration.UsrPwd",
"line_number": 34,
"usage_type": "attribute"
},
{
"api_name": "pytest.param",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "pytest.mark",
"line_number": 35,
"usage_type": "attribute"
}
] |
216602983
|
import date_format as df
from rich.console import Console
from rich import print
from rich.table import Table, Column
from pprint import pprint
import json
# global variable calender_id
calendar_id = '[email protected]'
def get_slot_attendee_names(event):
"""[Takes in an event and returns whos the patient and whos the clinician]
Args:
        event ([dictionary]): [dictionary where we'll check who's the clinician and who's the patient]
Returns:
[tuple of strings]: [patient and clinician usernames]
"""
events_list = event['attendees']
if len(events_list) == 1:
clinician_dict = events_list[0]
clinician = clinician_dict['displayName']
patient = ''
else:
clinician_dict = events_list[0]
patient_dict = events_list[1]
clinician = clinician_dict['displayName']
patient = patient_dict['displayName']
return clinician, patient
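# Example (illustrative): an event whose 'attendees' list has two entries
# returns (clinician displayName, patient displayName); with a single entry
# the patient comes back as an empty string.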
def print_events(start, event, description):
"""[Prints out the a code clinic slot in table format using rich module (check imports)]
Args:
start ([string]): [start time of the event]
event ([dictionary]): [the event that has to be printed out as a slot]
description ([type]): [The description of what the event does]
"""
console = Console()
table = Table(show_header=True, header_style="bold cyan")
table.add_column("Date", style="dim", width=18)
table.add_column("Summary", style="dim", width=25)
table.add_column("Description", style="dim", width=30)
table.add_column("ID", style="dim", width=30)
table.add_column("Attendees", style="dim", width=15)
clinician, patient = get_slot_attendee_names(event)
table.add_row(start, event['summary'], description, event['id'], f"{clinician}\n{patient}")
console.print(table)
def get_events_for_next_7_days_to_delete(username, service):
"""[This function gets all the slots that the particular user has created for the next 7 days ]
Args:
username ([string]): [student username]
service ([object]): [the api object that allows us to connect to google calenders]
Return:
events ([list]): list of dictionaries, with each dictionary being a google cal event.
count ([int]): either 1 or 0, returns 1 if there where events returned and 0 if there arent any events.
"""
event_list = {"events" : []}
print("\nThese are your upcoming slots for the next 7 days: \n")
time = df.get_current_and_7_days_date_and_time_in_RFC3339()
events_result = service.events().list(calendarId=calendar_id, timeMin=time[0],
singleEvents=True, timeMax=time[1],
orderBy='startTime').execute()
events = events_result.get('items', [])
if not events:
print('No upcoming events found.')
count = 0
for event in events:
start = df.format_time_to_make_readable(event)
description = event['description']
if event['summary'] == f'{username} - Code Clinic':
count = 1
print_events(start, event, description)
event_list['events'].append({event['id'] : event})
    with open('functions/data_files/events.json', 'w+') as outfile:
        json.dump(event_list, outfile, sort_keys=True, indent=4)
return events, count
def simple_get_events_without_printing_anything(username, service):
time = df.get_current_and_7_days_date_and_time_in_RFC3339()
events_result = service.events().list(calendarId=calendar_id, timeMin=time[0],
singleEvents=True, timeMax=time[1],
orderBy='startTime').execute()
return events_result.get('items', [])
def get_all_code_clinic_slots_to_signup(service, username):
"""[This function gets all the slots available to the student to sign up to.
it checks that the length of the list with all the attendees is 1 and that they are not the
one that created the slot in the first place.]
Args:
username ([string]): [student username]
service ([object]): [the api object that allows us to connect to google calenders]
Return:
events ([list]): list of dictionaries, with each dictionary being a google cal event.
count ([int]): either 1 or 0, returns 1 if there where events returned and 0 if there arent any events.
"""
print("\nThese are all the available slots you can choose from.\n")
events = get_events_from_service(service)
count = 0
for event in events:
start = df.format_time_to_make_readable(event)
description = event['description']
items_list = event['attendees']
if len(items_list) == 1 and username not in event['summary']:
count = 1
print_events(start, event, description)
return events, count
def get_all_code_clinic_slots_to_signup_without_printing_anything(service, username):
"""
[Does everything above function does, without printing anything in the process.]
Args:
username ([string]): [student username]
service ([object]): [the api object that allows us to connect to google calenders]
Return:
events ([list]): list of dictionaries, with each dictionary being a google cal event.
count ([int]): either 1 or 0, returns 1 if there where events returned and 0 if there arent any events.
"""
events = get_events_from_service(service)
count = 0
for event in events:
items_list = event['attendees']
if len(items_list) == 1 and username not in event['summary']:
count = 1
return events, count
def get_events_from_service(service):
"""
[Uses service object to get list of all events found on calender]
Args:
service ([object]): [the object that allows us to connect to the google calender api]
Returns:
[list]: [list of dictionaries with each dictionary being a calender event]
"""
time = df.get_current_and_7_days_date_and_time_in_RFC3339()
events_result = service.events().list(calendarId=calendar_id, timeMin=time[0],
singleEvents=True, timeMax=time[1],
orderBy='startTime').execute()
events = events_result.get('items', [])
if not events:
print('No upcoming events found.')
return events
def get_all_code_clinic_slots_to_delete(service, username):
"""
[Gets all slots student has signed up to as a patient.]
Args:
username ([string]): [student username]
service ([object]): [the api object that allows us to connect to google calenders]
Return:
events ([list]): list of dictionaries, with each dictionary being a google cal event.
count ([int]): either 1 or 0, returns 1 if there where events returned and 0 if there arent any events.
"""
print("\nThese are the clinics you've signed up for: \n")
time = df.get_current_and_7_days_date_and_time_in_RFC3339()
events_result = service.events().list(calendarId=calendar_id, timeMin=time[0],
singleEvents=True, timeMax=time[1],
orderBy='startTime').execute()
events = events_result.get('items', [])
count = 0
for event in events:
start = df.format_time_to_make_readable(event)
description = event['description']
items_list = event['attendees']
if len(items_list) == 2:
items_dict = items_list[0]
items_dict2 = items_list[1]
else:
items_dict = items_list[0]
items_dict2 = {'displayName': 'placeholder'}
if (items_dict['displayName'] == username or items_dict2['displayName'] == username) and username not in event['summary']:
count = 1
print_events(start, event, description)
return events, count
def get_all_code_clinic_slots_to_delete_without_printing(service, username):
"""
[Does everything above function does, without printing anything in the process.]
Args:
username ([string]): [student username]
service ([object]): [the api object that allows us to connect to google calenders]
Return:
events ([list]): list of dictionaries, with each dictionary being a google cal event.
count ([int]): either 1 or 0, returns 1 if there where events returned and 0 if there arent any events.
"""
events = get_events_from_service(service)
count = 0
for event in events:
items_list = event['attendees']
if len(items_list) == 2:
items_dict = items_list[0]
items_dict2 = items_list[1]
else:
items_dict = items_list[0]
items_dict2 = {'displayName': 'placeholder'}
if (items_dict['displayName'] == username or items_dict2['displayName'] == username) and username not in event['summary']:
count = 1
return events, count
def get_username():
f = open("username_file", "r")
username_list = (f.readlines())
f.close()
username = username_list[1]
return username
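# --- usage sketch (not part of the original module) --------------------------
# A minimal, hypothetical example of wiring these helpers together; the
# `build_service()` helper is assumed (it would perform the OAuth flow of
# google-api-python-client and return a Calendar service object):
#
#   service = build_service()
#   username = get_username()
#   events, count = get_all_code_clinic_slots_to_signup(service, username)
#   if count == 0:
#       print('No open slots to sign up for this week.')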
| null |
functions/get_events.py
|
get_events.py
|
py
| 9,464 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "rich.console.Console",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "rich.table.Table",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "rich.print",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "date_format.get_current_and_7_days_date_and_time_in_RFC3339",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "rich.print",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "date_format.format_time_to_make_readable",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "json.dump",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "date_format.get_current_and_7_days_date_and_time_in_RFC3339",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "rich.print",
"line_number": 106,
"usage_type": "call"
},
{
"api_name": "date_format.format_time_to_make_readable",
"line_number": 110,
"usage_type": "call"
},
{
"api_name": "date_format.get_current_and_7_days_date_and_time_in_RFC3339",
"line_number": 148,
"usage_type": "call"
},
{
"api_name": "rich.print",
"line_number": 154,
"usage_type": "call"
},
{
"api_name": "rich.print",
"line_number": 169,
"usage_type": "call"
},
{
"api_name": "date_format.get_current_and_7_days_date_and_time_in_RFC3339",
"line_number": 170,
"usage_type": "call"
},
{
"api_name": "date_format.format_time_to_make_readable",
"line_number": 177,
"usage_type": "call"
}
] |
291496527
|
#!/usr/bin/env python3
import os
import sympy
import mpmath as sm
import scipy.signal
import matplotlib.pyplot as plt
# precision
sm.mp.prec = 512
def daubechies(N):
# p vanishing moments.
p = int(N/2)
# make polynomial; see Mallat, 7.96
Py = [sm.binomial(p-1+k, k) for k in reversed(range(p))]
# get polynomial roots y[k]
Py_roots = sm.mp.polyroots(Py, maxsteps=200, extraprec=64)
z = []
for yk in Py_roots:
# substitute y = -1/4z + 1/2 - 1/4/z to factor f(y) = y - y[k]
# We've found the roots of P(y). We need the roots of Q(z) = P((1-z-1/z)/4)
f = [sm.mpf('-1/4'), sm.mpf('1/2') - yk, sm.mpf('-1/4')]
# get polynomial roots z[k]
z += sm.mp.polyroots(f)
# make polynomial using the roots within unit circle
h0z = sm.sqrt('2')
for zk in z:
if sm.fabs(zk) < 1:
# This calculation is superior to Mallat, (equation between 7.96 and 7.97)
h0z *= sympy.sympify('(z-zk)/(1-zk)').subs('zk',zk)
# adapt vanishing moments
hz = (sympy.sympify('(1+z)/2')**p*h0z).expand()
# get scaling coefficients
return [sympy.re(hz.coeff('z',k)) for k in reversed(range(p*2))]
def main():
for p in range(1,30):
# get dbN coeffients
dbN = daubechies(2*p)
# write coeffients
filename = os.path.join(os.getcwd(), 'coefficients/daub' + str(2*p).zfill(2) +'_coefficients.txt')
print("Writing file {}".format(filename))
with open(filename, 'w+') as f:
f.write('# Daubechies ' + str(2*p) + ' scaling coefficients\n')
f.write(" else if constexpr (N == " + str(2*p) + ")\n {\n")
f.write(" if constexpr (std::is_same<float, Real>::value) {\n return {")
for i, h in enumerate(dbN):
f.write(sm.nstr(h, 9) + 'f, ')
f.write("};\n }\n")
f.write(" else if constexpr (std::is_same<double, Real>::value) {\n return {")
for i, h in enumerate(dbN):
f.write(sm.nstr(h, 17) + ', ')
f.write("};\n }\n")
f.write(" else if constexpr (std::is_same<long double, Real>::value) {\n return {")
for i, h in enumerate(dbN):
# log2(64) + some leeway
f.write(sm.nstr(h, 22) + 'L, ')
f.write("};\n }\n")
f.write(" #ifdef BOOST_HAS_FLOAT128\n")
f.write(" else if constexpr (std::is_same<boost::multiprecision::float128, Real>::value) {\n return {")
for i, h in enumerate(dbN):
# log10(2**123) + some leeway
f.write(sm.nstr(h, 37) + 'Q,\n ')
f.write("};\n }\n")
f.write(" #endif\n")
f.write(' else { throw std::logic_error("Wavelet transform coefficients for this precision have not been implemented."); }\n')
f.write(" }\n")
# get an approximation of scaling function
        '''x, phi, psi = scipy.signal.cascade(dbN)
        # plot scaling function
        plt.plot(x, phi, 'k')
        plt.grid()
        plt.title('db' + str(2*p) + ' scaling function')
        plt.savefig('scaling_png/daub' + str(2*p).zfill(2) + '_scaling' + '.png')
        plt.clf()
        # plot wavelet
        plt.plot(x, psi, 'k')
        plt.grid()
        plt.title( 'db' + str(2*p) + " wavelet" )
        plt.savefig('wavelet_png/daub' + str(2*p).zfill(2) + '_wavelet' + '.png')
        plt.clf()'''
if __name__ == '__main__':
main()
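# --- sanity-check sketch (not part of the original script) -------------------
# For a filter normalised this way the scaling coefficients sum to sqrt(2)
# (hz evaluates to h0z = sqrt(2) at z = 1), so a quick invariant check is:
#
#   h = daubechies(4)                              # db4: 4 coefficients
#   assert abs(float(sum(h)) - 2 ** 0.5) < 1e-12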
| null |
test.py
|
test.py
|
py
| 3,708 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "mpmath.mp",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "mpmath.binomial",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "mpmath.mp.polyroots",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "mpmath.mp",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "mpmath.mpf",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "mpmath.mp.polyroots",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "mpmath.mp",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_name": "mpmath.sqrt",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "mpmath.fabs",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "sympy.sympify",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "sympy.sympify",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "sympy.re",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 48,
"usage_type": "attribute"
},
{
"api_name": "os.getcwd",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "mpmath.nstr",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "mpmath.nstr",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "mpmath.nstr",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "mpmath.nstr",
"line_number": 73,
"usage_type": "call"
}
] |
491261114
|
# Copyright (C) 2012 Alex Nitz
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# =============================================================================
#
# Preamble
#
# =============================================================================
#
import numpy
import pyopencl
from pycbc.types import zeros, Array
from pyopencl.array import to_device
from pyopencl.array import zeros as pzeros
from pyopencl.tools import get_or_register_dtype, dtype_to_ctype
from pyopencl.elementwise import ElementwiseKernel
from pycbc.scheme import mgr
threshold_op = """
if (i == 0)
bn[0] = 0;
cfloat_t val = in[i];
if ( cfloat_abs(val) > threshold){
int n_w = atomic_add(bn, 1);
outv[n_w] = val;
outl[n_w] = i;
}
"""
threshold_kernel = ElementwiseKernel(mgr.state.context,
" %(tp_in)s *in, %(tp_out1)s *outv, %(tp_out2)s *outl, %(tp_th)s threshold, %(tp_n)s *bn" % {
"tp_in": dtype_to_ctype(numpy.complex64),
"tp_out1": dtype_to_ctype(numpy.complex64),
"tp_out2": dtype_to_ctype(numpy.uint32),
"tp_th": dtype_to_ctype(numpy.float32),
"tp_n": dtype_to_ctype(numpy.uint32),
},
threshold_op,
"getstuff")
n = pzeros(mgr.state.queue, 1, numpy.uint32)
val = pzeros(mgr.state.queue, 4096*256, numpy.complex64)
loc = pzeros(mgr.state.queue, 4096*256, numpy.uint32)
def threshold(series, value):
threshold_kernel(series.data, val, loc, value, n)
n0 = n.get()[0]
return loc[0:n0].get(), val[0:n0].get()
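# --- usage sketch (not part of the original module) ---------------------------
# Hypothetical call with `snr` being a pycbc complex64 series (anything with a
# `.data` pyopencl array no larger than the buffers allocated above):
#
#   locations, values = threshold(snr, 5.5)
#   # `locations` holds sample indices, `values` the complex samples there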
| null |
pycbc/events/threshold_opencl.py
|
threshold_opencl.py
|
py
| 2,286 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "pyopencl.elementwise.ElementwiseKernel",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "pycbc.scheme.mgr.state",
"line_number": 46,
"usage_type": "attribute"
},
{
"api_name": "pycbc.scheme.mgr",
"line_number": 46,
"usage_type": "name"
},
{
"api_name": "pyopencl.tools.dtype_to_ctype",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "numpy.complex64",
"line_number": 48,
"usage_type": "attribute"
},
{
"api_name": "pyopencl.tools.dtype_to_ctype",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "numpy.complex64",
"line_number": 49,
"usage_type": "attribute"
},
{
"api_name": "pyopencl.tools.dtype_to_ctype",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "numpy.uint32",
"line_number": 50,
"usage_type": "attribute"
},
{
"api_name": "pyopencl.tools.dtype_to_ctype",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "numpy.float32",
"line_number": 51,
"usage_type": "attribute"
},
{
"api_name": "pyopencl.tools.dtype_to_ctype",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "numpy.uint32",
"line_number": 52,
"usage_type": "attribute"
},
{
"api_name": "pyopencl.array.zeros",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "pycbc.scheme.mgr.state",
"line_number": 57,
"usage_type": "attribute"
},
{
"api_name": "pycbc.scheme.mgr",
"line_number": 57,
"usage_type": "name"
},
{
"api_name": "numpy.uint32",
"line_number": 57,
"usage_type": "attribute"
},
{
"api_name": "pyopencl.array.zeros",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "pycbc.scheme.mgr.state",
"line_number": 58,
"usage_type": "attribute"
},
{
"api_name": "pycbc.scheme.mgr",
"line_number": 58,
"usage_type": "name"
},
{
"api_name": "numpy.complex64",
"line_number": 58,
"usage_type": "attribute"
},
{
"api_name": "pyopencl.array.zeros",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "pycbc.scheme.mgr.state",
"line_number": 59,
"usage_type": "attribute"
},
{
"api_name": "pycbc.scheme.mgr",
"line_number": 59,
"usage_type": "name"
},
{
"api_name": "numpy.uint32",
"line_number": 59,
"usage_type": "attribute"
}
] |
479122583
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Nov 23 23:27:29 2018
@author: vivekmishra
"""
import os
os.chdir('/Users/vivekmishra/Desktop/USC/599-DSS/StanceDataset')
import requests
import pandas as pd
import re
import string
import unicodedata
import seaborn as sns
import matplotlib.pyplot as plt
#import emoji
from nltk.stem import PorterStemmer
from nltk.corpus import words
import preprocessor as p
from senti import senti
import nltk
nltk.download('words')
from nltk.tokenize.toktok import ToktokTokenizer
tokenizer = ToktokTokenizer()
nltk.download('stopwords')
stopword_list = nltk.corpus.stopwords.words('english')
import spacy
nlp = spacy.load('en', parse=True, tag=True, entity=True)
from sklearn.feature_extraction.text import TfidfVectorizer
df = pd.read_csv('train_ch.csv')
df_test = pd.read_csv('test.csv')
df = df.append(df_test)
tweet = list(df['Tweet'])
def remove_hashtag(input_text):
return re.sub(r'(\s)#\w+', '', input_text)
def strip_links(text):
link_regex = re.compile('((https?):((//)|(\\\\))+([\w\d:#@%/;$()~_?\+-=\\\.&](#!)?)*)', re.DOTALL)
links = re.findall(link_regex, text)
for link in links:
text = text.replace(link[0], ', ')
return text
def remove_at(input_text):
return re.sub(r'(\s)@\w+', '', input_text)
def preproc(sent):
return p.clean(sent)
def lemmatize_text(text):
text = nlp(text)
text = ' '.join([word.lemma_ if word.lemma_ != '-PRON-' else word.text for word in text])
return text
def remove_special_characters(text, remove_digits=False):
    # note: 'a-zA-Z' (the earlier 'A-z' range also matched [ \ ] ^ _ `)
    pattern = r'[^a-zA-Z0-9\s]' if not remove_digits else r'[^a-zA-Z\s]'
text = re.sub(pattern, ' ', text)
text = unicodedata.normalize('NFKD', text).encode('ascii', 'ignore').decode('utf-8', 'ignore')
return text
def remove_stopwords(text, is_lower_case=False):
tokens = tokenizer.tokenize(text)
tokens = [token.strip() for token in tokens]
whitelist = ["n't","not", "no"]
if is_lower_case:
filtered_tokens = [token for token in tokens if (token not in stopword_list or token in whitelist)]
else:
filtered_tokens = [token for token in tokens if (token.lower() not in stopword_list or token in whitelist)]
filtered_text = ' '.join(filtered_tokens)
return filtered_text
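# illustrative example (assumed behaviour; negations stay because of the
# whitelist above):
#   remove_stopwords("this is not good")  ->  "not good"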
#counter = 0
#for sent in tweet:
# tweet[counter]=remove_hashtag(sent)
# counter += 1
#counter = 0
#for sent in tweet:
# print(sent)
# tweet[counter]=remove_at(sent)
# counter += 1
#counter = 0
#for sent in tweet:
# tweet[counter]=strip_links(sent)
# counter += 1
counter = 0
for sent in tweet:
tweet[counter]=preproc(sent)
counter += 1
counter = 0
for sent in tweet:
tweet[counter]=remove_special_characters(sent,
remove_digits=True)
counter += 1
counter = 0
for sent in tweet:
tweet[counter]=remove_stopwords(sent)
counter += 1
counter = 0
for sent in tweet:
tweet[counter]=lemmatize_text(sent)
counter += 1
vectorizer = TfidfVectorizer(strip_accents='unicode')
tweet_mat = vectorizer.fit_transform(tweet)
tweet_mat = tweet_mat.toarray()
tweet_mat = pd.DataFrame(tweet_mat)
#Features
senti_obj = senti()
df['senti_tweet'] = df['Tweet'].apply(lambda x : senti_obj.main(x))
#Define target
target = list(df['Stance'])
counter = 0
for val in target:
if val == 'AGAINST':
target[counter] = 0
elif val == 'FAVOR':
target[counter] = 1
else:
target[counter] = 2
counter += 1
tweet_mat['target'] = target
#Model
import xgboost as xgb
y= tweet_mat['target'].values
X = tweet_mat.drop(['target'],axis=1).values
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X,y,test_size=.1, random_state=42)
dtrain = xgb.DMatrix(X_train, label=y_train)
dtest = xgb.DMatrix(X_test, label=y_test)
#default parameters
params = {
'max_depth':6,
'min_child_weight': 1,
'eta':.3,
'subsample': 1,
'colsample_bytree': 1,
# Other parameters
'objective':'multi:softprob',
}
params['eval_metric'] = "merror"
params['num_class'] = 3
num_boost_round = 999
#Hyperparameter tuning
gridsearch_params = [
(max_depth, min_child_weight)
for max_depth in range(6,8)
for min_child_weight in range(4,6)
]
min_merror = float("Inf")
best_params = None
for max_depth, min_child_weight in gridsearch_params:
print("CV with max_depth={}, min_child_weight={}".format(
max_depth,
min_child_weight))
# Update our parameters
params['max_depth'] = max_depth
params['min_child_weight'] = min_child_weight
# Run CV
cv_results = xgb.cv(
params,
dtrain,
num_boost_round=num_boost_round,
seed=42,
nfold=3,
metrics={'merror'},
early_stopping_rounds=10
)
# Update best MError
mean_merror = cv_results['test-merror-mean'].min()
boost_rounds = cv_results['test-merror-mean'].argmin()
print("\tMerror {} for {} rounds".format(mean_merror, boost_rounds))
if mean_merror < min_merror:
min_merror = mean_merror
best_params = (max_depth,min_child_weight)
params['max_depth'] = best_params[0]
params['min_child_weight'] = best_params[1]
#tune subsample,colsample
gridsearch_params = [
(subsample, colsample)
for subsample in [i/10. for i in range(9,11)]
for colsample in [i/10. for i in range(9,11)]
]
min_merror = float("Inf")
best_params = None
for subsample, colsample in reversed(gridsearch_params):
print("CV with subsample={}, colsample={}".format(
subsample,
colsample))
# Update our parameters
params['subsample'] = subsample
params['colsample_bytree'] = colsample
# Run CV
cv_results = xgb.cv(
params,
dtrain,
num_boost_round=num_boost_round,
seed=42,
nfold=3,
metrics={'merror'},
early_stopping_rounds=10
)
# Update best Merror
mean_merror = cv_results['test-merror-mean'].min()
boost_rounds = cv_results['test-merror-mean'].argmin()
print("\tMerror {} for {} rounds".format(mean_merror, boost_rounds))
if mean_merror < min_merror:
min_merror = mean_merror
best_params = (subsample,colsample)
params['subsample'] = best_params[0]
params['colsample_bytree'] = best_params[1]
min_merror = float("Inf")
best_params = None
for eta in [0.5,0.3, 0.03]:
print("CV with eta={}".format(eta))
# Update our parameters
params['eta'] = eta
# Run CV
cv_results = xgb.cv(
params,
dtrain,
num_boost_round=num_boost_round,
seed=42,
nfold=3,
metrics={'merror'},
early_stopping_rounds=10
)
# Update best Merror
mean_merror = cv_results['test-merror-mean'].min()
boost_rounds = cv_results['test-merror-mean'].argmin()
print("\tMerror {} for {} rounds".format(mean_merror, boost_rounds))
if mean_merror < min_merror:
min_merror = mean_merror
best_params = eta
params['eta'] = best_params
model = xgb.train(
params,
dtrain,
num_boost_round=num_boost_round,
evals=[(dtest, "Test")],
early_stopping_rounds=10
)
num_boost_round = model.best_iteration + 1
best_model = xgb.train(
params,
dtrain,
num_boost_round=num_boost_round,
evals=[(dtest, "Test")]
)
best_model.save_model("my_model.model")
loaded_model = xgb.Booster()
loaded_model.load_model("my_model.model")
# And use it for predictions.
loaded_model.predict(dtest)
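# --- evaluation sketch (not part of the original script) ---------------------
# With objective 'multi:softprob', predict() returns one probability per class;
# a minimal conversion to hard labels and accuracy (numpy import assumed):
#
#   import numpy as np
#   proba = loaded_model.predict(dtest)        # shape (n_samples, 3)
#   y_pred = np.argmax(proba, axis=1)
#   print("accuracy:", np.mean(y_pred == y_test))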
#Plots for poster
#df['Stance'].hist(by=df['Target'])
df = df.replace("Climate Change is a Real Concern", value="Climate Change")
df =df.replace("Legalization of Abortion", value="Abortion")
import seaborn as sns
import itertools
#sns.set(style="darkgrid")
ax = sns.countplot(y="Stance", hue="Target", data=df,palette="Paired",orient="v")
plt.lengend(loc="bottom")
fig = ax.get_figure()
fig.savefig("output.png")
palette = itertools.cycle(sns.color_palette("Paired"))
import matplotlib.pyplot as plt
#for i in range(1, 7):
fig = plt.figure()
ax1 = fig.add_subplot(2, 3, 1)
c= next(palette)
sns.distplot(df[df['Target'] == 'Hillary Clinton']['senti_tweet'],label='Clinton', color=c)
ax1.legend()
ax1 = fig.add_subplot(2, 3, 2)
c= next(palette)
sns.distplot(df[df['Target'] == 'Legalization of Abortion']['senti_tweet'],label='Abortion', color=c)
ax1.legend()
ax1 = fig.add_subplot(2, 3, 3)
c= next(palette)
sns.distplot(df[df['Target'] == 'Atheism']['senti_tweet'],label='Atheism', color=c)
ax1.legend()
ax1 = fig.add_subplot(2, 3, 4)
c= next(palette)
sns.distplot(df[df['Target'] == 'Climate Change is a Real Concern']['senti_tweet'],label='Climate', color=c)
ax1.legend()
ax1 = fig.add_subplot(2, 3, 5)
c= next(palette)
sns.distplot(df[df['Target'] == 'Feminist Movement']['senti_tweet'],label='Feminism', color=c)
ax1.legend()
ax1 = fig.add_subplot(2, 3, 6)
c= next(palette)
sns.distplot(df[df['Target'] == 'Donald Trump']['senti_tweet'],label='Trump', color=c)
ax1.legend()
fig.savefig('dist.png')
| null |
StanceDataset/stance.py
|
stance.py
|
py
| 9,256 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "os.chdir",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "nltk.download",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "nltk.tokenize.toktok.ToktokTokenizer",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "nltk.download",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "nltk.corpus.stopwords.words",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "nltk.corpus",
"line_number": 33,
"usage_type": "attribute"
},
{
"api_name": "spacy.load",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "re.sub",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "re.DOTALL",
"line_number": 51,
"usage_type": "attribute"
},
{
"api_name": "re.findall",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "re.sub",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "preprocessor.clean",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "re.sub",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "unicodedata.normalize",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "sklearn.feature_extraction.text.TfidfVectorizer",
"line_number": 125,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 129,
"usage_type": "call"
},
{
"api_name": "senti.senti",
"line_number": 133,
"usage_type": "call"
},
{
"api_name": "sklearn.model_selection.train_test_split",
"line_number": 158,
"usage_type": "call"
},
{
"api_name": "xgboost.DMatrix",
"line_number": 160,
"usage_type": "call"
},
{
"api_name": "xgboost.DMatrix",
"line_number": 161,
"usage_type": "call"
},
{
"api_name": "xgboost.cv",
"line_number": 196,
"usage_type": "call"
},
{
"api_name": "xgboost.cv",
"line_number": 236,
"usage_type": "call"
},
{
"api_name": "xgboost.cv",
"line_number": 268,
"usage_type": "call"
},
{
"api_name": "xgboost.train",
"line_number": 289,
"usage_type": "call"
},
{
"api_name": "xgboost.train",
"line_number": 298,
"usage_type": "call"
},
{
"api_name": "xgboost.Booster",
"line_number": 307,
"usage_type": "call"
},
{
"api_name": "seaborn.countplot",
"line_number": 323,
"usage_type": "call"
},
{
"api_name": "matplotlib.lengend",
"line_number": 324,
"usage_type": "call"
},
{
"api_name": "itertools.cycle",
"line_number": 328,
"usage_type": "call"
},
{
"api_name": "seaborn.color_palette",
"line_number": 328,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 331,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 331,
"usage_type": "name"
},
{
"api_name": "seaborn.distplot",
"line_number": 334,
"usage_type": "call"
},
{
"api_name": "seaborn.distplot",
"line_number": 339,
"usage_type": "call"
},
{
"api_name": "seaborn.distplot",
"line_number": 344,
"usage_type": "call"
},
{
"api_name": "seaborn.distplot",
"line_number": 349,
"usage_type": "call"
},
{
"api_name": "seaborn.distplot",
"line_number": 354,
"usage_type": "call"
},
{
"api_name": "seaborn.distplot",
"line_number": 360,
"usage_type": "call"
}
] |
444608156
|
#!/home/yli11/.conda/envs/py2/bin/python
import sys
import os
p_dir = os.path.dirname(os.path.realpath(__file__)) + "/"
sys.path.append(os.path.abspath(p_dir+"../utils/"))
# from liyc_utils import *
import pandas as pd
import numpy as np
import argparse
import getpass
import datetime
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
import uuid
current_file_base_name = __file__.split("/")[-1].split(".")[0]
def general_df_reader(args):
if "npz" == args.input.split(".")[-1]:
		npz = np.load(args.input)  # load matrix and labels from the .npz input
df = pd.DataFrame(npz['matrix'])
df.columns = npz['labels']
return df
if args.header:
if args.index:
df = pd.read_csv(args.input,sep=args.sep,index_col=0)
else:
df = pd.read_csv(args.input,sep=args.sep)
else:
if args.index:
df = pd.read_csv(args.input,sep=args.sep,index_col=0,header=None)
else:
df = pd.read_csv(args.input,sep=args.sep,header=None)
return df
def guess_sep(x):
with open(x) as f:
for line in f:
tmp1 = len(line.strip().split(","))
tmp2 = len(line.strip().split("\t"))
# print (tmp1,tmp2)
			if tmp1 > tmp2:
				return ","
			elif tmp2 > tmp1:
				return "\t"
			else:
				print ("Can't determine the separator. Please input it manually.")
				exit()
def my_args():
username = getpass.getuser()
addon_string = str(uuid.uuid4()).split("-")[-1]
	mainParser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter,description="given a dataframe, plot a column as a pie chart")
mainParser.add_argument('-o',"--output", help="enter a job ID, which is used to make a new directory. Every output will be moved into this folder.", default=current_file_base_name+'_'+username+"_"+str(datetime.date.today()))
mainParser.add_argument('-f',"--input", help="data table input",required=True)
mainParser.add_argument('-t',"--title", help="figure title",default=None)
mainParser.add_argument("--use_col", help="which color to use for pie chart, if the input file contains column name, please use column name; otherwise, 0 will be the first column, and 1 will be the second column and so on")
mainParser.add_argument('--index', help=" index is false", action='store_true')
mainParser.add_argument("--header", help="input table has header", action='store_true')
mainParser.add_argument("--homer", help="input table is homer", action='store_true')
mainParser.add_argument("--order", help="pie chart category order, to keep color assignment consistent", default=None)
mainParser.add_argument('--just_plot', help="provide a ready to plot dataframe", action='store_true')
mainParser.add_argument('-s',"--sep", help="separator",default="auto")
##------- add parameters above ---------------------
args = mainParser.parse_args()
return args
def pie_chart(char_list,value_list,output,args):
# from adjustText import adjust_text
color_set = ['#e6194b', '#3cb44b', '#ffe119', '#4363d8', '#f58231', '#911eb4', '#46f0f0', '#f032e6', '#bcf60c', '#fabebe', '#008080', '#e6beff', '#9a6324', '#fffac8', '#800000', '#aaffc3', '#808000', '#ffd8b1', '#000075', '#808080', '#ffffff', '#000000']
plt.rcParams['font.size'] = '16'
plt.figure()
if len(value_list) > len(color_set):
print ("Too many categories!")
colors = color_set[:len(char_list)]
df = pd.DataFrame()
df[0] = char_list
df[1] = value_list
df[2] = colors
df2 = df[df[1]>0]
df1 = df[df[1]==0]
[w,t1,t2] = plt.pie(df2[1], labels=df2[0], autopct='%1.1f%%',shadow=False, startangle=90,colors=df2[2])
# adjust_text(t1)
if df1.shape[0] > 0:
# plt.title()
print ("These categories are not found: %s"%(", ".join(df1[0].tolist())))
if args.title:
plt.title(args.title)
plt.axis('equal') # Equal aspect ratio ensures that pie is drawn as a circle.
plt.savefig("%s.pdf"%(output),bbox_inches='tight')
def get_homer_category(x):
	# map a homer annotation string to a simplified category
	if "3' UTR" in x:
		return "3' UTR"
	if "5' UTR" in x:
		return "5' UTR"
	if "non-coding" in x:
		return "Exon (non-coding)"
	if "promoter" in x:
		return "Promoter"
	# otherwise keep the first word, capitalised
	x = x.split()[0]
	x = list(x)
	x[0] = x[0].upper()
	return "".join(x)
# return x
def main():
args = my_args()
if args.sep=="auto":
args.sep = guess_sep(args.input)
df = general_df_reader(args)
print (df.head())
if args.just_plot:
char_list = df[0].tolist()
value_list = df[1].tolist()
pie_chart(char_list,value_list,args.output,args)
exit()
if args.use_col == "-1":
args.use_col = df.columns.tolist()[-1]
if args.use_col == "-2":
args.use_col = df.columns.tolist()[-2]
if args.homer:
df[args.use_col] = df[args.use_col].apply(get_homer_category)
my_cat = df[args.use_col].value_counts(normalize=True).sort_values().to_dict()
char_list = my_cat.keys()
if args.order:
char_list = args.order.split(",")
value_list = []
for k in char_list:
try:
value_list.append(my_cat[k])
except:
value_list.append(0)
pie_chart(char_list,value_list,args.output,args)
if __name__ == "__main__":
main()
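# --- example invocation (not part of the original script) --------------------
# Hypothetical command line; the file name and column name are illustrative:
#
#   python pie_plot.py -f annotated_peaks.txt --header --homer \
#          --use_col Annotation -t "Peak annotation" -o peak_pie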
| null |
bin/pie_plot.py
|
pie_plot.py
|
py
| 4,975 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "os.path.dirname",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 4,
"usage_type": "attribute"
},
{
"api_name": "os.path.realpath",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "sys.path.append",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "os.path.abspath",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.use",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "getpass.getuser",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "uuid.uuid4",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "argparse.ArgumentDefaultsHelpFormatter",
"line_number": 53,
"usage_type": "attribute"
},
{
"api_name": "datetime.date.today",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "datetime.date",
"line_number": 55,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot.rcParams",
"line_number": 83,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 83,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 84,
"usage_type": "name"
},
{
"api_name": "pandas.DataFrame",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.pie",
"line_number": 96,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 96,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.title",
"line_number": 102,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 102,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.axis",
"line_number": 103,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 103,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.savefig",
"line_number": 104,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 104,
"usage_type": "name"
}
] |
94208053
|
import maya.cmds as cmds
import maya.mel as mel
import os
import os.path
import shutil
### reference and rig related Function ------
def findRigByName(sel):
'''
still in WIP
    try finding the rig by its name
'''
out = []
for o in sel:
if '_Rig_Grp' in o:
out.append(o)
out = findTopParent(out)
return out
def findRigByReference(sel):
'''
    find rigs whose reference is loaded and whose name contains the word 'RIG'
:param sel:
:return:
'''
out = []
for o in sel:
if 'RIG' in o and cmds.referenceQuery(o, isLoaded=True) == True:
out.append(o)
return out
def listVisibleMesh():
list = cmds.ls("*RIG:*", visible=True, type='mesh')
for l in list:
print(l)
return list
def splitCharAndProps(rigList):
'''
    Split a list of referenced rigs between characters and props depending on their file path
:param rigList:
:return:
'''
propsList = []
charList = []
print('THIS IS RIG LIST')
    for r in rigList:
        print(r)
for rig in rigList:
rigPath = rig.split('_')[0] + '_RIGRN'
# print rig
# print rigPath
path = cmds.referenceQuery(rigPath, filename=True)
if 'characters' in str(path):
charList.append(rig)
elif 'props' in str(path):
propsList.append(rig)
outRigList = [charList, propsList]
return outRigList
### file handling Functions -----
def listFiles(dirPath):
'''
    List all files from a directory and sort them alphabetically
:param dirPath:
:return:
'''
print(dirPath)
onlyFiles = [str(f) for f in os.listdir(dirPath) if os.path.isfile(os.path.join(dirPath, f))]
onlyFiles = sorted(onlyFiles, key=str.lower)
print(onlyFiles)
return onlyFiles
def sortOnlyLastVersions(fileList, baseFile):
'''
    take all the files from a list and identify the latest version for each name root (baseFile)
:param fileList:
:param baseFile:
:return:
'''
outLastVersions = []
otherBaseFiles = [baseFile]
# check if there is other root name than the baseFile
for f in fileList:
if f[0:-6] != baseFile:
otherBaseFiles.append(f[0:-6])
# remove duplicate root base name from list
otherBaseFiles = list(set(otherBaseFiles))
    # check every root base name to see which files share it
for baseFile in otherBaseFiles:
baseFileGrp = []
for f in fileList:
if baseFile in f:
# if same bas name, only append the version extension to baseFileGrp
baseFileGrp.append(f[-6:-3])
# if there is some version for baseFileGrp
if baseFileGrp != []:
# sort all values to get only the last and so bigger one
baseFileGrp.sort(key=int)
outLast = baseFileGrp[-1]
baseFileLastItem = baseFile + str(outLast) + '.ma'
outLastVersions.append(baseFileLastItem)
print("Input folder file list:")
print(fileList)
print("Output folder last version file list:")
print(outLastVersions)
return outLastVersions
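# illustrative example (assumed behaviour; result order may vary because the
# base names are deduplicated through a set):
#   sortOnlyLastVersions(['shotA_v001.ma', 'shotA_v003.ma', 'shotB_v002.ma'],
#                        'shotA_v')
#   -> ['shotA_v003.ma', 'shotB_v002.ma']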
### UI Functions ---------
def getRigList():
'''
get the complete rig list
:return:
'''
rigListOut = []
rigList = findRigByReference(cmds.ls(references=True))
for rig in rigList:
nameSpace = cmds.referenceQuery(rig, namespace=True).replace(':', '')
print(nameSpace)
rigO = rig.replace('RN', '')
rigListOut.append(nameSpace)
return rigListOut
def addRig(rig):
if rig in activeRigList:
activeRigList.remove(rig)
selectRigList()
cmds.button(rig, edit=True, bgc=[0.6, 0.6, 0.7])
else:
activeRigList.append(rig)
selectRigList()
cmds.button(rig, edit=True, bgc=[0.6, 0.9, 0.6])
print(activeRigList)
def AllRigListUI():
global rigList
global activeRigList
global activeCol
activeRigList = rigList
for rig in rigList:
cmds.button(str(rig) + 'BTN', edit=True, bgc=activeCol)
def NoneRigListUI():
global rigList
global activeRigList
global activeCol
activeRigList = []
for rig in rigList:
cmds.button(str(rig) + 'BTN', edit=True, bgc=inactiveCol)
def propsRigUI():
global rigList
global activeRigList
global activeCol
global rigListSplit
print(rigListSplit)
for rig in rigListSplit[1]:
activeRigList.append(rig)
cmds.button(str(rig) + 'BTN', edit=True, bgc=activeCol)
def charRigUI():
global rigList
global activeRigList
global activeCol
global rigListSplit
print(rigListSplit)
for rig in rigListSplit[0]:
activeRigList.append(rig)
cmds.button(str(rig) + 'BTN', edit=True, bgc=activeCol)
def addItem(item):
global inactiveCol
global activeCol
global activeRigList
bgc = cmds.button(item + 'BTN', query=True, bgc=True)
if bgc == activeCol:
cmds.button(item + 'BTN', edit=True, bgc=inactiveCol)
activeRigList.remove(item)
else:
cmds.button(item + 'BTN', edit=True, bgc=activeCol)
activeRigList.append(item)
print(activeRigList)
def exportAlembicUI():
global activeRigList
cmds.select(clear=True)
print('-------------------------------------')
print('Exporting Alembic for following rigs:')
print('-------------------------------------')
for rig in activeRigList:
rigExportList = cmds.ls(rig + ":*Geo*", long=True)
rigOutList = []
# exclude everything that is not inside the Geo_Grp
for rigO in rigExportList:
if 'Rig' in str(rigO):
print(str(rigO) + ' is not a part of the asset Geo_Grp')
elif cmds.listRelatives(rigO, shapes=True) == None and cmds.nodeType(rigO) != 'mesh':
print(str(rigO) + ' is a group')
else:
rigOutList.append(rigO)
cmds.select(rigOutList, replace=True)
filePath = 'A:/VEJFESTEN/sequences/' + sequence + '/' + sht + '/anim/alembic/' + sequence + '_' + sht + '_' + \
rig.split('_')[0] + '_v' + publishVersion + '.abc'
# generate object array for current rig
objectArray = "["
for o in rigOutList:
objectArray = objectArray + str(o) + ' '
objectArray = objectArray + ']'
print(objectArray)
# generate the command for the abc export in mel script language
command = 'AbcExport -j"-file ' + filePath + ' -frameRange ' + \
str(cmds.playbackOptions(query=True, animationStartTime=True)).split('.')[0] + ' ' + \
str(cmds.playbackOptions(query=True, animationEndTime=True)).split('.')[
0] + ' -uvWrite -saveMultipleFiles -selection -writeUVSet -dataFormat ogawa ";'
print(command)
mel.eval(command)
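    # Example of the generated MEL command (hypothetical shot and rig values):
    #   AbcExport -j"-file A:/VEJFESTEN/sequences/SEQ/SH010/anim/alembic/SEQ_SH010_charA_v003.abc
    #       -frameRange 1001 1120 -uvWrite -saveMultipleFiles -selection
    #       -writeUVSet -dataFormat ogawa ";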
### --- Basic Variable Init : ------------------------------------------------------------------------
# basic file analyse to find sht and sequence
currentFilePath = str(cmds.file(query=True, sceneName=True))
currentFile = currentFilePath.split('/')[-1]
currentFilePath = currentFilePath.replace('/', '\\')
currentFileStripped = currentFile.split('_')
sequence = currentFileStripped[0]
sht = currentFileStripped[1]
rootPath = r'A:\VEJFESTEN\sequences'
# finding last version of the publish
oldPublishPath = rootPath + '\\' + sequence + '\\' + sht + '\\anim\\publish\\old\\'
oldPublishList = listFiles(oldPublishPath)
publishVersion = sortOnlyLastVersions(oldPublishList, sequence + '_' + sht + '_ANM_old_')[0][-6:-3]
print(publishVersion)
rigList = getRigList()
rigListSplit = splitCharAndProps(rigList)
activeRigList = []
rigBtnList = []
VejfestAlembicExport = []
activeCol = [0.6, 0.7500114442664225, 0.6]
inactiveCol = [0.6, 0.6, 0.6]
fileStructure = """sht: """ + sequence + """_""" + sht + """
Last Publish : """ + str(publishVersion) + """
Number of characters : """ + str(len(rigListSplit[1])) + """
Number of props : """ + str(len(rigListSplit[0])) + """
Animation Start/End: """ + str(cmds.playbackOptions(query=True, animationStartTime=True)) + """
Animation Length: """ + str(cmds.playbackOptions(query=True, animationEndTime=True))
# ---- UiStartingPoint---------------------------------------------------------------------------------
windowID = 'VejfestAlembicExport'
windowWTot = 600
windowW = 300
windowH = 300
buttonH = 20
colorBase = [0.2, 0.75, 0.4]
contrastVal = 0.1
contrastSatVal = -0.2
try:
if cmds.window(windowID, exists=True):
cmds.deleteUI(windowID)
except:
print('first iteration of VejfestAlembicExport')
VejfestAlembicExport = cmds.window(windowID, title=windowID, resizeToFitChildren=True, sizeable=True, w=windowWTot,
h=windowH)
cmds.columnLayout()
cmds.text(label='Vejfest Alembic Export', w=windowWTot, h=15, bgc=colorBase)
cmds.rowLayout(numberOfColumns=2)
cmds.columnLayout()
cmds.rowLayout(numberOfColumns=4)
menuButtonSize = windowW / 4 - 2
cmds.button("Char", h=buttonH, w=menuButtonSize, c='charRigUI()')
cmds.button("Props", h=buttonH, w=menuButtonSize, c='propsRigUI()')
cmds.button("All", h=buttonH, w=menuButtonSize, c='AllRigListUI()')
cmds.button("None", h=buttonH, w=menuButtonSize, c='NoneRigListUI()')
cmds.setParent('..')
cmds.text(label='', w=windowW, h=5)
scrollLayout = cmds.scrollLayout(borderVisible=True, h=200, w=windowW, verticalScrollBarAlwaysVisible=True)
print(rigListSplit[0])
for rig in rigListSplit[0]:
rigButton = cmds.button(str(rig) + 'BTN', label='CHAR - ' + str(rig), w=windowW, h=buttonH,
bgc=inactiveCol, c="addItem('" + str(rig) + "')")
rigBtnList.append(str(rig) + 'BTN')
cmds.separator('listSeparator')
for rig in rigListSplit[1]:
rigButton = cmds.button(str(rig) + 'BTN', label='PROP - ' + str(rig), w=windowW - 20, h=buttonH,
bgc=inactiveCol, c="addItem('" + str(rig) + "')")
rigBtnList.append(str(rig) + 'BTN')
cmds.setParent('..')
cmds.setParent('..')
cmds.columnLayout()
textW = windowWTot - windowW - 10
fileInfoTxt = cmds.text(align='left', label=fileStructure, w=textW)
cmds.text(label='', w=textW, h=35)
cmds.button("Export sht Alembic", h=buttonH, w=textW, c='exportAlembicUI()')
cmds.setParent('..')
cmds.showWindow(VejfestAlembicExport)
| null |
Maya/ProjectRelated/Vejfesten/Pipeline/shotAbcExporter/old/shotAbcExporter_v0.2.py
|
shotAbcExporter_v0.2.py
|
py
| 10,443 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "maya.cmds.referenceQuery",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "maya.cmds",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "maya.cmds.ls",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "maya.cmds",
"line_number": 37,
"usage_type": "name"
},
{
"api_name": "maya.cmds.referenceQuery",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "maya.cmds",
"line_number": 58,
"usage_type": "name"
},
{
"api_name": "os.listdir",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "os.path.isfile",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 76,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "maya.cmds.ls",
"line_number": 127,
"usage_type": "call"
},
{
"api_name": "maya.cmds",
"line_number": 127,
"usage_type": "name"
},
{
"api_name": "maya.cmds.referenceQuery",
"line_number": 129,
"usage_type": "call"
},
{
"api_name": "maya.cmds",
"line_number": 129,
"usage_type": "name"
},
{
"api_name": "maya.cmds.button",
"line_number": 140,
"usage_type": "call"
},
{
"api_name": "maya.cmds",
"line_number": 140,
"usage_type": "name"
},
{
"api_name": "maya.cmds.button",
"line_number": 144,
"usage_type": "call"
},
{
"api_name": "maya.cmds",
"line_number": 144,
"usage_type": "name"
},
{
"api_name": "maya.cmds.button",
"line_number": 155,
"usage_type": "call"
},
{
"api_name": "maya.cmds",
"line_number": 155,
"usage_type": "name"
},
{
"api_name": "maya.cmds.button",
"line_number": 165,
"usage_type": "call"
},
{
"api_name": "maya.cmds",
"line_number": 165,
"usage_type": "name"
},
{
"api_name": "maya.cmds.button",
"line_number": 178,
"usage_type": "call"
},
{
"api_name": "maya.cmds",
"line_number": 178,
"usage_type": "name"
},
{
"api_name": "maya.cmds.button",
"line_number": 191,
"usage_type": "call"
},
{
"api_name": "maya.cmds",
"line_number": 191,
"usage_type": "name"
},
{
"api_name": "maya.cmds.button",
"line_number": 199,
"usage_type": "call"
},
{
"api_name": "maya.cmds",
"line_number": 199,
"usage_type": "name"
},
{
"api_name": "maya.cmds.button",
"line_number": 201,
"usage_type": "call"
},
{
"api_name": "maya.cmds",
"line_number": 201,
"usage_type": "name"
},
{
"api_name": "maya.cmds.button",
"line_number": 204,
"usage_type": "call"
},
{
"api_name": "maya.cmds",
"line_number": 204,
"usage_type": "name"
},
{
"api_name": "maya.cmds.select",
"line_number": 212,
"usage_type": "call"
},
{
"api_name": "maya.cmds",
"line_number": 212,
"usage_type": "name"
},
{
"api_name": "maya.cmds.ls",
"line_number": 217,
"usage_type": "call"
},
{
"api_name": "maya.cmds",
"line_number": 217,
"usage_type": "name"
},
{
"api_name": "maya.cmds.listRelatives",
"line_number": 223,
"usage_type": "call"
},
{
"api_name": "maya.cmds",
"line_number": 223,
"usage_type": "name"
},
{
"api_name": "maya.cmds.nodeType",
"line_number": 223,
"usage_type": "call"
},
{
"api_name": "maya.cmds.select",
"line_number": 228,
"usage_type": "call"
},
{
"api_name": "maya.cmds",
"line_number": 228,
"usage_type": "name"
},
{
"api_name": "maya.cmds.playbackOptions",
"line_number": 241,
"usage_type": "call"
},
{
"api_name": "maya.cmds",
"line_number": 241,
"usage_type": "name"
},
{
"api_name": "maya.cmds.playbackOptions",
"line_number": 242,
"usage_type": "call"
},
{
"api_name": "maya.cmds",
"line_number": 242,
"usage_type": "name"
},
{
"api_name": "maya.cmds.file",
"line_number": 251,
"usage_type": "call"
},
{
"api_name": "maya.cmds",
"line_number": 251,
"usage_type": "name"
},
{
"api_name": "maya.cmds.playbackOptions",
"line_number": 280,
"usage_type": "call"
},
{
"api_name": "maya.cmds",
"line_number": 280,
"usage_type": "name"
},
{
"api_name": "maya.cmds.playbackOptions",
"line_number": 281,
"usage_type": "call"
},
{
"api_name": "maya.cmds",
"line_number": 281,
"usage_type": "name"
},
{
"api_name": "maya.cmds.window",
"line_number": 295,
"usage_type": "call"
},
{
"api_name": "maya.cmds",
"line_number": 295,
"usage_type": "name"
},
{
"api_name": "maya.cmds.deleteUI",
"line_number": 296,
"usage_type": "call"
},
{
"api_name": "maya.cmds",
"line_number": 296,
"usage_type": "name"
},
{
"api_name": "maya.cmds.window",
"line_number": 300,
"usage_type": "call"
},
{
"api_name": "maya.cmds",
"line_number": 300,
"usage_type": "name"
},
{
"api_name": "maya.cmds.columnLayout",
"line_number": 302,
"usage_type": "call"
},
{
"api_name": "maya.cmds",
"line_number": 302,
"usage_type": "name"
},
{
"api_name": "maya.cmds.text",
"line_number": 304,
"usage_type": "call"
},
{
"api_name": "maya.cmds",
"line_number": 304,
"usage_type": "name"
},
{
"api_name": "maya.cmds.rowLayout",
"line_number": 305,
"usage_type": "call"
},
{
"api_name": "maya.cmds",
"line_number": 305,
"usage_type": "name"
},
{
"api_name": "maya.cmds.columnLayout",
"line_number": 306,
"usage_type": "call"
},
{
"api_name": "maya.cmds",
"line_number": 306,
"usage_type": "name"
},
{
"api_name": "maya.cmds.rowLayout",
"line_number": 307,
"usage_type": "call"
},
{
"api_name": "maya.cmds",
"line_number": 307,
"usage_type": "name"
},
{
"api_name": "maya.cmds.button",
"line_number": 309,
"usage_type": "call"
},
{
"api_name": "maya.cmds",
"line_number": 309,
"usage_type": "name"
},
{
"api_name": "maya.cmds.button",
"line_number": 310,
"usage_type": "call"
},
{
"api_name": "maya.cmds",
"line_number": 310,
"usage_type": "name"
},
{
"api_name": "maya.cmds.button",
"line_number": 311,
"usage_type": "call"
},
{
"api_name": "maya.cmds",
"line_number": 311,
"usage_type": "name"
},
{
"api_name": "maya.cmds.button",
"line_number": 312,
"usage_type": "call"
},
{
"api_name": "maya.cmds",
"line_number": 312,
"usage_type": "name"
},
{
"api_name": "maya.cmds.setParent",
"line_number": 313,
"usage_type": "call"
},
{
"api_name": "maya.cmds",
"line_number": 313,
"usage_type": "name"
},
{
"api_name": "maya.cmds.text",
"line_number": 314,
"usage_type": "call"
},
{
"api_name": "maya.cmds",
"line_number": 314,
"usage_type": "name"
},
{
"api_name": "maya.cmds.scrollLayout",
"line_number": 316,
"usage_type": "call"
},
{
"api_name": "maya.cmds",
"line_number": 316,
"usage_type": "name"
},
{
"api_name": "maya.cmds.button",
"line_number": 319,
"usage_type": "call"
},
{
"api_name": "maya.cmds",
"line_number": 319,
"usage_type": "name"
},
{
"api_name": "maya.cmds.separator",
"line_number": 322,
"usage_type": "call"
},
{
"api_name": "maya.cmds",
"line_number": 322,
"usage_type": "name"
},
{
"api_name": "maya.cmds.button",
"line_number": 325,
"usage_type": "call"
},
{
"api_name": "maya.cmds",
"line_number": 325,
"usage_type": "name"
},
{
"api_name": "maya.cmds.setParent",
"line_number": 329,
"usage_type": "call"
},
{
"api_name": "maya.cmds",
"line_number": 329,
"usage_type": "name"
},
{
"api_name": "maya.cmds.setParent",
"line_number": 330,
"usage_type": "call"
},
{
"api_name": "maya.cmds",
"line_number": 330,
"usage_type": "name"
},
{
"api_name": "maya.cmds.columnLayout",
"line_number": 332,
"usage_type": "call"
},
{
"api_name": "maya.cmds",
"line_number": 332,
"usage_type": "name"
},
{
"api_name": "maya.cmds.text",
"line_number": 334,
"usage_type": "call"
},
{
"api_name": "maya.cmds",
"line_number": 334,
"usage_type": "name"
},
{
"api_name": "maya.cmds.text",
"line_number": 335,
"usage_type": "call"
},
{
"api_name": "maya.cmds",
"line_number": 335,
"usage_type": "name"
},
{
"api_name": "maya.cmds.button",
"line_number": 336,
"usage_type": "call"
},
{
"api_name": "maya.cmds",
"line_number": 336,
"usage_type": "name"
},
{
"api_name": "maya.cmds.setParent",
"line_number": 337,
"usage_type": "call"
},
{
"api_name": "maya.cmds",
"line_number": 337,
"usage_type": "name"
},
{
"api_name": "maya.cmds.showWindow",
"line_number": 338,
"usage_type": "call"
},
{
"api_name": "maya.cmds",
"line_number": 338,
"usage_type": "name"
}
] |
73947750
|
# -*- coding: utf-8 -*-
"""
Created on Fri Dec 13 14:01:42 2019
@author: sebas
"""
# Imports
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
import statistics
from sklearn.decomposition import PCA
from sklearn.model_selection import train_test_split, GridSearchCV, RandomizedSearchCV
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import roc_auc_score, roc_curve
# Path on external hard drive
path = 'E:/Sebastiaan/Biochemistry and biotechnology/Internship/'
# Path on local device
local_path = 'C:/Users/sebas/Desktop/School/Ma 2/Stage/Repositories/Machine-learning-model/'
# Path to input file
infile = local_path + 'code/cleaned_input_file.csv'
infile_2 = local_path + 'code/input_file_hardfiltered.csv'
infile_3 = local_path + 'belgian_cohort_input_matrix.csv'
infile_4 = local_path + 'topfeature_matrix.csv'
# Complete dataset
data = pd.read_csv(infile)
data.set_index('sample', inplace=True)
data = data.reindex(sorted(data.columns), axis=1)
# # Smaller dataset
# data_2 = pd.read_csv(infile_2)
# data_2.set_index('sample', inplace=True)
# Validation dataset (belgian cohort)
validation = pd.read_csv(infile_3)
validation.set_index('sample', inplace=True)
validation = validation.reindex(sorted(validation.columns), axis=1)
# Top feature dataset
top50 = pd.read_csv(infile_4)
top50.set_index('sample', inplace=True)
top50 = top50.reindex(sorted(top50.columns), axis=1)
# PGEN values
header = ['CDR3', 'Pgen']
pgen = pd.read_csv(local_path + 'code/pgen_cleaned.txt',
sep=':',
names=header)
def principal_component(n_comp=10, cutoff=140, df=data, screePlot=False):
'''
Parameters
----------
n_comp : TYPE = int
DESCRIPTION: number of principal components
cutoff: TYPE = int
DESCRIPTION: publicness cut-off
df : TYPE = pd.DataFrame, optional
DESCRIPTION: Input DataFrame. The default is 'data'.
Returns
-------
    TYPE = (np.array, pd.DataFrame)
        DESCRIPTION: explained variance of each component and the PCA dataframe.
'''
df = df.loc[:, df.sum() >= cutoff]
# Separate out the features - samples:
X = df.loc[:, df.columns != 'status'].values
# Separate out the target:
y = df.loc[:, ['status']].values
# PCA
pca = PCA(n_components=n_comp)
scaler = pca.fit(X)
principalComponents = scaler.transform(X)
# DF columns
cols = []
for i in range(n_comp):
component = 'PC_' + str(i+1)
cols.append(component)
# Construct PCA dataframe
principalDF = pd.DataFrame(data=principalComponents,
columns=cols)
# Construct target dataframe
targetDF = pd.DataFrame(y,
columns=['status'])
# Concatenate target column to PC columns
finalDF = pd.concat([principalDF, targetDF], axis=1)
max_range = n_comp + 1
# Plot PCA results
for i in range(2,max_range,1):
fig = plt.figure(figsize=(8,8))
ax1 = fig.add_subplot(1,1,1)
ax1.set_xlabel('Principal Component 1', fontsize = 15)
ax1.set_ylabel('Principal Component ' + str(i), fontsize = 15)
ax1.set_title('Biplot of PC1 and PC' + str(i), fontsize = 20)
labels = [1, 0]
colors = ['g', 'b']
for label, color in zip(labels,colors):
indicesToKeep = finalDF['status'] == label
            ax1.scatter(finalDF.loc[indicesToKeep, 'PC_1'],
finalDF.loc[indicesToKeep, 'PC_' + str(i)],
c = color,
s=50)
ax1.legend(labels)
ax1.grid()
# Variance explained by each component
var_explained = pca.explained_variance_ratio_
# Calculate eigenvalues from covariance matrix
eigenvalues = pca.explained_variance_
# Data for scree plot
x = range(n_comp)
    x = [i+1 for i in x]  # 1-based component index for the scree plot
y = eigenvalues
if screePlot==True:
# Plot scree plot
fig2 = plt.figure(figsize=(8,8))
ax2 = fig2.add_subplot(1,1,1)
ax2.set_xlabel('n principal components', fontsize=15)
ax2.set_ylabel('eigenvalue', fontsize=15)
ax2.set_title('Scree plot', fontsize=20)
ax2.plot(x, y)
plt.show()
return var_explained, finalDF
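# --- usage sketch (not part of the original script) ---------------------------
# Hypothetical call on the full dataset; the component count is illustrative:
#
#   var_explained, pc_df = principal_component(n_comp=10, cutoff=140, df=data,
#                                               screePlot=True)
#   print(var_explained.cumsum())    # cumulative variance captured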
def classifier(tcr_df=data, lower_limit=0, upper_limit=1, k=10, n_comp=40, ratio=0.1, custom_pgen=1e-09, showROC=True, legendROC=True, featuresPCA=False, validate=False, val_set=validation, optimization=False, alternative_features=False, top50_tcr_df=top50):
'''
Parameters
----------
    tcr_df : TYPE = pd.DataFrame
        DESCRIPTION = input df of TCR presence/absence per sample.
    lower_limit : TYPE = float
DESCRIPTION: minimal proportion of training samples in which a TCR must be present.
upper_limit : TYPE = float
DESCRIPTION: maximal proportion of training samples in which a TCR can be present.
k : TYPE = int
DESCRIPTION: number of cross-validation folds.
n_comp : TYPE = int
DESCRIPTION = number of principal components.
ratio : TYPE = float
DESCRIPTION = relative size of test set.
custom_pgen : TYPE = float
DESCRIPTION = custom value for generation probability of CDR3 region in TCR.
showROC: TYPE = Bool
DESCRIPTION = plot ROC curve associated with prediction.
legendROC : TYPE = Bool
DESCRIPTION = display legend containing information about model.
featuresPCA : TYPE = Bool
DESCRIPTION = use n principal components of data to train the model.
validate : TYPE = Bool
DESCRIPTION = validate model on independent data.
val_set : TYPE = pd.DataFrame
DESCRIPTION = data set used to validate the model.
optimization : TYPE = Bool
DESCRIPTION = optimization of hyperparameters
alternative_features : TYPE = Bool
DESCRIPTION = train model on top features.
top50_tcr_df : TYPE = pd.DataFrame
DESCRIPTION = df of top 50 TCRs.
    Returns
    -------
    statistics.mean(performance) : TYPE = float
        DESCRIPTION = average AUC value over k folds.
    statistics.stdev(performance) : TYPE = float
        DESCRIPTION = standard deviation of the AUC over k folds.
    When validate=True the function instead returns fpr_val, tpr_val and
    the AUC on the validation cohort.
'''
if alternative_features==True:
tdf = top50_tcr_df.T
else:
tdf = tcr_df.T
# Prepare data
tdf.reset_index(inplace=True)
    new = tdf['index'].str.split('_', expand=True)
tdf.drop(columns=['index'], inplace=True)
tdf['CDR3'] = new[0]
tdf['V_gene'] = new[1]
tdf['J_gene'] = new[2]
co = pgen[pgen['Pgen'] <= custom_pgen]
pgenfilter = co['CDR3'].tolist()
    filtered = tdf[tdf['CDR3'].isin(pgenfilter)].copy()
filtered['TCR'] = filtered[['CDR3', 'V_gene', 'J_gene']].apply(lambda x: '_'.join(x.astype(str)), axis=1)
filtered.drop(columns=['CDR3', 'V_gene', 'J_gene'], inplace=True)
filtered.set_index('TCR', inplace=True)
df = filtered.T
if alternative_features==True:
df['status'] = top50_tcr_df['status']
else:
df['status'] = tcr_df['status']
df = df.iloc[1:]
# Initialize lists to return model outputs
performance = []
fprs = []
tprs = []
if showROC==True:
plt.figure(figsize=(12,10))
else:
pass
if optimization==True:
repertoire_features = df.drop(columns=['status'])
repertoire_labels = df.loc[:, 'status']
# Specify hyperparameters to optimize
param_grid ={
'n_estimators':[800, 850, 900, 950, 1000]
}
randomized_search = RandomizedSearchCV(RandomForestClassifier(), param_grid, cv=5)
randomized_search.fit(repertoire_features, repertoire_labels)
best_parameters = randomized_search.best_params_
print(best_parameters)
feature_importances = randomized_search.best_estimator_.feature_importances_
attributes = repertoire_features.columns
best_features = sorted(zip(feature_importances, attributes), reverse=True)
else:
pass
if validate==True:
train_set = df
# Isolate labels training data
y_train = train_set.loc[:, ['status']].values
# Isolate training data
train_set.drop(columns=['status'], inplace=True)
# TCR publicness
low_cut = lower_limit*((1-ratio)*len(df.index))
high_cut = upper_limit*((1-ratio)*len(df.index))
train_set = train_set.loc[:, train_set.sum() >= int(low_cut)]
train_set = train_set.loc[:, train_set.sum() <= int(high_cut)]
# Transform into np.array
X_train = train_set.loc[:, train_set.columns].values
# Train model
rnd_clf = RandomForestClassifier(n_estimators=1000, max_leaf_nodes=16, criterion="entropy", bootstrap=True, n_jobs=-1)
        rnd_clf.fit(X_train, y_train.ravel())
# Isolate labels validation data
y_test_validation = val_set.loc[:, ['status']].values
# Isolate validation data
val_set.drop(columns=['status'], inplace=True)
# Transform into np.array
X_validation = val_set.loc[:, val_set.columns].values
# Predict sample labels of validation data
y_pred_validation = rnd_clf.predict_proba(X_validation)
# Calculate AUC
auc_validation = roc_auc_score(y_test_validation, y_pred_validation[:, 1])
# ROC curve
fpr_val, tpr_val, thresholds_val = roc_curve(y_test_validation, y_pred_validation[:, 1], pos_label=1)
print(auc_validation)
else:
# Split data into training and test set
for i in range(k):
train_set, test_set = train_test_split(df, test_size=ratio)
# Separate sample labels
y_train = train_set.loc[:, ['status']].values
y_test = test_set.loc[:, ['status']].values
# Remove labels
train_set.drop(columns=['status'], inplace=True)
test_set.drop(columns=['status'], inplace=True)
# Select public TCRs ~cutoff
low_cut = lower_limit*((1-ratio)*len(df.index))
high_cut = upper_limit*((1-ratio)*len(df.index))
train_set = train_set.loc[:, train_set.sum() >= int(low_cut)]
train_set = train_set.loc[:, train_set.sum() <= int(high_cut)]
# Filter out non-public TCRs in test set
training_list = train_set.columns.tolist()
            print('number of TCR features in train set: ' + str(len(training_list)))
test_set = test_set.loc[:, test_set.columns.isin(training_list)]
test_list = test_set.columns.tolist()
            print('number of TCR features in test set: ' + str(len(test_list)))
# Features & labels training data:
X_train = train_set.loc[:, train_set.columns].values
# Features & labels test data:
X_test = test_set.loc[:, test_set.columns].values
if featuresPCA==True:
# Fit PCA model on training data
pca = PCA(n_components=n_comp)
scaler = pca.fit(X_train)
# Transform training and test data
X_train = scaler.transform(X_train)
X_test = scaler.transform(X_test)
else:
pass
# Train random forest classifier
rnd_clf = RandomForestClassifier(n_estimators=1000, max_leaf_nodes=16, criterion="entropy", bootstrap=True, n_jobs=-1)
            rnd_clf.fit(X_train, y_train.ravel())
# Prediction on test data
y_pred = rnd_clf.predict_proba(X_test)
# Calculate AUC
auc = roc_auc_score(y_test, y_pred[:, 1])
performance.append(float(auc))
# ROC curve
fpr, tpr, thresholds = roc_curve(y_test, y_pred[:, 1], pos_label=1)
fprs.append(fpr)
tprs.append(tpr)
if showROC==True:
plt.plot(fpr, tpr)
plt.plot([0, 1], [0, 1], color='navy', linestyle='--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False positive rate', fontsize=25)
plt.ylabel('True positive rate', fontsize=25)
plt.title('Receiver operating characteristic', fontsize=30)
plt.rc('xtick',labelsize=8)
plt.rc('ytick',labelsize=8)
else:
pass
if legendROC==True:
# param_1 = mpatches.Patch(color='white', label='lower limit = ' + str(lower_limit))
# param_2 = mpatches.Patch(color='white', label='upper limit = ' + str(upper_limit))
param_3 = mpatches.Patch(color='white', label='test set size = ' + str(ratio))
param_4 = mpatches.Patch(color='white', label='k = ' + str(k))
auc_label_1 = mpatches.Patch(color='white', label='avg AUC = ' + str("{0:.2f}".format(statistics.mean(performance))))
auc_label_2 = mpatches.Patch(color='white', label='std AUC = ' + str("{0:.2f}".format(statistics.stdev(performance))))
plt.legend(handles=[param_3, param_4, auc_label_1, auc_label_2])
else:
pass
if showROC==True:
plt.show()
else:
pass
if validate==True:
return fpr_val, tpr_val, auc_validation
else:
return statistics.mean(performance), statistics.stdev(performance)
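# A minimal cross-validation usage sketch for classifier (hypothetical values,
# commented out so it does not run in addition to the validation call below):
# avg_auc, std_auc = classifier(tcr_df=data, k=10, ratio=0.1, showROC=False)
# print('AUC = {0:.2f} +/- {1:.2f}'.format(avg_auc, std_auc))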
def param_tuning(PGEN=False, LOWER=False, UPPER=False):
    # Initialize so the final return works even when no tuning flag is set
    val, average, stddeviation = [], [], []
if PGEN==True:
val = []
average = []
stddeviation = []
for i in np.arange(16,8,-0.25):
val.append(1*10**(-float(i)))
avg, std = classifier(custom_pgen=1*10**(-float(i)), showROC=False)
average.append(avg)
stddeviation.append(std)
x = val
y = np.asarray(average)
error = np.asarray(stddeviation)
plt.figure(figsize=(18,16))
plt.plot(x, y)
plt.plot([1e-15,1e-08], [1, 1], color='navy', linestyle='--')
plt.fill_between(x, y-error, y+error, color='red')
plt.xlim([1e-16,5.6e-09])
plt.ylim([0.45, 1.05])
plt.xlabel('Pgen cutoff', fontsize=30)
plt.ylabel('AUC', fontsize=30)
plt.title('Influence of Pgen on prediction performance', fontsize=38)
plt.rc('xtick',labelsize=30)
plt.rc('ytick',labelsize=30)
plt.show()
else:
pass
if LOWER==True:
val = []
average = []
stddeviation = []
for i in np.arange(0,0.35,0.025):
val.append(i)
avg, std = classifier(lower_limit=i, tcr_df=data, showROC=False)
average.append(avg)
stddeviation.append(std)
x = val
y = np.asarray(average)
error = np.asarray(stddeviation)
plt.figure(figsize=(18,16))
plt.plot(x, y)
plt.plot([0, 1], [1, 1], color='navy', linestyle='--')
plt.fill_between(x, y-error, y+error, color='red')
plt.xlim([0, 0.4])
plt.ylim([0.40, 1.0])
plt.xlabel('Lower cutoff', fontsize=30)
plt.ylabel('AUC', fontsize=30)
plt.title('Influence of lower cutoff on prediction performance', fontsize=38)
plt.rc('xtick',labelsize=30)
plt.rc('ytick',labelsize=30)
plt.show()
else:
pass
if UPPER==True:
val = []
average = []
stddeviation = []
for i in np.arange(0.01,0.8,0.01):
val.append(i)
avg, std = classifier(upper_limit=i, tcr_df=data, showROC=False)
average.append(avg)
stddeviation.append(std)
x = val
y = np.asarray(average)
error = np.asarray(stddeviation)
plt.figure(figsize=(18,16))
plt.plot(x, y)
plt.plot([0, 1], [1, 1], color='navy', linestyle='--')
plt.fill_between(x, y-error, y+error, color='red')
plt.xlim([0.01, 1])
plt.ylim([0.35, 1.0])
plt.xlabel('Upper cutoff', fontsize=30)
plt.ylabel('AUC', fontsize=30)
plt.title('Influence of upper cutoff on prediction performance', fontsize=38)
plt.rc('xtick',labelsize=30)
plt.rc('ytick',labelsize=30)
plt.show()
else:
pass
return val, average, stddeviation
# a, b, c = param_tuning(UPPER=True)
FPR, TPR, val_AUC = classifier(k=2, ratio=0.1, featuresPCA=False, optimization=False,
validate=True, showROC=True, alternative_features=False)
plt.figure(figsize=(12,10))
plt.plot(FPR, TPR)
plt.plot([0, 1], [0, 1], color='navy', linestyle='--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate', fontsize=25)
plt.ylabel('True Positive Rate', fontsize=25)
plt.title('Receiver operating characteristic', fontsize=30)
plt.rc('xtick',labelsize=8)
plt.rc('ytick',labelsize=8)
ROC_AUC = mpatches.Patch(color='white', label='AUC = ' + str("{0:.2f}".format(val_AUC)))
plt.legend(handles=[ROC_AUC])
plt.show()
| null |
code/CMV_classifier.py
|
CMV_classifier.py
|
py
| 17,810 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "pandas.read_csv",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "sklearn.decomposition.PCA",
"line_number": 89,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 100,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 104,
"usage_type": "call"
},
{
"api_name": "pandas.concat",
"line_number": 108,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 116,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 116,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 147,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 147,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 154,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 154,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 250,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 250,
"usage_type": "name"
},
{
"api_name": "sklearn.model_selection.RandomizedSearchCV",
"line_number": 264,
"usage_type": "call"
},
{
"api_name": "sklearn.ensemble.RandomForestClassifier",
"line_number": 264,
"usage_type": "call"
},
{
"api_name": "sklearn.ensemble.RandomForestClassifier",
"line_number": 298,
"usage_type": "call"
},
{
"api_name": "sklearn.metrics.roc_auc_score",
"line_number": 314,
"usage_type": "call"
},
{
"api_name": "sklearn.metrics.roc_curve",
"line_number": 317,
"usage_type": "call"
},
{
"api_name": "sklearn.model_selection.train_test_split",
"line_number": 325,
"usage_type": "call"
},
{
"api_name": "sklearn.decomposition.PCA",
"line_number": 358,
"usage_type": "call"
},
{
"api_name": "sklearn.ensemble.RandomForestClassifier",
"line_number": 369,
"usage_type": "call"
},
{
"api_name": "sklearn.metrics.roc_auc_score",
"line_number": 376,
"usage_type": "call"
},
{
"api_name": "sklearn.metrics.roc_curve",
"line_number": 380,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 386,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 386,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 387,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 387,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xlim",
"line_number": 388,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 388,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.ylim",
"line_number": 389,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 389,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xlabel",
"line_number": 390,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 390,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.ylabel",
"line_number": 391,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 391,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.title",
"line_number": 392,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 392,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.rc",
"line_number": 393,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 393,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.rc",
"line_number": 394,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 394,
"usage_type": "name"
},
{
"api_name": "matplotlib.patches.Patch",
"line_number": 404,
"usage_type": "call"
},
{
"api_name": "matplotlib.patches",
"line_number": 404,
"usage_type": "name"
},
{
"api_name": "matplotlib.patches.Patch",
"line_number": 405,
"usage_type": "call"
},
{
"api_name": "matplotlib.patches",
"line_number": 405,
"usage_type": "name"
},
{
"api_name": "matplotlib.patches.Patch",
"line_number": 406,
"usage_type": "call"
},
{
"api_name": "matplotlib.patches",
"line_number": 406,
"usage_type": "name"
},
{
"api_name": "statistics.mean",
"line_number": 406,
"usage_type": "call"
},
{
"api_name": "matplotlib.patches.Patch",
"line_number": 407,
"usage_type": "call"
},
{
"api_name": "matplotlib.patches",
"line_number": 407,
"usage_type": "name"
},
{
"api_name": "statistics.stdev",
"line_number": 407,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.legend",
"line_number": 408,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 408,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 414,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 414,
"usage_type": "name"
},
{
"api_name": "statistics.mean",
"line_number": 422,
"usage_type": "call"
},
{
"api_name": "statistics.stdev",
"line_number": 422,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"line_number": 433,
"usage_type": "call"
},
{
"api_name": "numpy.asarray",
"line_number": 440,
"usage_type": "call"
},
{
"api_name": "numpy.asarray",
"line_number": 441,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 443,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 443,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 445,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 445,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 446,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 446,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.fill_between",
"line_number": 447,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 447,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xlim",
"line_number": 448,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 448,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.ylim",
"line_number": 449,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 449,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xlabel",
"line_number": 450,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 450,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.ylabel",
"line_number": 451,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 451,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.title",
"line_number": 452,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 452,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.rc",
"line_number": 453,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 453,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.rc",
"line_number": 454,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 454,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 456,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 456,
"usage_type": "name"
},
{
"api_name": "numpy.arange",
"line_number": 464,
"usage_type": "call"
},
{
"api_name": "numpy.asarray",
"line_number": 471,
"usage_type": "call"
},
{
"api_name": "numpy.asarray",
"line_number": 472,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 474,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 474,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 476,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 476,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 477,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 477,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.fill_between",
"line_number": 478,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 478,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xlim",
"line_number": 479,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 479,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.ylim",
"line_number": 480,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 480,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xlabel",
"line_number": 481,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 481,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.ylabel",
"line_number": 482,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 482,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.title",
"line_number": 483,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 483,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.rc",
"line_number": 484,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 484,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.rc",
"line_number": 485,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 485,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 487,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 487,
"usage_type": "name"
},
{
"api_name": "numpy.arange",
"line_number": 495,
"usage_type": "call"
},
{
"api_name": "numpy.asarray",
"line_number": 502,
"usage_type": "call"
},
{
"api_name": "numpy.asarray",
"line_number": 503,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 505,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 505,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 507,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 507,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 508,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 508,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.fill_between",
"line_number": 509,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 509,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xlim",
"line_number": 510,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 510,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.ylim",
"line_number": 511,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 511,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xlabel",
"line_number": 512,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 512,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.ylabel",
"line_number": 513,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 513,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.title",
"line_number": 514,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 514,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.rc",
"line_number": 515,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 515,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.rc",
"line_number": 516,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 516,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 518,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 518,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 530,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 530,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 531,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 531,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 532,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 532,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xlim",
"line_number": 533,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 533,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.ylim",
"line_number": 534,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 534,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xlabel",
"line_number": 535,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 535,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.ylabel",
"line_number": 536,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 536,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.title",
"line_number": 537,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 537,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.rc",
"line_number": 538,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 538,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.rc",
"line_number": 539,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 539,
"usage_type": "name"
},
{
"api_name": "matplotlib.patches.Patch",
"line_number": 541,
"usage_type": "call"
},
{
"api_name": "matplotlib.patches",
"line_number": 541,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.legend",
"line_number": 542,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 542,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 544,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 544,
"usage_type": "name"
}
] |
61399322
|
from cms.apps.admin.permission import site_permission_required, super_user_permission
from flask import request, render_template, redirect, url_for, jsonify, \
make_response
from babel.messages.catalog import Catalog
from babel.messages.pofile import write_po
from StringIO import StringIO
import datetime
from cms.models import site
from cms.ext import db
from cms.apps.admin import admin
from flask.ext.login import login_required
@admin.route('/sites', defaults={'page': 1}, methods=['GET'])
@admin.route('/sites/page/<int:page>', methods=['GET'])
@super_user_permission.require(403)
def get_all_sites(page):
q = site.Site.query
return render_template('sites/index.j2',
pagination=q.paginate(page, 20))
@admin.route('/sites/<int:id>/edit', methods=['GET'])
@admin.route('/sites/new', defaults={'id': -1}, methods=['GET'])
@super_user_permission.require(403)
def get_or_new_site(id):
entity = None
if id > 0:
entity = site.Site.query.get_or_404(id)
return render_template('sites/edit.j2', site=entity)
@admin.route('/sites/<int:id>', methods=['GET'])
@site_permission_required('id')
def site_dashboard(id):
e = site.Site.query.get_or_404(id)
return render_template('sites/index.j2',
site=e)
@admin.route('/sites/<int:id>/modules')
@site_permission_required('id')
def get_modules(id):
m = site.Site.query.get_or_404(id).modules
return jsonify(dict(data=[x.to_tree_dict() for x in m]))
@admin.route('/sites', defaults={'id': -1}, methods=['POST'])
@admin.route('/sites/<int:id>', methods=['POST'])
@super_user_permission.require(403)
def save_site(id):
entity = site.Site(
key=request.form['key'],
name=request.form['name'],
update_time=datetime.datetime.now()
)
if id > 0:
entity.id = id
db.session.merge(entity)
else:
db.session.add(entity)
db.session.commit()
return redirect(url_for('.get_or_new_site', id=entity.id))
# '/sites/<int:id>' with GET is already mapped to site_dashboard, so the delete
# view needs its own URL to be reachable
@admin.route('/sites/<int:id>/delete', methods=['GET'])
@super_user_permission.require(403)
def delete_site(id):
entity = site.Site.query.get_or_404(id)
db.session.delete(entity)
db.session.commit()
return redirect(url_for('.get_all_sites'))
def is_i18n(f):
return 'i18n' in f.field_config_dict\
and f.field_config_dict['i18n'] is True
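# A hedged illustration of the field configuration is_i18n() expects; the exact
# schema of field_config_dict is defined elsewhere in this codebase, so treat
# this shape as an assumption:
#   field.field_config_dict == {"i18n": True, "type": "text"}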
@admin.route('/sites/<int:id>/po', methods=['GET'])
@site_permission_required("id")
def export_po(id):
site_entity = site.Site.query.get_or_404(id)
modules = site_entity.modules.all()
catalog = Catalog(project=site_entity.name,
version='1.0',
msgid_bugs_address='[email protected]',
charset='utf8')
for m in modules:
i18n_fields = filter(is_i18n, m.fields.all())
for data_item in m.items.all():
for f in i18n_fields:
catalog.add(data_item.value_dict[f.key],
None,
[('%s:%s' % (m.name, f.name), data_item.id)])
out = StringIO('')
write_po(out, catalog)
out.seek(0)
resp = make_response(out.read())
resp.headers['Content-Type'] = 'text/x-gettext-translation'
return resp
| null |
cms/apps/admin/views/sites.py
|
sites.py
|
py
| 3,247 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "cms.models.site.Site",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "cms.models.site",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "flask.render_template",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "cms.apps.admin.admin.route",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "cms.apps.admin.admin",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "cms.apps.admin.admin.route",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "cms.apps.admin.admin",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "cms.apps.admin.permission.super_user_permission.require",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "cms.apps.admin.permission.super_user_permission",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "cms.models.site.Site.query.get_or_404",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "cms.models.site.Site",
"line_number": 30,
"usage_type": "attribute"
},
{
"api_name": "cms.models.site",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "flask.render_template",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "cms.apps.admin.admin.route",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "cms.apps.admin.admin",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "cms.apps.admin.admin.route",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "cms.apps.admin.admin",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "cms.apps.admin.permission.super_user_permission.require",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "cms.apps.admin.permission.super_user_permission",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "cms.models.site.Site.query.get_or_404",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "cms.models.site.Site",
"line_number": 37,
"usage_type": "attribute"
},
{
"api_name": "cms.models.site",
"line_number": 37,
"usage_type": "name"
},
{
"api_name": "flask.render_template",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "cms.apps.admin.admin.route",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "cms.apps.admin.admin",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "cms.apps.admin.permission.site_permission_required",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "cms.models.site.Site.query.get_or_404",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "cms.models.site.Site",
"line_number": 45,
"usage_type": "attribute"
},
{
"api_name": "cms.models.site",
"line_number": 45,
"usage_type": "name"
},
{
"api_name": "flask.jsonify",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "cms.apps.admin.admin.route",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "cms.apps.admin.admin",
"line_number": 42,
"usage_type": "name"
},
{
"api_name": "cms.apps.admin.permission.site_permission_required",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "cms.models.site.Site",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "cms.models.site",
"line_number": 53,
"usage_type": "name"
},
{
"api_name": "flask.request.form",
"line_number": 54,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 54,
"usage_type": "name"
},
{
"api_name": "flask.request.form",
"line_number": 55,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 55,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.now",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 56,
"usage_type": "attribute"
},
{
"api_name": "cms.ext.db.session.merge",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "cms.ext.db.session",
"line_number": 60,
"usage_type": "attribute"
},
{
"api_name": "cms.ext.db",
"line_number": 60,
"usage_type": "name"
},
{
"api_name": "cms.ext.db.session.add",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "cms.ext.db.session",
"line_number": 62,
"usage_type": "attribute"
},
{
"api_name": "cms.ext.db",
"line_number": 62,
"usage_type": "name"
},
{
"api_name": "cms.ext.db.session.commit",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "cms.ext.db.session",
"line_number": 63,
"usage_type": "attribute"
},
{
"api_name": "cms.ext.db",
"line_number": 63,
"usage_type": "name"
},
{
"api_name": "flask.redirect",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "flask.url_for",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "cms.apps.admin.admin.route",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "cms.apps.admin.admin",
"line_number": 49,
"usage_type": "name"
},
{
"api_name": "cms.apps.admin.admin.route",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "cms.apps.admin.admin",
"line_number": 50,
"usage_type": "name"
},
{
"api_name": "cms.apps.admin.permission.super_user_permission.require",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "cms.apps.admin.permission.super_user_permission",
"line_number": 51,
"usage_type": "name"
},
{
"api_name": "cms.models.site.Site.query.get_or_404",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "cms.models.site.Site",
"line_number": 70,
"usage_type": "attribute"
},
{
"api_name": "cms.models.site",
"line_number": 70,
"usage_type": "name"
},
{
"api_name": "cms.ext.db.session.delete",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "cms.ext.db.session",
"line_number": 71,
"usage_type": "attribute"
},
{
"api_name": "cms.ext.db",
"line_number": 71,
"usage_type": "name"
},
{
"api_name": "cms.ext.db.session.commit",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "cms.ext.db.session",
"line_number": 72,
"usage_type": "attribute"
},
{
"api_name": "cms.ext.db",
"line_number": 72,
"usage_type": "name"
},
{
"api_name": "flask.redirect",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "flask.url_for",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "cms.apps.admin.admin.route",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "cms.apps.admin.admin",
"line_number": 67,
"usage_type": "name"
},
{
"api_name": "cms.apps.admin.permission.super_user_permission.require",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "cms.apps.admin.permission.super_user_permission",
"line_number": 68,
"usage_type": "name"
},
{
"api_name": "cms.models.site.Site.query.get_or_404",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "cms.models.site.Site",
"line_number": 84,
"usage_type": "attribute"
},
{
"api_name": "cms.models.site",
"line_number": 84,
"usage_type": "name"
},
{
"api_name": "babel.messages.catalog.Catalog",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "StringIO.StringIO",
"line_number": 99,
"usage_type": "call"
},
{
"api_name": "babel.messages.pofile.write_po",
"line_number": 100,
"usage_type": "call"
},
{
"api_name": "flask.make_response",
"line_number": 103,
"usage_type": "call"
},
{
"api_name": "cms.apps.admin.admin.route",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "cms.apps.admin.admin",
"line_number": 81,
"usage_type": "name"
},
{
"api_name": "cms.apps.admin.permission.site_permission_required",
"line_number": 82,
"usage_type": "call"
}
] |
51905365
|
from django.urls import reverse
from django.conf import settings
def context(request):
""" This function adds some app-specific values to the django template context """
claims = request.identity_context_data._id_token_claims
exclude_claims = ['iat', 'exp', 'nbf', 'uti', 'aio', 'rh']
claims_to_display = {claim: value for claim, value in claims.items() if claim not in exclude_claims}
    client_id = settings.AAD_CONFIG.client.client_id
    aad_link = ("https://portal.azure.com/#blade/Microsoft_AAD_RegisteredApps/"
                "ApplicationMenuBlade/Authentication/appId/" + client_id + "/isMSAApp/")
    return dict(claims_to_display=claims_to_display,
                redirect_uri_external_link=request.build_absolute_uri(
                    reverse(settings.AAD_CONFIG.django.auth_endpoints.redirect)),
                aad_link=aad_link)
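# For this processor to run, it has to be registered in the Django settings; a
# minimal sketch (the dotted path below is an assumption based on this sample's
# layout):
#
# TEMPLATES = [{
#     'BACKEND': 'django.template.backends.django.DjangoTemplates',
#     'OPTIONS': {
#         'context_processors': [
#             'django.template.context_processors.request',
#             'Sample.context_processors.context',  # hypothetical dotted path
#         ],
#     },
# }]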
| null |
1-Authentication/sign-in-b2c/Sample/context_processors.py
|
context_processors.py
|
py
| 827 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "django.conf.settings.AAD_CONFIG",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "django.conf.settings",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "django.urls.reverse",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "django.conf.settings.AAD_CONFIG",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "django.conf.settings",
"line_number": 14,
"usage_type": "name"
}
] |
463164967
|
from neo4j import GraphDatabase
from random import shuffle
def neoArticlesSentencesFetch(chosenLang):
driver = GraphDatabase.driver("bolt://semantiqa.com:7687", auth=("neo4j", "cazzhack"))
def get_sentences(article, language):
with driver.session() as session:
            # Relationship types cannot be Cypher parameters, so the language
            # suffix stays interpolated; the article value is passed as a parameter.
            results = session.run(
                """MATCH (a:Article {{article: $article}}) <-[t:HAS_LANGUAGE_{}]-(s:Sentence) RETURN a,s,t""".format(
                    language), article=article)
nodes = []
for count, record in enumerate(results):
nodes.append(
{"rightAnswer": record["a"]["article"], "sentence": record['s']['text'], "id": count})
return nodes
final_sentences = []
final_sentences.append(get_sentences(article='A', language=chosenLang))
final_sentences.append(get_sentences(article='THE', language=chosenLang))
final_sentences.append(get_sentences(article='(-)', language=chosenLang))
final_sentences.append(get_sentences(article='AN', language=chosenLang))
    flat_sentences_list = [item for sublist in final_sentences for item in sublist]
    # randomizing in place
    shuffle(flat_sentences_list)
    driver.close()  # release the Bolt connections before returning
    return flat_sentences_list
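# A minimal usage sketch (the language code 'EN' is an assumption; valid values
# depend on which HAS_LANGUAGE_* relationship types exist in the Neo4j graph):
# sentences = neoArticlesSentencesFetch('EN')
# print(sentences[0])  # e.g. {'rightAnswer': 'THE', 'sentence': '...', 'id': 3}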
| null |
views_utils/neoArticlesSentencesFetch.py
|
neoArticlesSentencesFetch.py
|
py
| 1,271 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "neo4j.GraphDatabase.driver",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "neo4j.GraphDatabase",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "random.shuffle",
"line_number": 31,
"usage_type": "call"
}
] |
455581979
|
import os
import os.path as osp
import json
import torch
import pandas
import numpy as np
from dateutil.parser import parse, parserinfo
from torch_sparse import coalesce
from torch_geometric.data import (Data, InMemoryDataset, download_url,
                                  extract_gz, extract_tar, extract_zip)
from torch_geometric.data.makedirs import makedirs
from torch_geometric.utils import to_undirected
def read_ca_web_amazon_p2p_roadnet(files, name):
edge_index = pandas.read_csv(files[0], sep='\t', header=None,
skiprows=4)
edge_index = torch.from_numpy(edge_index.to_numpy()).t()
idx_assoc = {}
for i, j in enumerate(torch.unique(edge_index).tolist()):
idx_assoc[j] = i
edge_index = edge_index.flatten()
for i, e in enumerate(edge_index.tolist()):
edge_index[i] = idx_assoc[e]
edge_index = edge_index.view(2, -1)
num_nodes = edge_index.max() + 1
if 'ca-' in name:
edge_index = to_undirected(edge_index, num_nodes)
return [Data(edge_index=edge_index, num_nodes=num_nodes)]
def read_cit(files, name):
if name == 'patents':
edge_file = files[0]
x = None
elif name == 'hepph' or name == 'hepth':
edge_file = files[1]
d = pandas.read_csv(files[0], sep='\t', header=None, skiprows=1)
idx, x = d[[0]], d[[1]]
# parse date and calculate difference in days to oldest date
x = x.to_numpy().flatten().tolist()
for i in range(len(x)):
x[i] = parse(x[i], parserinfo(yearfirst=True))
oldest_date = min(x)
for i in range(len(x)):
x[i] = (x[i] - oldest_date).days
x = torch.tensor(x)
idx = torch.from_numpy(idx.to_numpy()).flatten()
idx_assoc = {}
for i, j in enumerate(idx.tolist()):
idx_assoc[j] = i
edge_index = pandas.read_csv(edge_file, sep='\t', header=None,
skiprows=4)
edge_index = torch.from_numpy(edge_index.to_numpy()).t()
if name == 'patents':
idx_assoc = {}
for i, j in enumerate(torch.unique(edge_index).tolist()):
idx_assoc[j] = i
edge_index = edge_index.flatten()
for i, e in enumerate(edge_index.tolist()):
try:
edge_index[i] = idx_assoc[e]
# handle nodes, which don't have features
except KeyError:
max_assoc = max(list(idx_assoc.values()))
idx_assoc[e] = max_assoc + 1
edge_index[i] = idx_assoc[e]
x = torch.cat((x, torch.tensor([-1])))
edge_index = edge_index.view(2, -1)
num_nodes = edge_index.max() + 1
return [Data(x=x, edge_index=edge_index, num_nodes=num_nodes)]
def read_email(files, name):
if name == 'eu-core':
y = pandas.read_csv(files[0], sep=' ', header=None)
y = torch.from_numpy(y.to_numpy())
y = y[:, 1].to(torch.long)
edge_index = pandas.read_csv(files[1], sep=' ', header=None)
edge_index = torch.from_numpy(edge_index.to_numpy()).t()
assert torch.eq(torch.unique(edge_index.flatten()),
torch.arange(0, edge_index.max() + 1)).all()
num_nodes = edge_index.max() + 1
return [Data(edge_index=edge_index, num_nodes=num_nodes, y=y)]
elif name == 'enron' or name == 'euall':
edge_index = pandas.read_csv(files[0], sep='\t', header=None,
skiprows=4)
edge_index = torch.from_numpy(edge_index.to_numpy()).t()
assert torch.eq(torch.unique(edge_index.flatten()),
torch.arange(0, edge_index.max() + 1)).all()
num_nodes = edge_index.max() + 1
if name == 'enron':
edge_index = to_undirected(edge_index, num_nodes)
return [Data(edge_index=edge_index, num_nodes=num_nodes)]
def read_com(files, name):
for file in files:
if '.ungraph' in file:
edge_index = pandas.read_csv(file, sep='\t', header=None,
skiprows=4)
edge_index = torch.from_numpy(edge_index.to_numpy()).t()
# there are multiple duplicated edges
idx_assoc = {}
for i, j in enumerate(torch.unique(edge_index).tolist()):
idx_assoc[j] = i
edge_index = edge_index.flatten()
for i, e in enumerate(edge_index.tolist()):
edge_index[i] = idx_assoc[e]
edge_index = edge_index.view(2, -1)
num_nodes = edge_index.max() + 1
edge_index = to_undirected(edge_index, num_nodes)
for file in files:
if '.all' in file:
communities = []
communities_batch = []
with open(file, 'r') as f:
for i, com in enumerate(f.read().split('\n')[:-1]):
com = [idx_assoc[int(c)] for c in com.split()]
communities += com
communities_batch += [i] * len(com)
communities = torch.tensor(communities)
communities_batch = torch.tensor(communities_batch)
data = Data(edge_index=edge_index, num_nodes=num_nodes,
communities=communities, communities_batch=communities_batch)
return [data]
class EgoData(Data):
def __inc__(self, key, item):
if key == 'circle':
return self.num_nodes
elif key == 'circle_batch':
return item.max().item() + 1 if item.numel() > 0 else 0
else:
return super(EgoData, self).__inc__(key, item)
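# Why EgoData overrides __inc__: when several ego graphs are merged into one
# batch, node indices stored in `circle` must be shifted by the number of nodes
# already in the batch, and `circle_batch` ids by the number of circles seen so
# far. A hedged sketch (assuming torch_geometric's Batch API):
#
# from torch_geometric.data import Batch
# batch = Batch.from_data_list([ego_a, ego_b])  # ego_a, ego_b: EgoData objects
# # batch.circle now indexes into the concatenated node set of both graphs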
def read_ego(files, name):
all_featnames = []
for i in range(4, len(files), 5):
featnames_file = files[i]
with open(featnames_file, 'r') as f:
featnames = f.read().split('\n')[:-1]
featnames = [' '.join(x.split(' ')[1:]) for x in featnames]
all_featnames += featnames
all_featnames = sorted(list(set(all_featnames)))
all_featnames = {key: i for i, key in enumerate(all_featnames)}
data_list = []
for i in range(0, len(files), 5):
circles_file = files[i]
edges_file = files[i + 1]
egofeat_file = files[i + 2]
feat_file = files[i + 3]
featnames_file = files[i + 4]
x = pandas.read_csv(feat_file, sep=' ', header=None, dtype=np.float32)
x = torch.from_numpy(x.values)
idx, x = x[:, 0].to(torch.long), x[:, 1:].to(torch.float)
idx_assoc = {}
for i, j in enumerate(idx.tolist()):
idx_assoc[j] = i
circles = []
circles_batch = []
with open(circles_file, 'r') as f:
for i, circle in enumerate(f.read().split('\n')[:-1]):
circle = [int(idx_assoc[int(c)]) for c in circle.split()[1:]]
circles += circle
circles_batch += [i] * len(circle)
circle = torch.tensor(circles)
circle_batch = torch.tensor(circles_batch)
edge_index = pandas.read_csv(edges_file, sep=' ', header=None,
dtype=np.int64)
edge_index = torch.from_numpy(edge_index.values).t()
edge_index = edge_index.flatten()
for i, e in enumerate(edge_index.tolist()):
edge_index[i] = idx_assoc[e]
edge_index = edge_index.view(2, -1)
if name == 'facebook':
# undirected edges
edge_index = to_undirected(edge_index, x.size(0))
row, col = edge_index
x_ego = pandas.read_csv(egofeat_file, sep=' ', header=None,
dtype=np.float32)
x_ego = torch.from_numpy(x_ego.values)
row_ego = torch.full((x.size(0), ), x.size(0), dtype=torch.long)
col_ego = torch.arange(x.size(0))
# Ego node should be connected to every other node.
row = torch.cat([row, row_ego, col_ego], dim=0)
col = torch.cat([col, col_ego, row_ego], dim=0)
edge_index = torch.stack([row, col], dim=0)
x = torch.cat([x, x_ego], dim=0)
# Reorder `x` according to `featnames` ordering.
x_all = torch.zeros(x.size(0), len(all_featnames))
with open(featnames_file, 'r') as f:
featnames = f.read().split('\n')[:-1]
featnames = [' '.join(x.split(' ')[1:]) for x in featnames]
indices = [all_featnames[featname] for featname in featnames]
x_all[:, torch.tensor(indices)] = x
edge_index, _ = coalesce(edge_index, None, x.size(0), x.size(0))
data = Data(x=x_all, edge_index=edge_index, circle=circle,
circle_batch=circle_batch)
data_list.append(data)
return data_list
def read_soc(files, name):
if 'sign-bitcoin-' in name:
d = pandas.read_csv(files[0], header=None, skiprows=0)
edge_index = torch.from_numpy(d[[0, 1]].to_numpy()).t()
edge_attr = torch.from_numpy(d[[2, 3]].to_numpy())
idx = torch.unique(edge_index.flatten())
idx_assoc = torch.full((edge_index.max() + 1, ), -1, dtype=torch.long)
idx_assoc[idx] = torch.arange(idx.size(0))
edge_index = idx_assoc[edge_index]
num_nodes = edge_index.max().item() + 1
return [Data(edge_index=edge_index,
edge_attr=edge_attr,
num_nodes=num_nodes)]
else:
skiprows = 4
if name == 'pokec':
skiprows = 0
edge_index = pandas.read_csv(files[0], sep='\t', header=None,
skiprows=skiprows, dtype=np.int64)
edge_index = torch.from_numpy(edge_index.values).t()
num_nodes = edge_index.max().item() + 1
edge_index, _ = coalesce(edge_index, None, num_nodes, num_nodes)
return [Data(edge_index=edge_index, num_nodes=num_nodes)]
def read_wiki(files, name):
if name == 'vote' or name == 'talk':
edge_file = files[0]
skiprows = 4
sep = '\t'
elif name == 'topcats':
edge_file = files[1]
skiprows = 0
sep = ' '
edge_index = pandas.read_csv(edge_file, sep=sep, header=None,
skiprows=skiprows, dtype=np.int64)
edge_index = torch.from_numpy(edge_index.values).t()
idx = torch.unique(edge_index.flatten())
idx_assoc = torch.full((edge_index.max() + 1, ), -1, dtype=torch.long)
idx_assoc[idx] = torch.arange(idx.size(0))
edge_index = idx_assoc[edge_index]
num_nodes = edge_index.max().item() + 1
edge_index, _ = coalesce(edge_index, None, num_nodes, num_nodes)
if name == 'topcats':
cat_file = files[0]
with open(cat_file, 'r') as f:
lines = f.readlines()
categories = []
categories_batch = []
for i, line in enumerate(lines):
category = [int(idx_assoc[int(c)]) for c in line.split()[1:]]
categories += category
categories_batch += [i] * len(category)
categories = torch.tensor(categories)
categories_batch = torch.tensor(categories_batch)
return [Data(edge_index=edge_index, num_nodes=num_nodes,
categories=categories, categories_batch=categories_batch)]
else:
return [Data(edge_index=edge_index, num_nodes=num_nodes)]
def read_gemsec(files, name):
data_list = []
for file in files:
if 'edges' in file:
edge_index = pandas.read_csv(file, header=None, skiprows=1)
edge_index = torch.from_numpy(edge_index.to_numpy()).t()
assert torch.eq(torch.unique(edge_index.flatten()),
torch.arange(0, edge_index.max() + 1)).all()
num_nodes = edge_index.max().item() + 1
# undirected edges
edge_index = to_undirected(edge_index, num_nodes)
edge_index, _ = coalesce(edge_index, None, num_nodes, num_nodes)
data = Data(edge_index=edge_index, num_nodes=num_nodes)
data_list.append(data)
return data_list
def read_musae(paths, name):
if name == 'twitch':
data_list = []
for path in paths:
if osp.isdir(path):
_paths = [osp.join(path, file) for file in os.listdir(path)]
data = read_musae_helper(_paths, name)
data_list.append(data)
return data_list
else:
data = read_musae_helper(paths, name)
return [data]
def read_musae_helper(paths, name):
x, edge_index, y, num_nodes = None, None, None, None
for file in paths:
if 'target' in file:
            # `file` here is already a full path (see read_musae)
            y = pandas.read_csv(file, header=None, skiprows=1)
# drop columns with string attribute
if name == 'github':
y = y.drop([1], axis=1)
if name == 'facebook':
y = y.drop([2, 3], axis=1)
            y = torch.from_numpy(y.to_numpy(dtype=np.int64))
elif 'edges' in file:
            edge_index = pandas.read_csv(file, header=None, skiprows=1)
edge_index = torch.from_numpy(edge_index.to_numpy()).t()
assert torch.eq(torch.unique(edge_index.flatten()),
torch.arange(0, edge_index.max() + 1)).all()
num_nodes = edge_index.max() + 1
# undirected edges
edge_index = to_undirected(edge_index, num_nodes)
        elif file.endswith('.json'):
            with open(file) as fp:
                feat_dict = json.load(fp)  # node id (str) -> list of feature ids
            if name == 'twitch':
                num_features = 3170
                x = torch.zeros(len(feat_dict), num_features)
                for i in range(len(feat_dict)):
                    for feat in feat_dict[str(i)]:
                        x[i][feat] = 1
            else:
                features = np.unique(np.asarray(
                    [i for _list in list(feat_dict.values())
                     for i in _list]))
                f_assoc = {}
                for i, j in enumerate(features):
                    f_assoc[j] = i
                num_features = len(features)
                x = torch.zeros(len(feat_dict), num_features)
                for i in range(len(feat_dict)):
                    for feat in feat_dict[str(i)]:
                        x[i][f_assoc[feat]] = 1
    return Data(x=x, edge_index=edge_index, y=y, num_nodes=num_nodes)
class SNAPDataset(InMemoryDataset):
r"""A variety of graph datasets collected from `SNAP at Stanford University
<https://snap.stanford.edu/data>`_.
Args:
root (string): Root directory where the dataset should be saved.
name (string): The name of the dataset.
transform (callable, optional): A function/transform that takes in an
:obj:`torch_geometric.data.Data` object and returns a transformed
version. The data object will be transformed before every access.
(default: :obj:`None`)
pre_transform (callable, optional): A function/transform that takes in
an :obj:`torch_geometric.data.Data` object and returns a
transformed version. The data object will be transformed before
being saved to disk. (default: :obj:`None`)
pre_filter (callable, optional): A function that takes in an
:obj:`torch_geometric.data.Data` object and returns a boolean
value, indicating whether the data object should be included in the
final dataset. (default: :obj:`None`)
"""
url = 'https://snap.stanford.edu/data'
available_datasets = {
'ego-facebook': ['facebook.tar.gz'],
'ego-gplus': ['gplus.tar.gz'],
'ego-twitter': ['twitter.tar.gz'],
'soc-epinions1': ['soc-Epinions1.txt.gz'],
'soc-livejournal1': ['soc-LiveJournal1.txt.gz'],
'soc-pokec': ['soc-pokec-relationships.txt.gz'],
'soc-slashdot0811': ['soc-Slashdot0811.txt.gz'],
'soc-slashdot0922': ['soc-Slashdot0902.txt.gz'],
'soc-sign-bitcoin-otc': ['soc-sign-bitcoinotc.csv.gz'],
'soc-sign-bitcoin-alpha': ['soc-sign-bitcoinalpha.csv.gz'],
'wiki-vote': ['wiki-Vote.txt.gz'],
'wiki-topcats': ['wiki-topcats.txt.gz',
'wiki-topcats-categories.txt.gz'],
'wiki-talk': ['wiki-Talk.txt.gz'],
'gemsec-deezer': ['gemsec_deezer_dataset.tar.gz'],
'gemsec-facebook': ['gemsec_facebook_dataset.tar.gz'],
'musae-twitch': ['twitch.zip'],
'musae-facebook': ['facebook_large.zip'],
'musae-github': ['git_web_ml.zip'],
'com-livejournal': ['com-lj.ungraph.txt.gz',
'com-lj.all.cmty.txt.gz'],
'com-friendster': ['com-friendster.ungraph.txt.gz',
'com-friendster.all.cmty.txt.gz'],
'com-orkut': ['com-orkut.ungraph.txt.gz',
'com-orkut.all.cmty.txt.gz'],
'com-youtube': ['com-youtube.ungraph.txt.gz',
'com-youtube.all.cmty.txt.gz'],
'com-dblp': ['com-dblp.ungraph.txt.gz',
'com-dblp.all.cmty.txt.gz'],
'com-amazon': ['com-amazon.ungraph.txt.gz',
'com-amazon.all.cmty.txt.gz'],
'email-eu-core': ['email-Eu-core.txt.gz',
'email-Eu-core-department-labels.txt.gz'],
'email-euall': ['email-EuAll.txt.gz'],
'email-enron': ['email-Enron.txt.gz'],
'cit-hepph': ['cit-HepPh.txt.gz',
'cit-HepPh-dates.txt.gz'],
'cit-hepth': ['cit-HepTh.txt.gz',
'cit-HepTh-dates.txt.gz'],
'cit-patents': ['cit-Patents.txt.gz'],
'ca-astroph': ['ca-AstroPh.txt.gz'],
'ca-condmat': ['ca-CondMat.txt.gz'],
'ca-grqc': ['ca-GrQc.txt.gz'],
'ca-hepph': ['ca-HepPh.txt.gz'],
'ca-hepth': ['ca-HepTh.txt.gz'],
'web-berkstan': ['web-BerkStan.txt.gz'],
'web-google': ['web-Google.txt.gz'],
'web-notredame': ['web-NotreDame.txt.gz'],
'web-stanford': ['web-Stanford.txt.gz'],
'amazon-0302': ['amazon0302.txt.gz'],
'amazon-0312': ['amazon0312.txt.gz'],
'amazon-0505': ['amazon0505.txt.gz'],
'amazon-0601': ['amazon0601.txt.gz'],
'p2p-gnutella04': ['p2p-Gnutella04.txt.gz'],
'p2p-gnutella05': ['p2p-Gnutella05.txt.gz'],
'p2p-gnutella06': ['p2p-Gnutella06.txt.gz'],
'p2p-gnutella08': ['p2p-Gnutella08.txt.gz'],
'p2p-gnutella09': ['p2p-Gnutella09.txt.gz'],
'p2p-gnutella24': ['p2p-Gnutella24.txt.gz'],
'p2p-gnutella25': ['p2p-Gnutella25.txt.gz'],
'p2p-gnutella30': ['p2p-Gnutella30.txt.gz'],
'p2p-gnutella31': ['p2p-Gnutella31.txt.gz'],
'roadnet-ca': ['roadNet-CA.txt.gz'],
'roadnet-pa': ['roadNet-PA.txt.gz'],
'roadnet-tx': ['roadNet-TX.txt.gz'],
}
big_datasets = ['com-livejournal',
'com-friendster',
'com-orkut',
'com-youtube',
'com-dblp',
'com-amazon']
def __init__(self, root, name, transform=None, pre_transform=None,
pre_filter=None):
self.name = name.lower()
assert self.name in self.available_datasets.keys()
if self.name in self.big_datasets:
self.url = 'https://snap.stanford.edu/data/bigdata/communities/'
super(SNAPDataset, self).__init__(root, transform, pre_transform,
pre_filter)
self.data, self.slices = torch.load(self.processed_paths[0])
@property
def raw_dir(self):
return osp.join(self.root, self.name, 'raw')
@property
def processed_dir(self):
return osp.join(self.root, self.name, 'processed')
@property
def processed_file_names(self):
return 'data.pt'
def _download(self):
if osp.isdir(self.raw_dir) and len(os.listdir(self.raw_dir)) > 0:
return
makedirs(self.raw_dir)
self.download()
def download(self):
for name in self.available_datasets[self.name]:
path = download_url('{}/{}'.format(self.url, name), self.raw_dir)
print(path)
if name.endswith('.tar.gz'):
extract_tar(path, self.raw_dir)
elif name.endswith('.gz'):
extract_gz(path, self.raw_dir)
elif name.endswith('.zip'):
extract_zip(path, self.raw_dir)
os.unlink(path)
def process(self):
raw_dir = self.raw_dir
filenames = os.listdir(self.raw_dir)
if len(filenames) == 1 and osp.isdir(osp.join(raw_dir, filenames[0])):
raw_dir = osp.join(raw_dir, filenames[0])
raw_files = sorted([osp.join(raw_dir, f) for f in os.listdir(raw_dir)])
print('Raw Files:', raw_files, '\n')
if self.name[:4] == 'ego-':
data_list = read_ego(raw_files, self.name[4:])
elif self.name[:4] == 'soc-':
data_list = read_soc(raw_files, self.name[4:])
elif self.name[:5] == 'wiki-':
data_list = read_wiki(raw_files, self.name[5:])
elif self.name[:7] == 'gemsec-':
data_list = read_gemsec(raw_files, self.name[7:])
elif self.name[:6] == 'musae-':
data_list = read_musae(raw_files, self.name[6:])
elif self.name[:4] == 'com-':
data_list = read_com(raw_files, self.name[4:])
elif self.name[:6] == 'email-':
data_list = read_email(raw_files, self.name[6:])
elif self.name[:4] == 'cit-':
data_list = read_cit(raw_files, self.name[4:])
elif self.name[:3] == 'ca-':
data_list = read_ca_web_amazon_p2p_roadnet(raw_files, self.name)
elif self.name[:4] == 'web-':
data_list = read_ca_web_amazon_p2p_roadnet(raw_files, self.name)
elif self.name[:7] == 'amazon-':
data_list = read_ca_web_amazon_p2p_roadnet(raw_files, self.name)
elif self.name[:4] == 'p2p-':
data_list = read_ca_web_amazon_p2p_roadnet(raw_files, self.name)
elif self.name[:8] == 'roadnet-':
data_list = read_ca_web_amazon_p2p_roadnet(raw_files, self.name)
else:
raise NotImplementedError
if len(data_list) > 1 and self.pre_filter is not None:
data_list = [data for data in data_list if self.pre_filter(data)]
if self.pre_transform is not None:
data_list = [self.pre_transform(data) for data in data_list]
torch.save(self.collate(data_list), self.processed_paths[0])
def __repr__(self):
return 'SNAP-{}({})'.format(self.name, len(self))
if __name__ == '__main__':
dataset_name = 'roadNet-TX'
path = osp.join(osp.dirname(osp.realpath(__file__)),
'..', '..', 'data', dataset_name)
dataset = SNAPDataset(path, dataset_name)
data = dataset[0]
print(data)
| null |
torch_geometric/datasets/snap_dataset.py
|
snap_dataset.py
|
py
| 22,939 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "pandas.read_csv",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "torch.from_numpy",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "torch.unique",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "torch_geometric.utils.to_undirected",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "torch_geometric.data.Data",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "dateutil.parser.parse",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "dateutil.parser.parserinfo",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "torch.tensor",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "torch.from_numpy",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "torch.from_numpy",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "torch.unique",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "torch.cat",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "torch.tensor",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "torch_geometric.data.Data",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "torch.from_numpy",
"line_number": 89,
"usage_type": "call"
},
{
"api_name": "torch.long",
"line_number": 90,
"usage_type": "attribute"
},
{
"api_name": "pandas.read_csv",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "torch.from_numpy",
"line_number": 93,
"usage_type": "call"
},
{
"api_name": "torch.eq",
"line_number": 95,
"usage_type": "call"
},
{
"api_name": "torch.unique",
"line_number": 95,
"usage_type": "call"
},
{
"api_name": "torch.arange",
"line_number": 96,
"usage_type": "call"
},
{
"api_name": "torch_geometric.data.Data",
"line_number": 99,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 102,
"usage_type": "call"
},
{
"api_name": "torch.from_numpy",
"line_number": 104,
"usage_type": "call"
},
{
"api_name": "torch.eq",
"line_number": 106,
"usage_type": "call"
},
{
"api_name": "torch.unique",
"line_number": 106,
"usage_type": "call"
},
{
"api_name": "torch.arange",
"line_number": 107,
"usage_type": "call"
},
{
"api_name": "torch_geometric.utils.to_undirected",
"line_number": 111,
"usage_type": "call"
},
{
"api_name": "torch_geometric.data.Data",
"line_number": 113,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 119,
"usage_type": "call"
},
{
"api_name": "torch.from_numpy",
"line_number": 121,
"usage_type": "call"
},
{
"api_name": "torch.unique",
"line_number": 125,
"usage_type": "call"
},
{
"api_name": "torch_geometric.utils.to_undirected",
"line_number": 133,
"usage_type": "call"
},
{
"api_name": "torch.tensor",
"line_number": 144,
"usage_type": "call"
},
{
"api_name": "torch.tensor",
"line_number": 145,
"usage_type": "call"
},
{
"api_name": "torch_geometric.data.Data",
"line_number": 147,
"usage_type": "call"
},
{
"api_name": "torch_geometric.data.Data",
"line_number": 152,
"usage_type": "name"
},
{
"api_name": "pandas.read_csv",
"line_number": 181,
"usage_type": "call"
},
{
"api_name": "torch.from_numpy",
"line_number": 182,
"usage_type": "call"
},
{
"api_name": "torch.long",
"line_number": 184,
"usage_type": "attribute"
},
{
"api_name": "torch.float",
"line_number": 184,
"usage_type": "attribute"
},
{
"api_name": "torch.tensor",
"line_number": 196,
"usage_type": "call"
},
{
"api_name": "torch.tensor",
"line_number": 197,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 199,
"usage_type": "call"
},
{
"api_name": "torch.from_numpy",
"line_number": 201,
"usage_type": "call"
},
{
"api_name": "torch_geometric.utils.to_undirected",
"line_number": 209,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 213,
"usage_type": "call"
},
{
"api_name": "torch.from_numpy",
"line_number": 215,
"usage_type": "call"
},
{
"api_name": "torch.full",
"line_number": 217,
"usage_type": "call"
},
{
"api_name": "torch.long",
"line_number": 217,
"usage_type": "attribute"
},
{
"api_name": "torch.arange",
"line_number": 218,
"usage_type": "call"
},
{
"api_name": "torch.cat",
"line_number": 221,
"usage_type": "call"
},
{
"api_name": "torch.cat",
"line_number": 222,
"usage_type": "call"
},
{
"api_name": "torch.stack",
"line_number": 223,
"usage_type": "call"
},
{
"api_name": "torch.cat",
"line_number": 225,
"usage_type": "call"
},
{
"api_name": "torch.zeros",
"line_number": 228,
"usage_type": "call"
},
{
"api_name": "torch.tensor",
"line_number": 233,
"usage_type": "call"
},
{
"api_name": "torch_sparse.coalesce",
"line_number": 235,
"usage_type": "call"
},
{
"api_name": "torch_geometric.data.Data",
"line_number": 236,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 246,
"usage_type": "call"
},
{
"api_name": "torch.from_numpy",
"line_number": 247,
"usage_type": "call"
},
{
"api_name": "torch.from_numpy",
"line_number": 248,
"usage_type": "call"
},
{
"api_name": "torch.unique",
"line_number": 250,
"usage_type": "call"
},
{
"api_name": "torch.full",
"line_number": 251,
"usage_type": "call"
},
{
"api_name": "torch.long",
"line_number": 251,
"usage_type": "attribute"
},
{
"api_name": "torch.arange",
"line_number": 252,
"usage_type": "call"
},
{
"api_name": "torch_geometric.data.Data",
"line_number": 257,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 265,
"usage_type": "call"
},
{
"api_name": "torch.from_numpy",
"line_number": 267,
"usage_type": "call"
},
{
"api_name": "torch_sparse.coalesce",
"line_number": 269,
"usage_type": "call"
},
{
"api_name": "torch_geometric.data.Data",
"line_number": 271,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 284,
"usage_type": "call"
},
{
"api_name": "torch.from_numpy",
"line_number": 286,
"usage_type": "call"
},
{
"api_name": "torch.unique",
"line_number": 288,
"usage_type": "call"
},
{
"api_name": "torch.full",
"line_number": 289,
"usage_type": "call"
},
{
"api_name": "torch.long",
"line_number": 289,
"usage_type": "attribute"
},
{
"api_name": "torch.arange",
"line_number": 290,
"usage_type": "call"
},
{
"api_name": "torch_sparse.coalesce",
"line_number": 294,
"usage_type": "call"
},
{
"api_name": "torch.tensor",
"line_number": 308,
"usage_type": "call"
},
{
"api_name": "torch.tensor",
"line_number": 309,
"usage_type": "call"
},
{
"api_name": "torch_geometric.data.Data",
"line_number": 311,
"usage_type": "call"
},
{
"api_name": "torch_geometric.data.Data",
"line_number": 315,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 322,
"usage_type": "call"
},
{
"api_name": "torch.from_numpy",
"line_number": 323,
"usage_type": "call"
},
{
"api_name": "torch.eq",
"line_number": 325,
"usage_type": "call"
},
{
"api_name": "torch.unique",
"line_number": 325,
"usage_type": "call"
},
{
"api_name": "torch.arange",
"line_number": 326,
"usage_type": "call"
},
{
"api_name": "torch_geometric.utils.to_undirected",
"line_number": 330,
"usage_type": "call"
},
{
"api_name": "torch_sparse.coalesce",
"line_number": 332,
"usage_type": "call"
},
{
"api_name": "torch_geometric.data.Data",
"line_number": 333,
"usage_type": "call"
},
{
"api_name": "os.path.isdir",
"line_number": 343,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 343,
"usage_type": "name"
},
{
"api_name": "os.path.join",
"line_number": 344,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 344,
"usage_type": "name"
},
{
"api_name": "os.listdir",
"line_number": 344,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 358,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 358,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 358,
"usage_type": "name"
},
{
"api_name": "torch.from_numpy",
"line_number": 367,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 370,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 370,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 370,
"usage_type": "name"
},
{
"api_name": "torch.from_numpy",
"line_number": 372,
"usage_type": "call"
},
{
"api_name": "torch.eq",
"line_number": 373,
"usage_type": "call"
},
{
"api_name": "torch.unique",
"line_number": 373,
"usage_type": "call"
},
{
"api_name": "torch.arange",
"line_number": 374,
"usage_type": "call"
},
{
"api_name": "torch_geometric.utils.to_undirected",
"line_number": 378,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 381,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 381,
"usage_type": "name"
},
{
"api_name": "json.load",
"line_number": 382,
"usage_type": "call"
},
{
"api_name": "torch.zeros",
"line_number": 386,
"usage_type": "call"
},
{
"api_name": "torch.zeros",
"line_number": 399,
"usage_type": "call"
},
{
"api_name": "torch_geometric.data.Data",
"line_number": 404,
"usage_type": "call"
},
{
"api_name": "torch_geometric.data.InMemoryDataset",
"line_number": 407,
"usage_type": "name"
},
{
"api_name": "torch.load",
"line_number": 515,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 519,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 519,
"usage_type": "name"
},
{
"api_name": "os.path.join",
"line_number": 523,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 523,
"usage_type": "name"
},
{
"api_name": "os.path.isdir",
"line_number": 530,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 530,
"usage_type": "name"
},
{
"api_name": "os.listdir",
"line_number": 530,
"usage_type": "call"
},
{
"api_name": "torch_geometric.data.makedirs.makedirs",
"line_number": 533,
"usage_type": "call"
},
{
"api_name": "torch_geometric.data.download_url",
"line_number": 538,
"usage_type": "call"
},
{
"api_name": "torch_geometric.data.extract_tar",
"line_number": 541,
"usage_type": "call"
},
{
"api_name": "torch_geometric.data.extract_gz",
"line_number": 543,
"usage_type": "call"
},
{
"api_name": "os.unlink",
"line_number": 546,
"usage_type": "call"
},
{
"api_name": "os.listdir",
"line_number": 550,
"usage_type": "call"
},
{
"api_name": "os.path.isdir",
"line_number": 551,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 551,
"usage_type": "name"
},
{
"api_name": "os.path.join",
"line_number": 551,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 552,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 552,
"usage_type": "name"
},
{
"api_name": "os.path.join",
"line_number": 554,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 554,
"usage_type": "name"
},
{
"api_name": "os.listdir",
"line_number": 554,
"usage_type": "call"
},
{
"api_name": "torch.save",
"line_number": 592,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 600,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 600,
"usage_type": "name"
},
{
"api_name": "os.path.dirname",
"line_number": 600,
"usage_type": "call"
},
{
"api_name": "os.path.realpath",
"line_number": 600,
"usage_type": "call"
}
] |
348365806
|
from typing import Tuple
import pygame
import sys
import os
import math
from inventory import Inventory
"""
HactuallyBenji
https://opensource.com/article/17/12/game-python-moving-player
"""
class Player(pygame.sprite.Sprite):
def __init__(self, bounds):
pygame.sprite.Sprite.__init__(self)
self.speed = 1
self.movex = 0
self.movey = 0
self.frame = 0
self.images = []
self.animation = 4
self.sprite_scale = (150, 175)
self.bounds = (bounds[0] - self.sprite_scale[0], bounds[1] - self.sprite_scale[1])
        # load four copies of the sprite as walking-animation frames (kept from the tutorial; also useful if we need more than one player)
for i in range(1, 5):
img = pygame.image.load('Assets/player_right.png')
#img.convert_alpha() # optimise alpha
#img.set_colorkey(ALPHA) # set alpha
img = pygame.transform.scale(img, self.sprite_scale)
self.images.append(img)
self.image = self.images[0]
self.rect = self.image.get_rect()
# Player movement control
def control(self, x, y):
self.movex += x
self.movey += y
# Update player position and direction
def update(self):
self.rect.x = min(max(self.rect.x + self.movex*self.speed, 110), self.bounds[0]-110)
self.rect.y = min(max(self.rect.y + self.movey*self.speed, 110), self.bounds[1]-110)
# moving left
if self.movex < 0:
self.frame += 1
if self.frame > 3*self.animation:
self.frame = 0
self.image = pygame.transform.flip(self.images[self.frame // self.animation], True, False)
# moving right
if self.movex > 0:
self.frame += 1
if self.frame > 3*self.animation:
self.frame = 0
self.image = self.images[self.frame//self.animation]
"""
# Check to see if the player is in range of chest.
# Returns boolean value for whether or not inventory should be accessible
"""
def is_chest_in_player_range(self, inventory: Inventory, desired_range: int):
if not inventory.chest_button:
return False
return ((self.rect.centerx - inventory.chest_button.rect.centerx)**2 + (self.rect.centery - inventory.chest_button.rect.centery)**2) < desired_range**2
"""
Returns distance from player to specified chest
"""
    def distance_from_chest(self, chest: Inventory):
        if chest.chest_button:
            return math.sqrt((self.rect.centerx - chest.chest_button.rect.centerx)**2 + (self.rect.centery - chest.chest_button.rect.centery)**2)
        # chests without a placed button count as infinitely far away so the
        # comparisons in get_nearest_chest() never receive None
        return math.inf
"""
Finds the nearest chest from a list of chests passed in
"""
def get_nearest_chest(self, chests: list):
closest = self.distance_from_chest(chests[0])
closest_chest = chests[0]
for chest in chests:
if self.distance_from_chest(chest) < closest:
closest = self.distance_from_chest(chest)
closest_chest = chest
return closest_chest
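# --- Illustrative usage sketch (added; not part of the original file). ----
# A minimal event loop exercising Player.control/update; the window size is
# an assumption, and Assets/player_right.png must exist as in __init__ above.
if __name__ == '__main__':
    pygame.init()
    screen = pygame.display.set_mode((800, 600))
    player = Player(screen.get_size())
    clock = pygame.time.Clock()
    running = True
    while running:
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                running = False
            elif event.type == pygame.KEYDOWN and event.key == pygame.K_RIGHT:
                player.control(1, 0)   # start moving right
            elif event.type == pygame.KEYUP and event.key == pygame.K_RIGHT:
                player.control(-1, 0)  # undo the earlier delta to stop
        player.update()
        screen.fill((0, 0, 0))
        screen.blit(player.image, player.rect)
        pygame.display.flip()
        clock.tick(60)
    pygame.quit()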
| null |
player.py
|
player.py
|
py
| 3,053 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "pygame.sprite",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "pygame.sprite.Sprite.__init__",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "pygame.sprite",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "pygame.image.load",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "pygame.image",
"line_number": 31,
"usage_type": "attribute"
},
{
"api_name": "pygame.transform.scale",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "pygame.transform",
"line_number": 34,
"usage_type": "attribute"
},
{
"api_name": "pygame.transform.flip",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "pygame.transform",
"line_number": 56,
"usage_type": "attribute"
},
{
"api_name": "inventory.Inventory",
"line_number": 69,
"usage_type": "name"
},
{
"api_name": "inventory.chest_button",
"line_number": 70,
"usage_type": "attribute"
},
{
"api_name": "inventory.chest_button",
"line_number": 73,
"usage_type": "attribute"
},
{
"api_name": "inventory.Inventory",
"line_number": 78,
"usage_type": "name"
},
{
"api_name": "math.sqrt",
"line_number": 80,
"usage_type": "call"
}
] |
11841345
|
__author__ = "Manouchehr Rasouli"
__date__ = "5/Aug/2017, 8/Aug/2017"
import requests
import json
import threading
import time
import datetime
from config_pack import configuration_manager
from interupt_service_connector import inter_upt_logger
from exception_log_service_connection import exception_logger
class InterUptService:
def __init__(self):
self.conf = None
self.logger = inter_upt_logger.InterUptLogger()
thread = threading.Thread(target=self.run, args=())
thread.daemon = True
thread.start()
def run(self):
while True:
self.conf = configuration_manager.ConfigPack()
try:
if int(self.logger.check_size()) > 0:
service_url = self.conf.get_service_url()
service_inter_upt_url = self.conf.get_service_interupt_url()
inter_upt = self.logger.pop_interupt()
                    # Build the payload with json.dumps instead of string
                    # concatenation so the wrapper object is always valid JSON.
                    result_json = json.dumps({"interupt": inter_upt})
                    data = [('interupt', result_json)]
requests.put(service_url + service_inter_upt_url, data=data)
except Exception as e:
error = {"service_name" : "inter_upt_service/inter_upt_service", "date" : datetime.datetime.now().strftime("%y/%m/%d %H:%M"), "exception" : str(e)}
logger = exception_logger.ExceptionLogger()
logger.put_exception(error)
time.sleep(self.conf.get_interupt_service_sleep_time())
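# --- Illustrative usage sketch (added; not part of the original module). --
# The constructor already starts the daemon worker thread, so a caller only
# needs to keep the main thread alive; the sleep loop below is illustrative.
if __name__ == '__main__':
    service = InterUptService()
    while True:
        time.sleep(60)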
| null |
inter_upt_service/inter_upt_service.py
|
inter_upt_service.py
|
py
| 1,506 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "interupt_service_connector.inter_upt_logger.InterUptLogger",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "interupt_service_connector.inter_upt_logger",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "threading.Thread",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "config_pack.configuration_manager.ConfigPack",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "config_pack.configuration_manager",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "json.dumps",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "requests.put",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 34,
"usage_type": "attribute"
},
{
"api_name": "exception_log_service_connection.exception_logger.ExceptionLogger",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "exception_log_service_connection.exception_logger",
"line_number": 35,
"usage_type": "name"
},
{
"api_name": "time.sleep",
"line_number": 37,
"usage_type": "call"
}
] |
210350037
|
import logging
import re
import sys
import urllib.parse
from concurrent.futures import ThreadPoolExecutor
import requests
from bs4 import BeautifulSoup
def fetch_afisha_page(url="https://www.afisha.ru/msk/schedule_cinema/"):
response = requests.get(url)
html = response.text
return html
def parse_afisha_list(raw_html):
soup = BeautifulSoup(raw_html, "html.parser")
cards = soup.select("div.card.cards-grid__item")
afisha_movie_infos = [
(
card.select_one("h3.card__title").string.strip(),
"https://www.afisha.ru{}".format(
card.select_one("a.card__link")["href"]
),
card.select_one("img.card__image")["src"],
)
for card in cards
]
return afisha_movie_infos
def fetch_movie_info(movie_title):
movie_title = re.sub(r"«|»", "", movie_title)
url = "https://www.kinopoisk.ru/index.php"
params = {"kp_query": movie_title}
response = requests.get(url, params=params)
html = response.text
soup = BeautifulSoup(html, "html.parser")
non_breaking_space = "\u00A0"
page_title = soup.title.string.strip()
most_wanted = soup.select_one(".most_wanted")
if most_wanted is not None:
link = most_wanted.select_one("div.info > p > a")
title = link.string.strip()
if title not in movie_title:
logging.warning("Could not find movie {}".format(movie_title))
return None
rating = most_wanted.select_one(".rating")
if rating is None:
return "—", "—"
rating = rating["title"].replace(non_breaking_space, "")
match = re.search(r"(\d\.\d*) \((\d*)\)", rating)
value_group_index = 1
count_group_index = 2
rating_value = match.group(value_group_index)
rating_count = match.group(count_group_index)
return rating_value, rating_count
elif movie_title in page_title:
rating_value = soup.find("meta", attrs={"itemprop": "ratingValue"})[
"content"
]
rating_count = soup.find("meta", attrs={"itemprop": "ratingCount"})[
"content"
]
return rating_value, rating_count
else:
logging.warning("Could not find movie {}".format(movie_title))
return None
def sort_movies_by_rating(movies):
default_rating = 0.0
rating_value_index = 3
return sorted(
movies,
key=lambda movie: float(movie[rating_value_index])
if re.search(r"\d\.\d*", movie[rating_value_index]) is not None
else default_rating,
reverse=True,
)
def output_movies_to_console(movies):
for movie in movies:
print("{} | {} | {} | {} | {}".format(*movie))
def get_movies(max_movies=10):
movies = []
try:
html = fetch_afisha_page()
afisha_infos = parse_afisha_list(html)
title_index = 0
titles = list(map(lambda info: info[title_index], afisha_infos))
executor = ThreadPoolExecutor()
kinopoisk_infos = executor.map(fetch_movie_info, titles)
movies = [
(*afisha_info, *kinopoisk_info)
for afisha_info, kinopoisk_info in zip(
afisha_infos, kinopoisk_infos
)
if kinopoisk_info is not None
]
movies = sort_movies_by_rating(movies)[:max_movies]
return movies
except requests.RequestException:
        logging.exception("Error occurred")
return movies
if __name__ == "__main__":
movies = get_movies()
output_movies_to_console(movies)
| null |
cinemas.py
|
cinemas.py
|
py
| 3,583 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "requests.get",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "re.sub",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "logging.warning",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "re.search",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "logging.warning",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "re.search",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "concurrent.futures.ThreadPoolExecutor",
"line_number": 99,
"usage_type": "call"
},
{
"api_name": "requests.RequestException",
"line_number": 110,
"usage_type": "attribute"
},
{
"api_name": "logging.exception",
"line_number": 111,
"usage_type": "call"
}
] |
106158690
|
from django import forms
PRODUCT_QUANTITY_CHOICES = [(i, str(i)) for i in range(1, 15)]
class CartAddProductForm(forms.Form):
quantity = forms.TypedChoiceField(
choices=PRODUCT_QUANTITY_CHOICES,
coerce=int,
label='Кол-во',
widget=forms.NumberInput(
attrs={
'type': 'number',
'min': '1',
'value': '1',
}
)
)
update = forms.BooleanField(required=False,
initial=False,
widget=forms.HiddenInput)
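# --- Illustrative usage sketch (added; not part of the original file). ----
# A view like this would normally live in views.py; it is shown here only
# to demonstrate how the form is consumed. The Cart class, the Product
# model location and the 'cart:cart_detail' URL name are assumptions
# borrowed from the common Django shop tutorial layout.
from django.shortcuts import get_object_or_404, redirect
from django.views.decorators.http import require_POST
@require_POST
def cart_add(request, product_id):
    from shop.models import Product   # assumed model location
    from .cart import Cart            # assumed cart implementation
    cart = Cart(request)
    product = get_object_or_404(Product, id=product_id)
    form = CartAddProductForm(request.POST)
    if form.is_valid():
        cd = form.cleaned_data
        cart.add(product=product, quantity=cd['quantity'],
                 update_quantity=cd['update'])
    return redirect('cart:cart_detail')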
| null |
shop/cart/forms.py
|
forms.py
|
py
| 591 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "django.forms.Form",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "django.forms",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "django.forms.TypedChoiceField",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "django.forms",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "django.forms.NumberInput",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "django.forms",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "django.forms.BooleanField",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "django.forms",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "django.forms.HiddenInput",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "django.forms",
"line_number": 23,
"usage_type": "name"
}
] |
548477051
|
from typing import Dict, List, Tuple, NewType
with open("data") as f:
data = f.readlines()
IntervalDict = NewType("IntervalDict", Dict[int, List[Tuple[int,int]]])
def sortSeries(data:List[str]) -> Tuple[List[int], List[str]]:
parsed1 = [d[1:].split("]")[0].split(" ") for d in data]
times = [ [*d[0].split("-"), *d[1].split(":") ] for d in parsed1 ]
sort = sorted(zip(times,data), key = lambda x: x[0])
minutes = [int(d[0][-1]) for d in sort ]
timeseries = [d[1].split("] ")[1] for d in sort]
return minutes, timeseries
def getIntervals(timeseries:List[str], minutes:List[int]) -> Dict[int, List[Tuple[int,int]]]:
guard = -1
intermed: Dict[int, List[int]] = {}
for i,val in enumerate(timeseries):
vals = val.split(" ")
if vals[0] =="Guard":
guard = int(vals[1][1:])
if guard not in intermed:
intermed[guard] = []
else:
intermed[guard].append(minutes[i])
res:Dict[int, List[Tuple[int,int]]]= {}
for key in intermed:
v = []
for i in range(len(intermed[key])//2):
v.append( (intermed[key][2*i], intermed[key][2*i+1]) )
res[key] = v
return res
def maxTimeAndGuard(intervals:Dict[int, List[Tuple[int,int]]]) -> Tuple[int,int]:
guard = -1
maxi = 0
for i in intervals:
val = sum([ d[1] - d[0] for d in intervals[i]])
if not maxi or maxi < val:
maxi = val
guard = i
return guard, maxi
def sleepiestMinute(interval:List[Tuple[int,int]]) -> Tuple[int,int]:
minutes = [0 for i in range(60)]
for start,stop in interval:
for i in range(start,stop):
minutes[i] += 1
return max(minutes), minutes.index(max(minutes))
minutes, timeseries = sortSeries(data)
intervals = getIntervals(timeseries, minutes)
guard = -1
overallMax = 0
overallMinute = -1
sleepyList = [ [ *sleepiestMinute(intervals[g]), g] for g in intervals]
maxi = max(sleepyList)
print(maxi[1] * maxi[2])
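# --- Illustrative sketch (added; not part of the original solution). ------
# maxTimeAndGuard() above is otherwise unused here; combined with
# sleepiestMinute() it yields the part-one style answer: the guard who
# sleeps the most, times that guard's sleepiest minute.
guard, _ = maxTimeAndGuard(intervals)
_, minute = sleepiestMinute(intervals[guard])
print(guard * minute)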
| null |
four/b.py
|
b.py
|
py
| 2,291 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "typing.NewType",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "typing.Dict",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "typing.Tuple",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "typing.Tuple",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "typing.Dict",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "typing.Dict",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "typing.Tuple",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "typing.Dict",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "typing.Tuple",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "typing.Dict",
"line_number": 42,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 42,
"usage_type": "name"
},
{
"api_name": "typing.Tuple",
"line_number": 42,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 53,
"usage_type": "name"
},
{
"api_name": "typing.Tuple",
"line_number": 53,
"usage_type": "name"
}
] |
30471702
|
import openpyxl
from openpyxl.styles import Border, Side, Font
from time import sleep
import time
class ParseExcel(object):
    def __init__(self):
        self.workbook = None
        self.excelFile = None
        self.font = Font(color = None)  # font used when setting cell text color
        # RGB values corresponding to the supported color names
        self.RGBDict = {'red': 'FFFF3030', 'green':'FF008B00'}
    def loadWorkBook(self, excelPathAndName):
        # Load the Excel file into memory and get its workbook object
try:
self.workbook = openpyxl.load_workbook(excelPathAndName)
except Exception as e:
raise e
self.excelFile = excelPathAndName
return self.workbook
def getSheetByName(self, sheetName):
        # Get a sheet object by its name
try:
sheet = self.workbook[sheetName]
return sheet
except Exception as e:
raise e
def getSheetByIndex(self, sheetIndex):
        # Get a sheet object by its index number
try:
sheet = self.workbook.worksheets[sheetIndex]
# print(type(sheet))
except Exception as e:
raise e
return sheet
    def getRowsNumber(self, sheet):
        # Get the last row number of the sheet's data area
        return sheet.max_row
    def getColsNumber(self, sheet):
        # Get the last column number of the sheet's data area
        return sheet.max_column
    def getStartRowNum(self, sheet):
        # Get the first row number of the sheet's data area
        return sheet.min_row
    def getStartColNumber(self, sheet):
        # Get the first column number of the sheet's data area
        return sheet.min_column
    def getRow(self, sheet, rowNo):
        # Get one row of the sheet, returned as a tuple of all its cells.
        # Indexing starts at 1, so rowNo 1 means the first row.
        try:
            # print("rowNo:%d" % rowNo)
            return list(sheet.rows)[rowNo -1]
        except Exception as e:
            raise e
    def getColumn(self, sheet, colNo):
        # Get one column of the sheet, returned as a tuple of all its cells.
        # Indexing starts at 1, so colNo 1 means the first column.
        try:
            return list(sheet.columns)[colNo -1]
        except Exception as e:
            raise e
    def getCellOfValue(self, sheet , coordinate = None, rowNo = None, colsNo = None):
        # Get a cell's value by numeric position (1-based) or by its Excel
        # coordinate string such as "A1"/"B1".
        # sheet.cell(row=1, column=1).value is the first row / first column;
        # a coordinate must be read via the sheet[coordinate] form, which is
        # easy to get wrong.
        # print("coordinate: %s" % coordinate)
        if coordinate != None:
            try:
                return sheet[coordinate].value
            except Exception as e:
                raise e
        elif coordinate is None and rowNo is not None and colsNo is not None:
            try:
                return sheet.cell(row = rowNo, column = colsNo).value
            except Exception as e:
                raise e
        else:
            raise Exception("Insufficient Coordinates of cell!")
    def getCellOfObject(self, sheet, coordinate = None, rowNo = None, colsNo = None):
        # Get a cell object, either by its numeric position (1-based) or
        # directly by its Excel coordinate, e.g.
        # getCellOfObject(sheet, coordinate = "A1") or
        # getCellOfObject(sheet, rowNo = 1, colsNo = 2)
        if coordinate != None:
            try:
                # return the cell object itself, not its value
                return sheet[coordinate]
            except Exception as e:
                raise e
        elif coordinate == None and rowNo is not None and colsNo is not None:
            try:
                return sheet.cell(row = rowNo, column = colsNo)
            except Exception as e:
                raise e
        else:
            raise Exception("Insufficient Coordinate of cell !")
    def writeCell(self, sheet, content, coordinate = None, rowNo = None, colsNo = None, style= None):
        # Write data to a cell addressed by Excel coordinate or by numeric
        # position (1-based); style names a font color such as red or green.
        if coordinate is not None:
            try:
                # openpyxl's cell() has no `coordinate` argument; use the
                # sheet[coordinate] form instead.
                sheet[coordinate].value = content
                if style is not None:
                    sheet[coordinate].font = Font(color = self.RGBDict[style])
                self.workbook.save(self.excelFile)
            except Exception as e:
                raise e
        elif coordinate == None and rowNo is not None and colsNo is not None:
            try:
                sheet.cell(row = rowNo, column = colsNo).value = content
                if style:
                    sheet.cell(row = rowNo, column = colsNo).font = Font(color = self.RGBDict[style])
                self.workbook.save(self.excelFile)
            except Exception as e:
                raise e
        else:
            raise Exception("Insufficient Coordinates of cell!")
    def writeCellCurrentTime(self, sheet, coordinate=None, rowNo = None, colsNo = None,style=None):
        # Write the current time into a cell; indexing starts at 1
        now = int(time.time())  # current unix timestamp
        timeArray = time.localtime(now)
        currentTime = time.strftime("%Y-%m-%d %H:%M:%S", timeArray)
if coordinate is not None:
try:
sheet[coordinate].value = currentTime
self.workbook.save(self.excelFile)
except Exception as e:
raise e
elif coordinate == None and rowNo is not None and colsNo is not None:
try:
sheet.cell(row=rowNo, column=colsNo).value=currentTime
self.workbook.save(self.excelFile)
except Exception as e:
raise e
else:
raise Exception("Insufficient Coordinates of cell")
if __name__ == "__main__":
pe = ParseExcel()
    # Excel workbook used for testing
pe.loadWorkBook("F:/pythonWorkplace/keywordAndDataAppium/data/登录2.xlsx")
    # print("sheet title fetched by index: %s" % pe.getSheetByIndex(0).title)
    # print("sheet title fetched by name: %s" % pe.getSheetByName("登录").title)
sheetObj = pe.getSheetByName("测试用例")
# print(sheetObj['A2'].value)
# print(sheetObj(coordinate = "A2").value)
# print(pe.getRowsNumber(sheetObj))
# print(pe.getColsNumber(sheetObj))
# pe.writeCell(sheetObj,"pass",coordinate = None, rowNo = 2, colsNo = 8, style= None)
print(pe.getCellOfValue(sheetObj,"A2"))
# sheet = pe.getSheetByIndex(0)
# # print(type(sheet))
    # print("max row number: %d" % pe.getRowsNumber(sheet))   # last row with data
    # print("max column number: %d" % pe.getColsNumber(sheet))   # last column with data
    # rows = pe.getRow(sheet, 1)   # get the first row
# for i in rows:
# print(i.value)
#
# print("="*10)
    # cols = pe.getColumn(sheet,5)   # get the 5th column
# for j in cols:
# print(j.value)
    # # get the value of the cell at row 1, column 1
# #print(pe.getCellOfValue(sheet, rowNo=1, colsNo=1))
# pe.writeCell(sheet, "我爱祖国",rowNo=10,colsNo=10)
# pe.writeCellCurrentTime(sheet, rowNo=10, colsNo=11)
| null |
common/ParseExcel.py
|
ParseExcel.py
|
py
| 7,388 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "openpyxl.styles.Font",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "openpyxl.load_workbook",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "openpyxl.styles.Font",
"line_number": 122,
"usage_type": "call"
},
{
"api_name": "openpyxl.styles.Font",
"line_number": 132,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 143,
"usage_type": "call"
},
{
"api_name": "time.localtime",
"line_number": 144,
"usage_type": "call"
},
{
"api_name": "time.strftime",
"line_number": 145,
"usage_type": "call"
}
] |
15503330
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Jul 21 14:50:35 2017
@author: student
"""
import argparse
import redis
import sys
import os
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Export data related to IP and MAC addresses into a matrix which will be used as data source for Circos')
parser.add_argument('-s', '--source', type=str, nargs=1, help='Sensor used as data source (ex: "chp-5890-1")')
parser.add_argument('-d', '--date', type=str, nargs=1, help='Date (day) of the informations to display (with the format YYYY-MM-DD)')
parser.add_argument('-u', '--unix', type=str, nargs=1, help='Unix socket to connect to redis-server')
parser.add_argument("-o","--outputdir", type=str, nargs=1, help="Output directory")
args = parser.parse_args()
if args.source is None:
source = "potiron"
else:
source = args.source[0]
if args.date is None:
        sys.stderr.write('A date must be specified.\nThe format is: YYYY-MM-DD\n')
sys.exit(1)
date = args.date[0]
if args.unix is None:
sys.stderr.write('A Unix socket must be specified.\n')
sys.exit(1)
usocket = args.unix[0]
red = redis.Redis(unix_socket_path=usocket)
if args.outputdir is None:
outputdir = "./out/"
else:
outputdir = args.outputdir[0]
if not outputdir.endswith('/'):
outputdir = "{}/".format(outputdir)
if not os.path.exists(outputdir):
os.makedirs(outputdir)
redisKey = '{}*{}*'.format(source, date)
mat = {}
mactab = []
for k in red.keys(redisKey):
key = k.decode()
ip = key.split('_')[1]
mac = red.hget(key, 'rep_src_arp_mac')
if mac is None:
continue
mac = mac.decode()
mac = mac.replace(':','')
if mac not in mactab:
mactab.append(mac)
if ip not in mat:
mat[ip] = {}
if mac in mat[ip]:
mat[ip][mac] += 1
else:
mat[ip][mac] = 1
output_file_name = '{}matrix_{}_{}.circos'.format(outputdir, source, date)
with open(output_file_name, 'w') as f:
f.write("mac\t")
f.write("{}\n".format("\t".join(mactab)))
for i in mat:
f.write(i)
for m in mactab:
if m in mat[i]:
f.write("\t{}".format(mat[i][m]))
else:
f.write("\t0")
f.write("\n")
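    # --- Illustrative note (added; not part of the original script). ------
    # The file written above is a tab-separated matrix with one row per
    # source IP and one column per MAC address, e.g. (values invented):
    # mac	00aabbccddee	112233445566
    # 10.0.0.1	3	0
    # 10.0.0.2	0	7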
| null |
bin/create-circos-matrix.py
|
create-circos-matrix.py
|
py
| 2,529 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "argparse.ArgumentParser",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "sys.stderr.write",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "sys.stderr",
"line_number": 28,
"usage_type": "attribute"
},
{
"api_name": "sys.exit",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "sys.stderr.write",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "sys.stderr",
"line_number": 33,
"usage_type": "attribute"
},
{
"api_name": "sys.exit",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "redis.Redis",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 44,
"usage_type": "attribute"
},
{
"api_name": "os.makedirs",
"line_number": 45,
"usage_type": "call"
}
] |
223357160
|
import datetime
import h5py
import pytest
from mongomock import MongoClient
from splash_ingest.server.api_auth_service import create_api_client, init_api_service as init_api_key
from splash_ingest.server.model import IngestType
from splash_ingest.model import Mapping
from ..ingest_service import (
bluesky_context,
find_job,
find_unstarted_jobs,
init_ingest_service,
service_context,
create_job,
set_job_status,
create_mapping,
find_mapping,
ingest
)
from ..model import JobStatus, StatusItem
@pytest.fixture(scope="session", autouse=True)
def init_mongomock():
databroker_db = MongoClient().databroker_db
ingest_db = MongoClient().ingest_db
init_ingest_service(ingest_db, databroker_db)
init_api_key(ingest_db)
create_api_client('user1', 'sirius_cybernetics_gpp', 'door_operation')
def test_jobs_init():
assert service_context.ingest_jobs is not None, "test that init creates a collection"
assert len(service_context.ingest_jobs.index_information()) == 4
assert service_context.ingest_mappings is not None, "test that init creates a collection"
assert len(service_context.ingest_mappings.index_information()) == 3
def test_job_create():
document_path = "/foo/bar.hdf5"
job = create_job("user1", document_path, "magrathia_42", [IngestType.databroker])
assert job.id is not None, "Job gets a new uid"
assert job.submit_time is not None, "Job gets a submit time"
assert job.submitter == "user1", "Job gets provided submitter"
    assert job.status == JobStatus.submitted, "Job starts in submitted status"
return_job = find_job(job.id)
assert return_job.submit_time is not None, "return Job gets a submit time"
assert return_job.submitter == "user1", "return Job gets provided submitter"
    assert return_job.status == JobStatus.submitted, "return Job starts in submitted status"
def test_update_non_existant_job():
result = set_job_status("42",
StatusItem(
submitter="slartibartfast",
time=datetime.datetime.utcnow(),
status=JobStatus.running))
assert not result, "tested return code for non-existent job"
def test_query_unstarted_jobs():
document_path = "/foo/bar.hdf5"
job = create_job("user1", document_path, "magrathia", [IngestType.databroker])
job = create_job("user1", document_path, "magrathia", [IngestType.databroker])
jobs = find_unstarted_jobs()
for job in jobs:
assert job.status == JobStatus.submitted
time = datetime.datetime.utcnow()
set_job_status(job.id,
StatusItem(
time=time,
submitter="slartibartfast",
status=JobStatus.running,
log="rebuild earth"))
job = find_job(job.id)
assert len(job.status_history) > 1
assert job.status_history[-1].submitter == "slartibartfast", "most recent status correct user"
assert (abs(job.status_history[-1].time - time) < datetime.timedelta(milliseconds=1)), \
"most recent status data within Mongo accuracy of milliseconds"
    assert job.status_history[-1].status == JobStatus.running, "most recent status has correct state"
    assert job.status_history[-1].log == "rebuild earth", "most recent status has correct log"
jobs = list(find_unstarted_jobs())
assert len(jobs) == 0, "all jobs should be set to started"
@pytest.fixture
def sample_file(tmp_path):
file = h5py.File(tmp_path / 'test.hdf5', 'w')
file.create_dataset('/measurement/sample/name', data=b'my sample', dtype='|S256')
file.close()
file = h5py.File(tmp_path / 'test.hdf5', 'r')
yield file
print('closing file')
file.close()
def test_ingest_databroker(sample_file, init_mongomock):
mapping = Mapping(**mapping_dict)
create_mapping("slartibartfast", mapping)
mapping = find_mapping("slartibartfast", "magrathia")
assert mapping.resource_spec == "MultiKeySlice", "test a field"
job = create_job(
"user1",
sample_file.filename,
"magrathia",
[IngestType.databroker])
start_uid = ingest("slartibartfast", job)
job = find_job(job.id)
assert job is not None
    assert job.status == JobStatus.successful, f'ingest completed {job.status_history[-1]}'
assert bluesky_context.db['run_start'].find_one({"uid": start_uid}) is not None, "job wrote start doc"
# def test_ingest_types(sample_file, init_mongomock, monkeypatch):
# from suitcase.mongo_normalized import Serializer
# class MockSerializer(Serializer):
# def __call__(self, name, doc):
# return super().__call__(name, doc)
# def db_call(name, doc):
# print(name, doc)
# databroker_db = MongoClient().databroker_db
# # serializer = MockSerializer(metadatastore_db=databroker_db, asset_registry_db=databroker_db)
# monkeypatch.setattr("suitcase.mongo_normalized", "Serializer", MockSerializer)
# serializer("start", {})
# # mapping = Mapping(**mapping_dict)
# # create_mapping("slartibartfast", mapping)
# # mapping = find_mapping("slartibartfast", "magrathia")
# # assert mapping.resource_spec == "MultiKeySlice", "test a field"
# # job = create_job(
# # "user1",
# # sample_file.filename,
# # "magrathia",
# # [IngestType.databroker])
# # start_uid = ingest("slartibartfast", job)
mapping_dict = {
"name": "magrathia",
"description": "test descriptions",
"version": "42",
"resource_spec": "MultiKeySlice",
"md_mappings": [
{"field": "/measurement/sample/name"}
],
}
| null |
splash_ingest/server/tests/test_workflow.py
|
test_workflow.py
|
py
| 5,803 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "mongomock.MongoClient",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "mongomock.MongoClient",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "ingest_service.init_ingest_service",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "splash_ingest.server.api_auth_service.init_api_service",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "splash_ingest.server.api_auth_service.create_api_client",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "pytest.fixture",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "ingest_service.service_context.ingest_jobs",
"line_number": 33,
"usage_type": "attribute"
},
{
"api_name": "ingest_service.service_context",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "ingest_service.service_context.ingest_jobs.index_information",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "ingest_service.service_context.ingest_jobs",
"line_number": 34,
"usage_type": "attribute"
},
{
"api_name": "ingest_service.service_context",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "ingest_service.service_context.ingest_mappings",
"line_number": 36,
"usage_type": "attribute"
},
{
"api_name": "ingest_service.service_context",
"line_number": 36,
"usage_type": "name"
},
{
"api_name": "ingest_service.service_context.ingest_mappings.index_information",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "ingest_service.service_context.ingest_mappings",
"line_number": 37,
"usage_type": "attribute"
},
{
"api_name": "ingest_service.service_context",
"line_number": 37,
"usage_type": "name"
},
{
"api_name": "ingest_service.create_job",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "splash_ingest.server.model.IngestType.databroker",
"line_number": 43,
"usage_type": "attribute"
},
{
"api_name": "splash_ingest.server.model.IngestType",
"line_number": 43,
"usage_type": "name"
},
{
"api_name": "model.JobStatus.submitted",
"line_number": 47,
"usage_type": "attribute"
},
{
"api_name": "model.JobStatus",
"line_number": 47,
"usage_type": "name"
},
{
"api_name": "ingest_service.find_job",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "model.JobStatus.submitted",
"line_number": 52,
"usage_type": "attribute"
},
{
"api_name": "model.JobStatus",
"line_number": 52,
"usage_type": "name"
},
{
"api_name": "ingest_service.set_job_status",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "model.StatusItem",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.utcnow",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 59,
"usage_type": "attribute"
},
{
"api_name": "model.JobStatus.running",
"line_number": 60,
"usage_type": "attribute"
},
{
"api_name": "model.JobStatus",
"line_number": 60,
"usage_type": "name"
},
{
"api_name": "ingest_service.create_job",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "splash_ingest.server.model.IngestType.databroker",
"line_number": 67,
"usage_type": "attribute"
},
{
"api_name": "splash_ingest.server.model.IngestType",
"line_number": 67,
"usage_type": "name"
},
{
"api_name": "ingest_service.create_job",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "splash_ingest.server.model.IngestType.databroker",
"line_number": 68,
"usage_type": "attribute"
},
{
"api_name": "splash_ingest.server.model.IngestType",
"line_number": 68,
"usage_type": "name"
},
{
"api_name": "ingest_service.find_unstarted_jobs",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "model.JobStatus.submitted",
"line_number": 72,
"usage_type": "attribute"
},
{
"api_name": "model.JobStatus",
"line_number": 72,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.utcnow",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 73,
"usage_type": "attribute"
},
{
"api_name": "ingest_service.set_job_status",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "model.StatusItem",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "model.JobStatus.running",
"line_number": 78,
"usage_type": "attribute"
},
{
"api_name": "model.JobStatus",
"line_number": 78,
"usage_type": "name"
},
{
"api_name": "ingest_service.find_job",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "datetime.timedelta",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "model.JobStatus.running",
"line_number": 85,
"usage_type": "attribute"
},
{
"api_name": "model.JobStatus",
"line_number": 85,
"usage_type": "name"
},
{
"api_name": "ingest_service.find_unstarted_jobs",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "h5py.File",
"line_number": 94,
"usage_type": "call"
},
{
"api_name": "h5py.File",
"line_number": 97,
"usage_type": "call"
},
{
"api_name": "pytest.fixture",
"line_number": 92,
"usage_type": "attribute"
},
{
"api_name": "splash_ingest.model.Mapping",
"line_number": 104,
"usage_type": "call"
},
{
"api_name": "ingest_service.create_mapping",
"line_number": 105,
"usage_type": "call"
},
{
"api_name": "ingest_service.find_mapping",
"line_number": 106,
"usage_type": "call"
},
{
"api_name": "ingest_service.create_job",
"line_number": 108,
"usage_type": "call"
},
{
"api_name": "splash_ingest.server.model.IngestType.databroker",
"line_number": 112,
"usage_type": "attribute"
},
{
"api_name": "splash_ingest.server.model.IngestType",
"line_number": 112,
"usage_type": "name"
},
{
"api_name": "ingest_service.ingest",
"line_number": 113,
"usage_type": "call"
},
{
"api_name": "ingest_service.find_job",
"line_number": 114,
"usage_type": "call"
},
{
"api_name": "model.JobStatus.successful",
"line_number": 116,
"usage_type": "attribute"
},
{
"api_name": "model.JobStatus",
"line_number": 116,
"usage_type": "name"
},
{
"api_name": "ingest_service.bluesky_context.db",
"line_number": 117,
"usage_type": "attribute"
},
{
"api_name": "ingest_service.bluesky_context",
"line_number": 117,
"usage_type": "name"
}
] |
295050664
|
import asyncio
from discord.ext.commands import Bot
cachedName = {}
async def get_name_by_discord_id(bot: Bot, id: int):
if (id not in cachedName):
user = await bot.fetch_user(id)
cachedName[id] = user.name
return cachedName[id]
async def add_pagination_arrow_reaction(res):
await asyncio.gather(
res.add_reaction('⬅️'),
res.add_reaction('➡️')
)
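# --- Illustrative usage sketch (added; not part of the original module). --
# A command handler using both helpers; the names below are assumptions.
# A second lookup of the same id is served from cachedName instead of the
# Discord API.
async def example_command(bot: Bot, ctx, user_id: int):
    name = await get_name_by_discord_id(bot, user_id)
    res = await ctx.send('Requested user: {}'.format(name))
    await add_pagination_arrow_reaction(res)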
| null |
app/discord/module/helper.py
|
helper.py
|
py
| 412 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "discord.ext.commands.Bot",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "asyncio.gather",
"line_number": 16,
"usage_type": "call"
}
] |
551295644
|
#Python program to scrape Zillow home listings (price, size, beds, baths)
#for a list of zip codes. For personal/educational use only!
#Please don't use this tool to violate any site's terms of service!
from argparse import ArgumentParser
from bs4 import BeautifulSoup
try:
from urllib.request import urlopen
from urllib.error import HTTPError
from urllib.error import URLError
except ImportError:
from urllib2 import urlopen
from urllib2 import HTTPError
from urllib2 import URLError
import os
import re
def build_parser():
'''deal with argument in command line'''
parser = ArgumentParser()
parser.add_argument('-zip',dest='zipcodes',help='zip code list to search',metavar='02462,02494',required=False,default='02462')
#parser.add_argument('-folder',dest='folder',default='./src',help='Folder to store source code',metavar='d:\source or .\source or .',required=False)
return parser
def getBeds(searchresult):
'''
:param searchresult: 3 bds
:return: 3 or 0 if any error
'''
if searchresult is None:
return 0
else:
num_beds = (searchresult.group()).strip().split(" ", 1)[0]
try:
return int(num_beds)
except ValueError:
return 0
def getBath(searchresult):
    '''
    :param searchresult: 3 ba or 1.5 ba
    :return: 3.0 / 1.5 as float, or 0 if any error
    '''
    if searchresult is None:
        return 0
    else:
        num_bath = (searchresult.group()).strip().split(" ", 1)[0]
        try:
            # float rather than int: half-bath listings like "1.5 ba" exist
            return float(num_bath)
        except ValueError:
            return 0
def getSQFT(searchresult):
'''
:param searchresult: 1,279 sqft or --- sqft
:return: 1279 float or 0.0 if any error
'''
if searchresult is None:
return 0.0
else:
num_sqft = (searchresult.group()).strip().split(" ", 1)[0]
try:
return float(num_sqft.replace(",",""))
except ValueError:
return 0.0
def getPrice(price):
'''
:param price: $1,289
:return: 1289 float or 0 if any error
'''
if len(price) > 1:
price = (price[1:]).strip()
try:
return float(price.replace(",",""))
except ValueError:
return 0.0
else:
return 0.0
def getHouseLists(zipcodes):
'''
:param zipcodes: 02494,02464
:return: yield zipcode houseprice, housesqft, housebeds, housebaths
'''
for zipcode in zipcodes.split(','):
zipcode = zipcode.strip()
url = 'http://www.zillow.com/homes/%s_rb/' % zipcode
#print("url=%s" % url)
try:
html = urlopen(url)
        except URLError as e:
if hasattr(e, 'reason'):
print('Fail in reaching the server -> ', e.reason)
return
elif hasattr(e, 'code'):
print('The server couldn\'t fulfill the request -> ', e.code)
return
else:
#print(html)
page = html.read()
#print(page)
with open(os.path.join('./', zipcode + '.html'), 'wb') as fo:
fo.write(page)
#print('Url saved as %s' % filename)
bsObj = BeautifulSoup(page, "html.parser")
'''
<dt class="price-large zsg-h2 zsg-content_collapsed">$999,500</dt>
<dt class="property-data">
<span class="beds-baths-sqft">4 bds • 3 ba • 2,870 sqft</span>
<span class="lot-size"> • 10,454 sqft lot</span>
<span class="built-year"> • Built 1948</span>
</dt>
<dt class="price-large zsg-h2 zsg-content_collapsed">$3,975/mo</dt>
<dt class="property-data">
<span class="beds-baths-sqft">3 bds • 1.5 ba</span>
</dt>
<p class="zsg-photo-card-spec">
<span class="zsg-photo-card-price">$1,075,000</span>
<span class="zsg-photo-card-info">
3 bds
<span class='interpunct'>·</span>
3 ba <span class='interpunct'>·</span>
2,440 sqft</span></p>
'''
prices = bsObj.findAll("dt", {"class": "price-large zsg-h2 zsg-content_collapsed"})
if prices == None or len(prices) == 0:
prices = bsObj.findAll("span",{'class':'zsg-photo-card-price'})
re_bds = re.compile(r'[0-9] bds')
            re_ba = re.compile(r'[0-9.]+ ba')  # allow half-baths such as "1.5 ba"
re_sqft = re.compile(r'[0-9-,]+ sqft')
for price in prices:
#print(price.text)
houseprice = 0.0
housebeds = 0
housebaths = 0
housesqft = 0.0
houseprice = getPrice(price.text)
#print("houseprice:%.2f" % houseprice)
if houseprice >0.0:
nextDT = price.nextSibling
houseproperties = ''
                    if nextDT is not None and getattr(nextDT, 'name', None):
                        # bs4 returns the class attribute as a list, so test
                        # membership instead of comparing against a string;
                        # find() (not findAll) yields a single tag with .text
                        if 'property-data' in (nextDT.get('class') or []):
                            bbs = nextDT.find("span", {'class': 'beds-baths-sqft'})
                            if bbs is not None:
                                houseproperties = bbs.text
                        else:
                            houseproperties = nextDT.text
#print(houseproperties)
housebeds = getBeds(re_bds.search(houseproperties))
housebaths = getBath(re_ba.search(houseproperties))
housesqft = getSQFT(re_sqft.search(houseproperties))
#print("Price:%.1f, Size:%.1f, beds:%d, bath:%d" % (houseprice, housesqft, housebeds, housebaths))
yield zipcode,houseprice, housesqft, housebeds, housebaths
def getMAZipCodes():
'''
return all zip code in MA stats
:return:
'''
url = 'http://www.zipcodestogo.com/Massachusetts/'
# print("url=%s" % url)
try:
html = urlopen(url)
    except URLError as e:
if hasattr(e, 'reason'):
print('Fail in reaching the server -> ', e.reason)
return
elif hasattr(e, 'code'):
print('The server couldn\'t fulfill the request -> ', e.code)
return
else:
# print(html)
page = html.read()
bsObj = BeautifulSoup(page, "html.parser")
re_zipcode = re.compile(r'[0-9]{5}')
zipcodes = bsObj.findAll("a")
zipCodeList = []
for zipcodestr in zipcodes:
zipcode_result = re_zipcode.match(zipcodestr.text)
if zipcode_result:
zipCodeList.append(zipcode_result.group())
#careful, if string 02462ABC, then 02462 will be appended
return zipCodeList
def main():
parser = build_parser()
options = parser.parse_args()
zipcodes=options.zipcodes
#zipcodes = ",".join(getMAZipCodes())
for zipcode,houseprice, housesqft, housebeds, housebaths in getHouseLists(zipcodes):
print(zipcode,houseprice, housesqft, housebeds, housebaths)
#print(getMAZipCodes())
if __name__== '__main__':
main()
| null |
zillowscrapy.py
|
zillowscrapy.py
|
py
| 7,281 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "argparse.ArgumentParser",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "urllib2.urlopen",
"line_number": 94,
"usage_type": "call"
},
{
"api_name": "urllib.request.error",
"line_number": 95,
"usage_type": "attribute"
},
{
"api_name": "urllib.request",
"line_number": 95,
"usage_type": "name"
},
{
"api_name": "os.path.join",
"line_number": 106,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 106,
"usage_type": "attribute"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 109,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_number": 134,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_number": 135,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_number": 136,
"usage_type": "call"
},
{
"api_name": "urllib2.urlopen",
"line_number": 176,
"usage_type": "call"
},
{
"api_name": "urllib.request.error",
"line_number": 177,
"usage_type": "attribute"
},
{
"api_name": "urllib.request",
"line_number": 177,
"usage_type": "name"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 187,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_number": 188,
"usage_type": "call"
}
] |
628086066
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
"""Custom operations for storage file datalake"""
from azure.cli.core.profiles import ResourceType
from knack.util import todict
def exists(cmd, client, timeout=None):
from azure.core.exceptions import HttpResponseError
try:
client.get_directory_properties(timeout=timeout)
return True
except HttpResponseError as ex:
from azure.cli.command_modules.storage.track2_util import _dont_fail_on_exist
StorageErrorCode = cmd.get_models("_shared.models#StorageErrorCode",
resource_type=ResourceType.DATA_STORAGE_FILEDATALAKE)
_dont_fail_on_exist(ex, StorageErrorCode.blob_not_found)
return False
def list_fs_directories(client, path=None, recursive=True, num_results=None, timeout=None):
generator = client.get_paths(path=path, recursive=recursive, timeout=timeout, max_results=num_results)
return list(f for f in generator if f.is_directory)
def get_directory_properties(client, timeout=None):
from .._transformers import transform_fs_access_output
prop = todict(client.get_directory_properties(timeout=timeout))
acl = transform_fs_access_output(client.get_access_control(timeout=timeout))
result = dict(prop, **acl)
return result
def remove_access_control_recursive(client, acl, **kwargs):
failed_entries = []
# the progress callback is invoked each time a batch is completed
def progress_callback(acl_changes):
# keep track of failed entries if there are any
if acl_changes.batch_failures:
failed_entries.extend(acl_changes.batch_failures)
result = client.remove_access_control_recursive(acl=acl, progress_hook=progress_callback, **kwargs)
result = todict(result)
result['failedEntries'] = failed_entries
return result
def set_access_control_recursive(client, acl, **kwargs):
failed_entries = []
# the progress callback is invoked each time a batch is completed
def progress_callback(acl_changes):
# keep track of failed entries if there are any
if acl_changes.batch_failures:
failed_entries.extend(acl_changes.batch_failures)
result = client.set_access_control_recursive(acl=acl, progress_hook=progress_callback, **kwargs)
result = todict(result)
result['failedEntries'] = failed_entries
return result
def update_access_control_recursive(client, acl, **kwargs):
failed_entries = []
# the progress callback is invoked each time a batch is completed
def progress_callback(acl_changes):
# keep track of failed entries if there are any
if acl_changes.batch_failures:
failed_entries.extend(acl_changes.batch_failures)
result = client.update_access_control_recursive(acl=acl, progress_hook=progress_callback, **kwargs)
result = todict(result)
result['failedEntries'] = failed_entries
return result
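# --- Illustrative sketch (added; not part of the original module). --------
# Direct (non-CLI) use of one helper, assuming an SDK DataLakeDirectoryClient;
# the account URL, credential and ACL string below are placeholders.
# from azure.storage.filedatalake import DataLakeDirectoryClient
# client = DataLakeDirectoryClient("https://<account>.dfs.core.windows.net",
#                                  "myfilesystem", "mydir",
#                                  credential="<account-key>")
# result = set_access_control_recursive(client, acl="user::rwx,group::r-x,other::---")
# print(result['failedEntries'])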
| null |
src/azure-cli/azure/cli/command_modules/storage/operations/fs_directory.py
|
fs_directory.py
|
py
| 3,253 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "azure.core.exceptions.HttpResponseError",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "azure.cli.core.profiles.ResourceType.DATA_STORAGE_FILEDATALAKE",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "azure.cli.core.profiles.ResourceType",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "azure.cli.command_modules.storage.track2_util._dont_fail_on_exist",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "knack.util.todict",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "_transformers.transform_fs_access_output",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "knack.util.todict",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "knack.util.todict",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "knack.util.todict",
"line_number": 79,
"usage_type": "call"
}
] |
345194848
|
#!/usr/bin/env python3
# Kebechet
# Copyright(C) 2018, 2019 Kevin Postlethwait
#
# This program is free software: you can redistribute it and / or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Consume Thoth Output for Kebechet auto-dependency management."""
import hashlib
import os
import logging
import json
import typing
from thamos import lib
import git
from kebechet.exception import DependencyManagementError
from kebechet.exception import InternalError
from kebechet.exception import PipenvError
from kebechet.managers.manager import ManagerBase
from kebechet.source_management import Issue
from kebechet.source_management import MergeRequest
from kebechet.utils import cloned_repo
_BRANCH_NAME = "kebechet_thoth"
_LOGGER = logging.getLogger(__name__)
class ThothAdviseManager(ManagerBase):
"""Manage updates of dependencies using Thoth."""
def __init__(self, *args, **kwargs):
"""Initialize ThothAdvise manager."""
# We do API calls once for merge requests and we cache them for later use.
self._cached_merge_requests = None
super().__init__(*args, **kwargs)
@property
def sha(self):
"""Get SHA of the current head commit."""
return self.repo.head.commit.hexsha
def _construct_branch_name(self) -> str:
"""Construct branch name for the updated dependency."""
return f"{_BRANCH_NAME}-{self.sha[:10]}"
def _git_push(
self, commit_msg: str, branch_name: str, files: list, force_push: bool = False
) -> None:
"""Perform git push after adding files and giving a commit message."""
self.repo.index.add(files)
self.repo.index.commit(commit_msg)
self.repo.remote().push(branch_name, force=force_push)
def _open_merge_request(
self, branch_name: str, labels: list, files: list
) -> typing.Optional[int]:
"""Open a pull/merge request for dependency update."""
commit_msg = "Auto generated update"
body = "Pipfile.lock updated by kebechet-thoth manager"
        # Do nothing if the update didn't change Pipfile.lock
diff = self.repo.git.diff("master", files)
if diff == "":
_LOGGER.info("No changes necessary, exiting...")
return
        # Always force-push to keep the branch up to date with recent master and avoid merge conflicts.
_LOGGER.info('Pushing changes')
self._git_push(":pushpin: " + commit_msg, branch_name, files, force_push=True)
# Check if the merge request already exists
for mr in self._cached_merge_requests:
if mr.head_branch_name == branch_name:
_LOGGER.info('Merge request already exists, updating...')
return
_LOGGER.info('Opening merge request')
merge_request = self.sm.open_merge_request(
commit_msg, branch_name, body, labels
)
return merge_request
@staticmethod
def _write_advise(adv_results: list):
lock_info = adv_results[0]["report"][0][1]["requirements_locked"]
with open("Pipfile.lock", "w+") as f:
_LOGGER.info('Writing to Pipfile.lock')
_LOGGER.debug(f"{json.dumps(lock_info)}")
f.write(json.dumps(lock_info))
def _issue_advise_error(self, adv_results: list, labels: list):
"""Create an issue if advise fails."""
_LOGGER.debug(json.dumps(adv_results))
textblock = ""
errors = adv_results[0]["report"][0][0]
for error in errors:
justification = error["justification"]
type_ = error["type"]
_LOGGER.info(f"Error type: {type_}")
textblock = (
textblock
+ f"## Error type: {type_}\n"
+ f"**Justification**: {justification}\n"
)
checksum = hashlib.md5(textblock.encode("utf-8")).hexdigest()[:10]
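        # The short md5 prefix is deterministic, so identical error text always
        # maps to the same issue title and duplicates are avoided (illustrative:
        # hashlib.md5(b"x").hexdigest()[:10] returns the same value on every run).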
_LOGGER.info('Creating issue')
self.sm.open_issue_if_not_exist(
f"{checksum} - Automated kebechet thoth-advise Issue",
lambda: textblock,
labels=labels,
)
def run(self, labels: list, analysis_id=None):
"""Run Thoth Advising Bot."""
if analysis_id is None:
with cloned_repo(self.service_url, self.slug, depth=1) as repo:
self.repo = repo
if not os.path.isfile("Pipfile"):
_LOGGER.warning("Pipfile not found in repo... Creating issue")
self.sm.open_issue_if_not_exist(
"Missing Pipfile",
lambda: "Check your repository to make sure Pipfile exists",
labels=labels
)
return False
lib.advise_here(nowait=True, origin=(f"{self.service_url}/{self.slug}"))
return True
else:
with cloned_repo(self.service_url, self.slug, depth=1) as repo:
self.repo = repo
_LOGGER.info("Using analysis results from %s", analysis_id)
res = lib.get_analysis_results(analysis_id)
branch_name = self._construct_branch_name()
branch = self.repo.git.checkout("-B", branch_name)
self._cached_merge_requests = self.sm.repository.merge_requests
if res is None:
_LOGGER.error("Advise failed on server side, contact the maintainer")
return False
_LOGGER.debug(json.dumps(res))
if res[1] is False:
_LOGGER.info('Advise succeeded')
self._write_advise(res)
self._open_merge_request(branch_name, labels, ["Pipfile.lock"])
return True
else:
_LOGGER.warning('Found error while running adviser... Creating issue')
self._issue_advise_error(res, labels)
return False
| null |
kebechet/managers/thoth_advise/thoth_advise.py
|
thoth_advise.py
|
py
| 6,499 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "logging.getLogger",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "kebechet.managers.manager.ManagerBase",
"line_number": 41,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 69,
"usage_type": "attribute"
},
{
"api_name": "json.dumps",
"line_number": 101,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 102,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 106,
"usage_type": "call"
},
{
"api_name": "hashlib.md5",
"line_number": 119,
"usage_type": "call"
},
{
"api_name": "kebechet.utils.cloned_repo",
"line_number": 130,
"usage_type": "call"
},
{
"api_name": "os.path.isfile",
"line_number": 132,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 132,
"usage_type": "attribute"
},
{
"api_name": "thamos.lib.advise_here",
"line_number": 141,
"usage_type": "call"
},
{
"api_name": "thamos.lib",
"line_number": 141,
"usage_type": "name"
},
{
"api_name": "kebechet.utils.cloned_repo",
"line_number": 144,
"usage_type": "call"
},
{
"api_name": "thamos.lib.get_analysis_results",
"line_number": 147,
"usage_type": "call"
},
{
"api_name": "thamos.lib",
"line_number": 147,
"usage_type": "name"
},
{
"api_name": "json.dumps",
"line_number": 155,
"usage_type": "call"
}
] |
642655417
|
# Nurse shift replication by Cassio Amorim, CJS Inc.
# Licensed under Apache 2.0 License.
# Original paper by Ikeda, Nakamura, Humble. DOI: 10.1038/s41598-019-49172-3
# Original licensed under Creative Commons
## Target Hamiltonian:
## 目的ハミルトニアン:
## H(q) = \sum_n,n' \sum_d, d' J_i(n, d)j(n',d')q_iq_j
## + λ \sum_d [\sum_n E(n)*q_i - W(d)]^2
## + γ \sum_n [sum_d G(n,d)q_i - F(n)]^2
##
from dwave.system import LeapHybridSampler
from dwave.system.samplers import DWaveSampler
import dwave_networkx as dnx
import networkx as nx
from dwave.embedding import embed_bqm, embed_qubo, unembed_sampleset
from minorminer import find_embedding
from dimod import BinaryQuadraticModel
from collections import defaultdict
from copy import deepcopy
import pickle
## Setup functions and parameters
## 関数とパラメーターを設定します
### Size parameters
### サイズ パラメーター
numSampling = 1000
for nurses in range(3,5):
for days in range(6,15):
#everything below could be a function of `days` and `nurses`
size = days * nurses
### Hard nurse constraint: no nurse on consecutive days
### ハード看護師制約:連日出勤は禁止
a = 7 / 2
### Hard shift constraint: enough effort on shift to cover workforce needs
### ハード シフト制約:必要なワークフォースを対応できるエフォートの出勤
lagrange_hard_shift = 1.3
effort = lambda n : 1.0 # E(n)
workforce = lambda d : 1.0 # W(d)
### Soft nurse constraint: reflect each nurse's preferences
### ソフト看護師制約:各々の出勤希望の反映
lagrange_soft_nurse = 0.3
preference = lambda n, d : 1.0 # G(n,d)
duty_days = int(days / nurses) # even distribution
### Index function. n = index // days, d = index % days
### インデックス関数
index = lambda n,d: n * days + d
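        # Illustrative: with days = 6, index(n=1, d=2) == 1 * 6 + 2 == 8, i.e. the
        # qubit representing nurse 1 working on day 2 in the flattened grid.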
## Build Hamiltonian
## ハミルトニアンを構築します
### hard nurse constraint: \sum_n,n' \sum_d, d' J_i(n, d)j(n',d')q_iq_j
### J = a δ_(n,n') δ_(d',d+1)
J = defaultdict(int)
for nurse in range(nurses):
for day in range(days - 1):
index_d1 = index(nurse, day)
index_d2 = index(nurse, day + 1)
J[index_d1, index_d2] = a
### Copy to add shift constraints
### コピーしてシフトの制約を追加します
Q = deepcopy(J)
### hard shift constraint: λ \sum_d [\sum_n E(n)*q_i - W(d)]^2
for day in range(days):
for nurse in range(nurses):
idx = index(nurse, day)
Q[idx, idx] += (effort(nurse) - (2 * workforce(day))) * effort(nurse) * lagrange_hard_shift
for partner in range(nurse +1, nurses):
idx2 = index(partner, day)
Q[idx, idx2] += 2 * lagrange_hard_shift * effort(nurse) * effort(partner)
        ### soft nurse constraint: γ \sum_n [\sum_d G(n,d)q_i - F(n)]^2
for nurse in range(nurses):
for day in range(days):
idx = index(nurse, day)
Q[idx, idx] += lagrange_soft_nurse * preference(nurse, day) * (preference(nurse, day) - (2 * duty_days))
for day2 in range(day + 1, days):
idx2 = index(nurse, day2)
Q[idx, idx2] += 2 * lagrange_soft_nurse * preference(nurse, day) * preference(nurse, day2)
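        # Note (illustrative derivation): the diagonal/off-diagonal updates above
        # come from expanding the squared penalties with q^2 = q for binaries, e.g.
        # (sum_n E(n) q_n - W)^2 = sum_n E(n)(E(n) - 2W) q_n
        #                          + 2 sum_{n<n'} E(n) E(n') q_n q_n' + W^2,
        # where the constant W^2 is accounted for in the energy offset below.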
## Solve
## 解きます
### Graph embedding
topology = 'pegasus' # 'chimera' or 'pegasus'
sampler = DWaveSampler(solver={'topology__type': topology,'qpu': True})
embedding = find_embedding(Q.keys(), sampler.edgelist)
embeddedQ = embed_qubo(Q, embedding, sampler.adjacency)
### Energy offset
### エネルギー オフセット
e_offset = lagrange_hard_shift * days * workforce(1) ** 2
e_offset += lagrange_soft_nurse * nurses * duty_days ** 2
### BQM
bqm = BinaryQuadraticModel.from_qubo(embeddedQ, offset=e_offset)
sbqm = BinaryQuadraticModel.from_qubo(Q, offset=e_offset)
# Sample solution
# 解をサンプリングします
print("Connected to {}. N = {}, D = {}".format(sampler.solver.id, nurses, days))
results = sampler.sample(bqm, num_reads=numSampling)
samples = unembed_sampleset(results, embedding, sbqm, chain_break_fraction=True)
### Save data with pickle for analysis and reverse annealing
### 結果分析と逆アニーリングのため pickle を用いてデータを保存します
fout = "results_%s_N%d_D%d_s%d.p" % (topology, nurses, days, numSampling)
saveDict = {'results' : results, 'embedding' : embedding, 'bqm': sbqm, 'samples' : samples}
pickle.dump(saveDict, open(fout, "wb"))
| null |
Nurse Shift.py
|
Nurse Shift.py
|
py
| 5,105 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "collections.defaultdict",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "copy.deepcopy",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "dwave.system.samplers.DWaveSampler",
"line_number": 94,
"usage_type": "call"
},
{
"api_name": "minorminer.find_embedding",
"line_number": 96,
"usage_type": "call"
},
{
"api_name": "dwave.embedding.embed_qubo",
"line_number": 97,
"usage_type": "call"
},
{
"api_name": "dimod.BinaryQuadraticModel.from_qubo",
"line_number": 105,
"usage_type": "call"
},
{
"api_name": "dimod.BinaryQuadraticModel",
"line_number": 105,
"usage_type": "name"
},
{
"api_name": "dimod.BinaryQuadraticModel.from_qubo",
"line_number": 106,
"usage_type": "call"
},
{
"api_name": "dimod.BinaryQuadraticModel",
"line_number": 106,
"usage_type": "name"
},
{
"api_name": "dwave.embedding.unembed_sampleset",
"line_number": 112,
"usage_type": "call"
},
{
"api_name": "pickle.dump",
"line_number": 118,
"usage_type": "call"
}
] |
472541643
|
import enum
from ctssimu.data import Machine, AbstractState, states, AllOtherStates
from ctssimu.cts import Process, ProcessState
from ctssimu.io import CtEvetAnnoodeOff, CtEvetPowerOffCooling, CtEvetPowerOffFinished, CtEvetPowerOffStart
class PowerOffOperatorState(AbstractState, enum.Enum):
IDLE = 0
RUNNING = 1
CANCELLING = 2
CANCELLED = 3
ANNODE_OFF = 4
COOLING = 5
FINISHED = 6
ANNODE_OFF_PROGRESS = 50
COOLING_PROGRESS = 80
class PowerOffOperator(Machine):
STATE_ENUM = PowerOffOperatorState
def __init__(self, state=PowerOffOperatorState.IDLE, off_process=None, stdout=None):
super().__init__(state, stdout=stdout)
self._process = self.install(off_process, self.default_process)
def default_process(self):
return Process(step=10)
def run(self):
self._running2annode_off()
self._annode_off2cooling()
self._cooling2finish()
@states([PowerOffOperatorState.RUNNING], AllOtherStates)
def _running2annode_off(self):
if self._process.progress > ANNODE_OFF_PROGRESS:
self._state = PowerOffOperatorState.ANNODE_OFF
self._stdout.push(CtEvetAnnoodeOff())
@states([PowerOffOperatorState.ANNODE_OFF], AllOtherStates)
def _annode_off2cooling(self):
if self._process.progress > COOLING_PROGRESS:
self._state = PowerOffOperatorState.COOLING
self._stdout.push(CtEvetPowerOffCooling())
@states([PowerOffOperatorState.COOLING], AllOtherStates)
def _cooling2finish(self):
if self._process.state == ProcessState.COMPLETED:
self._state = PowerOffOperatorState.FINISHED
self._stdout.push(CtEvetPowerOffFinished())
@states([PowerOffOperatorState.IDLE], AllOtherStates)
def start(self):
self._state = PowerOffOperatorState.RUNNING
self._process.start()
self._stdout.push(CtEvetPowerOffStart())
@states([PowerOffOperatorState.RUNNING,
PowerOffOperatorState.ANNODE_OFF,
PowerOffOperatorState.COOLING],
AllOtherStates)
def cancel(self):
if self._state in (PowerOffOperatorState.ANNODE_OFF, PowerOffOperatorState.COOLING):
return
self._process.stop()
self._state = PowerOffOperatorState.CANCELLED
def is_working(self):
return self._state in (PowerOffOperatorState.RUNNING,
PowerOffOperatorState.ANNODE_OFF,
PowerOffOperatorState.COOLING)
def is_canceled(self):
return self._state == PowerOffOperatorState.CANCELLED
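# Illustrative usage sketch (hypothetical `event_queue`; assumes the underlying
# Process advances its progress between run() calls):
#
#   op = PowerOffOperator(stdout=event_queue)
#   op.start()                # IDLE -> RUNNING, emits CtEvetPowerOffStart
#   while op.is_working():
#       op.run()              # RUNNING -> ANNODE_OFF -> COOLING -> FINISHED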
| null |
ctssimu/ctssimu/cts/power/off.py
|
off.py
|
py
| 2,621 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "ctssimu.data.AbstractState",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "enum.Enum",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "ctssimu.data.Machine",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "ctssimu.cts.Process",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "ctssimu.io.CtEvetAnnoodeOff",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "ctssimu.data.states",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "ctssimu.data.AllOtherStates",
"line_number": 36,
"usage_type": "argument"
},
{
"api_name": "ctssimu.io.CtEvetPowerOffCooling",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "ctssimu.data.states",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "ctssimu.data.AllOtherStates",
"line_number": 42,
"usage_type": "argument"
},
{
"api_name": "ctssimu.cts.ProcessState.COMPLETED",
"line_number": 50,
"usage_type": "attribute"
},
{
"api_name": "ctssimu.cts.ProcessState",
"line_number": 50,
"usage_type": "name"
},
{
"api_name": "ctssimu.io.CtEvetPowerOffFinished",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "ctssimu.data.states",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "ctssimu.data.AllOtherStates",
"line_number": 48,
"usage_type": "argument"
},
{
"api_name": "ctssimu.io.CtEvetPowerOffStart",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "ctssimu.data.states",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "ctssimu.data.AllOtherStates",
"line_number": 54,
"usage_type": "argument"
},
{
"api_name": "ctssimu.data.states",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "ctssimu.data.AllOtherStates",
"line_number": 63,
"usage_type": "argument"
}
] |
367550628
|
from sklearn import preprocessing
import pandas as pd
import matplotlib.pyplot as plt
def iris_type(s):
s = str(s,'utf-8')
# print(type(s))
it = {'Iris-setosa': 0, 'Iris-versicolor': 1, 'Iris-virginica': 2}
return it[s]
if __name__ == "__main__":
    path = u'8.iris.data'  # data file path
df = pd.read_csv(path, header=0)
x = df.values[:, :-1]
y = df.values[:, -1]
le = preprocessing.LabelEncoder()
le.fit(['Iris-setosa', 'Iris-versicolor', 'Iris-virginica'])
y = le.transform(y)
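    # Illustrative: after fitting, le.transform(['Iris-setosa']) -> array([0]),
    # so y now holds the integer class labels 0/1/2.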
x1 = x[:,:1]
x2 = x[:, 1:2]
col = ['black','blue','red']
marks = ['o', 'v', '+']
# print(len(y),'=',len(x1),'=',len(x2))
plt.grid()
for k in range(len(y)):
plt.plot(x1[k], x2[k],marker=marks[y[k]],color=col[y[k]])
plt.show()
x3 = x[:, 2:3]
x4 = x[:, -1]
# print(x[0],'<<',x3[0],',',x4[0])
plt.grid()
for k in range(len(y)):
plt.plot(x3[k], x4[k],marker=marks[y[k]],color=col[y[k]])
plt.show()
| null |
sklearn_loc/Iris/iris-plot.py
|
iris-plot.py
|
py
| 994 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "pandas.read_csv",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "sklearn.preprocessing.LabelEncoder",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "sklearn.preprocessing",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.grid",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.grid",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 35,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 36,
"usage_type": "name"
}
] |
57437260
|
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
from contextlib import closing
from six import StringIO
class Manifest(object):
"""
Implements the basics of the jar manifest specification.
See: http://docs.oracle.com/javase/1.5.0/docs/guide/jar/jar.html#Manifest Specification
"""
@staticmethod
def _wrap(text):
text = text.encode('ascii')
with closing(StringIO(text)) as fp:
yield fp.read(70)
while True:
chunk = fp.read(69)
if not chunk:
return
yield ' {}'.format(chunk)
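  # Illustrative: a 150-character "Header: value" line is emitted as one
  # 70-character line followed by continuation lines of at most 69 characters,
  # each prefixed with a single space, per the jar manifest line-length limit.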
PATH = 'META-INF/MANIFEST.MF'
MANIFEST_VERSION = 'Manifest-Version'
CREATED_BY = 'Created-By'
MAIN_CLASS = 'Main-Class'
CLASS_PATH = 'Class-Path'
def __init__(self, contents=''):
self._contents = contents.strip().encode('ascii')
def addentry(self, header, value):
if len(header) > 68:
raise ValueError('Header name must be 68 characters or less, given {}'.format(header))
if self._contents:
self._contents += '\n'
self._contents += '\n'.join(self._wrap('{header}: {value}'.format(header=header, value=value)))
def contents(self):
padded = self._contents + '\n'
return padded.encode('ascii')
def is_empty(self):
if self._contents.strip():
return False
return True
| null |
src/python/pants/java/jar/manifest.py
|
manifest.py
|
py
| 1,537 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "contextlib.closing",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "six.StringIO",
"line_number": 23,
"usage_type": "call"
}
] |
438629179
|
import PyPDF2
from sys import argv
def extract(old_path, new_path):
old_pdf = PyPDF2.PdfFileReader(open(old_path, 'rb'))
new_pdf = PyPDF2.PdfFileWriter()
get = lambda x : old_pdf.getPage(x).extractText()
old_sz = old_pdf.getNumPages()
for i in range(old_sz):
if i == old_sz-1 or not get(i+1).startswith(get(i)):
new_pdf.addPage(old_pdf.getPage(i))
new_pdf.write(open(new_path, 'wb'))
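    # Heuristic note: page i is dropped whenever page i+1's extracted text starts
    # with page i's text, which collapses incrementally-built slide decks down to
    # their final, complete pages.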
if __name__ == '__main__':
old_path = argv[1] if len(argv) >= 2 else 'in.pdf'
new_path = argv[2] if len(argv) >= 3 else 'out.pdf'
extract(old_path, new_path)
| null |
fix-pdf/main.py
|
main.py
|
py
| 625 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "PyPDF2.PdfFileReader",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "PyPDF2.PdfFileWriter",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 20,
"usage_type": "argument"
},
{
"api_name": "sys.argv",
"line_number": 21,
"usage_type": "argument"
}
] |
240285070
|
import logging
import tasks.tasks as conc
import Jython_tasks.task as jython_tasks
from couchbase_helper.documentgenerator import doc_generator
from membase.api.rest_client import RestConnection
from sdk_client3 import SDKClient as VBucketAwareMemcached
from BucketLib.BucketOperations import BucketHelper
"""An API for scheduling tasks that run against Couchbase Server
This module contains the top-level APIs for scheduling and executing tasks.
The API provides a way to run tasks synchronously or asynchronously.
"""
class ServerTasks(object):
"""
A Task API for performing various operations synchronously or
asynchronously on Couchbase cluster
"""
def __init__(self, task_manager):
self.jython_task_manager = task_manager
self.log = logging.getLogger("infra")
self.log.debug("Initiating ServerTasks")
def async_create_bucket(self, server, bucket):
"""
Asynchronously creates the default bucket
Parameters:
bucket_params - a dictionary containing bucket creation parameters.
Returns:
BucketCreateTask - Task future that is a handle to the scheduled task
"""
# bucket_params['bucket_name'] = 'default'
_task = conc.BucketCreateTask(server, bucket,
task_manager=self.jython_task_manager)
self.jython_task_manager.schedule(_task)
return _task
def sync_create_bucket(self, server, bucket):
"""
Synchronously creates the default bucket
Parameters:
bucket_params - a dictionary containing bucket creation parameters.
Returns:
BucketCreateTask - Task future that is a handle to the scheduled task
"""
# bucket_params['bucket_name'] = 'default'
_task = conc.BucketCreateTask(server, bucket,
task_manager=self.jython_task_manager)
self.jython_task_manager.schedule(_task)
return _task.get_result()
def async_failover(self, servers=[], failover_nodes=[], graceful=False,
use_hostnames=False, wait_for_pending=0):
"""
Asynchronously failover a set of nodes
Parameters:
servers - servers used for connection. (TestInputServer)
failover_nodes - Servers that will be failed over (TestInputServer)
graceful = True/False. True - graceful, False - hard. (Boolean)
Returns:
FailOverTask - A task future that is a handle to the scheduled task
"""
_task = conc.FailoverTask(
servers, task_manager=self.jython_task_manager,
to_failover=failover_nodes, graceful=graceful,
use_hostnames=use_hostnames, wait_for_pending=wait_for_pending)
self.jython_task_manager.schedule(_task)
return _task
def async_init_node(self, server, disabled_consistent_view=None,
rebalanceIndexWaitingDisabled=None,
rebalanceIndexPausingDisabled=None,
maxParallelIndexers=None,
maxParallelReplicaIndexers=None, port=None,
quota_percent=None, services=None,
index_quota_percent=None, gsi_type='forestdb'):
"""
Asynchronously initializes a node
The task scheduled will initialize a nodes username and password and
will establish the nodes memory quota to be 2/3 of the available
system memory.
Parameters:
server - The server to initialize. (TestInputServer)
disabled_consistent_view - disable consistent view
rebalanceIndexWaitingDisabled - index waiting during rebalance(Bool)
rebalanceIndexPausingDisabled - index pausing during rebalance(Bool)
maxParallelIndexers - max parallel indexers threads(int)
index_quota_percent - index quote used by GSI service
(added due to sherlock)
maxParallelReplicaIndexers - max replica indexers threads (int)
port - port to initialize cluster
quota_percent - percent of memory to initialize
services - can be kv, n1ql, index
gsi_type - Indexer Storage Mode
Returns:
NodeInitTask - A task future that is a handle to the scheduled task
"""
_task = conc.NodeInitializeTask(
server, self.jython_task_manager, disabled_consistent_view,
rebalanceIndexWaitingDisabled, rebalanceIndexPausingDisabled,
maxParallelIndexers, maxParallelReplicaIndexers,
port, quota_percent, services=services,
index_quota_percent=index_quota_percent, gsi_type=gsi_type)
self.jython_task_manager.schedule(_task)
return _task
def async_load_gen_docs(self, cluster, bucket, generator, op_type, exp=0,
flag=0, persist_to=0, replicate_to=0,
only_store_hash=True, batch_size=1, pause_secs=1,
timeout_secs=5, compression=True,
process_concurrency=8, retries=5,
active_resident_threshold=100,
durability=""):
self.log.debug("Loading documents to {}".format(bucket.name))
clients = []
gen_start = int(generator.start)
gen_end = max(int(generator.end), 1)
gen_range = max(int((generator.end-generator.start) / process_concurrency), 1)
for _ in range(gen_start, gen_end, gen_range):
client = VBucketAwareMemcached(RestConnection(cluster.master),
bucket)
clients.append(client)
if active_resident_threshold == 100:
_task = jython_tasks.LoadDocumentsGeneratorsTask(
cluster, self.jython_task_manager, bucket, clients,
[generator], op_type, exp, exp_unit="second", flag=flag, persist_to=persist_to,
replicate_to=replicate_to, only_store_hash=only_store_hash,
batch_size=batch_size, pause_secs=pause_secs,
timeout_secs=timeout_secs, compression=compression,
process_concurrency=process_concurrency, retries=retries,
durability=durability)
else:
_task = jython_tasks.LoadDocumentsForDgmTask(
cluster, self.jython_task_manager, bucket, client, [generator],
op_type, exp, flag=flag, persist_to=persist_to,
replicate_to=replicate_to, only_store_hash=only_store_hash,
batch_size=batch_size, pause_secs=pause_secs,
timeout_secs=timeout_secs, compression=compression,
process_concurrency=process_concurrency, retries=retries,
active_resident_threshold=active_resident_threshold)
self.jython_task_manager.add_new_task(_task)
return _task
def async_continuous_update_docs(self, cluster, bucket, generator, exp=0,
flag=0, persist_to=0, replicate_to=0,
only_store_hash=True, batch_size=1,
pause_secs=1, timeout_secs=5,
compression=True,
process_concurrency=8, retries=5):
self.log.debug("Mutating documents to {}".format(bucket.name))
client = VBucketAwareMemcached(RestConnection(cluster.master), bucket)
_task = jython_tasks.ContinuousDocUpdateTask(
cluster, self.jython_task_manager, bucket, client, [generator],
"update", exp, flag=flag, persist_to=persist_to,
replicate_to=replicate_to, only_store_hash=only_store_hash,
batch_size=batch_size, pause_secs=pause_secs,
timeout_secs=timeout_secs, compression=compression,
process_concurrency=process_concurrency, retries=retries)
self.jython_task_manager.add_new_task(_task)
return _task
def async_load_gen_docs_atomicity(self, cluster, buckets, generator, op_type,
exp=0, flag=0, persist_to=0, replicate_to=0,
only_store_hash=True, batch_size=1, pause_secs=1,
timeout_secs=5, compression=True,
process_concurrency=1, retries=5,
transaction_timeout=5, commit=True, durability=0):
self.log.debug("Loading documents")
bucket_list=[]
client_list=[]
for bucket in buckets:
client = VBucketAwareMemcached(RestConnection(cluster.master), bucket)
client_list.append(client)
bucket_list.append(client.collection)
_task = jython_tasks.Atomicity(cluster, self.jython_task_manager, bucket_list, client, client_list, [generator],
op_type, exp, flag=flag, persist_to=persist_to,
replicate_to=replicate_to, only_store_hash=only_store_hash,
batch_size=batch_size,
pause_secs=pause_secs, timeout_secs=timeout_secs,
compression=compression,
process_concurrency=process_concurrency, retries=retries,transaction_timeout=transaction_timeout, commit=commit, durability=durability)
self.jython_task_manager.add_new_task(_task)
return _task
def async_load_gen_docs_durable(self, cluster, bucket, generator, op_type,
exp=0, flag=0, persist_to=0,
replicate_to=0, only_store_hash=True,
batch_size=1, pause_secs=1,
timeout_secs=5, compression=True,
process_concurrency=1, retries=5,
durability=""):
self.log.debug("Loading documents to {}".format(bucket.name))
clients = []
gen_start = int(generator.start)
gen_end = max(int(generator.end), 1)
gen_range = max(int((generator.end-generator.start) / process_concurrency), 1)
for _ in range(gen_start, gen_end, gen_range):
client = VBucketAwareMemcached(RestConnection(cluster.master),
bucket)
clients.append(client)
majority_value = (bucket.replicaNumber + 1)/2 + 1
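        # Illustrative: with bucket.replicaNumber = 1 there are 2 copies, so
        # majority_value = (1 + 1) / 2 + 1 = 2 nodes must acknowledge each write.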
_task = jython_tasks.Durability(
cluster, self.jython_task_manager, bucket, clients, generator,
op_type, exp, flag=flag, persist_to=persist_to,
replicate_to=replicate_to, only_store_hash=only_store_hash,
batch_size=batch_size, pause_secs=pause_secs,
timeout_secs=timeout_secs, compression=compression,
process_concurrency=process_concurrency, retries=retries,
durability=durability, majority_value=majority_value)
self.jython_task_manager.add_new_task(_task)
return _task
def async_load_bucket_for_dgm(self, cluster, bucket, generator, opt_type,
active_resident_threshold,
exp=0, flag=0, only_store_hash=True,
batch_size=1, pause_secs=1, timeout_secs=5,
compression=True, process_concurrency=4):
"""
Loads specified bucket with docs until specified DGM percentage is
achieved
Parameters:
cluster - Cluster object
bucket - Bucket object to which docs needs to be loaded
generator - Document generator object
opt_type - Operation type
active_resident_threshold - Percentage of DGM needs to be achieved
Returns:
_task - Async task created for DGM task
"""
self.log.debug("Loading doc into {0} until dgm is {1}%"
.format(bucket.name, active_resident_threshold))
client = VBucketAwareMemcached(RestConnection(cluster.master), bucket)
_task = jython_tasks.LoadDocumentsForDgmTask(
cluster, self.jython_task_manager, bucket, client, [generator],
opt_type, exp, flag=flag, only_store_hash=only_store_hash,
batch_size=batch_size, pause_secs=pause_secs,
timeout_secs=timeout_secs, compression=compression,
process_concurrency=process_concurrency,
active_resident_threshold=active_resident_threshold)
self.jython_task_manager.add_new_task(_task)
return _task
def load_bucket_into_dgm(self, cluster, bucket, key, num_items,
active_resident_threshold, load_batch_size=20000,
batch_size=10, process_concurrency=4,
persist_to=None, replicate_to=None):
rest = BucketHelper(cluster.master)
bucket_stat = rest.get_bucket_stats_for_node(bucket.name,
cluster.master)
while bucket_stat["vb_active_resident_items_ratio"] > \
active_resident_threshold:
gen_load = doc_generator(key, num_items,
num_items+load_batch_size,
doc_type="binary")
num_items += load_batch_size
task = self.async_load_gen_docs(
cluster, bucket, gen_load, "create", 0,
batch_size=batch_size, process_concurrency=process_concurrency,
persist_to=persist_to, replicate_to=replicate_to)
self.jython_task_manager.get_task_result(task)
bucket_stat = rest.get_bucket_stats_for_node(bucket.name,
cluster.master)
return num_items
def async_validate_docs(self, cluster, bucket, generator, opt_type, exp=0,
flag=0, only_store_hash=True, batch_size=1,
pause_secs=1, timeout_secs=5, compression=True,
process_concurrency=4):
self.log.debug("Validating documents")
client = VBucketAwareMemcached(RestConnection(cluster.master), bucket)
_task = jython_tasks.DocumentsValidatorTask(
cluster, self.jython_task_manager, bucket, client, [generator],
opt_type, exp, flag=flag, only_store_hash=only_store_hash,
batch_size=batch_size, pause_secs=pause_secs,
timeout_secs=timeout_secs, compression=compression,
process_concurrency=process_concurrency)
self.jython_task_manager.add_new_task(_task)
return _task
def async_rebalance(self, servers, to_add, to_remove, use_hostnames=False,
services=None, check_vbucket_shuffling=True):
"""
Asynchronously rebalances a cluster
Parameters:
servers - Servers participating in the rebalance ([TestServers])
to_add - Servers being added to the cluster ([TestServers])
to_remove - Servers being removed from the cluster ([TestServers])
use_hostnames - True if nodes should be added using hostnames (Bool)
Returns:
RebalanceTask - A task future that is a handle to the scheduled task
"""
_task = jython_tasks.RebalanceTask(
servers, to_add, to_remove, use_hostnames=use_hostnames,
services=services, check_vbucket_shuffling=check_vbucket_shuffling)
self.jython_task_manager.add_new_task(_task)
return _task
def async_wait_for_stats(self, shell_conn_list, bucket, stat_cmd, stat,
comparison, value, timeout=60):
"""
Asynchronously wait for stats
Waits for stats to match the criteria passed by the stats variable.
See couchbase.stats_tool.StatsCommon.build_stat_check(...) for a
description of the stats structure and how it can be built.
Parameters:
shell_conn_list - Objects of type 'RemoteMachineShellConnection'.
Uses this object to execute cbstats binary in the
cluster nodes
bucket - The name of the bucket (String)
stat_cmd - The stats name to fetch using cbstats. (String)
stat - The stat that we want to get the value from. (String)
comparison - How to compare the stat result to the value specified.
value - The value to compare to.
timeout - Timeout for stat verification task
Returns:
RebalanceTask - Task future that is a handle to the scheduled task
"""
self.log.debug("Starting StatsWaitTask for %s on bucket %s" % (stat, bucket.name))
_task = jython_tasks.StatsWaitTask(shell_conn_list, bucket, stat_cmd,
stat, comparison, value,
timeout=timeout)
self.jython_task_manager.add_new_task(_task)
return _task
def async_monitor_db_fragmentation(self, server, bucket, fragmentation,
get_view_frag=False):
"""
        Asynchronously monitor db fragmentation
Parameters:
servers - server to check(TestInputServers)
bucket - bucket to check
fragmentation - fragmentation to reach
get_view_frag - Monitor view fragmentation.
In case enabled when <fragmentation_value> is
reached this method will return (boolean)
Returns:
MonitorDBFragmentationTask - A task future that is a handle to the
scheduled task
"""
_task = jython_tasks.MonitorDBFragmentationTask(server, fragmentation,
bucket, get_view_frag)
self.jython_task_manager.add_new_task(_task)
return _task
def create_default_bucket(self, bucket_params, timeout=600):
"""
Synchronously creates the default bucket
Parameters:
bucket_params - A dictionary containing a list of bucket
creation parameters (dict)
Returns:
boolean - Whether or not the bucket was created.
"""
_task = self.async_create_default_bucket(bucket_params)
return _task.get_result(timeout)
def create_sasl_bucket(self, name, password, bucket_params, timeout=None):
"""Synchronously creates a sasl bucket
Parameters:
bucket_params - A dictionary containing a list of bucket creation
parameters. (Dict)
Returns:
boolean - Whether or not the bucket was created."""
_task = self.async_create_sasl_bucket(name, password, bucket_params)
self.jython_task_manager.schedule(_task)
return _task.get_result(timeout)
def create_standard_bucket(self, name, port, bucket_params, timeout=None):
"""Synchronously creates a standard bucket
Parameters:
bucket_params - A dictionary containing a list of bucket creation
parameters. (Dict)
Returns:
boolean - Whether or not the bucket was created."""
_task = self.async_create_standard_bucket(name, port, bucket_params)
return _task.get_result(timeout)
def init_node(self, server, async_init_node=True,
disabled_consistent_view=None, services=None,
index_quota_percent=None):
"""Synchronously initializes a node
The task scheduled will initialize a nodes username and password and
will establish the nodes memory quota to be 2/3 of the available
system memory.
Parameters:
server - The server to initialize. (TestInputServer)
index_quota_percent - index quota percentage
disabled_consistent_view - disable consistent view
Returns:
boolean - Whether or not the node was properly initialized."""
_task = self.async_init_node(
server, async_init_node, disabled_consistent_view,
services=services, index_quota_percent=index_quota_percent)
return _task.result()
def rebalance(self, servers, to_add, to_remove, timeout=None,
use_hostnames=False, services=None):
"""
Synchronously rebalances a cluster
Parameters:
servers - Servers participating in the rebalance ([TestServers])
to_add - Servers being added to the cluster ([TestServers])
to_remove - Servers being removed from the cluster ([TestServers])
use_hostnames - True if nodes should be added using hostnames (Bool)
services - Services definition per Node, default is None
(since Sherlock release)
Returns:
boolean - Whether or not the rebalance was successful
"""
_task = self.async_rebalance(servers, to_add, to_remove, use_hostnames,
services=services)
result = self.jython_task_manager.get_task_result(_task)
return result
def load_gen_docs(self, cluster, bucket, generator, op_type, exp=0, flag=0,
persist_to=0, replicate_to=0, only_store_hash=True,
batch_size=1, compression=True, process_concurrency=8,
retries=5):
_task = self.async_load_gen_docs(
cluster, bucket, generator, op_type, exp, flag,
persist_to=persist_to, replicate_to=replicate_to,
only_store_hash=only_store_hash, batch_size=batch_size,
compression=compression, process_concurrency=process_concurrency,
retries=retries)
return self.jython_task_manager.get_task_result(_task)
def verify_data(self, server, bucket, kv_store, timeout=None,
compression=True):
_task = self.async_verify_data(server, bucket, kv_store,
compression=compression)
return _task.result(timeout)
def async_verify_data(self, server, bucket, kv_store, max_verify=None,
only_store_hash=True, batch_size=1,
replica_to_read=None, timeout_sec=5,
compression=True):
if batch_size > 1:
_task = conc.BatchedValidateDataTask(
server, bucket, kv_store, max_verify, only_store_hash,
batch_size, timeout_sec, self.jython_task_manager,
compression=compression)
else:
_task = conc.ValidateDataTask(
server, bucket, kv_store, max_verify, only_store_hash,
replica_to_read, self.jython_task_manager, compression=compression)
self.jython_task_manager.schedule(_task)
return _task
def wait_for_stats(self, cluster, bucket, param, stat, comparison, value,
timeout=None):
"""Synchronously wait for stats
Waits for stats to match the criteria passed by the stats variable. See
couchbase.stats_tool.StatsCommon.build_stat_check(...) for a
description of the stats structure and how it can be built.
Parameters:
servers - The servers to get stats from. Specifying multiple
servers will cause the result from each server to be
added together before comparing. ([TestInputServer])
bucket - The name of the bucket (String)
param - The stats parameter to use. (String)
stat - The stat that we want to get the value from. (String)
comparison - How to compare the stat result to the value specified.
value - The value to compare to.
Returns:
boolean - Whether or not the correct stats state was seen"""
_task = self.async_wait_for_stats(cluster, bucket, param, stat,
comparison, value)
return self.jython_task_manager.get_task_result(_task)
def shutdown(self, force=False):
self.jython_task_manager.shutdown(force)
if force:
self.log.warning("Cluster instance shutdown with force")
def async_n1ql_query_verification(self, server, bucket, query,
n1ql_helper=None,
expected_result=None,
is_explain_query=False,
index_name=None, verify_results=True,
retry_time=2, scan_consistency=None,
scan_vector=None):
"""Asynchronously runs n1ql querya and verifies result if required
Parameters:
server - Server to handle query verification task (TestInputServer)
query - Query params being used with the query. (dict)
expected_result - expected result after querying
is_explain_query - is query explain query
index_name - index related to query
bucket - Name of the bucket containing items for this view (String)
verify_results - Verify results after query runs successfully
retry_time - Seconds to wait before retrying failed queries (int)
n1ql_helper - n1ql helper object
scan_consistency - consistency value for querying
scan_vector - scan vector used for consistency
Returns:
N1QLQueryTask - A task future that is a handle to the scheduled task
"""
_task = jython_tasks.N1QLQueryTask(
n1ql_helper=n1ql_helper, server=server, bucket=bucket,
query=query, expected_result=expected_result,
verify_results=verify_results, is_explain_query=is_explain_query,
index_name=index_name, retry_time=retry_time,
scan_consistency=scan_consistency, scan_vector=scan_vector)
self.jython_task_manager.add_new_task(_task)
return _task
def n1ql_query_verification(self, server, bucket, query, n1ql_helper=None,
expected_result=None, is_explain_query=False,
index_name=None, verify_results=True,
scan_consistency=None, scan_vector=None,
retry_time=2, timeout=60):
"""
        Synchronously runs a n1ql query and verifies the result if required
Parameters:
server - Server to handle query verification task (TestInputServer)
query - Query params being used with the query. (dict)
expected_result - expected result after querying
is_explain_query - is query explain query
index_name - index related to query
bucket - Name of the bucket containing items for this view (String)
verify_results - Verify results after query runs successfully
retry_time - Seconds to wait before retrying failed queries (int)
n1ql_helper - n1ql helper object
scan_consistency - consistency used during querying
scan_vector - vector used during querying
timeout - timeout for task
Returns:
N1QLQueryTask - A task future that is a handle to the scheduled task
"""
_task = self.async_n1ql_query_verification(
n1ql_helper=n1ql_helper, server=server, bucket=bucket, query=query,
expected_result=expected_result, is_explain_query=is_explain_query,
index_name=index_name, verify_results=verify_results,
retry_time=retry_time, scan_consistency=scan_consistency,
scan_vector=scan_vector)
return self.jython_task_manager.get_task_result(_task)
def async_create_index(self, server, bucket, query, n1ql_helper=None,
index_name=None, defer_build=False, retry_time=2,
timeout=240):
"""
Asynchronously runs create index task
Parameters:
server - Server to handle query verification task (TestInputServer)
query - Query params being used with the query.
bucket - Name of the bucket containing items for this view (String)
index_name - Name of the index to be created
defer_build - build is defered
retry_time - Seconds to wait before retrying failed queries (int)
n1ql_helper - n1ql helper object
timeout - timeout for index to come online
Returns:
CreateIndexTask - A task future that is a handle for scheduled task
"""
_task = jython_tasks.CreateIndexTask(
n1ql_helper=n1ql_helper, server=server, bucket=bucket,
defer_build=defer_build, index_name=index_name, query=query,
retry_time=retry_time, timeout=timeout)
self.jython_task_manager.add_new_task(_task)
return _task
def async_monitor_index(self, server, bucket, n1ql_helper=None,
index_name=None, retry_time=2, timeout=240):
"""
        Asynchronously runs a monitor index task
Parameters:
server - Server to handle query verification task (TestInputServer)
query - Query params being used with the query.
bucket - Name of the bucket containing items for this view (String)
index_name - Name of the index to be created
retry_time - Seconds to wait before retrying failed queries (int)
timeout - timeout for index to come online
n1ql_helper - n1ql helper object
Returns:
MonitorIndexTask - A task future that is a handle for scheduled task
"""
_task = jython_tasks.MonitorIndexTask(
n1ql_helper=n1ql_helper, server=server, bucket=bucket,
index_name=index_name, retry_time=retry_time, timeout=timeout)
self.jython_task_manager.add_new_task(_task)
return _task
def async_build_index(self, server, bucket, query, n1ql_helper=None,
retry_time=2):
"""
        Asynchronously runs a build index task
Parameters:
server - Server to handle query verification task (TestInputServer)
query - Query params being used with the query.
bucket - Name of the bucket containing items for this view (String)
retry_time - Seconds to wait before retrying failed queries (int)
n1ql_helper - n1ql helper object
Returns:
BuildIndexTask - A task future that is a handle to the scheduled task
"""
_task = jython_tasks.BuildIndexTask(
n1ql_helper=n1ql_helper, server=server, bucket=bucket, query=query,
retry_time=retry_time)
self.jython_task_manager.add_new_task(_task)
return _task
def create_index(self, server, bucket, query, n1ql_helper=None,
index_name=None, defer_build=False, retry_time=2,
timeout=60):
"""
        Synchronously runs a create index task
Parameters:
server - Server to handle query verification task. (TestInputServer)
query - Query params being used with the query.
bucket - Name of the bucket containing items for this view (String)
index_name - Name of the index to be created
retry_time - Seconds to wait before retrying failed queries (int)
n1ql_helper - n1ql helper object
defer_build - defer the build
timeout - timeout for the task
Returns:
N1QLQueryTask - A task future that is a handle to the scheduled task
"""
_task = self.async_create_index(
n1ql_helper=n1ql_helper, server=server, bucket=bucket, query=query,
index_name=index_name, defer_build=defer_build,
retry_time=retry_time)
return self.jython_task_manager.get_task_result(_task)
def async_drop_index(self, server=None, bucket="default", query=None,
n1ql_helper=None, index_name=None, retry_time=2):
"""
        Asynchronously runs a drop index task
Parameters:
server - Server to handle query verification task (TestInputServer)
query - Query params being used with the query.
bucket - Name of the bucket containing items for this view (String)
index_name - Name of the index to be dropped
retry_time - Seconds to wait before retrying failed queries (int)
n1ql_helper - n1ql helper object
Returns:
DropIndexTask - A task future that is a handle to the scheduled task
"""
_task = jython_tasks.DropIndexTask(
n1ql_helper=n1ql_helper, server=server, bucket=bucket, query=query,
index_name=index_name, retry_time=retry_time)
self.jython_task_manager.add_new_task(_task)
return _task
def drop_index(self, server, bucket, query, n1ql_helper=None,
index_name=None, retry_time=2, timeout=60):
"""
Synchronously runs drop index task
Parameters:
server - Server to handle query verification task (TestInputServer)
query - Query params being used with the query. (dict)
bucket - Name of the bucket containing items for this view. (String)
index_name - Name of the index to be created
retry_time - Seconds to wait before retrying failed queries (int)
n1ql_helper - n1ql helper object
timeout - timeout for the task
Returns:
N1QLQueryTask - A task future that is a handle to the scheduled task
"""
_task = self.async_drop_index(
n1ql_helper=n1ql_helper, server=server, bucket=bucket, query=query,
index_name=index_name, retry_time=retry_time)
return self.jython_task_manager.get_task_result(_task)
def failover(self, servers=[], failover_nodes=[], graceful=False,
use_hostnames=False, timeout=None):
"""Synchronously flushes a bucket
Parameters:
servers - node used for connection (TestInputServer)
failover_nodes - Servers to be failed over (TestInputServer)
bucket - The name of the bucket to be flushed. (String)
Returns:
boolean - Whether or not the bucket was flushed."""
_task = self.async_failover(servers, failover_nodes, graceful,
use_hostnames)
return _task.result(timeout)
def async_bucket_flush(self, server, bucket='default'):
"""
Asynchronously flushes a bucket
Parameters:
server - The server to flush the bucket on. (TestInputServer)
bucket - The name of the bucket to be flushed. (String)
Returns:
BucketFlushTask - A task future that is a handle for scheduled task
"""
_task = conc.BucketFlushTask(server, self.jython_task_manager, bucket)
self.jython_task_manager.schedule(_task)
return _task
def bucket_flush(self, server, bucket='default', timeout=None):
"""Synchronously flushes a bucket
Parameters:
server - The server to flush the bucket on. (TestInputServer)
bucket - The name of the bucket to be flushed. (String)
Returns:
boolean - Whether or not the bucket was flushed."""
_task = self.async_bucket_flush(server, bucket)
return _task.get_result(timeout)
def async_compact_bucket(self, server, bucket="default"):
"""Asynchronously starts bucket compaction
Parameters:
server - source couchbase server
bucket - bucket to compact
Returns:
boolean - Whether or not the compaction started successfully"""
_task = conc.CompactBucketTask(server, self.jython_task_manager, bucket)
self.jython_task_manager.schedule(_task)
return _task
def compact_bucket(self, server, bucket="default"):
"""Synchronously runs bucket compaction and monitors progress
Parameters:
server - source couchbase server
bucket - bucket to compact
Returns:
            boolean - Whether or not the compaction completed successfully"""
_task = self.async_compact_bucket(server, bucket)
status = _task.get_result()
return status
def async_cbas_query_execute(self, master, cbas_server, cbas_endpoint,
statement, bucket='default', mode=None,
pretty=True):
"""
Asynchronously execute a CBAS query
:param master: Master server
:param cbas_server: CBAS server
:param cbas_endpoint: CBAS Endpoint URL (/analytics/service)
:param statement: Query to be executed
:param bucket: bucket to connect
:param mode: Query Execution mode
:param pretty: Pretty formatting
:return: task with the output or error message
"""
_task = conc.CBASQueryExecuteTask(
master, cbas_server, self.jython_task_manager, cbas_endpoint, statement,
bucket, mode, pretty)
self.jython_task_manager.schedule(_task)
return _task
| null |
lib/couchbase_helper/cluster.py
|
cluster.py
|
py
| 37,834 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "logging.getLogger",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "tasks.tasks.BucketCreateTask",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "tasks.tasks",
"line_number": 39,
"usage_type": "name"
},
{
"api_name": "tasks.tasks.BucketCreateTask",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "tasks.tasks",
"line_number": 54,
"usage_type": "name"
},
{
"api_name": "tasks.tasks.FailoverTask",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "tasks.tasks",
"line_number": 72,
"usage_type": "name"
},
{
"api_name": "tasks.tasks.NodeInitializeTask",
"line_number": 109,
"usage_type": "call"
},
{
"api_name": "tasks.tasks",
"line_number": 109,
"usage_type": "name"
},
{
"api_name": "sdk_client3.SDKClient",
"line_number": 133,
"usage_type": "call"
},
{
"api_name": "membase.api.rest_client.RestConnection",
"line_number": 133,
"usage_type": "call"
},
{
"api_name": "Jython_tasks.task.LoadDocumentsGeneratorsTask",
"line_number": 137,
"usage_type": "call"
},
{
"api_name": "Jython_tasks.task",
"line_number": 137,
"usage_type": "name"
},
{
"api_name": "Jython_tasks.task.LoadDocumentsForDgmTask",
"line_number": 146,
"usage_type": "call"
},
{
"api_name": "Jython_tasks.task",
"line_number": 146,
"usage_type": "name"
},
{
"api_name": "sdk_client3.SDKClient",
"line_number": 164,
"usage_type": "call"
},
{
"api_name": "membase.api.rest_client.RestConnection",
"line_number": 164,
"usage_type": "call"
},
{
"api_name": "Jython_tasks.task.ContinuousDocUpdateTask",
"line_number": 165,
"usage_type": "call"
},
{
"api_name": "Jython_tasks.task",
"line_number": 165,
"usage_type": "name"
},
{
"api_name": "sdk_client3.SDKClient",
"line_number": 186,
"usage_type": "call"
},
{
"api_name": "membase.api.rest_client.RestConnection",
"line_number": 186,
"usage_type": "call"
},
{
"api_name": "Jython_tasks.task.Atomicity",
"line_number": 189,
"usage_type": "call"
},
{
"api_name": "Jython_tasks.task",
"line_number": 189,
"usage_type": "name"
},
{
"api_name": "sdk_client3.SDKClient",
"line_number": 213,
"usage_type": "call"
},
{
"api_name": "membase.api.rest_client.RestConnection",
"line_number": 213,
"usage_type": "call"
},
{
"api_name": "Jython_tasks.task.Durability",
"line_number": 218,
"usage_type": "call"
},
{
"api_name": "Jython_tasks.task",
"line_number": 218,
"usage_type": "name"
},
{
"api_name": "sdk_client3.SDKClient",
"line_number": 248,
"usage_type": "call"
},
{
"api_name": "membase.api.rest_client.RestConnection",
"line_number": 248,
"usage_type": "call"
},
{
"api_name": "Jython_tasks.task.LoadDocumentsForDgmTask",
"line_number": 249,
"usage_type": "call"
},
{
"api_name": "Jython_tasks.task",
"line_number": 249,
"usage_type": "name"
},
{
"api_name": "BucketLib.BucketOperations.BucketHelper",
"line_number": 263,
"usage_type": "call"
},
{
"api_name": "couchbase_helper.documentgenerator.doc_generator",
"line_number": 268,
"usage_type": "call"
},
{
"api_name": "sdk_client3.SDKClient",
"line_number": 286,
"usage_type": "call"
},
{
"api_name": "membase.api.rest_client.RestConnection",
"line_number": 286,
"usage_type": "call"
},
{
"api_name": "Jython_tasks.task.DocumentsValidatorTask",
"line_number": 287,
"usage_type": "call"
},
{
"api_name": "Jython_tasks.task",
"line_number": 287,
"usage_type": "name"
},
{
"api_name": "Jython_tasks.task.RebalanceTask",
"line_number": 310,
"usage_type": "call"
},
{
"api_name": "Jython_tasks.task",
"line_number": 310,
"usage_type": "name"
},
{
"api_name": "Jython_tasks.task.StatsWaitTask",
"line_number": 340,
"usage_type": "call"
},
{
"api_name": "Jython_tasks.task",
"line_number": 340,
"usage_type": "name"
},
{
"api_name": "Jython_tasks.task.MonitorDBFragmentationTask",
"line_number": 361,
"usage_type": "call"
},
{
"api_name": "Jython_tasks.task",
"line_number": 361,
"usage_type": "name"
},
{
"api_name": "tasks.tasks.BatchedValidateDataTask",
"line_number": 468,
"usage_type": "call"
},
{
"api_name": "tasks.tasks",
"line_number": 468,
"usage_type": "name"
},
{
"api_name": "tasks.tasks.ValidateDataTask",
"line_number": 473,
"usage_type": "call"
},
{
"api_name": "tasks.tasks",
"line_number": 473,
"usage_type": "name"
},
{
"api_name": "Jython_tasks.task.N1QLQueryTask",
"line_number": 532,
"usage_type": "call"
},
{
"api_name": "Jython_tasks.task",
"line_number": 532,
"usage_type": "name"
},
{
"api_name": "Jython_tasks.task.CreateIndexTask",
"line_number": 591,
"usage_type": "call"
},
{
"api_name": "Jython_tasks.task",
"line_number": 591,
"usage_type": "name"
},
{
"api_name": "Jython_tasks.task.MonitorIndexTask",
"line_number": 614,
"usage_type": "call"
},
{
"api_name": "Jython_tasks.task",
"line_number": 614,
"usage_type": "name"
},
{
"api_name": "Jython_tasks.task.BuildIndexTask",
"line_number": 634,
"usage_type": "call"
},
{
"api_name": "Jython_tasks.task",
"line_number": 634,
"usage_type": "name"
},
{
"api_name": "Jython_tasks.task.DropIndexTask",
"line_number": 679,
"usage_type": "call"
},
{
"api_name": "Jython_tasks.task",
"line_number": 679,
"usage_type": "name"
},
{
"api_name": "tasks.tasks.BucketFlushTask",
"line_number": 732,
"usage_type": "call"
},
{
"api_name": "tasks.tasks",
"line_number": 732,
"usage_type": "name"
},
{
"api_name": "tasks.tasks.CompactBucketTask",
"line_number": 757,
"usage_type": "call"
},
{
"api_name": "tasks.tasks",
"line_number": 757,
"usage_type": "name"
},
{
"api_name": "tasks.tasks.CBASQueryExecuteTask",
"line_number": 788,
"usage_type": "call"
},
{
"api_name": "tasks.tasks",
"line_number": 788,
"usage_type": "name"
}
] |
353756888
|
import discord, asyncio, random, os
client = discord.Client()
async def action(message):
reactList = ["IL EST MALADE", "Mais qu'est ce qu'il est en train de faire", \
"Mais je pensais vraiment pas qu'il allait faire ça"]
introList = ["Alerte, si vous voyez des chips vertes dans votre paquet"\
" ne les mangez surtout pas !", "Hé ouais, il ne fallait pas"\
" faire l'imbécile dans le tobogland", "J'ai hacké la machine"\
" à pièces, je suis riche", "Le chien de cette fille est décédé"\
", laissez un gros pouce bleu pour la soutenir"]
if message.author == client.user:
pass
elif message.content.lower() == "€forlan help" or message.content.lower() == "€forlan":
await client.delete_message(message)
await client.send_message(message.channel, "Aide : https://github.com/ioanbht/forlanDiscordBot/blob/master/README.md")
elif message.content.lower() == "€forlan fortnite":
await client.delete_message(message)
await client.send_message(message.channel, "Bonjour à tous les amis, c'est Fortnite, j'espère que vous allez bien, moi en tout cas ça va super !")
elif message.content.lower() == "€forlan card":
await client.delete_message(message)
await client.send_message(message.channel, "Une carte Google Play de 50€ est cachée dans cette vidéo soyez le premier à la retrouver pour gagner cet argent !")
elif "12/04/2018" in message.content.lower():
await client.send_message(message.channel, "Ma chaîne a été cloturée par YouTube en cette date, laissez un gros pouce bleu pour me soutenir")
elif message.content.lower() == "€forlan giveway":
await client.delete_message(message)
await client.send_message(message.channel, "Et sachez que je pretends encore vous faire gagner un iPhone X d'une valeur de 1000€, pour participer c'est vraiment très simple il suffit de marquer \"1000subsNoBrain est le meilleur youtuber\" en commentaires")
elif message.content.lower() == "€forlan react":
await client.delete_message(message)
await client.send_message(message.channel, random.choice(reactList))
elif message.content.lower() == "1000subsnobrain est le meilleur youtuber":
await client.send_message(message.channel, "1000subsNoBrain vous dit merci, (vidéo de remerciement : https://www.youtube.com/watch?v=lHS3coval5g) \n à par contre, vu que je suis le pire YouTuber francophone, je ne vous donnerais jamais l'iPhone X,\n merci pour vos dons au passage")
elif message.content.lower() == "€forlan intro":
await client.delete_message(message)
await client.send_message(message.channel, random.choice(introList))
elif message.content.lower().split()[0] == "€forlan":
await client.delete_message(message)
await client.send_message(message.channel, "Commande incorrecte")
@client.event
async def on_ready():
await client.change_presence(game=discord.Game(name="arnaquer des gens"))
@client.event
async def on_message(message):
await action(message)
@client.event
async def on_message_edit(b, after):
await action(after)
client.run(os.environ['BOT_TOKEN'])
| null |
forlanBot.py
|
forlanBot.py
|
py
| 3,286 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "discord.Client",
"line_number": 3,
"usage_type": "call"
},
{
"api_name": "random.choice",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "random.choice",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "discord.Game",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 55,
"usage_type": "attribute"
}
] |
330565394
|
#!/usr/bin/python
#-*- coding: utf-8 -*-
import numpy as np
import glob
import cv2
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
def get_images(list_images):
# We read the images
array_imgs = []
for name in list_images:
img = cv2.imread(name)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
array_imgs.append(img)
return array_imgs
def filter_image(image):
# RGB model change to HSV
image_HSV = cv2.cvtColor(image, cv2.COLOR_RGB2HSV)
# Minimum and maximum values of the red
value_min_HSV = np.array([0, 235, 60])
value_max_HSV = np.array([180, 255, 255])
# Filtering images
image_HSV_filtered = cv2.inRange(image_HSV, value_min_HSV, value_max_HSV)
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (31, 31))
image_HSV_filtered = cv2.morphologyEx(image_HSV_filtered, cv2.MORPH_CLOSE, kernel)
return image_HSV_filtered
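# Note: with H spanning its full 0-180 range, the bounds above keep any highly
# saturated, sufficiently bright pixel (S >= 235, V >= 60), so this "red"
# filter is effectively driven by saturation rather than hue.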
def calculate_postion_vectors(img):
    # We look for the x positions of the pixels that have value 1 in two fixed rows (350 and 260)
position_x_down = np.where(img[350, :])
position_x_above = np.where(img[260, :])
return position_x_down, position_x_above
def calculate_centroid(positionx):
if (len(positionx[0]) > 1):
x_middle = (positionx[0][0] + positionx[0][len(positionx[0]) - 1]) / 2
not_found = False
else:
x_middle = None
not_found = True
return x_middle, not_found
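# Worked example: if np.where(img[350, :]) finds white pixels at columns
# 100..140, positionx[0] is [100, ..., 140] and the centroid is
# (100 + 140) / 2 = 120.0; with fewer than two white pixels, not_found is True.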
def draw_centroids(array_images, marker, ax1, ax2, ax3):
for i in range(0, len(array_images)):
img = filter_image(array_images[i])
# We calculate vectors
position_x_down, position_x_above = calculate_postion_vectors(img)
        # Check whether white pixels were found in each row, and locate the centre when they were
x_middle_down, not_found_down = calculate_centroid(position_x_down)
x_middle_above, not_found_above = calculate_centroid(position_x_above)
print(x_middle_down, not_found_down, x_middle_above, not_found_above)
if not_found_down:
ax3.plot([0.5], [x_middle_above], marker)
elif not_found_above:
ax1.plot([x_middle_down], [0.5], marker)
else:
ax2.plot([x_middle_down], [x_middle_above], marker)
return ax1, ax2, ax3
if __name__ == "__main__":
# Load data
list_images_dataset = glob.glob('Dataset/Train/Images/' + '*')
images_dataset = sorted(list_images_dataset, key=lambda x: int(x.split('/')[3].split('.png')[0]))
list_images_driving = glob.glob('Failed_driving/Images/' + '*')
images_driving = sorted(list_images_driving, key=lambda x: int(x.split('/')[2].split('.png')[0]))
# We preprocess images
array_images_dataset = get_images(images_dataset)
array_images_driving = get_images(images_driving)
# We create the figure and subplots
fig = plt.figure()
plt.suptitle('Datatset against Driving')
gs = gridspec.GridSpec(2, 2, width_ratios=[4, 1], height_ratios=[1, 4])
ax1 = plt.subplot(gs[0])
ax2 = plt.subplot(gs[2])
ax3 = plt.subplot(gs[3])
ax4 = plt.subplot(gs[1])
ax1.set_title('Nan values of L1')
ax2.set_title('Represent pairs of L1-L2')
ax3.set_title('Nan values of L2')
ax4.set_title('Legend')
ax1, ax2, ax3 = draw_centroids(array_images_dataset, 'ro', ax1, ax2, ax3)
ax1, ax2, ax3 = draw_centroids(array_images_driving, 'bx', ax1, ax2, ax3)
ax1.axis([0, 640, 0, 1])
ax2.axis([0, 640, 0, 640])
ax2.set_xlabel('L2 (Row 350)')
ax2.set_ylabel('L1 (Row 260)')
ax3.axis([0, 1, 0, 640])
ax4.axis([0, 1, 0, 1])
ax4.plot([-1], [-1], 'ro', label='Dataset')
ax4.plot([-1], [-1], 'bx', label='Driving')
plt.legend()
plt.show()
| null |
Follow Line/analysis_vectors.py
|
analysis_vectors.py
|
py
| 3,768 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "cv2.imread",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "cv2.cvtColor",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "cv2.COLOR_BGR2RGB",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "cv2.cvtColor",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "cv2.COLOR_RGB2HSV",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "numpy.array",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "cv2.inRange",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "cv2.getStructuringElement",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "cv2.MORPH_ELLIPSE",
"line_number": 32,
"usage_type": "attribute"
},
{
"api_name": "cv2.morphologyEx",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "cv2.MORPH_CLOSE",
"line_number": 33,
"usage_type": "attribute"
},
{
"api_name": "numpy.where",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "numpy.where",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "glob.glob",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "glob.glob",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 89,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 89,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.suptitle",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 90,
"usage_type": "name"
},
{
"api_name": "matplotlib.gridspec.GridSpec",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "matplotlib.gridspec",
"line_number": 92,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.subplot",
"line_number": 94,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 94,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.subplot",
"line_number": 95,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 95,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.subplot",
"line_number": 96,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 96,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.subplot",
"line_number": 97,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 97,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.legend",
"line_number": 114,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 114,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 115,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 115,
"usage_type": "name"
}
] |
344020708
|
from pygame import Surface, SRCALPHA
from pygame import Rect
from pygame.image import load as load_img
from pygame.transform import flip
import pygame
from .. import Colors
from .. import screen_size, gravity
from .position import Position
walking_sprites = (
20, 114,
48, 64,
)
class Hero(object):
size = (50, 64)
image_path = 'Witcher 2D_r/game/res/hero_spritesheet.png'
frames = {
'stay': 4,
'walk': 1,
'jump': 4,
}
animation_speed = 0.25
speedx = 200
speedy = 0
on_walk = False
anim_jump = False
on_ground = False
flipx = False
pos = Position()
def __init__(self, start_pos=Position(x=100, y=100)):
self.surface = Surface(self.size, SRCALPHA)
self.spritesheet = load_img(self.image_path)
self.surface.fill((0, 0, 0, 0))
self.pos = start_pos
self.rect = Rect(
self.pos.x, self.pos.y,
self.size[0], self.size[1]
)
self.current_frame = 0
self.last_frame_time = 0
def update_anim(self, time):
self.last_frame_time += time
if self.anim_jump:
row = 64
frames = self.frames['jump']
elif self.on_walk:
row = 0
frames = self.frames['walk']
else:
row = 0
frames = self.frames['stay']
while self.last_frame_time > self.animation_speed:
self.current_frame += 1
self.last_frame_time = self.last_frame_time - self.animation_speed
if not self.anim_jump:
self.current_frame = self.current_frame % frames
else:
self.current_frame = min(self.current_frame, frames)
self.surface.fill((0, 0, 0, 0))
self.surface.blit(
self.spritesheet,
(0, 0),
(
42*self.current_frame,
row,
43, 64
)
)
self.surface = flip(self.surface, self.flipx, False)
    def update_pos(self, keys, platforms, td):
self.on_walk = False
self.speedy += gravity
        if keys[pygame.K_SPACE] and self.on_ground:
self.speedy = -0.2
self.current_frame = 0
self.anim_jump = True
if keys[pygame.K_a]:
self.pos.x -= self.speedx * td
self.flipx = True
self.on_walk = True
if keys[pygame.K_d]:
self.pos.x += self.speedx * td
self.flipx = False
self.on_walk = True
        self.pos.y += self.speedy * td  # apply vertical speed once, scaled by frame time (the original added it a second time unscaled)
self.on_ground = False
if self.pos.x < 0:
self.pos.x = 0
if self.pos.y < 0:
self.pos.y = 0
if self.pos.x > screen_size[0] - self.rect.w:
self.pos.x = screen_size[0] - self.rect.w
if self.pos.y > screen_size[1] - self.rect.h:
self.pos.y = screen_size[1] - self.rect.h
self.speedy = 0
            self.on_ground = True
            self.anim_jump = False
        self.on_ground = False  # reset; the platform collision checks below re-grant grounding
self.rect.x = self.pos.x
for item in platforms:
if self.rect.colliderect(item.rect):
if (keys[pygame.K_d]):
self.rect.x = item.rect.x - self.rect.w
self.pos.x = self.rect.x
if (keys[pygame.K_a]):
self.rect.x = item.rect.x + item.rect.w
self.pos.x = self.rect.x
self.rect.y = self.pos.y
for item in platforms:
if self.rect.colliderect(item.rect):
if (self.speedy > 0):
self.rect.y = item.rect.y - self.rect.h
self.speedy = 0
                    self.on_ground = True
self.anim_jump = False
self.pos.y = self.rect.y
if (self.speedy < 0):
self.rect.y = item.rect.y + item.rect.h
self.speedy = 0
self.pos.y = self.rect.y
def put_on_screen(self, screen):
screen.blit(self.surface, self.rect)
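# Minimal usage sketch (hypothetical game loop; `platforms` is any iterable of
# objects exposing a pygame .rect, `dt` the frame time in seconds):
#   hero = Hero()
#   while running:
#       hero.update_pos(pygame.key.get_pressed(), platforms, dt)
#       hero.update_anim(dt)
#       hero.put_on_screen(screen)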
| null |
Witcher 2D/Witcher 2D_r/game/objects/hero.py
|
hero.py
|
py
| 4,166 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "position.Position",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "position.Position",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "pygame.Surface",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "pygame.SRCALPHA",
"line_number": 33,
"usage_type": "argument"
},
{
"api_name": "pygame.image.load",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "pygame.Rect",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "pygame.transform.flip",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "pygame.K_SPACE",
"line_number": 79,
"usage_type": "attribute"
},
{
"api_name": "pygame.K_a",
"line_number": 83,
"usage_type": "attribute"
},
{
"api_name": "pygame.K_d",
"line_number": 87,
"usage_type": "attribute"
},
{
"api_name": "pygame.K_d",
"line_number": 112,
"usage_type": "attribute"
},
{
"api_name": "pygame.K_a",
"line_number": 115,
"usage_type": "attribute"
}
] |
201387667
|
# coding: utf-8
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
data = pd.read_csv('Li2CO3.dos1ev', delim_whitespace=True, skiprows=3, names = ['E','tot','Li','C','O1','O2'])
data.describe()
data['O1'] = data['O1']+data['O2']
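# O2 has been folded into O1 above, so the loop below plots tot, Li, C and the
# combined O column (columns 1..4 of the frame).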
for i in np.arange(4):
plt.plot(data['E'],data[data.keys()[i+1]])
plt.axvline(x=0) #Fermi energy
plt.title("DOS of Li2CO3")
plt.xlim([-5,15])
plt.xlabel("Energy (eV)")
plt.ylabel("DOS (States/eV cell)")
plt.show()
| null |
Images/DOS_plots/LCO_plot.py
|
LCO_plot.py
|
py
| 469 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "pandas.read_csv",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.axvline",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.title",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xlim",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xlabel",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.ylabel",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 18,
"usage_type": "name"
}
] |
299133036
|
# coding: utf-8
from datetime import datetime
from random import choice
from django.core.management.base import BaseCommand
from django.contrib.auth.models import User
from core.models import Post, Photo
import logging
AUTHORS = ['LAhmatyi', 'tinki', 'skyslayer', 'akafist', 'prophoter']
log = logging.getLogger(__name__)
class Command(BaseCommand):
def handle(self, *args, **options):
count = 0
for item in Post.all.filter(status='deferred', date_created__lt=datetime.now()):
log.info('Deferred publication for post %s (%s)', item.pk, item.get_absolute_url())
if item.author.username == 'LAhmatyi':
author = User.objects.get(username=choice(AUTHORS))
item.author = author
item.abstract = 'LAhmatyi'
log.info('Author changed to %s for post %s (%s)', author.username, item.pk, item.get_absolute_url())
for p in Photo.objects.filter(post=item):
p.author = author
p.save()
item.status = 'pub'
item.save()
count += 1
log.info('publish deferred: %s processed', count)
| null |
src/core/management/commands/publish_deferred.py
|
publish_deferred.py
|
py
| 1,183 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "logging.getLogger",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "django.core.management.base.BaseCommand",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "core.models.Post.all.filter",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "core.models.Post.all",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "core.models.Post",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.now",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "django.contrib.auth.models.User.objects.get",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.models.User.objects",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "django.contrib.auth.models.User",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "random.choice",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "core.models.Photo.objects.filter",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "core.models.Photo.objects",
"line_number": 29,
"usage_type": "attribute"
},
{
"api_name": "core.models.Photo",
"line_number": 29,
"usage_type": "name"
}
] |
577424660
|
#!/usr/bin/env python3
import argparse
import requests
import sys
from .cli import CLI
from .library import ShibbolethError
def run():
"""Authenticate via U-M Shibboleth from the command line."""
# Argument parsing
parser = argparse.ArgumentParser(
description="Authenticate to U-M Shibboleth from the command line."
)
parser.add_argument(
"cookie_file",
default=".cookies.tmp",
nargs="?",
help="a Netscape-style cookie file (e.g. one generated by cURL)"
)
args = parser.parse_args()
cookie_file = args.cookie_file
# Perform authentication
try:
cli = CLI(cookie_file)
request = requests.Request("GET", "https://weblogin.umich.edu/")
result = cli.perform(request)
return 0
except requests.exceptions.ConnectionError as e:
print("Error connecting to Shibboleth server(s):", file=sys.stderr)
return 1
except requests.exceptions.Timeout:
print("A request timed out.", file=sys.stderr)
return 2
except requests.exceptions.TooManyRedirects:
print("Too many redirects.", file=sys.stderr)
return 3
except ShibbolethError as e:
print(e, file=sys.stderr)
return 4
except KeyboardInterrupt:
return 130
if __name__ == "__main__":
sys.exit(run())
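# Example invocation (module path assumed from the repo layout):
#   python -m src mycookies.txt
# performs a GET to https://weblogin.umich.edu/ through CLI.perform, with
# mycookies.txt as the Netscape-style cookie file handed to CLI.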
| null |
src/__main__.py
|
__main__.py
|
py
| 1,347 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "argparse.ArgumentParser",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "cli.CLI",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "requests.Request",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "cli.perform",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "requests.exceptions",
"line_number": 31,
"usage_type": "attribute"
},
{
"api_name": "sys.stderr",
"line_number": 32,
"usage_type": "attribute"
},
{
"api_name": "requests.exceptions",
"line_number": 34,
"usage_type": "attribute"
},
{
"api_name": "sys.stderr",
"line_number": 35,
"usage_type": "attribute"
},
{
"api_name": "requests.exceptions",
"line_number": 37,
"usage_type": "attribute"
},
{
"api_name": "sys.stderr",
"line_number": 38,
"usage_type": "attribute"
},
{
"api_name": "library.ShibbolethError",
"line_number": 40,
"usage_type": "name"
},
{
"api_name": "sys.stderr",
"line_number": 41,
"usage_type": "attribute"
},
{
"api_name": "sys.exit",
"line_number": 47,
"usage_type": "call"
}
] |
101955264
|
#-*- coding: utf-8 -*-
import numpy as np
import itertools
from fix_axis import fix_axis
from tomo_seq import tomo_seq_all_axis
from GP_data_processor import GP_data_processor
from numpy import random as nprd
from numpy import array as npar
from data_manager import data_manager
from scipy.misc import derivative
class expression_simulator:
def point_mat_exp(point_mat, test_func):
if point_mat.shape[0] > 0:
exp_list = np.apply_along_axis(test_func, 1, point_mat)
else:
exp_list = np.array([0])
return(np.sum(exp_list))
def slice_exp(point_mat, angle, divnum, test_func):
slice_list = fix_axis.get_slice_list(point_mat, angle, divnum)
expression_array \
            = np.array([expression_simulator.point_mat_exp
(slice_mat, test_func)
for slice_mat in slice_list])
return(expression_array, slice_list)
def get_exp_idx_mat(point_mat, idx_mat, test_func):
exp_vec = np.array([
            expression_simulator.point_mat_exp(
point_mat[idx_mat[row_num]], test_func)
for row_num in np.arange(idx_mat.shape[0])])
return(exp_vec)
def register_points(self, point_mat):
self.point_mat = point_mat
def register_function(self, func):
self.func = func
def __init__(self, point_mat, func, axis):
divnum = np.arange(-1200, 1200, 19)
gene_id = "sim"
        self.slice_idx_mat = fix_axis.get_slice_idx_mat_axis(point_mat, axis, divnum)
cell_hist = fix_axis.z_divide_count_axis(
point_mat, axis, divnum)
self.nonzero_idx = np.nonzero(cell_hist)[0]
        exp_vec = expression_simulator.get_exp_idx_mat(
point_mat, self.slice_idx_mat, func)
self.gene_dict = {gene_id: exp_vec}
self.cell_num = point_mat.shape[0]
def get_reg_exp(self, gene_id):
exp_vec = self.gene_dict[gene_id]
return(exp_vec[self.nonzero_idx])
def get_slice_idx_mat(self):
return(self.slice_idx_mat[self.nonzero_idx, :])
def tomoseq_all_sim(point_mat, func):
axis_list = ["x", "y", "z"]
ts_all = tomo_seq_all_axis(point_mat)
for axis in axis_list:
ts_sim = expression_simulator(point_mat, func, axis)
ts_all.ts_dict[axis] = ts_sim
return(ts_all)
def sim_func(x, x0, amplitude, width):
rsq = np.linalg.norm(x - x0)**2
val = amplitude * np.exp(-rsq/(2*width**2))
return(val)
def gen_sim_func(x0, amplitude, width):
return(lambda x: sim_func(x, x0, amplitude, width))
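# sim_func is an isotropic Gaussian bump,
#   f(x) = amplitude * exp(-||x - x0||^2 / (2 * width^2)),
# and gen_sim_func freezes x0, amplitude and width into a closure over x.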
def t_sim_func(t, t0, t_sigmoid_gain, amplitude, sign, max_t):
modified_t = (t - t0)*sign
val = (1/(1+np.exp(-t_sigmoid_gain*(modified_t))))*amplitude
return(val)
def gen_t_sim_func(t0, t_sigmoid_gain, amplitude, negative_t, max_t):
return(lambda t: t_sim_func(
t, t0, t_sigmoid_gain, amplitude, negative_t, max_t))
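# t_sim_func is a logistic ramp in time,
#   g(t) = amplitude / (1 + exp(-t_sigmoid_gain * sign * (t - t0))),
# where sign = +1 gives a rising ramp and sign = -1 a falling one; the max_t
# argument is accepted but unused in the body.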
class simulated_data_manager(data_manager):
def add_time_points(t_vec, new_t_vec):
"""
Add time points to original time points
"""
new_point_idx = np.logical_not(np.isin(new_t_vec, t_vec))
added_t_vec = np.append(
t_vec,
new_t_vec[new_point_idx])
return(added_t_vec)
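    # Example for add_time_points:
    #   add_time_points(np.array([0., 1.]), np.array([1., 2.]))  ->  array([0., 1., 2.])
    # i.e. only time points not already present are appended.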
def gen_func_list(gene_num, pmat, amplitude, width):
"""
        Generate a list of functions, one per gene; each function gives that
        gene's spatial expression (a Gaussian bump centred on a random cell).
"""
x0_idx_list = nprd.randint(pmat.shape[0], size=gene_num)
func_list = [gen_sim_func(pmat[x0_idx], amplitude, width)
for x0_idx in x0_idx_list]
return(func_list)
def gen_t_func_list(gene_num, t_vec, t_sigmoid_gain, amplitude=1):
"""
        Generate a list of time functions, one per gene; each gives that
        gene's time coefficient (a logistic ramp with random midpoint and sign).
"""
min_t = np.min(t_vec)
max_t = np.max(t_vec)
dbl_min_t = min_t - (max_t - min_t)
dbl_max_t = max_t + (max_t - min_t)
t0_list = nprd.uniform(dbl_min_t, dbl_max_t, gene_num)
sign_list = nprd.choice([-1, 1], size=gene_num)
func_list = [gen_t_sim_func(t0, t_sigmoid_gain, amplitude, sign, max_t)
for t0, sign in
zip(t0_list, sign_list)]
return(func_list)
    def gen_base_exp(gene_num, pmat, amplitude, width):
"""
        Generate a baseline expression matrix (points x genes) by evaluating
        one randomly placed function per gene at every point.
"""
func_list = simulated_data_manager.gen_func_list(
            gene_num, pmat, amplitude, width)
exp_mat = np.array(
[[func(pmat[i, :])
for func in func_list]
for i in range(pmat.shape[0])])
return(exp_mat)
def gen_exp_mat(func_list, pmat):
"""
        Evaluate every function in func_list at every point of pmat,
        giving an expression matrix of shape (points, genes).
"""
exp_mat = np.array(
[[func(pmat[i, :])
for func in func_list]
for i in range(pmat.shape[0])])
return(exp_mat)
def gen_time_course_exp_dict(func_list, time_func_list, ct, t_vec):
"""
        Generate dictionaries of expression and of its time derivative,
        keyed by observed time point.
"""
exp_mat_dict = {}
exp_dt_mat_dict = {}
base_t = np.min(t_vec)
for t in t_vec:
(pmat_base, pmat) = ct.get_pmat_pmat(base_t, t)
exp_mat = simulated_data_manager.gen_exp_mat(func_list, pmat)
time_coff_vec = npar([time_func(t)
for time_func in time_func_list])
time_coff_vec_t = time_coff_vec.reshape(1, len(func_list))
dtime_coff_vec = npar([derivative(time_func, t, dx=1.0e-6)
for time_func in time_func_list])
dtime_coff_vec_t = dtime_coff_vec.reshape(1, len(func_list))
exp_mat_dict[t] = exp_mat * time_coff_vec_t
exp_dt_mat_dict[t] = exp_mat * dtime_coff_vec_t
return(exp_mat_dict, exp_dt_mat_dict)
def gen_exp_mat_dict(
gene_num, ct, t_vec, amplitude, width, t_sigmoid_gain):
"""
        Simulate expression at all observed time points
"""
base_t = np.min(t_vec)
base_pmat = ct.get_pmat(base_t)
func_list = simulated_data_manager.gen_func_list(
gene_num, base_pmat, amplitude, width)
t_func_list = simulated_data_manager.gen_t_func_list(
gene_num, t_vec, t_sigmoid_gain)
exp_dict, exp_dt_dict \
= simulated_data_manager.gen_time_course_exp_dict(
func_list, t_func_list, ct, t_vec)
return(exp_dict, exp_dt_dict)
def sample_ts_exp(true_exp_dict, A, t_vec):
"""
Sample tomo seq expression from true trend
"""
true_exp_mat = np.concatenate(
[true_exp_dict[t] for t in t_vec],
axis=0)
ts_exp_mat = A @ true_exp_mat
sampled_ts_exp_mat = nprd.poisson(ts_exp_mat)
return(sampled_ts_exp_mat)
def sample_sc_exp(true_exp_mat_dict, sc_num, t_vec):
"""
Sample single cell seq expression from true trend
"""
sc_dict = {}
sc_idx_dict = {}
for t in t_vec:
sampled_idx = nprd.randint(
true_exp_mat_dict[t].shape[0], size=sc_num)
partial_true_exp_mat = true_exp_mat_dict[t][sampled_idx, :]
sc_dict[t] = np.transpose(nprd.poisson(partial_true_exp_mat))
sc_idx_dict[t] = sampled_idx
return(sc_dict, sc_idx_dict)
def gen_simulation(self, gene_num, sc_num,
amplitude=300, width=200, t_sigmoid_gain=1.0):
self.true_exp_dict, self.true_exp_dt_dict\
= simulated_data_manager.gen_exp_mat_dict(
gene_num, self.ct, self.sim_t_vec,
amplitude, width, t_sigmoid_gain)
self.sc_dict, self.sc_idx_dict = simulated_data_manager.sample_sc_exp(
self.true_exp_dict, sc_num, self.sc_t_vec)
A = self.get_ts_assignment_matrix()
self.Yt = simulated_data_manager.sample_ts_exp(
self.true_exp_dict, A, self.t_vec)
def increase_time_points(self, new_t_vec):
"""
Add time points to original time points
"""
self.sim_t_vec = simulated_data_manager.add_time_points(
self.t_vec, new_t_vec)
self.t_vec = simulated_data_manager.add_time_points(
self.t_vec, new_t_vec)
def increase_sc_time_points(self, new_t_vec):
"""
        Add time points to the original time points, for the simulation only
"""
self.sim_t_vec = simulated_data_manager.add_time_points(
self.t_vec, new_t_vec)
self.t_vec = self.sim_t_vec
self.sc_t_vec = self.sim_t_vec
| null |
stge/simulated_data_manager.py
|
simulated_data_manager.py
|
py
| 8,772 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "numpy.apply_along_axis",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "numpy.sum",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "fix_axis.fix_axis.get_slice_list",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "fix_axis.fix_axis",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "numpy.array",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "fix_axis.fix_axis.get_slice_idx_mat_axis",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "fix_axis.fix_axis",
"line_number": 44,
"usage_type": "name"
},
{
"api_name": "fix_axis.fix_axis.z_divide_count_axis",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "fix_axis.fix_axis",
"line_number": 45,
"usage_type": "name"
},
{
"api_name": "numpy.nonzero",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "tomo_seq.tomo_seq_all_axis",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "numpy.linalg.norm",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "numpy.linalg",
"line_number": 71,
"usage_type": "attribute"
},
{
"api_name": "numpy.exp",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "numpy.exp",
"line_number": 82,
"usage_type": "call"
},
{
"api_name": "data_manager.data_manager",
"line_number": 91,
"usage_type": "name"
},
{
"api_name": "numpy.logical_not",
"line_number": 96,
"usage_type": "call"
},
{
"api_name": "numpy.isin",
"line_number": 96,
"usage_type": "call"
},
{
"api_name": "numpy.append",
"line_number": 97,
"usage_type": "call"
},
{
"api_name": "numpy.random.randint",
"line_number": 107,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 107,
"usage_type": "name"
},
{
"api_name": "numpy.min",
"line_number": 117,
"usage_type": "call"
},
{
"api_name": "numpy.max",
"line_number": 118,
"usage_type": "call"
},
{
"api_name": "numpy.random.uniform",
"line_number": 121,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 121,
"usage_type": "name"
},
{
"api_name": "numpy.random.choice",
"line_number": 122,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 122,
"usage_type": "name"
},
{
"api_name": "numpy.array",
"line_number": 135,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 146,
"usage_type": "call"
},
{
"api_name": "numpy.min",
"line_number": 159,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 163,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 166,
"usage_type": "call"
},
{
"api_name": "scipy.misc.derivative",
"line_number": 166,
"usage_type": "call"
},
{
"api_name": "numpy.min",
"line_number": 178,
"usage_type": "call"
},
{
"api_name": "numpy.concatenate",
"line_number": 193,
"usage_type": "call"
},
{
"api_name": "numpy.random.poisson",
"line_number": 197,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 197,
"usage_type": "name"
},
{
"api_name": "numpy.random.randint",
"line_number": 207,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 207,
"usage_type": "name"
},
{
"api_name": "numpy.transpose",
"line_number": 210,
"usage_type": "call"
},
{
"api_name": "numpy.random.poisson",
"line_number": 210,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 210,
"usage_type": "name"
}
] |
353977539
|
#!/usr/bin/env python3
#this program just alternates between various incline positions
import serial
import time
import binascii
import oly_lib
#===============main=====================
#open port
ser = serial.Serial(
port='/dev/ttyUSB0',
baudrate=38400,
parity=serial.PARITY_NONE,
stopbits=serial.STOPBITS_TWO,
bytesize=serial.SEVENBITS
)
ser.isOpen()  # just a safety check
#ask rhymebus what the transmax is
a = oly_lib.sendMsg(ser,"Irtm")
maxIncl = oly_lib.getOlydata(a)
print("here's your transmax:",maxIncl,"(",hex(maxIncl),")")
if not maxIncl:
print("calibrating...")
oly_lib.sendMsg(ser,"Iwca")
input("enter when done calibrating ")
print("here's your transmax:")
a = oly_lib.sendMsg(ser,"Irtm")
maxIncl = oly_lib.getOlydata(a)
print("here's your transmax:",maxIncl,"(",hex(maxIncl),")")
maxIncl = 36 #hardcoded to the treadmill
#send the olympus to the bottom and wait 5 seconds
oly_lib.sendMsg(ser,"Iwdi 0")
time.sleep(5)
# alternate indefinitely between two incline positions (raw targets 8 and 20
# of transmax 36; the original note called these roughly 1% and 7% grade)
while 1:
oly_lib.sendMsg(ser,"Iwdi 8")
time.sleep(15)
oly_lib.sendMsg(ser,"Iwdi 20")
time.sleep(10)
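# Unreachable while the loop above runs forever; a break or KeyboardInterrupt
# handler would be needed for the cleanup below to execute.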
ser.close()
print("good job, son")
| null |
pbvr_Pi/RaspberryPi/Jaron/oly_utils-master/oly_automaton.py
|
oly_automaton.py
|
py
| 1,205 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "serial.Serial",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "serial.PARITY_NONE",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "serial.STOPBITS_TWO",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "serial.SEVENBITS",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "oly_lib.sendMsg",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "oly_lib.getOlydata",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "oly_lib.sendMsg",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "oly_lib.sendMsg",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "oly_lib.getOlydata",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "oly_lib.sendMsg",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "oly_lib.sendMsg",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "oly_lib.sendMsg",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 49,
"usage_type": "call"
}
] |
393466733
|
import numpy as np
from matplotlib import pyplot as plt
N = 330
I0 = 1
mu_0 = 4*np.pi*10**(-7)
R = 0.07
a_liste = [2*R, R, R/2]
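# Label glossary (Norwegian -> English): Måledata = measured data, Beregnet =
# calculated, Standardavvik = standard deviation, Gjennomsnittlig avvik = mean
# deviation, Avvik = deviation.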
def B_felt_antihelmholtz(x,a):
prefaktor = N*mu_0*I0/(2*R)
return 10000*prefaktor*((1 +(x-a/2)**2/R**2)**(-1.5) - (1 +(x+a/2)**2/R**2)**(-1.5))
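# On-axis field of an anti-Helmholtz pair (two opposed coils of radius R, N
# turns, current I0, centres separated by a), converted from tesla to gauss:
#   B(x) = 1e4 * (N*mu_0*I0/(2R)) * [ (1 + (x - a/2)^2/R^2)^(-3/2)
#                                   - (1 + (x + a/2)^2/R^2)^(-3/2) ]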
def brgn_avvik(B, B_brgn):
return (B_brgn-B)
def unpacking_to_array(file):
f = open(file, "r")
B = []
x = []
for line in f:
liste = line.split()
B.append(liste[0])
x.append(liste[1])
f.close()
del B[0]
del x[0]
x = np.asarray(x, dtype=np.float64)
B = np.asarray(B, dtype=np.float64)
return x,B
def lag_error_punkter(x,B):
error_x_verdier = np.array([x[0]])
error_B_verdier = np.array([B[0]])
for i in range(len(x)):
if x[i] - error_x_verdier[-1] >= 0.01:
error_x_verdier = np.append(error_x_verdier,[x[i]])
error_B_verdier = np.append(error_B_verdier,[B[i]])
if x[i] - error_x_verdier[-1] <= -0.01:
error_x_verdier = np.append(error_x_verdier,[x[i]])
error_B_verdier = np.append(error_B_verdier,[B[i]])
return error_x_verdier, error_B_verdier
def brgn_og_maalte_graf(maaltefil1, maaltefil2, maaltefil3):
x1, B_2R = unpacking_to_array(maaltefil1)
x2, B_R = unpacking_to_array(maaltefil2)
x3, B_R_2 = unpacking_to_array(maaltefil3)
x1 -= 0.2565
x2 -= 0.2545
x3 -= 0.257
x_new1 = np.linspace(x1[10], x1[-1], 100)
x_new2 = np.linspace(x2[10], x2[-1], 100)
x_new3 = np.linspace(x3[10], x3[-1], 100)
error_x_verdier1, error_B_verdier1 = lag_error_punkter(x1, B_2R)
error_x_verdier2, error_B_verdier2 = lag_error_punkter(x2, B_R)
error_x_verdier3, error_B_verdier3 = lag_error_punkter(x3, B_R_2)
plt.plot(x1, B_2R, label="Måledata a=2R", linestyle='dashed', color='r')
plt.plot(x2, B_R, label="Måledata a=R", linestyle='dashed', color='b')
plt.plot(x3, B_R_2, label="Måledata a=R/2", linestyle='dashed', color='g')
plt.plot(x_new1, B_felt_antihelmholtz(x_new1, 2*R), label="Beregnet a=2R", color='r')
plt.plot(x_new2, B_felt_antihelmholtz(x_new2, R), label="Beregnet a=R", color='b')
plt.plot(x_new3, B_felt_antihelmholtz(x_new3, R/2), label="Beregnet a=R/2", color='g')
plt.margins(0.03)
plt.errorbar(error_x_verdier1, error_B_verdier1, yerr=0.0486, fmt="m|", label="Standardavvik")
plt.errorbar(error_x_verdier2, error_B_verdier2, yerr=0.0486, fmt="m|")
plt.errorbar(error_x_verdier3, error_B_verdier3, yerr=0.0486, fmt="m|")
plt.xlabel("x [m]")
plt.ylabel("B [Gauss]")
plt.legend(loc=2, prop={'size': 10})
#plt.savefig('antihelmot3.pdf')
plt.show()
brgn_og_maalte_graf("antihelmot2_2R.txt", "antihelmot2_R.txt", "antihelmot2_R_2.txt")
def avvik_graf(maaltefil_2R, maaltefil_R, maaltefil_R_2):
x1, B_2R = unpacking_to_array(maaltefil_2R)
x2, B_R = unpacking_to_array(maaltefil_R)
x3, B_R_2 = unpacking_to_array(maaltefil_R_2)
x1 -= 0.2545
x2 -= 0.2545
x3 -= 0.2545
brgn_2R = B_felt_antihelmholtz(x1, 2*R)
brgn_R = B_felt_antihelmholtz(x2, R)
brgn_R_2 = B_felt_antihelmholtz(x3, R/2)
avvik_2R = brgn_avvik(B_2R, brgn_2R)
avvik_R = brgn_avvik(B_R, brgn_R)
avvik_R_2 = brgn_avvik(B_R_2, brgn_R_2)
gj_2R = np.mean(avvik_2R)
gj_R = np.mean(avvik_R)
gj_R_2 = np.mean(avvik_R_2)
plt.plot(x1, avvik_2R, label="Avvik, a=2R", color='r')
plt.axhline(y=gj_2R, color='r', linestyle='dashed', label='Gjennomsnittlig avvik a=2R')
plt.plot(x2, avvik_R, label="Avvik, a=R", color='b')
plt.axhline(y=gj_R, color='b', linestyle='dashed', label='Gjennomsnittlig avvik a=R')
plt.plot(x3, avvik_R_2, label="Avvik, a=R/2", color='g')
plt.axhline(y=gj_R_2, color='g', linestyle='dashed', label='Gjennomsnittlig avvik a=R/2')
plt.xlabel("x [m]")
plt.ylabel("Avvik i B [Gauss]")
plt.title('Absolutte avvik Anti-Helmholtz')
plt.legend()
plt.show()
avvik_graf("antihelmot2_2R.txt", "antihelmot2_R.txt", "antihelmot2_R_2.txt")
| null |
Lab/ElMag_FY1003/antihelmoltz.py
|
antihelmoltz.py
|
py
| 4,058 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "numpy.pi",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "numpy.asarray",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "numpy.float64",
"line_number": 29,
"usage_type": "attribute"
},
{
"api_name": "numpy.asarray",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "numpy.float64",
"line_number": 30,
"usage_type": "attribute"
},
{
"api_name": "numpy.array",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "numpy.append",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "numpy.append",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "numpy.append",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "numpy.append",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "numpy.linspace",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "numpy.linspace",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "numpy.linspace",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 63,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 64,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 65,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 67,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 68,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 69,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.margins",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 71,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.errorbar",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 72,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.errorbar",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 73,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.errorbar",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 74,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xlabel",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 76,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.ylabel",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 77,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.legend",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 79,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 81,
"usage_type": "name"
},
{
"api_name": "numpy.mean",
"line_number": 100,
"usage_type": "call"
},
{
"api_name": "numpy.mean",
"line_number": 101,
"usage_type": "call"
},
{
"api_name": "numpy.mean",
"line_number": 102,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 104,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 104,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.axhline",
"line_number": 105,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 105,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 106,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 106,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.axhline",
"line_number": 107,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 107,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 108,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 108,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.axhline",
"line_number": 109,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 109,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xlabel",
"line_number": 111,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 111,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.ylabel",
"line_number": 112,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 112,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.title",
"line_number": 113,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 113,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.legend",
"line_number": 114,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 114,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 116,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 116,
"usage_type": "name"
}
] |
513440772
|
from lama.img_processing import normalise
from logzero import logger as logging
from lama import common
import os
import nrrd
from pathlib import Path
from scipy import ndimage
import numpy as np
import SimpleITK as sitk
import pandas as pd
from lama.stats.permutation_stats import bin_heatmap
from lama.utilities import prep_for_man_valid as pfmv
def main():
print("something")
wt_dir = Path(
"Z:/ArkellLab/Lab Members/Kyle/PhD/vmshare/Zic2_Kumba_LAMA/210519_int_anal/wt")
mut_dir = Path(
"Z:/ArkellLab/Lab Members/Kyle/PhD/vmshare/Zic2_Kumba_LAMA/210519_int_anal/non_wt")
mask, mask_h = nrrd.read(
"Z:/ArkellLab/Lab Members/Kyle/PhD/vmshare/Zic2_Kumba_LAMA/210423_g_by_e_stand_out/210415_g_by_e_anal/target/stats_mask.nrrd")
pop_avg, pop_h = nrrd.read(
"Z:/ArkellLab/Lab Members/Kyle/PhD/vmshare/Zic2_Kumba_LAMA/210423_g_by_e_stand_out/210415_g_by_e_anal/target/210224_pop_avg_deformable_8.nrrd")
s = ndimage.find_objects(mask)[0]
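    # find_objects returns bounding-box slice tuples for the labelled mask;
    # s is the first object's slices, presumably used by get_images below to
    # crop every image to the mask extent.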
# get the images
wt_imgs, wt_names = pfmv.get_images(wt_dir, s)
mut_imgs, mut_names = pfmv.get_images(mut_dir, s)
int_norm = normalise.IntensityMaskNormalise()
# normalise the images
int_norm.add_reference(wt_imgs)
int_norm.normalise(mut_imgs)
int_norm.normalise(wt_imgs)
wt_arrays = []
for img in wt_imgs:
binned = bin_heatmap.make_blocks_vectorized(img, 40)
# Summarise each bin by the non-zero mean. i.e. the faces/stickers of the
# Rubik's cube
face_val = [np.mean(cube[cube != 0]) for cube in binned]
wt_arrays.append(face_val)
# write to csv
wt_df = pd.DataFrame(wt_arrays, index=wt_names)
wt_df.to_csv("test_wt.csv")
mut_arrays = []
for img in mut_imgs:
binned = bin_heatmap.make_blocks_vectorized(img, 40)
# Summarise each bin by the non-zero mean. i.e. the faces/stickers of the
# Rubik's cube
face_val = [np.mean(cube[cube != 0]) for cube in binned]
mut_arrays.append(face_val)
# write to csv
    mut_df = pd.DataFrame(mut_arrays, index=mut_names)
    mut_df.to_csv("test_mut.csv")
if __name__ == '__main__':
main()
| null |
lama/stats/permutation_stats/bin_and_norm.py
|
bin_and_norm.py
|
py
| 2,256 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "pathlib.Path",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "nrrd.read",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "nrrd.read",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "scipy.ndimage.find_objects",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "scipy.ndimage",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "lama.utilities.prep_for_man_valid.get_images",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "lama.utilities.prep_for_man_valid",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "lama.utilities.prep_for_man_valid.get_images",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "lama.utilities.prep_for_man_valid",
"line_number": 35,
"usage_type": "name"
},
{
"api_name": "lama.img_processing.normalise.IntensityMaskNormalise",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "lama.img_processing.normalise",
"line_number": 37,
"usage_type": "name"
},
{
"api_name": "lama.stats.permutation_stats.bin_heatmap.make_blocks_vectorized",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "lama.stats.permutation_stats.bin_heatmap",
"line_number": 49,
"usage_type": "name"
},
{
"api_name": "numpy.mean",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "lama.stats.permutation_stats.bin_heatmap.make_blocks_vectorized",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "lama.stats.permutation_stats.bin_heatmap",
"line_number": 63,
"usage_type": "name"
},
{
"api_name": "numpy.mean",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 71,
"usage_type": "call"
}
] |
478238134
|
import torch
import time
import math
from visdom import Visdom
# from util import epoch_time
from nltk.translate.bleu_score import SmoothingFunction
from nltk.translate.meteor_score import meteor_score
from rouge import Rouge
import nltk
import torch.nn as nn
from torch.utils.data import DataLoader, Sampler, random_split
import torch.utils.data as Data
from get_A import read_batchA
from get_embed import get_embed
from util import epoch_time
from MySet import MySet, MySampler
from gcn_model import AST_Model, GCNEncoder
# from transformer2 import Transformer2
from trans_model import Transformer
from train_eval import train, evaluate
from make_data import load_nl_data, load_code_data
import torch.optim as optim
import argparse
import numpy as np
import torch.nn.functional as F
from torch.nn.parameter import Parameter
from torch.nn.modules.module import Module
parser = argparse.ArgumentParser()
parser.add_argument('--epoches', type=int, default=200,
help='Number of epochs to train.')
parser.add_argument('--lr', type=float, default=0.0001,
help='Initial learning rate.')
parser.add_argument('--nl_length', type=int, default=30,
help='NL-MAX-Length.')
parser.add_argument('--AST_node', type=int, default=30,
help='Number of AST Nodes.')
parser.add_argument('--Train_data', type=int, default=62738,
help='Number of training data.')
parser.add_argument('--code_length', type=int, default=300,
help='code-MAX-Length.')
parser.add_argument('--batch_size', type=int, default=16,
help='Number of the batch.')
args = parser.parse_args()
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
tgt_vocab_size, tgt_inv_vocab_dict, dec_inputs, tgt_vocab, dec_outputs = load_nl_data('java_nl.txt', args.nl_length)
src_vocab_size, enc_inputs, src_vocab = load_code_data('java_code.txt', args.code_length)
# print(src_vocab)
# print(tgt_vocab)
# print(tgt_vocab_size)
# exit()
A, A2, A3 = read_batchA('java_ast.txt', args.AST_node)
X = get_embed('java_ast.txt', args.AST_node)
A_1 = A[0:args.Train_data]
A_2 = A[args.Train_data:len(A)]
# print(A_2)
A2_1 = A2[0:args.Train_data]
A2_2 = A2[args.Train_data:len(A2)]
A3_1 = A3[0:args.Train_data]
A3_2 = A3[args.Train_data:len(A3)]
X_1 = X[0:args.Train_data]
X_2 = X[args.Train_data:len(X)]
enc_inputs = torch.LongTensor(enc_inputs)
dec_inputs = torch.LongTensor(dec_inputs)
dec_outputs = torch.LongTensor(dec_outputs)
enc_1 = enc_inputs[:args.Train_data]
enc_2 = enc_inputs[args.Train_data:]
dec_in_1 = dec_inputs[:args.Train_data]
dec_in_2 = dec_inputs[args.Train_data:]
dec_out_1 = dec_outputs[:args.Train_data]
dec_out_2 = dec_outputs[args.Train_data:]
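# The first Train_data samples (62738 by default) form the training split;
# everything after that index is held out for evaluation.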
# exit()
# dataset = MySet(A, X, A2, A3, A4, A5, enc_inputs, dec_inputs, dec_outputs)
train_data = MySet(A_1, X_1, A2_1, A3_1, enc_1, dec_in_1, dec_out_1)
evl_data = MySet(A_2, X_2, A2_2, A3_2, enc_2, dec_in_2, dec_out_2)
# train_data, evl_data = random_split(dataset, [1040, 260])
# exit()
my_sampler1 = MySampler(train_data, args.batch_size)
my_sampler2 = MySampler(evl_data, args.batch_size)
evl_data_loader = DataLoader(evl_data, batch_sampler=my_sampler2)
train_data_loader = DataLoader(train_data, batch_sampler=my_sampler1)
# trans_loader = Data.DataLoader(MyDataSet(enc_inputs, dec_inputs, dec_outputs), batch_size=batch_size, shuffle=True)
gcn_model = GCNEncoder().to(device)
trans_model = Transformer(src_vocab_size, tgt_vocab_size, args.AST_node, args.code_length).to(device)
# trans2_model = Transformer2(src_vocab_size, tgt_vocab_size).to(device)
criterion = nn.CrossEntropyLoss(ignore_index=0)
gcn_optimizer = optim.SGD(gcn_model.parameters(), lr=0.0001, momentum=0.99)
tran_optimizer = optim.SGD(trans_model.parameters(), lr=0.0001, momentum=0.99)
# exit()
best_test_loss = float('inf')
# viz = Visdom()
# viz.line([0.], [0.], win='train_loss', opts=dict(title='train_loss'))
# viz.line([0.], [0.], win='val_loss', opts=dict(title='val_loss'))
for epoch in range(args.epoches):
start_time = time.time()
train_loss = train(gcn_optimizer, tran_optimizer, train_data_loader, gcn_model, trans_model, criterion, device)
eval_loss, perplexity = evaluate(evl_data_loader, gcn_model, trans_model, criterion, device)
end_time = time.time()
epoch_mins, epoch_secs = epoch_time(start_time, end_time)
print('Epoch:', '%04d' % (epoch + 1), f'Time: {epoch_mins}m {epoch_secs}s')
print('\ttrain loss: ', '{:.4f}'.format(train_loss))
print('\t eval_loss: ', '{:.4f}'.format(eval_loss))
print('\tperplexity: ', '{:.4f}'.format(perplexity))
if eval_loss < best_test_loss:
best_test_loss = eval_loss
torch.save(gcn_model.state_dict(), 'save_model/gcn_model.pt')
torch.save(trans_model.state_dict(), 'save_model/trans_loss1.pt')
# torch.save(trans2_model.state_dict(), 'save_model/multi_loss2.pt')
| null |
M2TS_model/run.py
|
run.py
|
py
| 5,062 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "argparse.ArgumentParser",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "torch.device",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "torch.cuda.is_available",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "torch.cuda",
"line_number": 48,
"usage_type": "attribute"
},
{
"api_name": "make_data.load_nl_data",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "make_data.load_code_data",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "get_A.read_batchA",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "get_embed.get_embed",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "torch.LongTensor",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "torch.LongTensor",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "torch.LongTensor",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "MySet.MySet",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "MySet.MySet",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "MySet.MySampler",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "MySet.MySampler",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "torch.utils.data.DataLoader",
"line_number": 89,
"usage_type": "call"
},
{
"api_name": "torch.utils.data.DataLoader",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "gcn_model.GCNEncoder",
"line_number": 93,
"usage_type": "call"
},
{
"api_name": "trans_model.Transformer",
"line_number": 94,
"usage_type": "call"
},
{
"api_name": "torch.nn.CrossEntropyLoss",
"line_number": 96,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 96,
"usage_type": "name"
},
{
"api_name": "torch.optim.SGD",
"line_number": 97,
"usage_type": "call"
},
{
"api_name": "torch.optim",
"line_number": 97,
"usage_type": "name"
},
{
"api_name": "gcn_model.parameters",
"line_number": 97,
"usage_type": "call"
},
{
"api_name": "torch.optim.SGD",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "torch.optim",
"line_number": 98,
"usage_type": "name"
},
{
"api_name": "trans_model.parameters",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 106,
"usage_type": "call"
},
{
"api_name": "train_eval.train",
"line_number": 107,
"usage_type": "call"
},
{
"api_name": "train_eval.evaluate",
"line_number": 108,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 109,
"usage_type": "call"
},
{
"api_name": "util.epoch_time",
"line_number": 110,
"usage_type": "call"
},
{
"api_name": "torch.save",
"line_number": 117,
"usage_type": "call"
},
{
"api_name": "gcn_model.state_dict",
"line_number": 117,
"usage_type": "call"
},
{
"api_name": "torch.save",
"line_number": 118,
"usage_type": "call"
},
{
"api_name": "trans_model.state_dict",
"line_number": 118,
"usage_type": "call"
}
] |
69527100
|
#from django.conf.urls import url
from django.urls import re_path as url
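# django.conf.urls.url was removed in Django 4.0; aliasing re_path to `url`
# keeps the old call sites below working unchanged.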
from .views import formset, advanced, index
urlpatterns = [
url(r'^formset/', formset, name='example-formset'),
url(r'^advanced/', advanced, name='example-advanced'),
url(r'^', index, name='example-index'),
]
| null |
example/core/urls.py
|
urls.py
|
py
| 296 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "django.urls.re_path",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "views.formset",
"line_number": 7,
"usage_type": "argument"
},
{
"api_name": "django.urls.re_path",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "views.advanced",
"line_number": 8,
"usage_type": "argument"
},
{
"api_name": "django.urls.re_path",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "views.index",
"line_number": 9,
"usage_type": "argument"
}
] |
431060408
|
import requests
from allauth.socialaccount.providers.base import ProviderAccount
from allauth.socialaccount.providers.core.oauth2.provider import OAuth2Provider
class TwentyThreeAndMeAccount(ProviderAccount):
pass
class TwentyThreeAndMeProvider(OAuth2Provider):
id = 'twentythreeandme'
name = '23andMe'
account_class = TwentyThreeAndMeAccount
access_token_url = 'https://api.23andme.com/token'
authorize_url = 'https://api.23andme.com/authorize'
profile_url = 'https://api.23andme.com/1/user/'
def complete_login(self, request, app, token, **kwargs):
headers = {'Authorization': 'Bearer {0}'.format(token.token)}
resp = requests.get(self.get_profile_url(request), headers=headers)
extra_data = resp.json()
return self.sociallogin_from_response(request, extra_data)
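    # complete_login exchanges the bearer token for the /1/user/ profile JSON
    # and builds the SocialLogin from that response.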
def extract_uid(self, data):
return data['id']
def get_default_scope(self):
scope = ['basic']
return scope
def extract_common_fields(self, data):
return dict(
email=data.get('email'),
)
provider_classes = [TwentyThreeAndMeProvider]
| null |
allauth/socialaccount/providers/other/twentythreeandme/provider.py
|
provider.py
|
py
| 1,141 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "allauth.socialaccount.providers.base.ProviderAccount",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "allauth.socialaccount.providers.core.oauth2.provider.OAuth2Provider",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "requests.get",
"line_number": 22,
"usage_type": "call"
}
] |
443382926
|
import numpy as np
import tensorflow as tf
import _pickle as pickle
from tqdm import tqdm
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
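# Written against the TensorFlow 1.x graph/session API (tf.Graph, tf.Session,
# tf.train.Saver); it will not run as-is under TensorFlow 2.x eager mode.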
class Network(object):
def __init__(self, io_nn, latent_size, input_size, time_series_length, output_size,
encoder_num_units=[100, 100], decoder_num_units=[100, 100], euler_num_units=[], name='Unnamed',
tot_epochs=0, load_file=None):
"""
Parameters:
input_size: number of time steps used for initial input into the network.
latent_size: number of latent neurons to be used.
time_series_length: number of time steps (although each time step can contain mutliple values).
output_size: number of values in each time step (e.g. 2 if each time step is a vector in R^2).
encoder_num_units, decoder_num_units: Number of neurons in encoder and decoder hidden layers. Everything is fully connected.
name: Used for tensorboard
tot_epochs and load_file are used internally for loading and saving, don't pass anything to them manually.
"""
self.io_nn = io_nn
self.graph = tf.Graph()
self.input_step = 1
self.input_size = input_size
self.latent_size = latent_size
self.encoder_num_units = encoder_num_units
self.decoder_num_units = decoder_num_units
self.name = name
self.tot_epochs = tot_epochs
self.euler_num_units = euler_num_units
self.output_size = output_size
self.time_series_length = time_series_length
self.rnn_depth = time_series_length - self.input_step
# Set up neural network
self.graph_setup()
self.session = tf.Session(graph=self.graph)
with self.graph.as_default():
initialize_uninitialized(self.session)
# Load saved network
self.load_file = load_file
if self.load_file is not None:
self.load(self.load_file)
#########################################
# Public interface #
#########################################
def train(self, epoch_num, batch_size, learning_rate, training_data, validation_data,
beta_fun=lambda x: 0.001, euler_l2_coeff=1.e-5, test_step=None):
"""
Trains the network.
Parameters:
epoch_num (int): number of training epochs
batch_size (int), learning_rate (float): self-explanatory
training_data, validation_data (list): format as in data_generator
            euler_l2_coeff (float, optional): L2 regularization constant for the Euler-network weights
beta_fun: gives the beta as a function of the epoch number
test_step (int, optional): network is tested on validation data after this number of epochs and tensorboard summaries are written
"""
train_loss_per_epoch = []
val_loss_per_epoch = []
best_epoch_losses = [1000, 1000, 1000, 1000, 1000]
with self.graph.as_default():
initialize_uninitialized(self.session)
for epoch_iter in tqdm(range(epoch_num)):
self.tot_epochs += 1
print("Epoch: " + str(self.tot_epochs))
current_beta = beta_fun(self.tot_epochs)
                if test_step is not None and (self.tot_epochs - 1) % test_step == 0:
self.test(validation_data, beta=current_beta)
# save the val epoch loss:
val_loss_per_epoch.append(self.val_loss)
# save the val epoch losses to disk:
print("validation loss: %g" % self.val_loss)
batch_losses = []
for data_dict in self.gen_batch(training_data, batch_size):
parameter_dict = {self.learning_rate: learning_rate, self.beta: current_beta, self.euler_l2_coeff: euler_l2_coeff}
parameter_dict.update(data_dict)
self.session.run(self.training_op, feed_dict=parameter_dict)
self.batch_loss = self.session.run(self.cost, feed_dict=parameter_dict)
batch_losses.append(self.batch_loss)
summary = self.session.run(self.all_summaries, feed_dict=parameter_dict)
self.summary_writer.add_summary(summary, global_step=self.tot_epochs)
# print("step: %d/%d, training batch loss: %g" % (step + 1, training_data.shape[0], self.batch_loss))
# print("Step %d/%d" %(step, batch_size))
# compute the train epoch loss:
train_epoch_loss = np.mean(batch_losses)
# save the train epoch loss:
train_loss_per_epoch.append(train_epoch_loss)
print("training loss: %g" % train_epoch_loss)
if self.val_loss < max(best_epoch_losses): # (if top 5 performance on val:)
# save the model weights to disk:
checkpoint_path = (self.io_nn.model_dir_checkpoints + "model_" +
self.name + "_epoch_" + str(epoch_iter + 1) + ".ckpt")
saver = tf.train.Saver(tf.trainable_variables())
saver.save(self.session, checkpoint_path)
print("checkpoint saved in file: %s" % checkpoint_path)
# update the top 5 val losses:
index = best_epoch_losses.index(max(best_epoch_losses))
best_epoch_losses[index] = self.val_loss
# plot the training loss vs epoch and save to disk:
plt.figure(1)
plt.plot(train_loss_per_epoch, "k^")
plt.plot(train_loss_per_epoch, "k")
plt.ylabel("loss")
plt.xlabel("epoch")
plt.title("training loss per epoch")
plt.savefig("%strain_loss_per_epoch.png" % self.io_nn.log_dir_train)
plt.close(1)
# plot the val loss vs epoch and save to disk:
plt.figure(1)
plt.plot(val_loss_per_epoch, "k^")
plt.plot(val_loss_per_epoch, "k")
plt.ylabel("loss")
plt.xlabel("epoch")
plt.title("validation loss per epoch")
plt.savefig("%sval_loss_per_epoch.png" % self.io_nn.log_dir_val)
plt.close(1)
def test(self, data, beta=0, l2_coeff=0):
"""
Evaluate the network on a validation set: computes the validation loss and writes tensorboard summaries.
Parameters:
data (list, same format as training data): Dataset used to determine accuracy
"""
with self.graph.as_default():
data_dict = self.gen_data_dict(data, random_epsilon=False)
parameter_dict = {self.beta: beta, self.euler_l2_coeff: l2_coeff}
parameter_dict.update(data_dict)
self.val_loss = self.session.run(self.cost, feed_dict=parameter_dict)
summary = self.session.run(self.all_summaries_val, feed_dict=parameter_dict)
self.summary_writer_val.add_summary(summary, global_step=self.tot_epochs)
def run(self, data, layer, random_epsilon=False, additional_params={}):
"""
Run the network and return the result.
Params:
data: data used for running the network; same format as training data
layer: the graph node (layer) to evaluate
random_epsilon (bool): if True, the network is run with noise injection, otherwise without
"""
with self.graph.as_default():
data_dict = self.gen_data_dict(data, random_epsilon)
return self.session.run(layer, feed_dict=dict(data_dict, **additional_params))
def save(self, file_name):
"""
Saves state variables (weights, biases) of neural network
Params:
file_name (str): model is saved in io_nn.model_dir_checkpoints as file_name.ckpt
"""
with self.graph.as_default():
saver = tf.train.Saver()
saver.save(self.session, self.io_nn.model_dir_checkpoints + file_name + '.ckpt')
params = {'latent_size': self.latent_size,
'input_size': self.input_size,
'encoder_num_units': self.encoder_num_units,
'decoder_num_units': self.decoder_num_units,
'tot_epochs': self.tot_epochs,
'name': self.name,
'time_series_length': self.time_series_length,
'euler_num_units': self.euler_num_units,
'output_size': self.output_size}
with open(self.io_nn.model_dir_checkpoints + file_name + '.pkl', 'wb') as f:
pickle.dump(params, f)
print("Saved network to file " + file_name)
#########################################
# Public helper functions #
#########################################
@classmethod
def from_saved(cls, file_name, io_nn, change_params={}):
"""
Initializes a new network from saved data.
file_name (str): model is loaded from io_nn.model_dir_checkpoints/file_name.ckpt
"""
with open(io_nn.model_dir_checkpoints + file_name + '.pkl', 'rb') as f:
params = pickle.load(f)
params['load_file'] = file_name
params['io_nn'] = io_nn
for p in change_params:
params[p] = change_params[p]
print(params)
return cls(**params)
#########################################
# Private helper functions #
#########################################
def recon_loss_fun(self, prediction, euler_index):
# the full time series goes in strides of output_size (each observation contains output_size data points)
# ind = self.output_size * self.input_size + self.output_size * (euler_index - 1)
# observation = self.full_time_series[:, ind: ind + self.output_size]
ind = euler_index * self.output_size
observation = self.speed_torque_full[:, ind: ind + self.output_size]
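# e.g. with output_size = 2 and euler_index = 3, ind = 6 and the prediction is compared against columns 6:8 of speed_torque_full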
return tf.squared_difference(prediction, observation)
def graph_setup(self):
"""
Set up the computation graph for the neural network based on the parameters set at initialization
"""
with self.graph.as_default():
#######################
# Define placeholders #
#######################
self.current = tf.placeholder(tf.float32, [None, self.input_size], name='input_current')
self.speed_torque = tf.placeholder(tf.float32, [None, self.output_size], name='input_speed_torque')
self.speed_torque_next = tf.placeholder(tf.float32, [None, self.rnn_depth * self.output_size], name='speed_torque_next')
self.speed_torque_full = tf.concat(values=[self.speed_torque, self.speed_torque_next], axis=1, name='speed_torque_full')
self.epsilon = tf.placeholder(tf.float32, [None, self.latent_size], name='epsilon')
self.learning_rate = tf.placeholder(tf.float32, shape=[], name='learning_rate')
self.beta = tf.placeholder(tf.float32, shape=[], name='beta')
self.euler_l2_coeff = tf.placeholder(tf.float32, shape=[], name='euler_l2_coeff')
##################
# Set up encoder #
##################
with tf.name_scope('prepare_in1'):
#self.in1 = self.full_time_series[:, :self.output_size * self.input_size]
# in1 = concat(current, speed)
# self.in1 = tf.concat(values=[self.current,
# self.speed_torque], axis=1, name='in1')
self.in1 = self.speed_torque
# input and output dimensions for each of the weight tensors
# enc_in_num = [self.output_size + self.input_size] + self.encoder_num_units
enc_in_num = [self.output_size] + self.encoder_num_units
enc_out_num = self.encoder_num_units + [2 * self.latent_size]
encoder_input = self.in1
with tf.variable_scope('dynamic_encoder'):
previous_enc_layer = encoder_input
for k in range(len(enc_out_num)):
with tf.variable_scope('{}th_enc_layer'.format(k)):
w = tf.get_variable('w_enc{}'.format(k), [enc_in_num[k], enc_out_num[k]], initializer=tf.glorot_normal_initializer())
b = tf.get_variable('b_enc{}'.format(k), [enc_out_num[k]], initializer=tf.random_normal_initializer())
# create next layer
squash = (k != (len(enc_out_num) - 1))
previous_enc_layer = forwardprop(previous_enc_layer, w, b, squash=squash, name='{}th_enc_layer'.format(k))
with tf.name_scope('dynamic_state'):
pre_state = previous_enc_layer
self.state_means = tf.nn.tanh(pre_state[:, :self.latent_size])
self.state_log_sigma = tf.clip_by_value(pre_state[:, self.latent_size:], -5., 0.5)
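# NOTE: the assignment below overwrites the clipped value with the raw log-sigma, so the clipping above has no effect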
self.state_log_sigma = pre_state[:, self.latent_size:]
with tf.name_scope('state_sample'):
self.state_sample = tf.add(self.state_means, tf.exp(self.state_log_sigma) * self.epsilon, name='add_noise')
print(self.state_means.shape)
with tf.name_scope('kl_loss'):
self.kl_loss = kl_divergence(self.state_means, self.state_log_sigma, self.latent_size)
###################################
# Set up variables for Euler step #
###################################
in_euler = [self.latent_size] + self.euler_num_units
out_euler = self.euler_num_units + [self.latent_size]
with tf.variable_scope('RNN'):
###################
# Prepare decoder #
###################
dec_in_num = [self.latent_size + self.input_size] + self.decoder_num_units
dec_out_num = self.decoder_num_units + [self.output_size]
with tf.variable_scope('decoder_vars'):
self.dec_weights = []
self.dec_biases = []
self.decoder_l2_loss = tf.constant(0.)
for k in range(len(dec_out_num)):
self.dec_weights.append(tf.get_variable('w_dec{}'.format(k),
[dec_in_num[k], dec_out_num[k]],
initializer=tf.glorot_normal_initializer()))
self.dec_biases.append(tf.get_variable('b_dec{}'.format(k),
[dec_out_num[k]],
initializer=tf.random_normal_initializer()))
self.decoder_l2_loss = self.decoder_l2_loss + tf.nn.l2_loss(self.dec_weights[-1]) + tf.nn.l2_loss(self.dec_biases[-1])
def decoder_net(latent_state):
temp_state = latent_state
# Append input current to latent vector
temp_state = tf.concat(values=[temp_state, self.current], axis=1)
for k, (w, b) in enumerate(zip(self.dec_weights, self.dec_biases)):
squash = ((k + 1) != len(self.dec_weights)) # don't squash last layer
temp_state = forwardprop(temp_state, w, b, name='{}th_dec_layer'.format(k), squash=squash)
return temp_state
with tf.variable_scope('euler_vars'):
self.euler_weights = [
tf.get_variable('w_euler{}'.format(k),
[in_euler[k], out_euler[k]],
initializer=tf.glorot_normal_initializer())
for k in range(len(out_euler))
]
self.euler_biases = [
tf.get_variable('b_euler{}'.format(k),
[out_euler[k]],
initializer=tf.random_normal_initializer())
for k in range(len(out_euler))
]
with tf.name_scope('euler_l2_loss'):
self.euler_l2_loss = tf.add_n([tf.nn.l2_loss(self.euler_weights[i]) for i in range(len(out_euler))])
###########################################
# Define computation graph for Euler step #
###########################################
self.latent_vector_list = [self.state_sample]
with tf.name_scope('initial_euler_loss'):
self.decoded_list = [decoder_net(self.state_sample)]
recon_losses_list = [self.recon_loss_fun(self.decoded_list[-1], 0)]
for s in range(self.rnn_depth):
with tf.name_scope('{}th_euler_step'.format(s + 1)):
temp_state = self.latent_vector_list[-1]
for j, (w, b) in enumerate(zip(self.euler_weights, self.euler_biases)):
# To use the Euler weights, replace this line by
# temp_state = my_activation_function(tf.matmul(temp_state, w) + b)
temp_state = temp_state + b
self.latent_vector_list.append(temp_state)
with tf.name_scope('decode_{}th_euler_step'.format(s + 1)):
self.decoded_list.append(decoder_net(temp_state))
recon_losses_list.append(self.recon_loss_fun(self.decoded_list[-1], s + 1))
with tf.name_scope('gather_recon_losses'):
self.recon_loss = tf.reduce_mean(tf.stack(recon_losses_list))
#####################
# Cost and training #
#####################
with tf.name_scope('cost'):
self.cost = tf.add_n([self.recon_loss,
self.beta * self.kl_loss,
self.euler_l2_coeff * self.euler_l2_loss], name='add_costs')
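# beta-VAE style objective: reconstruction loss + beta * KL divergence + L2 penalty on the Euler-step weights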
with tf.name_scope('optimizer'):
optimizer = tf.train.AdamOptimizer(learning_rate=self.learning_rate)
gvs = optimizer.compute_gradients(self.cost)
capped_gvs = [(tf.clip_by_value(grad, -10., 10.), var) for grad, var in gvs]
self.training_op = optimizer.apply_gradients(capped_gvs)
#########################
# Tensorboard summaries #
#########################
tf.summary.histogram('state_means', self.state_means)
tf.summary.histogram('state_log_sigma', self.state_log_sigma)
for i, (w, b) in enumerate(zip(self.euler_weights, self.euler_biases)):
tf.summary.histogram('euler_weight_{}'.format(i), w)
tf.summary.histogram('euler_bias_{}'.format(i), b)
tf.summary.scalar('cost', self.cost)
tf.summary.scalar('reconstruction_cost', self.recon_loss)
tf.summary.scalar('kl_cost', self.kl_loss)
tf.summary.scalar('euler_l2_loss', self.euler_l2_loss)
tf.summary.scalar('beta', self.beta)
tf.summary.scalar('L2_coeff', self.euler_l2_coeff)
self.summary_writer = tf.summary.FileWriter(self.io_nn.log_dir_train + self.name + '/', graph=self.graph)
self.summary_writer.flush()
self.all_summaries = tf.summary.merge_all()
self.summary_writer_val = tf.summary.FileWriter(self.io_nn.log_dir_val + self.name + '/', graph=self.graph)
self.summary_writer_val.flush()
self.all_summaries_val = tf.summary.merge_all()
def gen_batch(self, data, batch_size, shuffle=True, random_epsilon=True):
"""
Generate batches for training the network.
Params:
data: same format as training data (see Data_loader)
batch_size (int)
shuffle (bool): if true, data is shuffled before batches are created
random_epsilon (bool): if true, epsilon is drawn from a normal distribution; otherwise, epsilon=0
"""
epoch_size = len(data[0]) // batch_size
if shuffle:
p = np.random.permutation(len(data[0]))
data = [data[i][p] for i in [0, 1, 2]]
for i in range(epoch_size):
batch_slice = slice(i * batch_size, (i + 1) * batch_size)
batch = [data[j][batch_slice] for j in [0, 1, 2]]
yield self.gen_data_dict(batch, random_epsilon=random_epsilon)
def gen_data_dict(self, data, random_epsilon=True):
"""
Params:
data: same format as training data (see data_loader)
random_epsilon (bool): if true, epsilon is drawn from a normal distribution; otherwise, epsilon=0
"""
if random_epsilon is True:
eps = np.random.normal(size=[len(data[0]), self.latent_size])
else:
eps = np.zeros([len(data[0]), self.latent_size])
return {self.current: data[0],
self.speed_torque: data[1],
self.speed_torque_next: data[2],
self.epsilon: eps}
def load(self, file_name):
"""
Loads network, params as in save
"""
with self.graph.as_default():
saver = tf.train.Saver(tf.trainable_variables())
saver.restore(self.session, self.io_nn.model_dir_checkpoints + file_name + '.ckpt')
print("Loaded network from file " + file_name)
###########
# Helpers #
###########
def forwardprop(x, w, b, squash=True, act_fun=tf.nn.elu, name=''):
"""
Forward-propagation.
"""
if name != '':
name = '_' + name
pre_act = tf.add(tf.matmul(x, w, name=('w_mul' + name)), b, name=('b_add' + name))
if name != '':
tf.summary.histogram('pre-act' + name, pre_act)
if squash:
return act_fun(pre_act, name=('act_fun' + name))
else:
return pre_act
def initialize_uninitialized(sess):
global_vars = tf.global_variables()
is_not_initialized = sess.run([tf.is_variable_initialized(var) for var in global_vars])
not_initialized_vars = [v for (v, f) in zip(global_vars, is_not_initialized) if not f]
if len(not_initialized_vars):
sess.run(tf.variables_initializer(not_initialized_vars))
def kl_divergence(means, log_sigma, dim, target_sigma=0.1):
# KL divergence between the given diagonal Gaussian and a zero-mean Gaussian with standard deviation target_sigma
target_sigma = tf.constant(target_sigma, shape=[dim])
return 1 / 2. * tf.reduce_mean(tf.reduce_sum(1 / target_sigma**2 * means**2 +
tf.exp(2 * log_sigma) / target_sigma**2 - 2 * log_sigma + 2 * tf.log(target_sigma), axis=1) - dim)
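# Sanity sketch: for a single latent unit with mean m, log-sigma ls, and target sigma s,
# the closed-form expression above reduces to
#     0.5 * (m**2 / s**2 + exp(2*ls) / s**2 - 2*ls + 2*log(s) - 1)
# which is the standard KL(N(m, e^{2 ls}) || N(0, s^2)) for diagonal Gaussians.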
| null |
scinet_motor/model_new.py
|
model_new.py
|
py
| 23,015 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "matplotlib.use",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "tensorflow.Graph",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "tensorflow.Session",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "tqdm.tqdm",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "numpy.mean",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "tensorflow.train.Saver",
"line_number": 107,
"usage_type": "call"
},
{
"api_name": "tensorflow.train",
"line_number": 107,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.trainable_variables",
"line_number": 107,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 117,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 117,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 118,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 118,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 119,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 119,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.ylabel",
"line_number": 120,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 120,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xlabel",
"line_number": 121,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 121,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.title",
"line_number": 122,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 122,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.savefig",
"line_number": 123,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 123,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.close",
"line_number": 124,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 124,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 127,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 127,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 128,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 128,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 129,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 129,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.ylabel",
"line_number": 130,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 130,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xlabel",
"line_number": 131,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 131,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.title",
"line_number": 132,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 132,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.savefig",
"line_number": 133,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 133,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.close",
"line_number": 134,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 134,
"usage_type": "name"
},
{
"api_name": "tensorflow.train.Saver",
"line_number": 170,
"usage_type": "call"
},
{
"api_name": "tensorflow.train",
"line_number": 170,
"usage_type": "attribute"
},
{
"api_name": "_pickle.dump",
"line_number": 182,
"usage_type": "call"
},
{
"api_name": "_pickle.load",
"line_number": 196,
"usage_type": "call"
},
{
"api_name": "tensorflow.squared_difference",
"line_number": 214,
"usage_type": "call"
},
{
"api_name": "tensorflow.placeholder",
"line_number": 225,
"usage_type": "call"
},
{
"api_name": "tensorflow.float32",
"line_number": 225,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.placeholder",
"line_number": 226,
"usage_type": "call"
},
{
"api_name": "tensorflow.float32",
"line_number": 226,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.placeholder",
"line_number": 227,
"usage_type": "call"
},
{
"api_name": "tensorflow.float32",
"line_number": 227,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.concat",
"line_number": 228,
"usage_type": "call"
},
{
"api_name": "tensorflow.placeholder",
"line_number": 229,
"usage_type": "call"
},
{
"api_name": "tensorflow.float32",
"line_number": 229,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.placeholder",
"line_number": 230,
"usage_type": "call"
},
{
"api_name": "tensorflow.float32",
"line_number": 230,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.placeholder",
"line_number": 231,
"usage_type": "call"
},
{
"api_name": "tensorflow.float32",
"line_number": 231,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.placeholder",
"line_number": 232,
"usage_type": "call"
},
{
"api_name": "tensorflow.float32",
"line_number": 232,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.name_scope",
"line_number": 237,
"usage_type": "call"
},
{
"api_name": "tensorflow.variable_scope",
"line_number": 249,
"usage_type": "call"
},
{
"api_name": "tensorflow.variable_scope",
"line_number": 252,
"usage_type": "call"
},
{
"api_name": "tensorflow.get_variable",
"line_number": 253,
"usage_type": "call"
},
{
"api_name": "tensorflow.glorot_normal_initializer",
"line_number": 253,
"usage_type": "call"
},
{
"api_name": "tensorflow.get_variable",
"line_number": 254,
"usage_type": "call"
},
{
"api_name": "tensorflow.random_normal_initializer",
"line_number": 254,
"usage_type": "call"
},
{
"api_name": "tensorflow.name_scope",
"line_number": 259,
"usage_type": "call"
},
{
"api_name": "tensorflow.nn.tanh",
"line_number": 261,
"usage_type": "call"
},
{
"api_name": "tensorflow.nn",
"line_number": 261,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.clip_by_value",
"line_number": 262,
"usage_type": "call"
},
{
"api_name": "tensorflow.name_scope",
"line_number": 264,
"usage_type": "call"
},
{
"api_name": "tensorflow.add",
"line_number": 265,
"usage_type": "call"
},
{
"api_name": "tensorflow.exp",
"line_number": 265,
"usage_type": "call"
},
{
"api_name": "tensorflow.name_scope",
"line_number": 267,
"usage_type": "call"
},
{
"api_name": "tensorflow.variable_scope",
"line_number": 275,
"usage_type": "call"
},
{
"api_name": "tensorflow.variable_scope",
"line_number": 282,
"usage_type": "call"
},
{
"api_name": "tensorflow.constant",
"line_number": 285,
"usage_type": "call"
},
{
"api_name": "tensorflow.get_variable",
"line_number": 287,
"usage_type": "call"
},
{
"api_name": "tensorflow.glorot_normal_initializer",
"line_number": 289,
"usage_type": "call"
},
{
"api_name": "tensorflow.get_variable",
"line_number": 291,
"usage_type": "call"
},
{
"api_name": "tensorflow.random_normal_initializer",
"line_number": 293,
"usage_type": "call"
},
{
"api_name": "tensorflow.nn.l2_loss",
"line_number": 294,
"usage_type": "call"
},
{
"api_name": "tensorflow.nn",
"line_number": 294,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.concat",
"line_number": 299,
"usage_type": "call"
},
{
"api_name": "tensorflow.variable_scope",
"line_number": 305,
"usage_type": "call"
},
{
"api_name": "tensorflow.get_variable",
"line_number": 307,
"usage_type": "call"
},
{
"api_name": "tensorflow.glorot_normal_initializer",
"line_number": 309,
"usage_type": "call"
},
{
"api_name": "tensorflow.get_variable",
"line_number": 313,
"usage_type": "call"
},
{
"api_name": "tensorflow.random_normal_initializer",
"line_number": 315,
"usage_type": "call"
},
{
"api_name": "tensorflow.name_scope",
"line_number": 319,
"usage_type": "call"
},
{
"api_name": "tensorflow.add_n",
"line_number": 320,
"usage_type": "call"
},
{
"api_name": "tensorflow.nn.l2_loss",
"line_number": 320,
"usage_type": "call"
},
{
"api_name": "tensorflow.nn",
"line_number": 320,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.name_scope",
"line_number": 326,
"usage_type": "call"
},
{
"api_name": "tensorflow.name_scope",
"line_number": 331,
"usage_type": "call"
},
{
"api_name": "tensorflow.name_scope",
"line_number": 338,
"usage_type": "call"
},
{
"api_name": "tensorflow.name_scope",
"line_number": 342,
"usage_type": "call"
},
{
"api_name": "tensorflow.reduce_mean",
"line_number": 343,
"usage_type": "call"
},
{
"api_name": "tensorflow.stack",
"line_number": 343,
"usage_type": "call"
},
{
"api_name": "tensorflow.name_scope",
"line_number": 348,
"usage_type": "call"
},
{
"api_name": "tensorflow.add_n",
"line_number": 349,
"usage_type": "call"
},
{
"api_name": "tensorflow.name_scope",
"line_number": 352,
"usage_type": "call"
},
{
"api_name": "tensorflow.train.AdamOptimizer",
"line_number": 353,
"usage_type": "call"
},
{
"api_name": "tensorflow.train",
"line_number": 353,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.clip_by_value",
"line_number": 355,
"usage_type": "call"
},
{
"api_name": "tensorflow.summary.histogram",
"line_number": 361,
"usage_type": "call"
},
{
"api_name": "tensorflow.summary",
"line_number": 361,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.summary.histogram",
"line_number": 362,
"usage_type": "call"
},
{
"api_name": "tensorflow.summary",
"line_number": 362,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.summary.histogram",
"line_number": 364,
"usage_type": "call"
},
{
"api_name": "tensorflow.summary",
"line_number": 364,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.summary.histogram",
"line_number": 365,
"usage_type": "call"
},
{
"api_name": "tensorflow.summary",
"line_number": 365,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.summary.scalar",
"line_number": 366,
"usage_type": "call"
},
{
"api_name": "tensorflow.summary",
"line_number": 366,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.summary.scalar",
"line_number": 367,
"usage_type": "call"
},
{
"api_name": "tensorflow.summary",
"line_number": 367,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.summary.scalar",
"line_number": 368,
"usage_type": "call"
},
{
"api_name": "tensorflow.summary",
"line_number": 368,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.summary.scalar",
"line_number": 369,
"usage_type": "call"
},
{
"api_name": "tensorflow.summary",
"line_number": 369,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.summary.scalar",
"line_number": 370,
"usage_type": "call"
},
{
"api_name": "tensorflow.summary",
"line_number": 370,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.summary.scalar",
"line_number": 371,
"usage_type": "call"
},
{
"api_name": "tensorflow.summary",
"line_number": 371,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.summary.FileWriter",
"line_number": 372,
"usage_type": "call"
},
{
"api_name": "tensorflow.summary",
"line_number": 372,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.summary.merge_all",
"line_number": 374,
"usage_type": "call"
},
{
"api_name": "tensorflow.summary",
"line_number": 374,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.summary.FileWriter",
"line_number": 375,
"usage_type": "call"
},
{
"api_name": "tensorflow.summary",
"line_number": 375,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.summary.merge_all",
"line_number": 377,
"usage_type": "call"
},
{
"api_name": "tensorflow.summary",
"line_number": 377,
"usage_type": "attribute"
},
{
"api_name": "numpy.random.permutation",
"line_number": 390,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 390,
"usage_type": "attribute"
},
{
"api_name": "numpy.random.normal",
"line_number": 404,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 404,
"usage_type": "attribute"
},
{
"api_name": "numpy.zeros",
"line_number": 406,
"usage_type": "call"
},
{
"api_name": "tensorflow.train.Saver",
"line_number": 417,
"usage_type": "call"
},
{
"api_name": "tensorflow.train",
"line_number": 417,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.trainable_variables",
"line_number": 417,
"usage_type": "call"
},
{
"api_name": "tensorflow.nn",
"line_number": 426,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.add",
"line_number": 432,
"usage_type": "call"
},
{
"api_name": "tensorflow.matmul",
"line_number": 432,
"usage_type": "call"
},
{
"api_name": "tensorflow.summary.histogram",
"line_number": 434,
"usage_type": "call"
},
{
"api_name": "tensorflow.summary",
"line_number": 434,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.global_variables",
"line_number": 442,
"usage_type": "call"
},
{
"api_name": "tensorflow.is_variable_initialized",
"line_number": 443,
"usage_type": "call"
},
{
"api_name": "tensorflow.variables_initializer",
"line_number": 447,
"usage_type": "call"
},
{
"api_name": "tensorflow.constant",
"line_number": 452,
"usage_type": "call"
},
{
"api_name": "tensorflow.reduce_mean",
"line_number": 453,
"usage_type": "call"
},
{
"api_name": "tensorflow.reduce_sum",
"line_number": 453,
"usage_type": "call"
},
{
"api_name": "tensorflow.exp",
"line_number": 454,
"usage_type": "call"
},
{
"api_name": "tensorflow.log",
"line_number": 454,
"usage_type": "call"
}
] |
164395540
|
from tkinter import *
from PIL import ImageTk,Image
from face_recognition import *
import os
window = Tk()
window.title("Graduate Thesis")
window.geometry('1000x600')
lbl = Label(window, text="Graduate Thesis",fg="Red", font=("arial",27,"bold"))
lbl.place(x=370,y=10)
lb2 = Label(window, text="Khoa điện - điện tử",fg="Blue", font=("arial",15,"bold"))
lb2.place(x=30,y=60)
lb3 = Label(window, text="Ngành tự động hóa",fg="Blue", font=("arial",15,"bold"))
lb3.place(x=30,y=90)
lb4 = Label(window, text="Đề tài:",fg="Black", font=("arial",13,"bold"))
lb4.place(x=30,y=120)
lb5 = Label(window, text="Nhận dạng khuôn mặt và lưu trữ thông tin",fg="Black", font=("arial",15,"bold"))
lb5.place(x=100,y=130)
lb6 = Label(window, text="Xuất thông tin để lưu trữ và kiểm tra",fg="Black", font=("arial",15,"bold"))
lb6.place(x=100,y=160)
lb7 = Label(window, text="Tên GVHD:",fg="Black", font=("arial",10,"bold"))
lb7.place(x=50,y=210)
lb8 = Label(window, text=" Nguyễn Hoàng Giáp",fg="Black", font=("arial",12,"bold"))
lb8.place(x=140,y=210)
lb9 = Label(window, text="Tên SVTH:",fg="Black", font=("arial",10,"bold"))
lb9.place(x=50,y=240)
lb10 = Label(window, text="Vũ Gia Bảo",fg="Black", font=("arial",12,"bold"))
lb10.place(x=140,y=240)
# insert logo
pic_frame = Frame(window, width=100, height=50)
pic_frame.place(x=600,y=100)
my_image = ImageTk.PhotoImage(Image.open("LOGO.png"))
logo_label = Label(pic_frame, image=my_image)  # renamed from LabelFrame to avoid shadowing tkinter's LabelFrame class
logo_label.pack()
def But_Start():
main()
def But_Stop():
stop_program()
def But_Check():
lb1x.configure(text="Be my girl !!")
btn_quit = Button(window, text="Quit",bg="white",fg="Black", command=window.destroy)
btn_quit.place(x=950, y=10)
btn_start = Button(window, text="Bắt đầu",bg="Green",fg="Yellow", command= But_Start)
btn_start.place(x=500, y=500)
btn_stop = Button(window, text="Kết thúc",bg="Red",fg="Black", command=But_Stop)
btn_stop.place(x=700, y=500)
btn_check = Button(window, text="Kiểm tra",bg="Pink",fg="Black", command=But_Check)
btn_check.place(x=300, y=500)
lb1x = Label(window, text="Vũ Gia Bảo",fg="Black", font=("arial",12,"bold"))
lb1x.place(x=140,y=300)
window.mainloop()
| null |
GUI_official.py
|
GUI_official.py
|
py
| 2,308 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "PIL.ImageTk.PhotoImage",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "PIL.ImageTk",
"line_number": 49,
"usage_type": "name"
},
{
"api_name": "PIL.Image.open",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 49,
"usage_type": "name"
}
] |
259457475
|
# Javier Mariño
import matplotlib.pyplot as plt
import numpy as np
plt.ion()
dimt = 2000  # number of time iterations
deltat = 0.1; deltax = 0.5; alfa = 0.1; N = 20  # natural parameters of the discretization
s = alfa*deltat/deltax**2  # compute the stability parameter
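# FTCS for the 1D heat equation is stable only for s <= 1/2; here s = 0.1*0.1/0.5**2 = 0.04, well inside the stable range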
T = np.zeros(N+1); T[5:9] = np.random.sample(4)*10; T[N] = 10  # initial vector, with whatever initial conditions we want
plt.close('all')
plt.plot(np.arange(N+1)*deltax, T, '-r'); plt.pause(0.00001)  # first plot
for n in range(dimt):
for i in range(1,N):
T[i] = T[i] + alfa*deltat/deltax**2*(T[i+1] - 2.*T[i] + T[i-1])  # compute the next vector point by point
T[0] = 0  # apply boundary conditions
T[N] = 10
if n%20==0:
plt.plot(np.arange(N+1)*deltax, T)  # plot at a coarser cadence to free Python memory
plt.pause(0.00001)
plt.show()
plt.xlabel('$x$')
plt.ylabel('$T$')
plt.title('FTCS')
| null |
bol8/bol8_ex1ayb_Mariño_Villadamigo_Javier.py
|
bol8_ex1ayb_Mariño_Villadamigo_Javier.py
|
py
| 871 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "matplotlib.pyplot.ion",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "numpy.zeros",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "numpy.random.sample",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot.close",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "numpy.arange",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.pause",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "numpy.arange",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.pause",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xlabel",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.ylabel",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.title",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 30,
"usage_type": "name"
}
] |
363057054
|
from django.urls import path
from . import views
urlpatterns = [
path('home/', views.home, name="home"),
path('', views.blog, name="blog"),
path('<int:blog_id>/', views.detail, name="detail"),
# <type:variable_name>; the captured value is passed to the view as an argument (path converter)
path('create/', views.create, name='create'),
path('newblog/', views.blogpost, name="newblog"),
]
| null |
mysite/myapp/urls.py
|
urls.py
|
py
| 425 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "django.urls.path",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 10,
"usage_type": "call"
}
] |
330838265
|
from flask import Flask
from flask_sqlalchemy import SQLAlchemy  # flask.ext.sqlalchemy was removed in Flask 1.0; flask_sqlalchemy is the current import path
from flask_login import LoginManager  # likewise, flask.ext.login is now flask_login
from momentjs import momentjs
import bmemcached
# app object
app = Flask(__name__)
# config file
app.config.from_object('config')
# DB
db = SQLAlchemy(app)
# login
lm = LoginManager()
lm.init_app(app)
lm.login_view = 'login'
# Moment JS: expose our class as a global variable to all templates
app.jinja_env.globals['momentjs'] = momentjs
# Remove Jinja2 whitespace
app.jinja_env.trim_blocks = True
app.jinja_env.lstrip_blocks = True
# memcached
mc = bmemcached.Client(
app.config['MC_SERVERS'],
app.config['MC_USERNAME'],
app.config['MC_PASSWORD']
)
###
from app import views, models
| null |
app/__init__.py
|
__init__.py
|
py
| 712 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "flask.Flask",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "flask.ext.sqlalchemy.SQLAlchemy",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "flask.ext.login.LoginManager",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "momentjs.momentjs",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "bmemcached.Client",
"line_number": 24,
"usage_type": "call"
}
] |
161447871
|
from collections import Counter
import sys
sys.setrecursionlimit(5000000)
S = list(input())
L = len(S)
A = Counter(S)
nCr = {}
def cmb(n, r):
if r == 0 or r == n: return 1
if r == 1: return n
if (n,r) in nCr: return nCr[(n,r)]
nCr[(n,r)] = cmb(n-1,r) + cmb(n-1,r-1)
return nCr[(n,r)]
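# e.g. cmb(5, 2) == 10; memoizing results in nCr keeps the recursion from recomputing overlapping subproblems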
if L == 1:
print(1)
else:
ans = cmb(L, 2) + 1
for v in A.values():
if v == 1: continue
ans -= cmb(v, 2)
print(ans)
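# The formula counts C(L, 2) + 1 candidate strings and subtracts C(v, 2) for each character occurring v > 1 times,
# i.e. it discounts position pairs that hold equal characters and therefore produce no new string.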
| null |
Python_codes/p03618/s011382759.py
|
s011382759.py
|
py
| 453 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "sys.setrecursionlimit",
"line_number": 3,
"usage_type": "call"
},
{
"api_name": "collections.Counter",
"line_number": 7,
"usage_type": "call"
}
] |
421816810
|
import numpy as np
import pygame as pg
from Code4Fun.Utility.Vec2 import Vec2
from numba import guvectorize, complex64, int32
from PIL import Image
# size = Vec2(1400, 1000)
# size = Vec2(1120, 400)
size = Vec2(1680, 600)
man_min = Vec2(-2.5, -1.25)
man_max = Vec2(1, 1.25)
jul_min = Vec2(-1.75, -1.25)
jul_max = Vec2(1.75, 1.25)
julia_size = Vec2(int(size.x / 2), size.y)
man_size = Vec2(int(size.x / 2), size.y)
# x_size = 3000
# size = Vec2(int(x_size / 3.5 * 2.5), x_size)
origin = size / 2
iterations = 150
julia_iterations = 150
draw_guidlines = False
draw_min_mandel = True
lock_julia = False
julia_pos = 0 + 0j
pg.init()
screen = pg.display.set_mode(size.tuple_int)
pg.display.set_caption("Mandelbrot Numba")
font = pg.font.SysFont("comicsansms", 50)
text = font.render(" ", True, (100, 50, 50))
mandelbrot_screen = np.empty((man_size.x, man_size.y))
julia_screen = np.empty((julia_size.x, julia_size.y))
iterator = 0
def save_image():
Image.fromarray(pg.surfarray.pixels3d(screen)).save("julia_set.png")
def init():
global iterator
global mandelbrot_values
global indices
global text
global mandelbrot_screen
mandelbrot_screen = mandelbrot_set(man_min.x, man_max.x, man_min.y, man_max.y, man_size.x, man_size.y, iterations)
pg.surfarray.pixels2d(screen)[0:man_size.x, :] = mandelbrot_screen
update_julia()
def update_julia():
global julia_screen, julia_pos
if not lock_julia:
julia_pos = mouse_to_complex()
julia_screen = julia_set(jul_min.x, jul_max.x, jul_min.y, jul_max.y, julia_size.x, julia_size.y, julia_iterations, julia_pos)
pg.surfarray.pixels2d(screen)[man_size.x: man_size.x + julia_size.x, :] = julia_screen
if draw_min_mandel:
mini_mandel()
pg.display.flip()
def mini_mandel():
mini_mandel_min, mini_mandel_max = mini_mandel_pos()
mini_mandel = mandelbrot_set(mini_mandel_min.x, mini_mandel_max.x, mini_mandel_min.y, mini_mandel_max.y, 175, 125, iterations * 3)
mini_max = np.max(mini_mandel)
if mini_max < 255:
mini_mandel = mini_mandel / mini_max * 255
pg.surfarray.pixels2d(screen)[10: 185, 10: 135] = mini_mandel
pg.draw.line(screen, (100, 80, 50), (int(175 / 2) + 10, 10), (int(175 / 2) + 10, 135))
pg.draw.line(screen, (100, 80, 50), (10, int(125 / 2) + 10), (185, int(125 / 2) + 10))
pg.draw.line(screen, (100, 50, 50), (10, 10), (10, 135), 2)
pg.draw.line(screen, (100, 50, 50), (185, 10), (185, 135), 2)
pg.draw.line(screen, (100, 50, 50), (10, 135), (185, 135), 2)
pg.draw.line(screen, (100, 50, 50), (10, 10), (185, 10), 2)
def mouse_to_complex():
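# linear map from pixel coordinates to the complex plane: (0, 0) -> man_min, (man_size.x, man_size.y) -> man_max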
mpos = pg.mouse.get_pos()
m_x = mpos[0]
m_y = mpos[1]
real = m_x / man_size.x * (man_max.x - man_min.x) + man_min.x
imag = m_y / man_size.y * (man_max.y - man_min.y) + man_min.y
return real + imag * 1j
def mini_mandel_pos():
m_pos = mouse_to_complex()
m_vec = Vec2(m_pos.real, m_pos.imag)
delta = (man_min - man_max) / 20
return m_vec + delta, m_vec - delta
def zoom(zoom_factor, **kwargs):
global man_min, man_max
if "pos" in kwargs:
center = Vec2(kwargs["pos"].real, kwargs["pos"].imag)
else:
center = (man_min + man_max) / 2
min_rel = man_min - center
max_rel = man_max - center
min_rel *= zoom_factor
max_rel *= zoom_factor
man_min = center + min_rel
man_max = center + max_rel
init()
def zoom_julia(zoom_factor):
global jul_min, jul_max
center = (jul_min + jul_max) / 2
min_rel = jul_min - center
max_rel = jul_max - center
min_rel *= zoom_factor
max_rel *= zoom_factor
jul_min = center + min_rel
jul_max = center + max_rel
update_julia()
def zoom_in():
zoom(0.5)
def zoom_out():
zoom(1.5)
def center_on(pos):
global man_min, man_max, stepsize
d_m = Vec2(man_size.x / 2 - pos[0], man_size.y / 2 - pos[1])
delta = Vec2(d_m.x / man_size.x * (man_max.x - man_min.x), d_m.y / man_size.y * (man_max.y - man_min.y))
man_min -= delta
man_max -= delta
init()
def center_on_mouse():
center_on(pg.mouse.get_pos())
def reset():
global man_min, man_max, jul_min, jul_max
man_min = Vec2(-2.5, -1.25)
man_max = Vec2(1, 1.25)
jul_min = Vec2(-1.75, -1.25)
jul_max = Vec2(1.75, 1.25)
init()
@guvectorize([(complex64[:], int32, int32[:])], '(n),()->(n)', target='parallel')
def mandelbrot_numpy(c, maxit, output):
maxiter = maxit
step_size = 255 / maxiter
for i in range(c.shape[0]):
single_c = c[i]
nreal = 0
real = 0
imag = 0
iterations_done = 0
for n in range(maxiter):
nreal = real * real - imag * imag + single_c.real
imag = 2 * real * imag + single_c.imag
real = nreal
if real * real + imag * imag > 4.0:
iterations_done = n
break
output[i] = iterations_done * step_size
def mandelbrot_set(xmin, xmax, ymin, ymax, width, height, maxiter):
r1 = np.linspace(xmin, xmax, width, dtype=np.float32)
r2 = np.linspace(ymin, ymax, height, dtype=np.float32)
c = r1 + r2[:, None] * 1j
n3 = mandelbrot_numpy(c, maxiter)
return n3.T
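# Hedged usage sketch (values are illustrative, not taken from this project):
#     img = mandelbrot_set(-2.5, 1.0, -1.25, 1.25, 800, 600, 100)
# returns an (800, 600) array of scaled escape counts, directly usable with pg.surfarray.pixels2d.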
@guvectorize([(complex64[:], complex64, int32, int32[:])], '(n),(),()->(n)', target='parallel')
def julia_numpy(c, pos, maxit, output):
maxiter = maxit
step_size = 255 / maxiter
for i in range(c.shape[0]):
single_c = pos
nreal = 0
real = c[i].real
imag = c[i].imag
iterations_done = 0
for n in range(maxiter):
nreal = real * real - imag * imag + single_c.real
imag = 2 * real * imag + single_c.imag
real = nreal
if real * real + imag * imag > 4.0:
iterations_done = n
break
output[i] = iterations_done * step_size
def julia_set(xmin, xmax, ymin, ymax, width, height, maxiter, pos):
r1 = np.linspace(xmin, xmax, width, dtype=np.float32)
r2 = np.linspace(ymin, ymax, height, dtype=np.float32)
c = r1 + r2[:, None] * 1j
n3 = julia_numpy(c, pos, maxiter)
return n3.T
def offset_julia(offset):
global jul_min, jul_max
x_tot = jul_max.x - jul_min.x
y_tot = jul_max.y - jul_min.y
offset.x *= x_tot
offset.y *= y_tot
jul_min += offset
jul_max += offset
update_julia()
init()
loop = True
while loop:
for e in pg.event.get():
if e.type == pg.QUIT:
loop = False
elif e.type == pg.MOUSEBUTTONDOWN:
# left click
if e.button == 1:
zoom_julia(0.5)
update_julia()
# middle mouse button
if e.button == 2:
center_on_mouse()
# right click
elif e.button == 3:
zoom_julia(1.5)
update_julia()
# scroll_up
elif e.button == 4:
zoom(0.5, pos=mouse_to_complex())
# scroll_down
elif e.button == 5:
zoom(1.5, pos=mouse_to_complex())
elif e.button == 6:
iterations = int(iterations / 2)
init()
elif e.button == 7:
iterations *= 2
init()
elif e.type == pg.KEYDOWN:
if e.key == pg.K_RIGHT:
offset_julia(Vec2(0.1, 0))
elif e.key == pg.K_LEFT:
offset_julia(Vec2(-0.1, 0))
elif e.key == pg.K_SPACE:
reset()
elif e.key == pg.K_DOWN:
offset_julia(Vec2(0, 0.1))
elif e.key == pg.K_UP:
offset_julia(Vec2(0, -0.1))
elif e.key == pg.K_c:
draw_guidlines = not draw_guidlines
elif e.key == pg.K_KP_PLUS:
zoom_julia(0.5)
update_julia()
elif e.key == pg.K_KP_MINUS:
zoom_julia(1.5)
update_julia()
elif e.key == pg.K_m:
draw_min_mandel = not draw_min_mandel
init()
elif e.key == pg.K_p:
lock_julia = not lock_julia
elif e.key == pg.K_o:
julia_iterations *= 2
update_julia()
elif e.key == pg.K_l:
julia_iterations = int(julia_iterations / 2)
update_julia()
elif e.key == pg.K_s:
save_image()
elif e.type == pg.MOUSEMOTION:
update_julia()
pg.quit()
| null |
Projects/Fractals/Mandelbrot/JuliaSetNumba.py
|
JuliaSetNumba.py
|
py
| 8,570 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "Code4Fun.Utility.Vec2.Vec2",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "Code4Fun.Utility.Vec2.Vec2",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "Code4Fun.Utility.Vec2.Vec2",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "Code4Fun.Utility.Vec2.Vec2",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "Code4Fun.Utility.Vec2.Vec2",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "Code4Fun.Utility.Vec2.Vec2",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "Code4Fun.Utility.Vec2.Vec2",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "pygame.init",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "pygame.display.set_mode",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "pygame.display",
"line_number": 33,
"usage_type": "attribute"
},
{
"api_name": "pygame.display.set_caption",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "pygame.display",
"line_number": 34,
"usage_type": "attribute"
},
{
"api_name": "pygame.font.SysFont",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "pygame.font",
"line_number": 35,
"usage_type": "attribute"
},
{
"api_name": "numpy.empty",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "numpy.empty",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "PIL.Image.fromarray",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 44,
"usage_type": "name"
},
{
"api_name": "pygame.surfarray.pixels3d",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "pygame.surfarray",
"line_number": 44,
"usage_type": "attribute"
},
{
"api_name": "pygame.surfarray.pixels2d",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "pygame.surfarray",
"line_number": 55,
"usage_type": "attribute"
},
{
"api_name": "pygame.surfarray.pixels2d",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "pygame.surfarray",
"line_number": 64,
"usage_type": "attribute"
},
{
"api_name": "pygame.display.flip",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "pygame.display",
"line_number": 69,
"usage_type": "attribute"
},
{
"api_name": "numpy.max",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "pygame.surfarray.pixels2d",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "pygame.surfarray",
"line_number": 78,
"usage_type": "attribute"
},
{
"api_name": "pygame.draw.line",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "pygame.draw",
"line_number": 79,
"usage_type": "attribute"
},
{
"api_name": "pygame.draw.line",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "pygame.draw",
"line_number": 80,
"usage_type": "attribute"
},
{
"api_name": "pygame.draw.line",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "pygame.draw",
"line_number": 81,
"usage_type": "attribute"
},
{
"api_name": "pygame.draw.line",
"line_number": 82,
"usage_type": "call"
},
{
"api_name": "pygame.draw",
"line_number": 82,
"usage_type": "attribute"
},
{
"api_name": "pygame.draw.line",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "pygame.draw",
"line_number": 83,
"usage_type": "attribute"
},
{
"api_name": "pygame.draw.line",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "pygame.draw",
"line_number": 84,
"usage_type": "attribute"
},
{
"api_name": "pygame.mouse.get_pos",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "pygame.mouse",
"line_number": 88,
"usage_type": "attribute"
},
{
"api_name": "Code4Fun.Utility.Vec2.Vec2",
"line_number": 99,
"usage_type": "call"
},
{
"api_name": "Code4Fun.Utility.Vec2.Vec2",
"line_number": 108,
"usage_type": "call"
},
{
"api_name": "Code4Fun.Utility.Vec2.Vec2",
"line_number": 143,
"usage_type": "call"
},
{
"api_name": "Code4Fun.Utility.Vec2.Vec2",
"line_number": 144,
"usage_type": "call"
},
{
"api_name": "pygame.mouse.get_pos",
"line_number": 152,
"usage_type": "call"
},
{
"api_name": "pygame.mouse",
"line_number": 152,
"usage_type": "attribute"
},
{
"api_name": "Code4Fun.Utility.Vec2.Vec2",
"line_number": 157,
"usage_type": "call"
},
{
"api_name": "Code4Fun.Utility.Vec2.Vec2",
"line_number": 158,
"usage_type": "call"
},
{
"api_name": "Code4Fun.Utility.Vec2.Vec2",
"line_number": 159,
"usage_type": "call"
},
{
"api_name": "Code4Fun.Utility.Vec2.Vec2",
"line_number": 160,
"usage_type": "call"
},
{
"api_name": "numba.guvectorize",
"line_number": 164,
"usage_type": "call"
},
{
"api_name": "numba.complex64",
"line_number": 164,
"usage_type": "name"
},
{
"api_name": "numba.int32",
"line_number": 164,
"usage_type": "name"
},
{
"api_name": "numpy.linspace",
"line_number": 185,
"usage_type": "call"
},
{
"api_name": "numpy.float32",
"line_number": 185,
"usage_type": "attribute"
},
{
"api_name": "numpy.linspace",
"line_number": 186,
"usage_type": "call"
},
{
"api_name": "numpy.float32",
"line_number": 186,
"usage_type": "attribute"
},
{
"api_name": "numba.guvectorize",
"line_number": 192,
"usage_type": "call"
},
{
"api_name": "numba.complex64",
"line_number": 192,
"usage_type": "name"
},
{
"api_name": "numba.int32",
"line_number": 192,
"usage_type": "name"
},
{
"api_name": "numpy.linspace",
"line_number": 213,
"usage_type": "call"
},
{
"api_name": "numpy.float32",
"line_number": 213,
"usage_type": "attribute"
},
{
"api_name": "numpy.linspace",
"line_number": 214,
"usage_type": "call"
},
{
"api_name": "numpy.float32",
"line_number": 214,
"usage_type": "attribute"
},
{
"api_name": "pygame.event.get",
"line_number": 236,
"usage_type": "call"
},
{
"api_name": "pygame.event",
"line_number": 236,
"usage_type": "attribute"
},
{
"api_name": "pygame.QUIT",
"line_number": 237,
"usage_type": "attribute"
},
{
"api_name": "pygame.MOUSEBUTTONDOWN",
"line_number": 239,
"usage_type": "attribute"
},
{
"api_name": "pygame.KEYDOWN",
"line_number": 263,
"usage_type": "attribute"
},
{
"api_name": "pygame.K_RIGHT",
"line_number": 264,
"usage_type": "attribute"
},
{
"api_name": "Code4Fun.Utility.Vec2.Vec2",
"line_number": 265,
"usage_type": "call"
},
{
"api_name": "pygame.K_LEFT",
"line_number": 266,
"usage_type": "attribute"
},
{
"api_name": "Code4Fun.Utility.Vec2.Vec2",
"line_number": 267,
"usage_type": "call"
},
{
"api_name": "pygame.K_SPACE",
"line_number": 268,
"usage_type": "attribute"
},
{
"api_name": "pygame.K_DOWN",
"line_number": 270,
"usage_type": "attribute"
},
{
"api_name": "Code4Fun.Utility.Vec2.Vec2",
"line_number": 271,
"usage_type": "call"
},
{
"api_name": "pygame.K_UP",
"line_number": 272,
"usage_type": "attribute"
},
{
"api_name": "Code4Fun.Utility.Vec2.Vec2",
"line_number": 273,
"usage_type": "call"
},
{
"api_name": "pygame.K_c",
"line_number": 275,
"usage_type": "attribute"
},
{
"api_name": "pygame.K_KP_PLUS",
"line_number": 277,
"usage_type": "attribute"
},
{
"api_name": "pygame.K_KP_MINUS",
"line_number": 280,
"usage_type": "attribute"
},
{
"api_name": "pygame.K_m",
"line_number": 283,
"usage_type": "attribute"
},
{
"api_name": "pygame.K_p",
"line_number": 286,
"usage_type": "attribute"
},
{
"api_name": "pygame.K_o",
"line_number": 288,
"usage_type": "attribute"
},
{
"api_name": "pygame.K_l",
"line_number": 291,
"usage_type": "attribute"
},
{
"api_name": "pygame.K_s",
"line_number": 294,
"usage_type": "attribute"
},
{
"api_name": "pygame.MOUSEMOTION",
"line_number": 296,
"usage_type": "attribute"
},
{
"api_name": "pygame.quit",
"line_number": 299,
"usage_type": "call"
}
] |
424252688
|
import numpy as np
import scipy.io as sio
from os import listdir
from pick import pick
import sklearn
import sklearn.preprocessing  # explicit submodule imports: `import sklearn` alone does not guarantee these are loaded
import sklearn.utils
from sklearn.model_selection import train_test_split
from default_params import num_classes
from datetime import date
# for automated SVM experiments
base_path = "../data/animals/015/"
op_date = date(2017, 3, 20)
# currently not in use
def get_num_trials_complete(animal_no):
""" counts the total number of trials for each dataset of one animal """
# get all datasets for one animal
filepath = '../data/animals/' + animal_no
entries = listdir(filepath)
for entry in entries:
print(entry)
num_trials = 0
entry_filepath = filepath + '/' + str(entry)
entry_content = sio.loadmat(entry_filepath)
# [('cerp', (1, 9), 'cell')]
cerp = entry_content.get('cerp')
for i in range(9): # 9 different stimuli
num_trials += cerp[0, i].shape[2]
print('num trials stimuli number: ' + str(i + 1) + ' ==> ' + str(cerp[0, i].shape[2]))
print(entry + " -- number trials in total: " + str(num_trials))
print('---------------------------')
def get_num_trials(dataset):
""" counts the number of trials for each stimulus for one dataset """
num_trials = 0
entry_content = sio.loadmat(dataset)
# [('cerp', (1, 9), 'cell')]
cerp = entry_content.get('cerp')
for i in range(9): # 9 different stimuli
# print(i)
num_trials += cerp[0, i].shape[2]
# print(dataset + " -- number trials in total: " + str(num_trials))
return num_trials
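# e.g. a dataset whose nine cerp cells hold 40 trials each returns 360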
def choose_dataset():
""" lets the user choose a dataset from all available ones """
subj_text = 'Choose a subject: '
test_subjects = listdir('../data/animals/')
animal_no, _ = pick(test_subjects, subj_text)
filepath = '../data/animals/' + animal_no + '/'
entries = listdir(filepath)
data_text = 'Choose one of the following datasets: '
entry, _ = pick(entries, data_text)
full_path = filepath + str(entry)
return full_path
def balance_dataset(x, y):
""" balances the train set """
np.random.seed(42) # to consistently create the same random numpy array
unique, counts = np.unique(y, return_counts=True)
max_count = max(counts)
x_train_bal = x # maybe remove and just overwrite x
y_train_bal = y # maybe remove and just overwrite y
for i in range(num_classes):
class_indices = np.where(y == i)
diff = max_count - counts[i] # calculate how many datapoints to resample
oversampling_idx = np.random.randint(counts[i], size=diff) # get random indices for resampling for curr class
data_idx = class_indices[0][oversampling_idx] # get indices in complete y array for oversampling indices
oversampled_x = x[data_idx]
oversampled_y = y[data_idx] # get actual datapoints at oversampling indices
# append these samples to x and y respectively
x_train_bal = np.concatenate((x_train_bal, oversampled_x), axis=0)
y_train_bal = np.concatenate((y_train_bal, oversampled_y), axis=0)
# shuffle and return created balanced training dataset
x_train_bal, y_train_bal = sklearn.utils.shuffle(x_train_bal, y_train_bal, random_state=42)
return x_train_bal, y_train_bal
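# oversampling sketch: with class counts [30, 50, 20], classes 0 and 2 get 20 and 30 extra random draws so every class reaches 50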
def split_dataset_same_distribution():
""" split one or more datasets mixed into train/train-dev/test-dev/test """
entry_list = []
add = 'y'
while add == 'y':
# add another dataset
full_path = choose_dataset()
entry_list.append(full_path)
# print(entry_list)
add_text = 'Add another dataset? '
add, _ = pick(('y', 'n'), add_text)
cerps = np.empty((601, 32, 0)) # (samples, channels, trials)
targets = np.empty(0) # init y array
for entry_path in entry_list:
curr_cerp, curr_target = get_data(entry_path)
cerps = np.concatenate((cerps, curr_cerp), axis=-1)
targets = np.concatenate((targets, curr_target), axis=0)
cerps = np.moveaxis(cerps, -1, 0) # move trial axis to the front as required for train_test_split
# (trials, samples, channels)
print('splitting and standardizing dataset(s)...')
# split 40/20/20/20
x_train_tmp, x_tmp, y_train_tmp, y_tmp = train_test_split(cerps, targets, train_size=0.6,
shuffle=True, random_state=42)
x_train, x_train_dev, y_train, y_train_dev = train_test_split(x_train_tmp, y_train_tmp,
train_size=2 / 3,
random_state=42)
x_test, x_dev, y_test, y_dev = train_test_split(x_tmp, y_tmp, test_size=0.5,
random_state=42)
# standardize after splitting because we cannot expect to have the full test set available in reality
x_train_std, x_train_dev_std, x_dev_std, x_test_std = standardize_datasets(x_train, x_train_dev, x_dev, x_test)
# balance after standardizing, balance only train set
x_train_bal, y_train_bal = balance_dataset(x_train_std, y_train)
return x_train_bal, y_train_bal, x_train_dev_std, y_train_dev, x_dev_std, y_dev, x_test_std, y_test
def get_data(entry_filepath):
""" load single-stimuli cerps in one comprehensive array and create corresponding targets for training """
entry_content = sio.loadmat(entry_filepath)
cerp = entry_content.get('cerp')
curr_c = np.empty((601, 32, 0)) # (samples, channels, trials)
curr_t = np.empty(get_num_trials(entry_filepath)) # init y array
lower = 0
upper = 0
if num_classes == 9:
for i in range(9):
curr_c = np.concatenate((curr_c, cerp[0, i]), axis=-1) # concatenate along trial-axis
upper += cerp[0, i].shape[2]
curr_t[lower:upper] = i # target_stimuli range from 0 to 8 -> stimuli 1 to 9
lower = upper
elif num_classes == 3:
# cerp 0, 1, 2 -> t = 0; cerp 3, 4, 5 -> t = 1; cerp 6, 7, 8 -> t = 2
for i in range(3):
curr_c = np.concatenate((curr_c, cerp[0, num_classes * i]), axis=-1) # cerp 0, 3 and 6
curr_c = np.concatenate((curr_c, cerp[0, num_classes * i + 1]), axis=-1) # cerp 1, 4 and 7
curr_c = np.concatenate((curr_c, cerp[0, num_classes * i + 2]), axis=-1) # cerp 2, 5 and 8
upper = upper + cerp[0, num_classes * i].shape[2] + cerp[0, num_classes * i + 1].shape[2] + \
cerp[0, num_classes * i + 2].shape[2]
curr_t[lower:upper] = i # target_stimuli range from 0 to 2 -> stimuli 1,2,3 - 4,5,6 - 7,8,9
lower = upper
return curr_c, curr_t.astype(int)
def create_data_meshes(data):
""" create data meshes needed as input for the CNN-part of the models
spatial channel ordering:
23 19 15 11 6 2 30 26
24 20 16 12 5 1 29 25
22 18 14 10 7 3 31 27
21 17 13 9 8 4 32 28
"""
data_mesh_2d = np.empty((data.shape[0], 601, 4, 8, 1))
# reorder according to the spatial arrangement given in the docstring
channel_layout = np.array([
[23, 19, 15, 11, 6, 2, 30, 26],
[24, 20, 16, 12, 5, 1, 29, 25],
[22, 18, 14, 10, 7, 3, 31, 27],
[21, 17, 13, 9, 8, 4, 32, 28],
])
for row in range(4):
for col in range(8):
data_mesh_2d[:, :, row, col, 0] = data[:, :, channel_layout[row, col] - 1]
return data_mesh_2d
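# shape sketch: for input of shape (trials, 601, 32), the result has shape (trials, 601, 4, 8, 1), i.e. a 4x8 electrode grid per time sample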
def standardize_datasets(x_train, x_train_dev, x_dev, x_test):
"""
get normalization "scale" on the training set and apply it on the dev/test set
"""
scaler = sklearn.preprocessing.StandardScaler()
# get separate scale for every channel over all trials
for trial in range(x_train.shape[0]):
std_scale = scaler.fit(x_train[trial, :, :]) # shape [n_samples, n_features]
x_train_std = standardize_dataset(x_train, std_scale)
x_dev_std = standardize_dataset(x_dev, std_scale)
x_train_dev_std = standardize_dataset(x_train_dev, std_scale)
x_test_std = standardize_dataset(x_test, std_scale)
return x_train_std, x_train_dev_std, x_dev_std, x_test_std
def standardize_dataset(dataset, scale):
dataset_std = np.empty(dataset.shape)
for trial in range(dataset.shape[0]):
dataset_std[trial, :, :] = scale.transform(dataset[trial, :, :])
return dataset_std
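# Hedged usage sketch (hypothetical pre-split arrays of shape (trials, samples,
# channels)): the scale must come from the training set only and then be applied
# unchanged to the held-out sets, which is exactly what the functions above do.
def _sketch_standardize_usage(x_train, x_train_dev, x_dev, x_test):
    x_tr, x_trd, x_d, x_te = standardize_datasets(x_train, x_train_dev, x_dev, x_test)
    # per-channel mean of the standardized training set should be ~0
    assert np.allclose(x_tr.reshape(-1, x_tr.shape[-1]).mean(axis=0), 0, atol=1e-6)
    return x_tr, x_trd, x_d, x_te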
| null |
preprocessing_data.py
|
preprocessing_data.py
|
py
| 9,849 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "datetime.date",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "os.listdir",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "scipy.io.loadmat",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "scipy.io",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "scipy.io.loadmat",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "scipy.io",
"line_number": 39,
"usage_type": "name"
},
{
"api_name": "os.listdir",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "pick.pick",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "os.listdir",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "pick.pick",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "numpy.random.seed",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 67,
"usage_type": "attribute"
},
{
"api_name": "numpy.unique",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "default_params.num_classes",
"line_number": 75,
"usage_type": "argument"
},
{
"api_name": "numpy.where",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "numpy.random.randint",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 78,
"usage_type": "attribute"
},
{
"api_name": "numpy.concatenate",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "numpy.concatenate",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "sklearn.utils.shuffle",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "sklearn.utils",
"line_number": 88,
"usage_type": "attribute"
},
{
"api_name": "pick.pick",
"line_number": 104,
"usage_type": "call"
},
{
"api_name": "numpy.empty",
"line_number": 106,
"usage_type": "call"
},
{
"api_name": "numpy.empty",
"line_number": 107,
"usage_type": "call"
},
{
"api_name": "numpy.concatenate",
"line_number": 111,
"usage_type": "call"
},
{
"api_name": "numpy.concatenate",
"line_number": 112,
"usage_type": "call"
},
{
"api_name": "numpy.moveaxis",
"line_number": 114,
"usage_type": "call"
},
{
"api_name": "sklearn.model_selection.train_test_split",
"line_number": 119,
"usage_type": "call"
},
{
"api_name": "sklearn.model_selection.train_test_split",
"line_number": 121,
"usage_type": "call"
},
{
"api_name": "sklearn.model_selection.train_test_split",
"line_number": 124,
"usage_type": "call"
},
{
"api_name": "scipy.io.loadmat",
"line_number": 138,
"usage_type": "call"
},
{
"api_name": "scipy.io",
"line_number": 138,
"usage_type": "name"
},
{
"api_name": "numpy.empty",
"line_number": 140,
"usage_type": "call"
},
{
"api_name": "numpy.empty",
"line_number": 141,
"usage_type": "call"
},
{
"api_name": "default_params.num_classes",
"line_number": 145,
"usage_type": "name"
},
{
"api_name": "numpy.concatenate",
"line_number": 147,
"usage_type": "call"
},
{
"api_name": "default_params.num_classes",
"line_number": 152,
"usage_type": "name"
},
{
"api_name": "numpy.concatenate",
"line_number": 155,
"usage_type": "call"
},
{
"api_name": "default_params.num_classes",
"line_number": 155,
"usage_type": "name"
},
{
"api_name": "numpy.concatenate",
"line_number": 156,
"usage_type": "call"
},
{
"api_name": "default_params.num_classes",
"line_number": 156,
"usage_type": "name"
},
{
"api_name": "numpy.concatenate",
"line_number": 157,
"usage_type": "call"
},
{
"api_name": "default_params.num_classes",
"line_number": 157,
"usage_type": "name"
},
{
"api_name": "default_params.num_classes",
"line_number": 158,
"usage_type": "name"
},
{
"api_name": "default_params.num_classes",
"line_number": 159,
"usage_type": "name"
},
{
"api_name": "numpy.empty",
"line_number": 174,
"usage_type": "call"
},
{
"api_name": "sklearn.preprocessing.StandardScaler",
"line_number": 216,
"usage_type": "call"
},
{
"api_name": "sklearn.preprocessing",
"line_number": 216,
"usage_type": "attribute"
},
{
"api_name": "numpy.empty",
"line_number": 229,
"usage_type": "call"
}
] |
369235337
|
import autograd.numpy as np
from autograd import grad
from tdm.base.base import BaseEstimator
from tdm.metrics.metrics import mean_squared_error
class BaseRegression(BaseEstimator):
    def __init__(self, lr=0.01, penalty='None', C=0.01, tolerance=0.0001, max_iters=1000):
        self.C = C
        self.lr = lr
        self.penalty = penalty
        self.tolerance = tolerance  # was never stored, but gradient_descent relies on it
        self.max_iters = max_iters
        self.theta = []
        self.errors = []
        self.n_samples, self.n_features = None, None
def init_cost(self):
raise NotImplementedError()
def train(self):
self.theta, self.errors = self.gradient_descent()
def fit(self, X, y=None):
self._setup_input(X, y)
self.init_cost()
self.n_samples, self.n_features = X.shape
self.theta = np.random.normal(size=(self.n_features+1), scale=0.5)
self.X = self._add_intercept(self.X)
self.train()
@staticmethod
def _add_intercept(X):
b = np.ones([X.shape[0], 1])
return np.concatenate([b, X], axis=1)
def cost(self, X, y, theta):
prediction = X.dot(theta)
error = self.cost_func(y, prediction)
return error
def loss(self, w):
raise NotImplementedError()
    def gradient_descent(self):
        theta = self.theta
        errors = [self.cost(self.X, self.y, theta)]
        cost_d = grad(self.loss)  # autograd gradient of the (possibly penalized) loss
        for i in range(1, self.max_iters + 1):
            delta = cost_d(theta)
            theta -= self.lr * delta
            errors.append(self.cost(self.X, self.y, theta))
            # stop early once the improvement in the cost becomes negligible
            error_diff = np.abs(errors[i - 1] - errors[i])
            if error_diff < self.tolerance:
                break
        return theta, errors
    def _add_penalty(self, loss, w):
        if self.penalty == 'l1':
            loss += self.C * np.abs(w[1:]).sum()
        elif self.penalty == 'l2':
            loss += (0.5 * self.C) * (w[1:] ** 2).sum()
        return loss
class LinearRegression(BaseRegression):
def loss(self, w):
loss = self.cost_func(self.y, np.dot(self.X, w))
return self._add_penalty(loss, w)
def init_cost(self):
self.cost_func = mean_squared_error
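# Hedged usage sketch (toy data, hypothetical shapes; assumes BaseEstimator's
# _setup_input stores the inputs as self.X / self.y, as fit() above relies on):
if __name__ == '__main__':
    np.random.seed(0)
    X = np.random.normal(size=(100, 3))
    y = X.dot(np.array([1.5, -2.0, 0.5])) + 0.1 * np.random.normal(size=100)
    model = LinearRegression(lr=0.05, penalty='l2', C=0.01)
    model.fit(X, y)
    print('final training MSE:', model.errors[-1])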
| null |
tdm/linear_model.py
|
linear_model.py
|
py
| 2,182 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "tdm.base.base.BaseEstimator",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "autograd.numpy.random.normal",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "autograd.numpy.random",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "autograd.numpy",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "autograd.numpy.ones",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "autograd.numpy",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "autograd.numpy.concatenate",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "autograd.numpy",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "autograd.grad",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "autograd.numpy.linalg.norm",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "autograd.numpy.linalg",
"line_number": 47,
"usage_type": "attribute"
},
{
"api_name": "autograd.numpy",
"line_number": 47,
"usage_type": "name"
},
{
"api_name": "autograd.numpy.abs",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "autograd.numpy",
"line_number": 53,
"usage_type": "name"
},
{
"api_name": "autograd.numpy.dot",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "autograd.numpy",
"line_number": 61,
"usage_type": "name"
},
{
"api_name": "tdm.metrics.metrics.mean_squared_error",
"line_number": 64,
"usage_type": "name"
}
] |
232067299
|
###############################################################################
# SKA South Africa (http://ska.ac.za/) #
# Author: [email protected] #
# Copyright @ 2013 SKA SA. All rights reserved. #
# #
# THIS SOFTWARE MAY NOT BE COPIED OR DISTRIBUTED IN ANY FORM WITHOUT THE #
# WRITTEN PERMISSION OF SKA SA. #
###############################################################################
import unittest2 as unittest
import logging
import copy
import time
import threading
import tornado
import mock
from thread import get_ident as get_thread_ident
from functools import partial
from concurrent.futures import Future, TimeoutError
from katcp.testutils import (DeviceTestServer, DeviceTestSensor,
start_thread_with_cleanup, TimewarpAsyncTestCase,
TimewarpAsyncTestCaseTimeAdvancer)
from katcp import resource, inspecting_client, ioloop_manager, Message, Sensor
from katcp.core import AttrDict, AsyncEvent
# module under test
from katcp import resource_client
logger = logging.getLogger(__name__)
class test_transform_future(tornado.testing.AsyncTestCase):
def test_transform(self):
orig_f = tornado.concurrent.Future()
transform = mock.Mock()
trans_f = resource_client.transform_future(transform, orig_f)
retval = mock.Mock()
orig_f.set_result(retval)
self.assertIs(trans_f.result(), transform.return_value)
transform.assert_called_once_with(retval)
@tornado.testing.gen_test
def test_exception_in_future(self):
class AnException(Exception): pass
@tornado.gen.coroutine
def raiser():
raise AnException
orig_f = raiser()
transform = mock.Mock()
trans_f = resource_client.transform_future(transform, orig_f)
with self.assertRaises(AnException):
trans_f.result()
def test_exception_in_transform(self):
orig_f = tornado.concurrent.Future()
transform = mock.Mock()
class AnException(Exception): pass
transform.side_effect = AnException
trans_f = resource_client.transform_future(transform, orig_f)
retval = mock.Mock()
orig_f.set_result(retval)
transform.assert_called_once_with(retval)
with self.assertRaises(AnException):
trans_f.result()
class test_KATCPClientResourceRequest(unittest.TestCase):
def setUp(self):
self.mock_client = mock.Mock()
self.DUT = resource_client.KATCPClientResourceRequest(
'the-request', 'The description', self.mock_client)
def test_init(self):
self.assertEqual(self.DUT.name, 'the-request')
self.assertEqual(self.DUT.description, 'The description')
# Check that we are registered to the correct ABC
self.assertIsInstance(self.DUT, resource.KATCPRequest)
def test_request(self):
reply = self.DUT('parm1', 2)
self.mock_client.wrapped_request.assert_called_once_with(
'the-request', 'parm1', 2)
self.assertIs(reply, self.mock_client.wrapped_request.return_value)
class test_KATCPClientResource(tornado.testing.AsyncTestCase):
def test_init(self):
resource_spec = dict(
name='testdev',
description='resource for testing',
address=('testhost', 12345),
controlled=True)
DUT = resource_client.KATCPClientResource(dict(resource_spec))
self.assertEqual(DUT.address, resource_spec['address'])
self.assertEqual(DUT.state, 'disconnected')
self.assertEqual(DUT.name, resource_spec['name'])
self.assertEqual(DUT.description, resource_spec['description'])
self.assertEqual(DUT.parent, None)
self.assertEqual(DUT.children, {})
self.assertEqual(DUT.controlled, True)
# Now try with a parent and no control
resource_spec['controlled'] = False
parent = mock.Mock()
DUT = resource_client.KATCPClientResource(
dict(resource_spec), parent=parent)
self.assertEqual(DUT.parent, parent)
self.assertEqual(DUT.controlled, False)
@tornado.testing.gen_test
def test_control(self):
always_allow = ('req-one', 'req_two', 'exclude_one')
always_exclude = ('exclude_one', 'exclude-two')
normal = ('normal', 'another-normal')
def katcp_form(reqs):
return tuple(r.replace('_', '-') for r in reqs)
dev_requests = set(katcp_form(always_allow + always_exclude + normal))
resource_spec = dict(
name='testdev',
address=('testhost', 12345),
always_allowed_requests=always_allow,
always_excluded_requests=always_exclude,
controlled=True)
def get_DUT():
DUT = resource_client.KATCPClientResource(dict(resource_spec))
ic = DUT._inspecting_client = mock.Mock()
def future_get_request(key):
f = tornado.concurrent.Future()
f.set_result(key)
return f
ic.future_get_request.side_effect = future_get_request
return DUT
DUT = get_DUT()
yield DUT._add_requests(dev_requests)
        # We expect all the requests, except for those in the always_exclude list, to be
        # available. Note that exclude-one should not be available even though it is in
        # always_allow, since always_exclude overrides always_allow.
self.assertEqual(sorted(DUT.req),
sorted(['req_one', 'req_two', 'normal', 'another_normal']))
# Now try one with no control, only req-one and req-two should be available
resource_spec['controlled'] = False
DUT = get_DUT()
yield DUT._add_requests(dev_requests)
self.assertEqual(sorted(DUT.req), sorted(['req_one', 'req_two']))
@tornado.testing.gen_test
def test_list_sensors(self):
resource_spec = dict(
name='testdev',
address=('testhost', 12345))
DUT = resource_client.KATCPClientResource(resource_spec)
sens_manager = mock.create_autospec(
resource_client.KATCPClientResourceSensorsManager(mock.Mock(), "test"))
test_sensors_info = AttrDict(
sens_one=AttrDict(name='sens-one', description='sensor one', value=1),
sens_two=AttrDict(name='sens.two', description='sensor one', value=2),
sens_three=AttrDict(name='sens_three', description='sensor three', value=3))
sensor_strategies = dict(sens_one='event', sens_three='period 10')
def make_test_sensors(sensors_info):
test_sensors = AttrDict()
for sens_pyname, info in sensors_info.items():
info = dict(info)
info['sensor_type'] = Sensor.INTEGER
val = info.pop('value')
timestamp = val*10
received_timestamp = timestamp + 1
sens = test_sensors[sens_pyname] = resource.KATCPSensor(
info, sens_manager)
sens._reading = resource.KATCPSensorReading(
received_timestamp, timestamp, Sensor.NOMINAL, val)
test_sensors[sens_pyname] = sens
return test_sensors
test_sensors = make_test_sensors(test_sensors_info)
sens_manager.get_sampling_strategy.side_effect = (
lambda sens_name: resource.normalize_strategy_parameters(
sensor_strategies.get(
resource.escape_name(sens_name), 'none')) )
DUT.sensor.update(test_sensors)
# Simple search based on python identifier
result = yield DUT.list_sensors('sens_one')
self.assertEqual(len(result), 1)
self.assertEqual(result[0], resource.SensorResultTuple(
test_sensors.sens_one, test_sensors_info.sens_one.name,
'sens_one', test_sensors_info.sens_one.description, 'integer', '',
test_sensors.sens_one.reading))
# Now get all the sensors
result = yield DUT.list_sensors('')
expected_result = sorted(resource.SensorResultTuple(
test_sensors[s_id], test_sensors_info[s_id].name,
s_id, test_sensors_info[s_id].description, 'integer', '',
test_sensors[s_id].reading)
for s_id in test_sensors_info)
self.assertEqual(sorted(result), expected_result)
# Test that all sensors are found using their Python identifiers
result = yield DUT.list_sensors('sens_two')
self.assertEqual(len(result), 1)
self.assertEqual(result[0].object, test_sensors.sens_two)
result = yield DUT.list_sensors('sens_three')
self.assertEqual(len(result), 1)
self.assertEqual(result[0].object, test_sensors.sens_three)
# Test using actual sensor name
result = yield DUT.list_sensors('sens_one', use_python_identifiers=False)
self.assertEqual(len(result), 0)
result = yield DUT.list_sensors('sens-one', use_python_identifiers=False)
self.assertEqual(len(result), 1)
self.assertEqual(result[0].name, 'sens-one')
# Now test with strategy filter
result = yield DUT.list_sensors('', strategy=True)
self.assertEqual(len(result), len(sensor_strategies))
def test_until_sync_states(self):
resource_spec = dict(
name='testdev',
address=('testhost', 12345))
DUT = resource_client.KATCPClientResource(resource_spec)
# We expect the initial state to be 'disconnected', which means until_synced()
# should return an unresolved future and until_not_synced() a resolved future
self.assertEqual(DUT.state, 'disconnected')
self.assertFalse(DUT.until_synced().done())
self.assertTrue(DUT.until_not_synced().done())
# Force state to 'syncing', same expectation as for 'disconnected'
DUT._state.set_state('syncing')
self.assertFalse(DUT.until_synced().done())
self.assertTrue(DUT.until_not_synced().done())
# Force state to 'synced', opposite expectation as for 'disconnected'
DUT._state.set_state('synced')
self.assertTrue(DUT.until_synced().done())
self.assertFalse(DUT.until_not_synced().done())
class test_KATCPClientResource_Integrated(tornado.testing.AsyncTestCase):
def setUp(self):
super(test_KATCPClientResource_Integrated, self).setUp()
self.server = DeviceTestServer('', 0)
start_thread_with_cleanup(self, self.server)
self.host, self.port = self.server.bind_address
self.default_resource_spec = dict(
name='thething',
address=self.server.bind_address,
controlled=True)
@tornado.gen.coroutine
def _get_DUT_and_sync(self, resource_spec):
        DUT = resource_client.KATCPClientResource(resource_spec)
DUT.start()
yield DUT.until_state('synced')
raise tornado.gen.Return(DUT)
@tornado.testing.gen_test(timeout=1)
def test_requests(self):
DUT = yield self._get_DUT_and_sync(self.default_resource_spec)
# Check that all the test-device requests are listed
self.assertEqual(sorted(DUT.req),
sorted(n.replace('-', '_')
for n in self.server.request_names))
@tornado.testing.gen_test(timeout=1)
def test_active(self):
DUT = yield self._get_DUT_and_sync(self.default_resource_spec)
        self.assertTrue(DUT.is_active(), 'Expect DUT to be active initially')
reply = yield DUT.req.new_command()
        self.assertTrue(reply.succeeded, 'Expect request to be successful in active state')
# Set DUT to 'inactive'
DUT.set_active(False)
with self.assertRaises(resource.KATCPResourceInactive):
# Should raise if we attempt to do the request when inactive
yield DUT.req.new_command()
# Set DUT to back to 'active'
DUT.set_active(True)
reply = yield DUT.req.new_command()
        self.assertTrue(reply.succeeded, 'Expect request to be successful in active state')
@tornado.testing.gen_test(timeout=1)
def test_sensors(self):
DUT = yield self._get_DUT_and_sync(self.default_resource_spec)
# Check that all the test-device sensors are listed
self.assertEqual(sorted(DUT.sensor),
sorted(n.replace('-', '_').replace('.', '_')
for n in self.server.sensor_names))
@tornado.testing.gen_test(timeout=1)
def test_interface_change(self):
DUT = yield self._get_DUT_and_sync(self.default_resource_spec)
sensors_before = set(DUT.sensor)
reqs_before = set(DUT.req)
# Add a new sensor to the server
sensor = DeviceTestSensor(DeviceTestSensor.INTEGER, "another.int",
"An Integer.",
"count", [-5, 5], timestamp=self.io_loop.time(),
status=DeviceTestSensor.NOMINAL, value=3)
self.server.add_sensor(sensor)
# Check that the sensor does not exist currently
self.assertNotIn(resource.escape_name(sensor.name), sensors_before)
# Add a new request to the server
def request_sparkling_new(self, req, msg):
"""A new command."""
return Message.reply(msg.name, "ok", "bling1", "bling2")
self.server._request_handlers['sparkling-new'] = request_sparkling_new
# Check that the request did not exist before
self.assertNotIn('sparkling-new', reqs_before)
# Issue #interface-changed
self.server.mass_inform(Message.inform('interface-changed'))
yield DUT.until_state('syncing')
yield DUT.until_state('synced')
# Check if sensor/request was added
self.assertEqual(set(DUT.sensor) - sensors_before, set(['another_int']))
self.assertEqual(set(DUT.req) - reqs_before, set(['sparkling_new']))
# And now remove them again
self.server._request_handlers.pop('sparkling-new')
self.server.remove_sensor('another.int')
# Issue #interface-changed
self.server.mass_inform(Message.inform('interface-changed'))
yield DUT.until_state('syncing')
yield DUT.until_state('synced')
# Check if sensor/request was removed
self.assertEqual(set(DUT.sensor), sensors_before)
self.assertEqual(set(DUT.req), reqs_before)
class test_KATCPClientResource_IntegratedTimewarp(TimewarpAsyncTestCase):
def setUp(self):
super(test_KATCPClientResource_IntegratedTimewarp, self).setUp()
self.server = DeviceTestServer('', 0)
start_thread_with_cleanup(self, self.server)
self.host, self.port = self.server.bind_address
self.default_resource_spec = dict(
name='thething',
address=self.server.bind_address,
controlled=True)
@tornado.gen.coroutine
def _get_DUT_and_sync(self, resource_spec):
        DUT = resource_client.KATCPClientResource(resource_spec)
DUT.start()
yield DUT.until_state('synced')
raise tornado.gen.Return(DUT)
@tornado.testing.gen_test
def test_disconnect(self):
# Test that a device disconnect / reconnect is correctly handled
DUT = yield self._get_DUT_and_sync(self.default_resource_spec)
initial_reqs = set(DUT.req)
initial_sensors = set(DUT.sensor)
self.server.stop()
self.server.join(timeout=1)
yield DUT.until_state('disconnected')
# Test that requests fail
rep = yield DUT.req.watchdog()
self.assertFalse(rep.succeeded)
# Restart device so that we can reconnect
self.server.start()
        # timewarp beyond the reconnect delay
self.set_ioloop_time(self.ioloop_time + 1)
yield DUT.until_state('syncing')
yield DUT.until_state('synced')
# check that sensors / requests are unchanged
self.assertEqual(set(DUT.req), initial_reqs)
self.assertEqual(set(DUT.sensor), initial_sensors)
# Now disconnect and change the device, to check that it is properly resynced.
self.server.stop()
self.server.join(timeout=1)
yield DUT.until_state('disconnected')
# Add a new request to the server
def request_sparkling_new(self, req, msg):
"""A new command."""
return Message.reply(msg.name, "ok", "bling1", "bling2")
self.server._request_handlers['sparkling-new'] = request_sparkling_new
# Check that the request does not exist currently
self.assertNotIn('sparkling_new', initial_reqs)
# Add a new sensor to the server
sensor = DeviceTestSensor(DeviceTestSensor.INTEGER, "another.int",
"An Integer.",
"count", [-5, 5], timestamp=self.io_loop.time(),
status=DeviceTestSensor.NOMINAL, value=3)
self.server.add_sensor(sensor)
# Check that the sensor does not exist currently
escaped_new_sensor = resource.escape_name(sensor.name)
self.assertNotIn(resource.escape_name(sensor.name), initial_sensors)
# Restart device so that we can reconnect
self.server.start()
        # timewarp beyond the reconnect delay
self.set_ioloop_time(self.ioloop_time + 1)
yield DUT.until_state('syncing')
yield DUT.until_state('synced')
# check that sensors / requests are correctly updated
self.assertEqual(set(DUT.req), initial_reqs | set(['sparkling_new']))
self.assertEqual(set(DUT.sensor), initial_sensors | set([escaped_new_sensor]))
@tornado.testing.gen_test(timeout=1000)
def test_set_sensor_sampling(self):
self.server.stop()
self.server.join()
DUT = resource_client.KATCPClientResource(self.default_resource_spec)
DUT.start()
yield tornado.gen.moment
test_strategy = ('period', '2.5')
yield DUT.set_sensor_strategy('an_int', test_strategy)
# Double-check that the sensor does not yet exist
self.assertNotIn('an_int', DUT.sensor)
self.server.start()
self.server.wait_running(timeout=1)
advancer = TimewarpAsyncTestCaseTimeAdvancer(self, quantum=0.55)
advancer.start()
yield DUT.until_synced()
self.assertEqual(DUT.sensor.an_int.sampling_strategy, test_strategy)
# Now call set_sensor_strategy with a different strategy and check that it is
# applied to the real sensor
new_test_strategy = ('event',)
yield DUT.set_sensor_strategy('an_int', new_test_strategy)
self.assertEqual(DUT.sensor.an_int.sampling_strategy, new_test_strategy)
@tornado.testing.gen_test(timeout=1000)
def test_set_sensor_listener(self):
self.server.stop()
self.server.join()
resource_spec = self.default_resource_spec
DUT = resource_client.KATCPClientResource(resource_spec)
DUT.start()
yield tornado.gen.moment
test_listener1 = lambda *x : None
test_listener2 = lambda *y : None
DUT.set_sensor_listener('an_int', test_listener1)
# Double-check that the sensor does not yet exist
self.assertNotIn('an_int', DUT.sensor)
self.server.start()
self.server.wait_running(timeout=1)
advancer = TimewarpAsyncTestCaseTimeAdvancer(self, quantum=0.55)
advancer.start()
yield DUT.until_synced()
self.assertTrue(DUT.sensor.an_int.is_listener, test_listener1)
        # Now call set_sensor_listener with a different listener and check that it is
        # also subscribed
DUT.set_sensor_listener('an_int', test_listener2)
self.assertTrue(DUT.sensor.an_int.is_listener, test_listener2)
# TODO tests
#
# * Sensor strategy re-application
# * Request through request object, also with timeouts
# * Sensor callbacks (probably in test_resource.py, no need for full integrated test)
class test_KATCPClientResourceContainer(tornado.testing.AsyncTestCase):
def setUp(self):
self.default_spec_orig = dict(clients={
'client1': dict(address=('client1-addr', 1234), controlled=True),
'client-2': dict(address=('client2-addr', 1235), controlled=True),
'another-client': dict(address=('another-addr', 1231), controlled=True)},
name='test-container',
description='container for testing')
# make a copy in case the test or DUT messes up any of the original dicts.
self.default_spec = copy.deepcopy(self.default_spec_orig)
super(test_KATCPClientResourceContainer, self).setUp()
@tornado.testing.gen_test
def test_groups(self):
spec = self.default_spec
spec['groups'] = dict(group1=['client1', 'another-client'],
group2=['client1', 'client-2'],
group3=['client1', 'client-2', 'another-client'])
DUT = resource_client.KATCPClientResourceContainer(copy.deepcopy(spec))
self.assertEqual(sorted(DUT.groups), ['group1', 'group2', 'group3'])
for group_name, group in DUT.groups.items():
# Smoke test that no errors are raised
group.req
# Check that the correct clients are in each group
self.assertEqual(sorted(client.name for client in group.clients),
sorted(spec['groups'][group_name]))
# now some surgery, mocking _inspecting_client and calling _add_requests manually
def mock_inspecting_client(client):
make_fake_requests = lambda mock_client: {
req: resource_client.KATCPClientResourceRequest(
req, 'Description for {}'.format(req), mock_client)
for req in ['req-1', 'req-2', 'req-3']}
def _install_inspecting_client_mocks(mock_client):
fake_requests = make_fake_requests(mock_client)
def future_get_request(key):
f = tornado.concurrent.Future()
f.set_result(fake_requests[key])
return f
def wrapped_request(request_name, *args, **kwargs):
f = tornado.concurrent.Future()
retval = resource.KATCPReply(Message.reply(request_name, 'ok'), [])
f.set_result(retval)
return f
mock_client.future_get_request.side_effect = future_get_request
mock_client.wrapped_request.side_effect = wrapped_request
return future_get_request
client._inspecting_client = mock_inspecting_client = mock.Mock(
spec_set=resource_client.ReplyWrappedInspectingClientAsync)
_install_inspecting_client_mocks(mock_inspecting_client)
return mock_inspecting_client
m_i_c_1 = mock_inspecting_client(DUT.children.client1)
m_i_c_2 = mock_inspecting_client(DUT.children.client_2)
m_i_c_a = mock_inspecting_client(DUT.children.another_client)
normalize_reply = lambda reply: {c:r if r is None else str(r.reply)
for c, r in reply.items()}
yield DUT.children.client1._add_requests(['req-1'])
g1_reply = yield DUT.groups.group1.req.req_1()
self.assertEqual(normalize_reply(g1_reply),
{'client1': '!req-1 ok', 'another-client': None})
# Should evaluate false since not all the clients replied
self.assertFalse(g1_reply)
yield DUT.children.another_client._add_requests(['req-1'])
g1_reply = yield DUT.groups.group1.req.req_1()
self.assertEqual(normalize_reply(g1_reply),
{'client1': '!req-1 ok', 'another-client': '!req-1 ok'})
        # Should evaluate True since all the clients replied successfully
self.assertTrue(g1_reply)
yield DUT.children.client_2._add_requests(['req-2'])
# client-2 is in group2 and group3, so req-2 should now show up.
self.assertIn('req_2', DUT.groups.group2.req)
self.assertIn('req_2', DUT.groups.group3.req)
# Check that the requests weren't accidentally added to another group
self.assertFalse('req_2' in DUT.groups.group1.req)
def test_init(self):
m_logger = mock.Mock()
DUT = resource_client.KATCPClientResourceContainer(
self.default_spec, logger=m_logger)
self.assertEqual(DUT.name, 'test-container')
self.assertEqual(DUT.description, 'container for testing')
child_specs = self.default_spec_orig['clients']
self.assertEqual(sorted(DUT.children),
sorted(resource.escape_name(n) for n in child_specs))
for child_name, child_spec in child_specs.items():
child = DUT.children[resource.escape_name(child_name)]
self.assertEqual(child.name, child_name)
self.assertEqual(child.parent, DUT)
self.assertEqual(child.address, child_spec['address'])
self.assertIs(child._logger, m_logger)
def test_set_active(self):
DUT = resource_client.KATCPClientResourceContainer(self.default_spec)
mock_children = {n: mock.Mock(spec_set=c, wraps=c)
for n, c in dict.items(DUT.children)}
dict.update(DUT.children, mock_children)
self.assertTrue(DUT.is_active(), "'active' should be True initially")
for child_name, child in DUT.children.items():
self.assertTrue(child.is_active(),
"Child {} should be active".format(child_name))
# Now set active to false
DUT.set_active(False)
self.assertFalse(DUT.is_active(),
"'active' should be False after set_active(False)")
for child_name, child in DUT.children.items():
self.assertFalse(child.is_active(),
"Child {} should not be active".format(child_name))
        # And now back to active
DUT.set_active(True)
self.assertTrue(DUT.is_active(),
"'active' should be True after set_active(True)")
for child_name, child in DUT.children.items():
self.assertTrue(child.is_active(),
"Child {} should be active".format(child_name))
def test_until_sync_states(self):
DUT = resource_client.KATCPClientResourceContainer(self.default_spec)
# All children should be in 'disconnected' state, so until_synced() should return
# an unresolved future and until_not_synced() a resolved future
self.assertFalse(DUT.until_synced().done())
self.assertTrue(DUT.until_not_synced().done())
# Set all child states sync functions to resolved at not-synced to unresolved
for child in DUT.children.values():
f = tornado.concurrent.Future()
f.set_result(None)
            # Need to use partial since the closure is shared between all
            # loop iterations (see the sketch after this method)
child.until_synced = partial(lambda x : x, f)
child.until_not_synced = tornado.concurrent.Future
# Now until_synced() should be resolved and until_not_synced() unresolved
self.assertTrue(DUT.until_synced().done())
self.assertFalse(DUT.until_not_synced().done())
# Set only _one_ of the children to not-synced, should be the same as if all of
# them are disconnected
for i, child in enumerate(DUT.children.values()):
if i == 1:
# Set child to not synced
f = tornado.concurrent.Future()
f.set_result(None)
# Need to use partial since the closure is shared between all
# loop iterations
child.until_not_synced = partial(lambda x : x, f)
child.until_synced = tornado.concurrent.Future
else:
f = tornado.concurrent.Future()
f.set_result(None)
# Need to use partial since the closure is shared between all
# loop iterations
child.until_synced = partial(lambda x : x, f)
child.until_not_synced = tornado.concurrent.Future
self.assertFalse(DUT.until_synced().done())
self.assertTrue(DUT.until_not_synced().done())
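    # Hedged aside (not part of the original suite): a minimal sketch of the
    # late-binding closure gotcha that the `partial` calls above work around.
    # No `test_` prefix, so unittest will not auto-discover it; call it manually.
    def _closure_gotcha_sketch(self):
        late = [lambda: i for i in range(3)]
        self.assertEqual([f() for f in late], [2, 2, 2])  # every lambda sees the final i
        bound = [partial(lambda x: x, i) for i in range(3)]
        self.assertEqual([f() for f in bound], [0, 1, 2])  # value captured per iteration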
def test_set_ioloop(self):
        # Make two tornado IOLoop instances, one that is installed as the current thread
        # IOLoop, and one that we will explicitly pass to set_ioloop. If set_ioloop is not
        # doing its job, the children would automatically use the thread_ioloop instance.
thread_ioloop = tornado.ioloop.IOLoop()
self.addCleanup(thread_ioloop.close, all_fds=True)
thread_ioloop.make_current()
our_ioloop = tornado.ioloop.IOLoop()
self.addCleanup(our_ioloop.close, all_fds=True)
DUT = resource_client.KATCPClientResourceContainer(self.default_spec)
DUT.set_ioloop(our_ioloop)
DUT.start()
for child_name in self.default_spec_orig['clients']:
self.assertIs(DUT.children[resource.escape_name(child_name)].ioloop,
our_ioloop)
class test_KATCPClientResourceContainerIntegrated(tornado.testing.AsyncTestCase):
def setUp(self):
super(test_KATCPClientResourceContainerIntegrated, self).setUp()
self.default_spec = dict(clients={
'resource1' : dict(controlled=True),
'resource2' : dict(controlled=True),
'resource3' : dict(controlled=True)},
name='intgtest')
self.resource_names = self.default_spec['clients'].keys()
self.servers = {rn: DeviceTestServer('', 0) for rn in self.resource_names}
for i, (s_name, s) in enumerate(sorted(self.servers.items())):
start_thread_with_cleanup(self, s)
self.default_spec['clients'][s_name]['address'] = s.bind_address
# Add a unique sensor to each server
sensor = DeviceTestSensor(DeviceTestSensor.INTEGER, "int."+s_name,
"An Integer.",
"count", [-50, 50], timestamp=self.io_loop.time(),
status=DeviceTestSensor.NOMINAL, value=i)
s.add_sensor(sensor)
# Add a unique request to each server
def handler(self, req, msg):
"""A new command."""
return Message.reply(msg.name, "ok", "bling1", "bling2")
s._request_handlers['sparkling-new-'+s_name] = handler
@tornado.testing.gen_test(timeout=1000)
def test_set_sensor_sampling(self):
self.default_spec_orig = copy.deepcopy(self.default_spec)
DUT = resource_client.KATCPClientResourceContainer(self.default_spec)
DUT.start()
def side_effect(*args, **kwargs):
f = tornado.concurrent.futures.Future()
f.set_result(None)
return f
additional = {'resource1': 'sensor_1',
'resource2': 'agg_sensor,sensor_1',
'resource3': 'sensor_3'}
for x in additional:
s = self.servers[x]
for sens in additional[x].split(","):
sensor = DeviceTestSensor(DeviceTestSensor.INTEGER, sens,
"An Integer.",
"count", [-50, 50], timestamp=self.io_loop.time(),
status=DeviceTestSensor.NOMINAL, value=0)
s.add_sensor(sensor)
yield DUT.until_synced()
DUT.children.resource1.set_sensor_strategy = mock.Mock(side_effect=side_effect)
DUT.children.resource2.set_sensor_strategy = mock.Mock(side_effect=side_effect)
DUT.children.resource3.set_sensor_strategy = mock.Mock(side_effect=side_effect)
strat1 = ('period', '2.1')
strat2 = ('event',)
strat3 = ('event-rate', '2', '3')
yield DUT.set_sensor_strategy('resource1.sensor_1', strat1)
DUT.children.resource1.set_sensor_strategy.assert_called_once_with(
'sensor_1', strat1)
yield DUT.set_sensor_strategy('resource2_sensor_1', strat2)
DUT.children.resource2.set_sensor_strategy.assert_called_once_with(
'sensor_1', strat2)
DUT.children.resource2.set_sensor_strategy.reset_mock()
yield DUT.set_sensor_strategy('agg_sensor', strat1)
DUT.children.resource2.set_sensor_strategy.assert_called_once_with(
'agg_sensor', strat1)
yield DUT.set_sensor_strategy('resource3.sensor_3', strat3)
DUT.children.resource3.set_sensor_strategy.assert_called_once_with(
'sensor_3', strat3)
@tornado.testing.gen_test(timeout=1000)
def test_set_sensor_listener(self):
self.default_spec_orig = copy.deepcopy(self.default_spec)
DUT = resource_client.KATCPClientResourceContainer(self.default_spec)
DUT.start()
def side_effect(*args, **kwargs):
f = tornado.concurrent.futures.Future()
f.set_result(None)
return f
additional = {'resource1': 'sensor_1',
'resource2': 'agg_sensor,sensor_1',
'resource3': 'sensor_3'}
for x in additional:
s = self.servers[x]
for sens in additional[x].split(","):
sensor = DeviceTestSensor(DeviceTestSensor.INTEGER, sens,
"An Integer.",
"count", [-50, 50], timestamp=self.io_loop.time(),
status=DeviceTestSensor.NOMINAL, value=0)
s.add_sensor(sensor)
yield DUT.until_synced()
DUT.children.resource1.set_sensor_listener = mock.Mock(side_effect=side_effect)
DUT.children.resource2.set_sensor_listener = mock.Mock(side_effect=side_effect)
DUT.children.resource3.set_sensor_listener = mock.Mock(side_effect=side_effect)
listener1 = lambda *x : None
listener2 = lambda *y : None
listener3 = lambda *z : None
DUT.set_sensor_listener('resource1.sensor_1', listener1)
DUT.children.resource1.set_sensor_listener.assert_called_once_with(
'sensor_1', listener1)
DUT.children.resource1.set_sensor_listener.reset_mock()
DUT.set_sensor_listener('resource2_sensor_1', listener2)
DUT.children.resource2.set_sensor_listener.assert_called_once_with(
'sensor_1', listener2)
DUT.children.resource2.set_sensor_listener.reset_mock()
DUT.set_sensor_listener('agg_sensor', listener2)
DUT.children.resource2.set_sensor_listener.assert_called_once_with(
'agg_sensor', listener2)
DUT.children.resource2.set_sensor_listener.reset_mock()
DUT.set_sensor_listener('resource3.sensor_3', listener3)
DUT.children.resource3.set_sensor_listener.assert_called_once_with(
'sensor_3', listener3)
DUT.children.resource3.set_sensor_listener.reset_mock()
return
def get_expected(self, testserv_attr):
expected_items = []
for i, (serv_name, serv) in enumerate(sorted(self.servers.items())):
for item_name in getattr(serv, testserv_attr):
expected_items.append((serv_name+'_'+item_name)
.replace('.', '_')
.replace('-', '_'))
return expected_items
@tornado.gen.coroutine
def get_DUT_synced(self):
# make a copy in case the test or DUT messes up any of the original dicts.
self.default_spec_orig = copy.deepcopy(self.default_spec)
DUT = resource_client.KATCPClientResourceContainer(self.default_spec)
DUT.start()
yield DUT.until_synced()
raise tornado.gen.Return(DUT)
@tornado.testing.gen_test(timeout=1)
def test_sensors(self):
DUT = yield self.get_DUT_synced()
expected_sensors = self.get_expected('sensor_names')
self.assertEqual(sorted(DUT.sensor), sorted(expected_sensors))
# Test that some sensor objects are correctly mapped between container and client
self.assertIs(DUT.sensor.resource1_int_resource1,
DUT.children['resource1'].sensor.int_resource1)
self.assertIs(DUT.sensor.resource2_int_resource2,
DUT.children['resource2'].sensor.int_resource2)
self.assertIs(DUT.sensor.resource3_an_int,
DUT.children['resource3'].sensor.an_int)
@tornado.testing.gen_test(timeout=1)
def test_requests(self):
r2_spec = self.default_spec['clients']['resource2']
r2_spec['always_allowed_requests'] = ['sparkling-new-resource2']
r2_spec['controlled'] = False
DUT = yield self.get_DUT_synced()
# Strip out all resource2 requests (since it is not controlled) except for
# sparkling-new-resource2 which is in always_allowed_requests.
expected_requests = [r for r in self.get_expected('request_names')
if (not r.startswith('resource2_') or
r == 'resource2_sparkling_new_resource2')]
self.assertEqual(sorted(DUT.req), sorted(expected_requests))
# Test that some request objects are correctly mapped between container and client
self.assertIs(DUT.req.resource1_sparkling_new_resource1,
DUT.children['resource1'].req.sparkling_new_resource1)
self.assertIs(DUT.req.resource2_sparkling_new_resource2,
DUT.children['resource2'].req.sparkling_new_resource2)
self.assertIs(DUT.req.resource3_halt,
DUT.children['resource3'].req.halt)
class test_ThreadsafeMethodAttrWrapper(unittest.TestCase):
def setUp(self):
self.ioloop_manager = ioloop_manager.IOLoopManager(managed_default=True)
self.ioloop = self.ioloop_manager.get_ioloop()
self.ioloop_thread_wrapper = resource_client.IOLoopThreadWrapper(self.ioloop)
start_thread_with_cleanup(self, self.ioloop_manager, start_timeout=1)
def test_wrapping(self):
test_inst = self
class Wrappee(object):
def __init__(self, ioloop_thread_id):
self.thread_id = ioloop_thread_id
def a_callable(self, arg, kwarg='abc'):
test_inst.assertEqual(get_thread_ident(), self.thread_id)
return (arg * 2, kwarg * 3)
@property
def not_in_ioloop(self):
test_inst.assertNotEqual(get_thread_ident(), self.thread_id)
return 'not_in'
@property
def only_in_ioloop(self):
test_inst.assertEqual(get_thread_ident(), self.thread_id)
return 'only_in'
class TestWrapper(resource_client.ThreadSafeMethodAttrWrapper):
@property
def only_in_ioloop(self):
return self._getattr('only_in_ioloop')
id_future = Future()
self.ioloop.add_callback(lambda : id_future.set_result(get_thread_ident()))
wrappee = Wrappee(id_future.result(timeout=1))
wrapped = TestWrapper(wrappee, self.ioloop_thread_wrapper)
# First test our assumptions about Wrappee
with self.assertRaises(AssertionError):
wrappee.a_callable(3, 'a')
with self.assertRaises(AssertionError):
wrappee.only_in_ioloop
self.assertEqual(wrappee.not_in_ioloop, 'not_in')
# Now test the wrapped version
self.assertEqual(wrapped.a_callable(5, kwarg='bcd'), (10, 'bcd'*3))
self.assertEqual(wrapped.only_in_ioloop, 'only_in')
self.assertEqual(wrapped.not_in_ioloop, 'not_in')
class test_AttrMappingProxy(unittest.TestCase):
def test_wrapping(self):
test_dict = AttrDict(a=2, b=1)
class TestWrapper(object):
def __init__(self, wrappee):
self.wrappee = wrappee
def __eq__(self, other):
return self.wrappee == other.wrappee
wrapped_dict = resource_client.AttrMappingProxy(test_dict, TestWrapper)
# Test keys
self.assertEqual(wrapped_dict.keys(), test_dict.keys())
# Test key access:
for key in test_dict:
self.assertEqual(wrapped_dict[key].wrappee, test_dict[key])
# Test attribute access
for key in test_dict:
self.assertEqual(getattr(wrapped_dict, key).wrappee,
getattr(test_dict, key))
# Test whole dict comparison
self.assertEqual(wrapped_dict,
{k : TestWrapper(v) for k, v in test_dict.items()})
class test_ThreadSafeKATCPClientResourceWrapper(unittest.TestCase):
def setUp(self):
self.server = DeviceTestServer('', 0)
start_thread_with_cleanup(self, self.server)
self.ioloop_manager = ioloop_manager.IOLoopManager(managed_default=True)
self.io_loop = self.ioloop_manager.get_ioloop()
self.host, self.port = self.server.bind_address
self.default_resource_spec = dict(
name='thething',
address=self.server.bind_address,
controlled=True)
self.client_resource = resource_client.KATCPClientResource(
self.default_resource_spec)
self.client_resource.set_ioloop(self.io_loop)
self.io_loop.add_callback(self.client_resource.start)
self.ioloop_thread_wrapper = resource_client.IOLoopThreadWrapper(self.io_loop)
start_thread_with_cleanup(self, self.ioloop_manager, start_timeout=1)
self.ioloop_thread_wrapper.default_timeout = 1
self.DUT = resource_client.ThreadSafeKATCPClientResourceWrapper(
self.client_resource, self.ioloop_thread_wrapper)
self.DUT.until_synced()
def test_wrapped_timeout(self):
self.assertEqual(self.client_resource.state, 'synced')
# Test timeout
self.ioloop_thread_wrapper.default_timeout = 0.001
t0 = time.time()
with self.assertRaises(TimeoutError):
self.DUT.until_state('disconnected')
self.assertLess(time.time() - t0, 0.2)
        # Now make sure we can actually still wait on the state
self.ioloop_thread_wrapper.default_timeout = 1
self.server.stop()
self.server.join()
self.DUT.until_state('disconnected')
self.assertEqual(self.client_resource.state, 'disconnected')
self.server.start()
self.DUT.until_state('synced')
self.assertEqual(self.client_resource.state, 'synced')
def test_request(self):
reply = self.DUT.req.sensor_value('an.int')
last_server_msg = self.server.messages[-1]
self.assertTrue(reply.succeeded)
self.assertEqual(str(last_server_msg),
'?sensor-value[{}] an.int'.format(reply.reply.mid))
def test_sensor(self):
server_sensor = self.server.get_sensor('an.int')
reading = self.DUT.sensor.an_int.get_reading()
self.assertEqual(reading.value, server_sensor.read().value)
server_sensor.set_value(server_sensor.read().value + 5)
reading = self.DUT.sensor.an_int.get_reading()
self.assertEqual(reading.value, server_sensor.read().value)
class test_ThreadSafeKATCPClientResourceWrapper_container(unittest.TestCase):
def setUp(self):
self.ioloop_manager = ioloop_manager.IOLoopManager(managed_default=True)
self.io_loop = self.ioloop_manager.get_ioloop()
self.io_loop.make_current()
self.ioloop_thread_wrapper = resource_client.IOLoopThreadWrapper(self.io_loop)
start_thread_with_cleanup(self, self.ioloop_manager, start_timeout=1)
self.ioloop_thread_wrapper.default_timeout = 1
self.default_spec = dict(clients={
'resource1' : dict(controlled=True),
'resource2' : dict(controlled=True)},
name='wraptest')
self.resource_names = self.default_spec['clients'].keys()
self.servers = {rn: DeviceTestServer('', 0) for rn in self.resource_names}
for i, (s_name, s) in enumerate(sorted(self.servers.items())):
start_thread_with_cleanup(self, s)
self.default_spec['clients'][s_name]['address'] = s.bind_address
# Add a unique sensor to each server
sensor = DeviceTestSensor(DeviceTestSensor.INTEGER, "int."+s_name,
"An Integer.",
"count", [-50, 50], timestamp=self.io_loop.time(),
status=DeviceTestSensor.NOMINAL, value=i)
s.add_sensor(sensor)
# Add a unique request to each server
def handler(self, req, msg):
"""A new command."""
return Message.reply(msg.name, "ok", "bling1", "bling2")
s._request_handlers['sparkling-new-'+s_name] = handler
self.resource_container = resource_client.KATCPClientResourceContainer(
self.default_spec)
self.DUT = resource_client.ThreadSafeKATCPClientResourceWrapper(
self.resource_container, self.ioloop_thread_wrapper)
self.DUT.start()
self.DUT.until_synced()
def test_sensor(self):
self.assertEqual(self.DUT.sensor.resource1_int_resource1,
self.DUT.children['resource1'].sensor.int_resource1)
self.assertIs(self.DUT.sensor.resource1_int_resource1.reading,
self.resource_container.sensor.resource1_int_resource1.reading)
self.servers['resource2'].get_sensor('int.resource2').set_value(17)
reading = self.DUT.sensor.resource2_int_resource2.get_reading()
self.assertEqual(reading.value, 17)
self.assertEqual(reading.status, Sensor.STATUSES[Sensor.NOMINAL])
self.servers['resource2'].get_sensor('int.resource2').set_value(14)
self.assertEqual(self.DUT.sensor.resource2_int_resource2.get_value(), 14)
self.servers['resource2'].get_sensor('int.resource2').set_value(
10, Sensor.WARN)
self.assertEqual(self.DUT.sensor.resource2_int_resource2.get_status(),
Sensor.STATUSES[Sensor.WARN])
self.assertEqual(self.DUT.sensor.resource2_int_resource2.value, 10)
def test_children(self):
self.assertIs(type(self.DUT.children['resource1']),
resource_client.ThreadSafeKATCPClientResourceWrapper)
self.assertIs(self.DUT.children['resource1'].__subject__,
self.resource_container.children['resource1'])
self.assertIs(type(self.DUT.children['resource2']),
resource_client.ThreadSafeKATCPClientResourceWrapper)
self.assertIs(self.DUT.children['resource2'].__subject__,
self.resource_container.children['resource2'])
class test_monitor_resource_sync_state(tornado.testing.AsyncTestCase):
@tornado.testing.gen_test
def test_monitor_resource_sync_state(self):
m_res = mock.Mock()
callback = mock.Mock()
exit_event = AsyncEvent()
synced = AsyncEvent()
not_synced = AsyncEvent()
m_res.until_synced = synced.until_set
m_res.until_not_synced = not_synced.until_set
def set_synced(sync):
if sync:
not_synced.clear()
synced.set()
else:
synced.clear()
not_synced.set()
loop_done_future = resource_client.monitor_resource_sync_state(
m_res, callback, exit_event)
yield tornado.gen.moment
self.assertEqual(callback.call_args_list, [mock.call(False)])
callback.reset_mock()
# Check that it exits if exit_event is set
exit_event.set()
yield tornado.gen.moment
self.assertFalse(callback.called,
'No callback should be made when exit_event is set')
self.assertTrue(loop_done_future.done(),
'Monitor loop should terminate when exit_event is set')
exit_event.clear()
loop_done_future = resource_client.monitor_resource_sync_state(
m_res, callback, exit_event)
set_synced(True)
yield tornado.gen.moment
self.assertEqual(callback.call_args_list, [mock.call(False), mock.call(True)])
callback.reset_mock()
set_synced(False)
yield tornado.gen.moment
self.assertEqual(callback.call_args_list, [mock.call(False)])
callback.reset_mock()
# Now check exit_event when synced is set
set_synced(True)
yield tornado.gen.moment
self.assertEqual(callback.call_args_list, [mock.call(True)])
callback.reset_mock()
        self.assertFalse(loop_done_future.done(),
                         'Monitor loop should only terminate if exit_event is set')
exit_event.set()
yield tornado.gen.moment
self.assertFalse(callback.called)
self.assertTrue(loop_done_future.done(),
'Monitor loop should terminate when exit_event is set')
| null |
katcp/test/test_resource_client.py
|
test_resource_client.py
|
py
| 49,897 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "logging.getLogger",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "tornado.testing",
"line_number": 37,
"usage_type": "attribute"
},
{
"api_name": "tornado.concurrent.Future",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "tornado.concurrent",
"line_number": 39,
"usage_type": "attribute"
},
{
"api_name": "mock.Mock",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "katcp.resource_client.transform_future",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "katcp.resource_client",
"line_number": 41,
"usage_type": "name"
},
{
"api_name": "mock.Mock",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "tornado.gen",
"line_number": 50,
"usage_type": "attribute"
},
{
"api_name": "mock.Mock",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "katcp.resource_client.transform_future",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "katcp.resource_client",
"line_number": 55,
"usage_type": "name"
},
{
"api_name": "tornado.testing",
"line_number": 47,
"usage_type": "attribute"
},
{
"api_name": "tornado.concurrent.Future",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "tornado.concurrent",
"line_number": 60,
"usage_type": "attribute"
},
{
"api_name": "mock.Mock",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "katcp.resource_client.transform_future",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "katcp.resource_client",
"line_number": 64,
"usage_type": "name"
},
{
"api_name": "mock.Mock",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "unittest2.TestCase",
"line_number": 72,
"usage_type": "attribute"
},
{
"api_name": "mock.Mock",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "katcp.resource_client.KATCPClientResourceRequest",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "katcp.resource_client",
"line_number": 75,
"usage_type": "name"
},
{
"api_name": "katcp.resource.KATCPRequest",
"line_number": 82,
"usage_type": "attribute"
},
{
"api_name": "katcp.resource",
"line_number": 82,
"usage_type": "name"
},
{
"api_name": "tornado.testing",
"line_number": 90,
"usage_type": "attribute"
},
{
"api_name": "katcp.resource_client.KATCPClientResource",
"line_number": 97,
"usage_type": "call"
},
{
"api_name": "katcp.resource_client",
"line_number": 97,
"usage_type": "name"
},
{
"api_name": "mock.Mock",
"line_number": 108,
"usage_type": "call"
},
{
"api_name": "katcp.resource_client.KATCPClientResource",
"line_number": 109,
"usage_type": "call"
},
{
"api_name": "katcp.resource_client",
"line_number": 109,
"usage_type": "name"
},
{
"api_name": "katcp.resource_client.KATCPClientResource",
"line_number": 132,
"usage_type": "call"
},
{
"api_name": "katcp.resource_client",
"line_number": 132,
"usage_type": "name"
},
{
"api_name": "mock.Mock",
"line_number": 133,
"usage_type": "call"
},
{
"api_name": "tornado.concurrent.Future",
"line_number": 135,
"usage_type": "call"
},
{
"api_name": "tornado.concurrent",
"line_number": 135,
"usage_type": "attribute"
},
{
"api_name": "tornado.testing",
"line_number": 114,
"usage_type": "attribute"
},
{
"api_name": "katcp.resource_client.KATCPClientResource",
"line_number": 160,
"usage_type": "call"
},
{
"api_name": "katcp.resource_client",
"line_number": 160,
"usage_type": "name"
},
{
"api_name": "mock.create_autospec",
"line_number": 161,
"usage_type": "call"
},
{
"api_name": "katcp.resource_client.KATCPClientResourceSensorsManager",
"line_number": 162,
"usage_type": "call"
},
{
"api_name": "katcp.resource_client",
"line_number": 162,
"usage_type": "name"
},
{
"api_name": "mock.Mock",
"line_number": 162,
"usage_type": "call"
},
{
"api_name": "katcp.core.AttrDict",
"line_number": 163,
"usage_type": "call"
},
{
"api_name": "katcp.core.AttrDict",
"line_number": 164,
"usage_type": "call"
},
{
"api_name": "katcp.core.AttrDict",
"line_number": 165,
"usage_type": "call"
},
{
"api_name": "katcp.core.AttrDict",
"line_number": 166,
"usage_type": "call"
},
{
"api_name": "katcp.core.AttrDict",
"line_number": 170,
"usage_type": "call"
},
{
"api_name": "katcp.Sensor.INTEGER",
"line_number": 173,
"usage_type": "attribute"
},
{
"api_name": "katcp.Sensor",
"line_number": 173,
"usage_type": "name"
},
{
"api_name": "katcp.resource.KATCPSensor",
"line_number": 177,
"usage_type": "call"
},
{
"api_name": "katcp.resource",
"line_number": 177,
"usage_type": "name"
},
{
"api_name": "katcp.resource.KATCPSensorReading",
"line_number": 179,
"usage_type": "call"
},
{
"api_name": "katcp.resource",
"line_number": 179,
"usage_type": "name"
},
{
"api_name": "katcp.Sensor.NOMINAL",
"line_number": 180,
"usage_type": "attribute"
},
{
"api_name": "katcp.Sensor",
"line_number": 180,
"usage_type": "name"
},
{
"api_name": "katcp.resource.normalize_strategy_parameters",
"line_number": 187,
"usage_type": "call"
},
{
"api_name": "katcp.resource",
"line_number": 187,
"usage_type": "name"
},
{
"api_name": "katcp.resource.escape_name",
"line_number": 189,
"usage_type": "call"
},
{
"api_name": "katcp.resource",
"line_number": 189,
"usage_type": "name"
},
{
"api_name": "katcp.resource.SensorResultTuple",
"line_number": 196,
"usage_type": "call"
},
{
"api_name": "katcp.resource",
"line_number": 196,
"usage_type": "name"
},
{
"api_name": "katcp.resource.SensorResultTuple",
"line_number": 203,
"usage_type": "call"
},
{
"api_name": "katcp.resource",
"line_number": 203,
"usage_type": "name"
},
{
"api_name": "tornado.testing",
"line_number": 155,
"usage_type": "attribute"
},
{
"api_name": "katcp.resource_client.KATCPClientResource",
"line_number": 233,
"usage_type": "call"
},
{
"api_name": "katcp.resource_client",
"line_number": 233,
"usage_type": "name"
},
{
"api_name": "tornado.testing",
"line_number": 252,
"usage_type": "attribute"
},
{
"api_name": "katcp.testutils.DeviceTestServer",
"line_number": 255,
"usage_type": "call"
},
{
"api_name": "katcp.testutils.start_thread_with_cleanup",
"line_number": 256,
"usage_type": "call"
},
{
"api_name": "katcp.resource_client.KATCPClientResource",
"line_number": 265,
"usage_type": "call"
},
{
"api_name": "katcp.resource_client",
"line_number": 265,
"usage_type": "name"
},
{
"api_name": "tornado.gen.Return",
"line_number": 268,
"usage_type": "call"
},
{
"api_name": "tornado.gen",
"line_number": 268,
"usage_type": "attribute"
},
{
"api_name": "tornado.gen",
"line_number": 263,
"usage_type": "attribute"
},
{
"api_name": "tornado.testing.gen_test",
"line_number": 270,
"usage_type": "call"
},
{
"api_name": "tornado.testing",
"line_number": 270,
"usage_type": "attribute"
},
{
"api_name": "katcp.resource.KATCPResourceInactive",
"line_number": 287,
"usage_type": "attribute"
},
{
"api_name": "katcp.resource",
"line_number": 287,
"usage_type": "name"
},
{
"api_name": "tornado.testing.gen_test",
"line_number": 278,
"usage_type": "call"
},
{
"api_name": "tornado.testing",
"line_number": 278,
"usage_type": "attribute"
},
{
"api_name": "tornado.testing.gen_test",
"line_number": 297,
"usage_type": "call"
},
{
"api_name": "tornado.testing",
"line_number": 297,
"usage_type": "attribute"
},
{
"api_name": "katcp.testutils.DeviceTestSensor",
"line_number": 312,
"usage_type": "call"
},
{
"api_name": "katcp.testutils.DeviceTestSensor.INTEGER",
"line_number": 312,
"usage_type": "attribute"
},
{
"api_name": "katcp.testutils.DeviceTestSensor.NOMINAL",
"line_number": 315,
"usage_type": "attribute"
},
{
"api_name": "katcp.testutils.DeviceTestSensor",
"line_number": 315,
"usage_type": "name"
},
{
"api_name": "katcp.resource.escape_name",
"line_number": 318,
"usage_type": "call"
},
{
"api_name": "katcp.resource",
"line_number": 318,
"usage_type": "name"
},
{
"api_name": "katcp.Message.reply",
"line_number": 323,
"usage_type": "call"
},
{
"api_name": "katcp.Message",
"line_number": 323,
"usage_type": "name"
},
{
"api_name": "katcp.Message.inform",
"line_number": 329,
"usage_type": "call"
},
{
"api_name": "katcp.Message",
"line_number": 329,
"usage_type": "name"
},
{
"api_name": "katcp.Message.inform",
"line_number": 342,
"usage_type": "call"
},
{
"api_name": "katcp.Message",
"line_number": 342,
"usage_type": "name"
},
{
"api_name": "tornado.testing.gen_test",
"line_number": 305,
"usage_type": "call"
},
{
"api_name": "tornado.testing",
"line_number": 305,
"usage_type": "attribute"
},
{
"api_name": "katcp.testutils.TimewarpAsyncTestCase",
"line_number": 351,
"usage_type": "name"
},
{
"api_name": "katcp.testutils.DeviceTestServer",
"line_number": 354,
"usage_type": "call"
},
{
"api_name": "katcp.testutils.start_thread_with_cleanup",
"line_number": 355,
"usage_type": "call"
},
{
"api_name": "katcp.resource_client.KATCPClientResource",
"line_number": 364,
"usage_type": "call"
},
{
"api_name": "katcp.resource_client",
"line_number": 364,
"usage_type": "name"
},
{
"api_name": "tornado.gen.Return",
"line_number": 367,
"usage_type": "call"
},
{
"api_name": "tornado.gen",
"line_number": 367,
"usage_type": "attribute"
},
{
"api_name": "tornado.gen",
"line_number": 362,
"usage_type": "attribute"
},
{
"api_name": "katcp.Message.reply",
"line_number": 401,
"usage_type": "call"
},
{
"api_name": "katcp.Message",
"line_number": 401,
"usage_type": "name"
},
{
"api_name": "katcp.testutils.DeviceTestSensor",
"line_number": 407,
"usage_type": "call"
},
{
"api_name": "katcp.testutils.DeviceTestSensor.INTEGER",
"line_number": 407,
"usage_type": "attribute"
},
{
"api_name": "katcp.testutils.DeviceTestSensor.NOMINAL",
"line_number": 410,
"usage_type": "attribute"
},
{
"api_name": "katcp.testutils.DeviceTestSensor",
"line_number": 410,
"usage_type": "name"
},
{
"api_name": "katcp.resource.escape_name",
"line_number": 413,
"usage_type": "call"
},
{
"api_name": "katcp.resource",
"line_number": 413,
"usage_type": "name"
},
{
"api_name": "katcp.resource.escape_name",
"line_number": 414,
"usage_type": "call"
},
{
"api_name": "katcp.resource",
"line_number": 414,
"usage_type": "name"
},
{
"api_name": "tornado.testing",
"line_number": 369,
"usage_type": "attribute"
},
{
"api_name": "katcp.resource_client.KATCPClientResource",
"line_number": 430,
"usage_type": "call"
},
{
"api_name": "katcp.resource_client",
"line_number": 430,
"usage_type": "name"
},
{
"api_name": "tornado.gen",
"line_number": 432,
"usage_type": "attribute"
},
{
"api_name": "katcp.testutils.TimewarpAsyncTestCaseTimeAdvancer",
"line_number": 439,
"usage_type": "call"
},
{
"api_name": "tornado.testing.gen_test",
"line_number": 426,
"usage_type": "call"
},
{
"api_name": "tornado.testing",
"line_number": 426,
"usage_type": "attribute"
},
{
"api_name": "katcp.resource_client.KATCPClientResource",
"line_number": 455,
"usage_type": "call"
},
{
"api_name": "katcp.resource_client",
"line_number": 455,
"usage_type": "name"
},
{
"api_name": "tornado.gen",
"line_number": 457,
"usage_type": "attribute"
},
{
"api_name": "katcp.testutils.TimewarpAsyncTestCaseTimeAdvancer",
"line_number": 465,
"usage_type": "call"
},
{
"api_name": "tornado.testing.gen_test",
"line_number": 450,
"usage_type": "call"
},
{
"api_name": "tornado.testing",
"line_number": 450,
"usage_type": "attribute"
},
{
"api_name": "tornado.testing",
"line_number": 482,
"usage_type": "attribute"
},
{
"api_name": "copy.deepcopy",
"line_number": 491,
"usage_type": "call"
},
{
"api_name": "katcp.resource_client.KATCPClientResourceContainer",
"line_number": 500,
"usage_type": "call"
},
{
"api_name": "katcp.resource_client",
"line_number": 500,
"usage_type": "name"
},
{
"api_name": "copy.deepcopy",
"line_number": 500,
"usage_type": "call"
},
{
"api_name": "katcp.resource_client.KATCPClientResourceRequest",
"line_number": 514,
"usage_type": "call"
},
{
"api_name": "katcp.resource_client",
"line_number": 514,
"usage_type": "name"
},
{
"api_name": "tornado.concurrent.Future",
"line_number": 522,
"usage_type": "call"
},
{
"api_name": "tornado.concurrent",
"line_number": 522,
"usage_type": "attribute"
},
{
"api_name": "tornado.concurrent.Future",
"line_number": 527,
"usage_type": "call"
},
{
"api_name": "tornado.concurrent",
"line_number": 527,
"usage_type": "attribute"
},
{
"api_name": "katcp.resource.KATCPReply",
"line_number": 528,
"usage_type": "call"
},
{
"api_name": "katcp.resource",
"line_number": 528,
"usage_type": "name"
},
{
"api_name": "katcp.Message.reply",
"line_number": 528,
"usage_type": "call"
},
{
"api_name": "katcp.Message",
"line_number": 528,
"usage_type": "name"
},
{
"api_name": "mock.Mock",
"line_number": 536,
"usage_type": "call"
},
{
"api_name": "katcp.resource_client.ReplyWrappedInspectingClientAsync",
"line_number": 537,
"usage_type": "attribute"
},
{
"api_name": "katcp.resource_client",
"line_number": 537,
"usage_type": "name"
},
{
"api_name": "tornado.testing",
"line_number": 494,
"usage_type": "attribute"
},
{
"api_name": "mock.Mock",
"line_number": 571,
"usage_type": "call"
},
{
"api_name": "katcp.resource_client.KATCPClientResourceContainer",
"line_number": 572,
"usage_type": "call"
},
{
"api_name": "katcp.resource_client",
"line_number": 572,
"usage_type": "name"
},
{
"api_name": "katcp.resource.escape_name",
"line_number": 578,
"usage_type": "call"
},
{
"api_name": "katcp.resource",
"line_number": 578,
"usage_type": "name"
},
{
"api_name": "katcp.resource.escape_name",
"line_number": 580,
"usage_type": "call"
},
{
"api_name": "katcp.resource",
"line_number": 580,
"usage_type": "name"
},
{
"api_name": "katcp.resource_client.KATCPClientResourceContainer",
"line_number": 587,
"usage_type": "call"
},
{
"api_name": "katcp.resource_client",
"line_number": 587,
"usage_type": "name"
},
{
"api_name": "mock.Mock",
"line_number": 588,
"usage_type": "call"
},
{
"api_name": "katcp.resource_client.KATCPClientResourceContainer",
"line_number": 615,
"usage_type": "call"
},
{
"api_name": "katcp.resource_client",
"line_number": 615,
"usage_type": "name"
},
{
"api_name": "tornado.concurrent.Future",
"line_number": 623,
"usage_type": "call"
},
{
"api_name": "tornado.concurrent",
"line_number": 623,
"usage_type": "attribute"
},
{
"api_name": "functools.partial",
"line_number": 627,
"usage_type": "call"
},
{
"api_name": "tornado.concurrent",
"line_number": 628,
"usage_type": "attribute"
},
{
"api_name": "tornado.concurrent.Future",
"line_number": 639,
"usage_type": "call"
},
{
"api_name": "tornado.concurrent",
"line_number": 639,
"usage_type": "attribute"
},
{
"api_name": "functools.partial",
"line_number": 643,
"usage_type": "call"
},
{
"api_name": "tornado.concurrent",
"line_number": 644,
"usage_type": "attribute"
},
{
"api_name": "tornado.concurrent.Future",
"line_number": 646,
"usage_type": "call"
},
{
"api_name": "tornado.concurrent",
"line_number": 646,
"usage_type": "attribute"
},
{
"api_name": "functools.partial",
"line_number": 650,
"usage_type": "call"
},
{
"api_name": "tornado.concurrent",
"line_number": 651,
"usage_type": "attribute"
},
{
"api_name": "tornado.ioloop.IOLoop",
"line_number": 661,
"usage_type": "call"
},
{
"api_name": "tornado.ioloop",
"line_number": 661,
"usage_type": "attribute"
},
{
"api_name": "tornado.ioloop.IOLoop",
"line_number": 664,
"usage_type": "call"
},
{
"api_name": "tornado.ioloop",
"line_number": 664,
"usage_type": "attribute"
},
{
"api_name": "katcp.resource_client.KATCPClientResourceContainer",
"line_number": 666,
"usage_type": "call"
},
{
"api_name": "katcp.resource_client",
"line_number": 666,
"usage_type": "name"
},
{
"api_name": "katcp.resource.escape_name",
"line_number": 670,
"usage_type": "call"
},
{
"api_name": "katcp.resource",
"line_number": 670,
"usage_type": "name"
},
{
"api_name": "tornado.testing",
"line_number": 674,
"usage_type": "attribute"
},
{
"api_name": "katcp.testutils.DeviceTestServer",
"line_number": 683,
"usage_type": "call"
},
{
"api_name": "katcp.testutils.start_thread_with_cleanup",
"line_number": 685,
"usage_type": "call"
},
{
"api_name": "katcp.testutils.DeviceTestSensor",
"line_number": 688,
"usage_type": "call"
},
{
"api_name": "katcp.testutils.DeviceTestSensor.INTEGER",
"line_number": 688,
"usage_type": "attribute"
},
{
"api_name": "katcp.testutils.DeviceTestSensor.NOMINAL",
"line_number": 691,
"usage_type": "attribute"
},
{
"api_name": "katcp.testutils.DeviceTestSensor",
"line_number": 691,
"usage_type": "name"
},
{
"api_name": "katcp.Message.reply",
"line_number": 696,
"usage_type": "call"
},
{
"api_name": "katcp.Message",
"line_number": 696,
"usage_type": "name"
},
{
"api_name": "copy.deepcopy",
"line_number": 701,
"usage_type": "call"
},
{
"api_name": "katcp.resource_client.KATCPClientResourceContainer",
"line_number": 702,
"usage_type": "call"
},
{
"api_name": "katcp.resource_client",
"line_number": 702,
"usage_type": "name"
},
{
"api_name": "tornado.concurrent.futures.Future",
"line_number": 706,
"usage_type": "call"
},
{
"api_name": "tornado.concurrent",
"line_number": 706,
"usage_type": "attribute"
},
{
"api_name": "katcp.testutils.DeviceTestSensor",
"line_number": 716,
"usage_type": "call"
},
{
"api_name": "katcp.testutils.DeviceTestSensor.INTEGER",
"line_number": 716,
"usage_type": "attribute"
},
{
"api_name": "katcp.testutils.DeviceTestSensor.NOMINAL",
"line_number": 719,
"usage_type": "attribute"
},
{
"api_name": "katcp.testutils.DeviceTestSensor",
"line_number": 719,
"usage_type": "name"
},
{
"api_name": "mock.Mock",
"line_number": 724,
"usage_type": "call"
},
{
"api_name": "mock.Mock",
"line_number": 725,
"usage_type": "call"
},
{
"api_name": "mock.Mock",
"line_number": 726,
"usage_type": "call"
},
{
"api_name": "tornado.testing.gen_test",
"line_number": 699,
"usage_type": "call"
},
{
"api_name": "tornado.testing",
"line_number": 699,
"usage_type": "attribute"
},
{
"api_name": "copy.deepcopy",
"line_number": 751,
"usage_type": "call"
},
{
"api_name": "katcp.resource_client.KATCPClientResourceContainer",
"line_number": 752,
"usage_type": "call"
},
{
"api_name": "katcp.resource_client",
"line_number": 752,
"usage_type": "name"
},
{
"api_name": "tornado.concurrent.futures.Future",
"line_number": 756,
"usage_type": "call"
},
{
"api_name": "tornado.concurrent",
"line_number": 756,
"usage_type": "attribute"
},
{
"api_name": "katcp.testutils.DeviceTestSensor",
"line_number": 766,
"usage_type": "call"
},
{
"api_name": "katcp.testutils.DeviceTestSensor.INTEGER",
"line_number": 766,
"usage_type": "attribute"
},
{
"api_name": "katcp.testutils.DeviceTestSensor.NOMINAL",
"line_number": 769,
"usage_type": "attribute"
},
{
"api_name": "katcp.testutils.DeviceTestSensor",
"line_number": 769,
"usage_type": "name"
},
{
"api_name": "mock.Mock",
"line_number": 774,
"usage_type": "call"
},
{
"api_name": "mock.Mock",
"line_number": 775,
"usage_type": "call"
},
{
"api_name": "mock.Mock",
"line_number": 776,
"usage_type": "call"
},
{
"api_name": "tornado.testing.gen_test",
"line_number": 748,
"usage_type": "call"
},
{
"api_name": "tornado.testing",
"line_number": 748,
"usage_type": "attribute"
},
{
"api_name": "copy.deepcopy",
"line_number": 815,
"usage_type": "call"
},
{
"api_name": "katcp.resource_client.KATCPClientResourceContainer",
"line_number": 816,
"usage_type": "call"
},
{
"api_name": "katcp.resource_client",
"line_number": 816,
"usage_type": "name"
},
{
"api_name": "tornado.gen.Return",
"line_number": 819,
"usage_type": "call"
},
{
"api_name": "tornado.gen",
"line_number": 819,
"usage_type": "attribute"
},
{
"api_name": "tornado.gen",
"line_number": 812,
"usage_type": "attribute"
},
{
"api_name": "tornado.testing.gen_test",
"line_number": 822,
"usage_type": "call"
},
{
"api_name": "tornado.testing",
"line_number": 822,
"usage_type": "attribute"
},
{
"api_name": "tornado.testing.gen_test",
"line_number": 836,
"usage_type": "call"
},
{
"api_name": "tornado.testing",
"line_number": 836,
"usage_type": "attribute"
},
{
"api_name": "unittest2.TestCase",
"line_number": 857,
"usage_type": "attribute"
},
{
"api_name": "katcp.ioloop_manager.IOLoopManager",
"line_number": 859,
"usage_type": "call"
},
{
"api_name": "katcp.ioloop_manager",
"line_number": 859,
"usage_type": "name"
},
{
"api_name": "katcp.resource_client.IOLoopThreadWrapper",
"line_number": 861,
"usage_type": "call"
},
{
"api_name": "katcp.resource_client",
"line_number": 861,
"usage_type": "name"
},
{
"api_name": "katcp.testutils.start_thread_with_cleanup",
"line_number": 862,
"usage_type": "call"
},
{
"api_name": "thread.get_ident",
"line_number": 871,
"usage_type": "call"
},
{
"api_name": "thread.get_ident",
"line_number": 876,
"usage_type": "call"
},
{
"api_name": "thread.get_ident",
"line_number": 881,
"usage_type": "call"
},
{
"api_name": "katcp.resource_client.ThreadSafeMethodAttrWrapper",
"line_number": 884,
"usage_type": "attribute"
},
{
"api_name": "katcp.resource_client",
"line_number": 884,
"usage_type": "name"
},
{
"api_name": "concurrent.futures.Future",
"line_number": 890,
"usage_type": "call"
},
{
"api_name": "thread.get_ident",
"line_number": 891,
"usage_type": "call"
},
{
"api_name": "unittest2.TestCase",
"line_number": 907,
"usage_type": "attribute"
},
{
"api_name": "katcp.core.AttrDict",
"line_number": 909,
"usage_type": "call"
},
{
"api_name": "katcp.resource_client.AttrMappingProxy",
"line_number": 917,
"usage_type": "call"
},
{
"api_name": "katcp.resource_client",
"line_number": 917,
"usage_type": "name"
},
{
"api_name": "unittest2.TestCase",
"line_number": 932,
"usage_type": "attribute"
},
{
"api_name": "katcp.testutils.DeviceTestServer",
"line_number": 934,
"usage_type": "call"
},
{
"api_name": "katcp.testutils.start_thread_with_cleanup",
"line_number": 935,
"usage_type": "call"
},
{
"api_name": "katcp.ioloop_manager.IOLoopManager",
"line_number": 937,
"usage_type": "call"
},
{
"api_name": "katcp.ioloop_manager",
"line_number": 937,
"usage_type": "name"
},
{
"api_name": "katcp.resource_client.KATCPClientResource",
"line_number": 944,
"usage_type": "call"
},
{
"api_name": "katcp.resource_client",
"line_number": 944,
"usage_type": "name"
},
{
"api_name": "katcp.resource_client.IOLoopThreadWrapper",
"line_number": 949,
"usage_type": "call"
},
{
"api_name": "katcp.resource_client",
"line_number": 949,
"usage_type": "name"
},
{
"api_name": "katcp.testutils.start_thread_with_cleanup",
"line_number": 950,
"usage_type": "call"
},
{
"api_name": "katcp.resource_client.ThreadSafeKATCPClientResourceWrapper",
"line_number": 953,
"usage_type": "call"
},
{
"api_name": "katcp.resource_client",
"line_number": 953,
"usage_type": "name"
},
{
"api_name": "time.time",
"line_number": 961,
"usage_type": "call"
},
{
"api_name": "concurrent.futures.TimeoutError",
"line_number": 962,
"usage_type": "argument"
},
{
"api_name": "time.time",
"line_number": 964,
"usage_type": "call"
},
{
"api_name": "unittest2.TestCase",
"line_number": 991,
"usage_type": "attribute"
},
{
"api_name": "katcp.ioloop_manager.IOLoopManager",
"line_number": 994,
"usage_type": "call"
},
{
"api_name": "katcp.ioloop_manager",
"line_number": 994,
"usage_type": "name"
},
{
"api_name": "katcp.resource_client.IOLoopThreadWrapper",
"line_number": 998,
"usage_type": "call"
},
{
"api_name": "katcp.resource_client",
"line_number": 998,
"usage_type": "name"
},
{
"api_name": "katcp.testutils.start_thread_with_cleanup",
"line_number": 999,
"usage_type": "call"
},
{
"api_name": "katcp.testutils.DeviceTestServer",
"line_number": 1007,
"usage_type": "call"
},
{
"api_name": "katcp.testutils.start_thread_with_cleanup",
"line_number": 1009,
"usage_type": "call"
},
{
"api_name": "katcp.testutils.DeviceTestSensor",
"line_number": 1012,
"usage_type": "call"
},
{
"api_name": "katcp.testutils.DeviceTestSensor.INTEGER",
"line_number": 1012,
"usage_type": "attribute"
},
{
"api_name": "katcp.testutils.DeviceTestSensor.NOMINAL",
"line_number": 1015,
"usage_type": "attribute"
},
{
"api_name": "katcp.testutils.DeviceTestSensor",
"line_number": 1015,
"usage_type": "name"
},
{
"api_name": "katcp.Message.reply",
"line_number": 1020,
"usage_type": "call"
},
{
"api_name": "katcp.Message",
"line_number": 1020,
"usage_type": "name"
},
{
"api_name": "katcp.resource_client.KATCPClientResourceContainer",
"line_number": 1023,
"usage_type": "call"
},
{
"api_name": "katcp.resource_client",
"line_number": 1023,
"usage_type": "name"
},
{
"api_name": "katcp.resource_client.ThreadSafeKATCPClientResourceWrapper",
"line_number": 1025,
"usage_type": "call"
},
{
"api_name": "katcp.resource_client",
"line_number": 1025,
"usage_type": "name"
},
{
"api_name": "katcp.Sensor.STATUSES",
"line_number": 1038,
"usage_type": "attribute"
},
{
"api_name": "katcp.Sensor",
"line_number": 1038,
"usage_type": "name"
},
{
"api_name": "katcp.Sensor.NOMINAL",
"line_number": 1038,
"usage_type": "attribute"
},
{
"api_name": "katcp.Sensor.WARN",
"line_number": 1042,
"usage_type": "attribute"
},
{
"api_name": "katcp.Sensor",
"line_number": 1042,
"usage_type": "name"
},
{
"api_name": "katcp.Sensor.STATUSES",
"line_number": 1044,
"usage_type": "attribute"
},
{
"api_name": "katcp.Sensor",
"line_number": 1044,
"usage_type": "name"
},
{
"api_name": "katcp.Sensor.WARN",
"line_number": 1044,
"usage_type": "attribute"
},
{
"api_name": "katcp.resource_client.ThreadSafeKATCPClientResourceWrapper",
"line_number": 1049,
"usage_type": "attribute"
},
{
"api_name": "katcp.resource_client",
"line_number": 1049,
"usage_type": "name"
},
{
"api_name": "katcp.resource_client.ThreadSafeKATCPClientResourceWrapper",
"line_number": 1054,
"usage_type": "attribute"
},
{
"api_name": "katcp.resource_client",
"line_number": 1054,
"usage_type": "name"
},
{
"api_name": "tornado.testing",
"line_number": 1059,
"usage_type": "attribute"
},
{
"api_name": "mock.Mock",
"line_number": 1062,
"usage_type": "call"
},
{
"api_name": "mock.Mock",
"line_number": 1063,
"usage_type": "call"
},
{
"api_name": "katcp.core.AsyncEvent",
"line_number": 1064,
"usage_type": "call"
},
{
"api_name": "katcp.core.AsyncEvent",
"line_number": 1065,
"usage_type": "call"
},
{
"api_name": "katcp.core.AsyncEvent",
"line_number": 1066,
"usage_type": "call"
},
{
"api_name": "katcp.resource_client.monitor_resource_sync_state",
"line_number": 1076,
"usage_type": "call"
},
{
"api_name": "katcp.resource_client",
"line_number": 1076,
"usage_type": "name"
},
{
"api_name": "tornado.gen",
"line_number": 1078,
"usage_type": "attribute"
},
{
"api_name": "mock.call",
"line_number": 1079,
"usage_type": "call"
},
{
"api_name": "tornado.gen",
"line_number": 1083,
"usage_type": "attribute"
},
{
"api_name": "katcp.resource_client.monitor_resource_sync_state",
"line_number": 1089,
"usage_type": "call"
},
{
"api_name": "katcp.resource_client",
"line_number": 1089,
"usage_type": "name"
},
{
"api_name": "tornado.gen",
"line_number": 1092,
"usage_type": "attribute"
},
{
"api_name": "mock.call",
"line_number": 1093,
"usage_type": "call"
},
{
"api_name": "tornado.gen",
"line_number": 1096,
"usage_type": "attribute"
},
{
"api_name": "mock.call",
"line_number": 1097,
"usage_type": "call"
},
{
"api_name": "tornado.gen",
"line_number": 1101,
"usage_type": "attribute"
},
{
"api_name": "mock.call",
"line_number": 1102,
"usage_type": "call"
},
{
"api_name": "tornado.gen",
"line_number": 1107,
"usage_type": "attribute"
},
{
"api_name": "tornado.testing",
"line_number": 1060,
"usage_type": "attribute"
}
] |
441745577
|
# Same as the previous exercise, but employs scikit-learn's inbuilt RBF KPCA function
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_moons
from sklearn.decomposition import KernelPCA
X, y = make_moons(n_samples=100, random_state=123)
# kernel='rbf' selects the built-in RBF kernel; other choices are sketched below.
scikit_kpca = KernelPCA(n_components=2, kernel='rbf', gamma=15)
X_skernpca = scikit_kpca.fit_transform(X)
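# Illustrative (untuned) alternatives to the RBF kernel, for comparison:
#   KernelPCA(n_components=2, kernel='poly', degree=3)
#   KernelPCA(n_components=2, kernel='sigmoid', gamma=0.01)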
plt.figure(figsize=(8,6))
plt.scatter(X_skernpca[y==0, 0], X_skernpca[y==0, 1], color='red', alpha=0.5)
plt.scatter(X_skernpca[y==1, 0], X_skernpca[y==1, 1], color='blue', alpha=0.5)
plt.text(-0.48, 0.35, 'gamma = 15', fontsize=12)
plt.title('First 2 principal components after RBF Kernel PCA via scikit-learn')
plt.xlabel('PC1')
plt.ylabel('PC2')
#plt.show()
plt.savefig('../figs/tutorial/sebraex2_1.png')
plt.close()
scikit_kpca = KernelPCA(n_components=1, kernel='rbf', gamma=15)
X_skernpca = scikit_kpca.fit_transform(X)
plt.figure(figsize=(8,6))
plt.scatter(X_skernpca[y==0, 0], np.zeros((50,1)), color='red', alpha=0.5)
plt.scatter(X_skernpca[y==1, 0], np.zeros((50,1)), color='blue', alpha=0.5)
plt.text(-0.48, 0.007, 'gamma = 15', fontsize=12)
plt.title('First principal component after RBF Kernel PCA')
plt.xlabel('PC1')
#plt.show()
plt.savefig('../figs/tutorial/sebraex2_2.png')
plt.close()
| null |
tutorials/sebraex2.py
|
sebraex2.py
|
py
| 1,446 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "sklearn.datasets.make_moons",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "sklearn.decomposition.KernelPCA",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.scatter",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.scatter",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.text",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.title",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xlabel",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.ylabel",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.savefig",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.close",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "sklearn.decomposition.KernelPCA",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.scatter",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 35,
"usage_type": "name"
},
{
"api_name": "numpy.zeros",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.scatter",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 36,
"usage_type": "name"
},
{
"api_name": "numpy.zeros",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.text",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 37,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.title",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 38,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xlabel",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 39,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.savefig",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 41,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.close",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 42,
"usage_type": "name"
}
] |
331849879
|
# encoding:utf-8
from __future__ import print_function
import sys
import os
import time
import libvirt
domxml = """<domain type='kvm'>
<name>example</name>
<memory>131072</memory>
<vcpu>1</vcpu>
<os>
<type arch='x86_64' machine='pc-0.13'>hvm</type>
</os>
<devices>
<disk type='file' device='disk'>
      <driver name='qemu' type='qed'/>
<source file='/var/lib/libvirt/images/example.qed' />
<target dev='vda' bus='virtio'/>
</disk>
</devices>
</domain>"""
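# The guest's disk, example.qed, is created below with backing.qed as its
# backing file; blockPull() will flatten that chain at runtime.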
def do_cmd(cmdline):
status = os.system(cmdline)
if status < 0:
return -1
return status
def make_domain(conn):
do_cmd("qemu-img create -f raw /var/lib/libvirt/images/backing.qed 100M")
do_cmd("qemu-img create -f qed -b /var/libvirt/images/backing.qed " +
"/var/lib/libvirt/images/example.qed")
dom = conn.createXML(domxml, 0)
return dom
disk = "/var/lib/libvirt/images/example.qed"
conn = libvirt.open('qemu:///system')
if conn is None:
print('Failed to open connection to qemu:///system', file=sys.stderr)
exit(1)
dom = make_domain(conn)
if dom is None:
print('Failed to create domain', file=sys.stderr)
exit(1)
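# blockPull() starts an asynchronous block job that streams data from the
# backing file into example.qed; progress is polled via blockJobInfo() below.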
if dom.blockPull(disk, 0, 0) < 0:
print('Failed to start block pull', file=sys.stderr)
exit(1)
while 1:
    info = dom.blockJobInfo(disk, 0)
    if info is not None:
        # blockJobInfo returns a dict with 'cur' and 'end' progress counters
        if info['cur'] == info['end']:
            print('BlockPull complete')
            break
        else:
            print('BlockPull progress: %0.0f %%' %
                  (100.0 * info['cur'] / info['end']))
    else:
        print('Failed to query block jobs', file=sys.stderr)
        break
time.sleep(1)
os.unlink('/var/lib/libvirt/images/backing.qed')
os.unlink('/var/lib/libvirt/images/example.qed')
if dom is not None:
    dom.destroy()
conn.close()
exit(0)
| null |
projects/openstack/libvirt/guide/blockjob_example.py
|
blockjob_example.py
|
py
| 1,862 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "os.system",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "libvirt.open",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "sys.stderr",
"line_number": 43,
"usage_type": "attribute"
},
{
"api_name": "sys.stderr",
"line_number": 48,
"usage_type": "attribute"
},
{
"api_name": "sys.stderr",
"line_number": 52,
"usage_type": "attribute"
},
{
"api_name": "sys.stderr",
"line_number": 65,
"usage_type": "attribute"
},
{
"api_name": "time.sleep",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "os.unlink",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "os.unlink",
"line_number": 71,
"usage_type": "call"
}
] |
424249046
|
import math
import cv2
import numpy as np
frame_counter=0
kernel = np.ones((3,3), np.uint8)
def rect_to_bb(rect):
x = rect.left()
y = rect.top()
w = rect.right() - x
h = rect.bottom() - y
return x, y, w, h
def distance(point1, point2):
dist = math.sqrt((point2[0]-point1[0])**2+(point2[1]-point1[1])**2)
return dist
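# return_color_mask: keep only pixels whose HSV values fall inside the given
# range, clean the binary mask with morphological close/open plus dilation,
# and return the masked colour image together with the mask.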
def return_color_mask(min_color_value, max_color_value, image_hsv, morph_kernel, original_image):
color_mask = cv2.inRange(image_hsv, min_color_value, max_color_value)
resultant_color_image = cv2.bitwise_and(image_hsv, image_hsv, mask=color_mask)
h, s, resultant_color_gray = cv2.split(resultant_color_image)
_, thresh = cv2.threshold(resultant_color_gray, 0, 255, 0)
thresh = cv2.morphologyEx(thresh, cv2.MORPH_CLOSE, morph_kernel)
thresh = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, morph_kernel)
thresh = cv2.dilate(thresh, morph_kernel, iterations=3)
    cv2.imwrite('ref_image.jpg', thresh)  # save the cleaned-up mask for inspection
    color_image = cv2.bitwise_and(original_image, original_image, mask=thresh)
return color_image,thresh
check_list = []
most_pixel = []
cap = cv2.VideoCapture(0)
while True:
_, image = cap.read()
copied_image = image.copy()
copied_image1= image.copy()
imgray=cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
hsv_image = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
green_min = np.array([30, 35, 35], np.uint8)
green_max = np.array([90, 240, 240], np.uint8)
green_mask, thresh = return_color_mask(green_min, green_max, hsv_image, kernel, image)
thresh = cv2.morphologyEx(thresh, cv2.MORPH_CLOSE, kernel)
thresh = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, kernel)
thresh = cv2.dilate(thresh, kernel, iterations=2)
contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
if contours:
areas = [cv2.contourArea(c) for c in contours]
mask = np.zeros_like(image)
mask = cv2.bitwise_not(mask)
max_index = np.argmax(areas)
cnt = contours[max_index]
mask = np.zeros((image.shape[0], image.shape[1], 1), np.uint8)
mask = cv2.bitwise_not(mask)
cv2.drawContours(mask, [cnt], -1, (0, 0, 0), -1)
mask = cv2.bitwise_not(mask)
M = cv2.moments(cnt)
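        # Inpaint the largest green region away, then run Canny twice to pick
        # up residual edges around the patch and inpaint those as well.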
dst = cv2.inpaint(image, mask, 1, cv2.INPAINT_TELEA)
magic_image = cv2.bitwise_and(dst, dst, mask=mask)
edges = cv2.Canny(magic_image, 50, 300, apertureSize=3)
dst = cv2.inpaint(dst, edges, 1, cv2.INPAINT_TELEA)
magic_image = cv2.bitwise_and(dst, dst, mask=mask)
edges = cv2.Canny(magic_image, 50, 300, apertureSize=3)
dst = cv2.inpaint(dst, edges, 1, cv2.INPAINT_TELEA)
cv2.imshow('result_frame', dst)
cv2.imshow('frame', image)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
cap.release()
cv2.destroyAllWindows()
| null |
detecting_and_removing_green_color.py
|
detecting_and_removing_green_color.py
|
py
| 2,884 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "numpy.ones",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "numpy.uint8",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "math.sqrt",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "cv2.inRange",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "cv2.bitwise_and",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "cv2.split",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "cv2.threshold",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "cv2.morphologyEx",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "cv2.MORPH_CLOSE",
"line_number": 33,
"usage_type": "attribute"
},
{
"api_name": "cv2.morphologyEx",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "cv2.MORPH_OPEN",
"line_number": 34,
"usage_type": "attribute"
},
{
"api_name": "cv2.dilate",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "cv2.imwrite",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "cv2.bitwise_or",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "cv2.VideoCapture",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "cv2.cvtColor",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "cv2.COLOR_BGR2GRAY",
"line_number": 51,
"usage_type": "attribute"
},
{
"api_name": "cv2.cvtColor",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "cv2.COLOR_BGR2HSV",
"line_number": 52,
"usage_type": "attribute"
},
{
"api_name": "numpy.array",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "numpy.uint8",
"line_number": 54,
"usage_type": "attribute"
},
{
"api_name": "numpy.array",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "numpy.uint8",
"line_number": 55,
"usage_type": "attribute"
},
{
"api_name": "cv2.morphologyEx",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "cv2.MORPH_CLOSE",
"line_number": 60,
"usage_type": "attribute"
},
{
"api_name": "cv2.morphologyEx",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "cv2.MORPH_OPEN",
"line_number": 61,
"usage_type": "attribute"
},
{
"api_name": "cv2.dilate",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "cv2.findContours",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "cv2.RETR_TREE",
"line_number": 63,
"usage_type": "attribute"
},
{
"api_name": "cv2.CHAIN_APPROX_SIMPLE",
"line_number": 63,
"usage_type": "attribute"
},
{
"api_name": "cv2.contourArea",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "numpy.zeros_like",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "cv2.bitwise_not",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "numpy.argmax",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "numpy.uint8",
"line_number": 72,
"usage_type": "attribute"
},
{
"api_name": "cv2.bitwise_not",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "cv2.drawContours",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "cv2.bitwise_not",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "cv2.moments",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "cv2.inpaint",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "cv2.INPAINT_TELEA",
"line_number": 77,
"usage_type": "attribute"
},
{
"api_name": "cv2.bitwise_and",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "cv2.Canny",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "cv2.inpaint",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "cv2.INPAINT_TELEA",
"line_number": 81,
"usage_type": "attribute"
},
{
"api_name": "cv2.bitwise_and",
"line_number": 82,
"usage_type": "call"
},
{
"api_name": "cv2.Canny",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "cv2.inpaint",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "cv2.INPAINT_TELEA",
"line_number": 84,
"usage_type": "attribute"
},
{
"api_name": "cv2.imshow",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "cv2.imshow",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "cv2.waitKey",
"line_number": 89,
"usage_type": "call"
}
] |
285753376
|
#!/usr/bin/python
from log2chart.argparser import ArgParser
from log2chart.loggingobject import LoggingObject
from log2chart.moduleobjectproxy import ModuleObjectProxy
from log2chart.renderer.registry import RendererParserRegistry, RendererRegistry
import argparse
import pkgutil
import os
class RendererObjectProxy(ModuleObjectProxy):
def __init__(self, rendererName, module, registry, *args, **kwargs):
super(RendererObjectProxy, self).__init__('renderer',
rendererName, module, registry, *args, **kwargs)
class RendererParserProxy(RendererObjectProxy):
def __init__(self, rendererName, parser):
super(RendererParserProxy, self).__init__(rendererName,
'parser', RendererParserRegistry, parser)
class RendererProxy(RendererObjectProxy):
def __init__(self, rendererName, module, parser):
super(RendererProxy, self).__init__(rendererName,
module, RendererRegistry, parser)
class RendererFactory(LoggingObject):
class RendererParser(ArgParser):
parser = argparse.ArgumentParser(add_help = False)
subparser = parser.add_subparsers(dest = "command",
metavar = "command")
def getArgParser(self):
return self.parser
def __init__(self):
self.root = os.path.dirname(os.path.abspath(__file__))
# create a list of all renderer sub-packages
self.renderers = [ name for _, name, isPkg in
pkgutil.walk_packages(path = [self.root]) if isPkg ]
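        # each sub-package found here is treated as a renderer plugin; its
        # parser module registers the sub-commands that plugin can handle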
# mapping: command -> module
self.cmdToModule = {}
# mapping: command -> renderer
self.cmdToRenderer = {}
def makeArgParser(self):
parser = RendererFactory.RendererParser()
for r in self.renderers:
try:
p = RendererParserProxy(r, parser.subparser)
cmds = p.getCommands()
self.cmdToModule.update(cmds)
self.cmdToRenderer.update(dict(
zip(cmds.keys(), [r] * len(cmds))))
except ImportError as e:
self.logger.warning("Skipping [%s] due to: %s" %
(r, str(e)))
except KeyError as e:
self.logger.warning("Skipping [%s], most likely not registered: %s" %
(r, str(e)))
return parser
def makeRenderer(self, parser, cmd):
try:
renderer = RendererProxy(
self.cmdToRenderer[cmd],
self.cmdToModule[cmd],
parser)
except ImportError as e:
self.logger.warning("Failed [%s] due to: %s" %
(cmd, str(e)))
raise
except KeyError as e:
self.logger.warning("Couldn't lookup data for command: [%s], or renderer not registered: %s" %
(cmd, str(e)))
raise
else:
return renderer
| null |
log2chart/renderer/factory.py
|
factory.py
|
py
| 2,963 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "log2chart.moduleobjectproxy.ModuleObjectProxy",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "log2chart.renderer.registry.RendererParserRegistry",
"line_number": 23,
"usage_type": "argument"
},
{
"api_name": "log2chart.renderer.registry.RendererRegistry",
"line_number": 30,
"usage_type": "argument"
},
{
"api_name": "log2chart.loggingobject.LoggingObject",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "log2chart.argparser.ArgParser",
"line_number": 35,
"usage_type": "name"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "os.path.dirname",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 45,
"usage_type": "attribute"
},
{
"api_name": "os.path.abspath",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "pkgutil.walk_packages",
"line_number": 49,
"usage_type": "call"
}
] |
432779268
|
# -*- coding: utf-8 -*-
"""
Project : CoCoA
Date : april-june 2020
Authors : Olivier Dadoun, Julien Browaeys, Tristan Beau
Copyright ©CoCoa-team-17
License: See joint LICENSE file
Module : cocoaplot
About :
An interface module to easily plot cocoa data with bokeh
"""
import random
import math
import json
from datetime import datetime
import numpy as np
import pandas as pd
from cocoa import covid19 as cc
import bokeh
import bokeh.palettes
from bokeh.io import show, output_notebook
from bokeh.models import (ColumnDataSource, ColorBar, HoverTool, Legend,
                          CustomJS, Slider, Select, Plot, Button, LinearAxis,
                          Range1d, DatetimeTickFormatter, CheckboxGroup,
                          RadioGroup, Toggle, Label, LabelSet, Grid, Line,
                          DataRange1d)
from bokeh.models.widgets import Tabs, Panel
from bokeh.plotting import figure, output_file
from bokeh.palettes import brewer
from bokeh.layouts import row, column, gridplot
import plotly.express as px
import plotly.graph_objects as go
from branca.colormap import LinearColormap
import folium
from geopy.geocoders import Nominatim
import altair as alt
# output_notebook(hide_banner=True)
class CocoDisplay():
def __init__(self, d=0):
self.colors = bokeh.palettes.d3['Category10'][10]
self.hover_tool = HoverTool(tooltips=[
('cases', '@cases'),
('date', '@date{%F}')],
formatters={'date': 'datetime'}
)
self.coco_circle = []
self.coco_line = []
self.database = ''
self.p = cc.Parser()
def DefFigStatic(self, **kwargs):
if not isinstance(kwargs['country'], list):
clist = [kwargs['country']]
else:
clist = kwargs['country']
panels = []
option = kwargs.get('option', None)
if option == 'nonneg':
babypandas = self.p.getStats(country=clist, type=kwargs['type'], which=kwargs['which'],
output='pandas', option='nonneg')
else:
babypandas = self.p.getStats(
country=clist, type=kwargs['type'], which=kwargs['which'], output='pandas')
data = pd.pivot_table(babypandas, index='date',
columns='country', values='cases').reset_index()
for axis_type in ["linear", "log"]:
fig = figure(plot_width=600, plot_height=400, y_axis_type=axis_type,
tools=[self.hover_tool, 'box_zoom,box_select,crosshair,reset'])
fig.xaxis.formatter = DatetimeTickFormatter(
days=["%d %B %Y"], months=["%d %B %Y"], years=["%d %B %Y"])
i = 0
for coun in sorted(clist):
filter_data = data[['date', coun]].rename(
columns={coun: 'cases'})
src = ColumnDataSource(filter_data)
fig.line(x='date', y='cases', source=src,
line_color=self.colors[i], legend_label=coun, line_width=2)
i += 1
fig.legend.location = "top_left"
if kwargs['which'] == 'confirmed' and self.database == 'aphp':
kwargs['which'] = 'Rea.'
fig.legend.title = kwargs['which'].upper()
fig.legend.title_text_font_style = "bold"
fig.legend.title_text_font_size = "15px"
panel = Panel(child=fig, title=axis_type)
panels.append(panel)
tabs = Tabs(tabs=panels)
return tabs
def DefFigInteractive(self, **kwargs):
if not isinstance(kwargs['country'], list):
clist = [kwargs['country']]
else:
clist = kwargs['country']
panels = []
curvos = []
option = kwargs.get('option', None)
if option == 'nonneg':
babypandas = self.p.getStats(country=clist, type=kwargs['type'], which=kwargs['which'],
output='pandas', option='nonneg')
else:
babypandas = self.p.getStats(
country=clist, type=kwargs['type'], which=kwargs['which'], output='pandas')
data = pd.pivot_table(babypandas, index='date',
columns='country', values='cases').reset_index()
filter_data1 = data[['date', clist[0]]].rename(
columns={clist[0]: 'cases'})
src1 = ColumnDataSource(filter_data1)
filter_data2 = data[['date', clist[1]]].rename(
columns={clist[1]: 'cases'})
src2 = ColumnDataSource(filter_data2)
for axis_type in ["linear", "log"]:
fig = figure(plot_width=600, plot_height=400, y_axis_type=axis_type,
tools=[self.hover_tool, 'box_zoom,box_select,crosshair,reset'])
fig.xaxis.formatter = DatetimeTickFormatter(
days=["%d %B %Y"], months=["%d %B %Y"], years=["%d %B %Y"])
fig.circle('date', 'cases', size=7, color='red', source=src1)
fig.line(x='date', y='cases', source=src1,
line_color='red', line_width=3, line_alpha=.8)
fig.circle('date', 'cases', size=7, color='blue', source=src2)
fig.line(x='date', y='cases', source=src2,
line_color='blue', line_width=3, line_alpha=.8)
if kwargs['which'] == 'confirmed' and self.database == 'aphp':
kwargs['which'] = 'Rea.'
label = Label(x=70, y=350, x_units='screen', y_units='screen',
text=kwargs['which'], render_mode='css',
border_line_color='black', border_line_alpha=1.0,
background_fill_color='white', background_fill_alpha=1.0)
fig.add_layout(label)
panel = Panel(child=fig, title=axis_type)
panels.append(panel)
code = """
var c = cb_obj.value;
var y = s0.data[c];
s1.data['cases'] = y;
s1.change.emit();
ax=p1.yaxis[0]
"""
source = ColumnDataSource(data)
callback1 = CustomJS(args=dict(s0=source, s1=src1), code=code)
callback2 = CustomJS(args=dict(s0=source, s1=src2), code=code)
select_countries1 = Select(
title="RED CURVE:", value=clist[0], options=clist)
select_countries1.js_on_change('value', callback1)
select_countries2 = Select(
title="BLUE CURVE", value=clist[1], options=clist)
select_countries2.js_on_change('value', callback2)
tabs = Tabs(tabs=panels)
layout = row(
column(row(select_countries1, select_countries2), row(tabs)))
return layout
def CrystalFig(self, crys, err_y):
sline = []
scolumn = []
i = 1
list_fits_fig = crys.GetListFits()
for dct in list_fits_fig:
for key, value in dct.items():
country = key
if math.nan not in value[0] and math.nan not in value[1]:
maxy = crys.GetFitsParameters()[country][1]
if math.isnan(maxy) == False:
maxy = int(maxy)
leg = 'From fit : tmax:' + \
str(crys.GetFitsParameters()[country][0])
leg += ' Tot deaths:' + str(maxy)
fig = figure(plot_width=300, plot_height=200,
tools=['box_zoom,box_select,crosshair,reset'], title=leg, x_axis_type="datetime")
date = [datetime.strptime(i, '%m/%d/%y')
for i in self.p.getDates()]
if err_y:
fig.circle(
date, value[0], color=self.colors[i % 10], legend_label=country)
y_err_x = []
y_err_y = []
                        for xval, yval in zip(date, value[0]):
                            err = np.sqrt(np.abs(yval))
                            y_err_x.append((xval, xval))
                            y_err_y.append((yval - err, yval + err))
fig.multi_line(y_err_x, y_err_y,
color=self.colors[i % 10])
else:
fig.line(
date, value[0], line_color=self.colors[i % 10], legend_label=country)
fig.line(date[:crys.GetTotalDaysConsidered(
)], value[1][:crys.GetTotalDaysConsidered()], line_color='red', line_width=2)
fig.xaxis.formatter = DatetimeTickFormatter(
days=["%d %b %y"], months=["%d %b %y"], years=["%d %b %y"])
fig.xaxis.major_label_orientation = math.pi/4
fig.xaxis.ticker.desired_num_ticks = 10
# tot_type_country=self.p.getStats(country=country,type='Cumul',which='deaths')[-1]
fig.legend.location = "top_left"
fig.legend.title_text_font_style = "bold"
fig.legend.title_text_font_size = "5px"
scolumn.append(fig)
if i % 2 == 0:
sline.append(scolumn)
scolumn = []
i += 1
fig = gridplot(sline)
return fig
def __delete__(self, instance):
print("deleted in descriptor object")
del self.value
class WorldMapDisplay():
def __init__(self, countries, cumul_or_diff, which_data):
self.geolocator = Nominatim(
user_agent="Worldmap for Covid-19 studing case")
# ,tiles="cartodbpositron")#,"CartoDB dark_matter")
self.world_map = folium.Map(width=600, height=400, location=[
48.52, 2.19], zoom_start=3)
self.countries = sorted(countries)
self.which_data = which_data
p = cc.Parser()
babypandas = (p.getStats(country=self.countries,type=cumul_or_diff,
which=which_data, output='pandas'))
babypandascumul = babypandas
babypandascumul['cumul'] = babypandas.groupby(
['country'])['cases'].apply(lambda x: x.cumsum())
mask_date_max = babypandas.groupby(['country'])['date'].max()
babypandascumulmasked_date = babypandascumul['date'].isin(
mask_date_max)
self.data = pd.pivot_table(
babypandas, index='date', columns='country', values='cases').reset_index()
if cumul_or_diff == 'cumul':
self.data = pd.pivot_table(
babypandascumul, index='date', columns='country', values='cumul').reset_index()
map_data = pd.DataFrame({
'country': self.countries,
'totcases': babypandascumul[babypandascumulmasked_date]['cumul'].to_list()
})
self.totalsallcountries = sum(
babypandascumul[babypandascumulmasked_date]['cumul'])
self.maxdeaths = max(
babypandascumul[babypandascumulmasked_date]['cumul'])
self.map_dict = map_data.set_index('country')['totcases'].to_dict()
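        # map_dict (country name -> latest cumulative count) drives both the
        # popup-circle radii and the choropleth colour scale below.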
    def LatLong(self, country):
        Lat, Long = float("Nan"), float("Nan")
        if country is not None:
            location = self.geolocator.geocode(country)
            if location is not None:
                Lat = location.latitude
                Long = location.longitude
        return (Lat, Long)
def DrawPopUpCircle(self):
for coun in self.countries:
filter_data = self.data[['date', coun]].rename(
columns={coun: 'cases'})
tot = self.map_dict[coun]
latlong = self.LatLong(coun)
start_coords = [latlong[0], latlong[1]]
source = pd.DataFrame(
{
'date': filter_data['date'],
'cases': filter_data['cases'],
})
if sum(filter_data['cases']) != 0:
chart = alt.Chart(source).mark_line().encode(
alt.X('date', axis=alt.Axis(title='Date')),
alt.Y('cases', axis=alt.Axis(title='Cases'))).properties(title=coun.upper())
vis1 = chart.to_json()
vega = folium.features.VegaLite(
vis1, width='100%', height='100%')
#
maxrad = 50
circ_mkr = folium.CircleMarker(
location=start_coords,
radius=maxrad*tot/self.totalsallcountries,
color='blue',
fill=True,
fill_color='red',
fillOpacity=1.0,
opacity=1.0,
tooltip=coun,
popup=folium.Popup(max_width=300).add_child(vega))
circ_mkr.add_to(self.world_map)
def drawCountry(self):
folium.GeoJson(
data='https://raw.githubusercontent.com/johan/world.geo.json/master/countries.geo.json',
style_function=lambda feature: {
'fillColor': self.getColor(feature),
'caption': 'Total deaths',
'fillOpacity': 0.5,
'weight': 0.5
}).add_to(self.world_map)
def getColor(self, feature):
value = self.map_dict.get(feature['properties']['name'])
self.color_scale = LinearColormap(['yellow', 'red'],
vmin=min(self.map_dict.values()), vmax=max(self.map_dict.values()))
# vmin = 0, vmax = 150)
if value is None:
return '#8c8c8c' # MISSING -> gray
else:
return self.color_scale(value)
def returnMap(self):
self.drawCountry()
self.DrawPopUpCircle()
colormap = self.color_scale.to_step(len(self.countries))
colormap.caption = self.which_data.upper()
self.world_map.add_child(colormap)
return self.world_map
| null |
cocoaplot/display.py
|
display.py
|
py
| 14,144 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "bokeh.palettes",
"line_number": 52,
"usage_type": "attribute"
},
{
"api_name": "bokeh.models.HoverTool",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "cocoa.covid19.Parser",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "cocoa.covid19",
"line_number": 61,
"usage_type": "name"
},
{
"api_name": "pandas.pivot_table",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "bokeh.plotting.figure",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "bokeh.models.DatetimeTickFormatter",
"line_number": 82,
"usage_type": "call"
},
{
"api_name": "bokeh.models.ColumnDataSource",
"line_number": 89,
"usage_type": "call"
},
{
"api_name": "bokeh.models.widgets.Panel",
"line_number": 99,
"usage_type": "call"
},
{
"api_name": "bokeh.models.widgets.Tabs",
"line_number": 101,
"usage_type": "call"
},
{
"api_name": "pandas.pivot_table",
"line_number": 120,
"usage_type": "call"
},
{
"api_name": "bokeh.models.ColumnDataSource",
"line_number": 124,
"usage_type": "call"
},
{
"api_name": "bokeh.models.ColumnDataSource",
"line_number": 128,
"usage_type": "call"
},
{
"api_name": "bokeh.plotting.figure",
"line_number": 131,
"usage_type": "call"
},
{
"api_name": "bokeh.models.DatetimeTickFormatter",
"line_number": 134,
"usage_type": "call"
},
{
"api_name": "bokeh.models.Label",
"line_number": 147,
"usage_type": "call"
},
{
"api_name": "bokeh.models.widgets.Panel",
"line_number": 154,
"usage_type": "call"
},
{
"api_name": "bokeh.models.ColumnDataSource",
"line_number": 164,
"usage_type": "call"
},
{
"api_name": "bokeh.models.CustomJS",
"line_number": 165,
"usage_type": "call"
},
{
"api_name": "bokeh.models.CustomJS",
"line_number": 166,
"usage_type": "call"
},
{
"api_name": "bokeh.models.Select",
"line_number": 168,
"usage_type": "call"
},
{
"api_name": "bokeh.models.Select",
"line_number": 173,
"usage_type": "call"
},
{
"api_name": "bokeh.models.widgets.Tabs",
"line_number": 177,
"usage_type": "call"
},
{
"api_name": "bokeh.layouts.row",
"line_number": 179,
"usage_type": "call"
},
{
"api_name": "bokeh.layouts.column",
"line_number": 180,
"usage_type": "call"
},
{
"api_name": "bokeh.layouts.row",
"line_number": 180,
"usage_type": "call"
},
{
"api_name": "math.nan",
"line_number": 191,
"usage_type": "attribute"
},
{
"api_name": "math.isnan",
"line_number": 193,
"usage_type": "call"
},
{
"api_name": "bokeh.plotting.figure",
"line_number": 198,
"usage_type": "call"
},
{
"api_name": "datetime.strptime",
"line_number": 201,
"usage_type": "call"
},
{
"api_name": "plotly.express",
"line_number": 208,
"usage_type": "name"
},
{
"api_name": "plotly.express",
"line_number": 210,
"usage_type": "name"
},
{
"api_name": "bokeh.models.DatetimeTickFormatter",
"line_number": 221,
"usage_type": "call"
},
{
"api_name": "math.pi",
"line_number": 223,
"usage_type": "attribute"
},
{
"api_name": "bokeh.layouts.gridplot",
"line_number": 237,
"usage_type": "call"
},
{
"api_name": "geopy.geocoders.Nominatim",
"line_number": 248,
"usage_type": "call"
},
{
"api_name": "folium.Map",
"line_number": 251,
"usage_type": "call"
},
{
"api_name": "cocoa.covid19.Parser",
"line_number": 255,
"usage_type": "call"
},
{
"api_name": "cocoa.covid19",
"line_number": 255,
"usage_type": "name"
},
{
"api_name": "pandas.pivot_table",
"line_number": 265,
"usage_type": "call"
},
{
"api_name": "pandas.pivot_table",
"line_number": 268,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 271,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 299,
"usage_type": "call"
},
{
"api_name": "altair.Chart",
"line_number": 305,
"usage_type": "call"
},
{
"api_name": "altair.X",
"line_number": 306,
"usage_type": "call"
},
{
"api_name": "altair.Axis",
"line_number": 306,
"usage_type": "call"
},
{
"api_name": "altair.Y",
"line_number": 307,
"usage_type": "call"
},
{
"api_name": "altair.Axis",
"line_number": 307,
"usage_type": "call"
},
{
"api_name": "folium.features.VegaLite",
"line_number": 309,
"usage_type": "call"
},
{
"api_name": "folium.features",
"line_number": 309,
"usage_type": "attribute"
},
{
"api_name": "folium.CircleMarker",
"line_number": 314,
"usage_type": "call"
},
{
"api_name": "folium.Popup",
"line_number": 323,
"usage_type": "call"
},
{
"api_name": "folium.GeoJson",
"line_number": 327,
"usage_type": "call"
},
{
"api_name": "branca.colormap.LinearColormap",
"line_number": 338,
"usage_type": "call"
}
] |
641837362
|
import os
import json
from flask import Flask
import boto3
from werkzeug.utils import secure_filename
from flask import request
import tempfile
def create_app():
app = Flask(__name__)
services = json.loads(os.getenv("VCAP_SERVICES"))
host = services["predix-blobstore"][0]["credentials"]["host"]
if "https://" not in host:
host = "https://" + host
credentials = services["predix-blobstore"][0]["credentials"]
access_key_id = credentials["access_key_id"]
secret_access_key = credentials["secret_access_key"]
bucket_name = credentials["bucket_name"]
session = boto3.session.Session(
aws_access_key_id=access_key_id, aws_secret_access_key=secret_access_key
)
config = boto3.session.Config(
signature_version="s3",
s3={"addressing_style": "virtual"},
max_pool_connections=10000,
)
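    # Predix blobstore speaks the S3 protocol; "virtual" addressing puts the
    # bucket name in the hostname rather than in the request path.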
client = session.client("s3", endpoint_url=host, config=config)
@app.route("/", methods=["POST"])
def upload_files():
logs = []
for file in request.files.getlist("files[]"):
with tempfile.NamedTemporaryFile(prefix="upload_", dir="/tmp") as tmpfile:
file.save(tmpfile.name)
filename = secure_filename(file.filename)
logs.append(
client.upload_file(
tmpfile.name,
bucket_name,
filename,
ExtraArgs={"ServerSideEncryption": "AES256"},
)
)
return " ".join(str(x) for x in logs), 200
return app
| null |
server/__init__.py
|
__init__.py
|
py
| 1,614 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "flask.Flask",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "os.getenv",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "boto3.session.Session",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "boto3.session",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "boto3.session.Config",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "boto3.session",
"line_number": 26,
"usage_type": "attribute"
},
{
"api_name": "flask.request.files.getlist",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "flask.request.files",
"line_number": 37,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 37,
"usage_type": "name"
},
{
"api_name": "tempfile.NamedTemporaryFile",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "werkzeug.utils.secure_filename",
"line_number": 40,
"usage_type": "call"
}
] |
100688706
|
# memorandi.location.models
# Models for the location metadata
#
# Author: Benjamin Bengfort <[email protected]>
# Created: Tue Feb 11 14:41:06 2014 -0500
#
# Copyright (C) 2014 Bengfort.com
# For license information, see LICENSE.txt
#
# ID: models.py [] [email protected] $
"""
Models for the location metadata
"""
##########################################################################
## Imports
##########################################################################
import os
from .managers import *
from utils import nullable
from django.db import models
from model_utils import Choices
from model_utils.models import TimeStampedModel
##########################################################################
## Models
##########################################################################
class Location(TimeStampedModel):
"""
A generic wrapper class that embeds location meta data into the memos.
Note that a location can be very generic (including a completely null
location). There are some constraints on uniqueness in the meta, but
this data type should be handled well.
Note that I chose not to use the Django contrib GeoDjango package for
spatial data. I felt that this was WAY too much information. However,
I believe the data types stored in this model can be used to leverage
GIS data in the future.
"""
name = models.CharField( max_length=255, **nullable ) # A place name, e.g. "Home"
address = models.CharField( max_length=255, **nullable ) # A specific address
city = models.CharField( max_length=255, **nullable ) # Name of the city
country = models.ForeignKey( "GeoEntity", related_name="+", **nullable ) # Country GeoEntity
region = models.ForeignKey( "GeoEntity", related_name="+", **nullable ) # Region GeoEntity
latitude = models.FloatField( **nullable ) # Decimal latitude
longitude = models.FloatField( **nullable ) # Decimal longitude
postal_code = models.CharField( max_length=31, **nullable ) # Postal code
ipaddr = models.GenericIPAddressField( **nullable ) # IP Address of request
station = models.CharField( max_length=50, **nullable ) # Prefered Weather Station
# Location manager
objects = LocationManager()
class Meta:
db_table = "location"
verbose_name = "location"
unique_together = (
("latitude", "longitude"),
("name", "address", "city", "country", "region", "postal_code"),
)
ordering = ["-modified",]
get_latest_by = "modified"
verbose_name_plural = "locations"
@classmethod
def from_mmdb(klass, record):
"""
Constructs a location instance from a Maximind DB record
"""
kwargs = {
'city': record.city.name,
'country': GeoEntity.objects.iso_code(record.country.iso_code),
'postal_code': record.postal.code,
'latitude': record.location.latitude,
'longitude': record.location.longitude,
'ipaddr': record.traits.ip_address,
}
if len(record.subdivisions) > 0:
kwargs['region'] = GeoEntity.objects.iso_code(record.subdivisions[0].iso_code)
return klass(**kwargs)
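    # Hypothetical usage with a MaxMind GeoIP2 reader (names illustrative):
    #   record = reader.city("203.0.113.7")
    #   location = Location.from_mmdb(record)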
def __unicode__(self):
"""
Construct a string representation of location.
"""
s = u"" # Begin string
f = [] # Begin format
# Add name format
if self.name:
s += "%s "
f.append(self.name)
# Try city and state
if self.city and self.region:
s = s + "in %s, %s" if s else "%s, %s"
f.append(self.city)
f.append(self.region.iso_code)
# Try city and country
elif self.city and self.country:
s = s + "in %s, %s" if s else "%s, %s"
f.append(self.city)
f.append(self.country)
# Try Postal Code
elif self.postal_code:
s += "(%s)" if s else "%s"
f.append(self.postal_code)
# Try Longitude and Latitude
elif self.longitude and self.latitude:
s += "(%f, %f)"
f.append(self.latitude)
f.append(self.longitude)
s = s % tuple(f)
return s.strip()
def to_query(self):
"""
Returns a string to send to the Weather Underground API.
Is this too coupled to the weather app?
"""
if self.station:
return "pws:%s" % self.station
elif self.latitude and self.longitude:
return ",".join((str(self.latitude), str(self.longitude)))
elif self.country:
if self.country.iso_code in ("US", "USA"):
if self.postal_code:
return self.postal_code
elif self.region and self.city:
return os.path.join(self.region.name, self.city)
elif self.city:
return os.path.join(self.country.name, self.city)
elif self.region and self.city:
return os.path.join(self.region, self.city)
else:
return self.name # Will work if it's an airport code ...
class GeoEntity(TimeStampedModel):
"""
A database of geographic entities, e.g. regions or countries that have
ISO codes and common names associated with them in different languages.
This is simply for ease of data storage and lookups on location table.
    Technically, by leveraging the parent, any geographic entity can be
    referenced through the smallest entity (the region), and all other
    data can then be grabbed upwards. However, it's nice to have a
    reference to the country and the region in the location model.
"""
TYPES = Choices(
(0, "continent", "Continent"),
(1, "country", "Country"),
(2, "region", "Region"),
)
name = models.CharField( max_length=255 ) # Name of the region or country
iso_code = models.CharField( max_length=3 ) # ISO Code for the region or country
region_type = models.PositiveSmallIntegerField( choices=TYPES, default=TYPES.country ) # Type of Geographic Region
parent = models.ForeignKey( "GeoEntity", related_name="+", **nullable ) # Regions specify country as parent
# Geography Manager
objects = GeographyManager()
class Meta:
db_table = "geographic_entity"
verbose_name = "geographic entity"
unique_together = ("name", "iso_code", "region_type")
verbose_name_plural = "geographic entities"
def __unicode__(self):
return self.name
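# A minimal lookup sketch using the manager method referenced in from_mmdb:
#   usa = GeoEntity.objects.iso_code('US')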
| null |
memorandi/location/models.py
|
models.py
|
py
| 6,769 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "model_utils.models.TimeStampedModel",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "django.db.models.CharField",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 45,
"usage_type": "name"
},
{
"api_name": "utils.nullable",
"line_number": 45,
"usage_type": "name"
},
{
"api_name": "django.db.models.CharField",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 46,
"usage_type": "name"
},
{
"api_name": "utils.nullable",
"line_number": 46,
"usage_type": "name"
},
{
"api_name": "django.db.models.CharField",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 47,
"usage_type": "name"
},
{
"api_name": "utils.nullable",
"line_number": 47,
"usage_type": "name"
},
{
"api_name": "django.db.models.ForeignKey",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 48,
"usage_type": "name"
},
{
"api_name": "utils.nullable",
"line_number": 48,
"usage_type": "name"
},
{
"api_name": "django.db.models.ForeignKey",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 49,
"usage_type": "name"
},
{
"api_name": "utils.nullable",
"line_number": 49,
"usage_type": "name"
},
{
"api_name": "django.db.models.FloatField",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 50,
"usage_type": "name"
},
{
"api_name": "utils.nullable",
"line_number": 50,
"usage_type": "name"
},
{
"api_name": "django.db.models.FloatField",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 51,
"usage_type": "name"
},
{
"api_name": "utils.nullable",
"line_number": 51,
"usage_type": "name"
},
{
"api_name": "django.db.models.CharField",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 52,
"usage_type": "name"
},
{
"api_name": "utils.nullable",
"line_number": 52,
"usage_type": "name"
},
{
"api_name": "django.db.models.GenericIPAddressField",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 53,
"usage_type": "name"
},
{
"api_name": "utils.nullable",
"line_number": 53,
"usage_type": "name"
},
{
"api_name": "django.db.models.CharField",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 54,
"usage_type": "name"
},
{
"api_name": "utils.nullable",
"line_number": 54,
"usage_type": "name"
},
{
"api_name": "os.path.join",
"line_number": 143,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 143,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 146,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 146,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 149,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 149,
"usage_type": "attribute"
},
{
"api_name": "model_utils.models.TimeStampedModel",
"line_number": 154,
"usage_type": "name"
},
{
"api_name": "model_utils.Choices",
"line_number": 166,
"usage_type": "call"
},
{
"api_name": "django.db.models.CharField",
"line_number": 172,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 172,
"usage_type": "name"
},
{
"api_name": "django.db.models.CharField",
"line_number": 173,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 173,
"usage_type": "name"
},
{
"api_name": "django.db.models.PositiveSmallIntegerField",
"line_number": 174,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 174,
"usage_type": "name"
},
{
"api_name": "django.db.models.ForeignKey",
"line_number": 175,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 175,
"usage_type": "name"
},
{
"api_name": "utils.nullable",
"line_number": 175,
"usage_type": "name"
}
] |
396369260
|
import numpy as np
import matplotlib.pyplot as plt
import sklearn
import sklearn.datasets
import sklearn.linear_model
from planar_utils import plot_decision_boundary, sigmoid, load_planar_dataset, load_extra_datasets
noisy_circles, noisy_moons, blobs, gaussian_quantiles, no_structure = load_extra_datasets()
datasets = {"noisy_circles": noisy_circles,
"noisy_moons": noisy_moons,
"blobs": blobs,
"gaussian_quantiles": gaussian_quantiles}
# (choose your dataset)
dataset = "gaussian_quantiles"
X, Y = datasets[dataset]
X, Y = X.T, Y.reshape(1, Y.shape[0])
# make blobs binary
if dataset == "blobs":
Y = Y%2
# Visualize the data
plt.scatter(X[0, :], X[1, :], c=Y.ravel(), s=40, cmap=plt.cm.Spectral);
plt.show()
def layer_sizes(X, Y):
n_x = X.shape[0]
n_h = 4
n_y = Y.shape[0]
return (n_x, n_h, n_y)
def initialize_parameters(n_x, n_h, n_y):
b1 = np.zeros((n_h,1))
b2 = np.zeros((n_y,1))
W1 = np.random.randn(n_h, n_x) * 0.01
W2 = np.random.randn(n_y, n_h) * 0.01
return b1, W1, b2, W2
def ReLu(x):
return np.maximum(0, x)
def forward_propagation(X, b1, W1, b2, W2):
Z1 = np.dot(W1,X) + b1
A1 = np.tanh(Z1)
# A1 = ReLu(Z1) # using ReLu as the activation function.
Z2 = np.dot(W2,A1) + b2
A2 = sigmoid(Z2)
return Z1, A1, Z2, A2
def compute_cost(A2, Y, W1, W2):
    # W1 and W2 are accepted for a possible regularization term but are unused here.
    m = Y.shape[1]
    cost = (-1/m) * ( np.dot(np.log(A2),Y.T) + np.dot(np.log(1 - A2),(1 - Y).T) )
    cost = np.squeeze(cost)
    return cost
def reluDerivative(x):
    # Return the ReLU gradient without mutating the cached pre-activation in place.
    return (x > 0).astype(x.dtype)
def backward_propagation(b1, W1, b2, W2, Z1, A1, Z2, A2, X, Y):
m = X.shape[1]
dZ2 = A2 - Y
dW2 = (1/m) * np.dot(dZ2, A1.T)
db2 = (1/m) * np.sum(dZ2, axis=1, keepdims=True)
    dZ1 = np.dot(W2.T, dZ2) * (1 - np.power(A1, 2)) # A1 = tanh(Z1), so tanh'(Z1) = 1 - A1**2
    # dZ1 = np.dot(W2.T, dZ2) * reluDerivative(Z1) # for ReLU
dW1 = (1/m) * np.dot(dZ1, X.T)
db1 = (1/m) * np.sum(dZ1, axis=1, keepdims=True)
return dW1, db1, dW2, db2
def update_parameters(W1, b1, W2, b2, dW1, db1, dW2, db2, learning_rate=1.2):
W1 = W1 - learning_rate * dW1
W2 = W2 - learning_rate * dW2
b1 = b1 - learning_rate * db1
b2 = b2 - learning_rate * db2
return W1, b1, W2, b2
def nn_model(X, Y, n_h, num_iterations=10000, print_cost=False):
np.random.seed(3)
n_x, _, n_y = layer_sizes(X, Y)
b1, W1, b2, W2 = initialize_parameters(n_x, n_h, n_y)
for i in range(num_iterations):
Z1, A1, Z2, A2 = forward_propagation(X, b1, W1, b2, W2)
dW1, db1, dW2, db2 = backward_propagation(b1, W1, b2, W2, Z1, A1, Z2, A2, X, Y)
W1, b1, W2, b2 = update_parameters(W1, b1, W2, b2, dW1, db1, dW2, db2, learning_rate=1.2)
if print_cost:
if i%1000==0:
print ("Cost after iteration %i: %f" % (i, compute_cost(A2, Y, W1, W2)))
return W1, b1, W2, b2
def predict(W1, b1, W2, b2, X):
_, _, _, A2 = forward_propagation(X, b1, W1, b2, W2)
Y_hat = np.round(A2)
return Y_hat
W1, b1, W2, b2 = nn_model(X, Y, n_h = 4, num_iterations=10000, print_cost=True)
plot_decision_boundary(lambda x: predict( W1, b1, W2, b2, x.T), X, Y)
plt.title("Decision Boundary for hidden layer size " + str(4))
plt.show()
predictions = predict(W1, b1, W2, b2, X)
print ('Accuracy: %d' % float((np.dot(Y, predictions.T) + np.dot(1 - Y, 1 - predictions.T)) / float(Y.size) * 100) + '%')
# plt.figure(figsize=(16, 32))
# hidden_layer_sizes = [1, 2, 3, 4, 5, 20, 50]
# for i, n_h in enumerate(hidden_layer_sizes):
# plt.subplot(5, 2, i + 1)
# plt.title('Hidden Layer of size %d' % n_h)
# W1, b1, W2, b2 = nn_model(X, Y, n_h, num_iterations=5000)
# plot_decision_boundary(lambda x: predict(W1, b1, W2, b2, x.T), X, Y)
# predictions = predict(W1, b1, W2, b2, X)
# accuracy = float((np.dot(Y, predictions.T) + np.dot(1 - Y, 1 - predictions.T)) / float(Y.size) * 100)
# print ("Accuracy for {} hidden units: {} %".format(n_h, accuracy))
# plt.show()
# """
# Interpretation:
# The larger models (with more hidden units) are able to fit the training set better,
# until eventually the largest models overfit the data.
# The best hidden layer size seems to be around n_h = 5. Indeed, a value around here seems to
# fit the data well without incurring noticeable overfitting.
# You will also learn later about regularization, which lets you use very large models (such as n_h = 50)
# without much overfitting.
# """
# using ReLu function accuracy results :
# Accuracy for 1 hidden units: 63.74999999999999 %
# Accuracy for 2 hidden units: 63.74999999999999 %
# Accuracy for 3 hidden units: 61.25000000000001 %
# Accuracy for 4 hidden units: 70.75 %
# Accuracy for 5 hidden units: 68.25 %
# Accuracy for 20 hidden units: 76.0 %
# Accuracy for 50 hidden units: 82.75 %
| null |
extra_datasets.py
|
extra_datasets.py
|
py
| 4,937 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "planar_utils.load_extra_datasets",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.scatter",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.cm",
"line_number": 26,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "numpy.zeros",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "numpy.random.randn",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 41,
"usage_type": "attribute"
},
{
"api_name": "numpy.random.randn",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 42,
"usage_type": "attribute"
},
{
"api_name": "numpy.maximum",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "numpy.dot",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "numpy.tanh",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "numpy.dot",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "planar_utils.sigmoid",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "numpy.dot",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "numpy.log",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "numpy.squeeze",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "numpy.dot",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "numpy.sum",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "numpy.dot",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "numpy.power",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "numpy.dot",
"line_number": 82,
"usage_type": "call"
},
{
"api_name": "numpy.sum",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "numpy.random.seed",
"line_number": 95,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 95,
"usage_type": "attribute"
},
{
"api_name": "numpy.round",
"line_number": 111,
"usage_type": "call"
},
{
"api_name": "planar_utils.plot_decision_boundary",
"line_number": 117,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.title",
"line_number": 118,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 118,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 119,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 119,
"usage_type": "name"
},
{
"api_name": "numpy.dot",
"line_number": 122,
"usage_type": "call"
}
] |
205159755
|
import json
import os
import ffmpy
CONFIG_PATH = os.path.join(os.path.dirname(__file__), 'config.json')
with open(CONFIG_PATH, 'r', encoding='utf-8') as f:
CONFIG = json.load(f)
def compression(source_file, input_file, quality, path_to_ffmpeg):
if not path_to_ffmpeg:
path_to_ffmpeg = CONFIG['ffmpeg']
ff = ffmpy.FFmpeg(
executable=path_to_ffmpeg,
global_options=None,
inputs={source_file: None},
outputs={input_file: '-strict -2 -vf scale=-2:%s' % quality})
ff.run()
def get_parameters(filename, path_to_ffprobe):
if not path_to_ffprobe:
path_to_ffprobe = CONFIG['ffprobe']
ff = ffmpy.FFprobe(
executable=path_to_ffprobe,
global_options=['-v error -select_streams v:0 -show_entries stream=width,height,duration -of json'],
inputs={filename: None}
)
    # ffprobe's JSON output is captured via a scratch file and read back.
    with open('temp.json', 'w') as f:
        ff.run(stdout=f)
    with open('temp.json', 'r') as f:
        return json.load(f)
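# A minimal usage sketch (file names are hypothetical; the ffmpeg/ffprobe
# paths fall back to config.json when None is passed):
#   compression('input.mp4', 'output.mp4', quality=720, path_to_ffmpeg=None)
#   info = get_parameters('output.mp4', path_to_ffprobe=None)
#   print(info['streams'][0]['width'], info['streams'][0]['height'])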
| null |
core.py
|
core.py
|
py
| 992 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "os.path.join",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "os.path.dirname",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "json.load",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "ffmpy.FFmpeg",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "ffmpy.FFprobe",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "json.load",
"line_number": 33,
"usage_type": "call"
}
] |
562049240
|
## Initialization
import asyncio
import discord
import youtube_dl
from discord.ext import commands
from common import config, embedMessage, ytdlSrc, category
## Class setup
class play(commands.Cog):
def __init__(self, bot):
self.bot = bot
## Help stuff
self.hidden = False
self.category = category.getCategory(self.__module__)
self.description = 'Plays the specified media in voice chat.'
self.usage = f"""
{config.cfg['options']['prefix']}play <link>
{config.cfg['options']['prefix']}play <search>
"""
self.mustJoin = False
self.joinTarget = None
## Command defining
@commands.command(aliases=['p'])
async def play(self, ctx, *args):
if len(args) == 0:
if ctx.voice_client != None:
if ctx.voice_client.is_paused():
ctx.voice_client.resume()
embed = embedMessage.embed(
title = 'SUCCESS',
description = 'Playback has been resumed.'
)
await ctx.send(embed=embed)
return
            embed = embedMessage.embed(
                title = 'ERROR',
                description = 'You did not specify what I should play!',
                color = embedMessage.errorColor
            )
            await ctx.send(embed=embed)
            return
elif not ctx.author.voice.channel:
embed = embedMessage.embed(
title = 'ERROR',
description = 'You must be in a voice channel to play media.',
color = embedMessage.errorColor
)
await ctx.send(embed=embed)
return
elif not ctx.me.voice:
self.mustJoin = True
self.joinTarget = ctx.author.voice.channel
elif ctx.author.voice.channel != ctx.me.voice.channel:
embed = embedMessage.embed(
title = 'ERROR',
description = 'You must be connected to the same voice channel as the bot to play media.',
color = embedMessage.errorColor
)
await ctx.send(embed=embed)
return
## If more than one word is passed, collapse args into one string
if len(args) > 1:
media = " ".join(args)
else:
media = args[0]
if not ctx.guild.id in self.bot.player.nowPlaying.keys():
embed = embedMessage.embed(
title = "Loading...",
footer = 'Playlist loading can take a while,\nplease be patient.'
)
reply = await ctx.send(embed=embed)
self.bot.player.nowPlaying[ctx.guild.id] = {
"message":reply,
"song":None,
'url':None
}
await self.playAudio(media,ctx.guild)
async def playAudio(self,media,guild):
if guild.id in self.bot.player.nowPlaying.keys():
channel = self.bot.player.nowPlaying[guild.id]["message"].channel
title = None
if type(media) == dict:
title = media["title"]
media = media["name"]
ytdl_src = await ytdlSrc.ytdlSrc.from_url(media, self.bot, guild, loop=self.bot.loop, stream=True)
if not ytdl_src:
embed = embedMessage.embed(
title = 'ERROR',
description = 'Age-restricted video detected. Aborting.',
color = embedMessage.errorColor
)
await self.bot.player.nowPlaying[guild.id]["message"].delete()
self.bot.player.nowPlaying[guild.id]["message"] = await channel.send(embed=embed)
return
if not title:
title = ytdl_src.title
try:
voiceClient = await self.joinTarget.connect()
self.bot.player.connectedChannel[guild.id] = voiceClient
except discord.ClientException as er:
if er.args[0] == 'Already connected to a voice channel.':
pass
except AttributeError:
pass
try:
self.bot.player.connectedChannel[guild.id].play(ytdl_src, after=lambda e: self.onFinish(guild))
except discord.ClientException as er:
if er.args[0] == 'Already playing audio.':
if not guild.id in self.bot.player.queue.keys():
self.bot.player.queue[guild.id] = []
self.bot.player.queue[guild.id].append({
"name":media,
"title":title
})
embed = embedMessage.embed(
title = "Queued:",
description = title
)
await self.bot.player.nowPlaying[guild.id]["message"].delete()
self.bot.player.nowPlaying[guild.id]["message"] = await channel.send(embed=embed)
else:
embed = embedMessage.embed(
title = "Now Playing:",
description = title
)
await self.bot.player.nowPlaying[guild.id]["message"].delete()
self.bot.player.nowPlaying[guild.id] = {
"message":await channel.send(embed=embed),
"song":title,
'url':media
}
if ytdl_src.toQueue:
if not guild.id in self.bot.player.queue.keys():
self.bot.player.queue[guild.id] = []
for song in ytdl_src.toQueue:
self.bot.player.queue[guild.id].append({
"name":song['url'],
"title":song['title']
})
embed = embedMessage.embed(
title = "Queued:",
description = f"**{len(ytdl_src.toQueue) + 1}** songs from **{ytdl_src.data['title']}**"
)
ytdl_src.toQueue = None
await self.bot.player.nowPlaying[guild.id]["message"].delete()
self.bot.player.nowPlaying[guild.id]["message"] = await channel.send(embed=embed)
def onFinish(self, guild):
if not guild.me.voice:
self.bot.player.nowPlaying[guild.id]["song"] = None
return
        if not guild.id in self.bot.player.queue.keys():
            # Nothing was ever queued; schedule the disconnect below instead of
            # returning early and leaving the coroutine unawaited.
            coroutine = self.bot.player.connectedChannel[guild.id].disconnect()
            self.bot.player.nowPlaying[guild.id]["song"] = None
        else:
            if not guild.id in self.bot.player.loopQueue.keys():
                self.bot.player.loopQueue[guild.id] = False
            if len(self.bot.player.queue[guild.id]) > 0:
                song = self.bot.player.queue[guild.id].pop(0)
                if self.bot.player.loopQueue[guild.id]:
                    self.bot.player.queue[guild.id].append(song)
                coroutine = self.playAudio(song,guild)
            else:
                coroutine = self.bot.player.connectedChannel[guild.id].disconnect()
                self.bot.player.nowPlaying[guild.id]["song"] = None
future = asyncio.run_coroutine_threadsafe(coroutine,self.bot.loop)
try:
future.result()
except Exception as er:
print(er)
pass
## Allow use of cog class by main bot instance
def setup(bot):
bot.add_cog(play(bot))
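# A minimal loading sketch (the extension path is an assumption based on this
# file's location in the repo):
#   bot.load_extension('cogs.Music.play')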
| null |
cogs/Music/play.py
|
play.py
|
py
| 7,261 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "discord.ext.commands.Cog",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "discord.ext.commands",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "common.category.getCategory",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "common.category",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "common.config.cfg",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "common.config",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "common.config.cfg",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "common.config",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "common.embedMessage.embed",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "common.embedMessage",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "common.embedMessage.embed",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "common.embedMessage",
"line_number": 38,
"usage_type": "name"
},
{
"api_name": "common.embedMessage.errorColor",
"line_number": 41,
"usage_type": "attribute"
},
{
"api_name": "common.embedMessage",
"line_number": 41,
"usage_type": "name"
},
{
"api_name": "common.embedMessage.embed",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "common.embedMessage",
"line_number": 45,
"usage_type": "name"
},
{
"api_name": "common.embedMessage.errorColor",
"line_number": 48,
"usage_type": "attribute"
},
{
"api_name": "common.embedMessage",
"line_number": 48,
"usage_type": "name"
},
{
"api_name": "common.embedMessage.embed",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "common.embedMessage",
"line_number": 56,
"usage_type": "name"
},
{
"api_name": "common.embedMessage.errorColor",
"line_number": 59,
"usage_type": "attribute"
},
{
"api_name": "common.embedMessage",
"line_number": 59,
"usage_type": "name"
},
{
"api_name": "common.embedMessage.embed",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "common.embedMessage",
"line_number": 72,
"usage_type": "name"
},
{
"api_name": "discord.ext.commands.command",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "common.ytdlSrc.ytdlSrc.from_url",
"line_number": 93,
"usage_type": "call"
},
{
"api_name": "common.ytdlSrc.ytdlSrc",
"line_number": 93,
"usage_type": "attribute"
},
{
"api_name": "common.ytdlSrc",
"line_number": 93,
"usage_type": "name"
},
{
"api_name": "common.embedMessage.embed",
"line_number": 95,
"usage_type": "call"
},
{
"api_name": "common.embedMessage",
"line_number": 95,
"usage_type": "name"
},
{
"api_name": "common.embedMessage.errorColor",
"line_number": 98,
"usage_type": "attribute"
},
{
"api_name": "common.embedMessage",
"line_number": 98,
"usage_type": "name"
},
{
"api_name": "discord.ClientException",
"line_number": 108,
"usage_type": "attribute"
},
{
"api_name": "discord.ClientException",
"line_number": 116,
"usage_type": "attribute"
},
{
"api_name": "common.embedMessage.embed",
"line_number": 124,
"usage_type": "call"
},
{
"api_name": "common.embedMessage",
"line_number": 124,
"usage_type": "name"
},
{
"api_name": "common.embedMessage.embed",
"line_number": 131,
"usage_type": "call"
},
{
"api_name": "common.embedMessage",
"line_number": 131,
"usage_type": "name"
},
{
"api_name": "common.embedMessage.embed",
"line_number": 150,
"usage_type": "call"
},
{
"api_name": "common.embedMessage",
"line_number": 150,
"usage_type": "name"
},
{
"api_name": "asyncio.run_coroutine_threadsafe",
"line_number": 176,
"usage_type": "call"
}
] |
295854624
|
from PIL.Image import open as im_open
from requests import get
from urllib.request import quote
from re import search, findall
from random import shuffle, choice
from tool import process_img
class Baidu(object):
""" 百度 """
def __init__(self, father):
self.father = father
self.name = 'Baidu'
self.page_idx, self.img_idx = choice([x*30 for x in range(30)]), 0
self.img_set = []
self.current_id = 0
self.current_data = []
self.repeat = 0
self.cate = ['随机', '风景', '雪景', '宇宙', '山水', '夜景', '蓝天', '秋天', '田园', '日出', '火焰', '沙漠', '公路', '星空', '海底', '自然', '冰雪', '海滩', '美女', '唯美', '可爱', '小清新', '插画', '水墨画', '个性', '简约', '护眼', '节日', '日历', '非主流', '中国风', '搞笑', '帅哥', '情侣', '另类', '萝莉', '炫酷', '性感', '3D', '科幻', '时尚', '星座', '涂鸦', '古典', '淡雅', '创意设计', 'lomo风', '苹果壁纸', '美食', '三维立体', '高清壁纸', '萌宠', '卡通', '体育', '国家地理', '手绘素描', '旅游风光', '治愈系', '卡通动漫', '游戏动漫', '动物', '影视', '游戏', '花草', '明星', '跑车']
def get_all_img(self):
try:
if self.father.config['category'] == '随机':
cate = choice(list((set(self.cate[1:]) | set(self.father.data['api'][self.name]['ignore_list']))-(set(self.cate[1:]) & set(self.father.data['api'][self.name]['ignore_list']))))
else:
cate = self.father.config['category']
r = ''.join(get('https://image.baidu.com/search/index?tn=baiduimage&word=%s+%s&pn=%s' % ('%E5%A3%81%E7%BA%B8', quote(cate), self.page_idx), timeout=3).content.decode().split())
except:
if not self.father.switch_frame.abort:
self.get_all_img()
return
self.img_set = findall(r'"pageNum":.*?,"objURL":"(.*?)","fromURL":.*?"width":(\d*),"height":(\d*).*?"di":"(\d*)"', r)
self.img_idx = 0
if self.img_set:
if self.father.config['random_switch']:
shuffle(self.img_set)
self.page_idx += 30
else:
self.page_idx = choice([x*30 for x in range(30)])
def download_img(self):
try:
self.img_set[self.img_idx]
except IndexError:
self.get_all_img()
if self.father.switch_frame.abort:
return
try:
data = self.img_set[self.img_idx]
except IndexError:
return
url, width, height, id_ = data
if self.father.config['category'] == '随机':
self.img_idx += 3
else:
self.img_idx += 1
        # Filter: resolution larger than the screen, and not on the blacklist
if id_ not in self.father.data["api"][self.name]['hate_list'] and int(width) >= self.father.resolving[0] and int(height) >= self.father.resolving[1]:
try:
img = get(url, stream=True, timeout=3)
tile = search('.*?/([a-zA-Z]{3,4})$', img.headers['Content-Type']).group(1)
except:
if not self.father.switch_frame.abort and self.repeat <= 20:
self.repeat += 1
self.download_img()
return
try:
length = int(img.headers['Content-Length'])
except KeyError:
length = 1024
            # transfer OK and file size within the limit
if img.ok and length / 1000000 <= self.father.config['length']:
download_rate = 0
with open('image/wall.' + tile, 'wb') as fp:
try:
for part in img.iter_content(length // 100):
                            # abort
if self.father.switch_frame.abort:
fp.close()
return
fp.write(part)
download_rate += 1
self.father.switch_frame.white_label.move(0, -download_rate // 2)
except:
self.father.switch_frame.abort = True
return
fp.close()
im = im_open('image/wall.' + tile)
process_img(im, self.father.resolving)
self.father.last_api = self.name
self.current_id = id_
self.current_data = data
self.repeat = 0
else:
self.download_img()
else:
self.download_img()
def static_download(self):
if not self.father.data['api'][self.name]['like_list']:
return False
else:
data = choice(self.father.data['api']['Baidu']['like_list'])[1]
url, width, height, id_ = data
try:
img = get(url, stream=True, timeout=3)
tile = search('.*?/([a-zA-Z]{3,4})$', img.headers['Content-Type']).group(1)
except:
if not self.father.switch_frame.abort and self.repeat <= 20:
self.repeat += 1
self.static_download()
return True
try:
length = int(img.headers['Content-Length'])
except KeyError:
length = 1024
            # transfer OK and file size within the limit
if img.ok:
download_rate = 0
with open('image/wall.' + tile, 'wb') as fp:
try:
for part in img.iter_content(length // 100):
                            # abort
if self.father.switch_frame.abort:
fp.close()
return
fp.write(part)
download_rate += 1
self.father.switch_frame.white_label.move(0, -download_rate // 2)
except:
self.father.switch_frame.abort = True
return
fp.close()
im = im_open('image/wall.' + tile)
process_img(im, self.father.resolving)
self.father.last_api = self.name
self.current_id, self.current_data = id_, data
self.repeat = 0
return True
else:
self.static_download()
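# A minimal usage sketch (the `father` object is an assumption: it must expose
# the config/data dicts, resolving, switch_frame, and last_api attributes used
# above):
#   api = Baidu(father)
#   api.get_all_img()
#   api.download_img()   # writes image/wall.<ext>, post-processed by process_img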
| null |
pyqt/wallpaper switcher/api/Baidu.py
|
Baidu.py
|
py
| 6,601 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "random.choice",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "random.choice",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "urllib.request.quote",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "re.findall",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "random.shuffle",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "random.choice",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "re.search",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "PIL.Image.open",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "tool.process_img",
"line_number": 89,
"usage_type": "call"
},
{
"api_name": "random.choice",
"line_number": 103,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 106,
"usage_type": "call"
},
{
"api_name": "re.search",
"line_number": 107,
"usage_type": "call"
},
{
"api_name": "PIL.Image.open",
"line_number": 134,
"usage_type": "call"
},
{
"api_name": "tool.process_img",
"line_number": 135,
"usage_type": "call"
}
] |
204301336
|
"""
rl environment
By : ya0000000
2021/08/31
"""
import numpy as np
import os
import math
import time
import inverseKinematics as IK
from IK_FindOptSol import FindOptSol
from robot_vrep import my_robot
import config
import cv2 as cv
from yolo import *
def creat_path(path):
if path_exsit(path=path):
print(path+' exist')
else:
os.makedirs(path)
def path_exsit(path):
    return os.path.exists(path)
radtodeg = 180 / math.pi  # radians to degrees
degtorad = math.pi / 180  # degrees to radians
terminal_reward = 1000
finalpos = [0, 0, 180]
# Units here are cm; the suction cup adds 0.125
DH_table = np.array([[0, 0.345, 0.08, math.pi / 2],
[0+math.pi / 2 , 0, 0.27, 0],
[0, 0, 0.09, math.pi / 2],
[0, 0.295, 0, -math.pi / 2],
[0, 0, 0, math.pi / 2],
[0, 0.102+0.125, 0, 0]])
def save_txt(data, fmt='%f'):
f = open('C:/Users/user/Desktop/rl/data.txt', 'a')
np.savetxt(f, data, fmt=fmt)
f.close()
class robot_env(object):
degtorad = math.pi / 180
state_dim = config.state_dim
action_dim = config.action_dim
def __init__(self):
        self.radtodeg = 180 / math.pi  # radians to degrees
        self.degtorad = math.pi / 180  # degrees to radians
self.my_robot = my_robot()
self.my_robot.connection()
self.yolo = YOLOV3()
self.yolo_coco = YOLOV3_coco()
self.random_flag = 1
self.object_flag = 0
self.random_train = config.random_train
def initial(self):
self.my_robot.stop_sim()
self.my_robot.start_sim()
def reset(self,i):
""" 拿 camera 位置,角度,矩陣 """
self.cam_position, self.cam_orientation ,self.cam_rotm, self.cam_pose= self.my_robot.get_depth_camera_pose()
""" robot初始姿態 """
self.joint = [0, 0, 0, 0, -1.57, 0]
self.my_robot.move_all_joint(self.joint)
print('reset')
""" 物體是否要隨機擺放 """
if (self.random_train):
if (i+1) % 100 == 0: # 每100回合換一次物體
#self.object_flag = np.random.randint(3, size=1)
self.object_flag = self.object_flag - 1
if self.object_flag <= -1:
self.object_flag =np.random.randint(3,size=1)
print('object',self.object_flag)
if self.random_flag == 1:
self.my_robot.random_object(self.object_flag) # 物體位置隨機放
else:
self.my_robot.no_random_object(self.object_flag) # 物體位置固定放
else:
self.my_robot.no_random_object(self.object_flag) # 物體位置固定放
time.sleep(0.2)
""" --- 隨機選擇yolo偵測的物體 或 yolo偵測到的第一個物體 --- """
# self.index = np.random.randint(config.num_object, size=1)
self.index = 0
return self.get_state()
def get_state(self):
# -----************************Img Initial************************-----#
""" 拿 彩色圖,深度資訊(16位元),深度圖(8位元) """
self.color_img, self.depth_img, self.depth_img_for_show = self.my_robot.get_camera_data()
""" 存照片然後讀RGB和深度圖片 """
if config.show_yolo: # (存照片)
self.my_robot.arrayToImage(self.color_img)
self.my_robot.arrayToDepthImage(self.depth_img_for_show)
RGB_Img = cv.imread(config.yolo_Img_path) # 讀RGB圖片 (480, 640, 3)
Dep_Img = cv.imread(config.yolo_Dep_path) # 讀深度圖片 (480, 640, 3)
ROI = self.color_img[config.RoiOffset_Y:(config.resolutionY_C-config.RoiOffset_Y_), config.RoiOffset_X:(config.resolutionX_C-config.RoiOffset_X_)]
# /////////////////////////////////////////////////////////////////////#
# YOLO Detect #
# /////////////////////////////////////////////////////////////////////#
if(config.yolo_detect):
if(self.object_flag == 3): # coco dataset
self.Yolo_Det_frame, self.coordinate, self.cls, self.label, self.Width_and_Height = self.yolo_coco.detectFrame(ROI) # 得到框的中心點
else: # cubic dataset
self.Yolo_Det_frame, self.coordinate,self.cls,self.label,self.Width_and_Height = self.yolo.detectFrame(ROI) # 得到框的中心點
else: # 沒有yolo
self.Yolo_Det_frame = ROI
self.coordinate =np.array([[int((self.Yolo_Det_frame.shape[1]/2)),int((self.Yolo_Det_frame.shape[0]/2))]])
self.cls =np.array([0])
self.label =['cubic']
self.Width_and_Height =np.array([[self.Yolo_Det_frame.shape[1] ,self.Yolo_Det_frame.shape[0]]])
""" 若yolo沒偵測到就重置物件並重新取得狀態 """
while not (self.Width_and_Height.any()):
# self.my_robot.random_object(self.object_flag)
self.my_robot.no_random_object(self.object_flag)
time.sleep(0.2)
self.get_state()
print('No Object !!! ')
""" 顯示yolo結果 """
if config.show_yolo:
color = (0, 0, 255) # BGR
cv2.circle(self.Yolo_Det_frame, (self.coordinate[self.index][0], self.coordinate[self.index][1]), 2, color, -1)
cv2.circle(self.color_img, (self.coordinate[self.index][0]+config.RoiOffset_X, self.coordinate[self.index][1]+config.RoiOffset_Y), 2, color, -1)
# cv2.imshow('color_img', self.color_img)
cv2.imshow('yolo',self.Yolo_Det_frame)
# cv2.waitKey(0)
cv2.imwrite(config.yolo_Det_Img_path, np.array(self.Yolo_Det_frame)) # 储存检测结果图
cv2.imwrite(config.yolo_Det_Img_path, np.array(self.color_img)) # 储存检测结果图
# /////////////////////////////////////////////////////////////////////#
# YOLO END #
# /////////////////////////////////////////////////////////////////////#
""" 彩色影像的state """
if(config.color_state):
""" yolo邊界框中心點座標 """
color_coordinate = np.zeros((2, 1), np.float64)
color_coordinate[0] = self.coordinate[self.index][0] + config.RoiOffset_X
color_coordinate[1] = self.coordinate[self.index][1] + config.RoiOffset_Y
""" yolo邊界框對角線座標 """
color_left = np.array([color_coordinate[0] - self.Width_and_Height[self.index][0] / 2,
color_coordinate[1] + self.Width_and_Height[self.index][1] / 2])
color_right = np.array([color_coordinate[0] + self.Width_and_Height[self.index][0] / 2,
color_coordinate[1] - self.Width_and_Height[self.index][1] / 2])
""" ---- 畫RGB圖顯示 ---- """
# cv2.circle(self.color_img, (color_coordinate[0], color_coordinate[1]), 2, (0, 255, 0), -1)
# cv2.rectangle(self.color_img, (color_left[0], color_left[1]), (color_right[0], color_right[1]), (0, 255, 0), 2)
# cv2.imshow('depth123', self.depth_img_for_show)
# cv2.imwrite(config.yolo_Dep_path, np.array(self.depth_img)) # 储存检测结果图
# cv2.waitKey(0)
""" ---- 畫RGB圖顯示 ---- """
""" y 的座標 與 x 的座標 """
cy = np.array([int(color_coordinate[1] - self.Width_and_Height[self.index][1] / 2),
int(color_coordinate[1] + self.Width_and_Height[self.index][1] / 2)])
cx = np.array([int(color_coordinate[0] - self.Width_and_Height[self.index][0] / 2),
int(color_coordinate[0] + self.Width_and_Height[self.index][0] / 2)])
cy = np.clip(cy, 0, 424)
cx = np.clip(cx, 0, 512)
""" 拿到邊界框範圍內的影像 """
self.ROI = self.color_img[cy[0]:cy[1], cx[0]:cx[1]]
""" resize 為 64*64 """
color_img = cv.resize(self.ROI, (64,64), interpolation=cv.INTER_CUBIC)
"""
transpose 為將img的data重新排列
img為[h,w,channel]
pytorch 輸入為 [batch,channel,h,w]
"""
color_img = color_img.transpose((2,0,1))
self.color_img_input = color_img[np.newaxis, ...]
s = self.color_img_input
        # ********* State from the depth image *********
        else:
            """ YOLO bounding-box center and corner coordinates """
            dep_coordinate = np.zeros((2, 1), np.float64)
            dep_coordinate[0] = self.coordinate[self.index][0] + config.RoiOffset_X
            dep_coordinate[1] = self.coordinate[self.index][1] + config.RoiOffset_Y
            dep_left = np.array([dep_coordinate[0] - self.Width_and_Height[self.index][0] / 2, dep_coordinate[1] + self.Width_and_Height[self.index][1] / 2])
            dep_right = np.array([dep_coordinate[0] + self.Width_and_Height[self.index][0] / 2, dep_coordinate[1] - self.Width_and_Height[self.index][1] / 2])
            """ ---- draw on the depth image ---- """
            # cv2.circle( self.depth_img_for_show , (dep_coordinate[0], dep_coordinate[1]), 2, (196, 114, 68), -1)
            # cv2.rectangle( self.depth_img_for_show , (dep_left[0], dep_left[1]), (dep_right[0], dep_right[1]), (196, 114, 68), 2)
            # cv2.imshow('depth123', self.depth_img_for_show)
            # cv2.imwrite(config.yolo_Dep_path, np.array(self.depth_img)) # save the detection result image
            # cv2.waitKey(0)
            """ ---- draw on the depth image ---- """
            """ y range and x range of the crop """
            cy = np.array([int(dep_coordinate[1] - self.Width_and_Height[self.index][1] / 2), int(dep_coordinate[1] + self.Width_and_Height[self.index][1] / 2)])
            cx = np.array([int(dep_coordinate[0] - self.Width_and_Height[self.index][0] / 2), int(dep_coordinate[0] + self.Width_and_Height[self.index][0] / 2)])
            cy = np.clip(cy, 0, 424)
            cx = np.clip(cx, 0, 512)
            """ Crop the image inside the bounding box """
            self.ROI = self.depth_img[cy[0]:cy[1], cx[0]:cx[1]]
            ##########################################################################################
            """ Show the depth state (thesis figures) """
            # self.depth_img_for_show = self.depth_img_for_show[cy[0]:cy[1], cx[0]:cx[1]]
            # self.color_img= self.color_img[cy[0]:cy[1], cx[0]:cx[1]]
            # # print(self.ROI.shape) #(137, 131)
            # depth_img_64_show = cv.resize( self.depth_img_for_show,(64,64),interpolation = cv.INTER_CUBIC)
            # cv2.imshow('self.depth_img_64_show', depth_img_64_show)
            # cv2.imshow('self.depth_img_for_show', self.depth_img_for_show)
            # cv2.imwrite('imgTemp\\depth.png', self.depth_img_for_show) # save the result image
            # cv2.imwrite('imgTemp\\depth_64.png', depth_img_64_show) # save the result image
            # cv2.imwrite('imgTemp\\color.png', self.color_img) # save the result image
            # cv2.waitKey(0)
            ########################################################################################
            """ Resize to 64*64 """
            depth_img = cv.resize( self.ROI,(64,64),interpolation = cv.INTER_CUBIC)
            self.depth_img_input = depth_img[np.newaxis, ...]
            s = self.depth_img_input
            s = s[np.newaxis, ...]
return s
def step(self, action):
        # action is (U, V)
        done = False
        reward = 0
        success = 0
        suction_value = 0
        """ Map the SAC action output to a displacement vector on the image plane of the object of interest """
        u1 = 1 + math.floor(action[0] * (self.Width_and_Height[self.index][0]/2 - 0.99)) #------- rounded down (64 ~ 1)
        v1 = 1 + math.floor(action[1] * (self.Width_and_Height[self.index][1]/2 - 0.99)) #------- rounded down (64 ~ 1)
        """ Final picking-point coordinates (u, v) """
        u = int(u1 + self.coordinate[self.index][0] + config.RoiOffset_X)
        v = int(v1 + self.coordinate[self.index][1] + config.RoiOffset_Y)
        """ Draw the point on the color and depth images """
        if config.show_yolo:
            # cv.circle(self.Yolo_Det_frame, (u, v), 5, (255, 0, 255), -1)
            cv.circle( self.depth_img_for_show , (u, v), 5, (255, 0, 255), -1)
            cv.circle(self.color_img, (u, v), 5, (255, 255, 0), -1)
            cv.imshow('color_img', self.color_img )
            cv.imshow('self.depth_img_for_show',self.depth_img_for_show)
            # press any key to close all windows
cv.waitKey(0)
""" 座標(u,v)的深度 """
pixel_depth = self.my_robot.get_depth(u,v,self.depth_img)
#------------------------------------------------座標轉換------------------------------------------------#
""" (u,v)轉到camera frame """
x, y = self.my_robot.uv_2_xyz(pixel_depth, u, v, config.resolutionX_C, config.resolutionY_C, config.theta)
img_coor = np.array([x,y, pixel_depth])
""" camera frame轉到 robot frame """
world_coor = self.my_robot.coor_trans_AtoB(self.cam_position, self.cam_orientation, img_coor)
""" 逆向運動學求各軸角度 """
[tip_Jangle, flag, Singularplace] = IK.InverseKinematics(finalpos, world_coor , DH_table)
joint,num_of_sol = FindOptSol(tip_Jangle, self.joint,Singularplace)
if num_of_sol == 0: # 選到奇異點的解扣分
reward = reward-0.01
done= True
""" vrep移動robot到目標角度 """
self.my_robot.move_all_joint(joint)
self.joint = joint # 更新目前joint位置
time.sleep(1)
""" 啟動吸嘴 """
suction_value = self.my_robot.enable_suction(True)
time.sleep(0.2)
""" 抬高手臂 """
joint = [-0.01142807, -0.2 , 0.03640932, 0. , -1.03999794, -0.01142807]
self.my_robot.move_all_joint(joint)
self.joint = joint
time.sleep(0.5)
""" 取得cubic現在位置看是否拾取成功 """
cuboid_pos_now= self.my_robot.get_cuboid_pos(self.object_flag) # dim=3
time.sleep(0.2)
""" 若cubic z位置在0.15之上代表成功反之失敗 """
if (abs(cuboid_pos_now[2]) > 0.15 ):
success = 1
else:
success = 0
""" 關掉吸嘴 """
suction_value = self.my_robot.enable_suction(False)
time.sleep(0.2)
""" 若成功則結束此回合;若未成功則需查看cubic是否超過制定範圍,超過則重置物體 """
if (success >= 1):
done = True
print('lift done')
else:
if (abs(cuboid_pos_now[1]) > 0.32 or cuboid_pos_now[0] < 0.2 or cuboid_pos_now[0] > 0.6):
self.my_robot.random_object(self.object_flag) # 如果還沒done 物件又超出範圍則重置物體
time.sleep(0.2)
""" 計算獎勵值 """
reward = reward+success
""" 獲得下一刻狀態 """
s_ = self.get_state()
return s_, reward, done
if __name__ == '__main__':
pass
##################### record data #####################
# error_record = np.reshape(error, (1, 6))
# # print(joint_out_record)
# path = './Trajectory/'
# name = 'error_record.txt'
# f = open(path + name, mode='a')
# np.savetxt(f, error_record, fmt='%f')
# f.close()
##################### record data #####################
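# A minimal interaction sketch (episode and step handling are assumptions;
# requires a running V-REP scene that robot_vrep.my_robot can connect to):
#   env = robot_env()
#   env.initial()
#   for i in range(10):
#       s = env.reset(i)
#       done = False
#       while not done:
#           a = np.random.uniform(-1, 1, size=robot_env.action_dim)  # random policy
#           s, r, done = env.step(a)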
| null |
vrep/SAC_camera_version2/env_new2.py
|
env_new2.py
|
py
| 15,741 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "os.makedirs",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "math.pi",
"line_number": 30,
"usage_type": "attribute"
},
{
"api_name": "math.pi",
"line_number": 31,
"usage_type": "attribute"
},
{
"api_name": "numpy.array",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "math.pi",
"line_number": 37,
"usage_type": "attribute"
},
{
"api_name": "math.pi",
"line_number": 38,
"usage_type": "attribute"
},
{
"api_name": "math.pi",
"line_number": 39,
"usage_type": "attribute"
},
{
"api_name": "math.pi",
"line_number": 40,
"usage_type": "attribute"
},
{
"api_name": "math.pi",
"line_number": 41,
"usage_type": "attribute"
},
{
"api_name": "numpy.savetxt",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "math.pi",
"line_number": 51,
"usage_type": "attribute"
},
{
"api_name": "config.state_dim",
"line_number": 52,
"usage_type": "attribute"
},
{
"api_name": "config.action_dim",
"line_number": 53,
"usage_type": "attribute"
},
{
"api_name": "math.pi",
"line_number": 56,
"usage_type": "attribute"
},
{
"api_name": "math.pi",
"line_number": 57,
"usage_type": "attribute"
},
{
"api_name": "robot_vrep.my_robot",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "config.random_train",
"line_number": 64,
"usage_type": "attribute"
},
{
"api_name": "numpy.random.randint",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 84,
"usage_type": "attribute"
},
{
"api_name": "time.sleep",
"line_number": 93,
"usage_type": "call"
},
{
"api_name": "config.show_yolo",
"line_number": 107,
"usage_type": "attribute"
},
{
"api_name": "cv2.imread",
"line_number": 110,
"usage_type": "call"
},
{
"api_name": "config.yolo_Img_path",
"line_number": 110,
"usage_type": "attribute"
},
{
"api_name": "cv2.imread",
"line_number": 111,
"usage_type": "call"
},
{
"api_name": "config.yolo_Dep_path",
"line_number": 111,
"usage_type": "attribute"
},
{
"api_name": "config.RoiOffset_Y",
"line_number": 114,
"usage_type": "attribute"
},
{
"api_name": "config.resolutionY_C",
"line_number": 114,
"usage_type": "attribute"
},
{
"api_name": "config.RoiOffset_Y_",
"line_number": 114,
"usage_type": "attribute"
},
{
"api_name": "config.RoiOffset_X",
"line_number": 114,
"usage_type": "attribute"
},
{
"api_name": "config.resolutionX_C",
"line_number": 114,
"usage_type": "attribute"
},
{
"api_name": "config.RoiOffset_X_",
"line_number": 114,
"usage_type": "attribute"
},
{
"api_name": "config.yolo_detect",
"line_number": 120,
"usage_type": "attribute"
},
{
"api_name": "numpy.array",
"line_number": 127,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 128,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 130,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 136,
"usage_type": "call"
},
{
"api_name": "config.show_yolo",
"line_number": 141,
"usage_type": "attribute"
},
{
"api_name": "cv2.circle",
"line_number": 143,
"usage_type": "call"
},
{
"api_name": "cv2.circle",
"line_number": 144,
"usage_type": "call"
},
{
"api_name": "config.RoiOffset_X",
"line_number": 144,
"usage_type": "attribute"
},
{
"api_name": "config.RoiOffset_Y",
"line_number": 144,
"usage_type": "attribute"
},
{
"api_name": "cv2.imshow",
"line_number": 146,
"usage_type": "call"
},
{
"api_name": "cv2.imwrite",
"line_number": 148,
"usage_type": "call"
},
{
"api_name": "config.yolo_Det_Img_path",
"line_number": 148,
"usage_type": "attribute"
},
{
"api_name": "numpy.array",
"line_number": 148,
"usage_type": "call"
},
{
"api_name": "cv2.imwrite",
"line_number": 149,
"usage_type": "call"
},
{
"api_name": "config.yolo_Det_Img_path",
"line_number": 149,
"usage_type": "attribute"
},
{
"api_name": "numpy.array",
"line_number": 149,
"usage_type": "call"
},
{
"api_name": "config.color_state",
"line_number": 157,
"usage_type": "attribute"
},
{
"api_name": "numpy.zeros",
"line_number": 160,
"usage_type": "call"
},
{
"api_name": "numpy.float64",
"line_number": 160,
"usage_type": "attribute"
},
{
"api_name": "config.RoiOffset_X",
"line_number": 161,
"usage_type": "attribute"
},
{
"api_name": "config.RoiOffset_Y",
"line_number": 162,
"usage_type": "attribute"
},
{
"api_name": "numpy.array",
"line_number": 165,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 167,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 179,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 181,
"usage_type": "call"
},
{
"api_name": "numpy.clip",
"line_number": 184,
"usage_type": "call"
},
{
"api_name": "numpy.clip",
"line_number": 185,
"usage_type": "call"
},
{
"api_name": "cv2.resize",
"line_number": 191,
"usage_type": "call"
},
{
"api_name": "cv2.INTER_CUBIC",
"line_number": 191,
"usage_type": "attribute"
},
{
"api_name": "numpy.newaxis",
"line_number": 199,
"usage_type": "attribute"
},
{
"api_name": "numpy.zeros",
"line_number": 207,
"usage_type": "call"
},
{
"api_name": "numpy.float64",
"line_number": 207,
"usage_type": "attribute"
},
{
"api_name": "config.RoiOffset_X",
"line_number": 208,
"usage_type": "attribute"
},
{
"api_name": "config.RoiOffset_Y",
"line_number": 209,
"usage_type": "attribute"
},
{
"api_name": "numpy.array",
"line_number": 210,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 211,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 222,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 223,
"usage_type": "call"
},
{
"api_name": "numpy.clip",
"line_number": 225,
"usage_type": "call"
},
{
"api_name": "numpy.clip",
"line_number": 226,
"usage_type": "call"
},
{
"api_name": "cv2.resize",
"line_number": 245,
"usage_type": "call"
},
{
"api_name": "cv2.INTER_CUBIC",
"line_number": 245,
"usage_type": "attribute"
},
{
"api_name": "numpy.newaxis",
"line_number": 246,
"usage_type": "attribute"
},
{
"api_name": "numpy.newaxis",
"line_number": 248,
"usage_type": "attribute"
},
{
"api_name": "math.floor",
"line_number": 263,
"usage_type": "call"
},
{
"api_name": "math.floor",
"line_number": 264,
"usage_type": "call"
},
{
"api_name": "config.RoiOffset_X",
"line_number": 267,
"usage_type": "attribute"
},
{
"api_name": "config.RoiOffset_Y",
"line_number": 268,
"usage_type": "attribute"
},
{
"api_name": "config.show_yolo",
"line_number": 271,
"usage_type": "attribute"
},
{
"api_name": "cv2.circle",
"line_number": 273,
"usage_type": "call"
},
{
"api_name": "cv2.circle",
"line_number": 274,
"usage_type": "call"
},
{
"api_name": "cv2.imshow",
"line_number": 275,
"usage_type": "call"
},
{
"api_name": "cv2.imshow",
"line_number": 276,
"usage_type": "call"
},
{
"api_name": "cv2.waitKey",
"line_number": 278,
"usage_type": "call"
},
{
"api_name": "config.resolutionX_C",
"line_number": 286,
"usage_type": "attribute"
},
{
"api_name": "config.resolutionY_C",
"line_number": 286,
"usage_type": "attribute"
},
{
"api_name": "config.theta",
"line_number": 286,
"usage_type": "attribute"
},
{
"api_name": "numpy.array",
"line_number": 287,
"usage_type": "call"
},
{
"api_name": "inverseKinematics.InverseKinematics",
"line_number": 292,
"usage_type": "call"
},
{
"api_name": "IK_FindOptSol.FindOptSol",
"line_number": 293,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 303,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 306,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 312,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 316,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 326,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 335,
"usage_type": "call"
}
] |
565126991
|
#!/usr/bin/python3
from base import *
from tag import *
import yaml
import datetime
import re
class Solution():
"""."""
def __init__(self, file):
file = Path(file)
self.path = file
self.oj, self.id, self.name = file.with_suffix(
'').name.split(' ', maxsplit=2)
self.date = datetime.datetime.utcfromtimestamp(
file.stat().st_mtime
).strftime('%Y-%m-%d %H:%M:%S')
self.link = ojlink_patterns[self.oj](self.id)
self.text = read_text(file).strip()
if self.text[:3] == '---':
pos = self.text.find('---', 3)
if pos == -1:
self.text += '\n---'
pos = self.text.find('---', 3)
            self.meta = yaml.safe_load(self.text[3:pos])
self.text = self.text[pos + 3:].strip()
else:
self.meta = dict()
tags = self.meta.get('tags', [])
self.tags = Tag_list()
for tag in tags:
self.tags.append(Tag(tag))
self.tags.sort()
p1 = self.text.find('## 题目描述')
p2 = self.text.find('## 输入格式')
p3 = self.text.find('## 输出格式')
p4 = self.text.find('## 样例')
p5 = self.text.find('## 数据范围与提示')
p6 = self.text.find('## 题解')
p7 = self.text.find('## 代码')
self.description = parse(self.text[p1 + len('## 题目描述'):p2].strip())
self.input_format = parse(self.text[p2 + len('## 输入格式'):p3].strip())
self.output_format = parse(self.text[p3 + len('## 输出格式'):p4].strip())
self.example = parse(self.text[p4 + len('## 样例'):p5].strip())
self.hint = parse(self.text[p5 + len('## 数据范围与提示'):p6].strip())
self.solution = parse(self.text[p6 + len('## 题解'):p7].strip())
self.code = parse(self.text[p7 + len('## 代码'):].strip())
self.memory_limit = self.meta.get('memory_limit', 256)
self.time_limit = self.meta.get('time_limit', 1000)
# def __str__(self):
# return '%s %s %s' % (self.oj, self.id, self.name)
def parse(self):
return solution_html.format(solution=self, config=config)
class Solution_list():
"""."""
def sort(self, key=lambda x: (x.oj + '%05s' % x.id)):
self.solutions = sorted(self.solutions, key=key)
    def __init__(self, solutions=None):
        # Avoid sharing a mutable default argument across instances.
        self.solutions = solutions if solutions is not None else []
        self.sort()
def append(self, solution):
self.solutions.append(solution)
def parse(self):
__solution_list__ = ''
for solution in self.solutions:
__tag_list__ = solution.tags.parse()
__solution_list__ += solution_list_td_html.format(
solution=solution,
__tag_list__=__tag_list__,
) + '\n'
return solution_list_html.format(
__solution_list__=__solution_list__,
config=config,
)
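# A minimal usage sketch (the file name follows the "<OJ> <id> <name>.md"
# convention that Solution.__init__ expects; the path is hypothetical):
#   sols = Solution_list([Solution('solutions/Codeforces 1A Theatre Square.md')])
#   html = sols.parse()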
| null |
Python/myblog/lib/script/solution.py
|
solution.py
|
py
| 2,970 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "datetime.datetime.utcfromtimestamp",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "yaml.load",
"line_number": 33,
"usage_type": "call"
}
] |
570591905
|
#################################################################
######################## CODE FIGURE 4 BN ######################
#################################################################
# Run with the number of hidden layers passed as a command-line argument
import numpy as np
import torch
import torch.nn as nn
from torch.autograd import Variable
import sys
import torch.nn.functional as func
import pickle
import getopt
import IPython
import math
dtype = torch.cuda.FloatTensor
dtype_labels = torch.cuda.LongTensor
no_of_hl= int(sys.argv[1]) # number of hidden layers
HUs=128 # number of hidden units
step_size = 0.01 # stepsize
min_batch_size = 64 # minibatch for SGD
batch_norm_size = 10
hidden_layers=np.ones(no_of_hl,dtype=int)*HUs
######## 1. GET DATA ##########
import torchvision
import torchvision.transforms as transforms
transform = transforms.Compose(
[transforms.ToTensor()])
train_dataset = torchvision.datasets.CIFAR10(root='CIFAR10', train=True,
download=True, transform=transform)
train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=min_batch_size,
shuffle=True, num_workers=1)
X = torch.tensor(train_dataset.data).type(dtype)
X = torch.flatten(X,1)
y = torch.tensor(train_dataset.targets).type(dtype_labels)
test_dataset = torchvision.datasets.CIFAR10(root='CIFAR10', train=False,
download=True, transform=transform)
X_test = torch.tensor(test_dataset.data).type(dtype)
X_test=X_test.reshape(-1,3*32*32)
y_test = torch.tensor(test_dataset.targets).type(dtype_labels)
######## 2. COMPILE NETS ##########
dataiter = iter(train_loader)
images, labels = next(dataiter)
images = images.view(-1,3*32*32)
D_in=images[0].shape[0]
D_out=10 # for mnist is 10
_layers=np.append(hidden_layers,[D_out])
layers=np.append([D_in], _layers) #this variable contains the Network arcitecture in terms of units in each layer,e.g. 5,10,10,1 (D_in,hidden 1, hidden 2, D_out)
print('Network architecture (no of units in layer): ',layers)
#networks
# MLP without batch normalization
class MlpPlane(torch.nn.Module):
def __init__(self,h_sizes):
super(MlpPlane,self).__init__()
self.h_sizes = h_sizes
self.layers = nn.ModuleList()
for k in range(len(h_sizes)-1):
linear_module = nn.Linear(h_sizes[k].item(), h_sizes[k+1].item())
variance = np.sqrt(2.0/(h_sizes[k].item() + h_sizes[k+1].item()))
linear_module.weight.data.normal_(0.0, variance)
self.layers.append(linear_module)
def forward(self,x):
for k in range(len(self.h_sizes)-2):
x = torch.relu(self.layers[k](x))
return self.layers[len(self.h_sizes)-2](x)
def get_weights(self):
ws = [None]*(len(self.h_sizes)-1)
for k in range(len(self.h_sizes)-1):
ws[k] = self.layers[k].weight
return ws
def getlayerloss(self,x,layer_num): # approximate
for k in range(layer_num+1):
x = torch.relu(self.layers[k](x))
M = x.t().mm(x)/x.size(0)
        return x,torch.trace(M.mm(M))/torch.trace(M)**2 #+ torch.norm(M)
def getblanceloss(self,x):
lo = 0
for k in range(len(self.h_sizes)-1):
x = torch.relu(self.layers[k](x))
M = x.mm(x.t())/float(min_batch_size)
# print(M.size())
lo = lo + torch.trace(M.mm(M))/torch.trace(M)**2 #+ torch.norm(M)
return lo
##### BATCH Normalization
class MlpBatch(MlpPlane):
def __init__(self,h_sizes):
super(MlpBatch,self).__init__(h_sizes)
self.batches = nn.ModuleList()
for k in range(len(h_sizes)-2):
self.batches.append(torch.nn.BatchNorm1d(num_features=h_sizes[k+1].item()))#,momentum=0.0
def forward(self,x):
for k in range(len(self.h_sizes)-2):
x = torch.relu(self.batches[k](self.layers[k](x)))
return self.layers[len(self.h_sizes)-2](x)
######## 3. COMPILE TRAINING ROUTINES ##########
import torch.nn.functional as f
def run_training(mlp, epochs = 6,ss = step_size):
errors = []
test_errors = []
accuracies = []
criterion = torch.nn.CrossEntropyLoss(size_average=True)
opt2= torch.optim.SGD(mlp.parameters(),lr =ss )
loss_epoch = 0
data_counter = 0
N = X.size(0)
break_outer=False
for epoch in range(epochs): # loop over the dataset multiple times
rperm = torch.randperm(N).cuda()
loss_epoch = 0
data_counter = 0
i = 0
accuracy=[]
while data_counter<N-1:
opt2.zero_grad()
fidx = i*(min_batch_size)
tidx = min((i+1)*(min_batch_size),N-1)
data_counter = tidx
inputs = X[rperm[fidx:tidx]]
labels = y[rperm[fidx:tidx]]
outputs = mlp.forward(inputs)
loss = criterion(outputs, labels)
            if math.isnan(loss.item()):
                break_outer=True  # stop training after this epoch once the loss diverges
loss.backward()
opt2.step()
loss_epoch += loss.item()*inputs.shape[0]/float(N)
i = i + 1
accuracy.append(torch.mean(torch.eq(labels, torch.argmax(outputs, 1)).float()).data)
test_loss=criterion(mlp.forward(X_test), y_test).cpu().item()
acc=torch.mean(torch.stack(accuracy)).cpu().item()
accuracies.append(acc)
print('epoch:',epoch,',loss:',loss_epoch,'accuracy', acc, 'test_loss: ', test_loss)
errors.append(loss_epoch)
test_errors.append(test_loss)
if break_outer:
break
return errors,accuracies, test_errors
######## 4. COMPILE FUNCTION FOR REPEATED RUNS ##########
def run_indep(mlp, mlpclass, num_runs = 5,epoch_num = 60):
# GRID SEARCH STEPSIZE
stepss = [0.01,0.001,0.0001]
our_errors = []
for si in stepss:
print('==============')
copy_model = mlpclass(layers).cuda()
copy_model.load_state_dict(mlp.state_dict())
our_error,our_accuracy, test_errors = run_training(copy_model,epochs = epoch_num,ss = si)
our_errors.append(our_error)
best_end = 1000
best_idx = 0
for i in range(len(stepss)):
if our_errors[i][-1] < best_end:
print(our_errors[i][-1],best_end,'#')
best_end = our_errors[i][-1]
best_idx = i
best_stepsize = stepss[best_idx]
print('BEST STEPSIZE: ', best_stepsize)
errors = []
accuracies = []
test_errors = []
for j in range(num_runs):
print('>>>>>>>> new indp. run <<<<<<<<<')
copy_model = mlpclass(layers).cuda()
copy_model.load_state_dict(mlp.state_dict())
error,accuracy,test_error = run_training(copy_model,epochs = epoch_num,ss = best_stepsize)
errors.append(error)
accuracies.append(accuracy)
test_errors.append(test_error)
return errors,accuracies,test_errors
######## 5. RUN ##########
bn_errors, bn_accuracies, bn_test_loss = run_indep(MlpBatch(layers).cuda(),MlpBatch,epoch_num=100)
name = 'result_bn_%d_loss' % no_of_hl
np.save(name, bn_errors)
name = 'result_bn_%d_acc' % no_of_hl
np.save(name, bn_accuracies)
name = 'result_bn_%d_test_loss' % no_of_hl
np.save(name, bn_test_loss)
| null |
code_fig_4_bn.py
|
code_fig_4_bn.py
|
py
| 7,334 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "torch.cuda",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "torch.cuda",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "numpy.ones",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms.Compose",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms",
"line_number": 36,
"usage_type": "name"
},
{
"api_name": "torchvision.transforms.ToTensor",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms",
"line_number": 37,
"usage_type": "name"
},
{
"api_name": "torchvision.datasets.CIFAR10",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "torchvision.datasets",
"line_number": 38,
"usage_type": "attribute"
},
{
"api_name": "torch.utils.data.DataLoader",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "torch.utils",
"line_number": 40,
"usage_type": "attribute"
},
{
"api_name": "torch.tensor",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "torch.flatten",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "torch.tensor",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "torchvision.datasets.CIFAR10",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "torchvision.datasets",
"line_number": 46,
"usage_type": "attribute"
},
{
"api_name": "torch.tensor",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "torch.tensor",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "numpy.append",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "numpy.append",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 67,
"usage_type": "attribute"
},
{
"api_name": "torch.nn.ModuleList",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 71,
"usage_type": "name"
},
{
"api_name": "torch.nn.Linear",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 73,
"usage_type": "name"
},
{
"api_name": "numpy.sqrt",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "torch.relu",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "torch.relu",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "torch.trace",
"line_number": 91,
"usage_type": "call"
},
{
"api_name": "torch.relu",
"line_number": 95,
"usage_type": "call"
},
{
"api_name": "torch.trace",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "torch.nn.ModuleList",
"line_number": 105,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 105,
"usage_type": "name"
},
{
"api_name": "torch.nn.BatchNorm1d",
"line_number": 107,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 107,
"usage_type": "attribute"
},
{
"api_name": "torch.relu",
"line_number": 110,
"usage_type": "call"
},
{
"api_name": "torch.nn.CrossEntropyLoss",
"line_number": 120,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 120,
"usage_type": "attribute"
},
{
"api_name": "torch.optim.SGD",
"line_number": 121,
"usage_type": "call"
},
{
"api_name": "torch.optim",
"line_number": 121,
"usage_type": "attribute"
},
{
"api_name": "torch.randperm",
"line_number": 128,
"usage_type": "call"
},
{
"api_name": "math.isnan",
"line_number": 142,
"usage_type": "call"
},
{
"api_name": "torch.mean",
"line_number": 148,
"usage_type": "call"
},
{
"api_name": "torch.eq",
"line_number": 148,
"usage_type": "call"
},
{
"api_name": "torch.argmax",
"line_number": 148,
"usage_type": "call"
},
{
"api_name": "torch.mean",
"line_number": 151,
"usage_type": "call"
},
{
"api_name": "torch.stack",
"line_number": 151,
"usage_type": "call"
},
{
"api_name": "numpy.save",
"line_number": 204,
"usage_type": "call"
},
{
"api_name": "numpy.save",
"line_number": 207,
"usage_type": "call"
},
{
"api_name": "numpy.save",
"line_number": 210,
"usage_type": "call"
}
] |
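The record above penalizes hidden activations with the trace ratio trace(M @ M) / trace(M)**2 of a Gram matrix M. A minimal self-contained sketch of that quantity, with illustrative names not taken from the record:

import torch

def balance_penalty(x: torch.Tensor) -> torch.Tensor:
    """trace(M @ M) / trace(M)**2 for the batch Gram matrix M = x x^T / n.

    Equals sum(l_i^2) / (sum l_i)^2 over the eigenvalues l_i of M, so it is
    lower-bounded by 1/rank(M) and reaches that bound when all nonzero
    eigenvalues are equal (i.e. the feature spectrum is balanced).
    """
    M = x.mm(x.t()) / x.size(0)
    return torch.trace(M.mm(M)) / torch.trace(M) ** 2

x = torch.relu(torch.randn(64, 128))  # stand-in for a layer's ReLU activations
print(balance_penalty(x).item())      # smaller value => flatter (higher effective rank) spectrum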
150098585
|
from backbones.resnet_fpn import ResNetFPN
from backbones.resnet import ResNet
from heads.cls_bbox import ClsBBoxHead_fc as ClsBBoxHead
from heads.mask import MaskHead
from tools.detect_utils import calc_iou, bbox_corner2center, bbox_center2corner
from proposal.rpn import RPN
from pooling.roi_align import RoiAlign
from libs.nms.pth_nms import pth_nms as nms
import os
import random
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from configparser import ConfigParser
class MaskRCNN(nn.Module):
"""Mask R-CNN model.
References: Mask R-CNN: https://arxiv.org/pdf/1703.06870.pdf
Notes: In docstring, N: batch size, M: number of ground-truth objects,
C: number of feature map channels, H: image height, W: image width.
"""
def __init__(self, num_classes, pretrained=None):
"""
Args:
num_classes(int): number of classes, background should be counted in.
e.g: there are 100 foreground objects, num_classes should be 101.
            pretrained(str): 'imagenet' or 'coco'. 'imagenet' means only the
                backbone uses ImageNet pretrained weights; 'coco' means the whole
                Mask R-CNN model uses weights pretrained on the COCO dataset.
"""
super(MaskRCNN, self).__init__()
if pretrained is not None:
assert pretrained in ['imagenet', 'coco']
            assert pretrained not in ['coco'], "COCO pretrained weights are not available yet."
self.config = ConfigParser()
self.config.read(os.path.abspath(os.path.join(__file__, "../", "config.ini")))
self.num_classes = num_classes
self.pooling_size_clsbbox = (7, 7)
self.pooling_size_mask = (14, 14)
self.validating = False # when True output loss and predict results.
use_fpn = bool(int(self.config['BACKBONE']['USE_FPN']))
self.use_fpn = use_fpn
self.train_rpn_only = bool(int(self.config['TRAIN']['TRAIN_RPN_ONLY']))
resnet_layer = int(self.config['BACKBONE']['RESNET_LAYER'])
if self.use_fpn:
self.backbone_fpn = ResNetFPN(resnet_layer, pretrained=pretrained)
self.depth = 256
else:
self.backbone = ResNet(resnet_layer, pretrained=pretrained)
self.depth = 1024
self.rpn = RPN(self.depth, self.use_fpn)
if not self.train_rpn_only:
# RoiAlign for cls and bbox head, pooling size 7x7
self.roi_align_clsbbox = RoiAlign(grid_size=self.pooling_size_clsbbox)
# RoiAlign for mask head, pooling size 14x14
self.roi_align_mask = RoiAlign(grid_size=self.pooling_size_mask)
self.clsbbox_head = ClsBBoxHead(depth=self.depth, pool_size=self.pooling_size_clsbbox,
num_classes=num_classes)
self.mask_head = MaskHead(depth=self.depth, pool_size=self.pooling_size_mask,
num_classes=num_classes)
self.img_height = None
self.img_width = None
self.batch_size = None
def forward(self, image, gt_classes=None, gt_bboxes=None, gt_masks=None):
"""
Args:
image(Tensor): [N, C, H, W], image data.
gt_classes(Tensor): [N, M], ground truth class ids.
gt_bboxes(Tensor): [N, M, (x1, y1, x2, y2)], ground truth bounding
boxes, coord is in format (left, top, right, bottom).
gt_masks(Tensor): [N, M, H, W], ground truth masks.
Returns:
result(list of lists of dict): the outer list is mini-batch, the
inner list is detected objects, the dict contains keys below.
|------------------------------------------------------------------|
|keys in result dict: |
            | 'proposal': (x1, y1, x2, y2), coarse bbox from RPN proposal. |
| 'cls_pred': predicted class id. |
| 'bbox_pred': (x1, y1, x2, y2), refined bbox from head. |
| 'mask_pred': [H, W], predicted mask. |
| |
|e.g. result[0][0]['mask_pred'] stands for the first object's mask |
| prediction of the first image in mini-batch. |
|------------------------------------------------------------------|
"""
if not self.training and (gt_classes is not None
and gt_bboxes is not None
and gt_masks is not None):
self.validating = True
else:
self.validating = False
self._check_input(image, gt_classes, gt_bboxes, gt_masks)
self.img_height, self.img_width = image.size(2), image.size(3)
self.batch_size = image.size(0)
img_shape = image.new(self.batch_size, 2).zero_()
img_shape[:, 0] = self.img_height
img_shape[:, 1] = self.img_width
result, maskrcnn_loss = None, 0
if self.use_fpn:
p2, p3, p4, p5, p6 = self.backbone_fpn(Variable(image, requires_grad=False))
# feature maps to feed RPN to generate proposals.
rpn_features = [p2, p3, p4, p5, p6]
# feature maps to feed prediction heads to refine bbox and predict class and mask.
head_features = [p2, p3, p4, p5]
else:
feature_map = self.backbone(Variable(image, requires_grad=False))
rpn_features = [feature_map]
head_features = [feature_map]
rois, rpn_loss_cls, rpn_loss_bbox = self.rpn(rpn_features, gt_bboxes, img_shape)
if self.train_rpn_only: # only train RPN.
result = self._process_result(self.batch_size, head_features, rois)
rpn_loss = rpn_loss_cls + rpn_loss_bbox
return result, rpn_loss
else: # train RPN + Predict heads together.
if self.training or self.validating:
gen_targets = self._generate_targets(rois, gt_classes, gt_bboxes, gt_masks)
rois_sampled, cls_targets, bbox_targets, mask_targets = gen_targets
cls_prob, bbox_reg, mask_prob = self._run_predict_head(head_features, rois_sampled)
head_loss = MaskRCNN._calc_head_loss(cls_prob, bbox_reg, mask_prob,
cls_targets, bbox_targets, mask_targets)
maskrcnn_loss = rpn_loss_cls + rpn_loss_bbox + head_loss
if not self.training: # valid or test phase
                # rois value will be changed in _run_predict_head(), so make two copies.
rois_head, rois_result = rois.clone(), rois.clone()
cls_prob, bbox_reg, _ = self._run_predict_head(head_features, rois_head)
result = self._process_result(self.batch_size, head_features, rois_result,
cls_prob, bbox_reg)
return result, maskrcnn_loss
def _check_input(self, image, gt_classes=None, gt_bboxes=None, gt_masks=None):
"""check model input.
"""
assert image.dim() == 4 and image.size(1) == 3
if self.training or self.validating:
assert gt_classes.dim() == 2
assert gt_bboxes.dim() == 3 and gt_bboxes.size(-1) == 4
assert gt_masks.dim() == 4
def _run_predict_head(self, features, rois):
"""Run classification, bounding box regression and mask prediction heads.
Args:
features(list of Variable): extracted features from backbone
rois(Tensor): [N, M (idx, score, x1, y1, x2, y2)]
Returns:
cls_prob(Variable): [(NxM), num_classes]
bbox_reg(Variable): [(NxM), num_classes, (dx, dy, dw, dh)]
mask_prob(Variable or None): [(NxM), num_classes, 28, 28] when training, None when
                testing; at test time the mask head uses refined bboxes, which
                self._process_result() handles.
"""
mask_prob = None
rois = rois.view(-1, 6) # [N, M, 6] -> [(NxM), 6]
rois_bbox, rois_mask = rois.clone(), rois.clone()
if self.use_fpn:
rois_pooling_clsbbox = self._roi_align_fpn(features, rois_bbox, mode='clsbbox')
cls_prob, bbox_reg = self.clsbbox_head(rois_pooling_clsbbox)
if self.training or self.validating:
rois_pooling_mask = self._roi_align_fpn(features, rois_mask, mode='mask')
mask_prob = self.mask_head(rois_pooling_mask)
else:
rois_pooling_clsbbox = self.roi_align_clsbbox(features[0], rois_bbox, self.img_height)
cls_prob, bbox_reg = self.clsbbox_head(rois_pooling_clsbbox)
if self.training or self.validating:
rois_pooling_mask = self.roi_align_mask(features[0], rois_mask, self.img_height)
mask_prob = self.mask_head(rois_pooling_mask)
return cls_prob, bbox_reg, mask_prob
def _generate_targets(self, proposals, gt_classes, gt_bboxes, gt_masks, mask_size=(28, 28)):
"""Generate Mask R-CNN targets, and corresponding rois.
Args:
proposals(Tensor): [N, a, (idx, score, x1, y1, x2, y2)], proposals from RPN,
idx is batch size index.
gt_classes(Tensor): [N, b], ground truth class ids.
gt_bboxes(Tensor): [N, b, (x1, y1, x2, y2)], ground truth bounding boxes.
            gt_masks(Tensor): [N, b, H, W], ground truth masks.
Returns:
sampled_rois(Tensor): [N, c, (idx, score, x1, y1, x2, y2)], proposals after sampled to
feed RoIAlign.
cls_targets(Variable): [(Nxc)], train targets for classification.
bbox_targets(Variable): [(Nxc), (dx, dy, dw, dh)], train targets for bounding box
regression, see R-CNN paper for meaning details.
mask_targets(Variable): [(Nxc), 28, 28], train targets for mask prediction.
        Notes: a: number of proposals from RPN, b: number of ground truth objects, c: number
of rois to train.
"""
rois_sample_size = int(self.config['TRAIN']['ROIS_SAMPLE_SIZE'])
rois_pos_fraction = float(self.config['TRAIN']['ROIS_POS_FRACTION'])
rois_pos_thresh = float(self.config['TRAIN']['ROIS_POS_THRESH'])
rois_neg_thresh = float(self.config['TRAIN']['ROIS_NEG_THRESH'])
batch_size = proposals.size(0)
        # TODO: add support for batch_size >= 2.
        assert batch_size == 1, "support for batch_size >= 2 will be added later."
# get rid of batch size dim, need change when support batch_size >= 1.
proposals = proposals.squeeze(0)
gt_classes = gt_classes.squeeze(0)
gt_bboxes = gt_bboxes.squeeze(0)
gt_masks = gt_masks.squeeze(0)
iou = calc_iou(proposals[:, 2:], gt_bboxes[:, :])
max_iou, max_iou_idx_gt = torch.max(iou, dim=1)
pos_index_prop = torch.nonzero(max_iou >= rois_pos_thresh).view(-1)
neg_index_prop = torch.nonzero(max_iou < rois_neg_thresh).view(-1)
        # if pos_index_prop or neg_index_prop is empty, return a background sample.
if pos_index_prop.numel() == 0 or neg_index_prop.numel() == 0:
cls_targets = gt_classes.new([0])
bbox_targets = MaskRCNN._get_bbox_targets(proposals[:1, 2:], proposals[:1, 2:])
mask_targets = gt_masks.new(1, mask_size[0], mask_size[1]).zero_()
sampled_rois = proposals[:1, :]
sampled_rois = sampled_rois.view(batch_size, -1, 6)
cls_targets = Variable(cls_targets, requires_grad=False)
bbox_targets = Variable(bbox_targets, requires_grad=False)
mask_targets = Variable(mask_targets, requires_grad=False)
return sampled_rois, cls_targets, bbox_targets, mask_targets
pos_index_gt = max_iou_idx_gt[pos_index_prop]
sample_size_pos = int(rois_pos_fraction * rois_sample_size)
pos_num = pos_index_prop.size(0)
neg_num = neg_index_prop.size(0)
sample_size_pos = min(sample_size_pos, pos_num)
# keep the ratio of positive and negative rois, if there are not enough positives.
sample_size_neg = int((sample_size_pos / rois_pos_fraction) * (1 - rois_pos_fraction) + 1)
sample_size_neg = min(sample_size_neg, neg_num)
sample_index_pos = random.sample(range(pos_num), sample_size_pos)
sample_index_neg = random.sample(range(neg_num), sample_size_neg)
pos_index_sampled_prop = pos_index_prop[sample_index_pos]
neg_index_sampled_prop = neg_index_prop[sample_index_neg]
pos_index_sampled_gt = pos_index_gt[sample_index_pos]
index_proposal = torch.cat([pos_index_sampled_prop, neg_index_sampled_prop])
sampled_rois = proposals[index_proposal, :]
# targets for classification, positive rois use gt_class id, negative use 0 as background.
cls_targets_pos = gt_classes[pos_index_sampled_gt]
cls_targets_neg = gt_classes.new([0 for _ in range(sample_size_neg)])
cls_targets = torch.cat([cls_targets_pos, cls_targets_neg])
        # bbox regression targets are defined on positive proposals.
bboxes = proposals[:, 2:]
bbox_targets = MaskRCNN._get_bbox_targets(bboxes[pos_index_sampled_prop, :],
gt_bboxes[pos_index_sampled_gt, :])
        # mask targets are defined on positive proposals.
mask_targets = MaskRCNN._get_mask_targets(bboxes[pos_index_sampled_prop, :],
gt_masks[pos_index_sampled_gt, :, :],
mask_size)
sampled_rois = sampled_rois.view(batch_size, -1, 6)
return sampled_rois, Variable(cls_targets), Variable(bbox_targets), Variable(mask_targets)
def _refine_proposal(self, proposal, bbox_reg):
"""Refine proposal bbox with the result of bbox regression.
Args:
proposal(Tensor): (x1, y1, x2, y2), bbox proposal from RPN.
bbox_reg(Tensor): (dx, dy, dw, dh), bbox regression value.
Returns:
bbox_refined(Tensor): (x1, y1, x2, y2)
"""
x, y, w, h = bbox_corner2center(proposal).chunk(4)
dx, dy, dw, dh = bbox_reg.chunk(4)
px, py = w * dx + x, h * dy + y
pw, ph = w * torch.exp(dw), h * torch.exp(dh)
bbox_refined = bbox_center2corner(torch.cat([px, py, pw, ph]))
px1, py1, px2, py2 = bbox_refined.chunk(4)
px1 = torch.clamp(px1, max=self.img_width - 1, min=0)
px2 = torch.clamp(px2, max=self.img_width - 1, min=0)
py1 = torch.clamp(py1, max=self.img_height - 1, min=0)
py2 = torch.clamp(py2, max=self.img_height - 1, min=0)
bbox_refined = torch.cat([px1, py1, px2, py2])
return bbox_refined
def _roi_align_fpn(self, fpn_features, rois, mode):
"""When use fpn backbone, set RoiAlign use different levels of fpn feature pyramid
according to RoI size.
Args:
fpn_features(list of Variable): [p2, p3, p4, p5]],
rois(Tensor): [(NxM), (n, score, x1, y1, x2, y2)], RPN proposals.
mode(str): 'clsbbox': roi_align for cls and bbox head, 'mask': roi_align for mask head.
Returns:
rois_pooling: [(NxM), C, pool_size, pool_size], rois after use RoIAlign.
"""
assert mode in ['clsbbox', 'mask']
rois_levels = [[] for _ in range(len(fpn_features))]
rois_pool_result = []
# iterate bbox to find which level of pyramid features to feed.
for roi in rois:
bbox = roi[2:]
            # In the FPN paper alpha is 224 with an image short side of 800 px;
            # to support smaller inputs (e.g. short side 256), alpha is scaled
            # here by the image's short side.
alpha = 224 * min(self.img_height, self.img_width) / 800
bbox_width = torch.abs(rois.new([bbox[2] - bbox[0] + 1]).float())
bbox_height = torch.abs(rois.new([bbox[3] - bbox[1] + 1]).float())
log2 = torch.log(torch.sqrt(bbox_height * bbox_width) / alpha) / torch.log(
rois.new([2]).float())
level = torch.floor(4 + log2) - 2 # 4 stands for C4, minus 2 to make level 0 indexed
# rois small or big enough may get level below 0 or above 3.
level = int(torch.clamp(level, 0, 3))
roi.unsqueeze_(0)
rois_levels[level].append(roi)
for level in range(len(fpn_features)):
if len(rois_levels[level]) != 0:
if mode == 'clsbbox':
roi_pool_per_level = self.roi_align_clsbbox(fpn_features[level],
torch.cat(rois_levels[level]),
self.img_height)
else:
roi_pool_per_level = self.roi_align_mask(fpn_features[level],
torch.cat(rois_levels[level]),
self.img_height)
rois_pool_result.append(roi_pool_per_level)
rois_pooling = torch.cat(rois_pool_result)
return rois_pooling
def _process_result(self, batch_size, features, proposals, cls_prob=None, bbox_reg=None):
"""Get the final result in test stage.
Args:
batch_size(int): mini-batch size.
features(list of Variable): extracted features from backbone
proposals(Tensor): [N, M, (idx, score, x1, y1, x2, y2)]
cls_prob(Variable): [(NxM), num_classes]
bbox_reg(Variable): [(NxM), num_classes, (x1, y1, x2, y2)]
Returns:
result: list of lists of dict, outer list is mini-batch, inner list is detected objects,
dict contains stuff below.
dict_key:
                'proposal'(Tensor): (x1, y1, x2, y2), coarse bbox from RPN proposal.
'cls_pred'(int): predicted class id.
'bbox_pred'(Tensor): (x1, y1, x2, y2), refined bbox from prediction head.
'mask_pred'(Tensor): [H, W], predicted mask.
e.g. result[0][0]['mask_pred'] stands for the first object's mask prediction of
the first image of mini-batch.
"""
        # TODO: support batch_size > 1.
        assert batch_size == 1, "support for batch_size > 1 will be added later"
proposals = proposals.squeeze(0)
result = []
if self.train_rpn_only:
obj_detected = []
for i in range(proposals.size(0)):
pred_dict = {'proposal': proposals[i, 2:].cpu()}
obj_detected.append(pred_dict)
result.append(obj_detected)
return result
else:
props = []
bboxes = []
cls_ids = []
for idx, roi in enumerate(proposals):
cls_id = torch.max(cls_prob[idx], dim=0)[1]
if int(cls_id) > 0: # remove background
# refine proposal bbox with bbox regression result.
bbox = self._refine_proposal(roi[2:],
bbox_reg[idx, :, :][cls_id, :].squeeze(0).data)
px1, py1, px2, py2 = bbox
                    # skip malformed bboxes
if py1 >= py2 or px1 >= px2:
continue
props.append(roi.unsqueeze(0))
bboxes.append(bbox.unsqueeze(0))
cls_ids.append(int(cls_id))
if len(props) != 0:
props_origin = torch.cat(props)
props_refined = props_origin.clone()
props_refined[:, 2:] = torch.cat(bboxes)
else:
result.append([])
return result
# Apply nms.
if self.use_fpn:
pre_nms_top_n = int(self.config['FPN']['TEST_FPN_PRE_NMS_TOP_N'])
post_nms_top_n = int(self.config['FPN']['TEST_FPN_POST_NMS_TOP_N'])
nms_thresh = float(self.config['FPN']['TEST_FPN_NMS_THRESH'])
else:
pre_nms_top_n = int(self.config['RPN']['TEST_RPN_PRE_NMS_TOP_N'])
post_nms_top_n = int(self.config['RPN']['TEST_RPN_POST_NMS_TOP_N'])
nms_thresh = float(self.config['RPN']['TEST_RPN_NMS_THRESH'])
score = props_refined[:, 1]
order = torch.sort(score, dim=0, descending=True)[1]
props_origin = props_origin[order, :][:pre_nms_top_n, :]
props_refined = props_refined[order, :][:pre_nms_top_n, :]
score = props_refined[:, 1].unsqueeze(-1)
bbox = props_refined[:, 2:]
keep_idx = nms(torch.cat([bbox, score], 1), nms_thresh)
keep_idx = keep_idx[:post_nms_top_n]
props_origin = torch.cat([props_origin[idx, :].unsqueeze(0) for idx in keep_idx])
props_refined = torch.cat([props_refined[idx, :].unsqueeze(0) for idx in keep_idx])
if self.use_fpn:
rois_pooling_mask = self._roi_align_fpn(features, props_refined.clone(),
mode='mask')
mask_prob = self.mask_head(rois_pooling_mask).data
else:
rois_pooling_mask = self.roi_align_mask(features[0], props_refined.clone(),
self.img_height)
mask_prob = self.mask_head(rois_pooling_mask).data
obj_detected = []
for i in range(len(props_origin)):
pred_dict = {'proposal': props_origin[i, 2:].cpu(), 'cls_pred': cls_ids[i],
'bbox_pred': props_refined[i, 2:].cpu(), 'mask_pred': None}
px1, py1, px2, py2 = props_refined[i, 2:].int()
mask_height, mask_width = py2 - py1 + 1, px2 - px1 + 1
mask = mask_prob[i, :, :, :][cls_ids[i], :, :]
mask = Variable(mask.unsqueeze(0), requires_grad=False)
mask_resize = F.adaptive_avg_pool2d(mask, (mask_height, mask_width)).data
mask_threshold = float(self.config['TEST']['MASK_THRESH'])
mask_resize = mask_resize >= mask_threshold
mask_pred = mask_prob.new(self.img_height, self.img_width).zero_()
mask_pred[py1:py2 + 1, px1:px2 + 1] = mask_resize
pred_dict['mask_pred'] = mask_pred.cpu()
obj_detected.append(pred_dict)
result.append(obj_detected)
return result
@staticmethod
def _get_bbox_targets(proposals, gt_bboxes):
""" Calculate bounding box targets, input coord format is (left, top, right, bottom),
see R-CNN paper for the formula.
Args:
proposals(Tensor): [n, 4]
gt_bboxes(Tensor): [n, 4]
Returns:
bbox_targets(Tensor): [n, 4]
"""
proposals = bbox_corner2center(proposals)
gt_bboxes = bbox_corner2center(gt_bboxes)
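        # R-CNN parameterization: t_xy = (g_xy - p_xy) / p_wh, t_wh = log(g_wh / p_wh)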
xy = (gt_bboxes[:, :2] - proposals[:, :2]) / proposals[:, 2:]
wh = torch.log(gt_bboxes[:, 2:] / proposals[:, 2:])
x, y = xy.chunk(2, dim=1)
w, h = wh.chunk(2, dim=1)
bbox_targets = torch.cat([x, y, w, h], dim=1)
return bbox_targets
@staticmethod
def _get_mask_targets(proposals, gt_masks, mask_size):
"""Get mask targets, mask target is intersection between proposal and ground
truth mask, input coord format is (left, top, right, bottom).
Args:
proposals(Tensor): [num_rois, 4]
            gt_masks(Tensor): [num_rois, H, W]
mask_size(tuple): (mask_height, mask_width)
Returns:
mask_targets(Tensor): [num_rois, mask_height, mask_width]
"""
num_rois = proposals.size(0)
img_height = gt_masks.size(1)
img_width = gt_masks.size(2)
mask_targets = gt_masks.new(num_rois, mask_size[0], mask_size[1]).zero_()
for i in range(num_rois):
x1, y1, x2, y2 = proposals[i, :]
x1 = int(max(min(img_width - 1, x1), 0))
x2 = int(max(min(img_width - 1, x2), 0))
y1 = int(max(min(img_height - 1, y1), 0))
y2 = int(max(min(img_height - 1, y2), 0))
mask = Variable(gt_masks[i, y1:y2 + 1, x1:x2 + 1], requires_grad=False)
            # mask.unsqueeze(0) works around an F.adaptive_avg_pool2d silent crash.
mask_resize = F.adaptive_avg_pool2d(mask.unsqueeze(0), output_size=mask_size)
mask_targets[i, :, :] = mask_resize.data[0]
return mask_targets
@staticmethod
def _calc_head_loss(cls_prob, bbox_reg, mask_prob, cls_targets, bbox_targets, mask_targets):
""" Calculate Mask R-CNN loss.
Args:
cls_prob(Variable): [(NxS), num_classes], classification predict probability.
bbox_reg(Variable): [(NxS), num_classes, (dx, dy, dw, dh)], bounding box regression.
mask_prob(Variable): [(NxS), num_classes, H, W], mask prediction.
cls_targets(Variable): [(NxS)], classification targets.
bbox_targets(Variable): [(NxPositive), (dx, dy, dw, dh)], bounding box regression targets.
mask_targets(Variable): [(NxPositive), H, W], mask targets.
Returns:
maskrcnn_loss: Total loss of Mask R-CNN predict heads.
Notes: In above, S: number of sampled rois feed to prediction heads.
"""
# calculate classification head loss.
cls_loss = F.nll_loss(cls_prob, cls_targets)
# calculate bbox regression and mask head loss.
bbox_loss, mask_loss = 0, 0
num_foreground = bbox_targets.size(0)
for i in range(num_foreground):
cls_id = int(cls_targets[i])
# Only corresponding class prediction contribute to bbox and mask loss.
bbox_loss += F.smooth_l1_loss(bbox_reg[i, cls_id, :], bbox_targets[i, :])
mask_loss += F.binary_cross_entropy(mask_prob[i, cls_id, :, :], mask_targets[i, :, :])
bbox_loss /= num_foreground
mask_loss /= num_foreground
head_loss = cls_loss + bbox_loss + mask_loss
return head_loss
| null |
maskrcnn.py
|
maskrcnn.py
|
py
| 26,912 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "torch.nn.Module",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "configparser.ConfigParser",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "os.path.abspath",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 44,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "backbones.resnet_fpn.ResNetFPN",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "backbones.resnet.ResNet",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "proposal.rpn.RPN",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "pooling.roi_align.RoiAlign",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "pooling.roi_align.RoiAlign",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "heads.cls_bbox.ClsBBoxHead_fc",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "heads.mask.MaskHead",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "torch.autograd.Variable",
"line_number": 117,
"usage_type": "call"
},
{
"api_name": "torch.autograd.Variable",
"line_number": 123,
"usage_type": "call"
},
{
"api_name": "tools.detect_utils.calc_iou",
"line_number": 231,
"usage_type": "call"
},
{
"api_name": "torch.max",
"line_number": 232,
"usage_type": "call"
},
{
"api_name": "torch.nonzero",
"line_number": 233,
"usage_type": "call"
},
{
"api_name": "torch.nonzero",
"line_number": 234,
"usage_type": "call"
},
{
"api_name": "torch.autograd.Variable",
"line_number": 243,
"usage_type": "call"
},
{
"api_name": "torch.autograd.Variable",
"line_number": 244,
"usage_type": "call"
},
{
"api_name": "torch.autograd.Variable",
"line_number": 245,
"usage_type": "call"
},
{
"api_name": "random.sample",
"line_number": 259,
"usage_type": "call"
},
{
"api_name": "random.sample",
"line_number": 260,
"usage_type": "call"
},
{
"api_name": "torch.cat",
"line_number": 266,
"usage_type": "call"
},
{
"api_name": "torch.cat",
"line_number": 272,
"usage_type": "call"
},
{
"api_name": "torch.autograd.Variable",
"line_number": 284,
"usage_type": "call"
},
{
"api_name": "tools.detect_utils.bbox_corner2center",
"line_number": 297,
"usage_type": "call"
},
{
"api_name": "proposal.rpn",
"line_number": 297,
"usage_type": "argument"
},
{
"api_name": "torch.exp",
"line_number": 300,
"usage_type": "call"
},
{
"api_name": "tools.detect_utils.bbox_center2corner",
"line_number": 301,
"usage_type": "call"
},
{
"api_name": "torch.cat",
"line_number": 301,
"usage_type": "call"
},
{
"api_name": "torch.clamp",
"line_number": 304,
"usage_type": "call"
},
{
"api_name": "torch.clamp",
"line_number": 305,
"usage_type": "call"
},
{
"api_name": "torch.clamp",
"line_number": 306,
"usage_type": "call"
},
{
"api_name": "torch.clamp",
"line_number": 307,
"usage_type": "call"
},
{
"api_name": "torch.cat",
"line_number": 309,
"usage_type": "call"
},
{
"api_name": "torch.abs",
"line_number": 336,
"usage_type": "call"
},
{
"api_name": "torch.abs",
"line_number": 337,
"usage_type": "call"
},
{
"api_name": "torch.log",
"line_number": 338,
"usage_type": "call"
},
{
"api_name": "torch.sqrt",
"line_number": 338,
"usage_type": "call"
},
{
"api_name": "torch.floor",
"line_number": 340,
"usage_type": "call"
},
{
"api_name": "torch.clamp",
"line_number": 342,
"usage_type": "call"
},
{
"api_name": "torch.cat",
"line_number": 350,
"usage_type": "call"
},
{
"api_name": "torch.cat",
"line_number": 354,
"usage_type": "call"
},
{
"api_name": "torch.cat",
"line_number": 357,
"usage_type": "call"
},
{
"api_name": "torch.max",
"line_number": 402,
"usage_type": "call"
},
{
"api_name": "torch.cat",
"line_number": 416,
"usage_type": "call"
},
{
"api_name": "torch.cat",
"line_number": 418,
"usage_type": "call"
},
{
"api_name": "torch.sort",
"line_number": 434,
"usage_type": "call"
},
{
"api_name": "libs.nms.pth_nms.pth_nms",
"line_number": 439,
"usage_type": "call"
},
{
"api_name": "torch.cat",
"line_number": 439,
"usage_type": "call"
},
{
"api_name": "torch.cat",
"line_number": 441,
"usage_type": "call"
},
{
"api_name": "torch.cat",
"line_number": 442,
"usage_type": "call"
},
{
"api_name": "torch.autograd.Variable",
"line_number": 460,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional.adaptive_avg_pool2d",
"line_number": 461,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional",
"line_number": 461,
"usage_type": "name"
},
{
"api_name": "tools.detect_utils.bbox_corner2center",
"line_number": 484,
"usage_type": "call"
},
{
"api_name": "tools.detect_utils.bbox_corner2center",
"line_number": 485,
"usage_type": "call"
},
{
"api_name": "torch.log",
"line_number": 487,
"usage_type": "call"
},
{
"api_name": "torch.cat",
"line_number": 490,
"usage_type": "call"
},
{
"api_name": "torch.autograd.Variable",
"line_number": 517,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional.adaptive_avg_pool2d",
"line_number": 519,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional",
"line_number": 519,
"usage_type": "name"
},
{
"api_name": "torch.nn.functional.nll_loss",
"line_number": 544,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional",
"line_number": 544,
"usage_type": "name"
},
{
"api_name": "torch.nn.functional.smooth_l1_loss",
"line_number": 552,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional",
"line_number": 552,
"usage_type": "name"
},
{
"api_name": "torch.nn.functional.binary_cross_entropy",
"line_number": 553,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional",
"line_number": 553,
"usage_type": "name"
}
] |
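The RoI-to-pyramid-level rule in `_roi_align_fpn` above follows the FPN paper's k = floor(k0 + log2(sqrt(w*h) / alpha)), with alpha rescaled by the image's short side. A minimal sketch under those assumptions (function and argument names are illustrative):

import math

def fpn_level(x1, y1, x2, y2, img_short_side, k0=4, canonical=224, canonical_short=800):
    # rescale the canonical box size by the input's short side, as in the record
    alpha = canonical * img_short_side / canonical_short
    w, h = x2 - x1 + 1, y2 - y1 + 1
    k = math.floor(k0 + math.log2(math.sqrt(w * h) / alpha))
    return min(max(k - 2, 0), 3)  # clamp to a 0-indexed level in [p2, p3, p4, p5]

print(fpn_level(10, 10, 120, 90, img_short_side=256))  # -> 2, i.e. pool from p4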
127277110
|
import json
def total_score(j):
    total = 0  # avoid shadowing the built-in sum()
    parsed = json.loads(j)
    for key, value in parsed.items():
        total += value
    data = {}
    data["total_score"] = total
return json.dumps(data)
#return data ? why not?
print(total_score('{"john": 10, "steve": 31}'))
| null |
S17week2/JSON.py
|
JSON.py
|
py
| 273 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "json.loads",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 9,
"usage_type": "call"
}
] |
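A quick check of the record's function, plus an equivalent one-expression rewrite (this snippet assumes total_score from the record is in scope):

import json

payload = '{"john": 10, "steve": 31}'
assert json.loads(total_score(payload))["total_score"] == 41
print(sum(json.loads(payload).values()))  # 41 -- same total in one expression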
39253422
|
from bs4 import BeautifulSoup
import urllib.request
import urllib.error  # catching urllib.error.HTTPError below requires this import
import re
def crawl(start_page, distance, action):
visited = set()
queue = [[start_page, distance]]
def getLinks(url):
try:
html_page = urllib.request.urlopen(url)
except urllib.error.HTTPError as e:
return []
soup = BeautifulSoup(html_page, features="lxml")
links = []
for link in soup.findAll('a', attrs={'href': re.compile("^http://")}):
links.append(link.get('href'))
return links
def bfs():
while queue:
page = queue.pop(0)
if page[0] not in visited:
yield (page[0], action(page[0]))
visited.add(page[0])
links = getLinks(page[0])
if page[1] > 0:
for i in links:
queue.append([i, page[1]-1])
return bfs()
def findAll(page):
try:
html = urllib.request.urlopen(page).read()
except urllib.error.HTTPError as e:
return []
soup = BeautifulSoup(html, features="lxml", from_encoding="UTF-8")
for script in soup(["script", "style"]):
script.extract() # rip it out
text = soup.get_text()
lines = (line.strip() for line in text.splitlines())
chunks = (phrase.strip() for line in lines for phrase in line.split(" "))
text = '\n'.join(chunk for chunk in chunks if chunk)
return re.findall(r"([^.]*?Python[^.]*\.)",text)
it = crawl("https://github.com/", 4, findAll)
for i in it:
print(i)
| null |
zima2019/RozszerzonyPython/Lista6/zadanie1.py
|
zadanie1.py
|
py
| 1,550 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "urllib.request.request.urlopen",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "urllib.request.request",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "urllib.request",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "urllib.request.error",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "urllib.request",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "urllib.request.request.urlopen",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "urllib.request.request",
"line_number": 33,
"usage_type": "attribute"
},
{
"api_name": "urllib.request",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "urllib.request.error",
"line_number": 34,
"usage_type": "attribute"
},
{
"api_name": "urllib.request",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "re.findall",
"line_number": 45,
"usage_type": "call"
}
] |
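The crawler above pops from the head of a Python list, which is O(n) per pop. A sketch of the same breadth-first frontier using collections.deque (illustrative helper, not from the record):

from collections import deque

def bfs_order(start, neighbors, max_depth):
    """Yield nodes breadth-first, at most max_depth hops from start."""
    visited = {start}
    frontier = deque([(start, 0)])
    while frontier:
        node, depth = frontier.popleft()  # O(1), unlike list.pop(0)
        yield node
        if depth < max_depth:
            for nxt in neighbors(node):
                if nxt not in visited:
                    visited.add(nxt)
                    frontier.append((nxt, depth + 1))

graph = {"a": ["b", "c"], "b": ["d"], "c": [], "d": []}
print(list(bfs_order("a", graph.get, 2)))  # ['a', 'b', 'c', 'd']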
509837062
|
import os
import sys
import json
import pydoc
import datetime
import psycopg2
from tabulate import tabulate
from argparse import ArgumentParser
REPORT = """\n\n\n\n\n\n==================================ORDER==================================
order-id: {orderId}
billAmount: {billAmount}
status: {status}
timestamp: {ts}
customer:
name: {cname}
phone: {cphone}
address: {address}
{table}
===================================BILL===================================
billAmount: {billAmount}
===================================END===================================
\n\n\n\n\n\n
"""
def process(orders, orderId):
report = []
for order in orders:
print('[+] generating report for {} orderid'.format(order['order_id']))
orderId = order['order_id']
orderAmount = order['order_amount']
orderStatus = order['order_status']
timestamp = order['ts']
name = order['user']['name']
phone = order['user']['phone']
address = order['user']['address']
table = tabulate(
order['products'],
# headers=['Sr No.', 'Product Name', 'Quantity', 'Price', 'Total']
headers=['Sr No.', 'Product Name', 'Quantity']
)
x1 = REPORT.format(
orderId=orderId,
billAmount=orderAmount,
status=orderStatus,
ts=timestamp,
cname=name,
cphone=phone,
address=address,
table=table
)
report.append(x1)
REP = '\n'.join(x for x in report)
filename = '{0}-orders-{1}.txt'.format(datetime.date.today().strftime('%d%m%Y'), orderId)
with open(os.path.join('reports', filename), 'w') as rep_file:
rep_file.write(REP)
print('[+] report written to file {}'.format(filename))
def main(orderId):
connection = None
cursor = None
ORDERS = []
try:
connection = psycopg2.connect("dbname=Ekasta host=db.ekastaplatform.com user=ekasta password=beacon5791 port=5432")
cursor = connection.cursor()
cursor.execute('''SELECT * FROM orders WHERE order_id = {}'''.format(orderId))
orders = cursor.fetchall()
print('[+] found {} order(s)'.format(len(orders)))
for order in orders:
temp_order = {}
orderDBId = order[0]
userId = order[9]
cursor.execute('''SELECT * FROM orderitems JOIN barcodemasters ON orderitems."barcodemasterId" = barcodemasters.id WHERE "orderId" = {0};'''.format(orderDBId))
products = cursor.fetchall()
temp_order['order_id'] = order[1]
temp_order['order_amount'] = order[2]
temp_order['order_status'] = order[3]
temp_order['products'] = []
temp_order['ts'] = order[6]
print('[+] processing {} orderid'.format(order[1]))
for index, product in enumerate(products):
print('[+]\tadd {0} to order with id {1}'.format(product[9], order[1]))
product_O = []
product_O.append(index + 1)
product_O.append(product[9])
product_O.append(product[7])
# product_O.append(product[17])
# product_O.append(product[7] * product[17])
temp_order['products'].append(product_O)
cursor.execute('''SELECT * FROM users WHERE id = {0};'''.format(userId))
user = cursor.fetchone()
temp_order['user'] = {}
temp_order['user']['name'] = user[2]
temp_order['user']['phone'] = user[4]
temp_order['user']['address'] = user[6]
ORDERS.append(temp_order)
except Exception as e:
print(e)
finally:
if connection is not None:
cursor.close()
connection.close()
        process(ORDERS, orderId)
if __name__ == '__main__':
    parse = ArgumentParser()
    parse.add_argument('--order-id', type=str, help='Order Id for which to fetch Info')
    args = parse.parse_args()
    main(args.order_id)
| null |
orderwise.py
|
orderwise.py
|
py
| 3,446 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "tabulate.tabulate",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "datetime.date.today",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "datetime.date",
"line_number": 61,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 62,
"usage_type": "attribute"
},
{
"api_name": "psycopg2.connect",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 115,
"usage_type": "call"
}
] |
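The record above interpolates values directly into SQL strings. With psycopg2 the same queries can be parameterized so the driver handles quoting safely; a minimal sketch with placeholder credentials (not the record's):

import psycopg2

# placeholder connection details, for illustration only
conn = psycopg2.connect(dbname="shop", host="localhost", user="app", password="change-me")
cur = conn.cursor()

order_id = "12345"
# %s placeholders are filled by the driver -- never build SQL with str.format
cur.execute("SELECT * FROM orders WHERE order_id = %s", (order_id,))
orders = cur.fetchall()

cur.close()
conn.close()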