Dataset schema (one table row per source file; ⌀ marks nullable columns):

hexsha: string, length 40
size: int64, 5 to 2.06M
ext: string, 10 classes
lang: string, 1 class
max_stars_repo_path: string, length 3 to 248
max_stars_repo_name: string, length 5 to 125
max_stars_repo_head_hexsha: string, length 40 to 78
max_stars_repo_licenses: list, length 1 to 10
max_stars_count: int64, 1 to 191k, ⌀
max_stars_repo_stars_event_min_datetime: string, length 24, ⌀
max_stars_repo_stars_event_max_datetime: string, length 24, ⌀
max_issues_repo_path: string, length 3 to 248
max_issues_repo_name: string, length 5 to 125
max_issues_repo_head_hexsha: string, length 40 to 78
max_issues_repo_licenses: list, length 1 to 10
max_issues_count: int64, 1 to 67k, ⌀
max_issues_repo_issues_event_min_datetime: string, length 24, ⌀
max_issues_repo_issues_event_max_datetime: string, length 24, ⌀
max_forks_repo_path: string, length 3 to 248
max_forks_repo_name: string, length 5 to 125
max_forks_repo_head_hexsha: string, length 40 to 78
max_forks_repo_licenses: list, length 1 to 10
max_forks_count: int64, 1 to 105k, ⌀
max_forks_repo_forks_event_min_datetime: string, length 24, ⌀
max_forks_repo_forks_event_max_datetime: string, length 24, ⌀
content: string, length 5 to 2.06M
avg_line_length: float64, 1 to 1.02M
max_line_length: int64, 3 to 1.03M
alphanum_fraction: float64, 0 to 1
count_classes: int64, 0 to 1.6M
score_classes: float64, 0 to 1
count_generators: int64, 0 to 651k
score_generators: float64, 0 to 1
count_decorators: int64, 0 to 990k
score_decorators: float64, 0 to 1
count_async_functions: int64, 0 to 235k
score_async_functions: float64, 0 to 1
count_documentation: int64, 0 to 1.04M
score_documentation: float64, 0 to 1
a96d57b61d8688819ccbbbd9291ae22fdd80039b | 566 | py | Python | sqlite_framework/sql/item/constraint/table/base.py | alvarogzp/python-sqlite-framework | 29db97a64f95cfe13eb7bae1d00b624b5a37b152 | ["Apache-2.0"] | 1 | 2020-08-29T12:42:11.000Z | 2020-08-29T12:42:11.000Z | sqlite_framework/sql/item/constraint/table/base.py | alvarogzp/python-sqlite-framework | 29db97a64f95cfe13eb7bae1d00b624b5a37b152 | ["Apache-2.0"] | 4 | 2018-05-07T19:36:30.000Z | 2018-05-29T05:18:13.000Z | sqlite_framework/sql/item/constraint/table/base.py | alvarogzp/python-sqlite-framework | 29db97a64f95cfe13eb7bae1d00b624b5a37b152 | ["Apache-2.0"] | null | null | null |

from sqlite_framework.sql.item.base import SqlItem
from sqlite_framework.sql.item.column import Column
class TableConstraint(SqlItem):
def str(self):
raise NotImplementedError()
class ColumnListTableConstraint(TableConstraint):
def __init__(self, constraint_type: str, *columns: Column):
super().__init__()
self.type = constraint_type
self.columns = columns
def str(self):
columns = ", ".join(column.name for column in self.columns)
return "{type} ({columns})".format(type=self.type, columns=columns)
| 29.789474 | 75 | 0.701413 | 457 | 0.80742 | 0 | 0 | 0 | 0 | 0 | 0 | 24 | 0.042403 |
a97015a85173cf78e85bed10e73b68dc69502a9d | 78 | py | Python | pydemic/report/__init__.py | GCES-Pydemic/pydemic | f221aa16e6a32ed1303fa11ebf8a357643f683d5 | ["MIT"] | null | null | null | pydemic/report/__init__.py | GCES-Pydemic/pydemic | f221aa16e6a32ed1303fa11ebf8a357643f683d5 | ["MIT"] | null | null | null | pydemic/report/__init__.py | GCES-Pydemic/pydemic | f221aa16e6a32ed1303fa11ebf8a357643f683d5 | ["MIT"] | null | null | null |

from .report_group import GroupReport
from .report_single import SingleReport
| 26 | 39 | 0.871795 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
a97282eeac0597449d543922ed87821479844a39 | 724 | py | Python | src/baboon_tracking/mixins/history_frames_mixin.py | radioactivebean0/baboon-tracking | 062351c514073aac8e1207b8b46ca89ece987928 | ["MIT"] | 6 | 2019-07-15T19:10:59.000Z | 2022-02-01T04:25:26.000Z | src/baboon_tracking/mixins/history_frames_mixin.py | radioactivebean0/baboon-tracking | 062351c514073aac8e1207b8b46ca89ece987928 | ["MIT"] | 86 | 2019-07-02T17:59:46.000Z | 2022-02-01T23:23:08.000Z | src/baboon_tracking/mixins/history_frames_mixin.py | radioactivebean0/baboon-tracking | 062351c514073aac8e1207b8b46ca89ece987928 | ["MIT"] | 7 | 2019-10-16T12:58:21.000Z | 2022-03-08T00:31:32.000Z |

"""
Mixin for returning history frames.
"""
from collections import deque
from typing import Deque
from rx.core.typing import Observable
from baboon_tracking.models.frame import Frame
class HistoryFramesMixin:
"""
Mixin for returning history frames.
"""
def __init__(self, history_frame_count: int, history_frame_popped: Observable):
self.history_frames: Deque[Frame] = deque([])
self.history_frame_popped = history_frame_popped
self._history_frame_count = history_frame_count
def is_full(self):
"""
Returns true if the history frame deque is full.
"""
return len(self.history_frames) >= self._history_frame_count
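
# Usage sketch (illustrative; RxPY's Subject stands in here for the pipeline's
# real history_frame_popped observable, and frame_a/frame_b for Frame objects):
#
# from rx.subject import Subject
# mixin = HistoryFramesMixin(history_frame_count=2, history_frame_popped=Subject())
# mixin.history_frames.append(frame_a)
# mixin.history_frames.append(frame_b)
# assert mixin.is_full()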
| 25.857143 | 84 | 0.68232 | 523 | 0.722376 | 0 | 0 | 0 | 0 | 0 | 0 | 172 | 0.237569 |
a972d95469a20ffc4d590103acea6ae8f6b2b426 | 1,746 | py | Python | src/elm_doc/tasks/html.py | brilliantorg/elm-doc | 69ddbcd57aee3da6283c2497d735951d95b85426 | ["BSD-3-Clause"] | 29 | 2017-02-01T11:58:44.000Z | 2021-05-21T15:18:33.000Z | src/elm_doc/tasks/html.py | brilliantorg/elm-doc | 69ddbcd57aee3da6283c2497d735951d95b85426 | ["BSD-3-Clause"] | 143 | 2017-07-26T17:34:44.000Z | 2022-03-01T18:01:43.000Z | src/elm_doc/tasks/html.py | brilliantorg/elm-doc | 69ddbcd57aee3da6283c2497d735951d95b85426 | ["BSD-3-Clause"] | 7 | 2018-03-09T10:04:45.000Z | 2021-10-19T19:17:40.000Z |

import json
import html
from pathlib import Path
from elm_doc.utils import Namespace
# Note: title tag is omitted, as the Elm app sets the title after
# it's initialized.
PAGE_TEMPLATE = '''
<!DOCTYPE html>
<html>
<head>
<meta charset="UTF-8">
<link rel="shortcut icon" size="16x16, 32x32, 48x48, 64x64, 128x128, 256x256" href="{mount_point}/assets/favicon.ico">
<link rel="stylesheet" href="{mount_point}/assets/style.css">
<script src="{mount_point}/artifacts/elm.js"></script>
<script src="{mount_point}/assets/highlight/highlight.pack.js"></script>
<link rel="stylesheet" href="{mount_point}/assets/highlight/styles/default.css">
</head>
<body>
<script>
try {{
const fontsLink = document.createElement("link");
fontsLink.href = "{mount_point}/assets/fonts/" + ((navigator.userAgent.indexOf("Macintosh") > -1) ? "_hints_off.css" : "_hints_on.css");
fontsLink.rel = "stylesheet";
document.head.appendChild(fontsLink);
}} catch(e) {{
// loading the font is not essential; log the error and move on
console.log(e);
}}
Elm.Main.init({init});
</script>
</body>
</html>
''' # noqa: E501
def _render(mount_point: str = ''):
if mount_point and mount_point[-1] == '/':
mount_point = mount_point[:-1]
init = {
'flags': {
'mountedAt': mount_point,
},
}
return PAGE_TEMPLATE.format(
mount_point=html.escape(mount_point),
init=json.dumps(init))
class actions(Namespace):
def write(output_path: Path, mount_point: str = ''):
output_path.parent.mkdir(parents=True, exist_ok=True)
with open(str(output_path), 'w') as f:
f.write(_render(mount_point=mount_point))
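
# Usage sketch (illustrative; the output path is hypothetical):
#
# actions.write(Path('docs/index.html'), mount_point='/docs')
#
# With mount_point='/docs', every asset and script URL in the emitted page is
# prefixed with /docs; a trailing slash on the mount point is stripped by
# _render before formatting.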
| 30.103448 | 142 | 0.643757 | 245 | 0.140321 | 0 | 0 | 0 | 0 | 0 | 0 | 1,096 | 0.627721 |
a9747260a42549b174eafc1943184e3614f86276 | 1,031 | py | Python | pyPico/2.传感器实验/6.水位传感器/main.py | 01studio-lab/MicroPython_Examples | f06a1bee398674ceafebed2aac88d8413cc8abad | ["MIT"] | 73 | 2020-05-02T13:48:27.000Z | 2022-03-26T13:15:10.000Z | pyPico/2.传感器实验/6.水位传感器/main.py | 01studio-lab/MicroPython_Examples | f06a1bee398674ceafebed2aac88d8413cc8abad | ["MIT"] | null | null | null | pyPico/2.传感器实验/6.水位传感器/main.py | 01studio-lab/MicroPython_Examples | f06a1bee398674ceafebed2aac88d8413cc8abad | ["MIT"] | 50 | 2020-05-15T13:57:28.000Z | 2022-03-30T14:03:33.000Z |

'''
Experiment: water level sensor
Version: v1.0
Date: 2021.1
Author: 01Studio [www.01Studio.org]
Description: measure the water level with a water level sensor and display it.
'''
# import the required modules
import time
from machine import Pin, SoftI2C, ADC
from ssd1306 import SSD1306_I2C

# initialize the OLED
i2c = SoftI2C(scl=Pin(10), sda=Pin(11))  # software I2C init: scl --> 10, sda --> 11
oled = SSD1306_I2C(128, 64, i2c, addr=0x3c)  # OLED init: 128*64 resolution, I2C address 0x3c

# initialize ADC channel 1 (Pin 27)
Water_level = ADC(1)

while True:
    oled.fill(0)  # clear the screen to a black background
    oled.text('01Studio', 0, 0)  # first line: show 01Studio
    oled.text('Water Level test', 0, 15)  # second line: show the experiment name
    value = Water_level.read_u16()  # read the ADC value
    # show the raw value
    oled.text(str(value) + ' (65535)', 0, 40)
    # convert to voltage: the 0-65535 reading maps to 0-3.3 V; '%.2f' keeps 2 decimals
    oled.text(str('%.2f' % (value/65535*3.3)) + ' V', 0, 55)
    # classify the water level into 5 bands, 0-4 cm
    if 0 <= value <= 9602:
        oled.text('0cm', 60, 55)
    if 9602 < value <= 14403:
        oled.text('1cm', 60, 55)
    if 14403 < value <= 19204:
        oled.text('2cm', 60, 55)
    if 19204 < value <= 20804:
        oled.text('3cm', 60, 55)
    if 20804 < value:
        oled.text('4cm', 60, 55)
    oled.show()
    time.sleep_ms(1000)
| 19.826923 | 81 | 0.651794 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 633 | 0.487298 |
a975a7568cae17acd3d7b4203c548d145cfe9d6a | 147 | py | Python | src/assisters/mytypes.py | khyreek/Codeforcescord-Bot | b47ce6b1bf779e6d3f904b3dcb2a811b74e90b17 | ["Apache-2.0"] | null | null | null | src/assisters/mytypes.py | khyreek/Codeforcescord-Bot | b47ce6b1bf779e6d3f904b3dcb2a811b74e90b17 | ["Apache-2.0"] | null | null | null | src/assisters/mytypes.py | khyreek/Codeforcescord-Bot | b47ce6b1bf779e6d3f904b3dcb2a811b74e90b17 | ["Apache-2.0"] | null | null | null |

from typing import Annotated
Problem = Annotated[str, "problem code used by Codeforces problems, e.g. 1348B"]
ProblemWidth = int
CFSSectionsData = tuple[int, ...]
| 18.375 | 61 | 0.734694 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 35 | 0.238095 |
a9767449042e9e6827a47f70074761e36edb412a | 2,666 | py | Python | nb.py | corytaitchison/online-reviews | 10de9218137658269ba36849dfa7e8f643335d01 | ["MIT"] | null | null | null | nb.py | corytaitchison/online-reviews | 10de9218137658269ba36849dfa7e8f643335d01 | ["MIT"] | null | null | null | nb.py | corytaitchison/online-reviews | 10de9218137658269ba36849dfa7e8f643335d01 | ["MIT"] | null | null | null |

import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
###
import nltk
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
# from nltk.stem import WordNetLemmatizer
from nltk.stem import PorterStemmer
###
from sklearn.naive_bayes import MultinomialNB
from sklearn.metrics import confusion_matrix, classification_report
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction.text import CountVectorizer
###
from loadRandom import loadRandom2
ps = PorterStemmer()
# lemmatizer = WordNetLemmatizer()
def textProcess(text):
stopWords = set(stopwords.words('english'))
noPunc = word_tokenize(text)
return [ps.stem(word) for word in noPunc if word not in stopWords]
if __name__ == '__main__':
_seed = 123
_observations = 1e4
_subsets = [1, 2, 3, 4]
location = '/Users/caitchison/Documents/Yelp/yelp_dataset/restaurants_only.csv'
data = loadRandom2(location, _observations, seed=_seed, n=3778803).loc[:,
('text', 'useful', 'cool', 'funny', 'stars_x')]
# Calculate "interaction" score
data['interactions'] = data.useful + data.cool + data.funny
data = data[data['interactions'] >= _subsets[0]].dropna()
# Subset to get equal amounts of low-useful and high-useful
masks = [data.interactions == x for x in _subsets]
masks.append(data.interactions > _subsets[-1])
subsetSize = min([sum(mask) for mask in masks])
print("Creating subsets of size %i" % subsetSize)
newData = pd.DataFrame([])
for mask in masks:
df = data[mask].sample(n=subsetSize, random_state=_seed)
newData = newData.append(df)
data = newData
# Split interactions into quantiles (5)
data['group'] = pd.qcut(data['interactions'], q=5, labels=False)
print(pd.qcut(data['interactions'], q=5).cat.categories)
    data = data.rename(columns={"stars_x": "stars"})  # assign the result; rename is not in-place
# Create a bag of words and convert the text to a sparse matrix
text = np.array(data['text'])
bow = CountVectorizer(analyzer=textProcess).fit(text)
print("Unique (Not Stop) Words:", len(bow.vocabulary_))
text = bow.transform(text)
# Split into features for testing and training at 30%
xTrain, xTest, yTrain, yTest = train_test_split(
text, np.array(data['group']), test_size=0.3, random_state=_seed)
# Train model (Multinomial Naive Bayes)
nb = MultinomialNB()
nb.fit(xTrain, yTrain)
# Test and Evaluate Model
preds = nb.predict(xTest)
print(confusion_matrix(yTest, preds))
print('\n')
print(classification_report(yTest, preds))
| 33.746835 | 122 | 0.686422 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 667 | 0.250188 |
a9767fe03cd95cd1ee4f89e2a2b53d9dc840600a | 4,032 | py | Python | 1_Basics:warmup/2_TweetsFilter/twitter_exerciseB.py | ferreiro/Python_course | 73eb41e248d702741a4109a78b15ef8e5e6341f2 | ["MIT"] | 2 | 2016-02-15T04:12:22.000Z | 2021-09-05T23:26:53.000Z | 1_Basics:warmup/2_TweetsFilter/twitter_exerciseB.py | ferreiro/Python-course | 73eb41e248d702741a4109a78b15ef8e5e6341f2 | ["MIT"] | 10 | 2015-10-16T14:37:41.000Z | 2015-11-16T22:29:39.000Z | 2_TwitterAPI/twitter_exerciseB.py | ferreiro/Python | 9a0292d4898571fcef95546eec977d3138c7c23b | ["Apache-2.0"] | null | null | null |

# -*- coding: utf-8 -*-
import csv
import json
outdirectory = "outputCSV/"
tweetsFile = "tweets.txt"
outputFile = "mostUsedHashtags.csv"
tweetsList = []  # list that will contain all the tweets read from a file
hashtagTable = {}  # dictionary with key = hashtag and value = frequency of this hashtag
""" Returns a list of tweets read from a file.
If there is a problem, None is returned. """
def loadTweets(inputFilename):
tweetsList = [] # returns a list of tweets
try:
openedFile = open(inputFilename, "r");
for line in openedFile:
tweet = json.loads(line);
if not tweet.has_key('delete'):
tweetsList.append(tweet);
# else: skip objects with "delete" key
openedFile.close(); # Close the file
except:
return None;
return tweetsList;
""" Creates a hasmap frecuency table where keys are the hashtags and
values are the number or appeareances of that hashtag in all the twetts.
Returns None if we coudn't create the Hashmap and a dictionary if everything works"""
def createHashtagFrecuencyTable(inputList):
if (not isinstance(inputList, list)):
return None; # exit function if the input object is not a list
try:
hashtagTable = {} # create empty dictionary
for tweet in inputList: # iterate all the tweets loaded in the list
for hashtag in tweet['entities']['hashtags']: # iterate all the hastags for each tweet
                hashtagName = hashtag['text']  # get a hashtag from the tweet
if (hashtagName in hashtagTable):
hashtagTable[hashtagName] += 1; # Hashtag was previously added to the dictionary. Increase value by one
else:
hashtagTable[hashtagName] = 1; # Hashtag wasn't in the directionary. Add it with 1 value
except:
return None;
return hashtagTable
""" Returns a ordered hasmap, where the sorting was made taking into acccount
the value of each key on the hasmap and desdending order. """
def orderHashtagTable(dictionary):
if (not isinstance(dictionary, dict)):
return None; # exit function if the input object is not a dictionay
return sorted(dictionary.items(), key = lambda t:t[1], reverse=True); # INFO: https://www.youtube.com/watch?v=MGD_b2w_GU4
""" This function writes header and data to a .csv file pass by value
If the outputFile passed is not a .csv type. A failure will returned (False) """
def writeFile(headerList, data, outputFile):
    success = True  # True means success | False means the file could not be written
    if not outputFile.endswith(".csv"):  # check the file has .csv format; if not, return False
        print "Output file extension %s not valid" % (outputFile[-4:])  # notify that the extension is unsupported
        return False  # output file format not valid
try:
outputFile = open(outputFile, 'w')
csvWriter = csv.writer(outputFile, delimiter=',', skipinitialspace=True, dialect='excel'); # http://stackoverflow.com/questions/29335614/python-csv-writer-leave-a-empty-line-at-the-end-of-the-file
csvWriter.writerow(headerList); # write the header to the csv file
for hashtag in data:
csvWriter.writerow(hashtag);
outputFile.close();
except:
        return False  # problems writing the file
return success;
tweetsList = loadTweets(tweetsFile)  # load a list of tweets from a file
if (tweetsList != None): print "Loading tweets from file...[OK]"
else: print "Loading tweets from file...[ERROR]"
hashtagTable = createHashtagFrecuencyTable(tweetsList)
if (hashtagTable != None): print "Creating hashtags table with its frequencies...[OK]"
else: print "Creating hashtags table with its frequencies...[ERROR]"
orderedHashtagTable = orderHashtagTable(hashtagTable)
if (orderedHashtagTable != None): print "Ordering hashtags table in descending order...[OK]"
else: print "Ordering hashtags table in descending order...[ERROR]"
headerList = ["hashtag", "frequency"]  # .csv header to write to the file
if (writeFile(headerList, orderedHashtagTable[:10], outputFile)): print "Writing csv file with top used hashtags...[OK]"
else: print "Writing csv file with top used hashtags...[ERROR]"
| 33.322314 | 202 | 0.726438 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,241 | 0.55484 |
a976a9884a077db66cbb3f3d300b2d865662f9c4 | 4,346 | py | Python | docker-images/slack-prs/main.py | kiteco/kiteco-public | 74aaf5b9b0592153b92f7ed982d65e15eea885e3 | ["BSD-3-Clause"] | 17 | 2022-01-10T11:01:50.000Z | 2022-03-25T03:21:08.000Z | docker-images/slack-prs/main.py | kiteco/kiteco-public | 74aaf5b9b0592153b92f7ed982d65e15eea885e3 | ["BSD-3-Clause"] | 1 | 2022-01-13T14:28:47.000Z | 2022-01-13T14:28:47.000Z | docker-images/slack-prs/main.py | kiteco/kiteco-public | 74aaf5b9b0592153b92f7ed982d65e15eea885e3 | ["BSD-3-Clause"] | 7 | 2022-01-07T03:58:10.000Z | 2022-03-24T07:38:20.000Z |

import time
import json
import argparse
import websocket
import requests
import github
MY_NAME = 'kit' # should be able to avoid this in the future
TOKEN = 'XXXXXXX'
GITHUB_USERNAME_BY_SLACK_USERNAME = {
"adam": "adamsmith",
# XXXXXXX ...
}
channel_ids_by_name = {}
channel_names_by_id = {}
next_id = 0
def send(conn, channel, text):
global next_id, last_send_timestamp
channel_id = channel_ids_by_name.get(channel, channel)
payload = dict(
id=next_id,
type="message",
channel=channel_id,
text=text)
    msg = json.dumps(payload)
    conn.send(msg)
next_id += 1
last_send_timestamp = time.time()
def slack_escape(s):
s = s.replace("&", "&")
s = s.replace("<", "<")
s = s.replace(">", ">")
return s
def pr_queue_for(github_username, prs, comments_by_pr):
response = ""
for role, pr in github.prs_for(github_username, prs):
title, url, number = pr["title"], pr["html_url"], pr["number"]
comments = comments_by_pr.get(number, None)
if not comments:
comments = github.fetch_comments(number)
comments_by_pr[number] = comments
updates_by_user = github.summarize_updates_for(github_username, comments)
if len(updates_by_user) == 0:
update_msg = "no updates"
else:
update_msg = ", ".join("%d new from %s" % (count, user) for user, count in updates_by_user.items())
response += 'you are *%s* for %s %s: *%s*\n' % (role, url, slack_escape(title), update_msg)
if response == "":
return "you are not on any pull requests"
else:
return response
def updates_since(github_username, prs, comments_by_pr, since):
response = ""
for role, pr in github.prs_for(github_username, prs):
title, url, number = pr["title"], pr["html_url"], pr["number"]
comments = comments_by_pr.get(number, None)
if not comments:
comments = github.fetch_comments(number)
comments_by_pr[number] = comments
updates_by_user = github.summarize_updates_since(github_username, comments, since)
if updates_by_user:
status = ", ".join("%d new from %s" % (count, user) for user, count in updates_by_user.items())
response += '*%s* (%s) %s\n' % (status, url, slack_escape(title))
return response
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--daily", action="store_true")
parser.add_argument("--since", type=str)
args = parser.parse_args()
conn = None
user_ids_by_name = {}
user_names_by_id = {}
im_channel_by_user = {}
# Get messaging setup info
payload = dict(token=TOKEN)
r = requests.post('https://slack.com/api/rtm.start', data=payload).json()
if r["ok"]:
print("Successfully connected to messaging API")
else:
print("Error:\n" + str(r))
return
    # Unpack general info
    dial_url = r["url"]

    # Unpack user info
users = r["users"]
for user in users:
name = user["name"]
id = user["id"]
user_ids_by_name[name] = id
user_names_by_id[id] = name
# Unpack channel info
channels = r["channels"]
for channel in channels:
name = channel["name"]
id = channel["id"]
channel_ids_by_name[name] = id
channel_names_by_id[id] = name
for im_channel in r["ims"]:
im_channel_by_user[user_names_by_id[im_channel["user"]]] = im_channel["id"]
# Open websocket
conn = websocket.create_connection(dial_url)
print("Connected")
# Send private messages
prs = github.fetch_prs()
comments = {}
if args.daily:
for user, ch in im_channel_by_user.items():
github_username = GITHUB_USERNAME_BY_SLACK_USERNAME.get(user, None)
if github_username:
print('Sending PM to %s...' % user)
msg = pr_queue_for(github_username, prs, comments)
print(msg.replace("\n", "\n "))
send(conn, ch, "Here is your daily pull request update:\n" + msg)
else:
since = 0
try:
if args.since:
# Read prev timestamp
with open(args.since) as f:
since = float(f.read().strip())
# Write new timestamp
with open(args.since, "w") as f:
f.write(str(time.time()))
except (IOError, ValueError):
pass
for user, ch in im_channel_by_user.items():
github_username = GITHUB_USERNAME_BY_SLACK_USERNAME.get(user, None)
if github_username:
msg = updates_since(github_username, prs, comments, since)
if msg:
print('Sending PM to %s...' % user)
print(msg)
send(conn, ch, msg)
if __name__ == '__main__':
try:
main()
except KeyboardInterrupt:
pass
| 24.834286 | 102 | 0.685228 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 781 | 0.179705 |
a9775f738c3044fcff42b57c7ed49ac310db7479 | 656 | py | Python | commands/meme.py | EFFLUX110/efflux-discord-bot | fe382fc822f852efab8d4742daa756045a17bff3 | ["MIT"] | null | null | null | commands/meme.py | EFFLUX110/efflux-discord-bot | fe382fc822f852efab8d4742daa756045a17bff3 | ["MIT"] | 4 | 2022-02-03T18:24:32.000Z | 2022-02-03T19:24:51.000Z | commands/meme.py | EFFLUX110/efflux-discord-bot | fe382fc822f852efab8d4742daa756045a17bff3 | ["MIT"] | 1 | 2022-02-03T18:12:44.000Z | 2022-02-03T18:12:44.000Z |

import discord
import requests
from discord.ext import commands
class Meme(commands.Cog):
def __init__(self, bot):
self.bot = bot
@commands.command()
async def meme(self,ctx):
r=requests.get("https://memes.blademaker.tv/api?lang=en")
res=r.json()
title=res['title']
ups=res['ups']
downs=res['downs']
sub=res['subreddit']
m=discord.Embed(title=f"{title}\nsubreddit: {sub}")
m.set_image(url=res["image"])
m.set_footer(text=f"Requested by {ctx.author}", icon_url=ctx.author.avatar_url)
await ctx.send(embed=m)
def setup(bot):
    bot.add_cog(Meme(bot))
 | 28.521739 | 87 | 0.617378 | 547 | 0.833841 | 0 | 0 | 460 | 0.70122 | 436 | 0.664634 | 134 | 0.204268 |
a977697bb7ffe10b5b5f5a391df5f58451adfd57 | 717 | py | Python | 45.py | brianfl/project-euler | 9f83a3c2da04fd0801a4a575081add665edccd5f | ["MIT"] | null | null | null | 45.py | brianfl/project-euler | 9f83a3c2da04fd0801a4a575081add665edccd5f | ["MIT"] | null | null | null | 45.py | brianfl/project-euler | 9f83a3c2da04fd0801a4a575081add665edccd5f | ["MIT"] | null | null | null |

target_num = 0
j = 0
while target_num == 0:
    pent_ind = float((1 + (1 + 24*j*(2*j-1))**.5)/6)
    tri_ind = float((-1 + (1 + 8*j*(2*j-1))**.5)/2)  # take the square root before shifting
if pent_ind.is_integer() and tri_ind.is_integer():
num = j*(2*j-1)
if num != 1 and num != 40755:
target_num = num
j += 1
print(target_num) # 1533776805
"""
I had a brute-force solution, but it took a bit over a minute.
By solving for the index values of pentagonal and triangular numbers
in terms of the index value of the hexagonal numbers,
the formulas in pent_ind and tri_ind pop out of the quadratic formula.
Basically, those variables will only be integers if the hexagonal number
at index j is a valid pentagonal number and triangular number as well.
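
For reference, with H_j = j(2j - 1) the j-th hexagonal number, equating the
pentagonal and triangular formulas to H_j and applying the quadratic formula
gives the two index expressions used above:
    P_n = n(3n - 1)/2 = H_j  =>  n = (1 + sqrt(1 + 24*H_j)) / 6
    T_m = m(m + 1)/2  = H_j  =>  m = (-1 + sqrt(1 + 8*H_j)) / 2
H_j is hexagonal by construction, so it is simultaneously pentagonal and
triangular exactly when both n and m come out as positive integers.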
""" | 29.875 | 71 | 0.661088 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 393 | 0.548117 |
a97827ef5e7685a79286da4ad9d58d63d84d97d6 | 801 | py | Python | client.py | hani9/smartlockers | bd7a996be58769341367d58d5c80c70ad7bd1cb6 | ["MIT"] | null | null | null | client.py | hani9/smartlockers | bd7a996be58769341367d58d5c80c70ad7bd1cb6 | ["MIT"] | null | null | null | client.py | hani9/smartlockers | bd7a996be58769341367d58d5c80c70ad7bd1cb6 | ["MIT"] | null | null | null |

#!/usr/bin/python
# -*- coding: utf-8 -*-
# import the libraries
import socket
import RPi.GPIO as GPIO
import time

# basic GPIO configuration
GPIO.setwarnings(False)
GPIO.setmode(GPIO.BCM)
GPIO.setup(18, GPIO.OUT)  # only pin 18 is used; a loop could drive several pins at once

# server IP and communication port
host = "PLACE_YOUR_SERVER_IP_HERE"
port = 12345

# start an infinite loop
while 1:
    s = socket.socket()  # create the socket
    s.connect((host, port))  # connect to the server
    data = s.recv(1024)  # receive data
    GPIO.output(int(data), GPIO.HIGH)  # the received data is the GPIO pin to set HIGH
    time.sleep(1)  # wait 1 second
    GPIO.output(int(data), GPIO.LOW)  # set the pin back LOW
    s.close()  # close the connection
 | 26.7 | 103 | 0.705368 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 451 | 0.55886 |
a978a3e063f71ae417a8f86e87e70e36b033503d | 16,820 | py | Python | src/mlpro/rl/pool/envmodels/mlp_robotinhtm.py | fhswf/MLPro | e944b69bed9c2d5548677711270e4a4fe868aea9 | ["Apache-2.0"] | 5 | 2022-01-31T15:52:19.000Z | 2022-03-21T18:34:27.000Z | src/mlpro/rl/pool/envmodels/mlp_robotinhtm.py | fhswf/MLPro | e944b69bed9c2d5548677711270e4a4fe868aea9 | ["Apache-2.0"] | 61 | 2021-12-17T13:03:59.000Z | 2022-03-31T10:24:37.000Z | src/mlpro/rl/pool/envmodels/mlp_robotinhtm.py | fhswf/MLPro | e944b69bed9c2d5548677711270e4a4fe868aea9 | ["Apache-2.0"] | null | null | null |

## -------------------------------------------------------------------------------------------------
## -- Project : MLPro - A Synoptic Framework for Standardized Machine Learning Tasks
## -- Package : mlpro.rl.envmodels
## -- Module : mlp_robotinhtm
## -------------------------------------------------------------------------------------------------
## -- History :
## -- yyyy-mm-dd Ver. Auth. Description
## -- 2021-12-17 0.0.0 MRD Creation
## -- 2021-12-17 1.0.0 MRD Released first version
## -- 2021-12-20 1.0.1 DA Replaced 'done' by 'success'
## -- 2021-12-21 1.0.2 DA Class MLPEnvMdel: renamed method reset() to _reset()
## -- 2022-01-02 2.0.0 MRD Refactoring due to the changes on afct pool on
## -- TorchAFctTrans
## -- 2022-02-25 2.0.1 SY Refactoring due to auto generated ID in class Dimension
## -------------------------------------------------------------------------------------------------
"""
Ver. 2.0.1 (2022-02-25)
This module provides an environment model based on an MLP neural network for
the robotinhtm environment.
"""
import torch
import transformations
from mlpro.rl.models import *
from mlpro.rl.pool.envs.robotinhtm import RobotArm3D
from mlpro.rl.pool.envs.robotinhtm import RobotHTM
from mlpro.sl.pool.afct.afctrans_pytorch import TorchAFctTrans
from torch.utils.data.sampler import SubsetRandomSampler
from collections import deque
def init(module, weight_init, bias_init, gain=1):
weight_init(module.weight.data, gain=gain)
bias_init(module.bias.data)
return module
class RobotMLPModel(torch.nn.Module):
def __init__(self, n_joint, timeStep):
super(RobotMLPModel, self).__init__()
self.n_joint = n_joint
self.timeStep = timeStep
self.hidden = 128
init_ = lambda m: init(m, torch.nn.init.orthogonal_, lambda x: torch.nn.init.
constant_(x, 0), np.sqrt(2))
self.model1 = torch.nn.Sequential(
init_(torch.nn.Linear(self.n_joint,self.hidden)),
torch.nn.Tanh(),
init_(torch.nn.Linear(self.hidden,self.hidden)),
torch.nn.Tanh(),
init_(torch.nn.Linear(self.hidden,self.hidden)),
torch.nn.Tanh(),
init_(torch.nn.Linear(self.hidden,7*(self.n_joint+1))),
torch.nn.Tanh()
)
    def forward(self, I):
        batch_size = I.shape[0]
        # I holds [angular velocities, joint angles]; integrate one step: theta + velocity * dt
        newI = I.reshape(batch_size, 2, self.n_joint) * torch.cat([torch.Tensor([self.timeStep]).repeat(1, self.n_joint), torch.ones(1, self.n_joint)])
        newI = torch.sum(newI, dim=1)
        out2 = self.model1(newI)
        # one 7-vector (xyz position + quaternion) per link, including the end effector
        out2 = out2.reshape(batch_size, self.n_joint + 1, 7)
        return out2
class IOElement(BufferElement):
def __init__(self, p_input: torch.Tensor, p_output: torch.Tensor):
super().__init__({"input": p_input, "output": p_output})
# Buffer
class MyOwnBuffer(Buffer, torch.utils.data.Dataset):
def __init__(self, p_size=1):
Buffer.__init__(self, p_size=p_size)
self._internal_counter = 0
def add_element(self, p_elem: BufferElement):
Buffer.add_element(self, p_elem)
self._internal_counter += 1
def get_internal_counter(self):
return self._internal_counter
def __getitem__(self,idx):
return self._data_buffer["input"][idx], self._data_buffer["output"][idx]
class RobothtmAFct(TorchAFctTrans):
C_NAME = "Robothtm Adaptive Function"
C_BUFFER_CLS = MyOwnBuffer
def _setup_model(self):
self.joint_num = self._output_space.get_num_dim() - 6
self.net_model = RobotMLPModel(self.joint_num, 0.01)
self.optimizer = torch.optim.Adam(self.net_model.parameters(), lr=3e-4)
self.loss_dyn = torch.nn.MSELoss()
self.train_model = True
self.input_temp = None
self.sim_env = RobotArm3D()
joints = []
jointType = []
vectLinkLength = [[0, 0, 0], [0, 0, 0]]
jointType.append("rz")
for joint in range(self.joint_num - 1):
vectLinkLength.append([0, 0.7, 0])
jointType.append("rx")
jointType.append("f")
for x in range(len(jointType)):
vectorLink = dict(x=vectLinkLength[x][0], y=vectLinkLength[x][1], z=vectLinkLength[x][2])
joint = dict(
Joint_name="Joint %d" % x,
Joint_type=jointType[x],
Vector_link_length=vectorLink,
)
joints.append(joint)
for robo in joints:
self.sim_env.add_link_joint(
lvector=torch.Tensor(
[
[
robo["Vector_link_length"]["x"],
robo["Vector_link_length"]["y"],
robo["Vector_link_length"]["z"],
]
]
),
jointAxis=robo["Joint_type"],
thetaInit=torch.Tensor([np.radians(0)]),
)
self.sim_env.update_joint_coords()
def _input_preproc(self, p_input: torch.Tensor) -> torch.Tensor:
input = torch.cat([p_input[0][6+self.joint_num:], p_input[0][6:6+self.joint_num]])
input = input.reshape(1,self.joint_num*2)
self.input_temp = p_input[0][:3].reshape(1,3)
return input
def _output_postproc(self, p_output: torch.Tensor) -> torch.Tensor:
angles = torch.Tensor([])
thets = torch.zeros(3)
for idx in range(self.joint_num):
angle = torch.Tensor(transformations.euler_from_quaternion(p_output[-1][idx][3:].detach().numpy(), axes="rxyz")) - thets
thets = torch.Tensor(transformations.euler_from_quaternion(p_output[-1][idx][3:].detach().numpy(), axes="rxyz"))
angles = torch.cat([angles, torch.norm(angle).reshape(1, 1)], dim=1)
output = torch.cat([self.input_temp, p_output[-1][-1][:3].reshape(1,3)], dim=1)
output = torch.cat([output, angles], dim=1)
return output
def _adapt(self, p_input: Element, p_output: Element) -> bool:
model_input = deque(p_input.get_values()[6:])
model_input.rotate(self.joint_num)
model_input = torch.Tensor([list(model_input)])
self.sim_env.set_theta(torch.Tensor([p_output.get_values()[6 : 6 + self.joint_num]]))
self.sim_env.update_joint_coords()
model_output = self.sim_env.convert_to_quaternion().reshape(1,self.joint_num+1,7)
self._add_buffer(IOElement(model_input, model_output))
if self._buffer.get_internal_counter() % 100 != 0:
return False
# Divide Test and Train
if self.train_model:
dataset_size = len(self._buffer)
indices = list(range(dataset_size))
split = int(np.floor(0.3 * dataset_size))
np.random.seed(random.randint(1,1000))
np.random.shuffle(indices)
train_indices, test_indices = indices[split:], indices[:split]
train_sampler = SubsetRandomSampler(train_indices)
test_sampler = SubsetRandomSampler(test_indices)
trainer = torch.utils.data.DataLoader(self._buffer, batch_size=100, sampler=train_sampler)
tester = torch.utils.data.DataLoader(self._buffer, batch_size=100, sampler=test_sampler)
# Training
self.net_model.train()
for i, (In, Label) in enumerate(trainer):
outputs = self.net_model(In)
loss = self.loss_dyn(outputs, Label)
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
test_loss = 0
self.net_model.eval()
for i, (In, Label) in enumerate(tester):
outputs = self.net_model(In)
loss = self.loss_dyn(outputs, Label)
test_loss += loss.item()
if test_loss/len(tester) < 5e-9:
self.train_model = False
return True
def _add_buffer(self, p_buffer_element: IOElement):
self._buffer.add_element(p_buffer_element)
class MLPEnvModel(EnvModel, Mode):
    C_NAME = "MLP Env Model"
def __init__(
self,
p_num_joints=4,
p_target_mode="Random",
p_ada=True,
p_logging=False,
):
# Define all the adaptive function here
self.RobotArm1 = RobotArm3D()
roboconf = {}
roboconf["Joints"] = []
jointType = []
vectLinkLength = [[0, 0, 0], [0, 0, 0]]
jointType.append("rz")
for joint in range(p_num_joints - 1):
vectLinkLength.append([0, 0.7, 0])
jointType.append("rx")
jointType.append("f")
for x in range(len(jointType)):
vectorLink = dict(x=vectLinkLength[x][0], y=vectLinkLength[x][1], z=vectLinkLength[x][2])
joint = dict(
Joint_name="Joint %d" % x,
Joint_type=jointType[x],
Vector_link_length=vectorLink,
)
roboconf["Joints"].append(joint)
roboconf["Target_mode"] = p_target_mode
roboconf["Update_rate"] = 0.01
for robo in roboconf["Joints"]:
self.RobotArm1.add_link_joint(
lvector=torch.Tensor(
[
[
robo["Vector_link_length"]["x"],
robo["Vector_link_length"]["y"],
robo["Vector_link_length"]["z"],
]
]
),
jointAxis=robo["Joint_type"],
thetaInit=torch.Tensor([np.radians(0)]),
)
self.RobotArm1.update_joint_coords()
self.jointangles = self.RobotArm1.thetas
self.dt = roboconf["Update_rate"]
self.modes = roboconf["Target_mode"]
self.target = None
self.init_distance = None
self.num_joint = self.RobotArm1.get_num_joint()
self.reach = torch.norm(torch.Tensor([[0.0, 0.0, 0.0]]) - self.RobotArm1.joints[:3, [-1]].reshape(1, 3))
# Setup space
# 1 Setup state space
obs_space = ESpace()
obs_space.add_dim(Dimension("Tx", "Targetx", "", "m", "m", p_boundaries=[-np.inf, np.inf]))
obs_space.add_dim(Dimension("Ty", "Targety", "", "m", "m", p_boundaries=[-np.inf, np.inf]))
obs_space.add_dim(Dimension("Tz", "Targetz", "", "m", "m", p_boundaries=[-np.inf, np.inf]))
obs_space.add_dim(Dimension("Px", "Targetx", "", "m", "m", p_boundaries=[-np.inf, np.inf]))
obs_space.add_dim(Dimension("Py", "Targety", "", "m", "m", p_boundaries=[-np.inf, np.inf]))
obs_space.add_dim(Dimension("Pz", "Targetz", "", "m", "m", p_boundaries=[-np.inf, np.inf]))
for idx in range(self.num_joint):
obs_space.add_dim(
Dimension("J%i" % (idx), "Joint%i" % (idx), "", "deg", "deg", p_boundaries=[-np.inf, np.inf])
)
# 2 Setup action space
action_space = ESpace()
for idx in range(self.num_joint):
action_space.add_dim(
Dimension(
"A%i" % (idx),
"AV%i" % (idx),
"",
"rad/sec",
"\frac{rad}{sec}",
p_boundaries=[-np.pi, np.pi],
)
)
# Setup Adaptive Function
# HTM Function Here
afct_strans = AFctSTrans(
RobothtmAFct,
p_state_space=obs_space,
p_action_space=action_space,
p_threshold=-1,
p_buffer_size=10000,
p_ada=p_ada,
p_logging=p_logging,
)
EnvModel.__init__(
self,
p_observation_space=obs_space,
p_action_space=action_space,
p_latency=timedelta(seconds=self.dt),
p_afct_strans=afct_strans,
p_afct_reward=None,
p_afct_success=None,
p_afct_broken=None,
p_ada=p_ada,
p_logging=p_logging,
)
Mode.__init__(self, p_mode=Mode.C_MODE_SIM, p_logging=p_logging)
if self.modes == "random":
num = random.random()
if num < 0.2:
self.target = torch.Tensor([[0.5, 0.5, 0.5]])
self.init_distance = torch.norm(self.RobotArm1.joints[:3, [-1]].reshape(1, 3) - self.target)
elif num < 0.4:
self.target = torch.Tensor([[0.0, 0.5, 0.5]])
self.init_distance = torch.norm(self.RobotArm1.joints[:3, [-1]].reshape(1, 3) - self.target)
elif num < 0.6:
self.target = torch.Tensor([[-0.5, 0.0, 0.5]])
self.init_distance = torch.norm(self.RobotArm1.joints[:3, [-1]].reshape(1, 3) - self.target)
elif num < 0.8:
self.target = torch.Tensor([[0.0, -0.5, 0.5]])
self.init_distance = torch.norm(self.RobotArm1.joints[:3, [-1]].reshape(1, 3) - self.target)
else:
self.target = torch.Tensor([[-0.5, -0.5, 0.5]])
self.init_distance = torch.norm(self.RobotArm1.joints[:3, [-1]].reshape(1, 3) - self.target)
else:
self.target = torch.Tensor([[0.5, 0.5, 0.5]])
self.init_distance = torch.norm(self.RobotArm1.joints[:3, [-1]].reshape(1, 3) - self.target)
self.reset()
## -------------------------------------------------------------------------------------------------
def _compute_success(self, p_state: State = None) -> bool:
# disterror = np.linalg.norm(p_state.get_values()[:3] - p_state.get_values()[3:6])
disterror = np.linalg.norm(np.array(p_state.get_values())[:3] - np.array(p_state.get_values())[3:6])
if disterror <= 0.1:
self._state.set_terminal(True)
return True
else:
return False
## -------------------------------------------------------------------------------------------------
def _compute_broken(self, p_state: State) -> bool:
return False
## -------------------------------------------------------------------------------------------------
def _compute_reward(self, p_state_old: State, p_state_new: State) -> Reward:
reward = Reward(self.C_REWARD_TYPE)
# disterror = np.linalg.norm(p_state_new.get_values()[:3] - p_state_new.get_values()[3:6])
disterror = np.linalg.norm(np.array(p_state_new.get_values())[:3] - np.array(p_state_new.get_values())[3:6])
ratio = disterror / self.init_distance.item()
rew = -np.ones(1) * ratio
rew = rew - 10e-2
if disterror <= 0.1:
rew = rew + 1
rew = rew.astype("float64")
reward.set_overall_reward(rew)
return reward
def set_theta(self, theta):
self.RobotArm1.thetas = theta.reshape(self.num_joint)
self.RobotArm1.update_joint_coords()
self.jointangles = self.RobotArm1.thetas
def _reset(self, p_seed=None) -> None:
self.set_random_seed(p_seed)
theta = torch.zeros(self.RobotArm1.get_num_joint())
self.RobotArm1.set_theta(theta)
self.RobotArm1.update_joint_coords()
self.jointangles = self.RobotArm1.thetas
if self.modes == "random":
num = random.random()
if num < 0.2:
self.target = torch.Tensor([[0.5, 0.5, 0.5]])
self.init_distance = torch.norm(self.RobotArm1.joints[:3, [-1]].reshape(1, 3) - self.target)
elif num < 0.4:
self.target = torch.Tensor([[0.0, 0.5, 0.5]])
self.init_distance = torch.norm(self.RobotArm1.joints[:3, [-1]].reshape(1, 3) - self.target)
elif num < 0.6:
self.target = torch.Tensor([[-0.5, 0.0, 0.5]])
self.init_distance = torch.norm(self.RobotArm1.joints[:3, [-1]].reshape(1, 3) - self.target)
elif num < 0.8:
self.target = torch.Tensor([[0.0, -0.5, 0.5]])
self.init_distance = torch.norm(self.RobotArm1.joints[:3, [-1]].reshape(1, 3) - self.target)
else:
self.target = torch.Tensor([[-0.5, -0.5, 0.5]])
self.init_distance = torch.norm(self.RobotArm1.joints[:3, [-1]].reshape(1, 3) - self.target)
obs = torch.cat(
[
self.target,
self.RobotArm1.joints[:3, [-1]].reshape(1, 3),
self.RobotArm1.thetas.reshape(1, self.num_joint),
],
dim=1,
)
obs = obs.cpu().flatten().tolist()
self._state = State(self._state_space)
self._state.set_values(obs)
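
# Usage sketch (illustrative): the mode checks in __init__ and _reset compare
# against the lowercase string "random", so pass p_target_mode="random"
# (rather than the default "Random") to actually randomize the target.
#
# env_model = MLPEnvModel(p_num_joints=4, p_target_mode="random", p_logging=False)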
| 39.299065 | 146 | 0.542866 | 15,189 | 0.903032 | 0 | 0 | 0 | 0 | 0 | 0 | 2,363 | 0.140488 |
a979eac6a7daaac0fe50d966818c9860d5136601 | 3,474 | py | Python | pyxlpr/data/icdar/__init__.py | XLPRUtils/pyUtils | 3a62c14b0658ad3c24d83f953ee0d88530b02b23 | ["Apache-2.0"] | 15 | 2020-06-09T07:03:07.000Z | 2022-02-25T06:59:34.000Z | pyxlpr/data/icdar/__init__.py | XLPRUtils/pyUtils | 3a62c14b0658ad3c24d83f953ee0d88530b02b23 | ["Apache-2.0"] | 5 | 2020-08-08T07:11:21.000Z | 2020-08-08T07:11:24.000Z | pyxlpr/data/icdar/__init__.py | XLPRUtils/pyUtils | 3a62c14b0658ad3c24d83f953ee0d88530b02b23 | ["Apache-2.0"] | 2 | 2020-06-09T07:03:26.000Z | 2020-12-31T06:50:37.000Z |

#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @Author : 陈坤泽
# @Email : [email protected]
# @Date : 2021/02/22 10:29
""" 对icdar2013的三种测评方法的接口封装
官方原版处理两个 zip 文件,这里扩展支持目录、内存对象
"""
import re
from pyxllib.xl import File, Dir, shorten
class IcdarEval:
"""
>>> gt = {'1.abc': [[158, 128, 411, 181], [443, 128, 450, 169]], '2': [[176, 189, 456, 274]]}
>>> dt = {'1.abc': [[158, 128, 411, 185], [443, 120, 450, 169]]}
>>> ie = IcdarEval(gt, dt) # 除了内存格式,也兼容原来的zip文件、目录初始化方法
>>> ie.icdar2013()
{'precision': 1.0, 'recall': 0.6667, 'hmean': 0.8}
>>> ie.deteval()
{'precision': 1.0, 'recall': 0.6667, 'hmean': 0.8}
>>> ie.iou()
{'precision': 1.0, 'recall': 0.6667, 'hmean': 0.8, 'AP': 0}
"""
def __init__(self, gt, dt):
""" 输入gt和dt文件
官方原版是支持 【zip文件】,必须要遵循官方原版所有的规则
压缩包里的文件名格式为: gt_img_1.txt, res_img_1.txt
我这里扩展,也支持输入 【目录】,注意这种操作格式,除了文件名也要完全遵守官方的规则
这里文件名降低要求,只匹配出第一个出现的数值
还扩展了内存操作方式,这个格式比官方简洁,不需要遵循官方琐碎的规则,只需要
gt是一个dict
key写图片名或id编号都可以
value写若干个定位框,例如 [[xmin1, ymin1, xmax1, ymax1], [xmin2, ymin2, xmax2, ymax2], ...]
dt同gt,注意key要对应
icdar系列指标,原本是用于文本检测效果的评测,也可以扩展应用到一般性的检测任务
icdar只考虑单类,不考虑多类别问题,如果要加入类别问题,可以修改key达到更精细的分组效果
附,官方原版格式说明
{'1': b'38,43,...', '2':, b'...', ...}
key是图片编号1,2,3...233,其实改成其他各种key也行,就是一个分组概念
value是匹配效果,使用bytes格式,用\r\n作为换行符分开每个检测框
对gt而言,存储x1,y1,x2,y2,label,最后必须要有个label值
对dt而言,存储x1,y1,x2,y2
因为我这里底层做了扩展,所以从IcdarEval入口调用的测评,都是转成了我新的字典数据结构来预测的
"""
self.gt = self.init_label(gt)
self.dt = self.init_label(dt)
@classmethod
def init_label(cls, label):
if isinstance(label, dict):
            # if it is a dict, trust that it is annotated in the official format
            # {'16000,1': b'566,227,673,261,0\n682,210,945,260,0', '16001,1': ...
            return label
        elif isinstance(label, (str, File)) and str(label)[-4:].lower() == '.zip':
            # the official zip-file init path
            return label
        elif Dir.safe_init(label):
            # if the input is a directory, read the files in order of their numeric ids
d = Dir(label)
res = dict()
for f in d.select_files('*.txt'):
k = re.search(r'\d+', f.stem).group()
res[k] = f.read(mode='b')
return res
else:
raise TypeError(shorten(label))
def _eval(self, evaluate_method, default_evaluation_params, update_params):
eval_params = default_evaluation_params()
if update_params:
eval_params.update(update_params)
eval_data = evaluate_method(self.gt, self.dt, eval_params)
        # the eval_data dict also keeps per-image details under 'per_sample'
        res = {k: round(v, 4) for k, v in eval_data['method'].items()}  # keep 4 decimals for readability
return res
def icdar2013(self, params=None):
from pyxllib.data.icdar.icdar2013 import evaluate_method, default_evaluation_params
return self._eval(evaluate_method, default_evaluation_params, params)
def deteval(self, params=None):
from pyxllib.data.icdar.deteval import evaluate_method, default_evaluation_params
return self._eval(evaluate_method, default_evaluation_params, params)
def iou(self, params=None):
from pyxllib.data.icdar.iou import evaluate_method, default_evaluation_params
return self._eval(evaluate_method, default_evaluation_params, params)
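
# Usage sketch (illustrative; the file names are hypothetical). Besides the
# in-memory dicts shown in the class doctest, gt/dt can be the official zip
# files, or directories of *.txt files whose stems contain the image number:
#
# ie = IcdarEval('gt.zip', 'submit.zip')
# print(ie.icdar2013(), ie.deteval(), ie.iou())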
| 35.814433 | 97 | 0.600173 | 4,185 | 0.928762 | 0 | 0 | 820 | 0.18198 | 0 | 0 | 2,770 | 0.614736 |
a97a18817825892c952ac7174c04fcf55fabab56 | 6,441 | py | Python | MTL/features.py | usc-sail/mica-riskybehavior-identification | dd8d1bb795ca1b8273625713887c6c4b747fd542 | ["MIT"] | 2 | 2020-11-19T21:22:53.000Z | 2021-02-25T00:29:38.000Z | MTL/features.py | usc-sail/mica-riskybehavior-identification | dd8d1bb795ca1b8273625713887c6c4b747fd542 | ["MIT"] | null | null | null | MTL/features.py | usc-sail/mica-riskybehavior-identification | dd8d1bb795ca1b8273625713887c6c4b747fd542 | ["MIT"] | 1 | 2021-02-05T22:45:51.000Z | 2021-02-05T22:45:51.000Z |

import os
import numpy as np
import torch
from transformers import BertTokenizer
from tensorflow.keras.utils import to_categorical
from NewDataLoader import *
from config import *
import warnings
class Features:
def __init__(self, **kwargs):
self.max_len = kwargs.get('max_len', 250)
self.categorical = kwargs.get('categorical', True)
self.wordrepr = kwargs.get('wordrepr', 'toronto_sent2vec')
self.sentrepr = kwargs.get('sentrepr', 'sentiment')
self.bert_selector = kwargs.get('bert_selector', 'None')
# Transform into H/M/L
self.categorize_F = np.vectorize(self.categorize)
# Feature size
self.WORD_SIZE = FEATS_SIZES[self.wordrepr]
if self.bert_selector == "first" or self.bert_selector == "last":
self.WORD_SIZE = int(self.WORD_SIZE / 2)
self.SENT_SIZE = FEATS_SIZES[self.sentrepr]
if self.sentrepr == "bert":
if self.bert_selector == "first" or self.bert_selector == "last":
self.SENT_SIZE = int(self.SENT_SIZE / 2)
print("Features:", self.wordrepr, self.sentrepr, self.max_len, self.bert_selector)
################################################
# Transform ordinal ratings into categorical
################################################
def categorize(self, rating):
if rating >= 4:
return 0 #HIGH
elif rating > 2:
return 1 #MED
else:
return 2 #LOW
################################################
# Loads features and trims them to max_len
################################################
def get_feats(self, label_f, batch_dir = None):
if not batch_dir:
batch_dir = os.path.dirname(label_f)
# Labels
batch_labels, additional_labels = load_labels(label_f)
batch_labels = np.c_[batch_labels, additional_labels]
if self.categorical:
batch_labels = self.categorize_F(batch_labels) #H/M/L
batch_labels = to_categorical(batch_labels, num_classes = 3) #One-hot encoding
vio, sex, drugs = batch_labels[:, 0, :], batch_labels[:, 1, :], batch_labels[:, 2, :]
y = [vio, sex, drugs]
# Get the index from the filename
i = os.path.basename(label_f).split("_")[0]
i = i.replace('.npz', '')
# Genre
batch_genre = load_genre(i, batch_dir)
# Words
if self.wordrepr in ['sent2vec', 'word2vec', 'script_word2vec', 'toronto_sent2vec']:
word_features = load_w2v_or_p2v(i, batch_dir, FEATS_SIZES, self.wordrepr)
elif self.wordrepr in ['bert_large', 'bert_base', 'sst', 'moviebert']:
word_features = load_BERT(i, batch_dir, FEATS_SIZES, mode = self.wordrepr, bert_selector = self.bert_selector)
elif self.wordrepr in ['ngrams', 'tfidf']:
word_features = load_tf_or_idf(i, batch_dir, self.wordrepr)
# Sentiment
if self.sentrepr in ['sentiment']:
sentiment_features = load_w2v_or_p2v(i, batch_dir, FEATS_SIZES, "sentiment")
elif self.sentrepr in ['bert_large', 'bert_base', 'sst', 'moviebert']:
sentiment_features = load_BERT(i, batch_dir, FEATS_SIZES, mode = self.sentrepr, bert_selector = self.bert_selector)
# elif sentrepr in ['sent_post', 'posteriors']:
# sentiment_features = ???
word_features = word_features[:, -self.max_len:, :] #Trim
sentiment_features = sentiment_features[:, -self.max_len:, :]
return ([word_features, sentiment_features, batch_genre], y)
def get_feats_any_only(self, label_f, index = 0, batch_dir = None):
([word_features, sentiment_features, batch_genre], y) = self.get_feats(label_f, batch_dir = batch_dir)
return ([word_features, sentiment_features, batch_genre], y[index])
def get_feats_vio_only(self, label_f, batch_dir = None):
return self.get_feats_any_only(label_f, index = 0, batch_dir = batch_dir)
def get_feats_sex_only(self, label_f, batch_dir = None):
return self.get_feats_any_only(label_f, index = 1, batch_dir = batch_dir)
def get_feats_drugs_only(self, label_f, batch_dir = None):
return self.get_feats_any_only(label_f, index = 2, batch_dir = batch_dir)
def get_concat_feats(self, label_f, batch_dir = None):
(word_features, sentiment_features, batch_genre), batch_labels = self.get_feats(label_f, batch_dir)
feats = np.concatenate([word_features, sentiment_features], axis = 2)
return [feats, batch_genre], batch_labels[0]
class BertFeatures(Features):
"""This class goes from text to padded transformer features"""
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.name = kwargs.get('bert_name', 'bert-base-uncased')
self.tokenizer = BertTokenizer.from_pretrained(self.name)
self.max_len = kwargs.get('max_len', self.tokenizer.max_len)
self.categorical = kwargs.get('categorical', True)
if self.max_len > self.tokenizer.max_len:
warnings.warn("max_len > tokenizer({}).max_len.".format(self.name))
print("BertFeatures:", self.name, self.max_len)
def get_feats(self, label_f, batch_dir = None):
if not batch_dir:
batch_dir = os.path.dirname(label_f)
# Labels
batch_labels, additional_labels = load_labels(label_f)
batch_labels = np.c_[batch_labels, additional_labels]
if self.categorical:
batch_labels = self.categorize_F(batch_labels) #H/M/L
batch_labels = to_categorical(batch_labels, num_classes = 3) #One-hot encoding
vio, sex, drugs = batch_labels[:, 0], batch_labels[:, 1], batch_labels[:, 2]
y = [vio, sex, drugs]
# Get the index from the filename
i = os.path.basename(label_f).split("_")[0]
i = i.replace('.npz', '')
# Genre
batch_genre = load_genre(i, batch_dir)
#
features = []
for row in load_text(i, batch_dir):
# Tokenize and trim
text = self.tokenizer.tokenize(row)[-self.max_len:]
# Encode text
input_ids = torch.tensor([self.tokenizer.encode(text, add_special_tokens = True)])
features.append(input_ids)
# Convert to tensor
features = torch.cat(features, dim = 0)
return ([features, batch_genre], y)
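
# Usage sketch (illustrative; the label-batch path below is hypothetical and
# the matching feature files are assumed to sit in the same directory):
#
# feats = Features(max_len=250, wordrepr='toronto_sent2vec', sentrepr='sentiment')
# inputs, targets = feats.get_feats('batches/0_labels.npz')
# word_x, sent_x, genre_x = inputs   # targets is [violence, sex, drugs] one-hot labels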
| 38.568862 | 127 | 0.621798 | 6,237 | 0.968328 | 0 | 0 | 0 | 0 | 0 | 0 | 1,107 | 0.171868 |
a97af6a55423ad89ce397dfb867db2824473473b | 1,233 | py | Python | project_4_data_pipelines/airflow/plugins/helpers/sparkify_dim_subdag.py | jpuris/udacity-data-engineering-submissions | e71e2569241c76b5e6c3cd074667b19bde4d7b9e | ["MIT"] | null | null | null | project_4_data_pipelines/airflow/plugins/helpers/sparkify_dim_subdag.py | jpuris/udacity-data-engineering-submissions | e71e2569241c76b5e6c3cd074667b19bde4d7b9e | ["MIT"] | null | null | null | project_4_data_pipelines/airflow/plugins/helpers/sparkify_dim_subdag.py | jpuris/udacity-data-engineering-submissions | e71e2569241c76b5e6c3cd074667b19bde4d7b9e | ["MIT"] | null | null | null |

from airflow import DAG
from operators import LoadDimensionOperator
def load_dim_subdag(
parent_dag_name: str,
task_id: str,
redshift_conn_id: str,
sql_statement: str,
do_truncate: bool,
table_name: str,
**kwargs,
):
"""
Airflow's subdag wrapper. Implements LoadDimensionOperator operator.
Subdag's name will be f'{parent_dag_name}.{task_id}'
Subdag related keyword arguments:
- parent_dag_name -- Parent DAG name
- task_id -- Task ID for the subdag to use
Keyword arguments:
redshift_conn_id -- Airflow connection name for Redshift detail
sql_statement -- SQL statement to run
do_truncate -- Does the table need to be truncated before running
SQL statement
table_name -- Dimension table name
All keyword arguments will be passed to LoadDimensionOperator
"""
dag = DAG(f'{parent_dag_name}.{task_id}', **kwargs)
load_dimension_table = LoadDimensionOperator(
task_id=task_id,
dag=dag,
redshift_conn_id=redshift_conn_id,
sql_query=sql_statement,
do_truncate=do_truncate,
table_name=table_name,
)
return dag
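
# Usage sketch (illustrative): one way to wire this subdag into a parent DAG
# with Airflow's SubDagOperator. `SqlQueries.user_table_insert`, the dag id,
# `parent_dag` and the start_date are hypothetical placeholders, not part of
# this module. Note the subdag id must be f'{parent_dag_name}.{task_id}',
# which load_dim_subdag already enforces.
#
# from datetime import datetime
# from airflow.operators.subdag import SubDagOperator
#
# load_user_dim = SubDagOperator(
#     task_id='load_user_dim_table',
#     subdag=load_dim_subdag(
#         parent_dag_name='sparkify_etl',
#         task_id='load_user_dim_table',
#         redshift_conn_id='redshift',
#         sql_statement=SqlQueries.user_table_insert,
#         do_truncate=True,
#         table_name='users',
#         start_date=datetime(2021, 1, 1),
#     ),
#     dag=parent_dag,
# )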
| 26.804348 | 75 | 0.673155 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 673 | 0.545823 |
a97bced1b47f7e35fb054962b9c59fd468c4c16b | 1,816 | py | Python | inference.py | Retrospection/Yolo-v2-pytorch | d2028219a250e50e03340538faab197ac8ece8a8 | ["MIT"] | null | null | null | inference.py | Retrospection/Yolo-v2-pytorch | d2028219a250e50e03340538faab197ac8ece8a8 | ["MIT"] | null | null | null | inference.py | Retrospection/Yolo-v2-pytorch | d2028219a250e50e03340538faab197ac8ece8a8 | ["MIT"] | 1 | 2021-12-28T08:13:05.000Z | 2021-12-28T08:13:05.000Z |

# coding: utf-8
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
from src.yolo_net import YoloTest, Yolo
import torch
import cv2
import numpy as np
def read_image(path):
img1 = cv2.imread(path)
img1 = cv2.resize(img1, (224, 224))
img1 = img1.transpose((2, 0, 1))
img1 = img1[np.newaxis, :, :, :]
return torch.Tensor(img1)
class FeatureExtractor(object):
def __init__(self):
self.net = YoloTest()
state_dict = torch.load('trained_models\\only_params_trained_yolo_coco')
del state_dict['stage3_conv2.weight']
self.net.load_state_dict(state_dict)
self.net.eval()
def get_feature(self, image_path):
image = read_image(image_path)
return self.net(image).reshape((1024 * 7 * 7)).detach().numpy()
# net = Yolo(10177)
# state_dict = torch.load('trained_models\\only_params_trained_yolo_coco')
# net.load_state_dict(state_dict)
# net.eval()
# img10 = read_image('D:\\dev\\dataset\\CASIA-WebFace\\0000045\\001.jpg')
# img11 = read_image('D:\\dev\\dataset\\CASIA-WebFace\\0000045\\002.jpg')
# img21 = read_image('D:\\dev\\dataset\\CASIA-WebFace\\0000099\\001.jpg')
#
# logits = net(img10)
# print(logits.view(1, 5, -1, 49).shape)
# output10 = net(img10).reshape((1024*7*7,)).detach().numpy()
# output11 = net(img11).reshape((1024*7*7,)).detach().numpy()
# output21 = net(img21).reshape((1024*7*7,)).detach().numpy()
# dis11 = np.linalg.norm(output10 - output11)
# dis21 = np.linalg.norm(output10 - output21)
#
# print(dis11)
# print(dis21)
#
#
# def cosdis(vec1, vec2):
# return np.dot(vec1,vec2)/(np.linalg.norm(vec1)*(np.linalg.norm(vec2)))
#
# cosdis11 = cosdis(output10, output11)
# cosdis21 = cosdis(output10, output21)
# print(cosdis11)
# print(cosdis21)
 | 24.876712 | 80 | 0.680617 | 433 | 0.238436 | 0 | 0 | 0 | 0 | 0 | 0 | 1,017 | 0.560022 |
a97e81a89bda65fad9ab35f52160822fa9349f8c | 11,572 | py | Python | geetools/collection/modis.py | carderne/gee_tools | 4003e75ffb0ffefc9f41b1a34d849eebdb486161 | ["MIT"] | null | null | null | geetools/collection/modis.py | carderne/gee_tools | 4003e75ffb0ffefc9f41b1a34d849eebdb486161 | ["MIT"] | null | null | null | geetools/collection/modis.py | carderne/gee_tools | 4003e75ffb0ffefc9f41b1a34d849eebdb486161 | ["MIT"] | null | null | null |

# coding=utf-8
""" Google Earth Engine MODIS Collections """
from . import Collection, TODAY, Band
from functools import partial
IDS = [
'MODIS/006/MOD09GQ', 'MODIS/006/MYD09GQ',
'MODIS/006/MOD09GA', 'MODIS/006/MYD09GA',
'MODIS/006/MOD13Q1', 'MODIS/006/MYD13Q1'
]
START = {
'MODIS/006/MOD09GQ': '2000-02-24',
'MODIS/006/MYD09GQ': '2000-02-24',
'MODIS/006/MOD09GA': '2000-02-24',
'MODIS/006/MYD09GA': '2000-02-24',
'MODIS/006/MOD13Q1': '2000-02-18',
'MODIS/006/MYD13Q1': '2000-02-18',
}
END = {
'MODIS/006/MOD09GQ': TODAY,
'MODIS/006/MYD09GQ': TODAY,
'MODIS/006/MOD09GA': TODAY,
'MODIS/006/MYD09GA': TODAY,
'MODIS/006/MOD13Q1': TODAY,
'MODIS/006/MYD13Q1': TODAY,
}
class MODIS(Collection):
""" MODIS Collections """
SHORTS = {
'MODIS/006/MOD09GQ': 'TERRA_SR_250_DAILY',
'MODIS/006/MYD09GQ': 'AQUA_SR_250_DAILY',
'MODIS/006/MOD09GA': 'TERRA_SR_1KM_DAILY',
'MODIS/006/MYD09GA': 'AQUA_SR_1KM_DAILY',
'MODIS/006/MOD13Q1': 'TERRA_IND_250_16DAYS',
'MODIS/006/MYD13Q1': 'AQUA_IND_250_16DAYS'
}
def __init__(self, product_id):
""" Initialize a MODIS collection with it's product id """
super(MODIS, self).__init__()
self.product_id = product_id
self._id = self._make_id()
self._bands = self._make_bands()
# dates
self.start_date = START[self._id]
self.end_date = END[self._id]
self.spacecraft = 'MODIS'
self.cloud_cover = None
self.short_name = self.SHORTS.get(self.id)
if self._id in ['MODIS/006/MOD09GQ', 'MODIS/006/MYD09GQ']:
self.common_masks = [self.qc250]
if self._id in ['MODIS/006/MOD09GA', 'MODIS/006/MYD09GA']:
self.common_masks = [self.state_1km]
if self._id in ['MODIS/006/MOD13Q1', 'MODIS/006/MYD13Q1']:
self.common_masks = [self.detailed_qa]
def state_1km(self, image, classes=('cloud', 'shadow', 'snow',
'average_cirrus', 'high_cirrus'), renamed=False):
return self.applyMask(image, 'state_1km', classes, renamed)
def qc250(self, image, classes=('B1_highest_quality', 'B2_highest_quality'),
renamed=False):
return self.applyPositiveMask(image, 'QC_250m', classes, renamed)
def detailed_qa(self, image, classes=('cloud', 'shadow', 'snow'),
renamed=False):
if renamed:
band ='DetailedQA'
else:
band = 'detailed_qa'
return self.applyMask(image, band, classes, renamed)
def _make_bands(self):
bands = [None]*30
# Partial bands
sur_refl_b01 = partial(Band, id='sur_refl_b01', name='red',
precision='int16', min=-100,
max=16000, reference='optical')
sur_refl_b02 = partial(Band, id='sur_refl_b02', name='nir',
precision='int16', min=-100,
max=16000, reference='optical')
num_observations = partial(Band, precision='int8', min=0, max=127,
reference='classification')
QC_250m = Band('QC_250m', 'QC_250m', 'uint16', 250, 0, 4096,
'bits', bits={
'4-7': {0: 'B1_highest_quality'},
'8-11': {0: 'B2_highest_quality'},
'12': {1: 'atmospheric_corrected'}
})
obscov = partial(Band, precision='int8', min=0, max=100,
reference='classification')
iobs_res = partial(Band, id='iobs_res', name='obs_number',
precision='uint8', min=0, max=254,
reference='classification')
orbit_pnt = partial(Band, id='orbit_pnt', name='orbit_pointer',
precision='int8', min=0, max=15,
reference='classification')
granule_pnt = partial(Band, id='granule_pnt', name='granule_pointer',
precision='uint8', min=0, max=254,
reference='classification')
state_1km = Band('state_1km', 'state_1km', 'uint16', 1000, 0, 57335,
'bits', bits={
'0-1': {0: 'clear', 1:'cloud', 2:'mix'},
'2': {1: 'shadow'},
'8-9': {1: 'small_cirrus', 2: 'average_cirrus',
3: 'high_cirrus'},
'13': {1: 'adjacent'},
'15': {1: 'snow'}
})
sezenith = Band('SensorZenith', 'sensor_zenith', 'int16', 1000, 0,
18000, 'classification')
seazimuth = Band('SensorAzimuth', 'sensor_azimuth', 'int16', 1000,
-18000, 18000, 'classification')
range_band = Band('Range', 'range', 'uint16', 1000, 27000, 65535,
'classification')
sozenith = Band('SolarZenith', 'solar_zenith', 'int16', 1000, 0,
18000, 'classification')
soazimuth = Band('SolarAzimuth', 'solar_azimuth', 'int16', 1000,
-18000, 18000, 'classification')
gflags = Band('gflags', 'geolocation_flags', 'uint8', 1000, 0, 248,
'bits')
sur_refl_b03 = partial(Band, id='sur_refl_b03', name='blue',
precision='int16', min=-100, max=16000,
reference='optical')
sur_refl_b04 = partial(Band, id='sur_refl_b04', name='green',
precision='int16', min=-100, max=16000,
reference='optical')
sur_refl_b05 = partial(Band, id='sur_refl_b05', name='swir3',
precision='int16', min=-100, max=16000,
reference='optical')
sur_refl_b06 = partial(Band, id='sur_refl_b06', name='swir',
precision='int16', min=-100, max=16000,
reference='optical')
sur_refl_b07 = partial(Band, id='sur_refl_b07', name='swir2',
precision='int16', min=-100, max=16000,
reference='optical')
QC_500m = Band('QC_500m', 'QC_500m', 'uint32', 500, 0, 4294966019,
'bits', bits={
'2-5': {0: 'B1_highest_quality'},
'6-9': {0: 'B2_highest_quality'},
'10-13': {0: 'B3_highest_quality'},
'14-17': {0: 'B4_highest_quality'},
'18-21': {0: 'B5_highest_quality'},
'22-25': {0: 'B6_highest_quality'},
'26-29': {0: 'B7_highest_quality'},
})
qscan = Band('q_scan', 'q_scan', 'uint8', 250, 0, 254, 'bits')
NDVI = Band('NDVI', 'ndvi', 'int16', 250, -2000, 10000, 'classification')
EVI = Band('EVI', 'evi', 'int16', 250, -2000, 10000, 'classification')
DetailedQA = Band('DetailedQA', 'detailed_qa', 'uint16', 250, 0, 65534,
'bits', bits={
'0-1': {0: 'good_qa'},
'2-5': {0: 'highest_qa'},
'8': {1: 'adjacent'},
'10': {1: 'cloud'},
'14': {1: 'snow'},
'15': {1: 'shadow'}
})
view_zenith = Band('ViewZenith', 'view_zenith', 'int16', 250, 0, 18000,
'classification')
relative_azimuth = Band('RelativeAzimuth', 'relative_azimuth', 'int16',
250, -18000, 18000, 'classification')
DayOfYear = Band('DayOfYear', 'day_of_year', 'int16', 250, 1, 366,
'classification')
SummaryQA = Band('SummaryQA', 'summary_qa', 'int8', 250, 0, 3, 'bits',
bits={
'0-1': {0: 'clear', 1: 'marginal', 2: 'snow',
3: 'cloud'}
})
if self.product_id in ['MOD09GQ', 'MYD09GQ']:
bands[0] = num_observations(id='num_observations',
name='num_observations', scale=250)
bands[1] = sur_refl_b01(scale=250)
bands[2] = sur_refl_b02(scale=250)
bands[3] = QC_250m
bands[4] = obscov(id='obscov', name='observation_coverage', scale=250)
bands[5] = iobs_res(scale=250)
bands[6] = orbit_pnt(scale=250)
bands[7] = granule_pnt(scale=250)
if self.product_id in ['MOD09GA', 'MYD09GA']:
bands[0] = num_observations(id='num_observations_1km', scale=1000,
name='num_observations_1km')
bands[1] = state_1km
bands[2] = sezenith
bands[3] = seazimuth
bands[4] = range_band
bands[5] = sozenith
bands[6] = soazimuth
bands[7] = gflags
bands[8] = orbit_pnt(scale=500)
bands[9] = granule_pnt(scale=500)
bands[10] = num_observations(id='num_observations_500m', scale=500,
name='num_observations_500m')
bands[11] = sur_refl_b01(scale=500)
bands[12] = sur_refl_b02(scale=500)
bands[13] = sur_refl_b03(scale=500)
bands[14] = sur_refl_b04(scale=500)
bands[15] = sur_refl_b05(scale=500)
bands[16] = sur_refl_b06(scale=500)
bands[17] = sur_refl_b07(scale=500)
bands[18] = QC_500m
bands[19] = obscov(id='obscov_500m', scale=500,
name='observation_coverage_500m')
bands[20] = iobs_res(scale=500)
bands[21] = qscan
if self.product_id in ['MOD13Q1', 'MYD13Q1']:
bands[0] = NDVI
bands[1] = EVI
bands[2] = DetailedQA
bands[3] = sur_refl_b01(scale=250)
bands[4] = sur_refl_b02(scale=250)
bands[5] = sur_refl_b03(scale=250)
bands[6] = sur_refl_b07(scale=250)
bands[7] = view_zenith
bands[8] = sozenith
bands[9] = relative_azimuth
bands[10] = DayOfYear
bands[11] = SummaryQA
return [b for b in bands if b]
def _make_id(self):
return 'MODIS/006/{}'.format(self.product_id)
@staticmethod
def fromId(id):
""" Make a MODIS collection from its ID """
def error():
msg = 'Collection {} not available'
raise ValueError(msg.format(id))
if id not in IDS: error()
splitted = id.split('/')
prod = splitted[2]
return MODIS(prod)
@classmethod
def MOD09GQ(cls):
return cls(product_id='MOD09GQ')
@classmethod
def MYD09GQ(cls):
return cls(product_id='MYD09GQ')
@classmethod
def MOD09GA(cls):
return cls(product_id='MOD09GA')
@classmethod
def MYD09GA(cls):
return cls(product_id='MYD09GA')
@classmethod
def MOD13Q1(cls):
return cls(product_id='MOD13Q1')
@classmethod
def MYD13Q1(cls):
return cls(product_id='MYD13Q1')
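# Illustrative usage sketch (assumes an initialised Earth Engine session;
# Band, Collection, IDS and TODAY are defined earlier in this module):
#   terra = MODIS.MOD09GQ()                    # Terra 250 m daily SR
#   aqua = MODIS.fromId('MODIS/006/MYD13Q1')   # same idea via the asset id
#   masked = terra.qc250(image)                # keep highest-quality B1/B2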
| 38.317881 | 89 | 0.497753 | 10,841 | 0.93683 | 0 | 0 | 773 | 0.066799 | 0 | 0 | 3,118 | 0.269443 |
a97f5a52d2112340dd02628abcf36314406fa57c | 338 | py | Python | random-py/app.py | traian-mihali/publishing-py | fa050b1169258b50678f00b97958499bc0210ca3 | [
"MIT"
]
| null | null | null | random-py/app.py | traian-mihali/publishing-py | fa050b1169258b50678f00b97958499bc0210ca3 | [
"MIT"
]
| null | null | null | random-py/app.py | traian-mihali/publishing-py | fa050b1169258b50678f00b97958499bc0210ca3 | [
"MIT"
]
| null | null | null | """ This module provides a method to generate a random number between 0 and the specified number """
import random
import math
def random_num(max):
"""
    Generates a pseudo-random integer in the half-open range [0, max)
    Parameters:
    max(int): the exclusive upper limit of the range
    Returns:
    int: a random integer in [0, max)
"""
return math.floor(random.random() * max)
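# Example: simulate a six-sided die indexed 0..5 (upper bound excluded).
#   rolls = [random_num(6) for _ in range(3)]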
| 19.882353 | 100 | 0.668639 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 238 | 0.704142 |
a980ed05ffe9a9c97a1b948b9c9b922dc89fb870 | 847 | py | Python | sympy/printing/printer.py | certik/sympy-oldcore | eb5bd061c309d88cdfb502bfd5df511b30368458 | [
"BSD-3-Clause"
]
| 1 | 2016-05-08T17:54:57.000Z | 2016-05-08T17:54:57.000Z | sympy/printing/printer.py | certik/sympy-oldcore | eb5bd061c309d88cdfb502bfd5df511b30368458 | [
"BSD-3-Clause"
]
| null | null | null | sympy/printing/printer.py | certik/sympy-oldcore | eb5bd061c309d88cdfb502bfd5df511b30368458 | [
"BSD-3-Clause"
]
| null | null | null |
class Printer(object):
"""
"""
def __init__(self):
self._depth = -1
self._str = str
self.emptyPrinter = str
def doprint(self, expr):
"""Returns the pretty representation for expr (as a string)"""
return self._str(self._print(expr))
def _print(self, expr):
self._depth += 1
# See if the class of expr is known, or if one of its super
# classes is known, and use that pretty function
res = None
for cls in expr.__class__.__mro__:
if hasattr(self, '_print_'+cls.__name__):
res = getattr(self, '_print_'+cls.__name__)(expr)
break
# Unknown object, just use its string representation
if res is None:
res = self.emptyPrinter(expr)
self._depth -= 1
return res
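# Minimal subclass sketch (hypothetical handler; SymPy expressions expose
# .args): the dispatch above finds _print_<ClassName> along the class's MRO.
#   class MyPrinter(Printer):
#       def _print_Add(self, expr):
#           return ' + '.join(map(self.doprint, expr.args))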
| 27.322581 | 70 | 0.565525 | 845 | 0.997639 | 0 | 0 | 0 | 0 | 0 | 0 | 250 | 0.295159 |
a981fd9db88834f380bdfbae5402c0c579a7fa58 | 272 | py | Python | pleiades/transforms.py | jcwright77/pleiades | e3e208e94feee299589a094f361b301131d1bd15 | [
"MIT"
]
| 3 | 2020-03-27T19:27:01.000Z | 2021-07-15T16:28:54.000Z | pleiades/transforms.py | jcwright77/pleiades | e3e208e94feee299589a094f361b301131d1bd15 | [
"MIT"
]
| 6 | 2020-03-30T17:12:42.000Z | 2020-07-14T03:07:02.000Z | pleiades/transforms.py | jcwright77/pleiades | e3e208e94feee299589a094f361b301131d1bd15 | [
"MIT"
]
| 6 | 2020-03-30T17:05:58.000Z | 2021-08-18T19:21:00.000Z | import math
import numpy as np
def rotate(pts, angle, pivot=(0., 0.)):
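    """Rotate 2-D point(s) by ``angle`` degrees about ``pivot`` and return
    the result as a numpy array. With this row-vector convention,
    rotate([(1., 0.)], 90) maps (1, 0) to approximately (0, -1)."""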
pivot = np.asarray(pivot)
angle = math.pi*angle/180
c, s = np.cos(angle), np.sin(angle)
rotation = np.array([[c, -s], [s, c]])
return (np.asarray(pts) - pivot) @ rotation + pivot
| 24.727273 | 55 | 0.602941 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
a982f1f9c012c80b9c26e9e99c4415060d09e04a | 166 | py | Python | Project/Python/project/public/auto/__init__.py | renwei-release/dave | 773301edd3bee6e7526e0d5587ff8af9f01e288f | [
"MIT"
]
| null | null | null | Project/Python/project/public/auto/__init__.py | renwei-release/dave | 773301edd3bee6e7526e0d5587ff8af9f01e288f | [
"MIT"
]
| null | null | null | Project/Python/project/public/auto/__init__.py | renwei-release/dave | 773301edd3bee6e7526e0d5587ff8af9f01e288f | [
"MIT"
]
| null | null | null | import ctypes
import struct
from .dave_define import *
from .dave_enum import *
from .dave_msg_id import *
from .dave_msg_struct import *
from .dave_struct import * | 18.444444 | 30 | 0.789157 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
a9840415a7cc2a3662940dac6af33c62299a8276 | 551 | py | Python | Methods/Machine/Conductor/check.py | Superomeg4/pyleecan | 2b695b5f39e77475a07aa0ea89489fb0a9659337 | [
"Apache-2.0"
]
| 2 | 2020-06-29T13:48:37.000Z | 2021-06-15T07:34:05.000Z | Methods/Machine/Conductor/check.py | Superomeg4/pyleecan | 2b695b5f39e77475a07aa0ea89489fb0a9659337 | [
"Apache-2.0"
]
| null | null | null | Methods/Machine/Conductor/check.py | Superomeg4/pyleecan | 2b695b5f39e77475a07aa0ea89489fb0a9659337 | [
"Apache-2.0"
]
| null | null | null | # -*- coding: utf-8 -*-
"""@package Methods.Machine.Conductor.check
Check that the Conductor is correct
@date Created on Thu Jan 22 17:50:02 2015
@copyright (C) 2015-2016 EOMYS ENGINEERING.
@author pierre_b
"""
from pyleecan.Methods.Machine.LamSlotWind.check import Lam_WindCheckError
def check(self):
"""Check that the Conductor object is correct
Parameters
----------
self : Conductor
A Conductor object
Returns
-------
None
"""
pass
class CondCheckError(Lam_WindCheckError):
""" """
pass
| 17.774194 | 73 | 0.658802 | 63 | 0.114338 | 0 | 0 | 0 | 0 | 0 | 0 | 382 | 0.693285 |
a98465a5dbaaa69b7d18d16711f08102c5a830eb | 3,414 | py | Python | wholeslidedata/annotation/write_mask2.py | kaczmarj/pathology-whole-slide-data | 3adb86af716ca89f336b6c935f90bd13183572b7 | [
"Apache-2.0"
]
| 1 | 2022-02-17T19:47:14.000Z | 2022-02-17T19:47:14.000Z | wholeslidedata/annotation/write_mask2.py | kaczmarj/pathology-whole-slide-data | 3adb86af716ca89f336b6c935f90bd13183572b7 | [
"Apache-2.0"
]
| null | null | null | wholeslidedata/annotation/write_mask2.py | kaczmarj/pathology-whole-slide-data | 3adb86af716ca89f336b6c935f90bd13183572b7 | [
"Apache-2.0"
]
| null | null | null | from pathlib import Path
from typing import List
import cv2
import numpy as np
from shapely import geometry
from shapely.strtree import STRtree
from wholeslidedata.annotation.structures import Annotation, Point, Polygon
from wholeslidedata.image.wholeslideimage import WholeSlideImage
from wholeslidedata.image.wholeslideimagewriter import WholeSlideMaskWriter
from wholeslidedata.samplers.utils import shift_coordinates
def select_annotations(
stree: STRtree, center_x: int, center_y: int, width: int, height: int
):
box = geometry.box(
center_x - width // 2,
center_y - height // 2,
center_x + width // 2,
center_y + height // 2,
)
annotations = stree.query(box)
return sorted(annotations, key=lambda item: item.area, reverse=True)
def get_mask(stree, point, size, ratio):
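    """Rasterise annotations around ``point`` into a ``size``-shaped uint8
    label mask. The window is centred on ``point`` (level-0 coordinates,
    scaled by ``ratio``); polygons are filled with their label value with
    holes preserved, and point annotations mark a single pixel."""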
center_x, center_y = point.x, point.y
width, height = size
# get annotations
annotations = select_annotations(
stree, center_x, center_y, (width * ratio) - 1, (height * ratio) - 1
)
# create mask placeholder
mask = np.zeros((height, width), dtype=np.int32)
# set labels of all selected annotations
for annotation in annotations:
coordinates = np.copy(annotation.coordinates)
coordinates = shift_coordinates(
coordinates, center_x, center_y, width, height, ratio
)
if isinstance(annotation, Polygon):
holemask = np.ones((height, width), dtype=np.int32) * -1
for hole in annotation.holes:
hcoordinates = shift_coordinates(
hole, center_x, center_y, width, height, ratio
)
cv2.fillPoly(holemask, np.array([hcoordinates], dtype=np.int32), 1)
holemask[holemask != -1] = mask[holemask != -1]
cv2.fillPoly(
mask,
np.array([coordinates], dtype=np.int32),
annotation.label.value,
)
mask[holemask != -1] = holemask[holemask != -1]
elif isinstance(annotation, Point):
mask[int(coordinates[1]), int(coordinates[0])] = annotation.label.value
return mask.astype(np.uint8)
def convert_annotations_to_mask(
wsi: WholeSlideImage,
annotations: List[Annotation],
spacing: float,
mask_output_path: Path,
tile_size: int = 1024,
):
stree = STRtree(annotations)
ratio = wsi.get_downsampling_from_spacing(spacing)
shape = wsi.shapes[wsi.get_level_from_spacing(spacing)]
ratio = wsi.get_downsampling_from_spacing(spacing)
write_spacing = wsi.get_real_spacing(spacing)
wsm_writer = WholeSlideMaskWriter()
wsm_writer.write(
path=mask_output_path,
spacing=write_spacing,
dimensions=(shape[0], shape[1]),
tile_shape=(tile_size, tile_size),
)
for y_pos in range(0, shape[1], tile_size):
for x_pos in range(0, shape[0], tile_size):
mask = get_mask(
stree,
geometry.Point(
(x_pos + tile_size // 2) * ratio,
(y_pos + tile_size // 2) * ratio,
),
(tile_size, tile_size),
ratio,
)
if np.any(mask):
wsm_writer.write_tile(tile=mask, coordinates=(int(x_pos), int(y_pos)))
print("closing...")
wsm_writer.save()
print("done")
| 32.207547 | 86 | 0.621558 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 100 | 0.029291 |
a984e763170541feb20e89e4a6245f1b8e706963 | 578 | py | Python | tuples_05/tests/test_slicing_tuples.py | njoroge33/py_learn | 6ad55f37789045bc5c03f3dd668cf1ea497f4e84 | [
"MIT"
]
| null | null | null | tuples_05/tests/test_slicing_tuples.py | njoroge33/py_learn | 6ad55f37789045bc5c03f3dd668cf1ea497f4e84 | [
"MIT"
]
| 2 | 2019-04-15T06:29:55.000Z | 2019-04-19T17:34:32.000Z | tuples_05/tests/test_slicing_tuples.py | njoroge33/py_learn | 6ad55f37789045bc5c03f3dd668cf1ea497f4e84 | [
"MIT"
]
| 1 | 2019-11-19T04:51:18.000Z | 2019-11-19T04:51:18.000Z | import pytest
from ..slicing_tuples import tuple_slice
@pytest.mark.parametrize('names, ages, cities, expected', [
(('Gitau', 'Kanyoi', 'Ndegwa'), (13, 24, 5), ('Njogu-ini', 'Limuru', 'Kamae'), (
('Gitau', 13, 'Njogu-ini'), ('Kanyoi', 24, 'Limuru'), ('Ndegwa', 5, 'Kamae')
)),
(('Totua', 'Suhi'), (95, 12, 36, 78), ('Tokyo', 'Vatican', 'Hyderabad'), (
('Totua', 95, 'Tokyo'), ('Suhi', 12, 'Vatican')
)),
])
def test_tuple_slice(names, ages, cities, expected):
actual = tuple_slice(names, ages, cities)
assert actual == expected
| 36.125 | 88 | 0.570934 | 0 | 0 | 0 | 0 | 520 | 0.899654 | 0 | 0 | 198 | 0.342561 |
a9856cedef8243944a78d8985c56e556db9faae0 | 28,653 | py | Python | dftimewolf/lib/state.py | hkhalifa/dftimewolf | 0a6d62fdb362c8618bd373c18a7f446b959f1a0f | [
"Apache-2.0"
]
| null | null | null | dftimewolf/lib/state.py | hkhalifa/dftimewolf | 0a6d62fdb362c8618bd373c18a7f446b959f1a0f | [
"Apache-2.0"
]
| null | null | null | dftimewolf/lib/state.py | hkhalifa/dftimewolf | 0a6d62fdb362c8618bd373c18a7f446b959f1a0f | [
"Apache-2.0"
]
| null | null | null | # -*- coding: utf-8 -*-
"""This class maintains the internal dfTimewolf state.
Use it to track errors, abort on global failures, clean up after modules, etc.
"""
from dataclasses import dataclass
from concurrent.futures import ThreadPoolExecutor, Future
import importlib
import logging
import threading
import traceback
from typing import TYPE_CHECKING, Callable, Dict, List, Sequence, Type, Any, TypeVar, cast # pylint: disable=line-too-long
from dftimewolf.cli import curses_display_manager as cdm
from dftimewolf.config import Config
from dftimewolf.lib import errors, utils
from dftimewolf.lib.containers.interface import AttributeContainer
from dftimewolf.lib.errors import DFTimewolfError
from dftimewolf.lib.modules import manager as modules_manager
from dftimewolf.lib.module import ThreadAwareModule, BaseModule
if TYPE_CHECKING:
from dftimewolf.lib import module as dftw_module
from dftimewolf.lib.containers import interface
T = TypeVar("T", bound="interface.AttributeContainer") # pylint: disable=invalid-name,line-too-long
# TODO(tomchop): Consider changing this to `dftimewolf.state` if we ever need
# more granularity.
logger = logging.getLogger('dftimewolf')
NEW_ISSUE_URL = 'https://github.com/log2timeline/dftimewolf/issues/new'
@dataclass
class StatsEntry:
"""A simple dataclass to store module-related statistics.
Attributes:
module_type: Type of the module that generated the stats.
module_name: Name of the module that generated the stats. This has the
same value as module_type when no runtime_name has been specified for
the module.
stats: Dictionary of stats to store. Contents are arbitrary, but
keys must be strings.
"""
module_type: str
module_name: str
stats: Dict[str, Any]
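# Example (values are illustrative): a module records its run statistics with
#   state.StoreStats(StatsEntry(module_type='GRRArtifactCollector',
#                               module_name='collector-1',
#                               stats={'hosts': 3, 'artifacts': 12}))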
class DFTimewolfState(object):
"""The main State class.
Attributes:
command_line_options (dict[str, Any]): Command line options passed to
dftimewolf.
config (dftimewolf.config.Config): Class to be used throughout execution.
    errors (list[DFTimewolfError]): errors generated by a module. These
      should be cleaned up after each module run using the CleanUp() method.
    global_errors (list[DFTimewolfError]): the CleanUp() method moves non-
      critical errors to this attribute for later reporting.
input (list[str]): data that the current module will use as input.
output (list[str]): data that the current module generates.
recipe: (dict[str, str]): recipe declaring modules to load.
store (dict[str, object]): arbitrary data for modules.
stats_store: store for statistics generated by modules.
"""
def __init__(self, config: Type[Config]) -> None:
"""Initializes a state."""
super(DFTimewolfState, self).__init__()
self.command_line_options = {} # type: Dict[str, Any]
self._cache = {} # type: Dict[str, str]
self._module_pool = {} # type: Dict[str, BaseModule]
self._state_lock = threading.Lock()
self._stats_lock = threading.Lock()
self._threading_event_per_module = {} # type: Dict[str, threading.Event]
self.config = config
self.errors = [] # type: List[DFTimewolfError]
self.global_errors = [] # type: List[DFTimewolfError]
self.recipe = {} # type: Dict[str, Any]
self.store = {} # type: Dict[str, List[interface.AttributeContainer]]
self.stats_store = [] # type: List[StatsEntry]
self.streaming_callbacks = {} # type: Dict[Type[interface.AttributeContainer], List[Callable[[Any], Any]]] # pylint: disable=line-too-long
self._abort_execution = False
self.stdout_log = True
def _InvokeModulesInThreads(self, callback: Callable[[Any], Any]) -> None:
"""Invokes the callback function on all the modules in separate threads.
Args:
callback (function): callback function to invoke on all the modules.
"""
threads = []
for module_definition in self.recipe['modules']:
thread_args = (module_definition, )
thread = threading.Thread(target=callback, args=thread_args)
threads.append(thread)
thread.start()
for thread in threads:
thread.join()
self.CheckErrors(is_global=True)
def ImportRecipeModules(self, module_locations: Dict[str, str]) -> None:
"""Dynamically loads the modules declared in a recipe.
Args:
module_location (dict[str, str]): A dfTimewolf module name - Python module
mapping. e.g.:
{'GRRArtifactCollector': 'dftimewolf.lib.collectors.grr_hosts'}
Raises:
errors.RecipeParseError: if a module requested in a recipe does not
exist in the mapping.
"""
for module in self.recipe['modules'] + self.recipe.get('preflights', []):
name = module['name']
if name not in module_locations:
msg = (f'In {self.recipe["name"]}: module {name} cannot be found. '
'It may not have been declared.')
raise errors.RecipeParseError(msg)
logger.debug('Loading module {0:s} from {1:s}'.format(
name, module_locations[name]))
location = module_locations[name]
try:
importlib.import_module(location)
except ModuleNotFoundError as exception:
msg = f'Cannot find Python module for {name} ({location}): {exception}'
raise errors.RecipeParseError(msg)
def LoadRecipe(self,
recipe: Dict[str, Any],
module_locations: Dict[str, str]) -> None:
"""Populates the internal module pool with modules declared in a recipe.
Args:
      recipe (dict[str, Any]): recipe declaring modules to load.
      module_locations (dict[str, str]): mapping of dfTimewolf module names to
        the Python modules that implement them.
Raises:
RecipeParseError: if a module in the recipe has not been declared.
"""
self.recipe = recipe
module_definitions = recipe.get('modules', [])
preflight_definitions = recipe.get('preflights', [])
self.ImportRecipeModules(module_locations)
for module_definition in module_definitions + preflight_definitions:
# Combine CLI args with args from the recipe description
module_name = module_definition['name']
module_class = modules_manager.ModulesManager.GetModuleByName(module_name)
runtime_name = module_definition.get('runtime_name')
if not runtime_name:
runtime_name = module_name
# pytype: disable=wrong-arg-types
self._module_pool[runtime_name] = module_class(self, name=runtime_name)
# pytype: enable=wrong-arg-types
def FormatExecutionPlan(self) -> str:
"""Formats execution plan.
Returns information about loaded modules and their corresponding arguments
to stdout.
Returns:
str: String representation of loaded modules and their parameters.
"""
plan = ""
maxlen = 0
modules = self.recipe.get('preflights', []) + self.recipe.get('modules', [])
for module in modules:
if not module['args']:
continue
spacing = len(max(module['args'].keys(), key=len))
maxlen = maxlen if maxlen > spacing else spacing
for module in modules:
runtime_name = module.get('runtime_name')
if runtime_name:
plan += '{0:s} ({1:s}):\n'.format(runtime_name, module['name'])
else:
plan += '{0:s}:\n'.format(module['name'])
new_args = utils.ImportArgsFromDict(
module['args'], self.command_line_options, self.config)
if not new_args:
plan += ' *No params*\n'
for key, value in new_args.items():
plan += ' {0:s}{1:s}\n'.format(key.ljust(maxlen + 3), repr(value))
return plan
def LogExecutionPlan(self) -> None:
"""Logs the result of FormatExecutionPlan() using the base logger."""
for line in self.FormatExecutionPlan().split('\n'):
logger.debug(line)
def AddToCache(self, name: str, value: Any) -> None:
"""Thread-safe method to add data to the state's cache.
If the cached item is already in the cache it will be
overwritten with the new value.
Args:
name (str): string with the name of the cache variable.
value (object): the value that will be stored in the cache.
"""
with self._state_lock:
self._cache[name] = value
def GetFromCache(self, name: str, default_value: Any=None) -> Any:
"""Thread-safe method to get data from the state's cache.
Args:
name (str): string with the name of the cache variable.
default_value (object): the value that will be returned if
the item does not exist in the cache. Optional argument
and defaults to None.
Returns:
object: object from the cache that corresponds to the name, or
the value of "default_value" if the cache does not contain
the variable.
"""
with self._state_lock:
return self._cache.get(name, default_value)
def StoreContainer(self, container: "interface.AttributeContainer") -> None:
"""Thread-safe method to store data in the state's store.
Args:
container (AttributeContainer): data to store.
"""
with self._state_lock:
self.store.setdefault(container.CONTAINER_TYPE, []).append(container)
def StoreStats(self, stats_entry: StatsEntry) -> None:
"""Thread-safe method to store stats in the state's stats store.
Args:
      stats_entry: The stats object to store.
"""
with self._stats_lock:
self.stats_store.append(stats_entry)
def GetStats(self) -> List[StatsEntry]:
"""Get stats entries that have been stored in the state.
Returns:
The stats objects stored in the state's stats store.
"""
with self._stats_lock:
return self.stats_store
def GetContainers(self,
container_class: Type[T],
pop: bool=False) -> Sequence[T]:
"""Thread-safe method to retrieve data from the state's store.
Args:
container_class (type): AttributeContainer class used to filter data.
pop (Optional[bool]): Whether to remove the containers from the state when
they are retrieved.
Returns:
Collection[AttributeContainer]: attribute container objects provided in
the store that correspond to the container type.
"""
with self._state_lock:
container_objects = cast(
List[T], self.store.get(container_class.CONTAINER_TYPE, []))
if pop:
self.store[container_class.CONTAINER_TYPE] = []
return tuple(container_objects)
def DedupeContainers(self, container_class: Type[T]) -> None:
"""Thread safe deduping of containers of the given type.
This requires the container being deduped to override `__eq__()`.
Args:
container_class (type): AttributeContainer class to dedupe.
"""
with self._state_lock:
deduped = []
for c in self.store.get(container_class.CONTAINER_TYPE, []):
if c not in deduped:
deduped.append(c)
self.store[container_class.CONTAINER_TYPE] = deduped
def _SetupModuleThread(self, module_definition: Dict[str, str]) -> None:
"""Calls the module's SetUp() function and sets a threading event for it.
Callback for _InvokeModulesInThreads.
Args:
module_definition (dict[str, str]): recipe module definition.
"""
module_name = module_definition['name']
runtime_name = module_definition.get('runtime_name', module_name)
logger.info('Setting up module: {0:s}'.format(runtime_name))
new_args = utils.ImportArgsFromDict(
module_definition['args'], self.command_line_options, self.config)
module = self._module_pool[runtime_name]
try:
self._RunModuleSetUp(module, **new_args)
except errors.DFTimewolfError:
msg = "A critical error occurred in module {0:s}, aborting execution."
logger.critical(msg.format(module.name))
except Exception as exception: # pylint: disable=broad-except
msg = 'An unknown error occurred in module {0:s}: {1!s}'.format(
module.name, exception)
logger.critical(msg)
# We're catching any exception that is not a DFTimewolfError, so we want
# to generate an error for further reporting.
error = errors.DFTimewolfError(
message=msg, name='dftimewolf', stacktrace=traceback.format_exc(),
critical=True, unexpected=True)
self.AddError(error)
self._threading_event_per_module[runtime_name] = threading.Event()
self.CleanUp()
def _RunModuleSetUp(self,
module: BaseModule,
**new_args: Dict[str, object]) -> None:
"""Runs SetUp of a single module.
Designed to be wrapped by an output handling subclass.
Args:
      module: The module that will have SetUp() called.
new_args: kwargs to pass to SetUp."""
module.SetUp(**new_args)
def _RunModuleProcess(self, module: BaseModule) -> None:
"""Runs Process of a single module.
Designed to be wrapped by an output handling subclass.
Args:
module: The module to run Process() on."""
module.Process()
def _RunModuleProcessThreaded(
self, module: ThreadAwareModule
) -> List[Future]: # type: ignore
"""Runs Process of a single ThreadAwareModule module.
Designed to be wrapped by an output handling subclass.
Args:
module: The module that will have Process(container) called in a threaded
fashion."""
cont_count = len(self.GetContainers(module.GetThreadOnContainerType()))
logger.info(
f'Running {cont_count} threads, max {module.GetThreadPoolSize()} '
f'simultaneous for module {module.name}')
futures = []
with ThreadPoolExecutor(max_workers=module.GetThreadPoolSize()) \
as executor:
pop = not module.KeepThreadedContainersInState()
for c in self.GetContainers(module.GetThreadOnContainerType(), pop):
futures.append(
executor.submit(module.Process, c))
return futures
def _RunModulePreProcess(self, module: ThreadAwareModule) -> None:
"""Runs PreProcess of a single module.
Designed to be wrapped by an output handling subclass.
Args:
module: The module that will have PreProcess() called."""
module.PreProcess()
def _RunModulePostProcess(self, module: ThreadAwareModule) -> None:
"""Runs PostProcess of a single module.
Designed to be wrapped by an output handling subclass.
Args:
module: The module that will have PostProcess() called."""
module.PostProcess()
# pylint: disable=unused-argument
def _HandleFuturesFromThreadedModule(
self,
futures: List[Future], # type: ignore
runtime_name: str) -> None:
"""Handles any futures raised by the async processing of a module.
Args:
futures: A list of futures, returned by RunModuleProcessThreaded().
runtime_name: runtime name of the module."""
for fut in futures:
if fut.exception():
raise fut.exception() # type: ignore
# pylint: disable=unused-argument
def SetupModules(self) -> None:
"""Performs setup tasks for each module in the module pool.
Threads declared modules' SetUp() functions. Takes CLI arguments into
account when replacing recipe parameters for each module.
"""
# Note that vars() copies the values of argparse.Namespace to a dict.
self._InvokeModulesInThreads(self._SetupModuleThread)
def _RunModuleThread(self, module_definition: Dict[str, str]) -> None:
"""Runs the module's Process() function.
Callback for _InvokeModulesInThreads.
Waits for any blockers to have finished before running Process(), then
sets an Event flag declaring the module has completed.
Args:
module_definition (dict): module definition.
"""
module_name = module_definition['name']
runtime_name = module_definition.get('runtime_name', module_name)
for dependency in module_definition['wants']:
self._threading_event_per_module[dependency].wait()
module = self._module_pool[runtime_name]
# Abort processing if a module has had critical failures before.
if self._abort_execution:
logger.critical(
'Aborting execution of {0:s} due to previous errors'.format(
module.name))
self._threading_event_per_module[runtime_name].set()
self.CleanUp()
return
logger.info('Running module: {0:s}'.format(runtime_name))
try:
if isinstance(module, ThreadAwareModule):
self._RunModulePreProcess(module)
futures = self._RunModuleProcessThreaded(module)
self._RunModulePostProcess(module)
self._HandleFuturesFromThreadedModule(futures, runtime_name)
else:
self._RunModuleProcess(module)
except errors.DFTimewolfError:
logger.critical(
"Critical error in module {0:s}, aborting execution".format(
module.name))
except Exception as exception: # pylint: disable=broad-except
msg = 'An unknown error occurred in module {0:s}: {1!s}'.format(
module.name, exception)
logger.critical(msg)
# We're catching any exception that is not a DFTimewolfError, so we want
# to generate an error for further reporting.
error = errors.DFTimewolfError(
message=msg, name='dftimewolf', stacktrace=traceback.format_exc(),
critical=True, unexpected=True)
self.AddError(error)
logger.info('Module {0:s} finished execution'.format(runtime_name))
self._threading_event_per_module[runtime_name].set()
self.CleanUp()
def RunPreflights(self) -> None:
"""Runs preflight modules."""
for preflight_definition in self.recipe.get('preflights', []):
preflight_name = preflight_definition['name']
runtime_name = preflight_definition.get('runtime_name', preflight_name)
args = preflight_definition.get('args', {})
new_args = utils.ImportArgsFromDict(
args, self.command_line_options, self.config)
preflight = self._module_pool[runtime_name]
try:
self._RunModuleSetUp(preflight, **new_args)
self._RunModuleProcess(preflight)
finally:
self.CheckErrors(is_global=True)
def CleanUpPreflights(self) -> None:
"""Executes any cleanup actions defined in preflight modules."""
for preflight_definition in self.recipe.get('preflights', []):
preflight_name = preflight_definition['name']
runtime_name = preflight_definition.get('runtime_name', preflight_name)
preflight = self._module_pool[runtime_name]
try:
preflight.CleanUp()
finally:
self.CheckErrors(is_global=True)
def InstantiateModule(self, module_name: str) -> "BaseModule":
"""Instantiates an arbitrary dfTimewolf module.
Args:
module_name (str): The name of the module to instantiate.
Returns:
BaseModule: An instance of a dftimewolf Module, which is a subclass of
BaseModule.
"""
module_class: Type["BaseModule"]
module_class = modules_manager.ModulesManager.GetModuleByName(module_name)
# pytype: disable=wrong-arg-types
return module_class(self)
# pytype: enable=wrong-arg-types
def RunModules(self) -> None:
"""Performs the actual processing for each module in the module pool."""
self._InvokeModulesInThreads(self._RunModuleThread)
def RegisterStreamingCallback(
self,
target: Callable[["interface.AttributeContainer"], Any],
container_type: Type["interface.AttributeContainer"]) -> None:
"""Registers a callback for a type of container.
    The function to be registered should take a single parameter of type
interface.AttributeContainer.
Args:
target (function): function to be called.
container_type (type[interface.AttributeContainer]): container type on
which the callback will be called.
"""
if container_type not in self.streaming_callbacks:
self.streaming_callbacks[container_type] = []
self.streaming_callbacks[container_type].append(target)
def StreamContainer(self, container: "interface.AttributeContainer") -> None:
"""Streams a container to the callbacks that are registered to handle it.
Args:
container (interface.AttributeContainer): container instance that will be
streamed to any registered callbacks.
"""
for callback in self.streaming_callbacks.get(type(container), []):
callback(container)
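  # Typical wiring (container class is illustrative): a module registers in
  # its SetUp() and then receives every matching streamed container:
  #   state.RegisterStreamingCallback(self._ProcessFile, containers.File)
  #   state.StreamContainer(containers.File(path='/tmp/f'))  # -> _ProcessFile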
def AddError(self, error: DFTimewolfError) -> None:
"""Adds an error to the state.
Args:
error (errors.DFTimewolfError): The dfTimewolf error to add.
"""
if error.critical:
self._abort_execution = True
self.errors.append(error)
def CleanUp(self) -> None:
"""Cleans up after running a module.
The state's output becomes the input for the next stage. Any errors are
moved to the global_errors attribute so that they can be reported at a
later stage.
"""
# Move any existing errors to global errors
self.global_errors.extend(self.errors)
self.errors = []
def CheckErrors(self, is_global: bool=False) -> None:
"""Checks for errors and exits if any of them are critical.
Args:
is_global (Optional[bool]): True if the global_errors attribute should
be checked. False if the error attribute should be checked.
"""
error_objects = self.global_errors if is_global else self.errors
critical_errors = False
if error_objects:
logger.error('dfTimewolf encountered one or more errors:')
for index, error in enumerate(error_objects):
logger.error('{0:d}: error from {1:s}: {2:s}'.format(
index+1, error.name, error.message))
if error.stacktrace:
for line in error.stacktrace.split('\n'):
logger.error(line)
if error.critical:
critical_errors = True
if any(error.unexpected for error in error_objects):
logger.critical('One or more unexpected errors occurred.')
logger.critical(
'Please consider opening an issue: {0:s}'.format(NEW_ISSUE_URL))
if critical_errors:
raise errors.CriticalError('Critical error found. Aborting.')
def PublishMessage(self,
source: str,
message: str,
is_error: bool = False) -> None:
"""Receives a message for publishing.
The base class does nothing with this (as the method in module also logs the
message). This method exists to be overridden for other UIs.
Args:
source: The source of the message.
message: The message content.
is_error: True if the message is an error message, False otherwise."""
class DFTimewolfStateWithCDM(DFTimewolfState):
"""The main state class, extended to wrap methods with updates to a
CursesDisplayManager object."""
def __init__(self,
config: Type[Config],
cursesdm: cdm.CursesDisplayManager) -> None:
"""Initializes a state."""
super(DFTimewolfStateWithCDM, self).__init__(config)
self.cursesdm = cursesdm
self.stdout_log = False
def LoadRecipe(self,
recipe: Dict[str, Any],
module_locations: Dict[str, str]) -> None:
"""Populates the internal module pool with modules declared in a recipe.
Args:
      recipe (dict[str, Any]): recipe declaring modules to load.
      module_locations (dict[str, str]): mapping of dfTimewolf module names to
        the Python modules that implement them.
Raises:
RecipeParseError: if a module in the recipe has not been declared.
"""
super(DFTimewolfStateWithCDM, self).LoadRecipe(recipe, module_locations)
module_definitions = recipe.get('modules', [])
preflight_definitions = recipe.get('preflights', [])
self.cursesdm.SetRecipe(self.recipe['name'])
for module_definition in preflight_definitions:
self.cursesdm.EnqueuePreflight(module_definition['name'],
module_definition.get('wants', []),
module_definition.get('runtime_name'))
for module_definition in module_definitions:
self.cursesdm.EnqueueModule(module_definition['name'],
module_definition.get('wants', []),
module_definition.get('runtime_name'))
self.cursesdm.Draw()
def _RunModuleSetUp(self,
module: BaseModule,
**new_args: Dict[str, object]) -> None:
"""Runs SetUp of a single module.
Args:
      module: The module that will have SetUp() called.
new_args: kwargs to pass to SetUp."""
self.cursesdm.UpdateModuleStatus(module.name, cdm.Status.SETTINGUP)
module.SetUp(**new_args)
self.cursesdm.UpdateModuleStatus(module.name, cdm.Status.PENDING)
def _RunModuleProcess(self, module: BaseModule) -> None:
"""Runs Process of a single module.
Args:
module: The module to run Process() on."""
self.cursesdm.UpdateModuleStatus(module.name, cdm.Status.PROCESSING)
module.Process()
self.cursesdm.UpdateModuleStatus(module.name, cdm.Status.COMPLETED)
def _RunModuleProcessThreaded(
self, module: ThreadAwareModule
) -> List[Future]: # type: ignore
"""Runs Process of a single ThreadAwareModule module.
Args:
module: The module that will have Process(container) called in a threaded
fashion."""
cont_count = len(self.GetContainers(module.GetThreadOnContainerType()))
logger.info(
f'Running {cont_count} threads, max {module.GetThreadPoolSize()} '
f'simultaneous for module {module.name}')
self.cursesdm.SetThreadedModuleContainerCount(module.name, cont_count)
self.cursesdm.UpdateModuleStatus(module.name, cdm.Status.PROCESSING)
futures = []
with ThreadPoolExecutor(max_workers=module.GetThreadPoolSize()) \
as executor:
pop = not module.KeepThreadedContainersInState()
for c in self.GetContainers(module.GetThreadOnContainerType(), pop):
futures.append(
executor.submit(
self._WrapThreads, module.Process, c, module.name))
return futures
def _RunModulePreProcess(self, module: ThreadAwareModule) -> None:
"""Runs PreProcess of a single module.
Args:
module: The module that will have PreProcess() called."""
self.cursesdm.UpdateModuleStatus(module.name, cdm.Status.PREPROCESSING)
module.PreProcess()
self.cursesdm.UpdateModuleStatus(module.name, cdm.Status.PENDING)
def _RunModulePostProcess(self, module: ThreadAwareModule) -> None:
"""Runs PostProcess of a single module.
Args:
module: The module that will have PostProcess() called."""
self.cursesdm.UpdateModuleStatus(module.name, cdm.Status.POSTPROCESSING)
module.PostProcess()
self.cursesdm.UpdateModuleStatus(module.name, cdm.Status.COMPLETED)
def _HandleFuturesFromThreadedModule(
self,
futures: List[Future], # type: ignore
runtime_name: str) -> None:
"""Handles any futures raised by the async processing of a module.
Args:
futures: A list of futures, returned by RunModuleProcessThreaded().
runtime_name: runtime name of the module."""
for fut in futures:
if fut.exception():
self.cursesdm.SetError(runtime_name, str(fut.exception()))
raise fut.exception() # type: ignore
def _WrapThreads(self,
process: Callable[[AttributeContainer], None],
container: AttributeContainer,
module_name: str) -> None:
"""Wraps a ThreadPoolExecutor call to module.process with the
CursesDisplayManager status update methods.
Args:
process: A callable method: Process, belonging to a ThreadAwareModule.
container: The Container being processed by the thread.
module_name: The runtime name of the module."""
thread_id = threading.current_thread().getName()
self.cursesdm.UpdateModuleThreadState(
module_name, cdm.Status.RUNNING, thread_id, str(container))
process(container)
self.cursesdm.UpdateModuleThreadState(
module_name, cdm.Status.COMPLETED, thread_id, str(container))
def AddError(self, error: DFTimewolfError) -> None:
"""Adds an error to the state.
Args:
error (errors.DFTimewolfError): The dfTimewolf error to add.
"""
super(DFTimewolfStateWithCDM, self).AddError(error)
name = error.name if error.name else 'no_module_name'
self.cursesdm.SetError(name, error.message)
def PublishMessage(self,
source: str,
message: str,
is_error: bool = False) -> None:
"""Receives a message for publishing to the list of messages.
Args:
source: The source of the message.
message: The message content.
is_error: True if the message is an error message, False otherwise."""
self.cursesdm.EnqueueMessage(source, message, is_error)
| 36.640665 | 144 | 0.682965 | 27,376 | 0.955432 | 0 | 0 | 505 | 0.017625 | 0 | 0 | 12,843 | 0.448225 |
a98618135a8eb68ea555b4e82e1d790635fb2594 | 1,374 | py | Python | DBManager.py | d0d0d0/Kerberos | 38bf0b8388bc4f3571e790d5bc626d050df5d4dc | [
"MIT"
]
| null | null | null | DBManager.py | d0d0d0/Kerberos | 38bf0b8388bc4f3571e790d5bc626d050df5d4dc | [
"MIT"
]
| null | null | null | DBManager.py | d0d0d0/Kerberos | 38bf0b8388bc4f3571e790d5bc626d050df5d4dc | [
"MIT"
]
| null | null | null | ### Implements database management for Authentication Server and TGS ###
from Query import *
from sqlite3 import *
from config import *
class DBManager(object):
def __init__(self, dbname):
try:
self.conn = connect(dbname)
self.cursor = self.conn.cursor()
except Exception as e:
print str(e)
def createTable(self, ttype):
try:
if ttype in TYPE_SERVICE:
self.cursor.execute(CREATE_SERVICE_TABLE)
elif ttype in TYPE_USER:
self.cursor.execute(CREATE_USER_TABLE)
elif ttype in TYPE_AUTH:
self.cursor.execute(CREATE_AUTH_TABLE)
elif ttype in TYPE_TGS:
self.cursor.execute(CREATE_TGS_TABLE)
else:
print "Invalid table type."
self.conn.commit()
except Exception as e:
print str(e)
def insert(self, ttype, val):
try:
if ttype in TYPE_SERVICE:
self.cursor.execute(INSERT_SERVICE, val)
elif ttype in TYPE_USER:
self.cursor.execute(INSERT_USER, val)
elif ttype in TYPE_AUTH:
self.cursor.execute(INSERT_AUTH, val)
elif ttype in TYPE_TGS:
self.cursor.execute(INSERT_TGS, val)
else:
print "Invalid table type."
self.conn.commit()
except Exception as e:
print str(e)
def isAuthExist(self, val):
try:
self.cursor.execute(IS_AUTH, val)
data = self.cursor.fetchone()
if data == None:
return False
return True
except Exception as e:
print str(e)
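# Usage sketch (table-type strings and value tuples must line up with the
# TYPE_* and query constants defined in config.py and Query.py):
#   db = DBManager('kerberos.db')
#   db.createTable('user')
#   db.insert('user', (principal, secret_key))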
| 21.809524 | 72 | 0.697234 | 1,230 | 0.895197 | 0 | 0 | 0 | 0 | 0 | 0 | 114 | 0.082969 |
a987d4f7ac2585765bc67edb9138327e5465eec0 | 451 | py | Python | people/views.py | kackey0-1/drf-sample | 914907320bc317240b4d7c07968b6d4ea80b4511 | [
"MIT"
]
| null | null | null | people/views.py | kackey0-1/drf-sample | 914907320bc317240b4d7c07968b6d4ea80b4511 | [
"MIT"
]
| 6 | 2021-03-30T12:05:07.000Z | 2021-04-05T14:21:46.000Z | people/views.py | kackey0-1/drf-sample | 914907320bc317240b4d7c07968b6d4ea80b4511 | [
"MIT"
]
| null | null | null | from rest_framework.decorators import api_view
from rest_framework.response import Response
from rest_framework import status
from .models import Person
from .serializers import PersonSerializer
@api_view(['GET'])
def list_people(request):
people = Person.objects.all()
serializer = PersonSerializer(people, many=True)
content = {
"people": serializer.data,
}
return Response(data=content, status=status.HTTP_200_OK)
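# Example exchange (route is illustrative; the wiring lives in urls.py):
#   GET /people/  ->  200 OK  {"people": [...serialized Person objects...]}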
| 22.55 | 60 | 0.75388 | 0 | 0 | 0 | 0 | 249 | 0.552106 | 0 | 0 | 13 | 0.028825 |
a98828e92b274eb6eae13e6556ae7fff3be2a963 | 8,867 | py | Python | simple_soccer/two_dimension.py | RyoheiGoto/reinforcement_learning | ff2ddded7fd24c831a5103818b8a747a66a75f0c | [
"MIT"
]
| 2 | 2015-11-18T17:47:19.000Z | 2016-03-20T08:22:42.000Z | simple_soccer/two_dimension.py | RyoheiGoto/reinforcement_learning | ff2ddded7fd24c831a5103818b8a747a66a75f0c | [
"MIT"
]
| 1 | 2015-11-19T18:15:13.000Z | 2016-02-09T16:48:23.000Z | simple_soccer/two_dimension.py | RyoheiGoto/ReinforcementLearning | ff2ddded7fd24c831a5103818b8a747a66a75f0c | [
"MIT"
]
| null | null | null | import numpy as np
import matplotlib.pyplot as plt
field_width = 396 #cm
field_hight = 180 #cm
goal_length = 180 #cm
threshold = 36
field_width_threshold_num = field_width / threshold + 1
field_width_threshold = [Y * threshold - field_width / 2.0 for Y in xrange(field_width_threshold_num)]
field_hight_threshold_num = field_hight / threshold + 1
field_hight_threshold = [X * threshold for X in xrange(field_hight_threshold_num)]
ball_velo_x_threshold = [X * 100.0 for X in [-1.0, -0.8, -0.6, -0.4, -0.2, 0.0]]
ball_velo_x_threshold_num = len(ball_velo_x_threshold) + 1
ball_velo_y_threshold = [Y * 50.0 for Y in [-1.0, -0.8, -0.6, -0.4, -0.2, 0.0, 0.2, 0.4, 0.6, 0.8, 1.0]]
ball_velo_y_threshold_num = len(ball_velo_y_threshold) + 1
tau = 0.2 #sec
fall_time = 10
robot_states = 3
#epsilon = 0.1
epsilon = 0.00001
alpha = 0.5
gamma = 0.5
STAND, LEFT, RIGHT, BALL = range(4)
COMPLETED = "COMPLETED"
FAILED = "FAILED"
ACTIVE = "ACTIVE"
class Soccer(object):
def __init__(self, max_episode=10000, plot=False):
np.random.seed()
self.Q = np.zeros([robot_states, field_hight_threshold_num, field_width_threshold_num, ball_velo_x_threshold_num, ball_velo_y_threshold_num])
self.robot_state = None
self.fall_count = None
self.ball_states = None
self.result = None
self.clear = None
self.process(max_episode, plot)
def status_init(self):
ball_x = np.random.randint(80, 180)
ball_y = np.random.randint(-150, 150)
ball_dx = -np.random.random() * 100
ball_dy = np.random.choice([-50, 50]) * np.random.random()
self.ball_states = (ball_x, ball_y, ball_dx, ball_dy)
self.robot_state = STAND
def threslold(self, states):
x, y, dx, dy = states
for field_x, num in zip(field_hight_threshold, xrange(field_hight_threshold_num)):
if x < field_x:
threshold_x = num
break
else:
threshold_x = field_hight_threshold_num - 1
for field_y, num in zip(field_width_threshold, xrange(field_width_threshold_num)):
if y < field_y:
threshold_y = num
break
else:
threshold_y = field_width_threshold_num - 1
for ball_dx, num in zip(ball_velo_x_threshold, xrange(ball_velo_x_threshold_num)):
if dx < ball_dx:
threshold_dx = num
break
else:
threshold_dx = ball_velo_x_threshold_num - 1
for ball_dy, num in zip(ball_velo_y_threshold, xrange(ball_velo_y_threshold_num)):
if dy < ball_dy:
threshold_dy = num
break
else:
threshold_dy = ball_velo_y_threshold_num - 1
return threshold_x, threshold_y, threshold_dx, threshold_dy
def update_status(self, ball_states):
ball_x, ball_y, ball_dx, ball_dy = ball_states
ball_x += ball_dx * tau
ball_y += ball_dy * tau
self.ball_states = [ball_x, ball_y, ball_dx, ball_dy]
def decide_action(self, ball_states, robot_state):
        # `robot_state == (LEFT or RIGHT)` would short-circuit to
        # `robot_state == LEFT`, so a fall to the RIGHT would never count
        # down; a membership test is what is needed here.
        if robot_state in (LEFT, RIGHT):
self.fall_count -= 1
else:
policy = self.e_greedy(ball_states)
prob = 0.0
for action, policy in zip(xrange(robot_states), policy):
prob += policy
if np.random.random() < prob:
self.robot_state = action
                    if action in (LEFT, RIGHT):  # membership, not `(LEFT or RIGHT)`
self.fall_count = fall_time
break
else:
self.robot_state = STAND
def e_greedy(self, ball_states):
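        """Epsilon-greedy action probabilities for the discretised ball
        state: the greedy action gets 1 - epsilon + epsilon/|A|, every other
        action epsilon/|A|; falls back to a uniform policy while the Q-row
        is still all zeros or the probabilities do not sum to one."""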
policy = []
x, y, dx, dy = self.threslold(ball_states)
q = [self.Q[action, x, y, dx, dy] for action in xrange(robot_states)]
for action in xrange(len(q)):
if action == q.index(max(q)):
policy.append(1.0 - epsilon + epsilon / robot_states)
else:
policy.append(epsilon / robot_states)
if sum(policy) != 1.0 or not sum(q):
return map(lambda n: 1.0 / robot_states, policy)
else:
return policy
def get_reward(self, ball_states, robot_state):
x, y, dx, dy = self.threslold(ball_states)
reward = 0.0
result = ACTIVE
if robot_state == STAND:
if x == 1 and y == 6:
reward = 5.0
result = COMPLETED
else:
reward = 1.0
elif robot_state == LEFT:
if x == 1 and y in (4, 5):
reward = 5.0
result = COMPLETED
elif not self.fall_count > 0:
reward = -10.0
result = FAILED
else:
reward = -5.0
elif robot_state == RIGHT:
if x == 1 and y in (7, 8):
reward = 5.0
result = COMPLETED
elif not self.fall_count > 0:
reward = -10.0
result = FAILED
else:
reward = -5.0
if x == 0 and (3 < y < 9):
reward = -10.0
result = FAILED
elif x in (0, 6) and (y < 4 or y > 8):
if robot_state == STAND:
reward = 5.0
else:
reward = -5.0
result = COMPLETED
return reward, result
def q_learning(self, ball_states, new_ball_states, new_robot_state, reward):
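        """One Q-learning backup:
        Q(s, a) += alpha * (reward + gamma * max_a' Q(s', a') - Q(s, a))."""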
x, y, dx, dy = self.threslold(new_ball_states)
new = max([self.Q[action, x, y, dx, dy] for action in xrange(robot_states)])
x, y, dx, dy = self.threslold(ball_states)
old = self.Q[new_robot_state, x, y, dx, dy]
self.Q[new_robot_state, x, y, dx, dy] += alpha * (reward + gamma * new - old)
def step(self):
ball_states = self.ball_states
robot_state = self.robot_state
self.decide_action(ball_states, robot_state)
self.update_status(ball_states)
new_ball_states = self.ball_states
new_robot_states = self.robot_state
reward, result = self.get_reward(new_ball_states, new_robot_states)
self.q_learning(ball_states, new_ball_states, new_robot_states, reward)
if result == ACTIVE:
return True
else:
self.result = result
return False
def process(self, max_episode, plot):
clear = 0.0
self.clear = 0.0
for episode in np.arange(1, max_episode):
self.status_init()
log = []
for step in np.arange(1, 1000000):
ball_x, ball_y, ball_dx, ball_dy = self.ball_states
log.append([step * tau, ball_x, ball_y, ball_dx, ball_dy, self.robot_state])
if not self.step():
ball_x, ball_y, ball_dx, ball_dy = self.ball_states
log.append([step * tau, ball_x, ball_y, ball_dx, ball_dy, self.robot_state])
if self.result == COMPLETED:
clear += 1.0
self.show_result(episode, self.result)
#if plot and episode > max_episode * 0.9:
if plot and episode > 10000:
self.plotgame(log, self.result)
break
print "-" * 30
print "episode:\t%d\nclear:\t\t%d(%.3lf%%)" % (max_episode, clear, (clear / max_episode) * 100)
def show_result(self, episode, result):
if result == COMPLETED:
self.clear += 1
if episode % 1000 == 0:
print "-" * 30
print "episode:\t%d ~ %d" % ((episode - 999), episode)
print "clear:\t\t%d(%.3lf%%)" % (self.clear, (self.clear / 1000 * 100))
self.clear = 0.0
"""
global epsilon
epsilon -= 0.01
if epsilon < 0.002:
epsilon = 0.002
print "epsilon:\t%lf" % epsilon
"""
def plotgame(self, episode, result):
field = np.zeros([field_hight_threshold_num + 1, field_width_threshold_num])
for step in episode:
time, ball_x, ball_y, ball_dx, ball_dy, robot_state = step
x, y, dx, dy = self.threslold([ball_x, ball_y, ball_dx, ball_dy])
field[x, y] = BALL
field[0, 3] = field[0, 9] = -15
if robot_state == STAND:
field[1, 6] = -20
elif robot_state == LEFT:
field[1, 4] = field[1, 5] = -5
elif robot_state == RIGHT:
field[1, 7] = field[1, 8] = -10
plt.imshow(field, interpolation='none', cmap="BuGn")
plt.title(result)
plt.show()
if __name__ == '__main__':
#Soccer(max_episode=100, plot=True)
Soccer(max_episode=10000000, plot=False)
| 33.587121 | 149 | 0.551483 | 7,811 | 0.880907 | 0 | 0 | 0 | 0 | 0 | 0 | 419 | 0.047254 |
a98a17680f92454408a66d8e581e032e851f1d31 | 1,089 | py | Python | tests/test_molecular_signatures_db.py | krassowski/gsea-api | deb562ea55871b799eb501a798dd49a881ff9523 | [
"MIT"
]
| 8 | 2020-03-06T02:03:40.000Z | 2022-01-22T15:57:17.000Z | tests/test_molecular_signatures_db.py | krassowski/gsea-api | deb562ea55871b799eb501a798dd49a881ff9523 | [
"MIT"
]
| 3 | 2020-03-06T01:48:53.000Z | 2021-10-06T04:15:55.000Z | tests/test_molecular_signatures_db.py | krassowski/gsea-api | deb562ea55871b799eb501a798dd49a881ff9523 | [
"MIT"
]
| 2 | 2019-12-01T18:41:07.000Z | 2020-07-15T14:52:17.000Z | from pytest import raises
from gsea_api.molecular_signatures_db import MolecularSignaturesDatabase
def test_load():
msigdb_7_1 = MolecularSignaturesDatabase('tests/test_msigdb', version=7.1)
assert msigdb_7_1.version == '7.1'
assert msigdb_7_1.gene_sets == [
{
'name': 'c2.cp.reactome',
'id_type': 'symbols'
}
]
reactome_7_1 = msigdb_7_1.load('c2.cp.reactome', 'symbols')
assert 'REACTOME_NERVOUS_SYSTEM_DEVELOPMENT' in reactome_7_1.gene_sets_by_name
assert 'REACTOME_SERINE_BIOSYNTHESIS' not in reactome_7_1.gene_sets_by_name
msigdb_7_0 = MolecularSignaturesDatabase('tests/test_msigdb', version=7.0)
reactome_7_0 = msigdb_7_0.load('c2.cp.reactome', 'symbols')
assert 'REACTOME_NERVOUS_SYSTEM_DEVELOPMENT' not in reactome_7_0.gene_sets_by_name
assert 'REACTOME_SERINE_BIOSYNTHESIS' in reactome_7_0.gene_sets_by_name
def test_fail_no_dir():
with raises(ValueError, match='Could not find MSigDB: wrong_dir_name does not exist'):
MolecularSignaturesDatabase('wrong_dir_name', version=7.1)
| 38.892857 | 90 | 0.747475 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 337 | 0.309458 |
a98a271a4efe485ccb8f3daffb76dc91992cf6a3 | 11,387 | py | Python | froide_govplan/admin.py | okfde/froide-govplan | 1ae085c39c25af7c7a74d90ce39580119942a328 | [
"MIT"
]
| 2 | 2022-03-13T14:49:46.000Z | 2022-03-14T18:39:04.000Z | froide_govplan/admin.py | okfde/froide-govplan | 1ae085c39c25af7c7a74d90ce39580119942a328 | [
"MIT"
]
| 3 | 2022-03-18T11:52:46.000Z | 2022-03-18T14:13:43.000Z | froide_govplan/admin.py | okfde/froide-govplan | 1ae085c39c25af7c7a74d90ce39580119942a328 | [
"MIT"
]
| 1 | 2022-03-18T09:36:20.000Z | 2022-03-18T09:36:20.000Z | from django.contrib import admin, auth
from django.contrib.auth.models import Group
from django.shortcuts import get_object_or_404, redirect, render
from django.urls import path, reverse, reverse_lazy
from django.utils.translation import gettext_lazy as _
from adminsortable2.admin import SortableAdminMixin
from froide.api import api_router
from froide.follow.admin import FollowerAdmin
from froide.helper.admin_utils import make_choose_object_action, make_emptyfilter
from froide.helper.widgets import TagAutocompleteWidget
from froide.organization.models import Organization
from .api_views import GovernmentPlanViewSet
from .auth import get_allowed_plans, has_limited_access
from .forms import (
GovernmentPlanForm,
GovernmentPlanUpdateAcceptProposalForm,
GovernmentPlanUpdateForm,
)
from .models import (
Government,
GovernmentPlan,
GovernmentPlanFollower,
GovernmentPlanSection,
GovernmentPlanUpdate,
)
User = auth.get_user_model()
api_router.register(r"governmentplan", GovernmentPlanViewSet, basename="governmentplan")
class GovPlanAdminSite(admin.AdminSite):
site_header = "Regierungsvorhaben"
site_url = "/koalitionstracker/"
class GovernmentPlanAdminForm(GovernmentPlanForm):
class Meta:
model = GovernmentPlan
fields = "__all__"
widgets = {
"categories": TagAutocompleteWidget(
autocomplete_url=reverse_lazy("api:category-autocomplete")
),
}
class GovernmentAdmin(admin.ModelAdmin):
prepopulated_fields = {"slug": ("name",)}
list_display = ("name", "public", "start_date", "end_date")
list_filter = ("public",)
def execute_assign_organization(admin, request, queryset, action_obj):
queryset.update(organization=action_obj)
def execute_assign_group(admin, request, queryset, action_obj):
queryset.update(group=action_obj)
PLAN_ACTIONS = {
"assign_organization": make_choose_object_action(
Organization, execute_assign_organization, _("Assign organization...")
),
"assign_group": make_choose_object_action(
Group, execute_assign_group, _("Assign permission group...")
),
}
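# As wired here, selecting plans in the changelist and picking one of these
# actions renders an object chooser; on submit, make_choose_object_action
# invokes the matching execute_* callback with the chosen object, updating
# the whole queryset in a single UPDATE.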
class GovernmentPlanAdmin(admin.ModelAdmin):
    # Use the admin form above so the category autocomplete widget applies.
    form = GovernmentPlanAdminForm
save_on_top = True
prepopulated_fields = {"slug": ("title",)}
search_fields = ("title",)
raw_id_fields = ("responsible_publicbody",)
actions = ["make_public"]
def get_queryset(self, request):
qs = get_allowed_plans(request)
qs = qs.prefetch_related(
"categories",
"organization",
"group",
)
return qs
def view_on_site(self, obj):
# Avoid Django's redirect through normal admin
# TODO: remove on https://github.com/django/django/pull/15526
return obj.get_absolute_url()
def get_actions(self, request):
actions = super().get_actions(request)
if not has_limited_access(request.user):
admin_actions = {
action: (
func,
action,
func.short_description,
)
for action, func in PLAN_ACTIONS.items()
}
actions.update(admin_actions)
return actions
def get_urls(self):
urls = super().get_urls()
my_urls = [
path(
"<int:pk>/accept-proposal/",
self.admin_site.admin_view(self.accept_proposal),
name="froide_govplan-plan_accept_proposal",
),
]
return my_urls + urls
def get_list_display(self, request):
list_display = [
"title",
"public",
"status",
"rating",
"organization",
"get_categories",
]
if not has_limited_access(request.user):
list_display.append("group")
return list_display
def get_list_filter(self, request):
list_filter = [
"status",
"rating",
"public",
]
if not has_limited_access(request.user):
list_filter.extend(
[
make_emptyfilter(
"proposals", _("Has change proposals"), empty_value=None
),
"organization",
"group",
"government",
"categories",
]
)
return list_filter
def get_fields(self, request, obj=None):
if has_limited_access(request.user):
return (
"title",
"slug",
"description",
"quote",
"public",
"due_date",
"measure",
"status",
"rating",
"reference",
)
return super().get_fields(request, obj=obj)
def get_categories(self, obj):
"""
Return the categories linked in HTML.
"""
categories = [category.name for category in obj.categories.all()]
return ", ".join(categories)
get_categories.short_description = _("category(s)")
def make_public(self, request, queryset):
queryset.update(public=True)
make_public.short_description = _("Make public")
def accept_proposal(self, request, pk):
obj = get_object_or_404(self.get_queryset(request), pk=pk)
plan_url = reverse(
"admin:froide_govplan_governmentplan_change",
args=(obj.pk,),
current_app=self.admin_site.name,
)
if not obj.proposals:
return redirect(plan_url)
if request.method == "POST":
proposals = obj.proposals or {}
proposal_id = request.POST.get("proposal_id")
delete_proposals = request.POST.getlist("proposal_delete")
update = None
if proposal_id:
data = proposals[proposal_id]["data"]
form = GovernmentPlanUpdateAcceptProposalForm(data=data, plan=obj)
if form.is_valid():
update = form.save(
proposal_id=proposal_id,
delete_proposals=delete_proposals,
)
else:
form = GovernmentPlanUpdateAcceptProposalForm(data={}, plan=obj)
form.delete_proposals(delete_proposals)
if update is None:
self.message_user(request, _("The proposal has been deleted."))
return redirect(plan_url)
self.message_user(
request,
_("An unpublished update has been created."),
)
update_url = reverse(
"admin:froide_govplan_governmentplanupdate_change",
args=(update.pk,),
current_app=self.admin_site.name,
)
return redirect(update_url)
else:
form = GovernmentPlanUpdateAcceptProposalForm(plan=obj)
opts = self.model._meta
context = {
"form": form,
"proposals": form.get_proposals(),
"object": obj,
"app_label": opts.app_label,
"opts": opts,
}
return render(
request,
"froide_govplan/admin/accept_proposal.html",
context,
)
class GovernmentPlanUpdateAdmin(admin.ModelAdmin):
form = GovernmentPlanUpdateForm
save_on_top = True
raw_id_fields = ("user", "foirequest")
list_display = (
"title",
"timestamp",
"plan",
"user",
"status",
"rating",
"public",
)
list_filter = (
"status",
"public",
"organization",
)
    search_fields = (
        "title",
        "content",
        "plan__title",
    )
date_hierarchy = "timestamp"
def get_queryset(self, request):
qs = super().get_queryset(request)
qs = qs.prefetch_related(
"plan",
"user",
)
if has_limited_access(request.user):
qs = qs.filter(plan__in=get_allowed_plans(request))
return qs
def view_on_site(self, obj):
# Avoid Django's redirect through normal admin
# TODO: remove on https://github.com/django/django/pull/15526
return obj.get_absolute_url()
def save_model(self, request, obj, form, change):
limited = has_limited_access(request.user)
if not change and limited:
# When added by a limited user,
# autofill user and organization
obj.user = request.user
if obj.plan.organization:
                user_has_org = request.user.organization_set.filter(pk=obj.plan.organization_id).exists()
if user_has_org:
obj.organization = obj.plan.organization
res = super().save_model(request, obj, form, change)
obj.plan.update_from_updates()
return res
def get_fields(self, request, obj=None):
if has_limited_access(request.user):
return (
"plan",
"title",
"timestamp",
"content",
"url",
"status",
"rating",
"public",
)
return super().get_fields(request, obj=obj)
def formfield_for_foreignkey(self, db_field, request, **kwargs):
if db_field.name == "plan":
if has_limited_access(request.user):
kwargs["queryset"] = get_allowed_plans(request)
return super().formfield_for_foreignkey(db_field, request, **kwargs)
def user_in_obj_group(self, request, obj):
if not obj.plan.group_id:
return False
user = request.user
return User.objects.filter(pk=user.pk, groups=obj.plan.group_id).exists()
def has_view_permission(self, request, obj=None):
if obj and self.user_in_obj_group(request, obj):
return True
return super().has_view_permission(request, obj=obj)
def has_add_permission(self, request):
return super().has_add_permission(request)
def has_change_permission(self, request, obj=None):
if obj and self.user_in_obj_group(request, obj):
return True
return super().has_change_permission(request, obj=obj)
class GovernmentPlanSectionAdmin(SortableAdminMixin, admin.ModelAdmin):
save_on_top = True
prepopulated_fields = {"slug": ("title",)}
search_fields = ("title",)
raw_id_fields = ("categories",)
list_display = (
"title",
"featured",
)
list_filter = (
"featured",
"categories",
"government",
)
admin.site.register(Government, GovernmentAdmin)
admin.site.register(GovernmentPlan, GovernmentPlanAdmin)
admin.site.register(GovernmentPlanUpdate, GovernmentPlanUpdateAdmin)
admin.site.register(GovernmentPlanSection, GovernmentPlanSectionAdmin)
admin.site.register(GovernmentPlanFollower, FollowerAdmin)
govplan_admin_site = GovPlanAdminSite(name="govplanadmin")
govplan_admin_site.register(GovernmentPlan, GovernmentPlanAdmin)
govplan_admin_site.register(GovernmentPlanUpdate, GovernmentPlanUpdateAdmin)
# ---- tests/test_xmltompd.py | thiblahute/python-mpegdash | MIT ----
try:
import unittest2 as unittest
except ImportError:
import unittest
from mpegdash.parser import MPEGDASHParser
class XML2MPDTestCase(unittest.TestCase):
def test_xml2mpd_from_string(self):
mpd_string = '''
<MPD xmlns="urn:mpeg:DASH:schema:MPD:2011" mediaPresentationDuration="PT0H1M52.43S" minBufferTime="PT1.5S"
profiles="urn:mpeg:dash:profile:isoff-on-demand:2011" type="static">
<Period duration="PT0H1M52.43S" start="PT0S">
<AdaptationSet>
<ContentComponent contentType="video" id="1" />
<Representation bandwidth="4190760" codecs="avc1.640028" height="1080" id="1" mimeType="video/mp4" width="1920">
<BaseURL>motion-20120802-89.mp4</BaseURL>
<SegmentBase indexRange="674-981">
<Initialization range="0-673" />
</SegmentBase>
</Representation>
</AdaptationSet>
</Period>
</MPD>
'''
self.assert_mpd(MPEGDASHParser.parse(mpd_string))
def test_xml2mpd_from_file(self):
self.assert_mpd(MPEGDASHParser.parse('./tests/mpd-samples/sample-001.mpd'))
self.assert_mpd(MPEGDASHParser.parse('./tests/mpd-samples/motion-20120802-manifest.mpd'))
self.assert_mpd(MPEGDASHParser.parse('./tests/mpd-samples/oops-20120802-manifest.mpd'))
self.assert_mpd(MPEGDASHParser.parse('./tests/mpd-samples/360p_speciment_dash.mpd'))
def test_xml2mpd_from_url(self):
mpd_url = 'http://yt-dash-mse-test.commondatastorage.googleapis.com/media/motion-20120802-manifest.mpd'
self.assert_mpd(MPEGDASHParser.parse(mpd_url))
def test_xml2mpd_from_file_with_utc_timing(self):
mpd = MPEGDASHParser.parse('./tests/mpd-samples/utc_timing.mpd')
self.assertEqual(mpd.utc_timings[0].scheme_id_uri, 'urn:mpeg:dash:utc:http-iso:2014')
self.assertEqual(mpd.utc_timings[0].value, 'https://time.akamai.com/?iso')
def test_xml2mpd_from_file_with_event_messagedata(self):
mpd = MPEGDASHParser.parse('./tests/mpd-samples/with_event_message_data.mpd')
self.assertTrue(mpd.periods[0].event_streams[0].events[0].message_data is not None)
self.assertTrue(mpd.periods[0].event_streams[0].events[0].event_value is None)
self.assertTrue(mpd.periods[0].event_streams[0].events[1].message_data is None)
self.assertEqual(mpd.periods[0].event_streams[0].events[1].event_value, "Some Random Event Text")
def assert_mpd(self, mpd):
self.assertTrue(mpd is not None)
self.assertTrue(len(mpd.periods) > 0)
self.assertTrue(mpd.periods[0].adaptation_sets is not None)
self.assertTrue(len(mpd.periods[0].adaptation_sets) > 0)
self.assertTrue(mpd.periods[0].adaptation_sets[0].representations is not None)
self.assertTrue(len(mpd.periods[0].adaptation_sets[0].representations) > 0)
self.assertTrue(len(mpd.periods[0].adaptation_sets[0].representations[0].id) > 0)
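# Hedged usage sketch (not part of the upstream test suite): only
# MPEGDASHParser.parse is exercised above; the `bandwidth` attribute read
# below is an assumption beyond what the assertions confirm.
def _example_walk_mpd():
    mpd = MPEGDASHParser.parse('./tests/mpd-samples/sample-001.mpd')
    for period in mpd.periods:
        for adaptation_set in period.adaptation_sets:
            for representation in adaptation_set.representations:
                print(representation.id, representation.bandwidth)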
# ---- algorithm_toolbox/week_4/03_divide_and_conquer_1_search_array/iterativeBinSearch.py | dibyanshushekhardey/data_struct_and_algo_coursera | MIT ----
def BinarySearchIt(A, low, high, key):
while low <= high:
mid = low + ((high - low)//2)
if key == A[mid]:
return mid
elif key < A[mid]:
high = mid - 1
else:
low = mid + 1
    return -1  # not found (the caller checks for -1)
arr = [3, 5, 8, 10, 12, 15, 18, 20, 20, 50, 60]
low = 0               # search the whole array: first index...
high = len(arr) - 1   # ...through last valid index
key = 50
index = BinarySearchIt(arr, low, high, key)
if index != -1:
    print("Element", key, "is present at index", index)
else:
    print("Element", key, "is not present")
# ---- 2016/day-02.py | mharty3/advent_of_code | MIT ----
#--- Day 2: Bathroom Security ---
from typing import List
def parse(input_data: str) -> List[List[str]]:
lines = input_data.strip().split()
directions = [list(line) for line in lines]
return directions
def move1(x, y, direction):
if direction == 'U':
y -= 1
elif direction == 'D':
y += 1
elif direction == 'L':
x -= 1
elif direction == 'R':
x += 1
if y < 0: y = 0
if y > 2: y = 2
if x < 0: x = 0
if x > 2: x = 2
return x, y
def move2(x, y, direction, keypad):
last_x = x
last_y = y
if direction == 'U':
y -= 1
elif direction == 'D':
y += 1
elif direction == 'L':
x -= 1
elif direction == 'R':
x += 1
if keypad[x][y] == '-':
return last_x, last_y
else:
return x, y
def solve1(input_data: str) -> str:
keypad = [[1, 2, 3],
[4, 5, 6],
[7, 8, 9]]
x = 1
y = 1
keycode = []
for line in parse(input_data):
for direction in line:
x, y = move1(x, y, direction)
keycode.append(str(keypad[y][x]))
return ''.join(keycode)
def solve2(input_data):
keypad = [['-', '-', '-', '-', '-', '-', '-'],
['-', '-', '-', '1', '-', '-', '-'],
['-', '-', '2', '3', '4', '-', '-'],
['-', '5', '6', '7', '8', '9', '-'],
['-', '-', 'A', 'B', 'C', '-', '-'],
['-', '-', '-', 'D', '-', '-', '-'],
['-', '-', '-', '-', '-', '-', '-']]
x = 1
y = 3
keycode = []
for line in parse(input_data):
for direction in line:
x, y = move2(x, y, direction, keypad)
keycode.append(keypad[y][x])
return ''.join(keycode)
test_data = """ULL
RRDDD
LURDL
UUUUD"""
assert solve1(test_data) == '1985'
assert solve2(test_data) == '5DB3'
if __name__ == '__main__':
from aocd.models import Puzzle
puzzle = Puzzle(2016, 2)
answer_1 = solve1(puzzle.input_data)
print(answer_1)
puzzle.answer_a = answer_1
answer_2 = solve2(puzzle.input_data)
puzzle.answer_b = answer_2
# ---- compose.py | gicmo/koji-osbuild | Apache-2.0 ----
#!/usr/bin/python3
import argparse
import koji
import os
from pprint import pprint
def main():
parser = argparse.ArgumentParser(description="osbuild koji client")
parser.add_argument("--url", metavar="URL", type=str,
default="https://localhost/kojihub",
help="The URL koji hub API endpoint")
parser.add_argument("--repo", metavar="REPO", help='The repository to use',
type=str, action="append", default=[])
parser.add_argument("--release", metavar="RELEASE", help='The distribution release')
parser.add_argument("--user", metavar="USER", default="kojiadmin")
parser.add_argument("--password", metavar="PASSWORD", default="kojipass")
parser.add_argument("--principal", metavar="USER", default="osbuild-krb@LOCAL")
parser.add_argument("--keytab", metavar="FILE", help="kerberos keytab",
default="/tmp/osbuild-composer-koji-test/client.keytab")
parser.add_argument("--serverca", metavar="FILE", help="Server CA",
default="/tmp/osbuild-composer-koji-test/ca-crt.pem")
parser.add_argument("--plain", help="use plain text login",
default=False, action="store_true")
parser.add_argument("name", metavar="NAME", help='The distribution name')
parser.add_argument("version", metavar="VERSION", help='The distribution version')
parser.add_argument("distro", metavar="DISTRO", help='The distribution')
parser.add_argument("target", metavar="TARGET", help='The build target')
parser.add_argument("arch", metavar="ARCHITECTURE", help='Request the architecture',
type=str, nargs="+")
parser.add_argument("--image-type", metavar="TYPE",
help='Request an image-type [default: qcow2]',
type=str, action="append", default=[])
args = parser.parse_args()
opts = {"user": args.user, "password": args.password, "serverca": args.serverca}
session = koji.ClientSession(args.url, opts)
if args.plain:
session.login()
else:
session.gssapi_login(principal=args.principal, keytab=args.keytab)
name, version, arch, target = args.name, args.version, args.arch, args.target
distro, image_types = args.distro, args.image_type
if not image_types:
image_types = ["qcow2"]
    build_opts = {}  # renamed from `opts` to avoid shadowing the session options above
    if args.release:
        build_opts["release"] = args.release
    if args.repo:
        build_opts["repo"] = ",".join(args.repo)
print("name:", name)
print("version:", version)
print("distro:", distro)
print("arches:", ", ".join(arch))
print("target:", target)
print("image types ", str(image_types))
    if build_opts:
        pprint(build_opts)
    session.osbuildImage(name, version, distro, image_types, target, arch, opts=build_opts)
if __name__ == "__main__":
main()
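# Example invocation (hedged: the hub URL and target names are placeholders,
# and a koji hub with the osbuild plugin is assumed to be reachable):
#
#   python3 compose.py --plain --url https://koji.example.org/kojihub \
#       Fedora 35 fedora-35 f35-candidate x86_64 --image-type qcow2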
# ---- {{cookiecutter.repo_name}}/{{cookiecutter.package_name}}/{{cookiecutter.package_name}}.py | numengo/cc-py-setup | ISC / Apache-2.0 / MIT ----
# -*- coding: utf-8 -*-
"""Main module {{cookiecutter.project_name}} """
from __future__ import absolute_import
from __future__ import unicode_literals
# ---- src/models/def_features.py | jshcs/cfe | MIT ----
from config import *
from utils import *
import datetime
import pickle
indir_vocab_jnames = VOCAB_JNAMES
indir_bio_srt = BIO_SRT
indir_sorted_fperson_fname = SORTED_FPERSON_FNAME
indir_sorted_lperson_fname = SORTED_LPERSON_FNAME
print indir_vocab_jnames
with open(indir_vocab_jnames,'rb') as v:
all_vocab=pickle.load(v)
with open(indir_bio_srt,'rb') as v:
all_bio_vocab=pickle.load(v)
all_bio_vocab = [a.decode('utf-8') for a in all_bio_vocab]
sorted_fname = read_sorted_file_into_array(indir_sorted_fperson_fname)
sorted_lname = read_sorted_file_into_array(indir_sorted_lperson_fname)
class CRF_Features():
def __init__(self,token):
self.token = token
self.jnames_vocab=all_vocab
self.bioterms_vocab=all_bio_vocab
self.features = {k:False for k in config_params['feature_names']}
# self.db =simstring.reader(DB_JNAMES)
def is_title(self):
self.features["is_title"] = self.token.istitle()
def is_upper(self):
self.features["is_upper"] = self.token.isupper()
def is_alpha_num(self):
self.features["is_alpha_num"] = self.token.isalnum()
def word_length(self):
self.features["word_length"] = len(self.token)
def is_num(self):
self.features["is_number"] = self.token.isdigit()
def ends_with_period(self):
self.features["ends_with_period"]=self.token[-1]=='.'
def enclosed_brackets(self):
if self.token[0] in BRACKETS:
if self.token[-1]==BRACKETS[self.token[0]]:
self.features["enclosed_brackets"]=True
else:
self.features["enclosed_brackets"]=False
else:
self.features["enclosed_brackets"]=False
def has_hyphen(self):
self.features["has_hyphen"] = False
parts = self.token.split('-')
if len(parts) > 1 :
self.features["has_hyphen"] = True
is_digit = True
for part in parts :
if part != '':
is_digit = is_digit and part.isdigit()
self.features["is_number"] = is_digit
def has_colon(self):
self.features["has_colon"]= False
parts = self.token.split(':')
if len(parts) > 1 :
self.features["has_colon"]= True
def is_etal(self):
self.features["et_al"] = self.token == 'et' or self.token == 'al'
def is_valid_year(self):
self.features["is_valid_year"] = self.token.isdigit() and self.features["word_length"] <= 4 \
and self.features["word_length"] >=2 and 1<=int(self.token)<=datetime.datetime.now().year
def is_special_token(self):
self.features["is_special_token"] = True if self.token in SPCL_KEYS else False
def has_period_period(self): #12
# s=time.time()
self.features["has_period_period"]=False
if ".." in self.token:
self.features["has_period_period"]=True
def has_period_comma(self): #13
if ".," in self.token:
self.features["has_period_comma"]=True
def is_url(self): #14
if "http://" in self.token or "www." in self.token :
self.features["is_url"]=True
else :
self.features["is_url"] = False
def is_email(self): #15
stra = self.token
if '@' in stra and '.' in stra.split('@')[1] :
self.features["is_email"]=True
else :
self.features["is_email"]=False
def first_name_lexicon(self): #16
# s=time.time()
if len(self.token)==2 and self.features["is_upper"] and self.features["ends_with_period"]:
self.features["first_name_lexicon"]=True
return
arr= sorted_fname
start=0
end=len(arr)-1
self.features["first_name_lexicon"]=binary_search(arr,self.token.upper(),start,end)
def last_name_lexicon(self): #17
# s=time.time()
#arr=read_sorted_file_into_array(SORTED_LPERSON_FNAME)
arr= sorted_lname
start=0
end=len(arr)-1
self.features["last_name_lexicon"]=binary_search(arr,self.token.upper(),start,end)
# e=time.time()
# self.times.append(e-s)
def journal_lexicon(self): #18
if self.token.lower() in self.jnames_vocab:
self.features['journal_name']=True
else :
self.features['journal_name']=False
def is_bio_term(self): #19
token = self.token.decode('utf-8')
self.features["is_bio_term"]=binary_search(all_bio_vocab,token.lower(),0,len(all_bio_vocab)-1)
def get_features(self):
self.is_title()
self.is_upper()
self.is_alpha_num()
self.word_length()
self.is_num()
self.ends_with_period()
self.enclosed_brackets()
self.has_hyphen()
self.has_colon()
self.is_etal()
self.is_valid_year()
self.is_special_token()
self.has_period_comma()
self.has_period_period()
self.is_url()
self.is_email()
self.first_name_lexicon()
self.last_name_lexicon()
self.is_bio_term()
self.journal_lexicon()
return self.features
def test():
citation = 'A. Mironov A. Morozov And Morozov arXiv:1003.5752'
words = citation.split(' ')
for word in words :
feats = CRF_Features(word)
print feats.get_features()
#test()
# ---- profile_python/profile.py | heroesofcode/profile-python | MIT ----
from rich.console import Console
from rich.table import Table
from rich.progress import track
from time import sleep
import sys
class Profile(object):
def get_datas(self, datas):
try:
print(datas['login'])
print(datas['name'])
print(datas['bio'])
print(datas['company'])
print(datas['blog'])
print(datas['location'])
        except (TypeError, KeyError):
print("This user does not exist")
    def get_repos(self, repos):
        try:
            # Build one table and add a row per repository, instead of
            # printing a separate single-row table for every repo.
            table = Table(show_header=True, header_style="bold magenta")
            table.add_column("Repository Name")
            table.add_column("Language")
            table.add_column("Forks")
            table.add_column("Stars")
            for repo in repos:
                table.add_row(
                    repo['name'],
                    repo['language'],
                    str(repo['forks_count']),
                    str(repo['stargazers_count'])
                )
            console = Console()
            console.print(table)
        except (TypeError, KeyError):
            print("This user does not exist")
    def exit_application(self):
        option_exit = input("Do you really want to exit the system? y/n: ")
        if option_exit == "y":
            sys.exit()
def process_data(self):
for _ in track(range(100), description='[green]Processing data'):
sleep(0.02)
def run_app(self, values_datas, values_repos):
while True:
print("-----------------------------------------------")
print("1 - My datas")
print("2 - Repositories")
print("3 - Exist")
print("-----------------------------------------------")
option = input("Choose an option: ")
if option == "1":
self.process_data()
self.get_datas(values_datas)
elif option == "2":
self.process_data()
self.get_repos(values_repos)
elif option == "3":
                self.exit_application()
else:
print("This option does not exist")
# ---- app/view/admin/notification_manage.py | G1NTOKI0522/WeChatterBot | BSD-3-Clause ----
# coding: utf-8
import datetime
from flask_login import login_required, current_user
from flask import Blueprint, request
from app.libs.http import jsonify, error_jsonify
from app.libs.db import session
from app.serializer.notice import NoticeParaSchema
from app.model.notice import Notice
bp_admin_notification = Blueprint('admin_notification', __name__, url_prefix='/admin/notification')
@bp_admin_notification.route("/", methods=["POST"])
@login_required
def notification_manage():  # admin creates a notification
    if current_user.isAdmin == 0:  # admins only
return error_jsonify(10000003)
json = request.get_json()
data, errors = NoticeParaSchema().load(json)
if errors:
return error_jsonify(10000001, errors)
now = datetime.datetime.now()
data['created_at'] = now
    data['source'] = '山东省人力资源管理部门'  # "Shandong Provincial Human Resources Department"
data['user_id'] = current_user.id
new_data = Notice(**data)
session.add(new_data)
session.commit()
return jsonify({})
@bp_admin_notification.route("/", methods=["GET"])
@login_required
def notification_get():  # admin fetches notifications
    if current_user.isAdmin == 0:  # admins only
        return error_jsonify(10000003)
    if current_user.isAdmin == 2:  # provincial-level admin
        res = Notice.query.all()  # gets every notice
    if current_user.isAdmin == 1:  # city-level admin
res = Notice.query.filter_by(user_id=current_user.id).all()
data_need, errors = NoticeParaSchema(many=True).dump(res)
if errors:
return error_jsonify(10000001, errors)
return jsonify(data_need)
@bp_admin_notification.route("/<int:id>", methods=["POST"])
@login_required
def notice_manage_id(id):  # admin edits a notification
    if current_user.isAdmin == 0:  # admins only
return error_jsonify(10000003)
json = request.get_json()
data, errors = NoticeParaSchema().load(json)
if errors:
return error_jsonify(10000001, errors)
data_need = Notice.query.filter_by(id=id)
    if data_need.first() is None:  # no notice with this id; the update fails
return error_jsonify(10000018)
data_need.update(data)
session.commit()
return jsonify({})
@bp_admin_notification.route("/<int:id>", methods=["DELETE"])
@login_required
def notice_manage_delete(id):  # delete the notification with the given id
    if current_user.isAdmin == 0:  # admins only
return error_jsonify(10000003)
data_need = Notice.query.filter_by(id=id).first()
if data_need is None:
return error_jsonify(10000017)
session.delete(data_need)
session.commit()
return jsonify({})
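# Hedged wiring sketch -- how this blueprint would typically be attached to the
# Flask app (the `app` object lives elsewhere in this project):
#
#   from app.view.admin.notification_manage import bp_admin_notification
#   app.register_blueprint(bp_admin_notification)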
# ---- test/test_delete_contact_from_group.py | schukinp/python_training | Apache-2.0 ----
from fixture.orm import ORMfixture
from model.group import Group
from model.contact import Contact
import random
db = ORMfixture(host='127.0.0.1', name='addressbook', user='root', password='')
def test_delete_contact_from_group(app):
if len(db.get_contact_list()) == 0:
app.contact.create(Contact(firstname="Russel", lastname="Westbrook"))
if len(db.get_group_list()) == 0:
app.group.create(Group(name="Test"))
old_contacts = db.get_contact_list()
old_groups = db.get_group_list()
contact = random.choice(old_contacts)
group = random.choice(old_groups)
old_contacts_in_group = db.get_contacts_in_group(group)
if len(db.get_contacts_in_group(group)) == 0:
app.contact.add_contact_to_group(contact, group)
else:
contact = random.choice(old_contacts_in_group)
old_contacts_in_group_update = db.get_contacts_in_group(group)
app.contact.delete_contact_from_group(contact, group)
new_contacts_in_group = db.get_contacts_in_group(group)
assert len(old_contacts_in_group_update) - 1 == len(new_contacts_in_group)
old_contacts_in_group_update.remove(contact)
assert sorted(old_contacts_in_group_update, key=Contact.id_or_max) == sorted(new_contacts_in_group, key=Contact.id_or_max)
# ---- apps/accounts/views.py | martindwyer/Juntos | MIT ----
from django.urls import reverse_lazy
from django.contrib.auth import get_user_model
from django.views.generic import CreateView
from . import forms
User = get_user_model()
class SignUp(CreateView):
form_class = forms.UserCreateForm
success_url = reverse_lazy('login')
template_name = 'accounts/signup.html'
| 23.071429 | 46 | 0.783282 | 146 | 0.452012 | 0 | 0 | 0 | 0 | 0 | 0 | 29 | 0.089783 |
a99aa91e73c38055d1f2d643a8c77c56216293f4 | 6,498 | py | Python | colossalai/engine/_base_engine.py | rahulgupta9202/ColossalAI | 993088d45eaa032e39cf5959df2a506f0663bc2e | [
"Apache-2.0"
]
| 1 | 2022-03-12T04:49:19.000Z | 2022-03-12T04:49:19.000Z | colossalai/engine/_base_engine.py | rahulgupta9202/ColossalAI | 993088d45eaa032e39cf5959df2a506f0663bc2e | [
"Apache-2.0"
]
| null | null | null | colossalai/engine/_base_engine.py | rahulgupta9202/ColossalAI | 993088d45eaa032e39cf5959df2a506f0663bc2e | [
"Apache-2.0"
]
| null | null | null | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
from torch.nn import Module
from torch.nn.modules.loss import _Loss
from torch.optim import Optimizer
from colossalai.builder import build_gradient_handler
from colossalai.context import ParallelMode
from colossalai.core import global_context as gpc
from colossalai.logging import get_global_dist_logger
from colossalai.nn import (ZeroRedundancyOptimizer_Level_2,
ZeroRedundancyOptimizer_Level_3)
from .schedule import BaseSchedule
class Engine:
"""Basic engine class for training and evaluation. It runs a specific process method
:meth:`step` which is based on the given :attr:`schedule` over each batch of a dataset.
It controls a iteration in training.
:param model: The neural network model
:param optimizer: Optimizer for updating the parameters
:param step_schedule: Running schedule in :meth:`step`
:param gradient_accumulation: Steps of gradient accumulation
:param gradient_clipping: The norm of gradient clipping
:type model: Module
:type optimizer: Optimizer
:type step_schedule: BaseSchedule, optional
:type gradient_accumulation: int, optional
:type gradient_clipping: float, optional
"""
def __init__(self,
model: Module,
optimizer: Optimizer,
criterion: _Loss,
step_schedule: BaseSchedule,
gradient_handlers: list = None,
gradient_accumulation: int = 1,
gradient_clipping: float = 0.0,
):
self._model = model
self._optimizer = optimizer
self._criterion = criterion
self._schedule = step_schedule
# schedule initialize
self._schedule.initialize(model, optimizer)
# state
self.training = True # default
# gradient accumulation
assert gradient_accumulation > 0, 'gradient accumulation size must be larger than 0'
self._grad_accum_size = gradient_accumulation
self._grad_clip = gradient_clipping
self._logger = get_global_dist_logger()
# build gradient handler
self._gradient_handlers = []
if gradient_handlers is not None:
assert isinstance(gradient_handlers, list), \
f'argument gradient_handler_cfg expected type list, ' \
f'but got type {type(gradient_handlers)}'
elif isinstance(optimizer, (ZeroRedundancyOptimizer_Level_2,
ZeroRedundancyOptimizer_Level_3)):
gradient_handlers = [dict(type='ZeROGradientHandler')]
self._logger.info(
"Training with zero is detected, ZeROGradientHandler is automatically "
"added even though not specified in the configuration",
ranks=[0])
elif gpc.is_initialized(ParallelMode.DATA) and gpc.get_world_size(
ParallelMode.DATA) > 1:
gradient_handlers = [dict(type='DataParallelGradientHandler')]
self._logger.info(
"Data parallel training is detected, DataParallelGradientHandler is automatically "
"added even though not specified in the configuration",
ranks=[0])
if gradient_handlers is None:
self._logger.warning(
"No gradient handler is set up, please make sure you do not need "
"to all-reduce the gradients after a training step.",
ranks=[0])
else:
for cfg in gradient_handlers:
handler = build_gradient_handler(cfg, model, optimizer)
self._gradient_handlers.append(handler)
@property
def model(self):
return self._model
@property
def optimizer(self):
return self._optimizer
@property
def criterion(self):
return self._criterion
@property
def schedule(self):
return self._schedule
@property
def gradient_accumulation(self):
return self._grad_accum_size
def handle_gradient(self):
"""Handles all-reduce operations of gradients across different parallel groups.
"""
for handler in self._gradient_handlers:
handler.handle_gradient()
def train(self):
"""Sets the model to training mode.
"""
self.training = True
self._model.train()
def eval(self):
"""Sets the model to evaluation mode.
"""
self.training = False
self._model.eval()
def step(self,
data_iter,
is_last_iteration: bool = False,
return_loss=True):
"""A running step based on the schedule. Usually, it runs a training or
evaluation over a batch of dataset.
:param data_iter: Data iterator of the dataset
:param is_last_iteration: If True, this iteration is the last iteration in the epoch
:param return_loss: loss will be returned if True
:type data_iter: Iterator
:type is_last_iteration: bool, optional
:type return_loss: bool, optional
:return: (output, lablel, loss)
"""
if self.training:
self._optimizer.zero_grad()
# differentiate training and eval with grad accum
if self.training:
for i in range(self._grad_accum_size):
output, label, loss = self._schedule.forward_backward_step(
data_iter, self._model, self._criterion, self._optimizer,
forward_only=False,
grad_accum_size=self._grad_accum_size,
return_loss=return_loss)
if i == self._grad_accum_size - 1:
# all reduce gradients
self.handle_gradient()
self._schedule.optimizer_step(self._model, self._optimizer, self._grad_clip)
else:
output, label, loss = self._schedule.forward_backward_step(
data_iter, self._model, self._criterion, self._optimizer,
forward_only=True,
grad_accum_size=1,
return_loss=return_loss)
# consume the remaining dataset left out due to gradient accumulation
if is_last_iteration:
while True:
try:
_ = next(data_iter)
except StopIteration:
break
return output, label, loss
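# Hedged usage sketch: a minimal loop built only from the public surface
# defined above (how the engine itself is constructed -- model, optimizer,
# criterion, schedule -- is assumed to happen elsewhere):
#
#   engine.train()
#   data_iter = iter(train_dataloader)
#   for _ in range(steps_per_epoch):
#       output, label, loss = engine.step(data_iter)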
# ---- 11. Optical Flow/optical_flow.py | farhan0syakir/OpenCv-tutorial | MIT ----
import numpy as np
import cv2
cap = cv2.VideoCapture('motion.avi')
ret, frame = cap.read()
gs_im0 = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
# Keyword arguments: the original positional `False` landed in the `mask` slot.
points_prev = cv2.goodFeaturesToTrack(gs_im0, maxCorners=100, qualityLevel=0.03, minDistance=9.0, mask=None)
while(cap.isOpened()):
    ret, frame = cap.read()
    if not ret:  # end of stream -- avoid calling cvtColor on a None frame
        break
    gs_im1 = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    # Call tracker; winSize must be passed by keyword (the positional slot
    # after nextPts is `status`, not the window size).
    points, st, err = cv2.calcOpticalFlowPyrLK(gs_im0, gs_im1, points_prev, None, winSize=(3, 3))
    for p in points[st == 1]:  # keep only successfully tracked points
        a, b = p.ravel()
        frame = cv2.circle(frame, (int(a), int(b)), 3, (255, 255, 255), -1)
    cv2.imshow('frame', frame)
    points_prev = points[st == 1].reshape(-1, 1, 2)
gs_im0 = gs_im1
if cv2.waitKey(1) & 0xFF == ord('q'):
break
cap.release()
cv2.destroyAllWindows()
# ---- game/rendering.py | rajbala5479/asteroid | MIT ----
import math
class Renderer:
# Convenience methods
    def drawCircle(self, radius=10, res=30):
pass
class FilledPolygon:
    def __init__(self, points):
        self.points = points
    def render(self):
        if len(self.points) < 3:
            return  # a filled polygon needs at least three vertices
        # actual drawing would go here; left as a stub
class PolyLine:
    def __init__(self, points, close=False):
        self.points = points
        self.close = close
def make_circle(radius=10, res=20, filled=True):
points = []
for i in range(res):
ang = 2*math.pi * i / res
        points.append((math.cos(ang) * radius, math.sin(ang) * radius))
if filled:
return FilledPolygon(points)
else:
        return PolyLine(points, True)
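# Tiny usage sketch (the render methods above are stubs, so this only shows
# the intended API surface):
if __name__ == '__main__':
    disc = make_circle(radius=5, res=8, filled=True)
    disc.render()
    outline = make_circle(radius=5, res=8, filled=False)
    print(len(outline.points), 'outline points')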
# ---- tests/components/test_servo.py | Shivam60/j5 | MIT ----
"""Tests for the servo classes."""
from typing import List, Optional, Type
import pytest
from j5.backends import Backend
from j5.boards import Board
from j5.components.servo import Servo, ServoInterface, ServoPosition
class MockServoDriver(ServoInterface):
"""A testing driver for servos."""
def get_servo_position(self, board: Board, identifier: int) -> ServoPosition:
"""Get the position of a Servo."""
return 0.5
def set_servo_position(
self,
board: Board,
identifier: int,
position: ServoPosition,
) -> None:
"""Set the position of a Servo."""
pass
class MockServoBoard(Board):
"""A testing board for servos."""
@property
def name(self) -> str:
"""The name of this board."""
return "Testing Servo Board"
@property
def serial(self) -> str:
"""The serial number of this board."""
return "SERIAL"
@property
def firmware_version(self) -> Optional[str]:
"""Get the firmware version of this board."""
return self._backend.get_firmware_version(self)
@property
def supported_components(self) -> List[Type["Component"]]:
"""List the types of component that this Board supports."""
return [Servo]
def make_safe(self):
"""Make this board safe."""
pass
@staticmethod
def discover(backend: Backend):
"""Detect all of the boards on a given backend."""
return []
def test_servo_interface_implementation():
"""Test that we can implement the ServoInterface."""
MockServoDriver()
def test_servo_interface_class():
"""Test that the interface class is ServoInterface."""
assert Servo.interface_class() is ServoInterface
def test_servo_instantiation():
"""Test that we can instantiate a Servo."""
Servo(0, MockServoBoard(), MockServoDriver())
def test_servo_get_position():
"""Test that we can get the position of a servo."""
servo = Servo(2, MockServoBoard(), MockServoDriver())
assert type(servo.position) is float
assert servo.position == 0.5
def test_servo_set_position():
"""Test that we can set the position of a servo."""
servo = Servo(2, MockServoBoard(), MockServoDriver())
servo.position = 0.6
def test_servo_set_position_none():
"""Test that we can set the position of a servo to None."""
servo = Servo(2, MockServoBoard(), MockServoDriver())
servo.position = None
def test_servo_set_position_out_of_bounds():
"""Test that we cannot set < -1 or > 1."""
servo = Servo(2, MockServoBoard(), MockServoDriver())
with pytest.raises(ValueError):
servo.position = 2
with pytest.raises(ValueError):
servo.position = -2
# ---- Desafios/des029.py | vitormrts/ExerciciosPython | MIT ----
# Reads a sentence (prompt and output are in Portuguese) and reports how many
# times the letter "a" appears and its first and last positions; .index('a')
# assumes the sentence contains at least one "a".
frase = str(input('Digite uma frase: ')).lower()
print('Sobre a letra "a": \nQuantas vezes ela aparece? {} vezes;'.format(frase.count('a')))
print('Em que posição ela aparece pela primeira vez? {};'.format(frase.strip().index('a')+1))
print('Em que posição ela aparece pela última vez? {}.'.format(frase.strip().rfind('a')+1))
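# Sample run (illustrative; actual output depends on the typed sentence):
#   Digite uma frase: banana
#   -> 'a' appears 3 times; first occurrence at position 2, last at position 6.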
# ---- unfold/transactions/views.py | wesny/unfold | MIT ----
from django.contrib.auth.mixins import LoginRequiredMixin
from django.views import View
from django.views.generic import ListView
from django.utils.http import is_safe_url
from django.contrib import messages
from rest_framework import status
from django.core.exceptions import ObjectDoesNotExist
from django.shortcuts import redirect, render
from mama_cas.models import ServiceTicket
from mama_cas.utils import redirect as cas_redirect
from mama_cas.utils import to_bool
from rest_framework.response import Response
from decimal import Decimal
from django.urls import reverse
import urllib
from pinax.stripe.mixins import CustomerMixin
from pinax.stripe.models import Charge
from pinax.stripe.actions import charges
from stripe.error import CardError
from rest_framework_jwt.settings import api_settings
from unfold.transactions.models import Purchase, Article
from unfold.transactions.admin import PurchaseForm
from unfold.users.models import User
jwt_payload_handler = api_settings.JWT_PAYLOAD_HANDLER
jwt_encode_handler = api_settings.JWT_ENCODE_HANDLER
def bad_request(message):
return Response({
'status': 'error',
'message': message,
}, status=status.HTTP_400_BAD_REQUEST)
class PurchaseView(LoginRequiredMixin, View):
template_name = "pages/purchase_article.html"
form_class = PurchaseForm
# def test_func(self):
# return self.request.user.is_publisher
def get(self, request, *args, **kwargs):
publisherusername = request.GET.get('publisher', None)
external_id = request.GET.get('id', None)
new_token = to_bool(request.GET.get('new_token', None))
        if publisherusername is None or external_id is None:
return bad_request("Invalid Parameters")
try:
article = Article.objects.get(publisher__username=publisherusername, external_id=external_id)
except ObjectDoesNotExist:
return bad_request("Article referenced does not exist")
purchase = Purchase.objects.filter(article=article, buyer=request.user)
if purchase.exists():
            if new_token is not None:
publisher = User.objects.get(username=publisherusername)
st = ServiceTicket.objects.create_ticket(service=publisherusername + '.com', user=request.user)
return cas_redirect(article.url, params={'token': st.ticket})
else:
return redirect(article.url)
try:
publisher = User.objects.get(username=publisherusername)
except ObjectDoesNotExist:
return bad_request("Publisher does not exist")
next_url = ''
if article.price > request.user.balance:
next_url = urllib.parse.quote(request.get_full_path(), safe='~()*!.\'')
form = self.form_class(initial={
'external_id': external_id,
'publisher': publisherusername,
'price': article.price
})
data = {
'form': form,
'price': article.price,
'publisher': publisher.name,
'title': article.title,
'balance': request.user.balance,
'next': next_url or ''
}
return render(request, self.template_name, data)
def post(self, request, *args, **kwargs):
form = self.form_class(request.POST)
if form.is_valid():
external_id = form.cleaned_data['external_id']
publisherusername = form.cleaned_data['publisher']
price = form.cleaned_data['price']
new_token = to_bool(request.GET.get('new_token', None))
try:
article = Article.objects.get(publisher__username=publisherusername, external_id=external_id)
except ObjectDoesNotExist:
return bad_request("Article referenced does not exist")
if article.price != price:
return bad_request("Price has changed since submission")
purchase = Purchase(article=article, price=price, buyer=request.user)
purchase.save()
request.user.balance = request.user.balance - purchase.price
request.user.save()
publisher = User.objects.get(username=publisherusername)
publisher.balance = publisher.balance + purchase.price
publisher.save()
            if new_token is not None:
st = ServiceTicket.objects.create_ticket(service=publisherusername + '.com', user=request.user)
return cas_redirect(article.url, params={'token': st.ticket})
else:
return redirect(article.url)
return render(request, self.template_name, {'form': form})
class ReloadView(LoginRequiredMixin, View):
template_name = "pages/refill_account.html"
def get_redirect_url(self):
redirect_to = self.request.POST.get(
'next',
self.request.GET.get('next', '')
)
url_is_safe = is_safe_url(url=redirect_to)
return redirect_to if url_is_safe else ''
def get(self, request, *args, **kwargs):
can_charge = True
balance = request.user.balance
data = {
'balance': balance,
'can_charge': can_charge
}
return render(request, self.template_name, data)
def post(self, request, *args, **kwargs):
try:
add_on = Decimal(request.POST.get('amount'))
except:
messages.error(request, 'Amount was not in the desired format.')
can_charge = True
balance = request.user.balance
data = {
'balance': balance,
'can_charge': can_charge
}
return render(request, self.template_name, data)
try:
charges.create(amount=add_on, customer=request.user.customer.stripe_id)
except CardError as e:
body = e.json_body
err = body.get('error', {})
messages.error(request, err.get('message'))
return redirect("/reload")
user = User.objects.get(username=request.user.username)
user.balance = user.balance + add_on
user.save()
messages.success(request, "Payment was successfully processed.")
url = self.get_redirect_url() or '/user'
return redirect(url)
class NewAPIKeyView(LoginRequiredMixin, View):
def post(self, request, *args, **kwargs):
payload = jwt_payload_handler(request.user)
token = jwt_encode_handler(payload)
request.user.token = token
request.user.save()
return redirect('/user')
class StripeAccountFromCustomerMixin(object):
@property
def stripe_account(self):
customer = getattr(self, "customer", None)
return customer.stripe_account if customer else None
@property
def stripe_account_stripe_id(self):
return self.stripe_account.stripe_id if self.stripe_account else None
stripe_account_stripe_id.fget.short_description = "Stripe Account"
class ChargeListView(LoginRequiredMixin, CustomerMixin, ListView):
model = Charge
context_object_name = "charge_list"
template_name = "pinax/stripe/charge_list.html"
def get_queryset(self):
return super(ChargeListView, self).get_queryset().order_by("charge_created")
class PurchaseListView(LoginRequiredMixin, ListView):
model = Purchase
template_name = "pages/articles_list.html"
def get_queryset(self):
return Purchase.objects.filter(buyer=self.request.user)
| 39.507853 | 111 | 0.658362 | 6,326 | 0.838325 | 0 | 0 | 278 | 0.036841 | 0 | 0 | 781 | 0.103499 |
a9a1965586fb4160c10932687996645bcd809a1c | 1,843 | py | Python | interviewbit/Programming/Arrays/Rotate Matrix/solution.py | pablotrinidad/competitive-programming | de16d007ca276330cd0a92bd5b75ce4e9e75fb59 | [
"MIT"
]
| null | null | null | interviewbit/Programming/Arrays/Rotate Matrix/solution.py | pablotrinidad/competitive-programming | de16d007ca276330cd0a92bd5b75ce4e9e75fb59 | [
"MIT"
]
| null | null | null | interviewbit/Programming/Arrays/Rotate Matrix/solution.py | pablotrinidad/competitive-programming | de16d007ca276330cd0a92bd5b75ce4e9e75fb59 | [
"MIT"
]
| null | null | null | """InterviewBit.
Programming > Arrays > Rotate Matrix.
"""
class Solution:
"""Solution."""
def rotate(self, A):
"""Rotate matrix."""
n = len(A)
for l in range(0, n // 2): # l = level
for o in range(0, n - (l * 2) - 1): # o = offset
tlr, tlc = l, l + o # Top Left row/column
trr, trc = l + o, n - 1 - l # Top Right row/column
brr, brc = n - 1 - l, n - 1 - l - o # Bottom right row/column
blr, blc = n - 1 - l - o, l # Bottom left row/column
# Switch corner values
A[tlr][tlc], A[trr][trc], A[brr][brc], A[blr][blc] = A[blr][blc], A[tlr][tlc], A[trr][trc], A[brr][brc]
return A
matrices = [
[
[1]
],
[
[1, 2],
[3, 4]
],
[
[1, 2, 3],
[4, 5, 6],
[7, 8, 9]
],
[
['a', 'b', 'c', 'd'],
['e', 'f', 'g', 'h'],
['i', 'j', 'k', 'l'],
['m', 'n', 'o', 'p'],
],
[
[str(x).zfill(2) for x in range(1, 6)],
[str(x).zfill(2) for x in range(6, 11)],
[str(x).zfill(2) for x in range(11, 16)],
[str(x).zfill(2) for x in range(16, 21)],
[str(x).zfill(2) for x in range(21, 26)]
],
[
[str(x).zfill(2) for x in range(1, 7)],
[str(x).zfill(2) for x in range(7, 13)],
[str(x).zfill(2) for x in range(13, 19)],
[str(x).zfill(2) for x in range(19, 25)],
[str(x).zfill(2) for x in range(25, 31)],
[str(x).zfill(2) for x in range(31, 37)]
]
]
solution = Solution()
for matrix in matrices:
print("Matrix before rotation:")
for row in matrix:
print('\t', row)
print("Matrix after rotation:")
for row in solution.rotate(matrix):
print('\t', row)
print('\n' * 3)
# ---- integration-tests/src/test/resources/model-in-image/scripts/verify-jdbc-resource.py | tanmaygarg-oracle/weblogic-kubernetes-operator | UPL-1.0 / MIT ----
# Copyright (c) 2019, 2020, Oracle Corporation and/or its affiliates.
# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl.
connect('weblogic', 'welcome1', 't3://DOMAINNAME-admin-server:7001')
# get all JDBC Properties
dsCounter = 0
allJDBCResources = cmo.getJDBCSystemResources()
for jdbcResource in allJDBCResources:
dsCounter = dsCounter + 1
dsname = jdbcResource.getName()
dsResource = jdbcResource.getJDBCResource()
dsJNDIname = dsResource.getJDBCDataSourceParams().getJNDINames()#[0]
dsDriver = dsResource.getJDBCDriverParams().getDriverName()
conn = dsResource.getJDBCDriverParams().getUrl()
dsInitialCap = dsResource.getJDBCConnectionPoolParams().getInitialCapacity()
dsMaxCap = dsResource.getJDBCConnectionPoolParams().getMaxCapacity()
dsProps = dsResource.getJDBCDriverParams().getProperties()
dsParams = dsResource.getJDBCConnectionPoolParams()
user = get("/JDBCSystemResources/"+ dsname +"/Resource/" + dsname + "/JDBCDriverParams/" + dsname + "/Properties/" + dsname + "/Properties/user/Value")
readTimeOut = get("/JDBCSystemResources/"+ dsname +"/Resource/" + dsname + "/JDBCDriverParams/" + dsname + "/Properties/" + dsname + "/Properties/oracle.jdbc.ReadTimeout/Value")
connTimeOut = get("/JDBCSystemResources/"+ dsname +"/Resource/" + dsname + "/JDBCDriverParams/" + dsname + "/Properties/" + dsname + "/Properties/oracle.net.CONNECT_TIMEOUT/Value")
print 'datasource.name.' + str(dsCounter) +'=' + str(dsname)
print 'datasource.jndiname.' + str(dsCounter) + '=' + str(dsJNDIname)
print 'datasource.driver.class.' + str(dsCounter) + '=' + dsDriver
print 'datasource.url.' + str(dsCounter) + '=' + conn
print 'datasource.initialCapacity.' + str(dsCounter) + '=' + str(dsInitialCap)
print 'datasource.maxCapacity.' + str(dsCounter) + '=' + str(dsMaxCap)
print 'datasource.readTimeout.' + str(dsCounter) + '=' + readTimeOut
print 'datasource.connectionTimeout.' + str(dsCounter) + '=' + connTimeOut
print 'datasource.username.' + str(dsCounter) + '=' + str(user)
print 'datasource.dsProps.' + str(dsCounter) + '=' + str(dsProps)
print 'datasource.dsParams.' + str(dsCounter) + '=' + str(dsParams)
disconnect()
exit()
# ---- Projeto_2/no.py | claudiodacruz/Projetos-ED | MIT ----
class No:
def __init__(self, dado):
self._dado = dado
self._direita = None
self._esquerda = None
self._pai = None
def get_dado(self):
return self._dado
def get_direita(self):
return self._direita
def get_esquerda(self):
return self._esquerda
def get_pai(self):
return self._pai
def set_dado(self, novoDado):
self._dado = novoDado
def set_direita(self, setarDireita):
self._direita = setarDireita
def set_esquerda(self, setarEsquerda):
self._esquerda = setarEsquerda
def set_pai(self, novoPai):
self._pai = novoPai
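# Quick usage sketch of the node API (identifiers are Portuguese: dado=value,
# esquerda/direita=left/right child, pai=parent):
if __name__ == '__main__':
    raiz = No(10)
    filho = No(5)
    raiz.set_esquerda(filho)
    filho.set_pai(raiz)
    print(raiz.get_dado(), raiz.get_esquerda().get_dado())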
# ---- tests/dao_tests/test_stored_sample_dao.py | all-of-us/raw-data-repository | BSD-3-Clause ----
from rdr_service import clock
from rdr_service.dao.biobank_stored_sample_dao import BiobankStoredSampleDao
from rdr_service.dao.participant_dao import ParticipantDao
from rdr_service.model.biobank_stored_sample import BiobankStoredSample
from rdr_service.model.participant import Participant
from tests.helpers.unittest_base import BaseTestCase
class BiobankStoredSampleDaoTest(BaseTestCase):
"""Tests only that a sample can be written and read; see the reconciliation pipeline."""
def setUp(self):
super().setUp()
self.participant = Participant(participantId=123, biobankId=555)
ParticipantDao().insert(self.participant)
self.dao = BiobankStoredSampleDao()
def test_insert_and_read_sample(self):
sample_id = "WEB123456"
test_code = "1U234"
now = clock.CLOCK.now()
created = self.dao.insert(
BiobankStoredSample(
biobankStoredSampleId=sample_id,
biobankId=self.participant.biobankId,
biobankOrderIdentifier="KIT",
test=test_code,
confirmed=now,
)
)
fetched = self.dao.get(sample_id)
self.assertEqual(test_code, created.test)
self.assertEqual(test_code, fetched.test)
# ---- server/pantryflask/__init__.py | jernaumorat/IntelligentPantry | MIT ----
import socket, os, atexit
from flask import Flask, jsonify, request
from flask_sqlalchemy import SQLAlchemy
from flask_migrate import Migrate
from flask.helpers import send_from_directory, url_for
from zeroconf import ServiceInfo, Zeroconf
from pantryflask.config import FlaskConfig
from pantryflask.auth import token_auth, generate_pairing_code, generate_user_token
from pantryflask.models import AuthToken
from pantryflask.db import db
from pantryflask.pantry_api import bp as pantry_bp
from pantryflask.robot_api import bp as robot_bp
from pantryflask.util import bp as util_bp
ip = os.environ.get('LISTEN_IP')
httpZconf = ServiceInfo(
"_http._tcp.local.",
"intpantry._http._tcp.local.",
addresses=[socket.inet_aton(ip)],
port=5000)
httpsZconf = ServiceInfo(
"_https._tcp.local.",
"intpantry._https._tcp.local.",
addresses=[socket.inet_aton(ip)],
port=5443)
zc = Zeroconf()
zc.register_service(httpZconf)
print('Service Registered:', httpZconf)
def app_factory(config={}):
app = Flask(__name__)
app.config.from_object(FlaskConfig) if config == {} else app.config.from_object(config)
db.init_app(app)
migrate = Migrate(app, db)
@app.route('/')
def get_root():
links = []
for rule in app.url_map.iter_rules():
methods = ','.join(rule.methods)
links.append((f'{rule}', methods, rule.endpoint))
return jsonify(links)
@app.route('/cert', methods=['GET'])
def get_cert():
response = send_from_directory(os.path.join('.', 'static'), 'ssr.crt')
return response
@app.route('/pair', methods=['GET'])
def pair_device():
code = request.args.get('code')
if len(AuthToken.query.filter_by(token_class='user').all()) == 0 and not code:
return jsonify(generate_pairing_code())
token = generate_user_token(code)
        if token is None:
return jsonify(None), 401
return jsonify(token), 201
@app.route('/pair', methods=['POST'])
@token_auth.login_required(role=['user'])
def get_pairing_code():
return jsonify(generate_pairing_code())
@app.route('/pair', methods=['DELETE'])
@token_auth.login_required(role=['user'])
def delete_token():
token = request.headers.get('Authorization')
print(token)
token = token.split(' ')[1]
db.session.delete(AuthToken.query.get(token))
db.session.commit()
return jsonify('OK')
app.register_blueprint(pantry_bp)
app.register_blueprint(robot_bp)
app.register_blueprint(util_bp)
return app, db, migrate
@atexit.register
def shutdown():
zc.unregister_all_services()
app, db, migrate = app_factory()
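# Hedged run note: the app is created at import time above, so something like
#   LISTEN_IP=127.0.0.1 FLASK_APP=pantryflask flask run
# would serve it; the exact environment and launch command are assumptions.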
# ---- lingua_franca/lang/parse_eu.py | OpenVoiceOS/ovos-lingua-franca | Apache-2.0 ----
#
# Copyright 2017 Mycroft AI Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Parse functions for Basque (eu)
TODO: numbers greater than 999999
"""
from datetime import datetime
from dateutil.relativedelta import relativedelta
from dateutil.tz import gettz
from lingua_franca.lang.format_eu import pronounce_number_eu
from lingua_franca.lang.parse_common import *
from lingua_franca.lang.common_data_eu import _NUM_STRING_EU
def isFractional_eu(input_str):
"""
This function takes the given text and checks if it is a fraction.
Args:
text (str): the string to check if fractional
Returns:
(bool) or (float): False if not a fraction, otherwise the fraction
"""
    if input_str.endswith('s', -1):
        input_str = input_str[:len(input_str) - 1]  # strip a trailing plural "s"
aFrac = {"erdia": 2, "erdi": 2, "heren": 3, "laurden": 4,
"laurdena": 4, "bosten": 5, "bostena": 5, "seiren": 6, "seirena": 6,
"zazpiren": 7, "zapirena": 7, "zortziren": 8, "zortzirena": 8,
"bederatziren": 9, "bederatzirena": 9, "hamarren": 10, "hamarrena": 10,
"hamaikaren": 11, "hamaikarena": 11, "hamabiren": 12, "hamabirena": 12}
if input_str.lower() in aFrac:
        return 1.0 / aFrac[input_str.lower()]
if (input_str == "hogeiren" or input_str == "hogeirena"):
return 1.0 / 20
if (input_str == "hogeita hamarren" or input_str == "hogeita hamarrena"):
return 1.0 / 30
if (input_str == "ehunen" or input_str == "ehunena"):
return 1.0 / 100
if (input_str == "milaren" or input_str == "milarena"):
return 1.0 / 1000
return False
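# Illustrative usage, following the fraction table above:
#   isFractional_eu("laurden")  # -> 0.25 (a quarter)
#   isFractional_eu("erdia")    # -> 0.5  (a half)
#   isFractional_eu("kaixo")    # -> False (not a fraction word)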
# TODO: short_scale and ordinals don't do anything here.
# The parameters are present in the function signature for API compatibility
# reasons.
#
# Returns incorrect output on certain fractional phrases, an issue inherited
# from the Spanish parser this module was ported from (e.g. "cuarto de dos")
def extract_number_eu(text, short_scale=True, ordinals=False):
"""
This function prepares the given text for parsing by making
numbers consistent, getting rid of contractions, etc.
Args:
text (str): the string to normalize
Returns:
(int) or (float): The value of extracted number
"""
aWords = text.lower().split()
count = 0
result = None
while count < len(aWords):
val = 0
word = aWords[count]
next_next_word = None
if count + 1 < len(aWords):
next_word = aWords[count + 1]
if count + 2 < len(aWords):
next_next_word = aWords[count + 2]
else:
next_word = None
# is current word a number?
if word in _NUM_STRING_EU:
val = _NUM_STRING_EU[word]
elif word.isdigit(): # doesn't work with decimals
val = int(word)
elif is_numeric(word):
val = float(word)
elif isFractional_eu(word):
if next_word in _NUM_STRING_EU:
                # "erdi bat" (a half), "heren bat" (a third), etc.
                result = _NUM_STRING_EU[next_word]
                # skip the numeral that follows ("bat", "bi", ...)
next_word = None
count += 2
elif not result:
result = 1
count += 1
result = result * isFractional_eu(word)
continue
if not val:
# look for fractions like "2/3"
aPieces = word.split('/')
# if (len(aPieces) == 2 and is_numeric(aPieces[0])
# and is_numeric(aPieces[1])):
if look_for_fractions(aPieces):
val = float(aPieces[0]) / float(aPieces[1])
if val:
if result is None:
result = 0
# handle fractions
if next_word == "en" or next_word == "ren":
result = float(result) / float(val)
else:
result = val
if next_word is None:
break
# number word and fraction
ands = ["eta"]
if next_word in ands:
zeros = 0
if result is None:
count += 1
continue
newWords = aWords[count + 2:]
newText = ""
for word in newWords:
newText += word + " "
afterAndVal = extract_number_eu(newText[:-1])
if afterAndVal:
if result < afterAndVal or result < 20:
while afterAndVal > 1:
afterAndVal = afterAndVal / 10.0
for word in newWords:
if word == "zero" or word == "0":
zeros += 1
else:
break
for _ in range(0, zeros):
afterAndVal = afterAndVal / 10.0
result += afterAndVal
break
elif next_next_word is not None:
if next_next_word in ands:
newWords = aWords[count + 3:]
newText = ""
for word in newWords:
newText += word + " "
afterAndVal = extract_number_eu(newText[:-1])
if afterAndVal:
if result is None:
result = 0
result += afterAndVal
break
decimals = ["puntu", "koma", ".", ","]
if next_word in decimals:
zeros = 0
newWords = aWords[count + 2:]
newText = ""
for word in newWords:
newText += word + " "
for word in newWords:
if word == "zero" or word == "0":
zeros += 1
else:
break
afterDotVal = str(extract_number_eu(newText[:-1]))
afterDotVal = zeros * "0" + afterDotVal
result = float(str(result) + "." + afterDotVal)
break
count += 1
# Return the $str with the number related words removed
# (now empty strings, so strlen == 0)
# aWords = [word for word in aWords if len(word) > 0]
# text = ' '.join(aWords)
if "." in str(result):
integer, dec = str(result).split(".")
# cast float to int
if dec == "0":
result = int(integer)
return result or False
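# Illustrative calls (assuming _NUM_STRING_EU maps the standard Basque
# numerals, e.g. "bi" -> 2 and "bost" -> 5):
#   extract_number_eu("bi")             # -> 2
#   extract_number_eu("bi puntu bost")  # -> 2.5, "puntu" being the decimal point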
# TODO Not parsing 'cero'
def eu_number_parse(words, i):
def eu_cte(i, s):
if i < len(words) and s == words[i]:
return s, i + 1
return None
def eu_number_word(i, mi, ma):
if i < len(words):
v = _NUM_STRING_EU.get(words[i])
if v and v >= mi and v <= ma:
return v, i + 1
return None
def eu_number_1_99(i):
if i >= len(words):
return None
r1 = eu_number_word(i, 1, 29)
if r1:
return r1
composed = False
if words[i] != "eta" and words[i][-2:] == "ta":
composed = True
words[i] = words[i][:-2]
r1 = eu_number_word(i, 20, 90)
if r1:
v1, i1 = r1
if composed:
# i2 = r2[1]
r3 = eu_number_word(i1, 1, 19)
if r3:
v3, i3 = r3
return v1 + v3, i3
return r1
return None
def eu_number_1_999(i):
r1 = eu_number_word(i, 100, 900)
if r1:
v1, i1 = r1
r2 = eu_cte(i1, "eta")
if r2:
i2 = r2[1]
r3 = eu_number_1_99(i2)
if r3:
v3, i3 = r3
return v1 + v3, i3
else:
return r1
# [1-99]
r1 = eu_number_1_99(i)
if r1:
return r1
return None
def eu_number(i):
# check for cero
r1 = eu_number_word(i, 0, 0)
if r1:
return r1
# check for [1-999] (mil [0-999])?
r1 = eu_number_1_999(i)
if r1:
v1, i1 = r1
r2 = eu_cte(i1, "mila")
if r2:
i2 = r2[1]
r3 = eu_number_1_999(i2)
if r3:
v3, i3 = r3
return v1 * 1000 + v3, i3
else:
return v1 * 1000, i2
else:
return r1
return None
return eu_number(i)
def extract_numbers_eu(text, short_scale=True, ordinals=False):
"""
Takes in a string and extracts a list of numbers.
Args:
text (str): the string to extract a number from
short_scale (bool): Use "short scale" or "long scale" for large
numbers -- over a million. The default is short scale, which
is now common in most English speaking countries.
See https://en.wikipedia.org/wiki/Names_of_large_numbers
ordinals (bool): consider ordinal numbers, e.g. third=3 instead of 1/3
Returns:
list: list of extracted numbers as floats
"""
return extract_numbers_generic(text, pronounce_number_eu, extract_number_eu,
short_scale=short_scale, ordinals=ordinals)
def normalize_eu(text, remove_articles=True):
""" Basque string normalization """
words = text.split() # this also removed extra spaces
normalized = ""
i = 0
while i < len(words):
word = words[i]
# Convert numbers into digits
r = eu_number_parse(words, i)
if r:
v, i = r
normalized += " " + str(v)
continue
normalized += " " + word
i += 1
return normalized[1:] # strip the initial space
# TODO MycroftAI/mycroft-core#2348
def extract_datetime_eu(input_str, anchorDate=None, default_time=None):
def clean_string(s):
# cleans the input string of unneeded punctuation and capitalization
# among other things
        symbols = [".", ",", ";", "?", "!"]
# noise_words = ["entre", "la", "del", "al", "el", "de",
# "para", "una", "cualquier", "a",
# "e'", "esta", "este"]
# TODO
noise_words = ["artean", "tartean", "edozein", "hau", "hontan", "honetan",
"para", "una", "cualquier", "a",
"e'", "esta", "este"]
for word in symbols:
s = s.replace(word, "")
for word in noise_words:
s = s.replace(" " + word + " ", " ")
s = s.lower().replace(
"-",
" ").replace(
"_",
"")
# handle synonyms and equivalents, "tomorrow early = tomorrow morning
synonyms = {"goiza": ["egunsentia", "goiz", "oso goiz"],
"arratsaldea": ["arratsa", "bazkalostea", "arratsalde", "arrats"],
"gaua": ["iluntzea", "berandu", "gau", "gaba"]}
for syn in synonyms:
for word in synonyms[syn]:
s = s.replace(" " + word + " ", " " + syn + " ")
# relevant plurals
wordlist = ["goizak", "arratsaldeak", "gauak", "egunak", "asteak",
"urteak", "minutuak", "segunduak", "hurrengoak",
"datozenak", "orduak", "hilabeteak"]
        for word in wordlist:
            s = s.replace(word, word.rstrip('ak'))  # rstrip drops trailing 'a'/'k' chars
# s = s.replace("meses", "mes").replace("anteriores", "anterior")
return s
def date_found():
return found or \
(
datestr != "" or
yearOffset != 0 or monthOffset != 0 or
dayOffset is True or hrOffset != 0 or
hrAbs or minOffset != 0 or
minAbs or secOffset != 0
)
if input_str == "":
return None
if anchorDate is None:
anchorDate = datetime.now()
found = False
daySpecified = False
dayOffset = False
monthOffset = 0
yearOffset = 0
dateNow = anchorDate
today = dateNow.strftime("%w")
currentYear = dateNow.strftime("%Y")
fromFlag = False
datestr = ""
hasYear = False
timeQualifier = ""
words = clean_string(input_str).split(" ")
timeQualifiersList = ['goiza', 'arratsaldea', 'gaua']
time_indicators = ["en", "la", "al", "por", "pasados",
"pasadas", "día", "hora"]
days = ['astelehena', 'asteartea', 'asteazkena',
'osteguna', 'ostirala', 'larunbata', 'igandea']
months = ['urtarrila', 'otsaila', 'martxoa', 'apirila', 'maiatza', 'ekaina',
'uztaila', 'abuztua', 'iraila', 'urria', 'azaroa',
'abendua']
monthsShort = ['urt', 'ots', 'mar', 'api', 'mai', 'eka', 'uzt', 'abu',
'ira', 'urr', 'aza', 'abe']
nexts = ["hurrengo", "datorren", "ondorengo"]
suffix_nexts = ["barru"]
lasts = ["azken", "duela"]
suffix_lasts = ["aurreko"]
nxts = ["ondorengo", "hurrengo", "datorren"]
prevs = ["aurreko", "duela", "previo", "anterior"]
# TODO
froms = ["desde", "en", "para", "después de", "por", "próximo",
"próxima", "de"]
thises = ["hau"]
froms += thises
lists = nxts + prevs + froms + time_indicators
for idx, word in enumerate(words):
if word == "":
continue
wordPrevPrev = words[idx - 2] if idx > 1 else ""
wordPrev = words[idx - 1] if idx > 0 else ""
wordNext = words[idx + 1] if idx + 1 < len(words) else ""
wordNextNext = words[idx + 2] if idx + 2 < len(words) else ""
wordNextNextNext = words[idx + 3] if idx + 3 < len(words) else ""
start = idx
used = 0
# save timequalifier for later
if word in timeQualifiersList:
timeQualifier = word
# parse today, tomorrow, yesterday
elif (word == "gaur" or word == "gaurko") and not fromFlag:
dayOffset = 0
used += 1
elif (word == "bihar" or word == "biharko") and not fromFlag:
dayOffset = 1
used += 1
elif (word == "atzo" or word == "atzoko") and not fromFlag:
dayOffset -= 1
used += 1
# before yesterday
elif (word == "herenegun" or word == "herenegungo") and not fromFlag:
dayOffset -= 2
used += 1
# if wordNext == "ayer":
# used += 1
# elif word == "ante" and wordNext == "ante" and wordNextNext == \
# "ayer" and not fromFlag:
# dayOffset -= 3
# used += 3
# elif word == "ante anteayer" and not fromFlag:
# dayOffset -= 3
# used += 1
# day after tomorrow
elif (word == "etzi" or word == "etziko") and not fromFlag:
dayOffset += 2
used = 1
elif (word == "etzidamu" or word == "etzidamuko") and not fromFlag:
dayOffset += 3
used = 1
# parse 5 days, 10 weeks, last week, next week, week after
elif word == "egun" or word == "eguna" or word == "eguneko":
if wordPrevPrev and wordPrevPrev == "duela":
used += 1
if wordPrev and wordPrev[0].isdigit():
dayOffset -= int(wordPrev)
start -= 1
used += 1
elif (wordPrev and wordPrev[0].isdigit() and
wordNext not in months and
wordNext not in monthsShort):
dayOffset += int(wordPrev)
start -= 1
used += 2
elif wordNext and wordNext[0].isdigit() and wordNextNext not in \
months and wordNextNext not in monthsShort:
dayOffset += int(wordNext)
start -= 1
used += 2
elif word == "aste" or word == "astea" or word == "asteko" and not fromFlag:
if wordPrev[0].isdigit():
dayOffset += int(wordPrev) * 7
start -= 1
used = 2
for w in nexts:
if wordPrev == w:
dayOffset = 7
start -= 1
used = 2
for w in lasts:
if wordPrev == w:
dayOffset = -7
start -= 1
used = 2
for w in suffix_nexts:
if wordNext == w:
dayOffset = 7
start -= 1
used = 2
for w in suffix_lasts:
if wordNext == w:
dayOffset = -7
start -= 1
used = 2
# parse 10 months, next month, last month
elif word == "hilabete" or word == "hilabetea" or word == "hilabeteko" and not fromFlag:
if wordPrev[0].isdigit():
monthOffset = int(wordPrev)
start -= 1
used = 2
for w in nexts:
if wordPrev == w:
monthOffset = 7
start -= 1
used = 2
for w in lasts:
if wordPrev == w:
monthOffset = -7
start -= 1
used = 2
for w in suffix_nexts:
if wordNext == w:
monthOffset = 7
start -= 1
used = 2
for w in suffix_lasts:
if wordNext == w:
monthOffset = -7
start -= 1
used = 2
# parse 5 years, next year, last year
elif word == "urte" or word == "urtea" or word == "urteko" and not fromFlag:
if wordPrev[0].isdigit():
yearOffset = int(wordPrev)
start -= 1
used = 2
for w in nexts:
if wordPrev == w:
yearOffset = 1
start -= 1
used = 2
for w in lasts:
if wordPrev == w:
yearOffset = -1
start -= 1
used = 2
for w in suffix_nexts:
if wordNext == w:
yearOffset = 1
start -= 1
used = 2
for w in suffix_lasts:
if wordNext == w:
yearOffset = -1
start -= 1
used = 2
# parse Monday, Tuesday, etc., and next Monday,
# last Tuesday, etc.
elif word in days and not fromFlag:
d = days.index(word)
dayOffset = (d + 1) - int(today)
used = 1
if dayOffset < 0:
dayOffset += 7
if wordPrev == "hurrengo":
dayOffset += 7
used += 1
start -= 1
elif wordPrev == "aurreko":
dayOffset -= 7
used += 1
start -= 1
if wordNext == "hurrengo":
# dayOffset += 7
used += 1
elif wordNext == "aurreko":
# dayOffset -= 7
used += 1
# parse 15 of July, June 20th, Feb 18, 19 of February
elif word in months or word in monthsShort:
try:
m = months.index(word)
except ValueError:
m = monthsShort.index(word)
used += 1
datestr = months[m]
if wordPrev and wordPrev[0].isdigit():
                # day given before the month, e.g. "13 maiatza"
datestr += " " + wordPrev
start -= 1
used += 1
if wordNext and wordNext[0].isdigit():
datestr += " " + wordNext
used += 1
hasYear = True
else:
hasYear = False
elif wordNext and wordNext[0].isdigit():
                # month given before the day, e.g. "maiatza 13"
datestr += " " + wordNext
used += 1
if wordNextNext and wordNextNext[0].isdigit():
datestr += " " + wordNextNext
used += 1
hasYear = True
else:
hasYear = False
elif wordPrevPrev and wordPrevPrev[0].isdigit():
                # day number two words before the month
datestr += " " + wordPrevPrev
start -= 2
used += 2
if wordNext and word[0].isdigit():
datestr += " " + wordNext
used += 1
hasYear = True
else:
hasYear = False
elif wordNextNext and wordNextNext[0].isdigit():
                # day number two words after the month
datestr += " " + wordNextNext
used += 2
if wordNextNextNext and wordNextNextNext[0].isdigit():
datestr += " " + wordNextNextNext
used += 1
hasYear = True
else:
hasYear = False
if datestr in months:
datestr = ""
# parse 5 days from tomorrow, 10 weeks from next thursday,
# 2 months from July
validFollowups = days + months + monthsShort
validFollowups.append("gaur")
validFollowups.append("bihar")
validFollowups.append("atzo")
# validFollowups.append("atzoko")
validFollowups.append("herenegun")
validFollowups.append("orain")
validFollowups.append("oraintxe")
# validFollowups.append("ante")
# TODO
if word in froms and wordNext in validFollowups:
if not (word == "bihar" or word == "herenegun" or word == "atzo"):
used = 1
fromFlag = True
if wordNext == "bihar":
dayOffset += 1
elif wordNext == "atzo" or wordNext == "atzoko":
dayOffset -= 1
elif wordNext == "herenegun":
dayOffset -= 2
# elif (wordNext == "ante" and wordNext == "ante" and
# wordNextNextNext == "ayer"):
# dayOffset -= 3
elif wordNext in days:
d = days.index(wordNext)
tmpOffset = (d + 1) - int(today)
used = 2
# if wordNextNext == "feira":
# used += 1
if tmpOffset < 0:
tmpOffset += 7
if wordNextNext:
if wordNextNext in nxts:
tmpOffset += 7
used += 1
elif wordNextNext in prevs:
tmpOffset -= 7
used += 1
dayOffset += tmpOffset
elif wordNextNext and wordNextNext in days:
d = days.index(wordNextNext)
tmpOffset = (d + 1) - int(today)
used = 3
if wordNextNextNext:
if wordNextNextNext in nxts:
tmpOffset += 7
used += 1
elif wordNextNextNext in prevs:
tmpOffset -= 7
used += 1
dayOffset += tmpOffset
# if wordNextNextNext == "feira":
# used += 1
if wordNext in months:
used -= 1
if used > 0:
if start - 1 > 0 and words[start - 1] in lists:
start -= 1
used += 1
for i in range(0, used):
words[i + start] = ""
if start - 1 >= 0 and words[start - 1] in lists:
words[start - 1] = ""
found = True
daySpecified = True
# parse time
hrOffset = 0
minOffset = 0
secOffset = 0
hrAbs = None
minAbs = None
for idx, word in enumerate(words):
if word == "":
continue
wordPrevPrev = words[idx - 2] if idx > 1 else ""
wordPrev = words[idx - 1] if idx > 0 else ""
wordNext = words[idx + 1] if idx + 1 < len(words) else ""
wordNextNext = words[idx + 2] if idx + 2 < len(words) else ""
wordNextNextNext = words[idx + 3] if idx + 3 < len(words) else ""
# parse noon, midnight, morning, afternoon, evening
used = 0
if word == "eguerdi" or word == "eguerdia" or word == "eguerdian":
hrAbs = 12
used += 2
elif word == "gauerdi" or word == "gauerdia" or word == "gauerdian":
hrAbs = 0
used += 2
elif word == "goiza":
if not hrAbs:
hrAbs = 8
used += 1
elif word == "arratsaldea" or word == "arratsa" or word == "arratsean" or word == "arratsaldean":
if not hrAbs:
hrAbs = 15
used += 1
# TODO
# elif word == "media" and wordNext == "tarde":
# if not hrAbs:
# hrAbs = 17
# used += 2
elif word == "iluntze" or word == "iluntzea" or word == "iluntzean":
if not hrAbs:
hrAbs = 20
used += 2
# TODO
# elif word == "media" and wordNext == "mañana":
# if not hrAbs:
# hrAbs = 10
# used += 2
# elif word == "fim" and wordNext == "tarde":
# if not hrAbs:
# hrAbs = 19
# used += 2
elif word == "egunsentia" or word == "egunsentian" or word == "egunsenti":
if not hrAbs:
hrAbs = 6
used += 1
# elif word == "madrugada":
# if not hrAbs:
# hrAbs = 1
# used += 2
elif word == "gaua" or word == "gauean" or word == "gau":
if not hrAbs:
hrAbs = 21
used += 1
# parse half an hour, quarter hour
# TODO
elif (word == "hora" and
(wordPrev in time_indicators or wordPrevPrev in
time_indicators)):
if wordPrev == "media":
minOffset = 30
elif wordPrev == "cuarto":
minOffset = 15
elif wordPrevPrev == "cuarto":
minOffset = 15
if idx > 2 and words[idx - 3] in time_indicators:
words[idx - 3] = ""
words[idx - 2] = ""
else:
hrOffset = 1
if wordPrevPrev in time_indicators:
words[idx - 2] = ""
words[idx - 1] = ""
used += 1
hrAbs = -1
minAbs = -1
# parse 5:00 am, 12:00 p.m., etc
elif word[0].isdigit():
isTime = True
strHH = ""
strMM = ""
remainder = ""
if ':' in word:
# parse colons
# "3:00 in the morning"
stage = 0
                length = len(word)
                # walk the token character by character; "i" is rewound one
                # step when a non-digit ends a stage, so a while loop is
                # needed (a for loop would ignore the "i -= 1")
                i = 0
                while i < length:
                    if stage == 0:
                        if word[i].isdigit():
                            strHH += word[i]
                        elif word[i] == ":":
                            stage = 1
                        else:
                            stage = 2
                            i -= 1
                    elif stage == 1:
                        if word[i].isdigit():
                            strMM += word[i]
                        else:
                            stage = 2
                            i -= 1
                    elif stage == 2:
                        remainder = word[i:].replace(".", "")
                        break
                    i += 1
if remainder == "":
nextWord = wordNext.replace(".", "")
if nextWord == "am" or nextWord == "pm":
remainder = nextWord
used += 1
elif wordNext == "goiza" or wordNext == "egunsentia" or wordNext == "goizeko" or wordNext == "egunsentiko":
remainder = "am"
used += 1
elif wordPrev == "arratsaldeko" or wordPrev == "arratsaldea" or wordPrev == "arratsaldean":
remainder = "pm"
used += 1
elif wordNext == "gaua" or wordNext == "gauean" or wordNext == "gaueko":
if 0 < int(word[0]) < 6:
remainder = "am"
else:
remainder = "pm"
used += 1
elif wordNext in thises and (wordNextNext == "goiza" or wordNextNext == "goizean" or wordNextNext == "goizeko"):
remainder = "am"
used = 2
elif wordNext in thises and \
(wordNextNext == "arratsaldea" or wordNextNext == "arratsaldean" or wordNextNext == "arratsaldeko"):
remainder = "pm"
used = 2
elif wordNext in thises and (wordNextNext == "gaua" or wordNextNext == "gauean" or wordNextNext == "gaueko"):
remainder = "pm"
used = 2
else:
if timeQualifier != "":
                        if strHH and int(strHH) <= 12 and \
                                (timeQualifier == "goiza" or
                                 timeQualifier == "arratsaldea"):
                            strHH = str(int(strHH) + 12)
else:
# try to parse # s without colons
# 5 hours, 10 minutes etc.
length = len(word)
strNum = ""
remainder = ""
for i in range(length):
if word[i].isdigit():
strNum += word[i]
else:
remainder += word[i]
if remainder == "":
remainder = wordNext.replace(".", "").lstrip().rstrip()
if (
remainder == "pm" or
wordNext == "pm" or
remainder == "p.m." or
wordNext == "p.m."):
strHH = strNum
remainder = "pm"
used = 1
elif (
remainder == "am" or
wordNext == "am" or
remainder == "a.m." or
wordNext == "a.m."):
strHH = strNum
remainder = "am"
used = 1
else:
if (wordNext == "pm" or
wordNext == "p.m." or
wordPrev == "arratsaldeko"):
strHH = strNum
remainder = "pm"
used = 0
elif (wordNext == "am" or
wordNext == "a.m." or
wordPrev == "goizeko"):
strHH = strNum
remainder = "am"
used = 0
elif (int(word) > 100 and
(
# wordPrev == "o" or
# wordPrev == "oh" or
wordPrev == "zero"
)):
# 0800 hours (pronounced oh-eight-hundred)
                    strHH = int(word) // 100
                    strMM = int(word) - strHH * 100
if wordNext == "orduak":
used += 1
elif (
wordNext == "orduak" and
word[0] != '0' and
(
                            int(word) < 100 or
                            int(word) > 2400
)):
# ignores military time
# "in 3 hours"
hrOffset = int(word)
used = 2
isTime = False
hrAbs = -1
minAbs = -1
elif wordNext == "minutu":
# "in 10 minutes"
minOffset = int(word)
used = 2
isTime = False
hrAbs = -1
minAbs = -1
elif wordNext == "segundu":
# in 5 seconds
secOffset = int(word)
used = 2
isTime = False
hrAbs = -1
minAbs = -1
elif int(word) > 100:
                strHH = int(word) // 100
                strMM = int(word) - strHH * 100
if wordNext == "ordu":
used += 1
elif wordNext == "" or (
wordNext == "puntuan"):
strHH = word
strMM = 00
if wordNext == "puntuan":
used += 2
if wordNextNextNext == "arratsaldea":
remainder = "pm"
used += 1
elif wordNextNextNext == "goiza":
remainder = "am"
used += 1
elif wordNextNextNext == "gaua":
                        if 0 < int(strHH) < 6:
remainder = "am"
else:
remainder = "pm"
used += 1
elif wordNext[0].isdigit():
strHH = word
strMM = wordNext
used += 1
if wordNextNext == "orduak":
used += 1
else:
isTime = False
strHH = int(strHH) if strHH else 0
strMM = int(strMM) if strMM else 0
strHH = strHH + 12 if (remainder == "pm" and
0 < strHH < 12) else strHH
            strHH = strHH - 12 if (remainder == "am" and
                                   strHH >= 12) else strHH
if strHH > 24 or strMM > 59:
isTime = False
used = 0
if isTime:
hrAbs = strHH * 1
minAbs = strMM * 1
used += 1
if used > 0:
# removed parsed words from the sentence
for i in range(used):
words[idx + i] = ""
if wordPrev == "puntuan":
words[words.index(wordPrev)] = ""
if idx > 0 and wordPrev in time_indicators:
words[idx - 1] = ""
if idx > 1 and wordPrevPrev in time_indicators:
words[idx - 2] = ""
idx += used - 1
found = True
# check that we found a date
if not date_found():
return None
if dayOffset is False:
dayOffset = 0
# perform date manipulation
extractedDate = dateNow
extractedDate = extractedDate.replace(microsecond=0,
second=0,
minute=0,
hour=0)
if datestr != "":
en_months = ['january', 'february', 'march', 'april', 'may', 'june',
'july', 'august', 'september', 'october', 'november',
'december']
en_monthsShort = ['jan', 'feb', 'mar', 'apr', 'may', 'june', 'july',
'aug',
'sept', 'oct', 'nov', 'dec']
for idx, en_month in enumerate(en_months):
datestr = datestr.replace(months[idx], en_month)
for idx, en_month in enumerate(en_monthsShort):
datestr = datestr.replace(monthsShort[idx], en_month)
temp = datetime.strptime(datestr, "%B %d")
temp = temp.replace(tzinfo=None)
if not hasYear:
temp = temp.replace(year=extractedDate.year, tzinfo=extractedDate.tzinfo)
if extractedDate < temp:
extractedDate = extractedDate.replace(year=int(currentYear),
month=int(
temp.strftime(
"%m")),
day=int(temp.strftime(
"%d")))
else:
extractedDate = extractedDate.replace(
year=int(currentYear) + 1,
month=int(temp.strftime("%m")),
day=int(temp.strftime("%d")))
else:
extractedDate = extractedDate.replace(
year=int(temp.strftime("%Y")),
month=int(temp.strftime("%m")),
day=int(temp.strftime("%d")))
if yearOffset != 0:
extractedDate = extractedDate + relativedelta(years=yearOffset)
if monthOffset != 0:
extractedDate = extractedDate + relativedelta(months=monthOffset)
if dayOffset != 0:
extractedDate = extractedDate + relativedelta(days=dayOffset)
if hrAbs is None and minAbs is None and default_time:
hrAbs = default_time.hour
minAbs = default_time.minute
if hrAbs != -1 and minAbs != -1:
extractedDate = extractedDate + relativedelta(hours=hrAbs or 0,
minutes=minAbs or 0)
if (hrAbs or minAbs) and datestr == "":
if not daySpecified and dateNow > extractedDate:
extractedDate = extractedDate + relativedelta(days=1)
if hrOffset != 0:
extractedDate = extractedDate + relativedelta(hours=hrOffset)
if minOffset != 0:
extractedDate = extractedDate + relativedelta(minutes=minOffset)
if secOffset != 0:
extractedDate = extractedDate + relativedelta(seconds=secOffset)
resultStr = " ".join(words)
resultStr = ' '.join(resultStr.split())
# resultStr = pt_pruning(resultStr)
return [extractedDate, resultStr]
def get_gender_eu(word, raw_string=""):
# There is no gender in Basque
gender = False
return gender
| 36.218951 | 132 | 0.436297 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7,733 | 0.196394 |
8d1326f81b702308f07d05eaa330ea71663f64ad | 6,976 | py | Python | path-generation/velocity_profile.py | iqzprvagbv/path-planning | c5b1099dbe1aadbd78a1fdb16c0a2f82245c19bc | [
"MIT"
]
| null | null | null | path-generation/velocity_profile.py | iqzprvagbv/path-planning | c5b1099dbe1aadbd78a1fdb16c0a2f82245c19bc | [
"MIT"
]
| 1 | 2021-06-01T21:26:25.000Z | 2021-06-01T21:26:25.000Z | path-generation/velocity_profile.py | iqzprvagbv/path-planning | c5b1099dbe1aadbd78a1fdb16c0a2f82245c19bc | [
"MIT"
]
| null | null | null | # Defines a velocity profile, which is the big object we've been
# working towards.
from math import sqrt, ceil
import json
class PlanningPoint(object):
# pylint: disable=too-many-instance-attributes
# planning points unfortunately require this much data
def __init__(self, position, time, radius, distance, heading):
# pylint: disable=bad-whitespace
# this next block is unreadable without the spacing
self.radius = radius
self.heading = heading
self.position = position
self.distance = distance
self.internal_time = time
self.total_time = None
self.external_time = None
self.max_velocity = None
self.left_velocity = None
self.right_velocity = None
self.actual_velocity = None
def __str__(self):
return ("Planning Point: " + "\n" +
"Time: " + str(self.internal_time) + "\n" +
"Max Velocity: " + str(self.max_velocity) + "\n" +
"Velocity: " + str(self.actual_velocity) + "\n")
def compute_max_velocity(self, robot):
if self.radius == 0:
velocity = robot.max_velocity
elif self.radius > 0:
velocity = (self.radius*robot.max_velocity)/(self.radius + (robot.width/2.))
else:
velocity = (self.radius*robot.max_velocity)/(self.radius - (robot.width/2.))
self.max_velocity = velocity
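    # Worked example with hypothetical numbers: on a turn of radius 2.0 with
    # robot.width 0.5 and robot.max_velocity 1.0, the outer wheel saturates
    # first, so the centre speed is capped at 2.0*1.0/(2.0 + 0.25) ~= 0.89.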
def compute_wheel_velocity(self, robot):
if self.actual_velocity is None:
velocity = self.max_velocity
else:
velocity = self.actual_velocity
if self.radius == 0:
self.right_velocity = velocity
self.left_velocity = velocity
else:
self.right_velocity = velocity/self.radius*(self.radius+robot.width/2)
self.left_velocity = velocity/self.radius*(self.radius-robot.width/2)
def json_object(self):
return {"time": self.external_time,
"heading": self.heading,
"left velcoity": self.left_velocity,
"right velocity": self.right_velocity}
class VelocityProfile(object):
def __init__(self, path, robot, distance):
self.path = path
self.robot = robot
self.distance = distance
self.points = []
self.__init_points()
#broken
#self.__establish_accel()
# Dirty Hack
actual_max_accel = self.robot.max_acceleration
current_max_accel = float('inf')
while current_max_accel > actual_max_accel:
print "Ensuring Consitency of Wheels"
self.__forward_consistency(0)
self.__reverse_consistency(0)
self.__establish_timestamps()
self.__init_wheels()
current_max_accel = self.__get_max_accel()
self.robot.max_acceleration = 3./4 * self.robot.max_acceleration
def __init_points(self):
print "Initializing Planning Points..."
last_t = 0
steps = ceil(self.path.total_length/self.distance)
step = 1
progress = 0
for t in self.path.planning_times(self.distance):
            print('\r[{0}{1}] {2}%'.format('#' * int(progress * 30),
                                           '-' * int((1 - progress) * 30),
                                           int(progress * 100)), end='')
radius = self.path.curvature_radius(t)
distance = self.path.length(last_t, t)
position = self.path.eval(t)
heading = self.path.heading(t)
point = PlanningPoint(position, t, radius, distance, heading)
point.compute_max_velocity(self.robot)
point.compute_wheel_velocity(self.robot)
self.points.append(point)
last_t = t
step += 1
progress = step/steps
print "Done!"
def __forward_consistency(self, initial_velocity):
print "Establishing Forward Consistency..."
last_velocity = None
for point in self.points:
if last_velocity is None:
point.actual_velocity = min(initial_velocity, point.max_velocity)
else:
obtainable = sqrt(last_velocity**2+2*self.robot.max_acceleration*point.distance)
point.actual_velocity = min(point.max_velocity, obtainable)
last_velocity = point.actual_velocity
print "Done!"
def __reverse_consistency(self, final_velocity):
print "Establishing Reverse Consistency..."
last_velocity = None
last_distance = None
for point in reversed(self.points):
if last_velocity is None:
point.actual_velocity = min(final_velocity, point.actual_velocity)
else:
obtainable = sqrt(last_velocity**2+2*self.robot.max_acceleration*last_distance)
point.actual_velocity = min(point.actual_velocity, obtainable)
last_distance = point.distance
last_velocity = point.actual_velocity
print "Done!"
def __establish_timestamps(self):
print "Establishing Timestamps..."
last_time = None
last_velocity = None
for point in self.points:
if last_time is None:
point.external_time = 0
else:
dt = (2*point.distance)/(point.actual_velocity + last_velocity)
point.external_time = last_time + dt
last_time = point.external_time
last_velocity = point.actual_velocity
self.total_time = last_time
print "Done!"
def __init_wheels(self):
print "Computing Wheel Velocities..."
for point in self.points:
point.compute_wheel_velocity(self.robot)
print "Done!"
def __get_max_accel(self):
last_point = None
max_accel = 0
for point in self.points:
if last_point is None:
last_point = point
else:
dt = point.external_time - last_point.external_time
left_accel = (abs(point.left_velocity -
last_point.left_velocity))/dt
right_accel = (abs(point.right_velocity -
last_point.right_velocity))/dt
max_accel = max(max_accel, left_accel, right_accel)
return max_accel
class ProfileEncoder(json.JSONEncoder):
# pylint: disable=arguments-differ
# pylint: disable=method-hidden
# This code is copy pasted from the official python docs, I assume it's fine
def default(self, obj):
if isinstance(obj, VelocityProfile):
output = []
for point in obj.points:
output.append(point.json_object())
return output
# This will throw an error if it's given the wrong type
return json.JSONEncoder.default(self, obj)
| 38.32967 | 96 | 0.591886 | 6,845 | 0.981221 | 0 | 0 | 0 | 0 | 0 | 0 | 881 | 0.12629 |
8d1378b3e67d5a0964ccf48994e4da6105c0ae60 | 472 | py | Python | move_py_files.py | rune-l/coco-annotator | a7ae8004c5e1ca74e5bbc41d09edc5cfab117a14 | [
"MIT"
]
| null | null | null | move_py_files.py | rune-l/coco-annotator | a7ae8004c5e1ca74e5bbc41d09edc5cfab117a14 | [
"MIT"
]
| null | null | null | move_py_files.py | rune-l/coco-annotator | a7ae8004c5e1ca74e5bbc41d09edc5cfab117a14 | [
"MIT"
]
| null | null | null | import os
import subprocess
test_set_path = '/Users/runelangergaard/Documents/SmartAnnotation/data/test_set'
test_imgs = os.listdir(test_set_path)
test_imgs
cwd_path = '/Users/runelangergaard'
os.chdir(cwd_path)
for img in test_imgs:
full_path = os.path.join(test_set_path, img)
subprocess.run([
"scp",
"-i",
"coco-anno.pem",
full_path,
"[email protected]:/datasets/tmp"
])
| 23.6 | 83 | 0.684322 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 187 | 0.396186 |
8d13e8253f51474a77c77b964813f16a0d1c345f | 304 | py | Python | examples/apply.py | PictElm/grom | 52e28efad1edae447347dd396e80a665c283b05d | [
"Apache-2.0"
]
| 1 | 2019-06-29T18:53:31.000Z | 2019-06-29T18:53:31.000Z | examples/apply.py | PictElm/grom | 52e28efad1edae447347dd396e80a665c283b05d | [
"Apache-2.0"
]
| null | null | null | examples/apply.py | PictElm/grom | 52e28efad1edae447347dd396e80a665c283b05d | [
"Apache-2.0"
]
| null | null | null | import random
import grom
grom.debug(False)
dirName = "dump\\"
inputName = "example.bmp"
outputName = "output.bmp"
g = grom.Genome(dirName + inputName, partition=[
('head', 0x76),
('raw')
])
print(g)
print(g.partition)
g.apply(lambda x: 255 - x, ['raw'])
g(dirName + outputName, pause=False)
| 16 | 48 | 0.661184 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 49 | 0.161184 |
8d14a69daed26d53510912624929725162594aec | 3,351 | py | Python | statefun-sdk-python/statefun/statefun_builder.py | MartijnVisser/flink-statefun | 66b2fc5a178d916756428f65a197095fbb43f57d | [
"Apache-2.0"
]
| null | null | null | statefun-sdk-python/statefun/statefun_builder.py | MartijnVisser/flink-statefun | 66b2fc5a178d916756428f65a197095fbb43f57d | [
"Apache-2.0"
]
| 7 | 2022-02-24T17:20:28.000Z | 2022-03-25T13:18:44.000Z | statefun-sdk-python/statefun/statefun_builder.py | MartijnVisser/flink-statefun | 66b2fc5a178d916756428f65a197095fbb43f57d | [
"Apache-2.0"
]
| null | null | null | ################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
import typing
from statefun.core import ValueSpec
from statefun.context import Context
from statefun.messages import Message
from statefun.storage import make_address_storage_spec, StorageSpec
import inspect
class StatefulFunction(object):
__slots__ = ("fun", "storage_spec", "is_async")
def __init__(self,
fun: typing.Callable[[Context, Message], None],
specs: StorageSpec,
is_async: bool):
if fun is None:
raise ValueError("function code is missing.")
self.fun = fun
if specs is None:
raise ValueError("storage spec is missing.")
self.storage_spec = specs
self.is_async = is_async
class StatefulFunctions(object):
__slots__ = ("_functions",)
def __init__(self):
self._functions = {}
def register(self, typename: str, fun, specs: typing.Optional[typing.List[ValueSpec]] = None):
"""registers a StatefulFunction function instance, under the given namespace with the given function type. """
if fun is None:
raise ValueError("function instance must be provided")
if not typename:
raise ValueError("function typename must be provided")
storage_spec = make_address_storage_spec(specs if specs else [])
is_async = inspect.iscoroutinefunction(fun)
sig = inspect.getfullargspec(fun)
if len(sig.args) != 2:
raise ValueError(
f"The registered function {typename} does not expect a context and a message but rather {sig.args}.")
self._functions[typename] = StatefulFunction(fun=fun, specs=storage_spec, is_async=is_async)
def bind(self, typename, specs: typing.List[ValueSpec] = None):
"""wraps a StatefulFunction instance with a given namespace and type.
for example:
s = StatefulFunctions()
            @s.bind("com.foo.bar/greeter")
def greeter(context, message):
print("Hi there")
This would add an invokable stateful function that can accept messages
sent to "com.foo.bar/greeter".
"""
def wrapper(function):
self.register(typename, function, specs)
return function
return wrapper
def for_typename(self, typename: str) -> StatefulFunction:
return self._functions[typename]
| 39.423529 | 118 | 0.640107 | 2,177 | 0.649657 | 0 | 0 | 0 | 0 | 0 | 0 | 1,709 | 0.509997 |
8d17091c2b65264aa06f866332b484a8ae11e68d | 2,195 | py | Python | Solutions/236.py | ruppysuppy/Daily-Coding-Problem-Solutions | 37d061215a9af2ce39c51f8816c83039914c0d0b | [
"MIT"
]
| 70 | 2021-03-18T05:22:40.000Z | 2022-03-30T05:36:50.000Z | Solutions/236.py | ungaro/Daily-Coding-Problem-Solutions | 37d061215a9af2ce39c51f8816c83039914c0d0b | [
"MIT"
]
| null | null | null | Solutions/236.py | ungaro/Daily-Coding-Problem-Solutions | 37d061215a9af2ce39c51f8816c83039914c0d0b | [
"MIT"
]
| 30 | 2021-03-18T05:22:43.000Z | 2022-03-17T10:25:18.000Z | """
Problem:
You are given a list of N points (x1, y1), (x2, y2), ..., (xN, yN) representing a
polygon. You can assume these points are given in order; that is, you can construct the
polygon by connecting point 1 to point 2, point 2 to point 3, and so on, finally
looping around to connect point N to point 1.
Determine if a new point p lies inside this polygon. (If p is on the boundary of the
polygon, you should return False).
"""
from typing import List, Tuple
Point = Tuple[int, int]
def is_inside(points: List[Point], p: Point) -> bool:
# Using the following concept:
# if a stright line in drawn from the point p to its right (till infinity), the
# drawn line will intersect the lines connecting the points odd number of times
# (if p is enclosed by the points) else the the number of intersections will be
# even (implying its outside the figure created by the points)
# Details:
# https://www.geeksforgeeks.org/how-to-check-if-a-given-point-lies-inside-a-polygon
if len(points) in (0, 1, 2):
return False
x, y = p
last = points[0]
intersections = 0
same_height = set()
for point in points[1:]:
x1, y1 = last
x2, y2 = point
if min(y1, y2) <= y <= max(y1, y2) and x <= min(x1, x2):
if y2 == y and point not in same_height:
intersections += 1
same_height.add(point)
elif y1 == y and last not in same_height:
intersections += 1
same_height.add(last)
last = point
point = points[0]
x1, y1 = last
x2, y2 = point
if max(y1, y2) >= y >= min(y1, y2) and x <= min(x1, x2):
if y2 == y and point not in same_height:
intersections += 1
same_height.add(point)
elif y1 == y and last not in same_height:
intersections += 1
same_height.add(last)
if intersections % 2 == 1:
return True
return False
if __name__ == "__main__":
    print(is_inside([(4, 3), (5, 4), (6, 3), (5, 2)], (3, 3)))  # expected: False (outside)
    print(is_inside([(4, 3), (5, 4), (6, 3), (5, 2)], (5, 3)))  # expected: True (inside)
"""
SPECS:
TIME COMPLEXITY: O(n)
SPACE COMPLEXITY: O(n)
"""
| 29.662162 | 87 | 0.596811 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 927 | 0.422323 |
8d199b44ca6bfd408aa35f9d1da7c224cc1e44a1 | 968 | py | Python | tests/modules/generate/fake_package_repository_resolver.py | goldstar611/appimage-builder | 62e4b8781e604545817eb47c058f5be0c0d27d15 | [
"MIT"
]
| 155 | 2019-12-16T00:04:03.000Z | 2022-03-28T11:22:55.000Z | tests/modules/generate/fake_package_repository_resolver.py | goldstar611/appimage-builder | 62e4b8781e604545817eb47c058f5be0c0d27d15 | [
"MIT"
]
| 151 | 2019-11-22T13:13:22.000Z | 2022-03-30T21:27:32.000Z | tests/modules/generate/fake_package_repository_resolver.py | goldstar611/appimage-builder | 62e4b8781e604545817eb47c058f5be0c0d27d15 | [
"MIT"
]
| 28 | 2020-01-15T15:30:43.000Z | 2022-03-22T08:58:06.000Z | # Copyright 2021 Alexis Lopez Zubieta
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
from appimagebuilder.modules.generate.package_managers.apt import (
PackageRepositoryResolver,
)
class FakePackageRepositoryResolver(PackageRepositoryResolver):
    def resolve_source_lines(self, packages) -> list:
return [
"deb http://archive.ubuntu.com/ubuntu/ focal main restricted universe multiverse"
]
| 44 | 93 | 0.759298 | 236 | 0.243802 | 0 | 0 | 0 | 0 | 0 | 0 | 698 | 0.721074 |
8d19a458c0aeddafe12f42faf41b63a52a85ae7f | 2,546 | py | Python | Oblig3/test_benchmark.py | fabiorodp/IN5550_Neural_Methods_in_Natural_Language_Processing | 4d3b2ed56b56e016413ae1544e19ad2a2c0ef047 | [
"MIT"
]
| null | null | null | Oblig3/test_benchmark.py | fabiorodp/IN5550_Neural_Methods_in_Natural_Language_Processing | 4d3b2ed56b56e016413ae1544e19ad2a2c0ef047 | [
"MIT"
]
| null | null | null | Oblig3/test_benchmark.py | fabiorodp/IN5550_Neural_Methods_in_Natural_Language_Processing | 4d3b2ed56b56e016413ae1544e19ad2a2c0ef047 | [
"MIT"
]
| null | null | null | # Author: Fabio Rodrigues Pereira
# E-mail: [email protected]
# Author: Per Morten Halvorsen
# E-mail: [email protected]
# Author: Eivind Grønlie Guren
# E-mail: [email protected]
try:
from Oblig3.packages.preprocess import load_raw_data, filter_raw_data, pad
from Oblig3.packages.preprocess import OurCONLLUDataset
from Oblig3.packages.model import Transformer
except:
from packages.preprocess import load_raw_data, filter_raw_data, pad
from packages.preprocess import OurCONLLUDataset
from packages.model import Transformer
from sklearn.model_selection import train_test_split
from torch.utils.data import DataLoader
from transformers import BertTokenizer
import torch
# first step
# datapath = '/cluster/projects/nn9851k/IN5550/norne-nb-in5550-train.conllu'
# NORBERT = '/cluster/shared/nlpl/data/vectors/latest/216'
datapath = 'Oblig3/saga/norne-nb-in5550-train.conllu'
NORBERT = 'Oblig3/saga/216/'
device = "cuda" if torch.cuda.is_available() else "cpu"
torch.cuda.empty_cache() if torch.cuda.is_available() else None
# loading raw data
con_df = load_raw_data(datapath=datapath)
con_df = filter_raw_data(df=con_df, min_entities=5)
# splitting data
train_df, val_df = train_test_split(
con_df,
# train_size=0.50,
test_size=0.25,
random_state=1,
shuffle=True,
)
tokenizer = BertTokenizer.from_pretrained(NORBERT)
# creating data sets
train_dataset = OurCONLLUDataset(
df=train_df,
tokenizer=tokenizer,
device=device
)
val_dataset = OurCONLLUDataset(
df=val_df,
tokenizer=tokenizer,
label_vocab=train_dataset.label_vocab,
device=device
)
# creating data loaders
train_loader = DataLoader(
train_dataset,
batch_size=32,
collate_fn=lambda batch: pad(batch, train_dataset.IGNORE_ID)
)
val_loader = DataLoader(
val_dataset,
batch_size=len(val_dataset),
collate_fn=lambda batch: pad(batch, train_dataset.IGNORE_ID)
)
# calling transformer model
transformer = Transformer(
NORBERT=NORBERT,
num_labels=len(train_dataset.label_indexer),
NOT_ENTITY_ID=train_dataset.label_indexer['O'],
device=device,
epochs=100, # 12 for the optimal
lr_scheduler=False,
factor=0.1,
patience=2,
loss_funct='cross-entropy',
random_state=1,
verbose=True,
lr=0.01,
momentum=0.9,
epoch_patience=1, # 0 for the optimal
label_indexer=train_dataset.label_indexer
)
transformer.fit(
loader=train_loader,
test=val_loader,
verbose=True
)
torch.save(transformer, "transformer_benchmark_12ep.pt")
| 24.480769 | 78 | 0.749411 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 598 | 0.234786 |
8d1acd1c8212f19c55510b4dd8c3544bf2548519 | 11,176 | py | Python | test/test_box/test_box_storage.py | cmc333333/parsons | 50804a3627117797570f1e9233c9bbad583f7831 | [
"Apache-2.0"
]
| null | null | null | test/test_box/test_box_storage.py | cmc333333/parsons | 50804a3627117797570f1e9233c9bbad583f7831 | [
"Apache-2.0"
]
| 2 | 2021-11-24T19:39:57.000Z | 2022-01-03T23:03:35.000Z | test/test_box/test_box_storage.py | cmc333333/parsons | 50804a3627117797570f1e9233c9bbad583f7831 | [
"Apache-2.0"
]
| null | null | null | import logging
import os
import random
import string
import unittest
import warnings
from boxsdk.exception import BoxAPIException, BoxOAuthException
from parsons.box import Box
from parsons.etl import Table
"""Prior to running, you should ensure that the relevant environment
variables have been set, e.g. via
# Note: these are fake keys, provided as examples.
export BOX_CLIENT_ID=txqedp4rqi0cz5qckz361fziavdtdwxz
export BOX_CLIENT_SECRET=bk264KHMDLVy89TeuUpSRa4CN5o35u9h
export BOX_ACCESS_TOKEN=boK97B39m3ozIGyTcazbWRbi5F2SSZ5J
"""
TEST_CLIENT_ID = os.getenv('BOX_CLIENT_ID')
TEST_BOX_CLIENT_SECRET = os.getenv('BOX_CLIENT_SECRET')
TEST_ACCESS_TOKEN = os.getenv('BOX_ACCESS_TOKEN')
def generate_random_string(length):
"""Utility to generate random alpha string for file/folder names"""
return ''.join(random.choice(string.ascii_letters) for i in range(length))
@unittest.skipIf(not os.getenv('LIVE_TEST'), 'Skipping because not running live test')
class TestBoxStorage(unittest.TestCase):
def setUp(self) -> None:
warnings.filterwarnings(action="ignore", message="unclosed", category=ResourceWarning)
# Create a client that we'll use to manipulate things behind the scenes
self.client = Box()
# Create test folder that we'll use for all our manipulations
self.temp_folder_name = generate_random_string(24)
logging.info(f'Creating temp folder {self.temp_folder_name}')
self.temp_folder_id = self.client.create_folder(self.temp_folder_name)
def tearDown(self) -> None:
logging.info(f'Deleting temp folder {self.temp_folder_name}')
self.client.delete_folder_by_id(self.temp_folder_id)
def test_list_files_by_id(self) -> None:
# Count on environment variables being set
box = Box()
subfolder = box.create_folder_by_id(folder_name='id_subfolder',
parent_folder_id=self.temp_folder_id)
# Create a couple of files in the temp folder
table = Table([['phone_number', 'last_name', 'first_name'],
['4435705355', 'Warren', 'Elizabeth'],
['5126993336', 'Obama', 'Barack']])
box.upload_table_to_folder_id(table, 'temp1', folder_id=subfolder)
box.upload_table_to_folder_id(table, 'temp2', folder_id=subfolder)
box.create_folder_by_id(folder_name='temp_folder1', parent_folder_id=subfolder)
box.create_folder_by_id(folder_name='temp_folder2', parent_folder_id=subfolder)
file_list = box.list_files_by_id(folder_id=subfolder)
self.assertEqual(['temp1', 'temp2'], file_list['name'])
# Check that if we delete a file, it's no longer there
for box_file in file_list:
if box_file['name'] == 'temp1':
box.delete_file_by_id(box_file['id'])
break
file_list = box.list_files_by_id(folder_id=subfolder)
self.assertEqual(['temp2'], file_list['name'])
folder_list = box.list_folders_by_id(folder_id=subfolder)['name']
self.assertEqual(['temp_folder1', 'temp_folder2'], folder_list)
def test_list_files_by_path(self) -> None:
# Count on environment variables being set
box = Box()
# Make sure our test folder is in the right place
found_default = False
for item in box.list():
if item['name'] == self.temp_folder_name:
found_default = True
break
self.assertTrue(found_default,
f'Failed to find test folder f{self.temp_folder_name} '
f'in default Box folder')
subfolder_name = 'path_subfolder'
subfolder_path = f'{self.temp_folder_name}/{subfolder_name}'
box.create_folder(path=subfolder_path)
# Create a couple of files in the temp folder
table = Table([['phone_number', 'last_name', 'first_name'],
['4435705355', 'Warren', 'Elizabeth'],
['5126993336', 'Obama', 'Barack']])
box.upload_table(table, f'{subfolder_path}/temp1')
box.upload_table(table, f'{subfolder_path}/temp2')
box.create_folder(f'{subfolder_path}/temp_folder1')
box.create_folder(f'{subfolder_path}/temp_folder2')
file_list = box.list(path=subfolder_path, item_type='file')
self.assertEqual(['temp1', 'temp2'], file_list['name'])
# Check that if we delete a file, it's no longer there
for box_file in file_list:
if box_file['name'] == 'temp1':
box.delete_file(path=f'{subfolder_path}/temp1')
break
file_list = box.list(path=subfolder_path, item_type='file')
self.assertEqual(['temp2'], file_list['name'])
folder_list = box.list(path=subfolder_path, item_type='folder')
self.assertEqual(['temp_folder1', 'temp_folder2'], folder_list['name'])
# Make sure we can delete by path
box.delete_folder(f'{subfolder_path}/temp_folder1')
folder_list = box.list(path=subfolder_path, item_type='folder')
self.assertEqual(['temp_folder2'], folder_list['name'])
def test_upload_file(self) -> None:
# Count on environment variables being set
box = Box()
table = Table([['phone_number', 'last_name', 'first_name'],
['4435705355', 'Warren', 'Elizabeth'],
['5126993336', 'Obama', 'Barack']])
box_file = box.upload_table_to_folder_id(table, 'phone_numbers',
folder_id=self.temp_folder_id)
new_table = box.get_table_by_file_id(box_file.id)
# Check that what we saved is equal to what we got back
self.assertEqual(str(table), str(new_table))
# Check that things also work in JSON
box_file = box.upload_table_to_folder_id(table, 'phone_numbers_json',
folder_id=self.temp_folder_id,
format='json')
new_table = box.get_table_by_file_id(box_file.id, format='json')
# Check that what we saved is equal to what we got back
self.assertEqual(str(table), str(new_table))
# Now check the same thing with paths instead of file_id
path_filename = 'path_phone_numbers'
box_file = box.upload_table(table, f'{self.temp_folder_name}/{path_filename}')
        new_table = box.get_table(path=f'{self.temp_folder_name}/{path_filename}')
        # Check that the path-based round trip also returns what we saved
        self.assertEqual(str(table), str(new_table))
# Check that we throw an exception with bad formats
with self.assertRaises(ValueError):
box.upload_table_to_folder_id(table, 'phone_numbers', format='illegal_format')
with self.assertRaises(ValueError):
box.get_table_by_file_id(box_file.id, format='illegal_format')
def test_download_file(self) -> None:
box = Box()
table = Table([['phone_number', 'last_name', 'first_name'],
['4435705355', 'Warren', 'Elizabeth'],
['5126993336', 'Obama', 'Barack']])
uploaded_file = table.to_csv()
path_filename = f'{self.temp_folder_name}/my_path'
box.upload_table(table, path_filename)
downloaded_file = box.download_file(path_filename)
with open(uploaded_file) as uploaded, open(downloaded_file) as downloaded:
self.assertEqual(str(uploaded.read()), str(downloaded.read()))
def test_get_item_id(self) -> None:
# Count on environment variables being set
box = Box()
# Create a subfolder in which we'll do this test
sub_sub_folder_name = 'item_subfolder'
sub_sub_folder_id = box.create_folder_by_id(folder_name=sub_sub_folder_name,
parent_folder_id=self.temp_folder_id)
table = Table([['phone_number', 'last_name', 'first_name'],
['4435705355', 'Warren', 'Elizabeth'],
['5126993336', 'Obama', 'Barack']])
box_file = box.upload_table_to_folder_id(table, 'file_in_subfolder',
folder_id=self.temp_folder_id)
box_file = box.upload_table_to_folder_id(table, 'phone_numbers',
folder_id=sub_sub_folder_id)
# Now try getting various ids
file_path = f'{self.temp_folder_name}/item_subfolder/phone_numbers'
self.assertEqual(box_file.id, box.get_item_id(path=file_path))
file_path = f'{self.temp_folder_name}/item_subfolder'
self.assertEqual(sub_sub_folder_id, box.get_item_id(path=file_path))
file_path = self.temp_folder_name
self.assertEqual(self.temp_folder_id, box.get_item_id(path=file_path))
# Trailing "/"
with self.assertRaises(ValueError):
file_path = f'{self.temp_folder_name}/item_subfolder/phone_numbers/'
box.get_item_id(path=file_path)
# Nonexistent file
with self.assertRaises(ValueError):
file_path = f'{self.temp_folder_name}/item_subfolder/nonexistent/phone_numbers'
box.get_item_id(path=file_path)
# File (rather than folder) in middle of path
with self.assertRaises(ValueError):
file_path = f'{self.temp_folder_name}/file_in_subfolder/phone_numbers'
box.get_item_id(path=file_path)
def test_errors(self) -> None:
# Count on environment variables being set
box = Box()
nonexistent_id = '9999999'
table = Table([['phone_number', 'last_name', 'first_name'],
['4435705355', 'Warren', 'Elizabeth'],
['5126993336', 'Obama', 'Barack']])
# Upload a bad format
with self.assertRaises(ValueError):
box.upload_table_to_folder_id(table, 'temp1', format='bad_format')
# Download a bad format
with self.assertRaises(ValueError):
box.get_table_by_file_id(file_id=nonexistent_id, format='bad_format')
# Upload to non-existent folder
with self.assertLogs(level=logging.WARNING):
with self.assertRaises(BoxAPIException):
box.upload_table_to_folder_id(table, 'temp1', folder_id=nonexistent_id)
# Download a non-existent file
with self.assertLogs(level=logging.WARNING):
with self.assertRaises(BoxAPIException):
box.get_table_by_file_id(nonexistent_id, format='json')
# Create folder in non-existent parent
with self.assertRaises(ValueError):
box.create_folder('nonexistent_path/path')
# Create folder in non-existent parent
with self.assertLogs(level=logging.WARNING):
with self.assertRaises(BoxAPIException):
box.create_folder_by_id(folder_name='subfolder', parent_folder_id=nonexistent_id)
# Try using bad credentials
box = Box(access_token='5345345345')
with self.assertLogs(level=logging.WARNING):
with self.assertRaises(BoxOAuthException):
box.list_files_by_id()
| 42.656489 | 97 | 0.642895 | 10,209 | 0.913475 | 0 | 0 | 10,296 | 0.92126 | 0 | 0 | 3,660 | 0.327487 |
8d1b66ad840bf7a208b29ea852c07fe8f18d11de | 3,961 | py | Python | Task2.py | sahil7pathak/Image_Segmentation_and_Point_Detection | 7cf00f1c0a10ee0384eba7cbbb17f0779642cfa3 | [
"MIT"
]
| null | null | null | Task2.py | sahil7pathak/Image_Segmentation_and_Point_Detection | 7cf00f1c0a10ee0384eba7cbbb17f0779642cfa3 | [
"MIT"
]
| null | null | null | Task2.py | sahil7pathak/Image_Segmentation_and_Point_Detection | 7cf00f1c0a10ee0384eba7cbbb17f0779642cfa3 | [
"MIT"
]
| null | null | null | import matplotlib.pyplot as plt
import numpy as np
import cv2
'''Erosion method: expects a binary 0/1 image; a pixel is kept only where its neighbourhood equals the kernel'''
def erosion(image, kernel):
img_height = image.shape[0]
img_width = image.shape[1]
kernel_height = kernel.shape[0]
kernel_width = kernel.shape[1]
h = kernel_height//2
w = kernel_width//2
res = [[0 for x in range(img_width)] for y in range(img_height)]
res = np.array(res)
for i in range(h, img_height-h):
for j in range(w, img_width-w):
a = np.array(image[(i-h):(i-h)+kernel_height, (j-w):(j-w)+kernel_width])
if(np.array_equal(a, kernel)):
res[i][j] = 1
else:
res[i][j] = 0
return res
'''Point Detection Method'''
def point_detection(image, kernel):
img_height = image.shape[0]
img_width = image.shape[1]
image = cv2.Laplacian(image, cv2.CV_32F)
kernel_height = kernel.shape[0]
kernel_width = kernel.shape[1]
h = kernel_height//2
w = kernel_width//2
'''Threshold chosen to be a value which is 90% of maximum sum value'''
T = 8382
    sum_arr = []
    co_ord = None  # stays None when no response exceeds the threshold
res = [[0 for x in range(img_width)] for y in range(img_height)]
res = np.array(res)
for i in range(h, img_height-h):
for j in range(w, img_width-w):
a = np.array(image[(i-h):(i-h)+kernel_height, (j-w):(j-w)+kernel_width])
            response = np.abs(np.sum(np.multiply(kernel, a)))
            sum_arr.append(response)
            if response > T:
co_ord = (i, j)
res[i][j] = 1
print("Maximum sum: ",np.max(np.array(sum_arr)))
return res, co_ord
def check_segment(image):
img_height = image.shape[0]
img_width = image.shape[1]
'''Threshold chosen by observing the plotted histogram'''
T = 204
res = [[0 for x in range(img_width)] for y in range(img_height)]
res = np.array(res)
for i in range(image.shape[0]):
for j in range(image.shape[1]):
if(image[i][j] > T):
res[i][j] = 255
else:
res[i][j] = 0
return res
img = cv2.imread("point.jpg",0)
sample = img
kernel = np.array([[-1,-1,-1],
[-1,8,-1],
[-1,-1,-1]])
output, co_ord = point_detection(img, kernel)
output = output*255
output = np.asarray(output, np.uint8)
cv2.rectangle(output,(424,230),(464,272),(255,255,255),2)
cv2.imwrite("res_point.jpg",output)
'''Code for segmenting the object from the background'''
img2 = cv2.imread("segment.jpg", 0)
seg = check_segment(img2)
seg = np.asarray(seg, np.uint8)
cv2.rectangle(seg,(155,115),(208,172),(255,255,255),2)
cv2.rectangle(seg,(245,68),(300,223),(255,255,255),2)
cv2.rectangle(seg,(322,13),(370,291),(255,255,255),2)
cv2.rectangle(seg,(382,33),(430,264),(255,255,255),2)
'''Observed co-ordinates of bounding boxes, in col, row format'''
print("1st box: ")
print("Upper left: (155,115)")
print("Upper right: (208,115)")
print("Bottom left: (155,172)")
print("Bottom right: (208,172)\n")
print("2nd box: ")
print("Upper left: (245,68)")
print("Upper right: (300,68)")
print("Bottom left: (245,223)")
print("Bottom right: (300,223)\n")
print("3rd box: ")
print("Upper left: (322,13)")
print("Upper right: (370,13)")
print("Bottom left: (322,291)")
print("Bottom right: (370,291)\n")
print("4th box: ")
print("Upper left: (382,33)")
print("Upper right: (430,33)")
print("Bottom left: (382,264)")
print("Bottom right: (430,264)")
cv2.imwrite("res_segment.jpg",seg)
'''Plotting Histogram'''
my_dict = {}
for i in range(np.unique(img2).shape[0]):
a = np.unique(img2)[i]
count = np.sum(img2 == a)
my_dict[a] = count
sorted_by_value = sorted(my_dict.items(), key=lambda kv: kv[1])
uniq = list(np.unique(img2))
val = list(my_dict.values())
plt.plot(uniq[1:],val[1:])
plt.show()
| 30.705426 | 85 | 0.578642 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 819 | 0.206766 |
8d1f2e38cdfd31edc3acb7a262903d61da73d831 | 1,652 | py | Python | Subjects/migrations/0001_initial.py | Mithzyl/Master-college-selecting-api | ec8f36067fb648238df4faeaa6a65e5a78740e6c | ["MIT"] | null | null | null | Subjects/migrations/0001_initial.py | Mithzyl/Master-college-selecting-api | ec8f36067fb648238df4faeaa6a65e5a78740e6c | ["MIT"] | null | null | null | Subjects/migrations/0001_initial.py | Mithzyl/Master-college-selecting-api | ec8f36067fb648238df4faeaa6a65e5a78740e6c | ["MIT"] | null | null | null |
# Generated by Django 3.1.5 on 2021-02-07 08:19
from django.db import migrations, models
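# Initial migration: creates four structurally identical subject tables, each
# with a nullable integer code and a short name.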
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='FirstMajorSubject',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('code', models.IntegerField(blank=True, null=True)),
('name', models.CharField(max_length=40)),
],
),
migrations.CreateModel(
name='ForeignLanguageSubject',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('code', models.IntegerField(blank=True, null=True)),
('name', models.CharField(max_length=40)),
],
),
migrations.CreateModel(
name='PoliticSubject',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('code', models.IntegerField(blank=True, null=True)),
('name', models.CharField(max_length=40)),
],
),
migrations.CreateModel(
name='SecondMajorSubject',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('code', models.IntegerField(blank=True, null=True)),
('name', models.CharField(max_length=40)),
],
),
]
| 35.148936 | 114 | 0.552663 | 1,559 | 0.943705 | 0 | 0 | 0 | 0 | 0 | 0 | 206 | 0.124697 |
8d1f97cb6d168a2c8e3c97a6da76772adf11469f | 239 | py | Python | app/__init__.py | pahumadad/flask-oauth | 309e235da8d72bb4e33d6fb68eb90b2f5392823a | ["MIT"] | 1 | 2017-04-27T09:23:48.000Z | 2017-04-27T09:23:48.000Z | app/__init__.py | pahumadad/flask-oauth | 309e235da8d72bb4e33d6fb68eb90b2f5392823a | ["MIT"] | null | null | null | app/__init__.py | pahumadad/flask-oauth | 309e235da8d72bb4e33d6fb68eb90b2f5392823a | ["MIT"] | null | null | null |
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from flask_login import LoginManager
app = Flask(__name__)
app.config.from_object('config')
db = SQLAlchemy(app)
lm = LoginManager(app)
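# Imported at the bottom on purpose: these modules import `app` themselves,
# so a top-level import here would create a circular dependency.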
from app import views, models, oauth
| 21.727273 | 39 | 0.803347 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 8 | 0.033473 |
8d212f11594f7ae449b95c565655219888507326 | 511 | py | Python | Python/toLowerCase.py | dianeyeo/LeetCode | b814831e7a4296a4e95785b75ea5c540a3fca63d | ["MIT"] | null | null | null | Python/toLowerCase.py | dianeyeo/LeetCode | b814831e7a4296a4e95785b75ea5c540a3fca63d | ["MIT"] | null | null | null | Python/toLowerCase.py | dianeyeo/LeetCode | b814831e7a4296a4e95785b75ea5c540a3fca63d | ["MIT"] | null | null | null |
"""
https://leetcode.com/problems/to-lower-case/
Difficulty: Easy
Given a string s, return the string after replacing every uppercase letter with the same lowercase letter.
Example 1:
Input: s = "Hello"
Output: "hello"
Example 2:
Input: s = "here"
Output: "here"
Example 3:
Input: s = "LOVELY"
Output: "lovely"
Constraints:
1 <= s.length <= 100
s consists of printable ASCII characters.
"""
class Solution:
    def toLowerCase(self, s: str) -> str:
        return s.lower()
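# For illustration, the same result without str.lower(), shifting only the
# ASCII range 'A'-'Z' by 32 code points:
#   "".join(chr(ord(c) + 32) if "A" <= c <= "Z" else c for c in s)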
| 17.033333 | 106 | 0.661448 | 86 | 0.168297 | 0 | 0 | 0 | 0 | 0 | 0 | 422 | 0.825832 |
8d213f69d083136ed499e8028606ef1e8d49f01e | 2,495 | py | Python | covid_phylo/src/analysis.py | mrubio-chavarria/covidMonitor | 8d59b17dbff46a781527de181f22b115565e5c2d | ["MIT"] | 1 | 2021-03-22T17:05:52.000Z | 2021-03-22T17:05:52.000Z | covid_phylo/src/analysis.py | mrubio-chavarria/covidMonitor | 8d59b17dbff46a781527de181f22b115565e5c2d | ["MIT"] | 6 | 2020-06-06T01:51:21.000Z | 2022-01-13T02:39:02.000Z | covid_phylo/src/analysis.py | mrubio-chavarria/covidMonitor | 8d59b17dbff46a781527de181f22b115565e5c2d | ["MIT"] | null | null | null |
import align_tools as at
import matplotlib.pyplot as plt
import numpy as np
from collections import Counter
def h(x):
if x>0:
return 1
else:
return 0
def get_counter(arr, lower_sat=None, upper_sat=None):
result = {}
for val in arr:
if (upper_sat is None or val < upper_sat) and (lower_sat is None or val > lower_sat):
result[val] = result.get(val, 0) + 1
elif upper_sat is not None and val >= upper_sat:
result[upper_sat] = result.get(upper_sat, 0) + 1
else:
result[lower_sat] = result.get(lower_sat, 0) + 1
return result
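# get_counter() is a histogram with optional saturation: values at or beyond
# upper_sat (or at or below lower_sat) are folded into the boundary bins.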
def analyse_gaps(num_gaps, collaps_factor=1):
print(get_counter(num_gaps, upper_sat=1))
has_gaps = [h(num_gap) for num_gap in num_gaps]
num_gaps_collaps = [sum(has_gaps[max([collaps_factor*i, 0]):min([collaps_factor*(i+1), len(has_gaps)])]) for i in range(int(len(has_gaps)/collaps_factor)+1)]
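    # Each collapsed entry counts how many of the `collaps_factor` positions
    # in its window contain at least one gap.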
    ax = plt.subplot(111)
    ax.bar(range(len(num_gaps_collaps)), num_gaps_collaps)
    plt.xlabel('window')
    plt.ylabel('positions with gaps')
    plt.title('Positions with gaps')
    plt.show()
def analyse_changes(num_vars_det, num_vars_all):
vars_det_sites = get_counter(num_vars_det, 0, 4)
vars_all_sites = get_counter(num_vars_all, 0, 4)
print('only determined')
print([f'k={k}: {vars_det_sites.get(k, 0)}, {vars_det_sites.get(k, 0) / len(num_vars_det) * 100:.2f}%' for k in vars_det_sites])
print('also undetermined')
print([f'k={k}: {vars_all_sites.get(k, 0)}, {vars_all_sites.get(k, 0) / len(num_vars_all) * 100:.2f}%' for k in vars_all_sites])
x = [n for n in vars_det_sites]
y = [vars_det_sites.get(n, 0) for n in x]
    z = [vars_all_sites.get(n, 0) for n in x]
ax = plt.subplot(111)
bar1 = ax.bar(np.array(x)-0.1, y, width=0.2, color='b', align='center')
bar2 = ax.bar(np.array(x)+0.1, z, width=0.2, color='r', align='center')
    ax.legend((bar1[0], bar2[0]), ('Known bases only', 'Including undetermined bases'))
    plt.xlabel('k (saturated at 4)')
    plt.xticks([1, 2, 3, 4])
    plt.ylabel('n_k')
    plt.title('Histogram of distinct nucleotides per position')
plt.show()
def main():
records = at.aligned_records_by_tag("complete")
num_gaps, num_vars_det, num_vars_all = at.analyse_alignment(records)
    print("done analysis")
analyse_gaps(num_gaps, collaps_factor=300)
analyse_changes(num_vars_det, num_vars_all)
if __name__ == '__main__':
main() | 34.178082 | 161 | 0.658116 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 441 | 0.176612 |
8d21b09432278f9368a292eca49b25d9da12e492 | 88 | py | Python | apps/salt/apps.py | plsof/tabops_api | 39f5d2fd5158ae0c22e43ab6ff7e2b07a68a62d8 | ["MIT"] | 1 | 2019-07-31T07:34:38.000Z | 2019-07-31T07:34:38.000Z | apps/salt/apps.py | plsof/tabops_api | 39f5d2fd5158ae0c22e43ab6ff7e2b07a68a62d8 | ["MIT"] | 9 | 2019-12-05T00:39:29.000Z | 2022-02-10T14:13:29.000Z | apps/salt/apps.py | plsof/tabops_api | 39f5d2fd5158ae0c22e43ab6ff7e2b07a68a62d8 | ["MIT"] | null | null | null |
from django.apps import AppConfig
class SaltConfig(AppConfig):
name = 'apps.salt'
| 14.666667 | 33 | 0.738636 | 51 | 0.579545 | 0 | 0 | 0 | 0 | 0 | 0 | 11 | 0.125 |
8d21d5ac301b7c2c83e332f0f0cea5a96ae6d81d | 1,266 | py | Python | pygears_vivado/vivmod.py | Anari-AI/pygears-vivado | a9d928d9914b479739ff8fc1e208813292c4b711 | ["MIT"] | 1 | 2022-03-19T02:11:12.000Z | 2022-03-19T02:11:12.000Z | pygears_vivado/vivmod.py | Anari-AI/pygears-vivado | a9d928d9914b479739ff8fc1e208813292c4b711 | ["MIT"] | null | null | null | pygears_vivado/vivmod.py | Anari-AI/pygears-vivado | a9d928d9914b479739ff8fc1e208813292c4b711 | ["MIT"] | 1 | 2021-06-01T13:21:12.000Z | 2021-06-01T13:21:12.000Z |
import os
from pygears.hdl.sv import SVModuleInst
from .ip_resolver import IPResolver
class SVVivModuleInst(SVModuleInst):
def __init__(self, node, lang=None):
resolver = IPResolver(node)
super().__init__(node, resolver.lang, resolver)
@property
def is_generated(self):
return True
@property
def include(self):
return [os.path.join(self.ipdir, 'hdl')]
def get_wrap_portmap(self, parent_lang):
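        # Build the wrapper's mapping: SystemVerilog sides keep interface
        # ports intact, while the Verilog cases map each port onto the
        # flattened tvalid/tready/tdata signal triple.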
sig_map = {}
for s in self.node.meta_kwds['signals']:
sig_map[s.name] = s.name
port_map = {}
for p in self.node.in_ports + self.node.out_ports:
name = p.basename
if self.lang == 'sv':
port_map[name] = name
elif parent_lang == 'sv':
sig_map[f'{name}_tvalid'] = f'{name}.valid'
sig_map[f'{name}_tready'] = f'{name}.ready'
sig_map[f'{name}_tdata'] = f'{name}.data'
elif parent_lang == 'v':
sig_map[f'{name}_tvalid'] = f'{name}_valid'
sig_map[f'{name}_tready'] = f'{name}_ready'
sig_map[f'{name}_tdata'] = f'{name}_data'
else:
port_map[name] = name
return port_map, sig_map
| 30.878049 | 59 | 0.553712 | 1,177 | 0.9297 | 0 | 0 | 138 | 0.109005 | 0 | 0 | 207 | 0.163507 |
8d24383aba0b77760774f695ed82a4ade6ace738 | 1,841 | py | Python | commodore/inventory/render.py | projectsyn/commodore | afd924a2aa8abb79cd6a8970ff225756469dd2b3 | ["BSD-3-Clause"] | 39 | 2019-12-17T13:40:19.000Z | 2021-12-31T08:22:52.000Z | commodore/inventory/render.py | projectsyn/commodore | afd924a2aa8abb79cd6a8970ff225756469dd2b3 | ["BSD-3-Clause"] | 161 | 2020-02-14T18:32:49.000Z | 2022-03-25T09:23:35.000Z | commodore/inventory/render.py | projectsyn/commodore | afd924a2aa8abb79cd6a8970ff225756469dd2b3 | ["BSD-3-Clause"] | 12 | 2019-12-18T15:43:09.000Z | 2021-06-28T11:51:59.000Z |
import shutil
import tempfile
from pathlib import Path
from typing import Dict
import click
from commodore.config import Config
from .parameters import ClassNotFound, InventoryFactory, InventoryFacts
def _cleanup_work_dir(cfg: Config, work_dir: Path):
if not cfg.debug:
# Clean up work dir if we're not in debug mode
shutil.rmtree(work_dir)
def extract_components(
cfg: Config, invfacts: InventoryFacts
) -> Dict[str, Dict[str, str]]:
if cfg.debug:
click.echo(
f"Called with: global_config={invfacts.global_config} "
+ f"tenant_config={invfacts.tenant_config} "
+ f"extra_classes={invfacts.extra_classes} "
+ f"allow_missing_classes={invfacts.allow_missing_classes}."
)
global_dir = Path(invfacts.global_config).resolve().absolute()
tenant_dir = None
if invfacts.tenant_config:
tenant_dir = Path(invfacts.tenant_config).resolve().absolute()
work_dir = Path(tempfile.mkdtemp(prefix="commodore-reclass-")).resolve()
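    # Rendering happens in a throwaway scratch directory; _cleanup_work_dir
    # removes it afterwards unless debug mode keeps it around for inspection.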
if global_dir.is_dir() and (not tenant_dir or tenant_dir.is_dir()):
invfactory = InventoryFactory.from_repo_dirs(
work_dir, global_dir, tenant_dir, invfacts
)
else:
_cleanup_work_dir(cfg, work_dir)
raise NotImplementedError("Cloning global or tenant repo not yet implemented")
try:
inv = invfactory.reclass(invfacts)
components = inv.parameters("components")
except ClassNotFound as e:
_cleanup_work_dir(cfg, work_dir)
raise ValueError(
"Unable to render inventory with `--no-allow-missing-classes`. "
+ f"Class '{e.name}' not found. "
+ "Verify the provided values or allow missing classes."
) from e
_cleanup_work_dir(cfg, work_dir)
return components
| 30.683333 | 86 | 0.674633 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 475 | 0.258012 |
8d2771d9640e1def0fa9d63283dfdac05afbee62 | 25,468 | py | Python | nova/pci/stats.py | bopopescu/nova-token | ec98f69dea7b3e2b9013b27fd55a2c1a1ac6bfb2 | ["Apache-2.0"] | null | null | null | nova/pci/stats.py | bopopescu/nova-token | ec98f69dea7b3e2b9013b27fd55a2c1a1ac6bfb2 | ["Apache-2.0"] | null | null | null | nova/pci/stats.py | bopopescu/nova-token | ec98f69dea7b3e2b9013b27fd55a2c1a1ac6bfb2 | ["Apache-2.0"] | 2 | 2017-07-20T17:31:34.000Z | 2020-07-24T02:42:19.000Z |
begin_unit
comment|'# Copyright (c) 2013 Intel, Inc.'
nl|'\n'
comment|'# Copyright (c) 2013 OpenStack Foundation'
nl|'\n'
comment|'# All Rights Reserved.'
nl|'\n'
comment|'#'
nl|'\n'
comment|'# Licensed under the Apache License, Version 2.0 (the "License"); you may'
nl|'\n'
comment|'# not use this file except in compliance with the License. You may obtain'
nl|'\n'
comment|'# a copy of the License at'
nl|'\n'
comment|'#'
nl|'\n'
comment|'# http://www.apache.org/licenses/LICENSE-2.0'
nl|'\n'
comment|'#'
nl|'\n'
comment|'# Unless required by applicable law or agreed to in writing, software'
nl|'\n'
comment|'# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT'
nl|'\n'
comment|'# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the'
nl|'\n'
comment|'# License for the specific language governing permissions and limitations'
nl|'\n'
comment|'# under the License.'
nl|'\n'
nl|'\n'
name|'import'
name|'copy'
newline|'\n'
nl|'\n'
name|'from'
name|'oslo_config'
name|'import'
name|'cfg'
newline|'\n'
name|'from'
name|'oslo_log'
name|'import'
name|'log'
name|'as'
name|'logging'
newline|'\n'
name|'import'
name|'six'
newline|'\n'
nl|'\n'
name|'from'
name|'nova'
name|'import'
name|'exception'
newline|'\n'
name|'from'
name|'nova'
op|'.'
name|'i18n'
name|'import'
name|'_LE'
newline|'\n'
name|'from'
name|'nova'
name|'import'
name|'objects'
newline|'\n'
name|'from'
name|'nova'
op|'.'
name|'objects'
name|'import'
name|'fields'
newline|'\n'
name|'from'
name|'nova'
op|'.'
name|'objects'
name|'import'
name|'pci_device_pool'
newline|'\n'
name|'from'
name|'nova'
op|'.'
name|'pci'
name|'import'
name|'utils'
newline|'\n'
name|'from'
name|'nova'
op|'.'
name|'pci'
name|'import'
name|'whitelist'
newline|'\n'
nl|'\n'
nl|'\n'
DECL|variable|CONF
name|'CONF'
op|'='
name|'cfg'
op|'.'
name|'CONF'
newline|'\n'
DECL|variable|LOG
name|'LOG'
op|'='
name|'logging'
op|'.'
name|'getLogger'
op|'('
name|'__name__'
op|')'
newline|'\n'
nl|'\n'
nl|'\n'
DECL|class|PciDeviceStats
name|'class'
name|'PciDeviceStats'
op|'('
name|'object'
op|')'
op|':'
newline|'\n'
nl|'\n'
indent|' '
string|'"""PCI devices summary information.\n\n According to the PCI SR-IOV spec, a PCI physical function can have up to\n 256 PCI virtual functions, thus the number of assignable PCI functions in\n a cloud can be big. The scheduler needs to know all device availability\n information in order to determine which compute hosts can support a PCI\n request. Passing individual virtual device information to the scheduler\n does not scale, so we provide summary information.\n\n Usually the virtual functions provided by a host PCI device have the same\n value for most properties, like vendor_id, product_id and class type.\n The PCI stats class summarizes this information for the scheduler.\n\n The pci stats information is maintained exclusively by compute node\n resource tracker and updated to database. The scheduler fetches the\n information and selects the compute node accordingly. If a compute\n node is selected, the resource tracker allocates the devices to the\n instance and updates the pci stats information.\n\n This summary information will be helpful for cloud management also.\n """'
newline|'\n'
nl|'\n'
DECL|variable|pool_keys
name|'pool_keys'
op|'='
op|'['
string|"'product_id'"
op|','
string|"'vendor_id'"
op|','
string|"'numa_node'"
op|','
string|"'dev_type'"
op|']'
newline|'\n'
nl|'\n'
DECL|member|__init__
name|'def'
name|'__init__'
op|'('
name|'self'
op|','
name|'stats'
op|'='
name|'None'
op|','
name|'dev_filter'
op|'='
name|'None'
op|')'
op|':'
newline|'\n'
indent|' '
name|'super'
op|'('
name|'PciDeviceStats'
op|','
name|'self'
op|')'
op|'.'
name|'__init__'
op|'('
op|')'
newline|'\n'
comment|'# NOTE(sbauza): Stats are a PCIDevicePoolList object'
nl|'\n'
name|'self'
op|'.'
name|'pools'
op|'='
op|'['
name|'pci_pool'
op|'.'
name|'to_dict'
op|'('
op|')'
nl|'\n'
name|'for'
name|'pci_pool'
name|'in'
name|'stats'
op|']'
name|'if'
name|'stats'
name|'else'
op|'['
op|']'
newline|'\n'
name|'self'
op|'.'
name|'pools'
op|'.'
name|'sort'
op|'('
name|'key'
op|'='
name|'lambda'
name|'item'
op|':'
name|'len'
op|'('
name|'item'
op|')'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'dev_filter'
op|'='
name|'dev_filter'
name|'or'
name|'whitelist'
op|'.'
name|'Whitelist'
op|'('
nl|'\n'
name|'CONF'
op|'.'
name|'pci_passthrough_whitelist'
op|')'
newline|'\n'
nl|'\n'
DECL|member|_equal_properties
dedent|''
name|'def'
name|'_equal_properties'
op|'('
name|'self'
op|','
name|'dev'
op|','
name|'entry'
op|','
name|'matching_keys'
op|')'
op|':'
newline|'\n'
indent|' '
name|'return'
name|'all'
op|'('
name|'dev'
op|'.'
name|'get'
op|'('
name|'prop'
op|')'
op|'=='
name|'entry'
op|'.'
name|'get'
op|'('
name|'prop'
op|')'
nl|'\n'
name|'for'
name|'prop'
name|'in'
name|'matching_keys'
op|')'
newline|'\n'
nl|'\n'
DECL|member|_find_pool
dedent|''
name|'def'
name|'_find_pool'
op|'('
name|'self'
op|','
name|'dev_pool'
op|')'
op|':'
newline|'\n'
indent|' '
string|'"""Return the first pool that matches dev."""'
newline|'\n'
name|'for'
name|'pool'
name|'in'
name|'self'
op|'.'
name|'pools'
op|':'
newline|'\n'
indent|' '
name|'pool_keys'
op|'='
name|'pool'
op|'.'
name|'copy'
op|'('
op|')'
newline|'\n'
name|'del'
name|'pool_keys'
op|'['
string|"'count'"
op|']'
newline|'\n'
name|'del'
name|'pool_keys'
op|'['
string|"'devices'"
op|']'
newline|'\n'
name|'if'
op|'('
name|'len'
op|'('
name|'pool_keys'
op|'.'
name|'keys'
op|'('
op|')'
op|')'
op|'=='
name|'len'
op|'('
name|'dev_pool'
op|'.'
name|'keys'
op|'('
op|')'
op|')'
name|'and'
nl|'\n'
name|'self'
op|'.'
name|'_equal_properties'
op|'('
name|'dev_pool'
op|','
name|'pool_keys'
op|','
name|'dev_pool'
op|'.'
name|'keys'
op|'('
op|')'
op|')'
op|')'
op|':'
newline|'\n'
indent|' '
name|'return'
name|'pool'
newline|'\n'
nl|'\n'
DECL|member|_create_pool_keys_from_dev
dedent|''
dedent|''
dedent|''
name|'def'
name|'_create_pool_keys_from_dev'
op|'('
name|'self'
op|','
name|'dev'
op|')'
op|':'
newline|'\n'
indent|' '
string|'"""create a stats pool dict that this dev is supposed to be part of\n\n Note that this pool dict contains the stats pool\'s keys and their\n values. \'count\' and \'devices\' are not included.\n """'
newline|'\n'
comment|"# Don't add a device that doesn't have a matching device spec."
nl|'\n'
comment|'# This can happen during initial sync up with the controller'
nl|'\n'
name|'devspec'
op|'='
name|'self'
op|'.'
name|'dev_filter'
op|'.'
name|'get_devspec'
op|'('
name|'dev'
op|')'
newline|'\n'
name|'if'
name|'not'
name|'devspec'
op|':'
newline|'\n'
indent|' '
name|'return'
newline|'\n'
dedent|''
name|'tags'
op|'='
name|'devspec'
op|'.'
name|'get_tags'
op|'('
op|')'
newline|'\n'
name|'pool'
op|'='
op|'{'
name|'k'
op|':'
name|'getattr'
op|'('
name|'dev'
op|','
name|'k'
op|')'
name|'for'
name|'k'
name|'in'
name|'self'
op|'.'
name|'pool_keys'
op|'}'
newline|'\n'
name|'if'
name|'tags'
op|':'
newline|'\n'
indent|' '
name|'pool'
op|'.'
name|'update'
op|'('
name|'tags'
op|')'
newline|'\n'
dedent|''
name|'return'
name|'pool'
newline|'\n'
nl|'\n'
DECL|member|add_device
dedent|''
name|'def'
name|'add_device'
op|'('
name|'self'
op|','
name|'dev'
op|')'
op|':'
newline|'\n'
indent|' '
string|'"""Add a device to its matching pool."""'
newline|'\n'
name|'dev_pool'
op|'='
name|'self'
op|'.'
name|'_create_pool_keys_from_dev'
op|'('
name|'dev'
op|')'
newline|'\n'
name|'if'
name|'dev_pool'
op|':'
newline|'\n'
indent|' '
name|'pool'
op|'='
name|'self'
op|'.'
name|'_find_pool'
op|'('
name|'dev_pool'
op|')'
newline|'\n'
name|'if'
name|'not'
name|'pool'
op|':'
newline|'\n'
indent|' '
name|'dev_pool'
op|'['
string|"'count'"
op|']'
op|'='
number|'0'
newline|'\n'
name|'dev_pool'
op|'['
string|"'devices'"
op|']'
op|'='
op|'['
op|']'
newline|'\n'
name|'self'
op|'.'
name|'pools'
op|'.'
name|'append'
op|'('
name|'dev_pool'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'pools'
op|'.'
name|'sort'
op|'('
name|'key'
op|'='
name|'lambda'
name|'item'
op|':'
name|'len'
op|'('
name|'item'
op|')'
op|')'
newline|'\n'
name|'pool'
op|'='
name|'dev_pool'
newline|'\n'
dedent|''
name|'pool'
op|'['
string|"'count'"
op|']'
op|'+='
number|'1'
newline|'\n'
name|'pool'
op|'['
string|"'devices'"
op|']'
op|'.'
name|'append'
op|'('
name|'dev'
op|')'
newline|'\n'
nl|'\n'
dedent|''
dedent|''
op|'@'
name|'staticmethod'
newline|'\n'
DECL|member|_decrease_pool_count
name|'def'
name|'_decrease_pool_count'
op|'('
name|'pool_list'
op|','
name|'pool'
op|','
name|'count'
op|'='
number|'1'
op|')'
op|':'
newline|'\n'
indent|' '
string|'"""Decrement pool\'s size by count.\n\n If pool becomes empty, remove pool from pool_list.\n """'
newline|'\n'
name|'if'
name|'pool'
op|'['
string|"'count'"
op|']'
op|'>'
name|'count'
op|':'
newline|'\n'
indent|' '
name|'pool'
op|'['
string|"'count'"
op|']'
op|'-='
name|'count'
newline|'\n'
name|'count'
op|'='
number|'0'
newline|'\n'
dedent|''
name|'else'
op|':'
newline|'\n'
indent|' '
name|'count'
op|'-='
name|'pool'
op|'['
string|"'count'"
op|']'
newline|'\n'
name|'pool_list'
op|'.'
name|'remove'
op|'('
name|'pool'
op|')'
newline|'\n'
dedent|''
name|'return'
name|'count'
newline|'\n'
nl|'\n'
DECL|member|remove_device
dedent|''
name|'def'
name|'remove_device'
op|'('
name|'self'
op|','
name|'dev'
op|')'
op|':'
newline|'\n'
indent|' '
string|'"""Remove one device from the first pool that it matches."""'
newline|'\n'
name|'dev_pool'
op|'='
name|'self'
op|'.'
name|'_create_pool_keys_from_dev'
op|'('
name|'dev'
op|')'
newline|'\n'
name|'if'
name|'dev_pool'
op|':'
newline|'\n'
indent|' '
name|'pool'
op|'='
name|'self'
op|'.'
name|'_find_pool'
op|'('
name|'dev_pool'
op|')'
newline|'\n'
name|'if'
name|'not'
name|'pool'
op|':'
newline|'\n'
indent|' '
name|'raise'
name|'exception'
op|'.'
name|'PciDevicePoolEmpty'
op|'('
nl|'\n'
name|'compute_node_id'
op|'='
name|'dev'
op|'.'
name|'compute_node_id'
op|','
name|'address'
op|'='
name|'dev'
op|'.'
name|'address'
op|')'
newline|'\n'
dedent|''
name|'pool'
op|'['
string|"'devices'"
op|']'
op|'.'
name|'remove'
op|'('
name|'dev'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'_decrease_pool_count'
op|'('
name|'self'
op|'.'
name|'pools'
op|','
name|'pool'
op|')'
newline|'\n'
nl|'\n'
DECL|member|get_free_devs
dedent|''
dedent|''
name|'def'
name|'get_free_devs'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'free_devs'
op|'='
op|'['
op|']'
newline|'\n'
name|'for'
name|'pool'
name|'in'
name|'self'
op|'.'
name|'pools'
op|':'
newline|'\n'
indent|' '
name|'free_devs'
op|'.'
name|'extend'
op|'('
name|'pool'
op|'['
string|"'devices'"
op|']'
op|')'
newline|'\n'
dedent|''
name|'return'
name|'free_devs'
newline|'\n'
nl|'\n'
DECL|member|consume_requests
dedent|''
name|'def'
name|'consume_requests'
op|'('
name|'self'
op|','
name|'pci_requests'
op|','
name|'numa_cells'
op|'='
name|'None'
op|')'
op|':'
newline|'\n'
indent|' '
name|'alloc_devices'
op|'='
op|'['
op|']'
newline|'\n'
name|'for'
name|'request'
name|'in'
name|'pci_requests'
op|':'
newline|'\n'
indent|' '
name|'count'
op|'='
name|'request'
op|'.'
name|'count'
newline|'\n'
name|'spec'
op|'='
name|'request'
op|'.'
name|'spec'
newline|'\n'
comment|'# For now, keep the same algorithm as during scheduling:'
nl|'\n'
comment|'# a spec may be able to match multiple pools.'
nl|'\n'
name|'pools'
op|'='
name|'self'
op|'.'
name|'_filter_pools_for_spec'
op|'('
name|'self'
op|'.'
name|'pools'
op|','
name|'spec'
op|')'
newline|'\n'
name|'if'
name|'numa_cells'
op|':'
newline|'\n'
indent|' '
name|'pools'
op|'='
name|'self'
op|'.'
name|'_filter_pools_for_numa_cells'
op|'('
name|'pools'
op|','
name|'numa_cells'
op|')'
newline|'\n'
dedent|''
name|'pools'
op|'='
name|'self'
op|'.'
name|'_filter_non_requested_pfs'
op|'('
name|'request'
op|','
name|'pools'
op|')'
newline|'\n'
comment|'# Failed to allocate the required number of devices'
nl|'\n'
comment|'# Return the devices already allocated back to their pools'
nl|'\n'
name|'if'
name|'sum'
op|'('
op|'['
name|'pool'
op|'['
string|"'count'"
op|']'
name|'for'
name|'pool'
name|'in'
name|'pools'
op|']'
op|')'
op|'<'
name|'count'
op|':'
newline|'\n'
indent|' '
name|'LOG'
op|'.'
name|'error'
op|'('
name|'_LE'
op|'('
string|'"Failed to allocate PCI devices for instance."'
nl|'\n'
string|'" Unassigning devices back to pools."'
nl|'\n'
string|'" This should not happen, since the scheduler"'
nl|'\n'
string|'" should have accurate information, and allocation"'
nl|'\n'
string|'" during claims is controlled via a hold"'
nl|'\n'
string|'" on the compute node semaphore"'
op|')'
op|')'
newline|'\n'
name|'for'
name|'d'
name|'in'
name|'range'
op|'('
name|'len'
op|'('
name|'alloc_devices'
op|')'
op|')'
op|':'
newline|'\n'
indent|' '
name|'self'
op|'.'
name|'add_device'
op|'('
name|'alloc_devices'
op|'.'
name|'pop'
op|'('
op|')'
op|')'
newline|'\n'
dedent|''
name|'return'
name|'None'
newline|'\n'
dedent|''
name|'for'
name|'pool'
name|'in'
name|'pools'
op|':'
newline|'\n'
indent|' '
name|'if'
name|'pool'
op|'['
string|"'count'"
op|']'
op|'>='
name|'count'
op|':'
newline|'\n'
indent|' '
name|'num_alloc'
op|'='
name|'count'
newline|'\n'
dedent|''
name|'else'
op|':'
newline|'\n'
indent|' '
name|'num_alloc'
op|'='
name|'pool'
op|'['
string|"'count'"
op|']'
newline|'\n'
dedent|''
name|'count'
op|'-='
name|'num_alloc'
newline|'\n'
name|'pool'
op|'['
string|"'count'"
op|']'
op|'-='
name|'num_alloc'
newline|'\n'
name|'for'
name|'d'
name|'in'
name|'range'
op|'('
name|'num_alloc'
op|')'
op|':'
newline|'\n'
indent|' '
name|'pci_dev'
op|'='
name|'pool'
op|'['
string|"'devices'"
op|']'
op|'.'
name|'pop'
op|'('
op|')'
newline|'\n'
name|'self'
op|'.'
name|'_handle_device_dependents'
op|'('
name|'pci_dev'
op|')'
newline|'\n'
name|'pci_dev'
op|'.'
name|'request_id'
op|'='
name|'request'
op|'.'
name|'request_id'
newline|'\n'
name|'alloc_devices'
op|'.'
name|'append'
op|'('
name|'pci_dev'
op|')'
newline|'\n'
dedent|''
name|'if'
name|'count'
op|'=='
number|'0'
op|':'
newline|'\n'
indent|' '
name|'break'
newline|'\n'
dedent|''
dedent|''
dedent|''
name|'return'
name|'alloc_devices'
newline|'\n'
nl|'\n'
DECL|member|_handle_device_dependents
dedent|''
name|'def'
name|'_handle_device_dependents'
op|'('
name|'self'
op|','
name|'pci_dev'
op|')'
op|':'
newline|'\n'
indent|' '
string|'"""Remove device dependents or a parent from pools.\n\n In case the device is a PF, all of it\'s dependent VFs should\n be removed from pools count, if these are present.\n When the device is a VF, it\'s parent PF pool count should be\n decreased, unless it is no longer in a pool.\n """'
newline|'\n'
name|'if'
name|'pci_dev'
op|'.'
name|'dev_type'
op|'=='
name|'fields'
op|'.'
name|'PciDeviceType'
op|'.'
name|'SRIOV_PF'
op|':'
newline|'\n'
indent|' '
name|'vfs_list'
op|'='
name|'objects'
op|'.'
name|'PciDeviceList'
op|'.'
name|'get_by_parent_address'
op|'('
nl|'\n'
name|'pci_dev'
op|'.'
name|'_context'
op|','
nl|'\n'
name|'pci_dev'
op|'.'
name|'compute_node_id'
op|','
nl|'\n'
name|'pci_dev'
op|'.'
name|'address'
op|')'
newline|'\n'
name|'if'
name|'vfs_list'
op|':'
newline|'\n'
indent|' '
name|'for'
name|'vf'
name|'in'
name|'vfs_list'
op|':'
newline|'\n'
indent|' '
name|'self'
op|'.'
name|'remove_device'
op|'('
name|'vf'
op|')'
newline|'\n'
dedent|''
dedent|''
dedent|''
name|'elif'
name|'pci_dev'
op|'.'
name|'dev_type'
op|'=='
name|'fields'
op|'.'
name|'PciDeviceType'
op|'.'
name|'SRIOV_VF'
op|':'
newline|'\n'
indent|' '
name|'try'
op|':'
newline|'\n'
indent|' '
name|'parent'
op|'='
name|'pci_dev'
op|'.'
name|'get_by_dev_addr'
op|'('
name|'pci_dev'
op|'.'
name|'_context'
op|','
nl|'\n'
name|'pci_dev'
op|'.'
name|'compute_node_id'
op|','
nl|'\n'
name|'pci_dev'
op|'.'
name|'parent_addr'
op|')'
newline|'\n'
comment|'# Make sure not to decrease PF pool count if this parent has'
nl|'\n'
comment|'# been already removed from pools'
nl|'\n'
name|'if'
name|'parent'
name|'in'
name|'self'
op|'.'
name|'get_free_devs'
op|'('
op|')'
op|':'
newline|'\n'
indent|' '
name|'self'
op|'.'
name|'remove_device'
op|'('
name|'parent'
op|')'
newline|'\n'
dedent|''
dedent|''
name|'except'
name|'exception'
op|'.'
name|'PciDeviceNotFound'
op|':'
newline|'\n'
indent|' '
name|'return'
newline|'\n'
nl|'\n'
dedent|''
dedent|''
dedent|''
op|'@'
name|'staticmethod'
newline|'\n'
DECL|member|_filter_pools_for_spec
name|'def'
name|'_filter_pools_for_spec'
op|'('
name|'pools'
op|','
name|'request_specs'
op|')'
op|':'
newline|'\n'
indent|' '
name|'return'
op|'['
name|'pool'
name|'for'
name|'pool'
name|'in'
name|'pools'
nl|'\n'
name|'if'
name|'utils'
op|'.'
name|'pci_device_prop_match'
op|'('
name|'pool'
op|','
name|'request_specs'
op|')'
op|']'
newline|'\n'
nl|'\n'
dedent|''
op|'@'
name|'staticmethod'
newline|'\n'
DECL|member|_filter_pools_for_numa_cells
name|'def'
name|'_filter_pools_for_numa_cells'
op|'('
name|'pools'
op|','
name|'numa_cells'
op|')'
op|':'
newline|'\n'
comment|"# Some systems don't report numa node info for pci devices, in"
nl|'\n'
comment|'# that case None is reported in pci_device.numa_node, by adding None'
nl|'\n'
comment|'# to numa_cells we allow assigning those devices to instances with'
nl|'\n'
comment|'# numa topology'
nl|'\n'
indent|' '
name|'numa_cells'
op|'='
op|'['
name|'None'
op|']'
op|'+'
op|'['
name|'cell'
op|'.'
name|'id'
name|'for'
name|'cell'
name|'in'
name|'numa_cells'
op|']'
newline|'\n'
comment|'# filter out pools which numa_node is not included in numa_cells'
nl|'\n'
name|'return'
op|'['
name|'pool'
name|'for'
name|'pool'
name|'in'
name|'pools'
name|'if'
name|'any'
op|'('
name|'utils'
op|'.'
name|'pci_device_prop_match'
op|'('
nl|'\n'
name|'pool'
op|','
op|'['
op|'{'
string|"'numa_node'"
op|':'
name|'cell'
op|'}'
op|']'
op|')'
nl|'\n'
name|'for'
name|'cell'
name|'in'
name|'numa_cells'
op|')'
op|']'
newline|'\n'
nl|'\n'
DECL|member|_filter_non_requested_pfs
dedent|''
name|'def'
name|'_filter_non_requested_pfs'
op|'('
name|'self'
op|','
name|'request'
op|','
name|'matching_pools'
op|')'
op|':'
newline|'\n'
comment|'# Remove SRIOV_PFs from pools, unless it has been explicitly requested'
nl|'\n'
comment|'# This is especially needed in cases where PFs and VFs has the same'
nl|'\n'
comment|'# product_id.'
nl|'\n'
indent|' '
name|'if'
name|'all'
op|'('
name|'spec'
op|'.'
name|'get'
op|'('
string|"'dev_type'"
op|')'
op|'!='
name|'fields'
op|'.'
name|'PciDeviceType'
op|'.'
name|'SRIOV_PF'
name|'for'
nl|'\n'
name|'spec'
name|'in'
name|'request'
op|'.'
name|'spec'
op|')'
op|':'
newline|'\n'
indent|' '
name|'matching_pools'
op|'='
name|'self'
op|'.'
name|'_filter_pools_for_pfs'
op|'('
name|'matching_pools'
op|')'
newline|'\n'
dedent|''
name|'return'
name|'matching_pools'
newline|'\n'
nl|'\n'
dedent|''
op|'@'
name|'staticmethod'
newline|'\n'
DECL|member|_filter_pools_for_pfs
name|'def'
name|'_filter_pools_for_pfs'
op|'('
name|'pools'
op|')'
op|':'
newline|'\n'
indent|' '
name|'return'
op|'['
name|'pool'
name|'for'
name|'pool'
name|'in'
name|'pools'
nl|'\n'
name|'if'
name|'not'
name|'pool'
op|'.'
name|'get'
op|'('
string|"'dev_type'"
op|')'
op|'=='
name|'fields'
op|'.'
name|'PciDeviceType'
op|'.'
name|'SRIOV_PF'
op|']'
newline|'\n'
nl|'\n'
DECL|member|_apply_request
dedent|''
name|'def'
name|'_apply_request'
op|'('
name|'self'
op|','
name|'pools'
op|','
name|'request'
op|','
name|'numa_cells'
op|'='
name|'None'
op|')'
op|':'
newline|'\n'
comment|'# NOTE(vladikr): This code maybe open to race conditions.'
nl|'\n'
comment|'# Two concurrent requests may succeed when called support_requests'
nl|'\n'
comment|'# because this method does not remove related devices from the pools'
nl|'\n'
indent|' '
name|'count'
op|'='
name|'request'
op|'.'
name|'count'
newline|'\n'
name|'matching_pools'
op|'='
name|'self'
op|'.'
name|'_filter_pools_for_spec'
op|'('
name|'pools'
op|','
name|'request'
op|'.'
name|'spec'
op|')'
newline|'\n'
name|'if'
name|'numa_cells'
op|':'
newline|'\n'
indent|' '
name|'matching_pools'
op|'='
name|'self'
op|'.'
name|'_filter_pools_for_numa_cells'
op|'('
name|'matching_pools'
op|','
nl|'\n'
name|'numa_cells'
op|')'
newline|'\n'
dedent|''
name|'matching_pools'
op|'='
name|'self'
op|'.'
name|'_filter_non_requested_pfs'
op|'('
name|'request'
op|','
nl|'\n'
name|'matching_pools'
op|')'
newline|'\n'
name|'if'
name|'sum'
op|'('
op|'['
name|'pool'
op|'['
string|"'count'"
op|']'
name|'for'
name|'pool'
name|'in'
name|'matching_pools'
op|']'
op|')'
op|'<'
name|'count'
op|':'
newline|'\n'
indent|' '
name|'return'
name|'False'
newline|'\n'
dedent|''
name|'else'
op|':'
newline|'\n'
indent|' '
name|'for'
name|'pool'
name|'in'
name|'matching_pools'
op|':'
newline|'\n'
indent|' '
name|'count'
op|'='
name|'self'
op|'.'
name|'_decrease_pool_count'
op|'('
name|'pools'
op|','
name|'pool'
op|','
name|'count'
op|')'
newline|'\n'
name|'if'
name|'not'
name|'count'
op|':'
newline|'\n'
indent|' '
name|'break'
newline|'\n'
dedent|''
dedent|''
dedent|''
name|'return'
name|'True'
newline|'\n'
nl|'\n'
DECL|member|support_requests
dedent|''
name|'def'
name|'support_requests'
op|'('
name|'self'
op|','
name|'requests'
op|','
name|'numa_cells'
op|'='
name|'None'
op|')'
op|':'
newline|'\n'
indent|' '
string|'"""Check if the pci requests can be met.\n\n Scheduler checks compute node\'s PCI stats to decide if an\n instance can be scheduled into the node. Support does not\n mean real allocation.\n If numa_cells is provided then only devices contained in\n those nodes are considered.\n """'
newline|'\n'
comment|'# note (yjiang5): this function has high possibility to fail,'
nl|'\n'
comment|'# so no exception should be triggered for performance reason.'
nl|'\n'
name|'pools'
op|'='
name|'copy'
op|'.'
name|'deepcopy'
op|'('
name|'self'
op|'.'
name|'pools'
op|')'
newline|'\n'
name|'return'
name|'all'
op|'('
op|'['
name|'self'
op|'.'
name|'_apply_request'
op|'('
name|'pools'
op|','
name|'r'
op|','
name|'numa_cells'
op|')'
nl|'\n'
name|'for'
name|'r'
name|'in'
name|'requests'
op|']'
op|')'
newline|'\n'
nl|'\n'
DECL|member|apply_requests
dedent|''
name|'def'
name|'apply_requests'
op|'('
name|'self'
op|','
name|'requests'
op|','
name|'numa_cells'
op|'='
name|'None'
op|')'
op|':'
newline|'\n'
indent|' '
string|'"""Apply PCI requests to the PCI stats.\n\n This is used in multiple instance creation, when the scheduler has to\n maintain how the resources are consumed by the instances.\n If numa_cells is provided then only devices contained in\n those nodes are considered.\n """'
newline|'\n'
name|'if'
name|'not'
name|'all'
op|'('
op|'['
name|'self'
op|'.'
name|'_apply_request'
op|'('
name|'self'
op|'.'
name|'pools'
op|','
name|'r'
op|','
name|'numa_cells'
op|')'
nl|'\n'
name|'for'
name|'r'
name|'in'
name|'requests'
op|']'
op|')'
op|':'
newline|'\n'
indent|' '
name|'raise'
name|'exception'
op|'.'
name|'PciDeviceRequestFailed'
op|'('
name|'requests'
op|'='
name|'requests'
op|')'
newline|'\n'
nl|'\n'
DECL|member|__iter__
dedent|''
dedent|''
name|'def'
name|'__iter__'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
comment|"# 'devices' shouldn't be part of stats"
nl|'\n'
indent|' '
name|'pools'
op|'='
op|'['
op|']'
newline|'\n'
name|'for'
name|'pool'
name|'in'
name|'self'
op|'.'
name|'pools'
op|':'
newline|'\n'
indent|' '
name|'tmp'
op|'='
op|'{'
name|'k'
op|':'
name|'v'
name|'for'
name|'k'
op|','
name|'v'
name|'in'
name|'six'
op|'.'
name|'iteritems'
op|'('
name|'pool'
op|')'
name|'if'
name|'k'
op|'!='
string|"'devices'"
op|'}'
newline|'\n'
name|'pools'
op|'.'
name|'append'
op|'('
name|'tmp'
op|')'
newline|'\n'
dedent|''
name|'return'
name|'iter'
op|'('
name|'pools'
op|')'
newline|'\n'
nl|'\n'
DECL|member|clear
dedent|''
name|'def'
name|'clear'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
string|'"""Clear all the stats maintained."""'
newline|'\n'
name|'self'
op|'.'
name|'pools'
op|'='
op|'['
op|']'
newline|'\n'
nl|'\n'
DECL|member|__eq__
dedent|''
name|'def'
name|'__eq__'
op|'('
name|'self'
op|','
name|'other'
op|')'
op|':'
newline|'\n'
indent|' '
name|'return'
name|'cmp'
op|'('
name|'self'
op|'.'
name|'pools'
op|','
name|'other'
op|'.'
name|'pools'
op|')'
op|'=='
number|'0'
newline|'\n'
nl|'\n'
DECL|member|__ne__
dedent|''
name|'def'
name|'__ne__'
op|'('
name|'self'
op|','
name|'other'
op|')'
op|':'
newline|'\n'
indent|' '
name|'return'
name|'not'
op|'('
name|'self'
op|'=='
name|'other'
op|')'
newline|'\n'
nl|'\n'
DECL|member|to_device_pools_obj
dedent|''
name|'def'
name|'to_device_pools_obj'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
string|'"""Return the contents of the pools as a PciDevicePoolList object."""'
newline|'\n'
name|'stats'
op|'='
op|'['
name|'x'
name|'for'
name|'x'
name|'in'
name|'self'
op|']'
newline|'\n'
name|'return'
name|'pci_device_pool'
op|'.'
name|'from_pci_stats'
op|'('
name|'stats'
op|')'
newline|'\n'
dedent|''
dedent|''
endmarker|''
end_unit
| 14.37246 | 1,148 | 0.61167 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 14,664 | 0.575781 |
8d29d50d0c950b859290e95b7cb057e02fb60ee8 | 4,045 | py | Python | profit/models/torch/vae.py | ayushkarnawat/profit | f3c4d601078b52513af6832c3faf75ddafc59ac5 | ["MIT"] | null | null | null | profit/models/torch/vae.py | ayushkarnawat/profit | f3c4d601078b52513af6832c3faf75ddafc59ac5 | ["MIT"] | 1 | 2021-09-15T13:13:12.000Z | 2021-09-15T13:13:12.000Z | profit/models/torch/vae.py | ayushkarnawat/profit | f3c4d601078b52513af6832c3faf75ddafc59ac5 | ["MIT"] | null | null | null |
"""Variational autoencoder model."""
from typing import Tuple
import torch
from torch import nn
from torch.nn import functional as F
class BaseVAE(nn.Module):
"""Base class for creating variational autoencoders (VAEs).
The module is designed to connect user-specified encoder/decoder
layers to form a latent space representation of the data.
A general overview of the model can be described by:
https://lilianweng.github.io/lil-log/2018/08/12/from-autoencoder-to-beta-vae.html
"""
def __init__(self) -> None:
super(BaseVAE, self).__init__()
def encode(self, x: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
"""Builds the encoded representation of the input.
The encoded model outputs the mean and logvar of the latent
space embeddings/distribution, or in more mathematical terms,
        :math:`q(z|x) = \\mathcal{N}(z| \\mu(x), \\sigma(x))`
"""
raise NotImplementedError
    def reparameterize(self, mu: torch.Tensor, logvar: torch.Tensor) -> torch.Tensor:
        """Reparameterization trick.
Computes the latent vector (`z`), which is a compressed low-dim
representation of the input.
This trick allows us to express the gradient of the expectation
as the expectation of the gradient [1]. Additionally, it makes
the variance of the estimate an order of magnitude lower than
without using it. This allows us to compute the gradient during
the backward pass more accurately, with better estimates [2].
References:
-----------
-[1] https://gregorygundersen.com/blog/2018/04/29/reparameterization/
-[2] https://stats.stackexchange.com/a/226136
"""
std = torch.exp(0.5*logvar)
# eps=N(0,I), where the I is an identity matrix of same size as std
eps = torch.randn_like(std)
return mu + std*eps
def decode(self, z: torch.Tensor) -> torch.Tensor:
"""Decodes the sampled latent vector (`z`) into the reconstructed
output (`x'`).
Ideally, the reconstructed output (`x'`) is identical to the
original input (`x`).
"""
raise NotImplementedError
    def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
mu, logvar = self.encode(x)
z = self.reparameterize(mu, logvar)
return self.decode(z), mu, logvar, z
class SequenceVAE(BaseVAE):
"""CbAS VAE model for (one-hot) encoded sequences."""
def __init__(self,
seqlen: int,
vocab_size: int,
hidden_size: int = 64,
latent_size: int = 20) -> None:
super(SequenceVAE, self).__init__()
self.seqlen = seqlen
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.latent_size = latent_size
        # Probabilistic encoder
self.fc1 = nn.Linear(seqlen * vocab_size, hidden_size)
self.fc21 = nn.Linear(hidden_size, latent_size)
self.fc22 = nn.Linear(hidden_size, latent_size)
        # Probabilistic decoder
self.fc3 = nn.Linear(latent_size, hidden_size)
self.fc4 = nn.Linear(hidden_size, seqlen * vocab_size)
# Reshape occurs here (see self.decode())
# size is now: (seqlen * vocab_size) -> (seqlen, vocab_size)
self.fc5 = nn.Linear(vocab_size, vocab_size)
def encode(self, x: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
# Flatten (n, seqlen, vocab_size) -> (n, seqlen * vocab_size)
x = x.view(x.size(0), -1)
h1 = F.relu(self.fc1(x))
return self.fc21(h1), self.fc22(h1)
def decode(self, z: torch.Tensor) -> torch.Tensor:
# Input tensor: Latent vector z = (num_samples, latent_size)
h3 = F.relu(self.fc3(z))
h4 = self.fc4(h3)
reshaped = h4.view(h4.size(0), self.seqlen, self.vocab_size)
# Return logits since F.cross_entropy computes log_softmax internally
return self.fc5(reshaped)
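# Minimal usage sketch (hypothetical shapes):
#   model = SequenceVAE(seqlen=50, vocab_size=20)
#   recon_logits, mu, logvar, z = model(torch.randn(8, 50, 20))
#   # recon_logits: (8, 50, 20); mu, logvar, z: (8, 20) with default latent_size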
| 36.116071 | 90 | 0.634611 | 3,904 | 0.965142 | 0 | 0 | 0 | 0 | 0 | 0 | 1,979 | 0.489246 |
8d2ae38a47c725cb399a9f327008d51a718980eb | 2,037 | py | Python | backend/export/views.py | dmryutov/otus-python-0319-final | de07f36ee4bbd57dbfb16defaf762b08ec41fb0e | ["Apache-2.0"] | null | null | null | backend/export/views.py | dmryutov/otus-python-0319-final | de07f36ee4bbd57dbfb16defaf762b08ec41fb0e | ["Apache-2.0"] | 6 | 2020-06-05T23:05:14.000Z | 2022-02-10T10:42:31.000Z | backend/export/views.py | dmryutov/otus-python-0319-final | de07f36ee4bbd57dbfb16defaf762b08ec41fb0e | ["Apache-2.0"] | null | null | null |
from django.http.response import HttpResponse
from rest_framework import serializers, viewsets
from rest_framework.decorators import action
from rest_framework.permissions import IsAuthenticated
from .excel import Excel
XLSX_MIME = 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet'
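# Registered media type for .xlsx workbooks; combined with the
# Content-Disposition header below, browsers offer the response as a download.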
class ExportViewSet(viewsets.GenericViewSet):
serializer_class = serializers.Serializer
permission_classes = (IsAuthenticated,)
@staticmethod
def download_file(file_name, export_func, *args, **kwargs):
"""
Generate file and send it to client
Args:
file_name (str): Excel file name
export_func (str): Export function
args: Export function args
kwargs: Export function kwargs
Returns:
django.http.response.HttpResponse: HTTP response
"""
response = HttpResponse(content_type=XLSX_MIME)
response['Content-Disposition'] = 'attachment; filename="{}.xlsx"'.format(file_name)
getattr(Excel(file_name), export_func)(*args, **kwargs).save(response)
return response
@action(methods=['post'], detail=False)
def stl(self, request):
"""
Export time series decomposition results to Excel file
"""
self.check_permissions(request)
data = request.data.get('data', [])
result = request.data.get('result', {})
return self.download_file('STL', 'export_stl', data, result)
@action(methods=['post'], detail=False)
def forecast(self, request):
"""
Export time series forecasting results to Excel file
"""
self.check_permissions(request)
data = request.data.get('data', [])
result = request.data.get('result', {})
date_start = request.data.get('date_start', '2018-01-01')
period_type = request.data.get('period_type', 'W')
return self.download_file('Forecast', 'export_forecast', data, result,
date_start, period_type)
| 32.333333 | 92 | 0.650957 | 1,731 | 0.849779 | 0 | 0 | 1,578 | 0.774669 | 0 | 0 | 725 | 0.355916 |
8d2b9627ee560b695980d399a9b852afb9663aac | 1,593 | py | Python | tests/test_clamp.py | josemolinagarcia/maya-math-nodes | 1f83eef1d1efe0b0c3dbb1477ca31ed9f8911ee4 | ["MIT"] | 148 | 2018-01-12T20:30:45.000Z | 2022-02-28T05:20:46.000Z | tests/test_clamp.py | josemolinagarcia/maya-math-nodes | 1f83eef1d1efe0b0c3dbb1477ca31ed9f8911ee4 | ["MIT"] | 13 | 2018-01-17T18:02:13.000Z | 2021-11-23T06:06:24.000Z | tests/test_clamp.py | josemolinagarcia/maya-math-nodes | 1f83eef1d1efe0b0c3dbb1477ca31ed9f8911ee4 | ["MIT"] | 41 | 2018-01-16T01:41:29.000Z | 2021-08-24T01:27:56.000Z |
# Copyright (c) 2018 Serguei Kalentchouk et al. All rights reserved.
# Use of this source code is governed by an MIT license that can be found in the LICENSE file.
from node_test_case import NodeTestCase, cmds
class TestClamp(NodeTestCase):
def test_clamp(self):
node = self.create_node('Clamp', {'input': 5.0, 'inputMin': 0.0, 'inputMax': 2.0}, 2.0)
cmds.setAttr('{0}.{1}'.format(node, 'input'), -1.0)
self.assertAlmostEqual(cmds.getAttr('{0}.output'.format(node)), 0.0)
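        # Pattern for the clamp tests: the node is created with an input above
        # inputMax (expecting the max), then the input is driven below
        # inputMin and the output must clamp to the min.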
def test_clamp_int(self):
node = self.create_node('ClampInt', {'input': 5, 'inputMin': 0, 'inputMax': 2}, 2)
cmds.setAttr('{0}.{1}'.format(node, 'input'), -1)
self.assertAlmostEqual(cmds.getAttr('{0}.output'.format(node)), 0)
def test_clamp_angle(self):
node = self.create_node('ClampAngle', {'input': 5.0, 'inputMin': 0.0, 'inputMax': 2.0}, 2.0)
cmds.setAttr('{0}.{1}'.format(node, 'input'), -1.0)
self.assertAlmostEqual(cmds.getAttr('{0}.output'.format(node)), 0.0)
def test_remap(self):
self.create_node('Remap', {'input': 0.5, 'low1': 0.0, 'high1': 1.0, 'low2': 0.0, 'high2': 10.0}, 5.0)
def test_remap_angle(self):
self.create_node('RemapAngle', {'input': 0.5, 'low1': 0.0, 'high1': 1.0, 'low2': 0.0, 'high2': 10.0}, 5.0)
def test_remap_int(self):
self.create_node('RemapInt', {'input': 5, 'low1': 0, 'high1': 10, 'low2': 0, 'high2': 100}, 50)
def test_smoothstep(self):
self.create_node('Smoothstep', {'input': 0.3}, 0.216)
| 44.25 | 114 | 0.595731 | 1,380 | 0.86629 | 0 | 0 | 0 | 0 | 0 | 0 | 503 | 0.315756 |
8d2bec83c642f547afb331d447ae8ff19041fd5a | 1,111 | py | Python | src/tests/tests_get_formatted_items.py | kazqvaizer/checklistbot | f715280fbe7035bc2ce4f69cbf95595d9fe3a225 | ["MIT"] | 5 | 2020-10-06T13:42:45.000Z | 2021-12-21T07:35:08.000Z | src/tests/tests_get_formatted_items.py | kazqvaizer/checklistbot | f715280fbe7035bc2ce4f69cbf95595d9fe3a225 | ["MIT"] | null | null | null | src/tests/tests_get_formatted_items.py | kazqvaizer/checklistbot | f715280fbe7035bc2ce4f69cbf95595d9fe3a225 | ["MIT"] | null | null | null |
import pytest
from models import TodoItem
pytestmark = [
pytest.mark.usefixtures("use_db"),
]
@pytest.fixture
def chat(factory):
return factory.chat()
@pytest.fixture
def items(factory, chat):
return [
factory.item(chat=chat, text="Hello"),
factory.item(chat=chat, text="Nice!"),
]
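# The tests below assert insertion-order numbering and <s>...</s> HTML
# strike-through tags (presumably for Telegram's HTML parse mode) around
# checked items.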
def test_format_without_strike(items, chat):
lines = chat.get_formatted_items().split("\n")
assert len(lines) == 2
assert "1. Hello" == lines[0]
assert "2. Nice!" == lines[1]
def test_format_with_strike(items, chat):
items[0].is_checked = True
items[0].save()
lines = chat.get_formatted_items().split("\n")
assert len(lines) == 2
assert "<s>1. Hello</s>" == lines[0]
assert "2. Nice!" == lines[1]
def test_respect_order_by_id(items, chat):
TodoItem.update(id=100500).where(TodoItem.id == items[0].id).execute()
lines = chat.get_formatted_items().split("\n")
assert len(lines) == 2
assert "1. Nice!" == lines[0]
assert "2. Hello" == lines[1]
def test_no_items_is_okay(chat):
assert chat.get_formatted_items() == ""
| 20.574074 | 74 | 0.640864 | 0 | 0 | 0 | 0 | 214 | 0.192619 | 0 | 0 | 103 | 0.092709 |
8d2cd1060b91fea7d66c9afe4a0c6e646802593b | 3,945 | py | Python | web/multilingual/database.py | mahoyen/web | 1d190a86e3277315804bfcc0b8f9abd4f9c1d780 | ["MIT"] | null | null | null | web/multilingual/database.py | mahoyen/web | 1d190a86e3277315804bfcc0b8f9abd4f9c1d780 | ["MIT"] | null | null | null | web/multilingual/database.py | mahoyen/web | 1d190a86e3277315804bfcc0b8f9abd4f9c1d780 | ["MIT"] | null | null | null |
import copy
import json
from django.contrib import admin
from django.db import models
from web.multilingual.data_structures import MultiLingualTextStructure
from web.multilingual.form import MultiLingualFormField, MultiLingualRichTextFormField, \
MultiLingualRichTextUploadingFormField
from web.multilingual.widgets import MultiLingualTextInput, MultiLingualRichText, MultiLingualRichTextUploading
class MultiLingualTextField(models.TextField):
"""
A database field for multilingual text fields
"""
widget = MultiLingualTextInput
form_class = MultiLingualFormField
use_default_if_empty = True
def __init__(self, *args, **kwargs):
# Allow for specification of a widget on creation, to allow for both textarea and text input
self.widget = kwargs.pop("widget", self.widget)
self.use_default_if_empty = kwargs.pop("use_default_if_empty", self.use_default_if_empty)
super().__init__(*args, **kwargs)
def to_python(self, value):
"""
Deserialization of the given value
"""
if value is None:
return value
if isinstance(value, MultiLingualTextStructure):
return value
return MultiLingualTextStructure(value, self.use_default_if_empty)
def get_prep_value(self, value):
"""
Converts the given value to a value that can be saved in the database
"""
if value is None:
return value
if isinstance(value, MultiLingualTextStructure):
# Save the content as a JSON object with languages as keys
return json.dumps({language: value[language] for language in value.supported_languages})
return value
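    # Django calls from_db_value on every fetch, so in Python the field value
    # is always a MultiLingualTextStructure, never the raw JSON string.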
def from_db_value(self, value, expression, connection):
"""
Converts the database value to the python representation
"""
return MultiLingualTextStructure(value, self.use_default_if_empty)
def formfield(self, **kwargs):
"""
Sets up the form field
"""
defaults = {"form_class": self.form_class, "widget": self.widget}
defaults.update(kwargs)
return super().formfield(**defaults)
class MultiLingualRichTextField(MultiLingualTextField):
    # CKEditor has specific requirements for its form class and widget
widget = MultiLingualRichText
form_class = MultiLingualRichTextFormField
class MultiLingualRichTextUploadingField(MultiLingualTextField):
    # CKEditor has specific requirements for its form class and widget
widget = MultiLingualRichTextUploading
form_class = MultiLingualRichTextUploadingFormField
class MultiLingualFieldAdmin(admin.ModelAdmin):
"""
Django admin does not render MultiValue fields correctly. This ModelAdmin object overrides the default Django admin
rendering of the MultiLingual fields.
"""
def formfield_for_dbfield(self, db_field, request, **kwargs):
# Want to override the Django admin fields
if isinstance(db_field, MultiLingualTextField):
properties = {}
for key, value in db_field.widget.__dict__.items():
try:
# Need to perform deep copies in case of mutable properties
properties[key] = copy.deepcopy(value)
except TypeError:
# Some class properties are not possible to copy. These will not be mutable anyways
properties[key] = value
# Want to copy widget, as to not override the template for the normal forms
widget = type("AdminMultiLingualTextField", (db_field.widget,), properties)
# Different template for admin page, without Semantic UI
widget.template_name = "web/forms/widgets/admin_multi_lingual_text_field.html"
return db_field.formfield(widget=widget, **kwargs)
return super().formfield_for_dbfield(db_field, request, **kwargs)
| 39.848485 | 119 | 0.694043 | 3,529 | 0.89455 | 0 | 0 | 0 | 0 | 0 | 0 | 1,249 | 0.316603 |
8d2fec927240532eb03988da6b6277edf3bec73d | 2,859 | py | Python | cart/tests/test_views.py | mohsenamoon1160417237/ECommerce-app | 4cca492214b04b56f625aef2a2979956a8256710 | ["MIT"] | null | null | null | cart/tests/test_views.py | mohsenamoon1160417237/ECommerce-app | 4cca492214b04b56f625aef2a2979956a8256710 | ["MIT"] | null | null | null | cart/tests/test_views.py | mohsenamoon1160417237/ECommerce-app | 4cca492214b04b56f625aef2a2979956a8256710 | ["MIT"] | null | null | null |
from django.test import TestCase
from shop.models import Product
from django.contrib.auth.models import User
from coupons.forms import CouponForm
class CartAddViewTest(TestCase):
def setUp(self):
self.data = {"quantity" : 2,
"update" : False}
self.product = Product.objects.create(name='clothes',
description='clothes',
price=12.00
)
self.product.save()
self.user = User.objects.create(username='mohsen' ,
email='[email protected]' ,
password='mohsen1160417237')
self.user.save()
self.url = '/cart/add/{}/'.format(self.product.id)
def test_get_method_not_allowed(self):
response = self.client.get(self.url , follow=True)
self.assertEqual(response.status_code , 405)
def test_cart_add_user_authenticated(self):
self.client.force_login(self.user)
response = self.client.post(self.url , data=self.data , follow=True)
redirect_url = response.request['PATH_INFO']
self.assertEqual(response.status_code , 200)
self.assertEqual(redirect_url , '/cart/detail/')
def test_cart_add_user_not_authenticated(self):
self.client.logout()
response = self.client.post(self.url , data=self.data , follow=True)
redirect_url = response.request['PATH_INFO']
self.assertEqual(response.status_code , 200)
self.assertEqual(redirect_url , '/account/login/')
class CartRemoveViewTest(TestCase):
def setUp(self):
self.product = Product.objects.create(name='clothes',
description='clothes',
price=12.00
)
self.product.save()
self.url = '/cart/remove/{}/'.format(self.product.id)
def test_get_method_not_allowed(self):
response = self.client.get(self.url , follow=True)
self.assertEqual(response.status_code , 405)
def test_cart_remove_ok(self):
response = self.client.post(self.url , follow=True)
redirect_url = response.request['PATH_INFO']
self.assertEqual(response.status_code , 200)
self.assertEqual(redirect_url , '/cart/detail/')
class CartDetailViewTest(TestCase):
def setUp(self):
        self.data = {}
self.url = '/cart/detail/'
def test_cart_detail_ok(self):
response = self.client.post(self.url)
self.assertEqual(response.status_code , 200)
| 28.878788 | 77 | 0.550892 | 2,679 | 0.937041 | 0 | 0 | 0 | 0 | 0 | 0 | 233 | 0.081497 |
8d341997147380f82b39848b173c8f836285f331 | 2,134 | py | Python | tests/conftest.py | gpontesss/botus_receptus | bf29f5f70a2e7ae3548a44287c636515f78e7e77 | ["BSD-3-Clause"] | 3 | 2019-04-15T01:45:46.000Z | 2020-04-07T13:31:19.000Z | tests/conftest.py | gpontesss/botus_receptus | bf29f5f70a2e7ae3548a44287c636515f78e7e77 | ["BSD-3-Clause"] | 244 | 2020-04-20T22:10:23.000Z | 2022-03-31T23:03:48.000Z | tests/conftest.py | gpontesss/botus_receptus | bf29f5f70a2e7ae3548a44287c636515f78e7e77 | ["BSD-3-Clause"] | 1 | 2021-11-08T08:52:32.000Z | 2021-11-08T08:52:32.000Z |
from __future__ import annotations
import asyncio
from typing import Any
import asynctest.mock # type: ignore
import pytest # type: ignore
import pytest_mock._util # type: ignore
pytest_mock._util._mock_module = asynctest.mock
class EventLoopClockAdvancer:
"""
A helper object that when called will advance the event loop's time. If the
call is awaited, the caller task will wait an iteration for the update to
wake up any awaiting handlers.
"""
__slots__ = ("offset", "loop", "sleep_duration", "_base_time")
def __init__(self, loop, sleep_duration=1e-4):
self.offset = 0.0
self._base_time = loop.time
self.loop = loop
self.sleep_duration = sleep_duration
# incorporate offset timing into the event loop
self.loop.time = self.time
def time(self):
"""
Return the time according to the event loop's clock. The time is
adjusted by an offset.
"""
return self._base_time() + self.offset
async def __call__(self, seconds):
"""
Advance time by a given offset in seconds. Returns an awaitable
that will complete after all tasks scheduled for after advancement
of time are proceeding.
"""
# sleep so that the loop does everything currently waiting
await asyncio.sleep(self.sleep_duration)
if seconds > 0:
# advance the clock by the given offset
self.offset += seconds
# Once the clock is adjusted, new tasks may have just been
# scheduled for running in the next pass through the event loop
await asyncio.sleep(self.sleep_duration)
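# Usage sketch: awaiting `advance_time(30)` inside a test jumps the loop clock
# 30 seconds forward, firing callbacks scheduled with loop.call_later(30, ...)
# without real waiting.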
@pytest.fixture
def advance_time(event_loop):
return EventLoopClockAdvancer(event_loop)
@pytest.fixture
def mock_aiohttp(mocker: Any) -> None:
mocker.patch('aiohttp.ClientSession', autospec=True)
@pytest.fixture
def mock_discord_bot(mocker: Any) -> None:
mocker.patch('discord.ext.commands.Bot')
@pytest.fixture(autouse=True)
def add_async_mocks(mocker: Any) -> None:
mocker.CoroutineMock = mocker.mock_module.CoroutineMock
| 28.837838 | 79 | 0.680412 | 1,450 | 0.679475 | 0 | 0 | 436 | 0.204311 | 666 | 0.31209 | 915 | 0.428772 |
8d352ba96be56207cce46e2dc458765a09de6f97 | 1,247 | py | Python | Shark_Training/pyimagesearch/preprocessing/meanpreprocessor.py | crpurcell/MQ_DPI_Release | 97444513e8b8d48ec91ff8a43b9dfaed0da029f9 | ["MIT"] | null | null | null | Shark_Training/pyimagesearch/preprocessing/meanpreprocessor.py | crpurcell/MQ_DPI_Release | 97444513e8b8d48ec91ff8a43b9dfaed0da029f9 | ["MIT"] | null | null | null | Shark_Training/pyimagesearch/preprocessing/meanpreprocessor.py | crpurcell/MQ_DPI_Release | 97444513e8b8d48ec91ff8a43b9dfaed0da029f9 | ["MIT"] | null | null | null |
#=============================================================================#
#                                                                             #
#  MODIFIED: 15-Jan-2019 by C. Purcell                                        #
#                                                                             #
#=============================================================================#
import cv2
#-----------------------------------------------------------------------------#
class MeanPreprocessor:
def __init__(self, rMean, gMean, bMean, rgbOrder=True):
self.rMean = rMean
self.gMean = gMean
self.bMean = bMean
self.rgbOrder = rgbOrder
def preprocess(self, image):
# Split the image into its respective RGB channels
if self.rgbOrder:
(R, G, B) = cv2.split(image.astype("float32"))
else:
(B, G, R) = cv2.split(image.astype("float32"))
# Subtract the means for each channel
R -= self.rMean
G -= self.gMean
B -= self.bMean
# Merge the channels back together and return the image
if self.rgbOrder:
return cv2.merge([R, G, B])
else:
return cv2.merge([B, G, R])
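# Example (hypothetical ImageNet-style channel means; cv2 loads images as BGR):
#   mp = MeanPreprocessor(123.68, 116.779, 103.939, rgbOrder=False)
#   zero_centered = mp.preprocess(cv2.imread("frame.jpg"))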
| 35.628571 | 79 | 0.36648 | 753 | 0.603849 | 0 | 0 | 0 | 0 | 0 | 0 | 634 | 0.50842 |
8d36012ec39c8b5de0335c08778adaf22f20af3c | 985 | py | Python | aiida_quantumespresso/parsers/constants.py | unkcpz/aiida-quantumespresso | fbac0993bb8b6cdeba85717453debcf0ab062b5a | ["MIT"] | null | null | null | aiida_quantumespresso/parsers/constants.py | unkcpz/aiida-quantumespresso | fbac0993bb8b6cdeba85717453debcf0ab062b5a | ["MIT"] | null | null | null | aiida_quantumespresso/parsers/constants.py | unkcpz/aiida-quantumespresso | fbac0993bb8b6cdeba85717453debcf0ab062b5a | ["MIT"] | null | null | null |
# -*- coding: utf-8 -*-
"""
Physical or mathematical constants.
Since every code has its own conversion units, this module defines what
QE understands as for an eV or other quantities.
Whenever possible, we try to use the constants defined in
:py:mod:`aiida.common.constants`, but if some constants are slightly different
among different codes (e.g., different standard definition), we define
the constants in this file.
"""
from aiida.common.constants import (
ang_to_m,
bohr_si,
bohr_to_ang,
hartree_to_ev,
invcm_to_THz,
ry_si,
ry_to_ev,
timeau_to_sec,
)
# From the definition of Quantum ESPRESSO, conversion from atomic mass
# units to Rydberg units:
# REAL(DP), PARAMETER :: AMU_SI = 1.660538782E-27_DP ! Kg
# REAL(DP), PARAMETER :: ELECTRONMASS_SI = 9.10938215E-31_DP ! Kg
# REAL(DP), PARAMETER :: AMU_AU = AMU_SI / ELECTRONMASS_SI
# REAL(DP), PARAMETER :: AMU_RY = AMU_AU / 2.0_DP
amu_Ry = 911.4442421323
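# A quick sanity check of the value above from the constants quoted in the
# comment (illustrative, not part of the original module):
#
# >>> 1.660538782e-27 / 9.10938215e-31 / 2.0   # AMU_SI / ELECTRONMASS_SI / 2
# 911.4442421...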
| 31.774194 | 77 | 0.700508 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 786 | 0.79797 |
8d3e794674c7c132a4877a4a375649bf2399c45b | 2,639 | py | Python | venv/lib/python3.8/site-packages/keras/api/_v2/keras/applications/__init__.py | JIANG-CX/data_labeling | 8d2470bbb537dfc09ed2f7027ed8ee7de6447248 | [
"MIT"
]
| 1 | 2021-05-24T10:08:51.000Z | 2021-05-24T10:08:51.000Z | venv/lib/python3.8/site-packages/keras/api/_v2/keras/applications/__init__.py | JIANG-CX/data_labeling | 8d2470bbb537dfc09ed2f7027ed8ee7de6447248 | [
"MIT"
]
| null | null | null | venv/lib/python3.8/site-packages/keras/api/_v2/keras/applications/__init__.py | JIANG-CX/data_labeling | 8d2470bbb537dfc09ed2f7027ed8ee7de6447248 | [
"MIT"
]
| null | null | null | # This file is MACHINE GENERATED! Do not edit.
# Generated by: tensorflow/python/tools/api/generator/create_python_api.py script.
"""Public API for tf.keras.applications namespace.
"""
from __future__ import print_function as _print_function
import sys as _sys
from keras.api._v2.keras.applications import densenet
from keras.api._v2.keras.applications import efficientnet
from keras.api._v2.keras.applications import imagenet_utils
from keras.api._v2.keras.applications import inception_resnet_v2
from keras.api._v2.keras.applications import inception_v3
from keras.api._v2.keras.applications import mobilenet
from keras.api._v2.keras.applications import mobilenet_v2
from keras.api._v2.keras.applications import mobilenet_v3
from keras.api._v2.keras.applications import nasnet
from keras.api._v2.keras.applications import resnet
from keras.api._v2.keras.applications import resnet50
from keras.api._v2.keras.applications import resnet_v2
from keras.api._v2.keras.applications import vgg16
from keras.api._v2.keras.applications import vgg19
from keras.api._v2.keras.applications import xception
from keras.applications.densenet import DenseNet121
from keras.applications.densenet import DenseNet169
from keras.applications.densenet import DenseNet201
from keras.applications.efficientnet import EfficientNetB0
from keras.applications.efficientnet import EfficientNetB1
from keras.applications.efficientnet import EfficientNetB2
from keras.applications.efficientnet import EfficientNetB3
from keras.applications.efficientnet import EfficientNetB4
from keras.applications.efficientnet import EfficientNetB5
from keras.applications.efficientnet import EfficientNetB6
from keras.applications.efficientnet import EfficientNetB7
from keras.applications.inception_resnet_v2 import InceptionResNetV2
from keras.applications.inception_v3 import InceptionV3
from keras.applications.mobilenet import MobileNet
from keras.applications.mobilenet_v2 import MobileNetV2
from keras.applications.mobilenet_v3 import MobileNetV3Large
from keras.applications.mobilenet_v3 import MobileNetV3Small
from keras.applications.nasnet import NASNetLarge
from keras.applications.nasnet import NASNetMobile
from keras.applications.resnet import ResNet101
from keras.applications.resnet import ResNet152
from keras.applications.resnet import ResNet50
from keras.applications.resnet_v2 import ResNet101V2
from keras.applications.resnet_v2 import ResNet152V2
from keras.applications.resnet_v2 import ResNet50V2
from keras.applications.vgg16 import VGG16
from keras.applications.vgg19 import VGG19
from keras.applications.xception import Xception
del _print_function
| 47.981818 | 82 | 0.869269 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 182 | 0.068966 |
8d3ebf8c27b4787edb5db6336b9fad286f003b92 | 97 | py | Python | flash/vision/embedding/__init__.py | alvin-chang/lightning-flash | 481d4d369ff0a5d8c2b2d9e4970c5608a92b3ff5 | [
"Apache-2.0"
]
| 2 | 2021-06-25T08:42:36.000Z | 2021-06-25T08:49:29.000Z | flash/vision/embedding/__init__.py | alvin-chang/lightning-flash | 481d4d369ff0a5d8c2b2d9e4970c5608a92b3ff5 | [
"Apache-2.0"
]
| null | null | null | flash/vision/embedding/__init__.py | alvin-chang/lightning-flash | 481d4d369ff0a5d8c2b2d9e4970c5608a92b3ff5 | [
"Apache-2.0"
]
| null | null | null | from flash.vision.embedding.image_embedder_model import ImageEmbedder, ImageEmbedderDataPipeline
| 48.5 | 96 | 0.907216 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
8d3f8941dd6434ce1537415533cd51f289916f52 | 5,554 | py | Python | configstruct/config_struct.py | bradrf/configstruct | aeea8fbba1e2daa0a0c38eeb9622d1716c0bb3e8 | [
"MIT"
]
| null | null | null | configstruct/config_struct.py | bradrf/configstruct | aeea8fbba1e2daa0a0c38eeb9622d1716c0bb3e8 | [
"MIT"
]
| 16 | 2016-10-13T09:53:46.000Z | 2022-03-24T15:04:51.000Z | configstruct/config_struct.py | bradrf/configstruct | aeea8fbba1e2daa0a0c38eeb9622d1716c0bb3e8 | [
"MIT"
]
| null | null | null | import os
import sys
import logging
from configparser import ConfigParser
from .open_struct import OpenStruct
from .section_struct import SectionStruct
# TODO: use file lock when read/write
def choose_theirs(section, option, mine, theirs):
'''Always prefer values for keys from file.'''
return theirs
def choose_mine(section, option, mine, theirs):
'''Always prefer values for keys in memory.'''
return mine
LOG_LEVELS = ['debug-all', 'debug', 'info', 'warning', 'error', 'critical']
LOG_OPTIONS = {'log_level': 'info', 'log_file': 'STDERR'}
class OtherLoggingFilter(logging.Filter):
'''Quell logs from other modules using a different minimum level.'''
def __init__(self, whitelisted_module, minimum_other_level):
super(self.__class__, self).__init__(whitelisted_module)
self._minimum_other_level = minimum_other_level
def filter(self, record):
rc = super(self.__class__, self).filter(record)
if rc != 0:
return rc # matched the whitelisted module
return record.levelno >= self._minimum_other_level
class ConfigStruct(OpenStruct):
'''Provides simplified access for managing typed configuration options saved in a file.
:param config_file: path to file that should house configuration items.
:param log_options_parent: option key to use if this instance is expected to use the
`LOG_OPTIONS` default values and allow configuration of basic logging
:param sections_defaults: options that are provided as defaults (will be overridden by any
options read from the `config_file`)
'''
def __init__(self, config_file, log_options_parent=None, **sections_defaults):
super(ConfigStruct, self).__init__()
self._config_file = config_file
self._log_options_parent = log_options_parent
if log_options_parent:
parent_options = sections_defaults.get(log_options_parent, {})
sections_defaults[log_options_parent] = LOG_OPTIONS.copy()
sections_defaults[log_options_parent].update(parent_options)
for (name, items) in sections_defaults.items():
self[name] = SectionStruct(name, **items)
self._load(choose_theirs) # because above were basic defaults for the keys
def configure_basic_logging(self, main_module_name, **kwargs):
'''Use common logging options to configure all logging.
Basic logging configuration is used to set levels for all logs from the main module and to
filter out logs from other modules unless they are of one level in priority higher.
:param main_module_name: name of the primary module for normal logging
'''
if not self._log_options_parent:
raise ValueError('Missing log_options_parent')
options = self[self._log_options_parent]
log_level_index = LOG_LEVELS.index(options.log_level)
log_kwargs = {
            # 'debug-all' is not a real logging level name; map it to DEBUG
            'level': getattr(logging, options.log_level.upper().replace('DEBUG-ALL', 'DEBUG')),
'format': '[%(asctime)s #%(process)d] %(levelname)-8s %(name)-12s %(message)s',
'datefmt': '%Y-%m-%dT%H:%M:%S%z',
}
if options.log_file == 'STDERR':
log_kwargs['stream'] = sys.stderr
elif options.log_file == 'STDOUT':
log_kwargs['stream'] = sys.stdout
else:
log_kwargs['filename'] = options.log_file
log_kwargs.update(kwargs) # allow overrides from caller
logging.basicConfig(**log_kwargs)
        # now filter out any other module's logging unless it's one level
        # above the main level (clamped so 'critical' does not index past
        # the end of LOG_LEVELS)
        other_index = min(log_level_index + 1, len(LOG_LEVELS) - 1)
        other_log_level = getattr(logging, LOG_LEVELS[other_index].upper())
other_filter = OtherLoggingFilter(main_module_name, other_log_level)
for handler in logging.root.handlers:
handler.addFilter(other_filter)
def save(self, conflict_resolver=choose_mine):
'''Save all options in memory to the `config_file`.
Options are read once more from the file (to allow other writers to save configuration),
keys in conflict are resolved, and the final results are written back to the file.
:param conflict_resolver: a simple lambda or function to choose when an option key is
provided from an outside source (THEIRS, usually a file on disk) but is also already
set on this ConfigStruct (MINE)
'''
config = self._load(conflict_resolver) # in case some other process has added items
        # ConfigParser.write() expects a text-mode file object
        with open(self._config_file, 'w') as cf:
config.write(cf)
######################################################################
# private
def _load(self, resolver):
config = ConfigParser()
if os.path.exists(self._config_file):
            with open(self._config_file) as cf:
                # read_file (successor of the deprecated readfp) is used
                # because read() somehow circumvents mockfs in tests
                config.read_file(cf)
loaded = self._sync_sections_with(config, resolver)
self._add_new_sections(config, loaded)
return config
def _sync_sections_with(self, config, resolver):
loaded = set()
for name in config.sections():
if name not in self:
self[name] = SectionStruct(name)
self[name].sync_with(config, resolver)
loaded.add(name)
return loaded
def _add_new_sections(self, config, seen):
for name in self:
if name not in seen:
self[name].sync_with(config, choose_mine) # new ones, so always "mine"
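# A minimal usage sketch (file path, section and option names are
# illustrative):
#
# config = ConfigStruct('~/.myapp.cfg', log_options_parent='logging',
#                       server={'host': 'localhost', 'port': 8080})
# config.configure_basic_logging('myapp')
# config.server.port = 9090
# config.save()  # conflicting keys resolved with choose_mine by default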
| 40.540146 | 100 | 0.659705 | 4,982 | 0.897011 | 0 | 0 | 0 | 0 | 0 | 0 | 2,203 | 0.396651 |
8d4042ed9b0586457ce903d2cc6db6a880c03485 | 10,327 | py | Python | test_apps/python_app/tests/compiler_test.py | Origen-SDK/o2 | 5b0f9a6d113ddebc73c7ee224931e8b2d0301794 | [
"MIT"
]
| null | null | null | test_apps/python_app/tests/compiler_test.py | Origen-SDK/o2 | 5b0f9a6d113ddebc73c7ee224931e8b2d0301794 | [
"MIT"
]
| 127 | 2019-11-23T17:09:35.000Z | 2021-09-02T11:06:20.000Z | test_apps/python_app/tests/compiler_test.py | Origen-SDK/o2 | 5b0f9a6d113ddebc73c7ee224931e8b2d0301794 | [
"MIT"
]
| null | null | null | import origen # pylint: disable=import-error
import pytest, pathlib, os, stat, abc
from os import access, W_OK, X_OK, R_OK
from tests.shared import clean_falcon, clean_compiler, tmp_dir
def user_compiler():
''' End users should access the compiler via ``origen.app.compiler``. '''
return origen.app.compiler
MakoRenderer = origen.compiler.MakoRenderer
# JinjaRenderer = origen.compiler.JinjaRenderer
def test_compiler_inits(clean_falcon):
assert isinstance(user_compiler(), origen.compiler.Compiler) == True
assert user_compiler().stack == []
assert user_compiler().renders == []
assert user_compiler().output_files == []
assert 'mako' in user_compiler().renderers
assert user_compiler().renderers['mako'] is MakoRenderer
def test_compiler_selects_appropriate_syntax(clean_falcon):
test = "myfile.txt.mako"
assert user_compiler().select_syntax(test) == 'mako'
assert user_compiler().select_syntax(pathlib.Path(test)) == 'mako'
test = "myfile.txt.jinja"
assert user_compiler().select_syntax(test) == 'jinja'
assert user_compiler().select_syntax(pathlib.Path(test)) == 'jinja'
test = "myfile.txt"
assert user_compiler().select_syntax(test) is None
assert user_compiler().select_syntax(pathlib.Path(test)) is None
def test_compiler_text_render_requires_syntax(clean_falcon):
with pytest.raises(origen.compiler.ExplicitSyntaxRequiredError):
user_compiler().render("Test...", direct_src=True)
class FixtureCompilerTest(abc.ABC):
''' Fixture conformance testing the child renderer
'''
    @property
    @abc.abstractmethod
    def extension(self):
        raise NotImplementedError
    @property
    @abc.abstractmethod
    def syntax(self):
        raise NotImplementedError
@property
def str_render(self):
return "Hello " + self.templatify('"Origen"') + "!"
@property
def str_render_with_standard_context(self):
return f"Hello from Origen version {self.templatify('origen.version')}!"
@property
def str_render_with_additional_context(self):
return f"Hello from template compiler \"{self.templatify('test_renderer_name')}\"!"
@property
def expected_str_render(self):
return "Hello Origen!"
@property
def expected_str_render_with_standard_context(self):
# Make sure origen.version isn't woefully broken
assert isinstance(origen.version, str)
assert len(origen.version) > 0
return f"Hello from Origen version {origen.version}!"
@property
def expected_str_render_with_additional_context(self):
return f"Hello from template compiler \"{self.syntax}\"!"
@property
def dummy_input_filename(self):
return pathlib.Path(
str(self.expected_output_filename) + f'.{self.extension}')
@property
def expected_output_filename(self):
        return tmp_dir().joinpath('test_file.txt')
@property
def expected_default_output_filename(self):
        return origen.app.output_dir.joinpath('compiled/test_file.txt')
@property
def input_filename(self):
return origen.root.joinpath('templates/dut_info.txt' +
f'.{self.extension}')
@property
def output_filename(self):
return tmp_dir().joinpath('dut_info.txt')
@property
def expected_dut_info_output(self):
return "\n".join([
self.expected_str_render_with_standard_context,
self.expected_str_render_with_additional_context,
'The application name is "example"'
])
def test_compiler_resolves_default_filenames(self):
# Test as string
f = str(self.dummy_input_filename)
r = user_compiler().resolve_filename(f)
assert r == self.expected_default_output_filename
# Test as pathlib.Path
assert user_compiler().resolve_filename(
self.dummy_input_filename) == self.expected_default_output_filename
def test_compiler_resolves_filenames(self):
# Test as string
assert user_compiler().resolve_filename(
str(self.dummy_input_filename),
output_dir=tmp_dir()) == self.expected_output_filename
# Test as pathlib.Path
assert user_compiler().resolve_filename(
self.dummy_input_filename,
output_dir=tmp_dir()) == self.expected_output_filename
@property
def additional_context(self):
return {'test_renderer_name': self.syntax}
def test_render_file(self):
''' Test that the renderer can render a given file '''
rendered = user_compiler().render(self.input_filename,
syntax=self.syntax,
direct_src=False,
output_dir=tmp_dir(),
context=self.additional_context)
assert isinstance(rendered, pathlib.Path)
assert rendered == self.output_filename
assert rendered.exists
assert open(rendered, 'r').read() == self.expected_dut_info_output
def test_render_str(self):
''' Test that the renderer can render a given string '''
rendered = user_compiler().render(self.str_render,
syntax=self.syntax,
direct_src=True)
assert rendered == self.expected_str_render
def test_render_with_standard_context(self):
''' Renders output using the standard context '''
rendered = user_compiler().render(
self.str_render_with_standard_context,
syntax=self.syntax,
direct_src=True)
assert rendered == self.expected_str_render_with_standard_context
def test_render_with_additional_context(self):
''' Renders output using additional context given as an option
-> Test that the renderer supports the 'additional_context' option
'''
rendered = user_compiler().render(
self.str_render_with_additional_context,
syntax=self.syntax,
direct_src=True,
context={'test_renderer_name': self.syntax})
assert rendered == self.expected_str_render_with_additional_context
    @abc.abstractmethod
    def templatify(self, input):
        raise NotImplementedError
class TestMakoCompiler(FixtureCompilerTest):
extension = 'mako'
syntax = 'mako'
def templatify(self, input):
return "${" + input + "}"
# class TestJinjaCompiler:
# pass
class TestCompilerStack:
''' Tests the compiler's stack-like interface '''
test_cases = TestMakoCompiler()
''' Borrow the Mako test cases for use here '''
def test_compiler_can_accept_requests(self, clean_falcon, clean_compiler):
''' Push can accept either a straight pathlib.Path or str object (interpreted as a file)
or a tuple consisting of a 'src' and 'options'
'''
assert len(user_compiler().stack) == 0
user_compiler().push('test.mako')
assert len(user_compiler().stack) == 1
assert isinstance(user_compiler().stack[0], tuple)
assert isinstance(user_compiler().stack[0][0], list)
assert isinstance(user_compiler().stack[0][0][0], pathlib.Path)
assert user_compiler().stack[0][1] == {}
def test_compiler_can_clear_itself(self):
assert len(user_compiler().stack) > 0
user_compiler().clear()
assert user_compiler().stack == []
assert user_compiler().renders == []
assert user_compiler().output_files == []
def test_compiler_renders_text(self, clean_falcon, clean_compiler):
origen.app.compile(self.test_cases.str_render,
direct_src=True,
syntax='mako')
assert len(user_compiler().renders) == 1
assert len(user_compiler().stack) == 0
assert user_compiler(
).renders[0] == self.test_cases.expected_str_render
origen.app.compile(self.test_cases.str_render_with_additional_context,
context=self.test_cases.additional_context,
direct_src=True,
syntax='mako')
assert len(user_compiler().renders) == 2
assert len(user_compiler().stack) == 0
assert user_compiler().renders[
1] == self.test_cases.expected_str_render_with_additional_context
assert user_compiler().renders[-1] == user_compiler().last_render
def test_compiler_text_render_requires_syntax(self, clean_falcon,
clean_compiler):
assert len(user_compiler().stack) == 0
with pytest.raises(origen.compiler.ExplicitSyntaxRequiredError):
origen.app.compile(self.test_cases.str_render, direct_src=True)
def test_compiler_returns_templates_dir(self):
assert user_compiler().templates_dir == origen.app.root.joinpath(
'templates')
def test_compiler_renders_files(self, clean_falcon, clean_compiler):
origen.app.compile('dut_info.txt.mako',
output_dir=tmp_dir(),
context=self.test_cases.additional_context,
templates_dir=user_compiler().templates_dir)
assert len(user_compiler().stack) == 0
assert len(user_compiler().output_files) == 1
compiled_file = user_compiler().output_files[0]
compiled_file_status = os.stat(compiled_file)
assert isinstance(compiled_file, pathlib.PurePath) == True
assert compiled_file.exists() == True
assert access(compiled_file, R_OK) == True
# Check file permissions
assert bool(compiled_file_status.st_mode & stat.S_IRUSR) == True
assert bool(compiled_file_status.st_mode & stat.S_IWUSR) == True
assert bool(compiled_file_status.st_mode & stat.S_IWUSR) == True
| 39.117424 | 97 | 0.637165 | 8,743 | 0.846616 | 0 | 0 | 2,216 | 0.214583 | 0 | 0 | 1,614 | 0.156289 |
8d42c2702dd5a391e27f8a389f8a934778ba0c95 | 999 | py | Python | api/api.py | devSessions/crvi | 1ecc68d6c968294bcc5ceea747604ee237f6080c | [
"MIT"
]
| 25 | 2017-12-31T06:51:54.000Z | 2021-11-17T11:29:30.000Z | api/api.py | amittomar-1/crvi | 1ecc68d6c968294bcc5ceea747604ee237f6080c | [
"MIT"
]
| 23 | 2020-01-28T21:34:12.000Z | 2022-03-11T23:11:54.000Z | api/api.py | amittomar-1/crvi | 1ecc68d6c968294bcc5ceea747604ee237f6080c | [
"MIT"
]
| 11 | 2018-01-04T12:30:33.000Z | 2020-12-01T18:08:59.000Z | from flask import Flask, jsonify, request
import predict
import socket
app = Flask(__name__)
@app.route('/')
@app.route('/home')
def home():
"""Renders the home page."""
return (
"Welcome Guest!!!"
)
# to specify the route appended to the base URL
@app.route('/api', methods=['POST'])
def get_tasks():
#get url from form
# url = request.form['url']
url = request.files['url']
#sends url for prediction
sender = predict.predict(url)
#get values from prediction
rec = sender.predict_only()
# #list of out values
# outputlist=[rec]
# #for multiple json apis
# tasks = []
# tasks1 = [
# {
# 'value': outputlist[0],
# },
# ]
# tasks.append(tasks1)
# return jsonify({'tasks': tasks})
return jsonify({'cash': rec})
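# Example client call for the /api endpoint above (illustrative; assumes the
# requests library and a local image file):
#
# import requests
# resp = requests.post('http://localhost:5000/api',
#                      files={'url': open('banknote.jpg', 'rb')})
# print(resp.json())  # -> {'cash': <predicted value>}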
if __name__ == '__main__':
#for remote host
ip = socket.gethostbyname(socket.gethostname())
app.run(port=5000,host=ip)
#for local host
#app.run(debug=True, port=5000)
| 19.211538 | 51 | 0.58959 | 0 | 0 | 0 | 0 | 684 | 0.684685 | 0 | 0 | 470 | 0.47047 |
8d4484e9d066b90a85e8763af3ea488f55a3ae34 | 68 | py | Python | exe/__init__.py | whisperaven/0ops.exed | ab9f14868fec664fe78edab6fb7eb572b3048c58 | [
"MIT"
]
| 10 | 2017-03-17T02:15:18.000Z | 2019-10-26T23:54:21.000Z | exe/__init__.py | whisperaven/0ops | ab9f14868fec664fe78edab6fb7eb572b3048c58 | [
"MIT"
]
| 1 | 2017-03-20T03:17:17.000Z | 2017-03-20T04:04:26.000Z | exe/__init__.py | whisperaven/0ops | ab9f14868fec664fe78edab6fb7eb572b3048c58 | [
"MIT"
]
| 3 | 2017-03-17T02:46:23.000Z | 2018-04-14T15:49:56.000Z | # (c) 2016, Hao Feng <[email protected]>
__version__ = '0.1.0'
| 17 | 44 | 0.661765 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 51 | 0.75 |
8d4492744de35276bcea0bf1ccb409c9aa59295e | 418 | py | Python | Special_Viewer.py | Akivamelka/unsupervised_mid_semester | 5393185d7b0327bbb7cd4b3700d4d00704a5623f | [
"MIT"
]
| null | null | null | Special_Viewer.py | Akivamelka/unsupervised_mid_semester | 5393185d7b0327bbb7cd4b3700d4d00704a5623f | [
"MIT"
]
| null | null | null | Special_Viewer.py | Akivamelka/unsupervised_mid_semester | 5393185d7b0327bbb7cd4b3700d4d00704a5623f | [
"MIT"
]
| null | null | null | from Dimension_Reduction import Viewer
import pandas as pd
view_tool = Viewer()
reduc = 'pca'
suffix = '5'
data_plot = pd.read_csv(f"{reduc}_dim2_{suffix}.csv", delimiter=",")
models = ['km', 'fuzz', 'gmm', 'dbsc', 'hier', 'spec' ]
for model in models:
print(model)
labels = pd.read_csv(f"labels_{model}_{suffix}.csv", delimiter=",")
view_tool.view_vs_target(data_plot, labels, suffix, model) | 32.153846 | 72 | 0.669856 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 105 | 0.251196 |
8d481fde3510821315275850b3a25299bc9b350d | 6,621 | py | Python | pytumblr/types.py | 9999years/pytumblr | fe9b2fb60866785141fc0deb5a357a773c0f4229 | [
"Apache-2.0"
]
| null | null | null | pytumblr/types.py | 9999years/pytumblr | fe9b2fb60866785141fc0deb5a357a773c0f4229 | [
"Apache-2.0"
]
| null | null | null | pytumblr/types.py | 9999years/pytumblr | fe9b2fb60866785141fc0deb5a357a773c0f4229 | [
"Apache-2.0"
]
| null | null | null | from collections import UserList
from dataclasses import dataclass, field
from datetime import datetime
from typing import List, Dict, Any, Optional, Type
DATE_FORMAT = '%Y-%m-%d %H:%M:%S %Z'
def parse_date(tumblr_date: str) -> datetime:
return datetime.strptime(tumblr_date, DATE_FORMAT)
@dataclass
class Link:
"""
An objects in a _links hash
"""
href: str
type: str
@dataclass
class NavigationLink(Link):
pass
@dataclass
class ActionLink(Link):
method: str
query_params: Dict[str, Any]
_link_classes = {'navigation': NavigationLink,
'action': ActionLink}
@dataclass
class Tag:
tag: str
is_tracked: bool
featured: bool
thumb_url: Optional[str] = None
@dataclass
class BaseBlog:
name: str
updated: int
title: str
description: str
@dataclass
class Blog(BaseBlog):
url: str
@dataclass
class BlogInfo(BaseBlog):
posts: int
ask: bool
ask_anon: bool
likes: int
is_blocked_from_primary: bool
@dataclass
class UserBlogInfo:
url: str
title: str
primary: bool
followers: int
tweet: str
facebook: str
type: str
@dataclass
class UserInfo:
following: int
default_post_format: str
name: str
likes: int
blogs: List[UserBlogInfo]
def __post_init__(self):
self.blogs = [UserBlogInfo(**blog) for blog in self.blogs]
@dataclass
class Avatar:
avatar_url: str
@dataclass
class Post:
id: int
type: str
blog_name: str
post_url: str
timestamp: int
date: datetime
format: str
reblog_key: str
tags: List[str]
total_posts: int
blog: Optional[BlogInfo] = None
bookmarks: Optional[bool] = None
mobile: Optional[bool] = None
source_url: Optional[str] = None
source_title: Optional[str] = None
liked: Optional[bool] = None
state: Optional[str] = None
is_blocks_post_format: Optional[bool] = None
    def __new__(cls, *args, **kwargs):
        # Dispatch Post(...) construction to the matching subclass. Using
        # super().__new__ instead of calling the subclass avoids infinite
        # recursion, since subclasses inherit this __new__.
        if cls is Post:
            if 'blog_name' in kwargs and 'blog' not in kwargs:
                return super().__new__(DashboardPost)
            return super().__new__(POST_CLASSES[kwargs['type']])
        return super().__new__(cls)
def __eq__(self, other):
return self.id == other.id
def __hash__(self):
return hash(self.id)
    def __post_init__(self):
        if isinstance(self.date, str):
            self.date = parse_date(self.date)
        # blog may be absent (e.g. dashboard posts), so only wrap dicts
        if isinstance(self.blog, dict):
            self.blog = BlogInfo(**self.blog)
@dataclass
class DashboardPost(Post):
    blog: Optional[BlogInfo] = None
@dataclass
class Submission(Post):
    slug: Optional[str] = None
    short_url: Optional[str] = None
post_author: Optional[str] = None
is_submission: Optional[bool] = True
anonymous_name: Optional[str] = None
anonymous_email: Optional[str] = None
state: Optional[str] = 'submission'
@dataclass
class LegacyTextPost(Post):
title: Optional[str] = None
body: Optional[str] = None
@dataclass
class ImageSize:
width: int
height: int
url: str
@dataclass
class Photo:
caption: str
alt_sizes: List[ImageSize]
def __post_init__(self):
self.alt_sizes = [ImageSize(**size) for size in self.alt_sizes]
@dataclass
class VerbosePhoto(Photo):
original_size: ImageSize
width: int
height: int
url: str
def __post_init__(self):
self.original_size = ImageSize(**self.original_size)
@dataclass
class LegacyPhotoPost(Post):
"""
A photo or photoset post
"""
caption: Optional[str] = None
width: Optional[int] = None
height: Optional[int] = None
photos: List[Photo] = field(default_factory=list)
def __post_init__(self):
self.photos = [Photo(**photo) for photo in self.photos]
@dataclass
class LegacyQuotePost(Post):
text: Optional[str] = None
# HTML source, not an attribution
source: Optional[str] = None
@dataclass
class LegacyLinkPost(Post):
title: Optional[str] = None
description: Optional[str] = None
url: Optional[str] = None
author: Optional[str] = None
excerpt: Optional[str] = None
publisher: Optional[str] = None
photos: List[VerbosePhoto] = field(default_factory=list)
def __post_init__(self):
self.photos = [VerbosePhoto(**photo) for photo in self.photos]
@dataclass
class ChatLine:
name: str
label: str
phrase: str
@dataclass
class LegacyChatPost(Post):
title: Optional[str] = None
body: Optional[str] = None
dialogue: List[ChatLine] = field(default_factory=list)
def __post_init__(self):
self.dialogue = [ChatLine(**line) for line in self.dialogue]
@dataclass
class LegacyAudioPost(Post):
caption: Optional[str] = None
player: Optional[str] = None
plays: Optional[int] = None
album_art: Optional[str] = None
artist: Optional[str] = None
album: Optional[str] = None
track_name: Optional[str] = None
track_number: Optional[int] = None
year: Optional[int] = None
@dataclass
class VideoPlayer:
width: int
embed_code: str
@dataclass
class LegacyVideoPost(Post):
caption: Optional[str] = None
    player: List[Any] = field(default_factory=list)  # default_factory must be callable
@dataclass
class LegacyAnswerPost(Post):
asking_name: Optional[str] = None
asking_url: Optional[str] = None
question: Optional[str] = None
answer: Optional[str] = None
# a type -> class dict
POST_CLASSES: Dict[str, Type] = {
'photo': LegacyPhotoPost,
'quote': LegacyQuotePost,
'link': LegacyLinkPost,
'chat': LegacyChatPost,
'audio': LegacyAudioPost,
'video': LegacyVideoPost,
'answer': LegacyAnswerPost,
}
@dataclass
class Likes:
liked_posts: List[Post]
liked_count: int
def __post_init__(self):
self.liked_posts = [Post(**post) for post in self.liked_posts]
@dataclass
class Following:
blogs: List[BlogInfo]
total_blogs: int
def __post_init__(self):
self.blogs = [BlogInfo(**blog) for blog in self.blogs]
@dataclass
class Follower:
name: str
following: bool
url: str
updated: int
@dataclass
class Followers:
total_users: int
users: List[Follower]
def __post_init__(self):
self.users = [Follower(**user) for user in self.users]
@dataclass
class Reblog:
comment: str
tree_html: str
@dataclass
class Dashboard:
posts: List[DashboardPost]
def __post_init__(self):
self.posts = [DashboardPost(**post) for post in self.posts]
@dataclass
class Posts:
posts: List[Post]
def __post_init__(self):
self.posts = [Post(**post) for post in self.posts]
@dataclass
class BlogPosts(Posts):
blog: BlogInfo
def __post_init__(self):
self.blog = BlogInfo(**self.blog)
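# Illustrative sketch of the subclass dispatch in Post.__new__ (field values
# are made up, not real API output):
#
# payload = {'id': 1, 'type': 'quote', 'blog_name': 'demo',
#            'post_url': 'https://demo.tumblr.com/post/1', 'timestamp': 0,
#            'date': '2020-01-01 00:00:00 GMT', 'format': 'html',
#            'reblog_key': 'abc', 'tags': [], 'total_posts': 1}
# post = Post(**payload)   # no 'blog' key -> dispatched to DashboardPost
# assert isinstance(post, DashboardPost)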
| 19.247093 | 71 | 0.661683 | 5,493 | 0.829633 | 0 | 0 | 5,867 | 0.88612 | 0 | 0 | 263 | 0.039722 |
8d4876f42fc49dd8332e5b4739b6a7de0c8b9bb2 | 311 | py | Python | simple_jobs_scraper.py | Engnation/Jobs-Scraper | 6f8b1207731da9f187db406a5be6916774ba3bc5 | [
"MIT"
]
| null | null | null | simple_jobs_scraper.py | Engnation/Jobs-Scraper | 6f8b1207731da9f187db406a5be6916774ba3bc5 | [
"MIT"
]
| null | null | null | simple_jobs_scraper.py | Engnation/Jobs-Scraper | 6f8b1207731da9f187db406a5be6916774ba3bc5 | [
"MIT"
]
| null | null | null | from jobs_scraper import JobsScraper
# Let's create a new JobsScraper object and perform the scraping for a given query.
position_var = "Python"
scraper = JobsScraper(country="ca", position=position_var, location="Toronto", pages=3)
df = scraper.scrape()
df.to_csv(f'{position_var} jobs.csv', index=False)
8d4a0164b56629bd4e65dd24b9c1a1fba70a5ea1 | 810 | py | Python | mac/redRMacUpdater.py | PiRSquared17/r-orange | 6bc383f1db3c10c59e16b39daffc44df904ce031 | [
"Apache-2.0"
]
| 1 | 2019-04-15T13:50:30.000Z | 2019-04-15T13:50:30.000Z | mac/redRMacUpdater.py | PiRSquared17/r-orange | 6bc383f1db3c10c59e16b39daffc44df904ce031 | [
"Apache-2.0"
]
| null | null | null | mac/redRMacUpdater.py | PiRSquared17/r-orange | 6bc383f1db3c10c59e16b39daffc44df904ce031 | [
"Apache-2.0"
]
| 1 | 2016-01-21T23:00:21.000Z | 2016-01-21T23:00:21.000Z | import tarfile, sys,os
from PyQt4.QtCore import *
from PyQt4.QtGui import *
app = QApplication(sys.argv)
try:
    zfile = tarfile.open(sys.argv[1], "r:gz")
    zfile.extractall(sys.argv[2])
    zfile.close()
    mb = QMessageBox('Red-R Updated', "Red-R has been updated",
QMessageBox.Information, QMessageBox.Ok | QMessageBox.Default,
QMessageBox.NoButton, QMessageBox.NoButton)
except:
mb = QMessageBox('Red-R Updated', "There was an Error in updating Red-R.\n\n%s" % sys.exc_info()[0],
QMessageBox.Information, QMessageBox.Ok | QMessageBox.Default,
QMessageBox.NoButton, QMessageBox.NoButton)
app.setActiveWindow(mb)
mb.setFocus()
mb.show()
#mb.exec_()
# run the event loop, then remove the downloaded archive before exiting
# (an os.remove() placed after sys.exit() would never run)
ret = app.exec_()
os.remove(sys.argv[1])
sys.exit(ret)
8d4be9a3c0385e4ebdfd3712a699e128c38acafc | 9,346 | py | Python | darknet_websocket_demo.py | wutianze/darknet-superb-service | fdee5a932c8a3898701c1e302e4642fbff853630 | [
"MIT"
]
| null | null | null | darknet_websocket_demo.py | wutianze/darknet-superb-service | fdee5a932c8a3898701c1e302e4642fbff853630 | [
"MIT"
]
| null | null | null | darknet_websocket_demo.py | wutianze/darknet-superb-service | fdee5a932c8a3898701c1e302e4642fbff853630 | [
"MIT"
]
| null | null | null | from ctypes import *
#from multiprocessing import Process, Queue
import queue
import time
from threading import Event, Lock, Thread
from fastapi import FastAPI
from fastapi import Request
from fastapi import WebSocket, WebSocketDisconnect
import uvicorn
#from yolo_service import *
import socket
import random
from typing import List
import darknet
import cv2
import io
import struct
import os
import numpy as np
import base64
import json
from jtracer.tracing import init_tracer
import pynng
from PIL import Image
from opentracing.propagation import Format
def convert2relative(bbox,darknet_height,darknet_width):
"""
YOLO format use relative coordinates for annotation
"""
x, y, w, h = bbox
_height = darknet_height
_width = darknet_width
return x/_width, y/_height, w/_width, h/_height
def convert2original(image, bbox,darknet_height,darknet_width):
x, y, w, h = convert2relative(bbox,darknet_height,darknet_width)
image_h, image_w, __ = image.shape
orig_x = int(x * image_w)
orig_y = int(y * image_h)
orig_width = int(w * image_w)
orig_height = int(h * image_h)
bbox_converted = (orig_x, orig_y, orig_width, orig_height)
return bbox_converted
class SuperbFrame:
def __init__(self,darknet_height,darknet_width):
self.image = None
self.results = None
self.darknet_image = darknet.make_image(darknet_width,darknet_height,3)
self.recv_timestamp = 0
self.send_timestamp = 0
self.inference_time = 0
self.final_image = None
self.bytes = None
self.span = None
def port_is_used(port,ip="0.0.0.0"):
s = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
try:
s.connect((ip,port))
s.shutdown(2)
return True
except Exception as e:
return False
app = FastAPI()
class ConnectionManager:
def __init__(self):
        # active WebSocket connection objects
self.active_connections: List[WebSocket] = []
self.ports = set()
self.port_lock = Lock()
async def connect(self, ws: WebSocket):
        # accept the incoming connection
await ws.accept()
        # store the WebSocket connection object
self.active_connections.append(ws)
def disconnect(self, ws: WebSocket):
        # remove the WebSocket object on close
self.active_connections.remove(ws)
manager = ConnectionManager()
@app.get("/get_port")
def get_port(request:Request):
while True:
manager.port_lock.acquire()
port_tmp = random.randint(int(os.getenv("SUPB_MIN_PORT")),int(os.getenv("SUPB_MAX_PORT")))
if port_tmp in manager.ports or port_is_used(port_tmp):
manager.port_lock.release()
continue
else:
manager.ports.add(port_tmp)
manager.port_lock.release()
return port_tmp # port_tmp is the key for a client
def parse_data(data,tracer):
head_length, msg_length = struct.unpack("ii", data[0:8])
head_length, msg_length, msg_head, msg = struct.unpack("ii"+ str(head_length) + "s" + str(msg_length) + "s", data)
if head_length > 2:
span_dict = json.loads(msg_head)
span_ctx = tracer.extract(Format.TEXT_MAP, span_dict)
return span_ctx, msg
else:
return None, msg
def send_index(send_queue, sock,keep_alive):
    while not keep_alive.is_set():
try:
span_reply = send_queue.get(block=False,timeout=20)
sock.send(span_reply)
except pynng.Timeout:
print("sock.send timeout")
except:
pass # no msg to send
def send_then_recv(input_address,send_queue,input_queue,tracer,darknet_width,darknet_height,sock,keep_alive):
#sock = pynng.Pair1(recv_timeout=100,send_timeout=100)
#sock.listen(input_address)
    while not keep_alive.is_set():
#try:
# span_reply = send_queue.get(block=False,timeout=20)
# sock.send(span_reply)
#except pynng.Timeout:
# print("sock.send timeout")
#except:
# pass # no msg to send
try:
msg = sock.recv()
except pynng.Timeout:
continue
recv_time = time.time()
newFrame = SuperbFrame(darknet_height,darknet_width)
newFrame.recv_timestamp = int(recv_time*1000.0) # in ms
# msg handling
span_ctx, msg_content = parse_data(msg,tracer)
if span_ctx is not None:
newFrame.span = tracer.start_span('image_procss',child_of=span_ctx)
header = msg_content[0:24]
hh,ww,cc,tt = struct.unpack('iiid',header)
newFrame.send_timestamp = int(tt*1000.0)
hh,ww,cc,tt,ss = struct.unpack('iiid'+str(hh*ww*cc)+'s',msg_content)
newFrame.image = cv2.cvtColor((np.frombuffer(ss,dtype=np.uint8)).reshape(hh,ww,cc), cv2.COLOR_BGR2RGB)
darknet.copy_image_from_bytes(newFrame.darknet_image,cv2.resize(newFrame.image,(darknet_width,darknet_height),interpolation=cv2.INTER_LINEAR).tobytes())
#if span_ctx is not None:
# newFrame.span.finish()
try:
input_queue.put(newFrame,block=False,timeout=100)
except:
print("input_queue is full, discard current msg")
continue
def keep_inference(send_queue,input_queue,result_queue,network,class_names,keep_alive):
    while not keep_alive.is_set():
try:
#print("get newFrame")
newFrame = input_queue.get(block=False,timeout=100)
except:
#print("inference get fail")
continue
prev_time = time.time()
newFrame.results = darknet.detect_image(network, class_names, newFrame.darknet_image, thresh=0.2)
newFrame.inference_time = int((time.time()-prev_time)*1000.0) # s -> ms
darknet.free_image(newFrame.darknet_image)
if newFrame.span is not None:
index = newFrame.span.get_baggage_item('index')
newFrame.span.finish()
try:
send_queue.put(index.encode())
#sock.send(index.encode())
except:
print("send_queue is full, discard current msg")
try:
result_queue.put(newFrame,block=False,timeout=10)
except:
print("result_queue is full, discard current msg")
continue
def generate_output(result_queue,need_bytes,keep_alive,class_colors,darknet_height,darknet_width,resizew=960,resizeh=480):
    while not keep_alive.is_set():
try:
newFrame = result_queue.get(block=False,timeout=30)
except:
continue
detections_adjusted = []
if newFrame is not None:
for label, confidence, bbox in newFrame.results:
bbox_adjusted = convert2original(newFrame.image, bbox,darknet_height,darknet_width)
detections_adjusted.append((str(label), confidence, bbox_adjusted))
image = darknet.draw_boxes(detections_adjusted, newFrame.image, class_colors)
            # cv2.cvtColor returns a new array; assign it rather than discard it
            image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
newFrame.final_image = image
if need_bytes:
img = Image.fromarray(image).resize((resizew,resizeh))
img_byte_arr = io.BytesIO()
img.save(img_byte_arr, format='PNG')
img_byte_arr.seek(0)
newFrame.bytes = base64.b64encode(img_byte_arr.read()).decode()
return newFrame
else:
continue
@app.websocket("/ws/{port}")# user is the received port_tmp
async def stream_handler(websocket: WebSocket, port: str):
print("a new websocket connected")
await manager.connect(websocket)
network,class_names,class_colors = darknet.load_network(
"./cfg/yolov4.cfg",
"./cfg/coco.data",
"./yolov4.weights",
batch_size=1
)
darknet_width = darknet.network_width(network)
darknet_height = darknet.network_height(network)
tracer = init_tracer("image-process")
input_queue = queue.Queue(maxsize=5)
result_queue = queue.Queue(maxsize=5)
send_queue = queue.Queue(maxsize=5)
input_address = "tcp://0.0.0.0:"+port
sock = pynng.Pair1(recv_timeout=100,send_timeout=100)
sock.listen(input_address)
    # a shared Event lets the worker threads observe shutdown; a plain bool
    # argument is passed by value and could never be cleared from here
    keep_alive = Event()
p0 = Thread(target=send_then_recv,args=(input_address,send_queue,input_queue,tracer,darknet_width,darknet_height,sock,keep_alive))
p1 = Thread(target=keep_inference,args=(send_queue,input_queue,result_queue,network,class_names,keep_alive))
p2 = Thread(target=send_index,args=(send_queue,sock,keep_alive))
p0.start()
p1.start()
p2.start()
try:
        while not keep_alive.is_set():
            # note the signature order: (..., darknet_height, darknet_width)
            superbFrame = generate_output(result_queue, True, keep_alive,
                                          class_colors, darknet_height, darknet_width)
send1_time = int(time.time()*1000.0)
payload = {"img": "data:image/png;base64,%s"%(superbFrame.bytes),"send0_time":superbFrame.send_timestamp,"recv_time":superbFrame.recv_timestamp,"send1_time":send1_time}
await websocket.send_json(payload)
except WebSocketDisconnect:
        keep_alive.set()  # signal the worker threads to stop
p0.join()
p1.join()
p2.join()
sock.close()
manager.disconnect(websocket)
manager.ports.discard(port)
if __name__ == "__main__":
uvicorn.run("darknet_websocket_demo:app",host="0.0.0.0",port=int(os.getenv("SUPB_SERVICE_PORT")),log_level="info")
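# Illustrative client-side framing for the pynng Pair1 socket above, derived
# from parse_data() and send_then_recv() (address and image are placeholders;
# the image bytes are expected in BGR order, as produced by cv2):
#
# head = b'{}'                                  # no tracing span context
# body = struct.pack('iiid', h, w, 3, time.time()) + frame_bgr.tobytes()
# msg = struct.pack('ii', len(head), len(body)) + head + body
# client = pynng.Pair1(dial='tcp://127.0.0.1:<port>')
# client.send(msg)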
| 35.003745 | 180 | 0.652044 | 873 | 0.092892 | 0 | 0 | 2,295 | 0.244201 | 1,912 | 0.203448 | 1,236 | 0.131517 |
8d4d42f7498f1a4af52daeaede069016fb2ef667 | 2,389 | py | Python | tests/unit/test_sherman_morrison.py | willwheelera/pyqmc | 0c8d1f308bbccb1560aa680a5a75e7a4fe7a69fb | [
"MIT"
]
| 44 | 2019-06-04T13:53:26.000Z | 2022-03-31T08:36:30.000Z | tests/unit/test_sherman_morrison.py | willwheelera/pyqmc | 0c8d1f308bbccb1560aa680a5a75e7a4fe7a69fb | [
"MIT"
]
| 121 | 2019-05-13T14:05:20.000Z | 2022-02-16T19:24:37.000Z | tests/unit/test_sherman_morrison.py | willwheelera/pyqmc | 0c8d1f308bbccb1560aa680a5a75e7a4fe7a69fb | [
"MIT"
]
| 35 | 2019-04-26T21:57:50.000Z | 2022-02-14T07:56:34.000Z | import numpy as np
from pyqmc.slater import sherman_morrison_row
from pyqmc.slater import sherman_morrison_ms
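# Background for these tests (not part of the original module): replacing row
# e of an invertible matrix A with a vector v gives A' with
#
#   det(A') / det(A) = v . A^{-1}[:, e]        (the "ratio" checked below)
#
# and A'^{-1} follows from the Sherman-Morrison rank-1 update
#   (A + u w^T)^{-1} = A^{-1} - (A^{-1} u w^T A^{-1}) / (1 + w^T A^{-1} u)
# with u = e_e (the e-th standard basis vector) and w = v - A[e, :].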
def test_sherman_morrison():
ratio_err, inv_err = run_sherman_morrison()
assert ratio_err < 1e-13, f"ratios don't match {ratio_err}"
assert inv_err < 1e-13, f"inverses don't match {inv_err}"
ratio_err, inv_err = run_sherman_morrison(ms=True)
assert ratio_err < 1e-13, f"ratios don't match {ratio_err}"
assert inv_err < 1e-13, f"inverses don't match {inv_err}"
def construct_mat(nconf, n, ndet=None):
u, s, v = np.linalg.svd(np.random.randn(n, n))
if ndet is None:
shape = (nconf, n)
else:
shape = (nconf, ndet, n)
svals = (np.random.rand(*shape) + 1) * np.random.choice([-1, 1], shape)
matrix = np.einsum("ij,...hj,jk->...hik", u, svals, v)
return matrix
def construct_vec(matrix, nconf, n, e, ndet=None):
if ndet is None:
coef = np.random.randn(nconf, n - 1)
else:
coef = np.random.randn(nconf, ndet, n - 1)
not_e = np.arange(n) != e
vec_ = np.einsum("i...j,i...jk->i...k", coef, matrix[..., not_e, :])
proj = (np.random.random(nconf) - 1) * 2
proj += np.sign(proj) * 0.5
vec = vec_ + np.einsum("i...k,i->i...k", matrix[..., e, :], proj)
return vec
def run_sherman_morrison(ms=False):
n = 10
nconf = 4
e = 2
ndet = 8 if ms else None
# construct matrix that isn't near singular
matrix = construct_mat(nconf, n, ndet=ndet)
inv = np.linalg.inv(matrix)
# make sure new matrix isn't near singular
newmatrix = matrix.copy()
vec = construct_vec(matrix, nconf, n, e, ndet=ndet)
newmatrix[..., e, :] = vec
# compute ratios and inverses directly and by update
if ndet is None:
smratio, sminv = sherman_morrison_row(e, inv, vec)
else:
smratio, sminv = sherman_morrison_ms(e, inv, vec)
npratio = np.linalg.det(newmatrix) / np.linalg.det(matrix)
npinv = np.linalg.inv(newmatrix)
ratio_err = np.amax(np.abs(npratio - smratio))
inv_err = np.amax(np.abs(npinv - sminv))
return ratio_err, inv_err
if __name__ == "__main__":
r_err, inv_err = list(zip(*[run_sherman_morrison() for i in range(2000)]))
print(np.amax(r_err))
print(np.amax(inv_err))
counts, bins = np.histogram(np.log10(inv_err), bins=np.arange(-16, 0))
print(np.stack([counts, bins[1:]]))
| 30.628205 | 78 | 0.631226 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 337 | 0.141063 |
8d4df1f93edc3b8bb4e583e03cb8610d1cc0439f | 1,543 | py | Python | script/licel-plotter.py | FedeVerstraeten/smn-lidar-controller | 7850fd48702d5f2e00d07b499812b3b2fb2b7676 | [
"MIT"
]
| null | null | null | script/licel-plotter.py | FedeVerstraeten/smn-lidar-controller | 7850fd48702d5f2e00d07b499812b3b2fb2b7676 | [
"MIT"
]
| 1 | 2021-10-05T03:53:55.000Z | 2021-10-05T03:53:55.000Z | script/licel-plotter.py | FedeVerstraeten/smnar-lidar-controller | 7850fd48702d5f2e00d07b499812b3b2fb2b7676 | [
"MIT"
]
| null | null | null | #!/usr/bin/env python3
import sys
import socket
import time
import numpy as np
import matplotlib.pyplot as plt
HOST = '10.49.234.234'
PORT = 2055
def command_to_licel(licelcommand):
data=None
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
s.connect((HOST, PORT))
s.sendall(bytes(licelcommand+'\r\n','utf-8'))
        time.sleep(2)  # wait for the TCP acquisition to complete
        # note: a single recv() may return fewer bytes than requested
        data = s.recv(8192)  # 8192 = 4096 * 2
print("Len:",len(data),"type:",type(data))
return data
if __name__ == '__main__':
# Select TR
command_select='SELECT 0'
rsp=repr(command_to_licel(command_select))
print('Received',rsp)
# Clear memory
command_clear='MCLEAR'
rsp=repr(command_to_licel(command_clear))
print('Received',rsp)
# Start TR
command_start='MSTART'
rsp=repr(command_to_licel(command_start))
print('Received',rsp)
time.sleep(5)
# Stop TR
command_stop='MSTOP'
rsp=repr(command_to_licel(command_stop))
print('Received',rsp)
# Get data
command_data='DATA? 0 4001 LSW A'
rsp=command_to_licel(command_data)
#print('Received',rsp)
# with open('outputlicel', 'w') as f:
# f.write(rsp)
data_output=rsp
# Plot
t = np.arange(0, len(data_output), 1)
data_arr=[]
for data_byte in data_output:
data_arr.append(int(data_byte))
fig, ax = plt.subplots()
ax.plot(t, data_arr)
ax.set(xlabel='time (s)', ylabel='voltage (mV)',title='SMN LICEL')
ax.grid()
fig.savefig("test.png")
plt.show()
| 24.109375 | 70 | 0.644848 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 389 | 0.252106 |
8d500786de7e53c7c13f50132e8ecbc760d095db | 13,860 | py | Python | horizon/openstack_dashboard/dashboards/identity/account/tables.py | yianjiajia/openstack_horizon | 9e36a4c3648ef29d0df6912d990465f51d6124a6 | [
"Apache-2.0"
]
| null | null | null | horizon/openstack_dashboard/dashboards/identity/account/tables.py | yianjiajia/openstack_horizon | 9e36a4c3648ef29d0df6912d990465f51d6124a6 | [
"Apache-2.0"
]
| null | null | null | horizon/openstack_dashboard/dashboards/identity/account/tables.py | yianjiajia/openstack_horizon | 9e36a4c3648ef29d0df6912d990465f51d6124a6 | [
"Apache-2.0"
]
| null | null | null | # Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import json
from django.utils.translation import ugettext_lazy as _
from django.utils.translation import ungettext_lazy
from django.conf import settings
from horizon import forms
from horizon import tables
from horizon.utils import filters
from openstack_dashboard import api
from openstack_dashboard import policy
LOG = logging.getLogger(__name__)
POLICY_CHECK = getattr(settings, "POLICY_CHECK_FUNCTION", lambda p, r: True)
class CreateAccount(tables.LinkAction):
name = "create"
verbose_name = _("Create Account")
url = "horizon:identity:account:create"
classes = ("ajax-modal",)
icon = "plus"
policy_rules = (("identity", "identity:create_user"),)
class DeleteAccountAction(tables.DeleteAction):
help_text = _(
"This Operation will delete all configuration and resources(network, images, servers, disks, VPN, firewall, keypair) and !!! Please confirm your operation.")
@staticmethod
def action_present(count):
return ungettext_lazy(
u"Delete User",
u"Delete Users",
count
)
@staticmethod
def action_past(count):
return ungettext_lazy(
u"Deleted User",
u"Deleted Users",
count
)
name = "delete"
policy_rules = (("identity", "identity:update_user"),)
def allowed(self, request, user):
if not api.keystone.keystone_can_edit_user():
return False
self.enabled = True
if not user:
return False
else:
return user.enabled
    def delete_billing_account(self, request, obj_id):
client = api.billing.RequestClient(request)
account = client.get_account(obj_id)
if account:
ret = client.api_request('/account/delete/' + account['account_id'],
method='DELETE')
user = json.loads(ret.read())
if user['success'] != 'success':
raise
def delete(self, request, obj_id):
LOG.info('Deleting User "%s".' % obj_id)
try:
api.keystone.user_update_enabled(request, obj_id, False)
user = api.keystone.user_get(request, obj_id)
api.keystone.tenant_update(request, user.default_project_id, enabled=False)
            self.delete_billing_account(request, user.default_project_id)
# operation log
config = _('User ID: %s') % obj_id
api.logger.Logger(request).create(resource_type='account', action_name='Deletes User',
resource_name='Account', config=config,
status='Success')
except Exception:
# operation log
config = _('User ID: %s') % obj_id
api.logger.Logger(request).create(resource_type='account', action_name='Deletes User',
resource_name='Account', config=config,
status='Error')
class EnableAccountAction(tables.DeleteAction):
help_text = _(
"This Operation will enable the user and project!!! Please confirm your operation.")
@staticmethod
def action_present(count):
return ungettext_lazy(
u"Enable User",
u"Enable Users",
count
)
@staticmethod
def action_past(count):
return ungettext_lazy(
u"Enabled User",
u"Enabled Users",
count
)
name = "enable"
policy_rules = (("identity", "identity:update_user"),)
def allowed(self, request, user):
if not api.keystone.keystone_can_edit_user():
return False
self.enabled = True
if not user:
return False
else:
return not user.enabled
def enable_billing_account(self, request, obj_id):
client = api.billing.RequestClient(request)
account = client.get_account(obj_id)
if account:
params = {}
params['account'] = {}
params['account']['status'] = 'normal'
params['account']['frozen_status'] = 'normal'
ret = client.api_request('/account/update/' + account['account_id'],
method='PUT', data=json.dumps(params))
user = json.loads(ret.read())
if user['success'] != 'success':
raise
def action(self, request, obj_id):
LOG.info('Enable User "%s".' % obj_id)
try:
api.keystone.user_update_enabled(request, obj_id, True)
user = api.keystone.user_get(request, obj_id)
api.keystone.tenant_update(request, user.default_project_id, enabled=True)
self.enable_billing_account(request, user.default_project_id)
# operation log
config = _('User ID: %s') % obj_id
api.logger.Logger(request).create(resource_type='account', action_name='Enables User',
resource_name='Account', config=config,
status='Success')
except Exception:
# operation log
config = _('User ID: %s') % obj_id
api.logger.Logger(request).create(resource_type='account', action_name='Enables User',
resource_name='Account', config=config,
status='Error')
class AccountFilterAction(tables.FilterAction):
name = "filter_account"
filter_type = "server"
filter_choices = (('sname', _("Name"), True),
('scompany', _("Company Name"), True),
('enabled', _("Status"), True),)
class EditAccountInfoLink(tables.LinkAction):
name = "edit"
verbose_name = _("Edit")
url = "horizon:identity:account:update_info"
classes = ("ajax-modal",)
icon = "pencil"
policy_rules = (("identity", "identity:update_user"),)
def allowed(self, request, datum=None):
if not datum:
return False
else:
user = api.keystone.user_get(request, datum)
return user.enabled
class AdjustQuotaLink(tables.LinkAction):
name = "update_quota"
verbose_name = _("Modify Quotas")
url = "horizon:identity:account:update_quota"
classes = ("ajax-modal",)
icon = "pencil"
policy_rules = (("identity", "identity:update_project"),)
def allowed(self, request, datum=None):
# only display when the modified user have this region
region_choices = []
regions = api.keystone.list_regions_for_user(request, datum.id)
for region in regions:
region_choices.append(region['id'])
if request.user.services_region not in region_choices:
return False
if not datum:
return False
else:
user = api.keystone.user_get(request, datum)
return user.enabled
class RoleChangeLink(tables.BatchAction):
name = "adjust_quota"
classes = ('btn-danger',)
icon = "pencil"
help_text = _("Please do it carefully!")
policy_rules = (("identity", "identity:update_user"),)
@staticmethod
def action_present(count):
return ungettext_lazy(
u"Role Change",
u"Role Changes",
count
)
@staticmethod
def action_past(count):
return ungettext_lazy(
u"Role Changed",
u"Role Changed",
count
)
def allowed(self, request, datum=None):
policy = (("identity", "identity:create_grant"),
("identity", "identity:revoke_grant"),)
# only normal user can change their role
# only support and admin can do this action
if not datum:
return False
else:
user = api.keystone.user_get(request, datum)
if user.enabled:
default_role = api.keystone.get_default_role(request)
if user.default_role_id != default_role.id:
return False
return POLICY_CHECK(policy, request)
else:
return False
    def action(self, request, obj_id):
        # look the roles up first so the log message is well defined even
        # when the role change itself fails
        default_user_role = api.keystone.get_default_role(request)
        default_project_admin_role = api.keystone.get_default_project_admin_role(request)
        config = _('Old role %s, new role %s') % (default_user_role.name,
                                                  default_project_admin_role.name)
        try:
            user = api.keystone.user_get(request, obj_id)
            api.keystone.remove_tenant_user_role(request, project=user.default_project_id,
                                                 user=user.id, role=default_user_role.id)
            api.keystone.user_update(request, obj_id,
                                     **{'default_role_id': default_project_admin_role.id})
            api.keystone.add_tenant_user_role(request, project=user.default_project_id,
                                              user=user.id,
                                              role=default_project_admin_role.id)
            # operation log
            api.logger.Logger(request).create(resource_type='account', action_name='Role_Change',
                                              resource_name='Account', config=config,
                                              status='Success')
        except Exception:
            # operation log
            api.logger.Logger(request).create(resource_type='account', action_name='Role_Change',
                                              resource_name='Account', config=config,
                                              status='Error')
class ChangePasswordLink(policy.PolicyTargetMixin, tables.LinkAction):
name = "change_password"
verbose_name = _("Change Password")
url = "horizon:identity:account:change_password"
classes = ("ajax-modal",)
icon = "key"
policy_rules = (("identity", "identity:change_password"),)
policy_target_attrs = (("user_id", "id"),)
def allowed(self, request, datum=None):
if not datum:
return False
else:
user = api.keystone.user_get(request, datum)
return user.enabled and api.keystone.keystone_can_edit_user()
class UpdateRegionsLink(policy.PolicyTargetMixin, tables.LinkAction):
name = "regions"
verbose_name = _("Update Regions")
url = "horizon:identity:account:regions"
classes = ("ajax-modal",)
icon = "pencil"
policy_rules = (("identity", "identity:update_user_regions"),)
def allowed(self, request, datum=None):
if not datum:
return False
else:
user = api.keystone.user_get(request, datum)
return user.enabled
class UpdateMembersLink(tables.LinkAction):
name = "users"
verbose_name = _("Manage Members")
url = "horizon:identity:account:update_member"
classes = ("ajax-modal",)
icon = "pencil"
policy_rules = (("identity", "identity:list_users"),
("identity", "identity:list_grants"))
def allowed(self, request, datum=None):
if not datum:
return False
else:
user = api.keystone.user_get(request, datum)
return user.enabled
STATUS_DISPLAY_CHOICES = (
(False, _("Delete")),
(True, _("Normal")),
)
class AccountsTable(tables.DataTable):
id = tables.Column('id', hidden=True)
# project_id = tables.Column('project_id', hidden=True)
name = tables.Column('name',
verbose_name=_('User Name'),
form_field=forms.CharField(),
link='horizon:identity:account:detail'
)
company = tables.Column('company',
verbose_name=_('Company Name'),
form_field=forms.CharField())
# email = tables.Column('email', verbose_name=_('Email'),
# form_field=forms.CharField(required=False),
# filters=(lambda v: defaultfilters
# .default_if_none(v, ""),
# defaultfilters.escape,
# defaultfilters.urlize)
# )
enabled = tables.Column('enabled', verbose_name=_('Status'),
# status=True,
# status_choices=STATUS_CHOICES,
display_choices=STATUS_DISPLAY_CHOICES,
empty_value="False")
created_at = tables.Column('created_at',
verbose_name=_('Created_at'),
filters=[filters.parse_isotime])
class Meta(object):
name = "accounts"
verbose_name = _("AccountList")
table_actions = (AccountFilterAction, CreateAccount)
row_actions = (EditAccountInfoLink, AdjustQuotaLink, UpdateRegionsLink, UpdateMembersLink,
RoleChangeLink, ChangePasswordLink, DeleteAccountAction, EnableAccountAction)
| 37.258065 | 165 | 0.581818 | 12,695 | 0.915945 | 0 | 0 | 956 | 0.068975 | 0 | 0 | 3,529 | 0.254618 |
8d5291b6a1ce7e03aab2c5b10e8c178dc0212bb3 | 2,278 | py | Python | 3Sum.py | Muthu2093/Algorithms-practice | 999434103a9098a4361104fd39cba5913860fa9d | [
"MIT"
]
| null | null | null | 3Sum.py | Muthu2093/Algorithms-practice | 999434103a9098a4361104fd39cba5913860fa9d | [
"MIT"
]
| null | null | null | 3Sum.py | Muthu2093/Algorithms-practice | 999434103a9098a4361104fd39cba5913860fa9d | [
"MIT"
]
| null | null | null | ## Given an array nums of n integers, are there elements a, b, c in nums such that a + b + c = 0? Find all unique triplets in the array which gives the sum of zero.
## Note:
## The solution set must not contain duplicate triplets.
## Example:
## Given array nums = [-1, 0, 1, 2, -1, -4],
## A solution set is:
## [
## [-1, 0, 1],
## [-1, -1, 2]
## ]
class Solution:
def quickSort(self, nums, l, r):
if(l<r):
pi = self.partition(nums, l, r)
self.quickSort(nums, l, pi-1)
self.quickSort(nums, pi+1, r)
def partition(self, nums, low, high):
pivot = nums[high]
j=low-1
for i in range(low, high):
if nums[i] <= pivot:
j += 1
nums[i],nums[j] = nums[j],nums[i]
nums[high],nums[j+1] = nums[j+1],nums[high]
return (j+1)
def threeSum(self, nums):
"""
:type nums: List[int]
:rtype: List[List[int]]
"""
if len(nums) <= 2:
return []
if len(nums) == 3:
if sum(nums) == 0:
lis = []
lis.append(nums)
return lis
#self.quickSort(nums, 0 , len(nums)-1)
nums.sort()
lis =[]
for m in range (1,len(nums)-1):
l=0
r=len(nums)-1
            # duplicate triples produced by repeated middle values are
            # deduplicated by the set() below
while (l<m and m<r):
if (nums[l] + nums[m] + nums[r] == 0):
lis.append((nums[l],nums[m],nums[r]))
while(l<r and nums[l] == nums[l+1]):
l = l+1
while(l<r and nums[r] == nums[r-1]):
r = r-1
if (nums[l] + nums[m] + nums[r] < 0):
l = l + 1
else:
r = r - 1
        # deduplicate, then convert the tuples back to lists per the docstring
        return [list(t) for t in set(lis)]
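# Example (the order of triples in the result may vary because of the set()):
#
# Solution().threeSum([-1, 0, 1, 2, -1, -4])  # -> [[-1, 0, 1], [-1, -1, 2]]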
| 27.780488 | 164 | 0.368306 | 1,893 | 0.830992 | 0 | 0 | 0 | 0 | 0 | 0 | 459 | 0.201493 |
8d52b06f889e9040ed2102aec6867ed5ea6a3b70 | 684 | py | Python | moim/models.py | gyukebox/django-tutorial-moim | ea9bea85dadf22bff58ae26ee1ac59171bbe0240 | ["MIT"] | null | null | null | moim/models.py | gyukebox/django-tutorial-moim | ea9bea85dadf22bff58ae26ee1ac59171bbe0240 | ["MIT"] | 4 | 2018-01-01T09:26:30.000Z | 2018-01-06T07:13:01.000Z | moim/models.py | gyukebox/django-tutorial-moim | ea9bea85dadf22bff58ae26ee1ac59171bbe0240 | ["MIT"]
| null | null | null | from django.db import models
from user.models import UserModel
class MoimModel(models.Model):
title = models.CharField(max_length=20)
creator = models.ForeignKey(UserModel, on_delete=models.CASCADE)
created_at = models.DateTimeField(auto_now_add=True)
starts_at = models.DateTimeField()
max_attendee = models.PositiveIntegerField()
attendees = models.ManyToManyField(
UserModel, related_name='Attendee', blank=True)
summary = models.CharField(max_length=100)
description = models.TextField()
image = models.FileField(blank=True, upload_to='static/images')
def __str__(self):
return '{} : {}'.format(self.title, self.summary)
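# Hypothetical usage sketch (the names below are illustrative only):
#   moim = MoimModel.objects.create(
#       title='Django study', creator=host_user, starts_at=start_time,
#       max_attendee=10, summary='Weekly meetup', description='...')
#   moim.attendees.add(guest_user)
#   str(moim)  # -> 'Django study : Weekly meetup'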
| 36 | 68 | 0.730994 | 618 | 0.903509 | 0 | 0 | 0 | 0 | 0 | 0 | 34 | 0.049708 |
8d5338ad6760bdfbd08440494b1ea9d0eab1dc53 | 1,809 | py | Python | developers_chamber/scripts/gitlab.py | dstlmrk/developers-chamber | 93f928048f57c049f1c85446d18078b73376462a | ["MIT"] | 8 | 2019-08-23T15:46:30.000Z | 2021-03-23T20:12:21.000Z | developers_chamber/scripts/gitlab.py | dstlmrk/developers-chamber | 93f928048f57c049f1c85446d18078b73376462a | ["MIT"] | 14 | 2019-09-17T20:24:18.000Z | 2021-05-18T21:10:12.000Z | developers_chamber/scripts/gitlab.py | dstlmrk/developers-chamber | 93f928048f57c049f1c85446d18078b73376462a | ["MIT"]
| 6 | 2019-08-23T15:46:21.000Z | 2022-02-18T11:01:18.000Z | import os
import click
from developers_chamber.git_utils import get_current_branch_name
from developers_chamber.gitlab_utils import \
create_merge_request as create_merge_request_func
from developers_chamber.scripts import cli
DEFAULT_API_URL = os.environ.get('GITLAB_API_URL', 'https://gitlab.com/api/v4')
DEFAULT_PROJECT = os.environ.get('GITLAB_PROJECT')
DEFAULT_TARGET_BRANCH = os.environ.get('GITLAB_TARGET_BRANCH', 'next')
DEFAULT_TOKEN = os.environ.get('GITLAB_TOKEN')
@cli.group()
def gitlab():
"""GitLab commands"""
@gitlab.command()
@click.option('--api-url', help='GitLab instance API URL (defaults to gitlab.com)', type=str, required=True,
default=DEFAULT_API_URL)
@click.option('--token', help='token (can be set as env variable GITLAB_TOKEN)', type=str, required=True,
default=DEFAULT_TOKEN)
@click.option('--source-branch', help='source Git branch', type=str)
@click.option('--target-branch', help='target Git branch (defaults to env variable GITLAB_TARGET_BRANCH)', type=str,
default=DEFAULT_TARGET_BRANCH)
@click.option('--project', help='GitLab project name (defaults to env variable GITLAB_PROJECT)', type=str,
required=True, default=DEFAULT_PROJECT)
def create_release_merge_request(api_url, token, source_branch, target_branch, project):
"""Create a new merge request in GitLab project after release"""
if not source_branch:
source_branch = get_current_branch_name()
mr_url = create_merge_request_func(
api_url=api_url,
token=token,
title=f'Merge branch "{source_branch}"',
description='',
source_branch=source_branch,
target_branch=target_branch,
project=project,
)
click.echo(f'Merge request was successfully created: {mr_url}')
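# Example invocation (values are illustrative; assumes the package's `pydev`
# console script is installed):
#   pydev gitlab create-release-merge-request \
#       --source-branch release-1.2.0 --project my-group/my-project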
| 38.489362 | 116 | 0.726368 | 0 | 0 | 0 | 0 | 1,320 | 0.729685 | 0 | 0 | 585 | 0.323383 |
8d5577a30127caeb2ef24f4e9b841abc050103d0 | 15,790 | py | Python | tests_pytest/state_machines/autoinstall/test_autoinstall_smbase.py | tessia-project/tessia | b9ded8dc7f0b9a7a0ea00d95b5ccc4af4d2e7540 | ["Apache-2.0"] | 5 | 2020-06-04T10:20:33.000Z | 2020-10-26T15:09:19.000Z | tests_pytest/state_machines/autoinstall/test_autoinstall_smbase.py | tessia-project/tessia | b9ded8dc7f0b9a7a0ea00d95b5ccc4af4d2e7540 | ["Apache-2.0"] | null | null | null | tests_pytest/state_machines/autoinstall/test_autoinstall_smbase.py | tessia-project/tessia | b9ded8dc7f0b9a7a0ea00d95b5ccc4af4d2e7540 | ["Apache-2.0"]
| null | null | null | # Copyright 2021 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Test base autoinstall machine
A minimal implementation of SmBase is used to test the common features
"""
# pylint: disable=invalid-name # we have really long test names
# pylint: disable=redefined-outer-name # use of fixtures
# pylint: disable=unused-argument # use of fixtures for their side effects
#
# IMPORTS
#
from pathlib import Path
from tessia.baselib.hypervisors.hmc.volume_descriptor import FcpVolumeDescriptor
from tessia.server.config import Config
from tessia.server.state_machines.autoinstall import plat_lpar, plat_zvm, plat_kvm
from tessia.server.state_machines.autoinstall import plat_base, sm_base
from tessia.server.state_machines.autoinstall.model import AutoinstallMachineModel
from tessia.server.state_machines.autoinstall.sm_base import SmBase
from tests_pytest.decorators import tracked
from tests_pytest.state_machines.ssh_stub import SshClient
from tests_pytest.state_machines.null_hypervisor import NullHypervisor
import pytest
import yaml
#
# CONSTANTS AND DEFINITIONS
#
CREDS = {'user': 'unit', 'password': 'test'}
#
# CODE
#
class NullMachine(SmBase):
"""
Concrete SmBase implementation
    This implementation exercises all the common paths without introducing
    any distro specifics (e.g. termination conditions or log lines)
"""
def __init__(self, model: AutoinstallMachineModel,
platform: plat_base.PlatBase, *args, **kwargs):
"""
Initialize SmBase
"""
super().__init__(model, platform, *args, **kwargs)
    @property
    def DISTRO_TYPE(self):  # pylint: disable=invalid-name
        """
        Return the type of linux distribution supported.
        """
        return "null"
    # DISTRO_TYPE
def wait_install(self):
"""
Consider operating system installed and return immediately
"""
# wait_install()
class NullPostInstallChecker:
"""
    PostInstallChecker stub that records whether it has been called
"""
@tracked
def verify(self):
"""
Public method to verify installed system
"""
return []
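# For reference, a minimal sketch of the @tracked decorator assumed by these
# stubs (the real implementation lives in tests_pytest.decorators); it records
# positional arguments so tests can assert on calls afterwards:
#
#     def tracked(func):
#         def wrapper(self, *args, **kwargs):
#             wrapper.calls.append(args)
#             wrapper.called_once = len(wrapper.calls) == 1
#             return func(self, *args, **kwargs)
#         wrapper.calls = []
#         wrapper.called_once = False
#         return wrapper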
class TestModelUpdate:
"""
Test model updates during autoinstallation
"""
class UpdatingHypervisor(NullHypervisor):
"""
Hypervisor that returns some valid data about storage volumes
"""
@tracked
def query_dpm_storage_devices(self, guest_name):
"""Query storage devices on DPM"""
return [
FcpVolumeDescriptor(
**{'uri': '/api/storage-volumes/1', 'attachment': 'fcp',
'is_fulfilled': True, 'size': 19.07,
'uuid': '6005076309FFD435000000000000CD0F',
'paths': [{'device_nr': 'FC00',
'wwpn': '5005076309049435',
'lun': 'CD0F0000'}]
})]
@pytest.fixture
def scsi_volume_without_paths(self):
"""
A single-partition SCSI volume
"""
result = AutoinstallMachineModel.ZfcpVolume(
'cd0f0000', 20_000_000, multipath=True,
wwid='36005076309ffd435000000000000cd0f')
result.set_partitions('msdos', [{
'mount_point': '/data',
'size': 18_000,
'filesystem': 'ext4',
'part_type': 'primary',
'mount_opts': None,
}])
yield result
@pytest.fixture(autouse=True)
def mock_hypervisors(self, monkeypatch):
"""
        Use hypervisor stub instead of real sessions
"""
monkeypatch.setattr(plat_lpar, 'HypervisorHmc',
TestModelUpdate.UpdatingHypervisor)
def test_model_update_adds_fcp_paths(
self, lpar_scsi_system, default_os_tuple, tmpdir,
scsi_volume_without_paths):
"""
Attempt to install "nothing" on an LPAR on SCSI disk
Verify that hypervisor is called with correct parameters
and post-install checker is run
"""
model = AutoinstallMachineModel(*default_os_tuple,
lpar_scsi_system, CREDS)
model.system_profile.add_volume(scsi_volume_without_paths)
checker = NullPostInstallChecker()
hyp = plat_lpar.PlatLpar.create_hypervisor(model)
platform = plat_lpar.PlatLpar(model, hyp)
# autoinstall machines use their own working directory
# and have to be initialized in a temporary environment
with tmpdir.as_cwd():
smbase = NullMachine(model, platform, checker)
smbase.start()
assert len(model.system_profile.volumes) == 2
assert model.system_profile.volumes[1].paths
@pytest.fixture(autouse=True)
def mock_config(monkeypatch, tmp_path):
"""
Set default configuration
"""
def get_config():
"""
Configuration for use in tests
"""
# use a temporary path for storing templates
return {
'auto_install': {
'url': 'http://server_1:5000/',
'dir': str(tmp_path),
'live_img_passwd': 'liveimage'
}
}
monkeypatch.setattr(Config, 'get_config', get_config)
@pytest.fixture(autouse=True)
def mock_hypervisors(monkeypatch):
"""
    Use hypervisor stub instead of real sessions
"""
monkeypatch.setattr(plat_lpar, 'HypervisorHmc', NullHypervisor)
monkeypatch.setattr(plat_zvm, 'HypervisorZvm', NullHypervisor)
monkeypatch.setattr(plat_kvm, 'HypervisorKvm', NullHypervisor)
@pytest.fixture(autouse=True)
def mock_ssh(monkeypatch):
"""
Use ssh stub instead of real sessions
"""
monkeypatch.setattr(plat_base, 'SshClient', SshClient)
monkeypatch.setattr(plat_kvm, 'SshClient', SshClient)
monkeypatch.setattr(sm_base, 'SshClient', SshClient)
def test_boot_and_postinstall_check_on_lpar_dasd(
lpar_dasd_system, default_os_tuple, tmpdir):
"""
Attempt to install "nothing" on an LPAR on DASD disk
Verify that hypervisor is called with correct parameters
and post-install checker is run
"""
model = AutoinstallMachineModel(*default_os_tuple,
lpar_dasd_system, CREDS)
checker = NullPostInstallChecker()
hyp = plat_lpar.PlatLpar.create_hypervisor(model)
platform = plat_lpar.PlatLpar(model, hyp)
# autoinstall machines use their own working directory
# and have to be initialized in a temporary environment
with tmpdir.as_cwd():
smbase = NullMachine(model, platform, checker)
smbase.start()
assert checker.verify.called_once
sys, cpus, mem, attrs, *_ = hyp.start.calls[0]
assert sys == lpar_dasd_system.hypervisor.boot_options['partition-name']
assert cpus == lpar_dasd_system.cpus
assert mem == lpar_dasd_system.memory
# installation device does not show up in HmcHypervisor boot,
# it is only used later during installation
assert attrs['boot_params']['boot_method'] == 'dasd'
assert attrs['boot_params']['devicenr'] == \
lpar_dasd_system.hypervisor.boot_options['boot-device']
def test_boot_and_postinstall_check_on_lpar_scsi(
lpar_scsi_system, default_os_tuple, tmpdir):
"""
Attempt to install "nothing" on an LPAR on SCSI disk
Verify that hypervisor is called with correct parameters
and post-install checker is run
"""
model = AutoinstallMachineModel(*default_os_tuple,
lpar_scsi_system, CREDS)
checker = NullPostInstallChecker()
hyp = plat_lpar.PlatLpar.create_hypervisor(model)
platform = plat_lpar.PlatLpar(model, hyp)
# autoinstall machines use their own working directory
# and have to be initialized in a temporary environment
with tmpdir.as_cwd():
smbase = NullMachine(model, platform, checker)
smbase.start()
assert checker.verify.called_once
sys, cpus, mem, attrs, *_ = hyp.start.calls[0]
assert sys == lpar_scsi_system.hypervisor.boot_options['partition-name']
assert cpus == lpar_scsi_system.cpus
assert mem == lpar_scsi_system.memory
# installation device does not show up in HmcHypervisor boot,
# it is only used later during installation
assert attrs['boot_params']['boot_method'] == 'dasd'
assert attrs['boot_params']['devicenr'] == \
lpar_scsi_system.hypervisor.boot_options['boot-device']
def test_boot_and_postinstall_check_on_vm_dasd(
vm_dasd_system, default_os_tuple, tmpdir):
"""
Attempt to install "nothing" on a VM on DASD disk
Verify that hypervisor is called with correct parameters
and post-install checker is run
"""
model = AutoinstallMachineModel(*default_os_tuple,
vm_dasd_system, CREDS)
checker = NullPostInstallChecker()
hyp = plat_zvm.PlatZvm.create_hypervisor(model)
platform = plat_zvm.PlatZvm(model, hyp)
# autoinstall machines use their own working directory
# and have to be initialized in a temporary environment
with tmpdir.as_cwd():
smbase = NullMachine(model, platform, checker)
smbase.start()
assert checker.verify.called_once
sys, cpus, mem, attrs, *_ = hyp.start.calls[0]
assert sys == vm_dasd_system.system_name
assert cpus == vm_dasd_system.cpus
assert mem == vm_dasd_system.memory
assert vm_dasd_system.volumes[0].device_id == \
attrs['storage_volumes'][0]['devno']
def test_boot_and_postinstall_check_on_vm_scsi(
vm_scsi_system, default_os_tuple, tmpdir):
"""
Attempt to install "nothing" on a VM on SCSI disk
Verify that hypervisor is called with correct parameters
and post-install checker is run
"""
model = AutoinstallMachineModel(*default_os_tuple,
vm_scsi_system, CREDS)
checker = NullPostInstallChecker()
hyp = plat_zvm.PlatZvm.create_hypervisor(model)
platform = plat_zvm.PlatZvm(model, hyp)
# autoinstall machines use their own working directory
# and have to be initialized in a temporary environment
with tmpdir.as_cwd():
smbase = NullMachine(model, platform, checker)
smbase.start()
assert checker.verify.called_once
sys, cpus, mem, attrs, *_ = hyp.start.calls[0]
assert sys == vm_scsi_system.system_name
assert cpus == vm_scsi_system.cpus
assert mem == vm_scsi_system.memory
assert vm_scsi_system.volumes[0].lun == \
attrs['storage_volumes'][0]['lun']
def test_boot_and_postinstall_check_on_kvm_scsi(
kvm_scsi_system, default_os_tuple, tmpdir):
"""
Attempt to install "nothing" on a KVM on SCSI disk
Verify correct device paths
and that hypervisor is called with correct parameters
and post-install checker is run
"""
model = AutoinstallMachineModel(*default_os_tuple,
kvm_scsi_system, CREDS)
checker = NullPostInstallChecker()
hyp = plat_kvm.PlatKvm.create_hypervisor(model)
platform = plat_kvm.PlatKvm(model, hyp)
# autoinstall machines use their own working directory
# and have to be initialized in a temporary environment
with tmpdir.as_cwd():
smbase = NullMachine(model, platform, checker)
smbase.start()
assert checker.verify.called_once
sys, cpus, mem, attrs, *_ = hyp.start.calls[0]
assert sys == kvm_scsi_system.system_name
assert cpus == kvm_scsi_system.cpus
assert mem == kvm_scsi_system.memory
assert kvm_scsi_system.volumes[0].lun == \
attrs['storage_volumes'][0]['volume_id']
for volume in model.system_profile.volumes:
assert '/dev/disk/by-path/ccw' in volume.device_path
def test_network_boot_on_lpar_scsi(
scsi_volume, osa_iface, default_os_tuple, tmpdir):
"""
Attempt to install "nothing" on an LPAR on SCSI disk
using network boot
Verify that hypervisor is called with correct parameters
"""
ins_file = 'user@password:inst.local/some-os/boot.ins'
hmc_hypervisor = AutoinstallMachineModel.HmcHypervisor(
'hmc', 'hmc.local',
{'user': '', 'password': ''},
{
'partition-name': 'LP10',
'boot-method': 'network',
'boot-uri': 'ftp://' + ins_file,
})
system = AutoinstallMachineModel.SystemProfile(
'lp10', 'default',
hypervisor=hmc_hypervisor,
hostname='lp10.local',
cpus=2, memory=8192,
volumes=[scsi_volume],
interfaces=[(osa_iface, True)]
)
model = AutoinstallMachineModel(*default_os_tuple, system, CREDS)
hyp = plat_lpar.PlatLpar.create_hypervisor(model)
platform = plat_lpar.PlatLpar(model, hyp)
with tmpdir.as_cwd():
smbase = NullMachine(model, platform)
smbase.start()
sys, cpus, mem, attrs, *_ = hyp.start.calls[0]
assert sys == hmc_hypervisor.boot_options['partition-name']
assert cpus == system.cpus
assert mem == system.memory
assert attrs['boot_params']['boot_method'] == 'ftp'
assert attrs['boot_params']['insfile'] == ins_file
def test_template_lpar_dasd(lpar_dasd_system, default_os_tuple, tmpdir):
"""
Test major template parameters
"""
*os_tuple, _, _ = default_os_tuple
package_repo = AutoinstallMachineModel.PackageRepository(
'aux', 'http://example.com/repo', 'package repo')
model = AutoinstallMachineModel(
*os_tuple, [], [package_repo], lpar_dasd_system, CREDS)
hyp = plat_lpar.PlatLpar.create_hypervisor(model)
platform = plat_lpar.PlatLpar(model, hyp)
with tmpdir.as_cwd():
smbase = NullMachine(model, platform)
autofile_path = (Path.cwd() / 'lp10-default')
smbase.start()
autofile = yaml.safe_load(autofile_path.read_text())
assert autofile['system']['type'] == 'LPAR'
assert autofile['system']['hostname'] == 'lp10.local'
assert autofile['gw_iface']['type'] == 'OSA'
assert autofile['gw_iface']['osname'] == 'enccw0b01'
assert autofile['gw_iface']['search_list'] == ['example.com', 'local']
assert autofile['ifaces'][0]['osname'] == 'enccw0b01'
assert autofile['volumes'][0]['type'] == 'DASD'
assert autofile['volumes'][0]['partitions'] == [
{'fs': 'ext4', 'mp': '/', 'size': '18000M'}
]
assert autofile['repos'][0]['name'] == 'os-repo'
assert autofile['repos'][1]['name'] == 'aux'
def test_template_kvm_scsi(kvm_scsi_system, default_os_tuple, tmpdir):
"""
Test major template parameters
"""
model = AutoinstallMachineModel(*default_os_tuple,
kvm_scsi_system, CREDS)
hyp = plat_kvm.PlatKvm.create_hypervisor(model)
platform = plat_kvm.PlatKvm(model, hyp)
with tmpdir.as_cwd():
smbase = NullMachine(model, platform)
autofile_path = (Path.cwd() / 'kvm54-default')
smbase.start()
autofile = yaml.safe_load(autofile_path.read_text())
assert autofile['system']['type'] == 'KVM'
assert autofile['system']['hostname'] == 'kvm54.local'
assert autofile['gw_iface']['type'] == 'MACVTAP'
assert autofile['gw_iface']['osname'] == 'eth0'
assert autofile['ifaces'][0]['is_gateway']
| 34.326087 | 82 | 0.664155 | 3,663 | 0.231982 | 499 | 0.031602 | 2,804 | 0.177581 | 0 | 0 | 5,652 | 0.357948 |
8d5578255a37005da9d4bcc07955742be9a91579 | 2,261 | py | Python | tests/test_command/test_cat_command.py | bbglab/openvariant | ea1e1b6edf0486b0dea34f43227ba333df1071cc | ["BSD-3-Clause"] | null | null | null | tests/test_command/test_cat_command.py | bbglab/openvariant | ea1e1b6edf0486b0dea34f43227ba333df1071cc | ["BSD-3-Clause"] | null | null | null | tests/test_command/test_cat_command.py | bbglab/openvariant | ea1e1b6edf0486b0dea34f43227ba333df1071cc | ["BSD-3-Clause"]
| null | null | null | import unittest
from os import getcwd
from click.testing import CliRunner
from openvariant.commands.openvar import cat
class TestCatCommand(unittest.TestCase):
def test_cat_command(self):
runner = CliRunner()
result = runner.invoke(cat, [f'{getcwd()}/tests/data/dataset'])
self.assertEqual(result.exit_code, 0)
self.assertNotEqual(result.output, None)
def test_cat_command_all_flags(self):
runner = CliRunner()
result = runner.invoke(cat, [f'{getcwd()}/tests/data/dataset', '--header', '--where', "variant == 'DEL'",
'--annotations', f'{getcwd()}/tests/data/dataset/dataset.yaml'])
self.assertEqual(result.exit_code, 0)
self.assertNotEqual(result.output, None)
def test_cat_path_no_exist_command_input(self):
runner = CliRunner()
result = runner.invoke(cat, [f'{getcwd()}/tests/data/no_exist'])
self.assertTrue(f"Error: Invalid value for '[INPUT_PATH]': Path '{getcwd()}/tests/data/no_exist' does not exist."
in result.output)
self.assertEqual(result.exit_code, 2)
def test_cat_path_command_no_exist_where_flag(self):
runner = CliRunner()
result = runner.invoke(cat, [f'{getcwd()}/tests/data/dataset', '--where', "variant=no_exist"])
self.assertEqual(result.exit_code, 1)
def test_cat_command_invalid_where(self):
runner = CliRunner()
result = runner.invoke(cat, [f'{getcwd()}/tests/data/dataset', '--where'])
self.assertEqual(result.exit_code, 2)
def test_cat_command_no_exist_annotation(self):
runner = CliRunner()
result = runner.invoke(cat, [f'{getcwd()}/tests/data/dataset', '--annotations',
f'{getcwd()}/tests/data/dataset/no_exist.yaml'])
self.assertEqual(result.exit_code, 2)
def test_cat_command_invalid_annotation(self):
runner = CliRunner()
result = runner.invoke(cat, [f'{getcwd()}/tests/data/dataset', '--annotations'])
self.assertEqual(result.exit_code, 2)
def test_cat_command_no_args(self):
runner = CliRunner()
result = runner.invoke(cat, [])
self.assertEqual(result.exit_code, 1)
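# The shell equivalent of the happy path exercised above would be roughly
# (assuming the package installs an `openvar` entry point):
#   openvar cat tests/data/dataset --header --where "variant == 'DEL'" \
#       --annotations tests/data/dataset/dataset.yaml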
| 37.065574 | 121 | 0.640867 | 2,137 | 0.945157 | 0 | 0 | 0 | 0 | 0 | 0 | 531 | 0.234852 |
8d559eab2b8075257716e7bc85f5c9d82b0d3221 | 4,766 | py | Python | resnet.py | rVSaxena/VAE | 26aa3452a0c8f663153d8cfc8bf1686e242d2fac | ["Unlicense"] | null | null | null | resnet.py | rVSaxena/VAE | 26aa3452a0c8f663153d8cfc8bf1686e242d2fac | ["Unlicense"] | null | null | null | resnet.py | rVSaxena/VAE | 26aa3452a0c8f663153d8cfc8bf1686e242d2fac | ["Unlicense"]
| null | null | null | import torch
import torch.nn as nn
class ResidualEncoderBlock(nn.Module):
"""
    Implements a residual block for rectangular feature maps where the output
    spatial shape equals the input shape, OR is the input shape halved in
    every spatial dimension (when downsample is True).
    In the latter case, the input must be even-sized in every spatial dimension.
"""
def __init__(self, in_channels, out_channels, downsample, kernel_dim=3, normalizer=nn.BatchNorm2d, **kwargs):
"""
in_channels: int
out_channels: int
downsample: Boolean
kernel_dim: int
use nn.Identity to skip normalization
"""
assert kernel_dim%2==1, "Only odd kernel dimensions supported. Received {}".format(kernel_dim)
super(ResidualEncoderBlock, self).__init__()
self.downsample=downsample
self.in_channels=in_channels
self.out_channels=out_channels
self.Normalizer1=normalizer(out_channels)
self.Normalizer2=normalizer(out_channels)
# SAK is shape adjusting kernel
if self.downsample or (in_channels!=out_channels):
if not self.downsample:
self.SAK=nn.Conv2d(in_channels, out_channels, 1, stride=1, bias=False)
else:
self.SAK=nn.Conv2d(in_channels, out_channels, 1, stride=2, bias=False)
stride=2 if self.downsample else 1
self.conv1=nn.Conv2d(in_channels, out_channels, kernel_dim, stride=stride, padding=int((kernel_dim-1)/2))
self.conv2=nn.Conv2d(out_channels, out_channels, kernel_dim, padding=int((kernel_dim-1)/2)) # this one maintains shape. So stride 1 and padd=(k-1)/2 work
self.activation1=nn.ReLU(inplace=True)
self.activation2=nn.ReLU(inplace=True)
return
def forward(self, x):
"""
The shape of x must be accd to (n,c,h,w)
"""
# compute the first block
out=self.conv1(x)
out=self.Normalizer1(out)
out=self.activation1(out)
# ready the input for addition
if self.downsample or (self.in_channels!=self.out_channels):
x=self.SAK(x)
# compute the output
out=self.conv2(out)
out=self.Normalizer2(out)+x
out=self.activation2(out)
return out
class ResidualDecoderBlock(nn.Module):
"""
    Implements a residual block for rectangular feature maps where the output
    spatial shape equals the input shape, OR is the input shape doubled in
    every spatial dimension (when upsample is True).
"""
def __init__(self, in_channels, out_channels, upsample, kernel_dim=3, normalizer=nn.BatchNorm2d, **kwargs):
"""
in_channels: int
out_channels: int
upsample: Boolean
kernel_dim: int
use nn.Identity to skip normalization
"""
assert kernel_dim%2==1, "Only odd dimension supported, got {}".format(kernel_dim)
super(ResidualDecoderBlock, self).__init__()
self.upsample=upsample
self.in_channels=in_channels
self.out_channels=out_channels
self.Normalizer1=normalizer(out_channels)
self.Normalizer2=normalizer(out_channels)
# SAK is shape adjusting kernel
if self.upsample or (in_channels!=out_channels):
if not self.upsample:
self.SAK=nn.ConvTranspose2d(in_channels, out_channels, 1, stride=1, bias=False, padding=0)
else:
self.SAK=nn.ConvTranspose2d(in_channels, out_channels, 1, stride=2, bias=False, padding=0, output_padding=1)
stride=2 if self.upsample else 1
output_pad=1 if self.upsample else 0
self.conv1=nn.ConvTranspose2d(in_channels, out_channels, kernel_dim, stride=stride, padding=int((kernel_dim-1)/2), output_padding=output_pad)
        self.conv2=nn.ConvTranspose2d(out_channels, out_channels, kernel_dim, padding=int((kernel_dim-1)/2)) # this one maintains shape. So stride 1 and padd=(k-1)/2 work
self.activation1=nn.ReLU(inplace=True)
self.activation2=nn.ReLU(inplace=True)
return
def forward(self, x):
"""
The shape of x must be accd to (n,c,h,w)
"""
# compute the first block
out=self.conv1(x)
out=self.Normalizer1(out)
out=self.activation1(out)
# ready the input for addition
if self.upsample or (self.in_channels!=self.out_channels):
x=self.SAK(x)
# compute the output
out=self.conv2(out)
out=self.Normalizer2(out)+x
out=self.activation2(out)
return out
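# Minimal shape sanity check (hypothetical usage, not part of the original
# module): a downsampling encoder block halves the spatial dimensions and the
# matching decoder block doubles them back.
if __name__ == '__main__':
    x = torch.randn(4, 16, 32, 32)
    enc = ResidualEncoderBlock(16, 32, downsample=True)
    dec = ResidualDecoderBlock(32, 16, upsample=True)
    y = enc(x)
    print(y.shape)         # torch.Size([4, 32, 16, 16])
    print(dec(y).shape)    # torch.Size([4, 16, 32, 32])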
| 34.042857 | 170 | 0.640579 | 4,724 | 0.991188 | 0 | 0 | 0 | 0 | 0 | 0 | 1,479 | 0.310323 |
8d56bf9a638e31e26421d0d5ccd052c3c7de5f95 | 246 | py | Python | camknows/camknows.py | dreoporto/camknows | 769aeb91ff16ff654aa1b182f3564dd26a0f7ad6 | ["MIT"] | 2 | 2021-09-20T12:29:57.000Z | 2021-09-28T11:09:06.000Z | camknows/camknows.py | dreoporto/camknows | 769aeb91ff16ff654aa1b182f3564dd26a0f7ad6 | ["MIT"] | null | null | null | camknows/camknows.py | dreoporto/camknows | 769aeb91ff16ff654aa1b182f3564dd26a0f7ad6 | ["MIT"]
| null | null | null |
from camera import Camera
def main() -> None:
try:
camera = Camera()
camera.start_camera_loop()
except KeyboardInterrupt:
print('Application closed (KeyboardInterrupt)')
if __name__ == '__main__':
main()
| 15.375 | 55 | 0.630081 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 50 | 0.203252 |