| code (stringlengths 13 to 6.09M) | order_type (stringclasses, 2 values) | original_example (dict) | step_ids (listlengths 1 to 5) |
|---|---|---|---|
import json
import requests
from pyyoutube import Api
def get_data(YOUTUBE_API_KEY, videoId, maxResults, nextPageToken):
    """
    Fetch a page of comment threads for a video by its video id
    from the YouTube Data API v3 commentThreads endpoint.
    """
    YOUTUBE_URI = 'https://www.googleapis.com/youtube/v3/commentThreads?key={KEY}&textFormat=plainText&' + \
                  'part=snippet&videoId={videoId}&maxResults={maxResults}&pageToken={nextPageToken}'
    format_youtube_uri = YOUTUBE_URI.format(KEY=YOUTUBE_API_KEY,
                                            videoId=videoId,
                                            maxResults=maxResults,
                                            nextPageToken=nextPageToken)
    content = requests.get(format_youtube_uri).text
    data = json.loads(content)
    return data
def get_text_of_comment(data):
    """
    Extract the top-level comment texts for a single video from the API response.
    """
    comms = set()
    for item in data['items']:
        comm = item['snippet']['topLevelComment']['snippet']['textDisplay']
        comms.add(comm)
    return comms
def get_all_comments(YOUTUBE_API_KEY, query, count_video=10, limit=30, maxResults=10, nextPageToken=''):
    """
    Download up to maxResults comments per video found for the search query.
    """
    api = Api(api_key=YOUTUBE_API_KEY)
    video_by_keywords = api.search_by_keywords(q=query,
                                               search_type=["video"],
                                               count=count_video,
                                               limit=limit)
    videoId = [x.id.videoId for x in video_by_keywords.items]
    comments_all = []
    for id_video in videoId:
        try:
            data = get_data(YOUTUBE_API_KEY,
                            id_video,
                            maxResults=maxResults,
                            nextPageToken=nextPageToken)
            comment = list(get_text_of_comment(data))
            comments_all.append(comment)
        except Exception:
            # skip videos whose comments cannot be fetched (e.g. comments disabled)
            continue
    comments = sum(comments_all, [])
    return comments
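# A minimal usage sketch (not part of the original module): it assumes a valid
# YouTube Data API key in the YOUTUBE_API_KEY environment variable; the search
# query and limits below are illustrative only.
if __name__ == '__main__':
    import os
    demo_comments = get_all_comments(os.environ.get('YOUTUBE_API_KEY', ''),
                                     query='python tutorial',
                                     count_video=2,
                                     maxResults=5)
    print('Fetched {} comments'.format(len(demo_comments)))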
| flexible |
{
"blob_id": "4ed5ceb784fb1e3046ab9f10c4b556f2e94274db",
"index": 7054,
"step-1": "<mask token>\n\n\ndef get_data(YOUTUBE_API_KEY, videoId, maxResults, nextPageToken):\n \"\"\"\n Получение информации со страницы с видео по video id\n \"\"\"\n YOUTUBE_URI = (\n 'https://www.googleapis.com/youtube/v3/commentThreads?key={KEY}&textFormat=plainText&'\n +\n 'part=snippet&videoId={videoId}&maxResults={maxResults}&pageToken={nextPageToken}'\n )\n format_youtube_uri = YOUTUBE_URI.format(KEY=YOUTUBE_API_KEY, videoId=\n videoId, maxResults=maxResults, nextPageToken=nextPageToken)\n content = requests.get(format_youtube_uri).text\n data = json.loads(content)\n return data\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef get_data(YOUTUBE_API_KEY, videoId, maxResults, nextPageToken):\n \"\"\"\n Получение информации со страницы с видео по video id\n \"\"\"\n YOUTUBE_URI = (\n 'https://www.googleapis.com/youtube/v3/commentThreads?key={KEY}&textFormat=plainText&'\n +\n 'part=snippet&videoId={videoId}&maxResults={maxResults}&pageToken={nextPageToken}'\n )\n format_youtube_uri = YOUTUBE_URI.format(KEY=YOUTUBE_API_KEY, videoId=\n videoId, maxResults=maxResults, nextPageToken=nextPageToken)\n content = requests.get(format_youtube_uri).text\n data = json.loads(content)\n return data\n\n\n<mask token>\n\n\ndef get_all_comments(YOUTUBE_API_KEY, query, count_video=10, limit=30,\n maxResults=10, nextPageToken=''):\n \"\"\"\n Выгрузка maxResults комментариев\n \"\"\"\n api = Api(api_key=YOUTUBE_API_KEY)\n video_by_keywords = api.search_by_keywords(q=query, search_type=[\n 'video'], count=count_video, limit=limit)\n videoId = [x.id.videoId for x in video_by_keywords.items]\n comments_all = []\n for id_video in videoId:\n try:\n data = get_data(YOUTUBE_API_KEY, id_video, maxResults=\n maxResults, nextPageToken=nextPageToken)\n comment = list(get_text_of_comment(data))\n comments_all.append(comment)\n except:\n continue\n comments = sum(comments_all, [])\n return comments\n",
"step-3": "<mask token>\n\n\ndef get_data(YOUTUBE_API_KEY, videoId, maxResults, nextPageToken):\n \"\"\"\n Получение информации со страницы с видео по video id\n \"\"\"\n YOUTUBE_URI = (\n 'https://www.googleapis.com/youtube/v3/commentThreads?key={KEY}&textFormat=plainText&'\n +\n 'part=snippet&videoId={videoId}&maxResults={maxResults}&pageToken={nextPageToken}'\n )\n format_youtube_uri = YOUTUBE_URI.format(KEY=YOUTUBE_API_KEY, videoId=\n videoId, maxResults=maxResults, nextPageToken=nextPageToken)\n content = requests.get(format_youtube_uri).text\n data = json.loads(content)\n return data\n\n\ndef get_text_of_comment(data):\n \"\"\"\n Получение комментариев из полученных данных под одним видео\n \"\"\"\n comms = set()\n for item in data['items']:\n comm = item['snippet']['topLevelComment']['snippet']['textDisplay']\n comms.add(comm)\n return comms\n\n\ndef get_all_comments(YOUTUBE_API_KEY, query, count_video=10, limit=30,\n maxResults=10, nextPageToken=''):\n \"\"\"\n Выгрузка maxResults комментариев\n \"\"\"\n api = Api(api_key=YOUTUBE_API_KEY)\n video_by_keywords = api.search_by_keywords(q=query, search_type=[\n 'video'], count=count_video, limit=limit)\n videoId = [x.id.videoId for x in video_by_keywords.items]\n comments_all = []\n for id_video in videoId:\n try:\n data = get_data(YOUTUBE_API_KEY, id_video, maxResults=\n maxResults, nextPageToken=nextPageToken)\n comment = list(get_text_of_comment(data))\n comments_all.append(comment)\n except:\n continue\n comments = sum(comments_all, [])\n return comments\n",
"step-4": "import json\nimport requests\nfrom pyyoutube import Api\n\n\ndef get_data(YOUTUBE_API_KEY, videoId, maxResults, nextPageToken):\n \"\"\"\n Получение информации со страницы с видео по video id\n \"\"\"\n YOUTUBE_URI = (\n 'https://www.googleapis.com/youtube/v3/commentThreads?key={KEY}&textFormat=plainText&'\n +\n 'part=snippet&videoId={videoId}&maxResults={maxResults}&pageToken={nextPageToken}'\n )\n format_youtube_uri = YOUTUBE_URI.format(KEY=YOUTUBE_API_KEY, videoId=\n videoId, maxResults=maxResults, nextPageToken=nextPageToken)\n content = requests.get(format_youtube_uri).text\n data = json.loads(content)\n return data\n\n\ndef get_text_of_comment(data):\n \"\"\"\n Получение комментариев из полученных данных под одним видео\n \"\"\"\n comms = set()\n for item in data['items']:\n comm = item['snippet']['topLevelComment']['snippet']['textDisplay']\n comms.add(comm)\n return comms\n\n\ndef get_all_comments(YOUTUBE_API_KEY, query, count_video=10, limit=30,\n maxResults=10, nextPageToken=''):\n \"\"\"\n Выгрузка maxResults комментариев\n \"\"\"\n api = Api(api_key=YOUTUBE_API_KEY)\n video_by_keywords = api.search_by_keywords(q=query, search_type=[\n 'video'], count=count_video, limit=limit)\n videoId = [x.id.videoId for x in video_by_keywords.items]\n comments_all = []\n for id_video in videoId:\n try:\n data = get_data(YOUTUBE_API_KEY, id_video, maxResults=\n maxResults, nextPageToken=nextPageToken)\n comment = list(get_text_of_comment(data))\n comments_all.append(comment)\n except:\n continue\n comments = sum(comments_all, [])\n return comments\n",
"step-5": "import json\n\nimport requests\nfrom pyyoutube import Api\n\n\ndef get_data(YOUTUBE_API_KEY, videoId, maxResults, nextPageToken):\n \"\"\"\n Получение информации со страницы с видео по video id\n \"\"\"\n YOUTUBE_URI = 'https://www.googleapis.com/youtube/v3/commentThreads?key={KEY}&textFormat=plainText&' + \\\n 'part=snippet&videoId={videoId}&maxResults={maxResults}&pageToken={nextPageToken}'\n format_youtube_uri = YOUTUBE_URI.format(KEY=YOUTUBE_API_KEY,\n videoId=videoId,\n maxResults=maxResults,\n nextPageToken=nextPageToken)\n content = requests.get(format_youtube_uri).text\n data = json.loads(content)\n return data\n\n\ndef get_text_of_comment(data):\n \"\"\"\n Получение комментариев из полученных данных под одним видео\n \"\"\"\n comms = set()\n for item in data['items']:\n comm = item['snippet']['topLevelComment']['snippet']['textDisplay']\n comms.add(comm)\n return comms\n\n\ndef get_all_comments(YOUTUBE_API_KEY, query, count_video=10, limit=30, maxResults=10, nextPageToken=''):\n \"\"\"\n Выгрузка maxResults комментариев\n \"\"\"\n api = Api(api_key=YOUTUBE_API_KEY)\n video_by_keywords = api.search_by_keywords(q=query,\n search_type=[\"video\"],\n count=count_video,\n limit=limit)\n videoId = [x.id.videoId for x in video_by_keywords.items]\n\n comments_all = []\n for id_video in videoId:\n try:\n data = get_data(YOUTUBE_API_KEY,\n id_video,\n maxResults=maxResults,\n nextPageToken=nextPageToken)\n comment = list(get_text_of_comment(data))\n comments_all.append(comment)\n except:\n continue\n comments = sum(comments_all, [])\n return comments\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
| [1, 2, 3, 4, 5] |
import os
basedir = os.path.abspath(os.path.dirname(__file__))
class FlaskConfig(object):
SECRET_KEY = os.environ.get('FLASK_SECRET_KEY') or 'TuLAsWbcoKr5YhDE'
BOOTSTRAP_SERVE_LOCAL = os.environ.get('FLASK_BOOTSTRAP_SERVE_LOCAL') or True
APPLICATION_ROOT = os.environ.get('FLASK_APPLICATION_ROOT') or ''
# SQLALCHEMY_DATABASE_URI = os.environ.get('DATABASE_URL') or \
# 'sqlite:///' + os.path.join(basedir, 'app.db')
# SQLALCHEMY_TRACK_MODIFICATIONS = False
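# A minimal sketch (not part of the original snippet) of how this config class is
# typically applied to a Flask application; the demo below only runs when the
# module is executed directly and assumes Flask is installed.
if __name__ == '__main__':
    from flask import Flask
    app = Flask(__name__)
    app.config.from_object(FlaskConfig)
    print(app.config['APPLICATION_ROOT'], app.config['BOOTSTRAP_SERVE_LOCAL'])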
| flexible |
{
"blob_id": "a0349abb3a56ff4bc1700dbf0fa5a1fc2e3453ce",
"index": 6469,
"step-1": "<mask token>\n\n\nclass FlaskConfig(object):\n <mask token>\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass FlaskConfig(object):\n SECRET_KEY = os.environ.get('FLASK_SECRET_KEY') or 'TuLAsWbcoKr5YhDE'\n BOOTSTRAP_SERVE_LOCAL = os.environ.get('FLASK_BOOTSTRAP_SERVE_LOCAL'\n ) or True\n APPLICATION_ROOT = os.environ.get('FLASK_APPLICATION_ROOT') or ''\n",
"step-3": "<mask token>\nbasedir = os.path.abspath(os.path.dirname(__file__))\n\n\nclass FlaskConfig(object):\n SECRET_KEY = os.environ.get('FLASK_SECRET_KEY') or 'TuLAsWbcoKr5YhDE'\n BOOTSTRAP_SERVE_LOCAL = os.environ.get('FLASK_BOOTSTRAP_SERVE_LOCAL'\n ) or True\n APPLICATION_ROOT = os.environ.get('FLASK_APPLICATION_ROOT') or ''\n",
"step-4": "import os\nbasedir = os.path.abspath(os.path.dirname(__file__))\n\n\nclass FlaskConfig(object):\n SECRET_KEY = os.environ.get('FLASK_SECRET_KEY') or 'TuLAsWbcoKr5YhDE'\n BOOTSTRAP_SERVE_LOCAL = os.environ.get('FLASK_BOOTSTRAP_SERVE_LOCAL'\n ) or True\n APPLICATION_ROOT = os.environ.get('FLASK_APPLICATION_ROOT') or ''\n",
"step-5": "import os\n\nbasedir = os.path.abspath(os.path.dirname(__file__))\n\nclass FlaskConfig(object):\n SECRET_KEY = os.environ.get('FLASK_SECRET_KEY') or 'TuLAsWbcoKr5YhDE'\n BOOTSTRAP_SERVE_LOCAL = os.environ.get('FLASK_BOOTSTRAP_SERVE_LOCAL') or True\n APPLICATION_ROOT = os.environ.get('FLASK_APPLICATION_ROOT') or ''\n # SQLALCHEMY_DATABASE_URI = os.environ.get('DATABASE_URL') or \\\n # 'sqlite:///' + os.path.join(basedir, 'app.db')\n # SQLALCHEMY_TRACK_MODIFICATIONS = False\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
| [1, 2, 3, 4, 5] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os, pickle, json, ast
import pandas as pd
from scipy import spatial
import numpy as np
from scipy import stats
from sklearn.model_selection import train_test_split
from sklearn import preprocessing
from sklearn.svm import LinearSVC
from sklearn.metrics import precision_score, recall_score, f1_score
from datetime import date
from sklearn.neural_network import MLPClassifier
class TaskSolver:
W2V_DICT = dict()
def __init__(self):
pass
def solve(self, task_name, **kwargs):
self.gen_w2v_dict()
if task_name == 'k-nearest-words':
self.task_k_nearest_words(kwargs.get('k'), kwargs.get('word'))
elif task_name == 'synonym-antonym-classification':
self.task_synonym_antonym_classification()
elif task_name == 'test-cosin-similarity-with-visim-400-dataset':
self.test_with_visim_400_data_set()
    def task_calculate_cosin_similarity(self, word1, word2, print_to_screen=True):
        sim = 0
        if word1 in self.W2V_DICT and word2 in self.W2V_DICT:
            # scipy returns the cosine *distance* in [0, 2]; map it to a similarity in [0, 1]
            sim = (2 - spatial.distance.cosine(self.W2V_DICT[word1], self.W2V_DICT[word2])) / 2
        if print_to_screen:
            print("Similarity between '{}' and '{}' is: {}".format(word1, word2, sim))
        return sim
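    # Worked example of the normalisation above (illustrative): identical vectors give a
    # cosine distance of 0, so sim = (2 - 0) / 2 = 1.0; orthogonal vectors give a distance
    # of 1, so sim = 0.5; opposite vectors give a distance of 2, so sim = 0.0.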
def test_with_visim_400_data_set(self):
visim_400_df = pd.read_csv(
os.path.abspath('./Word-Similarity/datasets/ViSim-400/Visim-400.txt'),
sep="\t")
rs, sim1_arr, sim2_arr = [], [], []
for index, row in visim_400_df.iterrows():
word_1, word_2 = row['Word1'], row['Word2']
sim_1, sim_2 = row['Sim1'], row['Sim2']
if word_1 in self.W2V_DICT and word_2 in self.W2V_DICT:
sim = self.task_calculate_cosin_similarity(word_1, word_2, True)
rs.append(sim)
sim1_arr.append(sim_1)
sim2_arr.append(sim_2)
print("Hệ số tương đồng Pearson là: ", stats.pearsonr(rs, sim1_arr))
print("Hệ số tương đồng Spearman là: ", stats.spearmanr(rs, sim1_arr))
def task_k_nearest_words(self, k, word):
k = int(k)
if word not in self.W2V_DICT:
print("Word '{}' not in vocab".format(word))
return
sims = []
for key in self.W2V_DICT:
if key != word:
sims.append({
'key': key,
'sim': self.task_calculate_cosin_similarity(key, word, False)
})
        # take the k most similar words (the original slice [0:(k - 1)] dropped one result)
        k_list = sorted(sims, key=lambda k: k['sim'], reverse=True)[:k]
        print("The {} words most similar to '{}' are:".format(k, word))
        for w in k_list:
            print("Word {} has similarity {}".format(w.get('key'), w.get('sim')))
return k_list
def task_synonym_antonym_classification(self):
self.prepare_data()
self.train_synonym_antonym_classification()
self.test_synonym_antonym_classification()
def test_synonym_antonym_classification(self):
clf = pickle.load(open('./main/model/svm.model', 'rb'))
X_test, Y_test = [], []
for file in [
'./Word-Similarity/datasets/ViCon-400/400_noun_pairs.txt',
'./Word-Similarity/datasets/ViCon-400/400_verb_pairs.txt',
'./Word-Similarity/datasets/ViCon-400/600_adj_pairs.txt'
]:
f = open(file, 'r', encoding="utf8")
for index, line in enumerate(f):
line_arr = line.split()
if index == 0: continue
word1, word2, relation = line_arr[0], line_arr[1], line_arr[2]
if word1 in self.W2V_DICT and word2 in self.W2V_DICT:
vec = self.gen_vec_for_synonym_antonym_pair(word1, word2)
X_test.append(vec)
if relation == 'SYN': Y_test.append(1)
elif relation == 'ANT': Y_test.append(-1)
pred = clf.predict(X_test)
print("Test date: {}".format(date.today()))
print("Precision: {}".format(precision_score(Y_test, pred)))
print("Recall: {}".format(recall_score(Y_test, pred)))
print("F1: {}".format(f1_score(Y_test, pred)))
log = """
Test date: {}
Precision: {}
Recall: {}
F1: {}
\n
----------------------------------------
""".format(
date.today(),
precision_score(Y_test, pred),
recall_score(Y_test, pred),
f1_score(Y_test, pred))
log_f = open('./main/log', 'a+')
log_f.write(log)
log_f.close()
def gen_vec_for_synonym_antonym_pair(self, word1, word2):
np_vec1, np_vec2 = np.array(self.W2V_DICT[word1]), np.array(self.W2V_DICT[word2])
return np.concatenate((
np_vec1,
np_vec2,
np_vec1 + np_vec2,
np_vec1 * np_vec2,
np.absolute(np_vec1 - np_vec2),
# np.array([self.task_calculate_cosin_similarity(word1, word2, False)])
), axis=0)
def train_synonym_antonym_classification(self):
X_train, Y_train = pickle.load(open('./main/dataset/antonym-synonym/antonym-synonym-pairs.bin', 'rb+'))
unique, counts = np.unique(Y_train, return_counts=True)
label_count = dict(zip(unique, counts))
clf = MLPClassifier()
clf.fit(X_train, Y_train)
pickle.dump(clf, open('./main/model/svm.model', 'wb+'))
return clf
def prepare_data(self):
X, Y = [], []
for file in [
'./Word-Similarity/antonym-synonym set/Antonym_vietnamese.txt',
'./Word-Similarity/antonym-synonym set/Synonym_vietnamese.txt'
]:
f = open(file, 'r', encoding="utf8")
for index, line in enumerate(f):
line_arr = line.split()
if len(line_arr) < 2: continue
word1, word2 = line_arr[0], line_arr[1]
if word1 in self.W2V_DICT and word2 in self.W2V_DICT:
vec = self.gen_vec_for_synonym_antonym_pair(word1, word2)
X.append(vec)
if os.path.basename(f.name) == 'Antonym_vietnamese.txt': Y.append(-1)
else: Y.append(1)
X, Y = np.array(X), np.array(Y)
pickle.dump(
( X.astype(np.float64), Y ),
open('./main/dataset/antonym-synonym/antonym-synonym-pairs.bin', 'wb+')
)
    def gen_w2v_dict(self):
        # open with 'a+' (not 'w+') so an existing cache file is not truncated before it is read
        with open('./main/dataset/w2v/w2v-dict.json', 'a+') as f:
            f.seek(0)
            cached = f.read()
            if cached:
                self.W2V_DICT = json.loads(cached)
        if not self.W2V_DICT:
            with open('./Word-Similarity/word2vec/W2V_150.txt', 'r', encoding="utf8") as f:
                for index, line in enumerate(f):
                    line_arr = line.split()
                    if index > 1:
                        self.W2V_DICT.update({line_arr[0]: np.array(line_arr[1:]).astype(float).tolist()})
            f = open("./main/dataset/w2v/w2v-dict.json", "w+")
            f.write(json.dumps(self.W2V_DICT))
            f.close()
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(description="Helper")
parser.add_argument(
"--task",
required=True,
metavar="path",
help="""
Task name:
0 => Cosin Similarity
1 => Test Cosine Similarity with Visim-400 dataset
2 => K Nearest Words
3 => Synonym Antonym Classification
""",
)
parser.add_argument(
"--word",
metavar="path",
help="Target word used in 'K Nearest Words' task",
)
parser.add_argument(
"--k",
metavar="path",
help="Number of 'Nearest Words' used in 'K Nearest Words' task",
)
parser.add_argument(
"--word1",
metavar="path",
help="Source word used in 'Cosin Similarity' and 'Predict Synonym Antonym' task",
)
parser.add_argument(
"--word2",
metavar="path",
help="Target word used in 'Cosin Similarity' and 'Predict Synonym Antonym' task",
)
args = parser.parse_args()
task = args.task
k = args.k
word = args.word
word1 = args.word1
word2 = args.word2
switcher = {
'0': 'calculate-cosin-similarity',
'1': 'test-cosin-similarity-with-visim-400-dataset',
'2': 'k-nearest-words',
'3': 'synonym-antonym-classification',
'4': 'predict-synonym-antonym'
}
task_name = switcher.get(task, "Invalid task")
task_solver = TaskSolver()
task_solver.solve(
task_name,
k=k,
word=word,
word1=word1,
word2=word2
)
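# Example invocations (a sketch, not from the original repository; the script
# filename "task_solver.py" and the sample word are assumptions):
#   python task_solver.py --task 1                    # Pearson/Spearman test on ViSim-400
#   python task_solver.py --task 2 --k 5 --word nhà   # 5 words nearest to "nhà"
#   python task_solver.py --task 3                    # train and test the synonym/antonym classifier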
| normal |
{
    "blob_id": "c23bd136991bfb41f153321420c2fcfba0c843f4",
    "index": 1513,
    "step-1" ... "step-5": "(progressively unmasked versions of the TaskSolver module shown above; the escaped strings were garbled in extraction and are omitted here as duplicates)",
    "step-ids": [7, 11, 12, 15, 16]
}
| [7, 11, 12, 15, 16] |
"""
pokespeare.http.py
~~~~~~~~~~~~~~~~~~
Contains definitions of custom HTTP clients, allowing for more flexibility
in the choice of HTTP library
"""
import abc
import requests
from typing import Dict, Tuple, Any
from .exceptions import HTTPError, UnexpectedError
import requests_cache
class HTTPClient(abc.ABC):
"""Basic interface class. Allow to define custom HTTP clients giving
stronger contract behaviour
:type cache_name: str
:param cache_name: The name of the cache, corresponds to the name of the
sqlite DB on the filesystem if the `beckend` is sqlite
or the name of the redis namespace in case of `redis`
backend.
:type backend: str
:param backend: The backend to use, can be either `memory` to use a simple
python dict, `sqlite` to use a sqlite DB on the filesystem
or `redis` for a redis cache
:type expire_after: int
:param expire_after: Define after how many seconds each key in the cache
have to be evicted
:type allowable_methods: Tuple[str]
:param allowable_methods: A tuple of strings defining for which HTTP
methods to apply caching
Also supports `connection` in case of a redis connection on kwargs,
for more info `https://requests-cache.readthedocs.io/en/latest/api.html`
"""
def __init__(
self,
cache_name: str = "",
*,
backend: str = "memory",
expire_after: int = 3600,
allowable_methods: Tuple[str] = ("GET",),
**kwargs
):
self.cache_name = cache_name
self.backend = backend
self.expire_after = expire_after
self.allowable_methods = allowable_methods
self.cache_enabled = False
if self.cache_name:
self.enable_cache(**kwargs)
@abc.abstractmethod
def enable_cache(self, **kwargs: Dict[str, Any]) -> None:
"""Enable caching for each request"""
pass
@abc.abstractmethod
def disable_cache(self) -> None:
"""Disable caching"""
pass
@abc.abstractmethod
def get(self, url: str, **kwargs: Dict[str, Any]) -> Any:
"""Perform GET request to a defined URL"""
pass
@abc.abstractmethod
def post(self, url: str, **kwargs: Dict[str, Any]) -> Any:
"""Perform POST request to a defined URL"""
pass
class RequestsHTTPClient(HTTPClient):
"""
    Simple wrapper class around the requests library, which is used as the
    main engine for each call. Allows better unit testing overall.
"""
def enable_cache(self, **kwargs: Dict[str, Any]) -> None:
requests_cache.install_cache(
self.cache_name,
backend=self.backend,
expire_after=self.expire_after,
allowable_methods=self.allowable_methods,
**kwargs
)
self.cache_enabled = True
def disable_cache(self) -> None:
requests_cache.disable_cache()
requests_cache.uninstall_cache()
self.cache_enabled = False
def get(self, url: str, **kwargs: Dict[str, Any]) -> Any:
try:
response = requests.get(url, **kwargs)
response.raise_for_status()
except (
requests.exceptions.HTTPError,
requests.exceptions.TooManyRedirects,
) as e:
raise HTTPError(e)
except (requests.exceptions.RequestException, Exception) as e:
raise UnexpectedError(e)
return response
def post(self, url: str, **kwargs: Dict[str, Any]) -> Any:
try:
response = requests.post(url, **kwargs)
except (
requests.exceptions.HTTPError,
requests.exceptions.TooManyRedirects,
) as e:
raise HTTPError(e)
except (requests.exceptions.RequestException, Exception) as e:
raise UnexpectedError(e)
return response
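# A minimal usage sketch (not part of the original module); the URL below is an
# assumption for illustration. With a cache_name set, the first GET populates the
# cache and identical GETs within expire_after seconds are served from it.
if __name__ == "__main__":
    client = RequestsHTTPClient("demo_cache", backend="memory", expire_after=600)
    response = client.get("https://pokeapi.co/api/v2/pokemon/ditto")
    print(response.status_code, client.cache_enabled)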
| normal |
{
"blob_id": "1a126ba7e73eb2e7811ab32146fe5aee6c6b30f9",
"index": 4290,
"step-1": "<mask token>\n\n\nclass HTTPClient(abc.ABC):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n @abc.abstractmethod\n def post(self, url: str, **kwargs: Dict[str, Any]) ->Any:\n \"\"\"Perform POST request to a defined URL\"\"\"\n pass\n\n\nclass RequestsHTTPClient(HTTPClient):\n \"\"\"\n Simple wrapper class around requests library, which is used as the\n main engine for each call. Allow better unit-testing overall.\n \"\"\"\n\n def enable_cache(self, **kwargs: Dict[str, Any]) ->None:\n requests_cache.install_cache(self.cache_name, backend=self.backend,\n expire_after=self.expire_after, allowable_methods=self.\n allowable_methods, **kwargs)\n self.cache_enabled = True\n\n def disable_cache(self) ->None:\n requests_cache.disable_cache()\n requests_cache.uninstall_cache()\n self.cache_enabled = False\n\n def get(self, url: str, **kwargs: Dict[str, Any]) ->Any:\n try:\n response = requests.get(url, **kwargs)\n response.raise_for_status()\n except (requests.exceptions.HTTPError, requests.exceptions.\n TooManyRedirects) as e:\n raise HTTPError(e)\n except (requests.exceptions.RequestException, Exception) as e:\n raise UnexpectedError(e)\n return response\n\n def post(self, url: str, **kwargs: Dict[str, Any]) ->Any:\n try:\n response = requests.post(url, **kwargs)\n except (requests.exceptions.HTTPError, requests.exceptions.\n TooManyRedirects) as e:\n raise HTTPError(e)\n except (requests.exceptions.RequestException, Exception) as e:\n raise UnexpectedError(e)\n return response\n",
"step-2": "<mask token>\n\n\nclass HTTPClient(abc.ABC):\n <mask token>\n <mask token>\n <mask token>\n\n @abc.abstractmethod\n def disable_cache(self) ->None:\n \"\"\"Disable caching\"\"\"\n pass\n\n @abc.abstractmethod\n def get(self, url: str, **kwargs: Dict[str, Any]) ->Any:\n \"\"\"Perform GET request to a defined URL\"\"\"\n pass\n\n @abc.abstractmethod\n def post(self, url: str, **kwargs: Dict[str, Any]) ->Any:\n \"\"\"Perform POST request to a defined URL\"\"\"\n pass\n\n\nclass RequestsHTTPClient(HTTPClient):\n \"\"\"\n Simple wrapper class around requests library, which is used as the\n main engine for each call. Allow better unit-testing overall.\n \"\"\"\n\n def enable_cache(self, **kwargs: Dict[str, Any]) ->None:\n requests_cache.install_cache(self.cache_name, backend=self.backend,\n expire_after=self.expire_after, allowable_methods=self.\n allowable_methods, **kwargs)\n self.cache_enabled = True\n\n def disable_cache(self) ->None:\n requests_cache.disable_cache()\n requests_cache.uninstall_cache()\n self.cache_enabled = False\n\n def get(self, url: str, **kwargs: Dict[str, Any]) ->Any:\n try:\n response = requests.get(url, **kwargs)\n response.raise_for_status()\n except (requests.exceptions.HTTPError, requests.exceptions.\n TooManyRedirects) as e:\n raise HTTPError(e)\n except (requests.exceptions.RequestException, Exception) as e:\n raise UnexpectedError(e)\n return response\n\n def post(self, url: str, **kwargs: Dict[str, Any]) ->Any:\n try:\n response = requests.post(url, **kwargs)\n except (requests.exceptions.HTTPError, requests.exceptions.\n TooManyRedirects) as e:\n raise HTTPError(e)\n except (requests.exceptions.RequestException, Exception) as e:\n raise UnexpectedError(e)\n return response\n",
"step-3": "<mask token>\n\n\nclass HTTPClient(abc.ABC):\n <mask token>\n <mask token>\n\n @abc.abstractmethod\n def enable_cache(self, **kwargs: Dict[str, Any]) ->None:\n \"\"\"Enable caching for each request\"\"\"\n pass\n\n @abc.abstractmethod\n def disable_cache(self) ->None:\n \"\"\"Disable caching\"\"\"\n pass\n\n @abc.abstractmethod\n def get(self, url: str, **kwargs: Dict[str, Any]) ->Any:\n \"\"\"Perform GET request to a defined URL\"\"\"\n pass\n\n @abc.abstractmethod\n def post(self, url: str, **kwargs: Dict[str, Any]) ->Any:\n \"\"\"Perform POST request to a defined URL\"\"\"\n pass\n\n\nclass RequestsHTTPClient(HTTPClient):\n \"\"\"\n Simple wrapper class around requests library, which is used as the\n main engine for each call. Allow better unit-testing overall.\n \"\"\"\n\n def enable_cache(self, **kwargs: Dict[str, Any]) ->None:\n requests_cache.install_cache(self.cache_name, backend=self.backend,\n expire_after=self.expire_after, allowable_methods=self.\n allowable_methods, **kwargs)\n self.cache_enabled = True\n\n def disable_cache(self) ->None:\n requests_cache.disable_cache()\n requests_cache.uninstall_cache()\n self.cache_enabled = False\n\n def get(self, url: str, **kwargs: Dict[str, Any]) ->Any:\n try:\n response = requests.get(url, **kwargs)\n response.raise_for_status()\n except (requests.exceptions.HTTPError, requests.exceptions.\n TooManyRedirects) as e:\n raise HTTPError(e)\n except (requests.exceptions.RequestException, Exception) as e:\n raise UnexpectedError(e)\n return response\n\n def post(self, url: str, **kwargs: Dict[str, Any]) ->Any:\n try:\n response = requests.post(url, **kwargs)\n except (requests.exceptions.HTTPError, requests.exceptions.\n TooManyRedirects) as e:\n raise HTTPError(e)\n except (requests.exceptions.RequestException, Exception) as e:\n raise UnexpectedError(e)\n return response\n",
"step-4": "<mask token>\nimport abc\nimport requests\nfrom typing import Dict, Tuple, Any\nfrom .exceptions import HTTPError, UnexpectedError\nimport requests_cache\n\n\nclass HTTPClient(abc.ABC):\n \"\"\"Basic interface class. Allow to define custom HTTP clients giving\n stronger contract behaviour\n\n :type cache_name: str\n :param cache_name: The name of the cache, corresponds to the name of the\n sqlite DB on the filesystem if the `beckend` is sqlite\n or the name of the redis namespace in case of `redis`\n backend.\n\n :type backend: str\n :param backend: The backend to use, can be either `memory` to use a simple\n python dict, `sqlite` to use a sqlite DB on the filesystem\n or `redis` for a redis cache\n\n :type expire_after: int\n :param expire_after: Define after how many seconds each key in the cache\n have to be evicted\n\n :type allowable_methods: Tuple[str]\n :param allowable_methods: A tuple of strings defining for which HTTP\n methods to apply caching\n\n Also supports `connection` in case of a redis connection on kwargs,\n for more info `https://requests-cache.readthedocs.io/en/latest/api.html`\n \"\"\"\n\n def __init__(self, cache_name: str='', *, backend: str='memory',\n expire_after: int=3600, allowable_methods: Tuple[str]=('GET',), **\n kwargs):\n self.cache_name = cache_name\n self.backend = backend\n self.expire_after = expire_after\n self.allowable_methods = allowable_methods\n self.cache_enabled = False\n if self.cache_name:\n self.enable_cache(**kwargs)\n\n @abc.abstractmethod\n def enable_cache(self, **kwargs: Dict[str, Any]) ->None:\n \"\"\"Enable caching for each request\"\"\"\n pass\n\n @abc.abstractmethod\n def disable_cache(self) ->None:\n \"\"\"Disable caching\"\"\"\n pass\n\n @abc.abstractmethod\n def get(self, url: str, **kwargs: Dict[str, Any]) ->Any:\n \"\"\"Perform GET request to a defined URL\"\"\"\n pass\n\n @abc.abstractmethod\n def post(self, url: str, **kwargs: Dict[str, Any]) ->Any:\n \"\"\"Perform POST request to a defined URL\"\"\"\n pass\n\n\nclass RequestsHTTPClient(HTTPClient):\n \"\"\"\n Simple wrapper class around requests library, which is used as the\n main engine for each call. Allow better unit-testing overall.\n \"\"\"\n\n def enable_cache(self, **kwargs: Dict[str, Any]) ->None:\n requests_cache.install_cache(self.cache_name, backend=self.backend,\n expire_after=self.expire_after, allowable_methods=self.\n allowable_methods, **kwargs)\n self.cache_enabled = True\n\n def disable_cache(self) ->None:\n requests_cache.disable_cache()\n requests_cache.uninstall_cache()\n self.cache_enabled = False\n\n def get(self, url: str, **kwargs: Dict[str, Any]) ->Any:\n try:\n response = requests.get(url, **kwargs)\n response.raise_for_status()\n except (requests.exceptions.HTTPError, requests.exceptions.\n TooManyRedirects) as e:\n raise HTTPError(e)\n except (requests.exceptions.RequestException, Exception) as e:\n raise UnexpectedError(e)\n return response\n\n def post(self, url: str, **kwargs: Dict[str, Any]) ->Any:\n try:\n response = requests.post(url, **kwargs)\n except (requests.exceptions.HTTPError, requests.exceptions.\n TooManyRedirects) as e:\n raise HTTPError(e)\n except (requests.exceptions.RequestException, Exception) as e:\n raise UnexpectedError(e)\n return response\n",
"step-5": "\"\"\"\npokespeare.http.py\n~~~~~~~~~~~~~~~~~~\n\nContains definitions of custom HTTP clients, allowing for more flexibility on\nthe library choice\n\"\"\"\n\nimport abc\nimport requests\nfrom typing import Dict, Tuple, Any\nfrom .exceptions import HTTPError, UnexpectedError\nimport requests_cache\n\n\nclass HTTPClient(abc.ABC):\n \"\"\"Basic interface class. Allow to define custom HTTP clients giving\n stronger contract behaviour\n\n :type cache_name: str\n :param cache_name: The name of the cache, corresponds to the name of the\n sqlite DB on the filesystem if the `beckend` is sqlite\n or the name of the redis namespace in case of `redis`\n backend.\n\n :type backend: str\n :param backend: The backend to use, can be either `memory` to use a simple\n python dict, `sqlite` to use a sqlite DB on the filesystem\n or `redis` for a redis cache\n\n :type expire_after: int\n :param expire_after: Define after how many seconds each key in the cache\n have to be evicted\n\n :type allowable_methods: Tuple[str]\n :param allowable_methods: A tuple of strings defining for which HTTP\n methods to apply caching\n\n Also supports `connection` in case of a redis connection on kwargs,\n for more info `https://requests-cache.readthedocs.io/en/latest/api.html`\n \"\"\"\n\n def __init__(\n self,\n cache_name: str = \"\",\n *,\n backend: str = \"memory\",\n expire_after: int = 3600,\n allowable_methods: Tuple[str] = (\"GET\",),\n **kwargs\n ):\n self.cache_name = cache_name\n self.backend = backend\n self.expire_after = expire_after\n self.allowable_methods = allowable_methods\n self.cache_enabled = False\n if self.cache_name:\n self.enable_cache(**kwargs)\n\n @abc.abstractmethod\n def enable_cache(self, **kwargs: Dict[str, Any]) -> None:\n \"\"\"Enable caching for each request\"\"\"\n pass\n\n @abc.abstractmethod\n def disable_cache(self) -> None:\n \"\"\"Disable caching\"\"\"\n pass\n\n @abc.abstractmethod\n def get(self, url: str, **kwargs: Dict[str, Any]) -> Any:\n \"\"\"Perform GET request to a defined URL\"\"\"\n pass\n\n @abc.abstractmethod\n def post(self, url: str, **kwargs: Dict[str, Any]) -> Any:\n \"\"\"Perform POST request to a defined URL\"\"\"\n pass\n\n\nclass RequestsHTTPClient(HTTPClient):\n \"\"\"\n Simple wrapper class around requests library, which is used as the\n main engine for each call. Allow better unit-testing overall.\n \"\"\"\n\n def enable_cache(self, **kwargs: Dict[str, Any]) -> None:\n requests_cache.install_cache(\n self.cache_name,\n backend=self.backend,\n expire_after=self.expire_after,\n allowable_methods=self.allowable_methods,\n **kwargs\n )\n self.cache_enabled = True\n\n def disable_cache(self) -> None:\n requests_cache.disable_cache()\n requests_cache.uninstall_cache()\n self.cache_enabled = False\n\n def get(self, url: str, **kwargs: Dict[str, Any]) -> Any:\n try:\n response = requests.get(url, **kwargs)\n response.raise_for_status()\n except (\n requests.exceptions.HTTPError,\n requests.exceptions.TooManyRedirects,\n ) as e:\n raise HTTPError(e)\n except (requests.exceptions.RequestException, Exception) as e:\n raise UnexpectedError(e)\n return response\n\n def post(self, url: str, **kwargs: Dict[str, Any]) -> Any:\n try:\n response = requests.post(url, **kwargs)\n except (\n requests.exceptions.HTTPError,\n requests.exceptions.TooManyRedirects,\n ) as e:\n raise HTTPError(e)\n except (requests.exceptions.RequestException, Exception) as e:\n raise UnexpectedError(e)\n return response\n",
"step-ids": [
8,
10,
11,
14,
15
]
}
|
[
8,
10,
11,
14,
15
] |
#!/usr/local/bin/python
''' side_on.py
Open a 3d trajectory file (x y z) and produce a side-on plot of the
y-z plane, with straight line between start and end and a virtual
wall superimposed at 10 yards.
arg1 = infile
arg2 = optional outfile
'''
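# Example invocation (file names are illustrative). The input is a whitespace-separated
# x y z trajectory file whose first four rows hold the goalpost corner points:
#   python side_on.py trajectory.txt side_on.png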
import sys
import matplotlib.pyplot as plt
infilename = sys.argv[1]
outfilename = None
try:
outfilename = sys.argv[2]
except IndexError:
pass
with open(infilename) as datafile:
data = datafile.read()
datafile.close()
data = data.split('\n')
# get rid of any empty line at the end of file
if data[-1] in ['\n', '\r\n', '']:
data.pop(-1)
x = [row.split()[0] for row in data]
y = [row.split()[1] for row in data]
z = [row.split()[2] for row in data]
# Get goalpost corner points
bly = y.pop(0)
blz = z.pop(0)
tly = y.pop(0)
tlz = z.pop(0)
try_ = y.pop(0)
trz = z.pop(0)
bry = y.pop(0)
brz = z.pop(0)
max_height = max(y)
# Work out average depth of goalposts
avgz = (float(blz) + float(tlz) + float(trz) + float(brz)) / 4
fig = plt.figure('Side On Projection with Virtual Wall')
ax = fig.add_subplot(111, aspect='equal')
string = "Maximum height: " + str(max_height) + "m"
# annotate with maximum height
trans = ax.get_xaxis_transform()
ann = ax.annotate(string, xy=(8, -1), xycoords=trans)
ax.set_xlabel("Distance Travelled to Goal / m")
ax.set_ylabel("Height / m")
ax.plot(z, y, 'k.')
# Draw a red 1.82m wall at 9.14m (10 yards) and a black 2.44m goal at the average post depth
ax.plot([9.14, 9.14], [0, 1.82], c='r', linewidth=2)
ax.plot([avgz, avgz], [0, 2.44], c='k', linewidth=2)
plt.show()
# Save it if necessary
if outfilename is not None:
    print("Save:", outfilename)
fig.savefig(outfilename, bbox_inches='tight')
|
normal
|
{
"blob_id": "146aca6c7da17ddccb815638292cbcdda66f28e6",
"index": 7035,
"step-1": "#!/usr/local/bin/python\n\n''' side_on.py\n\n Open a 3d trajectory file (x y z) and produce a side-on plot of the\n y-z plane, with straight line between start and end and a virtual\n wall superimposed at 10 yards.\n\n arg1 = infile\n arg2 = optional outfile\n'''\n\nimport sys\nimport matplotlib.pyplot as plt\n\ninfilename = sys.argv[1]\noutfilename = None\ntry:\n outfilename = sys.argv[2]\nexcept IndexError:\n pass\n\nwith open(infilename) as datafile:\n data = datafile.read()\n datafile.close()\n\ndata = data.split('\\n')\n\n# get rid of any empty line at the end of file\nif data[-1] in ['\\n', '\\r\\n', '']:\n data.pop(-1)\n\nx = [row.split()[0] for row in data]\ny = [row.split()[1] for row in data]\nz = [row.split()[2] for row in data]\n\n# Get goalpost corner points\nbly = y.pop(0)\nblz = z.pop(0)\n\ntly = y.pop(0)\ntlz = z.pop(0)\n\ntry_ = y.pop(0)\ntrz = z.pop(0)\n\nbry = y.pop(0)\nbrz = z.pop(0)\n\nmax_height = max(y)\n\n# Work out everage depth of goalposts\navgz = (float(blz) + float(tlz) + float(trz) + float(brz)) / 4\n\nfig = plt.figure('Side On Projection with Virtual Wall')\nax = fig.add_subplot(111, aspect='equal')\n\nstring = \"Maximum height: \" + str(max_height) + \"m\"\n\n# annotate with maximum height\ntrans = ax.get_xaxis_transform()\nann = ax.annotate(string, xy=(8, -1), xycoords=trans)\n\nax.set_xlabel(\"Distance Travelled to Goal / m\")\nax.set_ylabel(\"Height / m\")\nax.plot(z, y, 'k.')\n\n# Draw a red 2.14m wall at 9.14m\nax.plot([9.14, 9.14], [0, 1.82], c='r', linewidth=2)\nax.plot([avgz, avgz], [0, 2.44], c='k', linewidth=2)\n\nplt.show()\n\n# Save it if necessary\nif outfilename is not None:\n print \"Save:\", outfilename\n fig.savefig(outfilename, bbox_inches='tight')\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
<|reserved_special_token_0|>
class TransactionStack:
<|reserved_special_token_0|>
def transaction_stack(self, transaction, customer_name, company_name,
no_of_share, cost, time):
new_transaction = {'transaction': transaction, 'customer_name':
customer_name, 'company_name': company_name, 'no_of_share':
no_of_share, 'cost': cost, 'time': time}
self.stack.push(new_transaction)
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class TransactionStack:
def __init__(self):
self.stack = Stack()
with open('json_file/Transaction_Stack.json') as data:
try:
temp = json.load(data)
except Exception:
pass
else:
for i in temp:
self.stack.push(i)
def transaction_stack(self, transaction, customer_name, company_name,
no_of_share, cost, time):
new_transaction = {'transaction': transaction, 'customer_name':
customer_name, 'company_name': company_name, 'no_of_share':
no_of_share, 'cost': cost, 'time': time}
self.stack.push(new_transaction)
def save_transaction(self):
temp1 = []
size = self.stack.size()
for i in range(size):
temp1.append(self.stack.pop())
with open('Transaction_stack.json', 'w') as data:
json.dump(temp1, data)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class TransactionStack:
def __init__(self):
self.stack = Stack()
with open('json_file/Transaction_Stack.json') as data:
try:
temp = json.load(data)
except Exception:
pass
else:
for i in temp:
self.stack.push(i)
def transaction_stack(self, transaction, customer_name, company_name,
no_of_share, cost, time):
new_transaction = {'transaction': transaction, 'customer_name':
customer_name, 'company_name': company_name, 'no_of_share':
no_of_share, 'cost': cost, 'time': time}
self.stack.push(new_transaction)
def save_transaction(self):
temp1 = []
size = self.stack.size()
for i in range(size):
temp1.append(self.stack.pop())
with open('Transaction_stack.json', 'w') as data:
json.dump(temp1, data)
if __name__ == '__main__':
pass
<|reserved_special_token_1|>
import json
from week2.Stack import Stack
class TransactionStack:
def __init__(self):
self.stack = Stack()
with open('json_file/Transaction_Stack.json') as data:
try:
temp = json.load(data)
except Exception:
pass
else:
for i in temp:
self.stack.push(i)
def transaction_stack(self, transaction, customer_name, company_name,
no_of_share, cost, time):
new_transaction = {'transaction': transaction, 'customer_name':
customer_name, 'company_name': company_name, 'no_of_share':
no_of_share, 'cost': cost, 'time': time}
self.stack.push(new_transaction)
def save_transaction(self):
temp1 = []
size = self.stack.size()
for i in range(size):
temp1.append(self.stack.pop())
with open('Transaction_stack.json', 'w') as data:
json.dump(temp1, data)
if __name__ == '__main__':
pass
<|reserved_special_token_1|>
import json
from week2.Stack import Stack
class TransactionStack:
def __init__(self):
self.stack = Stack()
with open("json_file/Transaction_Stack.json") as data:
try:
temp = json.load(data)
except Exception:
pass
else:
for i in temp:
self.stack.push(i)
def transaction_stack(self, transaction, customer_name, company_name, no_of_share, cost, time):
new_transaction = {"transaction": transaction, "customer_name": customer_name, "company_name": company_name,
"no_of_share": no_of_share, "cost": cost, "time": time}
self.stack.push(new_transaction)
def save_transaction(self):
temp1 = []
size = self.stack.size()
for i in range(size):
temp1.append(self.stack.pop())
with open("Transaction_stack.json", 'w') as data:
json.dump(temp1, data)
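# Minimal usage sketch (the customer, company, share count, cost and timestamp
# values below are illustrative only):
#   stack = TransactionStack()
#   stack.transaction_stack("buy", "Alice", "ACME Corp", 10, 99.5, "2021-01-01 10:00:00")
#   stack.save_transaction()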
# Main method
if __name__ == "__main__":
pass
|
flexible
|
{
"blob_id": "30a2358e8396d24d6c3cd72d04321aa9f9f83995",
"index": 8233,
"step-1": "<mask token>\n\n\nclass TransactionStack:\n <mask token>\n\n def transaction_stack(self, transaction, customer_name, company_name,\n no_of_share, cost, time):\n new_transaction = {'transaction': transaction, 'customer_name':\n customer_name, 'company_name': company_name, 'no_of_share':\n no_of_share, 'cost': cost, 'time': time}\n self.stack.push(new_transaction)\n <mask token>\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass TransactionStack:\n\n def __init__(self):\n self.stack = Stack()\n with open('json_file/Transaction_Stack.json') as data:\n try:\n temp = json.load(data)\n except Exception:\n pass\n else:\n for i in temp:\n self.stack.push(i)\n\n def transaction_stack(self, transaction, customer_name, company_name,\n no_of_share, cost, time):\n new_transaction = {'transaction': transaction, 'customer_name':\n customer_name, 'company_name': company_name, 'no_of_share':\n no_of_share, 'cost': cost, 'time': time}\n self.stack.push(new_transaction)\n\n def save_transaction(self):\n temp1 = []\n size = self.stack.size()\n for i in range(size):\n temp1.append(self.stack.pop())\n with open('Transaction_stack.json', 'w') as data:\n json.dump(temp1, data)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass TransactionStack:\n\n def __init__(self):\n self.stack = Stack()\n with open('json_file/Transaction_Stack.json') as data:\n try:\n temp = json.load(data)\n except Exception:\n pass\n else:\n for i in temp:\n self.stack.push(i)\n\n def transaction_stack(self, transaction, customer_name, company_name,\n no_of_share, cost, time):\n new_transaction = {'transaction': transaction, 'customer_name':\n customer_name, 'company_name': company_name, 'no_of_share':\n no_of_share, 'cost': cost, 'time': time}\n self.stack.push(new_transaction)\n\n def save_transaction(self):\n temp1 = []\n size = self.stack.size()\n for i in range(size):\n temp1.append(self.stack.pop())\n with open('Transaction_stack.json', 'w') as data:\n json.dump(temp1, data)\n\n\nif __name__ == '__main__':\n pass\n",
"step-4": "import json\nfrom week2.Stack import Stack\n\n\nclass TransactionStack:\n\n def __init__(self):\n self.stack = Stack()\n with open('json_file/Transaction_Stack.json') as data:\n try:\n temp = json.load(data)\n except Exception:\n pass\n else:\n for i in temp:\n self.stack.push(i)\n\n def transaction_stack(self, transaction, customer_name, company_name,\n no_of_share, cost, time):\n new_transaction = {'transaction': transaction, 'customer_name':\n customer_name, 'company_name': company_name, 'no_of_share':\n no_of_share, 'cost': cost, 'time': time}\n self.stack.push(new_transaction)\n\n def save_transaction(self):\n temp1 = []\n size = self.stack.size()\n for i in range(size):\n temp1.append(self.stack.pop())\n with open('Transaction_stack.json', 'w') as data:\n json.dump(temp1, data)\n\n\nif __name__ == '__main__':\n pass\n",
"step-5": "import json\n\nfrom week2.Stack import Stack\n\n\nclass TransactionStack:\n def __init__(self):\n\n self.stack = Stack()\n with open(\"json_file/Transaction_Stack.json\") as data:\n try:\n temp = json.load(data)\n except Exception:\n pass\n else:\n for i in temp:\n self.stack.push(i)\n\n def transaction_stack(self, transaction, customer_name, company_name, no_of_share, cost, time):\n\n new_transaction = {\"transaction\": transaction, \"customer_name\": customer_name, \"company_name\": company_name,\n \"no_of_share\": no_of_share, \"cost\": cost, \"time\": time}\n self.stack.push(new_transaction)\n\n def save_transaction(self):\n\n temp1 = []\n size = self.stack.size()\n for i in range(size):\n temp1.append(self.stack.pop())\n with open(\"Transaction_stack.json\", 'w') as data:\n json.dump(temp1, data)\n\n\n# Main method\nif __name__ == \"__main__\":\n pass\n",
"step-ids": [
2,
4,
5,
6,
7
]
}
|
[
2,
4,
5,
6,
7
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
print('Database created........')
print('List of databases after creating new one')
print(client.list_database_names())
<|reserved_special_token_0|>
meta.create_all(engine)
<|reserved_special_token_1|>
client = MongoClient('localhost', 27017)
db = client['mydb']
print('Database created........')
print('List of databases after creating new one')
print(client.list_database_names())
engine = create_engine('sqlite:///test.db', echo=True)
meta = MetaData()
Items = Table('Items', meta, Column('id', Integer, primary_key=True),
Column('product_name', String), Column('price', Float), Column(
'quantity', Integer))
Users = Table('Users', meta, Column('firstname', String), Column('lastname',
String), Column('email', String), Column('passwd', String), Column(
'phone', Integer))
meta.create_all(engine)
<|reserved_special_token_1|>
#app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///test.db'
#app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = True;
#db = SQLAlchemy(app)
# Imports needed for this snippet to run on its own (assumed; the original app file
# presumably had them near the Flask/SQLAlchemy config above)
from pymongo import MongoClient
from sqlalchemy import create_engine, MetaData, Table, Column, Integer, String, Float
# MONGODB CREATION
#Creating a pymongo client
client = MongoClient('localhost', 27017)
#Getting the database instance
db = client['mydb']
print("Database created........")
#Verification
print("List of databases after creating new one")
print(client.list_database_names())
# DB CREATION AND INSTANTIATION #
#DB -- OPTION 1
engine = create_engine('sqlite:///test.db', echo = True)
meta = MetaData()
# Database Schema for Item and User #
Items = Table(
'Items', meta,
Column('id', Integer, primary_key = True),
Column('product_name', String),
Column('price', Float),
Column('quantity', Integer)
)
Users = Table(
'Users', meta,
Column('firstname', String),
Column('lastname', String),
Column('email', String),
Column('passwd', String),
Column('phone', Integer)
)
meta.create_all(engine)
#class Item(db.Model):
# id = db.Column(db.Integer, primary_key = True)
# product = db.Column(db.String(200))
# price = db.Column(db.Integer)
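# Example row insert using the SQLAlchemy Core API (product values are illustrative):
#   with engine.begin() as conn:
#       conn.execute(Items.insert().values(product_name='pen', price=1.5, quantity=3))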
|
flexible
|
{
"blob_id": "5b7567129d447ae2b75f4a8f9c26127f8b7553ec",
"index": 7818,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint('Database created........')\nprint('List of databases after creating new one')\nprint(client.list_database_names())\n<mask token>\nmeta.create_all(engine)\n",
"step-3": "client = MongoClient('localhost', 27017)\ndb = client['mydb']\nprint('Database created........')\nprint('List of databases after creating new one')\nprint(client.list_database_names())\nengine = create_engine('sqlite:///test.db', echo=True)\nmeta = MetaData()\nItems = Table('Items', meta, Column('id', Integer, primary_key=True),\n Column('product_name', String), Column('price', Float), Column(\n 'quantity', Integer))\nUsers = Table('Users', meta, Column('firstname', String), Column('lastname',\n String), Column('email', String), Column('passwd', String), Column(\n 'phone', Integer))\nmeta.create_all(engine)\n",
"step-4": "#app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///test.db'\n#app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = True;\n#db = SQLAlchemy(app)\n\n\n# MONGODB CREATION\n#Creating a pymongo client\nclient = MongoClient('localhost', 27017)\n\n#Getting the database instance\ndb = client['mydb']\nprint(\"Database created........\")\n\n#Verification\nprint(\"List of databases after creating new one\")\nprint(client.list_database_names())\n\n# DB CREATION AND INSTANTIATION #\n#DB -- OPTION 1\nengine = create_engine('sqlite:///test.db', echo = True)\nmeta = MetaData()\n\n# Database Schema for Item and User #\nItems = Table(\n 'Items', meta, \n Column('id', Integer, primary_key = True), \n Column('product_name', String), \n Column('price', Float), \n Column('quantity', Integer)\n)\nUsers = Table(\n 'Users', meta,\n Column('firstname', String),\n Column('lastname', String),\n Column('email', String),\n Column('passwd', String),\n Column('phone', Integer)\n)\nmeta.create_all(engine)\n\n\n#class Item(db.Model):\n# id = db.Column(db.Integer, primary_key = True)\n# product = db.Column(db.String(200))\n# price = db.Column(db.Integer)",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
from redis_db import RedisClient
from setting import TEST_URL
import requests
class Test_Proxy():
def __init__(self):
self.db=RedisClient()
def proxy_test(self, proxy):
url = TEST_URL
proxies={
"http":proxy,
"https":proxy
}
        # print("{} (testing)".format(proxy))
try:
r = requests.get(url, proxies=proxies, timeout=5)
if r.status_code ==200:
                # print("{} (usable)".format(proxy))
self.db.max(proxy)
except requests.exceptions.ConnectionError:
self.db.decrease(proxy)
            # print("{} (score decreased by one)".format(proxy))
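# Rough usage sketch (the proxy address is illustrative; RedisClient and TEST_URL
# come from this project's own redis_db and setting modules):
#   tester = Test_Proxy()
#   tester.proxy_test("127.0.0.1:8080")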
|
normal
|
{
"blob_id": "2cbdb828ab6e0ad44154f0c5b2a1d807fd0d2520",
"index": 8783,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Test_Proxy:\n\n def __init__(self):\n self.db = RedisClient()\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Test_Proxy:\n\n def __init__(self):\n self.db = RedisClient()\n\n def proxy_test(self, proxy):\n url = TEST_URL\n proxies = {'http': proxy, 'https': proxy}\n try:\n r = requests.get(url, proxies=proxies, timeout=5)\n if r.status_code == 200:\n self.db.max(proxy)\n except requests.exceptions.ConnectionError:\n self.db.decrease(proxy)\n",
"step-4": "from redis_db import RedisClient\nfrom setting import TEST_URL\nimport requests\n\n\nclass Test_Proxy:\n\n def __init__(self):\n self.db = RedisClient()\n\n def proxy_test(self, proxy):\n url = TEST_URL\n proxies = {'http': proxy, 'https': proxy}\n try:\n r = requests.get(url, proxies=proxies, timeout=5)\n if r.status_code == 200:\n self.db.max(proxy)\n except requests.exceptions.ConnectionError:\n self.db.decrease(proxy)\n",
"step-5": "from redis_db import RedisClient\nfrom setting import TEST_URL\nimport requests\n\nclass Test_Proxy():\n def __init__(self):\n self.db=RedisClient()\n\n def proxy_test(self, proxy):\n url = TEST_URL\n proxies={\n \"http\":proxy,\n \"https\":proxy\n }\n # print(\"{}(测试中)\".format(proxy))\n try:\n r = requests.get(url, proxies=proxies, timeout=5)\n if r.status_code ==200:\n # print(\"{}(可用)\".format(proxy))\n self.db.max(proxy)\n except requests.exceptions.ConnectionError:\n self.db.decrease(proxy)\n # print(\"{}(减一)\".format(proxy))\n\n",
"step-ids": [
0,
2,
3,
4,
5
]
}
|
[
0,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def stats_print():
print('The coffee machine has:')
print(str(water) + ' of water')
print(str(milk) + ' of milk')
print(str(coffee) + ' of coffee beans')
print(str(cups) + ' of disposable cups')
print(str(money) + ' of money')
def coffee_machine():
user_action = input('Write action (buy, fill, take, remaining, exit):')
if user_action == 'buy':
buying()
elif user_action == 'fill':
filling()
elif user_action == 'take':
taking()
elif user_action == 'remaining':
stats_print()
coffee_machine()
elif user_action == 'exit':
return
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def buying():
global water
global coffee
global cups
global milk
global money
choice_coffee = input(
'What do you want to buy? 1 - espresso, 2 - latte, 3 - cappuccino, back - to main menu:'
)
if choice_coffee == '1':
if water > 250 and coffee > 16 and cups > 1:
print('I have enough resources, making you a coffee!')
water -= 250
coffee -= 16
cups -= 1
money += 4
coffee_machine()
elif choice_coffee == '2':
if water > 350 and coffee > 16 and cups > 1 and milk > 75:
print('I have enough resources, making you a coffee!')
water -= 350
milk -= 75
coffee -= 20
cups -= 1
money += 7
elif water < 350:
print('Sorry, not enough water!')
coffee_machine()
elif choice_coffee == '3':
if water > 200 and coffee > 12 and cups > 1 and milk > 100:
print('I have enough resources, making you a coffee!')
water -= 200
milk -= 100
coffee -= 12
cups -= 1
money += 6
coffee_machine()
elif choice_coffee == 'back':
coffee_machine()
def filling():
global water
global coffee
global cups
global milk
water_fill = int(input('Write how many ml of water do you want to add:'))
milk_fill = int(input('Write how many ml of milk do you want to add:'))
coffee_fill = int(input(
'Write how many grams of coffee beans do you want to add:'))
cups_fill = int(input(
'Write how many disposable cups of coffee do you want to add:'))
water += water_fill
milk += milk_fill
coffee += coffee_fill
cups += cups_fill
coffee_machine()
def taking():
global money
print('I gave you $' + str(money))
money = 0
coffee_machine()
def stats_print():
print('The coffee machine has:')
print(str(water) + ' of water')
print(str(milk) + ' of milk')
print(str(coffee) + ' of coffee beans')
print(str(cups) + ' of disposable cups')
print(str(money) + ' of money')
def coffee_machine():
user_action = input('Write action (buy, fill, take, remaining, exit):')
if user_action == 'buy':
buying()
elif user_action == 'fill':
filling()
elif user_action == 'take':
taking()
elif user_action == 'remaining':
stats_print()
coffee_machine()
elif user_action == 'exit':
return
coffee_machine()
<|reserved_special_token_1|>
water = 400
milk = 540
coffee = 120
cups = 9
money = 550
def buying():
global water
global coffee
global cups
global milk
global money
choice_coffee = input(
'What do you want to buy? 1 - espresso, 2 - latte, 3 - cappuccino, back - to main menu:'
)
if choice_coffee == '1':
if water > 250 and coffee > 16 and cups > 1:
print('I have enough resources, making you a coffee!')
water -= 250
coffee -= 16
cups -= 1
money += 4
coffee_machine()
elif choice_coffee == '2':
if water > 350 and coffee > 16 and cups > 1 and milk > 75:
print('I have enough resources, making you a coffee!')
water -= 350
milk -= 75
coffee -= 20
cups -= 1
money += 7
elif water < 350:
print('Sorry, not enough water!')
coffee_machine()
elif choice_coffee == '3':
if water > 200 and coffee > 12 and cups > 1 and milk > 100:
print('I have enough resources, making you a coffee!')
water -= 200
milk -= 100
coffee -= 12
cups -= 1
money += 6
coffee_machine()
elif choice_coffee == 'back':
coffee_machine()
def filling():
global water
global coffee
global cups
global milk
water_fill = int(input('Write how many ml of water do you want to add:'))
milk_fill = int(input('Write how many ml of milk do you want to add:'))
coffee_fill = int(input(
'Write how many grams of coffee beans do you want to add:'))
cups_fill = int(input(
'Write how many disposable cups of coffee do you want to add:'))
water += water_fill
milk += milk_fill
coffee += coffee_fill
cups += cups_fill
coffee_machine()
def taking():
global money
print('I gave you $' + str(money))
money = 0
coffee_machine()
def stats_print():
print('The coffee machine has:')
print(str(water) + ' of water')
print(str(milk) + ' of milk')
print(str(coffee) + ' of coffee beans')
print(str(cups) + ' of disposable cups')
print(str(money) + ' of money')
def coffee_machine():
user_action = input('Write action (buy, fill, take, remaining, exit):')
if user_action == 'buy':
buying()
elif user_action == 'fill':
filling()
elif user_action == 'take':
taking()
elif user_action == 'remaining':
stats_print()
coffee_machine()
elif user_action == 'exit':
return
coffee_machine()
<|reserved_special_token_1|>
water = 400
milk = 540
coffee = 120
cups = 9
money = 550
def buying():
global water
global coffee
global cups
global milk
global money
choice_coffee = input("What do you want to buy? 1 - espresso, 2 - latte, 3 - cappuccino, back - to main menu:")
if choice_coffee == "1":
if water > 250 and coffee > 16 and cups > 1:
print("I have enough resources, making you a coffee!")
water -= 250
coffee -= 16
cups -= 1
money += 4
coffee_machine()
elif choice_coffee == "2":
if water > 350 and coffee > 16 and cups > 1 and milk > 75:
print("I have enough resources, making you a coffee!")
water -= 350
milk -= 75
coffee -= 20
cups -= 1
money += 7
elif water < 350:
print("Sorry, not enough water!")
coffee_machine()
elif choice_coffee == "3":
if water > 200 and coffee > 12 and cups > 1 and milk > 100:
print("I have enough resources, making you a coffee!")
water -= 200
milk -= 100
coffee -= 12
cups -= 1
money += 6
coffee_machine()
elif choice_coffee == "back":
coffee_machine()
def filling():
global water
global coffee
global cups
global milk
water_fill = int(input("Write how many ml of water do you want to add:"))
milk_fill = int(input("Write how many ml of milk do you want to add:"))
coffee_fill = int(input("Write how many grams of coffee beans do you want to add:"))
cups_fill = int(input("Write how many disposable cups of coffee do you want to add:"))
water += water_fill
milk += milk_fill
coffee += coffee_fill
cups += cups_fill
coffee_machine()
def taking():
global money
print("I gave you $" + str(money))
money = 0
coffee_machine()
def stats_print():
print("The coffee machine has:")
print(str(water) + " of water")
print(str(milk) + " of milk")
print(str(coffee) + " of coffee beans")
print(str(cups) + " of disposable cups")
print(str(money) + " of money")
def coffee_machine():
user_action = input("Write action (buy, fill, take, remaining, exit):")
if user_action == "buy":
buying()
elif user_action == "fill":
filling()
elif user_action == "take":
taking()
elif user_action == "remaining":
stats_print()
coffee_machine()
elif user_action == "exit":
return
coffee_machine()
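# Example console session with the initial stock (user enters "remaining", then "exit"):
#   Write action (buy, fill, take, remaining, exit):remaining
#   The coffee machine has:
#   400 of water
#   540 of milk
#   120 of coffee beans
#   9 of disposable cups
#   550 of money
#   Write action (buy, fill, take, remaining, exit):exit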
|
flexible
|
{
"blob_id": "4e98ebd040297cb9472368478452bc484e0aaa04",
"index": 3255,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef stats_print():\n print('The coffee machine has:')\n print(str(water) + ' of water')\n print(str(milk) + ' of milk')\n print(str(coffee) + ' of coffee beans')\n print(str(cups) + ' of disposable cups')\n print(str(money) + ' of money')\n\n\ndef coffee_machine():\n user_action = input('Write action (buy, fill, take, remaining, exit):')\n if user_action == 'buy':\n buying()\n elif user_action == 'fill':\n filling()\n elif user_action == 'take':\n taking()\n elif user_action == 'remaining':\n stats_print()\n coffee_machine()\n elif user_action == 'exit':\n return\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef buying():\n global water\n global coffee\n global cups\n global milk\n global money\n choice_coffee = input(\n 'What do you want to buy? 1 - espresso, 2 - latte, 3 - cappuccino, back - to main menu:'\n )\n if choice_coffee == '1':\n if water > 250 and coffee > 16 and cups > 1:\n print('I have enough resources, making you a coffee!')\n water -= 250\n coffee -= 16\n cups -= 1\n money += 4\n coffee_machine()\n elif choice_coffee == '2':\n if water > 350 and coffee > 16 and cups > 1 and milk > 75:\n print('I have enough resources, making you a coffee!')\n water -= 350\n milk -= 75\n coffee -= 20\n cups -= 1\n money += 7\n elif water < 350:\n print('Sorry, not enough water!')\n coffee_machine()\n elif choice_coffee == '3':\n if water > 200 and coffee > 12 and cups > 1 and milk > 100:\n print('I have enough resources, making you a coffee!')\n water -= 200\n milk -= 100\n coffee -= 12\n cups -= 1\n money += 6\n coffee_machine()\n elif choice_coffee == 'back':\n coffee_machine()\n\n\ndef filling():\n global water\n global coffee\n global cups\n global milk\n water_fill = int(input('Write how many ml of water do you want to add:'))\n milk_fill = int(input('Write how many ml of milk do you want to add:'))\n coffee_fill = int(input(\n 'Write how many grams of coffee beans do you want to add:'))\n cups_fill = int(input(\n 'Write how many disposable cups of coffee do you want to add:'))\n water += water_fill\n milk += milk_fill\n coffee += coffee_fill\n cups += cups_fill\n coffee_machine()\n\n\ndef taking():\n global money\n print('I gave you $' + str(money))\n money = 0\n coffee_machine()\n\n\ndef stats_print():\n print('The coffee machine has:')\n print(str(water) + ' of water')\n print(str(milk) + ' of milk')\n print(str(coffee) + ' of coffee beans')\n print(str(cups) + ' of disposable cups')\n print(str(money) + ' of money')\n\n\ndef coffee_machine():\n user_action = input('Write action (buy, fill, take, remaining, exit):')\n if user_action == 'buy':\n buying()\n elif user_action == 'fill':\n filling()\n elif user_action == 'take':\n taking()\n elif user_action == 'remaining':\n stats_print()\n coffee_machine()\n elif user_action == 'exit':\n return\n\n\ncoffee_machine()\n",
"step-4": "water = 400\nmilk = 540\ncoffee = 120\ncups = 9\nmoney = 550\n\n\ndef buying():\n global water\n global coffee\n global cups\n global milk\n global money\n choice_coffee = input(\n 'What do you want to buy? 1 - espresso, 2 - latte, 3 - cappuccino, back - to main menu:'\n )\n if choice_coffee == '1':\n if water > 250 and coffee > 16 and cups > 1:\n print('I have enough resources, making you a coffee!')\n water -= 250\n coffee -= 16\n cups -= 1\n money += 4\n coffee_machine()\n elif choice_coffee == '2':\n if water > 350 and coffee > 16 and cups > 1 and milk > 75:\n print('I have enough resources, making you a coffee!')\n water -= 350\n milk -= 75\n coffee -= 20\n cups -= 1\n money += 7\n elif water < 350:\n print('Sorry, not enough water!')\n coffee_machine()\n elif choice_coffee == '3':\n if water > 200 and coffee > 12 and cups > 1 and milk > 100:\n print('I have enough resources, making you a coffee!')\n water -= 200\n milk -= 100\n coffee -= 12\n cups -= 1\n money += 6\n coffee_machine()\n elif choice_coffee == 'back':\n coffee_machine()\n\n\ndef filling():\n global water\n global coffee\n global cups\n global milk\n water_fill = int(input('Write how many ml of water do you want to add:'))\n milk_fill = int(input('Write how many ml of milk do you want to add:'))\n coffee_fill = int(input(\n 'Write how many grams of coffee beans do you want to add:'))\n cups_fill = int(input(\n 'Write how many disposable cups of coffee do you want to add:'))\n water += water_fill\n milk += milk_fill\n coffee += coffee_fill\n cups += cups_fill\n coffee_machine()\n\n\ndef taking():\n global money\n print('I gave you $' + str(money))\n money = 0\n coffee_machine()\n\n\ndef stats_print():\n print('The coffee machine has:')\n print(str(water) + ' of water')\n print(str(milk) + ' of milk')\n print(str(coffee) + ' of coffee beans')\n print(str(cups) + ' of disposable cups')\n print(str(money) + ' of money')\n\n\ndef coffee_machine():\n user_action = input('Write action (buy, fill, take, remaining, exit):')\n if user_action == 'buy':\n buying()\n elif user_action == 'fill':\n filling()\n elif user_action == 'take':\n taking()\n elif user_action == 'remaining':\n stats_print()\n coffee_machine()\n elif user_action == 'exit':\n return\n\n\ncoffee_machine()\n",
"step-5": "water = 400\r\nmilk = 540\r\ncoffee = 120\r\ncups = 9\r\nmoney = 550\r\n\r\n\r\ndef buying():\r\n global water\r\n global coffee\r\n global cups\r\n global milk\r\n global money\r\n choice_coffee = input(\"What do you want to buy? 1 - espresso, 2 - latte, 3 - cappuccino, back - to main menu:\")\r\n if choice_coffee == \"1\":\r\n if water > 250 and coffee > 16 and cups > 1:\r\n print(\"I have enough resources, making you a coffee!\")\r\n water -= 250\r\n coffee -= 16\r\n cups -= 1\r\n money += 4\r\n coffee_machine()\r\n elif choice_coffee == \"2\":\r\n if water > 350 and coffee > 16 and cups > 1 and milk > 75:\r\n print(\"I have enough resources, making you a coffee!\")\r\n water -= 350\r\n milk -= 75\r\n coffee -= 20\r\n cups -= 1\r\n money += 7\r\n elif water < 350:\r\n print(\"Sorry, not enough water!\")\r\n coffee_machine()\r\n elif choice_coffee == \"3\":\r\n if water > 200 and coffee > 12 and cups > 1 and milk > 100:\r\n print(\"I have enough resources, making you a coffee!\")\r\n water -= 200\r\n milk -= 100\r\n coffee -= 12\r\n cups -= 1\r\n money += 6\r\n coffee_machine()\r\n elif choice_coffee == \"back\":\r\n coffee_machine()\r\n\r\n\r\ndef filling():\r\n global water\r\n global coffee\r\n global cups\r\n global milk\r\n water_fill = int(input(\"Write how many ml of water do you want to add:\"))\r\n milk_fill = int(input(\"Write how many ml of milk do you want to add:\"))\r\n coffee_fill = int(input(\"Write how many grams of coffee beans do you want to add:\"))\r\n cups_fill = int(input(\"Write how many disposable cups of coffee do you want to add:\"))\r\n water += water_fill\r\n milk += milk_fill\r\n coffee += coffee_fill\r\n cups += cups_fill\r\n coffee_machine()\r\n\r\n\r\ndef taking():\r\n global money\r\n print(\"I gave you $\" + str(money))\r\n money = 0\r\n coffee_machine()\r\n\r\n\r\ndef stats_print():\r\n print(\"The coffee machine has:\")\r\n print(str(water) + \" of water\")\r\n print(str(milk) + \" of milk\")\r\n print(str(coffee) + \" of coffee beans\")\r\n print(str(cups) + \" of disposable cups\")\r\n print(str(money) + \" of money\")\r\n\r\n\r\ndef coffee_machine():\r\n user_action = input(\"Write action (buy, fill, take, remaining, exit):\")\r\n if user_action == \"buy\":\r\n buying()\r\n elif user_action == \"fill\":\r\n filling()\r\n elif user_action == \"take\":\r\n taking()\r\n elif user_action == \"remaining\":\r\n stats_print()\r\n coffee_machine()\r\n elif user_action == \"exit\":\r\n return\r\n\r\n\r\ncoffee_machine()",
"step-ids": [
0,
2,
6,
7,
8
]
}
|
[
0,
2,
6,
7,
8
] |
#Import dependencies
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np
import string
import operator
from sklearn.feature_extraction.text import CountVectorizer
import pickle
import nltk
from nltk.corpus import stopwords
#nltk.download('stopwords')
from nltk.tokenize import word_tokenize
def text_process(text):
nopunc = [char for char in text if char not in string.punctuation]
nopunc = ''.join(nopunc)
return [word for word in nopunc.split() if word.lower() not in stopwords.words('english')]
#Looping through the web-scraped reviews to make predictions
def ml_predictor(web_scrapedf):
def text_process(text):
nopunc = [char for char in text if char not in string.punctuation]
nopunc = ''.join(nopunc)
return [word for word in nopunc.split() if word.lower() not in stopwords.words('english')]
#Loading the model
loaded_model = pickle.load(open("ml_model/model.pickle", 'rb'))
    #Loading the vectorizer
loaded_vectorizor = pickle.load(open("ml_model/vectorizer.pickle", 'rb'))
#Creating predictions for each review
for label, row in web_scrapedf.iterrows():
text = row['Reviews']
text_transform = loaded_vectorizor.transform([text])
ml_prediction = loaded_model.predict(text_transform)[0]
web_scrapedf.at[label, 'ml_predictions'] = ml_prediction
#Filtering on columns we need
scrape_results_df = web_scrapedf[['Reviews', 'ml_predictions']]
return scrape_results_df
#Function to create positive words for word cloud
def positive_words(scrape_results_df):
def text_process(text):
nopunc = [char for char in text if char not in string.punctuation]
nopunc = ''.join(nopunc)
return [word for word in nopunc.split() if word.lower() not in stopwords.words('english')]
#Creating list of positive words
positive_wordcloud = scrape_results_df[scrape_results_df['ml_predictions'] == 'Positive']
positivecv = CountVectorizer(analyzer=text_process)
positive_fit=positivecv.fit_transform(positive_wordcloud['Reviews'])
    #creating key-value dictionary pair of counts
positive_word_list = positivecv.get_feature_names();
positive_count_list = positive_fit.toarray().sum(axis=0)
positive_words = dict(zip(positive_word_list, positive_count_list))
positive_sorted = sorted(positive_words.items(), key=operator.itemgetter(1), reverse=True)
positive_sorted = [(p[0], int(p[1])) for p in positive_sorted]
positive_sorted = positive_sorted[:49]
return positive_sorted
#Function to create negative words for word cloud
def negative_words(scrape_results_df):
def text_process(text):
nopunc = [char for char in text if char not in string.punctuation]
nopunc = ''.join(nopunc)
return [word for word in nopunc.split() if word.lower() not in stopwords.words('english')]
#Creating the list of negative words
negative_wordcloud = scrape_results_df[scrape_results_df['ml_predictions'] == 'Negative']
negativecv = CountVectorizer(analyzer=text_process)
negative_fit=negativecv.fit_transform(negative_wordcloud['Reviews'])
    #creating key-value dictionary pair of counts
negative_word_list = negativecv.get_feature_names();
negative_count_list = negative_fit.toarray().sum(axis=0)
negative_words = dict(zip(negative_word_list, negative_count_list))
negative_sorted = sorted(negative_words.items(), key=operator.itemgetter(1), reverse=True)
negative_sorted = [(n[0], int(n[1])) for n in negative_sorted]
negative_sorted = negative_sorted[:49]
return negative_sorted
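# Rough end-to-end sketch (web_scrapedf is assumed to be a pandas DataFrame
# with a 'Reviews' column of raw review text):
#   results_df = ml_predictor(web_scrapedf)
#   top_positive = positive_words(results_df)
#   top_negative = negative_words(results_df)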
|
normal
|
{
"blob_id": "82f86284dddf48bf2c65ddf55eb6d7a372306373",
"index": 7182,
"step-1": "<mask token>\n\n\ndef positive_words(scrape_results_df):\n\n def text_process(text):\n nopunc = [char for char in text if char not in string.punctuation]\n nopunc = ''.join(nopunc)\n return [word for word in nopunc.split() if word.lower() not in\n stopwords.words('english')]\n positive_wordcloud = scrape_results_df[scrape_results_df[\n 'ml_predictions'] == 'Positive']\n positivecv = CountVectorizer(analyzer=text_process)\n positive_fit = positivecv.fit_transform(positive_wordcloud['Reviews'])\n positive_word_list = positivecv.get_feature_names()\n positive_count_list = positive_fit.toarray().sum(axis=0)\n positive_words = dict(zip(positive_word_list, positive_count_list))\n positive_sorted = sorted(positive_words.items(), key=operator.\n itemgetter(1), reverse=True)\n positive_sorted = [(p[0], int(p[1])) for p in positive_sorted]\n positive_sorted = positive_sorted[:49]\n return positive_sorted\n\n\ndef negative_words(scrape_results_df):\n\n def text_process(text):\n nopunc = [char for char in text if char not in string.punctuation]\n nopunc = ''.join(nopunc)\n return [word for word in nopunc.split() if word.lower() not in\n stopwords.words('english')]\n negative_wordcloud = scrape_results_df[scrape_results_df[\n 'ml_predictions'] == 'Negative']\n negativecv = CountVectorizer(analyzer=text_process)\n negative_fit = negativecv.fit_transform(negative_wordcloud['Reviews'])\n negative_word_list = negativecv.get_feature_names()\n negative_count_list = negative_fit.toarray().sum(axis=0)\n negative_words = dict(zip(negative_word_list, negative_count_list))\n negative_sorted = sorted(negative_words.items(), key=operator.\n itemgetter(1), reverse=True)\n negative_sorted = [(n[0], int(n[1])) for n in negative_sorted]\n negative_sorted = negative_sorted[:49]\n return negative_sorted\n",
"step-2": "<mask token>\n\n\ndef text_process(text):\n nopunc = [char for char in text if char not in string.punctuation]\n nopunc = ''.join(nopunc)\n return [word for word in nopunc.split() if word.lower() not in\n stopwords.words('english')]\n\n\n<mask token>\n\n\ndef positive_words(scrape_results_df):\n\n def text_process(text):\n nopunc = [char for char in text if char not in string.punctuation]\n nopunc = ''.join(nopunc)\n return [word for word in nopunc.split() if word.lower() not in\n stopwords.words('english')]\n positive_wordcloud = scrape_results_df[scrape_results_df[\n 'ml_predictions'] == 'Positive']\n positivecv = CountVectorizer(analyzer=text_process)\n positive_fit = positivecv.fit_transform(positive_wordcloud['Reviews'])\n positive_word_list = positivecv.get_feature_names()\n positive_count_list = positive_fit.toarray().sum(axis=0)\n positive_words = dict(zip(positive_word_list, positive_count_list))\n positive_sorted = sorted(positive_words.items(), key=operator.\n itemgetter(1), reverse=True)\n positive_sorted = [(p[0], int(p[1])) for p in positive_sorted]\n positive_sorted = positive_sorted[:49]\n return positive_sorted\n\n\ndef negative_words(scrape_results_df):\n\n def text_process(text):\n nopunc = [char for char in text if char not in string.punctuation]\n nopunc = ''.join(nopunc)\n return [word for word in nopunc.split() if word.lower() not in\n stopwords.words('english')]\n negative_wordcloud = scrape_results_df[scrape_results_df[\n 'ml_predictions'] == 'Negative']\n negativecv = CountVectorizer(analyzer=text_process)\n negative_fit = negativecv.fit_transform(negative_wordcloud['Reviews'])\n negative_word_list = negativecv.get_feature_names()\n negative_count_list = negative_fit.toarray().sum(axis=0)\n negative_words = dict(zip(negative_word_list, negative_count_list))\n negative_sorted = sorted(negative_words.items(), key=operator.\n itemgetter(1), reverse=True)\n negative_sorted = [(n[0], int(n[1])) for n in negative_sorted]\n negative_sorted = negative_sorted[:49]\n return negative_sorted\n",
"step-3": "<mask token>\n\n\ndef text_process(text):\n nopunc = [char for char in text if char not in string.punctuation]\n nopunc = ''.join(nopunc)\n return [word for word in nopunc.split() if word.lower() not in\n stopwords.words('english')]\n\n\ndef ml_predictor(web_scrapedf):\n\n def text_process(text):\n nopunc = [char for char in text if char not in string.punctuation]\n nopunc = ''.join(nopunc)\n return [word for word in nopunc.split() if word.lower() not in\n stopwords.words('english')]\n loaded_model = pickle.load(open('ml_model/model.pickle', 'rb'))\n loaded_vectorizor = pickle.load(open('ml_model/vectorizer.pickle', 'rb'))\n for label, row in web_scrapedf.iterrows():\n text = row['Reviews']\n text_transform = loaded_vectorizor.transform([text])\n ml_prediction = loaded_model.predict(text_transform)[0]\n web_scrapedf.at[label, 'ml_predictions'] = ml_prediction\n scrape_results_df = web_scrapedf[['Reviews', 'ml_predictions']]\n return scrape_results_df\n\n\ndef positive_words(scrape_results_df):\n\n def text_process(text):\n nopunc = [char for char in text if char not in string.punctuation]\n nopunc = ''.join(nopunc)\n return [word for word in nopunc.split() if word.lower() not in\n stopwords.words('english')]\n positive_wordcloud = scrape_results_df[scrape_results_df[\n 'ml_predictions'] == 'Positive']\n positivecv = CountVectorizer(analyzer=text_process)\n positive_fit = positivecv.fit_transform(positive_wordcloud['Reviews'])\n positive_word_list = positivecv.get_feature_names()\n positive_count_list = positive_fit.toarray().sum(axis=0)\n positive_words = dict(zip(positive_word_list, positive_count_list))\n positive_sorted = sorted(positive_words.items(), key=operator.\n itemgetter(1), reverse=True)\n positive_sorted = [(p[0], int(p[1])) for p in positive_sorted]\n positive_sorted = positive_sorted[:49]\n return positive_sorted\n\n\ndef negative_words(scrape_results_df):\n\n def text_process(text):\n nopunc = [char for char in text if char not in string.punctuation]\n nopunc = ''.join(nopunc)\n return [word for word in nopunc.split() if word.lower() not in\n stopwords.words('english')]\n negative_wordcloud = scrape_results_df[scrape_results_df[\n 'ml_predictions'] == 'Negative']\n negativecv = CountVectorizer(analyzer=text_process)\n negative_fit = negativecv.fit_transform(negative_wordcloud['Reviews'])\n negative_word_list = negativecv.get_feature_names()\n negative_count_list = negative_fit.toarray().sum(axis=0)\n negative_words = dict(zip(negative_word_list, negative_count_list))\n negative_sorted = sorted(negative_words.items(), key=operator.\n itemgetter(1), reverse=True)\n negative_sorted = [(n[0], int(n[1])) for n in negative_sorted]\n negative_sorted = negative_sorted[:49]\n return negative_sorted\n",
"step-4": "import pandas as pd\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport string\nimport operator\nfrom sklearn.feature_extraction.text import CountVectorizer\nimport pickle\nimport nltk\nfrom nltk.corpus import stopwords\nfrom nltk.tokenize import word_tokenize\n\n\ndef text_process(text):\n nopunc = [char for char in text if char not in string.punctuation]\n nopunc = ''.join(nopunc)\n return [word for word in nopunc.split() if word.lower() not in\n stopwords.words('english')]\n\n\ndef ml_predictor(web_scrapedf):\n\n def text_process(text):\n nopunc = [char for char in text if char not in string.punctuation]\n nopunc = ''.join(nopunc)\n return [word for word in nopunc.split() if word.lower() not in\n stopwords.words('english')]\n loaded_model = pickle.load(open('ml_model/model.pickle', 'rb'))\n loaded_vectorizor = pickle.load(open('ml_model/vectorizer.pickle', 'rb'))\n for label, row in web_scrapedf.iterrows():\n text = row['Reviews']\n text_transform = loaded_vectorizor.transform([text])\n ml_prediction = loaded_model.predict(text_transform)[0]\n web_scrapedf.at[label, 'ml_predictions'] = ml_prediction\n scrape_results_df = web_scrapedf[['Reviews', 'ml_predictions']]\n return scrape_results_df\n\n\ndef positive_words(scrape_results_df):\n\n def text_process(text):\n nopunc = [char for char in text if char not in string.punctuation]\n nopunc = ''.join(nopunc)\n return [word for word in nopunc.split() if word.lower() not in\n stopwords.words('english')]\n positive_wordcloud = scrape_results_df[scrape_results_df[\n 'ml_predictions'] == 'Positive']\n positivecv = CountVectorizer(analyzer=text_process)\n positive_fit = positivecv.fit_transform(positive_wordcloud['Reviews'])\n positive_word_list = positivecv.get_feature_names()\n positive_count_list = positive_fit.toarray().sum(axis=0)\n positive_words = dict(zip(positive_word_list, positive_count_list))\n positive_sorted = sorted(positive_words.items(), key=operator.\n itemgetter(1), reverse=True)\n positive_sorted = [(p[0], int(p[1])) for p in positive_sorted]\n positive_sorted = positive_sorted[:49]\n return positive_sorted\n\n\ndef negative_words(scrape_results_df):\n\n def text_process(text):\n nopunc = [char for char in text if char not in string.punctuation]\n nopunc = ''.join(nopunc)\n return [word for word in nopunc.split() if word.lower() not in\n stopwords.words('english')]\n negative_wordcloud = scrape_results_df[scrape_results_df[\n 'ml_predictions'] == 'Negative']\n negativecv = CountVectorizer(analyzer=text_process)\n negative_fit = negativecv.fit_transform(negative_wordcloud['Reviews'])\n negative_word_list = negativecv.get_feature_names()\n negative_count_list = negative_fit.toarray().sum(axis=0)\n negative_words = dict(zip(negative_word_list, negative_count_list))\n negative_sorted = sorted(negative_words.items(), key=operator.\n itemgetter(1), reverse=True)\n negative_sorted = [(n[0], int(n[1])) for n in negative_sorted]\n negative_sorted = negative_sorted[:49]\n return negative_sorted\n",
"step-5": "#Import dependencies\nimport pandas as pd\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport string\nimport operator\nfrom sklearn.feature_extraction.text import CountVectorizer\nimport pickle\nimport nltk\nfrom nltk.corpus import stopwords\n#nltk.download('stopwords')\nfrom nltk.tokenize import word_tokenize\n\n\ndef text_process(text):\n nopunc = [char for char in text if char not in string.punctuation]\n nopunc = ''.join(nopunc)\n return [word for word in nopunc.split() if word.lower() not in stopwords.words('english')]\n\n\n#Looping through the web-scraped reviews to make predictions\ndef ml_predictor(web_scrapedf):\n\n def text_process(text):\n nopunc = [char for char in text if char not in string.punctuation]\n nopunc = ''.join(nopunc)\n return [word for word in nopunc.split() if word.lower() not in stopwords.words('english')]\n\n #Loading the model\n loaded_model = pickle.load(open(\"ml_model/model.pickle\", 'rb'))\n \n #Loading the vectorizor\n loaded_vectorizor = pickle.load(open(\"ml_model/vectorizer.pickle\", 'rb'))\n \n\n #Creating predictions for each review\n for label, row in web_scrapedf.iterrows():\n text = row['Reviews']\n \n text_transform = loaded_vectorizor.transform([text])\n \n ml_prediction = loaded_model.predict(text_transform)[0]\n web_scrapedf.at[label, 'ml_predictions'] = ml_prediction\n\n #Filtering on columns we need \n scrape_results_df = web_scrapedf[['Reviews', 'ml_predictions']]\n\n return scrape_results_df\n\n#Function to create positive words for word cloud\ndef positive_words(scrape_results_df):\n\n def text_process(text):\n nopunc = [char for char in text if char not in string.punctuation]\n nopunc = ''.join(nopunc)\n return [word for word in nopunc.split() if word.lower() not in stopwords.words('english')]\n\n #Creating list of positive words\n positive_wordcloud = scrape_results_df[scrape_results_df['ml_predictions'] == 'Positive']\n\n positivecv = CountVectorizer(analyzer=text_process) \n positive_fit=positivecv.fit_transform(positive_wordcloud['Reviews'])\n\n #creating key value dicitionary pair of counts\n positive_word_list = positivecv.get_feature_names(); \n positive_count_list = positive_fit.toarray().sum(axis=0) \n\n\n positive_words = dict(zip(positive_word_list, positive_count_list))\n positive_sorted = sorted(positive_words.items(), key=operator.itemgetter(1), reverse=True)\n positive_sorted = [(p[0], int(p[1])) for p in positive_sorted]\n positive_sorted = positive_sorted[:49]\n\n return positive_sorted\n\n#Function to create negative words for word cloud\ndef negative_words(scrape_results_df):\n\n def text_process(text):\n nopunc = [char for char in text if char not in string.punctuation]\n nopunc = ''.join(nopunc)\n return [word for word in nopunc.split() if word.lower() not in stopwords.words('english')]\n \n #Creating the list of negative words\n negative_wordcloud = scrape_results_df[scrape_results_df['ml_predictions'] == 'Negative']\n\n negativecv = CountVectorizer(analyzer=text_process) \n negative_fit=negativecv.fit_transform(negative_wordcloud['Reviews'])\n\n #creating key value dicitionary pair of counts\n negative_word_list = negativecv.get_feature_names(); \n negative_count_list = negative_fit.toarray().sum(axis=0) \n\n\n negative_words = dict(zip(negative_word_list, negative_count_list))\n negative_sorted = sorted(negative_words.items(), key=operator.itemgetter(1), reverse=True)\n negative_sorted = [(n[0], int(n[1])) for n in negative_sorted]\n negative_sorted = 
negative_sorted[:49]\n\n return negative_sorted\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
# -*- coding: utf-8 -*-
import requests
import os
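# Post a plain-text notification to the LINE Notify API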
def line(body):
url = "https://notify-api.line.me/api/notify"
access_token = 'I89UnoDRgRSInUXJOTg5fAniBE08CUuxVqj8ythMLt8'
headers = {'Authorization': 'Bearer ' + access_token}
message = body
payload = {'message': message}
r = requests.post(url, headers=headers, params=payload)
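# Post a notification with the screenshot "screen.png" attached via the LINE Notify API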
def send_image():
url = "https://notify-api.line.me/api/notify"
access_token = 'I89UnoDRgRSInUXJOTg5fAniBE08CUuxVqj8ythMLt8'
# File Name
FILENAME = os.path.join(os.path.dirname(os.path.abspath(__file__)), "screen.png")
headers = {'Authorization': 'Bearer ' + access_token}
    message = 'この画面のエラーで落ちました'  # "It crashed on this screen due to an error"
image = FILENAME
payload = {'message': message}
files = {'imageFile': open(image, 'rb')}
r = requests.post(url, headers=headers, params=payload, files=files,)
|
normal
|
{
"blob_id": "8b598703df67fb8287fe6cdccda5b73bf2892da8",
"index": 4878,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef line(body):\n url = 'https://notify-api.line.me/api/notify'\n access_token = 'I89UnoDRgRSInUXJOTg5fAniBE08CUuxVqj8ythMLt8'\n headers = {'Authorization': 'Bearer ' + access_token}\n message = body\n payload = {'message': message}\n r = requests.post(url, headers=headers, params=payload)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef line(body):\n url = 'https://notify-api.line.me/api/notify'\n access_token = 'I89UnoDRgRSInUXJOTg5fAniBE08CUuxVqj8ythMLt8'\n headers = {'Authorization': 'Bearer ' + access_token}\n message = body\n payload = {'message': message}\n r = requests.post(url, headers=headers, params=payload)\n\n\ndef send_image():\n url = 'https://notify-api.line.me/api/notify'\n access_token = 'I89UnoDRgRSInUXJOTg5fAniBE08CUuxVqj8ythMLt8'\n FILENAME = os.path.join(os.path.dirname(os.path.abspath(__file__)),\n 'screen.png')\n headers = {'Authorization': 'Bearer ' + access_token}\n message = 'この画面のエラーで落ちました'\n image = FILENAME\n payload = {'message': message}\n files = {'imageFile': open(image, 'rb')}\n r = requests.post(url, headers=headers, params=payload, files=files)\n",
"step-4": "import requests\nimport os\n\n\ndef line(body):\n url = 'https://notify-api.line.me/api/notify'\n access_token = 'I89UnoDRgRSInUXJOTg5fAniBE08CUuxVqj8ythMLt8'\n headers = {'Authorization': 'Bearer ' + access_token}\n message = body\n payload = {'message': message}\n r = requests.post(url, headers=headers, params=payload)\n\n\ndef send_image():\n url = 'https://notify-api.line.me/api/notify'\n access_token = 'I89UnoDRgRSInUXJOTg5fAniBE08CUuxVqj8ythMLt8'\n FILENAME = os.path.join(os.path.dirname(os.path.abspath(__file__)),\n 'screen.png')\n headers = {'Authorization': 'Bearer ' + access_token}\n message = 'この画面のエラーで落ちました'\n image = FILENAME\n payload = {'message': message}\n files = {'imageFile': open(image, 'rb')}\n r = requests.post(url, headers=headers, params=payload, files=files)\n",
"step-5": "# -*- coding: utf-8 -*-\nimport requests\nimport os\n\n\ndef line(body):\n url = \"https://notify-api.line.me/api/notify\"\n access_token = 'I89UnoDRgRSInUXJOTg5fAniBE08CUuxVqj8ythMLt8'\n headers = {'Authorization': 'Bearer ' + access_token}\n message = body\n payload = {'message': message}\n r = requests.post(url, headers=headers, params=payload)\n\n\ndef send_image():\n url = \"https://notify-api.line.me/api/notify\"\n access_token = 'I89UnoDRgRSInUXJOTg5fAniBE08CUuxVqj8ythMLt8'\n # File Name\n FILENAME = os.path.join(os.path.dirname(os.path.abspath(__file__)), \"screen.png\")\n headers = {'Authorization': 'Bearer ' + access_token}\n message = 'この画面のエラーで落ちました'\n image = FILENAME\n payload = {'message': message}\n files = {'imageFile': open(image, 'rb')}\n r = requests.post(url, headers=headers, params=payload, files=files,)",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
class Game:
def __init__(self, Grille, PlayerX, PlayerY, Score=0):
self.PlayerX = PlayerX
self.PlayerY = PlayerY
self.Score = Score
self.Grille = Grille
def copy(self):
return copy.deepcopy(self)
<|reserved_special_token_0|>
def Affiche(Game):
canvas.delete('all')
H = canvas.winfo_height()
def DrawCase(x, y, coul):
x *= L
y *= L
canvas.create_rectangle(x, H - y, x + L, H - y - L, fill=coul)
for x in range(LARGEUR):
for y in range(HAUTEUR):
if Game.Grille[x, y] == 1:
DrawCase(x, y, 'gray')
if Game.Grille[x, y] == 2:
DrawCase(x, y, 'cyan')
DrawCase(Game.PlayerX, Game.PlayerY, 'red')
def AfficheScore(Game):
info = 'SCORE : ' + str(Game.Score)
canvas.create_text(80, 13, font='Helvetica 12 bold', fill='yellow',
text=info)
<|reserved_special_token_0|>
def GetAllExectuableMove(Game):
possibleMove = [(0, +1), (0, -1), (+1, 0), (-1, 0)]
executableMove = []
for tup in possibleMove:
x, y = Game.PlayerX + tup[0], Game.PlayerY + tup[1]
v = Game.Grille[x, y]
if v == 0:
executableMove.append((x, y))
return executableMove
<|reserved_special_token_0|>
def MonteCarlo(Game):
return Simulate(Game)
def MovePlayerWithIA(Game):
executableMove = GetAllExectuableMove(Game)
result = None, None
maxi = 0
if len(executableMove) == 0:
return None, None
for x, y in executableMove:
Game.PlayerX = x
Game.PlayerY = y
total = MonteCarlo(Game)
if total > maxi:
result = x, y
maxi = total
return result
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Game:
def __init__(self, Grille, PlayerX, PlayerY, Score=0):
self.PlayerX = PlayerX
self.PlayerY = PlayerY
self.Score = Score
self.Grille = Grille
def copy(self):
return copy.deepcopy(self)
<|reserved_special_token_0|>
def CreerUnePage(id):
Frame = tk.Frame(F)
ListePages[id] = Frame
Frame.grid(row=0, column=0, sticky='nsew')
return Frame
def AfficherPage(id):
global PageActive
PageActive = id
ListePages[id].tkraise()
<|reserved_special_token_0|>
def Affiche(Game):
canvas.delete('all')
H = canvas.winfo_height()
def DrawCase(x, y, coul):
x *= L
y *= L
canvas.create_rectangle(x, H - y, x + L, H - y - L, fill=coul)
for x in range(LARGEUR):
for y in range(HAUTEUR):
if Game.Grille[x, y] == 1:
DrawCase(x, y, 'gray')
if Game.Grille[x, y] == 2:
DrawCase(x, y, 'cyan')
DrawCase(Game.PlayerX, Game.PlayerY, 'red')
def AfficheScore(Game):
info = 'SCORE : ' + str(Game.Score)
canvas.create_text(80, 13, font='Helvetica 12 bold', fill='yellow',
text=info)
<|reserved_special_token_0|>
def GetAllExectuableMove(Game):
possibleMove = [(0, +1), (0, -1), (+1, 0), (-1, 0)]
executableMove = []
for tup in possibleMove:
x, y = Game.PlayerX + tup[0], Game.PlayerY + tup[1]
v = Game.Grille[x, y]
if v == 0:
executableMove.append((x, y))
return executableMove
def Simulate(Game):
nb = NbSimulation
G = np.tile(Game.Grille, (nb, 1, 1))
X = np.tile(Game.PlayerX, nb)
Y = np.tile(Game.PlayerY, nb)
S = np.tile(Game.Score, nb)
I = np.arange(nb)
continuer = True
while continuer:
G[I, X, Y] = 2
LPossibles = np.zeros((nb, 4), dtype=np.int8)
for i in range(4):
LPossibles[I, i] = np.where(G[I, X + dx[i + 1], Y + dy[i + 1]] ==
0, i + 1, 0)
LPossibles.sort(axis=1)
LPossibles = np.fliplr(LPossibles)
Indices = np.count_nonzero(LPossibles, axis=1)
Indices[Indices == 0] = 1
R = np.random.randint(12, size=nb, dtype=np.int8)
Position = LPossibles[I, R % Indices[I]]
if nb == np.count_nonzero(Position == 0):
continuer = False
S[I] += ds[Position]
X += dx[Position]
Y += dy[Position]
return np.mean(S)
def MonteCarlo(Game):
return Simulate(Game)
def MovePlayerWithIA(Game):
executableMove = GetAllExectuableMove(Game)
result = None, None
maxi = 0
if len(executableMove) == 0:
return None, None
for x, y in executableMove:
Game.PlayerX = x
Game.PlayerY = y
total = MonteCarlo(Game)
if total > maxi:
result = x, y
maxi = total
return result
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Game:
def __init__(self, Grille, PlayerX, PlayerY, Score=0):
self.PlayerX = PlayerX
self.PlayerY = PlayerY
self.Score = Score
self.Grille = Grille
def copy(self):
return copy.deepcopy(self)
<|reserved_special_token_0|>
Window.geometry(str(largeurPix) + 'x' + str(hauteurPix))
Window.title('TRON')
<|reserved_special_token_0|>
F.pack(side='top', fill='both', expand=True)
F.grid_rowconfigure(0, weight=1)
F.grid_columnconfigure(0, weight=1)
<|reserved_special_token_0|>
def CreerUnePage(id):
Frame = tk.Frame(F)
ListePages[id] = Frame
Frame.grid(row=0, column=0, sticky='nsew')
return Frame
def AfficherPage(id):
global PageActive
PageActive = id
ListePages[id].tkraise()
<|reserved_special_token_0|>
canvas.place(x=0, y=0)
def Affiche(Game):
canvas.delete('all')
H = canvas.winfo_height()
def DrawCase(x, y, coul):
x *= L
y *= L
canvas.create_rectangle(x, H - y, x + L, H - y - L, fill=coul)
for x in range(LARGEUR):
for y in range(HAUTEUR):
if Game.Grille[x, y] == 1:
DrawCase(x, y, 'gray')
if Game.Grille[x, y] == 2:
DrawCase(x, y, 'cyan')
DrawCase(Game.PlayerX, Game.PlayerY, 'red')
def AfficheScore(Game):
info = 'SCORE : ' + str(Game.Score)
canvas.create_text(80, 13, font='Helvetica 12 bold', fill='yellow',
text=info)
<|reserved_special_token_0|>
def GetAllExectuableMove(Game):
possibleMove = [(0, +1), (0, -1), (+1, 0), (-1, 0)]
executableMove = []
for tup in possibleMove:
x, y = Game.PlayerX + tup[0], Game.PlayerY + tup[1]
v = Game.Grille[x, y]
if v == 0:
executableMove.append((x, y))
return executableMove
def Simulate(Game):
nb = NbSimulation
G = np.tile(Game.Grille, (nb, 1, 1))
X = np.tile(Game.PlayerX, nb)
Y = np.tile(Game.PlayerY, nb)
S = np.tile(Game.Score, nb)
I = np.arange(nb)
continuer = True
while continuer:
G[I, X, Y] = 2
LPossibles = np.zeros((nb, 4), dtype=np.int8)
for i in range(4):
LPossibles[I, i] = np.where(G[I, X + dx[i + 1], Y + dy[i + 1]] ==
0, i + 1, 0)
LPossibles.sort(axis=1)
LPossibles = np.fliplr(LPossibles)
Indices = np.count_nonzero(LPossibles, axis=1)
Indices[Indices == 0] = 1
R = np.random.randint(12, size=nb, dtype=np.int8)
Position = LPossibles[I, R % Indices[I]]
if nb == np.count_nonzero(Position == 0):
continuer = False
S[I] += ds[Position]
X += dx[Position]
Y += dy[Position]
return np.mean(S)
def MonteCarlo(Game):
return Simulate(Game)
def MovePlayerWithIA(Game):
executableMove = GetAllExectuableMove(Game)
result = None, None
maxi = 0
if len(executableMove) == 0:
return None, None
for x, y in executableMove:
Game.PlayerX = x
Game.PlayerY = y
total = MonteCarlo(Game)
if total > maxi:
result = x, y
maxi = total
return result
def Play(Game):
x, y = Game.PlayerX, Game.PlayerY
Game.Grille[x, y] = 2
x, y = MovePlayerWithIA(Game)
if x == None or y == None:
return True
else:
Game.PlayerX = x
Game.PlayerY = y
Game.Score += 1
return False
<|reserved_special_token_0|>
def Partie():
Tstart = time.time()
PartieTermine = Play(CurrentGame)
print(time.time() - Tstart)
if not PartieTermine:
Affiche(CurrentGame)
Window.after(1000, Partie)
else:
AfficheScore(CurrentGame)
AfficherPage(0)
Window.after(100, Partie)
Window.mainloop()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
NbSimulation = 20000
Data = [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 1], [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1], [1, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 1], [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1], [1, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 1], [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1], [1, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 1], [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1], [1, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 1], [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1], [1, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 1], [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1], [1,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1], [1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
GInit = np.array(Data, dtype=np.int8)
GInit = np.flip(GInit, 0).transpose()
LARGEUR = 13
HAUTEUR = 17
class Game:
def __init__(self, Grille, PlayerX, PlayerY, Score=0):
self.PlayerX = PlayerX
self.PlayerY = PlayerY
self.Score = Score
self.Grille = Grille
def copy(self):
return copy.deepcopy(self)
GameInit = Game(GInit, 3, 5)
L = 20
largeurPix = LARGEUR * L
hauteurPix = HAUTEUR * L
Window = tk.Tk()
Window.geometry(str(largeurPix) + 'x' + str(hauteurPix))
Window.title('TRON')
F = tk.Frame(Window)
F.pack(side='top', fill='both', expand=True)
F.grid_rowconfigure(0, weight=1)
F.grid_columnconfigure(0, weight=1)
ListePages = {}
PageActive = 0
def CreerUnePage(id):
Frame = tk.Frame(F)
ListePages[id] = Frame
Frame.grid(row=0, column=0, sticky='nsew')
return Frame
def AfficherPage(id):
global PageActive
PageActive = id
ListePages[id].tkraise()
Frame0 = CreerUnePage(0)
canvas = tk.Canvas(Frame0, width=largeurPix, height=hauteurPix, bg='black')
canvas.place(x=0, y=0)
def Affiche(Game):
canvas.delete('all')
H = canvas.winfo_height()
def DrawCase(x, y, coul):
x *= L
y *= L
canvas.create_rectangle(x, H - y, x + L, H - y - L, fill=coul)
for x in range(LARGEUR):
for y in range(HAUTEUR):
if Game.Grille[x, y] == 1:
DrawCase(x, y, 'gray')
if Game.Grille[x, y] == 2:
DrawCase(x, y, 'cyan')
DrawCase(Game.PlayerX, Game.PlayerY, 'red')
def AfficheScore(Game):
info = 'SCORE : ' + str(Game.Score)
canvas.create_text(80, 13, font='Helvetica 12 bold', fill='yellow',
text=info)
dx = np.array([0, -1, 0, 1, 0], dtype=np.int8)
dy = np.array([0, 0, 1, 0, -1], dtype=np.int8)
ds = np.array([0, 1, 1, 1, 1], dtype=np.int8)
def GetAllExectuableMove(Game):
possibleMove = [(0, +1), (0, -1), (+1, 0), (-1, 0)]
executableMove = []
for tup in possibleMove:
x, y = Game.PlayerX + tup[0], Game.PlayerY + tup[1]
v = Game.Grille[x, y]
if v == 0:
executableMove.append((x, y))
return executableMove
def Simulate(Game):
nb = NbSimulation
G = np.tile(Game.Grille, (nb, 1, 1))
X = np.tile(Game.PlayerX, nb)
Y = np.tile(Game.PlayerY, nb)
S = np.tile(Game.Score, nb)
I = np.arange(nb)
continuer = True
while continuer:
G[I, X, Y] = 2
LPossibles = np.zeros((nb, 4), dtype=np.int8)
for i in range(4):
LPossibles[I, i] = np.where(G[I, X + dx[i + 1], Y + dy[i + 1]] ==
0, i + 1, 0)
LPossibles.sort(axis=1)
LPossibles = np.fliplr(LPossibles)
Indices = np.count_nonzero(LPossibles, axis=1)
Indices[Indices == 0] = 1
R = np.random.randint(12, size=nb, dtype=np.int8)
Position = LPossibles[I, R % Indices[I]]
if nb == np.count_nonzero(Position == 0):
continuer = False
S[I] += ds[Position]
X += dx[Position]
Y += dy[Position]
return np.mean(S)
def MonteCarlo(Game):
return Simulate(Game)
def MovePlayerWithIA(Game):
executableMove = GetAllExectuableMove(Game)
result = None, None
maxi = 0
if len(executableMove) == 0:
return None, None
for x, y in executableMove:
Game.PlayerX = x
Game.PlayerY = y
total = MonteCarlo(Game)
if total > maxi:
result = x, y
maxi = total
return result
def Play(Game):
x, y = Game.PlayerX, Game.PlayerY
Game.Grille[x, y] = 2
x, y = MovePlayerWithIA(Game)
if x == None or y == None:
return True
else:
Game.PlayerX = x
Game.PlayerY = y
Game.Score += 1
return False
CurrentGame = GameInit.copy()
def Partie():
Tstart = time.time()
PartieTermine = Play(CurrentGame)
print(time.time() - Tstart)
if not PartieTermine:
Affiche(CurrentGame)
Window.after(1000, Partie)
else:
AfficheScore(CurrentGame)
AfficherPage(0)
Window.after(100, Partie)
Window.mainloop()
<|reserved_special_token_1|>
import tkinter as tk
import random
import numpy as np
import copy
import time
#################################################################################
#
# Game data
NbSimulation = 20000
Data = [ [1,1,1,1,1,1,1,1,1,1,1,1,1],
[1,0,0,0,0,0,0,0,0,0,0,0,1],
[1,0,0,0,0,0,0,0,0,0,0,0,1],
[1,0,0,0,0,0,0,0,0,0,0,0,1],
[1,0,0,0,0,0,0,0,0,0,0,0,1],
[1,0,0,0,0,0,0,0,0,0,0,0,1],
[1,0,0,0,0,0,0,0,0,0,0,0,1],
[1,0,0,0,0,0,0,0,0,0,0,0,1],
[1,0,0,0,0,0,0,0,0,0,0,0,1],
[1,0,0,0,0,0,0,0,0,0,0,0,1],
[1,0,0,0,0,0,0,0,0,0,0,0,1],
[1,0,0,0,0,0,0,0,0,0,0,0,1],
[1,0,0,0,0,0,0,0,0,0,0,0,1],
[1,0,0,0,0,0,0,0,0,0,0,0,1],
[1,0,0,0,0,0,0,0,0,0,0,0,1],
[1,1,0,0,0,0,0,0,0,0,0,0,1],
[1,1,1,1,1,1,1,1,1,1,1,1,1] ]
GInit = np.array(Data,dtype=np.int8)
GInit = np.flip(GInit,0).transpose()
LARGEUR = 13
HAUTEUR = 17
# container used to pass all the game state around efficiently
class Game:
def __init__(self, Grille, PlayerX, PlayerY, Score=0):
self.PlayerX = PlayerX
self.PlayerY = PlayerY
self.Score = Score
self.Grille = Grille
def copy(self):
return copy.deepcopy(self)
GameInit = Game(GInit,3,5)
##############################################################
#
# creation of the main window - DO NOT TOUCH
L = 20 # width of one game cell, in pixels
largeurPix = LARGEUR * L
hauteurPix = HAUTEUR * L
Window = tk.Tk()
Window.geometry(str(largeurPix)+"x"+str(hauteurPix)) # window size
Window.title("TRON")
# creation of the main frame holding all the pages
F = tk.Frame(Window)
F.pack(side="top", fill="both", expand=True)
F.grid_rowconfigure(0, weight=1)
F.grid_columnconfigure(0, weight=1)
# management of the different pages
ListePages = {}
PageActive = 0
def CreerUnePage(id):
Frame = tk.Frame(F)
ListePages[id] = Frame
Frame.grid(row=0, column=0, sticky="nsew")
return Frame
def AfficherPage(id):
global PageActive
PageActive = id
ListePages[id].tkraise()
Frame0 = CreerUnePage(0)
canvas = tk.Canvas(Frame0,width = largeurPix, height = hauteurPix, bg ="black" )
canvas.place(x=0,y=0)
# Draws the game grid - do not touch
def Affiche(Game):
canvas.delete("all")
H = canvas.winfo_height()
def DrawCase(x,y,coul):
x *= L
y *= L
canvas.create_rectangle(x,H-y,x+L,H-y-L,fill=coul)
    # draw the walls
for x in range (LARGEUR):
for y in range (HAUTEUR):
if Game.Grille[x,y] == 1 : DrawCase(x,y,"gray" )
if Game.Grille[x,y] == 2 : DrawCase(x,y,"cyan" )
    # draw the bike
DrawCase(Game.PlayerX,Game.PlayerY,"red" )
def AfficheScore(Game):
info = "SCORE : " + str(Game.Score)
canvas.create_text(80, 13, font='Helvetica 12 bold', fill="yellow", text=info)
###########################################################
#
# AI player management
# YOUR CODE HERE
dx = np.array([0, -1, 0, 1, 0],dtype=np.int8)
dy = np.array([0, 0, 1, 0, -1],dtype=np.int8)
# scores associated with each move
ds = np.array([0, 1, 1, 1, 1],dtype=np.int8)
def GetAllExectuableMove(Game):
possibleMove = [(0,+1),(0,-1),(+1,0),(-1,0)]
executableMove = []
for tup in possibleMove :
x,y = Game.PlayerX + tup[0], Game.PlayerY + tup[1]
v = Game.Grille[x,y]
if v == 0 :
executableMove.append((x,y))
return executableMove
def Simulate(Game):
nb = NbSimulation
    # copy the starting data to create several simulated games
    G = np.tile(Game.Grille,(nb,1,1)) # grid (x,y) for each game
    X = np.tile(Game.PlayerX,nb) # playerX (x) for each game
    Y = np.tile(Game.PlayerY,nb) # playerY (y) for each game
    S = np.tile(Game.Score,nb) # score (s) for each game
I = np.arange(nb) # 0,1,2,3,...,nb-1
    # YOUR CODE HERE
continuer = True
while(continuer) :
        # for each game, mark the bike's trail by setting the current cell to 2
G[I, X, Y] = 2
        ### for each game, handle all the possible move indices
        # for each game, allocate a list of size 4 initialized to 0
LPossibles = np.zeros((nb, 4),dtype=np.int8)
        # for each game, set entry i to i+1 if the player can move in that direction, 0 otherwise
for i in range(4):
LPossibles[I,i] = np.where(G[I, X+dx[i+1], Y+dy[i+1]] == 0,i+1,0)
        # for each game, sort the list of directions in decreasing order
LPossibles.sort(axis=1)
LPossibles = np.fliplr(LPossibles)
        ### for each game, count the number of possible moves
        # for each game, count the non-zero elements of LPossibles
Indices = np.count_nonzero(LPossibles, axis=1)
        # for each game, replace counts of 0 with 1 so the modulo below does not crash
Indices[Indices == 0] = 1
        # for each game, generate a random direction index
R = np.random.randint(12,size=nb,dtype=np.int8)
        # for each game, retrieve a position (move) vector
Position = LPossibles[I, R % Indices[I]]
        ### handle the moves and the scoring
        # stop processing once every simulated game is stuck (static across all games)
if(nb == np.count_nonzero(Position == 0)): continuer = False
        # for each game, increment the score
S[I] += ds[Position]
        # for each game, move the player
X += dx[Position]
Y += dy[Position]
    # return the mean of the scores
return np.mean(S)
def MonteCarlo(Game):
return Simulate(Game)
def MovePlayerWithIA(Game):
executableMove = GetAllExectuableMove(Game)
result = (None, None)
maxi = 0
if(len(executableMove)==0):
return None, None
for x,y in executableMove:
Game.PlayerX = x
Game.PlayerY = y
total = MonteCarlo(Game)
if(total>maxi):
result = (x,y)
maxi = total
return result
def Play(Game):
x,y = Game.PlayerX, Game.PlayerY
    Game.Grille[x,y] = 2 # leave the bike's trail behind
x,y = MovePlayerWithIA(Game)
if x == None or y == None :
        # collision detected
        return True # game over
else :
        Game.PlayerX = x # commit the move
        Game.PlayerY = y # commit the move
Game.Score += 1
        return False # the game continues
################################################################################
CurrentGame = GameInit.copy()
def Partie():
Tstart = time.time()
PartieTermine = Play(CurrentGame)
print(time.time() - Tstart)
if not PartieTermine :
Affiche(CurrentGame)
        # schedule the Partie() function to run again after the delay below
        # in the meantime, let the OS redraw the interface
Window.after(1000,Partie)
else :
AfficheScore(CurrentGame)
#####################################################################################
#
# Setting up the interface - do not touch
AfficherPage(0)
Window.after(100,Partie)
Window.mainloop()
|
flexible
|
{
"blob_id": "86177dfa9b8bed5916703edcc16ea4d01cbabf84",
"index": 3278,
"step-1": "<mask token>\n\n\nclass Game:\n\n def __init__(self, Grille, PlayerX, PlayerY, Score=0):\n self.PlayerX = PlayerX\n self.PlayerY = PlayerY\n self.Score = Score\n self.Grille = Grille\n\n def copy(self):\n return copy.deepcopy(self)\n\n\n<mask token>\n\n\ndef Affiche(Game):\n canvas.delete('all')\n H = canvas.winfo_height()\n\n def DrawCase(x, y, coul):\n x *= L\n y *= L\n canvas.create_rectangle(x, H - y, x + L, H - y - L, fill=coul)\n for x in range(LARGEUR):\n for y in range(HAUTEUR):\n if Game.Grille[x, y] == 1:\n DrawCase(x, y, 'gray')\n if Game.Grille[x, y] == 2:\n DrawCase(x, y, 'cyan')\n DrawCase(Game.PlayerX, Game.PlayerY, 'red')\n\n\ndef AfficheScore(Game):\n info = 'SCORE : ' + str(Game.Score)\n canvas.create_text(80, 13, font='Helvetica 12 bold', fill='yellow',\n text=info)\n\n\n<mask token>\n\n\ndef GetAllExectuableMove(Game):\n possibleMove = [(0, +1), (0, -1), (+1, 0), (-1, 0)]\n executableMove = []\n for tup in possibleMove:\n x, y = Game.PlayerX + tup[0], Game.PlayerY + tup[1]\n v = Game.Grille[x, y]\n if v == 0:\n executableMove.append((x, y))\n return executableMove\n\n\n<mask token>\n\n\ndef MonteCarlo(Game):\n return Simulate(Game)\n\n\ndef MovePlayerWithIA(Game):\n executableMove = GetAllExectuableMove(Game)\n result = None, None\n maxi = 0\n if len(executableMove) == 0:\n return None, None\n for x, y in executableMove:\n Game.PlayerX = x\n Game.PlayerY = y\n total = MonteCarlo(Game)\n if total > maxi:\n result = x, y\n maxi = total\n return result\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Game:\n\n def __init__(self, Grille, PlayerX, PlayerY, Score=0):\n self.PlayerX = PlayerX\n self.PlayerY = PlayerY\n self.Score = Score\n self.Grille = Grille\n\n def copy(self):\n return copy.deepcopy(self)\n\n\n<mask token>\n\n\ndef CreerUnePage(id):\n Frame = tk.Frame(F)\n ListePages[id] = Frame\n Frame.grid(row=0, column=0, sticky='nsew')\n return Frame\n\n\ndef AfficherPage(id):\n global PageActive\n PageActive = id\n ListePages[id].tkraise()\n\n\n<mask token>\n\n\ndef Affiche(Game):\n canvas.delete('all')\n H = canvas.winfo_height()\n\n def DrawCase(x, y, coul):\n x *= L\n y *= L\n canvas.create_rectangle(x, H - y, x + L, H - y - L, fill=coul)\n for x in range(LARGEUR):\n for y in range(HAUTEUR):\n if Game.Grille[x, y] == 1:\n DrawCase(x, y, 'gray')\n if Game.Grille[x, y] == 2:\n DrawCase(x, y, 'cyan')\n DrawCase(Game.PlayerX, Game.PlayerY, 'red')\n\n\ndef AfficheScore(Game):\n info = 'SCORE : ' + str(Game.Score)\n canvas.create_text(80, 13, font='Helvetica 12 bold', fill='yellow',\n text=info)\n\n\n<mask token>\n\n\ndef GetAllExectuableMove(Game):\n possibleMove = [(0, +1), (0, -1), (+1, 0), (-1, 0)]\n executableMove = []\n for tup in possibleMove:\n x, y = Game.PlayerX + tup[0], Game.PlayerY + tup[1]\n v = Game.Grille[x, y]\n if v == 0:\n executableMove.append((x, y))\n return executableMove\n\n\ndef Simulate(Game):\n nb = NbSimulation\n G = np.tile(Game.Grille, (nb, 1, 1))\n X = np.tile(Game.PlayerX, nb)\n Y = np.tile(Game.PlayerY, nb)\n S = np.tile(Game.Score, nb)\n I = np.arange(nb)\n continuer = True\n while continuer:\n G[I, X, Y] = 2\n LPossibles = np.zeros((nb, 4), dtype=np.int8)\n for i in range(4):\n LPossibles[I, i] = np.where(G[I, X + dx[i + 1], Y + dy[i + 1]] ==\n 0, i + 1, 0)\n LPossibles.sort(axis=1)\n LPossibles = np.fliplr(LPossibles)\n Indices = np.count_nonzero(LPossibles, axis=1)\n Indices[Indices == 0] = 1\n R = np.random.randint(12, size=nb, dtype=np.int8)\n Position = LPossibles[I, R % Indices[I]]\n if nb == np.count_nonzero(Position == 0):\n continuer = False\n S[I] += ds[Position]\n X += dx[Position]\n Y += dy[Position]\n return np.mean(S)\n\n\ndef MonteCarlo(Game):\n return Simulate(Game)\n\n\ndef MovePlayerWithIA(Game):\n executableMove = GetAllExectuableMove(Game)\n result = None, None\n maxi = 0\n if len(executableMove) == 0:\n return None, None\n for x, y in executableMove:\n Game.PlayerX = x\n Game.PlayerY = y\n total = MonteCarlo(Game)\n if total > maxi:\n result = x, y\n maxi = total\n return result\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Game:\n\n def __init__(self, Grille, PlayerX, PlayerY, Score=0):\n self.PlayerX = PlayerX\n self.PlayerY = PlayerY\n self.Score = Score\n self.Grille = Grille\n\n def copy(self):\n return copy.deepcopy(self)\n\n\n<mask token>\nWindow.geometry(str(largeurPix) + 'x' + str(hauteurPix))\nWindow.title('TRON')\n<mask token>\nF.pack(side='top', fill='both', expand=True)\nF.grid_rowconfigure(0, weight=1)\nF.grid_columnconfigure(0, weight=1)\n<mask token>\n\n\ndef CreerUnePage(id):\n Frame = tk.Frame(F)\n ListePages[id] = Frame\n Frame.grid(row=0, column=0, sticky='nsew')\n return Frame\n\n\ndef AfficherPage(id):\n global PageActive\n PageActive = id\n ListePages[id].tkraise()\n\n\n<mask token>\ncanvas.place(x=0, y=0)\n\n\ndef Affiche(Game):\n canvas.delete('all')\n H = canvas.winfo_height()\n\n def DrawCase(x, y, coul):\n x *= L\n y *= L\n canvas.create_rectangle(x, H - y, x + L, H - y - L, fill=coul)\n for x in range(LARGEUR):\n for y in range(HAUTEUR):\n if Game.Grille[x, y] == 1:\n DrawCase(x, y, 'gray')\n if Game.Grille[x, y] == 2:\n DrawCase(x, y, 'cyan')\n DrawCase(Game.PlayerX, Game.PlayerY, 'red')\n\n\ndef AfficheScore(Game):\n info = 'SCORE : ' + str(Game.Score)\n canvas.create_text(80, 13, font='Helvetica 12 bold', fill='yellow',\n text=info)\n\n\n<mask token>\n\n\ndef GetAllExectuableMove(Game):\n possibleMove = [(0, +1), (0, -1), (+1, 0), (-1, 0)]\n executableMove = []\n for tup in possibleMove:\n x, y = Game.PlayerX + tup[0], Game.PlayerY + tup[1]\n v = Game.Grille[x, y]\n if v == 0:\n executableMove.append((x, y))\n return executableMove\n\n\ndef Simulate(Game):\n nb = NbSimulation\n G = np.tile(Game.Grille, (nb, 1, 1))\n X = np.tile(Game.PlayerX, nb)\n Y = np.tile(Game.PlayerY, nb)\n S = np.tile(Game.Score, nb)\n I = np.arange(nb)\n continuer = True\n while continuer:\n G[I, X, Y] = 2\n LPossibles = np.zeros((nb, 4), dtype=np.int8)\n for i in range(4):\n LPossibles[I, i] = np.where(G[I, X + dx[i + 1], Y + dy[i + 1]] ==\n 0, i + 1, 0)\n LPossibles.sort(axis=1)\n LPossibles = np.fliplr(LPossibles)\n Indices = np.count_nonzero(LPossibles, axis=1)\n Indices[Indices == 0] = 1\n R = np.random.randint(12, size=nb, dtype=np.int8)\n Position = LPossibles[I, R % Indices[I]]\n if nb == np.count_nonzero(Position == 0):\n continuer = False\n S[I] += ds[Position]\n X += dx[Position]\n Y += dy[Position]\n return np.mean(S)\n\n\ndef MonteCarlo(Game):\n return Simulate(Game)\n\n\ndef MovePlayerWithIA(Game):\n executableMove = GetAllExectuableMove(Game)\n result = None, None\n maxi = 0\n if len(executableMove) == 0:\n return None, None\n for x, y in executableMove:\n Game.PlayerX = x\n Game.PlayerY = y\n total = MonteCarlo(Game)\n if total > maxi:\n result = x, y\n maxi = total\n return result\n\n\ndef Play(Game):\n x, y = Game.PlayerX, Game.PlayerY\n Game.Grille[x, y] = 2\n x, y = MovePlayerWithIA(Game)\n if x == None or y == None:\n return True\n else:\n Game.PlayerX = x\n Game.PlayerY = y\n Game.Score += 1\n return False\n\n\n<mask token>\n\n\ndef Partie():\n Tstart = time.time()\n PartieTermine = Play(CurrentGame)\n print(time.time() - Tstart)\n if not PartieTermine:\n Affiche(CurrentGame)\n Window.after(1000, Partie)\n else:\n AfficheScore(CurrentGame)\n\n\nAfficherPage(0)\nWindow.after(100, Partie)\nWindow.mainloop()\n",
"step-4": "<mask token>\nNbSimulation = 20000\nData = [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 1], [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1], [1, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0, 1], [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1], [1, 0, 0,\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 1], [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1],\n [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1], [1, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 1], [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1], [1, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 1], [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1], [1, 0, 0, 0,\n 0, 0, 0, 0, 0, 0, 0, 0, 1], [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1], [1,\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1], [1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, \n 0, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]\nGInit = np.array(Data, dtype=np.int8)\nGInit = np.flip(GInit, 0).transpose()\nLARGEUR = 13\nHAUTEUR = 17\n\n\nclass Game:\n\n def __init__(self, Grille, PlayerX, PlayerY, Score=0):\n self.PlayerX = PlayerX\n self.PlayerY = PlayerY\n self.Score = Score\n self.Grille = Grille\n\n def copy(self):\n return copy.deepcopy(self)\n\n\nGameInit = Game(GInit, 3, 5)\nL = 20\nlargeurPix = LARGEUR * L\nhauteurPix = HAUTEUR * L\nWindow = tk.Tk()\nWindow.geometry(str(largeurPix) + 'x' + str(hauteurPix))\nWindow.title('TRON')\nF = tk.Frame(Window)\nF.pack(side='top', fill='both', expand=True)\nF.grid_rowconfigure(0, weight=1)\nF.grid_columnconfigure(0, weight=1)\nListePages = {}\nPageActive = 0\n\n\ndef CreerUnePage(id):\n Frame = tk.Frame(F)\n ListePages[id] = Frame\n Frame.grid(row=0, column=0, sticky='nsew')\n return Frame\n\n\ndef AfficherPage(id):\n global PageActive\n PageActive = id\n ListePages[id].tkraise()\n\n\nFrame0 = CreerUnePage(0)\ncanvas = tk.Canvas(Frame0, width=largeurPix, height=hauteurPix, bg='black')\ncanvas.place(x=0, y=0)\n\n\ndef Affiche(Game):\n canvas.delete('all')\n H = canvas.winfo_height()\n\n def DrawCase(x, y, coul):\n x *= L\n y *= L\n canvas.create_rectangle(x, H - y, x + L, H - y - L, fill=coul)\n for x in range(LARGEUR):\n for y in range(HAUTEUR):\n if Game.Grille[x, y] == 1:\n DrawCase(x, y, 'gray')\n if Game.Grille[x, y] == 2:\n DrawCase(x, y, 'cyan')\n DrawCase(Game.PlayerX, Game.PlayerY, 'red')\n\n\ndef AfficheScore(Game):\n info = 'SCORE : ' + str(Game.Score)\n canvas.create_text(80, 13, font='Helvetica 12 bold', fill='yellow',\n text=info)\n\n\ndx = np.array([0, -1, 0, 1, 0], dtype=np.int8)\ndy = np.array([0, 0, 1, 0, -1], dtype=np.int8)\nds = np.array([0, 1, 1, 1, 1], dtype=np.int8)\n\n\ndef GetAllExectuableMove(Game):\n possibleMove = [(0, +1), (0, -1), (+1, 0), (-1, 0)]\n executableMove = []\n for tup in possibleMove:\n x, y = Game.PlayerX + tup[0], Game.PlayerY + tup[1]\n v = Game.Grille[x, y]\n if v == 0:\n executableMove.append((x, y))\n return executableMove\n\n\ndef Simulate(Game):\n nb = NbSimulation\n G = np.tile(Game.Grille, (nb, 1, 1))\n X = np.tile(Game.PlayerX, nb)\n Y = np.tile(Game.PlayerY, nb)\n S = np.tile(Game.Score, nb)\n I = np.arange(nb)\n continuer = True\n while continuer:\n G[I, X, Y] = 2\n LPossibles = np.zeros((nb, 4), dtype=np.int8)\n for i in range(4):\n LPossibles[I, i] = np.where(G[I, X + dx[i + 1], Y + dy[i + 1]] ==\n 0, i + 1, 0)\n LPossibles.sort(axis=1)\n LPossibles = np.fliplr(LPossibles)\n Indices = np.count_nonzero(LPossibles, axis=1)\n Indices[Indices == 0] = 1\n R = np.random.randint(12, size=nb, dtype=np.int8)\n Position = LPossibles[I, R % Indices[I]]\n if nb == np.count_nonzero(Position == 0):\n continuer = False\n S[I] += ds[Position]\n X += dx[Position]\n Y += 
dy[Position]\n return np.mean(S)\n\n\ndef MonteCarlo(Game):\n return Simulate(Game)\n\n\ndef MovePlayerWithIA(Game):\n executableMove = GetAllExectuableMove(Game)\n result = None, None\n maxi = 0\n if len(executableMove) == 0:\n return None, None\n for x, y in executableMove:\n Game.PlayerX = x\n Game.PlayerY = y\n total = MonteCarlo(Game)\n if total > maxi:\n result = x, y\n maxi = total\n return result\n\n\ndef Play(Game):\n x, y = Game.PlayerX, Game.PlayerY\n Game.Grille[x, y] = 2\n x, y = MovePlayerWithIA(Game)\n if x == None or y == None:\n return True\n else:\n Game.PlayerX = x\n Game.PlayerY = y\n Game.Score += 1\n return False\n\n\nCurrentGame = GameInit.copy()\n\n\ndef Partie():\n Tstart = time.time()\n PartieTermine = Play(CurrentGame)\n print(time.time() - Tstart)\n if not PartieTermine:\n Affiche(CurrentGame)\n Window.after(1000, Partie)\n else:\n AfficheScore(CurrentGame)\n\n\nAfficherPage(0)\nWindow.after(100, Partie)\nWindow.mainloop()\n",
"step-5": "import tkinter as tk\nimport random\nimport numpy as np\nimport copy \nimport time\n\n#################################################################################\n#\n# Données de partie\nNbSimulation = 20000\nData = [ [1,1,1,1,1,1,1,1,1,1,1,1,1],\n [1,0,0,0,0,0,0,0,0,0,0,0,1],\n [1,0,0,0,0,0,0,0,0,0,0,0,1],\n [1,0,0,0,0,0,0,0,0,0,0,0,1],\n [1,0,0,0,0,0,0,0,0,0,0,0,1],\n [1,0,0,0,0,0,0,0,0,0,0,0,1],\n [1,0,0,0,0,0,0,0,0,0,0,0,1],\n [1,0,0,0,0,0,0,0,0,0,0,0,1],\n [1,0,0,0,0,0,0,0,0,0,0,0,1],\n [1,0,0,0,0,0,0,0,0,0,0,0,1],\n [1,0,0,0,0,0,0,0,0,0,0,0,1],\n [1,0,0,0,0,0,0,0,0,0,0,0,1],\n [1,0,0,0,0,0,0,0,0,0,0,0,1],\n [1,0,0,0,0,0,0,0,0,0,0,0,1],\n [1,0,0,0,0,0,0,0,0,0,0,0,1],\n [1,1,0,0,0,0,0,0,0,0,0,0,1],\n [1,1,1,1,1,1,1,1,1,1,1,1,1] ]\n\nGInit = np.array(Data,dtype=np.int8)\nGInit = np.flip(GInit,0).transpose()\n\nLARGEUR = 13\nHAUTEUR = 17\n\n# container pour passer efficacement toutes les données de la partie\n\nclass Game:\n def __init__(self, Grille, PlayerX, PlayerY, Score=0):\n self.PlayerX = PlayerX\n self.PlayerY = PlayerY\n self.Score = Score\n self.Grille = Grille\n \n def copy(self): \n return copy.deepcopy(self)\n\nGameInit = Game(GInit,3,5)\n\n##############################################################\n#\n# création de la fenetre principale - NE PAS TOUCHER\n\nL = 20 # largeur d'une case du jeu en pixel \nlargeurPix = LARGEUR * L\nhauteurPix = HAUTEUR * L\n\n\nWindow = tk.Tk()\nWindow.geometry(str(largeurPix)+\"x\"+str(hauteurPix)) # taille de la fenetre\nWindow.title(\"TRON\")\n\n\n# création de la frame principale stockant toutes les pages\n\nF = tk.Frame(Window)\nF.pack(side=\"top\", fill=\"both\", expand=True)\nF.grid_rowconfigure(0, weight=1)\nF.grid_columnconfigure(0, weight=1)\n\n# gestion des différentes pages\n\nListePages = {}\nPageActive = 0\n\ndef CreerUnePage(id):\n Frame = tk.Frame(F)\n ListePages[id] = Frame\n Frame.grid(row=0, column=0, sticky=\"nsew\")\n return Frame\n\ndef AfficherPage(id):\n global PageActive\n PageActive = id\n ListePages[id].tkraise()\n \nFrame0 = CreerUnePage(0)\n\ncanvas = tk.Canvas(Frame0,width = largeurPix, height = hauteurPix, bg =\"black\" )\ncanvas.place(x=0,y=0)\n\n# Dessine la grille de jeu - ne pas toucher\n\n\ndef Affiche(Game):\n canvas.delete(\"all\")\n H = canvas.winfo_height()\n \n def DrawCase(x,y,coul):\n x *= L\n y *= L\n canvas.create_rectangle(x,H-y,x+L,H-y-L,fill=coul)\n \n # dessin des murs \n \n for x in range (LARGEUR):\n for y in range (HAUTEUR):\n if Game.Grille[x,y] == 1 : DrawCase(x,y,\"gray\" )\n if Game.Grille[x,y] == 2 : DrawCase(x,y,\"cyan\" )\n \n \n # dessin de la moto\n DrawCase(Game.PlayerX,Game.PlayerY,\"red\" )\n\ndef AfficheScore(Game):\n info = \"SCORE : \" + str(Game.Score)\n canvas.create_text(80, 13, font='Helvetica 12 bold', fill=\"yellow\", text=info)\n\n\n###########################################################\n#\n# gestion du joueur IA\n\n# VOTRE CODE ICI \ndx = np.array([0, -1, 0, 1, 0],dtype=np.int8)\ndy = np.array([0, 0, 1, 0, -1],dtype=np.int8)\n\n# scores associés à chaque déplacement\nds = np.array([0, 1, 1, 1, 1],dtype=np.int8)\ndef GetAllExectuableMove(Game):\n possibleMove = [(0,+1),(0,-1),(+1,0),(-1,0)]\n executableMove = []\n for tup in possibleMove :\n x,y = Game.PlayerX + tup[0], Game.PlayerY + tup[1]\n v = Game.Grille[x,y]\n if v == 0 :\n executableMove.append((x,y))\n \n return executableMove\n\ndef Simulate(Game):\n\n nb = NbSimulation\n # on copie les datas de départ pour créer plusieurs parties\n G = np.tile(Game.Grille,(nb,1,1)) # grille (x,y) pour 
chaque partie\n X = np.tile(Game.PlayerX,nb) # playerX (x) pour chaque partie\n Y = np.tile(Game.PlayerY,nb) # playerY (y) pour chaque partie\n S = np.tile(Game.Score,nb) # score (s) pour chaque partie\n I = np.arange(nb) # 0,1,2,3,...,nb-1\n\n # VOTRE CODE ICI\n continuer = True\n\n while(continuer) :\n\n # pour chaque partie, on fait une affectation à 2 le passage de la moto\n G[I, X, Y] = 2\n\n\n ### pour chaque partie, on gère tous les index de déplacements possibles\n # pour chaque partie, on associe une liste de taille 4 initialisée à 0 \n LPossibles = np.zeros((nb, 4),dtype=np.int8)\n\n # pour chaque partie, on associe la liste de taille 4 à i si le joueur peut bouger dans cette direction, 0 sinon\n for i in range(4): \n LPossibles[I,i] = np.where(G[I, X+dx[i+1], Y+dy[i+1]] == 0,i+1,0)\n\n # pour chaque partie, on trie la liste des directions de manière décroissante\n LPossibles.sort(axis=1)\n LPossibles = np.fliplr(LPossibles)\n\n\n ### pour chaque partie, on compte le nombre de déplacements possibles\n # pour chaque partie, on compte le nombre d'éléments de LPossibles non nuls\n Indices = np.count_nonzero(LPossibles, axis=1)\n \n # pour chaque partie, on remplace les index de 0 par 1 pour pas planter sur le modulo\n Indices[Indices == 0] = 1\n\n # pour chaque partie, on génère un index de direction aléatoire\n R = np.random.randint(12,size=nb,dtype=np.int8)\n\n # pour chaque partie, on réucupère un vecteur position\n Position = LPossibles[I, R % Indices[I]]\n \n\n ### on gère les déplacement et le code\n\n # on arrete le traitement si, on est statique sur l'ensemble des parties\n if(nb == np.count_nonzero(Position == 0)): continuer = False\n\n # pour chaque partie, on incrémente le score\n S[I] += ds[Position]\n\n # pour chaque partie, on déplace le joueur\n X += dx[Position]\n Y += dy[Position]\n\n # on retourne la moyenne des scores\n return np.mean(S)\n\n\n \ndef MonteCarlo(Game):\n return Simulate(Game)\n\ndef MovePlayerWithIA(Game):\n executableMove = GetAllExectuableMove(Game)\n result = (None, None)\n maxi = 0\n if(len(executableMove)==0):\n return None, None\n\n for x,y in executableMove:\n Game.PlayerX = x \n Game.PlayerY = y\n total = MonteCarlo(Game)\n if(total>maxi):\n result = (x,y)\n maxi = total\n return result\n\ndef Play(Game): \n \n x,y = Game.PlayerX, Game.PlayerY\n\n Game.Grille[x,y] = 2 # laisse la trace de la moto\n\n x,y = MovePlayerWithIA(Game)\n if x == None or y == None :\n # collision détectée\n return True # partie terminée\n else :\n Game.PlayerX = x # valide le déplacement\n Game.PlayerY = y # valide le déplacement\n Game.Score += 1\n return False # la partie continue\n \n\n################################################################################\n \nCurrentGame = GameInit.copy()\n \n\ndef Partie():\n Tstart = time.time()\n PartieTermine = Play(CurrentGame)\n print(time.time() - Tstart)\n if not PartieTermine :\n Affiche(CurrentGame)\n # rappelle la fonction Partie() dans 30ms\n # entre temps laisse l'OS réafficher l'interface\n Window.after(1000,Partie) \n else :\n AfficheScore(CurrentGame)\n\n\n#####################################################################################\n#\n# Mise en place de l'interface - ne pas toucher\n\nAfficherPage(0)\nWindow.after(100,Partie)\nWindow.mainloop()\n \n\n \n \n\n \n \n\n",
"step-ids": [
8,
11,
14,
15,
17
]
}
|
[
8,
11,
14,
15,
17
] |
#!/usr/bin/python2
import md5
from pwn import *
import time
LIMIT = 500
TARGET = "shell2017.picoctf.com"
PORT = 46290
FILE = "hash.txt"
def generate_hashes(seed):
a = []
current_hash = seed
for i in range(1000):
current_hash = md5.new(current_hash).hexdigest()
a.append(current_hash)
return a
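# Return the hash that comes immediately before current_hash in the chain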
def find_prev_hash(array,current_hash):
return array[array.index(current_hash)-1]
def skip_intro(conn):
for i in range(6):
conn.recvline()
def skip_question(conn):
for i in range(4):
conn.recvline()
def go_to_register(conn):
#there's a bunch of crap that needs to be skipped for recvline()
skip_intro(conn)
skip_question(conn)
conn.sendline("r")
def extract_hash_id():
conn = remote(TARGET,PORT)
go_to_register(conn)
id_and_seed = conn.recvline().split()
hash_id = id_and_seed[7].rstrip('\n')
seed = id_and_seed[-1].rstrip('\n')
conn.recvline()
current_hash = conn.recvline().rstrip('\n')
prev_hash = find_prev_hash(generate_hashes(seed),current_hash)
conn.sendline(prev_hash)
#Yep is in the validated hash, so we will use this as the success metric
if "Yep!" in conn.recvline():
conn.close()
return (hash_id, seed)
conn.close()
return None
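# Collect many (hash_id, seed) pairs and append them to hash.txt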
def main():
hash_list = []
for i in range(1000):
try:
hash_list.append(extract_hash_id())
except:
time.sleep(1)
continue
with open("hash.txt","a") as fp:
for tup in hash_list:
fp.write("{} {}\n".format(tup[0],tup[1]))
#I went back into the code to use this function whenever I found a match in my hash text file
# print(find_prev_hash(generate_hashes("ead81fe8cfe9fda9e4c2093e17e4d024"),"58cb392a127b699c6f22f228e23ae73e"))
if __name__ == "__main__":
main()
|
normal
|
{
"blob_id": "5e78992df94cbbe441495b7d8fb80104ec000748",
"index": 6728,
"step-1": "<mask token>\n\n\ndef generate_hashes(seed):\n a = []\n current_hash = seed\n for i in range(1000):\n current_hash = md5.new(current_hash).hexdigest()\n a.append(current_hash)\n return a\n\n\ndef find_prev_hash(array, current_hash):\n return array[array.index(current_hash) - 1]\n\n\n<mask token>\n\n\ndef go_to_register(conn):\n skip_intro(conn)\n skip_question(conn)\n conn.sendline('r')\n\n\ndef extract_hash_id():\n conn = remote(TARGET, PORT)\n go_to_register(conn)\n id_and_seed = conn.recvline().split()\n hash_id = id_and_seed[7].rstrip('\\n')\n seed = id_and_seed[-1].rstrip('\\n')\n conn.recvline()\n current_hash = conn.recvline().rstrip('\\n')\n prev_hash = find_prev_hash(generate_hashes(seed), current_hash)\n conn.sendline(prev_hash)\n if 'Yep!' in conn.recvline():\n conn.close()\n return hash_id, seed\n conn.close()\n return None\n\n\ndef main():\n hash_list = []\n for i in range(1000):\n try:\n hash_list.append(extract_hash_id())\n except:\n time.sleep(1)\n continue\n with open('hash.txt', 'a') as fp:\n for tup in hash_list:\n fp.write('{} {}\\n'.format(tup[0], tup[1]))\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef generate_hashes(seed):\n a = []\n current_hash = seed\n for i in range(1000):\n current_hash = md5.new(current_hash).hexdigest()\n a.append(current_hash)\n return a\n\n\ndef find_prev_hash(array, current_hash):\n return array[array.index(current_hash) - 1]\n\n\ndef skip_intro(conn):\n for i in range(6):\n conn.recvline()\n\n\ndef skip_question(conn):\n for i in range(4):\n conn.recvline()\n\n\ndef go_to_register(conn):\n skip_intro(conn)\n skip_question(conn)\n conn.sendline('r')\n\n\ndef extract_hash_id():\n conn = remote(TARGET, PORT)\n go_to_register(conn)\n id_and_seed = conn.recvline().split()\n hash_id = id_and_seed[7].rstrip('\\n')\n seed = id_and_seed[-1].rstrip('\\n')\n conn.recvline()\n current_hash = conn.recvline().rstrip('\\n')\n prev_hash = find_prev_hash(generate_hashes(seed), current_hash)\n conn.sendline(prev_hash)\n if 'Yep!' in conn.recvline():\n conn.close()\n return hash_id, seed\n conn.close()\n return None\n\n\ndef main():\n hash_list = []\n for i in range(1000):\n try:\n hash_list.append(extract_hash_id())\n except:\n time.sleep(1)\n continue\n with open('hash.txt', 'a') as fp:\n for tup in hash_list:\n fp.write('{} {}\\n'.format(tup[0], tup[1]))\n\n\n<mask token>\n",
"step-3": "<mask token>\nLIMIT = 500\nTARGET = 'shell2017.picoctf.com'\nPORT = 46290\nFILE = 'hash.txt'\n\n\ndef generate_hashes(seed):\n a = []\n current_hash = seed\n for i in range(1000):\n current_hash = md5.new(current_hash).hexdigest()\n a.append(current_hash)\n return a\n\n\ndef find_prev_hash(array, current_hash):\n return array[array.index(current_hash) - 1]\n\n\ndef skip_intro(conn):\n for i in range(6):\n conn.recvline()\n\n\ndef skip_question(conn):\n for i in range(4):\n conn.recvline()\n\n\ndef go_to_register(conn):\n skip_intro(conn)\n skip_question(conn)\n conn.sendline('r')\n\n\ndef extract_hash_id():\n conn = remote(TARGET, PORT)\n go_to_register(conn)\n id_and_seed = conn.recvline().split()\n hash_id = id_and_seed[7].rstrip('\\n')\n seed = id_and_seed[-1].rstrip('\\n')\n conn.recvline()\n current_hash = conn.recvline().rstrip('\\n')\n prev_hash = find_prev_hash(generate_hashes(seed), current_hash)\n conn.sendline(prev_hash)\n if 'Yep!' in conn.recvline():\n conn.close()\n return hash_id, seed\n conn.close()\n return None\n\n\ndef main():\n hash_list = []\n for i in range(1000):\n try:\n hash_list.append(extract_hash_id())\n except:\n time.sleep(1)\n continue\n with open('hash.txt', 'a') as fp:\n for tup in hash_list:\n fp.write('{} {}\\n'.format(tup[0], tup[1]))\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "import md5\nfrom pwn import *\nimport time\nLIMIT = 500\nTARGET = 'shell2017.picoctf.com'\nPORT = 46290\nFILE = 'hash.txt'\n\n\ndef generate_hashes(seed):\n a = []\n current_hash = seed\n for i in range(1000):\n current_hash = md5.new(current_hash).hexdigest()\n a.append(current_hash)\n return a\n\n\ndef find_prev_hash(array, current_hash):\n return array[array.index(current_hash) - 1]\n\n\ndef skip_intro(conn):\n for i in range(6):\n conn.recvline()\n\n\ndef skip_question(conn):\n for i in range(4):\n conn.recvline()\n\n\ndef go_to_register(conn):\n skip_intro(conn)\n skip_question(conn)\n conn.sendline('r')\n\n\ndef extract_hash_id():\n conn = remote(TARGET, PORT)\n go_to_register(conn)\n id_and_seed = conn.recvline().split()\n hash_id = id_and_seed[7].rstrip('\\n')\n seed = id_and_seed[-1].rstrip('\\n')\n conn.recvline()\n current_hash = conn.recvline().rstrip('\\n')\n prev_hash = find_prev_hash(generate_hashes(seed), current_hash)\n conn.sendline(prev_hash)\n if 'Yep!' in conn.recvline():\n conn.close()\n return hash_id, seed\n conn.close()\n return None\n\n\ndef main():\n hash_list = []\n for i in range(1000):\n try:\n hash_list.append(extract_hash_id())\n except:\n time.sleep(1)\n continue\n with open('hash.txt', 'a') as fp:\n for tup in hash_list:\n fp.write('{} {}\\n'.format(tup[0], tup[1]))\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "#!/usr/bin/python2\nimport md5 \nfrom pwn import *\nimport time\n\nLIMIT = 500\nTARGET = \"shell2017.picoctf.com\"\nPORT = 46290\nFILE = \"hash.txt\"\n\ndef generate_hashes(seed):\n a = []\n current_hash = seed\n \n for i in range(1000): \n current_hash = md5.new(current_hash).hexdigest()\n a.append(current_hash)\n \n return a\n\ndef find_prev_hash(array,current_hash):\n return array[array.index(current_hash)-1]\n\ndef skip_intro(conn):\n for i in range(6):\n conn.recvline()\n\ndef skip_question(conn):\n for i in range(4):\n conn.recvline()\n\ndef go_to_register(conn):\n #there's a bunch of crap that needs to be skipped for recvline()\n skip_intro(conn)\n skip_question(conn)\n conn.sendline(\"r\")\n \ndef extract_hash_id():\n conn = remote(TARGET,PORT) \n go_to_register(conn)\n \n id_and_seed = conn.recvline().split()\n hash_id = id_and_seed[7].rstrip('\\n')\n seed = id_and_seed[-1].rstrip('\\n')\n\n conn.recvline()\n current_hash = conn.recvline().rstrip('\\n')\n \n prev_hash = find_prev_hash(generate_hashes(seed),current_hash)\n conn.sendline(prev_hash)\n \n #Yep is in the validated hash, so we will use this as the success metric\n if \"Yep!\" in conn.recvline():\n conn.close()\n return (hash_id, seed)\n conn.close()\n return None\n \ndef main(): \n hash_list = []\n for i in range(1000):\n try:\n hash_list.append(extract_hash_id())\n except:\n time.sleep(1)\n continue\n with open(\"hash.txt\",\"a\") as fp:\n for tup in hash_list:\n fp.write(\"{} {}\\n\".format(tup[0],tup[1])) \n\n #I went back into the code to use this function whenever I found a match in my hash text file\n # print(find_prev_hash(generate_hashes(\"ead81fe8cfe9fda9e4c2093e17e4d024\"),\"58cb392a127b699c6f22f228e23ae73e\"))\n \nif __name__ == \"__main__\":\n main()\n",
"step-ids": [
5,
7,
9,
10,
11
]
}
|
[
5,
7,
9,
10,
11
] |
<|reserved_special_token_0|>
class sfp_googlesearch(SpiderFootPlugin):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def watchedEvents(self):
return ['INTERNET_NAME']
def producedEvents(self):
return ['LINKED_URL_INTERNAL', 'SEARCH_ENGINE_WEB_CONTENT']
def handleEvent(self, event):
eventName = event.eventType
srcModuleName = event.module
eventData = event.data
if eventData in self.results:
self.sf.debug('Already did a search for ' + eventData +
', skipping.')
return None
else:
self.results.append(eventData)
pages = self.sf.googleIterate('site:' + eventData, dict(limit=self.
opts['pages'], useragent=self.opts['_useragent'], timeout=self.
opts['_fetchtimeout']))
if pages is None:
self.sf.info('No results returned from Google.')
return None
for page in pages.keys():
if page in self.results:
continue
else:
self.results.append(page)
if self.checkForStop():
return None
evt = SpiderFootEvent('SEARCH_ENGINE_WEB_CONTENT', pages[page],
self.__name__, event)
self.notifyListeners(evt)
if self.opts['fetchlinks']:
links = self.sf.parseLinks(page, pages[page], eventData)
if len(links) == 0:
continue
for link in links:
if link in self.results:
continue
else:
self.results.append(link)
self.sf.debug('Found a link: ' + link)
if self.sf.urlFQDN(link).endswith(eventData):
if self.checkForStop():
return None
evt = SpiderFootEvent('LINKED_URL_INTERNAL', link,
self.__name__, event)
self.notifyListeners(evt)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class sfp_googlesearch(SpiderFootPlugin):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def setup(self, sfc, userOpts=dict()):
self.sf = sfc
self.results = list()
for opt in userOpts.keys():
self.opts[opt] = userOpts[opt]
def watchedEvents(self):
return ['INTERNET_NAME']
def producedEvents(self):
return ['LINKED_URL_INTERNAL', 'SEARCH_ENGINE_WEB_CONTENT']
def handleEvent(self, event):
eventName = event.eventType
srcModuleName = event.module
eventData = event.data
if eventData in self.results:
self.sf.debug('Already did a search for ' + eventData +
', skipping.')
return None
else:
self.results.append(eventData)
pages = self.sf.googleIterate('site:' + eventData, dict(limit=self.
opts['pages'], useragent=self.opts['_useragent'], timeout=self.
opts['_fetchtimeout']))
if pages is None:
self.sf.info('No results returned from Google.')
return None
for page in pages.keys():
if page in self.results:
continue
else:
self.results.append(page)
if self.checkForStop():
return None
evt = SpiderFootEvent('SEARCH_ENGINE_WEB_CONTENT', pages[page],
self.__name__, event)
self.notifyListeners(evt)
if self.opts['fetchlinks']:
links = self.sf.parseLinks(page, pages[page], eventData)
if len(links) == 0:
continue
for link in links:
if link in self.results:
continue
else:
self.results.append(link)
self.sf.debug('Found a link: ' + link)
if self.sf.urlFQDN(link).endswith(eventData):
if self.checkForStop():
return None
evt = SpiderFootEvent('LINKED_URL_INTERNAL', link,
self.__name__, event)
self.notifyListeners(evt)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class sfp_googlesearch(SpiderFootPlugin):
"""Google:Footprint,Investigate:Some light Google scraping to identify sub-domains and links."""
opts = {'fetchlinks': True, 'pages': 20}
optdescs = {'fetchlinks':
'Fetch links found on the target domain-name?', 'pages':
'Number of Google results pages to iterate through.'}
results = list()
def setup(self, sfc, userOpts=dict()):
self.sf = sfc
self.results = list()
for opt in userOpts.keys():
self.opts[opt] = userOpts[opt]
def watchedEvents(self):
return ['INTERNET_NAME']
def producedEvents(self):
return ['LINKED_URL_INTERNAL', 'SEARCH_ENGINE_WEB_CONTENT']
def handleEvent(self, event):
eventName = event.eventType
srcModuleName = event.module
eventData = event.data
if eventData in self.results:
self.sf.debug('Already did a search for ' + eventData +
', skipping.')
return None
else:
self.results.append(eventData)
pages = self.sf.googleIterate('site:' + eventData, dict(limit=self.
opts['pages'], useragent=self.opts['_useragent'], timeout=self.
opts['_fetchtimeout']))
if pages is None:
self.sf.info('No results returned from Google.')
return None
for page in pages.keys():
if page in self.results:
continue
else:
self.results.append(page)
if self.checkForStop():
return None
evt = SpiderFootEvent('SEARCH_ENGINE_WEB_CONTENT', pages[page],
self.__name__, event)
self.notifyListeners(evt)
if self.opts['fetchlinks']:
links = self.sf.parseLinks(page, pages[page], eventData)
if len(links) == 0:
continue
for link in links:
if link in self.results:
continue
else:
self.results.append(link)
self.sf.debug('Found a link: ' + link)
if self.sf.urlFQDN(link).endswith(eventData):
if self.checkForStop():
return None
evt = SpiderFootEvent('LINKED_URL_INTERNAL', link,
self.__name__, event)
self.notifyListeners(evt)
<|reserved_special_token_1|>
from sflib import SpiderFoot, SpiderFootPlugin, SpiderFootEvent
class sfp_googlesearch(SpiderFootPlugin):
"""Google:Footprint,Investigate:Some light Google scraping to identify sub-domains and links."""
opts = {'fetchlinks': True, 'pages': 20}
optdescs = {'fetchlinks':
'Fetch links found on the target domain-name?', 'pages':
'Number of Google results pages to iterate through.'}
results = list()
def setup(self, sfc, userOpts=dict()):
self.sf = sfc
self.results = list()
for opt in userOpts.keys():
self.opts[opt] = userOpts[opt]
def watchedEvents(self):
return ['INTERNET_NAME']
def producedEvents(self):
return ['LINKED_URL_INTERNAL', 'SEARCH_ENGINE_WEB_CONTENT']
def handleEvent(self, event):
eventName = event.eventType
srcModuleName = event.module
eventData = event.data
if eventData in self.results:
self.sf.debug('Already did a search for ' + eventData +
', skipping.')
return None
else:
self.results.append(eventData)
pages = self.sf.googleIterate('site:' + eventData, dict(limit=self.
opts['pages'], useragent=self.opts['_useragent'], timeout=self.
opts['_fetchtimeout']))
if pages is None:
self.sf.info('No results returned from Google.')
return None
for page in pages.keys():
if page in self.results:
continue
else:
self.results.append(page)
if self.checkForStop():
return None
evt = SpiderFootEvent('SEARCH_ENGINE_WEB_CONTENT', pages[page],
self.__name__, event)
self.notifyListeners(evt)
if self.opts['fetchlinks']:
links = self.sf.parseLinks(page, pages[page], eventData)
if len(links) == 0:
continue
for link in links:
if link in self.results:
continue
else:
self.results.append(link)
self.sf.debug('Found a link: ' + link)
if self.sf.urlFQDN(link).endswith(eventData):
if self.checkForStop():
return None
evt = SpiderFootEvent('LINKED_URL_INTERNAL', link,
self.__name__, event)
self.notifyListeners(evt)
<|reserved_special_token_1|>
# -*- coding: utf-8 -*-
# -------------------------------------------------------------------------------
# Name: sfp_googlesearch
# Purpose: Searches Google for content related to the domain in question.
#
# Author: Steve Micallef <[email protected]>
#
# Created: 07/05/2012
# Copyright: (c) Steve Micallef 2012
# Licence: GPL
# -------------------------------------------------------------------------------
from sflib import SpiderFoot, SpiderFootPlugin, SpiderFootEvent
class sfp_googlesearch(SpiderFootPlugin):
"""Google:Footprint,Investigate:Some light Google scraping to identify sub-domains and links."""
# Default options
opts = {
'fetchlinks': True, # Should we fetch links on the base domain?
'pages': 20 # Number of google results pages to iterate
}
# Option descriptions
optdescs = {
'fetchlinks': "Fetch links found on the target domain-name?",
'pages': "Number of Google results pages to iterate through."
}
# Target
results = list()
def setup(self, sfc, userOpts=dict()):
self.sf = sfc
self.results = list()
for opt in userOpts.keys():
self.opts[opt] = userOpts[opt]
# What events is this module interested in for input
def watchedEvents(self):
return ["INTERNET_NAME"]
# What events this module produces
# This is to support the end user in selecting modules based on events
# produced.
def producedEvents(self):
return ["LINKED_URL_INTERNAL", "SEARCH_ENGINE_WEB_CONTENT"]
def handleEvent(self, event):
eventName = event.eventType
srcModuleName = event.module
eventData = event.data
if eventData in self.results:
self.sf.debug("Already did a search for " + eventData + ", skipping.")
return None
else:
self.results.append(eventData)
# Sites hosted on the domain
pages = self.sf.googleIterate("site:" + eventData,
dict(limit=self.opts['pages'], useragent=self.opts['_useragent'],
timeout=self.opts['_fetchtimeout']))
if pages is None:
self.sf.info("No results returned from Google.")
return None
for page in pages.keys():
if page in self.results:
continue
else:
self.results.append(page)
# Check if we've been asked to stop
if self.checkForStop():
return None
# Submit the google results for analysis
evt = SpiderFootEvent("SEARCH_ENGINE_WEB_CONTENT", pages[page],
self.__name__, event)
self.notifyListeners(evt)
# We can optionally fetch links to our domain found in the search
# results. These may not have been identified through spidering.
if self.opts['fetchlinks']:
links = self.sf.parseLinks(page, pages[page], eventData)
if len(links) == 0:
continue
for link in links:
if link in self.results:
continue
else:
self.results.append(link)
self.sf.debug("Found a link: " + link)
if self.sf.urlFQDN(link).endswith(eventData):
if self.checkForStop():
return None
evt = SpiderFootEvent("LINKED_URL_INTERNAL", link,
self.__name__, event)
self.notifyListeners(evt)
# End of sfp_googlesearch class
|
flexible
|
{
"blob_id": "3a6eaa238e78e7a818bcf6e18cc7881eadf94b07",
"index": 7863,
"step-1": "<mask token>\n\n\nclass sfp_googlesearch(SpiderFootPlugin):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def watchedEvents(self):\n return ['INTERNET_NAME']\n\n def producedEvents(self):\n return ['LINKED_URL_INTERNAL', 'SEARCH_ENGINE_WEB_CONTENT']\n\n def handleEvent(self, event):\n eventName = event.eventType\n srcModuleName = event.module\n eventData = event.data\n if eventData in self.results:\n self.sf.debug('Already did a search for ' + eventData +\n ', skipping.')\n return None\n else:\n self.results.append(eventData)\n pages = self.sf.googleIterate('site:' + eventData, dict(limit=self.\n opts['pages'], useragent=self.opts['_useragent'], timeout=self.\n opts['_fetchtimeout']))\n if pages is None:\n self.sf.info('No results returned from Google.')\n return None\n for page in pages.keys():\n if page in self.results:\n continue\n else:\n self.results.append(page)\n if self.checkForStop():\n return None\n evt = SpiderFootEvent('SEARCH_ENGINE_WEB_CONTENT', pages[page],\n self.__name__, event)\n self.notifyListeners(evt)\n if self.opts['fetchlinks']:\n links = self.sf.parseLinks(page, pages[page], eventData)\n if len(links) == 0:\n continue\n for link in links:\n if link in self.results:\n continue\n else:\n self.results.append(link)\n self.sf.debug('Found a link: ' + link)\n if self.sf.urlFQDN(link).endswith(eventData):\n if self.checkForStop():\n return None\n evt = SpiderFootEvent('LINKED_URL_INTERNAL', link,\n self.__name__, event)\n self.notifyListeners(evt)\n",
"step-2": "<mask token>\n\n\nclass sfp_googlesearch(SpiderFootPlugin):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def setup(self, sfc, userOpts=dict()):\n self.sf = sfc\n self.results = list()\n for opt in userOpts.keys():\n self.opts[opt] = userOpts[opt]\n\n def watchedEvents(self):\n return ['INTERNET_NAME']\n\n def producedEvents(self):\n return ['LINKED_URL_INTERNAL', 'SEARCH_ENGINE_WEB_CONTENT']\n\n def handleEvent(self, event):\n eventName = event.eventType\n srcModuleName = event.module\n eventData = event.data\n if eventData in self.results:\n self.sf.debug('Already did a search for ' + eventData +\n ', skipping.')\n return None\n else:\n self.results.append(eventData)\n pages = self.sf.googleIterate('site:' + eventData, dict(limit=self.\n opts['pages'], useragent=self.opts['_useragent'], timeout=self.\n opts['_fetchtimeout']))\n if pages is None:\n self.sf.info('No results returned from Google.')\n return None\n for page in pages.keys():\n if page in self.results:\n continue\n else:\n self.results.append(page)\n if self.checkForStop():\n return None\n evt = SpiderFootEvent('SEARCH_ENGINE_WEB_CONTENT', pages[page],\n self.__name__, event)\n self.notifyListeners(evt)\n if self.opts['fetchlinks']:\n links = self.sf.parseLinks(page, pages[page], eventData)\n if len(links) == 0:\n continue\n for link in links:\n if link in self.results:\n continue\n else:\n self.results.append(link)\n self.sf.debug('Found a link: ' + link)\n if self.sf.urlFQDN(link).endswith(eventData):\n if self.checkForStop():\n return None\n evt = SpiderFootEvent('LINKED_URL_INTERNAL', link,\n self.__name__, event)\n self.notifyListeners(evt)\n",
"step-3": "<mask token>\n\n\nclass sfp_googlesearch(SpiderFootPlugin):\n \"\"\"Google:Footprint,Investigate:Some light Google scraping to identify sub-domains and links.\"\"\"\n opts = {'fetchlinks': True, 'pages': 20}\n optdescs = {'fetchlinks':\n 'Fetch links found on the target domain-name?', 'pages':\n 'Number of Google results pages to iterate through.'}\n results = list()\n\n def setup(self, sfc, userOpts=dict()):\n self.sf = sfc\n self.results = list()\n for opt in userOpts.keys():\n self.opts[opt] = userOpts[opt]\n\n def watchedEvents(self):\n return ['INTERNET_NAME']\n\n def producedEvents(self):\n return ['LINKED_URL_INTERNAL', 'SEARCH_ENGINE_WEB_CONTENT']\n\n def handleEvent(self, event):\n eventName = event.eventType\n srcModuleName = event.module\n eventData = event.data\n if eventData in self.results:\n self.sf.debug('Already did a search for ' + eventData +\n ', skipping.')\n return None\n else:\n self.results.append(eventData)\n pages = self.sf.googleIterate('site:' + eventData, dict(limit=self.\n opts['pages'], useragent=self.opts['_useragent'], timeout=self.\n opts['_fetchtimeout']))\n if pages is None:\n self.sf.info('No results returned from Google.')\n return None\n for page in pages.keys():\n if page in self.results:\n continue\n else:\n self.results.append(page)\n if self.checkForStop():\n return None\n evt = SpiderFootEvent('SEARCH_ENGINE_WEB_CONTENT', pages[page],\n self.__name__, event)\n self.notifyListeners(evt)\n if self.opts['fetchlinks']:\n links = self.sf.parseLinks(page, pages[page], eventData)\n if len(links) == 0:\n continue\n for link in links:\n if link in self.results:\n continue\n else:\n self.results.append(link)\n self.sf.debug('Found a link: ' + link)\n if self.sf.urlFQDN(link).endswith(eventData):\n if self.checkForStop():\n return None\n evt = SpiderFootEvent('LINKED_URL_INTERNAL', link,\n self.__name__, event)\n self.notifyListeners(evt)\n",
"step-4": "from sflib import SpiderFoot, SpiderFootPlugin, SpiderFootEvent\n\n\nclass sfp_googlesearch(SpiderFootPlugin):\n \"\"\"Google:Footprint,Investigate:Some light Google scraping to identify sub-domains and links.\"\"\"\n opts = {'fetchlinks': True, 'pages': 20}\n optdescs = {'fetchlinks':\n 'Fetch links found on the target domain-name?', 'pages':\n 'Number of Google results pages to iterate through.'}\n results = list()\n\n def setup(self, sfc, userOpts=dict()):\n self.sf = sfc\n self.results = list()\n for opt in userOpts.keys():\n self.opts[opt] = userOpts[opt]\n\n def watchedEvents(self):\n return ['INTERNET_NAME']\n\n def producedEvents(self):\n return ['LINKED_URL_INTERNAL', 'SEARCH_ENGINE_WEB_CONTENT']\n\n def handleEvent(self, event):\n eventName = event.eventType\n srcModuleName = event.module\n eventData = event.data\n if eventData in self.results:\n self.sf.debug('Already did a search for ' + eventData +\n ', skipping.')\n return None\n else:\n self.results.append(eventData)\n pages = self.sf.googleIterate('site:' + eventData, dict(limit=self.\n opts['pages'], useragent=self.opts['_useragent'], timeout=self.\n opts['_fetchtimeout']))\n if pages is None:\n self.sf.info('No results returned from Google.')\n return None\n for page in pages.keys():\n if page in self.results:\n continue\n else:\n self.results.append(page)\n if self.checkForStop():\n return None\n evt = SpiderFootEvent('SEARCH_ENGINE_WEB_CONTENT', pages[page],\n self.__name__, event)\n self.notifyListeners(evt)\n if self.opts['fetchlinks']:\n links = self.sf.parseLinks(page, pages[page], eventData)\n if len(links) == 0:\n continue\n for link in links:\n if link in self.results:\n continue\n else:\n self.results.append(link)\n self.sf.debug('Found a link: ' + link)\n if self.sf.urlFQDN(link).endswith(eventData):\n if self.checkForStop():\n return None\n evt = SpiderFootEvent('LINKED_URL_INTERNAL', link,\n self.__name__, event)\n self.notifyListeners(evt)\n",
"step-5": "# -*- coding: utf-8 -*-\r\n# -------------------------------------------------------------------------------\r\n# Name: sfp_googlesearch\r\n# Purpose: Searches Google for content related to the domain in question.\r\n#\r\n# Author: Steve Micallef <[email protected]>\r\n#\r\n# Created: 07/05/2012\r\n# Copyright: (c) Steve Micallef 2012\r\n# Licence: GPL\r\n# -------------------------------------------------------------------------------\r\n\r\nfrom sflib import SpiderFoot, SpiderFootPlugin, SpiderFootEvent\r\n\r\n\r\nclass sfp_googlesearch(SpiderFootPlugin):\r\n \"\"\"Google:Footprint,Investigate:Some light Google scraping to identify sub-domains and links.\"\"\"\r\n\r\n # Default options\r\n opts = {\r\n 'fetchlinks': True, # Should we fetch links on the base domain?\r\n 'pages': 20 # Number of google results pages to iterate\r\n }\r\n\r\n # Option descriptions\r\n optdescs = {\r\n 'fetchlinks': \"Fetch links found on the target domain-name?\",\r\n 'pages': \"Number of Google results pages to iterate through.\"\r\n }\r\n\r\n # Target\r\n results = list()\r\n\r\n def setup(self, sfc, userOpts=dict()):\r\n self.sf = sfc\r\n self.results = list()\r\n\r\n for opt in userOpts.keys():\r\n self.opts[opt] = userOpts[opt]\r\n\r\n # What events is this module interested in for input\r\n def watchedEvents(self):\r\n return [\"INTERNET_NAME\"]\r\n\r\n # What events this module produces\r\n # This is to support the end user in selecting modules based on events\r\n # produced.\r\n def producedEvents(self):\r\n return [\"LINKED_URL_INTERNAL\", \"SEARCH_ENGINE_WEB_CONTENT\"]\r\n\r\n def handleEvent(self, event):\r\n eventName = event.eventType\r\n srcModuleName = event.module\r\n eventData = event.data\r\n\r\n if eventData in self.results:\r\n self.sf.debug(\"Already did a search for \" + eventData + \", skipping.\")\r\n return None\r\n else:\r\n self.results.append(eventData)\r\n\r\n # Sites hosted on the domain\r\n pages = self.sf.googleIterate(\"site:\" + eventData,\r\n dict(limit=self.opts['pages'], useragent=self.opts['_useragent'],\r\n timeout=self.opts['_fetchtimeout']))\r\n if pages is None:\r\n self.sf.info(\"No results returned from Google.\")\r\n return None\r\n\r\n for page in pages.keys():\r\n if page in self.results:\r\n continue\r\n else:\r\n self.results.append(page)\r\n\r\n # Check if we've been asked to stop\r\n if self.checkForStop():\r\n return None\r\n\r\n # Submit the google results for analysis\r\n evt = SpiderFootEvent(\"SEARCH_ENGINE_WEB_CONTENT\", pages[page],\r\n self.__name__, event)\r\n self.notifyListeners(evt)\r\n\r\n # We can optionally fetch links to our domain found in the search\r\n # results. These may not have been identified through spidering.\r\n if self.opts['fetchlinks']:\r\n links = self.sf.parseLinks(page, pages[page], eventData)\r\n if len(links) == 0:\r\n continue\r\n\r\n for link in links:\r\n if link in self.results:\r\n continue\r\n else:\r\n self.results.append(link)\r\n self.sf.debug(\"Found a link: \" + link)\r\n if self.sf.urlFQDN(link).endswith(eventData):\r\n if self.checkForStop():\r\n return None\r\n\r\n evt = SpiderFootEvent(\"LINKED_URL_INTERNAL\", link,\r\n self.__name__, event)\r\n self.notifyListeners(evt)\r\n\r\n# End of sfp_googlesearch class\r\n",
"step-ids": [
4,
5,
7,
8,
9
]
}
|
[
4,
5,
7,
8,
9
] |
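# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the SpiderFoot module above): the plugin's
# core filtering step -- keep only links whose fully-qualified domain name
# belongs to the target domain -- redone with the standard library only, so it
# can run without sflib.  The regex used for link extraction is a
# simplification of whatever sf.parseLinks() does internally and should be
# read as an assumption, not as the SpiderFoot implementation.
# ---------------------------------------------------------------------------
import re
from urllib.parse import urlparse


def internal_links(html, target_domain):
    """Return links in `html` whose hostname falls under `target_domain`."""
    candidates = re.findall(r'href=["\'](https?://[^"\']+)["\']', html)
    matches = []
    for link in candidates:
        fqdn = urlparse(link).hostname or ''
        if fqdn == target_domain or fqdn.endswith('.' + target_domain):
            matches.append(link)
    return matches


if __name__ == '__main__':
    sample = '<a href="https://docs.example.com/a">a</a> <a href="https://other.net/b">b</a>'
    print(internal_links(sample, 'example.com'))  # ['https://docs.example.com/a']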
<|reserved_special_token_0|>
class Unit(models.Model):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Unit(models.Model):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def __str__(self):
return f'{self.short_name}'
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Unit(models.Model):
short_name = models.CharField(max_length=20)
full_name = models.CharField(max_length=255, null=True)
weight_in_grams = models.FloatField(default=1.0)
def __str__(self):
return f'{self.short_name}'
<|reserved_special_token_1|>
from django.db import models
class Unit(models.Model):
short_name = models.CharField(max_length=20)
full_name = models.CharField(max_length=255, null=True)
weight_in_grams = models.FloatField(default=1.0)
def __str__(self):
return f'{self.short_name}'
<|reserved_special_token_1|>
from django.db import models
#from ingredients.models import *
class Unit(models.Model):
short_name = models.CharField(max_length=20)
full_name = models.CharField(max_length=255, null=True)
weight_in_grams = models.FloatField(default=1.0)
def __str__(self):
return f"{self.short_name}"
|
flexible
|
{
"blob_id": "fa880adcb9f009ffc206de59e8284ac6350fef4c",
"index": 5948,
"step-1": "<mask token>\n\n\nclass Unit(models.Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass Unit(models.Model):\n <mask token>\n <mask token>\n <mask token>\n\n def __str__(self):\n return f'{self.short_name}'\n",
"step-3": "<mask token>\n\n\nclass Unit(models.Model):\n short_name = models.CharField(max_length=20)\n full_name = models.CharField(max_length=255, null=True)\n weight_in_grams = models.FloatField(default=1.0)\n\n def __str__(self):\n return f'{self.short_name}'\n",
"step-4": "from django.db import models\n\n\nclass Unit(models.Model):\n short_name = models.CharField(max_length=20)\n full_name = models.CharField(max_length=255, null=True)\n weight_in_grams = models.FloatField(default=1.0)\n\n def __str__(self):\n return f'{self.short_name}'\n",
"step-5": "from django.db import models\n#from ingredients.models import *\n\nclass Unit(models.Model):\n short_name = models.CharField(max_length=20)\n full_name = models.CharField(max_length=255, null=True)\n weight_in_grams = models.FloatField(default=1.0) \n\n def __str__(self):\n return f\"{self.short_name}\"",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
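# Illustrative usage sketch for the Unit model above.  It assumes a configured
# Django project with this app in INSTALLED_APPS and migrations applied; the
# `to_grams` helper is added here purely for demonstration and is not part of
# the original model.
def to_grams(amount, unit):
    """Convert an amount expressed in `unit` into grams."""
    return amount * unit.weight_in_grams

# kg = Unit.objects.create(short_name='kg', full_name='kilogram', weight_in_grams=1000.0)
# to_grams(2.5, kg)  # -> 2500.0 grams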
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License
from .attack_models import (DriftAttack, AdditiveGaussian, RandomGaussian,
BitFlipAttack, RandomSignFlipAttack)
from typing import Dict
def get_attack(attack_config: Dict):
if attack_config["attack_model"] == 'drift':
return DriftAttack(attack_config=attack_config)
elif attack_config["attack_model"] == 'additive_gaussian':
return AdditiveGaussian(attack_config=attack_config)
elif attack_config["attack_model"] == 'random_gaussian':
return RandomGaussian(attack_config=attack_config)
elif attack_config["attack_model"] == 'bit_flip':
return BitFlipAttack(attack_config=attack_config)
elif attack_config["attack_model"] == 'random_sign_flip':
return RandomSignFlipAttack(attack_config=attack_config)
else:
raise NotImplementedError("Invalid attack model: {}".format(attack_config["attack_model"]))
def launch_attack(attack_mode, mal_nodes):
if attack_mode == 'coordinated':
# Co-ordinated Attack
attacker = mal_nodes[0].attack_model
print('Co-ordinated \'{}\' attack applied to {} clients'.format(mal_nodes[0].attack_model.attack_algorithm,
len(mal_nodes)))
attacker.attack(byz_clients=mal_nodes)
elif attack_mode == 'un_coordinated':
# un_coordinated stand alone attack per client
attacker = mal_nodes[0].attack_model
print('Un Co-ordinated \'{}\' attack applied to {} clients'.
format(mal_nodes[0].attack_model.attack_algorithm, len(mal_nodes)))
for mal_client in mal_nodes:
attacker.attack(byz_clients=[mal_client])
else:
raise NotImplementedError
|
normal
|
{
"blob_id": "11320922d24b27c5cfa714f88eb0a757deef987f",
"index": 8546,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef get_attack(attack_config: Dict):\n if attack_config['attack_model'] == 'drift':\n return DriftAttack(attack_config=attack_config)\n elif attack_config['attack_model'] == 'additive_gaussian':\n return AdditiveGaussian(attack_config=attack_config)\n elif attack_config['attack_model'] == 'random_gaussian':\n return RandomGaussian(attack_config=attack_config)\n elif attack_config['attack_model'] == 'bit_flip':\n return BitFlipAttack(attack_config=attack_config)\n elif attack_config['attack_model'] == 'random_sign_flip':\n return RandomSignFlipAttack(attack_config=attack_config)\n else:\n raise NotImplementedError('Invalid attack model: {}'.format(\n attack_config['attack_model']))\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef get_attack(attack_config: Dict):\n if attack_config['attack_model'] == 'drift':\n return DriftAttack(attack_config=attack_config)\n elif attack_config['attack_model'] == 'additive_gaussian':\n return AdditiveGaussian(attack_config=attack_config)\n elif attack_config['attack_model'] == 'random_gaussian':\n return RandomGaussian(attack_config=attack_config)\n elif attack_config['attack_model'] == 'bit_flip':\n return BitFlipAttack(attack_config=attack_config)\n elif attack_config['attack_model'] == 'random_sign_flip':\n return RandomSignFlipAttack(attack_config=attack_config)\n else:\n raise NotImplementedError('Invalid attack model: {}'.format(\n attack_config['attack_model']))\n\n\ndef launch_attack(attack_mode, mal_nodes):\n if attack_mode == 'coordinated':\n attacker = mal_nodes[0].attack_model\n print(\"Co-ordinated '{}' attack applied to {} clients\".format(\n mal_nodes[0].attack_model.attack_algorithm, len(mal_nodes)))\n attacker.attack(byz_clients=mal_nodes)\n elif attack_mode == 'un_coordinated':\n attacker = mal_nodes[0].attack_model\n print(\"Un Co-ordinated '{}' attack applied to {} clients\".format(\n mal_nodes[0].attack_model.attack_algorithm, len(mal_nodes)))\n for mal_client in mal_nodes:\n attacker.attack(byz_clients=[mal_client])\n else:\n raise NotImplementedError\n",
"step-4": "from .attack_models import DriftAttack, AdditiveGaussian, RandomGaussian, BitFlipAttack, RandomSignFlipAttack\nfrom typing import Dict\n\n\ndef get_attack(attack_config: Dict):\n if attack_config['attack_model'] == 'drift':\n return DriftAttack(attack_config=attack_config)\n elif attack_config['attack_model'] == 'additive_gaussian':\n return AdditiveGaussian(attack_config=attack_config)\n elif attack_config['attack_model'] == 'random_gaussian':\n return RandomGaussian(attack_config=attack_config)\n elif attack_config['attack_model'] == 'bit_flip':\n return BitFlipAttack(attack_config=attack_config)\n elif attack_config['attack_model'] == 'random_sign_flip':\n return RandomSignFlipAttack(attack_config=attack_config)\n else:\n raise NotImplementedError('Invalid attack model: {}'.format(\n attack_config['attack_model']))\n\n\ndef launch_attack(attack_mode, mal_nodes):\n if attack_mode == 'coordinated':\n attacker = mal_nodes[0].attack_model\n print(\"Co-ordinated '{}' attack applied to {} clients\".format(\n mal_nodes[0].attack_model.attack_algorithm, len(mal_nodes)))\n attacker.attack(byz_clients=mal_nodes)\n elif attack_mode == 'un_coordinated':\n attacker = mal_nodes[0].attack_model\n print(\"Un Co-ordinated '{}' attack applied to {} clients\".format(\n mal_nodes[0].attack_model.attack_algorithm, len(mal_nodes)))\n for mal_client in mal_nodes:\n attacker.attack(byz_clients=[mal_client])\n else:\n raise NotImplementedError\n",
"step-5": "# Copyright (c) Microsoft Corporation.\n# Licensed under the MIT License\nfrom .attack_models import (DriftAttack, AdditiveGaussian, RandomGaussian,\n BitFlipAttack, RandomSignFlipAttack)\nfrom typing import Dict\n\n\ndef get_attack(attack_config: Dict):\n if attack_config[\"attack_model\"] == 'drift':\n return DriftAttack(attack_config=attack_config)\n elif attack_config[\"attack_model\"] == 'additive_gaussian':\n return AdditiveGaussian(attack_config=attack_config)\n elif attack_config[\"attack_model\"] == 'random_gaussian':\n return RandomGaussian(attack_config=attack_config)\n elif attack_config[\"attack_model\"] == 'bit_flip':\n return BitFlipAttack(attack_config=attack_config)\n elif attack_config[\"attack_model\"] == 'random_sign_flip':\n return RandomSignFlipAttack(attack_config=attack_config)\n else:\n raise NotImplementedError(\"Invalid attack model: {}\".format(attack_config[\"attack_model\"]))\n\n\ndef launch_attack(attack_mode, mal_nodes):\n if attack_mode == 'coordinated':\n # Co-ordinated Attack\n attacker = mal_nodes[0].attack_model\n print('Co-ordinated \\'{}\\' attack applied to {} clients'.format(mal_nodes[0].attack_model.attack_algorithm,\n len(mal_nodes)))\n attacker.attack(byz_clients=mal_nodes)\n elif attack_mode == 'un_coordinated':\n # un_coordinated stand alone attack per client\n attacker = mal_nodes[0].attack_model\n print('Un Co-ordinated \\'{}\\' attack applied to {} clients'.\n format(mal_nodes[0].attack_model.attack_algorithm, len(mal_nodes)))\n for mal_client in mal_nodes:\n attacker.attack(byz_clients=[mal_client])\n else:\n raise NotImplementedError\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
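# Sketch of a registry-dict alternative to the if/elif dispatch in get_attack
# above.  The stand-in lambdas replace the real attack classes so the snippet
# runs standalone; 'attack_model' is the only config key the dispatcher
# actually reads, so any other fields are assumptions about the attack classes.
_ATTACKS = {
    'drift': lambda cfg: ('DriftAttack', cfg),
    'additive_gaussian': lambda cfg: ('AdditiveGaussian', cfg),
    'random_gaussian': lambda cfg: ('RandomGaussian', cfg),
    'bit_flip': lambda cfg: ('BitFlipAttack', cfg),
    'random_sign_flip': lambda cfg: ('RandomSignFlipAttack', cfg),
}


def get_attack_sketch(attack_config):
    model = attack_config['attack_model']
    if model not in _ATTACKS:
        raise NotImplementedError('Invalid attack model: {}'.format(model))
    return _ATTACKS[model](attack_config)


if __name__ == '__main__':
    print(get_attack_sketch({'attack_model': 'bit_flip'}))  # ('BitFlipAttack', {...})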
import re
BASICPATTERN = '[!/](%s)\s{,1}(.*)' # example "/animefind baka" -> (animefind, baka)
# returns compiled BASICPATTERN for each given string
def basicRegex(strings):
if not isinstance(strings,list):
return []
ans = []
for string in strings:
pattern = re.compile(BASICPATTERN % string.strip())
ans.append(pattern)
return ans
|
normal
|
{
"blob_id": "1a28aea824752d18cbd462693f8f8980dba4974e",
"index": 9387,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef basicRegex(strings):\n if not isinstance(strings, list):\n return []\n ans = []\n for string in strings:\n pattern = re.compile(BASICPATTERN % string.strip())\n ans.append(pattern)\n return ans\n",
"step-3": "<mask token>\nBASICPATTERN = '[!/](%s)\\\\s{,1}(.*)'\n\n\ndef basicRegex(strings):\n if not isinstance(strings, list):\n return []\n ans = []\n for string in strings:\n pattern = re.compile(BASICPATTERN % string.strip())\n ans.append(pattern)\n return ans\n",
"step-4": "import re\nBASICPATTERN = '[!/](%s)\\\\s{,1}(.*)'\n\n\ndef basicRegex(strings):\n if not isinstance(strings, list):\n return []\n ans = []\n for string in strings:\n pattern = re.compile(BASICPATTERN % string.strip())\n ans.append(pattern)\n return ans\n",
"step-5": "import re\n\nBASICPATTERN = '[!/](%s)\\s{,1}(.*)' # example \"/animefind baka\" -> (animefind, baka)\n\n\n# returns compiled BASICPATTERN for each given string\ndef basicRegex(strings):\n if not isinstance(strings,list):\n return []\n ans = []\n for string in strings:\n pattern = re.compile(BASICPATTERN % string.strip())\n ans.append(pattern)\n return ans\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
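# Quick, self-contained demonstration of the command pattern above; the
# pattern string is restated here (as a raw string) so the snippet runs on its
# own, and the example command mirrors the inline comment in the original.
import re

pattern = re.compile(r'[!/](%s)\s{,1}(.*)' % 'animefind')
match = pattern.match('/animefind baka')
print(match.groups())  # ('animefind', 'baka')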
<|reserved_special_token_0|>
<|reserved_special_token_1|>
print('Hello' * 5)
<|reserved_special_token_1|>
print ("Hello"*5)
|
flexible
|
{
"blob_id": "9ae7b6d081529a5c70b7362c852647b3638e7e98",
"index": 8105,
"step-1": "<mask token>\n",
"step-2": "print('Hello' * 5)\n",
"step-3": "print (\"Hello\"*5)\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
setup(name='pybanery', version='1.0', description=
'Python interface for Kanbanery', author='Pablo Lluch', author_email=
'[email protected]', py_modules=['pybanery'], scripts=['pybanery'])
<|reserved_special_token_1|>
import os
from distutils.core import setup, Extension
import distutils.util
setup(name='pybanery', version='1.0', description=
'Python interface for Kanbanery', author='Pablo Lluch', author_email=
'[email protected]', py_modules=['pybanery'], scripts=['pybanery'])
<|reserved_special_token_1|>
#!/usr/bin/env python
import os
from distutils.core import setup, Extension
import distutils.util
setup (name = 'pybanery',
version= '1.0',
description='Python interface for Kanbanery',
author = 'Pablo Lluch',
author_email = '[email protected]',
py_modules = ['pybanery'],
scripts=['pybanery'],
)
|
flexible
|
{
"blob_id": "60c862accbb9cda40ed4c45491f643f065e2868a",
"index": 6467,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nsetup(name='pybanery', version='1.0', description=\n 'Python interface for Kanbanery', author='Pablo Lluch', author_email=\n '[email protected]', py_modules=['pybanery'], scripts=['pybanery'])\n",
"step-3": "import os\nfrom distutils.core import setup, Extension\nimport distutils.util\nsetup(name='pybanery', version='1.0', description=\n 'Python interface for Kanbanery', author='Pablo Lluch', author_email=\n '[email protected]', py_modules=['pybanery'], scripts=['pybanery'])\n",
"step-4": "#!/usr/bin/env python\n\nimport os\nfrom distutils.core import setup, Extension\nimport distutils.util\n\nsetup (name = 'pybanery',\n version= '1.0',\n description='Python interface for Kanbanery',\n author = 'Pablo Lluch',\n author_email = '[email protected]',\n py_modules = ['pybanery'],\n scripts=['pybanery'],\n)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
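# Build / install sketch for the distutils setup script above (commands shown
# as comments; they assume a source tree containing this setup.py with the
# pybanery module and script next to it):
#   python setup.py sdist      # build a source distribution
#   python setup.py install    # install the module and the pybanery script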
<|reserved_special_token_0|>
def train():
args = get_args()
os.makedirs(args.model_path, exist_ok=True)
set_seed(args.seed)
"""
To follow this training routine you need a DataLoader that yields the tuples of the following format:
(Bx3xHxW FloatTensor x, BxHxW LongTensor y, BxN LongTensor y_cls) where
x - batch of input images,
y - batch of groung truth seg maps,
y_cls - batch of 1D tensors of dimensionality N: N total number of classes,
y_cls[i, T] = 1 if class T is present in image i, 0 otherwise
"""
traindata = HeadSegData(args.seg_data_path, args.train_txt, train=True)
train_loader = DataLoader(traindata, batch_size=args.seg_batch_size,
shuffle=True, num_workers=1)
net, _ = build_network(None, args.seg_backend)
seg_criterion = nn.NLLLoss().cuda(0)
cls_criterion = nn.BCEWithLogitsLoss().cuda(0)
optimizer = optim.Adam(net.parameters(), lr=args.seg_lr)
print('start training...')
net.train()
total_loss = 0.0
for epoch in range(args.seg_epochs):
if (epoch + 1) % 5 == 0:
for group in optimizer.param_groups:
group['lr'] *= 0.25
total_loss = 0.0
for i, (x, y, y_cls) in enumerate(train_loader):
x, y, y_cls = x.cuda(0), y.cuda(0).long(), y_cls.cuda(0).float()
out, out_cls = net(x)
seg_loss = seg_criterion(out, y)
cls_loss = cls_criterion(out_cls, y_cls)
loss = seg_loss + args.seg_alpha * cls_loss
total_loss += loss.item()
if i % 50 == 0:
status = '[batch:{0}/{1} epoch:{2}] loss = {3:0.5f}'.format(i,
len(traindata) // args.seg_batch_size, epoch + 1, loss.
item())
print(status)
optimizer.zero_grad()
loss.backward()
optimizer.step()
torch.save(net.state_dict(), os.path.join(args.model_path,
f"{'seg'}_{args.seg_model}_{args.seg_backend}_{epoch}.pth"))
print(f'epoch:{epoch} total_loss: {total_loss / len(traindata)}')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def build_network(snapshot, backend):
epoch = 0
backend = backend.lower()
net = models[backend]()
if snapshot is not None:
_, epoch = os.path.basename(snapshot).split('_')
epoch = int(epoch)
net.load_state_dict(torch.load(snapshot))
logging.info('Snapshot for epoch {} loaded from {}'.format(epoch,
snapshot))
net = net.cuda(0)
return net, epoch
def train():
args = get_args()
os.makedirs(args.model_path, exist_ok=True)
set_seed(args.seed)
"""
To follow this training routine you need a DataLoader that yields the tuples of the following format:
(Bx3xHxW FloatTensor x, BxHxW LongTensor y, BxN LongTensor y_cls) where
x - batch of input images,
y - batch of groung truth seg maps,
y_cls - batch of 1D tensors of dimensionality N: N total number of classes,
y_cls[i, T] = 1 if class T is present in image i, 0 otherwise
"""
traindata = HeadSegData(args.seg_data_path, args.train_txt, train=True)
train_loader = DataLoader(traindata, batch_size=args.seg_batch_size,
shuffle=True, num_workers=1)
net, _ = build_network(None, args.seg_backend)
seg_criterion = nn.NLLLoss().cuda(0)
cls_criterion = nn.BCEWithLogitsLoss().cuda(0)
optimizer = optim.Adam(net.parameters(), lr=args.seg_lr)
print('start training...')
net.train()
total_loss = 0.0
for epoch in range(args.seg_epochs):
if (epoch + 1) % 5 == 0:
for group in optimizer.param_groups:
group['lr'] *= 0.25
total_loss = 0.0
for i, (x, y, y_cls) in enumerate(train_loader):
x, y, y_cls = x.cuda(0), y.cuda(0).long(), y_cls.cuda(0).float()
out, out_cls = net(x)
seg_loss = seg_criterion(out, y)
cls_loss = cls_criterion(out_cls, y_cls)
loss = seg_loss + args.seg_alpha * cls_loss
total_loss += loss.item()
if i % 50 == 0:
status = '[batch:{0}/{1} epoch:{2}] loss = {3:0.5f}'.format(i,
len(traindata) // args.seg_batch_size, epoch + 1, loss.
item())
print(status)
optimizer.zero_grad()
loss.backward()
optimizer.step()
torch.save(net.state_dict(), os.path.join(args.model_path,
f"{'seg'}_{args.seg_model}_{args.seg_backend}_{epoch}.pth"))
print(f'epoch:{epoch} total_loss: {total_loss / len(traindata)}')
if __name__ == '__main__':
train()
<|reserved_special_token_1|>
__author__ = 'BeiYu'
<|reserved_special_token_0|>
models = {'squeezenet': lambda : PSPNet(sizes=(1, 2, 3, 6), psp_size=512,
deep_features_size=256, backend='squeezenet', n_classes=3), 'densenet':
lambda : PSPNet(sizes=(1, 2, 3, 6), psp_size=1024, deep_features_size=
512, backend='densenet', n_classes=3), 'resnet18': lambda : PSPNet(
sizes=(1, 2, 3, 6), psp_size=512, deep_features_size=256, backend=
'resnet18', n_classes=3), 'resnet34': lambda : PSPNet(sizes=(1, 2, 3, 6
), psp_size=512, deep_features_size=256, backend='resnet34', n_classes=
3), 'resnet50': lambda : PSPNet(sizes=(1, 2, 3, 6), psp_size=2048,
deep_features_size=1024, backend='resnet50', n_classes=3), 'resnet101':
lambda : PSPNet(sizes=(1, 2, 3, 6), psp_size=2048, deep_features_size=
1024, backend='resnet101', n_classes=3), 'resnet152': lambda : PSPNet(
sizes=(1, 2, 3, 6), psp_size=2048, deep_features_size=1024, backend=
'resnet152', n_classes=3)}
def build_network(snapshot, backend):
epoch = 0
backend = backend.lower()
net = models[backend]()
if snapshot is not None:
_, epoch = os.path.basename(snapshot).split('_')
epoch = int(epoch)
net.load_state_dict(torch.load(snapshot))
logging.info('Snapshot for epoch {} loaded from {}'.format(epoch,
snapshot))
net = net.cuda(0)
return net, epoch
def train():
args = get_args()
os.makedirs(args.model_path, exist_ok=True)
set_seed(args.seed)
"""
To follow this training routine you need a DataLoader that yields the tuples of the following format:
(Bx3xHxW FloatTensor x, BxHxW LongTensor y, BxN LongTensor y_cls) where
x - batch of input images,
y - batch of groung truth seg maps,
y_cls - batch of 1D tensors of dimensionality N: N total number of classes,
y_cls[i, T] = 1 if class T is present in image i, 0 otherwise
"""
traindata = HeadSegData(args.seg_data_path, args.train_txt, train=True)
train_loader = DataLoader(traindata, batch_size=args.seg_batch_size,
shuffle=True, num_workers=1)
net, _ = build_network(None, args.seg_backend)
seg_criterion = nn.NLLLoss().cuda(0)
cls_criterion = nn.BCEWithLogitsLoss().cuda(0)
optimizer = optim.Adam(net.parameters(), lr=args.seg_lr)
print('start training...')
net.train()
total_loss = 0.0
for epoch in range(args.seg_epochs):
if (epoch + 1) % 5 == 0:
for group in optimizer.param_groups:
group['lr'] *= 0.25
total_loss = 0.0
for i, (x, y, y_cls) in enumerate(train_loader):
x, y, y_cls = x.cuda(0), y.cuda(0).long(), y_cls.cuda(0).float()
out, out_cls = net(x)
seg_loss = seg_criterion(out, y)
cls_loss = cls_criterion(out_cls, y_cls)
loss = seg_loss + args.seg_alpha * cls_loss
total_loss += loss.item()
if i % 50 == 0:
status = '[batch:{0}/{1} epoch:{2}] loss = {3:0.5f}'.format(i,
len(traindata) // args.seg_batch_size, epoch + 1, loss.
item())
print(status)
optimizer.zero_grad()
loss.backward()
optimizer.step()
torch.save(net.state_dict(), os.path.join(args.model_path,
f"{'seg'}_{args.seg_model}_{args.seg_backend}_{epoch}.pth"))
print(f'epoch:{epoch} total_loss: {total_loss / len(traindata)}')
if __name__ == '__main__':
train()
<|reserved_special_token_1|>
__author__ = 'BeiYu'
from utils.init_env import set_seed
from utils.options import *
import os
import logging
import torch
from torch import nn
from torch import optim
from torch.optim.lr_scheduler import MultiStepLR
from torch.autograd import Variable
from torch.utils.data import DataLoader
from modules.seg_dataset import *
from tqdm import tqdm
import click
import torch.nn.functional as F
import numpy as np
from modules.seg import PSPNet
models = {'squeezenet': lambda : PSPNet(sizes=(1, 2, 3, 6), psp_size=512,
deep_features_size=256, backend='squeezenet', n_classes=3), 'densenet':
lambda : PSPNet(sizes=(1, 2, 3, 6), psp_size=1024, deep_features_size=
512, backend='densenet', n_classes=3), 'resnet18': lambda : PSPNet(
sizes=(1, 2, 3, 6), psp_size=512, deep_features_size=256, backend=
'resnet18', n_classes=3), 'resnet34': lambda : PSPNet(sizes=(1, 2, 3, 6
), psp_size=512, deep_features_size=256, backend='resnet34', n_classes=
3), 'resnet50': lambda : PSPNet(sizes=(1, 2, 3, 6), psp_size=2048,
deep_features_size=1024, backend='resnet50', n_classes=3), 'resnet101':
lambda : PSPNet(sizes=(1, 2, 3, 6), psp_size=2048, deep_features_size=
1024, backend='resnet101', n_classes=3), 'resnet152': lambda : PSPNet(
sizes=(1, 2, 3, 6), psp_size=2048, deep_features_size=1024, backend=
'resnet152', n_classes=3)}
def build_network(snapshot, backend):
epoch = 0
backend = backend.lower()
net = models[backend]()
if snapshot is not None:
_, epoch = os.path.basename(snapshot).split('_')
epoch = int(epoch)
net.load_state_dict(torch.load(snapshot))
logging.info('Snapshot for epoch {} loaded from {}'.format(epoch,
snapshot))
net = net.cuda(0)
return net, epoch
def train():
args = get_args()
os.makedirs(args.model_path, exist_ok=True)
set_seed(args.seed)
"""
To follow this training routine you need a DataLoader that yields the tuples of the following format:
(Bx3xHxW FloatTensor x, BxHxW LongTensor y, BxN LongTensor y_cls) where
x - batch of input images,
y - batch of groung truth seg maps,
y_cls - batch of 1D tensors of dimensionality N: N total number of classes,
y_cls[i, T] = 1 if class T is present in image i, 0 otherwise
"""
traindata = HeadSegData(args.seg_data_path, args.train_txt, train=True)
train_loader = DataLoader(traindata, batch_size=args.seg_batch_size,
shuffle=True, num_workers=1)
net, _ = build_network(None, args.seg_backend)
seg_criterion = nn.NLLLoss().cuda(0)
cls_criterion = nn.BCEWithLogitsLoss().cuda(0)
optimizer = optim.Adam(net.parameters(), lr=args.seg_lr)
print('start training...')
net.train()
total_loss = 0.0
for epoch in range(args.seg_epochs):
if (epoch + 1) % 5 == 0:
for group in optimizer.param_groups:
group['lr'] *= 0.25
total_loss = 0.0
for i, (x, y, y_cls) in enumerate(train_loader):
x, y, y_cls = x.cuda(0), y.cuda(0).long(), y_cls.cuda(0).float()
out, out_cls = net(x)
seg_loss = seg_criterion(out, y)
cls_loss = cls_criterion(out_cls, y_cls)
loss = seg_loss + args.seg_alpha * cls_loss
total_loss += loss.item()
if i % 50 == 0:
status = '[batch:{0}/{1} epoch:{2}] loss = {3:0.5f}'.format(i,
len(traindata) // args.seg_batch_size, epoch + 1, loss.
item())
print(status)
optimizer.zero_grad()
loss.backward()
optimizer.step()
torch.save(net.state_dict(), os.path.join(args.model_path,
f"{'seg'}_{args.seg_model}_{args.seg_backend}_{epoch}.pth"))
print(f'epoch:{epoch} total_loss: {total_loss / len(traindata)}')
if __name__ == '__main__':
train()
<|reserved_special_token_1|>
# Author: BeiYu
# Github: https://github.com/beiyuouo
# Date : 2021/2/21 21:57
# Description:
__author__ = "BeiYu"
from utils.init_env import set_seed
from utils.options import *
import os
import logging
import torch
from torch import nn
from torch import optim
from torch.optim.lr_scheduler import MultiStepLR
from torch.autograd import Variable
from torch.utils.data import DataLoader
from modules.seg_dataset import *
from tqdm import tqdm
import click
import torch.nn.functional as F
import numpy as np
from modules.seg import PSPNet
models = {
'squeezenet': lambda: PSPNet(sizes=(1, 2, 3, 6), psp_size=512, deep_features_size=256, backend='squeezenet', n_classes=3),
'densenet': lambda: PSPNet(sizes=(1, 2, 3, 6), psp_size=1024, deep_features_size=512, backend='densenet', n_classes=3),
'resnet18': lambda: PSPNet(sizes=(1, 2, 3, 6), psp_size=512, deep_features_size=256, backend='resnet18', n_classes=3),
'resnet34': lambda: PSPNet(sizes=(1, 2, 3, 6), psp_size=512, deep_features_size=256, backend='resnet34', n_classes=3),
'resnet50': lambda: PSPNet(sizes=(1, 2, 3, 6), psp_size=2048, deep_features_size=1024, backend='resnet50', n_classes=3),
'resnet101': lambda: PSPNet(sizes=(1, 2, 3, 6), psp_size=2048, deep_features_size=1024, backend='resnet101', n_classes=3),
'resnet152': lambda: PSPNet(sizes=(1, 2, 3, 6), psp_size=2048, deep_features_size=1024, backend='resnet152', n_classes=3)
}
def build_network(snapshot, backend):
epoch = 0
backend = backend.lower()
net = models[backend]()
# net = nn.DataParallel(net)
if snapshot is not None:
_, epoch = os.path.basename(snapshot).split('_')
epoch = int(epoch)
net.load_state_dict(torch.load(snapshot))
logging.info("Snapshot for epoch {} loaded from {}".format(epoch, snapshot))
net = net.cuda(0)
return net, epoch
def train():
args = get_args()
# os.environ["CUDA_VISIBLE_DEVICES"] = gpu
# net, starting_epoch = build_network(snapshot, backend)
# data_path = os.path.abspath(os.path.expanduser(data_path))
# models_path = os.path.abspath(os.path.expanduser(models_path))
os.makedirs(args.model_path, exist_ok=True)
set_seed(args.seed)
'''
To follow this training routine you need a DataLoader that yields the tuples of the following format:
(Bx3xHxW FloatTensor x, BxHxW LongTensor y, BxN LongTensor y_cls) where
x - batch of input images,
y - batch of groung truth seg maps,
y_cls - batch of 1D tensors of dimensionality N: N total number of classes,
y_cls[i, T] = 1 if class T is present in image i, 0 otherwise
'''
traindata = HeadSegData(args.seg_data_path, args.train_txt, train=True)
train_loader = DataLoader(traindata, batch_size=args.seg_batch_size, shuffle=True, num_workers=1)
net, _ = build_network(None, args.seg_backend)
seg_criterion = nn.NLLLoss().cuda(0)
cls_criterion = nn.BCEWithLogitsLoss().cuda(0)
optimizer = optim.Adam(net.parameters(), lr=args.seg_lr)
# scheduler = MultiStepLR(optimizer, milestones=[int(x) for x in milestones.split(',')])
print("start training...")
net.train()
total_loss = 0.0
for epoch in range(args.seg_epochs):
if (epoch+1) % 5 == 0:
for group in optimizer.param_groups:
group['lr'] *= 0.25
total_loss = 0.0
for i, (x, y, y_cls) in enumerate(train_loader):
x, y, y_cls = x.cuda(0), y.cuda(0).long(), y_cls.cuda(0).float()
out, out_cls = net(x)
# print(x.shape, out.shape, out_cls.shape, y.shape, y_cls.shape)
seg_loss = seg_criterion(out, y)
cls_loss = cls_criterion(out_cls, y_cls)
loss = seg_loss + args.seg_alpha * cls_loss
total_loss += loss.item()
if i % 50 == 0:
status = '[batch:{0}/{1} epoch:{2}] loss = {3:0.5f}'.format(i, len(traindata) // args.seg_batch_size,
epoch + 1,
loss.item())
print(status)
optimizer.zero_grad()
loss.backward()
optimizer.step()
torch.save(net.state_dict(), os.path.join(args.model_path,
f'{"seg"}_{args.seg_model}_{args.seg_backend}_{epoch}.pth'))
print(f'epoch:{epoch} total_loss: {total_loss / len(traindata)}')
if __name__ == '__main__':
train()
|
flexible
|
{
"blob_id": "75e6554ea3c327c87a2a65710a7f1d55e9933bb0",
"index": 276,
"step-1": "<mask token>\n\n\ndef train():\n args = get_args()\n os.makedirs(args.model_path, exist_ok=True)\n set_seed(args.seed)\n \"\"\"\n To follow this training routine you need a DataLoader that yields the tuples of the following format:\n (Bx3xHxW FloatTensor x, BxHxW LongTensor y, BxN LongTensor y_cls) where\n x - batch of input images,\n y - batch of groung truth seg maps,\n y_cls - batch of 1D tensors of dimensionality N: N total number of classes, \n y_cls[i, T] = 1 if class T is present in image i, 0 otherwise\n \"\"\"\n traindata = HeadSegData(args.seg_data_path, args.train_txt, train=True)\n train_loader = DataLoader(traindata, batch_size=args.seg_batch_size,\n shuffle=True, num_workers=1)\n net, _ = build_network(None, args.seg_backend)\n seg_criterion = nn.NLLLoss().cuda(0)\n cls_criterion = nn.BCEWithLogitsLoss().cuda(0)\n optimizer = optim.Adam(net.parameters(), lr=args.seg_lr)\n print('start training...')\n net.train()\n total_loss = 0.0\n for epoch in range(args.seg_epochs):\n if (epoch + 1) % 5 == 0:\n for group in optimizer.param_groups:\n group['lr'] *= 0.25\n total_loss = 0.0\n for i, (x, y, y_cls) in enumerate(train_loader):\n x, y, y_cls = x.cuda(0), y.cuda(0).long(), y_cls.cuda(0).float()\n out, out_cls = net(x)\n seg_loss = seg_criterion(out, y)\n cls_loss = cls_criterion(out_cls, y_cls)\n loss = seg_loss + args.seg_alpha * cls_loss\n total_loss += loss.item()\n if i % 50 == 0:\n status = '[batch:{0}/{1} epoch:{2}] loss = {3:0.5f}'.format(i,\n len(traindata) // args.seg_batch_size, epoch + 1, loss.\n item())\n print(status)\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n torch.save(net.state_dict(), os.path.join(args.model_path,\n f\"{'seg'}_{args.seg_model}_{args.seg_backend}_{epoch}.pth\"))\n print(f'epoch:{epoch} total_loss: {total_loss / len(traindata)}')\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef build_network(snapshot, backend):\n epoch = 0\n backend = backend.lower()\n net = models[backend]()\n if snapshot is not None:\n _, epoch = os.path.basename(snapshot).split('_')\n epoch = int(epoch)\n net.load_state_dict(torch.load(snapshot))\n logging.info('Snapshot for epoch {} loaded from {}'.format(epoch,\n snapshot))\n net = net.cuda(0)\n return net, epoch\n\n\ndef train():\n args = get_args()\n os.makedirs(args.model_path, exist_ok=True)\n set_seed(args.seed)\n \"\"\"\n To follow this training routine you need a DataLoader that yields the tuples of the following format:\n (Bx3xHxW FloatTensor x, BxHxW LongTensor y, BxN LongTensor y_cls) where\n x - batch of input images,\n y - batch of groung truth seg maps,\n y_cls - batch of 1D tensors of dimensionality N: N total number of classes, \n y_cls[i, T] = 1 if class T is present in image i, 0 otherwise\n \"\"\"\n traindata = HeadSegData(args.seg_data_path, args.train_txt, train=True)\n train_loader = DataLoader(traindata, batch_size=args.seg_batch_size,\n shuffle=True, num_workers=1)\n net, _ = build_network(None, args.seg_backend)\n seg_criterion = nn.NLLLoss().cuda(0)\n cls_criterion = nn.BCEWithLogitsLoss().cuda(0)\n optimizer = optim.Adam(net.parameters(), lr=args.seg_lr)\n print('start training...')\n net.train()\n total_loss = 0.0\n for epoch in range(args.seg_epochs):\n if (epoch + 1) % 5 == 0:\n for group in optimizer.param_groups:\n group['lr'] *= 0.25\n total_loss = 0.0\n for i, (x, y, y_cls) in enumerate(train_loader):\n x, y, y_cls = x.cuda(0), y.cuda(0).long(), y_cls.cuda(0).float()\n out, out_cls = net(x)\n seg_loss = seg_criterion(out, y)\n cls_loss = cls_criterion(out_cls, y_cls)\n loss = seg_loss + args.seg_alpha * cls_loss\n total_loss += loss.item()\n if i % 50 == 0:\n status = '[batch:{0}/{1} epoch:{2}] loss = {3:0.5f}'.format(i,\n len(traindata) // args.seg_batch_size, epoch + 1, loss.\n item())\n print(status)\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n torch.save(net.state_dict(), os.path.join(args.model_path,\n f\"{'seg'}_{args.seg_model}_{args.seg_backend}_{epoch}.pth\"))\n print(f'epoch:{epoch} total_loss: {total_loss / len(traindata)}')\n\n\nif __name__ == '__main__':\n train()\n",
"step-3": "__author__ = 'BeiYu'\n<mask token>\nmodels = {'squeezenet': lambda : PSPNet(sizes=(1, 2, 3, 6), psp_size=512,\n deep_features_size=256, backend='squeezenet', n_classes=3), 'densenet':\n lambda : PSPNet(sizes=(1, 2, 3, 6), psp_size=1024, deep_features_size=\n 512, backend='densenet', n_classes=3), 'resnet18': lambda : PSPNet(\n sizes=(1, 2, 3, 6), psp_size=512, deep_features_size=256, backend=\n 'resnet18', n_classes=3), 'resnet34': lambda : PSPNet(sizes=(1, 2, 3, 6\n ), psp_size=512, deep_features_size=256, backend='resnet34', n_classes=\n 3), 'resnet50': lambda : PSPNet(sizes=(1, 2, 3, 6), psp_size=2048,\n deep_features_size=1024, backend='resnet50', n_classes=3), 'resnet101':\n lambda : PSPNet(sizes=(1, 2, 3, 6), psp_size=2048, deep_features_size=\n 1024, backend='resnet101', n_classes=3), 'resnet152': lambda : PSPNet(\n sizes=(1, 2, 3, 6), psp_size=2048, deep_features_size=1024, backend=\n 'resnet152', n_classes=3)}\n\n\ndef build_network(snapshot, backend):\n epoch = 0\n backend = backend.lower()\n net = models[backend]()\n if snapshot is not None:\n _, epoch = os.path.basename(snapshot).split('_')\n epoch = int(epoch)\n net.load_state_dict(torch.load(snapshot))\n logging.info('Snapshot for epoch {} loaded from {}'.format(epoch,\n snapshot))\n net = net.cuda(0)\n return net, epoch\n\n\ndef train():\n args = get_args()\n os.makedirs(args.model_path, exist_ok=True)\n set_seed(args.seed)\n \"\"\"\n To follow this training routine you need a DataLoader that yields the tuples of the following format:\n (Bx3xHxW FloatTensor x, BxHxW LongTensor y, BxN LongTensor y_cls) where\n x - batch of input images,\n y - batch of groung truth seg maps,\n y_cls - batch of 1D tensors of dimensionality N: N total number of classes, \n y_cls[i, T] = 1 if class T is present in image i, 0 otherwise\n \"\"\"\n traindata = HeadSegData(args.seg_data_path, args.train_txt, train=True)\n train_loader = DataLoader(traindata, batch_size=args.seg_batch_size,\n shuffle=True, num_workers=1)\n net, _ = build_network(None, args.seg_backend)\n seg_criterion = nn.NLLLoss().cuda(0)\n cls_criterion = nn.BCEWithLogitsLoss().cuda(0)\n optimizer = optim.Adam(net.parameters(), lr=args.seg_lr)\n print('start training...')\n net.train()\n total_loss = 0.0\n for epoch in range(args.seg_epochs):\n if (epoch + 1) % 5 == 0:\n for group in optimizer.param_groups:\n group['lr'] *= 0.25\n total_loss = 0.0\n for i, (x, y, y_cls) in enumerate(train_loader):\n x, y, y_cls = x.cuda(0), y.cuda(0).long(), y_cls.cuda(0).float()\n out, out_cls = net(x)\n seg_loss = seg_criterion(out, y)\n cls_loss = cls_criterion(out_cls, y_cls)\n loss = seg_loss + args.seg_alpha * cls_loss\n total_loss += loss.item()\n if i % 50 == 0:\n status = '[batch:{0}/{1} epoch:{2}] loss = {3:0.5f}'.format(i,\n len(traindata) // args.seg_batch_size, epoch + 1, loss.\n item())\n print(status)\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n torch.save(net.state_dict(), os.path.join(args.model_path,\n f\"{'seg'}_{args.seg_model}_{args.seg_backend}_{epoch}.pth\"))\n print(f'epoch:{epoch} total_loss: {total_loss / len(traindata)}')\n\n\nif __name__ == '__main__':\n train()\n",
"step-4": "__author__ = 'BeiYu'\nfrom utils.init_env import set_seed\nfrom utils.options import *\nimport os\nimport logging\nimport torch\nfrom torch import nn\nfrom torch import optim\nfrom torch.optim.lr_scheduler import MultiStepLR\nfrom torch.autograd import Variable\nfrom torch.utils.data import DataLoader\nfrom modules.seg_dataset import *\nfrom tqdm import tqdm\nimport click\nimport torch.nn.functional as F\nimport numpy as np\nfrom modules.seg import PSPNet\nmodels = {'squeezenet': lambda : PSPNet(sizes=(1, 2, 3, 6), psp_size=512,\n deep_features_size=256, backend='squeezenet', n_classes=3), 'densenet':\n lambda : PSPNet(sizes=(1, 2, 3, 6), psp_size=1024, deep_features_size=\n 512, backend='densenet', n_classes=3), 'resnet18': lambda : PSPNet(\n sizes=(1, 2, 3, 6), psp_size=512, deep_features_size=256, backend=\n 'resnet18', n_classes=3), 'resnet34': lambda : PSPNet(sizes=(1, 2, 3, 6\n ), psp_size=512, deep_features_size=256, backend='resnet34', n_classes=\n 3), 'resnet50': lambda : PSPNet(sizes=(1, 2, 3, 6), psp_size=2048,\n deep_features_size=1024, backend='resnet50', n_classes=3), 'resnet101':\n lambda : PSPNet(sizes=(1, 2, 3, 6), psp_size=2048, deep_features_size=\n 1024, backend='resnet101', n_classes=3), 'resnet152': lambda : PSPNet(\n sizes=(1, 2, 3, 6), psp_size=2048, deep_features_size=1024, backend=\n 'resnet152', n_classes=3)}\n\n\ndef build_network(snapshot, backend):\n epoch = 0\n backend = backend.lower()\n net = models[backend]()\n if snapshot is not None:\n _, epoch = os.path.basename(snapshot).split('_')\n epoch = int(epoch)\n net.load_state_dict(torch.load(snapshot))\n logging.info('Snapshot for epoch {} loaded from {}'.format(epoch,\n snapshot))\n net = net.cuda(0)\n return net, epoch\n\n\ndef train():\n args = get_args()\n os.makedirs(args.model_path, exist_ok=True)\n set_seed(args.seed)\n \"\"\"\n To follow this training routine you need a DataLoader that yields the tuples of the following format:\n (Bx3xHxW FloatTensor x, BxHxW LongTensor y, BxN LongTensor y_cls) where\n x - batch of input images,\n y - batch of groung truth seg maps,\n y_cls - batch of 1D tensors of dimensionality N: N total number of classes, \n y_cls[i, T] = 1 if class T is present in image i, 0 otherwise\n \"\"\"\n traindata = HeadSegData(args.seg_data_path, args.train_txt, train=True)\n train_loader = DataLoader(traindata, batch_size=args.seg_batch_size,\n shuffle=True, num_workers=1)\n net, _ = build_network(None, args.seg_backend)\n seg_criterion = nn.NLLLoss().cuda(0)\n cls_criterion = nn.BCEWithLogitsLoss().cuda(0)\n optimizer = optim.Adam(net.parameters(), lr=args.seg_lr)\n print('start training...')\n net.train()\n total_loss = 0.0\n for epoch in range(args.seg_epochs):\n if (epoch + 1) % 5 == 0:\n for group in optimizer.param_groups:\n group['lr'] *= 0.25\n total_loss = 0.0\n for i, (x, y, y_cls) in enumerate(train_loader):\n x, y, y_cls = x.cuda(0), y.cuda(0).long(), y_cls.cuda(0).float()\n out, out_cls = net(x)\n seg_loss = seg_criterion(out, y)\n cls_loss = cls_criterion(out_cls, y_cls)\n loss = seg_loss + args.seg_alpha * cls_loss\n total_loss += loss.item()\n if i % 50 == 0:\n status = '[batch:{0}/{1} epoch:{2}] loss = {3:0.5f}'.format(i,\n len(traindata) // args.seg_batch_size, epoch + 1, loss.\n item())\n print(status)\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n torch.save(net.state_dict(), os.path.join(args.model_path,\n f\"{'seg'}_{args.seg_model}_{args.seg_backend}_{epoch}.pth\"))\n print(f'epoch:{epoch} total_loss: {total_loss / 
len(traindata)}')\n\n\nif __name__ == '__main__':\n train()\n",
"step-5": "# Author: BeiYu\n# Github: https://github.com/beiyuouo\n# Date : 2021/2/21 21:57\n# Description:\n\n__author__ = \"BeiYu\"\n\nfrom utils.init_env import set_seed\nfrom utils.options import *\n\nimport os\nimport logging\nimport torch\nfrom torch import nn\nfrom torch import optim\nfrom torch.optim.lr_scheduler import MultiStepLR\nfrom torch.autograd import Variable\nfrom torch.utils.data import DataLoader\nfrom modules.seg_dataset import *\nfrom tqdm import tqdm\nimport click\nimport torch.nn.functional as F\nimport numpy as np\nfrom modules.seg import PSPNet\n\nmodels = {\n 'squeezenet': lambda: PSPNet(sizes=(1, 2, 3, 6), psp_size=512, deep_features_size=256, backend='squeezenet', n_classes=3),\n 'densenet': lambda: PSPNet(sizes=(1, 2, 3, 6), psp_size=1024, deep_features_size=512, backend='densenet', n_classes=3),\n 'resnet18': lambda: PSPNet(sizes=(1, 2, 3, 6), psp_size=512, deep_features_size=256, backend='resnet18', n_classes=3),\n 'resnet34': lambda: PSPNet(sizes=(1, 2, 3, 6), psp_size=512, deep_features_size=256, backend='resnet34', n_classes=3),\n 'resnet50': lambda: PSPNet(sizes=(1, 2, 3, 6), psp_size=2048, deep_features_size=1024, backend='resnet50', n_classes=3),\n 'resnet101': lambda: PSPNet(sizes=(1, 2, 3, 6), psp_size=2048, deep_features_size=1024, backend='resnet101', n_classes=3),\n 'resnet152': lambda: PSPNet(sizes=(1, 2, 3, 6), psp_size=2048, deep_features_size=1024, backend='resnet152', n_classes=3)\n}\n\n\ndef build_network(snapshot, backend):\n epoch = 0\n backend = backend.lower()\n net = models[backend]()\n # net = nn.DataParallel(net)\n if snapshot is not None:\n _, epoch = os.path.basename(snapshot).split('_')\n epoch = int(epoch)\n net.load_state_dict(torch.load(snapshot))\n logging.info(\"Snapshot for epoch {} loaded from {}\".format(epoch, snapshot))\n net = net.cuda(0)\n return net, epoch\n\n\ndef train():\n args = get_args()\n # os.environ[\"CUDA_VISIBLE_DEVICES\"] = gpu\n # net, starting_epoch = build_network(snapshot, backend)\n # data_path = os.path.abspath(os.path.expanduser(data_path))\n # models_path = os.path.abspath(os.path.expanduser(models_path))\n os.makedirs(args.model_path, exist_ok=True)\n set_seed(args.seed)\n\n '''\n To follow this training routine you need a DataLoader that yields the tuples of the following format:\n (Bx3xHxW FloatTensor x, BxHxW LongTensor y, BxN LongTensor y_cls) where\n x - batch of input images,\n y - batch of groung truth seg maps,\n y_cls - batch of 1D tensors of dimensionality N: N total number of classes, \n y_cls[i, T] = 1 if class T is present in image i, 0 otherwise\n '''\n traindata = HeadSegData(args.seg_data_path, args.train_txt, train=True)\n train_loader = DataLoader(traindata, batch_size=args.seg_batch_size, shuffle=True, num_workers=1)\n\n net, _ = build_network(None, args.seg_backend)\n seg_criterion = nn.NLLLoss().cuda(0)\n cls_criterion = nn.BCEWithLogitsLoss().cuda(0)\n optimizer = optim.Adam(net.parameters(), lr=args.seg_lr)\n # scheduler = MultiStepLR(optimizer, milestones=[int(x) for x in milestones.split(',')])\n\n print(\"start training...\")\n net.train()\n total_loss = 0.0\n for epoch in range(args.seg_epochs):\n if (epoch+1) % 5 == 0:\n for group in optimizer.param_groups:\n group['lr'] *= 0.25\n total_loss = 0.0\n for i, (x, y, y_cls) in enumerate(train_loader):\n x, y, y_cls = x.cuda(0), y.cuda(0).long(), y_cls.cuda(0).float()\n\n out, out_cls = net(x)\n # print(x.shape, out.shape, out_cls.shape, y.shape, y_cls.shape)\n seg_loss = seg_criterion(out, y)\n cls_loss = 
cls_criterion(out_cls, y_cls)\n loss = seg_loss + args.seg_alpha * cls_loss\n total_loss += loss.item()\n\n if i % 50 == 0:\n status = '[batch:{0}/{1} epoch:{2}] loss = {3:0.5f}'.format(i, len(traindata) // args.seg_batch_size,\n epoch + 1,\n loss.item())\n print(status)\n\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n torch.save(net.state_dict(), os.path.join(args.model_path,\n f'{\"seg\"}_{args.seg_model}_{args.seg_backend}_{epoch}.pth'))\n print(f'epoch:{epoch} total_loss: {total_loss / len(traindata)}')\n\n\nif __name__ == '__main__':\n train()\n",
"step-ids": [
1,
3,
4,
5,
6
]
}
|
[
1,
3,
4,
5,
6
] |
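# Sketch of the data contract spelled out in the train() docstring above: a
# toy Dataset yielding a 3xHxW float image, an HxW long segmentation map, and
# an N-dim class-presence vector.  Tensor shapes and the class count (3)
# follow the training script; the random data is purely illustrative and the
# class name is made up for this example.
import torch
from torch.utils.data import Dataset, DataLoader


class ToySegData(Dataset):
    def __init__(self, length=8, size=64, n_classes=3):
        self.length, self.size, self.n_classes = length, size, n_classes

    def __len__(self):
        return self.length

    def __getitem__(self, idx):
        x = torch.rand(3, self.size, self.size)                       # input image
        y = torch.randint(0, self.n_classes, (self.size, self.size))  # ground-truth seg map
        y_cls = torch.zeros(self.n_classes, dtype=torch.long)
        y_cls[torch.unique(y)] = 1                                    # classes present in y
        return x, y, y_cls


if __name__ == '__main__':
    loader = DataLoader(ToySegData(), batch_size=4, shuffle=True)
    x, y, y_cls = next(iter(loader))
    print(x.shape, y.shape, y_cls.shape)  # [4, 3, 64, 64], [4, 64, 64], [4, 3]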
import datetime
class Event(object):
def __init__(self):
self.id = None
self.raw = None
self.create_dt = datetime.datetime.now()
self.device_id = None
self.collector_id = None
self.device_hostname = None
self.device_domain_name = None
self.device_ip_address = None
self.types = []
def to_dict(self):
d = {}
for item in self.__dict__:
val = getattr(self, item)
if val != None:
d[item] = val
return d
|
normal
|
{
"blob_id": "7554b00f8c4d40f1d3ee2341f118048ca7ad10ea",
"index": 709,
"step-1": "<mask token>\n\n\nclass Event(object):\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass Event(object):\n <mask token>\n\n def to_dict(self):\n d = {}\n for item in self.__dict__:\n val = getattr(self, item)\n if val != None:\n d[item] = val\n return d\n",
"step-3": "<mask token>\n\n\nclass Event(object):\n\n def __init__(self):\n self.id = None\n self.raw = None\n self.create_dt = datetime.datetime.now()\n self.device_id = None\n self.collector_id = None\n self.device_hostname = None\n self.device_domain_name = None\n self.device_ip_address = None\n self.types = []\n\n def to_dict(self):\n d = {}\n for item in self.__dict__:\n val = getattr(self, item)\n if val != None:\n d[item] = val\n return d\n",
"step-4": "import datetime\n\n\nclass Event(object):\n\n def __init__(self):\n self.id = None\n self.raw = None\n self.create_dt = datetime.datetime.now()\n self.device_id = None\n self.collector_id = None\n self.device_hostname = None\n self.device_domain_name = None\n self.device_ip_address = None\n self.types = []\n\n def to_dict(self):\n d = {}\n for item in self.__dict__:\n val = getattr(self, item)\n if val != None:\n d[item] = val\n return d\n",
"step-5": null,
"step-ids": [
1,
2,
3,
4
]
}
|
[
1,
2,
3,
4
] |
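# Usage sketch for the Event class above (assumes the class is importable from
# this module): only attributes that are not None survive the to_dict() round
# trip, so unset identifiers are simply omitted from the resulting dict.
if __name__ == '__main__':
    e = Event()
    e.device_hostname = 'fw01'
    e.types = ['syslog']
    print(e.to_dict())
    # -> {'create_dt': datetime.datetime(...), 'device_hostname': 'fw01', 'types': ['syslog']}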
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
print('Train size: {}'.format(len(train)))
print('Test size: {}'.format(len(test)))
<|reserved_special_token_0|>
vf.add(Dense(800, activation='sigmoid', input_shape=784, optimizer='Momentum'))
vf.add(Dropout(0.5, input_shape=800))
vf.add(Dense(800, activation='sigmoid', input_shape=800, optimizer='ADAM'))
vf.add(Dense(10, activation='sigmoid', input_shape=800))
vf.train(x_train=train_x, y_train=train_y, x_test=test_x, y_test=test_y,
epochs=100000, alpha=0.001, mini_batch_size=100)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
train = list(read('train'))
test = list(read('test'))
print('Train size: {}'.format(len(train)))
print('Test size: {}'.format(len(test)))
test_x, test_y = normalize(test)
train_x, train_y = normalize(train)
vf = VectorFlux()
vf.add(Dense(800, activation='sigmoid', input_shape=784, optimizer='Momentum'))
vf.add(Dropout(0.5, input_shape=800))
vf.add(Dense(800, activation='sigmoid', input_shape=800, optimizer='ADAM'))
vf.add(Dense(10, activation='sigmoid', input_shape=800))
vf.train(x_train=train_x, y_train=train_y, x_test=test_x, y_test=test_y,
epochs=100000, alpha=0.001, mini_batch_size=100)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
from vectorflux import VectorFlux
from mnist import read, show, normalize
from vectorflux.layers import Dense
from vectorflux.layers.Dropout import Dropout
train = list(read('train'))
test = list(read('test'))
print('Train size: {}'.format(len(train)))
print('Test size: {}'.format(len(test)))
test_x, test_y = normalize(test)
train_x, train_y = normalize(train)
vf = VectorFlux()
vf.add(Dense(800, activation='sigmoid', input_shape=784, optimizer='Momentum'))
vf.add(Dropout(0.5, input_shape=800))
vf.add(Dense(800, activation='sigmoid', input_shape=800, optimizer='ADAM'))
vf.add(Dense(10, activation='sigmoid', input_shape=800))
vf.train(x_train=train_x, y_train=train_y, x_test=test_x, y_test=test_y,
epochs=100000, alpha=0.001, mini_batch_size=100)
<|reserved_special_token_1|>
"""
Implements a Neural Network
"""
from vectorflux import VectorFlux
from mnist import read, show, normalize
from vectorflux.layers import Dense
from vectorflux.layers.Dropout import Dropout
train = list(read('train'))
test = list(read('test'))
print("Train size: {}".format(len(train)))
print("Test size: {}".format(len(test)))
# Normalization for values
test_x, test_y = normalize(test)
train_x, train_y = normalize(train)
vf = VectorFlux()
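# MLP for MNIST: 784 inputs -> two 800-unit sigmoid hidden layers (with dropout) -> 10 outputs.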
vf.add(Dense(800, activation='sigmoid', input_shape=784, optimizer='Momentum'))
vf.add(Dropout(0.5, input_shape=800))
vf.add(Dense(800, activation='sigmoid', input_shape=800, optimizer='ADAM'))
vf.add(Dense(10, activation='sigmoid', input_shape=800))
vf.train(x_train = train_x, y_train = train_y, x_test=test_x, y_test = test_y, epochs=100000, alpha=0.001, mini_batch_size=100)
|
flexible
|
{
"blob_id": "94d296b5a13bfa59dba5812da31707f9db9080af",
"index": 1292,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint('Train size: {}'.format(len(train)))\nprint('Test size: {}'.format(len(test)))\n<mask token>\nvf.add(Dense(800, activation='sigmoid', input_shape=784, optimizer='Momentum'))\nvf.add(Dropout(0.5, input_shape=800))\nvf.add(Dense(800, activation='sigmoid', input_shape=800, optimizer='ADAM'))\nvf.add(Dense(10, activation='sigmoid', input_shape=800))\nvf.train(x_train=train_x, y_train=train_y, x_test=test_x, y_test=test_y,\n epochs=100000, alpha=0.001, mini_batch_size=100)\n",
"step-3": "<mask token>\ntrain = list(read('train'))\ntest = list(read('test'))\nprint('Train size: {}'.format(len(train)))\nprint('Test size: {}'.format(len(test)))\ntest_x, test_y = normalize(test)\ntrain_x, train_y = normalize(train)\nvf = VectorFlux()\nvf.add(Dense(800, activation='sigmoid', input_shape=784, optimizer='Momentum'))\nvf.add(Dropout(0.5, input_shape=800))\nvf.add(Dense(800, activation='sigmoid', input_shape=800, optimizer='ADAM'))\nvf.add(Dense(10, activation='sigmoid', input_shape=800))\nvf.train(x_train=train_x, y_train=train_y, x_test=test_x, y_test=test_y,\n epochs=100000, alpha=0.001, mini_batch_size=100)\n",
"step-4": "<mask token>\nfrom vectorflux import VectorFlux\nfrom mnist import read, show, normalize\nfrom vectorflux.layers import Dense\nfrom vectorflux.layers.Dropout import Dropout\ntrain = list(read('train'))\ntest = list(read('test'))\nprint('Train size: {}'.format(len(train)))\nprint('Test size: {}'.format(len(test)))\ntest_x, test_y = normalize(test)\ntrain_x, train_y = normalize(train)\nvf = VectorFlux()\nvf.add(Dense(800, activation='sigmoid', input_shape=784, optimizer='Momentum'))\nvf.add(Dropout(0.5, input_shape=800))\nvf.add(Dense(800, activation='sigmoid', input_shape=800, optimizer='ADAM'))\nvf.add(Dense(10, activation='sigmoid', input_shape=800))\nvf.train(x_train=train_x, y_train=train_y, x_test=test_x, y_test=test_y,\n epochs=100000, alpha=0.001, mini_batch_size=100)\n",
"step-5": "\"\"\"\nImplements a Neural Network\n\n\"\"\"\nfrom vectorflux import VectorFlux\nfrom mnist import read, show, normalize\n\nfrom vectorflux.layers import Dense\nfrom vectorflux.layers.Dropout import Dropout\n\ntrain = list(read('train'))\ntest = list(read('test'))\n\nprint(\"Train size: {}\".format(len(train)))\nprint(\"Test size: {}\".format(len(test)))\n\n# Normalization for values\ntest_x, test_y = normalize(test)\ntrain_x, train_y = normalize(train)\n\nvf = VectorFlux()\nvf.add(Dense(800, activation='sigmoid', input_shape=784, optimizer='Momentum'))\nvf.add(Dropout(0.5, input_shape=800))\nvf.add(Dense(800, activation='sigmoid', input_shape=800, optimizer='ADAM'))\nvf.add(Dense(10, activation='sigmoid', input_shape=800))\n\nvf.train(x_train = train_x, y_train = train_y, x_test=test_x, y_test = test_y, epochs=100000, alpha=0.001, mini_batch_size=100)\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
class StudentForm(forms.Form):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class EmployeeForm(forms.ModelForm):
class Meta:
model = Employee
fields = '__all__'
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class StudentForm(forms.Form):
firstname = forms.CharField(label='Enter first name:', max_length=50)
lastname = forms.CharField(label='Enter last name:', max_length=100)
email = forms.EmailField(label='Enter Email')
file = forms.FileField()
class EmployeeForm(forms.ModelForm):
class Meta:
model = Employee
fields = '__all__'
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class EmpForm(forms.ModelForm):
class Meta:
model = Student
fields = '__all__'
class StudentForm(forms.Form):
firstname = forms.CharField(label='Enter first name:', max_length=50)
lastname = forms.CharField(label='Enter last name:', max_length=100)
email = forms.EmailField(label='Enter Email')
file = forms.FileField()
class EmployeeForm(forms.ModelForm):
class Meta:
model = Employee
fields = '__all__'
<|reserved_special_token_1|>
from django import forms
from myapp.models import Student
from myapp.models import Employee
class EmpForm(forms.ModelForm):
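    # ModelForm: its fields are generated automatically from the Student model.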
class Meta:
model = Student
fields = '__all__'
class StudentForm(forms.Form):
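    # Plain Form: fields are declared by hand rather than derived from a model.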
firstname = forms.CharField(label='Enter first name:', max_length=50)
lastname = forms.CharField(label='Enter last name:', max_length=100)
email = forms.EmailField(label='Enter Email')
file = forms.FileField()
class EmployeeForm(forms.ModelForm):
class Meta:
model = Employee
fields = '__all__'
<|reserved_special_token_1|>
from django import forms
from myapp.models import Student
from myapp.models import Employee
class EmpForm(forms.ModelForm):
class Meta:
model = Student
fields = "__all__"
class StudentForm(forms.Form):
firstname = forms.CharField(label="Enter first name:", max_length=50)
lastname = forms.CharField(label="Enter last name:", max_length=100)
email=forms.EmailField(label="Enter Email")
file=forms.FileField()
class EmployeeForm(forms.ModelForm):
class Meta:
model = Employee
fields = "__all__"
|
flexible
|
{
"blob_id": "0b141ecca501c21df50e76d0841dd5651274f0da",
"index": 8509,
"step-1": "<mask token>\n\n\nclass StudentForm(forms.Form):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\nclass EmployeeForm(forms.ModelForm):\n\n\n class Meta:\n model = Employee\n fields = '__all__'\n",
"step-2": "<mask token>\n\n\nclass StudentForm(forms.Form):\n firstname = forms.CharField(label='Enter first name:', max_length=50)\n lastname = forms.CharField(label='Enter last name:', max_length=100)\n email = forms.EmailField(label='Enter Email')\n file = forms.FileField()\n\n\nclass EmployeeForm(forms.ModelForm):\n\n\n class Meta:\n model = Employee\n fields = '__all__'\n",
"step-3": "<mask token>\n\n\nclass EmpForm(forms.ModelForm):\n\n\n class Meta:\n model = Student\n fields = '__all__'\n\n\nclass StudentForm(forms.Form):\n firstname = forms.CharField(label='Enter first name:', max_length=50)\n lastname = forms.CharField(label='Enter last name:', max_length=100)\n email = forms.EmailField(label='Enter Email')\n file = forms.FileField()\n\n\nclass EmployeeForm(forms.ModelForm):\n\n\n class Meta:\n model = Employee\n fields = '__all__'\n",
"step-4": "from django import forms\nfrom myapp.models import Student\nfrom myapp.models import Employee\n\n\nclass EmpForm(forms.ModelForm):\n\n\n class Meta:\n model = Student\n fields = '__all__'\n\n\nclass StudentForm(forms.Form):\n firstname = forms.CharField(label='Enter first name:', max_length=50)\n lastname = forms.CharField(label='Enter last name:', max_length=100)\n email = forms.EmailField(label='Enter Email')\n file = forms.FileField()\n\n\nclass EmployeeForm(forms.ModelForm):\n\n\n class Meta:\n model = Employee\n fields = '__all__'\n",
"step-5": "from django import forms\nfrom myapp.models import Student\nfrom myapp.models import Employee\n\n\nclass EmpForm(forms.ModelForm):\n class Meta:\n model = Student\n fields = \"__all__\"\n\n\nclass StudentForm(forms.Form):\n firstname = forms.CharField(label=\"Enter first name:\", max_length=50)\n lastname = forms.CharField(label=\"Enter last name:\", max_length=100)\n email=forms.EmailField(label=\"Enter Email\")\n file=forms.FileField()\n\nclass EmployeeForm(forms.ModelForm):\n class Meta:\n model = Employee\n fields = \"__all__\"\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
<|reserved_special_token_0|>
class testAbRectangularGridBuilder(unittest.TestCase):
def getMockHiResAlphaMtxAndCstCellDet(self, posCellCentroids=None):
class _mockClass:
def __init__(self, posCellCentroids):
self.posCellCentroids = posCellCentroids
self.cell = None
def getAlphaSubMatrix(self, cell):
sm = _mockClass(self.posCellCentroids)
sm.cell = cell
return sm
def _positive(self, cell):
cntrs = self.posCellCentroids
if cell is None or cntrs is None:
return False
else:
for c in cntrs:
if cell.contains(gm.Point([c[0], c[1]])):
return True
return False
def onLand(self):
cell = self.cell
return self._positive(cell)
def isCoastalCell(self, cell, boundary=None, surface=-1):
return self._positive(cell)
return _mockClass(posCellCentroids)
def testGetSeaGridSerial(self):
minx = 100.0
miny = 45.0
dx = 0.5
dy = 1.0
nx = 30
ny = 10
maxx = minx + nx * dx
maxy = miny + ny * dy
landCntrs = [[100.25, 45.25], [105.25, 47.25]]
coastCntrs = [[100.75, 45.25], [105.25, 47.25]]
gb = abRectangularGridBuilder(minx, miny, dx, dy, nx, ny,
nParWorker=1, minXYIsCentroid=False)
hiResMtx = self.getMockHiResAlphaMtxAndCstCellDet(landCntrs)
cstClDet = self.getMockHiResAlphaMtxAndCstCellDet(coastCntrs)
grd = gb.buildGrid(hiResMtx, cstClDet)
self.assertFalse(grd.wrapAroundDateline)
self.assertEqual(1, grd.nParWorker)
cells = grd.cells
self.assertEqual(nx * ny - 3, len(cells))
<|reserved_special_token_0|>
def testGetNeighborsSerial(self):
minx = 100.0
miny = 45.0
dx = 0.5
dy = 1.0
nx = 30
ny = 10
maxx = minx + nx * dx
maxy = miny + ny * dy
gb = abRectangularGridBuilder(minx, miny, dx, dy, nx, ny, nParWorker=1)
hiResMtx = self.getMockHiResAlphaMtxAndCstCellDet()
cstClDet = self.getMockHiResAlphaMtxAndCstCellDet()
grd = gb.buildGrid(hiResMtx, cstClDet)
self.assertEqual(1, grd.nParWorker)
self.assertFalse(grd.wrapAroundDateline)
cells = grd.cells
cell = cells[0]
ncls = grd.getNeighbors(cell)
self.assertEqual(3, len(ncls))
for nc in ncls:
self.assertTrue(cell.distance(nc) < 1e-09)
cell = cells[45]
ncls = grd.getNeighbors(cell)
self.assertEqual(8, len(ncls))
for nc in ncls:
self.assertTrue(cell.distance(nc) < 1e-09)
cell = cells[100]
ncls = grd.getNeighbors(cell)
self.assertEqual(5, len(ncls))
for nc in ncls:
self.assertTrue(cell.distance(nc) < 1e-09)
def testGetNeighborsParallel(self):
minx = 100.0
miny = 45.0
dx = 0.5
dy = 1.0
nx = 30
ny = 10
maxx = minx + nx * dx
maxy = miny + ny * dy
gb = abRectangularGridBuilder(minx, miny, dx, dy, nx, ny, nParWorker=4)
hiResMtx = self.getMockHiResAlphaMtxAndCstCellDet()
cstClDet = self.getMockHiResAlphaMtxAndCstCellDet()
grd = gb.buildGrid(hiResMtx, cstClDet)
self.assertEqual(4, grd.nParWorker)
self.assertFalse(grd.wrapAroundDateline)
cells = grd.cells
cell = cells[0]
ncls = grd.getNeighbors(cell)
self.assertEqual(3, len(ncls))
for nc in ncls:
self.assertTrue(cell.distance(nc) < 1e-09)
cell = cells[45]
ncls = grd.getNeighbors(cell)
self.assertEqual(8, len(ncls))
for nc in ncls:
self.assertTrue(cell.distance(nc) < 1e-09)
cell = cells[100]
ncls = grd.getNeighbors(cell)
self.assertEqual(5, len(ncls))
for nc in ncls:
self.assertTrue(cell.distance(nc) < 1e-09)
def testGetNeighborsWrapAroundGlobalParallel(self):
minx = -179.0
miny = -89.0
dx = 2
dy = 2
nx = 180
ny = 90
maxx = minx + nx * dx
maxy = miny + ny * dy
gb = abRectangularGridBuilder(minx, miny, dx, dy, nx, ny, nParWorker=4)
hiResMtx = self.getMockHiResAlphaMtxAndCstCellDet()
cstClDet = self.getMockHiResAlphaMtxAndCstCellDet()
grd = gb.buildGrid(hiResMtx, cstClDet)
self.assertTrue(grd.wrapAroundDateline)
cells = grd.cells
cell = cells[0]
ncls = grd.getNeighbors(cell)
self.assertEqual(5, len(ncls))
for nc in ncls:
self.assertTrue(cell.distance(nc) < 1e-09)
self.assertEqual(-182, ncls[3].boundary.coords[0][0])
self.assertEqual(-182, ncls[4].boundary.coords[0][0])
cell = cells[45]
ncls = grd.getNeighbors(cell)
self.assertEqual(8, len(ncls))
for nc in ncls:
self.assertTrue(cell.distance(nc) < 1e-09)
self.assertEqual(-182, ncls[5].boundary.coords[0][0])
self.assertEqual(-182, ncls[6].boundary.coords[0][0])
self.assertEqual(-182, ncls[6].boundary.coords[0][0])
cell = cells[65]
ncls = grd.getNeighbors(cell)
self.assertEqual(8, len(ncls))
for nc in ncls:
self.assertTrue(cell.distance(nc) < 1e-09)
self.assertEqual(-182, ncls[5].boundary.coords[0][0])
self.assertEqual(-182, ncls[6].boundary.coords[0][0])
self.assertEqual(-182, ncls[6].boundary.coords[0][0])
cell = cells[89]
ncls = grd.getNeighbors(cell)
self.assertEqual(5, len(ncls))
for nc in ncls:
self.assertTrue(cell.distance(nc) < 1e-09)
self.assertEqual(-182, ncls[3].boundary.coords[0][0])
self.assertEqual(-182, ncls[4].boundary.coords[0][0])
cell = cells[200]
ncls = grd.getNeighbors(cell)
self.assertEqual(8, len(ncls))
for nc in ncls:
self.assertTrue(cell.distance(nc) < 1e-09)
cell = cells[nx * ny - 22]
ncls = grd.getNeighbors(cell)
self.assertEqual(8, len(ncls))
for nc in ncls:
self.assertTrue(cell.distance(nc) < 1e-09)
self.assertEqual(182, ncls[5].boundary.coords[1][0])
self.assertEqual(182, ncls[6].boundary.coords[1][0])
self.assertEqual(182, ncls[6].boundary.coords[1][0])
cell = cells[nx * ny - 1]
ncls = grd.getNeighbors(cell)
self.assertEqual(5, len(ncls))
for nc in ncls:
self.assertTrue(cell.distance(nc) < 1e-09)
self.assertEqual(182, ncls[3].boundary.coords[1][0])
self.assertEqual(182, ncls[4].boundary.coords[1][0])
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class testAbRectangularGridBuilder(unittest.TestCase):
def getMockHiResAlphaMtxAndCstCellDet(self, posCellCentroids=None):
class _mockClass:
def __init__(self, posCellCentroids):
self.posCellCentroids = posCellCentroids
self.cell = None
def getAlphaSubMatrix(self, cell):
sm = _mockClass(self.posCellCentroids)
sm.cell = cell
return sm
def _positive(self, cell):
cntrs = self.posCellCentroids
if cell is None or cntrs is None:
return False
else:
for c in cntrs:
if cell.contains(gm.Point([c[0], c[1]])):
return True
return False
def onLand(self):
cell = self.cell
return self._positive(cell)
def isCoastalCell(self, cell, boundary=None, surface=-1):
return self._positive(cell)
return _mockClass(posCellCentroids)
def testGetSeaGridSerial(self):
minx = 100.0
miny = 45.0
dx = 0.5
dy = 1.0
nx = 30
ny = 10
maxx = minx + nx * dx
maxy = miny + ny * dy
landCntrs = [[100.25, 45.25], [105.25, 47.25]]
coastCntrs = [[100.75, 45.25], [105.25, 47.25]]
gb = abRectangularGridBuilder(minx, miny, dx, dy, nx, ny,
nParWorker=1, minXYIsCentroid=False)
hiResMtx = self.getMockHiResAlphaMtxAndCstCellDet(landCntrs)
cstClDet = self.getMockHiResAlphaMtxAndCstCellDet(coastCntrs)
grd = gb.buildGrid(hiResMtx, cstClDet)
self.assertFalse(grd.wrapAroundDateline)
self.assertEqual(1, grd.nParWorker)
cells = grd.cells
self.assertEqual(nx * ny - 3, len(cells))
def testGetSeaGridParallel(self):
minx = 100.0
miny = 45.0
dx = 0.5
dy = 1.0
nx = 30
ny = 10
maxx = minx + nx * dx
maxy = miny + ny * dy
landCntrs = [[100.25, 45.25], [105.25, 47.25]]
coastCntrs = [[100.75, 45.25], [105.25, 47.25]]
gb = abRectangularGridBuilder(minx, miny, dx, dy, nx, ny,
nParWorker=4, minXYIsCentroid=False)
hiResMtx = self.getMockHiResAlphaMtxAndCstCellDet(landCntrs)
cstClDet = self.getMockHiResAlphaMtxAndCstCellDet(coastCntrs)
grd = gb.buildGrid(hiResMtx, cstClDet)
self.assertEqual(4, grd.nParWorker)
self.assertFalse(grd.wrapAroundDateline)
cells = grd.cells
self.assertEqual(nx * ny - 3, len(cells))
def testGetNeighborsSerial(self):
minx = 100.0
miny = 45.0
dx = 0.5
dy = 1.0
nx = 30
ny = 10
maxx = minx + nx * dx
maxy = miny + ny * dy
gb = abRectangularGridBuilder(minx, miny, dx, dy, nx, ny, nParWorker=1)
hiResMtx = self.getMockHiResAlphaMtxAndCstCellDet()
cstClDet = self.getMockHiResAlphaMtxAndCstCellDet()
grd = gb.buildGrid(hiResMtx, cstClDet)
self.assertEqual(1, grd.nParWorker)
self.assertFalse(grd.wrapAroundDateline)
cells = grd.cells
cell = cells[0]
ncls = grd.getNeighbors(cell)
self.assertEqual(3, len(ncls))
for nc in ncls:
self.assertTrue(cell.distance(nc) < 1e-09)
cell = cells[45]
ncls = grd.getNeighbors(cell)
self.assertEqual(8, len(ncls))
for nc in ncls:
self.assertTrue(cell.distance(nc) < 1e-09)
cell = cells[100]
ncls = grd.getNeighbors(cell)
self.assertEqual(5, len(ncls))
for nc in ncls:
self.assertTrue(cell.distance(nc) < 1e-09)
def testGetNeighborsParallel(self):
minx = 100.0
miny = 45.0
dx = 0.5
dy = 1.0
nx = 30
ny = 10
maxx = minx + nx * dx
maxy = miny + ny * dy
gb = abRectangularGridBuilder(minx, miny, dx, dy, nx, ny, nParWorker=4)
hiResMtx = self.getMockHiResAlphaMtxAndCstCellDet()
cstClDet = self.getMockHiResAlphaMtxAndCstCellDet()
grd = gb.buildGrid(hiResMtx, cstClDet)
self.assertEqual(4, grd.nParWorker)
self.assertFalse(grd.wrapAroundDateline)
cells = grd.cells
cell = cells[0]
ncls = grd.getNeighbors(cell)
self.assertEqual(3, len(ncls))
for nc in ncls:
self.assertTrue(cell.distance(nc) < 1e-09)
cell = cells[45]
ncls = grd.getNeighbors(cell)
self.assertEqual(8, len(ncls))
for nc in ncls:
self.assertTrue(cell.distance(nc) < 1e-09)
cell = cells[100]
ncls = grd.getNeighbors(cell)
self.assertEqual(5, len(ncls))
for nc in ncls:
self.assertTrue(cell.distance(nc) < 1e-09)
def testGetNeighborsWrapAroundGlobalParallel(self):
minx = -179.0
miny = -89.0
dx = 2
dy = 2
nx = 180
ny = 90
maxx = minx + nx * dx
maxy = miny + ny * dy
gb = abRectangularGridBuilder(minx, miny, dx, dy, nx, ny, nParWorker=4)
hiResMtx = self.getMockHiResAlphaMtxAndCstCellDet()
cstClDet = self.getMockHiResAlphaMtxAndCstCellDet()
grd = gb.buildGrid(hiResMtx, cstClDet)
self.assertTrue(grd.wrapAroundDateline)
cells = grd.cells
cell = cells[0]
ncls = grd.getNeighbors(cell)
self.assertEqual(5, len(ncls))
for nc in ncls:
self.assertTrue(cell.distance(nc) < 1e-09)
self.assertEqual(-182, ncls[3].boundary.coords[0][0])
self.assertEqual(-182, ncls[4].boundary.coords[0][0])
cell = cells[45]
ncls = grd.getNeighbors(cell)
self.assertEqual(8, len(ncls))
for nc in ncls:
self.assertTrue(cell.distance(nc) < 1e-09)
self.assertEqual(-182, ncls[5].boundary.coords[0][0])
self.assertEqual(-182, ncls[6].boundary.coords[0][0])
self.assertEqual(-182, ncls[6].boundary.coords[0][0])
cell = cells[65]
ncls = grd.getNeighbors(cell)
self.assertEqual(8, len(ncls))
for nc in ncls:
self.assertTrue(cell.distance(nc) < 1e-09)
self.assertEqual(-182, ncls[5].boundary.coords[0][0])
self.assertEqual(-182, ncls[6].boundary.coords[0][0])
self.assertEqual(-182, ncls[6].boundary.coords[0][0])
cell = cells[89]
ncls = grd.getNeighbors(cell)
self.assertEqual(5, len(ncls))
for nc in ncls:
self.assertTrue(cell.distance(nc) < 1e-09)
self.assertEqual(-182, ncls[3].boundary.coords[0][0])
self.assertEqual(-182, ncls[4].boundary.coords[0][0])
cell = cells[200]
ncls = grd.getNeighbors(cell)
self.assertEqual(8, len(ncls))
for nc in ncls:
self.assertTrue(cell.distance(nc) < 1e-09)
cell = cells[nx * ny - 22]
ncls = grd.getNeighbors(cell)
self.assertEqual(8, len(ncls))
for nc in ncls:
self.assertTrue(cell.distance(nc) < 1e-09)
self.assertEqual(182, ncls[5].boundary.coords[1][0])
self.assertEqual(182, ncls[6].boundary.coords[1][0])
self.assertEqual(182, ncls[6].boundary.coords[1][0])
cell = cells[nx * ny - 1]
ncls = grd.getNeighbors(cell)
self.assertEqual(5, len(ncls))
for nc in ncls:
self.assertTrue(cell.distance(nc) < 1e-09)
self.assertEqual(182, ncls[3].boundary.coords[1][0])
self.assertEqual(182, ncls[4].boundary.coords[1][0])
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class testAbRectangularGridBuilder(unittest.TestCase):
def getMockHiResAlphaMtxAndCstCellDet(self, posCellCentroids=None):
class _mockClass:
def __init__(self, posCellCentroids):
self.posCellCentroids = posCellCentroids
self.cell = None
def getAlphaSubMatrix(self, cell):
sm = _mockClass(self.posCellCentroids)
sm.cell = cell
return sm
def _positive(self, cell):
cntrs = self.posCellCentroids
if cell is None or cntrs is None:
return False
else:
for c in cntrs:
if cell.contains(gm.Point([c[0], c[1]])):
return True
return False
def onLand(self):
cell = self.cell
return self._positive(cell)
def isCoastalCell(self, cell, boundary=None, surface=-1):
return self._positive(cell)
return _mockClass(posCellCentroids)
def testGetSeaGridSerial(self):
minx = 100.0
miny = 45.0
dx = 0.5
dy = 1.0
nx = 30
ny = 10
maxx = minx + nx * dx
maxy = miny + ny * dy
landCntrs = [[100.25, 45.25], [105.25, 47.25]]
coastCntrs = [[100.75, 45.25], [105.25, 47.25]]
gb = abRectangularGridBuilder(minx, miny, dx, dy, nx, ny,
nParWorker=1, minXYIsCentroid=False)
hiResMtx = self.getMockHiResAlphaMtxAndCstCellDet(landCntrs)
cstClDet = self.getMockHiResAlphaMtxAndCstCellDet(coastCntrs)
grd = gb.buildGrid(hiResMtx, cstClDet)
self.assertFalse(grd.wrapAroundDateline)
self.assertEqual(1, grd.nParWorker)
cells = grd.cells
self.assertEqual(nx * ny - 3, len(cells))
def testGetSeaGridParallel(self):
minx = 100.0
miny = 45.0
dx = 0.5
dy = 1.0
nx = 30
ny = 10
maxx = minx + nx * dx
maxy = miny + ny * dy
landCntrs = [[100.25, 45.25], [105.25, 47.25]]
coastCntrs = [[100.75, 45.25], [105.25, 47.25]]
gb = abRectangularGridBuilder(minx, miny, dx, dy, nx, ny,
nParWorker=4, minXYIsCentroid=False)
hiResMtx = self.getMockHiResAlphaMtxAndCstCellDet(landCntrs)
cstClDet = self.getMockHiResAlphaMtxAndCstCellDet(coastCntrs)
grd = gb.buildGrid(hiResMtx, cstClDet)
self.assertEqual(4, grd.nParWorker)
self.assertFalse(grd.wrapAroundDateline)
cells = grd.cells
self.assertEqual(nx * ny - 3, len(cells))
def testGetNeighborsSerial(self):
minx = 100.0
miny = 45.0
dx = 0.5
dy = 1.0
nx = 30
ny = 10
maxx = minx + nx * dx
maxy = miny + ny * dy
gb = abRectangularGridBuilder(minx, miny, dx, dy, nx, ny, nParWorker=1)
hiResMtx = self.getMockHiResAlphaMtxAndCstCellDet()
cstClDet = self.getMockHiResAlphaMtxAndCstCellDet()
grd = gb.buildGrid(hiResMtx, cstClDet)
self.assertEqual(1, grd.nParWorker)
self.assertFalse(grd.wrapAroundDateline)
cells = grd.cells
cell = cells[0]
ncls = grd.getNeighbors(cell)
self.assertEqual(3, len(ncls))
for nc in ncls:
self.assertTrue(cell.distance(nc) < 1e-09)
cell = cells[45]
ncls = grd.getNeighbors(cell)
self.assertEqual(8, len(ncls))
for nc in ncls:
self.assertTrue(cell.distance(nc) < 1e-09)
cell = cells[100]
ncls = grd.getNeighbors(cell)
self.assertEqual(5, len(ncls))
for nc in ncls:
self.assertTrue(cell.distance(nc) < 1e-09)
def testGetNeighborsParallel(self):
minx = 100.0
miny = 45.0
dx = 0.5
dy = 1.0
nx = 30
ny = 10
maxx = minx + nx * dx
maxy = miny + ny * dy
gb = abRectangularGridBuilder(minx, miny, dx, dy, nx, ny, nParWorker=4)
hiResMtx = self.getMockHiResAlphaMtxAndCstCellDet()
cstClDet = self.getMockHiResAlphaMtxAndCstCellDet()
grd = gb.buildGrid(hiResMtx, cstClDet)
self.assertEqual(4, grd.nParWorker)
self.assertFalse(grd.wrapAroundDateline)
cells = grd.cells
cell = cells[0]
ncls = grd.getNeighbors(cell)
self.assertEqual(3, len(ncls))
for nc in ncls:
self.assertTrue(cell.distance(nc) < 1e-09)
cell = cells[45]
ncls = grd.getNeighbors(cell)
self.assertEqual(8, len(ncls))
for nc in ncls:
self.assertTrue(cell.distance(nc) < 1e-09)
cell = cells[100]
ncls = grd.getNeighbors(cell)
self.assertEqual(5, len(ncls))
for nc in ncls:
self.assertTrue(cell.distance(nc) < 1e-09)
def testGetNeighborsWrapAroundGlobalParallel(self):
minx = -179.0
miny = -89.0
dx = 2
dy = 2
nx = 180
ny = 90
maxx = minx + nx * dx
maxy = miny + ny * dy
gb = abRectangularGridBuilder(minx, miny, dx, dy, nx, ny, nParWorker=4)
hiResMtx = self.getMockHiResAlphaMtxAndCstCellDet()
cstClDet = self.getMockHiResAlphaMtxAndCstCellDet()
grd = gb.buildGrid(hiResMtx, cstClDet)
self.assertTrue(grd.wrapAroundDateline)
cells = grd.cells
cell = cells[0]
ncls = grd.getNeighbors(cell)
self.assertEqual(5, len(ncls))
for nc in ncls:
self.assertTrue(cell.distance(nc) < 1e-09)
self.assertEqual(-182, ncls[3].boundary.coords[0][0])
self.assertEqual(-182, ncls[4].boundary.coords[0][0])
cell = cells[45]
ncls = grd.getNeighbors(cell)
self.assertEqual(8, len(ncls))
for nc in ncls:
self.assertTrue(cell.distance(nc) < 1e-09)
self.assertEqual(-182, ncls[5].boundary.coords[0][0])
self.assertEqual(-182, ncls[6].boundary.coords[0][0])
self.assertEqual(-182, ncls[6].boundary.coords[0][0])
cell = cells[65]
ncls = grd.getNeighbors(cell)
self.assertEqual(8, len(ncls))
for nc in ncls:
self.assertTrue(cell.distance(nc) < 1e-09)
self.assertEqual(-182, ncls[5].boundary.coords[0][0])
self.assertEqual(-182, ncls[6].boundary.coords[0][0])
self.assertEqual(-182, ncls[6].boundary.coords[0][0])
cell = cells[89]
ncls = grd.getNeighbors(cell)
self.assertEqual(5, len(ncls))
for nc in ncls:
self.assertTrue(cell.distance(nc) < 1e-09)
self.assertEqual(-182, ncls[3].boundary.coords[0][0])
self.assertEqual(-182, ncls[4].boundary.coords[0][0])
cell = cells[200]
ncls = grd.getNeighbors(cell)
self.assertEqual(8, len(ncls))
for nc in ncls:
self.assertTrue(cell.distance(nc) < 1e-09)
cell = cells[nx * ny - 22]
ncls = grd.getNeighbors(cell)
self.assertEqual(8, len(ncls))
for nc in ncls:
self.assertTrue(cell.distance(nc) < 1e-09)
self.assertEqual(182, ncls[5].boundary.coords[1][0])
self.assertEqual(182, ncls[6].boundary.coords[1][0])
self.assertEqual(182, ncls[6].boundary.coords[1][0])
cell = cells[nx * ny - 1]
ncls = grd.getNeighbors(cell)
self.assertEqual(5, len(ncls))
for nc in ncls:
self.assertTrue(cell.distance(nc) < 1e-09)
self.assertEqual(182, ncls[3].boundary.coords[1][0])
self.assertEqual(182, ncls[4].boundary.coords[1][0])
if __name__ == '__main__':
unittest.main()
<|reserved_special_token_1|>
import unittest
import shapely.geometry as gm
from alphaBetaLab.abRectangularGridBuilder import abRectangularGridBuilder
class testAbRectangularGridBuilder(unittest.TestCase):
def getMockHiResAlphaMtxAndCstCellDet(self, posCellCentroids=None):
class _mockClass:
def __init__(self, posCellCentroids):
self.posCellCentroids = posCellCentroids
self.cell = None
def getAlphaSubMatrix(self, cell):
sm = _mockClass(self.posCellCentroids)
sm.cell = cell
return sm
def _positive(self, cell):
cntrs = self.posCellCentroids
if cell is None or cntrs is None:
return False
else:
for c in cntrs:
if cell.contains(gm.Point([c[0], c[1]])):
return True
return False
def onLand(self):
cell = self.cell
return self._positive(cell)
def isCoastalCell(self, cell, boundary=None, surface=-1):
return self._positive(cell)
return _mockClass(posCellCentroids)
def testGetSeaGridSerial(self):
minx = 100.0
miny = 45.0
dx = 0.5
dy = 1.0
nx = 30
ny = 10
maxx = minx + nx * dx
maxy = miny + ny * dy
landCntrs = [[100.25, 45.25], [105.25, 47.25]]
coastCntrs = [[100.75, 45.25], [105.25, 47.25]]
gb = abRectangularGridBuilder(minx, miny, dx, dy, nx, ny,
nParWorker=1, minXYIsCentroid=False)
hiResMtx = self.getMockHiResAlphaMtxAndCstCellDet(landCntrs)
cstClDet = self.getMockHiResAlphaMtxAndCstCellDet(coastCntrs)
grd = gb.buildGrid(hiResMtx, cstClDet)
self.assertFalse(grd.wrapAroundDateline)
self.assertEqual(1, grd.nParWorker)
cells = grd.cells
self.assertEqual(nx * ny - 3, len(cells))
def testGetSeaGridParallel(self):
minx = 100.0
miny = 45.0
dx = 0.5
dy = 1.0
nx = 30
ny = 10
maxx = minx + nx * dx
maxy = miny + ny * dy
landCntrs = [[100.25, 45.25], [105.25, 47.25]]
coastCntrs = [[100.75, 45.25], [105.25, 47.25]]
gb = abRectangularGridBuilder(minx, miny, dx, dy, nx, ny,
nParWorker=4, minXYIsCentroid=False)
hiResMtx = self.getMockHiResAlphaMtxAndCstCellDet(landCntrs)
cstClDet = self.getMockHiResAlphaMtxAndCstCellDet(coastCntrs)
grd = gb.buildGrid(hiResMtx, cstClDet)
self.assertEqual(4, grd.nParWorker)
self.assertFalse(grd.wrapAroundDateline)
cells = grd.cells
self.assertEqual(nx * ny - 3, len(cells))
def testGetNeighborsSerial(self):
minx = 100.0
miny = 45.0
dx = 0.5
dy = 1.0
nx = 30
ny = 10
maxx = minx + nx * dx
maxy = miny + ny * dy
gb = abRectangularGridBuilder(minx, miny, dx, dy, nx, ny, nParWorker=1)
hiResMtx = self.getMockHiResAlphaMtxAndCstCellDet()
cstClDet = self.getMockHiResAlphaMtxAndCstCellDet()
grd = gb.buildGrid(hiResMtx, cstClDet)
self.assertEqual(1, grd.nParWorker)
self.assertFalse(grd.wrapAroundDateline)
cells = grd.cells
cell = cells[0]
ncls = grd.getNeighbors(cell)
self.assertEqual(3, len(ncls))
for nc in ncls:
self.assertTrue(cell.distance(nc) < 1e-09)
cell = cells[45]
ncls = grd.getNeighbors(cell)
self.assertEqual(8, len(ncls))
for nc in ncls:
self.assertTrue(cell.distance(nc) < 1e-09)
cell = cells[100]
ncls = grd.getNeighbors(cell)
self.assertEqual(5, len(ncls))
for nc in ncls:
self.assertTrue(cell.distance(nc) < 1e-09)
def testGetNeighborsParallel(self):
minx = 100.0
miny = 45.0
dx = 0.5
dy = 1.0
nx = 30
ny = 10
maxx = minx + nx * dx
maxy = miny + ny * dy
gb = abRectangularGridBuilder(minx, miny, dx, dy, nx, ny, nParWorker=4)
hiResMtx = self.getMockHiResAlphaMtxAndCstCellDet()
cstClDet = self.getMockHiResAlphaMtxAndCstCellDet()
grd = gb.buildGrid(hiResMtx, cstClDet)
self.assertEqual(4, grd.nParWorker)
self.assertFalse(grd.wrapAroundDateline)
cells = grd.cells
cell = cells[0]
ncls = grd.getNeighbors(cell)
self.assertEqual(3, len(ncls))
for nc in ncls:
self.assertTrue(cell.distance(nc) < 1e-09)
cell = cells[45]
ncls = grd.getNeighbors(cell)
self.assertEqual(8, len(ncls))
for nc in ncls:
self.assertTrue(cell.distance(nc) < 1e-09)
cell = cells[100]
ncls = grd.getNeighbors(cell)
self.assertEqual(5, len(ncls))
for nc in ncls:
self.assertTrue(cell.distance(nc) < 1e-09)
def testGetNeighborsWrapAroundGlobalParallel(self):
minx = -179.0
miny = -89.0
dx = 2
dy = 2
nx = 180
ny = 90
maxx = minx + nx * dx
maxy = miny + ny * dy
gb = abRectangularGridBuilder(minx, miny, dx, dy, nx, ny, nParWorker=4)
hiResMtx = self.getMockHiResAlphaMtxAndCstCellDet()
cstClDet = self.getMockHiResAlphaMtxAndCstCellDet()
grd = gb.buildGrid(hiResMtx, cstClDet)
self.assertTrue(grd.wrapAroundDateline)
cells = grd.cells
cell = cells[0]
ncls = grd.getNeighbors(cell)
self.assertEqual(5, len(ncls))
for nc in ncls:
self.assertTrue(cell.distance(nc) < 1e-09)
self.assertEqual(-182, ncls[3].boundary.coords[0][0])
self.assertEqual(-182, ncls[4].boundary.coords[0][0])
cell = cells[45]
ncls = grd.getNeighbors(cell)
self.assertEqual(8, len(ncls))
for nc in ncls:
self.assertTrue(cell.distance(nc) < 1e-09)
self.assertEqual(-182, ncls[5].boundary.coords[0][0])
self.assertEqual(-182, ncls[6].boundary.coords[0][0])
self.assertEqual(-182, ncls[6].boundary.coords[0][0])
cell = cells[65]
ncls = grd.getNeighbors(cell)
self.assertEqual(8, len(ncls))
for nc in ncls:
self.assertTrue(cell.distance(nc) < 1e-09)
self.assertEqual(-182, ncls[5].boundary.coords[0][0])
self.assertEqual(-182, ncls[6].boundary.coords[0][0])
self.assertEqual(-182, ncls[6].boundary.coords[0][0])
cell = cells[89]
ncls = grd.getNeighbors(cell)
self.assertEqual(5, len(ncls))
for nc in ncls:
self.assertTrue(cell.distance(nc) < 1e-09)
self.assertEqual(-182, ncls[3].boundary.coords[0][0])
self.assertEqual(-182, ncls[4].boundary.coords[0][0])
cell = cells[200]
ncls = grd.getNeighbors(cell)
self.assertEqual(8, len(ncls))
for nc in ncls:
self.assertTrue(cell.distance(nc) < 1e-09)
cell = cells[nx * ny - 22]
ncls = grd.getNeighbors(cell)
self.assertEqual(8, len(ncls))
for nc in ncls:
self.assertTrue(cell.distance(nc) < 1e-09)
self.assertEqual(182, ncls[5].boundary.coords[1][0])
self.assertEqual(182, ncls[6].boundary.coords[1][0])
self.assertEqual(182, ncls[6].boundary.coords[1][0])
cell = cells[nx * ny - 1]
ncls = grd.getNeighbors(cell)
self.assertEqual(5, len(ncls))
for nc in ncls:
self.assertTrue(cell.distance(nc) < 1e-09)
self.assertEqual(182, ncls[3].boundary.coords[1][0])
self.assertEqual(182, ncls[4].boundary.coords[1][0])
if __name__ == '__main__':
unittest.main()
<|reserved_special_token_1|>
import unittest
import shapely.geometry as gm
from alphaBetaLab.abRectangularGridBuilder import abRectangularGridBuilder
class testAbRectangularGridBuilder(unittest.TestCase):
def getMockHiResAlphaMtxAndCstCellDet(self, posCellCentroids = None):
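    # Stub standing in for both the hi-res alpha matrix and the coastal-cell detector:
    # a cell is flagged as land / coastal only if it contains one of posCellCentroids.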
class _mockClass:
def __init__(self, posCellCentroids):
self.posCellCentroids = posCellCentroids
self.cell = None
def getAlphaSubMatrix(self, cell):
sm = _mockClass(self.posCellCentroids)
sm.cell = cell
return sm
def _positive(self, cell):
cntrs = self.posCellCentroids
if cell is None or cntrs is None:
return False
else:
for c in cntrs:
if cell.contains(gm.Point([c[0], c[1]])):
return True
return False
def onLand(self):
cell = self.cell
return self._positive(cell)
def isCoastalCell(self, cell, boundary = None, surface = -1):
return self._positive(cell)
return _mockClass(posCellCentroids)
def testGetSeaGridSerial(self):
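    # The mocked land/coast centroids fall inside 3 distinct cells, which are dropped from the sea grid.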
minx = 100.
miny = 45.
dx = .5
dy = 1.
nx = 30
ny = 10
maxx = minx + nx*dx
maxy = miny + ny*dy
landCntrs = [[100.25, 45.25], [105.25, 47.25]]
coastCntrs = [[100.75, 45.25], [105.25, 47.25]]
gb = abRectangularGridBuilder(minx, miny, dx, dy, nx, ny,
nParWorker=1, minXYIsCentroid=False)
hiResMtx = self.getMockHiResAlphaMtxAndCstCellDet(landCntrs)
cstClDet = self.getMockHiResAlphaMtxAndCstCellDet(coastCntrs)
grd = gb.buildGrid(hiResMtx, cstClDet)
self.assertFalse(grd.wrapAroundDateline)
self.assertEqual(1, grd.nParWorker)
cells = grd.cells
self.assertEqual(nx*ny - 3, len(cells))
def testGetSeaGridParallel(self):
minx = 100.
miny = 45.
dx = .5
dy = 1.
nx = 30
ny = 10
maxx = minx + nx*dx
maxy = miny + ny*dy
landCntrs = [[100.25, 45.25], [105.25, 47.25]]
coastCntrs = [[100.75, 45.25], [105.25, 47.25]]
gb = abRectangularGridBuilder(minx, miny, dx, dy, nx, ny, nParWorker=4, minXYIsCentroid=False)
hiResMtx = self.getMockHiResAlphaMtxAndCstCellDet(landCntrs)
cstClDet = self.getMockHiResAlphaMtxAndCstCellDet(coastCntrs)
grd = gb.buildGrid(hiResMtx, cstClDet)
self.assertEqual(4, grd.nParWorker)
self.assertFalse(grd.wrapAroundDateline)
cells = grd.cells
self.assertEqual(nx*ny - 3, len(cells))
def testGetNeighborsSerial(self):
minx = 100.
miny = 45.
dx = .5
dy = 1.
nx = 30
ny = 10
maxx = minx + nx*dx
maxy = miny + ny*dy
gb = abRectangularGridBuilder(minx, miny, dx, dy, nx, ny, nParWorker = 1)
hiResMtx = self.getMockHiResAlphaMtxAndCstCellDet()
cstClDet = self.getMockHiResAlphaMtxAndCstCellDet()
grd = gb.buildGrid(hiResMtx, cstClDet)
self.assertEqual(1, grd.nParWorker)
self.assertFalse(grd.wrapAroundDateline)
cells = grd.cells
cell = cells[0]
ncls = grd.getNeighbors(cell)
self.assertEqual(3, len(ncls))
for nc in ncls:
self.assertTrue( cell.distance(nc) < .000000001 )
cell = cells[45]
ncls = grd.getNeighbors(cell)
self.assertEqual(8, len(ncls))
for nc in ncls:
self.assertTrue( cell.distance(nc) < .000000001 )
cell = cells[100]
ncls = grd.getNeighbors(cell)
self.assertEqual(5, len(ncls))
for nc in ncls:
self.assertTrue( cell.distance(nc) < .000000001 )
def testGetNeighborsParallel(self):
minx = 100.
miny = 45.
dx = .5
dy = 1.
nx = 30
ny = 10
maxx = minx + nx*dx
maxy = miny + ny*dy
gb = abRectangularGridBuilder(minx, miny, dx, dy, nx, ny, nParWorker = 4)
hiResMtx = self.getMockHiResAlphaMtxAndCstCellDet()
cstClDet = self.getMockHiResAlphaMtxAndCstCellDet()
grd = gb.buildGrid(hiResMtx, cstClDet)
self.assertEqual(4, grd.nParWorker)
self.assertFalse(grd.wrapAroundDateline)
cells = grd.cells
cell = cells[0]
ncls = grd.getNeighbors(cell)
self.assertEqual(3, len(ncls))
for nc in ncls:
self.assertTrue( cell.distance(nc) < .000000001 )
cell = cells[45]
ncls = grd.getNeighbors(cell)
self.assertEqual(8, len(ncls))
for nc in ncls:
self.assertTrue( cell.distance(nc) < .000000001 )
cell = cells[100]
ncls = grd.getNeighbors(cell)
self.assertEqual(5, len(ncls))
for nc in ncls:
self.assertTrue( cell.distance(nc) < .000000001 )
def testGetNeighborsWrapAroundGlobalParallel(self):
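    # Global grid crossing the dateline: neighbors wrap around, so mirrored cells show up at x = -182 / +182 (shifted by 360 degrees).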
minx = -179.
miny = -89.
dx = 2
dy = 2
nx = 180
ny = 90
maxx = minx + nx*dx
maxy = miny + ny*dy
gb = abRectangularGridBuilder(minx, miny, dx, dy, nx, ny, nParWorker = 4)
hiResMtx = self.getMockHiResAlphaMtxAndCstCellDet()
cstClDet = self.getMockHiResAlphaMtxAndCstCellDet()
grd = gb.buildGrid(hiResMtx, cstClDet)
self.assertTrue(grd.wrapAroundDateline)
cells = grd.cells
cell = cells[0]
ncls = grd.getNeighbors(cell)
self.assertEqual(5, len(ncls))
for nc in ncls:
self.assertTrue( cell.distance(nc) < .000000001 )
self.assertEqual(-182, ncls[3].boundary.coords[0][0])
self.assertEqual(-182, ncls[4].boundary.coords[0][0])
cell = cells[45]
ncls = grd.getNeighbors(cell)
self.assertEqual(8, len(ncls))
for nc in ncls:
self.assertTrue( cell.distance(nc) < .000000001 )
self.assertEqual(-182, ncls[5].boundary.coords[0][0])
self.assertEqual(-182, ncls[6].boundary.coords[0][0])
self.assertEqual(-182, ncls[6].boundary.coords[0][0])
cell = cells[65]
ncls = grd.getNeighbors(cell)
self.assertEqual(8, len(ncls))
for nc in ncls:
self.assertTrue( cell.distance(nc) < .000000001 )
self.assertEqual(-182, ncls[5].boundary.coords[0][0])
self.assertEqual(-182, ncls[6].boundary.coords[0][0])
self.assertEqual(-182, ncls[6].boundary.coords[0][0])
cell = cells[89]
ncls = grd.getNeighbors(cell)
self.assertEqual(5, len(ncls))
for nc in ncls:
self.assertTrue( cell.distance(nc) < .000000001 )
self.assertEqual(-182, ncls[3].boundary.coords[0][0])
self.assertEqual(-182, ncls[4].boundary.coords[0][0])
cell = cells[200]
ncls = grd.getNeighbors(cell)
self.assertEqual(8, len(ncls))
for nc in ncls:
self.assertTrue( cell.distance(nc) < .000000001 )
cell = cells[nx*ny-22]
ncls = grd.getNeighbors(cell)
self.assertEqual(8, len(ncls))
for nc in ncls:
self.assertTrue( cell.distance(nc) < .000000001 )
self.assertEqual(182, ncls[5].boundary.coords[1][0])
self.assertEqual(182, ncls[6].boundary.coords[1][0])
self.assertEqual(182, ncls[6].boundary.coords[1][0])
cell = cells[nx*ny-1]
ncls = grd.getNeighbors(cell)
self.assertEqual(5, len(ncls))
for nc in ncls:
self.assertTrue( cell.distance(nc) < .000000001 )
self.assertEqual(182, ncls[3].boundary.coords[1][0])
self.assertEqual(182, ncls[4].boundary.coords[1][0])
if __name__ == '__main__':
unittest.main()
|
flexible
|
{
"blob_id": "6175ce6534d44d703df6cdef94fc2b1285e25f49",
"index": 2202,
"step-1": "<mask token>\n\n\nclass testAbRectangularGridBuilder(unittest.TestCase):\n\n def getMockHiResAlphaMtxAndCstCellDet(self, posCellCentroids=None):\n\n\n class _mockClass:\n\n def __init__(self, posCellCentroids):\n self.posCellCentroids = posCellCentroids\n self.cell = None\n\n def getAlphaSubMatrix(self, cell):\n sm = _mockClass(self.posCellCentroids)\n sm.cell = cell\n return sm\n\n def _positive(self, cell):\n cntrs = self.posCellCentroids\n if cell is None or cntrs is None:\n return False\n else:\n for c in cntrs:\n if cell.contains(gm.Point([c[0], c[1]])):\n return True\n return False\n\n def onLand(self):\n cell = self.cell\n return self._positive(cell)\n\n def isCoastalCell(self, cell, boundary=None, surface=-1):\n return self._positive(cell)\n return _mockClass(posCellCentroids)\n\n def testGetSeaGridSerial(self):\n minx = 100.0\n miny = 45.0\n dx = 0.5\n dy = 1.0\n nx = 30\n ny = 10\n maxx = minx + nx * dx\n maxy = miny + ny * dy\n landCntrs = [[100.25, 45.25], [105.25, 47.25]]\n coastCntrs = [[100.75, 45.25], [105.25, 47.25]]\n gb = abRectangularGridBuilder(minx, miny, dx, dy, nx, ny,\n nParWorker=1, minXYIsCentroid=False)\n hiResMtx = self.getMockHiResAlphaMtxAndCstCellDet(landCntrs)\n cstClDet = self.getMockHiResAlphaMtxAndCstCellDet(coastCntrs)\n grd = gb.buildGrid(hiResMtx, cstClDet)\n self.assertFalse(grd.wrapAroundDateline)\n self.assertEqual(1, grd.nParWorker)\n cells = grd.cells\n self.assertEqual(nx * ny - 3, len(cells))\n <mask token>\n\n def testGetNeighborsSerial(self):\n minx = 100.0\n miny = 45.0\n dx = 0.5\n dy = 1.0\n nx = 30\n ny = 10\n maxx = minx + nx * dx\n maxy = miny + ny * dy\n gb = abRectangularGridBuilder(minx, miny, dx, dy, nx, ny, nParWorker=1)\n hiResMtx = self.getMockHiResAlphaMtxAndCstCellDet()\n cstClDet = self.getMockHiResAlphaMtxAndCstCellDet()\n grd = gb.buildGrid(hiResMtx, cstClDet)\n self.assertEqual(1, grd.nParWorker)\n self.assertFalse(grd.wrapAroundDateline)\n cells = grd.cells\n cell = cells[0]\n ncls = grd.getNeighbors(cell)\n self.assertEqual(3, len(ncls))\n for nc in ncls:\n self.assertTrue(cell.distance(nc) < 1e-09)\n cell = cells[45]\n ncls = grd.getNeighbors(cell)\n self.assertEqual(8, len(ncls))\n for nc in ncls:\n self.assertTrue(cell.distance(nc) < 1e-09)\n cell = cells[100]\n ncls = grd.getNeighbors(cell)\n self.assertEqual(5, len(ncls))\n for nc in ncls:\n self.assertTrue(cell.distance(nc) < 1e-09)\n\n def testGetNeighborsParallel(self):\n minx = 100.0\n miny = 45.0\n dx = 0.5\n dy = 1.0\n nx = 30\n ny = 10\n maxx = minx + nx * dx\n maxy = miny + ny * dy\n gb = abRectangularGridBuilder(minx, miny, dx, dy, nx, ny, nParWorker=4)\n hiResMtx = self.getMockHiResAlphaMtxAndCstCellDet()\n cstClDet = self.getMockHiResAlphaMtxAndCstCellDet()\n grd = gb.buildGrid(hiResMtx, cstClDet)\n self.assertEqual(4, grd.nParWorker)\n self.assertFalse(grd.wrapAroundDateline)\n cells = grd.cells\n cell = cells[0]\n ncls = grd.getNeighbors(cell)\n self.assertEqual(3, len(ncls))\n for nc in ncls:\n self.assertTrue(cell.distance(nc) < 1e-09)\n cell = cells[45]\n ncls = grd.getNeighbors(cell)\n self.assertEqual(8, len(ncls))\n for nc in ncls:\n self.assertTrue(cell.distance(nc) < 1e-09)\n cell = cells[100]\n ncls = grd.getNeighbors(cell)\n self.assertEqual(5, len(ncls))\n for nc in ncls:\n self.assertTrue(cell.distance(nc) < 1e-09)\n\n def testGetNeighborsWrapAroundGlobalParallel(self):\n minx = -179.0\n miny = -89.0\n dx = 2\n dy = 2\n nx = 180\n ny = 90\n maxx = minx + nx * dx\n maxy = miny + ny * dy\n gb = abRectangularGridBuilder(minx, miny, 
dx, dy, nx, ny, nParWorker=4)\n hiResMtx = self.getMockHiResAlphaMtxAndCstCellDet()\n cstClDet = self.getMockHiResAlphaMtxAndCstCellDet()\n grd = gb.buildGrid(hiResMtx, cstClDet)\n self.assertTrue(grd.wrapAroundDateline)\n cells = grd.cells\n cell = cells[0]\n ncls = grd.getNeighbors(cell)\n self.assertEqual(5, len(ncls))\n for nc in ncls:\n self.assertTrue(cell.distance(nc) < 1e-09)\n self.assertEqual(-182, ncls[3].boundary.coords[0][0])\n self.assertEqual(-182, ncls[4].boundary.coords[0][0])\n cell = cells[45]\n ncls = grd.getNeighbors(cell)\n self.assertEqual(8, len(ncls))\n for nc in ncls:\n self.assertTrue(cell.distance(nc) < 1e-09)\n self.assertEqual(-182, ncls[5].boundary.coords[0][0])\n self.assertEqual(-182, ncls[6].boundary.coords[0][0])\n self.assertEqual(-182, ncls[6].boundary.coords[0][0])\n cell = cells[65]\n ncls = grd.getNeighbors(cell)\n self.assertEqual(8, len(ncls))\n for nc in ncls:\n self.assertTrue(cell.distance(nc) < 1e-09)\n self.assertEqual(-182, ncls[5].boundary.coords[0][0])\n self.assertEqual(-182, ncls[6].boundary.coords[0][0])\n self.assertEqual(-182, ncls[6].boundary.coords[0][0])\n cell = cells[89]\n ncls = grd.getNeighbors(cell)\n self.assertEqual(5, len(ncls))\n for nc in ncls:\n self.assertTrue(cell.distance(nc) < 1e-09)\n self.assertEqual(-182, ncls[3].boundary.coords[0][0])\n self.assertEqual(-182, ncls[4].boundary.coords[0][0])\n cell = cells[200]\n ncls = grd.getNeighbors(cell)\n self.assertEqual(8, len(ncls))\n for nc in ncls:\n self.assertTrue(cell.distance(nc) < 1e-09)\n cell = cells[nx * ny - 22]\n ncls = grd.getNeighbors(cell)\n self.assertEqual(8, len(ncls))\n for nc in ncls:\n self.assertTrue(cell.distance(nc) < 1e-09)\n self.assertEqual(182, ncls[5].boundary.coords[1][0])\n self.assertEqual(182, ncls[6].boundary.coords[1][0])\n self.assertEqual(182, ncls[6].boundary.coords[1][0])\n cell = cells[nx * ny - 1]\n ncls = grd.getNeighbors(cell)\n self.assertEqual(5, len(ncls))\n for nc in ncls:\n self.assertTrue(cell.distance(nc) < 1e-09)\n self.assertEqual(182, ncls[3].boundary.coords[1][0])\n self.assertEqual(182, ncls[4].boundary.coords[1][0])\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass testAbRectangularGridBuilder(unittest.TestCase):\n\n def getMockHiResAlphaMtxAndCstCellDet(self, posCellCentroids=None):\n\n\n class _mockClass:\n\n def __init__(self, posCellCentroids):\n self.posCellCentroids = posCellCentroids\n self.cell = None\n\n def getAlphaSubMatrix(self, cell):\n sm = _mockClass(self.posCellCentroids)\n sm.cell = cell\n return sm\n\n def _positive(self, cell):\n cntrs = self.posCellCentroids\n if cell is None or cntrs is None:\n return False\n else:\n for c in cntrs:\n if cell.contains(gm.Point([c[0], c[1]])):\n return True\n return False\n\n def onLand(self):\n cell = self.cell\n return self._positive(cell)\n\n def isCoastalCell(self, cell, boundary=None, surface=-1):\n return self._positive(cell)\n return _mockClass(posCellCentroids)\n\n def testGetSeaGridSerial(self):\n minx = 100.0\n miny = 45.0\n dx = 0.5\n dy = 1.0\n nx = 30\n ny = 10\n maxx = minx + nx * dx\n maxy = miny + ny * dy\n landCntrs = [[100.25, 45.25], [105.25, 47.25]]\n coastCntrs = [[100.75, 45.25], [105.25, 47.25]]\n gb = abRectangularGridBuilder(minx, miny, dx, dy, nx, ny,\n nParWorker=1, minXYIsCentroid=False)\n hiResMtx = self.getMockHiResAlphaMtxAndCstCellDet(landCntrs)\n cstClDet = self.getMockHiResAlphaMtxAndCstCellDet(coastCntrs)\n grd = gb.buildGrid(hiResMtx, cstClDet)\n self.assertFalse(grd.wrapAroundDateline)\n self.assertEqual(1, grd.nParWorker)\n cells = grd.cells\n self.assertEqual(nx * ny - 3, len(cells))\n\n def testGetSeaGridParallel(self):\n minx = 100.0\n miny = 45.0\n dx = 0.5\n dy = 1.0\n nx = 30\n ny = 10\n maxx = minx + nx * dx\n maxy = miny + ny * dy\n landCntrs = [[100.25, 45.25], [105.25, 47.25]]\n coastCntrs = [[100.75, 45.25], [105.25, 47.25]]\n gb = abRectangularGridBuilder(minx, miny, dx, dy, nx, ny,\n nParWorker=4, minXYIsCentroid=False)\n hiResMtx = self.getMockHiResAlphaMtxAndCstCellDet(landCntrs)\n cstClDet = self.getMockHiResAlphaMtxAndCstCellDet(coastCntrs)\n grd = gb.buildGrid(hiResMtx, cstClDet)\n self.assertEqual(4, grd.nParWorker)\n self.assertFalse(grd.wrapAroundDateline)\n cells = grd.cells\n self.assertEqual(nx * ny - 3, len(cells))\n\n def testGetNeighborsSerial(self):\n minx = 100.0\n miny = 45.0\n dx = 0.5\n dy = 1.0\n nx = 30\n ny = 10\n maxx = minx + nx * dx\n maxy = miny + ny * dy\n gb = abRectangularGridBuilder(minx, miny, dx, dy, nx, ny, nParWorker=1)\n hiResMtx = self.getMockHiResAlphaMtxAndCstCellDet()\n cstClDet = self.getMockHiResAlphaMtxAndCstCellDet()\n grd = gb.buildGrid(hiResMtx, cstClDet)\n self.assertEqual(1, grd.nParWorker)\n self.assertFalse(grd.wrapAroundDateline)\n cells = grd.cells\n cell = cells[0]\n ncls = grd.getNeighbors(cell)\n self.assertEqual(3, len(ncls))\n for nc in ncls:\n self.assertTrue(cell.distance(nc) < 1e-09)\n cell = cells[45]\n ncls = grd.getNeighbors(cell)\n self.assertEqual(8, len(ncls))\n for nc in ncls:\n self.assertTrue(cell.distance(nc) < 1e-09)\n cell = cells[100]\n ncls = grd.getNeighbors(cell)\n self.assertEqual(5, len(ncls))\n for nc in ncls:\n self.assertTrue(cell.distance(nc) < 1e-09)\n\n def testGetNeighborsParallel(self):\n minx = 100.0\n miny = 45.0\n dx = 0.5\n dy = 1.0\n nx = 30\n ny = 10\n maxx = minx + nx * dx\n maxy = miny + ny * dy\n gb = abRectangularGridBuilder(minx, miny, dx, dy, nx, ny, nParWorker=4)\n hiResMtx = self.getMockHiResAlphaMtxAndCstCellDet()\n cstClDet = self.getMockHiResAlphaMtxAndCstCellDet()\n grd = gb.buildGrid(hiResMtx, cstClDet)\n self.assertEqual(4, grd.nParWorker)\n self.assertFalse(grd.wrapAroundDateline)\n cells = grd.cells\n cell = 
cells[0]\n ncls = grd.getNeighbors(cell)\n self.assertEqual(3, len(ncls))\n for nc in ncls:\n self.assertTrue(cell.distance(nc) < 1e-09)\n cell = cells[45]\n ncls = grd.getNeighbors(cell)\n self.assertEqual(8, len(ncls))\n for nc in ncls:\n self.assertTrue(cell.distance(nc) < 1e-09)\n cell = cells[100]\n ncls = grd.getNeighbors(cell)\n self.assertEqual(5, len(ncls))\n for nc in ncls:\n self.assertTrue(cell.distance(nc) < 1e-09)\n\n def testGetNeighborsWrapAroundGlobalParallel(self):\n minx = -179.0\n miny = -89.0\n dx = 2\n dy = 2\n nx = 180\n ny = 90\n maxx = minx + nx * dx\n maxy = miny + ny * dy\n gb = abRectangularGridBuilder(minx, miny, dx, dy, nx, ny, nParWorker=4)\n hiResMtx = self.getMockHiResAlphaMtxAndCstCellDet()\n cstClDet = self.getMockHiResAlphaMtxAndCstCellDet()\n grd = gb.buildGrid(hiResMtx, cstClDet)\n self.assertTrue(grd.wrapAroundDateline)\n cells = grd.cells\n cell = cells[0]\n ncls = grd.getNeighbors(cell)\n self.assertEqual(5, len(ncls))\n for nc in ncls:\n self.assertTrue(cell.distance(nc) < 1e-09)\n self.assertEqual(-182, ncls[3].boundary.coords[0][0])\n self.assertEqual(-182, ncls[4].boundary.coords[0][0])\n cell = cells[45]\n ncls = grd.getNeighbors(cell)\n self.assertEqual(8, len(ncls))\n for nc in ncls:\n self.assertTrue(cell.distance(nc) < 1e-09)\n self.assertEqual(-182, ncls[5].boundary.coords[0][0])\n self.assertEqual(-182, ncls[6].boundary.coords[0][0])\n self.assertEqual(-182, ncls[6].boundary.coords[0][0])\n cell = cells[65]\n ncls = grd.getNeighbors(cell)\n self.assertEqual(8, len(ncls))\n for nc in ncls:\n self.assertTrue(cell.distance(nc) < 1e-09)\n self.assertEqual(-182, ncls[5].boundary.coords[0][0])\n self.assertEqual(-182, ncls[6].boundary.coords[0][0])\n self.assertEqual(-182, ncls[6].boundary.coords[0][0])\n cell = cells[89]\n ncls = grd.getNeighbors(cell)\n self.assertEqual(5, len(ncls))\n for nc in ncls:\n self.assertTrue(cell.distance(nc) < 1e-09)\n self.assertEqual(-182, ncls[3].boundary.coords[0][0])\n self.assertEqual(-182, ncls[4].boundary.coords[0][0])\n cell = cells[200]\n ncls = grd.getNeighbors(cell)\n self.assertEqual(8, len(ncls))\n for nc in ncls:\n self.assertTrue(cell.distance(nc) < 1e-09)\n cell = cells[nx * ny - 22]\n ncls = grd.getNeighbors(cell)\n self.assertEqual(8, len(ncls))\n for nc in ncls:\n self.assertTrue(cell.distance(nc) < 1e-09)\n self.assertEqual(182, ncls[5].boundary.coords[1][0])\n self.assertEqual(182, ncls[6].boundary.coords[1][0])\n self.assertEqual(182, ncls[6].boundary.coords[1][0])\n cell = cells[nx * ny - 1]\n ncls = grd.getNeighbors(cell)\n self.assertEqual(5, len(ncls))\n for nc in ncls:\n self.assertTrue(cell.distance(nc) < 1e-09)\n self.assertEqual(182, ncls[3].boundary.coords[1][0])\n self.assertEqual(182, ncls[4].boundary.coords[1][0])\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass testAbRectangularGridBuilder(unittest.TestCase):\n\n def getMockHiResAlphaMtxAndCstCellDet(self, posCellCentroids=None):\n\n\n class _mockClass:\n\n def __init__(self, posCellCentroids):\n self.posCellCentroids = posCellCentroids\n self.cell = None\n\n def getAlphaSubMatrix(self, cell):\n sm = _mockClass(self.posCellCentroids)\n sm.cell = cell\n return sm\n\n def _positive(self, cell):\n cntrs = self.posCellCentroids\n if cell is None or cntrs is None:\n return False\n else:\n for c in cntrs:\n if cell.contains(gm.Point([c[0], c[1]])):\n return True\n return False\n\n def onLand(self):\n cell = self.cell\n return self._positive(cell)\n\n def isCoastalCell(self, cell, boundary=None, surface=-1):\n return self._positive(cell)\n return _mockClass(posCellCentroids)\n\n def testGetSeaGridSerial(self):\n minx = 100.0\n miny = 45.0\n dx = 0.5\n dy = 1.0\n nx = 30\n ny = 10\n maxx = minx + nx * dx\n maxy = miny + ny * dy\n landCntrs = [[100.25, 45.25], [105.25, 47.25]]\n coastCntrs = [[100.75, 45.25], [105.25, 47.25]]\n gb = abRectangularGridBuilder(minx, miny, dx, dy, nx, ny,\n nParWorker=1, minXYIsCentroid=False)\n hiResMtx = self.getMockHiResAlphaMtxAndCstCellDet(landCntrs)\n cstClDet = self.getMockHiResAlphaMtxAndCstCellDet(coastCntrs)\n grd = gb.buildGrid(hiResMtx, cstClDet)\n self.assertFalse(grd.wrapAroundDateline)\n self.assertEqual(1, grd.nParWorker)\n cells = grd.cells\n self.assertEqual(nx * ny - 3, len(cells))\n\n def testGetSeaGridParallel(self):\n minx = 100.0\n miny = 45.0\n dx = 0.5\n dy = 1.0\n nx = 30\n ny = 10\n maxx = minx + nx * dx\n maxy = miny + ny * dy\n landCntrs = [[100.25, 45.25], [105.25, 47.25]]\n coastCntrs = [[100.75, 45.25], [105.25, 47.25]]\n gb = abRectangularGridBuilder(minx, miny, dx, dy, nx, ny,\n nParWorker=4, minXYIsCentroid=False)\n hiResMtx = self.getMockHiResAlphaMtxAndCstCellDet(landCntrs)\n cstClDet = self.getMockHiResAlphaMtxAndCstCellDet(coastCntrs)\n grd = gb.buildGrid(hiResMtx, cstClDet)\n self.assertEqual(4, grd.nParWorker)\n self.assertFalse(grd.wrapAroundDateline)\n cells = grd.cells\n self.assertEqual(nx * ny - 3, len(cells))\n\n def testGetNeighborsSerial(self):\n minx = 100.0\n miny = 45.0\n dx = 0.5\n dy = 1.0\n nx = 30\n ny = 10\n maxx = minx + nx * dx\n maxy = miny + ny * dy\n gb = abRectangularGridBuilder(minx, miny, dx, dy, nx, ny, nParWorker=1)\n hiResMtx = self.getMockHiResAlphaMtxAndCstCellDet()\n cstClDet = self.getMockHiResAlphaMtxAndCstCellDet()\n grd = gb.buildGrid(hiResMtx, cstClDet)\n self.assertEqual(1, grd.nParWorker)\n self.assertFalse(grd.wrapAroundDateline)\n cells = grd.cells\n cell = cells[0]\n ncls = grd.getNeighbors(cell)\n self.assertEqual(3, len(ncls))\n for nc in ncls:\n self.assertTrue(cell.distance(nc) < 1e-09)\n cell = cells[45]\n ncls = grd.getNeighbors(cell)\n self.assertEqual(8, len(ncls))\n for nc in ncls:\n self.assertTrue(cell.distance(nc) < 1e-09)\n cell = cells[100]\n ncls = grd.getNeighbors(cell)\n self.assertEqual(5, len(ncls))\n for nc in ncls:\n self.assertTrue(cell.distance(nc) < 1e-09)\n\n def testGetNeighborsParallel(self):\n minx = 100.0\n miny = 45.0\n dx = 0.5\n dy = 1.0\n nx = 30\n ny = 10\n maxx = minx + nx * dx\n maxy = miny + ny * dy\n gb = abRectangularGridBuilder(minx, miny, dx, dy, nx, ny, nParWorker=4)\n hiResMtx = self.getMockHiResAlphaMtxAndCstCellDet()\n cstClDet = self.getMockHiResAlphaMtxAndCstCellDet()\n grd = gb.buildGrid(hiResMtx, cstClDet)\n self.assertEqual(4, grd.nParWorker)\n self.assertFalse(grd.wrapAroundDateline)\n cells = grd.cells\n cell = 
cells[0]\n ncls = grd.getNeighbors(cell)\n self.assertEqual(3, len(ncls))\n for nc in ncls:\n self.assertTrue(cell.distance(nc) < 1e-09)\n cell = cells[45]\n ncls = grd.getNeighbors(cell)\n self.assertEqual(8, len(ncls))\n for nc in ncls:\n self.assertTrue(cell.distance(nc) < 1e-09)\n cell = cells[100]\n ncls = grd.getNeighbors(cell)\n self.assertEqual(5, len(ncls))\n for nc in ncls:\n self.assertTrue(cell.distance(nc) < 1e-09)\n\n def testGetNeighborsWrapAroundGlobalParallel(self):\n minx = -179.0\n miny = -89.0\n dx = 2\n dy = 2\n nx = 180\n ny = 90\n maxx = minx + nx * dx\n maxy = miny + ny * dy\n gb = abRectangularGridBuilder(minx, miny, dx, dy, nx, ny, nParWorker=4)\n hiResMtx = self.getMockHiResAlphaMtxAndCstCellDet()\n cstClDet = self.getMockHiResAlphaMtxAndCstCellDet()\n grd = gb.buildGrid(hiResMtx, cstClDet)\n self.assertTrue(grd.wrapAroundDateline)\n cells = grd.cells\n cell = cells[0]\n ncls = grd.getNeighbors(cell)\n self.assertEqual(5, len(ncls))\n for nc in ncls:\n self.assertTrue(cell.distance(nc) < 1e-09)\n self.assertEqual(-182, ncls[3].boundary.coords[0][0])\n self.assertEqual(-182, ncls[4].boundary.coords[0][0])\n cell = cells[45]\n ncls = grd.getNeighbors(cell)\n self.assertEqual(8, len(ncls))\n for nc in ncls:\n self.assertTrue(cell.distance(nc) < 1e-09)\n self.assertEqual(-182, ncls[5].boundary.coords[0][0])\n self.assertEqual(-182, ncls[6].boundary.coords[0][0])\n self.assertEqual(-182, ncls[6].boundary.coords[0][0])\n cell = cells[65]\n ncls = grd.getNeighbors(cell)\n self.assertEqual(8, len(ncls))\n for nc in ncls:\n self.assertTrue(cell.distance(nc) < 1e-09)\n self.assertEqual(-182, ncls[5].boundary.coords[0][0])\n self.assertEqual(-182, ncls[6].boundary.coords[0][0])\n self.assertEqual(-182, ncls[6].boundary.coords[0][0])\n cell = cells[89]\n ncls = grd.getNeighbors(cell)\n self.assertEqual(5, len(ncls))\n for nc in ncls:\n self.assertTrue(cell.distance(nc) < 1e-09)\n self.assertEqual(-182, ncls[3].boundary.coords[0][0])\n self.assertEqual(-182, ncls[4].boundary.coords[0][0])\n cell = cells[200]\n ncls = grd.getNeighbors(cell)\n self.assertEqual(8, len(ncls))\n for nc in ncls:\n self.assertTrue(cell.distance(nc) < 1e-09)\n cell = cells[nx * ny - 22]\n ncls = grd.getNeighbors(cell)\n self.assertEqual(8, len(ncls))\n for nc in ncls:\n self.assertTrue(cell.distance(nc) < 1e-09)\n self.assertEqual(182, ncls[5].boundary.coords[1][0])\n self.assertEqual(182, ncls[6].boundary.coords[1][0])\n self.assertEqual(182, ncls[6].boundary.coords[1][0])\n cell = cells[nx * ny - 1]\n ncls = grd.getNeighbors(cell)\n self.assertEqual(5, len(ncls))\n for nc in ncls:\n self.assertTrue(cell.distance(nc) < 1e-09)\n self.assertEqual(182, ncls[3].boundary.coords[1][0])\n self.assertEqual(182, ncls[4].boundary.coords[1][0])\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"step-4": "import unittest\nimport shapely.geometry as gm\nfrom alphaBetaLab.abRectangularGridBuilder import abRectangularGridBuilder\n\n\nclass testAbRectangularGridBuilder(unittest.TestCase):\n\n def getMockHiResAlphaMtxAndCstCellDet(self, posCellCentroids=None):\n\n\n class _mockClass:\n\n def __init__(self, posCellCentroids):\n self.posCellCentroids = posCellCentroids\n self.cell = None\n\n def getAlphaSubMatrix(self, cell):\n sm = _mockClass(self.posCellCentroids)\n sm.cell = cell\n return sm\n\n def _positive(self, cell):\n cntrs = self.posCellCentroids\n if cell is None or cntrs is None:\n return False\n else:\n for c in cntrs:\n if cell.contains(gm.Point([c[0], c[1]])):\n return True\n return False\n\n def onLand(self):\n cell = self.cell\n return self._positive(cell)\n\n def isCoastalCell(self, cell, boundary=None, surface=-1):\n return self._positive(cell)\n return _mockClass(posCellCentroids)\n\n def testGetSeaGridSerial(self):\n minx = 100.0\n miny = 45.0\n dx = 0.5\n dy = 1.0\n nx = 30\n ny = 10\n maxx = minx + nx * dx\n maxy = miny + ny * dy\n landCntrs = [[100.25, 45.25], [105.25, 47.25]]\n coastCntrs = [[100.75, 45.25], [105.25, 47.25]]\n gb = abRectangularGridBuilder(minx, miny, dx, dy, nx, ny,\n nParWorker=1, minXYIsCentroid=False)\n hiResMtx = self.getMockHiResAlphaMtxAndCstCellDet(landCntrs)\n cstClDet = self.getMockHiResAlphaMtxAndCstCellDet(coastCntrs)\n grd = gb.buildGrid(hiResMtx, cstClDet)\n self.assertFalse(grd.wrapAroundDateline)\n self.assertEqual(1, grd.nParWorker)\n cells = grd.cells\n self.assertEqual(nx * ny - 3, len(cells))\n\n def testGetSeaGridParallel(self):\n minx = 100.0\n miny = 45.0\n dx = 0.5\n dy = 1.0\n nx = 30\n ny = 10\n maxx = minx + nx * dx\n maxy = miny + ny * dy\n landCntrs = [[100.25, 45.25], [105.25, 47.25]]\n coastCntrs = [[100.75, 45.25], [105.25, 47.25]]\n gb = abRectangularGridBuilder(minx, miny, dx, dy, nx, ny,\n nParWorker=4, minXYIsCentroid=False)\n hiResMtx = self.getMockHiResAlphaMtxAndCstCellDet(landCntrs)\n cstClDet = self.getMockHiResAlphaMtxAndCstCellDet(coastCntrs)\n grd = gb.buildGrid(hiResMtx, cstClDet)\n self.assertEqual(4, grd.nParWorker)\n self.assertFalse(grd.wrapAroundDateline)\n cells = grd.cells\n self.assertEqual(nx * ny - 3, len(cells))\n\n def testGetNeighborsSerial(self):\n minx = 100.0\n miny = 45.0\n dx = 0.5\n dy = 1.0\n nx = 30\n ny = 10\n maxx = minx + nx * dx\n maxy = miny + ny * dy\n gb = abRectangularGridBuilder(minx, miny, dx, dy, nx, ny, nParWorker=1)\n hiResMtx = self.getMockHiResAlphaMtxAndCstCellDet()\n cstClDet = self.getMockHiResAlphaMtxAndCstCellDet()\n grd = gb.buildGrid(hiResMtx, cstClDet)\n self.assertEqual(1, grd.nParWorker)\n self.assertFalse(grd.wrapAroundDateline)\n cells = grd.cells\n cell = cells[0]\n ncls = grd.getNeighbors(cell)\n self.assertEqual(3, len(ncls))\n for nc in ncls:\n self.assertTrue(cell.distance(nc) < 1e-09)\n cell = cells[45]\n ncls = grd.getNeighbors(cell)\n self.assertEqual(8, len(ncls))\n for nc in ncls:\n self.assertTrue(cell.distance(nc) < 1e-09)\n cell = cells[100]\n ncls = grd.getNeighbors(cell)\n self.assertEqual(5, len(ncls))\n for nc in ncls:\n self.assertTrue(cell.distance(nc) < 1e-09)\n\n def testGetNeighborsParallel(self):\n minx = 100.0\n miny = 45.0\n dx = 0.5\n dy = 1.0\n nx = 30\n ny = 10\n maxx = minx + nx * dx\n maxy = miny + ny * dy\n gb = abRectangularGridBuilder(minx, miny, dx, dy, nx, ny, nParWorker=4)\n hiResMtx = self.getMockHiResAlphaMtxAndCstCellDet()\n cstClDet = self.getMockHiResAlphaMtxAndCstCellDet()\n grd = gb.buildGrid(hiResMtx, 
cstClDet)\n self.assertEqual(4, grd.nParWorker)\n self.assertFalse(grd.wrapAroundDateline)\n cells = grd.cells\n cell = cells[0]\n ncls = grd.getNeighbors(cell)\n self.assertEqual(3, len(ncls))\n for nc in ncls:\n self.assertTrue(cell.distance(nc) < 1e-09)\n cell = cells[45]\n ncls = grd.getNeighbors(cell)\n self.assertEqual(8, len(ncls))\n for nc in ncls:\n self.assertTrue(cell.distance(nc) < 1e-09)\n cell = cells[100]\n ncls = grd.getNeighbors(cell)\n self.assertEqual(5, len(ncls))\n for nc in ncls:\n self.assertTrue(cell.distance(nc) < 1e-09)\n\n def testGetNeighborsWrapAroundGlobalParallel(self):\n minx = -179.0\n miny = -89.0\n dx = 2\n dy = 2\n nx = 180\n ny = 90\n maxx = minx + nx * dx\n maxy = miny + ny * dy\n gb = abRectangularGridBuilder(minx, miny, dx, dy, nx, ny, nParWorker=4)\n hiResMtx = self.getMockHiResAlphaMtxAndCstCellDet()\n cstClDet = self.getMockHiResAlphaMtxAndCstCellDet()\n grd = gb.buildGrid(hiResMtx, cstClDet)\n self.assertTrue(grd.wrapAroundDateline)\n cells = grd.cells\n cell = cells[0]\n ncls = grd.getNeighbors(cell)\n self.assertEqual(5, len(ncls))\n for nc in ncls:\n self.assertTrue(cell.distance(nc) < 1e-09)\n self.assertEqual(-182, ncls[3].boundary.coords[0][0])\n self.assertEqual(-182, ncls[4].boundary.coords[0][0])\n cell = cells[45]\n ncls = grd.getNeighbors(cell)\n self.assertEqual(8, len(ncls))\n for nc in ncls:\n self.assertTrue(cell.distance(nc) < 1e-09)\n self.assertEqual(-182, ncls[5].boundary.coords[0][0])\n self.assertEqual(-182, ncls[6].boundary.coords[0][0])\n self.assertEqual(-182, ncls[6].boundary.coords[0][0])\n cell = cells[65]\n ncls = grd.getNeighbors(cell)\n self.assertEqual(8, len(ncls))\n for nc in ncls:\n self.assertTrue(cell.distance(nc) < 1e-09)\n self.assertEqual(-182, ncls[5].boundary.coords[0][0])\n self.assertEqual(-182, ncls[6].boundary.coords[0][0])\n self.assertEqual(-182, ncls[6].boundary.coords[0][0])\n cell = cells[89]\n ncls = grd.getNeighbors(cell)\n self.assertEqual(5, len(ncls))\n for nc in ncls:\n self.assertTrue(cell.distance(nc) < 1e-09)\n self.assertEqual(-182, ncls[3].boundary.coords[0][0])\n self.assertEqual(-182, ncls[4].boundary.coords[0][0])\n cell = cells[200]\n ncls = grd.getNeighbors(cell)\n self.assertEqual(8, len(ncls))\n for nc in ncls:\n self.assertTrue(cell.distance(nc) < 1e-09)\n cell = cells[nx * ny - 22]\n ncls = grd.getNeighbors(cell)\n self.assertEqual(8, len(ncls))\n for nc in ncls:\n self.assertTrue(cell.distance(nc) < 1e-09)\n self.assertEqual(182, ncls[5].boundary.coords[1][0])\n self.assertEqual(182, ncls[6].boundary.coords[1][0])\n self.assertEqual(182, ncls[6].boundary.coords[1][0])\n cell = cells[nx * ny - 1]\n ncls = grd.getNeighbors(cell)\n self.assertEqual(5, len(ncls))\n for nc in ncls:\n self.assertTrue(cell.distance(nc) < 1e-09)\n self.assertEqual(182, ncls[3].boundary.coords[1][0])\n self.assertEqual(182, ncls[4].boundary.coords[1][0])\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"step-5": "import unittest\nimport shapely.geometry as gm\n\nfrom alphaBetaLab.abRectangularGridBuilder import abRectangularGridBuilder\n\nclass testAbRectangularGridBuilder(unittest.TestCase):\n\n def getMockHiResAlphaMtxAndCstCellDet(self, posCellCentroids = None):\n class _mockClass:\n def __init__(self, posCellCentroids):\n self.posCellCentroids = posCellCentroids\n self.cell = None\n def getAlphaSubMatrix(self, cell):\n sm = _mockClass(self.posCellCentroids)\n sm.cell = cell\n return sm\n def _positive(self, cell):\n cntrs = self.posCellCentroids\n if cell is None or cntrs is None:\n return False\n else:\n for c in cntrs:\n if cell.contains(gm.Point([c[0], c[1]])):\n return True\n return False\n def onLand(self):\n cell = self.cell\n return self._positive(cell)\n def isCoastalCell(self, cell, boundary = None, surface = -1):\n return self._positive(cell)\n return _mockClass(posCellCentroids)\n\n\n def testGetSeaGridSerial(self):\n minx = 100.\n miny = 45.\n dx = .5\n dy = 1.\n nx = 30\n ny = 10\n maxx = minx + nx*dx\n maxy = miny + ny*dy\n landCntrs = [[100.25, 45.25], [105.25, 47.25]]\n coastCntrs = [[100.75, 45.25], [105.25, 47.25]]\n gb = abRectangularGridBuilder(minx, miny, dx, dy, nx, ny, \n nParWorker=1, minXYIsCentroid=False)\n hiResMtx = self.getMockHiResAlphaMtxAndCstCellDet(landCntrs)\n cstClDet = self.getMockHiResAlphaMtxAndCstCellDet(coastCntrs)\n grd = gb.buildGrid(hiResMtx, cstClDet)\n self.assertFalse(grd.wrapAroundDateline)\n self.assertEqual(1, grd.nParWorker)\n cells = grd.cells\n self.assertEqual(nx*ny - 3, len(cells))\n\n\n def testGetSeaGridParallel(self):\n minx = 100.\n miny = 45.\n dx = .5\n dy = 1.\n nx = 30\n ny = 10\n maxx = minx + nx*dx\n maxy = miny + ny*dy\n landCntrs = [[100.25, 45.25], [105.25, 47.25]]\n coastCntrs = [[100.75, 45.25], [105.25, 47.25]]\n gb = abRectangularGridBuilder(minx, miny, dx, dy, nx, ny, nParWorker=4, minXYIsCentroid=False)\n hiResMtx = self.getMockHiResAlphaMtxAndCstCellDet(landCntrs)\n cstClDet = self.getMockHiResAlphaMtxAndCstCellDet(coastCntrs)\n grd = gb.buildGrid(hiResMtx, cstClDet)\n self.assertEqual(4, grd.nParWorker)\n self.assertFalse(grd.wrapAroundDateline)\n cells = grd.cells\n self.assertEqual(nx*ny - 3, len(cells))\n \n\n def testGetNeighborsSerial(self):\n minx = 100.\n miny = 45.\n dx = .5\n dy = 1.\n nx = 30\n ny = 10\n maxx = minx + nx*dx\n maxy = miny + ny*dy\n gb = abRectangularGridBuilder(minx, miny, dx, dy, nx, ny, nParWorker = 1)\n hiResMtx = self.getMockHiResAlphaMtxAndCstCellDet()\n cstClDet = self.getMockHiResAlphaMtxAndCstCellDet()\n grd = gb.buildGrid(hiResMtx, cstClDet)\n self.assertEqual(1, grd.nParWorker)\n self.assertFalse(grd.wrapAroundDateline)\n cells = grd.cells\n\n cell = cells[0]\n ncls = grd.getNeighbors(cell)\n self.assertEqual(3, len(ncls))\n for nc in ncls:\n self.assertTrue( cell.distance(nc) < .000000001 )\n\n cell = cells[45]\n ncls = grd.getNeighbors(cell)\n self.assertEqual(8, len(ncls))\n for nc in ncls:\n self.assertTrue( cell.distance(nc) < .000000001 )\n\n cell = cells[100]\n ncls = grd.getNeighbors(cell)\n self.assertEqual(5, len(ncls))\n for nc in ncls:\n self.assertTrue( cell.distance(nc) < .000000001 )\n\n\n def testGetNeighborsParallel(self):\n minx = 100.\n miny = 45.\n dx = .5\n dy = 1.\n nx = 30\n ny = 10\n maxx = minx + nx*dx\n maxy = miny + ny*dy\n gb = abRectangularGridBuilder(minx, miny, dx, dy, nx, ny, nParWorker = 4)\n hiResMtx = self.getMockHiResAlphaMtxAndCstCellDet()\n cstClDet = self.getMockHiResAlphaMtxAndCstCellDet()\n grd = gb.buildGrid(hiResMtx, cstClDet)\n 
self.assertEqual(4, grd.nParWorker)\n self.assertFalse(grd.wrapAroundDateline)\n cells = grd.cells\n\n cell = cells[0]\n ncls = grd.getNeighbors(cell)\n self.assertEqual(3, len(ncls))\n for nc in ncls:\n self.assertTrue( cell.distance(nc) < .000000001 )\n\n cell = cells[45]\n ncls = grd.getNeighbors(cell)\n self.assertEqual(8, len(ncls))\n for nc in ncls:\n self.assertTrue( cell.distance(nc) < .000000001 )\n\n cell = cells[100]\n ncls = grd.getNeighbors(cell)\n self.assertEqual(5, len(ncls))\n for nc in ncls:\n self.assertTrue( cell.distance(nc) < .000000001 )\n \n\n def testGetNeighborsWrapAroundGlobalParallel(self):\n minx = -179.\n miny = -89.\n dx = 2\n dy = 2\n nx = 180\n ny = 90\n maxx = minx + nx*dx\n maxy = miny + ny*dy\n gb = abRectangularGridBuilder(minx, miny, dx, dy, nx, ny, nParWorker = 4)\n hiResMtx = self.getMockHiResAlphaMtxAndCstCellDet()\n cstClDet = self.getMockHiResAlphaMtxAndCstCellDet()\n grd = gb.buildGrid(hiResMtx, cstClDet)\n self.assertTrue(grd.wrapAroundDateline)\n cells = grd.cells\n\n cell = cells[0]\n ncls = grd.getNeighbors(cell)\n self.assertEqual(5, len(ncls))\n for nc in ncls:\n self.assertTrue( cell.distance(nc) < .000000001 )\n self.assertEqual(-182, ncls[3].boundary.coords[0][0])\n self.assertEqual(-182, ncls[4].boundary.coords[0][0])\n\n cell = cells[45]\n ncls = grd.getNeighbors(cell)\n self.assertEqual(8, len(ncls))\n for nc in ncls:\n self.assertTrue( cell.distance(nc) < .000000001 )\n self.assertEqual(-182, ncls[5].boundary.coords[0][0])\n self.assertEqual(-182, ncls[6].boundary.coords[0][0])\n self.assertEqual(-182, ncls[6].boundary.coords[0][0])\n\n cell = cells[65]\n ncls = grd.getNeighbors(cell)\n self.assertEqual(8, len(ncls))\n for nc in ncls:\n self.assertTrue( cell.distance(nc) < .000000001 )\n self.assertEqual(-182, ncls[5].boundary.coords[0][0])\n self.assertEqual(-182, ncls[6].boundary.coords[0][0])\n self.assertEqual(-182, ncls[6].boundary.coords[0][0])\n\n cell = cells[89]\n ncls = grd.getNeighbors(cell)\n self.assertEqual(5, len(ncls))\n for nc in ncls:\n self.assertTrue( cell.distance(nc) < .000000001 )\n self.assertEqual(-182, ncls[3].boundary.coords[0][0])\n self.assertEqual(-182, ncls[4].boundary.coords[0][0])\n\n cell = cells[200]\n ncls = grd.getNeighbors(cell)\n self.assertEqual(8, len(ncls))\n for nc in ncls:\n self.assertTrue( cell.distance(nc) < .000000001 )\n\n cell = cells[nx*ny-22]\n ncls = grd.getNeighbors(cell)\n self.assertEqual(8, len(ncls))\n for nc in ncls:\n self.assertTrue( cell.distance(nc) < .000000001 )\n self.assertEqual(182, ncls[5].boundary.coords[1][0])\n self.assertEqual(182, ncls[6].boundary.coords[1][0])\n self.assertEqual(182, ncls[6].boundary.coords[1][0])\n\n cell = cells[nx*ny-1]\n ncls = grd.getNeighbors(cell)\n self.assertEqual(5, len(ncls))\n for nc in ncls:\n self.assertTrue( cell.distance(nc) < .000000001 )\n self.assertEqual(182, ncls[3].boundary.coords[1][0])\n self.assertEqual(182, ncls[4].boundary.coords[1][0])\n \n \n \nif __name__ == '__main__':\n unittest.main()\n\n",
"step-ids": [
6,
7,
8,
9,
10
]
}
|
[
6,
7,
8,
9,
10
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
plugin.register_formats(SBMLFormat, SBMLDirectory, JSONFormat,
JSONDirectory, CommunityModelFormat, CommunityModelManifest,
CommunityModelDirectory, GrowthRates, Fluxes, MicomResultsDirectory,
MicomMediumFile, MicomMediumDirectory, TradeoffResultsDirectory)
plugin.register_semantic_types(MetabolicModels, CommunityModels,
MicomResults, MicomMedium)
plugin.register_semantic_type_to_format(MetabolicModels[SBML], SBMLDirectory)
plugin.register_semantic_type_to_format(MetabolicModels[JSON], JSONDirectory)
plugin.register_semantic_type_to_format(CommunityModels[Pickle],
CommunityModelDirectory)
plugin.register_semantic_type_to_format(MicomResults, MicomResultsDirectory)
plugin.register_semantic_type_to_format(TradeoffResults,
TradeoffResultsDirectory)
plugin.register_semantic_type_to_format(MicomMedium[Global],
MicomMediumDirectory)
plugin.register_semantic_type_to_format(MicomMedium[PerSample],
MicomMediumDirectory)
plugin.methods.register_function(function=q2_micom.db, inputs={},
parameters={'meta': Metadata, 'rank': Str % Choices(q2_micom._build.
RANKS), 'threads': Int % Range(1, None)}, outputs=[('metabolic_models',
MetabolicModels[JSON])], input_descriptions={}, parameter_descriptions=
{'meta':
    'Metadata for the individual metabolic models in `folder`. Must contain the following columns: %s.'
% ', '.join(REQ_FIELDS), 'rank':
'The phylogenetic rank at which to summarize taxa.', 'threads':
'The number of threads to use when constructing models.'},
output_descriptions={'metabolic_models': 'The metabolic model DB.'},
name='Build a metabolic model database.', description=
    'Constructs pan-genome models summarized to the specified rank and bundles the models to be used by MICOM. The chosen rank has to be the same as the one you will use when building your community models. So you may not build genus-level community models with a species level database. You will only need to run this function if you want to build a custom DB. For many use cases downloading the prebuilt AGORA DB with the preferred rank should be sufficient.'
, citations=[citations['agora'], citations['agora_reply'], citations[
'micom']])
plugin.methods.register_function(function=q2_micom.build, inputs={
'abundance': FeatureTable[Frequency | RelativeFrequency], 'taxonomy':
FeatureData[Taxonomy], 'models': MetabolicModels[JSON]}, parameters={
'threads': Int % Range(1, None), 'cutoff': Float % Range(0.0, 1.0),
'strict': Bool, 'solver': Str % Choices('auto', 'cplex', 'osqp',
'gurobi')}, outputs=[('community_models', CommunityModels[Pickle])],
input_descriptions={'abundance':
    'The feature table containing the taxon abundances for each sample used to build the community models.'
, 'taxonomy': 'The taxonomy assignments for the ASVs in the table.',
'models': 'The single taxon model database to use.'},
parameter_descriptions={'threads':
'The number of threads to use when constructing models.', 'cutoff':
'Taxa with a relative abundance smaller than this will be dropped.',
'strict':
    'If true will collapse and match on all taxa ranks up to the specified rank (so on all higher ranks as well). If false (default) will match only on the single taxa rank specified before. If using the strict option make sure ranks are named the same as in the used database.'
, 'solver':
'The quadratic and linear programming solver that will be used in the models. Will pick an appropriate one by default. `cplex` and `gurobi` are commercial solvers with free academic licenses and have to be installed manually. See the docs for more info.'
}, output_descriptions={'community_models': 'The community models.'},
name='Build community models.', description=
'Builds the metabolic community models for a set of samples.',
citations=[citations['micom']])
plugin.methods.register_function(function=q2_micom.minimal_medium, inputs={
'models': CommunityModels[Pickle]}, parameters={'min_growth': Float %
Range(0.0, None, inclusive_start=False), 'threads': Int % Range(1, None
)}, outputs=[('medium', MicomMedium[Global])], input_descriptions={
'models':
    'A collection of metabolic community models. This should contain one model for each sample.'
}, parameter_descriptions={'min_growth':
    'The minimum achievable growth rate for each taxon. The returned growth medium enables all taxa to grow simultaneously with at least this rate.'
, 'threads': 'The number of threads to use when simulating.'},
output_descriptions={'medium': 'The resulting growth medium.'}, name=
'Obtain a minimal growth medium for models.', description=
'Obtains a minimal growth medium for the community models. Please note that this medium does not have any biological feasibility. If you have any knowledge about metabolites present in the environment we recommend you construct the medium by hand.'
, citations=[citations['micom']])
plugin.methods.register_function(function=q2_micom.grow, inputs={'models':
CommunityModels[Pickle], 'medium': MicomMedium[Global | PerSample]},
parameters={'tradeoff': Float % Range(0.0, 1.0, inclusive_start=False,
inclusive_end=True), 'strategy': Str % Choices('pFBA', 'minimal uptake',
'none'), 'threads': Int % Range(1, None)}, outputs=[('results',
MicomResults)], input_descriptions={'models':
    'A collection of metabolic community models. This should contain one model for each sample.'
, 'medium': 'The growth medium to use.'}, parameter_descriptions={
'tradeoff':
'The tradeoff parameter. This describes the balance between maximizing biomass production of the entire community and biomass production of individual taxa (ergo "egoistic" growth). A value of 1.0 would yield the best biomass production across the community but will only allow a few taxa to grow. Smaller values will allow more taxa to grow but will sacrifice overall biomass. A value of 0.5 (the default) has been shown to best reproduce growth rates in the human gut.'
, 'strategy':
    'The strategy used when choosing the solution in the optimal flux space. `minimal uptake` uses the fluxes that result in the smallest total uptake from the environment. `pFBA` uses parsimonious Flux Balance Analysis and thus will choose the fluxes with the lowest enzyme requirement for each taxon. `none` will return an arbitrary solution from the optimal flux space.'
, 'threads': 'The number of threads to use when simulating.'},
output_descriptions={'results':
'The resulting taxa-level growth rates and metabolic exchange fluxes.'},
name='Simulate growth for community models.', description=
'Simulates growth for a set of samples. Note that those are sample-specific or "personalized" simulations, so each taxon may have different growth rates and metabolite usage in each sample.'
, citations=[citations['micom']])
plugin.methods.register_function(function=q2_micom.tradeoff, inputs={
'models': CommunityModels[Pickle], 'medium': MicomMedium[Global |
PerSample]}, parameters={'tradeoff_min': Float % Range(0.0, 1.0,
inclusive_start=False), 'tradeoff_max': Float % Range(0.0, 1.0,
inclusive_end=True), 'step': Float % Range(0.0, 1.0), 'threads': Int},
outputs=[('results', TradeoffResults)], input_descriptions={'models':
    'A collection of metabolic community models. This should contain one model for each sample.'
, 'medium': 'The growth medium to use.'}, parameter_descriptions={
'tradeoff_min':
'The minimum tradeoff parameter to test. This should be larger than 0.0 and smaller than 1.0.'
, 'tradeoff_max':
    'The maximum tradeoff parameter to test. This should be larger than 0.0 and smaller than 1.0 and also be larger than `tradeoff_min`.'
, 'step': 'The tradeoff value step size to use.', 'threads':
'The number of threads to use when simulating.'}, output_descriptions={
'results':
'The resulting taxa-level growth rates for varying tradeoff values.'},
name='Test a variety of tradeoff values.', description=
'Simulates growth for a set of samples while varying the tradeoff between community and taxon biomass production. This can be used to characterize a good tradeoff value for a specific set of samples. Our study suggested that a good tradeoff value is the largest value that allows the majority of taxa in the sample to grow.'
, citations=[citations['micom']])
plugin.methods.register_function(function=q2_micom.filter_models, inputs={
'models': CommunityModels[Pickle]}, parameters={'metadata': Metadata,
'query': Str, 'exclude': Bool}, outputs=[('filtered_models',
CommunityModels[Pickle])], input_descriptions={'models':
    'A collection of metabolic community models. This should contain one model for each sample.'
}, parameter_descriptions={'metadata':
'The metadata for the samples to keep or to query.', 'query':
'A pandas query expression to select samples from the metadata. This will call `query` on the metadata DataFrame, so you can test your query by loading our metadata into a pandas DataFrame.'
, 'exclude':
'If true will use all samples *except* the ones selected by metadata and query.'
}, output_descriptions={'filtered_models':
'The filtered community models.'}, name=
'Filters models for a chosen set of samples.', description=
'Select a subset of samples and their community models using a list of samples or a pandas query expression.'
, citations=[citations['micom']])
plugin.methods.register_function(function=q2_micom.filter_results, inputs={
'results': MicomResults}, parameters={'metadata': Metadata, 'query':
Str, 'exclude': Bool}, outputs=[('filtered_results', MicomResults)],
input_descriptions={'results':
    'A set of MICOM analysis results. Contains predicted growth rates and exchange fluxes.'
}, parameter_descriptions={'metadata':
'The metadata for the samples to keep or to query.', 'query':
'A pandas query expression to select samples from the metadata. This will call `query` on the metadata DataFrame, so you can test your query by loading our metadata into a pandas DataFrame.'
, 'exclude':
'If true will use all samples *except* the ones selected by metadata and query.'
}, output_descriptions={'filtered_results':
'The filtered simulation models.'}, name=
'Filters results for a chosen set of samples.', description=
'Select a subset of samples and their simulation results using a list of samples or a pandas query expression.'
, citations=[citations['micom']])
plugin.visualizers.register_function(function=q2_micom.plot_growth, inputs=
{'results': MicomResults}, parameters={}, input_descriptions={'results':
    'A set of MICOM analysis results. Contains predicted growth rates and exchange fluxes.'
}, parameter_descriptions={}, name='Plot taxa growth rates.',
description=
'Plot predicted growth rates for each taxon in each sample. Only points with growing taxa are shown (growth rate sufficiently larger than zero).'
, citations=[citations['micom']])
plugin.visualizers.register_function(function=q2_micom.exchanges_per_sample,
inputs={'results': MicomResults}, parameters={'direction': Str %
Choices('import', 'export'), 'cluster': Bool}, input_descriptions={
'results':
    'A set of MICOM analysis results. Contains predicted growth rates and exchange fluxes.'
}, parameter_descriptions={'direction': 'The direction of the flux.',
    'cluster': 'Whether to perform clustering on samples and reactions.'},
    name='Plot global exchange rates.', description=
'Plot predicted global exchange fluxes for each sample. When plotting imports this corresponds to the consumption fluxes for each metabolite that is available to the community. When plotting export this corresponds to the production fluxes for each metabolite.'
, citations=[citations['micom']])
plugin.visualizers.register_function(function=q2_micom.exchanges_per_taxon,
inputs={'results': MicomResults}, parameters={'direction': Str %
Choices('import', 'export'), 'perplexity': Int % Range(2, None)},
input_descriptions={'results':
'A set of MICOM analysis results. Contains predicted growth rates and exchange fluxes.'
}, parameter_descriptions={'direction': 'The direction of the flux.',
'perplexity':
'TSNE parameter. Relates to the number of neighbors used to calculate distances. Smaller values preserve more local structure and larger values preserve more global structure.'
}, name='Plot niche overlap.', description=
    'Plot growth or production niches. The entire set of import or export fluxes for each taxon in each sample is reduced onto a single point on a 2D plane. Taxa that are close to each other either consume similar metabolites (imports) or produce similar metabolites (exports).'
, citations=[citations['micom']])
plugin.visualizers.register_function(function=q2_micom.plot_tradeoff,
inputs={'results': TradeoffResults}, parameters={}, input_descriptions=
{'results':
'A set of MICOM tradeoff analysis results. Contains predicted growth rates for each tested tradeoff.'
}, parameter_descriptions={}, name='Plot tradeoff results.',
description=
    'Plot predicted growth rate distributions for each tradeoff as well as the fraction of growing taxa in each sample and tradeoff value. For a good tradeoff value one usually tries to find the largest tradeoff value that still allows most taxa to grow.'
, citations=[citations['micom']])
plugin.visualizers.register_function(function=q2_micom.fit_phenotype,
inputs={'results': MicomResults}, parameters={'metadata':
MetadataColumn[Categorical | Numeric], 'variable_type': Str % Choices(
'binary', 'continuous'), 'flux_type': Str % Choices('import',
'production'), 'min_coef': Float % Range(0, None)}, input_descriptions=
{'results':
'A set of MICOM analysis results. Contains predicted growth rates and exchange fluxes.'
}, parameter_descriptions={'metadata': 'The metadata variable to use.',
'variable_type': 'The type of the phenotype variable.', 'flux_type':
'Which fluxes to use.', 'min_coef':
    'Only coefficients with absolute values larger than this will be shown.'
}, name='Test for differential production', description=
'Test for overall metabolite production differences between two groups.',
citations=[citations['micom']])
importlib.import_module('q2_micom._transform')
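# Usage sketch (an addition, not part of the original module): with the plugin
# installed, the methods registered above can be driven from Python through the
# QIIME2 Artifact API. The .qza file names and parameter values below are
# hypothetical placeholders; the guard keeps this out of normal plugin imports.
if __name__ == '__main__':
    from qiime2 import Artifact
    from qiime2.plugins import micom

    table = Artifact.load('table.qza')        # FeatureTable[Frequency]
    taxonomy = Artifact.load('taxonomy.qza')  # FeatureData[Taxonomy]
    db = Artifact.load('agora.qza')           # MetabolicModels[JSON]
    # Build one community model per sample, dropping very rare taxa.
    built = micom.actions.build(abundance=table, taxonomy=taxonomy,
                                models=db, cutoff=0.001, threads=4)
    # Simulate growth on a global growth medium with the default tradeoff.
    medium = Artifact.load('medium.qza')      # MicomMedium[Global]
    grown = micom.actions.grow(models=built.community_models,
                               medium=medium, tradeoff=0.5)
    grown.results.save('growth_results.qza')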
<|reserved_special_token_1|>
<|reserved_special_token_0|>
citations = Citations.load('citations.bib', package='q2_micom')
plugin = Plugin(name='micom', version=q2_micom.__version__, website=
'https://github.com/micom-dev/q2-micom', package='q2_micom',
description='', short_description=
'Plugin for metabolic modeling of microbial communities.', citations=[
citations['micom']])
plugin.register_formats(SBMLFormat, SBMLDirectory, JSONFormat,
JSONDirectory, CommunityModelFormat, CommunityModelManifest,
CommunityModelDirectory, GrowthRates, Fluxes, MicomResultsDirectory,
MicomMediumFile, MicomMediumDirectory, TradeoffResultsDirectory)
plugin.register_semantic_types(MetabolicModels, CommunityModels,
MicomResults, MicomMedium)
plugin.register_semantic_type_to_format(MetabolicModels[SBML], SBMLDirectory)
plugin.register_semantic_type_to_format(MetabolicModels[JSON], JSONDirectory)
plugin.register_semantic_type_to_format(CommunityModels[Pickle],
CommunityModelDirectory)
plugin.register_semantic_type_to_format(MicomResults, MicomResultsDirectory)
plugin.register_semantic_type_to_format(TradeoffResults,
TradeoffResultsDirectory)
plugin.register_semantic_type_to_format(MicomMedium[Global],
MicomMediumDirectory)
plugin.register_semantic_type_to_format(MicomMedium[PerSample],
MicomMediumDirectory)
plugin.methods.register_function(function=q2_micom.db, inputs={},
parameters={'meta': Metadata, 'rank': Str % Choices(q2_micom._build.
RANKS), 'threads': Int % Range(1, None)}, outputs=[('metabolic_models',
MetabolicModels[JSON])], input_descriptions={}, parameter_descriptions=
{'meta':
    'Metadata for the individual metabolic models in `folder`. Must contain the following columns: %s.'
% ', '.join(REQ_FIELDS), 'rank':
'The phylogenetic rank at which to summarize taxa.', 'threads':
'The number of threads to use when constructing models.'},
output_descriptions={'metabolic_models': 'The metabolic model DB.'},
name='Build a metabolic model database.', description=
    'Constructs pan-genome models summarized to the specified rank and bundles the models to be used by MICOM. The chosen rank has to be the same as the one you will use when building your community models. So you may not build genus-level community models with a species level database. You will only need to run this function if you want to build a custom DB. For many use cases downloading the prebuilt AGORA DB with the preferred rank should be sufficient.'
, citations=[citations['agora'], citations['agora_reply'], citations[
'micom']])
plugin.methods.register_function(function=q2_micom.build, inputs={
'abundance': FeatureTable[Frequency | RelativeFrequency], 'taxonomy':
FeatureData[Taxonomy], 'models': MetabolicModels[JSON]}, parameters={
'threads': Int % Range(1, None), 'cutoff': Float % Range(0.0, 1.0),
'strict': Bool, 'solver': Str % Choices('auto', 'cplex', 'osqp',
'gurobi')}, outputs=[('community_models', CommunityModels[Pickle])],
input_descriptions={'abundance':
    'The feature table containing the taxon abundances for each sample used to build the community models.'
, 'taxonomy': 'The taxonomy assignments for the ASVs in the table.',
'models': 'The single taxon model database to use.'},
parameter_descriptions={'threads':
'The number of threads to use when constructing models.', 'cutoff':
'Taxa with a relative abundance smaller than this will be dropped.',
'strict':
    'If true will collapse and match on all taxa ranks up to the specified rank (so on all higher ranks as well). If false (default) will match only on the single taxa rank specified before. If using the strict option make sure ranks are named the same as in the used database.'
, 'solver':
'The quadratic and linear programming solver that will be used in the models. Will pick an appropriate one by default. `cplex` and `gurobi` are commercial solvers with free academic licenses and have to be installed manually. See the docs for more info.'
}, output_descriptions={'community_models': 'The community models.'},
name='Build community models.', description=
'Builds the metabolic community models for a set of samples.',
citations=[citations['micom']])
plugin.methods.register_function(function=q2_micom.minimal_medium, inputs={
'models': CommunityModels[Pickle]}, parameters={'min_growth': Float %
Range(0.0, None, inclusive_start=False), 'threads': Int % Range(1, None
)}, outputs=[('medium', MicomMedium[Global])], input_descriptions={
'models':
    'A collection of metabolic community models. This should contain one model for each sample.'
}, parameter_descriptions={'min_growth':
    'The minimum achievable growth rate for each taxon. The returned growth medium enables all taxa to grow simultaneously with at least this rate.'
, 'threads': 'The number of threads to use when simulating.'},
output_descriptions={'medium': 'The resulting growth medium.'}, name=
'Obtain a minimal growth medium for models.', description=
'Obtains a minimal growth medium for the community models. Please note that this medium does not have any biological feasibility. If you have any knowledge about metabolites present in the environment we recommend you construct the medium by hand.'
, citations=[citations['micom']])
plugin.methods.register_function(function=q2_micom.grow, inputs={'models':
CommunityModels[Pickle], 'medium': MicomMedium[Global | PerSample]},
parameters={'tradeoff': Float % Range(0.0, 1.0, inclusive_start=False,
inclusive_end=True), 'strategy': Str % Choices('pFBA', 'minimal uptake',
'none'), 'threads': Int % Range(1, None)}, outputs=[('results',
MicomResults)], input_descriptions={'models':
    'A collection of metabolic community models. This should contain one model for each sample.'
, 'medium': 'The growth medium to use.'}, parameter_descriptions={
'tradeoff':
'The tradeoff parameter. This describes the balance between maximizing biomass production of the entire community and biomass production of individual taxa (ergo "egoistic" growth). A value of 1.0 would yield the best biomass production across the community but will only allow a few taxa to grow. Smaller values will allow more taxa to grow but will sacrifice overall biomass. A value of 0.5 (the default) has been shown to best reproduce growth rates in the human gut.'
, 'strategy':
    'The strategy used when choosing the solution in the optimal flux space. `minimal uptake` uses the fluxes that result in the smallest total uptake from the environment. `pFBA` uses parsimonious Flux Balance Analysis and thus will choose the fluxes with the lowest enzyme requirement for each taxon. `none` will return an arbitrary solution from the optimal flux space.'
, 'threads': 'The number of threads to use when simulating.'},
output_descriptions={'results':
'The resulting taxa-level growth rates and metabolic exchange fluxes.'},
name='Simulate growth for community models.', description=
'Simulates growth for a set of samples. Note that those are sample-specific or "personalized" simulations, so each taxon may have different growth rates and metabolite usage in each sample.'
, citations=[citations['micom']])
plugin.methods.register_function(function=q2_micom.tradeoff, inputs={
'models': CommunityModels[Pickle], 'medium': MicomMedium[Global |
PerSample]}, parameters={'tradeoff_min': Float % Range(0.0, 1.0,
inclusive_start=False), 'tradeoff_max': Float % Range(0.0, 1.0,
inclusive_end=True), 'step': Float % Range(0.0, 1.0), 'threads': Int},
outputs=[('results', TradeoffResults)], input_descriptions={'models':
    'A collection of metabolic community models. This should contain one model for each sample.'
, 'medium': 'The growth medium to use.'}, parameter_descriptions={
'tradeoff_min':
'The minimum tradeoff parameter to test. This should be larger than 0.0 and smaller than 1.0.'
, 'tradeoff_max':
    'The maximum tradeoff parameter to test. This should be larger than 0.0 and smaller than 1.0 and also be larger than `tradeoff_min`.'
, 'step': 'The tradeoff value step size to use.', 'threads':
'The number of threads to use when simulating.'}, output_descriptions={
'results':
'The resulting taxa-level growth rates for varying tradeoff values.'},
name='Test a variety of tradeoff values.', description=
'Simulates growth for a set of samples while varying the tradeoff between community and taxon biomass production. This can be used to characterize a good tradeoff value for a specific set of samples. Our study suggested that a good tradeoff value is the largest value that allows the majority of taxa in the sample to grow.'
, citations=[citations['micom']])
plugin.methods.register_function(function=q2_micom.filter_models, inputs={
'models': CommunityModels[Pickle]}, parameters={'metadata': Metadata,
'query': Str, 'exclude': Bool}, outputs=[('filtered_models',
CommunityModels[Pickle])], input_descriptions={'models':
    'A collection of metabolic community models. This should contain one model for each sample.'
}, parameter_descriptions={'metadata':
'The metadata for the samples to keep or to query.', 'query':
'A pandas query expression to select samples from the metadata. This will call `query` on the metadata DataFrame, so you can test your query by loading our metadata into a pandas DataFrame.'
, 'exclude':
'If true will use all samples *except* the ones selected by metadata and query.'
}, output_descriptions={'filtered_models':
'The filtered community models.'}, name=
'Filters models for a chosen set of samples.', description=
'Select a subset of samples and their community models using a list of samples or a pandas query expression.'
, citations=[citations['micom']])
plugin.methods.register_function(function=q2_micom.filter_results, inputs={
'results': MicomResults}, parameters={'metadata': Metadata, 'query':
Str, 'exclude': Bool}, outputs=[('filtered_results', MicomResults)],
input_descriptions={'results':
    'A set of MICOM analysis results. Contains predicted growth rates and exchange fluxes.'
}, parameter_descriptions={'metadata':
'The metadata for the samples to keep or to query.', 'query':
'A pandas query expression to select samples from the metadata. This will call `query` on the metadata DataFrame, so you can test your query by loading our metadata into a pandas DataFrame.'
, 'exclude':
'If true will use all samples *except* the ones selected by metadata and query.'
}, output_descriptions={'filtered_results':
'The filtered simulation models.'}, name=
'Filters results for a chosen set of samples.', description=
'Select a subset of samples and their simulation results using a list of samples or a pandas query expression.'
, citations=[citations['micom']])
plugin.visualizers.register_function(function=q2_micom.plot_growth, inputs=
{'results': MicomResults}, parameters={}, input_descriptions={'results':
    'A set of MICOM analysis results. Contains predicted growth rates and exchange fluxes.'
}, parameter_descriptions={}, name='Plot taxa growth rates.',
description=
'Plot predicted growth rates for each taxon in each sample. Only points with growing taxa are shown (growth rate sufficiently larger than zero).'
, citations=[citations['micom']])
plugin.visualizers.register_function(function=q2_micom.exchanges_per_sample,
inputs={'results': MicomResults}, parameters={'direction': Str %
Choices('import', 'export'), 'cluster': Bool}, input_descriptions={
'results':
    'A set of MICOM analysis results. Contains predicted growth rates and exchange fluxes.'
}, parameter_descriptions={'direction': 'The direction of the flux.',
    'cluster': 'Whether to perform clustering on samples and reactions.'},
    name='Plot global exchange rates.', description=
'Plot predicted global exchange fluxes for each sample. When plotting imports this corresponds to the consumption fluxes for each metabolite that is available to the community. When plotting export this corresponds to the production fluxes for each metabolite.'
, citations=[citations['micom']])
plugin.visualizers.register_function(function=q2_micom.exchanges_per_taxon,
inputs={'results': MicomResults}, parameters={'direction': Str %
Choices('import', 'export'), 'perplexity': Int % Range(2, None)},
input_descriptions={'results':
'A set of MICOM analysis results. Contains predicted growth rates and exchange fluxes.'
}, parameter_descriptions={'direction': 'The direction of the flux.',
'perplexity':
'TSNE parameter. Relates to the number of neighbors used to calculate distances. Smaller values preserve more local structure and larger values preserve more global structure.'
}, name='Plot niche overlap.', description=
    'Plot growth or production niches. The entire set of import or export fluxes for each taxon in each sample is reduced onto a single point on a 2D plane. Taxa that are close to each other either consume similar metabolites (imports) or produce similar metabolites (exports).'
, citations=[citations['micom']])
plugin.visualizers.register_function(function=q2_micom.plot_tradeoff,
inputs={'results': TradeoffResults}, parameters={}, input_descriptions=
{'results':
'A set of MICOM tradeoff analysis results. Contains predicted growth rates for each tested tradeoff.'
}, parameter_descriptions={}, name='Plot tradeoff results.',
description=
    'Plot predicted growth rate distributions for each tradeoff as well as the fraction of growing taxa in each sample and tradeoff value. For a good tradeoff value one usually tries to find the largest tradeoff value that still allows most taxa to grow.'
, citations=[citations['micom']])
plugin.visualizers.register_function(function=q2_micom.fit_phenotype,
inputs={'results': MicomResults}, parameters={'metadata':
MetadataColumn[Categorical | Numeric], 'variable_type': Str % Choices(
'binary', 'continuous'), 'flux_type': Str % Choices('import',
'production'), 'min_coef': Float % Range(0, None)}, input_descriptions=
{'results':
'A set of MICOM analysis results. Contains predicted growth rates and exchange fluxes.'
}, parameter_descriptions={'metadata': 'The metadata variable to use.',
'variable_type': 'The type of the phenotype variable.', 'flux_type':
'Which fluxes to use.', 'min_coef':
    'Only coefficients with absolute values larger than this will be shown.'
}, name='Test for differential production', description=
'Test for overall metabolite production differences between two groups.',
citations=[citations['micom']])
importlib.import_module('q2_micom._transform')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
import importlib
from qiime2.plugin import Plugin, Str, Choices, Int, Bool, Range, Float, Metadata, MetadataColumn, Categorical, Numeric, Citations
import q2_micom
from q2_micom._formats_and_types import SBML, JSON, Pickle, SBMLFormat, SBMLDirectory, JSONFormat, JSONDirectory, CommunityModelFormat, CommunityModelManifest, CommunityModelDirectory, GrowthRates, Fluxes, MicomResultsDirectory, MicomMediumFile, MicomMediumDirectory, MetabolicModels, CommunityModels, MicomResults, MicomMedium, Global, PerSample, TradeoffResults, TradeoffResultsDirectory, REQ_FIELDS
from q2_types.feature_data import FeatureData, Taxonomy
from q2_types.feature_table import FeatureTable, RelativeFrequency, Frequency
citations = Citations.load('citations.bib', package='q2_micom')
plugin = Plugin(name='micom', version=q2_micom.__version__, website=
'https://github.com/micom-dev/q2-micom', package='q2_micom',
description='', short_description=
'Plugin for metabolic modeling of microbial communities.', citations=[
citations['micom']])
plugin.register_formats(SBMLFormat, SBMLDirectory, JSONFormat,
JSONDirectory, CommunityModelFormat, CommunityModelManifest,
CommunityModelDirectory, GrowthRates, Fluxes, MicomResultsDirectory,
MicomMediumFile, MicomMediumDirectory, TradeoffResultsDirectory)
plugin.register_semantic_types(MetabolicModels, CommunityModels,
MicomResults, MicomMedium)
plugin.register_semantic_type_to_format(MetabolicModels[SBML], SBMLDirectory)
plugin.register_semantic_type_to_format(MetabolicModels[JSON], JSONDirectory)
plugin.register_semantic_type_to_format(CommunityModels[Pickle],
CommunityModelDirectory)
plugin.register_semantic_type_to_format(MicomResults, MicomResultsDirectory)
plugin.register_semantic_type_to_format(TradeoffResults,
TradeoffResultsDirectory)
plugin.register_semantic_type_to_format(MicomMedium[Global],
MicomMediumDirectory)
plugin.register_semantic_type_to_format(MicomMedium[PerSample],
MicomMediumDirectory)
plugin.methods.register_function(function=q2_micom.db, inputs={},
parameters={'meta': Metadata, 'rank': Str % Choices(q2_micom._build.
RANKS), 'threads': Int % Range(1, None)}, outputs=[('metabolic_models',
MetabolicModels[JSON])], input_descriptions={}, parameter_descriptions=
{'meta':
    'Metadata for the individual metabolic models in `folder`. Must contain the following columns: %s.'
% ', '.join(REQ_FIELDS), 'rank':
'The phylogenetic rank at which to summarize taxa.', 'threads':
'The number of threads to use when constructing models.'},
output_descriptions={'metabolic_models': 'The metabolic model DB.'},
name='Build a metabolic model database.', description=
    'Constructs pan-genome models summarized to the specified rank and bundles the models to be used by MICOM. The chosen rank has to be the same as the one you will use when building your community models. So you may not build genus-level community models with a species level database. You will only need to run this function if you want to build a custom DB. For many use cases downloading the prebuilt AGORA DB with the preferred rank should be sufficient.'
, citations=[citations['agora'], citations['agora_reply'], citations[
'micom']])
plugin.methods.register_function(function=q2_micom.build, inputs={
'abundance': FeatureTable[Frequency | RelativeFrequency], 'taxonomy':
FeatureData[Taxonomy], 'models': MetabolicModels[JSON]}, parameters={
'threads': Int % Range(1, None), 'cutoff': Float % Range(0.0, 1.0),
'strict': Bool, 'solver': Str % Choices('auto', 'cplex', 'osqp',
'gurobi')}, outputs=[('community_models', CommunityModels[Pickle])],
input_descriptions={'abundance':
    'The feature table containing the taxon abundances for each sample used to build the community models.'
, 'taxonomy': 'The taxonomy assignments for the ASVs in the table.',
'models': 'The single taxon model database to use.'},
parameter_descriptions={'threads':
'The number of threads to use when constructing models.', 'cutoff':
'Taxa with a relative abundance smaller than this will be dropped.',
'strict':
    'If true will collapse and match on all taxa ranks up to the specified rank (so on all higher ranks as well). If false (default) will match only on the single taxa rank specified before. If using the strict option make sure ranks are named the same as in the used database.'
, 'solver':
'The quadratic and linear programming solver that will be used in the models. Will pick an appropriate one by default. `cplex` and `gurobi` are commercial solvers with free academic licenses and have to be installed manually. See the docs for more info.'
}, output_descriptions={'community_models': 'The community models.'},
name='Build community models.', description=
'Builds the metabolic community models for a set of samples.',
citations=[citations['micom']])
plugin.methods.register_function(function=q2_micom.minimal_medium, inputs={
'models': CommunityModels[Pickle]}, parameters={'min_growth': Float %
Range(0.0, None, inclusive_start=False), 'threads': Int % Range(1, None
)}, outputs=[('medium', MicomMedium[Global])], input_descriptions={
'models':
    'A collection of metabolic community models. This should contain one model for each sample.'
}, parameter_descriptions={'min_growth':
    'The minimum achievable growth rate for each taxon. The returned growth medium enables all taxa to grow simultaneously with at least this rate.'
, 'threads': 'The number of threads to use when simulating.'},
output_descriptions={'medium': 'The resulting growth medium.'}, name=
'Obtain a minimal growth medium for models.', description=
'Obtains a minimal growth medium for the community models. Please note that this medium does not have any biological feasibility. If you have any knowledge about metabolites present in the environment we recommend you construct the medium by hand.'
, citations=[citations['micom']])
plugin.methods.register_function(function=q2_micom.grow, inputs={'models':
CommunityModels[Pickle], 'medium': MicomMedium[Global | PerSample]},
parameters={'tradeoff': Float % Range(0.0, 1.0, inclusive_start=False,
inclusive_end=True), 'strategy': Str % Choices('pFBA', 'minimal uptake',
'none'), 'threads': Int % Range(1, None)}, outputs=[('results',
MicomResults)], input_descriptions={'models':
    'A collection of metabolic community models. This should contain one model for each sample.'
, 'medium': 'The growth medium to use.'}, parameter_descriptions={
'tradeoff':
'The tradeoff parameter. This describes the balance between maximizing biomass production of the entire community and biomass production of individual taxa (ergo "egoistic" growth). A value of 1.0 would yield the best biomass production across the community but will only allow a few taxa to grow. Smaller values will allow more taxa to grow but will sacrifice overall biomass. A value of 0.5 (the default) has been shown to best reproduce growth rates in the human gut.'
, 'strategy':
    'The strategy used when choosing the solution in the optimal flux space. `minimal uptake` uses the fluxes that result in the smallest total uptake from the environment. `pFBA` uses parsimonious Flux Balance Analysis and thus will choose the fluxes with the lowest enzyme requirement for each taxon. `none` will return an arbitrary solution from the optimal flux space.'
, 'threads': 'The number of threads to use when simulating.'},
output_descriptions={'results':
'The resulting taxa-level growth rates and metabolic exchange fluxes.'},
name='Simulate growth for community models.', description=
'Simulates growth for a set of samples. Note that those are sample-specific or "personalized" simulations, so each taxon may have different growth rates and metabolite usage in each sample.'
, citations=[citations['micom']])
plugin.methods.register_function(function=q2_micom.tradeoff, inputs={
'models': CommunityModels[Pickle], 'medium': MicomMedium[Global |
PerSample]}, parameters={'tradeoff_min': Float % Range(0.0, 1.0,
inclusive_start=False), 'tradeoff_max': Float % Range(0.0, 1.0,
inclusive_end=True), 'step': Float % Range(0.0, 1.0), 'threads': Int},
outputs=[('results', TradeoffResults)], input_descriptions={'models':
    'A collection of metabolic community models. This should contain one model for each sample.'
, 'medium': 'The growth medium to use.'}, parameter_descriptions={
'tradeoff_min':
'The minimum tradeoff parameter to test. This should be larger than 0.0 and smaller than 1.0.'
, 'tradeoff_max':
    'The maximum tradeoff parameter to test. This should be larger than 0.0 and smaller than 1.0 and also be larger than `tradeoff_min`.'
, 'step': 'The tradeoff value step size to use.', 'threads':
'The number of threads to use when simulating.'}, output_descriptions={
'results':
'The resulting taxa-level growth rates for varying tradeoff values.'},
name='Test a variety of tradeoff values.', description=
'Simulates growth for a set of samples while varying the tradeoff between community and taxon biomass production. This can be used to characterize a good tradeoff value for a specific set of samples. Our study suggested that a good tradeoff value is the largest value that allows the majority of taxa in the sample to grow.'
, citations=[citations['micom']])
plugin.methods.register_function(function=q2_micom.filter_models, inputs={
'models': CommunityModels[Pickle]}, parameters={'metadata': Metadata,
'query': Str, 'exclude': Bool}, outputs=[('filtered_models',
CommunityModels[Pickle])], input_descriptions={'models':
    'A collection of metabolic community models. This should contain one model for each sample.'
}, parameter_descriptions={'metadata':
'The metadata for the samples to keep or to query.', 'query':
'A pandas query expression to select samples from the metadata. This will call `query` on the metadata DataFrame, so you can test your query by loading our metadata into a pandas DataFrame.'
, 'exclude':
'If true will use all samples *except* the ones selected by metadata and query.'
}, output_descriptions={'filtered_models':
'The filtered community models.'}, name=
'Filters models for a chosen set of samples.', description=
'Select a subset of samples and their community models using a list of samples or a pandas query expression.'
, citations=[citations['micom']])
plugin.methods.register_function(function=q2_micom.filter_results, inputs={
'results': MicomResults}, parameters={'metadata': Metadata, 'query':
Str, 'exclude': Bool}, outputs=[('filtered_results', MicomResults)],
input_descriptions={'results':
    'A set of MICOM analysis results. Contains predicted growth rates and exchange fluxes.'
}, parameter_descriptions={'metadata':
'The metadata for the samples to keep or to query.', 'query':
'A pandas query expression to select samples from the metadata. This will call `query` on the metadata DataFrame, so you can test your query by loading our metadata into a pandas DataFrame.'
, 'exclude':
'If true will use all samples *except* the ones selected by metadata and query.'
}, output_descriptions={'filtered_results':
'The filtered simulation models.'}, name=
'Filters results for a chosen set of samples.', description=
'Select a subset of samples and their simulation results using a list of samples or a pandas query expression.'
, citations=[citations['micom']])
plugin.visualizers.register_function(function=q2_micom.plot_growth, inputs=
{'results': MicomResults}, parameters={}, input_descriptions={'results':
    'A set of MICOM analysis results. Contains predicted growth rates and exchange fluxes.'
}, parameter_descriptions={}, name='Plot taxa growth rates.',
description=
'Plot predicted growth rates for each taxon in each sample. Only points with growing taxa are shown (growth rate sufficiently larger than zero).'
, citations=[citations['micom']])
plugin.visualizers.register_function(function=q2_micom.exchanges_per_sample,
inputs={'results': MicomResults}, parameters={'direction': Str %
Choices('import', 'export'), 'cluster': Bool}, input_descriptions={
'results':
    'A set of MICOM analysis results. Contains predicted growth rates and exchange fluxes.'
}, parameter_descriptions={'direction': 'The direction of the flux.',
    'cluster': 'Whether to perform clustering on samples and reactions.'},
    name='Plot global exchange rates.', description=
'Plot predicted global exchange fluxes for each sample. When plotting imports this corresponds to the consumption fluxes for each metabolite that is available to the community. When plotting export this corresponds to the production fluxes for each metabolite.'
, citations=[citations['micom']])
plugin.visualizers.register_function(function=q2_micom.exchanges_per_taxon,
inputs={'results': MicomResults}, parameters={'direction': Str %
Choices('import', 'export'), 'perplexity': Int % Range(2, None)},
input_descriptions={'results':
'A set of MICOM analysis results. Contains predicted growth rates and exchange fluxes.'
}, parameter_descriptions={'direction': 'The direction of the flux.',
'perplexity':
'TSNE parameter. Relates to the number of neighbors used to calculate distances. Smaller values preserve more local structure and larger values preserve more global structure.'
}, name='Plot niche overlap.', description=
    'Plot growth or production niches. The entire set of import or export fluxes for each taxon in each sample is reduced onto a single point on a 2D plane. Taxa that are close to each other either consume similar metabolites (imports) or produce similar metabolites (exports).'
, citations=[citations['micom']])
plugin.visualizers.register_function(function=q2_micom.plot_tradeoff,
inputs={'results': TradeoffResults}, parameters={}, input_descriptions=
{'results':
'A set of MICOM tradeoff analysis results. Contains predicted growth rates for each tested tradeoff.'
}, parameter_descriptions={}, name='Plot tradeoff results.',
description=
    'Plot predicted growth rate distributions for each tradeoff as well as the fraction of growing taxa in each sample and tradeoff value. For a good tradeoff value one usually tries to find the largest tradeoff value that still allows most taxa to grow.'
, citations=[citations['micom']])
plugin.visualizers.register_function(function=q2_micom.fit_phenotype,
inputs={'results': MicomResults}, parameters={'metadata':
MetadataColumn[Categorical | Numeric], 'variable_type': Str % Choices(
'binary', 'continuous'), 'flux_type': Str % Choices('import',
'production'), 'min_coef': Float % Range(0, None)}, input_descriptions=
{'results':
'A set of MICOM analysis results. Contains predicted growth rates and exchange fluxes.'
}, parameter_descriptions={'metadata': 'The metadata variable to use.',
'variable_type': 'The type of the phenotype variable.', 'flux_type':
'Which fluxes to use.', 'min_coef':
    'Only coefficients with absolute values larger than this will be shown.'
}, name='Test for differential production', description=
'Test for overall metabolite production differences between two groups.',
citations=[citations['micom']])
importlib.import_module('q2_micom._transform')
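# Usage sketch (an addition, not part of the original module): the tradeoff
# analysis registered above can be driven the same way, and its visualizer
# helps pick a tradeoff value. File names are hypothetical placeholders; the
# guard keeps this out of normal plugin imports.
if __name__ == '__main__':
    from qiime2 import Artifact
    from qiime2.plugins import micom

    models = Artifact.load('community_models.qza')  # CommunityModels[Pickle]
    medium = Artifact.load('medium.qza')             # MicomMedium[Global]
    # Scan tradeoff values from 0.1 to 1.0 in steps of 0.1.
    scanned = micom.actions.tradeoff(models=models, medium=medium,
                                     tradeoff_min=0.1, tradeoff_max=1.0,
                                     step=0.1, threads=4)
    # Visualize growth rate distributions for each tested tradeoff value.
    viz = micom.actions.plot_tradeoff(results=scanned.results)
    viz.visualization.save('tradeoff.qzv')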
<|reserved_special_token_1|>
"""Plugin setup."""
import importlib
from qiime2.plugin import (
Plugin,
Str,
Choices,
Int,
Bool,
Range,
Float,
Metadata,
MetadataColumn,
Categorical,
Numeric,
Citations,
)
import q2_micom
from q2_micom._formats_and_types import (
SBML,
JSON,
Pickle,
SBMLFormat,
SBMLDirectory,
JSONFormat,
JSONDirectory,
CommunityModelFormat,
CommunityModelManifest,
CommunityModelDirectory,
GrowthRates,
Fluxes,
MicomResultsDirectory,
MicomMediumFile,
MicomMediumDirectory,
MetabolicModels,
CommunityModels,
MicomResults,
MicomMedium,
Global,
PerSample,
TradeoffResults,
TradeoffResultsDirectory,
REQ_FIELDS,
)
from q2_types.feature_data import FeatureData, Taxonomy
from q2_types.feature_table import FeatureTable, RelativeFrequency, Frequency
citations = Citations.load("citations.bib", package="q2_micom")
plugin = Plugin(
name="micom",
version=q2_micom.__version__,
website="https://github.com/micom-dev/q2-micom",
package="q2_micom",
description=(""),
short_description="Plugin for metabolic modeling of microbial communities.",
citations=[citations["micom"]],
)
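
# Once the plugin is installed, the actions registered below are exposed through
# the standard QIIME 2 interfaces. A minimal Artifact API sketch (the artifact
# file names are placeholders, not part of this module):
#
#   import qiime2
#   from qiime2.plugins import micom
#
#   table = qiime2.Artifact.load("table.qza")        # FeatureTable[Frequency]
#   taxonomy = qiime2.Artifact.load("taxonomy.qza")  # FeatureData[Taxonomy]
#   db = qiime2.Artifact.load("agora_genus.qza")     # MetabolicModels[JSON]
#   built = micom.actions.build(abundance=table, taxonomy=taxonomy, models=db)
#   built.community_models.save("community_models.qza")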
plugin.register_formats(
SBMLFormat,
SBMLDirectory,
JSONFormat,
JSONDirectory,
CommunityModelFormat,
CommunityModelManifest,
CommunityModelDirectory,
GrowthRates,
Fluxes,
MicomResultsDirectory,
MicomMediumFile,
MicomMediumDirectory,
TradeoffResultsDirectory,
)
plugin.register_semantic_types(
MetabolicModels, CommunityModels, MicomResults, MicomMedium
)
plugin.register_semantic_type_to_format(MetabolicModels[SBML], SBMLDirectory)
plugin.register_semantic_type_to_format(MetabolicModels[JSON], JSONDirectory)
plugin.register_semantic_type_to_format(
CommunityModels[Pickle], CommunityModelDirectory
)
plugin.register_semantic_type_to_format(MicomResults, MicomResultsDirectory)
plugin.register_semantic_type_to_format(TradeoffResults, TradeoffResultsDirectory)
plugin.register_semantic_type_to_format(MicomMedium[Global], MicomMediumDirectory)
plugin.register_semantic_type_to_format(MicomMedium[PerSample], MicomMediumDirectory)
plugin.methods.register_function(
function=q2_micom.db,
inputs={},
parameters={
"meta": Metadata,
"rank": Str % Choices(q2_micom._build.RANKS),
"threads": Int % Range(1, None),
},
outputs=[("metabolic_models", MetabolicModels[JSON])],
input_descriptions={},
parameter_descriptions={
"meta": (
"Metadata for the individual metabolic models in `folder`. "
"Must contain the the following columns: %s." % ", ".join(REQ_FIELDS)
),
"rank": "The phylogenetic rank at which to summarize taxa.",
"threads": "The number of threads to use when constructing models.",
},
output_descriptions={"metabolic_models": "The metabolic model DB."},
name="Build a metabolic model database.",
description=(
"Constructs pan-genome models summarized to the specified rank "
"and bundles the models to be used by MICOM. "
"The chosen rank has to be the same you want as when building your "
"community models. "
"So you may not build genus-level community models with a species "
"level database. "
"You will only need to run this function if you want to build a "
"custom DB. For many use cases downloading the prebuilt AGORA DB "
"with the the preferred rank should be sufficient."
),
citations=[
citations["agora"],
citations["agora_reply"],
citations["micom"],
],
)
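
# A sketch of how q2cli typically renders the action above on the command line
# (file names and the rank value are placeholders):
#
#   qiime micom db \
#       --m-meta-file model_manifest.tsv \
#       --p-rank genus \
#       --p-threads 8 \
#       --o-metabolic-models metabolic_models.qza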
plugin.methods.register_function(
function=q2_micom.build,
inputs={
"abundance": FeatureTable[Frequency | RelativeFrequency],
"taxonomy": FeatureData[Taxonomy],
"models": MetabolicModels[JSON],
},
parameters={
"threads": Int % Range(1, None),
"cutoff": Float % Range(0.0, 1.0),
"strict": Bool,
"solver": Str % Choices("auto", "cplex", "osqp", "gurobi"),
},
outputs=[("community_models", CommunityModels[Pickle])],
input_descriptions={
"abundance": (
"The feature table containing the samples over which beta "
"diversity should be computed."
),
"taxonomy": "The taxonomy assignments for the ASVs in the table.",
"models": "The single taxon model database to use.",
},
parameter_descriptions={
"threads": "The number of threads to use when constructing models.",
"cutoff": "Taxa with a relative abundance smaller than this will "
"be dropped.",
"strict": (
"If true will collapse and match on all taxa ranks up to the "
"specified rank (so on all higher ranks as well). If false "
"(default) will match only on single taxa rank specified before. "
"If using the strict option make sure ranks are named the same as in "
"the used database."
),
"solver": (
"The quadratic and linear programming solver that will be used "
"in the models. Will pick an appropriate one by default. "
"`cplex` and `gurobi` are commercial solvers with free academic "
"licenses and have to be installed manually. See the docs for more info."
),
},
output_descriptions={"community_models": "The community models."},
name="Build community models.",
description=("Builds the metabolic community models for a set of samples."),
citations=[citations["micom"]],
)
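
# Sketch of a typical invocation of the action above (file names and the cutoff
# value are placeholders):
#
#   qiime micom build \
#       --i-abundance table.qza \
#       --i-taxonomy taxonomy.qza \
#       --i-models metabolic_models.qza \
#       --p-cutoff 0.0001 \
#       --p-threads 8 \
#       --o-community-models community_models.qza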
plugin.methods.register_function(
function=q2_micom.minimal_medium,
inputs={"models": CommunityModels[Pickle]},
parameters={
"min_growth": Float % Range(0.0, None, inclusive_start=False),
"threads": Int % Range(1, None),
},
outputs=[("medium", MicomMedium[Global])],
input_descriptions={
"models": (
"A collection of metabolic community models. "
"This should contain on model for each sample."
),
},
parameter_descriptions={
"min_growth": (
"The minimum achievable growth rate for each taxon. "
"The returned growth medium enables all taxa to growth "
"simultaneously with at least this rate."
),
"threads": "The number of threads to use when simulating.",
},
output_descriptions={"medium": "The resulting growth medium."},
name="Obtain a minimal growth medium for models.",
description=(
"Obtains a minimal growth medium for the community models. "
"Please note that this medium does not have any biological "
"feasibility. If you have any knowledge about metabolites present "
"in the environment we recommend you construct the medium by hand."
),
citations=[citations["micom"]],
)
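
# Sketch for the action above; as noted in its description, the resulting medium
# is a computational starting point rather than a biologically validated one
# (file names and the growth rate are placeholders):
#
#   qiime micom minimal-medium \
#       --i-models community_models.qza \
#       --p-min-growth 0.01 \
#       --o-medium minimal_medium.qza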
plugin.methods.register_function(
function=q2_micom.grow,
inputs={
"models": CommunityModels[Pickle],
"medium": MicomMedium[Global | PerSample],
},
parameters={
"tradeoff": Float % Range(0.0, 1.0, inclusive_start=False, inclusive_end=True),
"strategy": Str % Choices("pFBA", "minimal uptake", "none"),
"threads": Int % Range(1, None),
},
outputs=[("results", MicomResults)],
input_descriptions={
"models": (
"A collection of metabolic community models. "
"This should contain on model for each sample."
),
"medium": "The growth medium to use.",
},
parameter_descriptions={
"tradeoff": (
"The tradeoff parameter. This describes the balance "
"between maximizing biomass production of the entire "
"community and biomass production of individual taxa "
'(ergo "egoistic" growth). A value of 1.0 would yield '
"the best biomass production across the community but "
"will only allow a few taxa to grow. Smaller values will "
"allow more taxa to grow but will sacrifice overall "
"biomass. A value of 0.5 (the default) has been shown to "
"best reproduce growth rates in the human gut."
),
"strategy": (
"The strategy used when choosing the solution in the "
"optimal flux space. `minimal uptake` uses the fluxes "
"that result in the smallest total uptake from the environment."
"`pFBA` uses parsimonious Flux Balance Analysis and thus will choose "
"the fluxes with the lowest enzyme requirement for each taxon. "
"`none` will return an arbitrary solution from the optimal flux space."
),
"threads": "The number of threads to use when simulating.",
},
output_descriptions={
"results": "The resulting taxa-level growth rates and metabolic "
"exchange fluxes."
},
name="Simulate growth for community models.",
description=(
"Simulates growth for a set of samples. Note that those are "
'sample-specific or "personalized" simulations, so each taxon '
"may have different growth rates and metabolite usage in each sample."
),
citations=[citations["micom"]],
)
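
# Sketch of a growth simulation with the action above, using the tradeoff of 0.5
# discussed in the parameter description (file names are placeholders):
#
#   qiime micom grow \
#       --i-models community_models.qza \
#       --i-medium medium.qza \
#       --p-tradeoff 0.5 \
#       --p-threads 8 \
#       --o-results growth_results.qza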
plugin.methods.register_function(
function=q2_micom.tradeoff,
inputs={
"models": CommunityModels[Pickle],
"medium": MicomMedium[Global | PerSample],
},
parameters={
"tradeoff_min": Float % Range(0.0, 1.0, inclusive_start=False),
"tradeoff_max": Float % Range(0.0, 1.0, inclusive_end=True),
"step": Float % Range(0.0, 1.0),
"threads": Int,
},
outputs=[("results", TradeoffResults)],
input_descriptions={
"models": (
"A collection of metabolic community models. "
"This should contain on model for each sample."
),
"medium": "The growth medium to use.",
},
parameter_descriptions={
"tradeoff_min": "The minimum tradeoff parameter to test. This should "
"be larger than 0.0 and smaller than 1.0.",
"tradeoff_max": "The maximum tradeoff parameter to test. This should "
"be larger than 0.0 and smaller than 1.0 and also be"
"larger than `tradeoff_min`.",
"step": "The tradeoff value step size to use.",
"threads": "The number of threads to use when simulating.",
},
output_descriptions={
"results": "The resulting taxa-level growth rates for varying "
"tradeoff values."
},
name="Test a variety of tradeoff values.",
description=(
"Simulates growth for a set of samples while varying the tradeoff "
"between community and taxon biomass production. "
"This can be used to characterize a good tradeoff value for a "
"specific set of samples. Our study suggested that a good tradeoff "
"value is the largest value that allows the majority of taxa in the "
"sample to grow."
),
citations=[citations["micom"]],
)
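
# Sketch for the tradeoff scan above; it evaluates tradeoff values from
# tradeoff-min to tradeoff-max in increments of step (values and file names are
# placeholders):
#
#   qiime micom tradeoff \
#       --i-models community_models.qza \
#       --i-medium medium.qza \
#       --p-tradeoff-min 0.1 \
#       --p-tradeoff-max 1.0 \
#       --p-step 0.1 \
#       --o-results tradeoff_results.qza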
plugin.methods.register_function(
function=q2_micom.filter_models,
inputs={"models": CommunityModels[Pickle]},
parameters={"metadata": Metadata, "query": Str, "exclude": Bool},
outputs=[("filtered_models", CommunityModels[Pickle])],
input_descriptions={
"models": (
"A collection of metabolic community models. "
"This should contain on model for each sample."
)
},
parameter_descriptions={
"metadata": "The metadata for the samples to keep or to query.",
"query": (
"A pandas query expression to select samples from the metadata. "
"This will call `query` on the metadata DataFrame, so you can test "
"your query by loading our metadata into a pandas DataFrame."
),
"exclude": (
"If true will use all samples *except* the ones selected "
"by metadata and query."
),
},
output_descriptions={"filtered_models": "The filtered community models."},
name="Filters models for a chosen set of samples.",
description=(
"Select a subset of samples and their community models using a list "
"of samples or a pandas query expression."
),
citations=[citations["micom"]],
)
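
# The `query` parameter above is handed to pandas.DataFrame.query on the sample
# metadata. An illustrative invocation (the metadata columns `diagnosis` and
# `age` are hypothetical):
#
#   qiime micom filter-models \
#       --i-models community_models.qza \
#       --m-metadata-file metadata.tsv \
#       --p-query "diagnosis == 'IBD' and age > 40" \
#       --o-filtered-models ibd_models.qza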
plugin.methods.register_function(
function=q2_micom.filter_results,
inputs={"results": MicomResults},
parameters={"metadata": Metadata, "query": Str, "exclude": Bool},
outputs=[("filtered_results", MicomResults)],
input_descriptions={
"results": (
"A set of MICOM analysis results. "
"Contains predicted groath rates and exchange fluxes."
)
},
parameter_descriptions={
"metadata": "The metadata for the samples to keep or to query.",
"query": (
"A pandas query expression to select samples from the metadata. "
"This will call `query` on the metadata DataFrame, so you can test "
"your query by loading our metadata into a pandas DataFrame."
),
"exclude": (
"If true will use all samples *except* the ones selected "
"by metadata and query."
),
},
output_descriptions={"filtered_results": "The filtered simulation models."},
name="Filters results for a chosen set of samples.",
description=(
"Select a subset of samples and their simulation results using a list "
"of samples or a pandas query expression."
),
citations=[citations["micom"]],
)
plugin.visualizers.register_function(
function=q2_micom.plot_growth,
inputs={"results": MicomResults},
parameters={},
input_descriptions={
"results": (
"A set of MICOM analysis results. "
"Contains predicted groath rates and exchange fluxes."
)
},
parameter_descriptions={},
name="Plot taxa growth rates.",
description=(
"Plot predicted growth rates for each taxon in each sample. "
"Only points with growing taxa are shown (growth rate sufficiently "
"larger than zero)."
),
citations=[citations["micom"]],
)
plugin.visualizers.register_function(
function=q2_micom.exchanges_per_sample,
inputs={"results": MicomResults},
parameters={
"direction": Str % Choices("import", "export"),
"cluster": Bool,
},
input_descriptions={
"results": (
"A set of MICOM analysis results. "
"Contains predicted groath rates and exchange fluxes."
)
},
parameter_descriptions={
"direction": "The direction of the flux.",
"cluster": "Whether to perform clutering on samples and reactions.",
},
name="Plot gloabl exchange rates.",
description=(
"Plot predicted global exchange fluxes for each sample. "
"When plotting imports this corresponds to the consumption "
"fluxes for each metabolite that is available to the community. "
"When plotting export this corresponds to the production fluxes "
"for each metabolite."
),
citations=[citations["micom"]],
)
plugin.visualizers.register_function(
function=q2_micom.exchanges_per_taxon,
inputs={"results": MicomResults},
parameters={
"direction": Str % Choices("import", "export"),
"perplexity": Int % Range(2, None),
},
input_descriptions={
"results": (
"A set of MICOM analysis results. "
"Contains predicted growth rates and exchange fluxes."
)
},
parameter_descriptions={
"direction": "The direction of the flux.",
"perplexity": "TSNE parameter. Relates to the number of neighbors used to "
"calculate distances. Smaller values preserve more local "
"structure and larger values preserve more global structure.",
},
name="Plot niche overlap.",
description=(
"Plot growth or production niches. "
"The entire set of import or export fluxes for each taxon in each "
"sample is reduced onto a single point on a 2D plane."
"Taxa that are close to each other either consume similar metabolites "
" (imports) or produce similar metabolites (exports)."
),
citations=[citations["micom"]],
)
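
# Sketch for the niche-overlap visualizer above; perplexity should stay well
# below the number of taxon/sample points being embedded (values and file names
# are placeholders):
#
#   qiime micom exchanges-per-taxon \
#       --i-results growth_results.qza \
#       --p-direction import \
#       --p-perplexity 15 \
#       --o-visualization niche_overlap.qzv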
plugin.visualizers.register_function(
function=q2_micom.plot_tradeoff,
inputs={"results": TradeoffResults},
parameters={},
input_descriptions={
"results": (
"A set of MICOM tradeoff analysis results. "
"Contains predicted growth rates for each tested tradeoff."
)
},
parameter_descriptions={},
name="Plot tradeoff results.",
description=(
"Plot predicted growth rate distributions for each tradeoff as "
"well as the fraction of growing taxa in each sample and tradeoff "
"value. For a good tradeoff value one usually tries to find the "
"largest tradeoff value that still aloows most taxa to grow."
),
citations=[citations["micom"]],
)
plugin.visualizers.register_function(
function=q2_micom.fit_phenotype,
inputs={"results": MicomResults},
parameters={
"metadata": MetadataColumn[Categorical | Numeric],
"variable_type": Str % Choices("binary", "continuous"),
"flux_type": Str % Choices("import", "production"),
"min_coef": Float % Range(0, None),
},
input_descriptions={
"results": (
"A set of MICOM analysis results. "
"Contains predicted growth rates and exchange fluxes."
),
},
parameter_descriptions={
"metadata": "The metadata variable to use.",
"variable_type": "The type of the phenotype variable.",
"flux_type": "Which fluxes to use.",
"min_coef": (
"Only coefficient with absolute values larger than this " "will be shown."
),
},
name="Test for differential production",
description=(
"Test for overall metabolite production differences " "between two groups."
),
citations=[citations["micom"]],
)
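
# Sketch for the phenotype test above; the metadata column name is a placeholder:
#
#   qiime micom fit-phenotype \
#       --i-results growth_results.qza \
#       --m-metadata-file metadata.tsv \
#       --m-metadata-column diagnosis \
#       --p-variable-type binary \
#       --o-visualization fit.qzv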
importlib.import_module("q2_micom._transform")
For many use cases downloading the prebuilt AGORA DB \"\n \"with the the preferred rank should be sufficient.\"\n ),\n citations=[\n citations[\"agora\"],\n citations[\"agora_reply\"],\n citations[\"micom\"],\n ],\n)\n\nplugin.methods.register_function(\n function=q2_micom.build,\n inputs={\n \"abundance\": FeatureTable[Frequency | RelativeFrequency],\n \"taxonomy\": FeatureData[Taxonomy],\n \"models\": MetabolicModels[JSON],\n },\n parameters={\n \"threads\": Int % Range(1, None),\n \"cutoff\": Float % Range(0.0, 1.0),\n \"strict\": Bool,\n \"solver\": Str % Choices(\"auto\", \"cplex\", \"osqp\", \"gurobi\"),\n },\n outputs=[(\"community_models\", CommunityModels[Pickle])],\n input_descriptions={\n \"abundance\": (\n \"The feature table containing the samples over which beta \"\n \"diversity should be computed.\"\n ),\n \"taxonomy\": \"The taxonomy assignments for the ASVs in the table.\",\n \"models\": \"The single taxon model database to use.\",\n },\n parameter_descriptions={\n \"threads\": \"The number of threads to use when constructing models.\",\n \"cutoff\": \"Taxa with a relative abundance smaller than this will \"\n \"be dropped.\",\n \"strict\": (\n \"If true will collapse and match on all taxa ranks up to the \"\n \"specified rank (so on all higher ranks as well). If false \"\n \"(default) will match only on single taxa rank specified before. \"\n \"If using the strict option make sure ranks are named the same as in \"\n \"the used database.\"\n ),\n \"solver\": (\n \"The quadratic and linear programming solver that will be used \"\n \"in the models. Will pick an appropriate one by default. \"\n \"`cplex` and `gurobi` are commercial solvers with free academic \"\n \"licenses and have to be installed manually. See the docs for more info.\"\n ),\n },\n output_descriptions={\"community_models\": \"The community models.\"},\n name=\"Build community models.\",\n description=(\"Builds the metabolic community models for a set of samples.\"),\n citations=[citations[\"micom\"]],\n)\n\nplugin.methods.register_function(\n function=q2_micom.minimal_medium,\n inputs={\"models\": CommunityModels[Pickle]},\n parameters={\n \"min_growth\": Float % Range(0.0, None, inclusive_start=False),\n \"threads\": Int % Range(1, None),\n },\n outputs=[(\"medium\", MicomMedium[Global])],\n input_descriptions={\n \"models\": (\n \"A collection of metabolic community models. \"\n \"This should contain on model for each sample.\"\n ),\n },\n parameter_descriptions={\n \"min_growth\": (\n \"The minimum achievable growth rate for each taxon. \"\n \"The returned growth medium enables all taxa to growth \"\n \"simultaneously with at least this rate.\"\n ),\n \"threads\": \"The number of threads to use when simulating.\",\n },\n output_descriptions={\"medium\": \"The resulting growth medium.\"},\n name=\"Obtain a minimal growth medium for models.\",\n description=(\n \"Obtains a minimal growth medium for the community models. \"\n \"Please note that this medium does not have any biological \"\n \"feasibility. 
If you have any knowledge about metabolites present \"\n \"in the environment we recommend you construct the medium by hand.\"\n ),\n citations=[citations[\"micom\"]],\n)\n\nplugin.methods.register_function(\n function=q2_micom.grow,\n inputs={\n \"models\": CommunityModels[Pickle],\n \"medium\": MicomMedium[Global | PerSample],\n },\n parameters={\n \"tradeoff\": Float % Range(0.0, 1.0, inclusive_start=False, inclusive_end=True),\n \"strategy\": Str % Choices(\"pFBA\", \"minimal uptake\", \"none\"),\n \"threads\": Int % Range(1, None),\n },\n outputs=[(\"results\", MicomResults)],\n input_descriptions={\n \"models\": (\n \"A collection of metabolic community models. \"\n \"This should contain on model for each sample.\"\n ),\n \"medium\": \"The growth medium to use.\",\n },\n parameter_descriptions={\n \"tradeoff\": (\n \"The tradeoff parameter. This describes the balance \"\n \"between maximizing biomass production of the entire \"\n \"community and biomass production of individual taxa \"\n '(ergo \"egoistic\" growth). A value of 1.0 would yield '\n \"the best biomass production across the community but \"\n \"will only allow a few taxa to grow. Smaller values will \"\n \"allow more taxa to grow but will sacrifice overall \"\n \"biomass. A value of 0.5 (the default) has been shown to \"\n \"best reproduce growth rates in the human gut.\"\n ),\n \"strategy\": (\n \"The strategy used when choosing the solution in the \"\n \"optimal flux space. `minimal uptake` uses the fluxes \"\n \"that result in the smallest total uptake from the environment.\"\n \"`pFBA` uses parsimonious Flux Balance Analysis and thus will choose \"\n \"the fluxes with the lowest enzyme requirement for each taxon. \"\n \"`none` will return an arbitrary solution from the optimal flux space.\"\n ),\n \"threads\": \"The number of threads to use when simulating.\",\n },\n output_descriptions={\n \"results\": \"The resulting taxa-level growth rates and metabolic \"\n \"exchange fluxes.\"\n },\n name=\"Simulate growth for community models.\",\n description=(\n \"Simulates growth for a set of samples. Note that those are \"\n 'sample-specific or \"personalized\" simulations, so each taxon '\n \"may have different growth rates and metabolite usage in each sample.\"\n ),\n citations=[citations[\"micom\"]],\n)\n\nplugin.methods.register_function(\n function=q2_micom.tradeoff,\n inputs={\n \"models\": CommunityModels[Pickle],\n \"medium\": MicomMedium[Global | PerSample],\n },\n parameters={\n \"tradeoff_min\": Float % Range(0.0, 1.0, inclusive_start=False),\n \"tradeoff_max\": Float % Range(0.0, 1.0, inclusive_end=True),\n \"step\": Float % Range(0.0, 1.0),\n \"threads\": Int,\n },\n outputs=[(\"results\", TradeoffResults)],\n input_descriptions={\n \"models\": (\n \"A collection of metabolic community models. \"\n \"This should contain on model for each sample.\"\n ),\n \"medium\": \"The growth medium to use.\",\n },\n parameter_descriptions={\n \"tradeoff_min\": \"The minimum tradeoff parameter to test. This should \"\n \"be larger than 0.0 and smaller than 1.0.\",\n \"tradeoff_max\": \"The maximum tradeoff parameter to test. 
This should \"\n \"be larger than 0.0 and smaller than 1.0 and also be\"\n \"larger than `tradeoff_min`.\",\n \"step\": \"The tradeoff value step size to use.\",\n \"threads\": \"The number of threads to use when simulating.\",\n },\n output_descriptions={\n \"results\": \"The resulting taxa-level growth rates for varying \"\n \"tradeoff values.\"\n },\n name=\"Test a variety of tradeoff values.\",\n description=(\n \"Simulates growth for a set of samples while varying the tradeoff \"\n \"between community and taxon biomass production. \"\n \"This can be used to characterize a good tradeoff value for a \"\n \"specific set of samples. Our study suggested that a good tradeoff \"\n \"value is the largest value that allows the majority of taxa in the \"\n \"sample to grow.\"\n ),\n citations=[citations[\"micom\"]],\n)\n\nplugin.methods.register_function(\n function=q2_micom.filter_models,\n inputs={\"models\": CommunityModels[Pickle]},\n parameters={\"metadata\": Metadata, \"query\": Str, \"exclude\": Bool},\n outputs=[(\"filtered_models\", CommunityModels[Pickle])],\n input_descriptions={\n \"models\": (\n \"A collection of metabolic community models. \"\n \"This should contain on model for each sample.\"\n )\n },\n parameter_descriptions={\n \"metadata\": \"The metadata for the samples to keep or to query.\",\n \"query\": (\n \"A pandas query expression to select samples from the metadata. \"\n \"This will call `query` on the metadata DataFrame, so you can test \"\n \"your query by loading our metadata into a pandas DataFrame.\"\n ),\n \"exclude\": (\n \"If true will use all samples *except* the ones selected \"\n \"by metadata and query.\"\n ),\n },\n output_descriptions={\"filtered_models\": \"The filtered community models.\"},\n name=\"Filters models for a chosen set of samples.\",\n description=(\n \"Select a subset of samples and their community models using a list \"\n \"of samples or a pandas query expression.\"\n ),\n citations=[citations[\"micom\"]],\n)\n\nplugin.methods.register_function(\n function=q2_micom.filter_results,\n inputs={\"results\": MicomResults},\n parameters={\"metadata\": Metadata, \"query\": Str, \"exclude\": Bool},\n outputs=[(\"filtered_results\", MicomResults)],\n input_descriptions={\n \"results\": (\n \"A set of MICOM analysis results. \"\n \"Contains predicted groath rates and exchange fluxes.\"\n )\n },\n parameter_descriptions={\n \"metadata\": \"The metadata for the samples to keep or to query.\",\n \"query\": (\n \"A pandas query expression to select samples from the metadata. \"\n \"This will call `query` on the metadata DataFrame, so you can test \"\n \"your query by loading our metadata into a pandas DataFrame.\"\n ),\n \"exclude\": (\n \"If true will use all samples *except* the ones selected \"\n \"by metadata and query.\"\n ),\n },\n output_descriptions={\"filtered_results\": \"The filtered simulation models.\"},\n name=\"Filters results for a chosen set of samples.\",\n description=(\n \"Select a subset of samples and their simulation results using a list \"\n \"of samples or a pandas query expression.\"\n ),\n citations=[citations[\"micom\"]],\n)\n\nplugin.visualizers.register_function(\n function=q2_micom.plot_growth,\n inputs={\"results\": MicomResults},\n parameters={},\n input_descriptions={\n \"results\": (\n \"A set of MICOM analysis results. 
\"\n \"Contains predicted groath rates and exchange fluxes.\"\n )\n },\n parameter_descriptions={},\n name=\"Plot taxa growth rates.\",\n description=(\n \"Plot predicted growth rates for each taxon in each sample. \"\n \"Only points with growing taxa are shown (growth rate sufficiently \"\n \"larger than zero).\"\n ),\n citations=[citations[\"micom\"]],\n)\n\nplugin.visualizers.register_function(\n function=q2_micom.exchanges_per_sample,\n inputs={\"results\": MicomResults},\n parameters={\n \"direction\": Str % Choices(\"import\", \"export\"),\n \"cluster\": Bool,\n },\n input_descriptions={\n \"results\": (\n \"A set of MICOM analysis results. \"\n \"Contains predicted groath rates and exchange fluxes.\"\n )\n },\n parameter_descriptions={\n \"direction\": \"The direction of the flux.\",\n \"cluster\": \"Whether to perform clutering on samples and reactions.\",\n },\n name=\"Plot gloabl exchange rates.\",\n description=(\n \"Plot predicted global exchange fluxes for each sample. \"\n \"When plotting imports this corresponds to the consumption \"\n \"fluxes for each metabolite that is available to the community. \"\n \"When plotting export this corresponds to the production fluxes \"\n \"for each metabolite.\"\n ),\n citations=[citations[\"micom\"]],\n)\n\n\nplugin.visualizers.register_function(\n function=q2_micom.exchanges_per_taxon,\n inputs={\"results\": MicomResults},\n parameters={\n \"direction\": Str % Choices(\"import\", \"export\"),\n \"perplexity\": Int % Range(2, None),\n },\n input_descriptions={\n \"results\": (\n \"A set of MICOM analysis results. \"\n \"Contains predicted growth rates and exchange fluxes.\"\n )\n },\n parameter_descriptions={\n \"direction\": \"The direction of the flux.\",\n \"perplexity\": \"TSNE parameter. Relates to the number of neighbors used to \"\n \"calculate distances. Smaller values preserve more local \"\n \"structure and larger values preserve more global structure.\",\n },\n name=\"Plot niche overlap.\",\n description=(\n \"Plot growth or production niches. \"\n \"The entire set of import or export fluxes for each taxon in each \"\n \"sample is reduced onto a single point on a 2D plane.\"\n \"Taxa that are close to each other either consume similar metabolites \"\n \" (imports) or produce similar metabolites (exports).\"\n ),\n citations=[citations[\"micom\"]],\n)\n\nplugin.visualizers.register_function(\n function=q2_micom.plot_tradeoff,\n inputs={\"results\": TradeoffResults},\n parameters={},\n input_descriptions={\n \"results\": (\n \"A set of MICOM tradeoff analysis results. \"\n \"Contains predicted growth rates for each tested tradeoff.\"\n )\n },\n parameter_descriptions={},\n name=\"Plot tradeoff results.\",\n description=(\n \"Plot predicted growth rate distributions for each tradeoff as \"\n \"well as the fraction of growing taxa in each sample and tradeoff \"\n \"value. For a good tradeoff value one usually tries to find the \"\n \"largest tradeoff value that still aloows most taxa to grow.\"\n ),\n citations=[citations[\"micom\"]],\n)\n\nplugin.visualizers.register_function(\n function=q2_micom.fit_phenotype,\n inputs={\"results\": MicomResults},\n parameters={\n \"metadata\": MetadataColumn[Categorical | Numeric],\n \"variable_type\": Str % Choices(\"binary\", \"continuous\"),\n \"flux_type\": Str % Choices(\"import\", \"production\"),\n \"min_coef\": Float % Range(0, None),\n },\n input_descriptions={\n \"results\": (\n \"A set of MICOM analysis results. 
\"\n \"Contains predicted growth rates and exchange fluxes.\"\n ),\n },\n parameter_descriptions={\n \"metadata\": \"The metadata variable to use.\",\n \"variable_type\": \"The type of the phenotype variable.\",\n \"flux_type\": \"Which fluxes to use.\",\n \"min_coef\": (\n \"Only coefficient with absolute values larger than this \" \"will be shown.\"\n ),\n },\n name=\"Test for differential production\",\n description=(\n \"Test for overall metabolite production differences \" \"between two groups.\"\n ),\n citations=[citations[\"micom\"]],\n)\n\nimportlib.import_module(\"q2_micom._transform\")\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# Count how many times the arrow patterns '>>-->' and '<--<<' occur in the input string
s = input()
st = '>>-->'
st2 = '<--<<'
sch1 = sch2 = 0
i = 0
j = 0
k = -1
while i != -1:
i = s.find(st, j)
if (k != i) and (i != -1):
k = i
sch1 += 1
j += 1
j = 0
i = 0
k = -1
while i != -1:
i = s.find(st2, j)
if (k != i) and (i != -1):
k = i
sch2 += 1
j += 1
print(sch1+sch2)
|
normal
|
{
"blob_id": "c18e452592d53f22858f2307c60aa997b809c3c3",
"index": 4356,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwhile i != -1:\n i = s.find(st, j)\n if k != i and i != -1:\n k = i\n sch1 += 1\n j += 1\n<mask token>\nwhile i != -1:\n i = s.find(st2, j)\n if k != i and i != -1:\n k = i\n sch2 += 1\n j += 1\nprint(sch1 + sch2)\n",
"step-3": "s = input()\nst = '>>-->'\nst2 = '<--<<'\nsch1 = sch2 = 0\ni = 0\nj = 0\nk = -1\nwhile i != -1:\n i = s.find(st, j)\n if k != i and i != -1:\n k = i\n sch1 += 1\n j += 1\nj = 0\ni = 0\nk = -1\nwhile i != -1:\n i = s.find(st2, j)\n if k != i and i != -1:\n k = i\n sch2 += 1\n j += 1\nprint(sch1 + sch2)\n",
"step-4": "s = input()\nst = '>>-->'\nst2 = '<--<<'\nsch1 = sch2 = 0\ni = 0\nj = 0\nk = -1\nwhile i != -1:\n i = s.find(st, j)\n if (k != i) and (i != -1):\n k = i\n sch1 += 1\n j += 1\nj = 0\ni = 0\nk = -1\nwhile i != -1:\n i = s.find(st2, j)\n if (k != i) and (i != -1):\n k = i\n sch2 += 1\n j += 1\nprint(sch1+sch2)",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
class LinkedList:
def __init__(self):
self.head = None
def insertAtHead(self, newNode, curNode):
newNode.next = curNode
if curNode is not None:
curNode.prev = newNode
self.head = newNode
<|reserved_special_token_0|>
def printForward(self, curNode):
while curNode is not None:
print(curNode.data)
curNode = curNode.next
def printReverse(self, curNode):
while curNode.next is not None:
curNode = curNode.next
while curNode is not None:
print(curNode.data)
curNode = curNode.prev
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class Node:
<|reserved_special_token_0|>
class LinkedList:
def __init__(self):
self.head = None
def insertAtHead(self, newNode, curNode):
newNode.next = curNode
if curNode is not None:
curNode.prev = newNode
self.head = newNode
def insertAtTail(self, newNode, curNode):
if self.head is None:
self.head = newNode
return
while curNode.next is not None:
curNode = curNode.next
curNode.next = newNode
newNode.prev = curNode
def printForward(self, curNode):
while curNode is not None:
print(curNode.data)
curNode = curNode.next
def printReverse(self, curNode):
while curNode.next is not None:
curNode = curNode.next
while curNode is not None:
print(curNode.data)
curNode = curNode.prev
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class Node:
def __init__(self, data):
self.data = data
self.prev = None
self.next = None
class LinkedList:
def __init__(self):
self.head = None
def insertAtHead(self, newNode, curNode):
newNode.next = curNode
if curNode is not None:
curNode.prev = newNode
self.head = newNode
def insertAtTail(self, newNode, curNode):
if self.head is None:
self.head = newNode
return
while curNode.next is not None:
curNode = curNode.next
curNode.next = newNode
newNode.prev = curNode
def printForward(self, curNode):
while curNode is not None:
print(curNode.data)
curNode = curNode.next
def printReverse(self, curNode):
while curNode.next is not None:
curNode = curNode.next
while curNode is not None:
print(curNode.data)
curNode = curNode.prev
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class Node:
def __init__(self, data):
self.data = data
self.prev = None
self.next = None
class LinkedList:
def __init__(self):
self.head = None
def insertAtHead(self, newNode, curNode):
newNode.next = curNode
if curNode is not None:
curNode.prev = newNode
self.head = newNode
def insertAtTail(self, newNode, curNode):
if self.head is None:
self.head = newNode
return
while curNode.next is not None:
curNode = curNode.next
curNode.next = newNode
newNode.prev = curNode
def printForward(self, curNode):
while curNode is not None:
print(curNode.data)
curNode = curNode.next
def printReverse(self, curNode):
while curNode.next is not None:
curNode = curNode.next
while curNode is not None:
print(curNode.data)
curNode = curNode.prev
<|reserved_special_token_0|>
for i in range(3):
newNode = Node(input('Enter data: '))
linkedList.insertAtHead(newNode, linkedList.head)
linkedList.printForward(linkedList.head)
print('######################')
linkedList.printReverse(linkedList.head)
<|reserved_special_token_1|>
class Node:
def __init__(self, data):
self.data = data
self.prev = None
self.next = None
class LinkedList:
def __init__(self):
self.head = None
def insertAtHead(self, newNode, curNode):
newNode.next = curNode
if curNode is not None: curNode.prev = newNode
self.head = newNode
def insertAtTail(self, newNode, curNode):
if self.head is None:
self.head = newNode
return
while curNode.next is not None:
curNode = curNode.next
curNode.next = newNode
newNode.prev = curNode
def printForward(self, curNode):
while curNode is not None:
print(curNode.data)
curNode = curNode.next
def printReverse(self, curNode):
while curNode.next is not None:
curNode = curNode.next
while curNode is not None:
print(curNode.data)
curNode = curNode.prev
################################################
linkedList = LinkedList()
for i in range(3):
newNode = Node(input("Enter data: "))
#linkedList.insertAtTail(newNode, linkedList.head)
linkedList.insertAtHead(newNode, linkedList.head)
linkedList.printForward(linkedList.head)
print("######################")
linkedList.printReverse(linkedList.head)
|
flexible
|
{
"blob_id": "a3cbdecbbfc49e8ac045f4aabbea6b9f54ed3d5f",
"index": 4904,
"step-1": "<mask token>\n\n\nclass LinkedList:\n\n def __init__(self):\n self.head = None\n\n def insertAtHead(self, newNode, curNode):\n newNode.next = curNode\n if curNode is not None:\n curNode.prev = newNode\n self.head = newNode\n <mask token>\n\n def printForward(self, curNode):\n while curNode is not None:\n print(curNode.data)\n curNode = curNode.next\n\n def printReverse(self, curNode):\n while curNode.next is not None:\n curNode = curNode.next\n while curNode is not None:\n print(curNode.data)\n curNode = curNode.prev\n\n\n<mask token>\n",
"step-2": "class Node:\n <mask token>\n\n\nclass LinkedList:\n\n def __init__(self):\n self.head = None\n\n def insertAtHead(self, newNode, curNode):\n newNode.next = curNode\n if curNode is not None:\n curNode.prev = newNode\n self.head = newNode\n\n def insertAtTail(self, newNode, curNode):\n if self.head is None:\n self.head = newNode\n return\n while curNode.next is not None:\n curNode = curNode.next\n curNode.next = newNode\n newNode.prev = curNode\n\n def printForward(self, curNode):\n while curNode is not None:\n print(curNode.data)\n curNode = curNode.next\n\n def printReverse(self, curNode):\n while curNode.next is not None:\n curNode = curNode.next\n while curNode is not None:\n print(curNode.data)\n curNode = curNode.prev\n\n\n<mask token>\n",
"step-3": "class Node:\n\n def __init__(self, data):\n self.data = data\n self.prev = None\n self.next = None\n\n\nclass LinkedList:\n\n def __init__(self):\n self.head = None\n\n def insertAtHead(self, newNode, curNode):\n newNode.next = curNode\n if curNode is not None:\n curNode.prev = newNode\n self.head = newNode\n\n def insertAtTail(self, newNode, curNode):\n if self.head is None:\n self.head = newNode\n return\n while curNode.next is not None:\n curNode = curNode.next\n curNode.next = newNode\n newNode.prev = curNode\n\n def printForward(self, curNode):\n while curNode is not None:\n print(curNode.data)\n curNode = curNode.next\n\n def printReverse(self, curNode):\n while curNode.next is not None:\n curNode = curNode.next\n while curNode is not None:\n print(curNode.data)\n curNode = curNode.prev\n\n\n<mask token>\n",
"step-4": "class Node:\n\n def __init__(self, data):\n self.data = data\n self.prev = None\n self.next = None\n\n\nclass LinkedList:\n\n def __init__(self):\n self.head = None\n\n def insertAtHead(self, newNode, curNode):\n newNode.next = curNode\n if curNode is not None:\n curNode.prev = newNode\n self.head = newNode\n\n def insertAtTail(self, newNode, curNode):\n if self.head is None:\n self.head = newNode\n return\n while curNode.next is not None:\n curNode = curNode.next\n curNode.next = newNode\n newNode.prev = curNode\n\n def printForward(self, curNode):\n while curNode is not None:\n print(curNode.data)\n curNode = curNode.next\n\n def printReverse(self, curNode):\n while curNode.next is not None:\n curNode = curNode.next\n while curNode is not None:\n print(curNode.data)\n curNode = curNode.prev\n\n\n<mask token>\nfor i in range(3):\n newNode = Node(input('Enter data: '))\n linkedList.insertAtHead(newNode, linkedList.head)\nlinkedList.printForward(linkedList.head)\nprint('######################')\nlinkedList.printReverse(linkedList.head)\n",
"step-5": "class Node:\n def __init__(self, data):\n self.data = data\n self.prev = None\n self.next = None\n\n\nclass LinkedList:\n def __init__(self):\n self.head = None\n\n\n def insertAtHead(self, newNode, curNode):\n newNode.next = curNode\n if curNode is not None: curNode.prev = newNode\n self.head = newNode\n\n\n def insertAtTail(self, newNode, curNode):\n if self.head is None:\n self.head = newNode\n return\n \n while curNode.next is not None:\n curNode = curNode.next\n \n curNode.next = newNode\n newNode.prev = curNode\n\n\n def printForward(self, curNode):\n while curNode is not None:\n print(curNode.data)\n curNode = curNode.next\n\n\n def printReverse(self, curNode):\n while curNode.next is not None:\n curNode = curNode.next\n\n while curNode is not None:\n print(curNode.data)\n curNode = curNode.prev\n\n\n################################################\n\n\nlinkedList = LinkedList()\n\nfor i in range(3):\n newNode = Node(input(\"Enter data: \"))\n #linkedList.insertAtTail(newNode, linkedList.head)\n linkedList.insertAtHead(newNode, linkedList.head)\n\nlinkedList.printForward(linkedList.head)\nprint(\"######################\")\nlinkedList.printReverse(linkedList.head)",
"step-ids": [
5,
7,
8,
9,
11
]
}
|
[
5,
7,
8,
9,
11
] |
import ujson as json
import platform
import socket
import os
from pathlib import Path
def test_json(text):
jobj = json.loads(text)
l = len(jobj['coordinates'])
x = 0
y = 0
z = 0
for coord in jobj['coordinates']:
x += coord['x']
y += coord['y']
z += coord['z']
print(x / l)
print(y / l)
print(z / l)
def notify(msg):
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
if not s.connect_ex(("localhost", 9001)):
s.sendall(bytes(msg, 'utf8'))
if __name__ == '__main__':
text = Path('/tmp/1.json').read_text()
notify("%s UltraJSON\t%d" % (platform.python_implementation(), os.getpid()))
test_json(text)
notify("stop")
|
normal
|
{
"blob_id": "6f99b4e4204e85c78f9c02a5cd53cd76f52c022c",
"index": 617,
"step-1": "<mask token>\n\n\ndef notify(msg):\n with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:\n if not s.connect_ex(('localhost', 9001)):\n s.sendall(bytes(msg, 'utf8'))\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef test_json(text):\n jobj = json.loads(text)\n l = len(jobj['coordinates'])\n x = 0\n y = 0\n z = 0\n for coord in jobj['coordinates']:\n x += coord['x']\n y += coord['y']\n z += coord['z']\n print(x / l)\n print(y / l)\n print(z / l)\n\n\ndef notify(msg):\n with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:\n if not s.connect_ex(('localhost', 9001)):\n s.sendall(bytes(msg, 'utf8'))\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef test_json(text):\n jobj = json.loads(text)\n l = len(jobj['coordinates'])\n x = 0\n y = 0\n z = 0\n for coord in jobj['coordinates']:\n x += coord['x']\n y += coord['y']\n z += coord['z']\n print(x / l)\n print(y / l)\n print(z / l)\n\n\ndef notify(msg):\n with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:\n if not s.connect_ex(('localhost', 9001)):\n s.sendall(bytes(msg, 'utf8'))\n\n\nif __name__ == '__main__':\n text = Path('/tmp/1.json').read_text()\n notify('%s UltraJSON\\t%d' % (platform.python_implementation(), os.getpid())\n )\n test_json(text)\n notify('stop')\n",
"step-4": "import ujson as json\nimport platform\nimport socket\nimport os\nfrom pathlib import Path\n\n\ndef test_json(text):\n jobj = json.loads(text)\n l = len(jobj['coordinates'])\n x = 0\n y = 0\n z = 0\n for coord in jobj['coordinates']:\n x += coord['x']\n y += coord['y']\n z += coord['z']\n print(x / l)\n print(y / l)\n print(z / l)\n\n\ndef notify(msg):\n with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:\n if not s.connect_ex(('localhost', 9001)):\n s.sendall(bytes(msg, 'utf8'))\n\n\nif __name__ == '__main__':\n text = Path('/tmp/1.json').read_text()\n notify('%s UltraJSON\\t%d' % (platform.python_implementation(), os.getpid())\n )\n test_json(text)\n notify('stop')\n",
"step-5": "import ujson as json\nimport platform\nimport socket\nimport os\nfrom pathlib import Path\n\ndef test_json(text):\n jobj = json.loads(text)\n l = len(jobj['coordinates'])\n x = 0\n y = 0\n z = 0\n\n for coord in jobj['coordinates']:\n x += coord['x']\n y += coord['y']\n z += coord['z']\n\n print(x / l)\n print(y / l)\n print(z / l)\n\ndef notify(msg):\n with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:\n if not s.connect_ex((\"localhost\", 9001)):\n s.sendall(bytes(msg, 'utf8'))\n\nif __name__ == '__main__':\n text = Path('/tmp/1.json').read_text()\n\n notify(\"%s UltraJSON\\t%d\" % (platform.python_implementation(), os.getpid()))\n\n test_json(text)\n\n notify(\"stop\")\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
def del_flu_sym(x, t=1, Ka=1, Ktt=0.5):
intens = x ** 2
return intens
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def del_flu_sym(x, t=1, Ka=1, Ktt=0.5):
intens = x ** 2
return intens
<|reserved_special_token_0|>
print(dlfl_integral(2))
sym.pprint(dlfl_integral)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def del_flu_sym(x, t=1, Ka=1, Ktt=0.5):
intens = x ** 2
return intens
x = sym.Symbol('x')
t = sym.Symbol('t')
dlfl_integral = sym.integrate(del_flu_sym(x, t), x)
print(dlfl_integral(2))
sym.pprint(dlfl_integral)
<|reserved_special_token_1|>
import sympy as sym
def del_flu_sym(x, t=1, Ka=1, Ktt=0.5):
intens = x ** 2
return intens
x = sym.Symbol('x')
t = sym.Symbol('t')
dlfl_integral = sym.integrate(del_flu_sym(x, t), x)
print(dlfl_integral(2))
sym.pprint(dlfl_integral)
<|reserved_special_token_1|>
# Symbolic integration of the exact solution of the luminescence decay kinetics.
# Because of the complexity of the resulting equations, the coefficients are then
# fitted by the least-squares method and the result is printed.
#
import sympy as sym
def del_flu_sym(x ,t = 1 ,Ka = 1, Ktt = 0.5):
intens = x**2
return intens
x = sym.Symbol('x')
t = sym.Symbol('t')
dlfl_integral = sym.integrate(del_flu_sym(x, t), (x))
print(dlfl_integral.subs(x, 2))  # the integral is a SymPy expression, not callable; substitute x = 2 instead
sym.pprint(dlfl_integral)
|
flexible
|
{
"blob_id": "903a431ac39734338b4d464629b4b04a87dc9e8e",
"index": 1776,
"step-1": "<mask token>\n\n\ndef del_flu_sym(x, t=1, Ka=1, Ktt=0.5):\n intens = x ** 2\n return intens\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef del_flu_sym(x, t=1, Ka=1, Ktt=0.5):\n intens = x ** 2\n return intens\n\n\n<mask token>\nprint(dlfl_integral(2))\nsym.pprint(dlfl_integral)\n",
"step-3": "<mask token>\n\n\ndef del_flu_sym(x, t=1, Ka=1, Ktt=0.5):\n intens = x ** 2\n return intens\n\n\nx = sym.Symbol('x')\nt = sym.Symbol('t')\ndlfl_integral = sym.integrate(del_flu_sym(x, t), x)\nprint(dlfl_integral(2))\nsym.pprint(dlfl_integral)\n",
"step-4": "import sympy as sym\n\n\ndef del_flu_sym(x, t=1, Ka=1, Ktt=0.5):\n intens = x ** 2\n return intens\n\n\nx = sym.Symbol('x')\nt = sym.Symbol('t')\ndlfl_integral = sym.integrate(del_flu_sym(x, t), x)\nprint(dlfl_integral(2))\nsym.pprint(dlfl_integral)\n",
"step-5": "#Интегрирование точного решения кинетик затухания люминесценции символьным методом\n#Из за сложности получаемых уравнений. Последующий подбор коэффициентов методом МНК\n# и печать результата\n#\n\nimport sympy as sym\n\n\ndef del_flu_sym(x ,t = 1 ,Ka = 1, Ktt = 0.5):\n intens = x**2\n return intens\n\n\nx = sym.Symbol('x')\nt = sym.Symbol('t')\ndlfl_integral = sym.integrate(del_flu_sym(x, t), (x))\nprint(dlfl_integral(2))\nsym.pprint(dlfl_integral)\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@pytest.mark.parametrize('input, expected', [pytest.param(5, 12, marks=
pytest.mark.example), pytest.param(50, 20492570929, marks=pytest.mark.
regression)])
def test_challenge116(input, expected):
""" Regression testing challenge116 """
expect(main(input)).to.eq(expected)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
import pytest
from robber import expect
from pemjh.challenge116 import main
@pytest.mark.parametrize('input, expected', [pytest.param(5, 12, marks=
pytest.mark.example), pytest.param(50, 20492570929, marks=pytest.mark.
regression)])
def test_challenge116(input, expected):
""" Regression testing challenge116 """
expect(main(input)).to.eq(expected)
<|reserved_special_token_1|>
""" Tests for challenge116 """
import pytest
from robber import expect
from pemjh.challenge116 import main
@pytest.mark.parametrize('input, expected',
[
pytest.param(5, 12, marks=pytest.mark.example),
pytest.param(50, 20492570929,
marks=pytest.mark.regression)
])
def test_challenge116(input, expected):
""" Regression testing challenge116 """
expect(main(input)).to.eq(expected)
|
flexible
|
{
"blob_id": "c9279434736d4e94564170fe98163ad3be9470b1",
"index": 4844,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\[email protected]('input, expected', [pytest.param(5, 12, marks=\n pytest.mark.example), pytest.param(50, 20492570929, marks=pytest.mark.\n regression)])\ndef test_challenge116(input, expected):\n \"\"\" Regression testing challenge116 \"\"\"\n expect(main(input)).to.eq(expected)\n",
"step-3": "<mask token>\nimport pytest\nfrom robber import expect\nfrom pemjh.challenge116 import main\n\n\[email protected]('input, expected', [pytest.param(5, 12, marks=\n pytest.mark.example), pytest.param(50, 20492570929, marks=pytest.mark.\n regression)])\ndef test_challenge116(input, expected):\n \"\"\" Regression testing challenge116 \"\"\"\n expect(main(input)).to.eq(expected)\n",
"step-4": "\"\"\" Tests for challenge116 \"\"\"\r\nimport pytest\r\nfrom robber import expect\r\nfrom pemjh.challenge116 import main\r\n\r\n\r\[email protected]('input, expected',\r\n [\r\n pytest.param(5, 12, marks=pytest.mark.example),\r\n pytest.param(50, 20492570929,\r\n marks=pytest.mark.regression)\r\n ])\r\ndef test_challenge116(input, expected):\r\n \"\"\" Regression testing challenge116 \"\"\"\r\n expect(main(input)).to.eq(expected)\r\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
# http://www.dalkescientific.com/writings/diary/archive/2007/10/07/wide_finder.html
'''
Making a faster standard library approach
As I was writing an email to Fredrik describing these results,
I came up with another approach to speeding up the performance, using only the standard library.
Fredrik showed that using a two-level filter, with a quick exclusion test using string operations followed by the regular expression test,
was faster than doing only the regular expression test. Quoting him:
The RE engine does indeed use special code for literal prefixes,
but the superlinear substring search algorithm that was introduced in 2.5 is a lot faster in cases like this, so this simple change gives a noticeable speedup.
This works because only about 20% of the lines in the input file match the quick test, and the simple string test is
% python -m timeit -s 's="This is a test. I was here."*4; t="testXYZ"' 't in s'
10000000 loops, best of 3: 0.194 usec per loop
% python -m timeit -s 'import re;s="This is a test. I was here."*4; t=re.compile("testXYZ")' 't.search(s)'
1000000 loops, best of 3: 0.98 usec per loop
% python -c 'print 0.98/0.194'
5.05154639175
%
roughly 5 times faster than the regular expression test.
My observation was that I can defer the regular expression test until later.
Use the quick string test to find all substrings starting with "GET /ongoing/When/" and ending with the " ".
This will include some extra substrings. Tally all of the substrings, including the false positives.
This will do extra work but the tallying code is very fast.
Once the file has been parsed, post-process the counts dictionary and remove those keys which are not allowed by the regular expression.
This works because there are many duplicate keys. Nearly 50% of the entries which pass the quick string test are duplicates.
The keys in the counts dictionary are unique, which means only one regular expression test needs to be done, instead of one for each match.
If most of the entries were under /ongoing/When/ and most were unique then these optimizations would be a net slowdown.
You have to understand your data as well as the software in order to figure out how to improve things, and there will be tradeoffs.
Remember also I mentioned that string operations are available for buffer objects?
This means I can do the fast find directly on the memory-mapped file, rather than using a chunk reader.
I'll do the quick search for the leading part of the pattern to search for, then another search for the trailing " " (space) character.
'''
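# --- Illustrative sketch (not part of the original dalke-wf code) ------------
# A minimal version of the two-level filter described above: a cheap substring
# test excludes the ~80% of lines that cannot match before the slower regular
# expression runs. The function name and its arguments are hypothetical.
def two_level_count(lines, regex, quick="GET /ongoing/When/"):
    from collections import defaultdict
    counts = defaultdict(int)
    for line in lines:
        if quick not in line:        # fast string pre-check
            continue
        m = regex.search(line)       # full regex only on candidate lines
        if m:
            counts[m.group(1)] += 1
    return counts
# e.g.: counts = two_level_count(open(FILE), pat)
# ------------------------------------------------------------------------------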
# dalke-wf-10.py fast string ops, mmap, post-process filter
import re, os, mmap
from collections import defaultdict
FILE = "o1000k.ap"
import time, sys
if sys.platform == "win32":
timer = time.clock
else:
timer = time.time
t0, t1 = timer(), time.clock()
pat = re.compile(r"GET /ongoing/When/\d\d\dx/(\d\d\d\d/\d\d/\d\d/[^ .]+) ")
search = pat.search
def count_file(filename):
count = defaultdict(int)
    fileobj = open(filename)
    filemap = mmap.mmap(fileobj.fileno(), os.path.getsize(filename), access=mmap.ACCESS_READ)
i = j = 0
# For the first pass, including everything which is a reasonable match.
# It's faster to count everything and filter later than it is to do
# the filtering now.
while 1:
i = filemap.find("GET /ongoing/When/", j)
if i == -1:
break
j = filemap.find(' ', i+19)
field = filemap[i:j]
count[field] += 1
# The previous code included fields which aren't allowed by the
# regular expression. Filter those which don't match the regexp.
new_count = {}
for k, v in count.iteritems():
# because of the way the key was saved, I didn't keep the
# trailing space. Add it back here so the regexp can be used unchanged.
k = k + " "
m = pat.search(k)
if m:
new_count[m.group(1)] = v
return new_count
count = count_file(FILE)
for key in sorted(count, key=count.get)[:10]:
pass # print "%40s = %s" % (key, count[key])
print timer() - t0, time.clock() - t1
# sanity check
for key in sorted(count, key=count.get)[-10:]:
print "%40s = %s" % (key, count[key])
'''
Variable lookups in module scope are slower than lookups in local scope so I introduced the count_file function to get a bit more speed.
I didn't generate numbers for this one but experience says it's nearly always a performance advantage.
The resulting dalke-wf-10 code finishes in 1.0s. Yes, you read that right. It's faster than the mmap/findall solution of dalke-wf-7.py, which took 1.3s.
Still not as fast as mxTextTools at 0.7s, but this solution uses only the standard library.
'''
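# --- Illustrative sketch (not part of the original dalke-wf code) ------------
# The scope remark above, made concrete: names used inside a function are fast
# local lookups, while module-level names are dictionary lookups on every
# access, which is why the hot loop lives in count_file(). Names are made up.
SCALE = 3

def scale_in_module_scope(values):
    return [v * SCALE for v in values]   # global lookup of SCALE per element

def scale_in_local_scope(values):
    scale = SCALE                        # bind once to a local name
    return [v * scale for v in values]   # only local lookups inside the loop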
|
normal
|
{
"blob_id": "734fd4c492f2fd31a0459e90e5c4a7468120b4cd",
"index": 2369,
"step-1": "# http://www.dalkescientific.com/writings/diary/archive/2007/10/07/wide_finder.html\n'''\nMaking a faster standard library approach\n\nAs I was writing an email to Fredrik describing these results,\nI came up with another approach to speeding up the performance, using only the standard library.\n\nFredrik showed that using a two-level filter, with a quick exclusion test using string operations followed by the regular expression test,\nwas faster than doing only the regular expression test. Quoting him:\n\nThe RE engine does indeed use special code for literal prefixes,\nbut the superlinear substring search algorithm that was introduced in 2.5 is a lot faster in cases like this, so this simple change gives a noticable speedup.\nThis works because the only about 20% of the lines in the input file matches the quick test and the simple string test is\n\n% python -m timeit -s 's=\"This is a test. I was here.\"*4; t=\"testXYZ\"' 't in s'\n10000000 loops, best of 3: 0.194 usec per loop\n% python -m timeit -s 'import re;s=\"This is a test. I was here.\"*4; t=re.compile(\"testXYZ\")' 't.search(s)'\n1000000 loops, best of 3: 0.98 usec per loop\n% python -c 'print 0.98/0.194'\n5.05154639175\n%\n\nroughly 5 times faster than the regular expression test.\nMy observation was that I can defer the regular expression test until later.\nUse the quick string test to find all substrings starting with \"GET /ongoing/When/\" and ending with the \" \".\nThis will include some extra substrings. Tally all of the substrings, including the false positives.\nThis will do extra work but the tallying code is very fast.\nOnce the file has been parsed, post-process the counts dictionary and remove those keys which are not allowed by the regular expression.\n\nThis works because there are many duplicate keys. 
Nearly 50% of the entries which pass the quick string test are duplicates.\nThe keys in the counts dictionary are unique, which mean only one regular expression test needs to be done, instead of one for each match.\n\nIf most of the entries were under /ongoing/When/ and most were unique then these optimizations would be a net slowdown.\nYou have to understand your data as well as the software in order to figure out how to improve things, and there will be tradeoffs.\n\nRemember also I mentioned that string operations are available for buffer objects?\nThis means I can do the fast find directly on the memory-mapped file, rather than using a chunk reader.\nI'll do the quick search for the leading part of the pattern to search for, then another search for the trailing \" \" (space) character.\n'''\n\n# dalke-wf-10.py fast string ops, mmap, post-process filter\nimport re, os, mmap\nfrom collections import defaultdict\n\nFILE = \"o1000k.ap\"\n\nimport time, sys\nif sys.platform == \"win32\":\n timer = time.clock\nelse:\n timer = time.time\n\nt0, t1 = timer(), time.clock()\n\npat = re.compile(r\"GET /ongoing/When/\\d\\d\\dx/(\\d\\d\\d\\d/\\d\\d/\\d\\d/[^ .]+) \")\nsearch = pat.search\n\n\ndef count_file(filename):\n count = defaultdict(int)\n fileobj = open(FILE)\n filemap = mmap.mmap(fileobj.fileno(), os.path.getsize(FILE), access=mmap.ACCESS_READ)\n i = j = 0\n # For the first pass, including everything which is a reasonable match.\n # It's faster to count everything and filter later than it is to do\n # the filtering now.\n while 1:\n i = filemap.find(\"GET /ongoing/When/\", j)\n if i == -1:\n break\n j = filemap.find(' ', i+19)\n field = filemap[i:j]\n count[field] += 1\n\n # The previous code included fields which aren't allowed by the\n # regular expression. Filter those which don't match the regexp.\n new_count = {}\n for k, v in count.iteritems():\n # because of the way the key was saved, I didn't keep the\n # trailing space. Add it back here so the regexp can be used unchanged.\n k = k + \" \"\n m = pat.search(k)\n if m:\n new_count[m.group(1)] = v\n return new_count\n\n\ncount = count_file(FILE)\n\nfor key in sorted(count, key=count.get)[:10]:\n pass # print \"%40s = %s\" % (key, count[key])\n\nprint timer() - t0, time.clock() - t1\n\n# sanity check\nfor key in sorted(count, key=count.get)[-10:]:\n print \"%40s = %s\" % (key, count[key])\n\n'''\nVariable lookups in module scope are slower than lookups in local scope so I introduced the count_file function to get a bit more speed.\nI didn't generate numbers for this one but experience says it's nearly always a performance advantage.\n\nThe resulting dalke-wf-10 code finishes in 1.0s. Yes, you read that right. It's faster than the mmap/findall solution of dalke-wf-7.py, which took 1.3s.\nStill not as fast as mxTextTools at 0.7s, but this solution uses only the standard library.\n'''",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
import json
from gamestate.gamestate_module import Gamestate
from time import time
from gamestate import action_getter as action_getter
def test_action_getter():
path = "./../Version_1.0/Tests/General/Action_1.json"
document = json.loads(open(path).read())
gamestate = Gamestate.from_document(document["gamestate"])
nloops = 100
total_time = 0
for _ in range(nloops):
t = time()
action_getter.get_actions(gamestate)
total_time += time() - t
print("Time used to find all actions", str(nloops), "times:", str(round(total_time, 3)))
|
normal
|
{
"blob_id": "b16691429d83f6909a08b10cc0b310bb62cd550d",
"index": 3985,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef test_action_getter():\n path = './../Version_1.0/Tests/General/Action_1.json'\n document = json.loads(open(path).read())\n gamestate = Gamestate.from_document(document['gamestate'])\n nloops = 100\n total_time = 0\n for _ in range(nloops):\n t = time()\n action_getter.get_actions(gamestate)\n total_time += time() - t\n print('Time used to find all actions', str(nloops), 'times:', str(round\n (total_time, 3)))\n",
"step-3": "import json\nfrom gamestate.gamestate_module import Gamestate\nfrom time import time\nfrom gamestate import action_getter as action_getter\n\n\ndef test_action_getter():\n path = './../Version_1.0/Tests/General/Action_1.json'\n document = json.loads(open(path).read())\n gamestate = Gamestate.from_document(document['gamestate'])\n nloops = 100\n total_time = 0\n for _ in range(nloops):\n t = time()\n action_getter.get_actions(gamestate)\n total_time += time() - t\n print('Time used to find all actions', str(nloops), 'times:', str(round\n (total_time, 3)))\n",
"step-4": "import json\nfrom gamestate.gamestate_module import Gamestate\nfrom time import time\nfrom gamestate import action_getter as action_getter\n\n\ndef test_action_getter():\n path = \"./../Version_1.0/Tests/General/Action_1.json\"\n document = json.loads(open(path).read())\n gamestate = Gamestate.from_document(document[\"gamestate\"])\n\n nloops = 100\n total_time = 0\n for _ in range(nloops):\n t = time()\n action_getter.get_actions(gamestate)\n total_time += time() - t\n\n print(\"Time used to find all actions\", str(nloops), \"times:\", str(round(total_time, 3)))",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
# Generated by Django 3.0.5 on 2020-05-18 12:50
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('cart', '0010_auto_20200518_1718'),
]
operations = [
migrations.AlterField(
model_name='order',
name='fianl_code',
field=models.PositiveIntegerField(blank=True, null=True),
),
]
|
normal
|
{
"blob_id": "da783355c5f888a66f623fa7eeeaf0e4e9fcfa48",
"index": 4982,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('cart', '0010_auto_20200518_1718')]\n operations = [migrations.AlterField(model_name='order', name=\n 'fianl_code', field=models.PositiveIntegerField(blank=True, null=True))\n ]\n",
"step-4": "from django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n dependencies = [('cart', '0010_auto_20200518_1718')]\n operations = [migrations.AlterField(model_name='order', name=\n 'fianl_code', field=models.PositiveIntegerField(blank=True, null=True))\n ]\n",
"step-5": "# Generated by Django 3.0.5 on 2020-05-18 12:50\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('cart', '0010_auto_20200518_1718'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='order',\n name='fianl_code',\n field=models.PositiveIntegerField(blank=True, null=True),\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def getlessonlist():
path = os.path.expanduser('~/.buzzers')
dirs = os.walk(os.path.expanduser('~/.buzzers/lessons'))
lessons = []
for root, d, fs in dirs:
fullfs = [(root + '/' + f) for f in fs]
lessons.extend(fs)
return lessons
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def savelesson(text):
os.path.expanduser('~/.buzzers/lessons')
def getlessonlist():
path = os.path.expanduser('~/.buzzers')
dirs = os.walk(os.path.expanduser('~/.buzzers/lessons'))
lessons = []
for root, d, fs in dirs:
fullfs = [(root + '/' + f) for f in fs]
lessons.extend(fs)
return lessons
<|reserved_special_token_1|>
import os
def savelesson(text):
os.path.expanduser('~/.buzzers/lessons')
def getlessonlist():
path = os.path.expanduser('~/.buzzers')
dirs = os.walk(os.path.expanduser('~/.buzzers/lessons'))
lessons = []
for root, d, fs in dirs:
fullfs = [(root + '/' + f) for f in fs]
lessons.extend(fs)
return lessons
<|reserved_special_token_1|>
import os
def savelesson(text):
os.path.expanduser("~/.buzzers/lessons")
def getlessonlist():
path = os.path.expanduser("~/.buzzers")
dirs = os.walk(os.path.expanduser("~/.buzzers/lessons"))
#"/home/loadquo/files/lhsgghc/Programs/PCSoftware/src/admin/lessons")
lessons = []
for root, d, fs in dirs:
fullfs = [root +"/"+ f for f in fs]
lessons.extend(fs)
return lessons
|
flexible
|
{
"blob_id": "de003440be513d53b87f526ea95c0fbbc4a9f66f",
"index": 2584,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef getlessonlist():\n path = os.path.expanduser('~/.buzzers')\n dirs = os.walk(os.path.expanduser('~/.buzzers/lessons'))\n lessons = []\n for root, d, fs in dirs:\n fullfs = [(root + '/' + f) for f in fs]\n lessons.extend(fs)\n return lessons\n",
"step-3": "<mask token>\n\n\ndef savelesson(text):\n os.path.expanduser('~/.buzzers/lessons')\n\n\ndef getlessonlist():\n path = os.path.expanduser('~/.buzzers')\n dirs = os.walk(os.path.expanduser('~/.buzzers/lessons'))\n lessons = []\n for root, d, fs in dirs:\n fullfs = [(root + '/' + f) for f in fs]\n lessons.extend(fs)\n return lessons\n",
"step-4": "import os\n\n\ndef savelesson(text):\n os.path.expanduser('~/.buzzers/lessons')\n\n\ndef getlessonlist():\n path = os.path.expanduser('~/.buzzers')\n dirs = os.walk(os.path.expanduser('~/.buzzers/lessons'))\n lessons = []\n for root, d, fs in dirs:\n fullfs = [(root + '/' + f) for f in fs]\n lessons.extend(fs)\n return lessons\n",
"step-5": "import os\n\ndef savelesson(text):\n os.path.expanduser(\"~/.buzzers/lessons\")\n\ndef getlessonlist():\n path = os.path.expanduser(\"~/.buzzers\")\n dirs = os.walk(os.path.expanduser(\"~/.buzzers/lessons\"))\n#\"/home/loadquo/files/lhsgghc/Programs/PCSoftware/src/admin/lessons\")\n lessons = []\n for root, d, fs in dirs: \n fullfs = [root +\"/\"+ f for f in fs]\n lessons.extend(fs)\n return lessons\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
#!/usr/bin/env python
# encoding: utf-8
"""
@description: Ordered dictionary
(notice: since Python 3.6, regular dicts already preserve insertion order)
@author: baoqiang
@time: 2019/11/28 1:34 PM
"""
from collections import OrderedDict
def run206_01():
print('Regular dict:')
# d = {'a':'A','b':'B','c':'C'}
d = {}
d['a'] = 'A'
d['b'] = 'B'
d['c'] = 'C'
for k, v in d.items():
print(k, v)
print('OrderedDict:')
d = OrderedDict()
d['a'] = 'A'
d['b'] = 'B'
d['c'] = 'C'
for k, v in d.items():
print(k, v)
def run206_02():
"""
    Equality comparison: the order of keys must be taken into account
:return:
"""
print('Regular dict:')
d1 = {'a': 'A', 'b': 'B', 'c': 'C'}
d2 = {'c': 'C', 'b': 'B', 'a': 'A'}
print(d1 == d2)
for k, v in d1.items():
print(k, v)
for k, v in d2.items():
print(k, v)
print('OrderedDict:')
d1 = OrderedDict(d1)
d2 = OrderedDict(d2)
print(d1 == d2)
for k, v in d1.items():
print(k, v)
for k, v in d2.items():
print(k, v)
def run206_03():
"""
re ordering
:return:
"""
d = OrderedDict([('a', 'A'), ('b', 'B'), ('c', 'C')])
print('Before:')
for k, v in d.items():
print(k, v)
d.move_to_end('b')
print('\nmove_to_end():')
for k, v in d.items():
print(k, v)
d.move_to_end('b', last=False)
print('\nmove_to_end(last=False):')
for k, v in d.items():
print(k, v)
|
normal
|
{
"blob_id": "4a7d8db2bc3b753ea1a12120e1ad85f31d572dc7",
"index": 4237,
"step-1": "<mask token>\n\n\ndef run206_01():\n print('Regular dict:')\n d = {}\n d['a'] = 'A'\n d['b'] = 'B'\n d['c'] = 'C'\n for k, v in d.items():\n print(k, v)\n print('OrderedDict:')\n d = OrderedDict()\n d['a'] = 'A'\n d['b'] = 'B'\n d['c'] = 'C'\n for k, v in d.items():\n print(k, v)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef run206_01():\n print('Regular dict:')\n d = {}\n d['a'] = 'A'\n d['b'] = 'B'\n d['c'] = 'C'\n for k, v in d.items():\n print(k, v)\n print('OrderedDict:')\n d = OrderedDict()\n d['a'] = 'A'\n d['b'] = 'B'\n d['c'] = 'C'\n for k, v in d.items():\n print(k, v)\n\n\ndef run206_02():\n \"\"\"\n 相等性判断,需要考虑顺序\n :return:\n \"\"\"\n print('Regular dict:')\n d1 = {'a': 'A', 'b': 'B', 'c': 'C'}\n d2 = {'c': 'C', 'b': 'B', 'a': 'A'}\n print(d1 == d2)\n for k, v in d1.items():\n print(k, v)\n for k, v in d2.items():\n print(k, v)\n print('OrderedDict:')\n d1 = OrderedDict(d1)\n d2 = OrderedDict(d2)\n print(d1 == d2)\n for k, v in d1.items():\n print(k, v)\n for k, v in d2.items():\n print(k, v)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef run206_01():\n print('Regular dict:')\n d = {}\n d['a'] = 'A'\n d['b'] = 'B'\n d['c'] = 'C'\n for k, v in d.items():\n print(k, v)\n print('OrderedDict:')\n d = OrderedDict()\n d['a'] = 'A'\n d['b'] = 'B'\n d['c'] = 'C'\n for k, v in d.items():\n print(k, v)\n\n\ndef run206_02():\n \"\"\"\n 相等性判断,需要考虑顺序\n :return:\n \"\"\"\n print('Regular dict:')\n d1 = {'a': 'A', 'b': 'B', 'c': 'C'}\n d2 = {'c': 'C', 'b': 'B', 'a': 'A'}\n print(d1 == d2)\n for k, v in d1.items():\n print(k, v)\n for k, v in d2.items():\n print(k, v)\n print('OrderedDict:')\n d1 = OrderedDict(d1)\n d2 = OrderedDict(d2)\n print(d1 == d2)\n for k, v in d1.items():\n print(k, v)\n for k, v in d2.items():\n print(k, v)\n\n\ndef run206_03():\n \"\"\"\n re ordering\n :return:\n \"\"\"\n d = OrderedDict([('a', 'A'), ('b', 'B'), ('c', 'C')])\n print('Before:')\n for k, v in d.items():\n print(k, v)\n d.move_to_end('b')\n print('\\nmove_to_end():')\n for k, v in d.items():\n print(k, v)\n d.move_to_end('b', last=False)\n print('\\nmove_to_end(last=False):')\n for k, v in d.items():\n print(k, v)\n",
"step-4": "<mask token>\nfrom collections import OrderedDict\n\n\ndef run206_01():\n print('Regular dict:')\n d = {}\n d['a'] = 'A'\n d['b'] = 'B'\n d['c'] = 'C'\n for k, v in d.items():\n print(k, v)\n print('OrderedDict:')\n d = OrderedDict()\n d['a'] = 'A'\n d['b'] = 'B'\n d['c'] = 'C'\n for k, v in d.items():\n print(k, v)\n\n\ndef run206_02():\n \"\"\"\n 相等性判断,需要考虑顺序\n :return:\n \"\"\"\n print('Regular dict:')\n d1 = {'a': 'A', 'b': 'B', 'c': 'C'}\n d2 = {'c': 'C', 'b': 'B', 'a': 'A'}\n print(d1 == d2)\n for k, v in d1.items():\n print(k, v)\n for k, v in d2.items():\n print(k, v)\n print('OrderedDict:')\n d1 = OrderedDict(d1)\n d2 = OrderedDict(d2)\n print(d1 == d2)\n for k, v in d1.items():\n print(k, v)\n for k, v in d2.items():\n print(k, v)\n\n\ndef run206_03():\n \"\"\"\n re ordering\n :return:\n \"\"\"\n d = OrderedDict([('a', 'A'), ('b', 'B'), ('c', 'C')])\n print('Before:')\n for k, v in d.items():\n print(k, v)\n d.move_to_end('b')\n print('\\nmove_to_end():')\n for k, v in d.items():\n print(k, v)\n d.move_to_end('b', last=False)\n print('\\nmove_to_end(last=False):')\n for k, v in d.items():\n print(k, v)\n",
"step-5": "#!/usr/bin/env python\n# encoding: utf-8\n\n\"\"\"\n@description: 有序字典\n(notice: python3.6 以后字典已经有序了)\n\n@author: baoqiang\n@time: 2019/11/28 1:34 下午\n\"\"\"\n\nfrom collections import OrderedDict\n\n\ndef run206_01():\n print('Regular dict:')\n # d = {'a':'A','b':'B','c':'C'}\n d = {}\n d['a'] = 'A'\n d['b'] = 'B'\n d['c'] = 'C'\n for k, v in d.items():\n print(k, v)\n\n print('OrderedDict:')\n d = OrderedDict()\n d['a'] = 'A'\n d['b'] = 'B'\n d['c'] = 'C'\n for k, v in d.items():\n print(k, v)\n\n\ndef run206_02():\n \"\"\"\n 相等性判断,需要考虑顺序\n :return:\n \"\"\"\n print('Regular dict:')\n d1 = {'a': 'A', 'b': 'B', 'c': 'C'}\n d2 = {'c': 'C', 'b': 'B', 'a': 'A'}\n print(d1 == d2)\n\n for k, v in d1.items():\n print(k, v)\n for k, v in d2.items():\n print(k, v)\n\n print('OrderedDict:')\n d1 = OrderedDict(d1)\n d2 = OrderedDict(d2)\n print(d1 == d2)\n\n for k, v in d1.items():\n print(k, v)\n for k, v in d2.items():\n print(k, v)\n\n\ndef run206_03():\n \"\"\"\n re ordering\n :return:\n \"\"\"\n d = OrderedDict([('a', 'A'), ('b', 'B'), ('c', 'C')])\n\n print('Before:')\n for k, v in d.items():\n print(k, v)\n\n d.move_to_end('b')\n print('\\nmove_to_end():')\n for k, v in d.items():\n print(k, v)\n\n d.move_to_end('b', last=False)\n print('\\nmove_to_end(last=False):')\n for k, v in d.items():\n print(k, v)\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
class nodo:
def __init__(self, x, y, n, c):
self.columna = x
self.fila = y
self.nombre = n
self.color = c
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class nodo:
def __init__(self, x, y, n, c):
self.columna = x
self.fila = y
self.nombre = n
self.color = c
<|reserved_special_token_0|>
def leer_archivo_matriz(path):
with open(path, 'r', encoding='utf-8') as f:
lineas = f.readlines()
num_fila = 0
estado = ''
for i in lineas:
if re.search(pattern_matriz, i):
separado = re.findall('\\(.*,.*,.*,.*,.*\\)', i)
separados = separado[0].replace('(', '')
separados = separados.replace(')', '')
separados = re.split(',', separados)
separados[0] = separados[0].replace(' ', '')
separados[1] = separados[1].replace(' ', '')
separados[2] = separados[2].replace("'", '')
separados[2] = separados[2].replace(' ', '')
separados[3] = separados[3].replace(' ', '')
separados[4] = separados[4].replace(' ', '')
propiedades['fila'] = separados[0]
propiedades['columna'] = separados[1]
propiedades['nombre_matriz'] = separados[2]
propiedades['forma_nodo'] = separados[3]
propiedades['matriz_doble'] = separados[4]
elif re.search(pattern_fila, i):
separado2 = re.findall('\\).*', i)
separados2 = separado2[0].replace(')', ' ')
separados2 = separados2.replace(';', '')
separados2 = separados2.replace(' ', '')
separado = re.findall('\\(.*\\)', i)
separados = separado[0].replace('(', '')
separados = separados.replace(')', '')
separados = separados.replace(';', '')
separados = separados.replace(' ', '')
separados = re.split(',', separados)
num = 0
for nom in separados:
nom = nom.replace("'", '')
nom = nom.replace(' ', '')
nodos.append(nodo(num, num_fila, nom, separados2))
num = num + 1
num_fila = num_fila + 1
elif re.search(pattern_nodo, i):
separado = re.findall('\\(.*,.*,.*\\).*;', i)
separados = separado[0].replace('(', '')
separados = separados.replace(')', ',')
separados = separados.replace(';', '')
separados = re.split(',', separados)
separados[0] = separados[0].replace(' ', '')
separados[1] = separados[1].replace(' ', '')
separados[2] = separados[2].replace("'", '')
separados[2] = separados[2].replace(' ', '')
separados[3] = separados[3].replace(' ', '')
nodos.append(nodo(int(separados[0]) - 1, int(separados[1]) -
1, separados[2], separados[3]))
elif re.search(pattern_defecto, i):
separado = re.findall('\\(.*\\).*', i)
separados = separado[0].replace('(', '')
separados = separados.replace(')', ',')
separados = separados.replace(';', '')
separados = re.split(',', separados)
separados[0] = separados[0].replace("'", '')
separados[0] = separados[0].replace(' ', '')
separados[1] = separados[1].replace(' ', '')
for nod in nodos:
if nod.nombre == '#':
nod.nombre = separados[0]
nombre_def = separados[0]
if nod.color == '#':
nod.color = separados[1]
color_def = separados[1]
mat = []
for i in range(0, int(propiedades['columna'])):
mat.append([])
for j in range(0, int(propiedades['fila'])):
mat[i].append(nodo(str(j), str(i), nombre_def, color_def))
for i in range(0, int(propiedades['columna'])):
for j in range(0, int(propiedades['fila'])):
for k in nodos:
if mat[i][j].fila == str(int(k.fila)) and mat[i][j
].columna == str(int(k.columna)):
mat[i][j] = k
matriz = propiedades, mat
graficar_matriz(matriz)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class nodo:
def __init__(self, x, y, n, c):
self.columna = x
self.fila = y
self.nombre = n
self.color = c
pattern_matriz = '[M|m][A|a][T|t][R|r][I|i][Z|z]\\s*\\(.*,.*,.*,.*,.*\\)\\{'
pattern_fila = '[F|f][I|i][L|l][A|a]\\s*\\(.*\\)\\s*.*;'
pattern_nodo = '[N|n][O|o][D|d][O|o]\\s*\\(.*,.*,.*\\).*;'
pattern_defecto = '\\}\\s*[D|d][E|e][F|f][E|e][C|c][T|t][O|o]\\s*\\(.*\\).*'
propiedades = {'fila': '', 'columna': '', 'nombre_matriz': '', 'forma_nodo':
'', 'matriz_doble': ''}
nodos = []
nombre_def = ''
color_def = ''
def leer_archivo_matriz(path):
with open(path, 'r', encoding='utf-8') as f:
lineas = f.readlines()
num_fila = 0
estado = ''
for i in lineas:
if re.search(pattern_matriz, i):
separado = re.findall('\\(.*,.*,.*,.*,.*\\)', i)
separados = separado[0].replace('(', '')
separados = separados.replace(')', '')
separados = re.split(',', separados)
separados[0] = separados[0].replace(' ', '')
separados[1] = separados[1].replace(' ', '')
separados[2] = separados[2].replace("'", '')
separados[2] = separados[2].replace(' ', '')
separados[3] = separados[3].replace(' ', '')
separados[4] = separados[4].replace(' ', '')
propiedades['fila'] = separados[0]
propiedades['columna'] = separados[1]
propiedades['nombre_matriz'] = separados[2]
propiedades['forma_nodo'] = separados[3]
propiedades['matriz_doble'] = separados[4]
elif re.search(pattern_fila, i):
separado2 = re.findall('\\).*', i)
separados2 = separado2[0].replace(')', ' ')
separados2 = separados2.replace(';', '')
separados2 = separados2.replace(' ', '')
separado = re.findall('\\(.*\\)', i)
separados = separado[0].replace('(', '')
separados = separados.replace(')', '')
separados = separados.replace(';', '')
separados = separados.replace(' ', '')
separados = re.split(',', separados)
num = 0
for nom in separados:
nom = nom.replace("'", '')
nom = nom.replace(' ', '')
nodos.append(nodo(num, num_fila, nom, separados2))
num = num + 1
num_fila = num_fila + 1
elif re.search(pattern_nodo, i):
separado = re.findall('\\(.*,.*,.*\\).*;', i)
separados = separado[0].replace('(', '')
separados = separados.replace(')', ',')
separados = separados.replace(';', '')
separados = re.split(',', separados)
separados[0] = separados[0].replace(' ', '')
separados[1] = separados[1].replace(' ', '')
separados[2] = separados[2].replace("'", '')
separados[2] = separados[2].replace(' ', '')
separados[3] = separados[3].replace(' ', '')
nodos.append(nodo(int(separados[0]) - 1, int(separados[1]) -
1, separados[2], separados[3]))
elif re.search(pattern_defecto, i):
separado = re.findall('\\(.*\\).*', i)
separados = separado[0].replace('(', '')
separados = separados.replace(')', ',')
separados = separados.replace(';', '')
separados = re.split(',', separados)
separados[0] = separados[0].replace("'", '')
separados[0] = separados[0].replace(' ', '')
separados[1] = separados[1].replace(' ', '')
for nod in nodos:
if nod.nombre == '#':
nod.nombre = separados[0]
nombre_def = separados[0]
if nod.color == '#':
nod.color = separados[1]
color_def = separados[1]
mat = []
for i in range(0, int(propiedades['columna'])):
mat.append([])
for j in range(0, int(propiedades['fila'])):
mat[i].append(nodo(str(j), str(i), nombre_def, color_def))
for i in range(0, int(propiedades['columna'])):
for j in range(0, int(propiedades['fila'])):
for k in nodos:
if mat[i][j].fila == str(int(k.fila)) and mat[i][j
].columna == str(int(k.columna)):
mat[i][j] = k
matriz = propiedades, mat
graficar_matriz(matriz)
<|reserved_special_token_1|>
import re
from mapa import graficar_lista, graficar_matriz
class nodo:
def __init__(self, x, y, n, c):
self.columna = x
self.fila = y
self.nombre = n
self.color = c
pattern_matriz = '[M|m][A|a][T|t][R|r][I|i][Z|z]\\s*\\(.*,.*,.*,.*,.*\\)\\{'
pattern_fila = '[F|f][I|i][L|l][A|a]\\s*\\(.*\\)\\s*.*;'
pattern_nodo = '[N|n][O|o][D|d][O|o]\\s*\\(.*,.*,.*\\).*;'
pattern_defecto = '\\}\\s*[D|d][E|e][F|f][E|e][C|c][T|t][O|o]\\s*\\(.*\\).*'
propiedades = {'fila': '', 'columna': '', 'nombre_matriz': '', 'forma_nodo':
'', 'matriz_doble': ''}
nodos = []
nombre_def = ''
color_def = ''
def leer_archivo_matriz(path):
with open(path, 'r', encoding='utf-8') as f:
lineas = f.readlines()
num_fila = 0
estado = ''
for i in lineas:
if re.search(pattern_matriz, i):
separado = re.findall('\\(.*,.*,.*,.*,.*\\)', i)
separados = separado[0].replace('(', '')
separados = separados.replace(')', '')
separados = re.split(',', separados)
separados[0] = separados[0].replace(' ', '')
separados[1] = separados[1].replace(' ', '')
separados[2] = separados[2].replace("'", '')
separados[2] = separados[2].replace(' ', '')
separados[3] = separados[3].replace(' ', '')
separados[4] = separados[4].replace(' ', '')
propiedades['fila'] = separados[0]
propiedades['columna'] = separados[1]
propiedades['nombre_matriz'] = separados[2]
propiedades['forma_nodo'] = separados[3]
propiedades['matriz_doble'] = separados[4]
elif re.search(pattern_fila, i):
separado2 = re.findall('\\).*', i)
separados2 = separado2[0].replace(')', ' ')
separados2 = separados2.replace(';', '')
separados2 = separados2.replace(' ', '')
separado = re.findall('\\(.*\\)', i)
separados = separado[0].replace('(', '')
separados = separados.replace(')', '')
separados = separados.replace(';', '')
separados = separados.replace(' ', '')
separados = re.split(',', separados)
num = 0
for nom in separados:
nom = nom.replace("'", '')
nom = nom.replace(' ', '')
nodos.append(nodo(num, num_fila, nom, separados2))
num = num + 1
num_fila = num_fila + 1
elif re.search(pattern_nodo, i):
separado = re.findall('\\(.*,.*,.*\\).*;', i)
separados = separado[0].replace('(', '')
separados = separados.replace(')', ',')
separados = separados.replace(';', '')
separados = re.split(',', separados)
separados[0] = separados[0].replace(' ', '')
separados[1] = separados[1].replace(' ', '')
separados[2] = separados[2].replace("'", '')
separados[2] = separados[2].replace(' ', '')
separados[3] = separados[3].replace(' ', '')
nodos.append(nodo(int(separados[0]) - 1, int(separados[1]) -
1, separados[2], separados[3]))
elif re.search(pattern_defecto, i):
separado = re.findall('\\(.*\\).*', i)
separados = separado[0].replace('(', '')
separados = separados.replace(')', ',')
separados = separados.replace(';', '')
separados = re.split(',', separados)
separados[0] = separados[0].replace("'", '')
separados[0] = separados[0].replace(' ', '')
separados[1] = separados[1].replace(' ', '')
for nod in nodos:
if nod.nombre == '#':
nod.nombre = separados[0]
nombre_def = separados[0]
if nod.color == '#':
nod.color = separados[1]
color_def = separados[1]
mat = []
for i in range(0, int(propiedades['columna'])):
mat.append([])
for j in range(0, int(propiedades['fila'])):
mat[i].append(nodo(str(j), str(i), nombre_def, color_def))
for i in range(0, int(propiedades['columna'])):
for j in range(0, int(propiedades['fila'])):
for k in nodos:
if mat[i][j].fila == str(int(k.fila)) and mat[i][j
].columna == str(int(k.columna)):
mat[i][j] = k
matriz = propiedades, mat
graficar_matriz(matriz)
<|reserved_special_token_1|>
import re
from mapa import graficar_lista, graficar_matriz
class nodo:
def __init__(self, x, y, n, c):
self.columna = x
self.fila = y
self.nombre = n
self.color = c
pattern_matriz = r"[M|m][A|a][T|t][R|r][I|i][Z|z]\s*\(.*,.*,.*,.*,.*\)\{"
pattern_fila = r"[F|f][I|i][L|l][A|a]\s*\(.*\)\s*.*;"
pattern_nodo = r"[N|n][O|o][D|d][O|o]\s*\(.*,.*,.*\).*;"
pattern_defecto = r"\}\s*[D|d][E|e][F|f][E|e][C|c][T|t][O|o]\s*\(.*\).*"
propiedades = {
'fila' : '',
'columna' : '',
'nombre_matriz' : '',
'forma_nodo' : '',
'matriz_doble': '',
}
nodos = []
nombre_def = ""
color_def = ""
def leer_archivo_matriz(path):
with open(path, 'r', encoding='utf-8') as f:
lineas = f.readlines()
num_fila = 0
estado = ""
for i in lineas:
if re.search(pattern_matriz, i):
separado = re.findall(r"\(.*,.*,.*,.*,.*\)",i)
separados = separado[0].replace("(","")
separados = separados.replace(")","")
separados = re.split(r",",separados)
separados[0] = separados[0].replace(" ","")
separados[1] = separados[1].replace(" ","")
separados[2] = separados[2].replace("'","")
separados[2] = separados[2].replace(" ","")
separados[3] = separados[3].replace(" ","")
separados[4] = separados[4].replace(" ","")
            # Assign variables to the dictionary
propiedades['fila'] = separados[0]
propiedades['columna'] = separados[1]
propiedades['nombre_matriz'] = separados[2]
propiedades['forma_nodo'] = separados[3]
propiedades['matriz_doble'] = separados[4]
elif re.search(pattern_fila, i):
separado2 = re.findall(r"\).*",i)
separados2 = separado2[0].replace(")"," ")
separados2 = separados2.replace(";","")
separados2 = separados2.replace(" ","")
separado = re.findall(r"\(.*\)",i)
separados = separado[0].replace("(","")
separados = separados.replace(")","")
separados = separados.replace(";","")
separados = separados.replace(" ","")
separados = re.split(r",",separados)
num = 0
for nom in separados:
nom = nom.replace("'", "")
nom = nom.replace(" ", "")
nodos.append(nodo(num, num_fila, nom, separados2))
num = num+1
num_fila = num_fila + 1
elif re.search(pattern_nodo, i):
separado = re.findall(r"\(.*,.*,.*\).*;",i)
separados = separado[0].replace("(","")
separados = separados.replace(")",",")
separados = separados.replace(";","")
separados = re.split(r",",separados)
separados[0] = separados[0].replace(" ","")
separados[1] = separados[1].replace(" ","")
separados[2] = separados[2].replace("'","")
separados[2] = separados[2].replace(" ","")
separados[3] = separados[3].replace(" ","")
nodos.append(nodo(int(separados[0])-1, int(separados[1])-1, separados[2], separados[3]))
elif re.search(pattern_defecto, i):
separado = re.findall(r"\(.*\).*",i)
separados = separado[0].replace("(","")
separados = separados.replace(")",",")
separados = separados.replace(";","")
separados = re.split(r",",separados)
separados[0] = separados[0].replace("'","")
separados[0] = separados[0].replace(" ","")
separados[1] = separados[1].replace(" ","")
for nod in nodos:
if nod.nombre == "#":
nod.nombre = separados[0]
nombre_def = separados[0]
if nod.color == "#":
nod.color = separados[1]
color_def = separados[1]
mat = []
for i in range(0,int(propiedades["columna"])):
mat.append([])
for j in range(0, int(propiedades["fila"])):
mat[i].append(nodo(str(j),str(i),nombre_def, color_def))
for i in range(0,int(propiedades["columna"])):
for j in range(0, int(propiedades["fila"])):
for k in nodos:
if mat[i][j].fila == str(int(k.fila)) and mat[i][j].columna == str(int(k.columna)):
mat[i][j] = k
# for i in range(0,int(propiedades["columna"])):
# for j in range(0, int(propiedades["fila"])):
# print(mat[i][j].fila, mat[i][j].columna,mat[i][j].nombre, mat[i][j].color)
# print(mat)
matriz = (propiedades, mat)
# for i in nodos:
# print(i.nombre, i.color, i.columna, i.fila)
graficar_matriz(matriz)
# leer_archivo_matriz("Matriz.lfp")
|
flexible
|
{
"blob_id": "70373c74e459efb2a310d94ae906910423e8bfd4",
"index": 6631,
"step-1": "<mask token>\n\n\nclass nodo:\n\n def __init__(self, x, y, n, c):\n self.columna = x\n self.fila = y\n self.nombre = n\n self.color = c\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass nodo:\n\n def __init__(self, x, y, n, c):\n self.columna = x\n self.fila = y\n self.nombre = n\n self.color = c\n\n\n<mask token>\n\n\ndef leer_archivo_matriz(path):\n with open(path, 'r', encoding='utf-8') as f:\n lineas = f.readlines()\n num_fila = 0\n estado = ''\n for i in lineas:\n if re.search(pattern_matriz, i):\n separado = re.findall('\\\\(.*,.*,.*,.*,.*\\\\)', i)\n separados = separado[0].replace('(', '')\n separados = separados.replace(')', '')\n separados = re.split(',', separados)\n separados[0] = separados[0].replace(' ', '')\n separados[1] = separados[1].replace(' ', '')\n separados[2] = separados[2].replace(\"'\", '')\n separados[2] = separados[2].replace(' ', '')\n separados[3] = separados[3].replace(' ', '')\n separados[4] = separados[4].replace(' ', '')\n propiedades['fila'] = separados[0]\n propiedades['columna'] = separados[1]\n propiedades['nombre_matriz'] = separados[2]\n propiedades['forma_nodo'] = separados[3]\n propiedades['matriz_doble'] = separados[4]\n elif re.search(pattern_fila, i):\n separado2 = re.findall('\\\\).*', i)\n separados2 = separado2[0].replace(')', ' ')\n separados2 = separados2.replace(';', '')\n separados2 = separados2.replace(' ', '')\n separado = re.findall('\\\\(.*\\\\)', i)\n separados = separado[0].replace('(', '')\n separados = separados.replace(')', '')\n separados = separados.replace(';', '')\n separados = separados.replace(' ', '')\n separados = re.split(',', separados)\n num = 0\n for nom in separados:\n nom = nom.replace(\"'\", '')\n nom = nom.replace(' ', '')\n nodos.append(nodo(num, num_fila, nom, separados2))\n num = num + 1\n num_fila = num_fila + 1\n elif re.search(pattern_nodo, i):\n separado = re.findall('\\\\(.*,.*,.*\\\\).*;', i)\n separados = separado[0].replace('(', '')\n separados = separados.replace(')', ',')\n separados = separados.replace(';', '')\n separados = re.split(',', separados)\n separados[0] = separados[0].replace(' ', '')\n separados[1] = separados[1].replace(' ', '')\n separados[2] = separados[2].replace(\"'\", '')\n separados[2] = separados[2].replace(' ', '')\n separados[3] = separados[3].replace(' ', '')\n nodos.append(nodo(int(separados[0]) - 1, int(separados[1]) -\n 1, separados[2], separados[3]))\n elif re.search(pattern_defecto, i):\n separado = re.findall('\\\\(.*\\\\).*', i)\n separados = separado[0].replace('(', '')\n separados = separados.replace(')', ',')\n separados = separados.replace(';', '')\n separados = re.split(',', separados)\n separados[0] = separados[0].replace(\"'\", '')\n separados[0] = separados[0].replace(' ', '')\n separados[1] = separados[1].replace(' ', '')\n for nod in nodos:\n if nod.nombre == '#':\n nod.nombre = separados[0]\n nombre_def = separados[0]\n if nod.color == '#':\n nod.color = separados[1]\n color_def = separados[1]\n mat = []\n for i in range(0, int(propiedades['columna'])):\n mat.append([])\n for j in range(0, int(propiedades['fila'])):\n mat[i].append(nodo(str(j), str(i), nombre_def, color_def))\n for i in range(0, int(propiedades['columna'])):\n for j in range(0, int(propiedades['fila'])):\n for k in nodos:\n if mat[i][j].fila == str(int(k.fila)) and mat[i][j\n ].columna == str(int(k.columna)):\n mat[i][j] = k\n matriz = propiedades, mat\n graficar_matriz(matriz)\n",
"step-3": "<mask token>\n\n\nclass nodo:\n\n def __init__(self, x, y, n, c):\n self.columna = x\n self.fila = y\n self.nombre = n\n self.color = c\n\n\npattern_matriz = '[M|m][A|a][T|t][R|r][I|i][Z|z]\\\\s*\\\\(.*,.*,.*,.*,.*\\\\)\\\\{'\npattern_fila = '[F|f][I|i][L|l][A|a]\\\\s*\\\\(.*\\\\)\\\\s*.*;'\npattern_nodo = '[N|n][O|o][D|d][O|o]\\\\s*\\\\(.*,.*,.*\\\\).*;'\npattern_defecto = '\\\\}\\\\s*[D|d][E|e][F|f][E|e][C|c][T|t][O|o]\\\\s*\\\\(.*\\\\).*'\npropiedades = {'fila': '', 'columna': '', 'nombre_matriz': '', 'forma_nodo':\n '', 'matriz_doble': ''}\nnodos = []\nnombre_def = ''\ncolor_def = ''\n\n\ndef leer_archivo_matriz(path):\n with open(path, 'r', encoding='utf-8') as f:\n lineas = f.readlines()\n num_fila = 0\n estado = ''\n for i in lineas:\n if re.search(pattern_matriz, i):\n separado = re.findall('\\\\(.*,.*,.*,.*,.*\\\\)', i)\n separados = separado[0].replace('(', '')\n separados = separados.replace(')', '')\n separados = re.split(',', separados)\n separados[0] = separados[0].replace(' ', '')\n separados[1] = separados[1].replace(' ', '')\n separados[2] = separados[2].replace(\"'\", '')\n separados[2] = separados[2].replace(' ', '')\n separados[3] = separados[3].replace(' ', '')\n separados[4] = separados[4].replace(' ', '')\n propiedades['fila'] = separados[0]\n propiedades['columna'] = separados[1]\n propiedades['nombre_matriz'] = separados[2]\n propiedades['forma_nodo'] = separados[3]\n propiedades['matriz_doble'] = separados[4]\n elif re.search(pattern_fila, i):\n separado2 = re.findall('\\\\).*', i)\n separados2 = separado2[0].replace(')', ' ')\n separados2 = separados2.replace(';', '')\n separados2 = separados2.replace(' ', '')\n separado = re.findall('\\\\(.*\\\\)', i)\n separados = separado[0].replace('(', '')\n separados = separados.replace(')', '')\n separados = separados.replace(';', '')\n separados = separados.replace(' ', '')\n separados = re.split(',', separados)\n num = 0\n for nom in separados:\n nom = nom.replace(\"'\", '')\n nom = nom.replace(' ', '')\n nodos.append(nodo(num, num_fila, nom, separados2))\n num = num + 1\n num_fila = num_fila + 1\n elif re.search(pattern_nodo, i):\n separado = re.findall('\\\\(.*,.*,.*\\\\).*;', i)\n separados = separado[0].replace('(', '')\n separados = separados.replace(')', ',')\n separados = separados.replace(';', '')\n separados = re.split(',', separados)\n separados[0] = separados[0].replace(' ', '')\n separados[1] = separados[1].replace(' ', '')\n separados[2] = separados[2].replace(\"'\", '')\n separados[2] = separados[2].replace(' ', '')\n separados[3] = separados[3].replace(' ', '')\n nodos.append(nodo(int(separados[0]) - 1, int(separados[1]) -\n 1, separados[2], separados[3]))\n elif re.search(pattern_defecto, i):\n separado = re.findall('\\\\(.*\\\\).*', i)\n separados = separado[0].replace('(', '')\n separados = separados.replace(')', ',')\n separados = separados.replace(';', '')\n separados = re.split(',', separados)\n separados[0] = separados[0].replace(\"'\", '')\n separados[0] = separados[0].replace(' ', '')\n separados[1] = separados[1].replace(' ', '')\n for nod in nodos:\n if nod.nombre == '#':\n nod.nombre = separados[0]\n nombre_def = separados[0]\n if nod.color == '#':\n nod.color = separados[1]\n color_def = separados[1]\n mat = []\n for i in range(0, int(propiedades['columna'])):\n mat.append([])\n for j in range(0, int(propiedades['fila'])):\n mat[i].append(nodo(str(j), str(i), nombre_def, color_def))\n for i in range(0, int(propiedades['columna'])):\n for j in range(0, int(propiedades['fila'])):\n 
for k in nodos:\n if mat[i][j].fila == str(int(k.fila)) and mat[i][j\n ].columna == str(int(k.columna)):\n mat[i][j] = k\n matriz = propiedades, mat\n graficar_matriz(matriz)\n",
"step-4": "import re\nfrom mapa import graficar_lista, graficar_matriz\n\n\nclass nodo:\n\n def __init__(self, x, y, n, c):\n self.columna = x\n self.fila = y\n self.nombre = n\n self.color = c\n\n\npattern_matriz = '[M|m][A|a][T|t][R|r][I|i][Z|z]\\\\s*\\\\(.*,.*,.*,.*,.*\\\\)\\\\{'\npattern_fila = '[F|f][I|i][L|l][A|a]\\\\s*\\\\(.*\\\\)\\\\s*.*;'\npattern_nodo = '[N|n][O|o][D|d][O|o]\\\\s*\\\\(.*,.*,.*\\\\).*;'\npattern_defecto = '\\\\}\\\\s*[D|d][E|e][F|f][E|e][C|c][T|t][O|o]\\\\s*\\\\(.*\\\\).*'\npropiedades = {'fila': '', 'columna': '', 'nombre_matriz': '', 'forma_nodo':\n '', 'matriz_doble': ''}\nnodos = []\nnombre_def = ''\ncolor_def = ''\n\n\ndef leer_archivo_matriz(path):\n with open(path, 'r', encoding='utf-8') as f:\n lineas = f.readlines()\n num_fila = 0\n estado = ''\n for i in lineas:\n if re.search(pattern_matriz, i):\n separado = re.findall('\\\\(.*,.*,.*,.*,.*\\\\)', i)\n separados = separado[0].replace('(', '')\n separados = separados.replace(')', '')\n separados = re.split(',', separados)\n separados[0] = separados[0].replace(' ', '')\n separados[1] = separados[1].replace(' ', '')\n separados[2] = separados[2].replace(\"'\", '')\n separados[2] = separados[2].replace(' ', '')\n separados[3] = separados[3].replace(' ', '')\n separados[4] = separados[4].replace(' ', '')\n propiedades['fila'] = separados[0]\n propiedades['columna'] = separados[1]\n propiedades['nombre_matriz'] = separados[2]\n propiedades['forma_nodo'] = separados[3]\n propiedades['matriz_doble'] = separados[4]\n elif re.search(pattern_fila, i):\n separado2 = re.findall('\\\\).*', i)\n separados2 = separado2[0].replace(')', ' ')\n separados2 = separados2.replace(';', '')\n separados2 = separados2.replace(' ', '')\n separado = re.findall('\\\\(.*\\\\)', i)\n separados = separado[0].replace('(', '')\n separados = separados.replace(')', '')\n separados = separados.replace(';', '')\n separados = separados.replace(' ', '')\n separados = re.split(',', separados)\n num = 0\n for nom in separados:\n nom = nom.replace(\"'\", '')\n nom = nom.replace(' ', '')\n nodos.append(nodo(num, num_fila, nom, separados2))\n num = num + 1\n num_fila = num_fila + 1\n elif re.search(pattern_nodo, i):\n separado = re.findall('\\\\(.*,.*,.*\\\\).*;', i)\n separados = separado[0].replace('(', '')\n separados = separados.replace(')', ',')\n separados = separados.replace(';', '')\n separados = re.split(',', separados)\n separados[0] = separados[0].replace(' ', '')\n separados[1] = separados[1].replace(' ', '')\n separados[2] = separados[2].replace(\"'\", '')\n separados[2] = separados[2].replace(' ', '')\n separados[3] = separados[3].replace(' ', '')\n nodos.append(nodo(int(separados[0]) - 1, int(separados[1]) -\n 1, separados[2], separados[3]))\n elif re.search(pattern_defecto, i):\n separado = re.findall('\\\\(.*\\\\).*', i)\n separados = separado[0].replace('(', '')\n separados = separados.replace(')', ',')\n separados = separados.replace(';', '')\n separados = re.split(',', separados)\n separados[0] = separados[0].replace(\"'\", '')\n separados[0] = separados[0].replace(' ', '')\n separados[1] = separados[1].replace(' ', '')\n for nod in nodos:\n if nod.nombre == '#':\n nod.nombre = separados[0]\n nombre_def = separados[0]\n if nod.color == '#':\n nod.color = separados[1]\n color_def = separados[1]\n mat = []\n for i in range(0, int(propiedades['columna'])):\n mat.append([])\n for j in range(0, int(propiedades['fila'])):\n mat[i].append(nodo(str(j), str(i), nombre_def, color_def))\n for i in range(0, int(propiedades['columna'])):\n 
for j in range(0, int(propiedades['fila'])):\n for k in nodos:\n if mat[i][j].fila == str(int(k.fila)) and mat[i][j\n ].columna == str(int(k.columna)):\n mat[i][j] = k\n matriz = propiedades, mat\n graficar_matriz(matriz)\n",
"step-5": "import re\nfrom mapa import graficar_lista, graficar_matriz\n\nclass nodo:\n def __init__(self, x, y, n, c):\n self.columna = x\n self.fila = y\n self.nombre = n\n self.color = c\n\n\npattern_matriz = r\"[M|m][A|a][T|t][R|r][I|i][Z|z]\\s*\\(.*,.*,.*,.*,.*\\)\\{\"\npattern_fila = r\"[F|f][I|i][L|l][A|a]\\s*\\(.*\\)\\s*.*;\"\npattern_nodo = r\"[N|n][O|o][D|d][O|o]\\s*\\(.*,.*,.*\\).*;\"\npattern_defecto = r\"\\}\\s*[D|d][E|e][F|f][E|e][C|c][T|t][O|o]\\s*\\(.*\\).*\"\n\npropiedades = {\n 'fila' : '',\n 'columna' : '',\n 'nombre_matriz' : '',\n 'forma_nodo' : '',\n 'matriz_doble': '',\n}\n\nnodos = []\nnombre_def = \"\"\ncolor_def = \"\"\ndef leer_archivo_matriz(path): \n with open(path, 'r', encoding='utf-8') as f:\n lineas = f.readlines()\n num_fila = 0\n estado = \"\"\n for i in lineas:\n if re.search(pattern_matriz, i):\n separado = re.findall(r\"\\(.*,.*,.*,.*,.*\\)\",i)\n separados = separado[0].replace(\"(\",\"\")\n separados = separados.replace(\")\",\"\")\n separados = re.split(r\",\",separados)\n separados[0] = separados[0].replace(\" \",\"\")\n separados[1] = separados[1].replace(\" \",\"\")\n separados[2] = separados[2].replace(\"'\",\"\")\n separados[2] = separados[2].replace(\" \",\"\")\n separados[3] = separados[3].replace(\" \",\"\")\n separados[4] = separados[4].replace(\" \",\"\")\n\n #Asignar Variables al diccionario\n propiedades['fila'] = separados[0]\n propiedades['columna'] = separados[1]\n propiedades['nombre_matriz'] = separados[2]\n propiedades['forma_nodo'] = separados[3]\n propiedades['matriz_doble'] = separados[4]\n\n elif re.search(pattern_fila, i):\n separado2 = re.findall(r\"\\).*\",i)\n separados2 = separado2[0].replace(\")\",\" \")\n separados2 = separados2.replace(\";\",\"\")\n separados2 = separados2.replace(\" \",\"\")\n\n separado = re.findall(r\"\\(.*\\)\",i)\n separados = separado[0].replace(\"(\",\"\")\n separados = separados.replace(\")\",\"\")\n separados = separados.replace(\";\",\"\")\n separados = separados.replace(\" \",\"\")\n\n separados = re.split(r\",\",separados)\n num = 0\n for nom in separados:\n nom = nom.replace(\"'\", \"\")\n nom = nom.replace(\" \", \"\")\n nodos.append(nodo(num, num_fila, nom, separados2))\n num = num+1 \n\n num_fila = num_fila + 1\n\n elif re.search(pattern_nodo, i):\n separado = re.findall(r\"\\(.*,.*,.*\\).*;\",i)\n separados = separado[0].replace(\"(\",\"\")\n separados = separados.replace(\")\",\",\")\n separados = separados.replace(\";\",\"\")\n\n separados = re.split(r\",\",separados)\n separados[0] = separados[0].replace(\" \",\"\")\n separados[1] = separados[1].replace(\" \",\"\")\n separados[2] = separados[2].replace(\"'\",\"\")\n separados[2] = separados[2].replace(\" \",\"\")\n separados[3] = separados[3].replace(\" \",\"\")\n\n nodos.append(nodo(int(separados[0])-1, int(separados[1])-1, separados[2], separados[3]))\n \n elif re.search(pattern_defecto, i):\n separado = re.findall(r\"\\(.*\\).*\",i)\n separados = separado[0].replace(\"(\",\"\")\n separados = separados.replace(\")\",\",\")\n separados = separados.replace(\";\",\"\")\n\n separados = re.split(r\",\",separados)\n separados[0] = separados[0].replace(\"'\",\"\")\n separados[0] = separados[0].replace(\" \",\"\")\n separados[1] = separados[1].replace(\" \",\"\")\n\n for nod in nodos:\n if nod.nombre == \"#\":\n nod.nombre = separados[0]\n nombre_def = separados[0]\n if nod.color == \"#\":\n nod.color = separados[1]\n color_def = separados[1]\n \n mat = []\n for i in range(0,int(propiedades[\"columna\"])):\n mat.append([])\n for j in 
range(0, int(propiedades[\"fila\"])):\n mat[i].append(nodo(str(j),str(i),nombre_def, color_def))\n \n for i in range(0,int(propiedades[\"columna\"])):\n for j in range(0, int(propiedades[\"fila\"])):\n for k in nodos:\n if mat[i][j].fila == str(int(k.fila)) and mat[i][j].columna == str(int(k.columna)):\n mat[i][j] = k\n \n # for i in range(0,int(propiedades[\"columna\"])):\n # for j in range(0, int(propiedades[\"fila\"])):\n # print(mat[i][j].fila, mat[i][j].columna,mat[i][j].nombre, mat[i][j].color)\n \n # print(mat)\n\n \n matriz = (propiedades, mat)\n\n # for i in nodos:\n # print(i.nombre, i.color, i.columna, i.fila)\n\n graficar_matriz(matriz)\n \n# leer_archivo_matriz(\"Matriz.lfp\")",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
# -- !/python3.10
# Mikhail (myke) Kolodin, 2021
# 2021-10-21 2021-10-21 1.2
# retext.py
# Replace the given word in the input text with a random choice
# from the supplied set of substitutes.
# Parameters are passed on the command line.
import re, random, sys
fin = 'retext-in.txt'
fot = 'retext-out.txt'
t1 = """
here we go again and we know:
here we do the same
"""
def redo(text: str, aword: str, subs: list) -> str:
""" заменятель """
return re.sub(f'(\W){aword}(\W)', r"\1"+random.choice(subs)+r"\2", " "+text+" ").strip()
def test1():
""" тестировщик """
w = "we"
s = ["they", "he", "she"]
print(w, "->", s, "\n", t1, "\n", redo(t1, w, s))
#test1()
def main():
""" запуск """
print("got params:", sys.argv)
argc = len(sys.argv)
if argc < 3:
print("Not enough parameters")
return
w, *subs = sys.argv[1:]
print(w, subs)
with open(fin) as fi:
text = fi.read()
out = redo(text, w, subs)
print("text:", text)
print("out:", out)
with open(fot, 'w') as fo:
fo.write(out)
main()
|
normal
|
{
"blob_id": "d1a179acfda9e76a11f362671fafb50773e2b9d3",
"index": 9405,
"step-1": "<mask token>\n\n\ndef redo(text: str, aword: str, subs: list) ->str:\n \"\"\" заменятель \"\"\"\n return re.sub(f'(\\\\W){aword}(\\\\W)', '\\\\1' + random.choice(subs) + '\\\\2',\n ' ' + text + ' ').strip()\n\n\ndef test1():\n \"\"\" тестировщик \"\"\"\n w = 'we'\n s = ['they', 'he', 'she']\n print(w, '->', s, '\\n', t1, '\\n', redo(t1, w, s))\n\n\ndef main():\n \"\"\" запуск \"\"\"\n print('got params:', sys.argv)\n argc = len(sys.argv)\n if argc < 3:\n print('Not enough parameters')\n return\n w, *subs = sys.argv[1:]\n print(w, subs)\n with open(fin) as fi:\n text = fi.read()\n out = redo(text, w, subs)\n print('text:', text)\n print('out:', out)\n with open(fot, 'w') as fo:\n fo.write(out)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef redo(text: str, aword: str, subs: list) ->str:\n \"\"\" заменятель \"\"\"\n return re.sub(f'(\\\\W){aword}(\\\\W)', '\\\\1' + random.choice(subs) + '\\\\2',\n ' ' + text + ' ').strip()\n\n\ndef test1():\n \"\"\" тестировщик \"\"\"\n w = 'we'\n s = ['they', 'he', 'she']\n print(w, '->', s, '\\n', t1, '\\n', redo(t1, w, s))\n\n\ndef main():\n \"\"\" запуск \"\"\"\n print('got params:', sys.argv)\n argc = len(sys.argv)\n if argc < 3:\n print('Not enough parameters')\n return\n w, *subs = sys.argv[1:]\n print(w, subs)\n with open(fin) as fi:\n text = fi.read()\n out = redo(text, w, subs)\n print('text:', text)\n print('out:', out)\n with open(fot, 'w') as fo:\n fo.write(out)\n\n\nmain()\n",
"step-3": "<mask token>\nfin = 'retext-in.txt'\nfot = 'retext-out.txt'\nt1 = \"\"\"\nhere we go again and we know:\nhere we do the same\n\"\"\"\n\n\ndef redo(text: str, aword: str, subs: list) ->str:\n \"\"\" заменятель \"\"\"\n return re.sub(f'(\\\\W){aword}(\\\\W)', '\\\\1' + random.choice(subs) + '\\\\2',\n ' ' + text + ' ').strip()\n\n\ndef test1():\n \"\"\" тестировщик \"\"\"\n w = 'we'\n s = ['they', 'he', 'she']\n print(w, '->', s, '\\n', t1, '\\n', redo(t1, w, s))\n\n\ndef main():\n \"\"\" запуск \"\"\"\n print('got params:', sys.argv)\n argc = len(sys.argv)\n if argc < 3:\n print('Not enough parameters')\n return\n w, *subs = sys.argv[1:]\n print(w, subs)\n with open(fin) as fi:\n text = fi.read()\n out = redo(text, w, subs)\n print('text:', text)\n print('out:', out)\n with open(fot, 'w') as fo:\n fo.write(out)\n\n\nmain()\n",
"step-4": "import re, random, sys\nfin = 'retext-in.txt'\nfot = 'retext-out.txt'\nt1 = \"\"\"\nhere we go again and we know:\nhere we do the same\n\"\"\"\n\n\ndef redo(text: str, aword: str, subs: list) ->str:\n \"\"\" заменятель \"\"\"\n return re.sub(f'(\\\\W){aword}(\\\\W)', '\\\\1' + random.choice(subs) + '\\\\2',\n ' ' + text + ' ').strip()\n\n\ndef test1():\n \"\"\" тестировщик \"\"\"\n w = 'we'\n s = ['they', 'he', 'she']\n print(w, '->', s, '\\n', t1, '\\n', redo(t1, w, s))\n\n\ndef main():\n \"\"\" запуск \"\"\"\n print('got params:', sys.argv)\n argc = len(sys.argv)\n if argc < 3:\n print('Not enough parameters')\n return\n w, *subs = sys.argv[1:]\n print(w, subs)\n with open(fin) as fi:\n text = fi.read()\n out = redo(text, w, subs)\n print('text:', text)\n print('out:', out)\n with open(fot, 'w') as fo:\n fo.write(out)\n\n\nmain()\n",
"step-5": "# -- !/python3.10\n\n# Mikhail (myke) Kolodin, 2021\n# 2021-10-21 2021-10-21 1.2\n# retext.py\n# Заменить во входном тексте указанное слово на случайный вариант\n# из предложенного набора заменителей.\n# Параметры - в командной строке.\n\nimport re, random, sys\n\nfin = 'retext-in.txt'\nfot = 'retext-out.txt'\n\nt1 = \"\"\"\nhere we go again and we know:\nhere we do the same\n\"\"\"\n\ndef redo(text: str, aword: str, subs: list) -> str:\n \"\"\" заменятель \"\"\"\n return re.sub(f'(\\W){aword}(\\W)', r\"\\1\"+random.choice(subs)+r\"\\2\", \" \"+text+\" \").strip()\n\ndef test1():\n \"\"\" тестировщик \"\"\"\n w = \"we\"\n s = [\"they\", \"he\", \"she\"]\n print(w, \"->\", s, \"\\n\", t1, \"\\n\", redo(t1, w, s))\n\n#test1()\n\ndef main():\n \"\"\" запуск \"\"\"\n print(\"got params:\", sys.argv)\n argc = len(sys.argv)\n if argc < 3:\n print(\"Not enough parameters\")\n return\n w, *subs = sys.argv[1:]\n print(w, subs)\n with open(fin) as fi:\n text = fi.read()\n out = redo(text, w, subs)\n print(\"text:\", text)\n print(\"out:\", out)\n with open(fot, 'w') as fo:\n fo.write(out)\n\nmain()\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
<|reserved_special_token_0|>
def test_insert_and_get_db(data):
db.insert(data)
result = db.get_db()
return result == data
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def test_insert_and_get_db(data):
db.insert(data)
result = db.get_db()
return result == data
if __name__ == '__main__':
print(
f' Test insert dict in to db, and get dict from db is {test_insert_and_get_db(data)}'
)
print(f'List books = {db.list_book()}')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
data = {'python book': ['10.09.2019', 200, 50, False]}
def test_insert_and_get_db(data):
db.insert(data)
result = db.get_db()
return result == data
if __name__ == '__main__':
print(
f' Test insert dict in to db, and get dict from db is {test_insert_and_get_db(data)}'
)
print(f'List books = {db.list_book()}')
<|reserved_special_token_1|>
import db
data = {'python book': ['10.09.2019', 200, 50, False]}
def test_insert_and_get_db(data):
db.insert(data)
result = db.get_db()
return result == data
if __name__ == '__main__':
print(
f' Test insert dict in to db, and get dict from db is {test_insert_and_get_db(data)}'
)
print(f'List books = {db.list_book()}')
|
flexible
|
{
"blob_id": "d5cb875dc31ca3dd7b165206415c346a076dd6e4",
"index": 2901,
"step-1": "<mask token>\n\n\ndef test_insert_and_get_db(data):\n db.insert(data)\n result = db.get_db()\n return result == data\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef test_insert_and_get_db(data):\n db.insert(data)\n result = db.get_db()\n return result == data\n\n\nif __name__ == '__main__':\n print(\n f' Test insert dict in to db, and get dict from db is {test_insert_and_get_db(data)}'\n )\n print(f'List books = {db.list_book()}')\n",
"step-3": "<mask token>\ndata = {'python book': ['10.09.2019', 200, 50, False]}\n\n\ndef test_insert_and_get_db(data):\n db.insert(data)\n result = db.get_db()\n return result == data\n\n\nif __name__ == '__main__':\n print(\n f' Test insert dict in to db, and get dict from db is {test_insert_and_get_db(data)}'\n )\n print(f'List books = {db.list_book()}')\n",
"step-4": "import db\ndata = {'python book': ['10.09.2019', 200, 50, False]}\n\n\ndef test_insert_and_get_db(data):\n db.insert(data)\n result = db.get_db()\n return result == data\n\n\nif __name__ == '__main__':\n print(\n f' Test insert dict in to db, and get dict from db is {test_insert_and_get_db(data)}'\n )\n print(f'List books = {db.list_book()}')\n",
"step-5": null,
"step-ids": [
1,
2,
3,
4
]
}
|
[
1,
2,
3,
4
] |
import random
import glob
import json
import time
from torch.utils.data import Dataset, DataLoader, SubsetRandomSampler
from SimpleDataLoader import CustomDataset, get_params_from_filename
import numpy as np
from DNN_model import Net
import torch.optim as optim
import torch.nn as nn
import torch
from tqdm import tqdm
from MMS_compute import xpress_solver
import copy
path_to_data = 'Dataset'
def split_to_train_validation(path_to_data):
dataset = CustomDataset(path_to_data)
print(len(dataset))
batch_size = 300
validation_split = 0.2
shuffle_dataset = True
random_seed= 56
dataset_size = len(dataset)
indices = list(range(dataset_size))
split = int(np.floor(validation_split * dataset_size))
if shuffle_dataset :
np.random.seed(random_seed)
np.random.shuffle(indices)
train_indices, val_indices = indices[split:], indices[:split]
print(len(train_indices), len(val_indices))
# Creating PT data samplers and loaders:
train_sampler = SubsetRandomSampler(train_indices)
valid_sampler = SubsetRandomSampler(val_indices)
train_loader = DataLoader(dataset, batch_size=batch_size,
sampler=train_sampler)
validation_loader = DataLoader(dataset, batch_size=batch_size,
sampler=valid_sampler)
print(len(train_loader), len(validation_loader))
return train_loader, validation_loader
train_loader, validation_loader = split_to_train_validation(path_to_data)
net = Net()
loss_func = nn.MSELoss()
# loss_func = nn.L1Loss()
optimizer = optim.Adam(net.parameters(), lr=1e-4)
def compute_loss(dataloader, net):
loss = 0
if torch.cuda.is_available():
net.cuda()
net.eval()
n_batches = 0
with torch.no_grad():
for x, y in dataloader:
n_batches += 1
if torch.cuda.is_available():
x = x.cuda()
y = y.cuda()
pred = net(x)
loss += loss_func(pred, y).item()
loss = loss / n_batches
return loss
n_epochs = 50
pbar = tqdm(range(n_epochs))
validation_loss_vs_epoch = []
if torch.cuda.is_available():
net.cuda()
for epoch in pbar:
if len(validation_loss_vs_epoch) > 1:
print('epoch', epoch, ' val loss:' + '{0:.5f}'.format(validation_loss_vs_epoch[-1]))
net.train() # put the net into "training mode"
for x, y in train_loader:
y = y.to(torch.float32)
if torch.cuda.is_available():
x = x.cuda()
y = y.cuda()
optimizer.zero_grad()
pred = net(x)
loss = loss_func(pred, y)
loss.backward()
optimizer.step()
net.eval() # put the net into evaluation mode
valid_loss = compute_loss(validation_loader, net)
validation_loss_vs_epoch.append(valid_loss)
# n = 5
# m = 50
# max_val = 100
# values = [random.randrange(0, max_val + 1) for _ in range(m)]
# values.sort(reverse=True)
# values += [0]*50
# mms = xpress_solver(values,n)[0]
# sum_vals = sum(values)
# new_values = [val/sum_vals for val in values]
# pred = net(torch.FloatTensor([float(n)]+new_values))
# pred_num = float(pred.data[0])
# print(pred, mms, pred*sum_vals)
# print(pred_num*sum_vals)
def zero_pad(values, max_m):
m = len(values)
values += [0] * (max_m - m)
def solve_with_solver(values_copy, n):
return xpress_solver(values_copy, n)
def solve_with_net(values_copy, n):
start = time.time()
sum_vals = sum(values_copy)
new_values = [val / sum_vals for val in values_copy]
pred = net(torch.FloatTensor([float(n)] + new_values))
pred_num = float(pred.data[0])
final_result = pred_num*sum_vals
end = time.time()
return final_result, end-start
def test_net(path):
max_m = 100
filelist = glob.glob(path + '/*.json')
print(len(filelist))
test_result = dict()
filelist_len = len(filelist)
for count, filename in enumerate(filelist):
n, m, max_val = get_params_from_filename(filename)
data_list_in_file = []
with open(filename) as jsonFile:
data_list_in_file = json.load(jsonFile)
idx = random.randint(0, len(data_list_in_file)-1)
example=data_list_in_file[idx]
values = example[0]["values"]
values_copy = copy.deepcopy(values)
values_copy.sort(reverse=True)
solver_result, solver_time = solve_with_solver(values_copy, n)
zero_pad(values_copy, max_m)
net_result, net_time = solve_with_net(values_copy, n)
test_result[str((n, m, max_val))] = {
'values_idx': idx,
'solver_result': solver_result,
'solver_time':solver_time,
'net_result':net_result,
'net_time':net_time
}
if count % 20 == 0:
print(count, 'out of', filelist_len)
test_result_path = './TestResults/test_results.json'
with open(test_result_path, 'w+') as json_file:
json.dump(test_result, json_file, indent=4)
test_net(path_to_data)
|
normal
|
{
"blob_id": "1f63f9234596787e4859b740d3a7fbfaacc9c0c8",
"index": 9930,
"step-1": "<mask token>\n\n\ndef zero_pad(values, max_m):\n m = len(values)\n values += [0] * (max_m - m)\n\n\ndef solve_with_solver(values_copy, n):\n return xpress_solver(values_copy, n)\n\n\ndef solve_with_net(values_copy, n):\n start = time.time()\n sum_vals = sum(values_copy)\n new_values = [(val / sum_vals) for val in values_copy]\n pred = net(torch.FloatTensor([float(n)] + new_values))\n pred_num = float(pred.data[0])\n final_result = pred_num * sum_vals\n end = time.time()\n return final_result, end - start\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef split_to_train_validation(path_to_data):\n dataset = CustomDataset(path_to_data)\n print(len(dataset))\n batch_size = 300\n validation_split = 0.2\n shuffle_dataset = True\n random_seed = 56\n dataset_size = len(dataset)\n indices = list(range(dataset_size))\n split = int(np.floor(validation_split * dataset_size))\n if shuffle_dataset:\n np.random.seed(random_seed)\n np.random.shuffle(indices)\n train_indices, val_indices = indices[split:], indices[:split]\n print(len(train_indices), len(val_indices))\n train_sampler = SubsetRandomSampler(train_indices)\n valid_sampler = SubsetRandomSampler(val_indices)\n train_loader = DataLoader(dataset, batch_size=batch_size, sampler=\n train_sampler)\n validation_loader = DataLoader(dataset, batch_size=batch_size, sampler=\n valid_sampler)\n print(len(train_loader), len(validation_loader))\n return train_loader, validation_loader\n\n\n<mask token>\n\n\ndef compute_loss(dataloader, net):\n loss = 0\n if torch.cuda.is_available():\n net.cuda()\n net.eval()\n n_batches = 0\n with torch.no_grad():\n for x, y in dataloader:\n n_batches += 1\n if torch.cuda.is_available():\n x = x.cuda()\n y = y.cuda()\n pred = net(x)\n loss += loss_func(pred, y).item()\n loss = loss / n_batches\n return loss\n\n\n<mask token>\n\n\ndef zero_pad(values, max_m):\n m = len(values)\n values += [0] * (max_m - m)\n\n\ndef solve_with_solver(values_copy, n):\n return xpress_solver(values_copy, n)\n\n\ndef solve_with_net(values_copy, n):\n start = time.time()\n sum_vals = sum(values_copy)\n new_values = [(val / sum_vals) for val in values_copy]\n pred = net(torch.FloatTensor([float(n)] + new_values))\n pred_num = float(pred.data[0])\n final_result = pred_num * sum_vals\n end = time.time()\n return final_result, end - start\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef split_to_train_validation(path_to_data):\n dataset = CustomDataset(path_to_data)\n print(len(dataset))\n batch_size = 300\n validation_split = 0.2\n shuffle_dataset = True\n random_seed = 56\n dataset_size = len(dataset)\n indices = list(range(dataset_size))\n split = int(np.floor(validation_split * dataset_size))\n if shuffle_dataset:\n np.random.seed(random_seed)\n np.random.shuffle(indices)\n train_indices, val_indices = indices[split:], indices[:split]\n print(len(train_indices), len(val_indices))\n train_sampler = SubsetRandomSampler(train_indices)\n valid_sampler = SubsetRandomSampler(val_indices)\n train_loader = DataLoader(dataset, batch_size=batch_size, sampler=\n train_sampler)\n validation_loader = DataLoader(dataset, batch_size=batch_size, sampler=\n valid_sampler)\n print(len(train_loader), len(validation_loader))\n return train_loader, validation_loader\n\n\n<mask token>\n\n\ndef compute_loss(dataloader, net):\n loss = 0\n if torch.cuda.is_available():\n net.cuda()\n net.eval()\n n_batches = 0\n with torch.no_grad():\n for x, y in dataloader:\n n_batches += 1\n if torch.cuda.is_available():\n x = x.cuda()\n y = y.cuda()\n pred = net(x)\n loss += loss_func(pred, y).item()\n loss = loss / n_batches\n return loss\n\n\n<mask token>\nif torch.cuda.is_available():\n net.cuda()\nfor epoch in pbar:\n if len(validation_loss_vs_epoch) > 1:\n print('epoch', epoch, ' val loss:' + '{0:.5f}'.format(\n validation_loss_vs_epoch[-1]))\n net.train()\n for x, y in train_loader:\n y = y.to(torch.float32)\n if torch.cuda.is_available():\n x = x.cuda()\n y = y.cuda()\n optimizer.zero_grad()\n pred = net(x)\n loss = loss_func(pred, y)\n loss.backward()\n optimizer.step()\n net.eval()\n valid_loss = compute_loss(validation_loader, net)\n validation_loss_vs_epoch.append(valid_loss)\n\n\ndef zero_pad(values, max_m):\n m = len(values)\n values += [0] * (max_m - m)\n\n\ndef solve_with_solver(values_copy, n):\n return xpress_solver(values_copy, n)\n\n\ndef solve_with_net(values_copy, n):\n start = time.time()\n sum_vals = sum(values_copy)\n new_values = [(val / sum_vals) for val in values_copy]\n pred = net(torch.FloatTensor([float(n)] + new_values))\n pred_num = float(pred.data[0])\n final_result = pred_num * sum_vals\n end = time.time()\n return final_result, end - start\n\n\ndef test_net(path):\n max_m = 100\n filelist = glob.glob(path + '/*.json')\n print(len(filelist))\n test_result = dict()\n filelist_len = len(filelist)\n for count, filename in enumerate(filelist):\n n, m, max_val = get_params_from_filename(filename)\n data_list_in_file = []\n with open(filename) as jsonFile:\n data_list_in_file = json.load(jsonFile)\n idx = random.randint(0, len(data_list_in_file) - 1)\n example = data_list_in_file[idx]\n values = example[0]['values']\n values_copy = copy.deepcopy(values)\n values_copy.sort(reverse=True)\n solver_result, solver_time = solve_with_solver(values_copy, n)\n zero_pad(values_copy, max_m)\n net_result, net_time = solve_with_net(values_copy, n)\n test_result[str((n, m, max_val))] = {'values_idx': idx,\n 'solver_result': solver_result, 'solver_time': solver_time,\n 'net_result': net_result, 'net_time': net_time}\n if count % 20 == 0:\n print(count, 'out of', filelist_len)\n test_result_path = './TestResults/test_results.json'\n with open(test_result_path, 'w+') as json_file:\n json.dump(test_result, json_file, indent=4)\n\n\ntest_net(path_to_data)\n",
"step-4": "import random\nimport glob\nimport json\nimport time\nfrom torch.utils.data import Dataset, DataLoader, SubsetRandomSampler\nfrom SimpleDataLoader import CustomDataset, get_params_from_filename\nimport numpy as np\nfrom DNN_model import Net\nimport torch.optim as optim\nimport torch.nn as nn\nimport torch\nfrom tqdm import tqdm\nfrom MMS_compute import xpress_solver\nimport copy\npath_to_data = 'Dataset'\n\n\ndef split_to_train_validation(path_to_data):\n dataset = CustomDataset(path_to_data)\n print(len(dataset))\n batch_size = 300\n validation_split = 0.2\n shuffle_dataset = True\n random_seed = 56\n dataset_size = len(dataset)\n indices = list(range(dataset_size))\n split = int(np.floor(validation_split * dataset_size))\n if shuffle_dataset:\n np.random.seed(random_seed)\n np.random.shuffle(indices)\n train_indices, val_indices = indices[split:], indices[:split]\n print(len(train_indices), len(val_indices))\n train_sampler = SubsetRandomSampler(train_indices)\n valid_sampler = SubsetRandomSampler(val_indices)\n train_loader = DataLoader(dataset, batch_size=batch_size, sampler=\n train_sampler)\n validation_loader = DataLoader(dataset, batch_size=batch_size, sampler=\n valid_sampler)\n print(len(train_loader), len(validation_loader))\n return train_loader, validation_loader\n\n\ntrain_loader, validation_loader = split_to_train_validation(path_to_data)\nnet = Net()\nloss_func = nn.MSELoss()\noptimizer = optim.Adam(net.parameters(), lr=0.0001)\n\n\ndef compute_loss(dataloader, net):\n loss = 0\n if torch.cuda.is_available():\n net.cuda()\n net.eval()\n n_batches = 0\n with torch.no_grad():\n for x, y in dataloader:\n n_batches += 1\n if torch.cuda.is_available():\n x = x.cuda()\n y = y.cuda()\n pred = net(x)\n loss += loss_func(pred, y).item()\n loss = loss / n_batches\n return loss\n\n\nn_epochs = 50\npbar = tqdm(range(n_epochs))\nvalidation_loss_vs_epoch = []\nif torch.cuda.is_available():\n net.cuda()\nfor epoch in pbar:\n if len(validation_loss_vs_epoch) > 1:\n print('epoch', epoch, ' val loss:' + '{0:.5f}'.format(\n validation_loss_vs_epoch[-1]))\n net.train()\n for x, y in train_loader:\n y = y.to(torch.float32)\n if torch.cuda.is_available():\n x = x.cuda()\n y = y.cuda()\n optimizer.zero_grad()\n pred = net(x)\n loss = loss_func(pred, y)\n loss.backward()\n optimizer.step()\n net.eval()\n valid_loss = compute_loss(validation_loader, net)\n validation_loss_vs_epoch.append(valid_loss)\n\n\ndef zero_pad(values, max_m):\n m = len(values)\n values += [0] * (max_m - m)\n\n\ndef solve_with_solver(values_copy, n):\n return xpress_solver(values_copy, n)\n\n\ndef solve_with_net(values_copy, n):\n start = time.time()\n sum_vals = sum(values_copy)\n new_values = [(val / sum_vals) for val in values_copy]\n pred = net(torch.FloatTensor([float(n)] + new_values))\n pred_num = float(pred.data[0])\n final_result = pred_num * sum_vals\n end = time.time()\n return final_result, end - start\n\n\ndef test_net(path):\n max_m = 100\n filelist = glob.glob(path + '/*.json')\n print(len(filelist))\n test_result = dict()\n filelist_len = len(filelist)\n for count, filename in enumerate(filelist):\n n, m, max_val = get_params_from_filename(filename)\n data_list_in_file = []\n with open(filename) as jsonFile:\n data_list_in_file = json.load(jsonFile)\n idx = random.randint(0, len(data_list_in_file) - 1)\n example = data_list_in_file[idx]\n values = example[0]['values']\n values_copy = copy.deepcopy(values)\n values_copy.sort(reverse=True)\n solver_result, solver_time = solve_with_solver(values_copy, 
n)\n zero_pad(values_copy, max_m)\n net_result, net_time = solve_with_net(values_copy, n)\n test_result[str((n, m, max_val))] = {'values_idx': idx,\n 'solver_result': solver_result, 'solver_time': solver_time,\n 'net_result': net_result, 'net_time': net_time}\n if count % 20 == 0:\n print(count, 'out of', filelist_len)\n test_result_path = './TestResults/test_results.json'\n with open(test_result_path, 'w+') as json_file:\n json.dump(test_result, json_file, indent=4)\n\n\ntest_net(path_to_data)\n",
"step-5": "import random\nimport glob\nimport json\nimport time\n\nfrom torch.utils.data import Dataset, DataLoader, SubsetRandomSampler\nfrom SimpleDataLoader import CustomDataset, get_params_from_filename\nimport numpy as np\nfrom DNN_model import Net\nimport torch.optim as optim\nimport torch.nn as nn\nimport torch\nfrom tqdm import tqdm\nfrom MMS_compute import xpress_solver\nimport copy\n\n\npath_to_data = 'Dataset'\n\ndef split_to_train_validation(path_to_data):\n\n dataset = CustomDataset(path_to_data)\n print(len(dataset))\n\n batch_size = 300\n validation_split = 0.2\n shuffle_dataset = True\n random_seed= 56\n dataset_size = len(dataset)\n indices = list(range(dataset_size))\n split = int(np.floor(validation_split * dataset_size))\n if shuffle_dataset :\n np.random.seed(random_seed)\n np.random.shuffle(indices)\n train_indices, val_indices = indices[split:], indices[:split]\n print(len(train_indices), len(val_indices))\n\n # Creating PT data samplers and loaders:\n train_sampler = SubsetRandomSampler(train_indices)\n valid_sampler = SubsetRandomSampler(val_indices)\n\n train_loader = DataLoader(dataset, batch_size=batch_size,\n sampler=train_sampler)\n validation_loader = DataLoader(dataset, batch_size=batch_size,\n sampler=valid_sampler)\n\n print(len(train_loader), len(validation_loader))\n return train_loader, validation_loader\n\n\ntrain_loader, validation_loader = split_to_train_validation(path_to_data)\n\nnet = Net()\n\n\n\n\n\nloss_func = nn.MSELoss()\n# loss_func = nn.L1Loss()\noptimizer = optim.Adam(net.parameters(), lr=1e-4)\n\n\ndef compute_loss(dataloader, net):\n loss = 0\n\n if torch.cuda.is_available():\n net.cuda()\n net.eval()\n\n n_batches = 0\n with torch.no_grad():\n for x, y in dataloader:\n n_batches += 1\n\n if torch.cuda.is_available():\n x = x.cuda()\n y = y.cuda()\n pred = net(x)\n\n loss += loss_func(pred, y).item()\n\n loss = loss / n_batches\n return loss\n\n\n\n\nn_epochs = 50\n\npbar = tqdm(range(n_epochs))\nvalidation_loss_vs_epoch = []\n\nif torch.cuda.is_available():\n net.cuda()\n\nfor epoch in pbar:\n\n if len(validation_loss_vs_epoch) > 1:\n print('epoch', epoch, ' val loss:' + '{0:.5f}'.format(validation_loss_vs_epoch[-1]))\n\n net.train() # put the net into \"training mode\"\n for x, y in train_loader:\n y = y.to(torch.float32)\n\n if torch.cuda.is_available():\n x = x.cuda()\n y = y.cuda()\n\n optimizer.zero_grad()\n pred = net(x)\n loss = loss_func(pred, y)\n loss.backward()\n optimizer.step()\n\n net.eval() # put the net into evaluation mode\n\n valid_loss = compute_loss(validation_loader, net)\n\n validation_loss_vs_epoch.append(valid_loss)\n\n# n = 5\n# m = 50\n# max_val = 100\n# values = [random.randrange(0, max_val + 1) for _ in range(m)]\n# values.sort(reverse=True)\n# values += [0]*50\n# mms = xpress_solver(values,n)[0]\n# sum_vals = sum(values)\n# new_values = [val/sum_vals for val in values]\n# pred = net(torch.FloatTensor([float(n)]+new_values))\n# pred_num = float(pred.data[0])\n# print(pred, mms, pred*sum_vals)\n# print(pred_num*sum_vals)\n\n\ndef zero_pad(values, max_m):\n m = len(values)\n values += [0] * (max_m - m)\n\n\ndef solve_with_solver(values_copy, n):\n return xpress_solver(values_copy, n)\n\n\n\ndef solve_with_net(values_copy, n):\n start = time.time()\n sum_vals = sum(values_copy)\n new_values = [val / sum_vals for val in values_copy]\n pred = net(torch.FloatTensor([float(n)] + new_values))\n pred_num = float(pred.data[0])\n final_result = pred_num*sum_vals\n end = time.time()\n return final_result, 
end-start\n\ndef test_net(path):\n max_m = 100\n filelist = glob.glob(path + '/*.json')\n print(len(filelist))\n\n test_result = dict()\n filelist_len = len(filelist)\n for count, filename in enumerate(filelist):\n n, m, max_val = get_params_from_filename(filename)\n data_list_in_file = []\n with open(filename) as jsonFile:\n data_list_in_file = json.load(jsonFile)\n idx = random.randint(0, len(data_list_in_file)-1)\n example=data_list_in_file[idx]\n values = example[0][\"values\"]\n values_copy = copy.deepcopy(values)\n values_copy.sort(reverse=True)\n solver_result, solver_time = solve_with_solver(values_copy, n)\n\n zero_pad(values_copy, max_m)\n net_result, net_time = solve_with_net(values_copy, n)\n test_result[str((n, m, max_val))] = {\n 'values_idx': idx,\n 'solver_result': solver_result,\n 'solver_time':solver_time,\n 'net_result':net_result,\n 'net_time':net_time\n }\n if count % 20 == 0:\n print(count, 'out of', filelist_len)\n test_result_path = './TestResults/test_results.json'\n with open(test_result_path, 'w+') as json_file:\n json.dump(test_result, json_file, indent=4)\n\ntest_net(path_to_data)",
"step-ids": [
3,
5,
7,
9,
10
]
}
|
[
3,
5,
7,
9,
10
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Migration(migrations.Migration):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Migration(migrations.Migration):
dependencies = [('app', '0004_product_pr_number')]
operations = [migrations.RemoveField(model_name='payment', name='PA_id'
), migrations.AddField(model_name='payment', name='buyer', field=
models.CharField(default=0, max_length=32), preserve_default=False),
migrations.AlterField(model_name='payment', name='PA_type', field=
models.CharField(default='credit', max_length=32))]
<|reserved_special_token_1|>
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [('app', '0004_product_pr_number')]
operations = [migrations.RemoveField(model_name='payment', name='PA_id'
), migrations.AddField(model_name='payment', name='buyer', field=
models.CharField(default=0, max_length=32), preserve_default=False),
migrations.AlterField(model_name='payment', name='PA_type', field=
models.CharField(default='credit', max_length=32))]
<|reserved_special_token_1|>
# Generated by Django 2.2 on 2021-01-31 14:11
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('app', '0004_product_pr_number'),
]
operations = [
migrations.RemoveField(
model_name='payment',
name='PA_id',
),
migrations.AddField(
model_name='payment',
name='buyer',
field=models.CharField(default=0, max_length=32),
preserve_default=False,
),
migrations.AlterField(
model_name='payment',
name='PA_type',
field=models.CharField(default='credit', max_length=32),
),
]
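
# A hedged sketch (not part of the original app) of how the Payment model could
# look once this migration is applied; the class name and field layout are
# assumptions reconstructed from the RemoveField/AddField/AlterField operations above.
from django.db import models


class Payment(models.Model):
    # 'PA_id' has been removed by the RemoveField operation above
    buyer = models.CharField(default=0, max_length=32)  # added field
    PA_type = models.CharField(default='credit', max_length=32)  # altered default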
|
flexible
|
{
"blob_id": "388772386f25d6c2f9cc8778b7ce1b2ad0920851",
"index": 6986,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('app', '0004_product_pr_number')]\n operations = [migrations.RemoveField(model_name='payment', name='PA_id'\n ), migrations.AddField(model_name='payment', name='buyer', field=\n models.CharField(default=0, max_length=32), preserve_default=False),\n migrations.AlterField(model_name='payment', name='PA_type', field=\n models.CharField(default='credit', max_length=32))]\n",
"step-4": "from django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n dependencies = [('app', '0004_product_pr_number')]\n operations = [migrations.RemoveField(model_name='payment', name='PA_id'\n ), migrations.AddField(model_name='payment', name='buyer', field=\n models.CharField(default=0, max_length=32), preserve_default=False),\n migrations.AlterField(model_name='payment', name='PA_type', field=\n models.CharField(default='credit', max_length=32))]\n",
"step-5": "# Generated by Django 2.2 on 2021-01-31 14:11\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('app', '0004_product_pr_number'),\n ]\n\n operations = [\n migrations.RemoveField(\n model_name='payment',\n name='PA_id',\n ),\n migrations.AddField(\n model_name='payment',\n name='buyer',\n field=models.CharField(default=0, max_length=32),\n preserve_default=False,\n ),\n migrations.AlterField(\n model_name='payment',\n name='PA_type',\n field=models.CharField(default='credit', max_length=32),\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import pyhs2
import sys
import datetime
i = datetime.datetime.now()
# args
if len(sys.argv) < 2:
print "Run with python version 2.6"
print "Requires arg: <orgId>"
sys.exit()
orgId = sys.argv[1]
print "\n\nCreating document external ID manifest for Org ID: " + orgId
## strings
fileLine = "%s\t%s\t%s\n" #external_id doc_source assign_authority
query = """select * from (select external_id, doc_source, assign_authority from summary_doc_manifest where org_id = '%s'
UNION ALL select get_json_object(line, '$.document.id') as external_id, get_json_object(line, '$.document.source') as doc_source,
get_json_object(line, '$.document.assignAuthority') as assign_authority from production_logs_datacheckandrecover_epoch
where get_json_object(line, '$.docManifest') is not null and get_json_object(line, '$.orgId') = '%s'
and day=%s and month=%s and year=2014) joined_table""" %(orgId, orgId, i.day, i.month)
fileName = orgId + "-manifest"
## hive connection
conn = pyhs2.connect(host='10.196.47.205',
port=10000,
authMechanism="PLAIN",
user='hive',
password='',
database='default')
cur = conn.cursor()
count = 0
print "Executing query: " + query
cur.execute(query)
print "Building query results..."
out = open(fileName, "w")
for row in cur.fetch():
out.write(fileLine%(row[0], row[1], row[2]))
count+=1
if count%1000000 == 0:
print "...wrote " + str(count) + " entries so far."
out.close()
print "...wrote " + str(count) + " entries into the file: " + fileName
print "\n"
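
# A minimal, self-contained sketch (no Hive connection, made-up rows) of the
# tab-separated manifest line formatting used above; useful for a quick local
# sanity check of the output format without pyhs2. Runs under Python 2 or 3.
sample_rows = [
    ("doc-123", "sourceA", "authorityX"),
    ("doc-456", "sourceB", "authorityY"),
]
line_format = "%s\t%s\t%s\n"  # external_id, doc_source, assign_authority
out = open("sample-manifest", "w")
for row in sample_rows:
    out.write(line_format % (row[0], row[1], row[2]))
out.close()
print("wrote " + str(len(sample_rows)) + " entries into the file: sample-manifest")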
|
normal
|
{
"blob_id": "29c630b56eb56d91d1e917078138a2bbf562e0bf",
"index": 579,
"step-1": "import pyhs2\nimport sys\nimport datetime\ni = datetime.datetime.now()\n\n# args\nif len(sys.argv) < 2:\n print \"Run with python version 2.6\"\n print \"Requires arg: <orgId>\"\n sys.exit()\n\norgId = sys.argv[1]\n\nprint \"\\n\\nCreating document external ID manifest for Org ID: \" + orgId\n\n## strings\nfileLine = \"%s\\t%s\\t%s\\n\" #external_id doc_source assign_authority\nquery = \"\"\"select * from (select external_id, doc_source, assign_authority from summary_doc_manifest where org_id = '%s'\n\t\tUNION ALL select get_json_object(line, '$.document.id') as external_id, get_json_object(line, '$.document.source') as doc_source,\n\t\tget_json_object(line, '$.document.assignAuthority') as assign_authority from production_logs_datacheckandrecover_epoch\n\t\twhere get_json_object(line, '$.docManifest') is not null and get_json_object(line, '$.orgId') = '%s'\n\t\tand day=%s and month=%s and year=2014) joined_table\"\"\" %(orgId, orgId, i.day, i.month) \nfileName = orgId + \"-manifest\"\n\n## hive connection\nconn = pyhs2.connect(host='10.196.47.205',\n port=10000,\n authMechanism=\"PLAIN\",\n user='hive',\n password='',\n database='default')\ncur = conn.cursor()\n\ncount = 0\n\nprint \"Executing query: \" + query\ncur.execute(query)\n\nprint \"Building query results...\"\nout = open(fileName, \"w\")\nfor row in cur.fetch():\n out.write(fileLine%(row[0], row[1], row[2]))\n count+=1\n if count%1000000 == 0:\n print \"...wrote \" + str(count) + \" entries so far.\"\nout.close()\n\nprint \"...wrote \" + str(count) + \" entries into the file: \" + fileName\nprint \"\\n\"",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
with open('token.txt', 'r') as f:
token = f.read()
headers = {'X-Xapp-Token': token}
with open('dataset_24476_4.txt', 'r') as id:
for line in id:
address = 'https://api.artsy.net/api/artists/' + line.strip()
r = requests.get(address, headers=headers)
j = json.loads(r.text)
l.append((j['sortable_name'], j['birthday']))
<|reserved_special_token_0|>
for i in l:
print(i[0])
<|reserved_special_token_1|>
<|reserved_special_token_0|>
l = list()
with open('token.txt', 'r') as f:
token = f.read()
headers = {'X-Xapp-Token': token}
with open('dataset_24476_4.txt', 'r') as id:
for line in id:
address = 'https://api.artsy.net/api/artists/' + line.strip()
r = requests.get(address, headers=headers)
j = json.loads(r.text)
l.append((j['sortable_name'], j['birthday']))
l = sorted(l, key=lambda tup: (tup[1], tup[0]))
for i in l:
print(i[0])
<|reserved_special_token_1|>
import requests
import json
l = list()
with open('token.txt', 'r') as f:
token = f.read()
headers = {'X-Xapp-Token': token}
with open('dataset_24476_4.txt', 'r') as id:
for line in id:
address = 'https://api.artsy.net/api/artists/' + line.strip()
r = requests.get(address, headers=headers)
j = json.loads(r.text)
l.append((j['sortable_name'], j['birthday']))
l = sorted(l, key=lambda tup: (tup[1], tup[0]))
for i in l:
print(i[0])
<|reserved_special_token_1|>
import requests
import json
l = list()
with open ( "token.txt", "r") as f:
token = f.read()
    # create a header containing our token
headers = {"X-Xapp-Token" : token}
with open('dataset_24476_4.txt', 'r') as id:
for line in id:
address = "https://api.artsy.net/api/artists/" + line.strip()
            # send the request with the header
r = requests.get(address, headers=headers)
            # parse the server response
j = json.loads(r.text)
l.append((j['sortable_name'], j['birthday']))
#l.append((('Warhol Bandy', '1928')))
#l.append((('Warhol Aandy', '1928')))
l = sorted(l, key=lambda tup: (tup[1], tup[0]))
for i in l:
print(i[0])
# year = '0000'
# new_l = []
#
# k = []
#
# for i in l:
# if i[1] != year:
# k = []
# k.append(i[0])
# year = i[1]
# else:
# k.append(i[0])
# k.sort()
# print(next(name for name in k))
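
# A minimal offline sketch of the (birthday, sortable_name) sorting step above,
# using made-up artist tuples instead of live Artsy API responses.
sample = [
    ('Warhol Andy', '1928'),
    ('Kahlo Frida', '1907'),
    ('Basquiat Jean-Michel', '1960'),
    ('Picasso Pablo', '1907'),
]
# sort by birth year first, then by sortable name, same key as the loop above
for name, year in sorted(sample, key=lambda tup: (tup[1], tup[0])):
    print(name)
# expected order: Kahlo Frida, Picasso Pablo, Warhol Andy, Basquiat Jean-Michel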
|
flexible
|
{
"blob_id": "e1ecc08f66e094841647f72b78bcd29ed8d32668",
"index": 5976,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwith open('token.txt', 'r') as f:\n token = f.read()\n headers = {'X-Xapp-Token': token}\n with open('dataset_24476_4.txt', 'r') as id:\n for line in id:\n address = 'https://api.artsy.net/api/artists/' + line.strip()\n r = requests.get(address, headers=headers)\n j = json.loads(r.text)\n l.append((j['sortable_name'], j['birthday']))\n<mask token>\nfor i in l:\n print(i[0])\n",
"step-3": "<mask token>\nl = list()\nwith open('token.txt', 'r') as f:\n token = f.read()\n headers = {'X-Xapp-Token': token}\n with open('dataset_24476_4.txt', 'r') as id:\n for line in id:\n address = 'https://api.artsy.net/api/artists/' + line.strip()\n r = requests.get(address, headers=headers)\n j = json.loads(r.text)\n l.append((j['sortable_name'], j['birthday']))\nl = sorted(l, key=lambda tup: (tup[1], tup[0]))\nfor i in l:\n print(i[0])\n",
"step-4": "import requests\nimport json\nl = list()\nwith open('token.txt', 'r') as f:\n token = f.read()\n headers = {'X-Xapp-Token': token}\n with open('dataset_24476_4.txt', 'r') as id:\n for line in id:\n address = 'https://api.artsy.net/api/artists/' + line.strip()\n r = requests.get(address, headers=headers)\n j = json.loads(r.text)\n l.append((j['sortable_name'], j['birthday']))\nl = sorted(l, key=lambda tup: (tup[1], tup[0]))\nfor i in l:\n print(i[0])\n",
"step-5": "import requests\nimport json\n\nl = list()\n\nwith open ( \"token.txt\", \"r\") as f:\n\n token = f.read()\n\n # создаем заголовок, содержащий наш токен\n headers = {\"X-Xapp-Token\" : token}\n\n with open('dataset_24476_4.txt', 'r') as id:\n\n for line in id:\n address = \"https://api.artsy.net/api/artists/\" + line.strip()\n # инициируем запрос с заголовком\n r = requests.get(address, headers=headers)\n\n # разбираем ответ сервера\n j = json.loads(r.text)\n\n l.append((j['sortable_name'], j['birthday']))\n\n#l.append((('Warhol Bandy', '1928')))\n#l.append((('Warhol Aandy', '1928')))\n\n\nl = sorted(l, key=lambda tup: (tup[1], tup[0]))\nfor i in l:\n print(i[0])\n\n# year = '0000'\n# new_l = []\n#\n# k = []\n#\n# for i in l:\n# if i[1] != year:\n# k = []\n# k.append(i[0])\n# year = i[1]\n# else:\n# k.append(i[0])\n# k.sort()\n# print(next(name for name in k))\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# -*- coding: utf-8 -*-
import os
import sys
import socket
import signal
import functools
import atexit
import tempfile
from subprocess import Popen, PIPE, STDOUT
from threading import Thread
from queue import Queue, Empty
from time import sleep
import json
from .exceptions import CommandError, TimeoutWaitingFor
ON_POSIX = 'posix' in sys.builtin_module_names
# Directory relative to basetest module location
CURRENT_DIR = os.path.dirname(os.path.abspath(__file__))
# Location of binary files (usually the src/ folder)
BIN_PREFIX = os.path.abspath(
os.path.join(CURRENT_DIR, "..", "..", "src")
)
# Default location of test certificates
DEFAULT_CERT_PATH = os.path.abspath(
os.path.join(CURRENT_DIR, "..", "test_certs")
)
# Default location of test extensions
DEFAULT_EXTENSION_PATH = os.path.abspath(
os.path.join(CURRENT_DIR, "..", "test_extensions")
)
# Environment flags to control skipping of shared tests
SHARED_SKIP = os.environ.get("SHARED_SKIP", False)
# Environment flags to control use of PATH or in-tree binaries
SHARED_USE_PATH = os.environ.get("SHARED_USE_PATH", False)
UUID_REGEXP = ("[0-9A-Fa-f]{8}-" + ("[0-9A-Fa-f]{4}-" * 3) + "[0-9A-Fa-f]{12}")
def shared_binary_location(cmd="shared"):
""" ../src/ is used by default.
"""
return os.path.join(BIN_PREFIX, cmd)
return binary_location(cmd, SHARED_USE_PATH)
def binary_location(cmd, USE_PATH=False):
""" ../src/ is used by default.
"""
return os.path.join(BIN_PREFIX, cmd)
def wait_condition(cond, timeout=1, sleeptime=.01):
"""Wait for condition to return anything other than None
"""
# NOTE Increasing sleeptime can dramatically increase testsuite runtime
# It also reduces CPU load significantly
if timeout is None:
timeout = 1
if timeout < sleeptime:
print("Warning, timeout cannot be smaller than", sleeptime)
timeout = sleeptime
# Max number of attempts until giving up
tries = int(timeout / sleeptime)
for i in range(tries):
val = cond()
if val is not None:
break
sleep(sleeptime)
return val
def wait_process(pid, timeout=None):
"""Wait for process to finish
"""
def process():
try:
os.kill(pid, 0)
except OSError:
# Process is dead
return True
else:
# Process is still ticking
return None
return wait_condition(process, timeout)
def _queue_output(arguments, pidq, outputq):
"""Read/Write output/input of given process.
This function is meant to be executed in a thread as it may block
"""
kwargs = arguments["process"]
input = arguments["input"]
try:
proc = Popen(**kwargs)
except OSError as e:
# pid None is read by the main thread as a crash of the process
pidq.put(None)
outputq.put((
"",
("Unexpected exception caught during execution: '{0}' . ".format(e)),
255)) # false exitcode
return
# Put the PID in the queue for main process to know.
pidq.put(proc.pid)
# Send input and wait for finish
out, err = proc.communicate(input)
out, err = out.decode('utf-8'), err.decode('utf-8')
# Give the output back to the caller
outputq.put((out, err, proc.returncode))
def _retrieve_output(thread, timeout, queue, thread_error):
"""Fetch output from binary subprocess queues
"""
# Try to join the thread on failure abort
thread.join(timeout)
if thread.isAlive():
# Join should have killed the thread. This is unexpected
raise TimeoutWaitingFor(thread_error + ". Unexpected error")
# Thread died so we should have output
try:
# data = (stdout, stderr, exitcode)
data = queue.get(timeout=timeout)
except Empty:
data = TimeoutWaitingFor("streams from program")
return data
def _get_output(arguments, timeout=None):
"""Collect output from the subprocess without blocking the main process if
subprocess hangs.
"""
# NOTE Increase this value if tests fail with None being received as
# stdout/stderr instead of the expected content
output_timeout = 0.1 # seconds
pidq = Queue()
outputq = Queue()
t = Thread(target=_queue_output, args=(arguments, pidq, outputq))
t.daemon = True
t.start()
try:
pid = pidq.get(timeout=timeout)
except Empty:
pid = None
# Process crashed or timed out for some reason
if pid is None:
return _retrieve_output(t, output_timeout, outputq,
"Program to start")
# Wait for process to finish (normal execution)
state = wait_process(pid, timeout)
if state:
# Process finished
return _retrieve_output(t, output_timeout, outputq,
"Program thread to join")
# If we reach this point we assume the process got stuck or timed out
for sig in (signal.SIGABRT, signal.SIGTERM, signal.SIGKILL):
# Start with lower signals and escalate if process ignores them
try:
            os.kill(pid, sig)
except OSError as e:
# 3 means the process finished/died between last check and now
if e.errno != 3:
raise
# Wait for process to finish (should die/exit after signal)
state = wait_process(pid, timeout)
if state:
# Process finished
return _retrieve_output(t, output_timeout, outputq,
"Program to die")
# This should never happen but in case something goes really bad
raise OSError("Program stopped responding and couldn't be killed")
def run_cmd_wait(cmd, input=None, stdout=PIPE, stderr=PIPE,
merge_streams=False, env=os.environ, timeout=None):
"Run a subprocess and wait for it to finish"
if input is None:
stdin = None
else:
stdin = PIPE
if merge_streams:
stderr = STDOUT
else:
stderr = PIPE
arguments = {
"process": {
"args": cmd,
"stdin": stdin,
"stdout": stdout,
"stderr": stderr,
"bufsize": 1,
"close_fds": ON_POSIX,
"env": env,
},
"input": input,
}
out, err, exit = _get_output(arguments, timeout)
if merge_streams:
if exit != 0:
raise CommandError(cmd, exit, out)
else:
return exit, out
else:
if exit != 0:
raise CommandError(cmd, exit, out, err)
else:
return exit, out, err
def run_cmd_wait_nofail(*args, **kwargs):
"""Same as run_cmd_wait but silence the exception if it happens"""
try:
return run_cmd_wait(*args, **kwargs)
except CommandError as e:
return e.code, e.out, e.err
def memoize(obj):
"""Keep an in-memory cache of function results given its inputs
"""
cache = obj.cache = {}
@functools.wraps(obj)
def memoizer(*args, **kwargs):
key = str(args) + str(kwargs)
if key not in cache:
cache[key] = obj(*args, **kwargs)
return cache[key]
return memoizer
from shutil import which
which = memoize(which)
def parse_datafile(file):
"""Parse .data files, treating files as JSON
"""
data = []
with open(file) as fh:
for line in fh:
line = line.rstrip("\n")
# Turn [] strings into {} to be treated properly as JSON hashes
if line.startswith('[') and line.endswith(']'):
line = '{' + line[1:-1] + '}'
if line.startswith("{"):
data.append(json.loads(line))
else:
data.append(line)
return data
def mkstemp(data):
"""
Create a temporary file that is removed at process exit
"""
def rmtemp(name):
try:
os.remove(name)
except OSError:
pass
f = tempfile.NamedTemporaryFile(delete=False)
f.write(data)
f.close()
# Ensure removal at end of python session
atexit.register(rmtemp, f.name)
return f.name
def mkstemp_exec(data):
"""Create a temporary executable file that is removed at process exit
"""
name = mkstemp(data)
os.chmod(name, 0o755)
return name
# vim: ai sts=4 et sw=4
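
# A small standalone sketch of how the memoize helper above behaves; the decorator
# is repeated here verbatim so the example runs without importing this module, and
# the decorated function and call counter are illustrative only.
import functools


def memoize(obj):
    cache = obj.cache = {}

    @functools.wraps(obj)
    def memoizer(*args, **kwargs):
        key = str(args) + str(kwargs)
        if key not in cache:
            cache[key] = obj(*args, **kwargs)
        return cache[key]
    return memoizer


calls = {"count": 0}


@memoize
def slow_square(x):
    calls["count"] += 1
    return x * x


print(slow_square(4))   # computed: prints 16
print(slow_square(4))   # served from the cache: still 16
print(calls["count"])   # -> 1, the underlying function ran only once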
|
normal
|
{
"blob_id": "7f220a970d65a91228501f7db59089e6c0604fb5",
"index": 9915,
"step-1": "<mask token>\n\n\ndef wait_condition(cond, timeout=1, sleeptime=0.01):\n \"\"\"Wait for condition to return anything other than None\n \"\"\"\n if timeout is None:\n timeout = 1\n if timeout < sleeptime:\n print('Warning, timeout cannot be smaller than', sleeptime)\n timeout = sleeptime\n tries = int(timeout / sleeptime)\n for i in range(tries):\n val = cond()\n if val is not None:\n break\n sleep(sleeptime)\n return val\n\n\n<mask token>\n\n\ndef _queue_output(arguments, pidq, outputq):\n \"\"\"Read/Write output/input of given process.\n This function is meant to be executed in a thread as it may block\n \"\"\"\n kwargs = arguments['process']\n input = arguments['input']\n try:\n proc = Popen(**kwargs)\n except OSError as e:\n pidq.put(None)\n outputq.put(('',\n \"Unexpected exception caught during execution: '{0}' . \".format\n (e), 255))\n return\n pidq.put(proc.pid)\n out, err = proc.communicate(input)\n out, err = out.decode('utf-8'), err.decode('utf-8')\n outputq.put((out, err, proc.returncode))\n\n\ndef _retrieve_output(thread, timeout, queue, thread_error):\n \"\"\"Fetch output from binary subprocess queues\n \"\"\"\n thread.join(timeout)\n if thread.isAlive():\n raise TimeoutWaitingFor(thread_error + '. Unexpected error')\n try:\n data = queue.get(timeout=timeout)\n except Empty:\n data = TimeoutWaitingFor('streams from program')\n return data\n\n\ndef _get_output(arguments, timeout=None):\n \"\"\"Collect output from the subprocess without blocking the main process if\n subprocess hangs.\n \"\"\"\n output_timeout = 0.1\n pidq = Queue()\n outputq = Queue()\n t = Thread(target=_queue_output, args=(arguments, pidq, outputq))\n t.daemon = True\n t.start()\n try:\n pid = pidq.get(timeout=timeout)\n except Empty:\n pid = None\n if pid is None:\n return _retrieve_output(t, output_timeout, outputq, 'Program to start')\n state = wait_process(pid, timeout)\n if state:\n return _retrieve_output(t, output_timeout, outputq,\n 'Program thread to join')\n for sig in (signal.SIGABRT, signal.SIGTERM, signal.SIGKILL):\n try:\n os.kill(pid, signal.SIGABRT)\n except OSError as e:\n if e.errno != 3:\n raise\n state = wait_process(pid, timeout)\n if state:\n return _retrieve_output(t, output_timeout, outputq,\n 'Program to die')\n raise OSError(\"Program stopped responding and couldn't be killed\")\n\n\n<mask token>\n\n\ndef memoize(obj):\n \"\"\"Keep an in-memory cache of function results given its inputs\n \"\"\"\n cache = obj.cache = {}\n\n @functools.wraps(obj)\n def memoizer(*args, **kwargs):\n key = str(args) + str(kwargs)\n if key not in cache:\n cache[key] = obj(*args, **kwargs)\n return cache[key]\n return memoizer\n\n\n<mask token>\n\n\ndef mkstemp(data):\n \"\"\"\n Create a temporary file that is removed at process exit\n \"\"\"\n\n def rmtemp(name):\n try:\n os.remove(name)\n except OSError:\n pass\n f = tempfile.NamedTemporaryFile(delete=False)\n f.write(data)\n f.close()\n atexit.register(rmtemp, f.name)\n return f.name\n\n\ndef mkstemp_exec(data):\n \"\"\"Create a temporary executable file that is removed at process exit\n \"\"\"\n name = mkstemp(data)\n os.chmod(name, 493)\n return name\n",
"step-2": "<mask token>\n\n\ndef wait_condition(cond, timeout=1, sleeptime=0.01):\n \"\"\"Wait for condition to return anything other than None\n \"\"\"\n if timeout is None:\n timeout = 1\n if timeout < sleeptime:\n print('Warning, timeout cannot be smaller than', sleeptime)\n timeout = sleeptime\n tries = int(timeout / sleeptime)\n for i in range(tries):\n val = cond()\n if val is not None:\n break\n sleep(sleeptime)\n return val\n\n\ndef wait_process(pid, timeout=None):\n \"\"\"Wait for process to finish\n \"\"\"\n\n def process():\n try:\n os.kill(pid, 0)\n except OSError:\n return True\n else:\n return None\n return wait_condition(process, timeout)\n\n\ndef _queue_output(arguments, pidq, outputq):\n \"\"\"Read/Write output/input of given process.\n This function is meant to be executed in a thread as it may block\n \"\"\"\n kwargs = arguments['process']\n input = arguments['input']\n try:\n proc = Popen(**kwargs)\n except OSError as e:\n pidq.put(None)\n outputq.put(('',\n \"Unexpected exception caught during execution: '{0}' . \".format\n (e), 255))\n return\n pidq.put(proc.pid)\n out, err = proc.communicate(input)\n out, err = out.decode('utf-8'), err.decode('utf-8')\n outputq.put((out, err, proc.returncode))\n\n\ndef _retrieve_output(thread, timeout, queue, thread_error):\n \"\"\"Fetch output from binary subprocess queues\n \"\"\"\n thread.join(timeout)\n if thread.isAlive():\n raise TimeoutWaitingFor(thread_error + '. Unexpected error')\n try:\n data = queue.get(timeout=timeout)\n except Empty:\n data = TimeoutWaitingFor('streams from program')\n return data\n\n\ndef _get_output(arguments, timeout=None):\n \"\"\"Collect output from the subprocess without blocking the main process if\n subprocess hangs.\n \"\"\"\n output_timeout = 0.1\n pidq = Queue()\n outputq = Queue()\n t = Thread(target=_queue_output, args=(arguments, pidq, outputq))\n t.daemon = True\n t.start()\n try:\n pid = pidq.get(timeout=timeout)\n except Empty:\n pid = None\n if pid is None:\n return _retrieve_output(t, output_timeout, outputq, 'Program to start')\n state = wait_process(pid, timeout)\n if state:\n return _retrieve_output(t, output_timeout, outputq,\n 'Program thread to join')\n for sig in (signal.SIGABRT, signal.SIGTERM, signal.SIGKILL):\n try:\n os.kill(pid, signal.SIGABRT)\n except OSError as e:\n if e.errno != 3:\n raise\n state = wait_process(pid, timeout)\n if state:\n return _retrieve_output(t, output_timeout, outputq,\n 'Program to die')\n raise OSError(\"Program stopped responding and couldn't be killed\")\n\n\ndef run_cmd_wait(cmd, input=None, stdout=PIPE, stderr=PIPE, merge_streams=\n False, env=os.environ, timeout=None):\n \"\"\"Run a subprocess and wait for it to finish\"\"\"\n if input is None:\n stdin = None\n else:\n stdin = PIPE\n if merge_streams:\n stderr = STDOUT\n else:\n stderr = PIPE\n arguments = {'process': {'args': cmd, 'stdin': stdin, 'stdout': stdout,\n 'stderr': stderr, 'bufsize': 1, 'close_fds': ON_POSIX, 'env': env},\n 'input': input}\n out, err, exit = _get_output(arguments, timeout)\n if merge_streams:\n if exit != 0:\n raise CommandError(cmd, exit, out)\n else:\n return exit, out\n elif exit != 0:\n raise CommandError(cmd, exit, out, err)\n else:\n return exit, out, err\n\n\ndef run_cmd_wait_nofail(*args, **kwargs):\n \"\"\"Same as run_cmd_wait but silence the exception if it happens\"\"\"\n try:\n return run_cmd_wait(*args, **kwargs)\n except CommandError as e:\n return e.code, e.out, e.err\n\n\ndef memoize(obj):\n \"\"\"Keep an in-memory cache of function results given 
its inputs\n \"\"\"\n cache = obj.cache = {}\n\n @functools.wraps(obj)\n def memoizer(*args, **kwargs):\n key = str(args) + str(kwargs)\n if key not in cache:\n cache[key] = obj(*args, **kwargs)\n return cache[key]\n return memoizer\n\n\n<mask token>\n\n\ndef parse_datafile(file):\n \"\"\"Parse .data files, treating files as JSON\n \"\"\"\n data = []\n with open(file) as fh:\n for line in fh:\n line = line.rstrip('\\n')\n if line.startswith('[') and line.endswith(']'):\n line = '{' + line[1:-1] + '}'\n if line.startswith('{'):\n data.append(json.loads(line))\n else:\n data.append(line)\n return data\n\n\ndef mkstemp(data):\n \"\"\"\n Create a temporary file that is removed at process exit\n \"\"\"\n\n def rmtemp(name):\n try:\n os.remove(name)\n except OSError:\n pass\n f = tempfile.NamedTemporaryFile(delete=False)\n f.write(data)\n f.close()\n atexit.register(rmtemp, f.name)\n return f.name\n\n\ndef mkstemp_exec(data):\n \"\"\"Create a temporary executable file that is removed at process exit\n \"\"\"\n name = mkstemp(data)\n os.chmod(name, 493)\n return name\n",
"step-3": "<mask token>\n\n\ndef shared_binary_location(cmd='shared'):\n \"\"\" ../src/ is used by default.\n \"\"\"\n return os.path.join(BIN_PREFIX, cmd)\n return binary_location(cmd, SHARED_USE_PATH)\n\n\ndef binary_location(cmd, USE_PATH=False):\n \"\"\" ../src/ is used by default.\n \"\"\"\n return os.path.join(BIN_PREFIX, cmd)\n\n\ndef wait_condition(cond, timeout=1, sleeptime=0.01):\n \"\"\"Wait for condition to return anything other than None\n \"\"\"\n if timeout is None:\n timeout = 1\n if timeout < sleeptime:\n print('Warning, timeout cannot be smaller than', sleeptime)\n timeout = sleeptime\n tries = int(timeout / sleeptime)\n for i in range(tries):\n val = cond()\n if val is not None:\n break\n sleep(sleeptime)\n return val\n\n\ndef wait_process(pid, timeout=None):\n \"\"\"Wait for process to finish\n \"\"\"\n\n def process():\n try:\n os.kill(pid, 0)\n except OSError:\n return True\n else:\n return None\n return wait_condition(process, timeout)\n\n\ndef _queue_output(arguments, pidq, outputq):\n \"\"\"Read/Write output/input of given process.\n This function is meant to be executed in a thread as it may block\n \"\"\"\n kwargs = arguments['process']\n input = arguments['input']\n try:\n proc = Popen(**kwargs)\n except OSError as e:\n pidq.put(None)\n outputq.put(('',\n \"Unexpected exception caught during execution: '{0}' . \".format\n (e), 255))\n return\n pidq.put(proc.pid)\n out, err = proc.communicate(input)\n out, err = out.decode('utf-8'), err.decode('utf-8')\n outputq.put((out, err, proc.returncode))\n\n\ndef _retrieve_output(thread, timeout, queue, thread_error):\n \"\"\"Fetch output from binary subprocess queues\n \"\"\"\n thread.join(timeout)\n if thread.isAlive():\n raise TimeoutWaitingFor(thread_error + '. Unexpected error')\n try:\n data = queue.get(timeout=timeout)\n except Empty:\n data = TimeoutWaitingFor('streams from program')\n return data\n\n\ndef _get_output(arguments, timeout=None):\n \"\"\"Collect output from the subprocess without blocking the main process if\n subprocess hangs.\n \"\"\"\n output_timeout = 0.1\n pidq = Queue()\n outputq = Queue()\n t = Thread(target=_queue_output, args=(arguments, pidq, outputq))\n t.daemon = True\n t.start()\n try:\n pid = pidq.get(timeout=timeout)\n except Empty:\n pid = None\n if pid is None:\n return _retrieve_output(t, output_timeout, outputq, 'Program to start')\n state = wait_process(pid, timeout)\n if state:\n return _retrieve_output(t, output_timeout, outputq,\n 'Program thread to join')\n for sig in (signal.SIGABRT, signal.SIGTERM, signal.SIGKILL):\n try:\n os.kill(pid, signal.SIGABRT)\n except OSError as e:\n if e.errno != 3:\n raise\n state = wait_process(pid, timeout)\n if state:\n return _retrieve_output(t, output_timeout, outputq,\n 'Program to die')\n raise OSError(\"Program stopped responding and couldn't be killed\")\n\n\ndef run_cmd_wait(cmd, input=None, stdout=PIPE, stderr=PIPE, merge_streams=\n False, env=os.environ, timeout=None):\n \"\"\"Run a subprocess and wait for it to finish\"\"\"\n if input is None:\n stdin = None\n else:\n stdin = PIPE\n if merge_streams:\n stderr = STDOUT\n else:\n stderr = PIPE\n arguments = {'process': {'args': cmd, 'stdin': stdin, 'stdout': stdout,\n 'stderr': stderr, 'bufsize': 1, 'close_fds': ON_POSIX, 'env': env},\n 'input': input}\n out, err, exit = _get_output(arguments, timeout)\n if merge_streams:\n if exit != 0:\n raise CommandError(cmd, exit, out)\n else:\n return exit, out\n elif exit != 0:\n raise CommandError(cmd, exit, out, err)\n else:\n return exit, out, 
err\n\n\ndef run_cmd_wait_nofail(*args, **kwargs):\n \"\"\"Same as run_cmd_wait but silence the exception if it happens\"\"\"\n try:\n return run_cmd_wait(*args, **kwargs)\n except CommandError as e:\n return e.code, e.out, e.err\n\n\ndef memoize(obj):\n \"\"\"Keep an in-memory cache of function results given its inputs\n \"\"\"\n cache = obj.cache = {}\n\n @functools.wraps(obj)\n def memoizer(*args, **kwargs):\n key = str(args) + str(kwargs)\n if key not in cache:\n cache[key] = obj(*args, **kwargs)\n return cache[key]\n return memoizer\n\n\n<mask token>\n\n\ndef parse_datafile(file):\n \"\"\"Parse .data files, treating files as JSON\n \"\"\"\n data = []\n with open(file) as fh:\n for line in fh:\n line = line.rstrip('\\n')\n if line.startswith('[') and line.endswith(']'):\n line = '{' + line[1:-1] + '}'\n if line.startswith('{'):\n data.append(json.loads(line))\n else:\n data.append(line)\n return data\n\n\ndef mkstemp(data):\n \"\"\"\n Create a temporary file that is removed at process exit\n \"\"\"\n\n def rmtemp(name):\n try:\n os.remove(name)\n except OSError:\n pass\n f = tempfile.NamedTemporaryFile(delete=False)\n f.write(data)\n f.close()\n atexit.register(rmtemp, f.name)\n return f.name\n\n\ndef mkstemp_exec(data):\n \"\"\"Create a temporary executable file that is removed at process exit\n \"\"\"\n name = mkstemp(data)\n os.chmod(name, 493)\n return name\n",
"step-4": "<mask token>\nON_POSIX = 'posix' in sys.builtin_module_names\nCURRENT_DIR = os.path.dirname(os.path.abspath(__file__))\nBIN_PREFIX = os.path.abspath(os.path.join(CURRENT_DIR, '..', '..', 'src'))\nDEFAULT_CERT_PATH = os.path.abspath(os.path.join(CURRENT_DIR, '..',\n 'test_certs'))\nDEFAULT_EXTENSION_PATH = os.path.abspath(os.path.join(CURRENT_DIR, '..',\n 'test_extensions'))\nSHARED_SKIP = os.environ.get('SHARED_SKIP', False)\nSHARED_USE_PATH = os.environ.get('SHARED_USE_PATH', False)\nUUID_REGEXP = '[0-9A-Fa-f]{8}-' + '[0-9A-Fa-f]{4}-' * 3 + '[0-9A-Fa-f]{12}'\n\n\ndef shared_binary_location(cmd='shared'):\n \"\"\" ../src/ is used by default.\n \"\"\"\n return os.path.join(BIN_PREFIX, cmd)\n return binary_location(cmd, SHARED_USE_PATH)\n\n\ndef binary_location(cmd, USE_PATH=False):\n \"\"\" ../src/ is used by default.\n \"\"\"\n return os.path.join(BIN_PREFIX, cmd)\n\n\ndef wait_condition(cond, timeout=1, sleeptime=0.01):\n \"\"\"Wait for condition to return anything other than None\n \"\"\"\n if timeout is None:\n timeout = 1\n if timeout < sleeptime:\n print('Warning, timeout cannot be smaller than', sleeptime)\n timeout = sleeptime\n tries = int(timeout / sleeptime)\n for i in range(tries):\n val = cond()\n if val is not None:\n break\n sleep(sleeptime)\n return val\n\n\ndef wait_process(pid, timeout=None):\n \"\"\"Wait for process to finish\n \"\"\"\n\n def process():\n try:\n os.kill(pid, 0)\n except OSError:\n return True\n else:\n return None\n return wait_condition(process, timeout)\n\n\ndef _queue_output(arguments, pidq, outputq):\n \"\"\"Read/Write output/input of given process.\n This function is meant to be executed in a thread as it may block\n \"\"\"\n kwargs = arguments['process']\n input = arguments['input']\n try:\n proc = Popen(**kwargs)\n except OSError as e:\n pidq.put(None)\n outputq.put(('',\n \"Unexpected exception caught during execution: '{0}' . \".format\n (e), 255))\n return\n pidq.put(proc.pid)\n out, err = proc.communicate(input)\n out, err = out.decode('utf-8'), err.decode('utf-8')\n outputq.put((out, err, proc.returncode))\n\n\ndef _retrieve_output(thread, timeout, queue, thread_error):\n \"\"\"Fetch output from binary subprocess queues\n \"\"\"\n thread.join(timeout)\n if thread.isAlive():\n raise TimeoutWaitingFor(thread_error + '. 
Unexpected error')\n try:\n data = queue.get(timeout=timeout)\n except Empty:\n data = TimeoutWaitingFor('streams from program')\n return data\n\n\ndef _get_output(arguments, timeout=None):\n \"\"\"Collect output from the subprocess without blocking the main process if\n subprocess hangs.\n \"\"\"\n output_timeout = 0.1\n pidq = Queue()\n outputq = Queue()\n t = Thread(target=_queue_output, args=(arguments, pidq, outputq))\n t.daemon = True\n t.start()\n try:\n pid = pidq.get(timeout=timeout)\n except Empty:\n pid = None\n if pid is None:\n return _retrieve_output(t, output_timeout, outputq, 'Program to start')\n state = wait_process(pid, timeout)\n if state:\n return _retrieve_output(t, output_timeout, outputq,\n 'Program thread to join')\n for sig in (signal.SIGABRT, signal.SIGTERM, signal.SIGKILL):\n try:\n os.kill(pid, signal.SIGABRT)\n except OSError as e:\n if e.errno != 3:\n raise\n state = wait_process(pid, timeout)\n if state:\n return _retrieve_output(t, output_timeout, outputq,\n 'Program to die')\n raise OSError(\"Program stopped responding and couldn't be killed\")\n\n\ndef run_cmd_wait(cmd, input=None, stdout=PIPE, stderr=PIPE, merge_streams=\n False, env=os.environ, timeout=None):\n \"\"\"Run a subprocess and wait for it to finish\"\"\"\n if input is None:\n stdin = None\n else:\n stdin = PIPE\n if merge_streams:\n stderr = STDOUT\n else:\n stderr = PIPE\n arguments = {'process': {'args': cmd, 'stdin': stdin, 'stdout': stdout,\n 'stderr': stderr, 'bufsize': 1, 'close_fds': ON_POSIX, 'env': env},\n 'input': input}\n out, err, exit = _get_output(arguments, timeout)\n if merge_streams:\n if exit != 0:\n raise CommandError(cmd, exit, out)\n else:\n return exit, out\n elif exit != 0:\n raise CommandError(cmd, exit, out, err)\n else:\n return exit, out, err\n\n\ndef run_cmd_wait_nofail(*args, **kwargs):\n \"\"\"Same as run_cmd_wait but silence the exception if it happens\"\"\"\n try:\n return run_cmd_wait(*args, **kwargs)\n except CommandError as e:\n return e.code, e.out, e.err\n\n\ndef memoize(obj):\n \"\"\"Keep an in-memory cache of function results given its inputs\n \"\"\"\n cache = obj.cache = {}\n\n @functools.wraps(obj)\n def memoizer(*args, **kwargs):\n key = str(args) + str(kwargs)\n if key not in cache:\n cache[key] = obj(*args, **kwargs)\n return cache[key]\n return memoizer\n\n\n<mask token>\nwhich = memoize(which)\n\n\ndef parse_datafile(file):\n \"\"\"Parse .data files, treating files as JSON\n \"\"\"\n data = []\n with open(file) as fh:\n for line in fh:\n line = line.rstrip('\\n')\n if line.startswith('[') and line.endswith(']'):\n line = '{' + line[1:-1] + '}'\n if line.startswith('{'):\n data.append(json.loads(line))\n else:\n data.append(line)\n return data\n\n\ndef mkstemp(data):\n \"\"\"\n Create a temporary file that is removed at process exit\n \"\"\"\n\n def rmtemp(name):\n try:\n os.remove(name)\n except OSError:\n pass\n f = tempfile.NamedTemporaryFile(delete=False)\n f.write(data)\n f.close()\n atexit.register(rmtemp, f.name)\n return f.name\n\n\ndef mkstemp_exec(data):\n \"\"\"Create a temporary executable file that is removed at process exit\n \"\"\"\n name = mkstemp(data)\n os.chmod(name, 493)\n return name\n",
"step-5": "# -*- coding: utf-8 -*-\nimport os\nimport sys\nimport socket\nimport signal\nimport functools\nimport atexit\nimport tempfile\nfrom subprocess import Popen, PIPE, STDOUT\nfrom threading import Thread\nfrom queue import Queue, Empty\nfrom time import sleep\nimport json\nfrom .exceptions import CommandError, TimeoutWaitingFor\n\nON_POSIX = 'posix' in sys.builtin_module_names\n\n# Directory relative to basetest module location\nCURRENT_DIR = os.path.dirname(os.path.abspath(__file__))\n\n# Location of binary files (usually the src/ folder)\nBIN_PREFIX = os.path.abspath(\n os.path.join(CURRENT_DIR, \"..\", \"..\", \"src\")\n)\n\n# Default location of test certificates\nDEFAULT_CERT_PATH = os.path.abspath(\n os.path.join(CURRENT_DIR, \"..\", \"test_certs\")\n)\n\n# Default location of test extensions\nDEFAULT_EXTENSION_PATH = os.path.abspath(\n os.path.join(CURRENT_DIR, \"..\", \"test_extensions\")\n)\n\n\n# Environment flags to control skipping of shared tests\nSHARED_SKIP = os.environ.get(\"SHARED_SKIP\", False)\n# Environment flags to control use of PATH or in-tree binaries\nSHARED_USE_PATH = os.environ.get(\"SHARED_USE_PATH\", False)\n\nUUID_REGEXP = (\"[0-9A-Fa-f]{8}-\" + (\"[0-9A-Fa-f]{4}-\" * 3) + \"[0-9A-Fa-f]{12}\")\n\n\ndef shared_binary_location(cmd=\"shared\"):\n \"\"\" ../src/ is used by default.\n \"\"\"\n return os.path.join(BIN_PREFIX, cmd)\n return binary_location(cmd, SHARED_USE_PATH)\n\n\ndef binary_location(cmd, USE_PATH=False):\n \"\"\" ../src/ is used by default.\n \"\"\"\n return os.path.join(BIN_PREFIX, cmd)\n\n\ndef wait_condition(cond, timeout=1, sleeptime=.01):\n \"\"\"Wait for condition to return anything other than None\n \"\"\"\n # NOTE Increasing sleeptime can dramatically increase testsuite runtime\n # It also reduces CPU load significantly\n if timeout is None:\n timeout = 1\n\n if timeout < sleeptime:\n print(\"Warning, timeout cannot be smaller than\", sleeptime)\n timeout = sleeptime\n\n # Max number of attempts until giving up\n tries = int(timeout / sleeptime)\n\n for i in range(tries):\n val = cond()\n\n if val is not None:\n break\n\n sleep(sleeptime)\n\n return val\n\n\ndef wait_process(pid, timeout=None):\n \"\"\"Wait for process to finish\n \"\"\"\n def process():\n try:\n os.kill(pid, 0)\n except OSError:\n # Process is dead\n return True\n else:\n # Process is still ticking\n return None\n\n return wait_condition(process, timeout)\n\n\ndef _queue_output(arguments, pidq, outputq):\n \"\"\"Read/Write output/input of given process.\n This function is meant to be executed in a thread as it may block\n \"\"\"\n kwargs = arguments[\"process\"]\n input = arguments[\"input\"]\n\n try:\n proc = Popen(**kwargs)\n except OSError as e:\n # pid None is read by the main thread as a crash of the process\n pidq.put(None)\n\n outputq.put((\n \"\",\n (\"Unexpected exception caught during execution: '{0}' . \".format(e)),\n 255)) # false exitcode\n\n return\n\n # Put the PID in the queue for main process to know.\n pidq.put(proc.pid)\n\n # Send input and wait for finish\n out, err = proc.communicate(input)\n\n out, err = out.decode('utf-8'), err.decode('utf-8')\n\n # Give the output back to the caller\n outputq.put((out, err, proc.returncode))\n\n\ndef _retrieve_output(thread, timeout, queue, thread_error):\n \"\"\"Fetch output from binary subprocess queues\n \"\"\"\n # Try to join the thread on failure abort\n thread.join(timeout)\n if thread.isAlive():\n # Join should have killed the thread. This is unexpected\n raise TimeoutWaitingFor(thread_error + \". 
Unexpected error\")\n\n # Thread died so we should have output\n try:\n # data = (stdout, stderr, exitcode)\n data = queue.get(timeout=timeout)\n except Empty:\n data = TimeoutWaitingFor(\"streams from program\")\n\n return data\n\n\ndef _get_output(arguments, timeout=None):\n \"\"\"Collect output from the subprocess without blocking the main process if\n subprocess hangs.\n \"\"\"\n # NOTE Increase this value if tests fail with None being received as\n # stdout/stderr instead of the expected content\n output_timeout = 0.1 # seconds\n\n pidq = Queue()\n outputq = Queue()\n\n t = Thread(target=_queue_output, args=(arguments, pidq, outputq))\n t.daemon = True\n t.start()\n\n try:\n pid = pidq.get(timeout=timeout)\n except Empty:\n pid = None\n\n # Process crashed or timed out for some reason\n if pid is None:\n return _retrieve_output(t, output_timeout, outputq,\n \"Program to start\")\n\n # Wait for process to finish (normal execution)\n state = wait_process(pid, timeout)\n\n if state:\n # Process finished\n return _retrieve_output(t, output_timeout, outputq,\n \"Program thread to join\")\n\n # If we reach this point we assume the process got stuck or timed out\n for sig in (signal.SIGABRT, signal.SIGTERM, signal.SIGKILL):\n # Start with lower signals and escalate if process ignores them\n try:\n os.kill(pid, signal.SIGABRT)\n except OSError as e:\n # 3 means the process finished/died between last check and now\n if e.errno != 3:\n raise\n\n # Wait for process to finish (should die/exit after signal)\n state = wait_process(pid, timeout)\n\n if state:\n # Process finished\n return _retrieve_output(t, output_timeout, outputq,\n \"Program to die\")\n\n # This should never happen but in case something goes really bad\n raise OSError(\"Program stopped responding and couldn't be killed\")\n\n\ndef run_cmd_wait(cmd, input=None, stdout=PIPE, stderr=PIPE,\n merge_streams=False, env=os.environ, timeout=None):\n \"Run a subprocess and wait for it to finish\"\n\n if input is None:\n stdin = None\n else:\n stdin = PIPE\n\n if merge_streams:\n stderr = STDOUT\n else:\n stderr = PIPE\n\n arguments = {\n \"process\": {\n \"args\": cmd,\n \"stdin\": stdin,\n \"stdout\": stdout,\n \"stderr\": stderr,\n \"bufsize\": 1,\n \"close_fds\": ON_POSIX,\n \"env\": env,\n },\n \"input\": input,\n }\n out, err, exit = _get_output(arguments, timeout)\n\n if merge_streams:\n if exit != 0:\n raise CommandError(cmd, exit, out)\n else:\n return exit, out\n else:\n if exit != 0:\n raise CommandError(cmd, exit, out, err)\n else:\n return exit, out, err\n\n\ndef run_cmd_wait_nofail(*args, **kwargs):\n \"\"\"Same as run_cmd_wait but silence the exception if it happens\"\"\"\n try:\n return run_cmd_wait(*args, **kwargs)\n except CommandError as e:\n return e.code, e.out, e.err\n\n\ndef memoize(obj):\n \"\"\"Keep an in-memory cache of function results given its inputs\n \"\"\"\n cache = obj.cache = {}\n\n @functools.wraps(obj)\n def memoizer(*args, **kwargs):\n key = str(args) + str(kwargs)\n if key not in cache:\n cache[key] = obj(*args, **kwargs)\n return cache[key]\n return memoizer\n\n\nfrom shutil import which\nwhich = memoize(which)\n\n\ndef parse_datafile(file):\n \"\"\"Parse .data files, treating files as JSON\n \"\"\"\n data = []\n with open(file) as fh:\n for line in fh:\n line = line.rstrip(\"\\n\")\n\n # Turn [] strings into {} to be treated properly as JSON hashes\n if line.startswith('[') and line.endswith(']'):\n line = '{' + line[1:-1] + '}'\n\n if line.startswith(\"{\"):\n data.append(json.loads(line))\n else:\n 
data.append(line)\n return data\n\n\ndef mkstemp(data):\n \"\"\"\n Create a temporary file that is removed at process exit\n \"\"\"\n def rmtemp(name):\n try:\n os.remove(name)\n except OSError:\n pass\n\n f = tempfile.NamedTemporaryFile(delete=False)\n f.write(data)\n f.close()\n\n # Ensure removal at end of python session\n atexit.register(rmtemp, f.name)\n\n return f.name\n\n\ndef mkstemp_exec(data):\n \"\"\"Create a temporary executable file that is removed at process exit\n \"\"\"\n name = mkstemp(data)\n os.chmod(name, 0o755)\n\n return name\n\n# vim: ai sts=4 et sw=4\n",
"step-ids": [
7,
11,
13,
14,
16
]
}
|
[
7,
11,
13,
14,
16
] |
<|reserved_special_token_0|>
def outputQ() ->int:
try:
outputQ = int(input())
return outputQ
finally:
print('output quantity:' + str(outputQ))
<|reserved_special_token_0|>
def quantityToOutput(outputQ: int) ->list:
outputCount = 0
outputList = list()
while outputCount < outputQ:
outputList.append(input())
outputCount += 1
return outputList
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def tripQuantity() ->int:
try:
locationQ = int(input())
return locationQ
finally:
print('locationQ: ' + str(locationQ))
def outputQ() ->int:
try:
outputQ = int(input())
return outputQ
finally:
print('output quantity:' + str(outputQ))
def quantityToLocations(tripQ: int) ->list:
locationCount = 0
locationList = list()
while locationCount < tripQ:
locationList.append(input())
locationCount += 1
return locationList
def quantityToOutput(outputQ: int) ->list:
outputCount = 0
outputList = list()
while outputCount < outputQ:
outputList.append(input())
outputCount += 1
return outputList
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def tripQuantity() ->int:
try:
locationQ = int(input())
return locationQ
finally:
print('locationQ: ' + str(locationQ))
def outputQ() ->int:
try:
outputQ = int(input())
return outputQ
finally:
print('output quantity:' + str(outputQ))
def quantityToLocations(tripQ: int) ->list:
locationCount = 0
locationList = list()
while locationCount < tripQ:
locationList.append(input())
locationCount += 1
return locationList
def quantityToOutput(outputQ: int) ->list:
outputCount = 0
outputList = list()
while outputCount < outputQ:
outputList.append(input())
outputCount += 1
return outputList
if __name__ == '__main__':
locationQ = tripQuantity()
locationList = quantityToLocations(locationQ)
newSearch = Module1.URL()
newSearch.set_from_location(locationList[0])
newSearch.set_to_location(locationList[1:len(locationList)])
print(str(newSearch.get_To_Location()))
newSearch.set_request_url()
newSearch_request_url = newSearch.get_Request_URL()
newSearch_reponse = newSearch.search_request_response()
"""
#USED TO REQUEST MAPQUEST SEARCH
x = urllib.request.urlopen(url)
#USED TO DECODE MAPQUEST RESPONSE
y = x.read().decode(encoding = 'utf-8')
print(y) # USE decoded response string to check with pretty json
#USED TO CONVERT DECODED STRING TO DICT/LISTS
z = json.loads(y) #dictionary of mapquest response which also includes lists
print(type(z['route']['locations']))
locationsList = z['route']['locations']
print(locationsList)
print(locationsList[1]['latLng'])
i = 0
if i < len(locationsList):
for key in locationsList[i]:
if key == 'latLng':
print(locationsList[i][key])
i+=1
#### i = 0
#### if i < len(locationsList):
#### if locationList[i] == 'latLng':
#### print(locationsList[i])
####
#print (y)
"""
<|reserved_special_token_1|>
import Module1
def tripQuantity() ->int:
try:
locationQ = int(input())
return locationQ
finally:
print('locationQ: ' + str(locationQ))
def outputQ() ->int:
try:
outputQ = int(input())
return outputQ
finally:
print('output quantity:' + str(outputQ))
def quantityToLocations(tripQ: int) ->list:
locationCount = 0
locationList = list()
while locationCount < tripQ:
locationList.append(input())
locationCount += 1
return locationList
def quantityToOutput(outputQ: int) ->list:
outputCount = 0
outputList = list()
while outputCount < outputQ:
outputList.append(input())
outputCount += 1
return outputList
if __name__ == '__main__':
locationQ = tripQuantity()
locationList = quantityToLocations(locationQ)
newSearch = Module1.URL()
newSearch.set_from_location(locationList[0])
newSearch.set_to_location(locationList[1:len(locationList)])
print(str(newSearch.get_To_Location()))
newSearch.set_request_url()
newSearch_request_url = newSearch.get_Request_URL()
newSearch_reponse = newSearch.search_request_response()
"""
#USED TO REQUEST MAPQUEST SEARCH
x = urllib.request.urlopen(url)
#USED TO DECODE MAPQUEST RESPONSE
y = x.read().decode(encoding = 'utf-8')
print(y) # USE decoded response string to check with pretty json
#USED TO CONVERT DECODED STRING TO DICT/LISTS
z = json.loads(y) #dictionary of mapquest response which also includes lists
print(type(z['route']['locations']))
locationsList = z['route']['locations']
print(locationsList)
print(locationsList[1]['latLng'])
i = 0
if i < len(locationsList):
for key in locationsList[i]:
if key == 'latLng':
print(locationsList[i][key])
i+=1
#### i = 0
#### if i < len(locationsList):
#### if locationList[i] == 'latLng':
#### print(locationsList[i])
####
#print (y)
"""
<|reserved_special_token_1|>
# Duy B. Lam
# 61502602
# Project 3
# A module that reads the input and constructs the objects
# that will generate the program's output. This is the only
# module that should have an if __name__ == '__main__' block
# to make it executable; you would execute this module to run your program.
import Module1
#USED TO RETREIVE THE NUMBER OF LOCATIONS
def tripQuantity() -> int:
try:
locationQ = int(input())
return locationQ
finally:
print('locationQ: ' + str(locationQ))
#USED TO RETREIVE THE NUMBER OF REQUESTED OUTPUTS
def outputQ() -> int:
try:
outputQ = int(input())
return outputQ
finally:
print('output quantity:' + str(outputQ))
#USED TO RECORD SEARCH LOCATIONS
def quantityToLocations(tripQ: int) -> list:
locationCount = 0
locationList = list()
while (locationCount < tripQ):
locationList.append(input())
locationCount+=1
return locationList
#USED TO RECORD OUTPUT OPTIONS
def quantityToOutput(outputQ: int) -> list:
outputCount = 0
outputList = list()
while (outputCount < outputQ):
outputList.append(input())
outputCount += 1
return outputList
if __name__ == '__main__':
#USED TO GET USER INPUTS
locationQ = tripQuantity()
locationList = quantityToLocations(locationQ) #print to double check
#CREATES A NEW SEARCH INSTANCE AND IT'S REQUEST URL
newSearch = Module1.URL()
newSearch.set_from_location(locationList[0])
newSearch.set_to_location(locationList[1:len(locationList)])
print(str(newSearch.get_To_Location()))
newSearch.set_request_url()
newSearch_request_url = newSearch.get_Request_URL() #print to double check
#THIS FUNCTION MAKES THE REQUEST AND GATHERS RESPONSE INTO DICTIONARY
newSearch_reponse = newSearch.search_request_response()
#USED TO GET USER OUTPUTS
#outputQ = outputQ()
#outputList = quantityToOutput(outputQ)
#print(outputList)
'''
#USED TO REQUEST MAPQUEST SEARCH
x = urllib.request.urlopen(url)
#USED TO DECODE MAPQUEST RESPONSE
y = x.read().decode(encoding = 'utf-8')
print(y) # USE decoded response string to check with pretty json
#USED TO CONVERT DECODED STRING TO DICT/LISTS
z = json.loads(y) #dictionary of mapquest response which also includes lists
print(type(z['route']['locations']))
locationsList = z['route']['locations']
print(locationsList)
print(locationsList[1]['latLng'])
i = 0
if i < len(locationsList):
for key in locationsList[i]:
if key == 'latLng':
print(locationsList[i][key])
i+=1
#### i = 0
#### if i < len(locationsList):
#### if locationList[i] == 'latLng':
#### print(locationsList[i])
####
#print (y)
'''
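
# A minimal offline sketch of the latLng extraction hinted at in the commented-out
# block above; the response dict is a made-up stand-in for a real MapQuest route
# response, so any key names beyond 'route'/'locations'/'latLng' are assumptions.
sample_response = {
    'route': {
        'locations': [
            {'latLng': {'lat': 33.6846, 'lng': -117.8265}},
            {'latLng': {'lat': 34.0522, 'lng': -118.2437}},
        ]
    }
}
for location in sample_response['route']['locations']:
    lat_lng = location.get('latLng')
    if lat_lng is not None:
        print('{} {}'.format(lat_lng['lat'], lat_lng['lng']))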
|
flexible
|
{
"blob_id": "da19bc4fc999bd48a3d55b8cb5f47ba6208bc02b",
"index": 4502,
"step-1": "<mask token>\n\n\ndef outputQ() ->int:\n try:\n outputQ = int(input())\n return outputQ\n finally:\n print('output quantity:' + str(outputQ))\n\n\n<mask token>\n\n\ndef quantityToOutput(outputQ: int) ->list:\n outputCount = 0\n outputList = list()\n while outputCount < outputQ:\n outputList.append(input())\n outputCount += 1\n return outputList\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef tripQuantity() ->int:\n try:\n locationQ = int(input())\n return locationQ\n finally:\n print('locationQ: ' + str(locationQ))\n\n\ndef outputQ() ->int:\n try:\n outputQ = int(input())\n return outputQ\n finally:\n print('output quantity:' + str(outputQ))\n\n\ndef quantityToLocations(tripQ: int) ->list:\n locationCount = 0\n locationList = list()\n while locationCount < tripQ:\n locationList.append(input())\n locationCount += 1\n return locationList\n\n\ndef quantityToOutput(outputQ: int) ->list:\n outputCount = 0\n outputList = list()\n while outputCount < outputQ:\n outputList.append(input())\n outputCount += 1\n return outputList\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef tripQuantity() ->int:\n try:\n locationQ = int(input())\n return locationQ\n finally:\n print('locationQ: ' + str(locationQ))\n\n\ndef outputQ() ->int:\n try:\n outputQ = int(input())\n return outputQ\n finally:\n print('output quantity:' + str(outputQ))\n\n\ndef quantityToLocations(tripQ: int) ->list:\n locationCount = 0\n locationList = list()\n while locationCount < tripQ:\n locationList.append(input())\n locationCount += 1\n return locationList\n\n\ndef quantityToOutput(outputQ: int) ->list:\n outputCount = 0\n outputList = list()\n while outputCount < outputQ:\n outputList.append(input())\n outputCount += 1\n return outputList\n\n\nif __name__ == '__main__':\n locationQ = tripQuantity()\n locationList = quantityToLocations(locationQ)\n newSearch = Module1.URL()\n newSearch.set_from_location(locationList[0])\n newSearch.set_to_location(locationList[1:len(locationList)])\n print(str(newSearch.get_To_Location()))\n newSearch.set_request_url()\n newSearch_request_url = newSearch.get_Request_URL()\n newSearch_reponse = newSearch.search_request_response()\n \"\"\"\n #USED TO REQUEST MAPQUEST SEARCH\n x = urllib.request.urlopen(url)\n \n #USED TO DECODE MAPQUEST RESPONSE\n y = x.read().decode(encoding = 'utf-8')\n print(y) # USE decoded response string to check with pretty json\n \n #USED TO CONVERT DECODED STRING TO DICT/LISTS\n z = json.loads(y) #dictionary of mapquest response which also includes lists\n\n print(type(z['route']['locations']))\n \n locationsList = z['route']['locations']\n print(locationsList)\n print(locationsList[1]['latLng'])\n i = 0\n if i < len(locationsList):\n for key in locationsList[i]:\n if key == 'latLng':\n print(locationsList[i][key])\n i+=1\n#### i = 0\n#### if i < len(locationsList):\n#### if locationList[i] == 'latLng':\n#### print(locationsList[i])\n#### \n \n #print (y)\n\"\"\"\n",
"step-4": "import Module1\n\n\ndef tripQuantity() ->int:\n try:\n locationQ = int(input())\n return locationQ\n finally:\n print('locationQ: ' + str(locationQ))\n\n\ndef outputQ() ->int:\n try:\n outputQ = int(input())\n return outputQ\n finally:\n print('output quantity:' + str(outputQ))\n\n\ndef quantityToLocations(tripQ: int) ->list:\n locationCount = 0\n locationList = list()\n while locationCount < tripQ:\n locationList.append(input())\n locationCount += 1\n return locationList\n\n\ndef quantityToOutput(outputQ: int) ->list:\n outputCount = 0\n outputList = list()\n while outputCount < outputQ:\n outputList.append(input())\n outputCount += 1\n return outputList\n\n\nif __name__ == '__main__':\n locationQ = tripQuantity()\n locationList = quantityToLocations(locationQ)\n newSearch = Module1.URL()\n newSearch.set_from_location(locationList[0])\n newSearch.set_to_location(locationList[1:len(locationList)])\n print(str(newSearch.get_To_Location()))\n newSearch.set_request_url()\n newSearch_request_url = newSearch.get_Request_URL()\n newSearch_reponse = newSearch.search_request_response()\n \"\"\"\n #USED TO REQUEST MAPQUEST SEARCH\n x = urllib.request.urlopen(url)\n \n #USED TO DECODE MAPQUEST RESPONSE\n y = x.read().decode(encoding = 'utf-8')\n print(y) # USE decoded response string to check with pretty json\n \n #USED TO CONVERT DECODED STRING TO DICT/LISTS\n z = json.loads(y) #dictionary of mapquest response which also includes lists\n\n print(type(z['route']['locations']))\n \n locationsList = z['route']['locations']\n print(locationsList)\n print(locationsList[1]['latLng'])\n i = 0\n if i < len(locationsList):\n for key in locationsList[i]:\n if key == 'latLng':\n print(locationsList[i][key])\n i+=1\n#### i = 0\n#### if i < len(locationsList):\n#### if locationList[i] == 'latLng':\n#### print(locationsList[i])\n#### \n \n #print (y)\n\"\"\"\n",
"step-5": "# Duy B. Lam\r\n# 61502602\r\n# Project 3\r\n\r\n# A module that reads the input and constructs the objects\r\n# that will generate the program's output. This is the only\r\n# module that should have an if __name__ == '__main__' block\r\n# to make it executable; you would execute this module to run your program.\r\n\r\n\r\nimport Module1\r\n\r\n\r\n#USED TO RETREIVE THE NUMBER OF LOCATIONS\r\ndef tripQuantity() -> int:\r\n try:\r\n locationQ = int(input())\r\n return locationQ\r\n finally:\r\n print('locationQ: ' + str(locationQ))\r\n\r\n#USED TO RETREIVE THE NUMBER OF REQUESTED OUTPUTS \r\ndef outputQ() -> int:\r\n try:\r\n outputQ = int(input())\r\n return outputQ\r\n finally:\r\n print('output quantity:' + str(outputQ))\r\n\r\n#USED TO RECORD SEARCH LOCATIONS\r\ndef quantityToLocations(tripQ: int) -> list:\r\n locationCount = 0\r\n locationList = list()\r\n while (locationCount < tripQ):\r\n locationList.append(input())\r\n locationCount+=1\r\n return locationList \r\n\r\n#USED TO RECORD OUTPUT OPTIONS\r\ndef quantityToOutput(outputQ: int) -> list:\r\n outputCount = 0\r\n outputList = list()\r\n while (outputCount < outputQ):\r\n outputList.append(input())\r\n outputCount += 1\r\n return outputList\r\n\r\n\r\nif __name__ == '__main__':\r\n \r\n #USED TO GET USER INPUTS\r\n locationQ = tripQuantity()\r\n locationList = quantityToLocations(locationQ) #print to double check\r\n \r\n #CREATES A NEW SEARCH INSTANCE AND IT'S REQUEST URL\r\n newSearch = Module1.URL()\r\n newSearch.set_from_location(locationList[0])\r\n newSearch.set_to_location(locationList[1:len(locationList)])\r\n print(str(newSearch.get_To_Location()))\r\n newSearch.set_request_url()\r\n newSearch_request_url = newSearch.get_Request_URL() #print to double check\r\n\r\n #THIS FUNCTION MAKES THE REQUEST AND GATHERS RESPONSE INTO DICTIONARY\r\n newSearch_reponse = newSearch.search_request_response()\r\n \r\n #USED TO GET USER OUTPUTS\r\n #outputQ = outputQ()\r\n #outputList = quantityToOutput(outputQ)\r\n #print(outputList)\r\n\r\n \r\n \r\n \r\n '''\r\n #USED TO REQUEST MAPQUEST SEARCH\r\n x = urllib.request.urlopen(url)\r\n \r\n #USED TO DECODE MAPQUEST RESPONSE\r\n y = x.read().decode(encoding = 'utf-8')\r\n print(y) # USE decoded response string to check with pretty json\r\n \r\n #USED TO CONVERT DECODED STRING TO DICT/LISTS\r\n z = json.loads(y) #dictionary of mapquest response which also includes lists\r\n\r\n print(type(z['route']['locations']))\r\n \r\n locationsList = z['route']['locations']\r\n print(locationsList)\r\n print(locationsList[1]['latLng'])\r\n i = 0\r\n if i < len(locationsList):\r\n for key in locationsList[i]:\r\n if key == 'latLng':\r\n print(locationsList[i][key])\r\n i+=1\r\n#### i = 0\r\n#### if i < len(locationsList):\r\n#### if locationList[i] == 'latLng':\r\n#### print(locationsList[i])\r\n#### \r\n \r\n #print (y)\r\n'''\r\n",
"step-ids": [
2,
4,
5,
6,
7
]
}
|
[
2,
4,
5,
6,
7
] |
from django.urls import path
from . import views as user_views
from produtos import views as prod_views
from django.contrib.auth import views as auth_views
app_name = 'user'
urlpatterns = [
path('detalhes/', user_views.painel, name="painel"),
path('produto/ajax/delete_prod/', prod_views.deleteProd, name="deleteProd"),
path('produto/', user_views.painelProdutos, name="painel_produtos"),
path('<int:id_produto>', prod_views.detalheProduto, name="detalhe_prod"),
path('ajax/delete_prod/', prod_views.deleteProd, name='deleteProd'),
]
|
normal
|
{
"blob_id": "a7f2791e359b848a217beadc77fc983d971ef8b0",
"index": 8436,
"step-1": "<mask token>\n",
"step-2": "<mask token>\napp_name = 'user'\nurlpatterns = [path('detalhes/', user_views.painel, name='painel'), path(\n 'produto/ajax/delete_prod/', prod_views.deleteProd, name='deleteProd'),\n path('produto/', user_views.painelProdutos, name='painel_produtos'),\n path('<int:id_produto>', prod_views.detalheProduto, name='detalhe_prod'\n ), path('ajax/delete_prod/', prod_views.deleteProd, name='deleteProd')]\n",
"step-3": "from django.urls import path\nfrom . import views as user_views\nfrom produtos import views as prod_views\nfrom django.contrib.auth import views as auth_views\napp_name = 'user'\nurlpatterns = [path('detalhes/', user_views.painel, name='painel'), path(\n 'produto/ajax/delete_prod/', prod_views.deleteProd, name='deleteProd'),\n path('produto/', user_views.painelProdutos, name='painel_produtos'),\n path('<int:id_produto>', prod_views.detalheProduto, name='detalhe_prod'\n ), path('ajax/delete_prod/', prod_views.deleteProd, name='deleteProd')]\n",
"step-4": "from django.urls import path\nfrom . import views as user_views\nfrom produtos import views as prod_views\nfrom django.contrib.auth import views as auth_views\n\napp_name = 'user'\n\nurlpatterns = [\n path('detalhes/', user_views.painel, name=\"painel\"),\n path('produto/ajax/delete_prod/', prod_views.deleteProd, name=\"deleteProd\"),\n path('produto/', user_views.painelProdutos, name=\"painel_produtos\"),\n path('<int:id_produto>', prod_views.detalheProduto, name=\"detalhe_prod\"),\n path('ajax/delete_prod/', prod_views.deleteProd, name='deleteProd'),\n]",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
class Wolfram(IPlugin):
def can_perform(self, friday, request):
return 'result' in request and 'resolvedQuery' in request['result'
] and 'action' in request['result'] and request['result']['action'
] == 'wisdom.unknown'
def perform(self, friday, request):
question = request['result']['resolvedQuery']
client = wolframalpha.Client(keys['WOLFRAM'])
res = client.query(question)
answer = str(list(res))
"""if len(res):
results = list(res.results)
if len(results):
answer = results[0].text[0]
else:
answer = ' '.join([each_answer.subpods[0].text for each_answer in res.pods
if each_answer.subpods[0].text])
else:
# answer = "Sorry, Wolfram doesn't know the answer."
answer = ""
"""
"""# Replace some of its notation so it's more easily read.
answer = answer.replace('
', '. ').replace('~~', ' or about ')
# Get the result to a computation and don't bother reading the original question.
if '=' in answer:
answer = answer[answer.index('=') + 1:].strip()
"""
return answer
<|reserved_special_token_1|>
<|reserved_special_token_0|>
keys_file.close()
class Wolfram(IPlugin):
def can_perform(self, friday, request):
return 'result' in request and 'resolvedQuery' in request['result'
] and 'action' in request['result'] and request['result']['action'
] == 'wisdom.unknown'
def perform(self, friday, request):
question = request['result']['resolvedQuery']
client = wolframalpha.Client(keys['WOLFRAM'])
res = client.query(question)
answer = str(list(res))
"""if len(res):
results = list(res.results)
if len(results):
answer = results[0].text[0]
else:
answer = ' '.join([each_answer.subpods[0].text for each_answer in res.pods
if each_answer.subpods[0].text])
else:
# answer = "Sorry, Wolfram doesn't know the answer."
answer = ""
"""
"""# Replace some of its notation so it's more easily read.
answer = answer.replace('
', '. ').replace('~~', ' or about ')
# Get the result to a computation and don't bother reading the original question.
if '=' in answer:
answer = answer[answer.index('=') + 1:].strip()
"""
return answer
<|reserved_special_token_1|>
<|reserved_special_token_0|>
keys_file = open('friday/plugins/KEYS')
keys = yaml.load(keys_file)
keys_file.close()
class Wolfram(IPlugin):
def can_perform(self, friday, request):
return 'result' in request and 'resolvedQuery' in request['result'
] and 'action' in request['result'] and request['result']['action'
] == 'wisdom.unknown'
def perform(self, friday, request):
question = request['result']['resolvedQuery']
client = wolframalpha.Client(keys['WOLFRAM'])
res = client.query(question)
answer = str(list(res))
"""if len(res):
results = list(res.results)
if len(results):
answer = results[0].text[0]
else:
answer = ' '.join([each_answer.subpods[0].text for each_answer in res.pods
if each_answer.subpods[0].text])
else:
# answer = "Sorry, Wolfram doesn't know the answer."
answer = ""
"""
"""# Replace some of its notation so it's more easily read.
answer = answer.replace('
', '. ').replace('~~', ' or about ')
# Get the result to a computation and don't bother reading the original question.
if '=' in answer:
answer = answer[answer.index('=') + 1:].strip()
"""
return answer
<|reserved_special_token_1|>
from yapsy.IPlugin import IPlugin
import wolframalpha
import yaml
keys_file = open('friday/plugins/KEYS')
keys = yaml.load(keys_file)
keys_file.close()
class Wolfram(IPlugin):
def can_perform(self, friday, request):
return 'result' in request and 'resolvedQuery' in request['result'
] and 'action' in request['result'] and request['result']['action'
] == 'wisdom.unknown'
def perform(self, friday, request):
question = request['result']['resolvedQuery']
client = wolframalpha.Client(keys['WOLFRAM'])
res = client.query(question)
answer = str(list(res))
"""if len(res):
results = list(res.results)
if len(results):
answer = results[0].text[0]
else:
answer = ' '.join([each_answer.subpods[0].text for each_answer in res.pods
if each_answer.subpods[0].text])
else:
# answer = "Sorry, Wolfram doesn't know the answer."
answer = ""
"""
"""# Replace some of its notation so it's more easily read.
answer = answer.replace('
', '. ').replace('~~', ' or about ')
# Get the result to a computation and don't bother reading the original question.
if '=' in answer:
answer = answer[answer.index('=') + 1:].strip()
"""
return answer
<|reserved_special_token_1|>
from yapsy.IPlugin import IPlugin
import wolframalpha
import yaml
keys_file = open("friday/plugins/KEYS")
keys = yaml.load(keys_file)
keys_file.close()
class Wolfram(IPlugin):
def can_perform(self, friday, request):
return 'result' in request and 'resolvedQuery' in request['result']\
and 'action' in request['result'] and request['result']['action'] == 'wisdom.unknown'
# result = request['result'] # Assumes we're using gTTS
# # Get the text that is supposed to be spoken aloud
# reply = result['fulfillment']['speech']
# # Get what the service thought you said
# question = result['resolvedQuery']
def perform(self, friday, request):
question = request['result']['resolvedQuery']
client = wolframalpha.Client(keys['WOLFRAM'])
res = client.query(question)
answer = str(list(res))
"""if len(res):
results = list(res.results)
if len(results):
answer = results[0].text[0]
else:
answer = ' '.join([each_answer.subpods[0].text for each_answer in res.pods
if each_answer.subpods[0].text])
else:
# answer = "Sorry, Wolfram doesn't know the answer."
answer = ""
"""
"""# Replace some of its notation so it's more easily read.
answer = answer.replace('\n', '. ').replace('~~', ' or about ')
# Get the result to a computation and don't bother reading the original question.
if '=' in answer:
answer = answer[answer.index('=') + 1:].strip()
"""
return answer
#
# def wolfram_query(question):
# # Every service should have a general set of requirements under which
# # it is activated, this would be one of the ones that Wolfram Alpha
# # uses, it does have others as well. Consider having a single method
# # in the plugin system that returns a boolean determining whether
# # a plugin should be activated.
# if question:
#
#
# def wolfram_query_old(question):
# import wolframalpha
# # Every service should have a general set of requirements under which
# # it is activated, this would be one of the ones that Wolfram Alpha
# # uses, it does have others as well. Consider having a single method
# # in the plugin system that returns a boolean determining whether
# # a plugin should be activated.
# if question.lower().startswith('wolfram'):
# question = question[8:]
# client = wolframalpha.Client(user_info.WOLFRAM_KEY)
# res = client.query(question)
# try:
# return next(res.results).text # This really needs to be changed.
# # I shouldn't have to rely upon error catching for my flow control.
# except StopIteration:
# pass
# try:
# answer = ' '.join([each_answer.text for each_answer in res.pods if each_answer])
# except TypeError:
# answer = None
# if not answer:
# answer = "Sorry, Wolfram doesn't know the answer."
#
# # Replace some of its notation so it's more easily read.
# answer = answer.replace('\n', '; ').replace('~~', ' or about ')
# # Get the result to a computation and don't bother reading the original question.
# if '=' in answer:
# answer = answer[answer.index('=')+1:]
# return [answer, None] # Follows answer format of [text, action]
#
|
flexible
|
{
"blob_id": "57564c2e94a65187bf5e033ee06926fb593e11a7",
"index": 7733,
"step-1": "<mask token>\n\n\nclass Wolfram(IPlugin):\n\n def can_perform(self, friday, request):\n return 'result' in request and 'resolvedQuery' in request['result'\n ] and 'action' in request['result'] and request['result']['action'\n ] == 'wisdom.unknown'\n\n def perform(self, friday, request):\n question = request['result']['resolvedQuery']\n client = wolframalpha.Client(keys['WOLFRAM'])\n res = client.query(question)\n answer = str(list(res))\n \"\"\"if len(res):\n results = list(res.results)\n if len(results):\n answer = results[0].text[0]\n else:\n answer = ' '.join([each_answer.subpods[0].text for each_answer in res.pods\n if each_answer.subpods[0].text])\n else:\n # answer = \"Sorry, Wolfram doesn't know the answer.\"\n answer = \"\"\n \"\"\"\n \"\"\"# Replace some of its notation so it's more easily read.\n answer = answer.replace('\n', '. ').replace('~~', ' or about ')\n # Get the result to a computation and don't bother reading the original question.\n if '=' in answer:\n answer = answer[answer.index('=') + 1:].strip()\n \"\"\"\n return answer\n",
"step-2": "<mask token>\nkeys_file.close()\n\n\nclass Wolfram(IPlugin):\n\n def can_perform(self, friday, request):\n return 'result' in request and 'resolvedQuery' in request['result'\n ] and 'action' in request['result'] and request['result']['action'\n ] == 'wisdom.unknown'\n\n def perform(self, friday, request):\n question = request['result']['resolvedQuery']\n client = wolframalpha.Client(keys['WOLFRAM'])\n res = client.query(question)\n answer = str(list(res))\n \"\"\"if len(res):\n results = list(res.results)\n if len(results):\n answer = results[0].text[0]\n else:\n answer = ' '.join([each_answer.subpods[0].text for each_answer in res.pods\n if each_answer.subpods[0].text])\n else:\n # answer = \"Sorry, Wolfram doesn't know the answer.\"\n answer = \"\"\n \"\"\"\n \"\"\"# Replace some of its notation so it's more easily read.\n answer = answer.replace('\n', '. ').replace('~~', ' or about ')\n # Get the result to a computation and don't bother reading the original question.\n if '=' in answer:\n answer = answer[answer.index('=') + 1:].strip()\n \"\"\"\n return answer\n",
"step-3": "<mask token>\nkeys_file = open('friday/plugins/KEYS')\nkeys = yaml.load(keys_file)\nkeys_file.close()\n\n\nclass Wolfram(IPlugin):\n\n def can_perform(self, friday, request):\n return 'result' in request and 'resolvedQuery' in request['result'\n ] and 'action' in request['result'] and request['result']['action'\n ] == 'wisdom.unknown'\n\n def perform(self, friday, request):\n question = request['result']['resolvedQuery']\n client = wolframalpha.Client(keys['WOLFRAM'])\n res = client.query(question)\n answer = str(list(res))\n \"\"\"if len(res):\n results = list(res.results)\n if len(results):\n answer = results[0].text[0]\n else:\n answer = ' '.join([each_answer.subpods[0].text for each_answer in res.pods\n if each_answer.subpods[0].text])\n else:\n # answer = \"Sorry, Wolfram doesn't know the answer.\"\n answer = \"\"\n \"\"\"\n \"\"\"# Replace some of its notation so it's more easily read.\n answer = answer.replace('\n', '. ').replace('~~', ' or about ')\n # Get the result to a computation and don't bother reading the original question.\n if '=' in answer:\n answer = answer[answer.index('=') + 1:].strip()\n \"\"\"\n return answer\n",
"step-4": "from yapsy.IPlugin import IPlugin\nimport wolframalpha\nimport yaml\nkeys_file = open('friday/plugins/KEYS')\nkeys = yaml.load(keys_file)\nkeys_file.close()\n\n\nclass Wolfram(IPlugin):\n\n def can_perform(self, friday, request):\n return 'result' in request and 'resolvedQuery' in request['result'\n ] and 'action' in request['result'] and request['result']['action'\n ] == 'wisdom.unknown'\n\n def perform(self, friday, request):\n question = request['result']['resolvedQuery']\n client = wolframalpha.Client(keys['WOLFRAM'])\n res = client.query(question)\n answer = str(list(res))\n \"\"\"if len(res):\n results = list(res.results)\n if len(results):\n answer = results[0].text[0]\n else:\n answer = ' '.join([each_answer.subpods[0].text for each_answer in res.pods\n if each_answer.subpods[0].text])\n else:\n # answer = \"Sorry, Wolfram doesn't know the answer.\"\n answer = \"\"\n \"\"\"\n \"\"\"# Replace some of its notation so it's more easily read.\n answer = answer.replace('\n', '. ').replace('~~', ' or about ')\n # Get the result to a computation and don't bother reading the original question.\n if '=' in answer:\n answer = answer[answer.index('=') + 1:].strip()\n \"\"\"\n return answer\n",
"step-5": "from yapsy.IPlugin import IPlugin\nimport wolframalpha\nimport yaml\n\nkeys_file = open(\"friday/plugins/KEYS\")\nkeys = yaml.load(keys_file)\nkeys_file.close()\n\n\nclass Wolfram(IPlugin):\n def can_perform(self, friday, request):\n return 'result' in request and 'resolvedQuery' in request['result']\\\n and 'action' in request['result'] and request['result']['action'] == 'wisdom.unknown'\n # result = request['result'] # Assumes we're using gTTS\n # # Get the text that is supposed to be spoken aloud\n # reply = result['fulfillment']['speech']\n # # Get what the service thought you said\n # question = result['resolvedQuery']\n\n\n def perform(self, friday, request):\n question = request['result']['resolvedQuery']\n client = wolframalpha.Client(keys['WOLFRAM'])\n res = client.query(question)\n answer = str(list(res))\n \"\"\"if len(res):\n results = list(res.results)\n if len(results):\n answer = results[0].text[0]\n else:\n answer = ' '.join([each_answer.subpods[0].text for each_answer in res.pods\n if each_answer.subpods[0].text])\n else:\n # answer = \"Sorry, Wolfram doesn't know the answer.\"\n answer = \"\"\n \"\"\"\n \"\"\"# Replace some of its notation so it's more easily read.\n answer = answer.replace('\\n', '. ').replace('~~', ' or about ')\n # Get the result to a computation and don't bother reading the original question.\n if '=' in answer:\n answer = answer[answer.index('=') + 1:].strip()\n \"\"\"\n return answer\n\n#\n# def wolfram_query(question):\n# # Every service should have a general set of requirements under which\n# # it is activated, this would be one of the ones that Wolfram Alpha\n# # uses, it does have others as well. Consider having a single method\n# # in the plugin system that returns a boolean determining whether\n# # a plugin should be activated.\n# if question:\n#\n#\n# def wolfram_query_old(question):\n# import wolframalpha\n# # Every service should have a general set of requirements under which\n# # it is activated, this would be one of the ones that Wolfram Alpha\n# # uses, it does have others as well. Consider having a single method\n# # in the plugin system that returns a boolean determining whether\n# # a plugin should be activated.\n# if question.lower().startswith('wolfram'):\n# question = question[8:]\n# client = wolframalpha.Client(user_info.WOLFRAM_KEY)\n# res = client.query(question)\n# try:\n# return next(res.results).text # This really needs to be changed.\n# # I shouldn't have to rely upon error catching for my flow control.\n# except StopIteration:\n# pass\n# try:\n# answer = ' '.join([each_answer.text for each_answer in res.pods if each_answer])\n# except TypeError:\n# answer = None\n# if not answer:\n# answer = \"Sorry, Wolfram doesn't know the answer.\"\n#\n# # Replace some of its notation so it's more easily read.\n# answer = answer.replace('\\n', '; ').replace('~~', ' or about ')\n# # Get the result to a computation and don't bother reading the original question.\n# if '=' in answer:\n# answer = answer[answer.index('=')+1:]\n# return [answer, None] # Follows answer format of [text, action]\n#\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
<|reserved_special_token_0|>
class FunctionalUnit:
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def __str__(self):
return (
"""
Id: {}
Instruction Count: {}
Latency: {}
Status: {}
End Cycle: {}
Destination {}
"""
.format(self.func_id, self.instruction_count, self.latency,
self.status, self.end_cycle, self.destination))
def get_latency(self):
"""Gets the latency of the functional unit
Keyword arguments:
None
Return: Int
"""
return self.latency
<|reserved_special_token_0|>
def get_status(self):
"""Gets the status of a functional unit
Keyword arguments:
None
Return: Int FREE (0) or BUSY (1)
"""
return self.status
def get_statistics(self):
"""Gets the statistics for the functional unit
Keyword arguments:
None
Returns: Tuple of function id and instruction count
"""
return self.func_id, self.instruction_count
def get_end(self):
"""Gets the end cycle
Keyword arguments:
None
Returns: Int of the end cycle
"""
return self.end_cycle
def get_destination(self):
"""Gets the location to which the functional unit will
write
Keyword arguments:
None
Returns: String of renamed destination
"""
return self.destination
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class FunctionalUnit:
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def __str__(self):
return (
"""
Id: {}
Instruction Count: {}
Latency: {}
Status: {}
End Cycle: {}
Destination {}
"""
.format(self.func_id, self.instruction_count, self.latency,
self.status, self.end_cycle, self.destination))
def get_latency(self):
"""Gets the latency of the functional unit
Keyword arguments:
None
Return: Int
"""
return self.latency
<|reserved_special_token_0|>
def get_status(self):
"""Gets the status of a functional unit
Keyword arguments:
None
Return: Int FREE (0) or BUSY (1)
"""
return self.status
def get_statistics(self):
"""Gets the statistics for the functional unit
Keyword arguments:
None
Returns: Tuple of function id and instruction count
"""
return self.func_id, self.instruction_count
def get_end(self):
"""Gets the end cycle
Keyword arguments:
None
Returns: Int of the end cycle
"""
return self.end_cycle
def get_destination(self):
"""Gets the location to which the functional unit will
write
Keyword arguments:
None
Returns: String of renamed destination
"""
return self.destination
def increment_instr(self):
"""Increments the instruction count for the FU
Keyword arguments:
None
Returns: None
"""
self.instruction_count += 1
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class FunctionalUnit:
"""FunctionalUnit Class to encompass methods needed for
Integer, Divide, Multipler, Load, Store Functional
Units in tomsim
"""
def __init__(self, func_id, lat):
self.instruction_count = 0
self.latency = lat
self.status = FREE
self.func_id = func_id
self.end_cycle = None
self.destination = None
def __str__(self):
return (
"""
Id: {}
Instruction Count: {}
Latency: {}
Status: {}
End Cycle: {}
Destination {}
"""
.format(self.func_id, self.instruction_count, self.latency,
self.status, self.end_cycle, self.destination))
def get_latency(self):
"""Gets the latency of the functional unit
Keyword arguments:
None
Return: Int
"""
return self.latency
def set_status(self, status):
"""Sets the status of a functional unit
Keyword arguments:
status -- the status to set a functional unit to
either BUSY or FREE
Returns
None
"""
self.status = status
def get_status(self):
"""Gets the status of a functional unit
Keyword arguments:
None
Return: Int FREE (0) or BUSY (1)
"""
return self.status
def get_statistics(self):
"""Gets the statistics for the functional unit
Keyword arguments:
None
Returns: Tuple of function id and instruction count
"""
return self.func_id, self.instruction_count
def get_end(self):
"""Gets the end cycle
Keyword arguments:
None
Returns: Int of the end cycle
"""
return self.end_cycle
def get_destination(self):
"""Gets the location to which the functional unit will
write
Keyword arguments:
None
Returns: String of renamed destination
"""
return self.destination
def increment_instr(self):
"""Increments the instruction count for the FU
Keyword arguments:
None
Returns: None
"""
self.instruction_count += 1
<|reserved_special_token_1|>
<|reserved_special_token_0|>
BUSY = 1
FREE = 0
class FunctionalUnit:
"""FunctionalUnit Class to encompass methods needed for
Integer, Divide, Multipler, Load, Store Functional
Units in tomsim
"""
def __init__(self, func_id, lat):
self.instruction_count = 0
self.latency = lat
self.status = FREE
self.func_id = func_id
self.end_cycle = None
self.destination = None
def __str__(self):
return (
"""
Id: {}
Instruction Count: {}
Latency: {}
Status: {}
End Cycle: {}
Destination {}
"""
.format(self.func_id, self.instruction_count, self.latency,
self.status, self.end_cycle, self.destination))
def get_latency(self):
"""Gets the latency of the functional unit
Keyword arguments:
None
Return: Int
"""
return self.latency
def set_status(self, status):
"""Sets the status of a functional unit
Keyword arguments:
status -- the status to set a functional unit to
either BUSY or FREE
Returns
None
"""
self.status = status
def get_status(self):
"""Gets the status of a functional unit
Keyword arguments:
None
Return: Int FREE (0) or BUSY (1)
"""
return self.status
def get_statistics(self):
"""Gets the statistics for the functional unit
Keyword arguments:
None
Returns: Tuple of function id and instruction count
"""
return self.func_id, self.instruction_count
def get_end(self):
"""Gets the end cycle
Keyword arguments:
None
Returns: Int of the end cycle
"""
return self.end_cycle
def get_destination(self):
"""Gets the location to which the functional unit will
write
Keyword arguments:
None
Returns: String of renamed destination
"""
return self.destination
def increment_instr(self):
"""Increments the instruction count for the FU
Keyword arguments:
None
Returns: None
"""
self.instruction_count += 1
<|reserved_special_token_1|>
"""
Project: tomsim simulator
Module: FunctionalUnit
Course: CS2410
Author: Cyrus Ramavarapu
Date: 19 November 2016
"""
# DEFINES
BUSY = 1
FREE = 0
class FunctionalUnit:
"""FunctionalUnit Class to encompass methods needed for
Integer, Divide, Multipler, Load, Store Functional
Units in tomsim
"""
def __init__(self, func_id, lat):
self.instruction_count = 0
self.latency = lat
self.status = FREE
self.func_id = func_id
self.end_cycle = None
self.destination = None
def __str__(self):
return """
Id: {}
Instruction Count: {}
Latency: {}
Status: {}
End Cycle: {}
Destination {}
""".format(self.func_id,
self.instruction_count,
self.latency,
self.status,
self.end_cycle,
self.destination)
def get_latency(self):
"""Gets the latency of the functional unit
Keyword arguments:
None
Return: Int
"""
return self.latency
def set_status(self, status):
"""Sets the status of a functional unit
Keyword arguments:
status -- the status to set a functional unit to
either BUSY or FREE
Returns
None
"""
self.status = status
def get_status(self):
"""Gets the status of a functional unit
Keyword arguments:
None
Return: Int FREE (0) or BUSY (1)
"""
return self.status
def get_statistics(self):
"""Gets the statistics for the functional unit
Keyword arguments:
None
Returns: Tuple of function id and instruction count
"""
return (self.func_id, self.instruction_count)
def get_end(self):
"""Gets the end cycle
Keyword arguments:
None
Returns: Int of the end cycle
"""
return self.end_cycle
def get_destination(self):
"""Gets the location to which the functional unit will
write
Keyword arguments:
None
Returns: String of renamed destination
"""
return self.destination
def increment_instr(self):
"""Increments the instruction count for the FU
Keyword arguments:
None
Returns: None
"""
self.instruction_count += 1
|
flexible
|
{
"blob_id": "a2a94e87bb9af1ccaf516581d6662d776caf0b0d",
"index": 6284,
"step-1": "<mask token>\n\n\nclass FunctionalUnit:\n <mask token>\n <mask token>\n\n def __str__(self):\n return (\n \"\"\"\n Id: {}\n Instruction Count: {}\n Latency: {}\n Status: {}\n End Cycle: {}\n Destination {}\n \"\"\"\n .format(self.func_id, self.instruction_count, self.latency,\n self.status, self.end_cycle, self.destination))\n\n def get_latency(self):\n \"\"\"Gets the latency of the functional unit\n\n Keyword arguments:\n None\n\n Return: Int\n \"\"\"\n return self.latency\n <mask token>\n\n def get_status(self):\n \"\"\"Gets the status of a functional unit\n\n Keyword arguments:\n None\n\n Return: Int FREE (0) or BUSY (1)\n \"\"\"\n return self.status\n\n def get_statistics(self):\n \"\"\"Gets the statistics for the functional unit\n\n Keyword arguments:\n None\n\n Returns: Tuple of function id and instruction count\n \"\"\"\n return self.func_id, self.instruction_count\n\n def get_end(self):\n \"\"\"Gets the end cycle\n\n Keyword arguments:\n None\n\n Returns: Int of the end cycle\n \"\"\"\n return self.end_cycle\n\n def get_destination(self):\n \"\"\"Gets the location to which the functional unit will\n write\n\n Keyword arguments:\n None\n\n Returns: String of renamed destination\n \"\"\"\n return self.destination\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass FunctionalUnit:\n <mask token>\n <mask token>\n\n def __str__(self):\n return (\n \"\"\"\n Id: {}\n Instruction Count: {}\n Latency: {}\n Status: {}\n End Cycle: {}\n Destination {}\n \"\"\"\n .format(self.func_id, self.instruction_count, self.latency,\n self.status, self.end_cycle, self.destination))\n\n def get_latency(self):\n \"\"\"Gets the latency of the functional unit\n\n Keyword arguments:\n None\n\n Return: Int\n \"\"\"\n return self.latency\n <mask token>\n\n def get_status(self):\n \"\"\"Gets the status of a functional unit\n\n Keyword arguments:\n None\n\n Return: Int FREE (0) or BUSY (1)\n \"\"\"\n return self.status\n\n def get_statistics(self):\n \"\"\"Gets the statistics for the functional unit\n\n Keyword arguments:\n None\n\n Returns: Tuple of function id and instruction count\n \"\"\"\n return self.func_id, self.instruction_count\n\n def get_end(self):\n \"\"\"Gets the end cycle\n\n Keyword arguments:\n None\n\n Returns: Int of the end cycle\n \"\"\"\n return self.end_cycle\n\n def get_destination(self):\n \"\"\"Gets the location to which the functional unit will\n write\n\n Keyword arguments:\n None\n\n Returns: String of renamed destination\n \"\"\"\n return self.destination\n\n def increment_instr(self):\n \"\"\"Increments the instruction count for the FU\n\n Keyword arguments:\n None\n\n Returns: None\n \"\"\"\n self.instruction_count += 1\n",
"step-3": "<mask token>\n\n\nclass FunctionalUnit:\n \"\"\"FunctionalUnit Class to encompass methods needed for\n Integer, Divide, Multipler, Load, Store Functional\n Units in tomsim\n \"\"\"\n\n def __init__(self, func_id, lat):\n self.instruction_count = 0\n self.latency = lat\n self.status = FREE\n self.func_id = func_id\n self.end_cycle = None\n self.destination = None\n\n def __str__(self):\n return (\n \"\"\"\n Id: {}\n Instruction Count: {}\n Latency: {}\n Status: {}\n End Cycle: {}\n Destination {}\n \"\"\"\n .format(self.func_id, self.instruction_count, self.latency,\n self.status, self.end_cycle, self.destination))\n\n def get_latency(self):\n \"\"\"Gets the latency of the functional unit\n\n Keyword arguments:\n None\n\n Return: Int\n \"\"\"\n return self.latency\n\n def set_status(self, status):\n \"\"\"Sets the status of a functional unit\n\n Keyword arguments:\n status -- the status to set a functional unit to\n either BUSY or FREE\n\n Returns\n None\n \"\"\"\n self.status = status\n\n def get_status(self):\n \"\"\"Gets the status of a functional unit\n\n Keyword arguments:\n None\n\n Return: Int FREE (0) or BUSY (1)\n \"\"\"\n return self.status\n\n def get_statistics(self):\n \"\"\"Gets the statistics for the functional unit\n\n Keyword arguments:\n None\n\n Returns: Tuple of function id and instruction count\n \"\"\"\n return self.func_id, self.instruction_count\n\n def get_end(self):\n \"\"\"Gets the end cycle\n\n Keyword arguments:\n None\n\n Returns: Int of the end cycle\n \"\"\"\n return self.end_cycle\n\n def get_destination(self):\n \"\"\"Gets the location to which the functional unit will\n write\n\n Keyword arguments:\n None\n\n Returns: String of renamed destination\n \"\"\"\n return self.destination\n\n def increment_instr(self):\n \"\"\"Increments the instruction count for the FU\n\n Keyword arguments:\n None\n\n Returns: None\n \"\"\"\n self.instruction_count += 1\n",
"step-4": "<mask token>\nBUSY = 1\nFREE = 0\n\n\nclass FunctionalUnit:\n \"\"\"FunctionalUnit Class to encompass methods needed for\n Integer, Divide, Multipler, Load, Store Functional\n Units in tomsim\n \"\"\"\n\n def __init__(self, func_id, lat):\n self.instruction_count = 0\n self.latency = lat\n self.status = FREE\n self.func_id = func_id\n self.end_cycle = None\n self.destination = None\n\n def __str__(self):\n return (\n \"\"\"\n Id: {}\n Instruction Count: {}\n Latency: {}\n Status: {}\n End Cycle: {}\n Destination {}\n \"\"\"\n .format(self.func_id, self.instruction_count, self.latency,\n self.status, self.end_cycle, self.destination))\n\n def get_latency(self):\n \"\"\"Gets the latency of the functional unit\n\n Keyword arguments:\n None\n\n Return: Int\n \"\"\"\n return self.latency\n\n def set_status(self, status):\n \"\"\"Sets the status of a functional unit\n\n Keyword arguments:\n status -- the status to set a functional unit to\n either BUSY or FREE\n\n Returns\n None\n \"\"\"\n self.status = status\n\n def get_status(self):\n \"\"\"Gets the status of a functional unit\n\n Keyword arguments:\n None\n\n Return: Int FREE (0) or BUSY (1)\n \"\"\"\n return self.status\n\n def get_statistics(self):\n \"\"\"Gets the statistics for the functional unit\n\n Keyword arguments:\n None\n\n Returns: Tuple of function id and instruction count\n \"\"\"\n return self.func_id, self.instruction_count\n\n def get_end(self):\n \"\"\"Gets the end cycle\n\n Keyword arguments:\n None\n\n Returns: Int of the end cycle\n \"\"\"\n return self.end_cycle\n\n def get_destination(self):\n \"\"\"Gets the location to which the functional unit will\n write\n\n Keyword arguments:\n None\n\n Returns: String of renamed destination\n \"\"\"\n return self.destination\n\n def increment_instr(self):\n \"\"\"Increments the instruction count for the FU\n\n Keyword arguments:\n None\n\n Returns: None\n \"\"\"\n self.instruction_count += 1\n",
"step-5": "\"\"\"\nProject: tomsim simulator\nModule: FunctionalUnit\nCourse: CS2410\nAuthor: Cyrus Ramavarapu\nDate: 19 November 2016\n\"\"\"\n\n# DEFINES\nBUSY = 1\nFREE = 0\n\n\nclass FunctionalUnit:\n \"\"\"FunctionalUnit Class to encompass methods needed for\n Integer, Divide, Multipler, Load, Store Functional\n Units in tomsim\n \"\"\"\n\n def __init__(self, func_id, lat):\n self.instruction_count = 0\n self.latency = lat\n self.status = FREE\n self.func_id = func_id\n self.end_cycle = None\n self.destination = None\n\n def __str__(self):\n return \"\"\"\n Id: {}\n Instruction Count: {}\n Latency: {}\n Status: {}\n End Cycle: {}\n Destination {}\n \"\"\".format(self.func_id,\n self.instruction_count,\n self.latency,\n self.status,\n self.end_cycle,\n self.destination)\n\n def get_latency(self):\n \"\"\"Gets the latency of the functional unit\n\n Keyword arguments:\n None\n\n Return: Int\n \"\"\"\n return self.latency\n\n def set_status(self, status):\n \"\"\"Sets the status of a functional unit\n\n Keyword arguments:\n status -- the status to set a functional unit to\n either BUSY or FREE\n\n Returns\n None\n \"\"\"\n self.status = status\n\n def get_status(self):\n \"\"\"Gets the status of a functional unit\n\n Keyword arguments:\n None\n\n Return: Int FREE (0) or BUSY (1)\n \"\"\"\n return self.status\n\n def get_statistics(self):\n \"\"\"Gets the statistics for the functional unit\n\n Keyword arguments:\n None\n\n Returns: Tuple of function id and instruction count\n \"\"\"\n\n return (self.func_id, self.instruction_count)\n\n def get_end(self):\n \"\"\"Gets the end cycle\n\n Keyword arguments:\n None\n\n Returns: Int of the end cycle\n \"\"\"\n\n return self.end_cycle\n\n def get_destination(self):\n \"\"\"Gets the location to which the functional unit will\n write\n\n Keyword arguments:\n None\n\n Returns: String of renamed destination\n \"\"\"\n\n return self.destination\n\n def increment_instr(self):\n \"\"\"Increments the instruction count for the FU\n\n Keyword arguments:\n None\n\n Returns: None\n \"\"\"\n self.instruction_count += 1\n",
"step-ids": [
7,
8,
11,
12,
13
]
}
|
[
7,
8,
11,
12,
13
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
while True:
data = urllib.request.urlopen(img_url)
raw_data = data.read()
nparr = np.frombuffer(raw_data, np.byte)
image_raw = cv2.imdecode(nparr, cv2.IMREAD_ANYCOLOR)
cv2.imshow('test', image_raw)
if cv2.waitKey(1) == ord('q'):
break
cv2.destroyAllWindows()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
img_url = 'http://192.168.0.2:7079/hi'
while True:
data = urllib.request.urlopen(img_url)
raw_data = data.read()
nparr = np.frombuffer(raw_data, np.byte)
image_raw = cv2.imdecode(nparr, cv2.IMREAD_ANYCOLOR)
cv2.imshow('test', image_raw)
if cv2.waitKey(1) == ord('q'):
break
cv2.destroyAllWindows()
<|reserved_special_token_1|>
import urllib.request
import io
import cv2
import numpy as np
img_url = 'http://192.168.0.2:7079/hi'
while True:
data = urllib.request.urlopen(img_url)
raw_data = data.read()
nparr = np.frombuffer(raw_data, np.byte)
image_raw = cv2.imdecode(nparr, cv2.IMREAD_ANYCOLOR)
cv2.imshow('test', image_raw)
if cv2.waitKey(1) == ord('q'):
break
cv2.destroyAllWindows()
<|reserved_special_token_1|>
import urllib.request
import io
import cv2
import numpy as np
img_url = 'http://192.168.0.2:7079/hi'
while True:
data = urllib.request.urlopen(img_url)
raw_data = data.read()
nparr = np.frombuffer(raw_data, np.byte)
image_raw = cv2.imdecode(nparr, cv2.IMREAD_ANYCOLOR)
cv2.imshow("test", image_raw)
if cv2.waitKey(1) == ord('q'):
break
cv2.destroyAllWindows()
|
flexible
|
{
"blob_id": "c120db53e1ea5a5b865b891cf602a13113fb1e41",
"index": 4113,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwhile True:\n data = urllib.request.urlopen(img_url)\n raw_data = data.read()\n nparr = np.frombuffer(raw_data, np.byte)\n image_raw = cv2.imdecode(nparr, cv2.IMREAD_ANYCOLOR)\n cv2.imshow('test', image_raw)\n if cv2.waitKey(1) == ord('q'):\n break\ncv2.destroyAllWindows()\n",
"step-3": "<mask token>\nimg_url = 'http://192.168.0.2:7079/hi'\nwhile True:\n data = urllib.request.urlopen(img_url)\n raw_data = data.read()\n nparr = np.frombuffer(raw_data, np.byte)\n image_raw = cv2.imdecode(nparr, cv2.IMREAD_ANYCOLOR)\n cv2.imshow('test', image_raw)\n if cv2.waitKey(1) == ord('q'):\n break\ncv2.destroyAllWindows()\n",
"step-4": "import urllib.request\nimport io\nimport cv2\nimport numpy as np\nimg_url = 'http://192.168.0.2:7079/hi'\nwhile True:\n data = urllib.request.urlopen(img_url)\n raw_data = data.read()\n nparr = np.frombuffer(raw_data, np.byte)\n image_raw = cv2.imdecode(nparr, cv2.IMREAD_ANYCOLOR)\n cv2.imshow('test', image_raw)\n if cv2.waitKey(1) == ord('q'):\n break\ncv2.destroyAllWindows()\n",
"step-5": "import urllib.request\nimport io\nimport cv2\nimport numpy as np\n\nimg_url = 'http://192.168.0.2:7079/hi'\nwhile True:\n data = urllib.request.urlopen(img_url)\n raw_data = data.read()\n\n nparr = np.frombuffer(raw_data, np.byte)\n image_raw = cv2.imdecode(nparr, cv2.IMREAD_ANYCOLOR)\n cv2.imshow(\"test\", image_raw)\n if cv2.waitKey(1) == ord('q'):\n break\n\ncv2.destroyAllWindows()",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# -*- coding:utf-8 -*
import tushare as ts
import numpy as np
import pandas as pd
import datetime
import chardet
import urllib
import urllib2
import re
from bs4 import BeautifulSoup
import time
from pandas import Series,DataFrame
def get_relation(stock1,stock2):
hist_data = ts.get_hist_data(stock1,start='2018-05-01')
if hist_data is None:
return 0
hist_data.sort_values(by = "date",ascending = True,inplace = True)
hist_data_second = ts.get_hist_data(stock2,start='2018-05-01')
if hist_data_second is None:
return 0
hist_data_second.sort_values(by = "date",ascending = True,inplace = True)
result = pd.concat([hist_data,hist_data_second],axis = 1)
result = result['close']
result = result.dropna(how = 'any')
#result.to_excel('result.xlsx')
corr_result= result.corr()
result=np.array(corr_result.iloc[1:3,0:1])
return result[0][0]
year = datetime.datetime.now().strftime('%Y')
month = datetime.datetime.now().strftime('%m')
day = datetime.datetime.now().strftime('%d')
second = datetime.datetime.now().strftime('%s')
season = int(month) /3 +1
basic = ts.get_stock_basics()
basic.to_excel( year+month+day+second + '_basics.xlsx')
grouped_pe = basic['pe'].groupby(basic['industry'])
grouped_pe.mean().to_excel( year+month+day+second + '_grouped_pe.xlsx')
grouped_pb = basic['pb'].groupby(basic['industry'])
#print grouped.mean()
grouped_pb.mean().to_excel( year+month+day+second + '_grouped_pb.xlsx')
#np_industry = np.array(grouped_pb.mean().index)
grouped_industry=pd.concat([grouped_pe.mean(),grouped_pb.mean()],axis =1 ,join = 'inner')
grouped_industry.to_excel( year+month+day+second + '_grouped_industry.xlsx')
np_industry = np.array(grouped_pb.mean().index)
#for industry in np_industry:
# current_industy = basic[basic['industry'].isin([str(industry)])]
# current_industy.to_excel(str(industry)+ '.xlsx')
yj_current_season=ts.forecast_data(int(year),season)
yj_last_season=ts.forecast_data(int(year),season-1)
yj_last_season_index=yj_last_season.set_index('code')
yj_curren_seaon_index=yj_current_season.set_index('code')
yj_index=pd.concat([yj_curren_seaon_index,yj_last_season_index],axis =0 ,join = 'outer')
#yj_index.to_excel('index_yeji.xlsx')
result = pd.concat([yj_index,basic],axis =1 ,join = 'inner')
#result_select = result[result['type'].isin([u'\u9884\u5347',u'\u9884\u589e'])]
result_select = result[result['type'].isin([u'\u9884\u589e'])]
result_select.sort_values(by = "report_date",ascending = False,inplace = True)
result_select = result_select[result_select['report_date'].isin([np.array(result_select['report_date'])[0]])]
for code in np.array(result_select.index):
result_select.ix[str(code),'mean-pe'] = grouped_pe.mean()[result_select.ix[str(code),'industry']]
hist_data = ts.get_hist_data(str(code),start='2018-05-01')
if hist_data is not None:
hist_data.sort_values(by = "date",ascending = False,inplace = True)
hist_data = hist_data.iloc[0:5,:]
#five_day_everage = hist_data['close'].mean()
#hist_data.to_excel( year+month+day+second+str(code) + 'history.xlsx')
result_select.ix[str(code),'five-day-mean'] = hist_data['close'].mean()
close_price = np.array(hist_data['close'])
if close_price.size > 0:
result_select.ix[str(code),'last_day_price'] = np.array(hist_data['close'])[0]
result_select.ix[str(code),'increase-rate'] = \
(np.array(hist_data['close'])[0] - hist_data['close'].mean())/hist_data['close'].mean()
result_select.ix[str(code),'touzhijiazhi'] = \
(result_select.ix[str(code),'totalAssets']*10000)/(result_select.ix[str(code),'totals']*10000*10000)
result_select.ix[str(code),'price-values'] = \
result_select.ix[str(code),'touzhijiazhi'] /result_select.ix[str(code),'last_day_price']
if result_select.ix[str(code),'pe'] == 0:
result_select.ix[str(code),'pe'] = result_select.ix[str(code),'mean-pe']
result_select.ix[str(code),'pray-values'] = \
result_select.ix[str(code),'price-values'] * result_select.ix[str(code),'npr']/100.0 \
*result_select.ix[str(code),'mean-pe'] /result_select.ix[str(code),'pe'] \
*hist_data['close'].mean()/result_select.ix[str(code),'last_day_price']
result_select.to_excel( year+month+day+second + '_yeji.xlsx')
i = datetime.datetime.now()
#print ("当前的日期是%s" %i)
time_string = "%s-%s-%s"%(i.year,i.month,i.day)
print time_string
url ='http://query.sse.com.cn/infodisplay/queryBltnBookInfo.do?jsonCallBack=jsonpCallback55433&isNew=1&publishYear=2018'
#url ='https://query.sse.com.cn/infodisplay/'
headers = {
'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11',
'Host':'query.sse.com.cn',
'Referer':'http://www.sse.com.cn/disclosure/listedinfo/periodic/',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Accept-Encoding': 'gzip, deflate',
'Accept-Language': 'zh-CN',
'Connection': 'keep-alive'
}
#values = {'inputCode':'000063'}
#pos_data = urllib.urlencode(values)
def compare_time(time1,time2):
s_time = time.mktime(time.strptime(time1,'%Y-%m-%d'))
e_time = time.mktime(time.strptime(time2,'%Y-%m-%d'))
return int(s_time) - int(e_time)
def my_save(filename,contents):
fh=open(filename,'w')
fh.write(contents)
fh.close()
request = urllib2.Request(url,headers = headers)
page = urllib2.urlopen(request)
#page.encoding = 'utf-8'
soup = BeautifulSoup(page,"lxml")
html = soup.select('p')
string1 = str(html[0])
string2 = string1.split('ROWNUM_')
df=pd.DataFrame(columns=['Name','code','type','publishDate0','actualDate'])
for string in string2:
name= re.findall(r'companyAbbr":"(.+?)","',string)
code= re.findall(r'companyCode":"(.+?)","',string)
report_type= re.findall(r'bulletinType":"(.+?)","',string)
date = re.findall(r'publishDate0":"(.+?)","',string)
actual = re.findall(r'actualDate":"(.+?)","',string)
if len(actual) == 0 and len(date)!=0 and compare_time(str(date[0]),time_string) > 0:
df=df.append(pd.DataFrame({'Name':name,'code':code,'type':report_type,'publishDate0':date}),ignore_index=True)
df.sort_values(by = "publishDate0",ascending = True,inplace = True)
#df= df.iloc[0:16,:]
df.to_excel('ready_to_report.xlsx')
np_ready_report = np.unique(np.array(df['code']))
np_increase_report = np.array(result_select.index)
forcast=pd.DataFrame()
#forcast=pd.DataFrame(columns=['increase code','forcast code','relation'])
index =0;
for code1 in np_increase_report:
for code2 in np_ready_report:
if cmp(basic.ix[str(code2),'industry'],basic.ix[str(code1),'industry']) == 0:
relation = get_relation(str(code1),str(code2))
forcast.ix[str(index),'increase code'] = code1
forcast.ix[str(index),'forcast code'] = code2
forcast.ix[str(index),'relation'] = relation
forcast.ix[str(index),'publishDate0'] = np.array(df[df['code'].isin([code2])]['publishDate0'])[0]
forcast.ix[str(index),'forcast industry'] = basic.ix[str(code2),'industry']
forcast.ix[str(index),'increase industry'] = basic.ix[str(code1),'industry']
index = index +1
forcast.to_excel('forcast.xlsx')
|
normal
|
{
"blob_id": "00f2aafe1a0c66d0414d189b9fa3bbc2da9fd727",
"index": 2066,
"step-1": "# -*- coding:utf-8 -*\nimport tushare as ts\nimport numpy as np\nimport pandas as pd\nimport datetime\nimport chardet\nimport urllib\nimport urllib2\nimport re\nfrom bs4 import BeautifulSoup\nimport time\nfrom pandas import Series,DataFrame\n\ndef get_relation(stock1,stock2):\n hist_data = ts.get_hist_data(stock1,start='2018-05-01')\n if hist_data is None:\n return 0\n hist_data.sort_values(by = \"date\",ascending = True,inplace = True)\n hist_data_second = ts.get_hist_data(stock2,start='2018-05-01')\n if hist_data_second is None:\n return 0\n hist_data_second.sort_values(by = \"date\",ascending = True,inplace = True)\n result = pd.concat([hist_data,hist_data_second],axis = 1)\n result = result['close']\n result = result.dropna(how = 'any')\n #result.to_excel('result.xlsx')\n corr_result= result.corr()\n result=np.array(corr_result.iloc[1:3,0:1])\n return result[0][0]\n\nyear = datetime.datetime.now().strftime('%Y')\nmonth = datetime.datetime.now().strftime('%m')\nday = datetime.datetime.now().strftime('%d')\nsecond = datetime.datetime.now().strftime('%s')\nseason = int(month) /3 +1\nbasic = ts.get_stock_basics()\nbasic.to_excel( year+month+day+second + '_basics.xlsx')\n\ngrouped_pe = basic['pe'].groupby(basic['industry'])\n\ngrouped_pe.mean().to_excel( year+month+day+second + '_grouped_pe.xlsx')\n\ngrouped_pb = basic['pb'].groupby(basic['industry'])\n#print grouped.mean()\ngrouped_pb.mean().to_excel( year+month+day+second + '_grouped_pb.xlsx')\n\n#np_industry = np.array(grouped_pb.mean().index)\ngrouped_industry=pd.concat([grouped_pe.mean(),grouped_pb.mean()],axis =1 ,join = 'inner')\ngrouped_industry.to_excel( year+month+day+second + '_grouped_industry.xlsx')\nnp_industry = np.array(grouped_pb.mean().index)\n#for industry in np_industry:\n# current_industy = basic[basic['industry'].isin([str(industry)])]\n# current_industy.to_excel(str(industry)+ '.xlsx')\n\nyj_current_season=ts.forecast_data(int(year),season)\nyj_last_season=ts.forecast_data(int(year),season-1)\n\nyj_last_season_index=yj_last_season.set_index('code')\nyj_curren_seaon_index=yj_current_season.set_index('code')\nyj_index=pd.concat([yj_curren_seaon_index,yj_last_season_index],axis =0 ,join = 'outer')\n#yj_index.to_excel('index_yeji.xlsx')\nresult = pd.concat([yj_index,basic],axis =1 ,join = 'inner')\n#result_select = result[result['type'].isin([u'\\u9884\\u5347',u'\\u9884\\u589e'])]\nresult_select = result[result['type'].isin([u'\\u9884\\u589e'])]\nresult_select.sort_values(by = \"report_date\",ascending = False,inplace = True)\nresult_select = result_select[result_select['report_date'].isin([np.array(result_select['report_date'])[0]])]\n\nfor code in np.array(result_select.index):\n\tresult_select.ix[str(code),'mean-pe'] = grouped_pe.mean()[result_select.ix[str(code),'industry']] \n\thist_data = ts.get_hist_data(str(code),start='2018-05-01')\n\tif hist_data is not None:\n \t\thist_data.sort_values(by = \"date\",ascending = False,inplace = True)\n \t\thist_data = hist_data.iloc[0:5,:]\n \t\t#five_day_everage = hist_data['close'].mean()\n \t\t#hist_data.to_excel( year+month+day+second+str(code) + 'history.xlsx')\n\t\t\tresult_select.ix[str(code),'five-day-mean'] = hist_data['close'].mean()\n close_price = np.array(hist_data['close'])\n if close_price.size > 0:\n \t\t\tresult_select.ix[str(code),'last_day_price'] = np.array(hist_data['close'])[0]\n result_select.ix[str(code),'increase-rate'] = \\\n (np.array(hist_data['close'])[0] - hist_data['close'].mean())/hist_data['close'].mean()\n \n 
result_select.ix[str(code),'touzhijiazhi'] = \\\n (result_select.ix[str(code),'totalAssets']*10000)/(result_select.ix[str(code),'totals']*10000*10000) \n\n result_select.ix[str(code),'price-values'] = \\\n result_select.ix[str(code),'touzhijiazhi'] /result_select.ix[str(code),'last_day_price']\n if result_select.ix[str(code),'pe'] == 0:\n result_select.ix[str(code),'pe'] = result_select.ix[str(code),'mean-pe']\n result_select.ix[str(code),'pray-values'] = \\\n result_select.ix[str(code),'price-values'] * result_select.ix[str(code),'npr']/100.0 \\\n *result_select.ix[str(code),'mean-pe'] /result_select.ix[str(code),'pe'] \\\n *hist_data['close'].mean()/result_select.ix[str(code),'last_day_price']\n\n \nresult_select.to_excel( year+month+day+second + '_yeji.xlsx')\n\ni = datetime.datetime.now()\n#print (\"当前的日期是%s\" %i)\ntime_string = \"%s-%s-%s\"%(i.year,i.month,i.day)\nprint time_string\nurl ='http://query.sse.com.cn/infodisplay/queryBltnBookInfo.do?jsonCallBack=jsonpCallback55433&isNew=1&publishYear=2018'\n#url ='https://query.sse.com.cn/infodisplay/'\n\nheaders = {\n'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11',\n'Host':'query.sse.com.cn',\n'Referer':'http://www.sse.com.cn/disclosure/listedinfo/periodic/',\n'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',\n'Accept-Encoding': 'gzip, deflate',\n'Accept-Language': 'zh-CN',\n'Connection': 'keep-alive'\n}\n#values = {'inputCode':'000063'}\n#pos_data = urllib.urlencode(values)\ndef compare_time(time1,time2):\n s_time = time.mktime(time.strptime(time1,'%Y-%m-%d'))\n e_time = time.mktime(time.strptime(time2,'%Y-%m-%d'))\n return int(s_time) - int(e_time)\n\ndef my_save(filename,contents):\n fh=open(filename,'w')\n fh.write(contents)\n fh.close()\n\n\nrequest = urllib2.Request(url,headers = headers)\npage = urllib2.urlopen(request)\n#page.encoding = 'utf-8'\nsoup = BeautifulSoup(page,\"lxml\")\nhtml = soup.select('p')\nstring1 = str(html[0])\nstring2 = string1.split('ROWNUM_')\ndf=pd.DataFrame(columns=['Name','code','type','publishDate0','actualDate'])\nfor string in string2:\n name= re.findall(r'companyAbbr\":\"(.+?)\",\"',string)\n code= re.findall(r'companyCode\":\"(.+?)\",\"',string)\n report_type= re.findall(r'bulletinType\":\"(.+?)\",\"',string)\n date = re.findall(r'publishDate0\":\"(.+?)\",\"',string)\n\n actual = re.findall(r'actualDate\":\"(.+?)\",\"',string)\n if len(actual) == 0 and len(date)!=0 and compare_time(str(date[0]),time_string) > 0:\n df=df.append(pd.DataFrame({'Name':name,'code':code,'type':report_type,'publishDate0':date}),ignore_index=True)\ndf.sort_values(by = \"publishDate0\",ascending = True,inplace = True)\n#df= df.iloc[0:16,:]\ndf.to_excel('ready_to_report.xlsx')\n\n\nnp_ready_report = np.unique(np.array(df['code']))\n\n\nnp_increase_report = np.array(result_select.index)\nforcast=pd.DataFrame()\n#forcast=pd.DataFrame(columns=['increase code','forcast code','relation'])\nindex =0;\nfor code1 in np_increase_report:\n for code2 in np_ready_report:\n if cmp(basic.ix[str(code2),'industry'],basic.ix[str(code1),'industry']) == 0:\n \trelation = get_relation(str(code1),str(code2))\n \tforcast.ix[str(index),'increase code'] = code1\n \tforcast.ix[str(index),'forcast code'] = code2\n \tforcast.ix[str(index),'relation'] = relation\n \tforcast.ix[str(index),'publishDate0'] = np.array(df[df['code'].isin([code2])]['publishDate0'])[0]\n \tforcast.ix[str(index),'forcast industry'] = basic.ix[str(code2),'industry']\n 
\tforcast.ix[str(index),'increase industry'] = basic.ix[str(code1),'industry']\n\t\tindex = index +1\n\nforcast.to_excel('forcast.xlsx')\n\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
from . import scramsha1, scrammer
|
normal
|
{
"blob_id": "8c336edddadbf4689721b474c254ded061ecf4b5",
"index": 743,
"step-1": "<mask token>\n",
"step-2": "from . import scramsha1, scrammer\n",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
#coding=utf8
uu=u'中国'
s = uu.encode('utf-8')
if s == '中国' :
print 11111
print u"一次性还本息".encode('utf-8')
|
normal
|
{
"blob_id": "9bf4725c054578aa8da2a563f67fd5c72c2fe831",
"index": 8918,
"step-1": "#coding=utf8\n\nuu=u'中国'\ns = uu.encode('utf-8')\nif s == '中国' :\n print 11111\nprint u\"一次性还本息\".encode('utf-8')\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class VimaptAbortOperationException(VimaptException):
pass
<|reserved_special_token_1|>
from .VimaptException import VimaptException
class VimaptAbortOperationException(VimaptException):
pass
|
flexible
|
{
"blob_id": "f52bac3e658a34b82721746364fab11d25d470c4",
"index": 5302,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass VimaptAbortOperationException(VimaptException):\n pass\n",
"step-3": "from .VimaptException import VimaptException\n\n\nclass VimaptAbortOperationException(VimaptException):\n pass\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
#!/usr/bin/env python
# $Id: iprscan5_urllib2.py 2809 2015-03-13 16:10:25Z uludag $
# ======================================================================
#
# Copyright 2009-2014 EMBL - European Bioinformatics Institute
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ======================================================================
# InterProScan 5 (REST) Python client using urllib2 and
# xmltramp (http://www.aaronsw.com/2002/xmltramp/).
#
# Tested with:
# Python 2.6.5 (Ubuntu 10.04 LTS)
# Python 2.7.3 (Ubuntu 12.04 LTS)
#
# See:
# http://www.ebi.ac.uk/Tools/webservices/services/pfa/iprscan5_rest
# http://www.ebi.ac.uk/Tools/webservices/tutorials/python
# ======================================================================
# Base URL for service
import urllib.request, urllib.error, urllib.parse
import time
import sys
import re
import os
import platform
import argparse
import xmltramp
baseUrl = 'http://www.ebi.ac.uk/Tools/services/rest/iprscan5'
# Load libraries
# Set interval for checking status
checkInterval = 10
# Output level
outputLevel = 1
# Debug level
debugLevel = 0
# Number of option arguments.
numOpts = len(sys.argv)
# Usage message
parser = argparse.ArgumentParser()
# Tool specific options
parser.add_argument('--input', required=True, help='input FASTA file')
parser.add_argument('--appl',
help='signature methods to use, see --paramDetail appl')
parser.add_argument('--crc', action="store_true",
help='enable InterProScan Matches look-up (ignored)')
parser.add_argument('--nocrc', action="store_true",
help='disable InterProScan Matches look-up (ignored)')
parser.add_argument('--goterms', action="store_true",
help='enable inclusion of GO terms')
parser.add_argument('--nogoterms', action="store_true",
help='disable inclusion of GO terms')
parser.add_argument('--pathways', action="store_true",
help='enable inclusion of pathway terms')
parser.add_argument('--nopathways', action="store_true",
help='disable inclusion of pathway terms')
parser.add_argument('--sequence', help='input sequence file name')
# General options
parser.add_argument('--email', required=True, help='e-mail address')
parser.add_argument('--title', help='job title')
parser.add_argument('--outfile', help='file name for results')
parser.add_argument('--outformat', help='output format for results')
# 'async' became a reserved keyword in Python 3.7, so store the flag as 'async_mode'.
parser.add_argument('--async', dest='async_mode', action='store_true',
                    help='asynchronous mode')
parser.add_argument('--jobid', help='job identifier')
parser.add_argument('--polljob', action="store_true", help='get job result')
parser.add_argument('--status', action="store_true", help='get job status')
parser.add_argument('--resultTypes', action='store_true',
help='get result types')
parser.add_argument('--params', action='store_true',
help='list input parameters')
parser.add_argument('--paramDetail', help='get details for parameter')
parser.add_argument('--quiet', action='store_true',
help='decrease output level')
parser.add_argument('--verbose', action='store_true',
help='increase output level')
parser.add_argument('--baseURL', default=baseUrl, help='Base URL for service')
parser.add_argument('--debugLevel', type=int,
default=debugLevel, help='debug output level')
options = parser.parse_args()
# Increase output level
if options.verbose:
outputLevel += 1
# Decrease output level
if options.quiet:
outputLevel -= 1
# Debug level
if options.debugLevel:
debugLevel = options.debugLevel
# Debug print
def printDebugMessage(functionName, message, level):
if(level <= debugLevel):
print('[' + functionName + '] ' + message, file=sys.stderr)
# User-agent for request (see RFC2616).
def getUserAgent():
printDebugMessage('getUserAgent', 'Begin', 11)
    # Agent string for the urllib library (urllib2 was the Python 2 name).
    urllib_agent = 'Python-urllib/%s' % urllib.request.__version__
clientRevision = '$Revision: 2809 $'
clientVersion = '0'
if len(clientRevision) > 11:
clientVersion = clientRevision[11:-2]
# Prepend client specific agent string.
user_agent = 'EBI-Sample-Client/%s (%s; Python %s; %s) %s' % (
clientVersion, os.path.basename(__file__),
platform.python_version(), platform.system(),
urllib_agent
)
printDebugMessage('getUserAgent', 'user_agent: ' + user_agent, 12)
printDebugMessage('getUserAgent', 'End', 11)
return user_agent
# Wrapper for a REST (HTTP GET) request
def restRequest(url):
printDebugMessage('restRequest', 'Begin', 11)
printDebugMessage('restRequest', 'url: ' + url, 11)
# Errors are indicated by HTTP status codes.
try:
# Set the User-agent.
user_agent = getUserAgent()
http_headers = {'User-Agent': user_agent}
req = urllib.request.Request(url, None, http_headers)
# Make the request (HTTP GET).
reqH = urllib.request.urlopen(req)
result = reqH.read()
reqH.close()
# Errors are indicated by HTTP status codes.
except urllib.error.HTTPError as ex:
# Trap exception and output the document to get error message.
print(ex.read(), file=sys.stderr)
raise
printDebugMessage('restRequest', 'End', 11)
return result
# Get input parameters list
def serviceGetParameters():
printDebugMessage('serviceGetParameters', 'Begin', 1)
requestUrl = baseUrl + '/parameters'
printDebugMessage('serviceGetParameters', 'requestUrl: ' + requestUrl, 2)
xmlDoc = restRequest(requestUrl)
doc = xmltramp.parse(xmlDoc)
printDebugMessage('serviceGetParameters', 'End', 1)
return doc['id':]
# Print list of parameters
def printGetParameters():
printDebugMessage('printGetParameters', 'Begin', 1)
idList = serviceGetParameters()
for id in idList:
print(id)
printDebugMessage('printGetParameters', 'End', 1)
# Get input parameter information
def serviceGetParameterDetails(paramName):
printDebugMessage('serviceGetParameterDetails', 'Begin', 1)
printDebugMessage('serviceGetParameterDetails',
'paramName: ' + paramName, 2)
requestUrl = baseUrl + '/parameterdetails/' + paramName
printDebugMessage('serviceGetParameterDetails',
'requestUrl: ' + requestUrl, 2)
xmlDoc = restRequest(requestUrl)
doc = xmltramp.parse(xmlDoc)
printDebugMessage('serviceGetParameterDetails', 'End', 1)
return doc
# Print description of a parameter
def printGetParameterDetails(paramName):
printDebugMessage('printGetParameterDetails', 'Begin', 1)
doc = serviceGetParameterDetails(paramName)
print(str(doc.name) + "\t" + str(doc.type))
print(doc.description)
for value in doc.values:
print(value.value, end=' ')
if str(value.defaultValue) == 'true':
print('default', end=' ')
print()
print("\t" + str(value.label))
if(hasattr(value, 'properties')):
for wsProperty in value.properties:
print("\t" + str(wsProperty.key) + "\t" + str(wsProperty.value))
#print doc
printDebugMessage('printGetParameterDetails', 'End', 1)
# Submit job
def serviceRun(email, title, params):
printDebugMessage('serviceRun', 'Begin', 1)
# Insert e-mail and title into params
params['email'] = email
if title:
params['title'] = title
requestUrl = baseUrl + '/run/'
printDebugMessage('serviceRun', 'requestUrl: ' + requestUrl, 2)
# Signature methods requires special handling (list)
applData = ''
if 'appl' in params:
# So extract from params
applList = params['appl']
del params['appl']
# Build the method data options
for appl in applList:
applData += '&appl=' + appl
# Get the data for the other options
requestData = urllib.parse.urlencode(params)
# Concatenate the two parts.
requestData += applData
printDebugMessage('serviceRun', 'requestData: ' + requestData, 2)
# Errors are indicated by HTTP status codes.
try:
# Set the HTTP User-agent.
user_agent = getUserAgent()
http_headers = {'User-Agent': user_agent}
req = urllib.request.Request(requestUrl, None, http_headers)
# Make the submission (HTTP POST).
        # POST data must be bytes in Python 3; decode the returned job id to str.
        reqH = urllib.request.urlopen(req, requestData.encode('utf-8'))
        jobId = reqH.read().decode('utf-8')
reqH.close()
except urllib.error.HTTPError as ex:
# Trap exception and output the document to get error message.
print(ex.read(), file=sys.stderr)
raise
printDebugMessage('serviceRun', 'jobId: ' + jobId, 2)
printDebugMessage('serviceRun', 'End', 1)
return jobId
# Get job status
def serviceGetStatus(jobId):
printDebugMessage('serviceGetStatus', 'Begin', 1)
printDebugMessage('serviceGetStatus', 'jobId: ' + jobId, 2)
requestUrl = baseUrl + '/status/' + jobId
printDebugMessage('serviceGetStatus', 'requestUrl: ' + requestUrl, 2)
status = restRequest(requestUrl)
printDebugMessage('serviceGetStatus', 'status: ' + status, 2)
printDebugMessage('serviceGetStatus', 'End', 1)
return status
# Print the status of a job
def printGetStatus(jobId):
printDebugMessage('printGetStatus', 'Begin', 1)
status = serviceGetStatus(jobId)
print(status)
printDebugMessage('printGetStatus', 'End', 1)
# Get available result types for job
def serviceGetResultTypes(jobId):
printDebugMessage('serviceGetResultTypes', 'Begin', 1)
printDebugMessage('serviceGetResultTypes', 'jobId: ' + jobId, 2)
requestUrl = baseUrl + '/resulttypes/' + jobId
printDebugMessage('serviceGetResultTypes', 'requestUrl: ' + requestUrl, 2)
xmlDoc = restRequest(requestUrl)
doc = xmltramp.parse(xmlDoc)
printDebugMessage('serviceGetResultTypes', 'End', 1)
return doc['type':]
# Print list of available result types for a job.
def printGetResultTypes(jobId):
printDebugMessage('printGetResultTypes', 'Begin', 1)
resultTypeList = serviceGetResultTypes(jobId)
for resultType in resultTypeList:
print(resultType['identifier'])
if(hasattr(resultType, 'label')):
print("\t", resultType['label'])
if(hasattr(resultType, 'description')):
print("\t", resultType['description'])
if(hasattr(resultType, 'mediaType')):
print("\t", resultType['mediaType'])
if(hasattr(resultType, 'fileSuffix')):
print("\t", resultType['fileSuffix'])
printDebugMessage('printGetResultTypes', 'End', 1)
# Get result
def serviceGetResult(jobId, type_):
printDebugMessage('serviceGetResult', 'Begin', 1)
printDebugMessage('serviceGetResult', 'jobId: ' + jobId, 2)
printDebugMessage('serviceGetResult', 'type_: ' + type_, 2)
requestUrl = baseUrl + '/result/' + jobId + '/' + type_
result = restRequest(requestUrl)
printDebugMessage('serviceGetResult', 'End', 1)
return result
# Client-side poll
def clientPoll(jobId):
printDebugMessage('clientPoll', 'Begin', 1)
result = 'PENDING'
while result == 'RUNNING' or result == 'PENDING':
result = serviceGetStatus(jobId)
print(result, file=sys.stderr)
if result == 'RUNNING' or result == 'PENDING':
time.sleep(checkInterval)
printDebugMessage('clientPoll', 'End', 1)
# Get result for a jobid
def getResult(jobId):
printDebugMessage('getResult', 'Begin', 1)
printDebugMessage('getResult', 'jobId: ' + jobId, 1)
# Check status and wait if necessary
clientPoll(jobId)
# Get available result types
resultTypes = serviceGetResultTypes(jobId)
for resultType in resultTypes:
# Derive the filename for the result
if options.outfile:
filename = options.outfile + '.' + \
str(resultType['identifier']) + '.' + \
str(resultType['fileSuffix'])
else:
filename = jobId + '.' + \
str(resultType['identifier']) + '.' + \
str(resultType['fileSuffix'])
# Write a result file
if not options.outformat or options.outformat == str(resultType['identifier']):
# Get the result
result = serviceGetResult(jobId, str(resultType['identifier']))
            fh = open(filename, 'wb')  # result returned by the REST call is bytes
fh.write(result)
fh.close()
print(filename)
printDebugMessage('getResult', 'End', 1)
# Read a file
def readFile(filename):
printDebugMessage('readFile', 'Begin', 1)
fh = open(filename, 'r')
data = fh.read()
fh.close()
printDebugMessage('readFile', 'End', 1)
return data
# No options... print help.
if numOpts < 2:
parser.print_help()
# List parameters
elif options.params:
printGetParameters()
# Get parameter details
elif options.paramDetail:
printGetParameterDetails(options.paramDetail)
# Submit job
elif options.email and not options.jobid:
params = {}
if 1 > 0:
if os.access(options.input, os.R_OK): # Read file into content
params['sequence'] = readFile(options.input)
else: # Argument is a sequence id
params['sequence'] = options.input
elif options.sequence: # Specified via option
if os.access(options.sequence, os.R_OK): # Read file into content
params['sequence'] = readFile(options.sequence)
else: # Argument is a sequence id
params['sequence'] = options.sequence
# Map flag options to boolean values.
# if options.crc:
# params['crc'] = True
# elif options.nocrc:
# params['crc'] = False
if options.goterms:
params['goterms'] = True
elif options.nogoterms:
params['goterms'] = False
if options.pathways:
params['pathways'] = True
elif options.nopathways:
params['pathways'] = False
# Add the other options (if defined)
if options.appl:
params['appl'] = re.split('[ \t\n,;]+', options.appl)
# Submit the job
jobid = serviceRun(options.email, options.title, params)
    if options.async_mode:  # Async mode
print(jobid)
else: # Sync mode
print(jobid, file=sys.stderr)
time.sleep(5)
getResult(jobid)
# Get job status
elif options.status and options.jobid:
printGetStatus(options.jobid)
# List result types for job
elif options.resultTypes and options.jobid:
printGetResultTypes(options.jobid)
# Get results for job
elif options.polljob and options.jobid:
getResult(options.jobid)
else:
print('Error: unrecognised argument combination', file=sys.stderr)
parser.print_help()
|
normal
|
{
"blob_id": "3dd9ce6d5d1ba0bebadae4068e2c898802180e1d",
"index": 8825,
"step-1": "#!/usr/bin/env python\n# $Id: iprscan5_urllib2.py 2809 2015-03-13 16:10:25Z uludag $\n# ======================================================================\n#\n# Copyright 2009-2014 EMBL - European Bioinformatics Institute\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# ======================================================================\n# InterProScan 5 (REST) Python client using urllib2 and\n# xmltramp (http://www.aaronsw.com/2002/xmltramp/).\n#\n# Tested with:\n# Python 2.6.5 (Ubuntu 10.04 LTS)\n# Python 2.7.3 (Ubuntu 12.04 LTS)\n#\n# See:\n# http://www.ebi.ac.uk/Tools/webservices/services/pfa/iprscan5_rest\n# http://www.ebi.ac.uk/Tools/webservices/tutorials/python\n# ======================================================================\n# Base URL for service\nimport urllib.request, urllib.error, urllib.parse\nimport urllib.request, urllib.parse, urllib.error\nimport time\nimport sys\nimport re\nimport os\nimport platform\nimport argparse\nimport xmltramp\nbaseUrl = 'http://www.ebi.ac.uk/Tools/services/rest/iprscan5'\n\n# Load libraries\n\n# Set interval for checking status\ncheckInterval = 10\n# Output level\noutputLevel = 1\n# Debug level\ndebugLevel = 0\n# Number of option arguments.\nnumOpts = len(sys.argv)\n\n# Usage message\nparser = argparse.ArgumentParser()\n# Tool specific options\nparser.add_argument('--input', required=True, help='input FASTA file')\nparser.add_argument('--appl', \n\t\t\t\t\thelp='signature methods to use, see --paramDetail appl')\nparser.add_argument('--crc', action=\"store_true\",\n help='enable InterProScan Matches look-up (ignored)')\nparser.add_argument('--nocrc', action=\"store_true\",\n help='disable InterProScan Matches look-up (ignored)')\nparser.add_argument('--goterms', action=\"store_true\",\n help='enable inclusion of GO terms')\nparser.add_argument('--nogoterms', action=\"store_true\",\n help='disable inclusion of GO terms')\nparser.add_argument('--pathways', action=\"store_true\",\n help='enable inclusion of pathway terms')\nparser.add_argument('--nopathways', action=\"store_true\",\n help='disable inclusion of pathway terms')\nparser.add_argument('--sequence', help='input sequence file name')\n# General options\nparser.add_argument('--email', required=True, help='e-mail address')\nparser.add_argument('--title', help='job title')\nparser.add_argument('--outfile', help='file name for results')\nparser.add_argument('--outformat', help='output format for results')\nparser.add_argument('--async', action='store_true', help='asynchronous mode')\nparser.add_argument('--jobid', help='job identifier')\nparser.add_argument('--polljob', action=\"store_true\", help='get job result')\nparser.add_argument('--status', action=\"store_true\", help='get job status')\nparser.add_argument('--resultTypes', action='store_true',\n help='get result types')\nparser.add_argument('--params', action='store_true',\n help='list input parameters')\nparser.add_argument('--paramDetail', help='get details for parameter')\nparser.add_argument('--quiet', 
action='store_true',\n help='decrease output level')\nparser.add_argument('--verbose', action='store_true',\n help='increase output level')\nparser.add_argument('--baseURL', default=baseUrl, help='Base URL for service')\nparser.add_argument('--debugLevel', type=int,\n default=debugLevel, help='debug output level')\noptions = parser.parse_args()\n\n# Increase output level\nif options.verbose:\n outputLevel += 1\n\n# Decrease output level\nif options.quiet:\n outputLevel -= 1\n\n# Debug level\nif options.debugLevel:\n debugLevel = options.debugLevel\n\n# Debug print\n\n\ndef printDebugMessage(functionName, message, level):\n if(level <= debugLevel):\n print('[' + functionName + '] ' + message, file=sys.stderr)\n\n# User-agent for request (see RFC2616).\n\n\ndef getUserAgent():\n printDebugMessage('getUserAgent', 'Begin', 11)\n # Agent string for urllib2 library.\n urllib_agent = 'Python-urllib/%s' % urllib2.__version__\n clientRevision = '$Revision: 2809 $'\n clientVersion = '0'\n if len(clientRevision) > 11:\n clientVersion = clientRevision[11:-2]\n # Prepend client specific agent string.\n user_agent = 'EBI-Sample-Client/%s (%s; Python %s; %s) %s' % (\n clientVersion, os.path.basename(__file__),\n platform.python_version(), platform.system(),\n urllib_agent\n )\n printDebugMessage('getUserAgent', 'user_agent: ' + user_agent, 12)\n printDebugMessage('getUserAgent', 'End', 11)\n return user_agent\n\n# Wrapper for a REST (HTTP GET) request\n\n\ndef restRequest(url):\n printDebugMessage('restRequest', 'Begin', 11)\n printDebugMessage('restRequest', 'url: ' + url, 11)\n # Errors are indicated by HTTP status codes.\n try:\n # Set the User-agent.\n user_agent = getUserAgent()\n http_headers = {'User-Agent': user_agent}\n req = urllib.request.Request(url, None, http_headers)\n # Make the request (HTTP GET).\n reqH = urllib.request.urlopen(req)\n result = reqH.read()\n reqH.close()\n # Errors are indicated by HTTP status codes.\n except urllib.error.HTTPError as ex:\n # Trap exception and output the document to get error message.\n print(ex.read(), file=sys.stderr)\n raise\n printDebugMessage('restRequest', 'End', 11)\n return result\n\n# Get input parameters list\n\n\ndef serviceGetParameters():\n printDebugMessage('serviceGetParameters', 'Begin', 1)\n requestUrl = baseUrl + '/parameters'\n printDebugMessage('serviceGetParameters', 'requestUrl: ' + requestUrl, 2)\n xmlDoc = restRequest(requestUrl)\n doc = xmltramp.parse(xmlDoc)\n printDebugMessage('serviceGetParameters', 'End', 1)\n return doc['id':]\n\n# Print list of parameters\n\n\ndef printGetParameters():\n printDebugMessage('printGetParameters', 'Begin', 1)\n idList = serviceGetParameters()\n for id in idList:\n print(id)\n printDebugMessage('printGetParameters', 'End', 1)\n\n# Get input parameter information\n\n\ndef serviceGetParameterDetails(paramName):\n printDebugMessage('serviceGetParameterDetails', 'Begin', 1)\n printDebugMessage('serviceGetParameterDetails',\n 'paramName: ' + paramName, 2)\n requestUrl = baseUrl + '/parameterdetails/' + paramName\n printDebugMessage('serviceGetParameterDetails',\n 'requestUrl: ' + requestUrl, 2)\n xmlDoc = restRequest(requestUrl)\n doc = xmltramp.parse(xmlDoc)\n printDebugMessage('serviceGetParameterDetails', 'End', 1)\n return doc\n\n# Print description of a parameter\n\n\ndef printGetParameterDetails(paramName):\n printDebugMessage('printGetParameterDetails', 'Begin', 1)\n doc = serviceGetParameterDetails(paramName)\n print(str(doc.name) + \"\\t\" + str(doc.type))\n print(doc.description)\n for 
value in doc.values:\n print(value.value, end=' ')\n if str(value.defaultValue) == 'true':\n print('default', end=' ')\n print()\n print(\"\\t\" + str(value.label))\n if(hasattr(value, 'properties')):\n for wsProperty in value.properties:\n print(\"\\t\" + str(wsProperty.key) + \"\\t\" + str(wsProperty.value))\n #print doc\n printDebugMessage('printGetParameterDetails', 'End', 1)\n\n# Submit job\n\n\ndef serviceRun(email, title, params):\n printDebugMessage('serviceRun', 'Begin', 1)\n # Insert e-mail and title into params\n params['email'] = email\n if title:\n params['title'] = title\n requestUrl = baseUrl + '/run/'\n printDebugMessage('serviceRun', 'requestUrl: ' + requestUrl, 2)\n # Signature methods requires special handling (list)\n applData = ''\n if 'appl' in params:\n # So extract from params\n applList = params['appl']\n del params['appl']\n # Build the method data options\n for appl in applList:\n applData += '&appl=' + appl\n # Get the data for the other options\n requestData = urllib.parse.urlencode(params)\n # Concatenate the two parts.\n requestData += applData\n printDebugMessage('serviceRun', 'requestData: ' + requestData, 2)\n # Errors are indicated by HTTP status codes.\n try:\n # Set the HTTP User-agent.\n user_agent = getUserAgent()\n http_headers = {'User-Agent': user_agent}\n req = urllib.request.Request(requestUrl, None, http_headers)\n # Make the submission (HTTP POST).\n reqH = urllib.request.urlopen(req, requestData)\n jobId = reqH.read()\n reqH.close()\n except urllib.error.HTTPError as ex:\n # Trap exception and output the document to get error message.\n print(ex.read(), file=sys.stderr)\n raise\n printDebugMessage('serviceRun', 'jobId: ' + jobId, 2)\n printDebugMessage('serviceRun', 'End', 1)\n return jobId\n\n# Get job status\n\n\ndef serviceGetStatus(jobId):\n printDebugMessage('serviceGetStatus', 'Begin', 1)\n printDebugMessage('serviceGetStatus', 'jobId: ' + jobId, 2)\n requestUrl = baseUrl + '/status/' + jobId\n printDebugMessage('serviceGetStatus', 'requestUrl: ' + requestUrl, 2)\n status = restRequest(requestUrl)\n printDebugMessage('serviceGetStatus', 'status: ' + status, 2)\n printDebugMessage('serviceGetStatus', 'End', 1)\n return status\n\n# Print the status of a job\n\n\ndef printGetStatus(jobId):\n printDebugMessage('printGetStatus', 'Begin', 1)\n status = serviceGetStatus(jobId)\n print(status)\n printDebugMessage('printGetStatus', 'End', 1)\n\n\n# Get available result types for job\ndef serviceGetResultTypes(jobId):\n printDebugMessage('serviceGetResultTypes', 'Begin', 1)\n printDebugMessage('serviceGetResultTypes', 'jobId: ' + jobId, 2)\n requestUrl = baseUrl + '/resulttypes/' + jobId\n printDebugMessage('serviceGetResultTypes', 'requestUrl: ' + requestUrl, 2)\n xmlDoc = restRequest(requestUrl)\n doc = xmltramp.parse(xmlDoc)\n printDebugMessage('serviceGetResultTypes', 'End', 1)\n return doc['type':]\n\n# Print list of available result types for a job.\n\n\ndef printGetResultTypes(jobId):\n printDebugMessage('printGetResultTypes', 'Begin', 1)\n resultTypeList = serviceGetResultTypes(jobId)\n for resultType in resultTypeList:\n print(resultType['identifier'])\n if(hasattr(resultType, 'label')):\n print(\"\\t\", resultType['label'])\n if(hasattr(resultType, 'description')):\n print(\"\\t\", resultType['description'])\n if(hasattr(resultType, 'mediaType')):\n print(\"\\t\", resultType['mediaType'])\n if(hasattr(resultType, 'fileSuffix')):\n print(\"\\t\", resultType['fileSuffix'])\n printDebugMessage('printGetResultTypes', 'End', 1)\n\n# Get 
result\n\n\ndef serviceGetResult(jobId, type_):\n printDebugMessage('serviceGetResult', 'Begin', 1)\n printDebugMessage('serviceGetResult', 'jobId: ' + jobId, 2)\n printDebugMessage('serviceGetResult', 'type_: ' + type_, 2)\n requestUrl = baseUrl + '/result/' + jobId + '/' + type_\n result = restRequest(requestUrl)\n printDebugMessage('serviceGetResult', 'End', 1)\n return result\n\n# Client-side poll\n\n\ndef clientPoll(jobId):\n printDebugMessage('clientPoll', 'Begin', 1)\n result = 'PENDING'\n while result == 'RUNNING' or result == 'PENDING':\n result = serviceGetStatus(jobId)\n print(result, file=sys.stderr)\n if result == 'RUNNING' or result == 'PENDING':\n time.sleep(checkInterval)\n printDebugMessage('clientPoll', 'End', 1)\n\n# Get result for a jobid\n\n\ndef getResult(jobId):\n printDebugMessage('getResult', 'Begin', 1)\n printDebugMessage('getResult', 'jobId: ' + jobId, 1)\n # Check status and wait if necessary\n clientPoll(jobId)\n # Get available result types\n resultTypes = serviceGetResultTypes(jobId)\n for resultType in resultTypes:\n # Derive the filename for the result\n if options.outfile:\n filename = options.outfile + '.' + \\\n str(resultType['identifier']) + '.' + \\\n str(resultType['fileSuffix'])\n else:\n filename = jobId + '.' + \\\n str(resultType['identifier']) + '.' + \\\n str(resultType['fileSuffix'])\n # Write a result file\n if not options.outformat or options.outformat == str(resultType['identifier']):\n # Get the result\n result = serviceGetResult(jobId, str(resultType['identifier']))\n fh = open(filename, 'w')\n fh.write(result)\n fh.close()\n print(filename)\n printDebugMessage('getResult', 'End', 1)\n\n# Read a file\n\n\ndef readFile(filename):\n printDebugMessage('readFile', 'Begin', 1)\n fh = open(filename, 'r')\n data = fh.read()\n fh.close()\n printDebugMessage('readFile', 'End', 1)\n return data\n\n\n# No options... 
print help.\nif numOpts < 2:\n parser.print_help()\n# List parameters\nelif options.params:\n printGetParameters()\n# Get parameter details\nelif options.paramDetail:\n printGetParameterDetails(options.paramDetail)\n# Submit job\nelif options.email and not options.jobid:\n params = {}\n if 1 > 0:\n if os.access(options.input, os.R_OK): # Read file into content\n params['sequence'] = readFile(options.input)\n else: # Argument is a sequence id\n params['sequence'] = options.input\n elif options.sequence: # Specified via option\n if os.access(options.sequence, os.R_OK): # Read file into content\n params['sequence'] = readFile(options.sequence)\n else: # Argument is a sequence id\n params['sequence'] = options.sequence\n # Map flag options to boolean values.\n # if options.crc:\n # params['crc'] = True\n # elif options.nocrc:\n # params['crc'] = False\n if options.goterms:\n params['goterms'] = True\n elif options.nogoterms:\n params['goterms'] = False\n if options.pathways:\n params['pathways'] = True\n elif options.nopathways:\n params['pathways'] = False\n # Add the other options (if defined)\n if options.appl:\n params['appl'] = re.split('[ \\t\\n,;]+', options.appl)\n\n # Submit the job\n jobid = serviceRun(options.email, options.title, params)\n if options.async: # Async mode\n print(jobid)\n else: # Sync mode\n print(jobid, file=sys.stderr)\n time.sleep(5)\n getResult(jobid)\n# Get job status\nelif options.status and options.jobid:\n printGetStatus(options.jobid)\n# List result types for job\nelif options.resultTypes and options.jobid:\n printGetResultTypes(options.jobid)\n# Get results for job\nelif options.polljob and options.jobid:\n getResult(options.jobid)\nelse:\n print('Error: unrecognised argument combination', file=sys.stderr)\n parser.print_help()\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class MusicStore(Store):
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class MusicStore(Store):
def make_product(self, name):
"""Overides from parent - return a new MusicProduct Object"""
<|reserved_special_token_1|>
from Store import Store
from MusicProduct import MusicProduct
class MusicStore(Store):
def make_product(self, name):
"""Overides from parent - return a new MusicProduct Object"""
<|reserved_special_token_1|>
from Store import Store
from MusicProduct import MusicProduct
class MusicStore(Store):
def make_product(self, name):
        '''Overrides from parent - return a new MusicProduct Object'''
|
flexible
|
{
"blob_id": "0a50b31155afce2558ec066267a9fd0c56964759",
"index": 5653,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass MusicStore(Store):\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass MusicStore(Store):\n\n def make_product(self, name):\n \"\"\"Overides from parent - return a new MusicProduct Object\"\"\"\n",
"step-4": "from Store import Store\nfrom MusicProduct import MusicProduct\n\n\nclass MusicStore(Store):\n\n def make_product(self, name):\n \"\"\"Overides from parent - return a new MusicProduct Object\"\"\"\n",
"step-5": "from Store import Store\nfrom MusicProduct import MusicProduct\n\nclass MusicStore(Store):\n\n def make_product(self, name):\n '''Overides from parent - return a new MusicProduct Object'''\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import pytest
from pandas import (
Index,
NaT,
)
import pandas._testing as tm
def test_astype_str_from_bytes():
# https://github.com/pandas-dev/pandas/issues/38607
idx = Index(["あ", b"a"], dtype="object")
result = idx.astype(str)
expected = Index(["あ", "a"], dtype="object")
tm.assert_index_equal(result, expected)
def test_astype_invalid_nas_to_tdt64_raises():
# GH#45722 don't cast np.datetime64 NaTs to timedelta64 NaT
idx = Index([NaT.asm8] * 2, dtype=object)
msg = r"Cannot cast Index to dtype timedelta64\[ns\]"
with pytest.raises(TypeError, match=msg):
idx.astype("m8[ns]")
|
normal
|
{
"blob_id": "13b2fea09f5a4300563dd8870fe1841b47756b36",
"index": 9972,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef test_astype_invalid_nas_to_tdt64_raises():\n idx = Index([NaT.asm8] * 2, dtype=object)\n msg = 'Cannot cast Index to dtype timedelta64\\\\[ns\\\\]'\n with pytest.raises(TypeError, match=msg):\n idx.astype('m8[ns]')\n",
"step-3": "<mask token>\n\n\ndef test_astype_str_from_bytes():\n idx = Index(['あ', b'a'], dtype='object')\n result = idx.astype(str)\n expected = Index(['あ', 'a'], dtype='object')\n tm.assert_index_equal(result, expected)\n\n\ndef test_astype_invalid_nas_to_tdt64_raises():\n idx = Index([NaT.asm8] * 2, dtype=object)\n msg = 'Cannot cast Index to dtype timedelta64\\\\[ns\\\\]'\n with pytest.raises(TypeError, match=msg):\n idx.astype('m8[ns]')\n",
"step-4": "import pytest\nfrom pandas import Index, NaT\nimport pandas._testing as tm\n\n\ndef test_astype_str_from_bytes():\n idx = Index(['あ', b'a'], dtype='object')\n result = idx.astype(str)\n expected = Index(['あ', 'a'], dtype='object')\n tm.assert_index_equal(result, expected)\n\n\ndef test_astype_invalid_nas_to_tdt64_raises():\n idx = Index([NaT.asm8] * 2, dtype=object)\n msg = 'Cannot cast Index to dtype timedelta64\\\\[ns\\\\]'\n with pytest.raises(TypeError, match=msg):\n idx.astype('m8[ns]')\n",
"step-5": "import pytest\n\nfrom pandas import (\n Index,\n NaT,\n)\nimport pandas._testing as tm\n\n\ndef test_astype_str_from_bytes():\n # https://github.com/pandas-dev/pandas/issues/38607\n idx = Index([\"あ\", b\"a\"], dtype=\"object\")\n result = idx.astype(str)\n expected = Index([\"あ\", \"a\"], dtype=\"object\")\n tm.assert_index_equal(result, expected)\n\n\ndef test_astype_invalid_nas_to_tdt64_raises():\n # GH#45722 don't cast np.datetime64 NaTs to timedelta64 NaT\n idx = Index([NaT.asm8] * 2, dtype=object)\n\n msg = r\"Cannot cast Index to dtype timedelta64\\[ns\\]\"\n with pytest.raises(TypeError, match=msg):\n idx.astype(\"m8[ns]\")\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
"""Project agnostic helper functions that could be migrated to and external lib.
"""
|
flexible
|
{
"blob_id": "f15bb4ab93ecb2689bf74687852e60dfa98caea9",
"index": 7374,
"step-1": "<mask token>\n",
"step-2": "\"\"\"Project agnostic helper functions that could be migrated to and external lib.\n\"\"\"\n",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
import gitlab
from core import settings
gl = gitlab.Gitlab('https://gitlab.intecracy.com/', private_token='dxQyb5fNbLnBxvvpFjyc')
gl.auth()
project = gl.projects.get(settings.projectID)
print(project)
pipelines = project.pipelines.get(26452)
print pipelines
pipelines_jobs = pipelines.jobs.list()[2]
jobs = project.jobs.get(pipelines_jobs.id, lazy=True)
#jobs = project.jobs.get(52986)
print jobs
#print jsonString
jobs.play()
#jobs.trace()
#id_job = project.jobs.get()
#print id_job
|
normal
|
{
"blob_id": "c7f8731fe58a0e0065827b82bb4ad4af670541db",
"index": 5101,
"step-1": "import gitlab\nfrom core import settings\n\n\ngl = gitlab.Gitlab('https://gitlab.intecracy.com/', private_token='dxQyb5fNbLnBxvvpFjyc')\n\ngl.auth()\n\nproject = gl.projects.get(settings.projectID)\nprint(project)\npipelines = project.pipelines.get(26452)\nprint pipelines\npipelines_jobs = pipelines.jobs.list()[2]\njobs = project.jobs.get(pipelines_jobs.id, lazy=True)\n#jobs = project.jobs.get(52986)\nprint jobs\n#print jsonString\njobs.play()\n#jobs.trace()\n\n#id_job = project.jobs.get()\n#print id_job\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
#!/usr/bin/python2
import sys
import argparse
"""
This program generates an extract table having the following format:
<S1> <S2> <S3> ... <Sn> ||| <T1> <T2> <T3> ... <Tk> ||| 0-0
Each line is a mapping from a source sentence to target sentence
with special delimiter characters.
You can give the output of this script to extract2bin to generate
a binary phrase table.
"""
def main():
parser = argparse.ArgumentParser()
parser.add_argument("source_text", type=argparse.FileType("r"),
help="Tokenized sentences in the source language")
parser.add_argument("target_text", type=argparse.FileType("r"),
help="Tokenized sentences in the target language")
args = parser.parse_args()
src_lines = args.source_text.readlines()
len_src = len(src_lines)
tgt_lines = args.target_text.readlines()
len_tgt = len(tgt_lines)
if len_src != len_tgt:
print "Number of sentences doesn't match: %d - %d\n" % (len_src,len_tgt)
return 1
for s, t in zip(src_lines, tgt_lines):
print "%s ||| %s ||| 0-0" % (s.rstrip(), t.rstrip())
args.source_text.close()
args.target_text.close()
if __name__ == '__main__':
sys.exit(main())
|
normal
|
{
"blob_id": "cf0cf028d5f67e8deca8ebd3ad76d9c1e3563002",
"index": 258,
"step-1": "#!/usr/bin/python2\n\nimport sys\nimport argparse\n\n\"\"\"\nThis program generates an extract table having the following format:\n <S1> <S2> <S3> ... <Sn> ||| <T1> <T2> <T3> ... <Tk> ||| 0-0\n\nEach line is a mapping from a source sentence to target sentence\nwith special delimiter characters.\n\nYou can give the output of this script to extract2bin to generate\na binary phrase table.\n\"\"\"\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"source_text\", type=argparse.FileType(\"r\"),\n help=\"Tokenized sentences in the source language\")\n parser.add_argument(\"target_text\", type=argparse.FileType(\"r\"),\n help=\"Tokenized sentences in the target language\")\n args = parser.parse_args()\n\n src_lines = args.source_text.readlines()\n len_src = len(src_lines)\n tgt_lines = args.target_text.readlines()\n len_tgt = len(tgt_lines)\n\n if len_src != len_tgt:\n print \"Number of sentences doesn't match: %d - %d\\n\" % (len_src,len_tgt)\n return 1\n\n for s, t in zip(src_lines, tgt_lines):\n print \"%s ||| %s ||| 0-0\" % (s.rstrip(), t.rstrip())\n\n args.source_text.close()\n args.target_text.close()\n\nif __name__ == '__main__':\n sys.exit(main())\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def main():
"""
primary game method
"""
view = View()
view.root.mainloop()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def main():
"""
primary game method
"""
view = View()
view.root.mainloop()
if __name__ == '__main__':
main()
<|reserved_special_token_1|>
from view import View
def main():
"""
primary game method
"""
view = View()
view.root.mainloop()
if __name__ == '__main__':
main()
<|reserved_special_token_1|>
#Eyal Reis - 203249354
from view import View
def main():
"""
primary game method
"""
view = View()
view.root.mainloop()
if __name__ == "__main__":
main()
|
flexible
|
{
"blob_id": "640eae824e43e394bf0624dd4cf7dcec78f43604",
"index": 4947,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef main():\n \"\"\"\n primary game method\n \"\"\"\n view = View()\n view.root.mainloop()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef main():\n \"\"\"\n primary game method\n \"\"\"\n view = View()\n view.root.mainloop()\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "from view import View\n\n\ndef main():\n \"\"\"\n primary game method\n \"\"\"\n view = View()\n view.root.mainloop()\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "#Eyal Reis - 203249354\n\nfrom view import View\n\ndef main():\n \"\"\"\n primary game method\n \"\"\"\n view = View()\n view.root.mainloop()\n \nif __name__ == \"__main__\":\n main()\n ",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
#!/usr/bin/env python
import sys
def trim_reads(fastq, selection, extra_cut, orientation, output, outputType,
seqLen, trim):
# Store all read/sequence ids that did not match with KoRV
ids = []
with open(selection, 'r') as f:
for line in f:
ids.append(line.strip())
# Store trimming position for each read/sequence id
trim_pros = {}
for line in trim.split('\n'):
if len(line):
line = line.split('\t')
if (line[0] == 'read name'):
if (line[1] == 'end position' and orientation != 3) or \
(line[1] == 'start position' and orientation != 5):
                    print('Wrong setting! 3\' trimming needs the end position '
                          'and 5\' trimming needs the start position.')
sys.exit()
else:
trim_pros[line[0]] = int(line[1])
# Read fastq file line by line and copy a sequence to a new fastq file if:
# 1. Read did not align against KoRV (id is in selection)
# 2. Line is not blank
# 3. Sequence length is greater than the given seqLen
with open(output, 'w') as o:
with open(fastq, 'r') as f:
while True:
identifier = f.readline()
sequence = f.readline()
plus = f.readline()
quality = f.readline()
if not identifier or not sequence or \
not plus or not quality:
break
read_id = identifier.strip()[1:].split(' ')[0]
if read_id in ids:
if read_id in trim_pros:
if (orientation == 3):
cut = trim_pros[read_id] + extra_cut
sequence = sequence[cut:(cut + seqLen)].strip()
quality = quality[cut:(cut + seqLen)].strip()
if (orientation == 5):
cut = trim_pros[read_id] - extra_cut
sequence = sequence[max(cut - seqLen, 0):cut]
quality = quality[max(cut - seqLen, 0):cut]
if (len(sequence) >= seqLen):
if (outputType == 'fasta'):
o.write('>' + identifier[1:])
o.write(sequence[:seqLen] + '\n')
else:
o.write(identifier)
o.write(sequence[:seqLen] + '\n')
o.write(plus)
o.write(quality[:seqLen] + '\n')
#############
# MAIN #
#############
def main():
trim = sys.stdin.read()
if len(sys.argv) > 7:
trim_reads(sys.argv[1], sys.argv[2], int(sys.argv[3]),
int(sys.argv[4]), sys.argv[5], sys.argv[6],
int(sys.argv[7]), trim)
else:
print("trim_reads.py [fastq] [selection] [extracut] [orientation] "
"[output] [format] [maxlen] < [trimming-info]")
sys.exit()
if __name__ == "__main__":
main()
|
normal
|
{
"blob_id": "3f3ed40bf800eddb2722171d5fd94f6c292162de",
"index": 5865,
"step-1": "<mask token>\n\n\ndef main():\n trim = sys.stdin.read()\n if len(sys.argv) > 7:\n trim_reads(sys.argv[1], sys.argv[2], int(sys.argv[3]), int(sys.argv\n [4]), sys.argv[5], sys.argv[6], int(sys.argv[7]), trim)\n else:\n print(\n 'trim_reads.py [fastq] [selection] [extracut] [orientation] [output] [format] [maxlen] < [trimming-info]'\n )\n sys.exit()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef trim_reads(fastq, selection, extra_cut, orientation, output, outputType,\n seqLen, trim):\n ids = []\n with open(selection, 'r') as f:\n for line in f:\n ids.append(line.strip())\n trim_pros = {}\n for line in trim.split('\\n'):\n if len(line):\n line = line.split('\\t')\n if line[0] == 'read name':\n if line[1] == 'end position' and orientation != 3 or line[1\n ] == 'start position' and orientation != 5:\n print(\n \"Wrong setting! 3' trimming needs the end positionand 3' trimming needs the start position.\"\n )\n sys.exit()\n else:\n trim_pros[line[0]] = int(line[1])\n with open(output, 'w') as o:\n with open(fastq, 'r') as f:\n while True:\n identifier = f.readline()\n sequence = f.readline()\n plus = f.readline()\n quality = f.readline()\n if not identifier or not sequence or not plus or not quality:\n break\n read_id = identifier.strip()[1:].split(' ')[0]\n if read_id in ids:\n if read_id in trim_pros:\n if orientation == 3:\n cut = trim_pros[read_id] + extra_cut\n sequence = sequence[cut:cut + seqLen].strip()\n quality = quality[cut:cut + seqLen].strip()\n if orientation == 5:\n cut = trim_pros[read_id] - extra_cut\n sequence = sequence[max(cut - seqLen, 0):cut]\n quality = quality[max(cut - seqLen, 0):cut]\n if len(sequence) >= seqLen:\n if outputType == 'fasta':\n o.write('>' + identifier[1:])\n o.write(sequence[:seqLen] + '\\n')\n else:\n o.write(identifier)\n o.write(sequence[:seqLen] + '\\n')\n o.write(plus)\n o.write(quality[:seqLen] + '\\n')\n\n\ndef main():\n trim = sys.stdin.read()\n if len(sys.argv) > 7:\n trim_reads(sys.argv[1], sys.argv[2], int(sys.argv[3]), int(sys.argv\n [4]), sys.argv[5], sys.argv[6], int(sys.argv[7]), trim)\n else:\n print(\n 'trim_reads.py [fastq] [selection] [extracut] [orientation] [output] [format] [maxlen] < [trimming-info]'\n )\n sys.exit()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef trim_reads(fastq, selection, extra_cut, orientation, output, outputType,\n seqLen, trim):\n ids = []\n with open(selection, 'r') as f:\n for line in f:\n ids.append(line.strip())\n trim_pros = {}\n for line in trim.split('\\n'):\n if len(line):\n line = line.split('\\t')\n if line[0] == 'read name':\n if line[1] == 'end position' and orientation != 3 or line[1\n ] == 'start position' and orientation != 5:\n print(\n \"Wrong setting! 3' trimming needs the end positionand 3' trimming needs the start position.\"\n )\n sys.exit()\n else:\n trim_pros[line[0]] = int(line[1])\n with open(output, 'w') as o:\n with open(fastq, 'r') as f:\n while True:\n identifier = f.readline()\n sequence = f.readline()\n plus = f.readline()\n quality = f.readline()\n if not identifier or not sequence or not plus or not quality:\n break\n read_id = identifier.strip()[1:].split(' ')[0]\n if read_id in ids:\n if read_id in trim_pros:\n if orientation == 3:\n cut = trim_pros[read_id] + extra_cut\n sequence = sequence[cut:cut + seqLen].strip()\n quality = quality[cut:cut + seqLen].strip()\n if orientation == 5:\n cut = trim_pros[read_id] - extra_cut\n sequence = sequence[max(cut - seqLen, 0):cut]\n quality = quality[max(cut - seqLen, 0):cut]\n if len(sequence) >= seqLen:\n if outputType == 'fasta':\n o.write('>' + identifier[1:])\n o.write(sequence[:seqLen] + '\\n')\n else:\n o.write(identifier)\n o.write(sequence[:seqLen] + '\\n')\n o.write(plus)\n o.write(quality[:seqLen] + '\\n')\n\n\ndef main():\n trim = sys.stdin.read()\n if len(sys.argv) > 7:\n trim_reads(sys.argv[1], sys.argv[2], int(sys.argv[3]), int(sys.argv\n [4]), sys.argv[5], sys.argv[6], int(sys.argv[7]), trim)\n else:\n print(\n 'trim_reads.py [fastq] [selection] [extracut] [orientation] [output] [format] [maxlen] < [trimming-info]'\n )\n sys.exit()\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "import sys\n\n\ndef trim_reads(fastq, selection, extra_cut, orientation, output, outputType,\n seqLen, trim):\n ids = []\n with open(selection, 'r') as f:\n for line in f:\n ids.append(line.strip())\n trim_pros = {}\n for line in trim.split('\\n'):\n if len(line):\n line = line.split('\\t')\n if line[0] == 'read name':\n if line[1] == 'end position' and orientation != 3 or line[1\n ] == 'start position' and orientation != 5:\n print(\n \"Wrong setting! 3' trimming needs the end positionand 3' trimming needs the start position.\"\n )\n sys.exit()\n else:\n trim_pros[line[0]] = int(line[1])\n with open(output, 'w') as o:\n with open(fastq, 'r') as f:\n while True:\n identifier = f.readline()\n sequence = f.readline()\n plus = f.readline()\n quality = f.readline()\n if not identifier or not sequence or not plus or not quality:\n break\n read_id = identifier.strip()[1:].split(' ')[0]\n if read_id in ids:\n if read_id in trim_pros:\n if orientation == 3:\n cut = trim_pros[read_id] + extra_cut\n sequence = sequence[cut:cut + seqLen].strip()\n quality = quality[cut:cut + seqLen].strip()\n if orientation == 5:\n cut = trim_pros[read_id] - extra_cut\n sequence = sequence[max(cut - seqLen, 0):cut]\n quality = quality[max(cut - seqLen, 0):cut]\n if len(sequence) >= seqLen:\n if outputType == 'fasta':\n o.write('>' + identifier[1:])\n o.write(sequence[:seqLen] + '\\n')\n else:\n o.write(identifier)\n o.write(sequence[:seqLen] + '\\n')\n o.write(plus)\n o.write(quality[:seqLen] + '\\n')\n\n\ndef main():\n trim = sys.stdin.read()\n if len(sys.argv) > 7:\n trim_reads(sys.argv[1], sys.argv[2], int(sys.argv[3]), int(sys.argv\n [4]), sys.argv[5], sys.argv[6], int(sys.argv[7]), trim)\n else:\n print(\n 'trim_reads.py [fastq] [selection] [extracut] [orientation] [output] [format] [maxlen] < [trimming-info]'\n )\n sys.exit()\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "#!/usr/bin/env python\nimport sys\n\n\ndef trim_reads(fastq, selection, extra_cut, orientation, output, outputType,\n seqLen, trim):\n\n # Store all read/sequence ids that did not match with KoRV\n ids = []\n with open(selection, 'r') as f:\n for line in f:\n ids.append(line.strip())\n\n # Store trimming position for each read/sequence id\n trim_pros = {}\n for line in trim.split('\\n'):\n if len(line):\n line = line.split('\\t')\n if (line[0] == 'read name'):\n if (line[1] == 'end position' and orientation != 3) or \\\n (line[1] == 'start position' and orientation != 5):\n print('Wrong setting! 3\\' trimming needs the end position'\n 'and 3\\' trimming needs the start position.')\n sys.exit()\n else:\n trim_pros[line[0]] = int(line[1])\n\n # Read fastq file line by line and copy a sequence to a new fastq file if:\n # 1. Read did not align against KoRV (id is in selection)\n # 2. Line is not blank\n # 3. Sequence length is greater than the given seqLen\n with open(output, 'w') as o:\n with open(fastq, 'r') as f:\n while True:\n identifier = f.readline()\n sequence = f.readline()\n plus = f.readline()\n quality = f.readline()\n\n if not identifier or not sequence or \\\n not plus or not quality:\n break\n\n read_id = identifier.strip()[1:].split(' ')[0]\n\n if read_id in ids:\n if read_id in trim_pros:\n if (orientation == 3):\n cut = trim_pros[read_id] + extra_cut\n sequence = sequence[cut:(cut + seqLen)].strip()\n quality = quality[cut:(cut + seqLen)].strip()\n if (orientation == 5):\n cut = trim_pros[read_id] - extra_cut\n sequence = sequence[max(cut - seqLen, 0):cut]\n quality = quality[max(cut - seqLen, 0):cut]\n\n if (len(sequence) >= seqLen):\n if (outputType == 'fasta'):\n o.write('>' + identifier[1:])\n o.write(sequence[:seqLen] + '\\n')\n else:\n o.write(identifier)\n o.write(sequence[:seqLen] + '\\n')\n o.write(plus)\n o.write(quality[:seqLen] + '\\n')\n\n\n#############\n# MAIN #\n#############\n\ndef main():\n trim = sys.stdin.read()\n if len(sys.argv) > 7:\n trim_reads(sys.argv[1], sys.argv[2], int(sys.argv[3]),\n int(sys.argv[4]), sys.argv[5], sys.argv[6],\n int(sys.argv[7]), trim)\n else:\n print(\"trim_reads.py [fastq] [selection] [extracut] [orientation] \"\n \"[output] [format] [maxlen] < [trimming-info]\")\n sys.exit()\n\nif __name__ == \"__main__\":\n main()\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
for i, s in enumerate(stick):
if s == '(':
temp.append(s)
elif stick[i - 1] == '(':
temp.pop()
cnt += len(temp)
else:
temp.pop()
cnt += 1
print(cnt)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
sys.stdin = open('input.txt', 'r')
stick = input()
cnt = 0
temp = []
for i, s in enumerate(stick):
if s == '(':
temp.append(s)
elif stick[i - 1] == '(':
temp.pop()
cnt += len(temp)
else:
temp.pop()
cnt += 1
print(cnt)
<|reserved_special_token_1|>
import sys
sys.stdin = open('input.txt', 'r')
stick = input()
cnt = 0
temp = []
for i, s in enumerate(stick):
if s == '(':
temp.append(s)
elif stick[i - 1] == '(':
temp.pop()
cnt += len(temp)
else:
temp.pop()
cnt += 1
print(cnt)
<|reserved_special_token_1|>
import sys
sys.stdin = open("input.txt", "r")
stick = input()
cnt = 0
temp =[]
for i,s in enumerate(stick):
    # '(' is always pushed onto the stack
if s == '(':
temp.append(s)
else:
        # laser cut: ')' immediately follows '('
if stick[i-1] == '(':
temp.pop()
cnt += len(temp)
        # end of a bar segment: count one more piece
else:
temp.pop()
cnt +=1
print(cnt)
|
flexible
|
{
"blob_id": "9f38148c19f0cb9522725d9eb27c91f70055cba1",
"index": 4998,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor i, s in enumerate(stick):\n if s == '(':\n temp.append(s)\n elif stick[i - 1] == '(':\n temp.pop()\n cnt += len(temp)\n else:\n temp.pop()\n cnt += 1\nprint(cnt)\n",
"step-3": "<mask token>\nsys.stdin = open('input.txt', 'r')\nstick = input()\ncnt = 0\ntemp = []\nfor i, s in enumerate(stick):\n if s == '(':\n temp.append(s)\n elif stick[i - 1] == '(':\n temp.pop()\n cnt += len(temp)\n else:\n temp.pop()\n cnt += 1\nprint(cnt)\n",
"step-4": "import sys\nsys.stdin = open('input.txt', 'r')\nstick = input()\ncnt = 0\ntemp = []\nfor i, s in enumerate(stick):\n if s == '(':\n temp.append(s)\n elif stick[i - 1] == '(':\n temp.pop()\n cnt += len(temp)\n else:\n temp.pop()\n cnt += 1\nprint(cnt)\n",
"step-5": "import sys\nsys.stdin = open(\"input.txt\", \"r\")\nstick = input()\ncnt = 0\ntemp =[]\n\nfor i,s in enumerate(stick):\n #'('나오면 무조건 추가\n if s == '(':\n temp.append(s)\n \n else:\n #절단인 경우\n if stick[i-1] == '(':\n temp.pop()\n cnt += len(temp)\n\n #길이가 짧아 아웃 \n else:\n temp.pop()\n cnt +=1\n \n \nprint(cnt)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
def stats_count_info(request):
root_path = request.app['PATH-DB']
cpt = 0
d = dict()
dirs_data = dict()
for root, dirs, files in os.walk(root_path, topdown=False):
cpt += len(files)
size = sum(getsize(join(root, name)) for name in files)
subdir_size = sum(dirs_data[join(root, d)] for d in dirs)
size = dirs_data[root] = size + subdir_size
if root.find('.meta') != -1:
continue
d[root] = size
ret = ''
ret += '<h2>Files Count</h2>Number of files: {}<br /><br />'.format(cpt)
ret += '<h2>Disk Consumption</h2>'
ret += 'Database disk consumption overall: {} MB<br /><br />'.format(d[
root_path] // (1024 * 1024))
    ret += '<h4>Resource Usage Listed by Objects</h4><br />'
ret += TBL_HEAD
for k in sorted(d, key=d.get, reverse=True):
ret += '<tr>'
ret += '<td>{}</td><td>{}</td>'.format(k, d[k])
ret += TBL_FOOTER
return ret
<|reserved_special_token_0|>
def handle(request):
return generate_disk_info_page(request)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def stats_count_info(request):
root_path = request.app['PATH-DB']
cpt = 0
d = dict()
dirs_data = dict()
for root, dirs, files in os.walk(root_path, topdown=False):
cpt += len(files)
size = sum(getsize(join(root, name)) for name in files)
subdir_size = sum(dirs_data[join(root, d)] for d in dirs)
size = dirs_data[root] = size + subdir_size
if root.find('.meta') != -1:
continue
d[root] = size
ret = ''
ret += '<h2>Files Count</h2>Number of files: {}<br /><br />'.format(cpt)
ret += '<h2>Disk Consumption</h2>'
ret += 'Database disk consumption overall: {} MB<br /><br />'.format(d[
root_path] // (1024 * 1024))
    ret += '<h4>Resource Usage Listed by Objects</h4><br />'
ret += TBL_HEAD
for k in sorted(d, key=d.get, reverse=True):
ret += '<tr>'
ret += '<td>{}</td><td>{}</td>'.format(k, d[k])
ret += TBL_FOOTER
return ret
def generate_disk_info_page(request):
page = request.app['BLOB-HEADER']
page += stats_count_info(request)
page += request.app['BLOB-FOOTER']
return web.Response(body=page, content_type='text/html')
def handle(request):
return generate_disk_info_page(request)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
TBL_HEAD = """
<table class="table table-striped table-hover table-sm">
<thead>
<tr>
<th scope="col">Directory</th>
<th scope="col">Size</th>
</tr>
</thead>
<tbody>
"""
TBL_FOOTER = """
</tbody>
</table>
"""
def stats_count_info(request):
root_path = request.app['PATH-DB']
cpt = 0
d = dict()
dirs_data = dict()
for root, dirs, files in os.walk(root_path, topdown=False):
cpt += len(files)
size = sum(getsize(join(root, name)) for name in files)
subdir_size = sum(dirs_data[join(root, d)] for d in dirs)
size = dirs_data[root] = size + subdir_size
if root.find('.meta') != -1:
continue
d[root] = size
ret = ''
ret += '<h2>Files Count</h2>Number of files: {}<br /><br />'.format(cpt)
ret += '<h2>Disk Consumption</h2>'
ret += 'Database disk consumption overall: {} MB<br /><br />'.format(d[
root_path] // (1024 * 1024))
    ret += '<h4>Resource Usage Listed by Objects</h4><br />'
ret += TBL_HEAD
for k in sorted(d, key=d.get, reverse=True):
ret += '<tr>'
ret += '<td>{}</td><td>{}</td>'.format(k, d[k])
ret += TBL_FOOTER
return ret
def generate_disk_info_page(request):
page = request.app['BLOB-HEADER']
page += stats_count_info(request)
page += request.app['BLOB-FOOTER']
return web.Response(body=page, content_type='text/html')
def handle(request):
return generate_disk_info_page(request)
<|reserved_special_token_1|>
import os
import time
import re
import json
from os.path import join, getsize
from aiohttp import web
from utils import helper
TBL_HEAD = """
<table class="table table-striped table-hover table-sm">
<thead>
<tr>
<th scope="col">Directory</th>
<th scope="col">Size</th>
</tr>
</thead>
<tbody>
"""
TBL_FOOTER = """
</tbody>
</table>
"""
def stats_count_info(request):
root_path = request.app['PATH-DB']
cpt = 0
d = dict()
dirs_data = dict()
for root, dirs, files in os.walk(root_path, topdown=False):
cpt += len(files)
size = sum(getsize(join(root, name)) for name in files)
subdir_size = sum(dirs_data[join(root, d)] for d in dirs)
size = dirs_data[root] = size + subdir_size
if root.find('.meta') != -1:
continue
d[root] = size
ret = ''
ret += '<h2>Files Count</h2>Number of files: {}<br /><br />'.format(cpt)
ret += '<h2>Disk Consumption</h2>'
ret += 'Database disk consumption overall: {} MB<br /><br />'.format(d[
root_path] // (1024 * 1024))
    ret += '<h4>Resource Usage Listed by Objects</h4><br />'
ret += TBL_HEAD
for k in sorted(d, key=d.get, reverse=True):
ret += '<tr>'
ret += '<td>{}</td><td>{}</td>'.format(k, d[k])
ret += TBL_FOOTER
return ret
def generate_disk_info_page(request):
page = request.app['BLOB-HEADER']
page += stats_count_info(request)
page += request.app['BLOB-FOOTER']
return web.Response(body=page, content_type='text/html')
def handle(request):
return generate_disk_info_page(request)
<|reserved_special_token_1|>
import os
import time
import re
import json
from os.path import join, getsize
from aiohttp import web
from utils import helper
TBL_HEAD = '''
<table class="table table-striped table-hover table-sm">
<thead>
<tr>
<th scope="col">Directory</th>
<th scope="col">Size</th>
</tr>
</thead>
<tbody>
'''
TBL_FOOTER = '''
</tbody>
</table>
'''
def stats_count_info(request):
root_path = request.app['PATH-DB']
cpt = 0
d = dict()
dirs_data = dict()
for root, dirs, files in os.walk(root_path, topdown=False):
cpt += len(files)
size = sum(getsize(join(root, name)) for name in files)
subdir_size = sum(dirs_data[join(root,d)] for d in dirs)
size = dirs_data[root] = size + subdir_size
if root.find('.meta') != -1:
# we ignore (internal) meta directories
continue
d[root] = size
ret = ''
ret += "<h2>Files Count</h2>Number of files: {}<br /><br />".format(cpt)
ret += "<h2>Disk Consumption</h2>"
ret += "Database disk consumption overall: {} MB<br /><br />".format(d[root_path] // (1024*1024))
ret += "<h4>Resouce Usage Listed by Objects</h4><br />"
ret += TBL_HEAD
for k in sorted(d, key=d.get, reverse=True):
ret += '<tr>'
ret += "<td>{}</td><td>{}</td>".format(k, d[k])
ret += TBL_FOOTER
return ret
def generate_disk_info_page(request):
page = request.app['BLOB-HEADER']
page += stats_count_info(request)
page += request.app['BLOB-FOOTER']
return web.Response(body=page, content_type='text/html')
def handle(request):
return generate_disk_info_page(request)
|
flexible
|
{
"blob_id": "7c9b51ae7cde9c3a00888dac6df710b93af6dd7f",
"index": 4836,
"step-1": "<mask token>\n\n\ndef stats_count_info(request):\n root_path = request.app['PATH-DB']\n cpt = 0\n d = dict()\n dirs_data = dict()\n for root, dirs, files in os.walk(root_path, topdown=False):\n cpt += len(files)\n size = sum(getsize(join(root, name)) for name in files)\n subdir_size = sum(dirs_data[join(root, d)] for d in dirs)\n size = dirs_data[root] = size + subdir_size\n if root.find('.meta') != -1:\n continue\n d[root] = size\n ret = ''\n ret += '<h2>Files Count</h2>Number of files: {}<br /><br />'.format(cpt)\n ret += '<h2>Disk Consumption</h2>'\n ret += 'Database disk consumption overall: {} MB<br /><br />'.format(d[\n root_path] // (1024 * 1024))\n ret += '<h4>Resouce Usage Listed by Objects</h4><br />'\n ret += TBL_HEAD\n for k in sorted(d, key=d.get, reverse=True):\n ret += '<tr>'\n ret += '<td>{}</td><td>{}</td>'.format(k, d[k])\n ret += TBL_FOOTER\n return ret\n\n\n<mask token>\n\n\ndef handle(request):\n return generate_disk_info_page(request)\n",
"step-2": "<mask token>\n\n\ndef stats_count_info(request):\n root_path = request.app['PATH-DB']\n cpt = 0\n d = dict()\n dirs_data = dict()\n for root, dirs, files in os.walk(root_path, topdown=False):\n cpt += len(files)\n size = sum(getsize(join(root, name)) for name in files)\n subdir_size = sum(dirs_data[join(root, d)] for d in dirs)\n size = dirs_data[root] = size + subdir_size\n if root.find('.meta') != -1:\n continue\n d[root] = size\n ret = ''\n ret += '<h2>Files Count</h2>Number of files: {}<br /><br />'.format(cpt)\n ret += '<h2>Disk Consumption</h2>'\n ret += 'Database disk consumption overall: {} MB<br /><br />'.format(d[\n root_path] // (1024 * 1024))\n ret += '<h4>Resouce Usage Listed by Objects</h4><br />'\n ret += TBL_HEAD\n for k in sorted(d, key=d.get, reverse=True):\n ret += '<tr>'\n ret += '<td>{}</td><td>{}</td>'.format(k, d[k])\n ret += TBL_FOOTER\n return ret\n\n\ndef generate_disk_info_page(request):\n page = request.app['BLOB-HEADER']\n page += stats_count_info(request)\n page += request.app['BLOB-FOOTER']\n return web.Response(body=page, content_type='text/html')\n\n\ndef handle(request):\n return generate_disk_info_page(request)\n",
"step-3": "<mask token>\nTBL_HEAD = \"\"\"\n<table class=\"table table-striped table-hover table-sm\">\n <thead>\n <tr>\n <th scope=\"col\">Directory</th>\n <th scope=\"col\">Size</th>\n </tr>\n </thead>\n <tbody>\n\"\"\"\nTBL_FOOTER = \"\"\"\n </tbody>\n</table>\n\"\"\"\n\n\ndef stats_count_info(request):\n root_path = request.app['PATH-DB']\n cpt = 0\n d = dict()\n dirs_data = dict()\n for root, dirs, files in os.walk(root_path, topdown=False):\n cpt += len(files)\n size = sum(getsize(join(root, name)) for name in files)\n subdir_size = sum(dirs_data[join(root, d)] for d in dirs)\n size = dirs_data[root] = size + subdir_size\n if root.find('.meta') != -1:\n continue\n d[root] = size\n ret = ''\n ret += '<h2>Files Count</h2>Number of files: {}<br /><br />'.format(cpt)\n ret += '<h2>Disk Consumption</h2>'\n ret += 'Database disk consumption overall: {} MB<br /><br />'.format(d[\n root_path] // (1024 * 1024))\n ret += '<h4>Resouce Usage Listed by Objects</h4><br />'\n ret += TBL_HEAD\n for k in sorted(d, key=d.get, reverse=True):\n ret += '<tr>'\n ret += '<td>{}</td><td>{}</td>'.format(k, d[k])\n ret += TBL_FOOTER\n return ret\n\n\ndef generate_disk_info_page(request):\n page = request.app['BLOB-HEADER']\n page += stats_count_info(request)\n page += request.app['BLOB-FOOTER']\n return web.Response(body=page, content_type='text/html')\n\n\ndef handle(request):\n return generate_disk_info_page(request)\n",
"step-4": "import os\nimport time\nimport re\nimport json\nfrom os.path import join, getsize\nfrom aiohttp import web\nfrom utils import helper\nTBL_HEAD = \"\"\"\n<table class=\"table table-striped table-hover table-sm\">\n <thead>\n <tr>\n <th scope=\"col\">Directory</th>\n <th scope=\"col\">Size</th>\n </tr>\n </thead>\n <tbody>\n\"\"\"\nTBL_FOOTER = \"\"\"\n </tbody>\n</table>\n\"\"\"\n\n\ndef stats_count_info(request):\n root_path = request.app['PATH-DB']\n cpt = 0\n d = dict()\n dirs_data = dict()\n for root, dirs, files in os.walk(root_path, topdown=False):\n cpt += len(files)\n size = sum(getsize(join(root, name)) for name in files)\n subdir_size = sum(dirs_data[join(root, d)] for d in dirs)\n size = dirs_data[root] = size + subdir_size\n if root.find('.meta') != -1:\n continue\n d[root] = size\n ret = ''\n ret += '<h2>Files Count</h2>Number of files: {}<br /><br />'.format(cpt)\n ret += '<h2>Disk Consumption</h2>'\n ret += 'Database disk consumption overall: {} MB<br /><br />'.format(d[\n root_path] // (1024 * 1024))\n ret += '<h4>Resouce Usage Listed by Objects</h4><br />'\n ret += TBL_HEAD\n for k in sorted(d, key=d.get, reverse=True):\n ret += '<tr>'\n ret += '<td>{}</td><td>{}</td>'.format(k, d[k])\n ret += TBL_FOOTER\n return ret\n\n\ndef generate_disk_info_page(request):\n page = request.app['BLOB-HEADER']\n page += stats_count_info(request)\n page += request.app['BLOB-FOOTER']\n return web.Response(body=page, content_type='text/html')\n\n\ndef handle(request):\n return generate_disk_info_page(request)\n",
"step-5": "import os\nimport time\nimport re\nimport json\nfrom os.path import join, getsize\n\nfrom aiohttp import web\n\nfrom utils import helper\n\nTBL_HEAD = '''\n<table class=\"table table-striped table-hover table-sm\">\n <thead>\n <tr>\n <th scope=\"col\">Directory</th>\n <th scope=\"col\">Size</th>\n </tr>\n </thead>\n <tbody>\n'''\n\nTBL_FOOTER = '''\n </tbody>\n</table>\n'''\n\ndef stats_count_info(request):\n root_path = request.app['PATH-DB']\n cpt = 0\n d = dict()\n dirs_data = dict()\n for root, dirs, files in os.walk(root_path, topdown=False):\n cpt += len(files)\n size = sum(getsize(join(root, name)) for name in files)\n subdir_size = sum(dirs_data[join(root,d)] for d in dirs)\n size = dirs_data[root] = size + subdir_size\n if root.find('.meta') != -1:\n # we ignore (internal) meta directories\n continue\n d[root] = size\n\n ret = ''\n ret += \"<h2>Files Count</h2>Number of files: {}<br /><br />\".format(cpt)\n ret += \"<h2>Disk Consumption</h2>\"\n ret += \"Database disk consumption overall: {} MB<br /><br />\".format(d[root_path] // (1024*1024))\n ret += \"<h4>Resouce Usage Listed by Objects</h4><br />\"\n ret += TBL_HEAD\n for k in sorted(d, key=d.get, reverse=True):\n ret += '<tr>'\n ret += \"<td>{}</td><td>{}</td>\".format(k, d[k])\n ret += TBL_FOOTER\n return ret\n\ndef generate_disk_info_page(request):\n page = request.app['BLOB-HEADER']\n page += stats_count_info(request)\n page += request.app['BLOB-FOOTER']\n return web.Response(body=page, content_type='text/html')\n\n\ndef handle(request):\n return generate_disk_info_page(request)\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""Project Euler: 0010
https://projecteuler.net/problem=10
Summation of primes
The sum of the primes below 10 is 2 + 3 + 5 + 7 = 17.
Find the sum of all the primes below two million.
"""
import math
import sys
PROBLEM = 10
SOLVED = True
SPEED = 29.16
TAGS = ['primes']
class Primes(object):
"""Iteratable class that handles prime number generation and testing"""
# cache of currently known primes
known_primes = [2, 3]
def __init__(self, maximum=float('inf'), count=float('inf')):
self.maximum = maximum
self.count = count
self.__iter = 0
def __iter__(self):
return self
def __getitem__(self, key):
if not isinstance(key, int):
raise TypeError('Cannot use "%s" as a list index' % type(key))
while len(self.known_primes) <= key:
self.next()
return self.known_primes[key]
def next(self):
"""Fetch the next prime number"""
if self.__iter >= self.count:
# print 'Reached maximum count %d (%d)' % (self.count, self.__iter)
raise StopIteration()
if self.__iter < len(self.known_primes):
if self.known_primes[self.__iter] > self.maximum:
raise StopIteration()
key = self.__iter
self.__iter += 1
return self.known_primes[key]
candidate = self.known_primes[-1] + 2
while True:
# print 'Checking to see if candidate %d is prime' % candidate
if candidate > self.maximum:
raise StopIteration()
if not self.first_factor(candidate):
self.known_primes.append(candidate)
self.__iter += 1
return candidate
candidate += 2
@classmethod
def first_factor(cls, number):
"""Returns the lowest factor of the number.
If the number is prime, None is returned instead.
"""
for prime in cls(maximum=math.sqrt(number)):
if not number % prime:
return prime
return None
@classmethod
def factor(cls, number):
"""Returns a list of prime factors that this number is composed of"""
factors = []
for prime in cls():
if prime > number:
break
# print 'Checking to see if %d is a factor of %d' % (prime, number)
# reduce the total iterations
if prime > math.sqrt(number):
factors.append(number)
break
while not number % prime:
number /= prime
factors.append(prime)
return factors
def main(args=sys.argv[1:]):
"""Solve problem."""
print 'Project Euler: %04d' % PROBLEM
maximum = 2000000
if len(args) > 0:
maximum = int(args[0])
print sum(Primes(maximum=maximum))
if __name__ == '__main__':
main()
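# --- Worked examples (illustrative notes, not part of the original script) ---
# Primes.first_factor(35) tests primes up to sqrt(35) ~ 5.9 and returns 5,
# the smallest prime factor; for a prime argument it returns None.
# Primes.factor(12) divides out 2 twice; the remaining 3 exceeds its own
# square root, so it is appended directly, giving [2, 2, 3].
# sum(Primes(maximum=10)) yields 2 + 3 + 5 + 7 = 17, matching the example
# in the module docstring.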
|
normal
|
{
"blob_id": "adf8b52f6e71546b591ceb34a9425c28f74883fa",
"index": 6288,
"step-1": "#!/usr/bin/env python\n# -*- coding: UTF-8 -*-\n\"\"\"Project Euler: 0010\n\nhttps://projecteuler.net/problem=10\n\nSummation of primes\n\nThe sum of the primes below 10 is 2 + 3 + 5 + 7 = 17.\n\nFind the sum of all the primes below two million.\n\"\"\"\n\nimport math\nimport sys\n\nPROBLEM = 10\nSOLVED = True\nSPEED = 29.16\nTAGS = ['primes']\n\n\nclass Primes(object):\n \"\"\"Iteratable class that handles prime number generation and testing\"\"\"\n\n # cache of currently known primes\n known_primes = [2, 3]\n\n def __init__(self, maximum=float('inf'), count=float('inf')):\n self.maximum = maximum\n self.count = count\n self.__iter = 0\n\n def __iter__(self):\n return self\n\n def __getitem__(self, key):\n if not isinstance(key, int):\n raise TypeError('Cannot use \"%s\" as a list index' % type(key))\n while len(self.known_primes) <= key:\n self.next()\n return self.known_primes[key]\n\n def next(self):\n \"\"\"Fetch the next prime number\"\"\"\n if self.__iter >= self.count:\n # print 'Reached maximum count %d (%d)' % (self.count, self.__iter)\n raise StopIteration()\n if self.__iter < len(self.known_primes):\n if self.known_primes[self.__iter] > self.maximum:\n raise StopIteration()\n key = self.__iter\n self.__iter += 1\n return self.known_primes[key]\n candidate = self.known_primes[-1] + 2\n while True:\n # print 'Checking to see if candidate %d is prime' % candidate\n if candidate > self.maximum:\n raise StopIteration()\n if not self.first_factor(candidate):\n self.known_primes.append(candidate)\n self.__iter += 1\n return candidate\n candidate += 2\n\n @classmethod\n def first_factor(cls, number):\n \"\"\"Returns the lowest factor of the number.\n If the number is prime, None is returned instead.\n \"\"\"\n for prime in cls(maximum=math.sqrt(number)):\n if not number % prime:\n return prime\n return None\n\n @classmethod\n def factor(cls, number):\n \"\"\"Returns a list of prime factors that this number is composed of\"\"\"\n factors = []\n for prime in cls():\n if prime > number:\n break\n # print 'Checking to see if %d is a factor of %d' % (prime, number)\n # reduce the total iterations\n if prime > math.sqrt(number):\n factors.append(number)\n break\n while not number % prime:\n number /= prime\n factors.append(prime)\n return factors\n\n\ndef main(args=sys.argv[1:]):\n \"\"\"Solve problem.\"\"\"\n print 'Project Euler: %04d' % PROBLEM\n maximum = 2000000\n if len(args) > 0:\n maximum = int(args[0])\n print sum(Primes(maximum=maximum))\n\n\nif __name__ == '__main__':\n main()\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
<|reserved_special_token_0|>
class dm107s:
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def incremt(self, rl, pt, th, yw):
self._value_to_change = [128, 128, 128, 128]
self._change_val = [rl, pt, th, yw]
for x in range(len(self._value_to_change)):
self._value_to_change[x] += self._change_val[x]
if self._value_to_change[x] <= 0:
self._value_to_change[x] = 0
if self._value_to_change[x] >= 255:
self._value_to_change[x] = 255
[self.roll, self.pitch, self.throttle, self.yaw
] = self._value_to_change
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def throttle_dwn(self):
self.throttle -= 20
if self.throttle < 18:
self.throttle = 18
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def calib_gyro(self):
if self._calibrate_flag == False:
self.roll = 128
self.pitch = 128
self.throttle = 128
self.yaw = 128
self.commands = 4
self.onoff = 0
self._calibrate_flag = True
self._calibrate_timer = time()
class naza:
def __init__(self, ip, port):
self.roll = 8
self.pitch = 8
self.throttle = 8
self.yaw = 8
self._takeoff_flag = False
self._ignite_flag = False
self._ignite_send = False
self.sess = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, 0)
self.ip = ip
self.port = port
self._ignite_timer = 0
self._takeoff_timer = 0
self._stopped = False
def start(self):
self._thread = Thread(target=self.send_ctrl, args=(), daemon=True)
self._thread.start()
return self
def get_hex(self):
self.command_out = (self.throttle << 12 | self.yaw << 8 | self.
pitch << 4 | self.roll)
self.command_out = hex(self.command_out)[2:]
return self.command_out
def send_ctrl(self):
while not self._stopped:
if self._ignite_send == True:
ignite_msg = 'st'
self._package = ignite_msg.encode()
else:
self._package = self.get_hex().encode()
self.sess.sendto(self._package, (self.ip, self.port))
self.Flag_off()
sleep(0.05)
def close_connection(self):
self._stopped = True
if self._thread.daemon == False:
self._thread.join()
self.sess.close()
def default(self):
self.roll = 8
self.pitch = 8
self.throttle = 8
self.yaw = 8
self._takeoff_flag = False
self._ignite_flag = False
def incremt(self, rl, pt, th, yw):
self._value_to_change = [8, 8, 8, 8]
self._change_val = [rl, pt, th, yw]
for x in range(len(self._value_to_change)):
self._value_to_change[x] += self._change_val[x]
if self._value_to_change[x] <= 0:
self._value_to_change[x] = 0
if self._value_to_change[x] >= 15:
self._value_to_change[x] = 15
[self.roll, self.pitch, self.throttle, self.yaw
] = self._value_to_change
def roll_right(self):
if self.roll < 15:
self.roll += 1
def pitch_fwd(self):
if self.pitch < 15:
self.pitch += 1
def throttle_up(self):
if self.throttle < 15:
self.throttle += 1
def yaw_right(self):
if self.yaw < 15:
self.yaw += 1
def roll_left(self):
if self.roll > 0:
self.roll -= 1
def pitch_bwd(self):
if self.pitch > 0:
self.pitch -= 1
def throttle_dwn(self):
if self.throttle > 0:
self.throttle -= 1
def yaw_left(self):
if self.yaw > 0:
self.yaw -= 1
def ignite(self):
if self._ignite_flag == False:
self._ignite_flag = True
self._ignite_send = True
self._ignite_timer = time()
def takeoff(self):
if self._takeoff_flag == False:
self.throttle = 12
self._takeoff_flag = True
self._takeoff_timer = time()
def Flag_off(self):
if self._ignite_flag == True:
if time() - self._ignite_timer >= 1 and time(
) - self._ignite_timer < 1.5:
self._ignite_send = False
self.roll = 8
self.pitch = 8
self.yaw = 8
self.throttle = 0
if time() - self._ignite_timer >= 1.5 and time(
) - self._ignite_timer < 2:
self.throttle = 2
if time() - self._ignite_timer >= 2 and time(
) - self._ignite_timer < 2.5:
self.throttle = 4
if time() - self._ignite_timer >= 2.5 and time(
) - self._ignite_timer < 3:
self.throttle = 6
if time() - self._ignite_timer >= 3 and time(
) - self._ignite_timer < 4:
self.throttle = 8
if time() - self._ignite_timer >= 4:
self._ignite_flag = False
self.takeoff()
if self._takeoff_flag == True and time() - self._takeoff_timer >= 4:
self.throttle = 8
self._takeoff_flag = False
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class dm107s:
<|reserved_special_token_0|>
def start(self):
self._thread = Thread(target=self.send_ctrl, args=(), daemon=True)
self._thread.start()
return self
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def default(self):
self.roll = 128
self.pitch = 128
self.throttle = 128
self.yaw = 128
self.commands = 0
self.onoff = 1
self._takeoff_flag = False
def incremt(self, rl, pt, th, yw):
self._value_to_change = [128, 128, 128, 128]
self._change_val = [rl, pt, th, yw]
for x in range(len(self._value_to_change)):
self._value_to_change[x] += self._change_val[x]
if self._value_to_change[x] <= 0:
self._value_to_change[x] = 0
if self._value_to_change[x] >= 255:
self._value_to_change[x] = 255
[self.roll, self.pitch, self.throttle, self.yaw
] = self._value_to_change
def roll_right(self):
self.roll += 20
if self.roll > 248:
self.roll = 248
<|reserved_special_token_0|>
def throttle_up(self):
self.throttle += 20
if self.throttle > 248:
self.throttle = 248
def yaw_right(self):
self.yaw -= 20
if self.yaw < 18:
self.yaw = 18
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def throttle_dwn(self):
self.throttle -= 20
if self.throttle < 18:
self.throttle = 18
def yaw_left(self):
self.yaw += 20
if self.yaw > 248:
self.yaw = 248
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def calib_gyro(self):
if self._calibrate_flag == False:
self.roll = 128
self.pitch = 128
self.throttle = 128
self.yaw = 128
self.commands = 4
self.onoff = 0
self._calibrate_flag = True
self._calibrate_timer = time()
class naza:
def __init__(self, ip, port):
self.roll = 8
self.pitch = 8
self.throttle = 8
self.yaw = 8
self._takeoff_flag = False
self._ignite_flag = False
self._ignite_send = False
self.sess = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, 0)
self.ip = ip
self.port = port
self._ignite_timer = 0
self._takeoff_timer = 0
self._stopped = False
def start(self):
self._thread = Thread(target=self.send_ctrl, args=(), daemon=True)
self._thread.start()
return self
def get_hex(self):
self.command_out = (self.throttle << 12 | self.yaw << 8 | self.
pitch << 4 | self.roll)
self.command_out = hex(self.command_out)[2:]
return self.command_out
def send_ctrl(self):
while not self._stopped:
if self._ignite_send == True:
ignite_msg = 'st'
self._package = ignite_msg.encode()
else:
self._package = self.get_hex().encode()
self.sess.sendto(self._package, (self.ip, self.port))
self.Flag_off()
sleep(0.05)
def close_connection(self):
self._stopped = True
if self._thread.daemon == False:
self._thread.join()
self.sess.close()
def default(self):
self.roll = 8
self.pitch = 8
self.throttle = 8
self.yaw = 8
self._takeoff_flag = False
self._ignite_flag = False
def incremt(self, rl, pt, th, yw):
self._value_to_change = [8, 8, 8, 8]
self._change_val = [rl, pt, th, yw]
for x in range(len(self._value_to_change)):
self._value_to_change[x] += self._change_val[x]
if self._value_to_change[x] <= 0:
self._value_to_change[x] = 0
if self._value_to_change[x] >= 15:
self._value_to_change[x] = 15
[self.roll, self.pitch, self.throttle, self.yaw
] = self._value_to_change
def roll_right(self):
if self.roll < 15:
self.roll += 1
def pitch_fwd(self):
if self.pitch < 15:
self.pitch += 1
def throttle_up(self):
if self.throttle < 15:
self.throttle += 1
def yaw_right(self):
if self.yaw < 15:
self.yaw += 1
def roll_left(self):
if self.roll > 0:
self.roll -= 1
def pitch_bwd(self):
if self.pitch > 0:
self.pitch -= 1
def throttle_dwn(self):
if self.throttle > 0:
self.throttle -= 1
def yaw_left(self):
if self.yaw > 0:
self.yaw -= 1
def ignite(self):
if self._ignite_flag == False:
self._ignite_flag = True
self._ignite_send = True
self._ignite_timer = time()
def takeoff(self):
if self._takeoff_flag == False:
self.throttle = 12
self._takeoff_flag = True
self._takeoff_timer = time()
def Flag_off(self):
if self._ignite_flag == True:
if time() - self._ignite_timer >= 1 and time(
) - self._ignite_timer < 1.5:
self._ignite_send = False
self.roll = 8
self.pitch = 8
self.yaw = 8
self.throttle = 0
if time() - self._ignite_timer >= 1.5 and time(
) - self._ignite_timer < 2:
self.throttle = 2
if time() - self._ignite_timer >= 2 and time(
) - self._ignite_timer < 2.5:
self.throttle = 4
if time() - self._ignite_timer >= 2.5 and time(
) - self._ignite_timer < 3:
self.throttle = 6
if time() - self._ignite_timer >= 3 and time(
) - self._ignite_timer < 4:
self.throttle = 8
if time() - self._ignite_timer >= 4:
self._ignite_flag = False
self.takeoff()
if self._takeoff_flag == True and time() - self._takeoff_timer >= 4:
self.throttle = 8
self._takeoff_flag = False
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class dm107s:
<|reserved_special_token_0|>
def start(self):
self._thread = Thread(target=self.send_ctrl, args=(), daemon=True)
self._thread.start()
return self
<|reserved_special_token_0|>
def _get_packet(self):
self._hex_code = self.get_hex()
self.package = bytes.fromhex(self._hex_code)
return self.package
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def default(self):
self.roll = 128
self.pitch = 128
self.throttle = 128
self.yaw = 128
self.commands = 0
self.onoff = 1
self._takeoff_flag = False
def incremt(self, rl, pt, th, yw):
self._value_to_change = [128, 128, 128, 128]
self._change_val = [rl, pt, th, yw]
for x in range(len(self._value_to_change)):
self._value_to_change[x] += self._change_val[x]
if self._value_to_change[x] <= 0:
self._value_to_change[x] = 0
if self._value_to_change[x] >= 255:
self._value_to_change[x] = 255
[self.roll, self.pitch, self.throttle, self.yaw
] = self._value_to_change
def roll_right(self):
self.roll += 20
if self.roll > 248:
self.roll = 248
def pitch_fwd(self):
self.pitch += 20
if self.pitch > 248:
self.pitch = 248
def throttle_up(self):
self.throttle += 20
if self.throttle > 248:
self.throttle = 248
def yaw_right(self):
self.yaw -= 20
if self.yaw < 18:
self.yaw = 18
def roll_left(self):
self.roll -= 20
if self.roll < 18:
self.roll = 18
<|reserved_special_token_0|>
def throttle_dwn(self):
self.throttle -= 20
if self.throttle < 18:
self.throttle = 18
def yaw_left(self):
self.yaw += 20
if self.yaw > 248:
self.yaw = 248
<|reserved_special_token_0|>
def land(self):
if self._takeoff_flag == False:
self.commands = 1
self._takeoff_flag = True
self._takeoff_timer = time()
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def calib_gyro(self):
if self._calibrate_flag == False:
self.roll = 128
self.pitch = 128
self.throttle = 128
self.yaw = 128
self.commands = 4
self.onoff = 0
self._calibrate_flag = True
self._calibrate_timer = time()
class naza:
def __init__(self, ip, port):
self.roll = 8
self.pitch = 8
self.throttle = 8
self.yaw = 8
self._takeoff_flag = False
self._ignite_flag = False
self._ignite_send = False
self.sess = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, 0)
self.ip = ip
self.port = port
self._ignite_timer = 0
self._takeoff_timer = 0
self._stopped = False
def start(self):
self._thread = Thread(target=self.send_ctrl, args=(), daemon=True)
self._thread.start()
return self
def get_hex(self):
self.command_out = (self.throttle << 12 | self.yaw << 8 | self.
pitch << 4 | self.roll)
self.command_out = hex(self.command_out)[2:]
return self.command_out
def send_ctrl(self):
while not self._stopped:
if self._ignite_send == True:
ignite_msg = 'st'
self._package = ignite_msg.encode()
else:
self._package = self.get_hex().encode()
self.sess.sendto(self._package, (self.ip, self.port))
self.Flag_off()
sleep(0.05)
def close_connection(self):
self._stopped = True
if self._thread.daemon == False:
self._thread.join()
self.sess.close()
def default(self):
self.roll = 8
self.pitch = 8
self.throttle = 8
self.yaw = 8
self._takeoff_flag = False
self._ignite_flag = False
def incremt(self, rl, pt, th, yw):
self._value_to_change = [8, 8, 8, 8]
self._change_val = [rl, pt, th, yw]
for x in range(len(self._value_to_change)):
self._value_to_change[x] += self._change_val[x]
if self._value_to_change[x] <= 0:
self._value_to_change[x] = 0
if self._value_to_change[x] >= 15:
self._value_to_change[x] = 15
[self.roll, self.pitch, self.throttle, self.yaw
] = self._value_to_change
def roll_right(self):
if self.roll < 15:
self.roll += 1
def pitch_fwd(self):
if self.pitch < 15:
self.pitch += 1
def throttle_up(self):
if self.throttle < 15:
self.throttle += 1
def yaw_right(self):
if self.yaw < 15:
self.yaw += 1
def roll_left(self):
if self.roll > 0:
self.roll -= 1
def pitch_bwd(self):
if self.pitch > 0:
self.pitch -= 1
def throttle_dwn(self):
if self.throttle > 0:
self.throttle -= 1
def yaw_left(self):
if self.yaw > 0:
self.yaw -= 1
def ignite(self):
if self._ignite_flag == False:
self._ignite_flag = True
self._ignite_send = True
self._ignite_timer = time()
def takeoff(self):
if self._takeoff_flag == False:
self.throttle = 12
self._takeoff_flag = True
self._takeoff_timer = time()
def Flag_off(self):
if self._ignite_flag == True:
if time() - self._ignite_timer >= 1 and time(
) - self._ignite_timer < 1.5:
self._ignite_send = False
self.roll = 8
self.pitch = 8
self.yaw = 8
self.throttle = 0
if time() - self._ignite_timer >= 1.5 and time(
) - self._ignite_timer < 2:
self.throttle = 2
if time() - self._ignite_timer >= 2 and time(
) - self._ignite_timer < 2.5:
self.throttle = 4
if time() - self._ignite_timer >= 2.5 and time(
) - self._ignite_timer < 3:
self.throttle = 6
if time() - self._ignite_timer >= 3 and time(
) - self._ignite_timer < 4:
self.throttle = 8
if time() - self._ignite_timer >= 4:
self._ignite_flag = False
self.takeoff()
if self._takeoff_flag == True and time() - self._takeoff_timer >= 4:
self.throttle = 8
self._takeoff_flag = False
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class dm107s:
def __init__(self):
self.roll = 128
self.pitch = 128
self.throttle = 128
self.yaw = 128
self.commands = 0
self.onoff = 1
self._takeoff_flag = False
self._calibrate_flag = False
self.sess = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, 0)
self._takeoff_timer = 0
self._calibrate_timer = 0
self._stopped = False
def start(self):
self._thread = Thread(target=self.send_ctrl, args=(), daemon=True)
self._thread.start()
return self
def get_hex(self):
self.command_out = (26122 << 144 | self.roll << 136 | self.pitch <<
128 | self.throttle << 120 | self.yaw << 112 | self.commands <<
104 | self.onoff * 2 << 96 | 65535 << 80 | (self.roll ^ self.
pitch ^ self.throttle ^ self.yaw ^ self.commands ^ self.onoff *
2) << 8 | 153)
self.command_out = hex(self.command_out)[2:]
return self.command_out
def _get_packet(self):
self._hex_code = self.get_hex()
self.package = bytes.fromhex(self._hex_code)
return self.package
def send_ctrl(self):
while not self._stopped:
self._package = self._get_packet()
self.sess.sendto(self._package, ('192.168.100.1', 19798))
self.Flag_off()
sleep(0.02)
def close_connection(self):
self._stopped = True
if self._thread.daemon == False:
self._thread.join()
self.sess.close()
def default(self):
self.roll = 128
self.pitch = 128
self.throttle = 128
self.yaw = 128
self.commands = 0
self.onoff = 1
self._takeoff_flag = False
def incremt(self, rl, pt, th, yw):
self._value_to_change = [128, 128, 128, 128]
self._change_val = [rl, pt, th, yw]
for x in range(len(self._value_to_change)):
self._value_to_change[x] += self._change_val[x]
if self._value_to_change[x] <= 0:
self._value_to_change[x] = 0
if self._value_to_change[x] >= 255:
self._value_to_change[x] = 255
[self.roll, self.pitch, self.throttle, self.yaw
] = self._value_to_change
def roll_right(self):
self.roll += 20
if self.roll > 248:
self.roll = 248
def pitch_fwd(self):
self.pitch += 20
if self.pitch > 248:
self.pitch = 248
def throttle_up(self):
self.throttle += 20
if self.throttle > 248:
self.throttle = 248
def yaw_right(self):
self.yaw -= 20
if self.yaw < 18:
self.yaw = 18
def roll_left(self):
self.roll -= 20
if self.roll < 18:
self.roll = 18
<|reserved_special_token_0|>
def throttle_dwn(self):
self.throttle -= 20
if self.throttle < 18:
self.throttle = 18
def yaw_left(self):
self.yaw += 20
if self.yaw > 248:
self.yaw = 248
def takeoff(self):
if self._takeoff_flag == False:
self.commands = 1
self._takeoff_flag = True
self._takeoff_timer = time()
def land(self):
if self._takeoff_flag == False:
self.commands = 1
self._takeoff_flag = True
self._takeoff_timer = time()
<|reserved_special_token_0|>
def emergency_stop(self):
self.roll = 128
self.pitch = 128
self.throttle = 128
self.yaw = 128
self.commands = 2
self.onoff = 1
self._takeoff_flag = False
def calib_gyro(self):
if self._calibrate_flag == False:
self.roll = 128
self.pitch = 128
self.throttle = 128
self.yaw = 128
self.commands = 4
self.onoff = 0
self._calibrate_flag = True
self._calibrate_timer = time()
class naza:
def __init__(self, ip, port):
self.roll = 8
self.pitch = 8
self.throttle = 8
self.yaw = 8
self._takeoff_flag = False
self._ignite_flag = False
self._ignite_send = False
self.sess = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, 0)
self.ip = ip
self.port = port
self._ignite_timer = 0
self._takeoff_timer = 0
self._stopped = False
def start(self):
self._thread = Thread(target=self.send_ctrl, args=(), daemon=True)
self._thread.start()
return self
def get_hex(self):
self.command_out = (self.throttle << 12 | self.yaw << 8 | self.
pitch << 4 | self.roll)
self.command_out = hex(self.command_out)[2:]
return self.command_out
def send_ctrl(self):
while not self._stopped:
if self._ignite_send == True:
ignite_msg = 'st'
self._package = ignite_msg.encode()
else:
self._package = self.get_hex().encode()
self.sess.sendto(self._package, (self.ip, self.port))
self.Flag_off()
sleep(0.05)
def close_connection(self):
self._stopped = True
if self._thread.daemon == False:
self._thread.join()
self.sess.close()
def default(self):
self.roll = 8
self.pitch = 8
self.throttle = 8
self.yaw = 8
self._takeoff_flag = False
self._ignite_flag = False
def incremt(self, rl, pt, th, yw):
self._value_to_change = [8, 8, 8, 8]
self._change_val = [rl, pt, th, yw]
for x in range(len(self._value_to_change)):
self._value_to_change[x] += self._change_val[x]
if self._value_to_change[x] <= 0:
self._value_to_change[x] = 0
if self._value_to_change[x] >= 15:
self._value_to_change[x] = 15
[self.roll, self.pitch, self.throttle, self.yaw
] = self._value_to_change
def roll_right(self):
if self.roll < 15:
self.roll += 1
def pitch_fwd(self):
if self.pitch < 15:
self.pitch += 1
def throttle_up(self):
if self.throttle < 15:
self.throttle += 1
def yaw_right(self):
if self.yaw < 15:
self.yaw += 1
def roll_left(self):
if self.roll > 0:
self.roll -= 1
def pitch_bwd(self):
if self.pitch > 0:
self.pitch -= 1
def throttle_dwn(self):
if self.throttle > 0:
self.throttle -= 1
def yaw_left(self):
if self.yaw > 0:
self.yaw -= 1
def ignite(self):
if self._ignite_flag == False:
self._ignite_flag = True
self._ignite_send = True
self._ignite_timer = time()
def takeoff(self):
if self._takeoff_flag == False:
self.throttle = 12
self._takeoff_flag = True
self._takeoff_timer = time()
def Flag_off(self):
if self._ignite_flag == True:
if time() - self._ignite_timer >= 1 and time(
) - self._ignite_timer < 1.5:
self._ignite_send = False
self.roll = 8
self.pitch = 8
self.yaw = 8
self.throttle = 0
if time() - self._ignite_timer >= 1.5 and time(
) - self._ignite_timer < 2:
self.throttle = 2
if time() - self._ignite_timer >= 2 and time(
) - self._ignite_timer < 2.5:
self.throttle = 4
if time() - self._ignite_timer >= 2.5 and time(
) - self._ignite_timer < 3:
self.throttle = 6
if time() - self._ignite_timer >= 3 and time(
) - self._ignite_timer < 4:
self.throttle = 8
if time() - self._ignite_timer >= 4:
self._ignite_flag = False
self.takeoff()
if self._takeoff_flag == True and time() - self._takeoff_timer >= 4:
self.throttle = 8
self._takeoff_flag = False
<|reserved_special_token_1|>
import socket
from time import time, sleep
from threading import Thread
# Define drone
class dm107s():
# Default control value
def __init__(self):
# 4 values for flight
self.roll = 128
self.pitch = 128
self.throttle = 128
self.yaw = 128
        # 0 - normal mode, 1 - takeoff/landing, 2 - emergency stop, 4 - gyroscope calibration
self.commands = 0
# Required for wifi control
self.onoff = 1
# Prevent multiple takeoff button presses
self._takeoff_flag = False
# Prevent multiple calibrate button presses
self._calibrate_flag = False
# Connect to UDP port
self.sess = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, 0)
#self.sess.connect(('192.168.100.1', 19798))
# Initialize timer value
self._takeoff_timer = 0
self._calibrate_timer = 0
# Flag to stop thread
self._stopped = False
# Start separated thread for drone control
def start(self):
self._thread = Thread(target=self.send_ctrl, args=(), daemon=True)
self._thread.start()
return self
# Get command hex for drone
def get_hex(self):
# XOR is for checksum
self.command_out=((26122<<144)|self.roll<<136|self.pitch<<128|self.throttle<<120|self.yaw<<112|self.commands<<104|self.onoff*2<<96|65535<<80|(self.roll^self.pitch^self.throttle^self.yaw^self.commands^(self.onoff*2))<<8|153)
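        # The shifts above yield a fixed 20-byte frame (derived from this code):
        # 66 0a | roll | pitch | throttle | yaw | commands | onoff*2 | ff ff |
        # eight zero bytes | xor checksum | 99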
self.command_out = hex(self.command_out)[2::]
return self.command_out
# Turn hex to byte package
def _get_packet(self):
self._hex_code = self.get_hex()
self.package = bytes.fromhex(self._hex_code)
return self.package
# Send control to drone
def send_ctrl(self):
while not self._stopped:
self._package = self._get_packet()
#self.sess.send(self._package)
self.sess.sendto(self._package, ('192.168.100.1', 19798))
self.Flag_off()
sleep(0.02)
# Close connection to drone
def close_connection(self):
self._stopped = True
if self._thread.daemon == False:
self._thread.join()
self.sess.close()
# Return to default
def default(self):
self.roll = 128
self.pitch = 128
self.throttle = 128
self.yaw = 128
self.commands = 0
self.onoff = 1
self._takeoff_flag = False
# Increment control
def incremt(self, rl, pt, th, yw):
self._value_to_change = [128, 128, 128, 128]
self._change_val = [rl, pt, th, yw]
for x in range(len(self._value_to_change)):
self._value_to_change[x] += self._change_val[x]
if self._value_to_change[x] <= 0:
self._value_to_change[x] = 0
if self._value_to_change[x] >= 255:
self._value_to_change[x] = 255
[self.roll, self.pitch, self.throttle, self.yaw] = self._value_to_change
# Roll right
def roll_right(self):
self.roll += 20
if self.roll > 248:
self.roll = 248
# Pitch forward
def pitch_fwd(self):
self.pitch += 20
if self.pitch > 248:
self.pitch = 248
# Increase throttle
def throttle_up(self):
self.throttle += 20
if self.throttle > 248:
self.throttle = 248
# Yaw right
def yaw_right(self):
self.yaw -= 20
if self.yaw < 18:
self.yaw = 18
# Roll left
def roll_left(self):
self.roll -= 20
if self.roll < 18:
self.roll = 18
# Pitch backward
def pitch_bwd(self):
self.pitch -= 20
if self.pitch < 18:
self.pitch = 18
# Decrease throttle
def throttle_dwn(self):
self.throttle -= 20
if self.throttle < 18:
self.throttle = 18
# Yaw left
def yaw_left(self):
self.yaw += 20
if self.yaw > 248:
self.yaw = 248
# Takeoff
def takeoff(self):
if self._takeoff_flag == False:
self.commands = 1
self._takeoff_flag = True
self._takeoff_timer = time()
# Landing
def land(self):
if self._takeoff_flag == False:
self.commands = 1
self._takeoff_flag = True
self._takeoff_timer = time()
# Flip takeoff flag
def Flag_off(self):
if (self._takeoff_flag == True and (time() - self._takeoff_timer >= 1)):
self.commands = 0
self._takeoff_flag = False
if (self._calibrate_flag == True and (time() - self._calibrate_timer >= 3)):
self.commands = 0
self.onoff = 1
self._calibrate_flag = False
# Stop IMMEDIATELY
def emergency_stop(self):
self.roll = 128
self.pitch = 128
self.throttle = 128
self.yaw = 128
self.commands = 2
self.onoff = 1
self._takeoff_flag = False
# Calibrate gyroscope
def calib_gyro(self):
if self._calibrate_flag == False:
self.roll = 128
self.pitch = 128
self.throttle = 128
self.yaw = 128
self.commands = 4
self.onoff = 0
self._calibrate_flag = True
self._calibrate_timer = time()
class naza():
# Default control value
def __init__(self, ip, port):
# 4 values for flight
self.roll = 8
self.pitch = 8
self.throttle = 8
self.yaw = 8
# Prevent multiple takeoff button presses
self._takeoff_flag = False
# Prevent multiple ignite button presses
self._ignite_flag = False
self._ignite_send = False
# Connect to UDP port
self.sess = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, 0)
self.ip = ip
self.port = port
#self.sess.connect((ip, port))
# Initialize timer value
self._ignite_timer = 0
self._takeoff_timer = 0
# Flag to stop thread
self._stopped = False
# Start separated thread for drone control
def start(self):
self._thread = Thread(target=self.send_ctrl, args=(), daemon=True)
self._thread.start()
return self
# Get command hex for drone
def get_hex(self):
        # pack throttle, yaw, pitch and roll into one nibble each (no checksum here)
self.command_out=(self.throttle<<12|self.yaw<<8|self.pitch<<4|self.roll)
self.command_out = hex(self.command_out)[2::]
return self.command_out
# Send control to drone
def send_ctrl(self):
while not self._stopped:
if self._ignite_send == True:
ignite_msg = 'st'
self._package = ignite_msg.encode()
else:
self._package = self.get_hex().encode()
#self.sess.send(self._package)
self.sess.sendto(self._package, (self.ip, self.port))
self.Flag_off()
sleep(0.05)
# Close connection to drone
def close_connection(self):
self._stopped = True
if self._thread.daemon == False:
self._thread.join()
self.sess.close()
# Return to default
def default(self):
self.roll = 8
self.pitch = 8
self.throttle = 8
self.yaw = 8
self._takeoff_flag = False
self._ignite_flag = False
# Increment control
def incremt(self, rl, pt, th, yw):
self._value_to_change = [8, 8, 8, 8]
self._change_val = [rl, pt, th, yw]
for x in range(len(self._value_to_change)):
self._value_to_change[x] += self._change_val[x]
if self._value_to_change[x] <= 0:
self._value_to_change[x] = 0
if self._value_to_change[x] >= 15:
self._value_to_change[x] = 15
[self.roll, self.pitch, self.throttle, self.yaw] = self._value_to_change
# Roll right
def roll_right(self):
if self.roll < 15:
self.roll += 1
# Pitch forward
def pitch_fwd(self):
if self.pitch < 15:
self.pitch += 1
# Increase throttle
def throttle_up(self):
if self.throttle < 15:
self.throttle += 1
# Yaw right
def yaw_right(self):
if self.yaw < 15:
self.yaw += 1
# Roll left
def roll_left(self):
if self.roll > 0:
self.roll -= 1
# Pitch backward
def pitch_bwd(self):
if self.pitch > 0:
self.pitch -= 1
# Decrease throttle
def throttle_dwn(self):
if self.throttle > 0:
self.throttle -= 1
# Yaw left
def yaw_left(self):
if self.yaw > 0:
self.yaw -= 1
# Start engine
def ignite(self):
if self._ignite_flag == False:
self._ignite_flag = True
self._ignite_send = True
self._ignite_timer = time()
# Takeoff
def takeoff(self):
if self._takeoff_flag == False:
self.throttle = 12
self._takeoff_flag = True
self._takeoff_timer = time()
# Flip takeoff flag
def Flag_off(self):
if self._ignite_flag == True:
if (time() - self._ignite_timer >= 1) and (time() - self._ignite_timer < 1.5):
self._ignite_send = False
self.roll = 8
self.pitch = 8
self.yaw = 8
self.throttle = 0
# Warming up engine
if (time() - self._ignite_timer >= 1.5) and (time() - self._ignite_timer < 2):
self.throttle = 2
if (time() - self._ignite_timer >= 2) and (time() - self._ignite_timer < 2.5):
self.throttle = 4
if (time() - self._ignite_timer >= 2.5) and (time() - self._ignite_timer < 3):
self.throttle = 6
if (time() - self._ignite_timer >= 3) and (time() - self._ignite_timer < 4):
self.throttle = 8
# After starting engine, takeoff after 4s
if (time() - self._ignite_timer >= 4):
self._ignite_flag = False
self.takeoff()
if (self._takeoff_flag == True and (time() - self._takeoff_timer >= 4)):
self.throttle = 8
self._takeoff_flag = False
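# --- Usage sketch (illustrative only; not part of the original module) ---
# A hypothetical control sequence for the dm107s class above; the timings are
# arbitrary examples and the behaviour notes are derived from the methods.
if __name__ == '__main__':
    drone = dm107s().start()    # spawn the 50 Hz control thread
    drone.takeoff()             # sends command 1 for ~1 s, then resets itself
    sleep(5)                    # hover on centred sticks
    drone.throttle_up()         # nudge throttle by +20 (clamped at 248)
    sleep(2)
    drone.land()                # same command path as takeoff in this protocol
    sleep(2)
    drone.close_connection()    # stop the thread and close the UDP socket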
|
flexible
|
{
"blob_id": "ee8e117db0348aa37d6aa37e6c06255101f1cff4",
"index": 2752,
"step-1": "<mask token>\n\n\nclass dm107s:\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def incremt(self, rl, pt, th, yw):\n self._value_to_change = [128, 128, 128, 128]\n self._change_val = [rl, pt, th, yw]\n for x in range(len(self._value_to_change)):\n self._value_to_change[x] += self._change_val[x]\n if self._value_to_change[x] <= 0:\n self._value_to_change[x] = 0\n if self._value_to_change[x] >= 255:\n self._value_to_change[x] = 255\n [self.roll, self.pitch, self.throttle, self.yaw\n ] = self._value_to_change\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def throttle_dwn(self):\n self.throttle -= 20\n if self.throttle < 18:\n self.throttle = 18\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def calib_gyro(self):\n if self._calibrate_flag == False:\n self.roll = 128\n self.pitch = 128\n self.throttle = 128\n self.yaw = 128\n self.commands = 4\n self.onoff = 0\n self._calibrate_flag = True\n self._calibrate_timer = time()\n\n\nclass naza:\n\n def __init__(self, ip, port):\n self.roll = 8\n self.pitch = 8\n self.throttle = 8\n self.yaw = 8\n self._takeoff_flag = False\n self._ignite_flag = False\n self._ignite_send = False\n self.sess = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, 0)\n self.ip = ip\n self.port = port\n self._ignite_timer = 0\n self._takeoff_timer = 0\n self._stopped = False\n\n def start(self):\n self._thread = Thread(target=self.send_ctrl, args=(), daemon=True)\n self._thread.start()\n return self\n\n def get_hex(self):\n self.command_out = (self.throttle << 12 | self.yaw << 8 | self.\n pitch << 4 | self.roll)\n self.command_out = hex(self.command_out)[2:]\n return self.command_out\n\n def send_ctrl(self):\n while not self._stopped:\n if self._ignite_send == True:\n ignite_msg = 'st'\n self._package = ignite_msg.encode()\n else:\n self._package = self.get_hex().encode()\n self.sess.sendto(self._package, (self.ip, self.port))\n self.Flag_off()\n sleep(0.05)\n\n def close_connection(self):\n self._stopped = True\n if self._thread.daemon == False:\n self._thread.join()\n self.sess.close()\n\n def default(self):\n self.roll = 8\n self.pitch = 8\n self.throttle = 8\n self.yaw = 8\n self._takeoff_flag = False\n self._ignite_flag = False\n\n def incremt(self, rl, pt, th, yw):\n self._value_to_change = [8, 8, 8, 8]\n self._change_val = [rl, pt, th, yw]\n for x in range(len(self._value_to_change)):\n self._value_to_change[x] += self._change_val[x]\n if self._value_to_change[x] <= 0:\n self._value_to_change[x] = 0\n if self._value_to_change[x] >= 15:\n self._value_to_change[x] = 15\n [self.roll, self.pitch, self.throttle, self.yaw\n ] = self._value_to_change\n\n def roll_right(self):\n if self.roll < 15:\n self.roll += 1\n\n def pitch_fwd(self):\n if self.pitch < 15:\n self.pitch += 1\n\n def throttle_up(self):\n if self.throttle < 15:\n self.throttle += 1\n\n def yaw_right(self):\n if self.yaw < 15:\n self.yaw += 1\n\n def roll_left(self):\n if self.roll > 0:\n self.roll -= 1\n\n def pitch_bwd(self):\n if self.pitch > 0:\n self.pitch -= 1\n\n def throttle_dwn(self):\n if self.throttle > 0:\n self.throttle -= 1\n\n def yaw_left(self):\n if self.yaw > 0:\n self.yaw -= 1\n\n def ignite(self):\n if self._ignite_flag == False:\n self._ignite_flag = True\n self._ignite_send = True\n self._ignite_timer = time()\n\n def takeoff(self):\n if self._takeoff_flag == False:\n self.throttle = 12\n self._takeoff_flag = True\n self._takeoff_timer = 
time()\n\n def Flag_off(self):\n if self._ignite_flag == True:\n if time() - self._ignite_timer >= 1 and time(\n ) - self._ignite_timer < 1.5:\n self._ignite_send = False\n self.roll = 8\n self.pitch = 8\n self.yaw = 8\n self.throttle = 0\n if time() - self._ignite_timer >= 1.5 and time(\n ) - self._ignite_timer < 2:\n self.throttle = 2\n if time() - self._ignite_timer >= 2 and time(\n ) - self._ignite_timer < 2.5:\n self.throttle = 4\n if time() - self._ignite_timer >= 2.5 and time(\n ) - self._ignite_timer < 3:\n self.throttle = 6\n if time() - self._ignite_timer >= 3 and time(\n ) - self._ignite_timer < 4:\n self.throttle = 8\n if time() - self._ignite_timer >= 4:\n self._ignite_flag = False\n self.takeoff()\n if self._takeoff_flag == True and time() - self._takeoff_timer >= 4:\n self.throttle = 8\n self._takeoff_flag = False\n",
"step-2": "<mask token>\n\n\nclass dm107s:\n <mask token>\n\n def start(self):\n self._thread = Thread(target=self.send_ctrl, args=(), daemon=True)\n self._thread.start()\n return self\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def default(self):\n self.roll = 128\n self.pitch = 128\n self.throttle = 128\n self.yaw = 128\n self.commands = 0\n self.onoff = 1\n self._takeoff_flag = False\n\n def incremt(self, rl, pt, th, yw):\n self._value_to_change = [128, 128, 128, 128]\n self._change_val = [rl, pt, th, yw]\n for x in range(len(self._value_to_change)):\n self._value_to_change[x] += self._change_val[x]\n if self._value_to_change[x] <= 0:\n self._value_to_change[x] = 0\n if self._value_to_change[x] >= 255:\n self._value_to_change[x] = 255\n [self.roll, self.pitch, self.throttle, self.yaw\n ] = self._value_to_change\n\n def roll_right(self):\n self.roll += 20\n if self.roll > 248:\n self.roll = 248\n <mask token>\n\n def throttle_up(self):\n self.throttle += 20\n if self.throttle > 248:\n self.throttle = 248\n\n def yaw_right(self):\n self.yaw -= 20\n if self.yaw < 18:\n self.yaw = 18\n <mask token>\n <mask token>\n\n def throttle_dwn(self):\n self.throttle -= 20\n if self.throttle < 18:\n self.throttle = 18\n\n def yaw_left(self):\n self.yaw += 20\n if self.yaw > 248:\n self.yaw = 248\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def calib_gyro(self):\n if self._calibrate_flag == False:\n self.roll = 128\n self.pitch = 128\n self.throttle = 128\n self.yaw = 128\n self.commands = 4\n self.onoff = 0\n self._calibrate_flag = True\n self._calibrate_timer = time()\n\n\nclass naza:\n\n def __init__(self, ip, port):\n self.roll = 8\n self.pitch = 8\n self.throttle = 8\n self.yaw = 8\n self._takeoff_flag = False\n self._ignite_flag = False\n self._ignite_send = False\n self.sess = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, 0)\n self.ip = ip\n self.port = port\n self._ignite_timer = 0\n self._takeoff_timer = 0\n self._stopped = False\n\n def start(self):\n self._thread = Thread(target=self.send_ctrl, args=(), daemon=True)\n self._thread.start()\n return self\n\n def get_hex(self):\n self.command_out = (self.throttle << 12 | self.yaw << 8 | self.\n pitch << 4 | self.roll)\n self.command_out = hex(self.command_out)[2:]\n return self.command_out\n\n def send_ctrl(self):\n while not self._stopped:\n if self._ignite_send == True:\n ignite_msg = 'st'\n self._package = ignite_msg.encode()\n else:\n self._package = self.get_hex().encode()\n self.sess.sendto(self._package, (self.ip, self.port))\n self.Flag_off()\n sleep(0.05)\n\n def close_connection(self):\n self._stopped = True\n if self._thread.daemon == False:\n self._thread.join()\n self.sess.close()\n\n def default(self):\n self.roll = 8\n self.pitch = 8\n self.throttle = 8\n self.yaw = 8\n self._takeoff_flag = False\n self._ignite_flag = False\n\n def incremt(self, rl, pt, th, yw):\n self._value_to_change = [8, 8, 8, 8]\n self._change_val = [rl, pt, th, yw]\n for x in range(len(self._value_to_change)):\n self._value_to_change[x] += self._change_val[x]\n if self._value_to_change[x] <= 0:\n self._value_to_change[x] = 0\n if self._value_to_change[x] >= 15:\n self._value_to_change[x] = 15\n [self.roll, self.pitch, self.throttle, self.yaw\n ] = self._value_to_change\n\n def roll_right(self):\n if self.roll < 15:\n self.roll += 1\n\n def pitch_fwd(self):\n if self.pitch < 15:\n self.pitch += 1\n\n def throttle_up(self):\n if self.throttle < 15:\n self.throttle += 1\n\n def yaw_right(self):\n if self.yaw < 15:\n 
self.yaw += 1\n\n def roll_left(self):\n if self.roll > 0:\n self.roll -= 1\n\n def pitch_bwd(self):\n if self.pitch > 0:\n self.pitch -= 1\n\n def throttle_dwn(self):\n if self.throttle > 0:\n self.throttle -= 1\n\n def yaw_left(self):\n if self.yaw > 0:\n self.yaw -= 1\n\n def ignite(self):\n if self._ignite_flag == False:\n self._ignite_flag = True\n self._ignite_send = True\n self._ignite_timer = time()\n\n def takeoff(self):\n if self._takeoff_flag == False:\n self.throttle = 12\n self._takeoff_flag = True\n self._takeoff_timer = time()\n\n def Flag_off(self):\n if self._ignite_flag == True:\n if time() - self._ignite_timer >= 1 and time(\n ) - self._ignite_timer < 1.5:\n self._ignite_send = False\n self.roll = 8\n self.pitch = 8\n self.yaw = 8\n self.throttle = 0\n if time() - self._ignite_timer >= 1.5 and time(\n ) - self._ignite_timer < 2:\n self.throttle = 2\n if time() - self._ignite_timer >= 2 and time(\n ) - self._ignite_timer < 2.5:\n self.throttle = 4\n if time() - self._ignite_timer >= 2.5 and time(\n ) - self._ignite_timer < 3:\n self.throttle = 6\n if time() - self._ignite_timer >= 3 and time(\n ) - self._ignite_timer < 4:\n self.throttle = 8\n if time() - self._ignite_timer >= 4:\n self._ignite_flag = False\n self.takeoff()\n if self._takeoff_flag == True and time() - self._takeoff_timer >= 4:\n self.throttle = 8\n self._takeoff_flag = False\n",
"step-3": "<mask token>\n\n\nclass dm107s:\n <mask token>\n\n def start(self):\n self._thread = Thread(target=self.send_ctrl, args=(), daemon=True)\n self._thread.start()\n return self\n <mask token>\n\n def _get_packet(self):\n self._hex_code = self.get_hex()\n self.package = bytes.fromhex(self._hex_code)\n return self.package\n <mask token>\n <mask token>\n\n def default(self):\n self.roll = 128\n self.pitch = 128\n self.throttle = 128\n self.yaw = 128\n self.commands = 0\n self.onoff = 1\n self._takeoff_flag = False\n\n def incremt(self, rl, pt, th, yw):\n self._value_to_change = [128, 128, 128, 128]\n self._change_val = [rl, pt, th, yw]\n for x in range(len(self._value_to_change)):\n self._value_to_change[x] += self._change_val[x]\n if self._value_to_change[x] <= 0:\n self._value_to_change[x] = 0\n if self._value_to_change[x] >= 255:\n self._value_to_change[x] = 255\n [self.roll, self.pitch, self.throttle, self.yaw\n ] = self._value_to_change\n\n def roll_right(self):\n self.roll += 20\n if self.roll > 248:\n self.roll = 248\n\n def pitch_fwd(self):\n self.pitch += 20\n if self.pitch > 248:\n self.pitch = 248\n\n def throttle_up(self):\n self.throttle += 20\n if self.throttle > 248:\n self.throttle = 248\n\n def yaw_right(self):\n self.yaw -= 20\n if self.yaw < 18:\n self.yaw = 18\n\n def roll_left(self):\n self.roll -= 20\n if self.roll < 18:\n self.roll = 18\n <mask token>\n\n def throttle_dwn(self):\n self.throttle -= 20\n if self.throttle < 18:\n self.throttle = 18\n\n def yaw_left(self):\n self.yaw += 20\n if self.yaw > 248:\n self.yaw = 248\n <mask token>\n\n def land(self):\n if self._takeoff_flag == False:\n self.commands = 1\n self._takeoff_flag = True\n self._takeoff_timer = time()\n <mask token>\n <mask token>\n\n def calib_gyro(self):\n if self._calibrate_flag == False:\n self.roll = 128\n self.pitch = 128\n self.throttle = 128\n self.yaw = 128\n self.commands = 4\n self.onoff = 0\n self._calibrate_flag = True\n self._calibrate_timer = time()\n\n\nclass naza:\n\n def __init__(self, ip, port):\n self.roll = 8\n self.pitch = 8\n self.throttle = 8\n self.yaw = 8\n self._takeoff_flag = False\n self._ignite_flag = False\n self._ignite_send = False\n self.sess = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, 0)\n self.ip = ip\n self.port = port\n self._ignite_timer = 0\n self._takeoff_timer = 0\n self._stopped = False\n\n def start(self):\n self._thread = Thread(target=self.send_ctrl, args=(), daemon=True)\n self._thread.start()\n return self\n\n def get_hex(self):\n self.command_out = (self.throttle << 12 | self.yaw << 8 | self.\n pitch << 4 | self.roll)\n self.command_out = hex(self.command_out)[2:]\n return self.command_out\n\n def send_ctrl(self):\n while not self._stopped:\n if self._ignite_send == True:\n ignite_msg = 'st'\n self._package = ignite_msg.encode()\n else:\n self._package = self.get_hex().encode()\n self.sess.sendto(self._package, (self.ip, self.port))\n self.Flag_off()\n sleep(0.05)\n\n def close_connection(self):\n self._stopped = True\n if self._thread.daemon == False:\n self._thread.join()\n self.sess.close()\n\n def default(self):\n self.roll = 8\n self.pitch = 8\n self.throttle = 8\n self.yaw = 8\n self._takeoff_flag = False\n self._ignite_flag = False\n\n def incremt(self, rl, pt, th, yw):\n self._value_to_change = [8, 8, 8, 8]\n self._change_val = [rl, pt, th, yw]\n for x in range(len(self._value_to_change)):\n self._value_to_change[x] += self._change_val[x]\n if self._value_to_change[x] <= 0:\n self._value_to_change[x] = 0\n if 
self._value_to_change[x] >= 15:\n self._value_to_change[x] = 15\n [self.roll, self.pitch, self.throttle, self.yaw\n ] = self._value_to_change\n\n def roll_right(self):\n if self.roll < 15:\n self.roll += 1\n\n def pitch_fwd(self):\n if self.pitch < 15:\n self.pitch += 1\n\n def throttle_up(self):\n if self.throttle < 15:\n self.throttle += 1\n\n def yaw_right(self):\n if self.yaw < 15:\n self.yaw += 1\n\n def roll_left(self):\n if self.roll > 0:\n self.roll -= 1\n\n def pitch_bwd(self):\n if self.pitch > 0:\n self.pitch -= 1\n\n def throttle_dwn(self):\n if self.throttle > 0:\n self.throttle -= 1\n\n def yaw_left(self):\n if self.yaw > 0:\n self.yaw -= 1\n\n def ignite(self):\n if self._ignite_flag == False:\n self._ignite_flag = True\n self._ignite_send = True\n self._ignite_timer = time()\n\n def takeoff(self):\n if self._takeoff_flag == False:\n self.throttle = 12\n self._takeoff_flag = True\n self._takeoff_timer = time()\n\n def Flag_off(self):\n if self._ignite_flag == True:\n if time() - self._ignite_timer >= 1 and time(\n ) - self._ignite_timer < 1.5:\n self._ignite_send = False\n self.roll = 8\n self.pitch = 8\n self.yaw = 8\n self.throttle = 0\n if time() - self._ignite_timer >= 1.5 and time(\n ) - self._ignite_timer < 2:\n self.throttle = 2\n if time() - self._ignite_timer >= 2 and time(\n ) - self._ignite_timer < 2.5:\n self.throttle = 4\n if time() - self._ignite_timer >= 2.5 and time(\n ) - self._ignite_timer < 3:\n self.throttle = 6\n if time() - self._ignite_timer >= 3 and time(\n ) - self._ignite_timer < 4:\n self.throttle = 8\n if time() - self._ignite_timer >= 4:\n self._ignite_flag = False\n self.takeoff()\n if self._takeoff_flag == True and time() - self._takeoff_timer >= 4:\n self.throttle = 8\n self._takeoff_flag = False\n",
"step-4": "<mask token>\n\n\nclass dm107s:\n\n def __init__(self):\n self.roll = 128\n self.pitch = 128\n self.throttle = 128\n self.yaw = 128\n self.commands = 0\n self.onoff = 1\n self._takeoff_flag = False\n self._calibrate_flag = False\n self.sess = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, 0)\n self._takeoff_timer = 0\n self._calibrate_timer = 0\n self._stopped = False\n\n def start(self):\n self._thread = Thread(target=self.send_ctrl, args=(), daemon=True)\n self._thread.start()\n return self\n\n def get_hex(self):\n self.command_out = (26122 << 144 | self.roll << 136 | self.pitch <<\n 128 | self.throttle << 120 | self.yaw << 112 | self.commands <<\n 104 | self.onoff * 2 << 96 | 65535 << 80 | (self.roll ^ self.\n pitch ^ self.throttle ^ self.yaw ^ self.commands ^ self.onoff *\n 2) << 8 | 153)\n self.command_out = hex(self.command_out)[2:]\n return self.command_out\n\n def _get_packet(self):\n self._hex_code = self.get_hex()\n self.package = bytes.fromhex(self._hex_code)\n return self.package\n\n def send_ctrl(self):\n while not self._stopped:\n self._package = self._get_packet()\n self.sess.sendto(self._package, ('192.168.100.1', 19798))\n self.Flag_off()\n sleep(0.02)\n\n def close_connection(self):\n self._stopped = True\n if self._thread.daemon == False:\n self._thread.join()\n self.sess.close()\n\n def default(self):\n self.roll = 128\n self.pitch = 128\n self.throttle = 128\n self.yaw = 128\n self.commands = 0\n self.onoff = 1\n self._takeoff_flag = False\n\n def incremt(self, rl, pt, th, yw):\n self._value_to_change = [128, 128, 128, 128]\n self._change_val = [rl, pt, th, yw]\n for x in range(len(self._value_to_change)):\n self._value_to_change[x] += self._change_val[x]\n if self._value_to_change[x] <= 0:\n self._value_to_change[x] = 0\n if self._value_to_change[x] >= 255:\n self._value_to_change[x] = 255\n [self.roll, self.pitch, self.throttle, self.yaw\n ] = self._value_to_change\n\n def roll_right(self):\n self.roll += 20\n if self.roll > 248:\n self.roll = 248\n\n def pitch_fwd(self):\n self.pitch += 20\n if self.pitch > 248:\n self.pitch = 248\n\n def throttle_up(self):\n self.throttle += 20\n if self.throttle > 248:\n self.throttle = 248\n\n def yaw_right(self):\n self.yaw -= 20\n if self.yaw < 18:\n self.yaw = 18\n\n def roll_left(self):\n self.roll -= 20\n if self.roll < 18:\n self.roll = 18\n <mask token>\n\n def throttle_dwn(self):\n self.throttle -= 20\n if self.throttle < 18:\n self.throttle = 18\n\n def yaw_left(self):\n self.yaw += 20\n if self.yaw > 248:\n self.yaw = 248\n\n def takeoff(self):\n if self._takeoff_flag == False:\n self.commands = 1\n self._takeoff_flag = True\n self._takeoff_timer = time()\n\n def land(self):\n if self._takeoff_flag == False:\n self.commands = 1\n self._takeoff_flag = True\n self._takeoff_timer = time()\n <mask token>\n\n def emergency_stop(self):\n self.roll = 128\n self.pitch = 128\n self.throttle = 128\n self.yaw = 128\n self.commands = 2\n self.onoff = 1\n self._takeoff_flag = False\n\n def calib_gyro(self):\n if self._calibrate_flag == False:\n self.roll = 128\n self.pitch = 128\n self.throttle = 128\n self.yaw = 128\n self.commands = 4\n self.onoff = 0\n self._calibrate_flag = True\n self._calibrate_timer = time()\n\n\nclass naza:\n\n def __init__(self, ip, port):\n self.roll = 8\n self.pitch = 8\n self.throttle = 8\n self.yaw = 8\n self._takeoff_flag = False\n self._ignite_flag = False\n self._ignite_send = False\n self.sess = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, 0)\n self.ip = ip\n self.port = port\n 
self._ignite_timer = 0\n self._takeoff_timer = 0\n self._stopped = False\n\n def start(self):\n self._thread = Thread(target=self.send_ctrl, args=(), daemon=True)\n self._thread.start()\n return self\n\n def get_hex(self):\n self.command_out = (self.throttle << 12 | self.yaw << 8 | self.\n pitch << 4 | self.roll)\n self.command_out = hex(self.command_out)[2:]\n return self.command_out\n\n def send_ctrl(self):\n while not self._stopped:\n if self._ignite_send == True:\n ignite_msg = 'st'\n self._package = ignite_msg.encode()\n else:\n self._package = self.get_hex().encode()\n self.sess.sendto(self._package, (self.ip, self.port))\n self.Flag_off()\n sleep(0.05)\n\n def close_connection(self):\n self._stopped = True\n if self._thread.daemon == False:\n self._thread.join()\n self.sess.close()\n\n def default(self):\n self.roll = 8\n self.pitch = 8\n self.throttle = 8\n self.yaw = 8\n self._takeoff_flag = False\n self._ignite_flag = False\n\n def incremt(self, rl, pt, th, yw):\n self._value_to_change = [8, 8, 8, 8]\n self._change_val = [rl, pt, th, yw]\n for x in range(len(self._value_to_change)):\n self._value_to_change[x] += self._change_val[x]\n if self._value_to_change[x] <= 0:\n self._value_to_change[x] = 0\n if self._value_to_change[x] >= 15:\n self._value_to_change[x] = 15\n [self.roll, self.pitch, self.throttle, self.yaw\n ] = self._value_to_change\n\n def roll_right(self):\n if self.roll < 15:\n self.roll += 1\n\n def pitch_fwd(self):\n if self.pitch < 15:\n self.pitch += 1\n\n def throttle_up(self):\n if self.throttle < 15:\n self.throttle += 1\n\n def yaw_right(self):\n if self.yaw < 15:\n self.yaw += 1\n\n def roll_left(self):\n if self.roll > 0:\n self.roll -= 1\n\n def pitch_bwd(self):\n if self.pitch > 0:\n self.pitch -= 1\n\n def throttle_dwn(self):\n if self.throttle > 0:\n self.throttle -= 1\n\n def yaw_left(self):\n if self.yaw > 0:\n self.yaw -= 1\n\n def ignite(self):\n if self._ignite_flag == False:\n self._ignite_flag = True\n self._ignite_send = True\n self._ignite_timer = time()\n\n def takeoff(self):\n if self._takeoff_flag == False:\n self.throttle = 12\n self._takeoff_flag = True\n self._takeoff_timer = time()\n\n def Flag_off(self):\n if self._ignite_flag == True:\n if time() - self._ignite_timer >= 1 and time(\n ) - self._ignite_timer < 1.5:\n self._ignite_send = False\n self.roll = 8\n self.pitch = 8\n self.yaw = 8\n self.throttle = 0\n if time() - self._ignite_timer >= 1.5 and time(\n ) - self._ignite_timer < 2:\n self.throttle = 2\n if time() - self._ignite_timer >= 2 and time(\n ) - self._ignite_timer < 2.5:\n self.throttle = 4\n if time() - self._ignite_timer >= 2.5 and time(\n ) - self._ignite_timer < 3:\n self.throttle = 6\n if time() - self._ignite_timer >= 3 and time(\n ) - self._ignite_timer < 4:\n self.throttle = 8\n if time() - self._ignite_timer >= 4:\n self._ignite_flag = False\n self.takeoff()\n if self._takeoff_flag == True and time() - self._takeoff_timer >= 4:\n self.throttle = 8\n self._takeoff_flag = False\n",
"step-5": "import socket\nfrom time import time, sleep\nfrom threading import Thread\n\n# Define drone\nclass dm107s():\n # Default control value\n def __init__(self):\n # 4 values for flight\n self.roll = 128\n self.pitch = 128\n self.throttle = 128\n self.yaw = 128\n # 0 - normal mode, 2 - emergency stop, 4 - gyroscope calibration\n self.commands = 0\n # Required for wifi control\n self.onoff = 1\n # Prevent multiple takeoff button presses\n self._takeoff_flag = False\n # Prevent multiple calibrate button presses\n self._calibrate_flag = False\n # Connect to UDP port\n self.sess = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, 0)\n #self.sess.connect(('192.168.100.1', 19798))\n # Initialize timer value\n self._takeoff_timer = 0\n self._calibrate_timer = 0\n # Flag to stop thread\n self._stopped = False\n \n # Start separated thread for drone control\n def start(self): \n self._thread = Thread(target=self.send_ctrl, args=(), daemon=True)\n self._thread.start()\n return self\n \n # Get command hex for drone\n def get_hex(self):\n # XOR is for checksum\n self.command_out=((26122<<144)|self.roll<<136|self.pitch<<128|self.throttle<<120|self.yaw<<112|self.commands<<104|self.onoff*2<<96|65535<<80|(self.roll^self.pitch^self.throttle^self.yaw^self.commands^(self.onoff*2))<<8|153)\n self.command_out = hex(self.command_out)[2::]\n return self.command_out\n \n # Turn hex to byte package\n def _get_packet(self):\n self._hex_code = self.get_hex()\n self.package = bytes.fromhex(self._hex_code)\n return self.package\n \n # Send control to drone\n def send_ctrl(self):\n while not self._stopped:\n self._package = self._get_packet()\n #self.sess.send(self._package)\n self.sess.sendto(self._package, ('192.168.100.1', 19798))\n self.Flag_off()\n sleep(0.02)\n \n # Close connection to drone\n def close_connection(self):\n self._stopped = True\n if self._thread.daemon == False:\n self._thread.join()\n self.sess.close()\n \n # Return to default\n def default(self):\n self.roll = 128\n self.pitch = 128\n self.throttle = 128\n self.yaw = 128\n self.commands = 0\n self.onoff = 1\n self._takeoff_flag = False\n \n # Increment control\n def incremt(self, rl, pt, th, yw):\n self._value_to_change = [128, 128, 128, 128]\n self._change_val = [rl, pt, th, yw]\n for x in range(len(self._value_to_change)):\n self._value_to_change[x] += self._change_val[x]\n if self._value_to_change[x] <= 0:\n self._value_to_change[x] = 0\n if self._value_to_change[x] >= 255:\n self._value_to_change[x] = 255\n [self.roll, self.pitch, self.throttle, self.yaw] = self._value_to_change\n \n # Roll right\n def roll_right(self):\n self.roll += 20\n if self.roll > 248:\n self.roll = 248\n \n # Pitch forward\n def pitch_fwd(self):\n self.pitch += 20\n if self.pitch > 248:\n self.pitch = 248\n \n # Increase throttle\n def throttle_up(self):\n self.throttle += 20\n if self.throttle > 248:\n self.throttle = 248\n \n # Yaw right\n def yaw_right(self):\n self.yaw -= 20\n if self.yaw < 18:\n self.yaw = 18\n \n # Roll left\n def roll_left(self):\n self.roll -= 20\n if self.roll < 18:\n self.roll = 18\n \n # Pitch backward\n def pitch_bwd(self):\n self.pitch -= 20\n if self.pitch < 18:\n self.pitch = 18\n \n # Decrease throttle\n def throttle_dwn(self):\n self.throttle -= 20\n if self.throttle < 18:\n self.throttle = 18\n \n # Yaw left\n def yaw_left(self):\n self.yaw += 20\n if self.yaw > 248:\n self.yaw = 248\n \n # Takeoff\n def takeoff(self):\n if self._takeoff_flag == False:\n self.commands = 1\n self._takeoff_flag = True\n self._takeoff_timer = 
time()\n \n # Landing\n def land(self):\n if self._takeoff_flag == False:\n self.commands = 1\n self._takeoff_flag = True\n self._takeoff_timer = time()\n \n # Flip takeoff flag\n def Flag_off(self):\n if (self._takeoff_flag == True and (time() - self._takeoff_timer >= 1)):\n self.commands = 0\n self._takeoff_flag = False\n if (self._calibrate_flag == True and (time() - self._calibrate_timer >= 3)):\n self.commands = 0\n self.onoff = 1\n self._calibrate_flag = False\n\n # Stop IMMEDIATELY\n def emergency_stop(self):\n self.roll = 128\n self.pitch = 128\n self.throttle = 128\n self.yaw = 128\n self.commands = 2\n self.onoff = 1\n self._takeoff_flag = False\n \n # Calibrate gyroscope\n def calib_gyro(self):\n if self._calibrate_flag == False:\n self.roll = 128\n self.pitch = 128\n self.throttle = 128\n self.yaw = 128\n self.commands = 4\n self.onoff = 0\n self._calibrate_flag = True\n self._calibrate_timer = time()\n\nclass naza():\n # Default control value\n def __init__(self, ip, port):\n # 4 values for flight\n self.roll = 8\n self.pitch = 8\n self.throttle = 8\n self.yaw = 8\n # Prevent multiple takeoff button presses\n self._takeoff_flag = False\n # Prevent multiple ignite button presses\n self._ignite_flag = False\n self._ignite_send = False\n # Connect to UDP port\n self.sess = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, 0)\n self.ip = ip\n self.port = port\n #self.sess.connect((ip, port))\n # Initialize timer value\n self._ignite_timer = 0\n self._takeoff_timer = 0\n # Flag to stop thread\n self._stopped = False\n \n # Start separated thread for drone control\n def start(self): \n self._thread = Thread(target=self.send_ctrl, args=(), daemon=True)\n self._thread.start()\n return self\n \n # Get command hex for drone\n def get_hex(self):\n # XOR is for checksum\n self.command_out=(self.throttle<<12|self.yaw<<8|self.pitch<<4|self.roll)\n self.command_out = hex(self.command_out)[2::]\n return self.command_out\n \n # Send control to drone\n def send_ctrl(self):\n while not self._stopped:\n if self._ignite_send == True:\n ignite_msg = 'st'\n self._package = ignite_msg.encode()\n else:\n self._package = self.get_hex().encode()\n #self.sess.send(self._package)\n self.sess.sendto(self._package, (self.ip, self.port))\n self.Flag_off()\n sleep(0.05)\n \n # Close connection to drone\n def close_connection(self):\n self._stopped = True\n if self._thread.daemon == False:\n self._thread.join()\n self.sess.close()\n \n # Return to default\n def default(self):\n self.roll = 8\n self.pitch = 8\n self.throttle = 8\n self.yaw = 8\n self._takeoff_flag = False\n self._ignite_flag = False\n \n # Increment control\n def incremt(self, rl, pt, th, yw):\n self._value_to_change = [8, 8, 8, 8]\n self._change_val = [rl, pt, th, yw]\n for x in range(len(self._value_to_change)):\n self._value_to_change[x] += self._change_val[x]\n if self._value_to_change[x] <= 0:\n self._value_to_change[x] = 0\n if self._value_to_change[x] >= 15:\n self._value_to_change[x] = 15\n [self.roll, self.pitch, self.throttle, self.yaw] = self._value_to_change\n \n # Roll right\n def roll_right(self):\n if self.roll < 15:\n self.roll += 1\n \n # Pitch forward\n def pitch_fwd(self):\n if self.pitch < 15:\n self.pitch += 1\n \n # Increase throttle\n def throttle_up(self):\n if self.throttle < 15:\n self.throttle += 1\n \n # Yaw right\n def yaw_right(self):\n if self.yaw < 15:\n self.yaw += 1\n \n # Roll left\n def roll_left(self):\n if self.roll > 0:\n self.roll -= 1\n \n # Pitch backward\n def pitch_bwd(self):\n if self.pitch > 0:\n 
self.pitch -= 1\n \n # Decrease throttle\n def throttle_dwn(self):\n if self.throttle > 0:\n self.throttle -= 1\n \n # Yaw left\n def yaw_left(self):\n if self.yaw > 0:\n self.yaw -= 1\n \n # Start engine\n def ignite(self):\n if self._ignite_flag == False:\n self._ignite_flag = True\n self._ignite_send = True\n self._ignite_timer = time()\n \n # Takeoff\n def takeoff(self):\n if self._takeoff_flag == False:\n self.throttle = 12\n self._takeoff_flag = True\n self._takeoff_timer = time()\n \n # Flip takeoff flag\n def Flag_off(self):\n if self._ignite_flag == True:\n if (time() - self._ignite_timer >= 1) and (time() - self._ignite_timer < 1.5):\n self._ignite_send = False\n self.roll = 8\n self.pitch = 8\n self.yaw = 8\n self.throttle = 0\n # Warming up engine\n if (time() - self._ignite_timer >= 1.5) and (time() - self._ignite_timer < 2):\n self.throttle = 2\n if (time() - self._ignite_timer >= 2) and (time() - self._ignite_timer < 2.5):\n self.throttle = 4\n if (time() - self._ignite_timer >= 2.5) and (time() - self._ignite_timer < 3):\n self.throttle = 6\n if (time() - self._ignite_timer >= 3) and (time() - self._ignite_timer < 4):\n self.throttle = 8\n # After starting engine, takeoff after 4s\n if (time() - self._ignite_timer >= 4):\n self._ignite_flag = False\n self.takeoff()\n if (self._takeoff_flag == True and (time() - self._takeoff_timer >= 4)):\n self.throttle = 8\n self._takeoff_flag = False\n",
"step-ids": [
23,
29,
33,
39,
43
]
}
|
[
23,
29,
33,
39,
43
] |
<|reserved_special_token_0|>
def index():
return 'too many secrets', 200, {'Content-Type':
'text/plain; charset=utf-8'}
def get_at():
return oidc.get_access_token(), 200, {'Content-Type':
'text/plain; charset=utf-8'}
<|reserved_special_token_0|>
def get_test1():
return 'successful call to test1', 200, {'Content-Type':
'text/plain; charset=utf-8'}
def get_test2():
return 'successful call to test2', 200, {'Content-Type':
'text/plain; charset=utf-8'}
def get_test3():
return 'successful call to test3', 200, {'Content-Type':
'text/plain; charset=utf-8'}
def get_unprotected():
return 'successful call to unprotected', 200, {'Content-Type':
'text/plain; charset=utf-8'}
def raw_api():
return {'token': g.oidc_token_info}
<|reserved_special_token_0|>
def create_app(config, oidc_overrides=None):
global oidc
app = Flask(__name__)
app.config.update(config)
if oidc_overrides is None:
oidc_overrides = {}
app.oidc = flask_oidc.OpenIDConnect(app, **oidc_overrides)
oidc = app.oidc
app.route('/')(app.oidc.check(index))
app.route('/at')(app.oidc.check(get_at))
app.route('/rt')(app.oidc.check(get_rt))
rendered = app.oidc.accept_token(True, ['openid'], auth_header_key=
'Authorization')(api)
app.route('/api', methods=['GET', 'POST'])(rendered)
configure_keycloak_test_uris(app)
unrendered = app.oidc.accept_token(True, ['openid'], render_errors=
False, auth_header_key='Authorization')(raw_api)
def externally_rendered_api(*args, **kwds):
inner_response = unrendered(*args, **kwds)
if isinstance(inner_response, tuple):
raw_response, response_code, headers = inner_response
rendered_response = json.dumps(raw_response
), response_code, headers
else:
rendered_response = json.dumps(inner_response)
return rendered_response
app.route('/external_api', methods=['GET', 'POST'])(externally_rendered_api
)
return app
<|reserved_special_token_0|>
def configure_mock_object_version1(test_app):
_configure_mock_object(test_app)
test_app.oidc.keycloakApi.jwt_decode = Mock()
test_app.oidc.keycloakApi.jwt_decode.return_value = (
decoded_jwt_with_permission_test1_and_test2)
test_app.oidc.keycloakApi.get_resource_info = Mock()
test_app.oidc.keycloakApi.get_resource_info.side_effect = [resource_test1,
resource_test2]
def configure_mock_version2(test_app):
_configure_mock_object(test_app)
test_app.oidc.keycloakApi.jwt_decode.return_value = (
decoded_jwt_with_permission_test3)
test_app.oidc.keycloakApi.get_resource_info = Mock()
test_app.oidc.keycloakApi.get_resource_info.side_effect = [resource_test3]
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def index():
return 'too many secrets', 200, {'Content-Type':
'text/plain; charset=utf-8'}
def get_at():
return oidc.get_access_token(), 200, {'Content-Type':
'text/plain; charset=utf-8'}
<|reserved_special_token_0|>
def get_test1():
return 'successful call to test1', 200, {'Content-Type':
'text/plain; charset=utf-8'}
def get_test2():
return 'successful call to test2', 200, {'Content-Type':
'text/plain; charset=utf-8'}
def get_test3():
return 'successful call to test3', 200, {'Content-Type':
'text/plain; charset=utf-8'}
def get_unprotected():
return 'successful call to unprotected', 200, {'Content-Type':
'text/plain; charset=utf-8'}
def raw_api():
return {'token': g.oidc_token_info}
<|reserved_special_token_0|>
def create_app(config, oidc_overrides=None):
global oidc
app = Flask(__name__)
app.config.update(config)
if oidc_overrides is None:
oidc_overrides = {}
app.oidc = flask_oidc.OpenIDConnect(app, **oidc_overrides)
oidc = app.oidc
app.route('/')(app.oidc.check(index))
app.route('/at')(app.oidc.check(get_at))
app.route('/rt')(app.oidc.check(get_rt))
rendered = app.oidc.accept_token(True, ['openid'], auth_header_key=
'Authorization')(api)
app.route('/api', methods=['GET', 'POST'])(rendered)
configure_keycloak_test_uris(app)
unrendered = app.oidc.accept_token(True, ['openid'], render_errors=
False, auth_header_key='Authorization')(raw_api)
def externally_rendered_api(*args, **kwds):
inner_response = unrendered(*args, **kwds)
if isinstance(inner_response, tuple):
raw_response, response_code, headers = inner_response
rendered_response = json.dumps(raw_response
), response_code, headers
else:
rendered_response = json.dumps(inner_response)
return rendered_response
app.route('/external_api', methods=['GET', 'POST'])(externally_rendered_api
)
return app
<|reserved_special_token_0|>
def _configure_mock_object(test_app):
test_app.oidc.validate_token = Mock()
test_app.oidc.validate_token.return_value = True
test_app.oidc.keycloakApi = MagicMock(autospec=flask_oidc.KeycloakAPI)
test_app.oidc.keycloakApi.authorize = Mock()
test_app.oidc.keycloakApi.authorize.return_value = valid_rpt
test_app.oidc.keycloakApi.get_access_token = Mock()
test_app.oidc.keycloakApi.get_access_token.return_value = access_token
test_app.oidc.keycloakApi._get_realm_pub_key = Mock()
test_app.oidc.keycloakApi._get_realm_pub_key.return_value = 'abc'
def configure_mock_object_version1(test_app):
_configure_mock_object(test_app)
test_app.oidc.keycloakApi.jwt_decode = Mock()
test_app.oidc.keycloakApi.jwt_decode.return_value = (
decoded_jwt_with_permission_test1_and_test2)
test_app.oidc.keycloakApi.get_resource_info = Mock()
test_app.oidc.keycloakApi.get_resource_info.side_effect = [resource_test1,
resource_test2]
def configure_mock_version2(test_app):
_configure_mock_object(test_app)
test_app.oidc.keycloakApi.jwt_decode.return_value = (
decoded_jwt_with_permission_test3)
test_app.oidc.keycloakApi.get_resource_info = Mock()
test_app.oidc.keycloakApi.get_resource_info.side_effect = [resource_test3]
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def index():
return 'too many secrets', 200, {'Content-Type':
'text/plain; charset=utf-8'}
def get_at():
return oidc.get_access_token(), 200, {'Content-Type':
'text/plain; charset=utf-8'}
<|reserved_special_token_0|>
def get_test1():
return 'successful call to test1', 200, {'Content-Type':
'text/plain; charset=utf-8'}
def get_test2():
return 'successful call to test2', 200, {'Content-Type':
'text/plain; charset=utf-8'}
def get_test3():
return 'successful call to test3', 200, {'Content-Type':
'text/plain; charset=utf-8'}
def get_unprotected():
return 'successful call to unprotected', 200, {'Content-Type':
'text/plain; charset=utf-8'}
def raw_api():
return {'token': g.oidc_token_info}
<|reserved_special_token_0|>
def get_test4():
return 'successful call to test4', 200, {'Content-Type':
'text/plain; charset=utf-8'}
<|reserved_special_token_0|>
def create_app(config, oidc_overrides=None):
global oidc
app = Flask(__name__)
app.config.update(config)
if oidc_overrides is None:
oidc_overrides = {}
app.oidc = flask_oidc.OpenIDConnect(app, **oidc_overrides)
oidc = app.oidc
app.route('/')(app.oidc.check(index))
app.route('/at')(app.oidc.check(get_at))
app.route('/rt')(app.oidc.check(get_rt))
rendered = app.oidc.accept_token(True, ['openid'], auth_header_key=
'Authorization')(api)
app.route('/api', methods=['GET', 'POST'])(rendered)
configure_keycloak_test_uris(app)
unrendered = app.oidc.accept_token(True, ['openid'], render_errors=
False, auth_header_key='Authorization')(raw_api)
def externally_rendered_api(*args, **kwds):
inner_response = unrendered(*args, **kwds)
if isinstance(inner_response, tuple):
raw_response, response_code, headers = inner_response
rendered_response = json.dumps(raw_response
), response_code, headers
else:
rendered_response = json.dumps(inner_response)
return rendered_response
app.route('/external_api', methods=['GET', 'POST'])(externally_rendered_api
)
return app
def configure_keycloak_test_uris(app):
test1 = app.oidc.check_authorization(True)(get_test1)
app.route('/test1', methods=['GET', 'POST'])(test1)
test2 = app.oidc.check_authorization(True)(get_test2)
app.route('/test2', methods=['GET', 'POST'])(test2)
test3 = app.oidc.check_authorization(True)(get_test3)
app.route('/test3', methods=['GET', 'POST'])(test3)
callback_method.return_value = True
test4 = app.oidc.check_authorization(True, validation_func=callback_method
)(get_test4)
app.route('/test4', methods=['GET', 'POST'])(test4)
unprotected = app.oidc.check_authorization(False)(get_unprotected)
app.route('/unprotected', methods=['GET'])(unprotected)
def _configure_mock_object(test_app):
test_app.oidc.validate_token = Mock()
test_app.oidc.validate_token.return_value = True
test_app.oidc.keycloakApi = MagicMock(autospec=flask_oidc.KeycloakAPI)
test_app.oidc.keycloakApi.authorize = Mock()
test_app.oidc.keycloakApi.authorize.return_value = valid_rpt
test_app.oidc.keycloakApi.get_access_token = Mock()
test_app.oidc.keycloakApi.get_access_token.return_value = access_token
test_app.oidc.keycloakApi._get_realm_pub_key = Mock()
test_app.oidc.keycloakApi._get_realm_pub_key.return_value = 'abc'
def configure_mock_object_version1(test_app):
_configure_mock_object(test_app)
test_app.oidc.keycloakApi.jwt_decode = Mock()
test_app.oidc.keycloakApi.jwt_decode.return_value = (
decoded_jwt_with_permission_test1_and_test2)
test_app.oidc.keycloakApi.get_resource_info = Mock()
test_app.oidc.keycloakApi.get_resource_info.side_effect = [resource_test1,
resource_test2]
def configure_mock_version2(test_app):
_configure_mock_object(test_app)
test_app.oidc.keycloakApi.jwt_decode.return_value = (
decoded_jwt_with_permission_test3)
test_app.oidc.keycloakApi.get_resource_info = Mock()
test_app.oidc.keycloakApi.get_resource_info.side_effect = [resource_test3]
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def index():
return 'too many secrets', 200, {'Content-Type':
'text/plain; charset=utf-8'}
def get_at():
return oidc.get_access_token(), 200, {'Content-Type':
'text/plain; charset=utf-8'}
<|reserved_special_token_0|>
def get_test1():
return 'successful call to test1', 200, {'Content-Type':
'text/plain; charset=utf-8'}
def get_test2():
return 'successful call to test2', 200, {'Content-Type':
'text/plain; charset=utf-8'}
def get_test3():
return 'successful call to test3', 200, {'Content-Type':
'text/plain; charset=utf-8'}
def get_unprotected():
return 'successful call to unprotected', 200, {'Content-Type':
'text/plain; charset=utf-8'}
def raw_api():
return {'token': g.oidc_token_info}
def api():
return json.dumps(raw_api())
def get_test4():
return 'successful call to test4', 200, {'Content-Type':
'text/plain; charset=utf-8'}
<|reserved_special_token_0|>
def create_app(config, oidc_overrides=None):
global oidc
app = Flask(__name__)
app.config.update(config)
if oidc_overrides is None:
oidc_overrides = {}
app.oidc = flask_oidc.OpenIDConnect(app, **oidc_overrides)
oidc = app.oidc
app.route('/')(app.oidc.check(index))
app.route('/at')(app.oidc.check(get_at))
app.route('/rt')(app.oidc.check(get_rt))
rendered = app.oidc.accept_token(True, ['openid'], auth_header_key=
'Authorization')(api)
app.route('/api', methods=['GET', 'POST'])(rendered)
configure_keycloak_test_uris(app)
unrendered = app.oidc.accept_token(True, ['openid'], render_errors=
False, auth_header_key='Authorization')(raw_api)
def externally_rendered_api(*args, **kwds):
inner_response = unrendered(*args, **kwds)
if isinstance(inner_response, tuple):
raw_response, response_code, headers = inner_response
rendered_response = json.dumps(raw_response
), response_code, headers
else:
rendered_response = json.dumps(inner_response)
return rendered_response
app.route('/external_api', methods=['GET', 'POST'])(externally_rendered_api
)
return app
def configure_keycloak_test_uris(app):
test1 = app.oidc.check_authorization(True)(get_test1)
app.route('/test1', methods=['GET', 'POST'])(test1)
test2 = app.oidc.check_authorization(True)(get_test2)
app.route('/test2', methods=['GET', 'POST'])(test2)
test3 = app.oidc.check_authorization(True)(get_test3)
app.route('/test3', methods=['GET', 'POST'])(test3)
callback_method.return_value = True
test4 = app.oidc.check_authorization(True, validation_func=callback_method
)(get_test4)
app.route('/test4', methods=['GET', 'POST'])(test4)
unprotected = app.oidc.check_authorization(False)(get_unprotected)
app.route('/unprotected', methods=['GET'])(unprotected)
def _configure_mock_object(test_app):
test_app.oidc.validate_token = Mock()
test_app.oidc.validate_token.return_value = True
test_app.oidc.keycloakApi = MagicMock(autospec=flask_oidc.KeycloakAPI)
test_app.oidc.keycloakApi.authorize = Mock()
test_app.oidc.keycloakApi.authorize.return_value = valid_rpt
test_app.oidc.keycloakApi.get_access_token = Mock()
test_app.oidc.keycloakApi.get_access_token.return_value = access_token
test_app.oidc.keycloakApi._get_realm_pub_key = Mock()
test_app.oidc.keycloakApi._get_realm_pub_key.return_value = 'abc'
def configure_mock_object_version1(test_app):
_configure_mock_object(test_app)
test_app.oidc.keycloakApi.jwt_decode = Mock()
test_app.oidc.keycloakApi.jwt_decode.return_value = (
decoded_jwt_with_permission_test1_and_test2)
test_app.oidc.keycloakApi.get_resource_info = Mock()
test_app.oidc.keycloakApi.get_resource_info.side_effect = [resource_test1,
resource_test2]
def configure_mock_version2(test_app):
_configure_mock_object(test_app)
test_app.oidc.keycloakApi.jwt_decode.return_value = (
decoded_jwt_with_permission_test3)
test_app.oidc.keycloakApi.get_resource_info = Mock()
test_app.oidc.keycloakApi.get_resource_info.side_effect = [resource_test3]
def configure_mock_version3(test_app):
_configure_mock_object(test_app)
test_app.oidc.keycloakApi.jwt_decode.return_value = None
test_app.oidc.keycloakApi.get_resource_info = Mock()
test_app.oidc.keycloakApi.get_resource_info.side_effect = [resource_test3]
<|reserved_special_token_1|>
"""
Flask app for testing the OpenID Connect extension.
"""
import json
from unittest.mock import MagicMock, Mock
from flask import Flask, g
import flask_oidc
from tests.json_snippets import *
oidc = None
def index():
return "too many secrets", 200, {
'Content-Type': 'text/plain; charset=utf-8'
}
def get_at():
return oidc.get_access_token(), 200, {
'Content-Type': 'text/plain; charset=utf-8'
}
def get_rt():
return oidc.get_refresh_token(), 200, {
'Content-Type': 'text/plain; charset=utf-8'
}
def get_test1():
return "successful call to test1", 200, {
'Content-Type': 'text/plain; charset=utf-8'
}
def get_test2():
return "successful call to test2", 200, {
'Content-Type': 'text/plain; charset=utf-8'
}
def get_test3():
return "successful call to test3", 200, {
'Content-Type': 'text/plain; charset=utf-8'
}
def get_unprotected():
return "successful call to unprotected", 200, {
'Content-Type': 'text/plain; charset=utf-8'
}
def raw_api():
return {'token': g.oidc_token_info}
def api():
return json.dumps(raw_api())
def get_test4():
return "successful call to test4", 200, {
'Content-Type': 'text/plain; charset=utf-8'
}
callback_method = Mock()
def create_app(config, oidc_overrides=None):
global oidc
app = Flask(__name__)
app.config.update(config)
if oidc_overrides is None:
oidc_overrides = {}
app.oidc = flask_oidc.OpenIDConnect(app, **oidc_overrides)
oidc = app.oidc
app.route('/')(app.oidc.check(index))
app.route('/at')(app.oidc.check(get_at))
app.route('/rt')(app.oidc.check(get_rt))
# Check standalone usage
rendered = app.oidc.accept_token(True, ['openid'], auth_header_key='Authorization')(api)
app.route('/api', methods=['GET', 'POST'])(rendered)
configure_keycloak_test_uris(app)
# Check combination with an external API renderer like Flask-RESTful
unrendered = app.oidc.accept_token(True, ['openid'], render_errors=False, auth_header_key='Authorization')(raw_api)
def externally_rendered_api(*args, **kwds):
inner_response = unrendered(*args, **kwds)
if isinstance(inner_response, tuple):
raw_response, response_code, headers = inner_response
rendered_response = json.dumps(raw_response), response_code, headers
else:
rendered_response = json.dumps(inner_response)
return rendered_response
app.route('/external_api', methods=['GET', 'POST'])(externally_rendered_api)
return app
def configure_keycloak_test_uris(app):
test1 = app.oidc.check_authorization(True)(get_test1)
app.route('/test1', methods=['GET', 'POST'])(test1)
test2 = app.oidc.check_authorization(True)(get_test2)
app.route('/test2', methods=['GET', 'POST'])(test2)
test3 = app.oidc.check_authorization(True)(get_test3)
app.route('/test3', methods=['GET', 'POST'])(test3)
callback_method.return_value = True
test4 = app.oidc.check_authorization(True, validation_func=callback_method)(get_test4)
app.route('/test4', methods=['GET', 'POST'])(test4)
unprotected = app.oidc.check_authorization(False)(get_unprotected)
app.route('/unprotected', methods=['GET'])(unprotected)
def _configure_mock_object(test_app):
test_app.oidc.validate_token = Mock()
test_app.oidc.validate_token.return_value = True
test_app.oidc.keycloakApi = MagicMock(autospec=flask_oidc.KeycloakAPI)
test_app.oidc.keycloakApi.authorize = Mock()
test_app.oidc.keycloakApi.authorize.return_value = valid_rpt
test_app.oidc.keycloakApi.get_access_token = Mock()
test_app.oidc.keycloakApi.get_access_token.return_value = access_token
test_app.oidc.keycloakApi._get_realm_pub_key = Mock()
test_app.oidc.keycloakApi._get_realm_pub_key.return_value = "abc"
def configure_mock_object_version1(test_app):
_configure_mock_object(test_app)
test_app.oidc.keycloakApi.jwt_decode = Mock()
test_app.oidc.keycloakApi.jwt_decode.return_value = decoded_jwt_with_permission_test1_and_test2
test_app.oidc.keycloakApi.get_resource_info = Mock()
test_app.oidc.keycloakApi.get_resource_info.side_effect = [resource_test1, resource_test2]
def configure_mock_version2(test_app):
_configure_mock_object(test_app)
test_app.oidc.keycloakApi.jwt_decode.return_value = decoded_jwt_with_permission_test3
test_app.oidc.keycloakApi.get_resource_info = Mock()
test_app.oidc.keycloakApi.get_resource_info.side_effect = [resource_test3]
def configure_mock_version3(test_app):
_configure_mock_object(test_app)
test_app.oidc.keycloakApi.jwt_decode.return_value = None
test_app.oidc.keycloakApi.get_resource_info = Mock()
test_app.oidc.keycloakApi.get_resource_info.side_effect = [resource_test3]
|
flexible
|
{
"blob_id": "ef3fa538828315845de5e2f7d4949f690e44276e",
"index": 6009,
"step-1": "<mask token>\n\n\ndef index():\n return 'too many secrets', 200, {'Content-Type':\n 'text/plain; charset=utf-8'}\n\n\ndef get_at():\n return oidc.get_access_token(), 200, {'Content-Type':\n 'text/plain; charset=utf-8'}\n\n\n<mask token>\n\n\ndef get_test1():\n return 'successful call to test1', 200, {'Content-Type':\n 'text/plain; charset=utf-8'}\n\n\ndef get_test2():\n return 'successful call to test2', 200, {'Content-Type':\n 'text/plain; charset=utf-8'}\n\n\ndef get_test3():\n return 'successful call to test3', 200, {'Content-Type':\n 'text/plain; charset=utf-8'}\n\n\ndef get_unprotected():\n return 'successful call to unprotected', 200, {'Content-Type':\n 'text/plain; charset=utf-8'}\n\n\ndef raw_api():\n return {'token': g.oidc_token_info}\n\n\n<mask token>\n\n\ndef create_app(config, oidc_overrides=None):\n global oidc\n app = Flask(__name__)\n app.config.update(config)\n if oidc_overrides is None:\n oidc_overrides = {}\n app.oidc = flask_oidc.OpenIDConnect(app, **oidc_overrides)\n oidc = app.oidc\n app.route('/')(app.oidc.check(index))\n app.route('/at')(app.oidc.check(get_at))\n app.route('/rt')(app.oidc.check(get_rt))\n rendered = app.oidc.accept_token(True, ['openid'], auth_header_key=\n 'Authorization')(api)\n app.route('/api', methods=['GET', 'POST'])(rendered)\n configure_keycloak_test_uris(app)\n unrendered = app.oidc.accept_token(True, ['openid'], render_errors=\n False, auth_header_key='Authorization')(raw_api)\n\n def externally_rendered_api(*args, **kwds):\n inner_response = unrendered(*args, **kwds)\n if isinstance(inner_response, tuple):\n raw_response, response_code, headers = inner_response\n rendered_response = json.dumps(raw_response\n ), response_code, headers\n else:\n rendered_response = json.dumps(inner_response)\n return rendered_response\n app.route('/external_api', methods=['GET', 'POST'])(externally_rendered_api\n )\n return app\n\n\n<mask token>\n\n\ndef configure_mock_object_version1(test_app):\n _configure_mock_object(test_app)\n test_app.oidc.keycloakApi.jwt_decode = Mock()\n test_app.oidc.keycloakApi.jwt_decode.return_value = (\n decoded_jwt_with_permission_test1_and_test2)\n test_app.oidc.keycloakApi.get_resource_info = Mock()\n test_app.oidc.keycloakApi.get_resource_info.side_effect = [resource_test1,\n resource_test2]\n\n\ndef configure_mock_version2(test_app):\n _configure_mock_object(test_app)\n test_app.oidc.keycloakApi.jwt_decode.return_value = (\n decoded_jwt_with_permission_test3)\n test_app.oidc.keycloakApi.get_resource_info = Mock()\n test_app.oidc.keycloakApi.get_resource_info.side_effect = [resource_test3]\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef index():\n return 'too many secrets', 200, {'Content-Type':\n 'text/plain; charset=utf-8'}\n\n\ndef get_at():\n return oidc.get_access_token(), 200, {'Content-Type':\n 'text/plain; charset=utf-8'}\n\n\n<mask token>\n\n\ndef get_test1():\n return 'successful call to test1', 200, {'Content-Type':\n 'text/plain; charset=utf-8'}\n\n\ndef get_test2():\n return 'successful call to test2', 200, {'Content-Type':\n 'text/plain; charset=utf-8'}\n\n\ndef get_test3():\n return 'successful call to test3', 200, {'Content-Type':\n 'text/plain; charset=utf-8'}\n\n\ndef get_unprotected():\n return 'successful call to unprotected', 200, {'Content-Type':\n 'text/plain; charset=utf-8'}\n\n\ndef raw_api():\n return {'token': g.oidc_token_info}\n\n\n<mask token>\n\n\ndef create_app(config, oidc_overrides=None):\n global oidc\n app = Flask(__name__)\n app.config.update(config)\n if oidc_overrides is None:\n oidc_overrides = {}\n app.oidc = flask_oidc.OpenIDConnect(app, **oidc_overrides)\n oidc = app.oidc\n app.route('/')(app.oidc.check(index))\n app.route('/at')(app.oidc.check(get_at))\n app.route('/rt')(app.oidc.check(get_rt))\n rendered = app.oidc.accept_token(True, ['openid'], auth_header_key=\n 'Authorization')(api)\n app.route('/api', methods=['GET', 'POST'])(rendered)\n configure_keycloak_test_uris(app)\n unrendered = app.oidc.accept_token(True, ['openid'], render_errors=\n False, auth_header_key='Authorization')(raw_api)\n\n def externally_rendered_api(*args, **kwds):\n inner_response = unrendered(*args, **kwds)\n if isinstance(inner_response, tuple):\n raw_response, response_code, headers = inner_response\n rendered_response = json.dumps(raw_response\n ), response_code, headers\n else:\n rendered_response = json.dumps(inner_response)\n return rendered_response\n app.route('/external_api', methods=['GET', 'POST'])(externally_rendered_api\n )\n return app\n\n\n<mask token>\n\n\ndef _configure_mock_object(test_app):\n test_app.oidc.validate_token = Mock()\n test_app.oidc.validate_token.return_value = True\n test_app.oidc.keycloakApi = MagicMock(autospec=flask_oidc.KeycloakAPI)\n test_app.oidc.keycloakApi.authorize = Mock()\n test_app.oidc.keycloakApi.authorize.return_value = valid_rpt\n test_app.oidc.keycloakApi.get_access_token = Mock()\n test_app.oidc.keycloakApi.get_access_token.return_value = access_token\n test_app.oidc.keycloakApi._get_realm_pub_key = Mock()\n test_app.oidc.keycloakApi._get_realm_pub_key.return_value = 'abc'\n\n\ndef configure_mock_object_version1(test_app):\n _configure_mock_object(test_app)\n test_app.oidc.keycloakApi.jwt_decode = Mock()\n test_app.oidc.keycloakApi.jwt_decode.return_value = (\n decoded_jwt_with_permission_test1_and_test2)\n test_app.oidc.keycloakApi.get_resource_info = Mock()\n test_app.oidc.keycloakApi.get_resource_info.side_effect = [resource_test1,\n resource_test2]\n\n\ndef configure_mock_version2(test_app):\n _configure_mock_object(test_app)\n test_app.oidc.keycloakApi.jwt_decode.return_value = (\n decoded_jwt_with_permission_test3)\n test_app.oidc.keycloakApi.get_resource_info = Mock()\n test_app.oidc.keycloakApi.get_resource_info.side_effect = [resource_test3]\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef index():\n return 'too many secrets', 200, {'Content-Type':\n 'text/plain; charset=utf-8'}\n\n\ndef get_at():\n return oidc.get_access_token(), 200, {'Content-Type':\n 'text/plain; charset=utf-8'}\n\n\n<mask token>\n\n\ndef get_test1():\n return 'successful call to test1', 200, {'Content-Type':\n 'text/plain; charset=utf-8'}\n\n\ndef get_test2():\n return 'successful call to test2', 200, {'Content-Type':\n 'text/plain; charset=utf-8'}\n\n\ndef get_test3():\n return 'successful call to test3', 200, {'Content-Type':\n 'text/plain; charset=utf-8'}\n\n\ndef get_unprotected():\n return 'successful call to unprotected', 200, {'Content-Type':\n 'text/plain; charset=utf-8'}\n\n\ndef raw_api():\n return {'token': g.oidc_token_info}\n\n\n<mask token>\n\n\ndef get_test4():\n return 'successful call to test4', 200, {'Content-Type':\n 'text/plain; charset=utf-8'}\n\n\n<mask token>\n\n\ndef create_app(config, oidc_overrides=None):\n global oidc\n app = Flask(__name__)\n app.config.update(config)\n if oidc_overrides is None:\n oidc_overrides = {}\n app.oidc = flask_oidc.OpenIDConnect(app, **oidc_overrides)\n oidc = app.oidc\n app.route('/')(app.oidc.check(index))\n app.route('/at')(app.oidc.check(get_at))\n app.route('/rt')(app.oidc.check(get_rt))\n rendered = app.oidc.accept_token(True, ['openid'], auth_header_key=\n 'Authorization')(api)\n app.route('/api', methods=['GET', 'POST'])(rendered)\n configure_keycloak_test_uris(app)\n unrendered = app.oidc.accept_token(True, ['openid'], render_errors=\n False, auth_header_key='Authorization')(raw_api)\n\n def externally_rendered_api(*args, **kwds):\n inner_response = unrendered(*args, **kwds)\n if isinstance(inner_response, tuple):\n raw_response, response_code, headers = inner_response\n rendered_response = json.dumps(raw_response\n ), response_code, headers\n else:\n rendered_response = json.dumps(inner_response)\n return rendered_response\n app.route('/external_api', methods=['GET', 'POST'])(externally_rendered_api\n )\n return app\n\n\ndef configure_keycloak_test_uris(app):\n test1 = app.oidc.check_authorization(True)(get_test1)\n app.route('/test1', methods=['GET', 'POST'])(test1)\n test2 = app.oidc.check_authorization(True)(get_test2)\n app.route('/test2', methods=['GET', 'POST'])(test2)\n test3 = app.oidc.check_authorization(True)(get_test3)\n app.route('/test3', methods=['GET', 'POST'])(test3)\n callback_method.return_value = True\n test4 = app.oidc.check_authorization(True, validation_func=callback_method\n )(get_test4)\n app.route('/test4', methods=['GET', 'POST'])(test4)\n unprotected = app.oidc.check_authorization(False)(get_unprotected)\n app.route('/unprotected', methods=['GET'])(unprotected)\n\n\ndef _configure_mock_object(test_app):\n test_app.oidc.validate_token = Mock()\n test_app.oidc.validate_token.return_value = True\n test_app.oidc.keycloakApi = MagicMock(autospec=flask_oidc.KeycloakAPI)\n test_app.oidc.keycloakApi.authorize = Mock()\n test_app.oidc.keycloakApi.authorize.return_value = valid_rpt\n test_app.oidc.keycloakApi.get_access_token = Mock()\n test_app.oidc.keycloakApi.get_access_token.return_value = access_token\n test_app.oidc.keycloakApi._get_realm_pub_key = Mock()\n test_app.oidc.keycloakApi._get_realm_pub_key.return_value = 'abc'\n\n\ndef configure_mock_object_version1(test_app):\n _configure_mock_object(test_app)\n test_app.oidc.keycloakApi.jwt_decode = Mock()\n test_app.oidc.keycloakApi.jwt_decode.return_value = (\n decoded_jwt_with_permission_test1_and_test2)\n 
test_app.oidc.keycloakApi.get_resource_info = Mock()\n test_app.oidc.keycloakApi.get_resource_info.side_effect = [resource_test1,\n resource_test2]\n\n\ndef configure_mock_version2(test_app):\n _configure_mock_object(test_app)\n test_app.oidc.keycloakApi.jwt_decode.return_value = (\n decoded_jwt_with_permission_test3)\n test_app.oidc.keycloakApi.get_resource_info = Mock()\n test_app.oidc.keycloakApi.get_resource_info.side_effect = [resource_test3]\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\ndef index():\n return 'too many secrets', 200, {'Content-Type':\n 'text/plain; charset=utf-8'}\n\n\ndef get_at():\n return oidc.get_access_token(), 200, {'Content-Type':\n 'text/plain; charset=utf-8'}\n\n\n<mask token>\n\n\ndef get_test1():\n return 'successful call to test1', 200, {'Content-Type':\n 'text/plain; charset=utf-8'}\n\n\ndef get_test2():\n return 'successful call to test2', 200, {'Content-Type':\n 'text/plain; charset=utf-8'}\n\n\ndef get_test3():\n return 'successful call to test3', 200, {'Content-Type':\n 'text/plain; charset=utf-8'}\n\n\ndef get_unprotected():\n return 'successful call to unprotected', 200, {'Content-Type':\n 'text/plain; charset=utf-8'}\n\n\ndef raw_api():\n return {'token': g.oidc_token_info}\n\n\ndef api():\n return json.dumps(raw_api())\n\n\ndef get_test4():\n return 'successful call to test4', 200, {'Content-Type':\n 'text/plain; charset=utf-8'}\n\n\n<mask token>\n\n\ndef create_app(config, oidc_overrides=None):\n global oidc\n app = Flask(__name__)\n app.config.update(config)\n if oidc_overrides is None:\n oidc_overrides = {}\n app.oidc = flask_oidc.OpenIDConnect(app, **oidc_overrides)\n oidc = app.oidc\n app.route('/')(app.oidc.check(index))\n app.route('/at')(app.oidc.check(get_at))\n app.route('/rt')(app.oidc.check(get_rt))\n rendered = app.oidc.accept_token(True, ['openid'], auth_header_key=\n 'Authorization')(api)\n app.route('/api', methods=['GET', 'POST'])(rendered)\n configure_keycloak_test_uris(app)\n unrendered = app.oidc.accept_token(True, ['openid'], render_errors=\n False, auth_header_key='Authorization')(raw_api)\n\n def externally_rendered_api(*args, **kwds):\n inner_response = unrendered(*args, **kwds)\n if isinstance(inner_response, tuple):\n raw_response, response_code, headers = inner_response\n rendered_response = json.dumps(raw_response\n ), response_code, headers\n else:\n rendered_response = json.dumps(inner_response)\n return rendered_response\n app.route('/external_api', methods=['GET', 'POST'])(externally_rendered_api\n )\n return app\n\n\ndef configure_keycloak_test_uris(app):\n test1 = app.oidc.check_authorization(True)(get_test1)\n app.route('/test1', methods=['GET', 'POST'])(test1)\n test2 = app.oidc.check_authorization(True)(get_test2)\n app.route('/test2', methods=['GET', 'POST'])(test2)\n test3 = app.oidc.check_authorization(True)(get_test3)\n app.route('/test3', methods=['GET', 'POST'])(test3)\n callback_method.return_value = True\n test4 = app.oidc.check_authorization(True, validation_func=callback_method\n )(get_test4)\n app.route('/test4', methods=['GET', 'POST'])(test4)\n unprotected = app.oidc.check_authorization(False)(get_unprotected)\n app.route('/unprotected', methods=['GET'])(unprotected)\n\n\ndef _configure_mock_object(test_app):\n test_app.oidc.validate_token = Mock()\n test_app.oidc.validate_token.return_value = True\n test_app.oidc.keycloakApi = MagicMock(autospec=flask_oidc.KeycloakAPI)\n test_app.oidc.keycloakApi.authorize = Mock()\n test_app.oidc.keycloakApi.authorize.return_value = valid_rpt\n test_app.oidc.keycloakApi.get_access_token = Mock()\n test_app.oidc.keycloakApi.get_access_token.return_value = access_token\n test_app.oidc.keycloakApi._get_realm_pub_key = Mock()\n test_app.oidc.keycloakApi._get_realm_pub_key.return_value = 'abc'\n\n\ndef configure_mock_object_version1(test_app):\n _configure_mock_object(test_app)\n test_app.oidc.keycloakApi.jwt_decode = Mock()\n test_app.oidc.keycloakApi.jwt_decode.return_value = (\n decoded_jwt_with_permission_test1_and_test2)\n 
test_app.oidc.keycloakApi.get_resource_info = Mock()\n test_app.oidc.keycloakApi.get_resource_info.side_effect = [resource_test1,\n resource_test2]\n\n\ndef configure_mock_version2(test_app):\n _configure_mock_object(test_app)\n test_app.oidc.keycloakApi.jwt_decode.return_value = (\n decoded_jwt_with_permission_test3)\n test_app.oidc.keycloakApi.get_resource_info = Mock()\n test_app.oidc.keycloakApi.get_resource_info.side_effect = [resource_test3]\n\n\ndef configure_mock_version3(test_app):\n _configure_mock_object(test_app)\n test_app.oidc.keycloakApi.jwt_decode.return_value = None\n test_app.oidc.keycloakApi.get_resource_info = Mock()\n test_app.oidc.keycloakApi.get_resource_info.side_effect = [resource_test3]\n",
"step-5": "\"\"\"\nFlask app for testing the OpenID Connect extension.\n\"\"\"\n\nimport json\nfrom unittest.mock import MagicMock, Mock\n\nfrom flask import Flask, g\nimport flask_oidc\nfrom tests.json_snippets import *\n\noidc = None\n\n\ndef index():\n return \"too many secrets\", 200, {\n 'Content-Type': 'text/plain; charset=utf-8'\n }\n\n\ndef get_at():\n return oidc.get_access_token(), 200, {\n 'Content-Type': 'text/plain; charset=utf-8'\n }\n\n\ndef get_rt():\n return oidc.get_refresh_token(), 200, {\n 'Content-Type': 'text/plain; charset=utf-8'\n }\n\n\ndef get_test1():\n return \"successful call to test1\", 200, {\n 'Content-Type': 'text/plain; charset=utf-8'\n }\n\n\ndef get_test2():\n return \"successful call to test2\", 200, {\n 'Content-Type': 'text/plain; charset=utf-8'\n }\n\n\ndef get_test3():\n return \"successful call to test3\", 200, {\n 'Content-Type': 'text/plain; charset=utf-8'\n }\n\n\ndef get_unprotected():\n return \"successful call to unprotected\", 200, {\n 'Content-Type': 'text/plain; charset=utf-8'\n }\n\n\ndef raw_api():\n return {'token': g.oidc_token_info}\n\n\ndef api():\n return json.dumps(raw_api())\n\n\ndef get_test4():\n return \"successful call to test4\", 200, {\n 'Content-Type': 'text/plain; charset=utf-8'\n }\n\n\ncallback_method = Mock()\n\n\ndef create_app(config, oidc_overrides=None):\n global oidc\n\n app = Flask(__name__)\n app.config.update(config)\n if oidc_overrides is None:\n oidc_overrides = {}\n app.oidc = flask_oidc.OpenIDConnect(app, **oidc_overrides)\n oidc = app.oidc\n\n app.route('/')(app.oidc.check(index))\n app.route('/at')(app.oidc.check(get_at))\n app.route('/rt')(app.oidc.check(get_rt))\n # Check standalone usage\n rendered = app.oidc.accept_token(True, ['openid'], auth_header_key='Authorization')(api)\n app.route('/api', methods=['GET', 'POST'])(rendered)\n\n configure_keycloak_test_uris(app)\n\n # Check combination with an external API renderer like Flask-RESTful\n unrendered = app.oidc.accept_token(True, ['openid'], render_errors=False, auth_header_key='Authorization')(raw_api)\n\n def externally_rendered_api(*args, **kwds):\n inner_response = unrendered(*args, **kwds)\n if isinstance(inner_response, tuple):\n raw_response, response_code, headers = inner_response\n rendered_response = json.dumps(raw_response), response_code, headers\n else:\n rendered_response = json.dumps(inner_response)\n return rendered_response\n\n app.route('/external_api', methods=['GET', 'POST'])(externally_rendered_api)\n return app\n\n\ndef configure_keycloak_test_uris(app):\n test1 = app.oidc.check_authorization(True)(get_test1)\n app.route('/test1', methods=['GET', 'POST'])(test1)\n test2 = app.oidc.check_authorization(True)(get_test2)\n app.route('/test2', methods=['GET', 'POST'])(test2)\n test3 = app.oidc.check_authorization(True)(get_test3)\n app.route('/test3', methods=['GET', 'POST'])(test3)\n\n callback_method.return_value = True\n\n test4 = app.oidc.check_authorization(True, validation_func=callback_method)(get_test4)\n app.route('/test4', methods=['GET', 'POST'])(test4)\n\n unprotected = app.oidc.check_authorization(False)(get_unprotected)\n app.route('/unprotected', methods=['GET'])(unprotected)\n\n\ndef _configure_mock_object(test_app):\n test_app.oidc.validate_token = Mock()\n test_app.oidc.validate_token.return_value = True\n test_app.oidc.keycloakApi = MagicMock(autospec=flask_oidc.KeycloakAPI)\n test_app.oidc.keycloakApi.authorize = Mock()\n test_app.oidc.keycloakApi.authorize.return_value = valid_rpt\n 
test_app.oidc.keycloakApi.get_access_token = Mock()\n test_app.oidc.keycloakApi.get_access_token.return_value = access_token\n test_app.oidc.keycloakApi._get_realm_pub_key = Mock()\n test_app.oidc.keycloakApi._get_realm_pub_key.return_value = \"abc\"\n\n\ndef configure_mock_object_version1(test_app):\n _configure_mock_object(test_app)\n\n test_app.oidc.keycloakApi.jwt_decode = Mock()\n test_app.oidc.keycloakApi.jwt_decode.return_value = decoded_jwt_with_permission_test1_and_test2\n test_app.oidc.keycloakApi.get_resource_info = Mock()\n test_app.oidc.keycloakApi.get_resource_info.side_effect = [resource_test1, resource_test2]\n\n\ndef configure_mock_version2(test_app):\n _configure_mock_object(test_app)\n test_app.oidc.keycloakApi.jwt_decode.return_value = decoded_jwt_with_permission_test3\n test_app.oidc.keycloakApi.get_resource_info = Mock()\n test_app.oidc.keycloakApi.get_resource_info.side_effect = [resource_test3]\n\n\ndef configure_mock_version3(test_app):\n _configure_mock_object(test_app)\n test_app.oidc.keycloakApi.jwt_decode.return_value = None\n test_app.oidc.keycloakApi.get_resource_info = Mock()\n test_app.oidc.keycloakApi.get_resource_info.side_effect = [resource_test3]\n",
"step-ids": [
10,
11,
13,
15,
19
]
}
|
[
10,
11,
13,
15,
19
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
driver.get('https://www.baidu.com')
<|reserved_special_token_0|>
elem.send_keys('python selenium', Keys.ENTER)
print(driver.page_source)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
driver = webdriver.Chrome()
driver.get('https://www.baidu.com')
elem = driver.find_element_by_xpath('//*[@id="kw"]')
elem.send_keys('python selenium', Keys.ENTER)
print(driver.page_source)
<|reserved_special_token_1|>
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
driver = webdriver.Chrome()
driver.get('https://www.baidu.com')
elem = driver.find_element_by_xpath('//*[@id="kw"]')
elem.send_keys('python selenium', Keys.ENTER)
print(driver.page_source)
<|reserved_special_token_1|>
# coding: utf-8
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
driver = webdriver.Chrome()
driver.get("https://www.baidu.com")
elem = driver.find_element_by_xpath('//*[@id="kw"]')
elem.send_keys("python selenium", Keys.ENTER)
print(driver.page_source)
|
flexible
|
{
"blob_id": "3c8352ff2fc92ada1b58603df2a1a402e57842be",
"index": 8606,
"step-1": "<mask token>\n",
"step-2": "<mask token>\ndriver.get('https://www.baidu.com')\n<mask token>\nelem.send_keys('python selenium', Keys.ENTER)\nprint(driver.page_source)\n",
"step-3": "<mask token>\ndriver = webdriver.Chrome()\ndriver.get('https://www.baidu.com')\nelem = driver.find_element_by_xpath('//*[@id=\"kw\"]')\nelem.send_keys('python selenium', Keys.ENTER)\nprint(driver.page_source)\n",
"step-4": "from selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\ndriver = webdriver.Chrome()\ndriver.get('https://www.baidu.com')\nelem = driver.find_element_by_xpath('//*[@id=\"kw\"]')\nelem.send_keys('python selenium', Keys.ENTER)\nprint(driver.page_source)\n",
"step-5": "# coding: utf-8\r\n\r\n\r\nfrom selenium import webdriver\r\nfrom selenium.webdriver.common.keys import Keys\r\n\r\ndriver = webdriver.Chrome()\r\ndriver.get(\"https://www.baidu.com\")\r\n\r\nelem = driver.find_element_by_xpath('//*[@id=\"kw\"]')\r\nelem.send_keys(\"python selenium\", Keys.ENTER)\r\n\r\nprint(driver.page_source)\r\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import pytest
from domain.story import Story
from tests.dot_dictionary import DotDict
@pytest.fixture()
def deployed_story_over_a_weekend():
revision_0 = DotDict({
'CreationDate': "2019-07-11T14:33:20.000Z"
})
revision_1 = DotDict({
'CreationDate': "2019-07-31T15:33:20.000Z",
'Description': "SCHEDULE STATE changed from [To-Do] to [In-Progress], READY changed from [true] to [false]"
})
revision_2 = DotDict({
'CreationDate': "2019-08-06T16:33:20.000Z",
'Description': "SCHEDULE STATE changed from [Ready For Prod] to [Deployed]"
})
rally_story = DotDict({
'ScheduleState': 'Completed',
'RevisionHistory': DotDict({
'Revisions': [revision_2, revision_1, revision_0]
})
    })
return Story(rally_story, ['Backlog', 'To-Do', 'In-Progress', 'Completed', 'Ready For Prod', 'Deployed'],
{'In-Progress', 'Development'}, {'Deployed', 'Prod - ON'})
def test_cycle_time_only_includes_business_days(deployed_story_over_a_weekend):
assert deployed_story_over_a_weekend.cycle_time == 7
def test_find_current_start_state() :
assert 'In-Progress' == Story.find_current_state_name({'Backlog', 'To-Do', 'In-Progress', 'Completed', 'Ready For Prod', 'Deployed'}, {'In-Progress', 'Development'})
|
normal
|
{
"blob_id": "d10c74338ea18ef3e5fb6a4dd2224faa4f94aa62",
"index": 9950,
"step-1": "<mask token>\n\n\ndef test_find_current_start_state():\n assert 'In-Progress' == Story.find_current_state_name({'Backlog',\n 'To-Do', 'In-Progress', 'Completed', 'Ready For Prod', 'Deployed'},\n {'In-Progress', 'Development'})\n",
"step-2": "<mask token>\n\n\[email protected]()\ndef deployed_story_over_a_weekend():\n revision_0 = DotDict({'CreationDate': '2019-07-11T14:33:20.000Z'})\n revision_1 = DotDict({'CreationDate': '2019-07-31T15:33:20.000Z',\n 'Description':\n 'SCHEDULE STATE changed from [To-Do] to [In-Progress], READY changed from [true] to [false]'\n })\n revision_2 = DotDict({'CreationDate': '2019-08-06T16:33:20.000Z',\n 'Description':\n 'SCHEDULE STATE changed from [Ready For Prod] to [Deployed]'})\n rally_story = DotDict({'ScheduleState': 'Completed', 'RevisionHistory':\n DotDict({'Revisions': [revision_2, revision_1, revision_0]})})\n return Story(rally_story, ['Backlog', 'To-Do', 'In-Progress',\n 'Completed', 'Ready For Prod', 'Deployed'], {'In-Progress',\n 'Development'}, {'Deployed', 'Prod - ON'})\n\n\n<mask token>\n\n\ndef test_find_current_start_state():\n assert 'In-Progress' == Story.find_current_state_name({'Backlog',\n 'To-Do', 'In-Progress', 'Completed', 'Ready For Prod', 'Deployed'},\n {'In-Progress', 'Development'})\n",
"step-3": "<mask token>\n\n\[email protected]()\ndef deployed_story_over_a_weekend():\n revision_0 = DotDict({'CreationDate': '2019-07-11T14:33:20.000Z'})\n revision_1 = DotDict({'CreationDate': '2019-07-31T15:33:20.000Z',\n 'Description':\n 'SCHEDULE STATE changed from [To-Do] to [In-Progress], READY changed from [true] to [false]'\n })\n revision_2 = DotDict({'CreationDate': '2019-08-06T16:33:20.000Z',\n 'Description':\n 'SCHEDULE STATE changed from [Ready For Prod] to [Deployed]'})\n rally_story = DotDict({'ScheduleState': 'Completed', 'RevisionHistory':\n DotDict({'Revisions': [revision_2, revision_1, revision_0]})})\n return Story(rally_story, ['Backlog', 'To-Do', 'In-Progress',\n 'Completed', 'Ready For Prod', 'Deployed'], {'In-Progress',\n 'Development'}, {'Deployed', 'Prod - ON'})\n\n\ndef test_cycle_time_only_includes_business_days(deployed_story_over_a_weekend):\n assert deployed_story_over_a_weekend.cycle_time == 7\n\n\ndef test_find_current_start_state():\n assert 'In-Progress' == Story.find_current_state_name({'Backlog',\n 'To-Do', 'In-Progress', 'Completed', 'Ready For Prod', 'Deployed'},\n {'In-Progress', 'Development'})\n",
"step-4": "import pytest\nfrom domain.story import Story\nfrom tests.dot_dictionary import DotDict\n\n\[email protected]()\ndef deployed_story_over_a_weekend():\n revision_0 = DotDict({'CreationDate': '2019-07-11T14:33:20.000Z'})\n revision_1 = DotDict({'CreationDate': '2019-07-31T15:33:20.000Z',\n 'Description':\n 'SCHEDULE STATE changed from [To-Do] to [In-Progress], READY changed from [true] to [false]'\n })\n revision_2 = DotDict({'CreationDate': '2019-08-06T16:33:20.000Z',\n 'Description':\n 'SCHEDULE STATE changed from [Ready For Prod] to [Deployed]'})\n rally_story = DotDict({'ScheduleState': 'Completed', 'RevisionHistory':\n DotDict({'Revisions': [revision_2, revision_1, revision_0]})})\n return Story(rally_story, ['Backlog', 'To-Do', 'In-Progress',\n 'Completed', 'Ready For Prod', 'Deployed'], {'In-Progress',\n 'Development'}, {'Deployed', 'Prod - ON'})\n\n\ndef test_cycle_time_only_includes_business_days(deployed_story_over_a_weekend):\n assert deployed_story_over_a_weekend.cycle_time == 7\n\n\ndef test_find_current_start_state():\n assert 'In-Progress' == Story.find_current_state_name({'Backlog',\n 'To-Do', 'In-Progress', 'Completed', 'Ready For Prod', 'Deployed'},\n {'In-Progress', 'Development'})\n",
"step-5": "import pytest\nfrom domain.story import Story\nfrom tests.dot_dictionary import DotDict\n\[email protected]()\ndef deployed_story_over_a_weekend():\n revision_0 = DotDict({\n 'CreationDate': \"2019-07-11T14:33:20.000Z\"\n })\n revision_1 = DotDict({\n 'CreationDate': \"2019-07-31T15:33:20.000Z\",\n 'Description': \"SCHEDULE STATE changed from [To-Do] to [In-Progress], READY changed from [true] to [false]\"\n })\n revision_2 = DotDict({\n 'CreationDate': \"2019-08-06T16:33:20.000Z\",\n 'Description': \"SCHEDULE STATE changed from [Ready For Prod] to [Deployed]\"\n })\n rally_story = DotDict({\n 'ScheduleState': 'Completed',\n 'RevisionHistory': DotDict({\n 'Revisions': [revision_2, revision_1, revision_0]\n })\n });\n return Story(rally_story, ['Backlog', 'To-Do', 'In-Progress', 'Completed', 'Ready For Prod', 'Deployed'],\n {'In-Progress', 'Development'}, {'Deployed', 'Prod - ON'})\n\n\ndef test_cycle_time_only_includes_business_days(deployed_story_over_a_weekend):\n assert deployed_story_over_a_weekend.cycle_time == 7\n\n\ndef test_find_current_start_state() :\n assert 'In-Progress' == Story.find_current_state_name({'Backlog', 'To-Do', 'In-Progress', 'Completed', 'Ready For Prod', 'Deployed'}, {'In-Progress', 'Development'})\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
fig.show()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
df = pd.read_csv('phone.csv')
fig = ff.create_distplot([df['Avg Rating'].tolist()], ['Samsung'],
show_hist=False)
fig.show()
<|reserved_special_token_1|>
import plotly.figure_factory as ff
import pandas as pd
import csv
df = pd.read_csv('phone.csv')
fig = ff.create_distplot([df['Avg Rating'].tolist()], ['Samsung'],
show_hist=False)
fig.show()
<|reserved_special_token_1|>
import plotly.figure_factory as ff
import pandas as pd
import csv
df=pd.read_csv("phone.csv")
fig=ff.create_distplot([df["Avg Rating"].tolist()],["Samsung"],show_hist=False)
fig.show()
|
flexible
|
{
"blob_id": "5ae4f489da7b4f0913c9b16c86cc60537cc51234",
"index": 9858,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfig.show()\n",
"step-3": "<mask token>\ndf = pd.read_csv('phone.csv')\nfig = ff.create_distplot([df['Avg Rating'].tolist()], ['Samsung'],\n show_hist=False)\nfig.show()\n",
"step-4": "import plotly.figure_factory as ff\nimport pandas as pd\nimport csv\ndf = pd.read_csv('phone.csv')\nfig = ff.create_distplot([df['Avg Rating'].tolist()], ['Samsung'],\n show_hist=False)\nfig.show()\n",
"step-5": "import plotly.figure_factory as ff\r\nimport pandas as pd\r\nimport csv\r\n\r\ndf=pd.read_csv(\"phone.csv\")\r\nfig=ff.create_distplot([df[\"Avg Rating\"].tolist()],[\"Samsung\"],show_hist=False)\r\nfig.show()",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
"""
Use the same techniques such as (but not limited to):
1) Sockets
2) File I/O
3) raw_input()
from the OSINT HW to complete this assignment. Good luck!
"""
import socket
import re
import time
host = "cornerstoneairlines.co" # IP address here
port = 45 # Port here
def execute_cmd(cmd):
"""
Sockets: https://docs.python.org/2/library/socket.html
How to use the socket s:
# Establish socket connection
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((host, port))
Reading:
data = s.recv(1024) # Receives 1024 bytes from IP/Port
print(data) # Prints data
Sending:
s.send("something to send\n") # Send a newline \n at the end of your command
"""
regex = re.match('^\s*(\w*)\s*([A-Za-z0-9.\/\-\_]*)\s*([A-Za-z0-9.\/\-\_]*)\s*$', cmd)
val = regex.group(1)
# print('val: %s' % val)
if val == 'shell':
path = '/'
while True:
usr_in = raw_input(path + ">")
if usr_in == 'exit':
break
command = ';' + ' cd ' + path + '; ' + usr_in
if ('cd' in usr_in):
# print('here')
reg = re.match('^\s*cd\s*([A-Za-z0-9.\/\-\_]*)\s*$', usr_in)
if (reg.group(1) == ''):
path = '/'
elif (reg.group(1)[0] == '/'):
path = reg.group(1)
else:
path += reg.group(1)
if (path[-1] != '/'):
path += '/'
# print('command: "%s"' % command)
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((host, port))
data = s.recv(1024)
time.sleep(2)
# print('%s' % data)
s.send(command + '\n')
time.sleep(2)
# print('"%s" sent' % command)
data = s.recv(1024)
print('%s' % data)
s.close()
elif val == 'pull':
command = '; ' + 'cat ' + regex.group(2)
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((host, port))
data = s.recv(1024)
time.sleep(2)
s.send(command + '\n')
time.sleep(2)
# print('"%s" sent' % command)
data = s.recv(1024)
# print('%s' % data)
s.close()
f = open(regex.group(3), 'w')
f.write(data)
f.close()
elif val == 'quit':
return -1
elif val == 'help':
print('shell - Drop into an interactive shell - exit with "exit"')
print('pull <remote path> <local path> - download files')
print('help - show the help menu')
print('quit - quit this program')
else:
print('invalid command')
return 0
if __name__ == '__main__':
while True:
cmd = raw_input('>')
if execute_cmd(cmd) == -1:
break
|
normal
|
{
"blob_id": "e0f25addad8af4541f1404b76d4798d2223d9715",
"index": 5116,
"step-1": "<mask token>\n\n\ndef execute_cmd(cmd):\n \"\"\"\n Sockets: https://docs.python.org/2/library/socket.html\n How to use the socket s:\n\n # Establish socket connection\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.connect((host, port))\n\n Reading:\n\n data = s.recv(1024) # Receives 1024 bytes from IP/Port\n print(data) # Prints data\n\n Sending:\n\n s.send(\"something to send\n\") # Send a newline \n at the end of your command\n \"\"\"\n regex = re.match(\n '^\\\\s*(\\\\w*)\\\\s*([A-Za-z0-9.\\\\/\\\\-\\\\_]*)\\\\s*([A-Za-z0-9.\\\\/\\\\-\\\\_]*)\\\\s*$'\n , cmd)\n val = regex.group(1)\n if val == 'shell':\n path = '/'\n while True:\n usr_in = raw_input(path + '>')\n if usr_in == 'exit':\n break\n command = ';' + ' cd ' + path + '; ' + usr_in\n if 'cd' in usr_in:\n reg = re.match('^\\\\s*cd\\\\s*([A-Za-z0-9.\\\\/\\\\-\\\\_]*)\\\\s*$',\n usr_in)\n if reg.group(1) == '':\n path = '/'\n elif reg.group(1)[0] == '/':\n path = reg.group(1)\n else:\n path += reg.group(1)\n if path[-1] != '/':\n path += '/'\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.connect((host, port))\n data = s.recv(1024)\n time.sleep(2)\n s.send(command + '\\n')\n time.sleep(2)\n data = s.recv(1024)\n print('%s' % data)\n s.close()\n elif val == 'pull':\n command = '; ' + 'cat ' + regex.group(2)\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.connect((host, port))\n data = s.recv(1024)\n time.sleep(2)\n s.send(command + '\\n')\n time.sleep(2)\n data = s.recv(1024)\n s.close()\n f = open(regex.group(3), 'w')\n f.write(data)\n f.close()\n elif val == 'quit':\n return -1\n elif val == 'help':\n print('shell - Drop into an interactive shell - exit with \"exit\"')\n print('pull <remote path> <local path> - download files')\n print('help - show the help menu')\n print('quit - quit this program')\n else:\n print('invalid command')\n return 0\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef execute_cmd(cmd):\n \"\"\"\n Sockets: https://docs.python.org/2/library/socket.html\n How to use the socket s:\n\n # Establish socket connection\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.connect((host, port))\n\n Reading:\n\n data = s.recv(1024) # Receives 1024 bytes from IP/Port\n print(data) # Prints data\n\n Sending:\n\n s.send(\"something to send\n\") # Send a newline \n at the end of your command\n \"\"\"\n regex = re.match(\n '^\\\\s*(\\\\w*)\\\\s*([A-Za-z0-9.\\\\/\\\\-\\\\_]*)\\\\s*([A-Za-z0-9.\\\\/\\\\-\\\\_]*)\\\\s*$'\n , cmd)\n val = regex.group(1)\n if val == 'shell':\n path = '/'\n while True:\n usr_in = raw_input(path + '>')\n if usr_in == 'exit':\n break\n command = ';' + ' cd ' + path + '; ' + usr_in\n if 'cd' in usr_in:\n reg = re.match('^\\\\s*cd\\\\s*([A-Za-z0-9.\\\\/\\\\-\\\\_]*)\\\\s*$',\n usr_in)\n if reg.group(1) == '':\n path = '/'\n elif reg.group(1)[0] == '/':\n path = reg.group(1)\n else:\n path += reg.group(1)\n if path[-1] != '/':\n path += '/'\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.connect((host, port))\n data = s.recv(1024)\n time.sleep(2)\n s.send(command + '\\n')\n time.sleep(2)\n data = s.recv(1024)\n print('%s' % data)\n s.close()\n elif val == 'pull':\n command = '; ' + 'cat ' + regex.group(2)\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.connect((host, port))\n data = s.recv(1024)\n time.sleep(2)\n s.send(command + '\\n')\n time.sleep(2)\n data = s.recv(1024)\n s.close()\n f = open(regex.group(3), 'w')\n f.write(data)\n f.close()\n elif val == 'quit':\n return -1\n elif val == 'help':\n print('shell - Drop into an interactive shell - exit with \"exit\"')\n print('pull <remote path> <local path> - download files')\n print('help - show the help menu')\n print('quit - quit this program')\n else:\n print('invalid command')\n return 0\n\n\nif __name__ == '__main__':\n while True:\n cmd = raw_input('>')\n if execute_cmd(cmd) == -1:\n break\n",
"step-3": "<mask token>\nhost = 'cornerstoneairlines.co'\nport = 45\n\n\ndef execute_cmd(cmd):\n \"\"\"\n Sockets: https://docs.python.org/2/library/socket.html\n How to use the socket s:\n\n # Establish socket connection\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.connect((host, port))\n\n Reading:\n\n data = s.recv(1024) # Receives 1024 bytes from IP/Port\n print(data) # Prints data\n\n Sending:\n\n s.send(\"something to send\n\") # Send a newline \n at the end of your command\n \"\"\"\n regex = re.match(\n '^\\\\s*(\\\\w*)\\\\s*([A-Za-z0-9.\\\\/\\\\-\\\\_]*)\\\\s*([A-Za-z0-9.\\\\/\\\\-\\\\_]*)\\\\s*$'\n , cmd)\n val = regex.group(1)\n if val == 'shell':\n path = '/'\n while True:\n usr_in = raw_input(path + '>')\n if usr_in == 'exit':\n break\n command = ';' + ' cd ' + path + '; ' + usr_in\n if 'cd' in usr_in:\n reg = re.match('^\\\\s*cd\\\\s*([A-Za-z0-9.\\\\/\\\\-\\\\_]*)\\\\s*$',\n usr_in)\n if reg.group(1) == '':\n path = '/'\n elif reg.group(1)[0] == '/':\n path = reg.group(1)\n else:\n path += reg.group(1)\n if path[-1] != '/':\n path += '/'\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.connect((host, port))\n data = s.recv(1024)\n time.sleep(2)\n s.send(command + '\\n')\n time.sleep(2)\n data = s.recv(1024)\n print('%s' % data)\n s.close()\n elif val == 'pull':\n command = '; ' + 'cat ' + regex.group(2)\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.connect((host, port))\n data = s.recv(1024)\n time.sleep(2)\n s.send(command + '\\n')\n time.sleep(2)\n data = s.recv(1024)\n s.close()\n f = open(regex.group(3), 'w')\n f.write(data)\n f.close()\n elif val == 'quit':\n return -1\n elif val == 'help':\n print('shell - Drop into an interactive shell - exit with \"exit\"')\n print('pull <remote path> <local path> - download files')\n print('help - show the help menu')\n print('quit - quit this program')\n else:\n print('invalid command')\n return 0\n\n\nif __name__ == '__main__':\n while True:\n cmd = raw_input('>')\n if execute_cmd(cmd) == -1:\n break\n",
"step-4": "<mask token>\nimport socket\nimport re\nimport time\nhost = 'cornerstoneairlines.co'\nport = 45\n\n\ndef execute_cmd(cmd):\n \"\"\"\n Sockets: https://docs.python.org/2/library/socket.html\n How to use the socket s:\n\n # Establish socket connection\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.connect((host, port))\n\n Reading:\n\n data = s.recv(1024) # Receives 1024 bytes from IP/Port\n print(data) # Prints data\n\n Sending:\n\n s.send(\"something to send\n\") # Send a newline \n at the end of your command\n \"\"\"\n regex = re.match(\n '^\\\\s*(\\\\w*)\\\\s*([A-Za-z0-9.\\\\/\\\\-\\\\_]*)\\\\s*([A-Za-z0-9.\\\\/\\\\-\\\\_]*)\\\\s*$'\n , cmd)\n val = regex.group(1)\n if val == 'shell':\n path = '/'\n while True:\n usr_in = raw_input(path + '>')\n if usr_in == 'exit':\n break\n command = ';' + ' cd ' + path + '; ' + usr_in\n if 'cd' in usr_in:\n reg = re.match('^\\\\s*cd\\\\s*([A-Za-z0-9.\\\\/\\\\-\\\\_]*)\\\\s*$',\n usr_in)\n if reg.group(1) == '':\n path = '/'\n elif reg.group(1)[0] == '/':\n path = reg.group(1)\n else:\n path += reg.group(1)\n if path[-1] != '/':\n path += '/'\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.connect((host, port))\n data = s.recv(1024)\n time.sleep(2)\n s.send(command + '\\n')\n time.sleep(2)\n data = s.recv(1024)\n print('%s' % data)\n s.close()\n elif val == 'pull':\n command = '; ' + 'cat ' + regex.group(2)\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.connect((host, port))\n data = s.recv(1024)\n time.sleep(2)\n s.send(command + '\\n')\n time.sleep(2)\n data = s.recv(1024)\n s.close()\n f = open(regex.group(3), 'w')\n f.write(data)\n f.close()\n elif val == 'quit':\n return -1\n elif val == 'help':\n print('shell - Drop into an interactive shell - exit with \"exit\"')\n print('pull <remote path> <local path> - download files')\n print('help - show the help menu')\n print('quit - quit this program')\n else:\n print('invalid command')\n return 0\n\n\nif __name__ == '__main__':\n while True:\n cmd = raw_input('>')\n if execute_cmd(cmd) == -1:\n break\n",
"step-5": "\"\"\"\n Use the same techniques such as (but not limited to):\n 1) Sockets\n 2) File I/O\n 3) raw_input()\n\n from the OSINT HW to complete this assignment. Good luck!\n\"\"\"\n\nimport socket\nimport re\nimport time\n\nhost = \"cornerstoneairlines.co\" # IP address here\nport = 45 # Port here\n\ndef execute_cmd(cmd):\n \"\"\"\n Sockets: https://docs.python.org/2/library/socket.html\n How to use the socket s:\n\n # Establish socket connection\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.connect((host, port))\n\n Reading:\n\n data = s.recv(1024) # Receives 1024 bytes from IP/Port\n print(data) # Prints data\n\n Sending:\n\n s.send(\"something to send\\n\") # Send a newline \\n at the end of your command\n \"\"\"\n regex = re.match('^\\s*(\\w*)\\s*([A-Za-z0-9.\\/\\-\\_]*)\\s*([A-Za-z0-9.\\/\\-\\_]*)\\s*$', cmd)\n val = regex.group(1)\n# print('val: %s' % val)\n if val == 'shell':\n path = '/'\n while True:\n usr_in = raw_input(path + \">\")\n if usr_in == 'exit':\n break\n command = ';' + ' cd ' + path + '; ' + usr_in\n if ('cd' in usr_in):\n# print('here')\n reg = re.match('^\\s*cd\\s*([A-Za-z0-9.\\/\\-\\_]*)\\s*$', usr_in)\n if (reg.group(1) == ''):\n path = '/'\n elif (reg.group(1)[0] == '/'):\n path = reg.group(1)\n else:\n path += reg.group(1)\n if (path[-1] != '/'):\n path += '/'\n# print('command: \"%s\"' % command)\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.connect((host, port))\n data = s.recv(1024)\n time.sleep(2)\n# print('%s' % data)\n s.send(command + '\\n')\n time.sleep(2)\n# print('\"%s\" sent' % command)\n data = s.recv(1024)\n print('%s' % data)\n s.close()\n elif val == 'pull':\n command = '; ' + 'cat ' + regex.group(2)\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.connect((host, port))\n data = s.recv(1024)\n time.sleep(2)\n s.send(command + '\\n')\n time.sleep(2)\n# print('\"%s\" sent' % command)\n data = s.recv(1024)\n# print('%s' % data)\n s.close()\n f = open(regex.group(3), 'w')\n f.write(data)\n f.close()\n elif val == 'quit':\n return -1\n elif val == 'help':\n print('shell - Drop into an interactive shell - exit with \"exit\"')\n print('pull <remote path> <local path> - download files')\n print('help - show the help menu')\n print('quit - quit this program')\n else:\n print('invalid command')\n\n return 0\n\n\n\n\nif __name__ == '__main__':\n while True:\n cmd = raw_input('>')\n if execute_cmd(cmd) == -1:\n break\n\n\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
#!/usr/bin/env python
###############################################################################
#
#
# Project:
# Purpose:
#
#
# Author: Massimo Di Stefano , [email protected]
#
###############################################################################
# Copyright (c) 2009, Massimo Di Stefano <[email protected]>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
###############################################################################
__author__ = "Massimo Di Stefano"
__copyright__ = "Copyright 2009, gfoss.it"
__credits__ = [""]
__license__ = "GPL V3"
__version__ = "1.0.0"
__maintainer__ = "Massimo Di Stefano"
__email__ = "[email protected]"
__status__ = "Production"
__date__ = ""
try:
from osgeo import osr, ogr, gdal
except ImportError:
import osr, ogr, gdal
import string
import sys
def GeomType2Name(type):
if type == ogr.wkbUnknown:
return 'wkbUnknown'
elif type == ogr.wkbPoint:
return 'wkbPoint'
elif type == ogr.wkbLineString:
return 'wkbLineString'
elif type == ogr.wkbPolygon:
return 'wkbPolygon'
elif type == ogr.wkbMultiPoint:
return 'wkbMultiPoint'
elif type == ogr.wkbMultiLineString:
return 'wkbMultiLineString'
elif type == ogr.wkbMultiPolygon:
return 'wkbMultiPolygon'
elif type == ogr.wkbGeometryCollection:
return 'wkbGeometryCollection'
elif type == ogr.wkbNone:
return 'wkbNone'
elif type == ogr.wkbLinearRing:
return 'wkbLinearRing'
else:
return 'wkbUnknown'
def Esc(x):
return gdal.EscapeString(x, gdal.CPLES_XML)
def makestile(outfile, brush, pen, size, fill, thickness):
brush = brush.split(',')
pen = pen.split(',')
size = size.split(',')
outfile = outfile.replace('.vrt', '')
outfile = outfile + '.omd'
omd = '// vector file rendering options\n'
omd += 'brush_color: %s %s %s \n' % (brush[0], brush[1], brush[2])
omd += 'pen_color: %s %s %s \n' % (pen[0], pen[1], pen[2])
omd += 'point_width_height: %s %s \n' % (size[0], size[1])
omd += 'fill_flag: %s \n' % (fill)
omd += 'thickness: %s \n' % (thickness)
open(outfile, 'w').write(omd)
def ogrvrt(infile, outfile):
layer_list = []
relative = "0"
schema = 0
print infile
src_ds = ogr.Open(infile, update=0)
if len(layer_list) == 0:
for layer in src_ds:
layer_list.append(layer.GetLayerDefn().GetName())
vrt = '<OGRVRTDataSource>\n'
for name in layer_list:
layer = src_ds.GetLayerByName(name)
layerdef = layer.GetLayerDefn()
vrt += ' <OGRVRTLayer name="%s">\n' % Esc(name)
vrt += ' <SrcDataSource relativeToVRT="%s" shared="%d">%s</SrcDataSource>\n' \
% (relative, not schema, Esc(infile))
if schema:
vrt += ' <SrcLayer>@dummy@</SrcLayer>\n'
else:
vrt += ' <SrcLayer>%s</SrcLayer>\n' % Esc(name)
vrt += ' <GeometryType>%s</GeometryType>\n' \
% GeomType2Name(layerdef.GetGeomType())
srs = layer.GetSpatialRef()
if srs is not None:
vrt += ' <LayerSRS>%s</LayerSRS>\n' \
% (Esc(srs.ExportToWkt()))
# Process all the fields.
for fld_index in range(layerdef.GetFieldCount()):
src_fd = layerdef.GetFieldDefn(fld_index)
if src_fd.GetType() == ogr.OFTInteger:
type = 'Integer'
elif src_fd.GetType() == ogr.OFTString:
type = 'String'
elif src_fd.GetType() == ogr.OFTReal:
type = 'Real'
elif src_fd.GetType() == ogr.OFTStringList:
type = 'StringList'
elif src_fd.GetType() == ogr.OFTIntegerList:
type = 'IntegerList'
elif src_fd.GetType() == ogr.OFTRealList:
type = 'RealList'
elif src_fd.GetType() == ogr.OFTBinary:
type = 'Binary'
elif src_fd.GetType() == ogr.OFTDate:
type = 'Date'
elif src_fd.GetType() == ogr.OFTTime:
type = 'Time'
elif src_fd.GetType() == ogr.OFTDateTime:
type = 'DateTime'
else:
type = 'String'
vrt += ' <Field name="%s" type="%s"' \
% (Esc(src_fd.GetName()), type)
if not schema:
vrt += ' src="%s"' % Esc(src_fd.GetName())
if src_fd.GetWidth() > 0:
vrt += ' width="%d"' % src_fd.GetWidth()
if src_fd.GetPrecision() > 0:
vrt += ' precision="%d"' % src_fd.GetPrecision()
vrt += '/>\n'
vrt += ' </OGRVRTLayer>\n'
vrt += '</OGRVRTDataSource>\n'
file = open(outfile, 'w')
file.write(vrt)
file.close()
print 'vrt wroted'
|
normal
|
{
"blob_id": "59338170b44be037f749790a7942c2bcca1fc078",
"index": 2434,
"step-1": "#!/usr/bin/env python\n###############################################################################\n#\n#\n# Project:\n# Purpose:\n#\n#\n# Author: Massimo Di Stefano , [email protected]\n#\n###############################################################################\n# Copyright (c) 2009, Massimo Di Stefano <[email protected]>\n#\n# Permission is hereby granted, free of charge, to any person obtaining a\n# copy of this software and associated documentation files (the \"Software\"),\n# to deal in the Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish, distribute, sublicense,\n# and/or sell copies of the Software, and to permit persons to whom the\n# Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included\n# in all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS\n# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL\n# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER\n# DEALINGS IN THE SOFTWARE.\n###############################################################################\n\n__author__ = \"Massimo Di Stefano\"\n__copyright__ = \"Copyright 2009, gfoss.it\"\n__credits__ = [\"\"]\n__license__ = \"GPL V3\"\n__version__ = \"1.0.0\"\n__maintainer__ = \"Massimo Di Stefano\"\n__email__ = \"[email protected]\"\n__status__ = \"Production\"\n__date__ = \"\"\n\ntry:\n from osgeo import osr, ogr, gdal\nexcept ImportError:\n import osr, ogr, gdal\n\nimport string\nimport sys\n\n\ndef GeomType2Name(type):\n if type == ogr.wkbUnknown:\n return 'wkbUnknown'\n elif type == ogr.wkbPoint:\n return 'wkbPoint'\n elif type == ogr.wkbLineString:\n return 'wkbLineString'\n elif type == ogr.wkbPolygon:\n return 'wkbPolygon'\n elif type == ogr.wkbMultiPoint:\n return 'wkbMultiPoint'\n elif type == ogr.wkbMultiLineString:\n return 'wkbMultiLineString'\n elif type == ogr.wkbMultiPolygon:\n return 'wkbMultiPolygon'\n elif type == ogr.wkbGeometryCollection:\n return 'wkbGeometryCollection'\n elif type == ogr.wkbNone:\n return 'wkbNone'\n elif type == ogr.wkbLinearRing:\n return 'wkbLinearRing'\n else:\n return 'wkbUnknown'\n\n\ndef Esc(x):\n return gdal.EscapeString(x, gdal.CPLES_XML)\n\n\ndef makestile(outfile, brush, pen, size, fill, thickness):\n brush = brush.split(',')\n pen = pen.split(',')\n size = size.split(',')\n outfile = outfile.replace('.vrt', '')\n outfile = outfile + '.omd'\n omd = '// vector file rendering options\\n'\n omd += 'brush_color: %s %s %s \\n' % (brush[0], brush[1], brush[2])\n omd += 'pen_color: %s %s %s \\n' % (pen[0], pen[1], pen[2])\n omd += 'point_width_height: %s %s \\n' % (size[0], size[1])\n omd += 'fill_flag: %s \\n' % (fill)\n omd += 'thickness: %s \\n' % (thickness)\n open(outfile, 'w').write(omd)\n\n\ndef ogrvrt(infile, outfile):\n layer_list = []\n relative = \"0\"\n schema = 0\n print infile\n src_ds = ogr.Open(infile, update=0)\n if len(layer_list) == 0:\n for layer in src_ds:\n layer_list.append(layer.GetLayerDefn().GetName())\n vrt = '<OGRVRTDataSource>\\n'\n for name in layer_list:\n layer = src_ds.GetLayerByName(name)\n layerdef = layer.GetLayerDefn()\n vrt += ' 
<OGRVRTLayer name=\"%s\">\\n' % Esc(name)\n vrt += ' <SrcDataSource relativeToVRT=\"%s\" shared=\"%d\">%s</SrcDataSource>\\n' \\\n % (relative, not schema, Esc(infile))\n if schema:\n vrt += ' <SrcLayer>@dummy@</SrcLayer>\\n'\n else:\n vrt += ' <SrcLayer>%s</SrcLayer>\\n' % Esc(name)\n vrt += ' <GeometryType>%s</GeometryType>\\n' \\\n % GeomType2Name(layerdef.GetGeomType())\n srs = layer.GetSpatialRef()\n if srs is not None:\n vrt += ' <LayerSRS>%s</LayerSRS>\\n' \\\n % (Esc(srs.ExportToWkt()))\n # Process all the fields.\n for fld_index in range(layerdef.GetFieldCount()):\n src_fd = layerdef.GetFieldDefn(fld_index)\n if src_fd.GetType() == ogr.OFTInteger:\n type = 'Integer'\n elif src_fd.GetType() == ogr.OFTString:\n type = 'String'\n elif src_fd.GetType() == ogr.OFTReal:\n type = 'Real'\n elif src_fd.GetType() == ogr.OFTStringList:\n type = 'StringList'\n elif src_fd.GetType() == ogr.OFTIntegerList:\n type = 'IntegerList'\n elif src_fd.GetType() == ogr.OFTRealList:\n type = 'RealList'\n elif src_fd.GetType() == ogr.OFTBinary:\n type = 'Binary'\n elif src_fd.GetType() == ogr.OFTDate:\n type = 'Date'\n elif src_fd.GetType() == ogr.OFTTime:\n type = 'Time'\n elif src_fd.GetType() == ogr.OFTDateTime:\n type = 'DateTime'\n else:\n type = 'String'\n\n vrt += ' <Field name=\"%s\" type=\"%s\"' \\\n % (Esc(src_fd.GetName()), type)\n if not schema:\n vrt += ' src=\"%s\"' % Esc(src_fd.GetName())\n if src_fd.GetWidth() > 0:\n vrt += ' width=\"%d\"' % src_fd.GetWidth()\n if src_fd.GetPrecision() > 0:\n vrt += ' precision=\"%d\"' % src_fd.GetPrecision()\n vrt += '/>\\n'\n vrt += ' </OGRVRTLayer>\\n'\n vrt += '</OGRVRTDataSource>\\n'\n file = open(outfile, 'w')\n file.write(vrt)\n file.close()\n print 'vrt wroted'\n\t\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
import re
from collections import OrderedDict
OPENING_TAG = '<{}>'
CLOSING_TAG= '</{}>'
U_LIST = '<ul>{}</ul>'
LIST_ITEM = '<li>{}</li>'
STRONG = '<strong>{}</strong>'
ITALIC = '<em>{}</em>'
PARAGRAPH = '<p>{}</p>'
HEADERS = OrderedDict({'######': 'h6',
'#####': 'h5',
'####': 'h4',
'###:': 'h3',
'##': 'h2',
'#': 'h1'})
def replace_header_tags(l=''):
for k,v in HEADERS.items():
line_with_header = re.match(f'{k} (.*)', l)
if line_with_header:
rest_string = line_with_header.group(1)
return OPENING_TAG.format(v) + rest_string + CLOSING_TAG.format(v)
return l
def replace_bold_tags(l=''):
line_with_bold = re.match('(.*)__(.*)__(.*)', l)
if line_with_bold:
return line_with_bold.group(1) + \
STRONG.format(line_with_bold.group(2)) + line_with_bold.group(3)
return l
def replace_italic_tags(l=''):
line_with_ital = re.match('(.*)_(.*)_(.*)', l)
if line_with_ital:
return line_with_ital.group(1) + \
ITALIC.format(line_with_ital.group(2)) + line_with_ital.group(3)
return l
def apply_p_tag_if_no_tag(l=''):
return l if re.match('<h|<ul|<p|<li', l) else PARAGRAPH.format(l)
def check_if_list_item(l=''):
list_item = re.match(r'\* (.*)', l)
if list_item:
return LIST_ITEM.format(list_item.group(1))
return False
def is_last_line(i, _list):
return _list.index(i) == len(_list) - 1
def parse(markdown):
lines = markdown.split('\n')
res = ''
current_list = ''
for i in lines:
line = replace_header_tags(i)
line = replace_bold_tags(line)
line = replace_italic_tags(line)
list_item = check_if_list_item(line)
if list_item:
current_list += list_item
res += U_LIST.format(current_list) if is_last_line(i, lines) else ''
else:
res += U_LIST.format(current_list) if current_list else ''
current_list = ''
res += apply_p_tag_if_no_tag(line)
return res
|
normal
|
{
"blob_id": "6b0b60ec571cf026d0f0cff3d9517362c16b459b",
"index": 6092,
"step-1": "<mask token>\n\n\ndef replace_bold_tags(l=''):\n line_with_bold = re.match('(.*)__(.*)__(.*)', l)\n if line_with_bold:\n return line_with_bold.group(1) + STRONG.format(line_with_bold.group(2)\n ) + line_with_bold.group(3)\n return l\n\n\n<mask token>\n\n\ndef apply_p_tag_if_no_tag(l=''):\n return l if re.match('<h|<ul|<p|<li', l) else PARAGRAPH.format(l)\n\n\ndef check_if_list_item(l=''):\n list_item = re.match('\\\\* (.*)', l)\n if list_item:\n return LIST_ITEM.format(list_item.group(1))\n return False\n\n\ndef is_last_line(i, _list):\n return _list.index(i) == len(_list) - 1\n\n\ndef parse(markdown):\n lines = markdown.split('\\n')\n res = ''\n current_list = ''\n for i in lines:\n line = replace_header_tags(i)\n line = replace_bold_tags(line)\n line = replace_italic_tags(line)\n list_item = check_if_list_item(line)\n if list_item:\n current_list += list_item\n res += U_LIST.format(current_list) if is_last_line(i, lines\n ) else ''\n else:\n res += U_LIST.format(current_list) if current_list else ''\n current_list = ''\n res += apply_p_tag_if_no_tag(line)\n return res\n",
"step-2": "<mask token>\n\n\ndef replace_bold_tags(l=''):\n line_with_bold = re.match('(.*)__(.*)__(.*)', l)\n if line_with_bold:\n return line_with_bold.group(1) + STRONG.format(line_with_bold.group(2)\n ) + line_with_bold.group(3)\n return l\n\n\ndef replace_italic_tags(l=''):\n line_with_ital = re.match('(.*)_(.*)_(.*)', l)\n if line_with_ital:\n return line_with_ital.group(1) + ITALIC.format(line_with_ital.group(2)\n ) + line_with_ital.group(3)\n return l\n\n\ndef apply_p_tag_if_no_tag(l=''):\n return l if re.match('<h|<ul|<p|<li', l) else PARAGRAPH.format(l)\n\n\ndef check_if_list_item(l=''):\n list_item = re.match('\\\\* (.*)', l)\n if list_item:\n return LIST_ITEM.format(list_item.group(1))\n return False\n\n\ndef is_last_line(i, _list):\n return _list.index(i) == len(_list) - 1\n\n\ndef parse(markdown):\n lines = markdown.split('\\n')\n res = ''\n current_list = ''\n for i in lines:\n line = replace_header_tags(i)\n line = replace_bold_tags(line)\n line = replace_italic_tags(line)\n list_item = check_if_list_item(line)\n if list_item:\n current_list += list_item\n res += U_LIST.format(current_list) if is_last_line(i, lines\n ) else ''\n else:\n res += U_LIST.format(current_list) if current_list else ''\n current_list = ''\n res += apply_p_tag_if_no_tag(line)\n return res\n",
"step-3": "<mask token>\nOPENING_TAG = '<{}>'\nCLOSING_TAG = '</{}>'\nU_LIST = '<ul>{}</ul>'\nLIST_ITEM = '<li>{}</li>'\nSTRONG = '<strong>{}</strong>'\nITALIC = '<em>{}</em>'\nPARAGRAPH = '<p>{}</p>'\nHEADERS = OrderedDict({'######': 'h6', '#####': 'h5', '####': 'h4', '###:':\n 'h3', '##': 'h2', '#': 'h1'})\n\n\ndef replace_header_tags(l=''):\n for k, v in HEADERS.items():\n line_with_header = re.match(f'{k} (.*)', l)\n if line_with_header:\n rest_string = line_with_header.group(1)\n return OPENING_TAG.format(v) + rest_string + CLOSING_TAG.format(v)\n return l\n\n\ndef replace_bold_tags(l=''):\n line_with_bold = re.match('(.*)__(.*)__(.*)', l)\n if line_with_bold:\n return line_with_bold.group(1) + STRONG.format(line_with_bold.group(2)\n ) + line_with_bold.group(3)\n return l\n\n\ndef replace_italic_tags(l=''):\n line_with_ital = re.match('(.*)_(.*)_(.*)', l)\n if line_with_ital:\n return line_with_ital.group(1) + ITALIC.format(line_with_ital.group(2)\n ) + line_with_ital.group(3)\n return l\n\n\ndef apply_p_tag_if_no_tag(l=''):\n return l if re.match('<h|<ul|<p|<li', l) else PARAGRAPH.format(l)\n\n\ndef check_if_list_item(l=''):\n list_item = re.match('\\\\* (.*)', l)\n if list_item:\n return LIST_ITEM.format(list_item.group(1))\n return False\n\n\ndef is_last_line(i, _list):\n return _list.index(i) == len(_list) - 1\n\n\ndef parse(markdown):\n lines = markdown.split('\\n')\n res = ''\n current_list = ''\n for i in lines:\n line = replace_header_tags(i)\n line = replace_bold_tags(line)\n line = replace_italic_tags(line)\n list_item = check_if_list_item(line)\n if list_item:\n current_list += list_item\n res += U_LIST.format(current_list) if is_last_line(i, lines\n ) else ''\n else:\n res += U_LIST.format(current_list) if current_list else ''\n current_list = ''\n res += apply_p_tag_if_no_tag(line)\n return res\n",
"step-4": "import re\nfrom collections import OrderedDict\nOPENING_TAG = '<{}>'\nCLOSING_TAG = '</{}>'\nU_LIST = '<ul>{}</ul>'\nLIST_ITEM = '<li>{}</li>'\nSTRONG = '<strong>{}</strong>'\nITALIC = '<em>{}</em>'\nPARAGRAPH = '<p>{}</p>'\nHEADERS = OrderedDict({'######': 'h6', '#####': 'h5', '####': 'h4', '###:':\n 'h3', '##': 'h2', '#': 'h1'})\n\n\ndef replace_header_tags(l=''):\n for k, v in HEADERS.items():\n line_with_header = re.match(f'{k} (.*)', l)\n if line_with_header:\n rest_string = line_with_header.group(1)\n return OPENING_TAG.format(v) + rest_string + CLOSING_TAG.format(v)\n return l\n\n\ndef replace_bold_tags(l=''):\n line_with_bold = re.match('(.*)__(.*)__(.*)', l)\n if line_with_bold:\n return line_with_bold.group(1) + STRONG.format(line_with_bold.group(2)\n ) + line_with_bold.group(3)\n return l\n\n\ndef replace_italic_tags(l=''):\n line_with_ital = re.match('(.*)_(.*)_(.*)', l)\n if line_with_ital:\n return line_with_ital.group(1) + ITALIC.format(line_with_ital.group(2)\n ) + line_with_ital.group(3)\n return l\n\n\ndef apply_p_tag_if_no_tag(l=''):\n return l if re.match('<h|<ul|<p|<li', l) else PARAGRAPH.format(l)\n\n\ndef check_if_list_item(l=''):\n list_item = re.match('\\\\* (.*)', l)\n if list_item:\n return LIST_ITEM.format(list_item.group(1))\n return False\n\n\ndef is_last_line(i, _list):\n return _list.index(i) == len(_list) - 1\n\n\ndef parse(markdown):\n lines = markdown.split('\\n')\n res = ''\n current_list = ''\n for i in lines:\n line = replace_header_tags(i)\n line = replace_bold_tags(line)\n line = replace_italic_tags(line)\n list_item = check_if_list_item(line)\n if list_item:\n current_list += list_item\n res += U_LIST.format(current_list) if is_last_line(i, lines\n ) else ''\n else:\n res += U_LIST.format(current_list) if current_list else ''\n current_list = ''\n res += apply_p_tag_if_no_tag(line)\n return res\n",
"step-5": "import re\nfrom collections import OrderedDict\n\nOPENING_TAG = '<{}>'\nCLOSING_TAG= '</{}>'\nU_LIST = '<ul>{}</ul>'\nLIST_ITEM = '<li>{}</li>'\nSTRONG = '<strong>{}</strong>'\nITALIC = '<em>{}</em>'\nPARAGRAPH = '<p>{}</p>'\nHEADERS = OrderedDict({'######': 'h6',\n '#####': 'h5',\n '####': 'h4',\n '###:': 'h3',\n '##': 'h2',\n '#': 'h1'})\n\n\ndef replace_header_tags(l=''):\n for k,v in HEADERS.items():\n line_with_header = re.match(f'{k} (.*)', l)\n if line_with_header:\n rest_string = line_with_header.group(1)\n return OPENING_TAG.format(v) + rest_string + CLOSING_TAG.format(v)\n return l\n\n\ndef replace_bold_tags(l=''):\n line_with_bold = re.match('(.*)__(.*)__(.*)', l)\n if line_with_bold:\n return line_with_bold.group(1) + \\\n STRONG.format(line_with_bold.group(2)) + line_with_bold.group(3)\n return l\n\n\ndef replace_italic_tags(l=''):\n line_with_ital = re.match('(.*)_(.*)_(.*)', l)\n if line_with_ital:\n return line_with_ital.group(1) + \\\n ITALIC.format(line_with_ital.group(2)) + line_with_ital.group(3)\n return l\n\n\ndef apply_p_tag_if_no_tag(l=''):\n return l if re.match('<h|<ul|<p|<li', l) else PARAGRAPH.format(l)\n\n\ndef check_if_list_item(l=''):\n list_item = re.match(r'\\* (.*)', l)\n if list_item:\n return LIST_ITEM.format(list_item.group(1))\n return False\n\n\ndef is_last_line(i, _list):\n return _list.index(i) == len(_list) - 1\n\n\ndef parse(markdown):\n lines = markdown.split('\\n')\n res = ''\n current_list = ''\n for i in lines:\n line = replace_header_tags(i)\n line = replace_bold_tags(line)\n line = replace_italic_tags(line)\n\n list_item = check_if_list_item(line)\n if list_item:\n current_list += list_item\n res += U_LIST.format(current_list) if is_last_line(i, lines) else ''\n else:\n res += U_LIST.format(current_list) if current_list else ''\n current_list = ''\n res += apply_p_tag_if_no_tag(line)\n return res\n",
"step-ids": [
5,
6,
8,
9,
10
]
}
|
[
5,
6,
8,
9,
10
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
charObj.append(seriesObj)
charObj.set_categories(cat)
sheet.add_chart(charObj, 'I2')
<|reserved_special_token_0|>
charObj.append(seriesObj)
charObj.set_categories(cat)
sheet.add_chart(charObj, 'I18')
<|reserved_special_token_0|>
charObj.append(seriesObj)
charObj.set_categories(cat)
sheet.add_chart(charObj, 'Q2')
wb.save('stu_scores _Grade.xlsx')
wb.close()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
wb = openpyxl.load_workbook('/Users/mac/Desktop/stu_scores _Grade 2.xlsx')
sheet = wb['stu_scores_01']
data = openpyxl.chart.Reference(sheet, min_col=3, min_row=34, max_row=34,
max_col=7)
cat = openpyxl.chart.Reference(sheet, min_col=3, min_row=1, max_col=7,
max_row=1)
seriesObj = openpyxl.chart.Series(data, title='bar chart of each subject',
title_from_data=True)
charObj = openpyxl.chart.BarChart()
charObj.title = 'My Bar Chart'
charObj.x_axis.title = 'bar chart of each subject'
charObj.append(seriesObj)
charObj.set_categories(cat)
sheet.add_chart(charObj, 'I2')
data = openpyxl.chart.Reference(sheet, min_col=3, min_row=35, max_row=35,
max_col=7)
cat = openpyxl.chart.Reference(sheet, min_col=3, min_row=1, max_col=7,
max_row=1)
seriesObj = openpyxl.chart.Series(data, title=
'bar chart of boys each subject', title_from_data=True)
charObj = openpyxl.chart.BarChart()
charObj.title = 'My Bar Chart'
charObj.x_axis.title = 'Boys each subject'
charObj.append(seriesObj)
charObj.set_categories(cat)
sheet.add_chart(charObj, 'I18')
data = openpyxl.chart.Reference(sheet, min_col=3, min_row=36, max_row=36,
max_col=7)
cat = openpyxl.chart.Reference(sheet, min_col=3, min_row=1, max_col=7,
max_row=1)
seriesObj = openpyxl.chart.Series(data, title=
'bar chart of girls each subject', title_from_data=True)
charObj = openpyxl.chart.BarChart()
charObj.title = 'My Bar Chart'
charObj.x_axis.title = 'girls each subject'
charObj.append(seriesObj)
charObj.set_categories(cat)
sheet.add_chart(charObj, 'Q2')
wb.save('stu_scores _Grade.xlsx')
wb.close()
<|reserved_special_token_1|>
import openpyxl
from openpyxl import Workbook
import openpyxl as openpyxl
from openpyxl.chart import BarChart
wb = openpyxl.load_workbook('/Users/mac/Desktop/stu_scores _Grade 2.xlsx')
sheet = wb['stu_scores_01']
data = openpyxl.chart.Reference(sheet, min_col=3, min_row=34, max_row=34,
max_col=7)
cat = openpyxl.chart.Reference(sheet, min_col=3, min_row=1, max_col=7,
max_row=1)
seriesObj = openpyxl.chart.Series(data, title='bar chart of each subject',
title_from_data=True)
charObj = openpyxl.chart.BarChart()
charObj.title = 'My Bar Chart'
charObj.x_axis.title = 'bar chart of each subject'
charObj.append(seriesObj)
charObj.set_categories(cat)
sheet.add_chart(charObj, 'I2')
data = openpyxl.chart.Reference(sheet, min_col=3, min_row=35, max_row=35,
max_col=7)
cat = openpyxl.chart.Reference(sheet, min_col=3, min_row=1, max_col=7,
max_row=1)
seriesObj = openpyxl.chart.Series(data, title=
'bar chart of boys each subject', title_from_data=True)
charObj = openpyxl.chart.BarChart()
charObj.title = 'My Bar Chart'
charObj.x_axis.title = 'Boys each subject'
charObj.append(seriesObj)
charObj.set_categories(cat)
sheet.add_chart(charObj, 'I18')
data = openpyxl.chart.Reference(sheet, min_col=3, min_row=36, max_row=36,
max_col=7)
cat = openpyxl.chart.Reference(sheet, min_col=3, min_row=1, max_col=7,
max_row=1)
seriesObj = openpyxl.chart.Series(data, title=
'bar chart of girls each subject', title_from_data=True)
charObj = openpyxl.chart.BarChart()
charObj.title = 'My Bar Chart'
charObj.x_axis.title = 'girls each subject'
charObj.append(seriesObj)
charObj.set_categories(cat)
sheet.add_chart(charObj, 'Q2')
wb.save('stu_scores _Grade.xlsx')
wb.close()
<|reserved_special_token_1|>
import openpyxl
from openpyxl import Workbook
import openpyxl as openpyxl
from openpyxl.chart import BarChart
wb = openpyxl.load_workbook('/Users/mac/Desktop/stu_scores _Grade 2.xlsx')
sheet = wb['stu_scores_01']
data = openpyxl.chart.Reference(sheet, min_col=3, min_row=34, max_row=34,max_col=7)
cat = openpyxl.chart.Reference(sheet, min_col=3, min_row=1, max_col=7,max_row=1)
seriesObj = openpyxl.chart.Series(data, title="bar chart of each subject", title_from_data=True)
charObj = openpyxl.chart.BarChart()
charObj.title = "My Bar Chart"
charObj.x_axis.title = 'bar chart of each subject'
charObj.append(seriesObj)
charObj.set_categories(cat)
sheet.add_chart(charObj,"I2")
#new one
data = openpyxl.chart.Reference(sheet, min_col=3, min_row=35, max_row=35,max_col=7)
cat = openpyxl.chart.Reference(sheet, min_col=3, min_row=1, max_col=7,max_row=1)
seriesObj = openpyxl.chart.Series(data, title="bar chart of boys each subject", title_from_data=True)
charObj = openpyxl.chart.BarChart()
charObj.title = "My Bar Chart"
charObj.x_axis.title = 'Boys each subject'
charObj.append(seriesObj)
charObj.set_categories(cat)
sheet.add_chart(charObj,"I18")
data = openpyxl.chart.Reference(sheet, min_col=3, min_row=36, max_row=36,max_col=7)
cat = openpyxl.chart.Reference(sheet, min_col=3, min_row=1, max_col=7,max_row=1)
seriesObj = openpyxl.chart.Series(data, title="bar chart of girls each subject", title_from_data=True)
charObj = openpyxl.chart.BarChart()
charObj.title = "My Bar Chart"
charObj.x_axis.title = 'girls each subject'
charObj.append(seriesObj)
charObj.set_categories(cat)
sheet.add_chart(charObj,"Q2")
wb.save('stu_scores _Grade.xlsx')
wb.close()
|
flexible
|
{
"blob_id": "bb9ff561ff94bbe4d20f14287ba313386ea78609",
"index": 9121,
"step-1": "<mask token>\n",
"step-2": "<mask token>\ncharObj.append(seriesObj)\ncharObj.set_categories(cat)\nsheet.add_chart(charObj, 'I2')\n<mask token>\ncharObj.append(seriesObj)\ncharObj.set_categories(cat)\nsheet.add_chart(charObj, 'I18')\n<mask token>\ncharObj.append(seriesObj)\ncharObj.set_categories(cat)\nsheet.add_chart(charObj, 'Q2')\nwb.save('stu_scores _Grade.xlsx')\nwb.close()\n",
"step-3": "<mask token>\nwb = openpyxl.load_workbook('/Users/mac/Desktop/stu_scores _Grade 2.xlsx')\nsheet = wb['stu_scores_01']\ndata = openpyxl.chart.Reference(sheet, min_col=3, min_row=34, max_row=34,\n max_col=7)\ncat = openpyxl.chart.Reference(sheet, min_col=3, min_row=1, max_col=7,\n max_row=1)\nseriesObj = openpyxl.chart.Series(data, title='bar chart of each subject',\n title_from_data=True)\ncharObj = openpyxl.chart.BarChart()\ncharObj.title = 'My Bar Chart'\ncharObj.x_axis.title = 'bar chart of each subject'\ncharObj.append(seriesObj)\ncharObj.set_categories(cat)\nsheet.add_chart(charObj, 'I2')\ndata = openpyxl.chart.Reference(sheet, min_col=3, min_row=35, max_row=35,\n max_col=7)\ncat = openpyxl.chart.Reference(sheet, min_col=3, min_row=1, max_col=7,\n max_row=1)\nseriesObj = openpyxl.chart.Series(data, title=\n 'bar chart of boys each subject', title_from_data=True)\ncharObj = openpyxl.chart.BarChart()\ncharObj.title = 'My Bar Chart'\ncharObj.x_axis.title = 'Boys each subject'\ncharObj.append(seriesObj)\ncharObj.set_categories(cat)\nsheet.add_chart(charObj, 'I18')\ndata = openpyxl.chart.Reference(sheet, min_col=3, min_row=36, max_row=36,\n max_col=7)\ncat = openpyxl.chart.Reference(sheet, min_col=3, min_row=1, max_col=7,\n max_row=1)\nseriesObj = openpyxl.chart.Series(data, title=\n 'bar chart of girls each subject', title_from_data=True)\ncharObj = openpyxl.chart.BarChart()\ncharObj.title = 'My Bar Chart'\ncharObj.x_axis.title = 'girls each subject'\ncharObj.append(seriesObj)\ncharObj.set_categories(cat)\nsheet.add_chart(charObj, 'Q2')\nwb.save('stu_scores _Grade.xlsx')\nwb.close()\n",
"step-4": "import openpyxl\nfrom openpyxl import Workbook\nimport openpyxl as openpyxl\nfrom openpyxl.chart import BarChart\nwb = openpyxl.load_workbook('/Users/mac/Desktop/stu_scores _Grade 2.xlsx')\nsheet = wb['stu_scores_01']\ndata = openpyxl.chart.Reference(sheet, min_col=3, min_row=34, max_row=34,\n max_col=7)\ncat = openpyxl.chart.Reference(sheet, min_col=3, min_row=1, max_col=7,\n max_row=1)\nseriesObj = openpyxl.chart.Series(data, title='bar chart of each subject',\n title_from_data=True)\ncharObj = openpyxl.chart.BarChart()\ncharObj.title = 'My Bar Chart'\ncharObj.x_axis.title = 'bar chart of each subject'\ncharObj.append(seriesObj)\ncharObj.set_categories(cat)\nsheet.add_chart(charObj, 'I2')\ndata = openpyxl.chart.Reference(sheet, min_col=3, min_row=35, max_row=35,\n max_col=7)\ncat = openpyxl.chart.Reference(sheet, min_col=3, min_row=1, max_col=7,\n max_row=1)\nseriesObj = openpyxl.chart.Series(data, title=\n 'bar chart of boys each subject', title_from_data=True)\ncharObj = openpyxl.chart.BarChart()\ncharObj.title = 'My Bar Chart'\ncharObj.x_axis.title = 'Boys each subject'\ncharObj.append(seriesObj)\ncharObj.set_categories(cat)\nsheet.add_chart(charObj, 'I18')\ndata = openpyxl.chart.Reference(sheet, min_col=3, min_row=36, max_row=36,\n max_col=7)\ncat = openpyxl.chart.Reference(sheet, min_col=3, min_row=1, max_col=7,\n max_row=1)\nseriesObj = openpyxl.chart.Series(data, title=\n 'bar chart of girls each subject', title_from_data=True)\ncharObj = openpyxl.chart.BarChart()\ncharObj.title = 'My Bar Chart'\ncharObj.x_axis.title = 'girls each subject'\ncharObj.append(seriesObj)\ncharObj.set_categories(cat)\nsheet.add_chart(charObj, 'Q2')\nwb.save('stu_scores _Grade.xlsx')\nwb.close()\n",
"step-5": "import openpyxl\nfrom openpyxl import Workbook\nimport openpyxl as openpyxl\nfrom openpyxl.chart import BarChart\n\nwb = openpyxl.load_workbook('/Users/mac/Desktop/stu_scores _Grade 2.xlsx')\nsheet = wb['stu_scores_01']\n\ndata = openpyxl.chart.Reference(sheet, min_col=3, min_row=34, max_row=34,max_col=7)\ncat = openpyxl.chart.Reference(sheet, min_col=3, min_row=1, max_col=7,max_row=1)\n\nseriesObj = openpyxl.chart.Series(data, title=\"bar chart of each subject\", title_from_data=True)\ncharObj = openpyxl.chart.BarChart()\ncharObj.title = \"My Bar Chart\"\ncharObj.x_axis.title = 'bar chart of each subject'\ncharObj.append(seriesObj)\ncharObj.set_categories(cat)\nsheet.add_chart(charObj,\"I2\")\n#new one\ndata = openpyxl.chart.Reference(sheet, min_col=3, min_row=35, max_row=35,max_col=7)\ncat = openpyxl.chart.Reference(sheet, min_col=3, min_row=1, max_col=7,max_row=1)\n\nseriesObj = openpyxl.chart.Series(data, title=\"bar chart of boys each subject\", title_from_data=True)\ncharObj = openpyxl.chart.BarChart()\ncharObj.title = \"My Bar Chart\"\ncharObj.x_axis.title = 'Boys each subject'\ncharObj.append(seriesObj)\ncharObj.set_categories(cat)\nsheet.add_chart(charObj,\"I18\")\n\ndata = openpyxl.chart.Reference(sheet, min_col=3, min_row=36, max_row=36,max_col=7)\ncat = openpyxl.chart.Reference(sheet, min_col=3, min_row=1, max_col=7,max_row=1)\n\nseriesObj = openpyxl.chart.Series(data, title=\"bar chart of girls each subject\", title_from_data=True)\ncharObj = openpyxl.chart.BarChart()\ncharObj.title = \"My Bar Chart\"\ncharObj.x_axis.title = 'girls each subject'\ncharObj.append(seriesObj)\ncharObj.set_categories(cat)\nsheet.add_chart(charObj,\"Q2\")\n\nwb.save('stu_scores _Grade.xlsx')\nwb.close()\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
def most_frequent_char(lst):
char_dict = {}
for word in lst:
for char in word:
if char in char_dict:
char_dict[char] += 1
else:
char_dict[char] = 1
max_value = max(char_dict.values())
max_keys = []
for key, value in char_dict.items():
if value == max_value:
max_keys.append(key)
return sorted(max_keys)
|
normal
|
{
"blob_id": "be1ddaf5b4a7fb203fea62d061b06afb45d6867d",
"index": 4690,
"step-1": "<mask token>\n",
"step-2": "def most_frequent_char(lst):\n char_dict = {}\n for word in lst:\n for char in word:\n if char in char_dict:\n char_dict[char] += 1\n else:\n char_dict[char] = 1\n max_value = max(char_dict.values())\n max_keys = []\n for key, value in char_dict.items():\n if value == max_value:\n max_keys.append(key)\n return sorted(max_keys)\n",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
# A class is like a blueprint for creating objects. An object has properties and methods(functions) associated with it. Almost everything in Python is an object
# import connect
# from connect import connect
#create class
import pymysql
# import MySQLdb
conn = pymysql.connect(host='127.0.0.1',user='root',password='',db='Python')
connect = conn.cursor()
class User():
#constructor
def __init__(self, name,email,age):
self.name = name
self.email = email
self.age = age
def getUserInfo(self):
# return f'His name is {self.name} and his age is {self.age}'
# conn = pymysql.connect(host='127.0.0.1',user='root',password='',db='Python')
# connect = conn.cursor()
user_data = 'select * from students;'
connect.execute(user_data)
data = connect.fetchall()
i=0
for new_data in data:
# print(f'student name is {data[i][1]} {data[i][2]} and age is {data[i][3]}')
print(data)
i += 1
# return connect.fetchall()
def IncreaseAge(self):
self.age += 1
class Customer(User):
#constructor
def __init__(self, name,email,age):
self.name = name
self.email = email
self.age = age
self.balance = 0
def getBalance(self,balance):
self.balance = balance
#Init User object
brad = User('Kaushal Patel','[email protected]',22)
customer = Customer('Babulal Kumawat','[email protected]',22)
# brad.IncreaseAge()
# customer.getBalance(22)
# print(customer.getUserInfo())
# print(brad.getUserInfo())
# print(brad.getUserInfo())
# brad.getUserInfo()
brad.getUserInfo()
|
normal
|
{
"blob_id": "ea045d04b40341f34c780dceab1f21df93b7207a",
"index": 7689,
"step-1": "<mask token>\n\n\nclass User:\n <mask token>\n <mask token>\n <mask token>\n\n\nclass Customer(User):\n\n def __init__(self, name, email, age):\n self.name = name\n self.email = email\n self.age = age\n self.balance = 0\n\n def getBalance(self, balance):\n self.balance = balance\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass User:\n\n def __init__(self, name, email, age):\n self.name = name\n self.email = email\n self.age = age\n <mask token>\n <mask token>\n\n\nclass Customer(User):\n\n def __init__(self, name, email, age):\n self.name = name\n self.email = email\n self.age = age\n self.balance = 0\n\n def getBalance(self, balance):\n self.balance = balance\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass User:\n\n def __init__(self, name, email, age):\n self.name = name\n self.email = email\n self.age = age\n\n def getUserInfo(self):\n user_data = 'select * from students;'\n connect.execute(user_data)\n data = connect.fetchall()\n i = 0\n for new_data in data:\n print(data)\n i += 1\n\n def IncreaseAge(self):\n self.age += 1\n\n\nclass Customer(User):\n\n def __init__(self, name, email, age):\n self.name = name\n self.email = email\n self.age = age\n self.balance = 0\n\n def getBalance(self, balance):\n self.balance = balance\n\n\n<mask token>\nbrad.getUserInfo()\n",
"step-4": "import pymysql\nconn = pymysql.connect(host='127.0.0.1', user='root', password='', db='Python')\nconnect = conn.cursor()\n\n\nclass User:\n\n def __init__(self, name, email, age):\n self.name = name\n self.email = email\n self.age = age\n\n def getUserInfo(self):\n user_data = 'select * from students;'\n connect.execute(user_data)\n data = connect.fetchall()\n i = 0\n for new_data in data:\n print(data)\n i += 1\n\n def IncreaseAge(self):\n self.age += 1\n\n\nclass Customer(User):\n\n def __init__(self, name, email, age):\n self.name = name\n self.email = email\n self.age = age\n self.balance = 0\n\n def getBalance(self, balance):\n self.balance = balance\n\n\nbrad = User('Kaushal Patel', '[email protected]', 22)\ncustomer = Customer('Babulal Kumawat', '[email protected]', 22)\nbrad.getUserInfo()\n",
"step-5": "# A class is like a blueprint for creating objects. An object has properties and methods(functions) associated with it. Almost everything in Python is an object\n# import connect \n# from connect import connect\n#create class\n\nimport pymysql\n# import MySQLdb\nconn = pymysql.connect(host='127.0.0.1',user='root',password='',db='Python')\nconnect = conn.cursor()\nclass User():\n #constructor\n def __init__(self, name,email,age):\n self.name = name\n self.email = email\n self.age = age\n\n def getUserInfo(self):\n # return f'His name is {self.name} and his age is {self.age}'\n # conn = pymysql.connect(host='127.0.0.1',user='root',password='',db='Python')\n # connect = conn.cursor()\n user_data = 'select * from students;'\n connect.execute(user_data)\n data = connect.fetchall()\n i=0\n for new_data in data:\n # print(f'student name is {data[i][1]} {data[i][2]} and age is {data[i][3]}')\n print(data)\n i += 1\n # return connect.fetchall()\n\n def IncreaseAge(self):\n self.age += 1\n\n\nclass Customer(User):\n #constructor\n def __init__(self, name,email,age):\n self.name = name\n self.email = email\n self.age = age\n self.balance = 0\n\n def getBalance(self,balance):\n self.balance = balance\n\n \n \n#Init User object\nbrad = User('Kaushal Patel','[email protected]',22)\n\ncustomer = Customer('Babulal Kumawat','[email protected]',22)\n# brad.IncreaseAge()\n\n# customer.getBalance(22)\n# print(customer.getUserInfo())\n# print(brad.getUserInfo())\n\n# print(brad.getUserInfo())\n# brad.getUserInfo()\nbrad.getUserInfo()",
"step-ids": [
4,
5,
8,
10,
11
]
}
|
[
4,
5,
8,
10,
11
] |
<|reserved_special_token_0|>
class Bitcoin:
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Bitcoin:
<|reserved_special_token_0|>
def __init__(self):
pass
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Bitcoin:
<|reserved_special_token_0|>
def __init__(self):
pass
def get_current_price(self, url=coindesk):
self.resp = requests.get(url)
if self.resp.status_code == 200:
return json.loads(self.resp.content.decode('utf-8'))
else:
return None
def float_price(self, json_response):
if json_response is not None:
rate = json_response['bpi']['EUR']['rate_float']
try:
return float(rate)
except:
return None
else:
return None
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Bitcoin:
coindesk = 'https://api.coindesk.com/v1/bpi/currentprice.json'
def __init__(self):
pass
def get_current_price(self, url=coindesk):
self.resp = requests.get(url)
if self.resp.status_code == 200:
return json.loads(self.resp.content.decode('utf-8'))
else:
return None
def float_price(self, json_response):
if json_response is not None:
rate = json_response['bpi']['EUR']['rate_float']
try:
return float(rate)
except:
return None
else:
return None
<|reserved_special_token_1|>
import json
import requests
class Bitcoin:
coindesk = 'https://api.coindesk.com/v1/bpi/currentprice.json'
def __init__(self):
pass
def get_current_price(self, url=coindesk):
self.resp = requests.get(url)
if self.resp.status_code == 200:
return json.loads(self.resp.content.decode('utf-8'))
else:
return None
def float_price(self, json_response):
if json_response is not None:
rate = json_response['bpi']['EUR']['rate_float']
try:
return float(rate)
except:
return None
else:
return None
|
flexible
|
{
"blob_id": "3bfe4021d5cf9bd24c0fb778b252bc04c6ac47ed",
"index": 1847,
"step-1": "<mask token>\n\n\nclass Bitcoin:\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass Bitcoin:\n <mask token>\n\n def __init__(self):\n pass\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Bitcoin:\n <mask token>\n\n def __init__(self):\n pass\n\n def get_current_price(self, url=coindesk):\n self.resp = requests.get(url)\n if self.resp.status_code == 200:\n return json.loads(self.resp.content.decode('utf-8'))\n else:\n return None\n\n def float_price(self, json_response):\n if json_response is not None:\n rate = json_response['bpi']['EUR']['rate_float']\n try:\n return float(rate)\n except:\n return None\n else:\n return None\n",
"step-4": "<mask token>\n\n\nclass Bitcoin:\n coindesk = 'https://api.coindesk.com/v1/bpi/currentprice.json'\n\n def __init__(self):\n pass\n\n def get_current_price(self, url=coindesk):\n self.resp = requests.get(url)\n if self.resp.status_code == 200:\n return json.loads(self.resp.content.decode('utf-8'))\n else:\n return None\n\n def float_price(self, json_response):\n if json_response is not None:\n rate = json_response['bpi']['EUR']['rate_float']\n try:\n return float(rate)\n except:\n return None\n else:\n return None\n",
"step-5": "import json\nimport requests\n\n\nclass Bitcoin:\n coindesk = 'https://api.coindesk.com/v1/bpi/currentprice.json'\n\n def __init__(self):\n pass\n\n def get_current_price(self, url=coindesk):\n self.resp = requests.get(url)\n if self.resp.status_code == 200:\n return json.loads(self.resp.content.decode('utf-8'))\n else:\n return None\n\n def float_price(self, json_response):\n if json_response is not None:\n rate = json_response['bpi']['EUR']['rate_float']\n try:\n return float(rate)\n except:\n return None\n else:\n return None\n",
"step-ids": [
1,
2,
4,
5,
6
]
}
|
[
1,
2,
4,
5,
6
] |
'''
Various tools for cleaning out nulls and imputing
'''
|
normal
|
{
"blob_id": "bd310ab0bc193410b8f93ad5516b0731d2eba54f",
"index": 6268,
"step-1": "<mask token>\n",
"step-2": "'''\nVarious tools for cleaning out nulls and imputing \n'''\n",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class CoopHtmlEditorAppConfig(AppConfig):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class CoopHtmlEditorAppConfig(AppConfig):
name = 'coop_html_editor'
verbose_name = 'Html Editor'
<|reserved_special_token_1|>
<|reserved_special_token_0|>
from django.apps import AppConfig
class CoopHtmlEditorAppConfig(AppConfig):
name = 'coop_html_editor'
verbose_name = 'Html Editor'
<|reserved_special_token_1|>
# -*- coding: utf-8 -*-
"""
app definition
"""
from django.apps import AppConfig
class CoopHtmlEditorAppConfig(AppConfig):
name = 'coop_html_editor'
verbose_name = "Html Editor"
|
flexible
|
{
"blob_id": "641cbe2f35925d070249820a2e3a4f1cdd1cf642",
"index": 8697,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass CoopHtmlEditorAppConfig(AppConfig):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass CoopHtmlEditorAppConfig(AppConfig):\n name = 'coop_html_editor'\n verbose_name = 'Html Editor'\n",
"step-4": "<mask token>\nfrom django.apps import AppConfig\n\n\nclass CoopHtmlEditorAppConfig(AppConfig):\n name = 'coop_html_editor'\n verbose_name = 'Html Editor'\n",
"step-5": "# -*- coding: utf-8 -*-\n\"\"\"\napp definition\n\"\"\"\n\nfrom django.apps import AppConfig\n\n\nclass CoopHtmlEditorAppConfig(AppConfig):\n name = 'coop_html_editor'\n verbose_name = \"Html Editor\"\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# "Time Warner Python" Salma Hashem netid: sh5640
#Design costumer service application by asking users series of questions, and based on the customers' answers to the questions, provide them with instructions.
#Ask the user to choose from the following options
print("Choose from the following options: ")
#assign each menu option to a number
one= " 1. My internet is not working."
two= "2. My cable is not working."
three= "3. My phones are not working."
four= "4. My bill is wrong."
five= "5. I want to upgrade my plan."
#Print the options each on its own line and ask the user to input a number and convert into an integer
print(one, "\n", two, "\n", three, "\n", four, "\n", five)
value= int(input("(Enter a value 1 - 5): "))
#assign variables to user inputs using if else statements for scenario one and print output based on user inputs
if value==1:
modem_on=input("\nIs your modem on? (Enter Y or N): ")
if modem_on=="Y":
router_on=input("\nIs your router on? (Enter Y or N): ")
if router_on=="Y":
redlight= input("\nDoes your router emit a red light? (Enter Y or N): ")
if redlight=="Y":
print("Unplug your router and wait thirty seconds. Then plug your router into the nearest outlet to restart your router. If you still cannot connect to the internet, restart this program. Note, this program will now terminate. Goodbye!")
else:
comp_wifi_on=input("\nAre both your computer and wifi on? (Enter Y or N): ")
if comp_wifi_on=="Y":
print("It looks like you may need additional support. Please call 555-555-5555 for additional support with this matter. Thank you for your patience.")
else:
print("If your computer is not on, please turn it on by pressing the power button. Also make sure your computer's wifi is on. If you still cannot connect to the internet, restart this program. Note, this program will now terminate. Goodbye!")
else:
print("Plug your router into the nearest outlet to turn on your router. If you still cannot connect to the Internet, restart this program. Note, this program will now terminate. Goodbye!")
else:
print("Plug your modem into the nearest outlet to turn on your modem. If you still cannot connect to the Internet, restart this program. Note, this program will now terminate. Goodbye!")
#assign variables to user inputs using if statements for scenario two and print output based on user inputs
if value==2:
cable_on=input("\nIs your cable box on? (Enter Y or N): ")
if cable_on=="Y":
tv_on=input("\nIs your TV on? (Enter Y or N): ")
if tv_on=="Y":
print("It looks like you may need additional support. Please call 555-555-5555 for additional support with this matter. Thank you for your patience.")
else:
print("Plug your TV into the nearest outlet and press the power button on your remote to turn on your TV. If you still do not recieve a cable signal, restart this program. Note, this program will now terminate. Goodbye!")
else:
print("Plug your cable box into the nearest outlet to turn on your cable box. If you still do not recieve a cable signal, please restart this program. Note, this program will now terminate. Goodbye!")
#assign variables to user inputs using if statements for scenario three and print output based on user inputs
if value==3:
phones_on=input("\nAre your phones on? (Enter Y or N): ")
if phone_on=="Y":
landline_plugged=input("\nIs there a landline wire plugged into each phone or the wireless phone terminal? (Enter Y or N): ")
if landline_plugged=="Y":
print("It looks like you may need additional support. Please call 555-555-5555 for additional support with this matter. Thank you for your patience.")
else:
print("Plug a landline wire into each phone or phone terminal. If you still cannot use your phones, please restart this program. Note, this program will now terminate. Goodbye!")
else:
print("Plug your phones into the nearest outlet to turn them on. If you are using wireless phones, please make sure you change them before attempting to use them. If you still cannot use your phones, please restart this program. Note, this program will now terminate. Goodbye!")
#assign variables to user inputs using if statements for scenario four and print output based on user inputs
if value==4:
late_payment= input("\nWere you late on your last payment? (Enter Y or N): ")
if late_payment=="Y":
print("If you were late on your last payment, you will be charged an additional 5% interest fee. Therefore, your bill may be more than usual. If you would like to contest your charge, please call 555-555-5555 for additional support with this matter. Thank you for your patience. Note, this program will now terminate. Goodbye!")
else:
print("It looks like you may need additional support. Please call 555-555-5555 for additional support with this matter. Thank you for your patience.")
#scenario 5--evaluate input and print output based on user input
if value==5:
print("It looks like you may need additional support. Please call 555-555-5555 for additional support with this matter. Thank you for your patience.")
#create if statements to evaluate invalid user inputs
if value<1 or value>5:
print("You entered an invalid menu choice. It looks like you may need additional support. Please call 555-555-5555 for additional support with this matter. Thank you for your patience.")
|
normal
|
{
"blob_id": "736b84bbcf1d5954b491068be4060edeade2c1c5",
"index": 2205,
"step-1": "<mask token>\n",
"step-2": "print('Choose from the following options: ')\n<mask token>\nprint(one, '\\n', two, '\\n', three, '\\n', four, '\\n', five)\n<mask token>\nif value == 1:\n modem_on = input('\\nIs your modem on? (Enter Y or N): ')\n if modem_on == 'Y':\n router_on = input('\\nIs your router on? (Enter Y or N): ')\n if router_on == 'Y':\n redlight = input(\n '\\nDoes your router emit a red light? (Enter Y or N): ')\n if redlight == 'Y':\n print(\n 'Unplug your router and wait thirty seconds. Then plug your router into the nearest outlet to restart your router. If you still cannot connect to the internet, restart this program. Note, this program will now terminate. Goodbye!'\n )\n else:\n comp_wifi_on = input(\n '\\nAre both your computer and wifi on? (Enter Y or N): ')\n if comp_wifi_on == 'Y':\n print(\n 'It looks like you may need additional support. Please call 555-555-5555 for additional support with this matter. Thank you for your patience.'\n )\n else:\n print(\n \"If your computer is not on, please turn it on by pressing the power button. Also make sure your computer's wifi is on. If you still cannot connect to the internet, restart this program. Note, this program will now terminate. Goodbye!\"\n )\n else:\n print(\n 'Plug your router into the nearest outlet to turn on your router. If you still cannot connect to the Internet, restart this program. Note, this program will now terminate. Goodbye!'\n )\n else:\n print(\n 'Plug your modem into the nearest outlet to turn on your modem. If you still cannot connect to the Internet, restart this program. Note, this program will now terminate. Goodbye!'\n )\nif value == 2:\n cable_on = input('\\nIs your cable box on? (Enter Y or N): ')\n if cable_on == 'Y':\n tv_on = input('\\nIs your TV on? (Enter Y or N): ')\n if tv_on == 'Y':\n print(\n 'It looks like you may need additional support. Please call 555-555-5555 for additional support with this matter. Thank you for your patience.'\n )\n else:\n print(\n 'Plug your TV into the nearest outlet and press the power button on your remote to turn on your TV. If you still do not recieve a cable signal, restart this program. Note, this program will now terminate. Goodbye!'\n )\n else:\n print(\n 'Plug your cable box into the nearest outlet to turn on your cable box. If you still do not recieve a cable signal, please restart this program. Note, this program will now terminate. Goodbye!'\n )\nif value == 3:\n phones_on = input('\\nAre your phones on? (Enter Y or N): ')\n if phone_on == 'Y':\n landline_plugged = input(\n \"\"\"\nIs there a landline wire plugged into each phone or the wireless phone terminal? (Enter Y or N): \"\"\"\n )\n if landline_plugged == 'Y':\n print(\n 'It looks like you may need additional support. Please call 555-555-5555 for additional support with this matter. Thank you for your patience.'\n )\n else:\n print(\n 'Plug a landline wire into each phone or phone terminal. If you still cannot use your phones, please restart this program. Note, this program will now terminate. Goodbye!'\n )\n else:\n print(\n 'Plug your phones into the nearest outlet to turn them on. If you are using wireless phones, please make sure you change them before attempting to use them. If you still cannot use your phones, please restart this program. Note, this program will now terminate. Goodbye!'\n )\nif value == 4:\n late_payment = input(\n '\\nWere you late on your last payment? 
(Enter Y or N): ')\n if late_payment == 'Y':\n print(\n 'If you were late on your last payment, you will be charged an additional 5% interest fee. Therefore, your bill may be more than usual. If you would like to contest your charge, please call 555-555-5555 for additional support with this matter. Thank you for your patience. Note, this program will now terminate. Goodbye!'\n )\n else:\n print(\n 'It looks like you may need additional support. Please call 555-555-5555 for additional support with this matter. Thank you for your patience.'\n )\nif value == 5:\n print(\n 'It looks like you may need additional support. Please call 555-555-5555 for additional support with this matter. Thank you for your patience.'\n )\nif value < 1 or value > 5:\n print(\n 'You entered an invalid menu choice. It looks like you may need additional support. Please call 555-555-5555 for additional support with this matter. Thank you for your patience.'\n )\n",
"step-3": "print('Choose from the following options: ')\none = ' 1. My internet is not working.'\ntwo = '2. My cable is not working.'\nthree = '3. My phones are not working.'\nfour = '4. My bill is wrong.'\nfive = '5. I want to upgrade my plan.'\nprint(one, '\\n', two, '\\n', three, '\\n', four, '\\n', five)\nvalue = int(input('(Enter a value 1 - 5): '))\nif value == 1:\n modem_on = input('\\nIs your modem on? (Enter Y or N): ')\n if modem_on == 'Y':\n router_on = input('\\nIs your router on? (Enter Y or N): ')\n if router_on == 'Y':\n redlight = input(\n '\\nDoes your router emit a red light? (Enter Y or N): ')\n if redlight == 'Y':\n print(\n 'Unplug your router and wait thirty seconds. Then plug your router into the nearest outlet to restart your router. If you still cannot connect to the internet, restart this program. Note, this program will now terminate. Goodbye!'\n )\n else:\n comp_wifi_on = input(\n '\\nAre both your computer and wifi on? (Enter Y or N): ')\n if comp_wifi_on == 'Y':\n print(\n 'It looks like you may need additional support. Please call 555-555-5555 for additional support with this matter. Thank you for your patience.'\n )\n else:\n print(\n \"If your computer is not on, please turn it on by pressing the power button. Also make sure your computer's wifi is on. If you still cannot connect to the internet, restart this program. Note, this program will now terminate. Goodbye!\"\n )\n else:\n print(\n 'Plug your router into the nearest outlet to turn on your router. If you still cannot connect to the Internet, restart this program. Note, this program will now terminate. Goodbye!'\n )\n else:\n print(\n 'Plug your modem into the nearest outlet to turn on your modem. If you still cannot connect to the Internet, restart this program. Note, this program will now terminate. Goodbye!'\n )\nif value == 2:\n cable_on = input('\\nIs your cable box on? (Enter Y or N): ')\n if cable_on == 'Y':\n tv_on = input('\\nIs your TV on? (Enter Y or N): ')\n if tv_on == 'Y':\n print(\n 'It looks like you may need additional support. Please call 555-555-5555 for additional support with this matter. Thank you for your patience.'\n )\n else:\n print(\n 'Plug your TV into the nearest outlet and press the power button on your remote to turn on your TV. If you still do not recieve a cable signal, restart this program. Note, this program will now terminate. Goodbye!'\n )\n else:\n print(\n 'Plug your cable box into the nearest outlet to turn on your cable box. If you still do not recieve a cable signal, please restart this program. Note, this program will now terminate. Goodbye!'\n )\nif value == 3:\n phones_on = input('\\nAre your phones on? (Enter Y or N): ')\n if phone_on == 'Y':\n landline_plugged = input(\n \"\"\"\nIs there a landline wire plugged into each phone or the wireless phone terminal? (Enter Y or N): \"\"\"\n )\n if landline_plugged == 'Y':\n print(\n 'It looks like you may need additional support. Please call 555-555-5555 for additional support with this matter. Thank you for your patience.'\n )\n else:\n print(\n 'Plug a landline wire into each phone or phone terminal. If you still cannot use your phones, please restart this program. Note, this program will now terminate. Goodbye!'\n )\n else:\n print(\n 'Plug your phones into the nearest outlet to turn them on. If you are using wireless phones, please make sure you change them before attempting to use them. If you still cannot use your phones, please restart this program. Note, this program will now terminate. 
Goodbye!'\n )\nif value == 4:\n late_payment = input(\n '\\nWere you late on your last payment? (Enter Y or N): ')\n if late_payment == 'Y':\n print(\n 'If you were late on your last payment, you will be charged an additional 5% interest fee. Therefore, your bill may be more than usual. If you would like to contest your charge, please call 555-555-5555 for additional support with this matter. Thank you for your patience. Note, this program will now terminate. Goodbye!'\n )\n else:\n print(\n 'It looks like you may need additional support. Please call 555-555-5555 for additional support with this matter. Thank you for your patience.'\n )\nif value == 5:\n print(\n 'It looks like you may need additional support. Please call 555-555-5555 for additional support with this matter. Thank you for your patience.'\n )\nif value < 1 or value > 5:\n print(\n 'You entered an invalid menu choice. It looks like you may need additional support. Please call 555-555-5555 for additional support with this matter. Thank you for your patience.'\n )\n",
"step-4": "# \"Time Warner Python\" Salma Hashem netid: sh5640\n#Design costumer service application by asking users series of questions, and based on the customers' answers to the questions, provide them with instructions. \n#Ask the user to choose from the following options \nprint(\"Choose from the following options: \")\n#assign each menu option to a number\none= \" 1. My internet is not working.\"\ntwo= \"2. My cable is not working.\"\nthree= \"3. My phones are not working.\"\nfour= \"4. My bill is wrong.\"\nfive= \"5. I want to upgrade my plan.\"\n#Print the options each on its own line and ask the user to input a number and convert into an integer\nprint(one, \"\\n\", two, \"\\n\", three, \"\\n\", four, \"\\n\", five)\nvalue= int(input(\"(Enter a value 1 - 5): \"))\n#assign variables to user inputs using if else statements for scenario one and print output based on user inputs \n\n\nif value==1:\n modem_on=input(\"\\nIs your modem on? (Enter Y or N): \")\n if modem_on==\"Y\":\n router_on=input(\"\\nIs your router on? (Enter Y or N): \")\n if router_on==\"Y\":\n redlight= input(\"\\nDoes your router emit a red light? (Enter Y or N): \")\n if redlight==\"Y\":\n print(\"Unplug your router and wait thirty seconds. Then plug your router into the nearest outlet to restart your router. If you still cannot connect to the internet, restart this program. Note, this program will now terminate. Goodbye!\")\n else:\n comp_wifi_on=input(\"\\nAre both your computer and wifi on? (Enter Y or N): \")\n if comp_wifi_on==\"Y\":\n print(\"It looks like you may need additional support. Please call 555-555-5555 for additional support with this matter. Thank you for your patience.\")\n else:\n print(\"If your computer is not on, please turn it on by pressing the power button. Also make sure your computer's wifi is on. If you still cannot connect to the internet, restart this program. Note, this program will now terminate. Goodbye!\")\n else:\n print(\"Plug your router into the nearest outlet to turn on your router. If you still cannot connect to the Internet, restart this program. Note, this program will now terminate. Goodbye!\")\n \n else:\n print(\"Plug your modem into the nearest outlet to turn on your modem. If you still cannot connect to the Internet, restart this program. Note, this program will now terminate. Goodbye!\")\n#assign variables to user inputs using if statements for scenario two and print output based on user inputs \nif value==2:\n cable_on=input(\"\\nIs your cable box on? (Enter Y or N): \")\n if cable_on==\"Y\":\n tv_on=input(\"\\nIs your TV on? (Enter Y or N): \")\n if tv_on==\"Y\":\n print(\"It looks like you may need additional support. Please call 555-555-5555 for additional support with this matter. Thank you for your patience.\")\n else:\n print(\"Plug your TV into the nearest outlet and press the power button on your remote to turn on your TV. If you still do not recieve a cable signal, restart this program. Note, this program will now terminate. Goodbye!\")\n else:\n print(\"Plug your cable box into the nearest outlet to turn on your cable box. If you still do not recieve a cable signal, please restart this program. Note, this program will now terminate. Goodbye!\")\n#assign variables to user inputs using if statements for scenario three and print output based on user inputs \nif value==3:\n phones_on=input(\"\\nAre your phones on? 
(Enter Y or N): \")\n if phone_on==\"Y\":\n landline_plugged=input(\"\\nIs there a landline wire plugged into each phone or the wireless phone terminal? (Enter Y or N): \")\n if landline_plugged==\"Y\":\n print(\"It looks like you may need additional support. Please call 555-555-5555 for additional support with this matter. Thank you for your patience.\")\n else:\n print(\"Plug a landline wire into each phone or phone terminal. If you still cannot use your phones, please restart this program. Note, this program will now terminate. Goodbye!\")\n else:\n print(\"Plug your phones into the nearest outlet to turn them on. If you are using wireless phones, please make sure you change them before attempting to use them. If you still cannot use your phones, please restart this program. Note, this program will now terminate. Goodbye!\")\n#assign variables to user inputs using if statements for scenario four and print output based on user inputs\nif value==4:\n late_payment= input(\"\\nWere you late on your last payment? (Enter Y or N): \")\n if late_payment==\"Y\":\n print(\"If you were late on your last payment, you will be charged an additional 5% interest fee. Therefore, your bill may be more than usual. If you would like to contest your charge, please call 555-555-5555 for additional support with this matter. Thank you for your patience. Note, this program will now terminate. Goodbye!\")\n else:\n print(\"It looks like you may need additional support. Please call 555-555-5555 for additional support with this matter. Thank you for your patience.\")\n#scenario 5--evaluate input and print output based on user input\nif value==5:\n print(\"It looks like you may need additional support. Please call 555-555-5555 for additional support with this matter. Thank you for your patience.\")\n#create if statements to evaluate invalid user inputs\nif value<1 or value>5:\n print(\"You entered an invalid menu choice. It looks like you may need additional support. Please call 555-555-5555 for additional support with this matter. Thank you for your patience.\")\n \n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
from people.models import Medium, Profile, Staff, Instructor, Student, Alumni, Donation, Address, Award, Reference, Experience, Skill, Education, ImporterUsers
from anonymizer import Anonymizer
class MediumAnonymizer(Anonymizer):
model = Medium
attributes = [
('medium_id', "integer"),
('description', "varchar"),
]
class ProfileAnonymizer(Anonymizer):
model = Profile
attributes = [
('user_id', "SKIP"),
('person_id', "SKIP"),
('datatel_avatar_url', "SKIP"),
('suffix', "choice"),
('salutation', "choice"),
('middle_name', "name"),
('title', "varchar"),
('about', "lorem"),
('email2', "email"),
('home_phone1', "phonenumber"),
('biz_phone1', "phonenumber"),
('mobile_phone1', "phonenumber"),
('fax', "phonenumber"),
('allow_contact', "bool"),
('show_name', "bool"),
('url_personal', "varchar"),
('url_org', "varchar"),
('accepted_terms', "bool"),
('email_on_follow', "bool"),
]
class StaffAnonymizer(Anonymizer):
model = Staff
attributes = [
('profile_id', "SKIP"),
('office_num', "varchar"),
('extension', "varchar"),
]
class InstructorAnonymizer(Anonymizer):
model = Instructor
attributes = [
('profile_id', "SKIP"),
('office_num', "varchar"),
('extension', "varchar"),
('bio_short', "lorem"),
('bio_long', "lorem"),
]
class StudentAnonymizer(Anonymizer):
model = Student
attributes = [
('profile_id', "SKIP"),
('grad_year', "choice"),
('funding_amount', "SKIP"),
('enrollment_date', "date"),
('program_length', "integer"),
('visiting_scholar', "bool"),
]
class AlumniAnonymizer(Anonymizer):
model = Alumni
attributes = [
('profile_id', "SKIP"),
('grad_year', "choice"),
('third_year', "bool"),
('j200_inst', "varchar"),
('funding_amount', "SKIP"),
('enrollment_date', "date"),
('program_length', "integer"),
('equipment_balance', "SKIP"),
('visiting_scholar', "bool"),
('employer', "varchar"),
('specialty', "varchar"),
('medium', "choice"),
('prev_emp1', "varchar"),
('prev_emp2', "varchar"),
('prev_emp3', "varchar"),
('notes_exclude', "bool"),
('notes', "lorem"),
('mod_date', "date"),
('pub_display', "bool"),
('freelance', "bool"),
('region', "choice"),
('prev_intern1', "varchar"),
('prev_intern2', "varchar"),
('prev_intern3', "varchar"),
('first_job', "varchar"),
('books', "lorem"),
('deceased_notes', "varchar"),
('mia', "bool"),
('mia_notes', "lorem"),
('interview', "bool"),
('interview_year', "choice"),
('interview_notes', "lorem"),
('agents_year', "choice"),
('agents_notes', "lorem"),
('event_attend_notes', "lorem"),
('famous_notes', "lorem"),
('volunteer_speak', "bool"),
('volunteer_committee', "bool"),
('volunteer_interview', "bool"),
('volunteer_mentor', "bool"),
('volunteer_agent', "bool"),
('maillist_class', "bool"),
('no_maillists', "bool"),
('no_reminder', "bool"),
('suggestions', "lorem"),
('committee_notes', "lorem"),
('inactive', "bool"),
('revision', "integer"),
]
class DonationAnonymizer(Anonymizer):
model = Donation
attributes = [
('id', "SKIP"),
('profile_id', "SKIP"),
('amount', "integer"),
('date', "date"),
('description', "varchar"),
('notes', "lorem"),
]
class AddressAnonymizer(Anonymizer):
model = Address
attributes = [
('id', "SKIP"),
('profile_id', "SKIP"),
('address_type', "choice"),
('street_1', "street_address"),
('street_2', "street_address"),
('street_3', "street_address"),
('city', "city"),
('state', "choice"),
('state_other', "varchar"),
('postal_code', "uk_postcode"),
('display', "bool"),
]
class AwardAnonymizer(Anonymizer):
model = Award
attributes = [
('id', "SKIP"),
('profile_id', "SKIP"),
('title', "varchar"),
('description', "lorem"),
('date_received', "date"),
('display', "bool"),
]
class ReferenceAnonymizer(Anonymizer):
model = Reference
attributes = [
('id', "SKIP"),
('profile_id', "SKIP"),
('body', "lorem"),
]
class ExperienceAnonymizer(Anonymizer):
model = Experience
attributes = [
('id', "SKIP"),
('profile_id', "SKIP"),
('experience_type', "choice"),
('title', "varchar"),
('description', "lorem"),
('company', "varchar"),
('city', "city"),
('state', "choice"),
('country', "varchar"),
('start_date', "date"),
('end_date', "date"),
('display', "bool"),
]
class SkillAnonymizer(Anonymizer):
model = Skill
attributes = [
('id', "SKIP"),
('profile_id', "SKIP"),
('summary', "lorem"),
('display', "bool"),
]
class EducationAnonymizer(Anonymizer):
model = Education
attributes = [
('id', "SKIP"),
('profile_id', "SKIP"),
('diploma', "choice"),
('school', "varchar"),
('description', "lorem"),
('start_date', "date"),
('end_date', "date"),
('display', "bool"),
]
class ImporterUsersAnonymizer(Anonymizer):
model = ImporterUsers
attributes = [
('id', "SKIP"),
('action', "SKIP"),
('person_id', "SKIP"),
('section_id', "SKIP"),
('first_name', "SKIP"),
('last_name', "SKIP"),
('email', "SKIP"),
('photo_url', "SKIP"),
('person_type', "SKIP"),
]
|
normal
|
{
"blob_id": "63182a8708729606f96794cddb163f707252ba61",
"index": 3205,
"step-1": "<mask token>\n\n\nclass DonationAnonymizer(Anonymizer):\n model = Donation\n attributes = [('id', 'SKIP'), ('profile_id', 'SKIP'), ('amount',\n 'integer'), ('date', 'date'), ('description', 'varchar'), ('notes',\n 'lorem')]\n\n\nclass AddressAnonymizer(Anonymizer):\n model = Address\n attributes = [('id', 'SKIP'), ('profile_id', 'SKIP'), ('address_type',\n 'choice'), ('street_1', 'street_address'), ('street_2',\n 'street_address'), ('street_3', 'street_address'), ('city', 'city'),\n ('state', 'choice'), ('state_other', 'varchar'), ('postal_code',\n 'uk_postcode'), ('display', 'bool')]\n\n\nclass AwardAnonymizer(Anonymizer):\n model = Award\n attributes = [('id', 'SKIP'), ('profile_id', 'SKIP'), ('title',\n 'varchar'), ('description', 'lorem'), ('date_received', 'date'), (\n 'display', 'bool')]\n\n\nclass ReferenceAnonymizer(Anonymizer):\n model = Reference\n attributes = [('id', 'SKIP'), ('profile_id', 'SKIP'), ('body', 'lorem')]\n\n\nclass ExperienceAnonymizer(Anonymizer):\n model = Experience\n attributes = [('id', 'SKIP'), ('profile_id', 'SKIP'), (\n 'experience_type', 'choice'), ('title', 'varchar'), ('description',\n 'lorem'), ('company', 'varchar'), ('city', 'city'), ('state',\n 'choice'), ('country', 'varchar'), ('start_date', 'date'), (\n 'end_date', 'date'), ('display', 'bool')]\n\n\nclass SkillAnonymizer(Anonymizer):\n model = Skill\n attributes = [('id', 'SKIP'), ('profile_id', 'SKIP'), ('summary',\n 'lorem'), ('display', 'bool')]\n\n\nclass EducationAnonymizer(Anonymizer):\n model = Education\n attributes = [('id', 'SKIP'), ('profile_id', 'SKIP'), ('diploma',\n 'choice'), ('school', 'varchar'), ('description', 'lorem'), (\n 'start_date', 'date'), ('end_date', 'date'), ('display', 'bool')]\n\n\nclass ImporterUsersAnonymizer(Anonymizer):\n model = ImporterUsers\n attributes = [('id', 'SKIP'), ('action', 'SKIP'), ('person_id', 'SKIP'),\n ('section_id', 'SKIP'), ('first_name', 'SKIP'), ('last_name',\n 'SKIP'), ('email', 'SKIP'), ('photo_url', 'SKIP'), ('person_type',\n 'SKIP')]\n",
"step-2": "<mask token>\n\n\nclass AlumniAnonymizer(Anonymizer):\n model = Alumni\n attributes = [('profile_id', 'SKIP'), ('grad_year', 'choice'), (\n 'third_year', 'bool'), ('j200_inst', 'varchar'), ('funding_amount',\n 'SKIP'), ('enrollment_date', 'date'), ('program_length', 'integer'),\n ('equipment_balance', 'SKIP'), ('visiting_scholar', 'bool'), (\n 'employer', 'varchar'), ('specialty', 'varchar'), ('medium',\n 'choice'), ('prev_emp1', 'varchar'), ('prev_emp2', 'varchar'), (\n 'prev_emp3', 'varchar'), ('notes_exclude', 'bool'), ('notes',\n 'lorem'), ('mod_date', 'date'), ('pub_display', 'bool'), (\n 'freelance', 'bool'), ('region', 'choice'), ('prev_intern1',\n 'varchar'), ('prev_intern2', 'varchar'), ('prev_intern3', 'varchar'\n ), ('first_job', 'varchar'), ('books', 'lorem'), ('deceased_notes',\n 'varchar'), ('mia', 'bool'), ('mia_notes', 'lorem'), ('interview',\n 'bool'), ('interview_year', 'choice'), ('interview_notes', 'lorem'),\n ('agents_year', 'choice'), ('agents_notes', 'lorem'), (\n 'event_attend_notes', 'lorem'), ('famous_notes', 'lorem'), (\n 'volunteer_speak', 'bool'), ('volunteer_committee', 'bool'), (\n 'volunteer_interview', 'bool'), ('volunteer_mentor', 'bool'), (\n 'volunteer_agent', 'bool'), ('maillist_class', 'bool'), (\n 'no_maillists', 'bool'), ('no_reminder', 'bool'), ('suggestions',\n 'lorem'), ('committee_notes', 'lorem'), ('inactive', 'bool'), (\n 'revision', 'integer')]\n\n\nclass DonationAnonymizer(Anonymizer):\n model = Donation\n attributes = [('id', 'SKIP'), ('profile_id', 'SKIP'), ('amount',\n 'integer'), ('date', 'date'), ('description', 'varchar'), ('notes',\n 'lorem')]\n\n\nclass AddressAnonymizer(Anonymizer):\n model = Address\n attributes = [('id', 'SKIP'), ('profile_id', 'SKIP'), ('address_type',\n 'choice'), ('street_1', 'street_address'), ('street_2',\n 'street_address'), ('street_3', 'street_address'), ('city', 'city'),\n ('state', 'choice'), ('state_other', 'varchar'), ('postal_code',\n 'uk_postcode'), ('display', 'bool')]\n\n\nclass AwardAnonymizer(Anonymizer):\n model = Award\n attributes = [('id', 'SKIP'), ('profile_id', 'SKIP'), ('title',\n 'varchar'), ('description', 'lorem'), ('date_received', 'date'), (\n 'display', 'bool')]\n\n\nclass ReferenceAnonymizer(Anonymizer):\n model = Reference\n attributes = [('id', 'SKIP'), ('profile_id', 'SKIP'), ('body', 'lorem')]\n\n\nclass ExperienceAnonymizer(Anonymizer):\n model = Experience\n attributes = [('id', 'SKIP'), ('profile_id', 'SKIP'), (\n 'experience_type', 'choice'), ('title', 'varchar'), ('description',\n 'lorem'), ('company', 'varchar'), ('city', 'city'), ('state',\n 'choice'), ('country', 'varchar'), ('start_date', 'date'), (\n 'end_date', 'date'), ('display', 'bool')]\n\n\nclass SkillAnonymizer(Anonymizer):\n model = Skill\n attributes = [('id', 'SKIP'), ('profile_id', 'SKIP'), ('summary',\n 'lorem'), ('display', 'bool')]\n\n\nclass EducationAnonymizer(Anonymizer):\n model = Education\n attributes = [('id', 'SKIP'), ('profile_id', 'SKIP'), ('diploma',\n 'choice'), ('school', 'varchar'), ('description', 'lorem'), (\n 'start_date', 'date'), ('end_date', 'date'), ('display', 'bool')]\n\n\nclass ImporterUsersAnonymizer(Anonymizer):\n model = ImporterUsers\n attributes = [('id', 'SKIP'), ('action', 'SKIP'), ('person_id', 'SKIP'),\n ('section_id', 'SKIP'), ('first_name', 'SKIP'), ('last_name',\n 'SKIP'), ('email', 'SKIP'), ('photo_url', 'SKIP'), ('person_type',\n 'SKIP')]\n",
"step-3": "<mask token>\n\n\nclass MediumAnonymizer(Anonymizer):\n model = Medium\n attributes = [('medium_id', 'integer'), ('description', 'varchar')]\n\n\nclass ProfileAnonymizer(Anonymizer):\n model = Profile\n attributes = [('user_id', 'SKIP'), ('person_id', 'SKIP'), (\n 'datatel_avatar_url', 'SKIP'), ('suffix', 'choice'), ('salutation',\n 'choice'), ('middle_name', 'name'), ('title', 'varchar'), ('about',\n 'lorem'), ('email2', 'email'), ('home_phone1', 'phonenumber'), (\n 'biz_phone1', 'phonenumber'), ('mobile_phone1', 'phonenumber'), (\n 'fax', 'phonenumber'), ('allow_contact', 'bool'), ('show_name',\n 'bool'), ('url_personal', 'varchar'), ('url_org', 'varchar'), (\n 'accepted_terms', 'bool'), ('email_on_follow', 'bool')]\n\n\nclass StaffAnonymizer(Anonymizer):\n model = Staff\n attributes = [('profile_id', 'SKIP'), ('office_num', 'varchar'), (\n 'extension', 'varchar')]\n\n\nclass InstructorAnonymizer(Anonymizer):\n model = Instructor\n attributes = [('profile_id', 'SKIP'), ('office_num', 'varchar'), (\n 'extension', 'varchar'), ('bio_short', 'lorem'), ('bio_long', 'lorem')]\n\n\nclass StudentAnonymizer(Anonymizer):\n model = Student\n attributes = [('profile_id', 'SKIP'), ('grad_year', 'choice'), (\n 'funding_amount', 'SKIP'), ('enrollment_date', 'date'), (\n 'program_length', 'integer'), ('visiting_scholar', 'bool')]\n\n\nclass AlumniAnonymizer(Anonymizer):\n model = Alumni\n attributes = [('profile_id', 'SKIP'), ('grad_year', 'choice'), (\n 'third_year', 'bool'), ('j200_inst', 'varchar'), ('funding_amount',\n 'SKIP'), ('enrollment_date', 'date'), ('program_length', 'integer'),\n ('equipment_balance', 'SKIP'), ('visiting_scholar', 'bool'), (\n 'employer', 'varchar'), ('specialty', 'varchar'), ('medium',\n 'choice'), ('prev_emp1', 'varchar'), ('prev_emp2', 'varchar'), (\n 'prev_emp3', 'varchar'), ('notes_exclude', 'bool'), ('notes',\n 'lorem'), ('mod_date', 'date'), ('pub_display', 'bool'), (\n 'freelance', 'bool'), ('region', 'choice'), ('prev_intern1',\n 'varchar'), ('prev_intern2', 'varchar'), ('prev_intern3', 'varchar'\n ), ('first_job', 'varchar'), ('books', 'lorem'), ('deceased_notes',\n 'varchar'), ('mia', 'bool'), ('mia_notes', 'lorem'), ('interview',\n 'bool'), ('interview_year', 'choice'), ('interview_notes', 'lorem'),\n ('agents_year', 'choice'), ('agents_notes', 'lorem'), (\n 'event_attend_notes', 'lorem'), ('famous_notes', 'lorem'), (\n 'volunteer_speak', 'bool'), ('volunteer_committee', 'bool'), (\n 'volunteer_interview', 'bool'), ('volunteer_mentor', 'bool'), (\n 'volunteer_agent', 'bool'), ('maillist_class', 'bool'), (\n 'no_maillists', 'bool'), ('no_reminder', 'bool'), ('suggestions',\n 'lorem'), ('committee_notes', 'lorem'), ('inactive', 'bool'), (\n 'revision', 'integer')]\n\n\nclass DonationAnonymizer(Anonymizer):\n model = Donation\n attributes = [('id', 'SKIP'), ('profile_id', 'SKIP'), ('amount',\n 'integer'), ('date', 'date'), ('description', 'varchar'), ('notes',\n 'lorem')]\n\n\nclass AddressAnonymizer(Anonymizer):\n model = Address\n attributes = [('id', 'SKIP'), ('profile_id', 'SKIP'), ('address_type',\n 'choice'), ('street_1', 'street_address'), ('street_2',\n 'street_address'), ('street_3', 'street_address'), ('city', 'city'),\n ('state', 'choice'), ('state_other', 'varchar'), ('postal_code',\n 'uk_postcode'), ('display', 'bool')]\n\n\nclass AwardAnonymizer(Anonymizer):\n model = Award\n attributes = [('id', 'SKIP'), ('profile_id', 'SKIP'), ('title',\n 'varchar'), ('description', 'lorem'), ('date_received', 'date'), (\n 'display', 'bool')]\n\n\nclass 
ReferenceAnonymizer(Anonymizer):\n model = Reference\n attributes = [('id', 'SKIP'), ('profile_id', 'SKIP'), ('body', 'lorem')]\n\n\nclass ExperienceAnonymizer(Anonymizer):\n model = Experience\n attributes = [('id', 'SKIP'), ('profile_id', 'SKIP'), (\n 'experience_type', 'choice'), ('title', 'varchar'), ('description',\n 'lorem'), ('company', 'varchar'), ('city', 'city'), ('state',\n 'choice'), ('country', 'varchar'), ('start_date', 'date'), (\n 'end_date', 'date'), ('display', 'bool')]\n\n\nclass SkillAnonymizer(Anonymizer):\n model = Skill\n attributes = [('id', 'SKIP'), ('profile_id', 'SKIP'), ('summary',\n 'lorem'), ('display', 'bool')]\n\n\nclass EducationAnonymizer(Anonymizer):\n model = Education\n attributes = [('id', 'SKIP'), ('profile_id', 'SKIP'), ('diploma',\n 'choice'), ('school', 'varchar'), ('description', 'lorem'), (\n 'start_date', 'date'), ('end_date', 'date'), ('display', 'bool')]\n\n\nclass ImporterUsersAnonymizer(Anonymizer):\n model = ImporterUsers\n attributes = [('id', 'SKIP'), ('action', 'SKIP'), ('person_id', 'SKIP'),\n ('section_id', 'SKIP'), ('first_name', 'SKIP'), ('last_name',\n 'SKIP'), ('email', 'SKIP'), ('photo_url', 'SKIP'), ('person_type',\n 'SKIP')]\n",
"step-4": "from people.models import Medium, Profile, Staff, Instructor, Student, Alumni, Donation, Address, Award, Reference, Experience, Skill, Education, ImporterUsers\nfrom anonymizer import Anonymizer\n\n\nclass MediumAnonymizer(Anonymizer):\n model = Medium\n attributes = [('medium_id', 'integer'), ('description', 'varchar')]\n\n\nclass ProfileAnonymizer(Anonymizer):\n model = Profile\n attributes = [('user_id', 'SKIP'), ('person_id', 'SKIP'), (\n 'datatel_avatar_url', 'SKIP'), ('suffix', 'choice'), ('salutation',\n 'choice'), ('middle_name', 'name'), ('title', 'varchar'), ('about',\n 'lorem'), ('email2', 'email'), ('home_phone1', 'phonenumber'), (\n 'biz_phone1', 'phonenumber'), ('mobile_phone1', 'phonenumber'), (\n 'fax', 'phonenumber'), ('allow_contact', 'bool'), ('show_name',\n 'bool'), ('url_personal', 'varchar'), ('url_org', 'varchar'), (\n 'accepted_terms', 'bool'), ('email_on_follow', 'bool')]\n\n\nclass StaffAnonymizer(Anonymizer):\n model = Staff\n attributes = [('profile_id', 'SKIP'), ('office_num', 'varchar'), (\n 'extension', 'varchar')]\n\n\nclass InstructorAnonymizer(Anonymizer):\n model = Instructor\n attributes = [('profile_id', 'SKIP'), ('office_num', 'varchar'), (\n 'extension', 'varchar'), ('bio_short', 'lorem'), ('bio_long', 'lorem')]\n\n\nclass StudentAnonymizer(Anonymizer):\n model = Student\n attributes = [('profile_id', 'SKIP'), ('grad_year', 'choice'), (\n 'funding_amount', 'SKIP'), ('enrollment_date', 'date'), (\n 'program_length', 'integer'), ('visiting_scholar', 'bool')]\n\n\nclass AlumniAnonymizer(Anonymizer):\n model = Alumni\n attributes = [('profile_id', 'SKIP'), ('grad_year', 'choice'), (\n 'third_year', 'bool'), ('j200_inst', 'varchar'), ('funding_amount',\n 'SKIP'), ('enrollment_date', 'date'), ('program_length', 'integer'),\n ('equipment_balance', 'SKIP'), ('visiting_scholar', 'bool'), (\n 'employer', 'varchar'), ('specialty', 'varchar'), ('medium',\n 'choice'), ('prev_emp1', 'varchar'), ('prev_emp2', 'varchar'), (\n 'prev_emp3', 'varchar'), ('notes_exclude', 'bool'), ('notes',\n 'lorem'), ('mod_date', 'date'), ('pub_display', 'bool'), (\n 'freelance', 'bool'), ('region', 'choice'), ('prev_intern1',\n 'varchar'), ('prev_intern2', 'varchar'), ('prev_intern3', 'varchar'\n ), ('first_job', 'varchar'), ('books', 'lorem'), ('deceased_notes',\n 'varchar'), ('mia', 'bool'), ('mia_notes', 'lorem'), ('interview',\n 'bool'), ('interview_year', 'choice'), ('interview_notes', 'lorem'),\n ('agents_year', 'choice'), ('agents_notes', 'lorem'), (\n 'event_attend_notes', 'lorem'), ('famous_notes', 'lorem'), (\n 'volunteer_speak', 'bool'), ('volunteer_committee', 'bool'), (\n 'volunteer_interview', 'bool'), ('volunteer_mentor', 'bool'), (\n 'volunteer_agent', 'bool'), ('maillist_class', 'bool'), (\n 'no_maillists', 'bool'), ('no_reminder', 'bool'), ('suggestions',\n 'lorem'), ('committee_notes', 'lorem'), ('inactive', 'bool'), (\n 'revision', 'integer')]\n\n\nclass DonationAnonymizer(Anonymizer):\n model = Donation\n attributes = [('id', 'SKIP'), ('profile_id', 'SKIP'), ('amount',\n 'integer'), ('date', 'date'), ('description', 'varchar'), ('notes',\n 'lorem')]\n\n\nclass AddressAnonymizer(Anonymizer):\n model = Address\n attributes = [('id', 'SKIP'), ('profile_id', 'SKIP'), ('address_type',\n 'choice'), ('street_1', 'street_address'), ('street_2',\n 'street_address'), ('street_3', 'street_address'), ('city', 'city'),\n ('state', 'choice'), ('state_other', 'varchar'), ('postal_code',\n 'uk_postcode'), ('display', 'bool')]\n\n\nclass AwardAnonymizer(Anonymizer):\n 
model = Award\n attributes = [('id', 'SKIP'), ('profile_id', 'SKIP'), ('title',\n 'varchar'), ('description', 'lorem'), ('date_received', 'date'), (\n 'display', 'bool')]\n\n\nclass ReferenceAnonymizer(Anonymizer):\n model = Reference\n attributes = [('id', 'SKIP'), ('profile_id', 'SKIP'), ('body', 'lorem')]\n\n\nclass ExperienceAnonymizer(Anonymizer):\n model = Experience\n attributes = [('id', 'SKIP'), ('profile_id', 'SKIP'), (\n 'experience_type', 'choice'), ('title', 'varchar'), ('description',\n 'lorem'), ('company', 'varchar'), ('city', 'city'), ('state',\n 'choice'), ('country', 'varchar'), ('start_date', 'date'), (\n 'end_date', 'date'), ('display', 'bool')]\n\n\nclass SkillAnonymizer(Anonymizer):\n model = Skill\n attributes = [('id', 'SKIP'), ('profile_id', 'SKIP'), ('summary',\n 'lorem'), ('display', 'bool')]\n\n\nclass EducationAnonymizer(Anonymizer):\n model = Education\n attributes = [('id', 'SKIP'), ('profile_id', 'SKIP'), ('diploma',\n 'choice'), ('school', 'varchar'), ('description', 'lorem'), (\n 'start_date', 'date'), ('end_date', 'date'), ('display', 'bool')]\n\n\nclass ImporterUsersAnonymizer(Anonymizer):\n model = ImporterUsers\n attributes = [('id', 'SKIP'), ('action', 'SKIP'), ('person_id', 'SKIP'),\n ('section_id', 'SKIP'), ('first_name', 'SKIP'), ('last_name',\n 'SKIP'), ('email', 'SKIP'), ('photo_url', 'SKIP'), ('person_type',\n 'SKIP')]\n",
"step-5": "from people.models import Medium, Profile, Staff, Instructor, Student, Alumni, Donation, Address, Award, Reference, Experience, Skill, Education, ImporterUsers\nfrom anonymizer import Anonymizer\n\nclass MediumAnonymizer(Anonymizer):\n\n model = Medium\n\n attributes = [\n ('medium_id', \"integer\"),\n ('description', \"varchar\"),\n ]\n\n\nclass ProfileAnonymizer(Anonymizer):\n\n model = Profile\n\n attributes = [\n ('user_id', \"SKIP\"),\n ('person_id', \"SKIP\"),\n ('datatel_avatar_url', \"SKIP\"),\n ('suffix', \"choice\"),\n ('salutation', \"choice\"),\n ('middle_name', \"name\"),\n ('title', \"varchar\"),\n ('about', \"lorem\"),\n ('email2', \"email\"),\n ('home_phone1', \"phonenumber\"),\n ('biz_phone1', \"phonenumber\"),\n ('mobile_phone1', \"phonenumber\"),\n ('fax', \"phonenumber\"),\n ('allow_contact', \"bool\"),\n ('show_name', \"bool\"),\n ('url_personal', \"varchar\"),\n ('url_org', \"varchar\"),\n ('accepted_terms', \"bool\"),\n ('email_on_follow', \"bool\"),\n ]\n\n\nclass StaffAnonymizer(Anonymizer):\n\n model = Staff\n\n attributes = [\n ('profile_id', \"SKIP\"),\n ('office_num', \"varchar\"),\n ('extension', \"varchar\"),\n ]\n\n\nclass InstructorAnonymizer(Anonymizer):\n\n model = Instructor\n\n attributes = [\n ('profile_id', \"SKIP\"),\n ('office_num', \"varchar\"),\n ('extension', \"varchar\"),\n ('bio_short', \"lorem\"),\n ('bio_long', \"lorem\"),\n ]\n\n\nclass StudentAnonymizer(Anonymizer):\n\n model = Student\n\n attributes = [\n ('profile_id', \"SKIP\"),\n ('grad_year', \"choice\"),\n ('funding_amount', \"SKIP\"),\n ('enrollment_date', \"date\"),\n ('program_length', \"integer\"),\n ('visiting_scholar', \"bool\"),\n ]\n\n\nclass AlumniAnonymizer(Anonymizer):\n\n model = Alumni\n\n attributes = [\n ('profile_id', \"SKIP\"),\n ('grad_year', \"choice\"),\n ('third_year', \"bool\"),\n ('j200_inst', \"varchar\"),\n ('funding_amount', \"SKIP\"),\n ('enrollment_date', \"date\"),\n ('program_length', \"integer\"),\n ('equipment_balance', \"SKIP\"),\n ('visiting_scholar', \"bool\"),\n ('employer', \"varchar\"),\n ('specialty', \"varchar\"),\n ('medium', \"choice\"),\n ('prev_emp1', \"varchar\"),\n ('prev_emp2', \"varchar\"),\n ('prev_emp3', \"varchar\"),\n ('notes_exclude', \"bool\"),\n ('notes', \"lorem\"),\n ('mod_date', \"date\"),\n ('pub_display', \"bool\"),\n ('freelance', \"bool\"),\n ('region', \"choice\"),\n ('prev_intern1', \"varchar\"),\n ('prev_intern2', \"varchar\"),\n ('prev_intern3', \"varchar\"),\n ('first_job', \"varchar\"),\n ('books', \"lorem\"),\n ('deceased_notes', \"varchar\"),\n ('mia', \"bool\"),\n ('mia_notes', \"lorem\"),\n ('interview', \"bool\"),\n ('interview_year', \"choice\"),\n ('interview_notes', \"lorem\"),\n ('agents_year', \"choice\"),\n ('agents_notes', \"lorem\"),\n ('event_attend_notes', \"lorem\"),\n ('famous_notes', \"lorem\"),\n ('volunteer_speak', \"bool\"),\n ('volunteer_committee', \"bool\"),\n ('volunteer_interview', \"bool\"),\n ('volunteer_mentor', \"bool\"),\n ('volunteer_agent', \"bool\"),\n ('maillist_class', \"bool\"),\n ('no_maillists', \"bool\"),\n ('no_reminder', \"bool\"),\n ('suggestions', \"lorem\"),\n ('committee_notes', \"lorem\"),\n ('inactive', \"bool\"),\n ('revision', \"integer\"),\n ]\n\n\nclass DonationAnonymizer(Anonymizer):\n\n model = Donation\n\n attributes = [\n ('id', \"SKIP\"),\n ('profile_id', \"SKIP\"),\n ('amount', \"integer\"),\n ('date', \"date\"),\n ('description', \"varchar\"),\n ('notes', \"lorem\"),\n ]\n\n\nclass AddressAnonymizer(Anonymizer):\n\n model = Address\n\n attributes = 
[\n ('id', \"SKIP\"),\n ('profile_id', \"SKIP\"),\n ('address_type', \"choice\"),\n ('street_1', \"street_address\"),\n ('street_2', \"street_address\"),\n ('street_3', \"street_address\"),\n ('city', \"city\"),\n ('state', \"choice\"),\n ('state_other', \"varchar\"),\n ('postal_code', \"uk_postcode\"),\n ('display', \"bool\"),\n ]\n\n\nclass AwardAnonymizer(Anonymizer):\n\n model = Award\n\n attributes = [\n ('id', \"SKIP\"),\n ('profile_id', \"SKIP\"),\n ('title', \"varchar\"),\n ('description', \"lorem\"),\n ('date_received', \"date\"),\n ('display', \"bool\"),\n ]\n\n\nclass ReferenceAnonymizer(Anonymizer):\n\n model = Reference\n\n attributes = [\n ('id', \"SKIP\"),\n ('profile_id', \"SKIP\"),\n ('body', \"lorem\"),\n ]\n\n\nclass ExperienceAnonymizer(Anonymizer):\n\n model = Experience\n\n attributes = [\n ('id', \"SKIP\"),\n ('profile_id', \"SKIP\"),\n ('experience_type', \"choice\"),\n ('title', \"varchar\"),\n ('description', \"lorem\"),\n ('company', \"varchar\"),\n ('city', \"city\"),\n ('state', \"choice\"),\n ('country', \"varchar\"),\n ('start_date', \"date\"),\n ('end_date', \"date\"),\n ('display', \"bool\"),\n ]\n\n\nclass SkillAnonymizer(Anonymizer):\n\n model = Skill\n\n attributes = [\n ('id', \"SKIP\"),\n ('profile_id', \"SKIP\"),\n ('summary', \"lorem\"),\n ('display', \"bool\"),\n ]\n\n\nclass EducationAnonymizer(Anonymizer):\n\n model = Education\n\n attributes = [\n ('id', \"SKIP\"),\n ('profile_id', \"SKIP\"),\n ('diploma', \"choice\"),\n ('school', \"varchar\"),\n ('description', \"lorem\"),\n ('start_date', \"date\"),\n ('end_date', \"date\"),\n ('display', \"bool\"),\n ]\n\n\nclass ImporterUsersAnonymizer(Anonymizer):\n\n model = ImporterUsers\n\n attributes = [\n ('id', \"SKIP\"),\n ('action', \"SKIP\"),\n ('person_id', \"SKIP\"),\n ('section_id', \"SKIP\"),\n ('first_name', \"SKIP\"),\n ('last_name', \"SKIP\"),\n ('email', \"SKIP\"),\n ('photo_url', \"SKIP\"),\n ('person_type', \"SKIP\"),\n ]\n",
"step-ids": [
16,
18,
28,
29,
30
]
}
|
[
16,
18,
28,
29,
30
] |
<|reserved_special_token_0|>
class GenomicJobControllerTest(BaseTestCase):
def setUp(self):
super(GenomicJobControllerTest, self).setUp()
self.data_file_dao = GenomicGcDataFileDao()
self.event_data_dao = MessageBrokenEventDataDao()
self.incident_dao = GenomicIncidentDao()
self.member_dao = GenomicSetMemberDao()
self.metrics_dao = GenomicGCValidationMetricsDao()
self.user_event_metrics_dao = UserEventMetricsDao()
self.job_run_dao = GenomicJobRunDao()
self.report_state_dao = GenomicMemberReportStateDao()
self.appointment_event_dao = GenomicAppointmentEventDao()
self.appointment_metrics_dao = GenomicAppointmentEventMetricsDao()
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def test_updating_members_blocklists(self):
gen_set = self.data_generator.create_database_genomic_set(
genomicSetName='.', genomicSetCriteria='.', genomicSetVersion=1)
ids_should_be_updated = []
for i in range(4):
ids_should_be_updated.append(self.data_generator.
create_database_genomic_set_member(genomicSetId=gen_set.id,
biobankId='100153482', sampleId='21042005280', genomeType=
'test_investigation_one' if i & 2 != 0 else 'aou_wgs',
genomicWorkflowState=GenomicWorkflowState.AW0, ai_an='Y' if
i & 2 == 0 else 'N').id)
for i in range(2):
self.data_generator.create_database_genomic_set_member(genomicSetId
=gen_set.id, biobankId='100153482', sampleId='21042005280',
genomeType='aou_array', genomicWorkflowState=
GenomicWorkflowState.AW0, ai_an='N')
with GenomicJobController(GenomicJob.UPDATE_MEMBERS_BLOCKLISTS
) as controller:
controller.update_members_blocklists()
created_members = self.member_dao.get_all()
blocklisted = list(filter(lambda x: x.blockResults == 1 or x.
blockResearch == 1, created_members))
self.assertTrue(ids_should_be_updated.sort() == [obj.id for obj in
blocklisted].sort())
self.assertTrue(all(obj.blockResearch == 1 and obj.
blockResearchReason is not None and obj.blockResearchReason ==
'aian' for obj in created_members if obj.ai_an == 'Y' and obj.
genomicWorkflowState == GenomicWorkflowState.AW0))
self.assertTrue(all(obj.blockResults == 0 and obj.
blockResultsReason is None for obj in created_members if obj.
ai_an == 'Y' and obj.genomicWorkflowState ==
GenomicWorkflowState.AW0))
self.assertTrue(all(obj.blockResearch == 1 and obj.
blockResearchReason is not None and obj.blockResearchReason ==
'test_sample_swap' for obj in created_members if obj.genomeType ==
'test_investigation_one' and obj.genomicWorkflowState ==
GenomicWorkflowState.AW0))
self.assertTrue(all(obj.blockResults == 1 and obj.
blockResultsReason is not None and obj.blockResultsReason ==
'test_sample_swap' for obj in created_members if obj.genomeType ==
'test_investigation_one' and obj.genomicWorkflowState ==
GenomicWorkflowState.AW0))
self.assertTrue(all(obj.blockResearch == 0 and obj.
blockResearchReason is None for obj in created_members if obj.
genomeType == 'aou_array' and obj.genomicWorkflowState ==
GenomicWorkflowState.AW0))
self.assertTrue(all(obj.blockResults == 0 and obj.
blockResultsReason is None for obj in created_members if obj.
genomeType == 'aou_array' and obj.genomicWorkflowState ==
GenomicWorkflowState.AW0))
with self.member_dao.session() as session:
session.query(GenomicSetMember).delete()
run_result = self.job_run_dao.get(1)
self.assertEqual(run_result.runStatus, GenomicSubProcessStatus.
COMPLETED)
self.assertEqual(run_result.runResult, GenomicSubProcessResult.SUCCESS)
for i in range(4):
self.data_generator.create_database_genomic_set_member(genomicSetId
=gen_set.id, biobankId='100153482', sampleId='21042005280',
genomeType='test_investigation_one' if i & 2 != 0 else
'aou_wgs', genomicWorkflowState=GenomicWorkflowState.AW1,
ai_an='Y' if i & 2 == 0 else 'N')
with GenomicJobController(GenomicJob.UPDATE_MEMBERS_BLOCKLISTS
) as controller:
controller.update_members_blocklists()
modified_members = self.member_dao.get_all()
self.assertTrue(all(obj.blockResearch == 1 and obj.
blockResearchReason is not None and obj.blockResearchReason ==
'aian' for obj in modified_members if obj.ai_an == 'Y' and obj.
genomicWorkflowState == GenomicWorkflowState.AW1))
self.assertTrue(all(obj.blockResults == 0 and obj.
blockResultsReason is None for obj in modified_members if obj.
ai_an == 'Y' and obj.genomicWorkflowState ==
GenomicWorkflowState.AW1))
self.assertTrue(all(obj.blockResearch == 1 and obj.
blockResearchReason is not None and obj.blockResearchReason ==
'test_sample_swap' for obj in modified_members if obj.
genomeType == 'test_investigation_one' and obj.
genomicWorkflowState == GenomicWorkflowState.AW1))
self.assertTrue(all(obj.blockResults == 1 and obj.
blockResultsReason is not None and obj.blockResultsReason ==
'test_sample_swap' for obj in modified_members if obj.
genomeType == 'test_investigation_one' and obj.
genomicWorkflowState == GenomicWorkflowState.AW1))
run_result = self.job_run_dao.get(2)
self.assertEqual(run_result.runStatus, GenomicSubProcessStatus.
COMPLETED)
self.assertEqual(run_result.runResult, GenomicSubProcessResult.SUCCESS)
<|reserved_special_token_0|>
@mock.patch(
'rdr_service.genomic.genomic_job_controller.GenomicJobController.execute_cloud_task'
)
def test_reconcile_pdr_data(self, mock_cloud_task):
with GenomicJobController(GenomicJob.RECONCILE_PDR_DATA) as controller:
controller.reconcile_pdr_data()
cloud_task_endpoint = 'rebuild_genomic_table_records_task'
first_run = self.job_run_dao.get_all()
self.assertEqual(mock_cloud_task.call_count, 1)
call_args = mock_cloud_task.call_args_list
self.assertEqual(len(call_args), 1)
self.assertEqual(call_args[0].args[0]['table'], self.job_run_dao.
model_type.__tablename__)
self.assertTrue(type(call_args[0].args[0]['ids']) is list)
self.assertEqual(call_args[0].args[0]['ids'], [obj.id for obj in
first_run])
self.assertEqual(call_args[0].args[1], cloud_task_endpoint)
participant = self.data_generator.create_database_participant()
gen_set = self.data_generator.create_database_genomic_set(
genomicSetName='.', genomicSetCriteria='.', genomicSetVersion=1)
plus_ten = clock.CLOCK.now() + datetime.timedelta(minutes=10)
plus_ten = plus_ten.replace(microsecond=0)
with FakeClock(plus_ten):
for i in range(2):
gen_member = (self.data_generator.
create_database_genomic_set_member(genomicSetId=gen_set
.id, biobankId='100153482', sampleId='21042005280',
genomeType='aou_wgs', genomicWorkflowState=
GenomicWorkflowState.AW1))
gen_processed_file = (self.data_generator.
create_database_genomic_file_processed(runId=first_run[
0].id, startTime=clock.CLOCK.now(), filePath=
f'test_file_path_{i}', bucketName='test_bucket',
fileName='test_file_name'))
self.data_generator.create_database_genomic_gc_validation_metrics(
genomicSetMemberId=gen_member.id,
genomicFileProcessedId=gen_processed_file.id)
manifest = (self.data_generator.
create_database_genomic_manifest_file(manifestTypeId=2,
filePath=f'test_file_path_{i}'))
self.data_generator.create_database_genomic_manifest_feedback(
inputManifestFileId=manifest.id, feedbackRecordCount=2)
self.data_generator.create_database_genomic_user_event_metrics(
participant_id=participant.participantId, event_name=
'test_event', run_id=1)
self.data_generator.create_database_genomic_informing_loop(
message_record_id=1, event_type=
'informing_loop_decision', module_type='gem',
participant_id=participant.participantId,
decision_value='maybe_later', event_authored_time=clock
.CLOCK.now())
self.data_generator.create_database_genomic_cvl_past_due(
cvl_site_id='co', email_notification_sent=0, sample_id=
'sample_test', results_type='hdr',
genomic_set_member_id=gen_member.id)
self.data_generator.create_database_genomic_appointment(
message_record_id=i, appointment_id=i, event_type=
'appointment_scheduled', module_type='hdr',
participant_id=participant.participantId,
event_authored_time=clock.CLOCK.now(), source='Color',
appointment_timestamp=format_datetime(clock.CLOCK.now()
), appointment_timezone='America/Los_Angeles', location
='123 address st', contact_number='17348675309',
language='en')
self.data_generator.create_database_genomic_member_report_state(
genomic_set_member_id=gen_member.id, participant_id=
participant.participantId, module='gem',
genomic_report_state=GenomicReportState.GEM_RPT_READY,
event_authored_time=clock.CLOCK.now())
self.data_generator.create_genomic_result_viewed(participant_id
=participant.participantId, event_type='result_viewed',
event_authored_time=clock.CLOCK.now(), module_type=
'gem', sample_id=gen_member.sampleId)
with GenomicJobController(GenomicJob.RECONCILE_PDR_DATA) as controller:
controller.reconcile_pdr_data()
affected_tables = ['genomic_set', 'genomic_set_member',
'genomic_job_run', 'genomic_file_processed',
'genomic_gc_validation_metrics', 'genomic_manifest_file',
'genomic_manifest_feedback', 'genomic_informing_loop',
'genomic_cvl_results_past_due', 'user_event_metrics',
'genomic_member_report_state', 'genomic_result_viewed',
'genomic_appointment_event']
num_calls = len(affected_tables) + 1
self.assertEqual(mock_cloud_task.call_count, num_calls)
call_args = mock_cloud_task.call_args_list
self.assertEqual(len(call_args), num_calls)
mock_tables = set([obj[0][0]['table'] for obj in call_args])
mock_endpoint = [obj[0][1] for obj in call_args]
self.assertTrue([mock_tables].sort() == affected_tables.sort())
self.assertTrue(all(obj for obj in mock_endpoint if obj ==
cloud_task_endpoint))
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def test_reconcile_message_broker_results_ready(self):
self.data_generator.create_database_genomic_set(genomicSetName=
'test', genomicSetCriteria='.', genomicSetVersion=1)
self.data_generator.create_database_genomic_job_run(jobId=
GenomicJob.METRICS_FILE_INGEST, startTime=clock.CLOCK.now())
for pid in range(7):
self.data_generator.create_database_participant(participantId=1 +
pid, biobankId=1 + pid)
for i in range(1, 6):
self.data_generator.create_database_genomic_set_member(
participantId=i, genomicSetId=1, biobankId=i,
collectionTubeId=100 + i, sampleId=10 + i, genomeType='aou_wgs'
)
if i < 4:
self.data_generator.create_database_genomic_user_event_metrics(
participant_id=i, created_at=datetime.datetime(2022, 10,
6, 0), event_name='pgx.result_ready', run_id=1)
if i == 4:
self.data_generator.create_database_genomic_user_event_metrics(
participant_id=i, created_at=datetime.datetime(2022, 10,
6, 0), event_name='hdr.result_ready.informative', run_id=1)
if i == 5:
self.data_generator.create_database_genomic_user_event_metrics(
participant_id=i, created_at=datetime.datetime(2022, 10,
6, 0), event_name='hdr.result_ready.uninformative',
run_id=1)
genomic_cvl_pipeline.reconcile_message_broker_results_ready()
report_state_dao = GenomicMemberReportStateDao()
states = report_state_dao.get_all()
self.assertEqual(5, len(states))
pgx_records = [rec for rec in states if rec.module == 'pgx_v1']
hdr_record_uninf = [rec for rec in states if rec.
genomic_report_state == GenomicReportState.HDR_RPT_UNINFORMATIVE][0
]
hdr_record_pos = [rec for rec in states if rec.genomic_report_state ==
GenomicReportState.HDR_RPT_POSITIVE][0]
for pgx_record in pgx_records:
self.assertEqual(GenomicReportState.PGX_RPT_READY, pgx_record.
genomic_report_state)
self.assertEqual('PGX_RPT_READY', pgx_record.
genomic_report_state_str)
self.assertEqual(int(pgx_record.sample_id), pgx_record.
participant_id + 10)
self.assertEqual('result_ready', pgx_record.event_type)
self.assertEqual(datetime.datetime(2022, 10, 6, 0), pgx_record.
event_authored_time)
self.assertIsNotNone(pgx_record.created_from_metric_id)
self.assertEqual('HDR_RPT_UNINFORMATIVE', hdr_record_uninf.
genomic_report_state_str)
self.assertEqual(int(hdr_record_uninf.sample_id), hdr_record_uninf.
participant_id + 10)
self.assertEqual('result_ready', hdr_record_uninf.event_type)
self.assertEqual(datetime.datetime(2022, 10, 6, 0),
hdr_record_uninf.event_authored_time)
self.assertIsNotNone(hdr_record_uninf.created_from_metric_id)
self.assertEqual('HDR_RPT_POSITIVE', hdr_record_pos.
genomic_report_state_str)
self.assertEqual(int(hdr_record_pos.sample_id), hdr_record_pos.
participant_id + 10)
self.assertEqual('result_ready', hdr_record_pos.event_type)
self.assertEqual(datetime.datetime(2022, 10, 6, 0), hdr_record_pos.
event_authored_time)
self.assertIsNotNone(hdr_record_pos.created_from_metric_id)
class GenomicJobControllerTest(BaseTestCase):
def setUp(self):
super(GenomicJobControllerTest, self).setUp()
self.data_file_dao = GenomicGcDataFileDao()
self.event_data_dao = MessageBrokenEventDataDao()
self.incident_dao = GenomicIncidentDao()
self.member_dao = GenomicSetMemberDao()
self.metrics_dao = GenomicGCValidationMetricsDao()
self.user_event_metrics_dao = UserEventMetricsDao()
self.job_run_dao = GenomicJobRunDao()
self.report_state_dao = GenomicMemberReportStateDao()
self.appointment_event_dao = GenomicAppointmentEventDao()
self.appointment_metrics_dao = GenomicAppointmentEventMetricsDao()
def test_incident_with_long_message(self):
"""Make sure the length of incident messages doesn't cause issues when recording them"""
incident_message = '1' * (GenomicIncident.message.type.length + 20)
mock_slack_handler = mock.MagicMock()
job_controller = GenomicJobController(job_id=1)
job_controller.genomic_alert_slack = mock_slack_handler
job_controller.create_incident(message=incident_message, slack=True)
incident: GenomicIncident = self.session.query(GenomicIncident).one()
self.assertTrue(incident_message.startswith(incident.message))
mock_slack_handler.send_message_to_webhook.assert_called_with(
message_data={'text': incident_message})
def test_gvcf_files_ingestion_create_incident(self):
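        """gVCF ingestion should create an UNABLE_TO_FIND_METRIC incident when no metrics record matches the file's sample id."""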
bucket_name = 'test_bucket'
file_path = (
'Wgs_sample_raw_data/SS_VCF_research/BCM_A100153482_21042005280_SIA0013441__1.hard-filtered.gvcf.gz'
)
gen_set = self.data_generator.create_database_genomic_set(
genomicSetName='.', genomicSetCriteria='.', genomicSetVersion=1)
gen_member = self.data_generator.create_database_genomic_set_member(
genomicSetId=gen_set.id, biobankId='111111111', sampleId=
'222222222222', genomeType='aou_wgs', genomicWorkflowState=
GenomicWorkflowState.AW1)
gen_job_run = self.data_generator.create_database_genomic_job_run(jobId
=GenomicJob.AW1_MANIFEST, startTime=clock.CLOCK.now(),
runResult=GenomicSubProcessResult.SUCCESS)
gen_processed_file = (self.data_generator.
create_database_genomic_file_processed(runId=gen_job_run.id,
startTime=clock.CLOCK.now(), filePath='/test_file_path',
bucketName=bucket_name, fileName='test_file_name'))
self.data_generator.create_database_genomic_gc_validation_metrics(
genomicSetMemberId=gen_member.id, genomicFileProcessedId=
gen_processed_file.id)
with GenomicJobController(GenomicJob.INGEST_DATA_FILES) as controller:
controller.ingest_data_files_into_gc_metrics(file_path, bucket_name
)
incident = self.incident_dao.get(1)
self.assertIsNotNone(incident)
self.assertEqual(incident.code, GenomicIncidentCode.
UNABLE_TO_FIND_METRIC.name)
self.assertEqual(incident.data_file_path, file_path)
self.assertEqual(incident.message,
'INGEST_DATA_FILES: Cannot find genomics metric record for sample id: 21042005280'
)
def test_updating_members_blocklists(self):
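        """UPDATE_MEMBERS_BLOCKLISTS should block research for AIAN members, block research and results for test-sample-swap members (AW0 and AW1), and leave aou_array members unblocked."""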
gen_set = self.data_generator.create_database_genomic_set(
genomicSetName='.', genomicSetCriteria='.', genomicSetVersion=1)
ids_should_be_updated = []
for i in range(4):
ids_should_be_updated.append(self.data_generator.
create_database_genomic_set_member(genomicSetId=gen_set.id,
biobankId='100153482', sampleId='21042005280', genomeType=
'test_investigation_one' if i & 2 != 0 else 'aou_wgs',
genomicWorkflowState=GenomicWorkflowState.AW0, ai_an='Y' if
i & 2 == 0 else 'N').id)
for i in range(2):
self.data_generator.create_database_genomic_set_member(genomicSetId
=gen_set.id, biobankId='100153482', sampleId='21042005280',
genomeType='aou_array', genomicWorkflowState=
GenomicWorkflowState.AW0, ai_an='N')
with GenomicJobController(GenomicJob.UPDATE_MEMBERS_BLOCKLISTS
) as controller:
controller.update_members_blocklists()
created_members = self.member_dao.get_all()
blocklisted = list(filter(lambda x: x.blockResults == 1 or x.
blockResearch == 1, created_members))
        self.assertEqual(sorted(ids_should_be_updated), sorted([obj.id for obj in blocklisted]))
self.assertTrue(all(obj.blockResearch == 1 and obj.
blockResearchReason is not None and obj.blockResearchReason ==
'aian' for obj in created_members if obj.ai_an == 'Y' and obj.
genomicWorkflowState == GenomicWorkflowState.AW0))
self.assertTrue(all(obj.blockResults == 0 and obj.
blockResultsReason is None for obj in created_members if obj.
ai_an == 'Y' and obj.genomicWorkflowState ==
GenomicWorkflowState.AW0))
self.assertTrue(all(obj.blockResearch == 1 and obj.
blockResearchReason is not None and obj.blockResearchReason ==
'test_sample_swap' for obj in created_members if obj.genomeType ==
'test_investigation_one' and obj.genomicWorkflowState ==
GenomicWorkflowState.AW0))
self.assertTrue(all(obj.blockResults == 1 and obj.
blockResultsReason is not None and obj.blockResultsReason ==
'test_sample_swap' for obj in created_members if obj.genomeType ==
'test_investigation_one' and obj.genomicWorkflowState ==
GenomicWorkflowState.AW0))
self.assertTrue(all(obj.blockResearch == 0 and obj.
blockResearchReason is None for obj in created_members if obj.
genomeType == 'aou_array' and obj.genomicWorkflowState ==
GenomicWorkflowState.AW0))
self.assertTrue(all(obj.blockResults == 0 and obj.
blockResultsReason is None for obj in created_members if obj.
genomeType == 'aou_array' and obj.genomicWorkflowState ==
GenomicWorkflowState.AW0))
with self.member_dao.session() as session:
session.query(GenomicSetMember).delete()
run_result = self.job_run_dao.get(1)
self.assertEqual(run_result.runStatus, GenomicSubProcessStatus.
COMPLETED)
self.assertEqual(run_result.runResult, GenomicSubProcessResult.SUCCESS)
for i in range(4):
self.data_generator.create_database_genomic_set_member(genomicSetId
=gen_set.id, biobankId='100153482', sampleId='21042005280',
genomeType='test_investigation_one' if i & 2 != 0 else
'aou_wgs', genomicWorkflowState=GenomicWorkflowState.AW1,
ai_an='Y' if i & 2 == 0 else 'N')
with GenomicJobController(GenomicJob.UPDATE_MEMBERS_BLOCKLISTS
) as controller:
controller.update_members_blocklists()
modified_members = self.member_dao.get_all()
self.assertTrue(all(obj.blockResearch == 1 and obj.
blockResearchReason is not None and obj.blockResearchReason ==
'aian' for obj in modified_members if obj.ai_an == 'Y' and obj.
genomicWorkflowState == GenomicWorkflowState.AW1))
self.assertTrue(all(obj.blockResults == 0 and obj.
blockResultsReason is None for obj in modified_members if obj.
ai_an == 'Y' and obj.genomicWorkflowState ==
GenomicWorkflowState.AW1))
self.assertTrue(all(obj.blockResearch == 1 and obj.
blockResearchReason is not None and obj.blockResearchReason ==
'test_sample_swap' for obj in modified_members if obj.
genomeType == 'test_investigation_one' and obj.
genomicWorkflowState == GenomicWorkflowState.AW1))
self.assertTrue(all(obj.blockResults == 1 and obj.
blockResultsReason is not None and obj.blockResultsReason ==
'test_sample_swap' for obj in modified_members if obj.
genomeType == 'test_investigation_one' and obj.
genomicWorkflowState == GenomicWorkflowState.AW1))
run_result = self.job_run_dao.get(2)
self.assertEqual(run_result.runStatus, GenomicSubProcessStatus.
COMPLETED)
self.assertEqual(run_result.runResult, GenomicSubProcessResult.SUCCESS)
def test_ingest_user_metrics_file(self):
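        """METRICS_FILE_INGEST should load every row of the user-events CSV into user_event_metrics, tagged with the ingesting job run id."""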
test_file = 'Genomic-Metrics-File-User-Events-Test.csv'
bucket_name = 'test_bucket'
sub_folder = 'user_events'
pids = []
file_ingester = GenomicFileIngester()
for _ in range(2):
pid = self.data_generator.create_database_participant()
pids.append(pid.participantId)
test_metrics_file = create_ingestion_test_file(test_file,
bucket_name, sub_folder)
test_file_path = f'{bucket_name}/{sub_folder}/{test_metrics_file}'
with open_cloud_file(test_file_path) as csv_file:
metrics_to_ingest = file_ingester._read_data_to_ingest(csv_file)
with GenomicJobController(GenomicJob.METRICS_FILE_INGEST
) as controller:
controller.ingest_metrics_file(metric_type='user_events',
file_path=test_file_path)
job_run_id = controller.job_run.id
metrics = self.user_event_metrics_dao.get_all()
for pid in pids:
file_metrics = list(filter(lambda x: int(x['participant_id'].
split('P')[-1]) == pid, metrics_to_ingest['rows']))
participant_ingested_metrics = list(filter(lambda x: x.
participant_id == pid, metrics))
self.assertEqual(len(file_metrics), len(
participant_ingested_metrics))
self.assertTrue(all(obj.run_id == job_run_id for obj in
participant_ingested_metrics))
@mock.patch(
'rdr_service.genomic.genomic_job_controller.GenomicJobController.execute_cloud_task'
)
def test_reconcile_pdr_data(self, mock_cloud_task):
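        """RECONCILE_PDR_DATA should queue one rebuild_genomic_table_records_task cloud task per affected genomic table, passing the ids of the records to rebuild."""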
with GenomicJobController(GenomicJob.RECONCILE_PDR_DATA) as controller:
controller.reconcile_pdr_data()
cloud_task_endpoint = 'rebuild_genomic_table_records_task'
first_run = self.job_run_dao.get_all()
self.assertEqual(mock_cloud_task.call_count, 1)
call_args = mock_cloud_task.call_args_list
self.assertEqual(len(call_args), 1)
self.assertEqual(call_args[0].args[0]['table'], self.job_run_dao.
model_type.__tablename__)
self.assertTrue(type(call_args[0].args[0]['ids']) is list)
self.assertEqual(call_args[0].args[0]['ids'], [obj.id for obj in
first_run])
self.assertEqual(call_args[0].args[1], cloud_task_endpoint)
participant = self.data_generator.create_database_participant()
gen_set = self.data_generator.create_database_genomic_set(
genomicSetName='.', genomicSetCriteria='.', genomicSetVersion=1)
plus_ten = clock.CLOCK.now() + datetime.timedelta(minutes=10)
plus_ten = plus_ten.replace(microsecond=0)
with FakeClock(plus_ten):
for i in range(2):
gen_member = (self.data_generator.
create_database_genomic_set_member(genomicSetId=gen_set
.id, biobankId='100153482', sampleId='21042005280',
genomeType='aou_wgs', genomicWorkflowState=
GenomicWorkflowState.AW1))
gen_processed_file = (self.data_generator.
create_database_genomic_file_processed(runId=first_run[
0].id, startTime=clock.CLOCK.now(), filePath=
f'test_file_path_{i}', bucketName='test_bucket',
fileName='test_file_name'))
self.data_generator.create_database_genomic_gc_validation_metrics(
genomicSetMemberId=gen_member.id,
genomicFileProcessedId=gen_processed_file.id)
manifest = (self.data_generator.
create_database_genomic_manifest_file(manifestTypeId=2,
filePath=f'test_file_path_{i}'))
self.data_generator.create_database_genomic_manifest_feedback(
inputManifestFileId=manifest.id, feedbackRecordCount=2)
self.data_generator.create_database_genomic_user_event_metrics(
participant_id=participant.participantId, event_name=
'test_event', run_id=1)
self.data_generator.create_database_genomic_informing_loop(
message_record_id=1, event_type=
'informing_loop_decision', module_type='gem',
participant_id=participant.participantId,
decision_value='maybe_later', event_authored_time=clock
.CLOCK.now())
self.data_generator.create_database_genomic_cvl_past_due(
cvl_site_id='co', email_notification_sent=0, sample_id=
'sample_test', results_type='hdr',
genomic_set_member_id=gen_member.id)
self.data_generator.create_database_genomic_appointment(
message_record_id=i, appointment_id=i, event_type=
'appointment_scheduled', module_type='hdr',
participant_id=participant.participantId,
event_authored_time=clock.CLOCK.now(), source='Color',
appointment_timestamp=format_datetime(clock.CLOCK.now()
), appointment_timezone='America/Los_Angeles', location
='123 address st', contact_number='17348675309',
language='en')
self.data_generator.create_database_genomic_member_report_state(
genomic_set_member_id=gen_member.id, participant_id=
participant.participantId, module='gem',
genomic_report_state=GenomicReportState.GEM_RPT_READY,
event_authored_time=clock.CLOCK.now())
self.data_generator.create_genomic_result_viewed(participant_id
=participant.participantId, event_type='result_viewed',
event_authored_time=clock.CLOCK.now(), module_type=
'gem', sample_id=gen_member.sampleId)
with GenomicJobController(GenomicJob.RECONCILE_PDR_DATA) as controller:
controller.reconcile_pdr_data()
affected_tables = ['genomic_set', 'genomic_set_member',
'genomic_job_run', 'genomic_file_processed',
'genomic_gc_validation_metrics', 'genomic_manifest_file',
'genomic_manifest_feedback', 'genomic_informing_loop',
'genomic_cvl_results_past_due', 'user_event_metrics',
'genomic_member_report_state', 'genomic_result_viewed',
'genomic_appointment_event']
num_calls = len(affected_tables) + 1
self.assertEqual(mock_cloud_task.call_count, num_calls)
call_args = mock_cloud_task.call_args_list
self.assertEqual(len(call_args), num_calls)
mock_tables = set([obj[0][0]['table'] for obj in call_args])
mock_endpoint = [obj[0][1] for obj in call_args]
        self.assertEqual(sorted(mock_tables), sorted(affected_tables))
        self.assertTrue(all(obj == cloud_task_endpoint for obj in mock_endpoint))
def test_reconcile_message_broker_results_ready(self):
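        """reconcile_message_broker_results_ready should convert pgx/hdr result_ready user event metrics into member report states (PGX_RPT_READY, HDR_RPT_UNINFORMATIVE, HDR_RPT_POSITIVE)."""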
self.data_generator.create_database_genomic_set(genomicSetName=
'test', genomicSetCriteria='.', genomicSetVersion=1)
self.data_generator.create_database_genomic_job_run(jobId=
GenomicJob.METRICS_FILE_INGEST, startTime=clock.CLOCK.now())
for pid in range(7):
self.data_generator.create_database_participant(participantId=1 +
pid, biobankId=1 + pid)
for i in range(1, 6):
self.data_generator.create_database_genomic_set_member(
participantId=i, genomicSetId=1, biobankId=i,
collectionTubeId=100 + i, sampleId=10 + i, genomeType='aou_wgs'
)
if i < 4:
self.data_generator.create_database_genomic_user_event_metrics(
participant_id=i, created_at=datetime.datetime(2022, 10,
6, 0), event_name='pgx.result_ready', run_id=1)
if i == 4:
self.data_generator.create_database_genomic_user_event_metrics(
participant_id=i, created_at=datetime.datetime(2022, 10,
6, 0), event_name='hdr.result_ready.informative', run_id=1)
if i == 5:
self.data_generator.create_database_genomic_user_event_metrics(
participant_id=i, created_at=datetime.datetime(2022, 10,
6, 0), event_name='hdr.result_ready.uninformative',
run_id=1)
genomic_cvl_pipeline.reconcile_message_broker_results_ready()
report_state_dao = GenomicMemberReportStateDao()
states = report_state_dao.get_all()
self.assertEqual(5, len(states))
pgx_records = [rec for rec in states if rec.module == 'pgx_v1']
hdr_record_uninf = [rec for rec in states if rec.
genomic_report_state == GenomicReportState.HDR_RPT_UNINFORMATIVE][0
]
hdr_record_pos = [rec for rec in states if rec.genomic_report_state ==
GenomicReportState.HDR_RPT_POSITIVE][0]
for pgx_record in pgx_records:
self.assertEqual(GenomicReportState.PGX_RPT_READY, pgx_record.
genomic_report_state)
self.assertEqual('PGX_RPT_READY', pgx_record.
genomic_report_state_str)
self.assertEqual(int(pgx_record.sample_id), pgx_record.
participant_id + 10)
self.assertEqual('result_ready', pgx_record.event_type)
self.assertEqual(datetime.datetime(2022, 10, 6, 0), pgx_record.
event_authored_time)
self.assertIsNotNone(pgx_record.created_from_metric_id)
self.assertEqual('HDR_RPT_UNINFORMATIVE', hdr_record_uninf.
genomic_report_state_str)
self.assertEqual(int(hdr_record_uninf.sample_id), hdr_record_uninf.
participant_id + 10)
self.assertEqual('result_ready', hdr_record_uninf.event_type)
self.assertEqual(datetime.datetime(2022, 10, 6, 0),
hdr_record_uninf.event_authored_time)
self.assertIsNotNone(hdr_record_uninf.created_from_metric_id)
self.assertEqual('HDR_RPT_POSITIVE', hdr_record_pos.
genomic_report_state_str)
self.assertEqual(int(hdr_record_pos.sample_id), hdr_record_pos.
participant_id + 10)
self.assertEqual('result_ready', hdr_record_pos.event_type)
self.assertEqual(datetime.datetime(2022, 10, 6, 0), hdr_record_pos.
event_authored_time)
self.assertIsNotNone(hdr_record_pos.created_from_metric_id)
def test_ingest_appointment_metrics_file(self):
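        """APPOINTMENT_METRICS_FILE_INGEST should store each appointment event in the JSON file as an appointment metrics record and end with a successful job run."""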
test_file = 'Genomic-Metrics-File-Appointment-Events-Test.json'
bucket_name = 'test_bucket'
sub_folder = 'appointment_events'
pids = []
for _ in range(4):
summary = self.data_generator.create_database_participant_summary()
pids.append(summary.participantId)
test_file_path = f'{bucket_name}/{sub_folder}/{test_file}'
appointment_data = test_data.load_test_data_json(
'Genomic-Metrics-File-Appointment-Events-Test.json')
appointment_data_str = json.dumps(appointment_data, indent=4)
with open_cloud_file(test_file_path, mode='wb') as cloud_file:
cloud_file.write(appointment_data_str.encode('utf-8'))
with GenomicJobController(GenomicJob.APPOINTMENT_METRICS_FILE_INGEST
) as controller:
controller.ingest_appointment_metrics_file(file_path=test_file_path
)
all_metrics = self.appointment_metrics_dao.get_all()
self.assertEqual(len(all_metrics), 5)
self.assertTrue(all(obj.participant_id in pids for obj in all_metrics))
self.assertTrue(all(obj.file_path == test_file_path for obj in
all_metrics))
self.assertTrue(all(obj.appointment_event is not None for obj in
all_metrics))
self.assertTrue(all(obj.created is not None for obj in all_metrics))
self.assertTrue(all(obj.modified is not None for obj in all_metrics))
self.assertTrue(all(obj.module_type is not None for obj in all_metrics)
)
self.assertTrue(all(obj.event_authored_time is not None for obj in
all_metrics))
self.assertTrue(all(obj.event_type is not None for obj in all_metrics))
current_job_runs = self.job_run_dao.get_all()
self.assertEqual(len(current_job_runs), 1)
current_job_run = current_job_runs[0]
self.assertTrue(current_job_run.jobId == GenomicJob.
APPOINTMENT_METRICS_FILE_INGEST)
self.assertTrue(current_job_run.runResult ==
GenomicSubProcessResult.SUCCESS)
self.clear_table_after_test('genomic_appointment_event_metrics')
def test_reconcile_appointments_with_metrics(self):
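        """APPOINTMENT_METRICS_RECONCILE should create appointment events for metrics without a matching event and stamp every metric with the reconcile job run id."""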
fake_date = parser.parse('2020-05-29T08:00:01-05:00')
for num in range(4):
summary = self.data_generator.create_database_participant_summary()
missing_json = {'event': 'appointment_updated',
'eventAuthoredTime': '2022-09-16T17:18:38Z',
'participantId': f'P{summary.participantId}', 'messageBody':
{'module_type': 'hdr', 'appointment_timestamp':
'2022-09-19T19:30:00+00:00', 'id': 55,
'appointment_timezone': 'America/Los_Angeles', 'location':
'CA', 'contact_number': '18043704252', 'language': 'en',
'source': 'Color'}}
if num % 2 == 0:
self.data_generator.create_database_genomic_appointment(
message_record_id=num, appointment_id=num, event_type=
'appointment_scheduled', module_type='hdr',
participant_id=summary.participantId,
event_authored_time=fake_date, source='Color',
appointment_timestamp=format_datetime(clock.CLOCK.now()
), appointment_timezone='America/Los_Angeles', location
='123 address st', contact_number='17348675309',
language='en')
self.data_generator.create_database_genomic_appointment_metric(
participant_id=summary.participantId, appointment_event=
json.dumps(missing_json, indent=4) if num % 2 != 0 else
'foo', file_path='test_file_path', module_type='hdr',
event_authored_time=fake_date, event_type=
'appointment_updated' if num % 2 != 0 else
'appointment_scheduled')
current_events = self.appointment_event_dao.get_all()
self.assertEqual(len(current_events), 2)
current_metrics = self.appointment_metrics_dao.get_all()
self.assertEqual(len(current_metrics), 4)
self.assertTrue(all(obj.reconcile_job_run_id is None for obj in
current_metrics))
with GenomicJobController(GenomicJob.APPOINTMENT_METRICS_RECONCILE
) as controller:
controller.reconcile_appointment_events_from_metrics()
job_run = self.job_run_dao.get_all()
self.assertEqual(len(job_run), 1)
self.assertTrue(job_run[0].jobId == GenomicJob.
APPOINTMENT_METRICS_RECONCILE)
current_events = self.appointment_event_dao.get_all()
self.assertEqual(len(current_events), 4)
scheduled = list(filter(lambda x: x.event_type ==
'appointment_scheduled', current_events))
self.assertEqual(len(scheduled), 2)
self.assertTrue(all(obj.created_from_metric_id is None for obj in
scheduled))
updated = list(filter(lambda x: x.event_type ==
'appointment_updated', current_events))
self.assertEqual(len(updated), 2)
self.assertTrue(all(obj.created_from_metric_id is not None for obj in
updated))
current_metrics = self.appointment_metrics_dao.get_all()
self.assertEqual(len(current_metrics), 4)
self.assertTrue(all(obj.reconcile_job_run_id is not None for obj in
current_metrics))
self.assertTrue(all(obj.reconcile_job_run_id == job_run[0].id for
obj in current_metrics))
self.clear_table_after_test('genomic_appointment_event_metrics')
@mock.patch('rdr_service.services.email_service.EmailService.send_email')
def test_check_appointments_gror_changed(self, email_mock):
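        """check_appointments_gror_changed should email the Color PM list once for participants whose GROR consent changed after scheduling and record them as notified."""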
fake_date = parser.parse('2022-09-01T13:43:23')
notified_dao = GenomicAppointmentEventNotifiedDao()
config.override_setting(config.GENOMIC_COLOR_PM_EMAIL, [
'[email protected]'])
num_participants = 4
for num in range(num_participants):
gror = num if num > 1 else 1
summary = self.data_generator.create_database_participant_summary(
consentForStudyEnrollment=1, consentForGenomicsROR=gror)
self.data_generator.create_database_genomic_appointment(
message_record_id=num, appointment_id=num, event_type=
'appointment_scheduled', module_type='hdr', participant_id=
summary.participantId, event_authored_time=fake_date,
source='Color', appointment_timestamp=format_datetime(clock
.CLOCK.now()), appointment_timezone='America/Los_Angeles',
location='123 address st', contact_number='17348675309',
language='en')
changed_ppts = (self.appointment_event_dao.
get_appointments_gror_changed())
self.assertEqual(2, len(changed_ppts))
with GenomicJobController(GenomicJob.CHECK_APPOINTMENT_GROR_CHANGED
) as controller:
controller.check_appointments_gror_changed()
self.assertEqual(email_mock.call_count, 1)
notified_appointments = notified_dao.get_all()
self.assertEqual(2, len(notified_appointments))
summary = self.data_generator.create_database_participant_summary(
consentForStudyEnrollment=1, consentForGenomicsROR=2)
self.data_generator.create_database_genomic_appointment(
message_record_id=5, appointment_id=5, event_type=
'appointment_scheduled', module_type='hdr', participant_id=
summary.participantId, event_authored_time=fake_date, source=
'Color', appointment_timestamp=format_datetime(clock.CLOCK.now(
)), appointment_timezone='America/Los_Angeles', location=
'123 address st', contact_number='17348675309', language='en')
changed_ppts = (self.appointment_event_dao.
get_appointments_gror_changed())
self.assertEqual(1, len(changed_ppts))
@mock.patch('rdr_service.services.email_service.EmailService.send_email')
def test_check_gcr_14day_escalation(self, email_mock):
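        """The 14-day GCR escalation should email the escalation list only for HDR-positive participants without a qualifying appointment who have not already been notified."""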
fake_date = parser.parse('2022-09-01T13:43:23')
fake_date2 = parser.parse('2022-09-02T14:14:00')
fake_date3 = parser.parse('2022-09-03T15:15:00')
config.override_setting(config.GENOMIC_GCR_ESCALATION_EMAILS, [
'[email protected]'])
self.data_generator.create_database_genomic_set(genomicSetName=
'test', genomicSetCriteria='.', genomicSetVersion=1)
pids = []
for _ in range(6):
summary = self.data_generator.create_database_participant_summary(
consentForStudyEnrollment=1, consentForGenomicsROR=1)
set_member = (self.data_generator.
create_database_genomic_set_member(participantId=summary.
participantId, genomicSetId=1, biobankId=1001,
collectionTubeId=100, sampleId=10, genomeType='aou_wgs'))
self.data_generator.create_database_genomic_member_report_state(
participant_id=summary.participantId, genomic_report_state=
GenomicReportState.HDR_RPT_POSITIVE, genomic_set_member_id=
set_member.id, module='hdr_v1', event_authored_time=fake_date)
pids.append(summary.participantId)
self.data_generator.create_database_genomic_appointment(
message_record_id=101, appointment_id=102, event_type=
'appointment_scheduled', module_type='hdr', participant_id=pids
[0], event_authored_time=fake_date, source='Color',
appointment_timestamp=format_datetime(clock.CLOCK.now()),
appointment_timezone='America/Los_Angeles', location=
'123 address st', contact_number='17348675309', language='en')
self.data_generator.create_database_genomic_appointment(
message_record_id=102, appointment_id=103, event_type=
'appointment_completed', module_type='hdr', participant_id=pids
[1], event_authored_time=fake_date, source='Color',
appointment_timestamp=fake_date, appointment_timezone=
'America/Los_Angeles', location='123 address st',
contact_number='17348675309', language='en')
self.data_generator.create_database_genomic_appointment(
message_record_id=103, appointment_id=104, event_type=
'appointment_scheduled', module_type='hdr', participant_id=pids
[2], event_authored_time=fake_date2, source='Color',
appointment_timestamp=format_datetime(clock.CLOCK.now()),
appointment_timezone='America/Los_Angeles', location=
'123 address st', contact_number='17348675309', language='en')
self.data_generator.create_database_genomic_appointment(
message_record_id=104, appointment_id=104, event_type=
'appointment_cancelled', module_type='hdr', participant_id=pids
[2], event_authored_time=fake_date3, source='Color',
appointment_timestamp=format_datetime(clock.CLOCK.now()),
appointment_timezone='America/Los_Angeles', location=
'123 address st', contact_number='17348675309', language='en')
notified_dao = GenomicDefaultBaseDao(model_type=
GenomicGCROutreachEscalationNotified)
notified_dao.insert_bulk([{'participant_id': pids[4], 'created':
clock.CLOCK.now(), 'modified': clock.CLOCK.now(),
'message_sent': True}, {'participant_id': pids[5], 'created':
clock.CLOCK.now(), 'modified': clock.CLOCK.now(),
'message_sent': False}])
with clock.FakeClock(parser.parse('2022-11-1T05:15:00')):
escalated_participants = (self.report_state_dao.
get_hdr_result_positive_no_appointment(num_days=14))
results = [pid[0] for pid in escalated_participants]
self.assertIn(pids[2], results)
self.assertIn(pids[3], results)
self.assertIn(pids[5], results)
self.assertNotIn(pids[0], results)
self.assertNotIn(pids[1], results)
self.assertNotIn(pids[4], results)
with GenomicJobController(GenomicJob.CHECK_GCR_OUTREACH_ESCALATION
) as controller:
controller.check_gcr_escalation(controller.job_id)
self.assertEqual(email_mock.call_count, 3)
self.assertEqual(email_mock.call_args.args[0].subject,
'GCR Outreach 14 Day Escalation')
self.clear_table_after_test('genomic_gcr_outreach_escalation_notified')
@mock.patch(
'rdr_service.genomic.genomic_job_controller.GenomicJobController.execute_cloud_task'
)
def test_execute_auto_generation_from_last_run(self, cloud_task_mock):
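        """Auto-generation should queue the genomic-generate-manifest cloud task only when the triggering job run ended in SUCCESS."""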
with GenomicJobController(GenomicJob.PR_PR_WORKFLOW) as controller:
controller.job_result = GenomicSubProcessResult.ERROR
controller._end_run()
controller.execute_auto_generation_from_cloud_task()
last_job_run_status = self.job_run_dao.get_last_run_status_for_job_id(
job_id=GenomicJob.PR_PR_WORKFLOW)
self.assertTrue(last_job_run_status is not None)
self.assertTrue(last_job_run_status[0] == GenomicSubProcessResult.ERROR
)
self.assertEqual(cloud_task_mock.called, False)
self.assertEqual(cloud_task_mock.call_count, 0)
with GenomicJobController(GenomicJob.PR_PR_WORKFLOW) as controller:
controller.job_result = GenomicSubProcessResult.SUCCESS
controller._end_run()
controller.execute_auto_generation_from_cloud_task()
last_job_run_status = self.job_run_dao.get_last_run_status_for_job_id(
job_id=GenomicJob.PR_PR_WORKFLOW)
self.assertTrue(last_job_run_status is not None)
self.assertTrue(last_job_run_status[0] == GenomicSubProcessResult.
SUCCESS)
self.assertEqual(cloud_task_mock.called, True)
self.assertTrue(cloud_task_mock.call_args[1].get('payload').get(
'manifest_type') == 'p0')
self.assertTrue(cloud_task_mock.call_args[1].get('task_queue') ==
'genomic-generate-manifest')
all_job_runs = self.job_run_dao.get_all()
self.assertEqual(len(all_job_runs), 2)
self.assertTrue(all(obj.runResult in [GenomicSubProcessResult.
SUCCESS, GenomicSubProcessResult.ERROR] for obj in all_job_runs))
self.assertTrue(all(obj.jobId == GenomicJob.PR_PR_WORKFLOW for obj in
all_job_runs))
@mock.patch(
'rdr_service.genomic.genomic_job_controller.GenomicJobController.execute_cloud_task'
)
def test_retry_manifest_ingestions_if_deltas(self, mock_cloud_task):
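        """RETRY_MANIFEST_INGESTIONS should queue AW1/AW2 ingestion cloud tasks only when raw manifest rows have no matching member/metrics records; otherwise the run ends with NO_FILES."""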
bucket_name = 'test-bucket'
aw1_file_name = (
'AW1_wgs_sample_manifests/RDR_AoU_SEQ_PKG-2104-026571.csv')
aw1_manifest_path = f'{bucket_name}/{aw1_file_name}'
aw2_file_name = (
'AW2_wgs_data_manifests/RDR_AoU_SEQ_DataManifest_04092021.csv')
aw2_manifest_path = f'{bucket_name}/{aw2_file_name}'
gen_set = self.data_generator.create_database_genomic_set(
genomicSetName='.', genomicSetCriteria='.', genomicSetVersion=1)
aw1_job_run = self.data_generator.create_database_genomic_job_run(jobId
=GenomicJob.AW1_MANIFEST, startTime=clock.CLOCK.now(), endTime=
clock.CLOCK.now(), runResult=GenomicSubProcessResult.SUCCESS)
aw2_job_run = self.data_generator.create_database_genomic_job_run(jobId
=GenomicJob.METRICS_INGESTION, startTime=clock.CLOCK.now(),
endTime=clock.CLOCK.now(), runResult=GenomicSubProcessResult.
SUCCESS)
with GenomicJobController(GenomicJob.RETRY_MANIFEST_INGESTIONS
) as controller:
controller.retry_manifest_ingestions()
job_run = self.job_run_dao.get(3)
self.assertEqual(job_run.jobId, GenomicJob.RETRY_MANIFEST_INGESTIONS)
self.assertEqual(job_run.runStatus, GenomicSubProcessStatus.COMPLETED)
self.assertEqual(job_run.runResult, GenomicSubProcessResult.NO_FILES)
self.assertEqual(mock_cloud_task.call_count, 0)
self.assertFalse(mock_cloud_task.call_count)
self.data_generator.create_database_genomic_aw1_raw(file_path=
aw1_manifest_path, package_id='PKG-2104-026571', biobank_id=
'A10001')
self.data_generator.create_database_genomic_aw2_raw(file_path=
aw2_manifest_path, biobank_id='A10001', sample_id='100001',
biobankidsampleid='A10001_100001')
aw1_manifest_file = (self.data_generator.
create_database_genomic_manifest_file(created=clock.CLOCK.now(),
modified=clock.CLOCK.now(), uploadDate=clock.CLOCK.now(),
manifestTypeId=GenomicManifestTypes.AW1, filePath=
aw1_manifest_path, fileName=aw1_file_name, bucketName=
bucket_name, recordCount=1, rdrProcessingComplete=1,
rdrProcessingCompleteDate=clock.CLOCK.now()))
aw2_manifest_file = (self.data_generator.
create_database_genomic_manifest_file(created=clock.CLOCK.now(),
modified=clock.CLOCK.now(), uploadDate=clock.CLOCK.now(),
manifestTypeId=GenomicManifestTypes.AW2, filePath=
aw2_manifest_path, fileName=aw2_file_name, bucketName=
bucket_name, recordCount=1, rdrProcessingComplete=1,
rdrProcessingCompleteDate=clock.CLOCK.now()))
aw1_file_processed = (self.data_generator.
create_database_genomic_file_processed(runId=aw1_job_run.id,
startTime=clock.CLOCK.now(), genomicManifestFileId=
aw1_manifest_file.id, filePath=f'/{aw1_manifest_path}',
bucketName=bucket_name, fileName=aw1_file_name))
aw2_file_processed = (self.data_generator.
create_database_genomic_file_processed(runId=aw2_job_run.id,
startTime=clock.CLOCK.now(), genomicManifestFileId=
aw2_manifest_file.id, filePath=f'/{aw2_manifest_path}',
bucketName=bucket_name, fileName=aw2_file_name))
gen_member = self.data_generator.create_database_genomic_set_member(
genomicSetId=gen_set.id, biobankId='100153482', sampleId=
'21042005280', genomeType='aou_wgs', genomicWorkflowState=
GenomicWorkflowState.AW1, aw1FileProcessedId=aw1_file_processed.id)
self.data_generator.create_database_genomic_gc_validation_metrics(
genomicSetMemberId=gen_member.id, genomicFileProcessedId=
aw2_file_processed.id)
with GenomicJobController(GenomicJob.RETRY_MANIFEST_INGESTIONS
) as controller:
controller.retry_manifest_ingestions()
job_run = self.job_run_dao.get(4)
self.assertEqual(job_run.jobId, GenomicJob.RETRY_MANIFEST_INGESTIONS)
self.assertEqual(job_run.runStatus, GenomicSubProcessStatus.COMPLETED)
self.assertEqual(job_run.runResult, GenomicSubProcessResult.NO_FILES)
self.assertEqual(mock_cloud_task.call_count, 0)
self.assertFalse(mock_cloud_task.call_count)
with self.member_dao.session() as session:
session.query(GenomicGCValidationMetrics).delete()
session.query(GenomicSetMember).delete()
with GenomicJobController(GenomicJob.RETRY_MANIFEST_INGESTIONS
) as controller:
controller.retry_manifest_ingestions()
job_run = self.job_run_dao.get(5)
self.assertEqual(job_run.jobId, GenomicJob.RETRY_MANIFEST_INGESTIONS)
self.assertEqual(job_run.runStatus, GenomicSubProcessStatus.COMPLETED)
self.assertEqual(job_run.runResult, GenomicSubProcessResult.SUCCESS)
self.assertEqual(mock_cloud_task.call_count, 2)
self.assertTrue(mock_cloud_task.call_count)
call_args = mock_cloud_task.call_args_list
self.assertEqual(len(call_args), 2)
cloud_task_endpoint = ['ingest_aw1_manifest_task',
'ingest_aw2_manifest_task']
mock_endpoint = [obj[0][1] for obj in call_args]
        self.assertTrue(all(obj in cloud_task_endpoint for obj in mock_endpoint))
        mock_buckets = set([obj[0][0]['bucket_name'] for obj in call_args])
        self.assertEqual(len(mock_buckets), 1)
self.assertTrue(list(mock_buckets)[0] == bucket_name)
def test_calculate_informing_loop_ready_flags(self):
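        """CALCULATE_INFORMING_LOOP_READY should set informingLoopReadyFlag and its modified timestamp on eligible members, honoring the CALCULATE_READY_FLAG_LIMIT batch size."""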
num_participants = 4
gen_set = self.data_generator.create_database_genomic_set(
genomicSetName='.', genomicSetCriteria='.', genomicSetVersion=1)
for num in range(num_participants):
plus_num = clock.CLOCK.now() + datetime.timedelta(minutes=num)
plus_num = plus_num.replace(microsecond=0)
with FakeClock(plus_num):
summary = (self.data_generator.
create_database_participant_summary(
consentForStudyEnrollment=1, consentForGenomicsROR=1))
stored_sample = (self.data_generator.
create_database_biobank_stored_sample(biobankId=summary
.biobankId, biobankOrderIdentifier=self.fake.pyint()))
collection_site = self.data_generator.create_database_site(
siteType='Clinic')
order = self.data_generator.create_database_biobank_order(
collectedSiteId=collection_site.siteId, participantId=
summary.participantId, finalizedTime=plus_num)
self.data_generator.create_database_biobank_order_identifier(
value=stored_sample.biobankOrderIdentifier,
biobankOrderId=order.biobankOrderId, system='1')
self.data_generator.create_database_biobank_order_identifier(
value=stored_sample.biobankOrderIdentifier,
biobankOrderId=order.biobankOrderId, system='2')
member = (self.data_generator.
create_database_genomic_set_member(genomicSetId=gen_set
.id, participantId=summary.participantId, genomeType=
config.GENOME_TYPE_WGS, qcStatus=GenomicQcStatus.PASS,
gcManifestSampleSource='Whole Blood', collectionTubeId=
stored_sample.biobankStoredSampleId))
self.data_generator.create_database_genomic_gc_validation_metrics(
genomicSetMemberId=member.id, sexConcordance='True',
drcFpConcordance='Pass', drcSexConcordance='Pass',
processingStatus='Pass')
members_for_ready_loop = (self.member_dao.
get_members_for_informing_loop_ready())
self.assertEqual(len(members_for_ready_loop), num_participants)
current_set_members = self.member_dao.get_all()
self.assertTrue(all(obj.informingLoopReadyFlag == 0 for obj in
current_set_members))
self.assertTrue(all(obj.informingLoopReadyFlagModified is None for
obj in current_set_members))
with GenomicJobController(GenomicJob.CALCULATE_INFORMING_LOOP_READY
) as controller:
controller.calculate_informing_loop_ready_flags()
members_for_ready_loop = (self.member_dao.
get_members_for_informing_loop_ready())
self.assertEqual(len(members_for_ready_loop), num_participants)
calculation_limit = 2
config.override_setting(config.CALCULATE_READY_FLAG_LIMIT, [
calculation_limit])
with GenomicJobController(GenomicJob.CALCULATE_INFORMING_LOOP_READY
) as controller:
controller.calculate_informing_loop_ready_flags()
current_set_members = self.member_dao.get_all()
self.assertTrue(any(obj.informingLoopReadyFlag == 1 for obj in
current_set_members))
self.assertTrue(any(obj.informingLoopReadyFlagModified is not None for
obj in current_set_members))
current_loops_set = [obj for obj in current_set_members if obj.
informingLoopReadyFlag == 1 and obj.
informingLoopReadyFlagModified is not None]
self.assertEqual(len(current_loops_set), calculation_limit)
members_for_ready_loop = (self.member_dao.
get_members_for_informing_loop_ready())
self.assertEqual(len(members_for_ready_loop), num_participants // 2)
with GenomicJobController(GenomicJob.CALCULATE_INFORMING_LOOP_READY
) as controller:
controller.calculate_informing_loop_ready_flags()
current_set_members = self.member_dao.get_all()
self.assertTrue(all(obj.informingLoopReadyFlag == 1 for obj in
current_set_members))
self.assertTrue(all(obj.informingLoopReadyFlagModified is not None for
obj in current_set_members))
members_for_ready_loop = (self.member_dao.
get_members_for_informing_loop_ready())
self.assertEqual(len(members_for_ready_loop), 0)
@mock.patch('rdr_service.services.email_service.EmailService.send_email')
def test_getting_results_withdrawn(self, email_mock):
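        # Withdrawn (EARLY_OUT) participants with GEM array and HDR WGS results should
        # produce GenomicResultWithdrawals records and trigger one notification email
        # per module; a second run with nothing new ends in NO_RESULTS.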
num_participants = 4
result_withdrawal_dao = GenomicResultWithdrawalsDao()
gen_set = self.data_generator.create_database_genomic_set(
genomicSetName='.', genomicSetCriteria='.', genomicSetVersion=1)
gen_job_run = self.data_generator.create_database_genomic_job_run(jobId
=GenomicJob.AW1_MANIFEST, startTime=clock.CLOCK.now(),
runResult=GenomicSubProcessResult.SUCCESS)
pids = []
for num in range(num_participants):
summary = self.data_generator.create_database_participant_summary(
consentForStudyEnrollment=1, consentForGenomicsROR=1,
withdrawalStatus=WithdrawalStatus.EARLY_OUT)
self.data_generator.create_database_genomic_set_member(genomicSetId
=gen_set.id, participantId=summary.participantId,
genomeType=config.GENOME_TYPE_ARRAY, gemA1ManifestJobRunId=
gen_job_run.id if num % 2 == 0 else None)
self.data_generator.create_database_genomic_set_member(genomicSetId
=gen_set.id, participantId=summary.participantId,
genomeType=config.GENOME_TYPE_WGS, cvlW1ilHdrJobRunId=
gen_job_run.id)
pids.append(summary.participantId)
config.override_setting(config.RDR_GENOMICS_NOTIFICATION_EMAIL,
'[email protected]')
with GenomicJobController(GenomicJob.RESULTS_PIPELINE_WITHDRAWALS
) as controller:
controller.check_results_withdrawals()
self.assertEqual(email_mock.call_count, 2)
call_args = email_mock.call_args_list
self.assertTrue(any('GEM' in call.args[0].subject for call in
call_args))
self.assertTrue(any('HEALTH' in call.args[0].subject for call in
call_args))
job_runs = self.job_run_dao.get_all()
current_job_run = list(filter(lambda x: x.jobId == GenomicJob.
RESULTS_PIPELINE_WITHDRAWALS, job_runs))[0]
self.assertTrue(current_job_run.runResult ==
GenomicSubProcessResult.SUCCESS)
all_withdrawal_records = result_withdrawal_dao.get_all()
self.assertTrue(len(all_withdrawal_records) == len(pids))
self.assertTrue(all(obj.participant_id in pids for obj in
all_withdrawal_records))
array_results = list(filter(lambda x: x.array_results == 1,
all_withdrawal_records))
        self.assertEqual(len(array_results), 2)
cvl_results = list(filter(lambda x: x.cvl_results == 1,
all_withdrawal_records))
        self.assertEqual(len(cvl_results), num_participants)
with GenomicJobController(GenomicJob.RESULTS_PIPELINE_WITHDRAWALS
) as controller:
controller.check_results_withdrawals()
self.assertEqual(email_mock.call_count, 2)
job_runs = self.job_run_dao.get_all()
current_job_run = list(filter(lambda x: x.jobId == GenomicJob.
RESULTS_PIPELINE_WITHDRAWALS, job_runs))[1]
self.assertTrue(current_job_run.runResult ==
GenomicSubProcessResult.NO_RESULTS)
def test_gem_results_to_report_state(self):
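        # Only members updated with a GEM A2 job run and the GEM_RPT_READY workflow
        # state should be promoted to genomic_member_report_state records; runs with
        # no eligible members finish with NO_RESULTS.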
num_participants = 8
gen_set = self.data_generator.create_database_genomic_set(
genomicSetName='.', genomicSetCriteria='.', genomicSetVersion=1)
gem_a2_job_run = self.data_generator.create_database_genomic_job_run(
jobId=GenomicJob.GEM_A2_MANIFEST, startTime=clock.CLOCK.now(),
runResult=GenomicSubProcessResult.SUCCESS)
pids_to_update, member_ids = [], []
for num in range(num_participants):
summary = self.data_generator.create_database_participant_summary(
consentForStudyEnrollment=1, consentForGenomicsROR=1,
withdrawalStatus=WithdrawalStatus.EARLY_OUT)
member = self.data_generator.create_database_genomic_set_member(
genomicSetId=gen_set.id, participantId=summary.
participantId, genomeType=config.GENOME_TYPE_ARRAY)
if num % 2 == 0:
member_ids.append(member.id)
pids_to_update.append(summary.participantId)
with GenomicJobController(GenomicJob.GEM_RESULT_REPORTS) as controller:
controller.gem_results_to_report_state()
current_job_runs = self.job_run_dao.get_all()
self.assertEqual(len(current_job_runs), 2)
current_job_run = list(filter(lambda x: x.jobId == GenomicJob.
GEM_RESULT_REPORTS, current_job_runs))[0]
self.assertTrue(current_job_run.runResult ==
GenomicSubProcessResult.NO_RESULTS)
current_members = self.member_dao.get_all()
for member in current_members:
if member.participantId in pids_to_update:
member.gemA2ManifestJobRunId = gem_a2_job_run.id
member.genomicWorkflowState = (GenomicWorkflowState.
GEM_RPT_READY)
self.member_dao.update(member)
with GenomicJobController(GenomicJob.GEM_RESULT_REPORTS) as controller:
controller.gem_results_to_report_state()
current_job_runs = self.job_run_dao.get_all()
self.assertEqual(len(current_job_runs), 3)
current_job_run = list(filter(lambda x: x.jobId == GenomicJob.
GEM_RESULT_REPORTS, current_job_runs))[1]
self.assertTrue(current_job_run.runResult ==
GenomicSubProcessResult.SUCCESS)
current_gem_report_states = self.report_state_dao.get_all()
self.assertEqual(len(current_gem_report_states), len(pids_to_update))
self.assertTrue(all(obj.event_type == 'result_ready' for obj in
current_gem_report_states))
self.assertTrue(all(obj.event_authored_time is not None for obj in
current_gem_report_states))
self.assertTrue(all(obj.module == 'gem' for obj in
current_gem_report_states))
self.assertTrue(all(obj.genomic_report_state == GenomicReportState.
GEM_RPT_READY for obj in current_gem_report_states))
self.assertTrue(all(obj.genomic_report_state_str ==
GenomicReportState.GEM_RPT_READY.name for obj in
current_gem_report_states))
self.assertTrue(all(obj.genomic_set_member_id in member_ids for obj in
current_gem_report_states))
with GenomicJobController(GenomicJob.GEM_RESULT_REPORTS) as controller:
controller.gem_results_to_report_state()
current_job_runs = self.job_run_dao.get_all()
self.assertEqual(len(current_job_runs), 4)
current_job_run = list(filter(lambda x: x.jobId == GenomicJob.
GEM_RESULT_REPORTS, current_job_runs))[2]
self.assertTrue(current_job_run.runResult ==
GenomicSubProcessResult.NO_RESULTS)
self.clear_table_after_test('genomic_member_report_state')
<|reserved_special_token_0|>
def test_reconcile_message_broker_results_ready(self):
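        # Seed pgx/hdr result_ready user event metrics and verify the reconcile job
        # creates one report state per member, mapping event names to PGX_RPT_READY,
        # HDR_RPT_POSITIVE and HDR_RPT_UNINFORMATIVE.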
self.data_generator.create_database_genomic_set(genomicSetName=
'test', genomicSetCriteria='.', genomicSetVersion=1)
self.data_generator.create_database_genomic_job_run(jobId=
GenomicJob.METRICS_FILE_INGEST, startTime=clock.CLOCK.now())
for pid in range(7):
self.data_generator.create_database_participant(participantId=1 +
pid, biobankId=1 + pid)
for i in range(1, 6):
self.data_generator.create_database_genomic_set_member(
participantId=i, genomicSetId=1, biobankId=i,
collectionTubeId=100 + i, sampleId=10 + i, genomeType='aou_wgs'
)
if i < 4:
self.data_generator.create_database_genomic_user_event_metrics(
participant_id=i, created_at=datetime.datetime(2022, 10,
6, 0), event_name='pgx.result_ready', run_id=1)
if i == 4:
self.data_generator.create_database_genomic_user_event_metrics(
participant_id=i, created_at=datetime.datetime(2022, 10,
6, 0), event_name='hdr.result_ready.informative', run_id=1)
if i == 5:
self.data_generator.create_database_genomic_user_event_metrics(
participant_id=i, created_at=datetime.datetime(2022, 10,
6, 0), event_name='hdr.result_ready.uninformative',
run_id=1)
genomic_cvl_pipeline.reconcile_message_broker_results_ready()
report_state_dao = GenomicMemberReportStateDao()
states = report_state_dao.get_all()
self.assertEqual(5, len(states))
pgx_records = [rec for rec in states if rec.module == 'pgx_v1']
hdr_record_uninf = [rec for rec in states if rec.
genomic_report_state == GenomicReportState.HDR_RPT_UNINFORMATIVE][0
]
hdr_record_pos = [rec for rec in states if rec.genomic_report_state ==
GenomicReportState.HDR_RPT_POSITIVE][0]
for pgx_record in pgx_records:
self.assertEqual(GenomicReportState.PGX_RPT_READY, pgx_record.
genomic_report_state)
self.assertEqual('PGX_RPT_READY', pgx_record.
genomic_report_state_str)
self.assertEqual(int(pgx_record.sample_id), pgx_record.
participant_id + 10)
self.assertEqual('result_ready', pgx_record.event_type)
self.assertEqual(datetime.datetime(2022, 10, 6, 0), pgx_record.
event_authored_time)
self.assertIsNotNone(pgx_record.created_from_metric_id)
self.assertEqual('HDR_RPT_UNINFORMATIVE', hdr_record_uninf.
genomic_report_state_str)
self.assertEqual(int(hdr_record_uninf.sample_id), hdr_record_uninf.
participant_id + 10)
self.assertEqual('result_ready', hdr_record_uninf.event_type)
self.assertEqual(datetime.datetime(2022, 10, 6, 0),
hdr_record_uninf.event_authored_time)
self.assertIsNotNone(hdr_record_uninf.created_from_metric_id)
self.assertEqual('HDR_RPT_POSITIVE', hdr_record_pos.
genomic_report_state_str)
self.assertEqual(int(hdr_record_pos.sample_id), hdr_record_pos.
participant_id + 10)
self.assertEqual('result_ready', hdr_record_pos.event_type)
self.assertEqual(datetime.datetime(2022, 10, 6, 0), hdr_record_pos.
event_authored_time)
self.assertIsNotNone(hdr_record_pos.created_from_metric_id)
<|reserved_special_token_0|>
def test_ingest_appointment_metrics_file(self):
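        # Upload the appointment events JSON to a cloud bucket and verify each event
        # is stored as a genomic_appointment_event_metrics row tied to the file path.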
test_file = 'Genomic-Metrics-File-Appointment-Events-Test.json'
bucket_name = 'test_bucket'
sub_folder = 'appointment_events'
pids = []
for _ in range(4):
summary = self.data_generator.create_database_participant_summary()
pids.append(summary.participantId)
test_file_path = f'{bucket_name}/{sub_folder}/{test_file}'
appointment_data = test_data.load_test_data_json(
'Genomic-Metrics-File-Appointment-Events-Test.json')
appointment_data_str = json.dumps(appointment_data, indent=4)
with open_cloud_file(test_file_path, mode='wb') as cloud_file:
cloud_file.write(appointment_data_str.encode('utf-8'))
with GenomicJobController(GenomicJob.APPOINTMENT_METRICS_FILE_INGEST
) as controller:
controller.ingest_appointment_metrics_file(file_path=test_file_path
)
all_metrics = self.appointment_metrics_dao.get_all()
self.assertEqual(len(all_metrics), 5)
self.assertTrue(all(obj.participant_id in pids for obj in all_metrics))
self.assertTrue(all(obj.file_path == test_file_path for obj in
all_metrics))
self.assertTrue(all(obj.appointment_event is not None for obj in
all_metrics))
self.assertTrue(all(obj.created is not None for obj in all_metrics))
self.assertTrue(all(obj.modified is not None for obj in all_metrics))
self.assertTrue(all(obj.module_type is not None for obj in all_metrics)
)
self.assertTrue(all(obj.event_authored_time is not None for obj in
all_metrics))
self.assertTrue(all(obj.event_type is not None for obj in all_metrics))
current_job_runs = self.job_run_dao.get_all()
self.assertEqual(len(current_job_runs), 1)
current_job_run = current_job_runs[0]
self.assertTrue(current_job_run.jobId == GenomicJob.
APPOINTMENT_METRICS_FILE_INGEST)
self.assertTrue(current_job_run.runResult ==
GenomicSubProcessResult.SUCCESS)
self.clear_table_after_test('genomic_appointment_event_metrics')
def test_reconcile_appointments_with_metrics(self):
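        # Half of the metrics already have matching appointment events; the reconcile
        # job should create events only for the unmatched 'appointment_updated'
        # metrics and stamp every metric with the reconcile job run id.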
fake_date = parser.parse('2020-05-29T08:00:01-05:00')
for num in range(4):
summary = self.data_generator.create_database_participant_summary()
missing_json = {'event': 'appointment_updated',
'eventAuthoredTime': '2022-09-16T17:18:38Z',
'participantId': f'P{summary.participantId}', 'messageBody':
{'module_type': 'hdr', 'appointment_timestamp':
'2022-09-19T19:30:00+00:00', 'id': 55,
'appointment_timezone': 'America/Los_Angeles', 'location':
'CA', 'contact_number': '18043704252', 'language': 'en',
'source': 'Color'}}
if num % 2 == 0:
self.data_generator.create_database_genomic_appointment(
message_record_id=num, appointment_id=num, event_type=
'appointment_scheduled', module_type='hdr',
participant_id=summary.participantId,
event_authored_time=fake_date, source='Color',
appointment_timestamp=format_datetime(clock.CLOCK.now()
), appointment_timezone='America/Los_Angeles', location
='123 address st', contact_number='17348675309',
language='en')
self.data_generator.create_database_genomic_appointment_metric(
participant_id=summary.participantId, appointment_event=
json.dumps(missing_json, indent=4) if num % 2 != 0 else
'foo', file_path='test_file_path', module_type='hdr',
event_authored_time=fake_date, event_type=
'appointment_updated' if num % 2 != 0 else
'appointment_scheduled')
current_events = self.appointment_event_dao.get_all()
self.assertEqual(len(current_events), 2)
current_metrics = self.appointment_metrics_dao.get_all()
self.assertEqual(len(current_metrics), 4)
self.assertTrue(all(obj.reconcile_job_run_id is None for obj in
current_metrics))
with GenomicJobController(GenomicJob.APPOINTMENT_METRICS_RECONCILE
) as controller:
controller.reconcile_appointment_events_from_metrics()
job_run = self.job_run_dao.get_all()
self.assertEqual(len(job_run), 1)
self.assertTrue(job_run[0].jobId == GenomicJob.
APPOINTMENT_METRICS_RECONCILE)
current_events = self.appointment_event_dao.get_all()
self.assertEqual(len(current_events), 4)
scheduled = list(filter(lambda x: x.event_type ==
'appointment_scheduled', current_events))
self.assertEqual(len(scheduled), 2)
self.assertTrue(all(obj.created_from_metric_id is None for obj in
scheduled))
updated = list(filter(lambda x: x.event_type ==
'appointment_updated', current_events))
self.assertEqual(len(updated), 2)
self.assertTrue(all(obj.created_from_metric_id is not None for obj in
updated))
current_metrics = self.appointment_metrics_dao.get_all()
self.assertEqual(len(current_metrics), 4)
self.assertTrue(all(obj.reconcile_job_run_id is not None for obj in
current_metrics))
self.assertTrue(all(obj.reconcile_job_run_id == job_run[0].id for
obj in current_metrics))
self.clear_table_after_test('genomic_appointment_event_metrics')
@mock.patch('rdr_service.services.email_service.EmailService.send_email')
def test_check_appointments_gror_changed(self, email_mock):
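        # Participants who scheduled an appointment and then changed their GROR
        # consent should be picked up once, emailed, and recorded in
        # genomic_appointment_event_notified.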
fake_date = parser.parse('2022-09-01T13:43:23')
notified_dao = GenomicAppointmentEventNotifiedDao()
config.override_setting(config.GENOMIC_COLOR_PM_EMAIL, [
'[email protected]'])
num_participants = 4
for num in range(num_participants):
gror = num if num > 1 else 1
summary = self.data_generator.create_database_participant_summary(
consentForStudyEnrollment=1, consentForGenomicsROR=gror)
self.data_generator.create_database_genomic_appointment(
message_record_id=num, appointment_id=num, event_type=
'appointment_scheduled', module_type='hdr', participant_id=
summary.participantId, event_authored_time=fake_date,
source='Color', appointment_timestamp=format_datetime(clock
.CLOCK.now()), appointment_timezone='America/Los_Angeles',
location='123 address st', contact_number='17348675309',
language='en')
changed_ppts = (self.appointment_event_dao.
get_appointments_gror_changed())
self.assertEqual(2, len(changed_ppts))
with GenomicJobController(GenomicJob.CHECK_APPOINTMENT_GROR_CHANGED
) as controller:
controller.check_appointments_gror_changed()
self.assertEqual(email_mock.call_count, 1)
notified_appointments = notified_dao.get_all()
self.assertEqual(2, len(notified_appointments))
summary = self.data_generator.create_database_participant_summary(
consentForStudyEnrollment=1, consentForGenomicsROR=2)
self.data_generator.create_database_genomic_appointment(
message_record_id=5, appointment_id=5, event_type=
'appointment_scheduled', module_type='hdr', participant_id=
summary.participantId, event_authored_time=fake_date, source=
'Color', appointment_timestamp=format_datetime(clock.CLOCK.now(
)), appointment_timezone='America/Los_Angeles', location=
'123 address st', contact_number='17348675309', language='en')
changed_ppts = (self.appointment_event_dao.
get_appointments_gror_changed())
self.assertEqual(1, len(changed_ppts))
@mock.patch('rdr_service.services.email_service.EmailService.send_email')
def test_check_gcr_14day_escalation(self, email_mock):
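        # HDR-positive participants with no scheduled or completed appointment (or a
        # cancelled one) and no prior successful notification should be escalated 14
        # days after the result, one email per participant.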
fake_date = parser.parse('2022-09-01T13:43:23')
fake_date2 = parser.parse('2022-09-02T14:14:00')
fake_date3 = parser.parse('2022-09-03T15:15:00')
config.override_setting(config.GENOMIC_GCR_ESCALATION_EMAILS, [
'[email protected]'])
self.data_generator.create_database_genomic_set(genomicSetName=
'test', genomicSetCriteria='.', genomicSetVersion=1)
pids = []
for _ in range(6):
summary = self.data_generator.create_database_participant_summary(
consentForStudyEnrollment=1, consentForGenomicsROR=1)
set_member = (self.data_generator.
create_database_genomic_set_member(participantId=summary.
participantId, genomicSetId=1, biobankId=1001,
collectionTubeId=100, sampleId=10, genomeType='aou_wgs'))
self.data_generator.create_database_genomic_member_report_state(
participant_id=summary.participantId, genomic_report_state=
GenomicReportState.HDR_RPT_POSITIVE, genomic_set_member_id=
set_member.id, module='hdr_v1', event_authored_time=fake_date)
pids.append(summary.participantId)
self.data_generator.create_database_genomic_appointment(
message_record_id=101, appointment_id=102, event_type=
'appointment_scheduled', module_type='hdr', participant_id=pids
[0], event_authored_time=fake_date, source='Color',
appointment_timestamp=format_datetime(clock.CLOCK.now()),
appointment_timezone='America/Los_Angeles', location=
'123 address st', contact_number='17348675309', language='en')
self.data_generator.create_database_genomic_appointment(
message_record_id=102, appointment_id=103, event_type=
'appointment_completed', module_type='hdr', participant_id=pids
[1], event_authored_time=fake_date, source='Color',
appointment_timestamp=fake_date, appointment_timezone=
'America/Los_Angeles', location='123 address st',
contact_number='17348675309', language='en')
self.data_generator.create_database_genomic_appointment(
message_record_id=103, appointment_id=104, event_type=
'appointment_scheduled', module_type='hdr', participant_id=pids
[2], event_authored_time=fake_date2, source='Color',
appointment_timestamp=format_datetime(clock.CLOCK.now()),
appointment_timezone='America/Los_Angeles', location=
'123 address st', contact_number='17348675309', language='en')
self.data_generator.create_database_genomic_appointment(
message_record_id=104, appointment_id=104, event_type=
'appointment_cancelled', module_type='hdr', participant_id=pids
[2], event_authored_time=fake_date3, source='Color',
appointment_timestamp=format_datetime(clock.CLOCK.now()),
appointment_timezone='America/Los_Angeles', location=
'123 address st', contact_number='17348675309', language='en')
notified_dao = GenomicDefaultBaseDao(model_type=
GenomicGCROutreachEscalationNotified)
notified_dao.insert_bulk([{'participant_id': pids[4], 'created':
clock.CLOCK.now(), 'modified': clock.CLOCK.now(),
'message_sent': True}, {'participant_id': pids[5], 'created':
clock.CLOCK.now(), 'modified': clock.CLOCK.now(),
'message_sent': False}])
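        # pids[4] was already notified, so only pids[2], pids[3] and pids[5] remain
        # eligible for the 14-day escalation query.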
with clock.FakeClock(parser.parse('2022-11-1T05:15:00')):
escalated_participants = (self.report_state_dao.
get_hdr_result_positive_no_appointment(num_days=14))
results = [pid[0] for pid in escalated_participants]
self.assertIn(pids[2], results)
self.assertIn(pids[3], results)
self.assertIn(pids[5], results)
self.assertNotIn(pids[0], results)
self.assertNotIn(pids[1], results)
self.assertNotIn(pids[4], results)
with GenomicJobController(GenomicJob.CHECK_GCR_OUTREACH_ESCALATION
) as controller:
controller.check_gcr_escalation(controller.job_id)
self.assertEqual(email_mock.call_count, 3)
self.assertEqual(email_mock.call_args.args[0].subject,
'GCR Outreach 14 Day Escalation')
self.clear_table_after_test('genomic_gcr_outreach_escalation_notified')
<|reserved_special_token_0|>
<|reserved_special_token_0|>
@mock.patch(
'rdr_service.genomic.genomic_job_controller.GenomicJobController.execute_cloud_task'
)
def test_execute_auto_generation_from_last_run(self, cloud_task_mock):
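        # Manifest auto-generation should only queue a cloud task when the preceding
        # job run ended in SUCCESS; an ERROR run must not trigger the p0 manifest task.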
with GenomicJobController(GenomicJob.PR_PR_WORKFLOW) as controller:
controller.job_result = GenomicSubProcessResult.ERROR
controller._end_run()
controller.execute_auto_generation_from_cloud_task()
last_job_run_status = self.job_run_dao.get_last_run_status_for_job_id(
job_id=GenomicJob.PR_PR_WORKFLOW)
self.assertTrue(last_job_run_status is not None)
self.assertTrue(last_job_run_status[0] == GenomicSubProcessResult.ERROR
)
self.assertEqual(cloud_task_mock.called, False)
self.assertEqual(cloud_task_mock.call_count, 0)
with GenomicJobController(GenomicJob.PR_PR_WORKFLOW) as controller:
controller.job_result = GenomicSubProcessResult.SUCCESS
controller._end_run()
controller.execute_auto_generation_from_cloud_task()
last_job_run_status = self.job_run_dao.get_last_run_status_for_job_id(
job_id=GenomicJob.PR_PR_WORKFLOW)
self.assertTrue(last_job_run_status is not None)
self.assertTrue(last_job_run_status[0] == GenomicSubProcessResult.
SUCCESS)
self.assertEqual(cloud_task_mock.called, True)
self.assertTrue(cloud_task_mock.call_args[1].get('payload').get(
'manifest_type') == 'p0')
self.assertTrue(cloud_task_mock.call_args[1].get('task_queue') ==
'genomic-generate-manifest')
all_job_runs = self.job_run_dao.get_all()
self.assertEqual(len(all_job_runs), 2)
self.assertTrue(all(obj.runResult in [GenomicSubProcessResult.
SUCCESS, GenomicSubProcessResult.ERROR] for obj in all_job_runs))
self.assertTrue(all(obj.jobId == GenomicJob.PR_PR_WORKFLOW for obj in
all_job_runs))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class GenomicJobControllerTest(BaseTestCase):
def setUp(self):
super(GenomicJobControllerTest, self).setUp()
self.data_file_dao = GenomicGcDataFileDao()
self.event_data_dao = MessageBrokenEventDataDao()
self.incident_dao = GenomicIncidentDao()
self.member_dao = GenomicSetMemberDao()
self.metrics_dao = GenomicGCValidationMetricsDao()
self.user_event_metrics_dao = UserEventMetricsDao()
self.job_run_dao = GenomicJobRunDao()
self.report_state_dao = GenomicMemberReportStateDao()
self.appointment_event_dao = GenomicAppointmentEventDao()
self.appointment_metrics_dao = GenomicAppointmentEventMetricsDao()
def test_incident_with_long_message(self):
"""Make sure the length of incident messages doesn't cause issues when recording them"""
incident_message = '1' * (GenomicIncident.message.type.length + 20)
mock_slack_handler = mock.MagicMock()
job_controller = GenomicJobController(job_id=1)
job_controller.genomic_alert_slack = mock_slack_handler
job_controller.create_incident(message=incident_message, slack=True)
incident: GenomicIncident = self.session.query(GenomicIncident).one()
self.assertTrue(incident_message.startswith(incident.message))
mock_slack_handler.send_message_to_webhook.assert_called_with(
message_data={'text': incident_message})
def test_gvcf_files_ingestion(self):
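        # Ingesting the gVCF and its md5 checksum should populate gvcfPath and
        # gvcfMd5Path on the member's GC validation metrics record.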
job_controller = GenomicJobController(job_id=38)
bucket_name = 'test_bucket'
file_path = (
'Wgs_sample_raw_data/SS_VCF_research/BCM_A100153482_21042005280_SIA0013441__1.hard-filtered.gvcf.gz'
)
file_path_md5 = (
'Wgs_sample_raw_data/SS_VCF_research/BCM_A100153482_21042005280_SIA0013441__1.hard-filtered.gvcf.gz.md5sum'
)
full_path = f'{bucket_name}/{file_path}'
full_path_md5 = f'{bucket_name}/{file_path_md5}'
gen_set = self.data_generator.create_database_genomic_set(
genomicSetName='.', genomicSetCriteria='.', genomicSetVersion=1)
gen_member = self.data_generator.create_database_genomic_set_member(
genomicSetId=gen_set.id, biobankId='100153482', sampleId=
'21042005280', genomeType='aou_wgs', genomicWorkflowState=
GenomicWorkflowState.AW1)
gen_job_run = self.data_generator.create_database_genomic_job_run(jobId
=GenomicJob.AW1_MANIFEST, startTime=clock.CLOCK.now(),
runResult=GenomicSubProcessResult.SUCCESS)
gen_processed_file = (self.data_generator.
create_database_genomic_file_processed(runId=gen_job_run.id,
startTime=clock.CLOCK.now(), filePath='/test_file_path',
bucketName='test_bucket', fileName='test_file_name'))
self.data_generator.create_database_genomic_gc_validation_metrics(
genomicSetMemberId=gen_member.id, genomicFileProcessedId=
gen_processed_file.id)
job_controller.ingest_data_files_into_gc_metrics(file_path_md5,
bucket_name)
metrics = self.metrics_dao.get_metrics_by_member_id(gen_member.id)
self.assertIsNotNone(metrics.gvcfMd5Path)
self.assertEqual(metrics.gvcfMd5Path, full_path_md5)
job_controller.ingest_data_files_into_gc_metrics(file_path, bucket_name
)
metrics = self.metrics_dao.get_metrics_by_member_id(gen_member.id)
self.assertIsNotNone(metrics.gvcfPath)
self.assertEqual(metrics.gvcfPath, full_path)
def test_gvcf_files_ingestion_create_incident(self):
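        # When no metrics record matches the sample id parsed from the file path, the
        # controller should open an UNABLE_TO_FIND_METRIC incident for that file.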
bucket_name = 'test_bucket'
file_path = (
'Wgs_sample_raw_data/SS_VCF_research/BCM_A100153482_21042005280_SIA0013441__1.hard-filtered.gvcf.gz'
)
gen_set = self.data_generator.create_database_genomic_set(
genomicSetName='.', genomicSetCriteria='.', genomicSetVersion=1)
gen_member = self.data_generator.create_database_genomic_set_member(
genomicSetId=gen_set.id, biobankId='111111111', sampleId=
'222222222222', genomeType='aou_wgs', genomicWorkflowState=
GenomicWorkflowState.AW1)
gen_job_run = self.data_generator.create_database_genomic_job_run(jobId
=GenomicJob.AW1_MANIFEST, startTime=clock.CLOCK.now(),
runResult=GenomicSubProcessResult.SUCCESS)
gen_processed_file = (self.data_generator.
create_database_genomic_file_processed(runId=gen_job_run.id,
startTime=clock.CLOCK.now(), filePath='/test_file_path',
bucketName=bucket_name, fileName='test_file_name'))
self.data_generator.create_database_genomic_gc_validation_metrics(
genomicSetMemberId=gen_member.id, genomicFileProcessedId=
gen_processed_file.id)
with GenomicJobController(GenomicJob.INGEST_DATA_FILES) as controller:
controller.ingest_data_files_into_gc_metrics(file_path, bucket_name
)
incident = self.incident_dao.get(1)
self.assertIsNotNone(incident)
self.assertEqual(incident.code, GenomicIncidentCode.
UNABLE_TO_FIND_METRIC.name)
self.assertEqual(incident.data_file_path, file_path)
self.assertEqual(incident.message,
'INGEST_DATA_FILES: Cannot find genomics metric record for sample id: 21042005280'
)
def test_accession_data_files(self):
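        # Accessioning idat, vcf and cram files should derive gc_site_id, file prefix,
        # file type and identifier (chipwellbarcode or sample_id) from each path.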
test_bucket_baylor = 'fake-data-bucket-baylor'
test_idat_file = (
'fake-data-bucket-baylor/Genotyping_sample_raw_data/204027270091_R02C01_Grn.idat'
)
test_vcf_file = (
'fake-data-bucket-baylor/Genotyping_sample_raw_data/204027270091_R02C01.vcf.gz'
)
test_cram_file = (
'fake-data-bucket-baylor/Wgs_sample_raw_data/CRAMs_CRAIs/BCM_A100134256_21063006771_SIA0017196_1.cram'
)
test_files = [test_idat_file, test_vcf_file, test_cram_file]
test_time = datetime.datetime(2021, 7, 9, 14, 1, 1)
with clock.FakeClock(test_time):
for file_path in test_files:
with GenomicJobController(GenomicJob.ACCESSION_DATA_FILES
) as controller:
controller.accession_data_files(file_path,
test_bucket_baylor)
inserted_files = self.data_file_dao.get_all()
expected_idat = GenomicGcDataFile(id=1, created=test_time, modified
=test_time, file_path=test_idat_file, gc_site_id='jh',
bucket_name='fake-data-bucket-baylor', file_prefix=
'Genotyping_sample_raw_data', file_name=
'204027270091_R02C01_Grn.idat', file_type='Grn.idat',
identifier_type='chipwellbarcode', identifier_value=
'204027270091_R02C01', ignore_flag=0)
expected_vcf = GenomicGcDataFile(id=2, created=test_time, modified=
test_time, file_path=test_vcf_file, gc_site_id='jh',
bucket_name='fake-data-bucket-baylor', file_prefix=
'Genotyping_sample_raw_data', file_name=
'204027270091_R02C01.vcf.gz', file_type='vcf.gz',
identifier_type='chipwellbarcode', identifier_value=
'204027270091_R02C01', ignore_flag=0)
expected_cram = GenomicGcDataFile(id=3, created=test_time, modified
=test_time, file_path=test_cram_file, gc_site_id='bcm',
bucket_name='fake-data-bucket-baylor', file_prefix=
'Wgs_sample_raw_data/CRAMs_CRAIs', file_name=
'BCM_A100134256_21063006771_SIA0017196_1.cram', file_type=
'cram', identifier_type='sample_id', identifier_value=
'21063006771', ignore_flag=0)
        expected_objs = {0: expected_idat, 1: expected_vcf, 2: expected_cram}
for i in range(3):
self.assertEqual(expected_objs[i].bucket_name, inserted_files[i
].bucket_name)
self.assertEqual(expected_objs[i].created, inserted_files[i].
created)
self.assertEqual(expected_objs[i].file_name, inserted_files[i].
file_name)
self.assertEqual(expected_objs[i].file_path, inserted_files[i].
file_path)
self.assertEqual(expected_objs[i].file_prefix, inserted_files[i
].file_prefix)
self.assertEqual(expected_objs[i].file_type, inserted_files[i].
file_type)
self.assertEqual(expected_objs[i].gc_site_id, inserted_files[i]
.gc_site_id)
self.assertEqual(expected_objs[i].id, inserted_files[i].id)
self.assertEqual(expected_objs[i].identifier_type,
inserted_files[i].identifier_type)
self.assertEqual(expected_objs[i].identifier_value,
inserted_files[i].identifier_value)
self.assertEqual(expected_objs[i].ignore_flag, inserted_files[i
].ignore_flag)
self.assertEqual(expected_objs[i].metadata, inserted_files[i].
metadata)
self.assertEqual(expected_objs[i].modified, inserted_files[i].
modified)
def test_updating_members_blocklists(self):
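        # AI/AN members are research-blocked with reason 'aian' (results stay
        # unblocked); investigation genome types are blocked for both research and
        # results with 'test_sample_swap'; aou_array members stay unblocked. The job
        # is then re-run against AW1 members to cover already-processed records.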
gen_set = self.data_generator.create_database_genomic_set(
genomicSetName='.', genomicSetCriteria='.', genomicSetVersion=1)
ids_should_be_updated = []
for i in range(4):
ids_should_be_updated.append(self.data_generator.
create_database_genomic_set_member(genomicSetId=gen_set.id,
biobankId='100153482', sampleId='21042005280', genomeType=
'test_investigation_one' if i & 2 != 0 else 'aou_wgs',
genomicWorkflowState=GenomicWorkflowState.AW0, ai_an='Y' if
i & 2 == 0 else 'N').id)
for i in range(2):
self.data_generator.create_database_genomic_set_member(genomicSetId
=gen_set.id, biobankId='100153482', sampleId='21042005280',
genomeType='aou_array', genomicWorkflowState=
GenomicWorkflowState.AW0, ai_an='N')
with GenomicJobController(GenomicJob.UPDATE_MEMBERS_BLOCKLISTS
) as controller:
controller.update_members_blocklists()
created_members = self.member_dao.get_all()
blocklisted = list(filter(lambda x: x.blockResults == 1 or x.
blockResearch == 1, created_members))
        self.assertEqual(sorted(ids_should_be_updated),
            sorted([obj.id for obj in blocklisted]))
self.assertTrue(all(obj.blockResearch == 1 and obj.
blockResearchReason is not None and obj.blockResearchReason ==
'aian' for obj in created_members if obj.ai_an == 'Y' and obj.
genomicWorkflowState == GenomicWorkflowState.AW0))
self.assertTrue(all(obj.blockResults == 0 and obj.
blockResultsReason is None for obj in created_members if obj.
ai_an == 'Y' and obj.genomicWorkflowState ==
GenomicWorkflowState.AW0))
self.assertTrue(all(obj.blockResearch == 1 and obj.
blockResearchReason is not None and obj.blockResearchReason ==
'test_sample_swap' for obj in created_members if obj.genomeType ==
'test_investigation_one' and obj.genomicWorkflowState ==
GenomicWorkflowState.AW0))
self.assertTrue(all(obj.blockResults == 1 and obj.
blockResultsReason is not None and obj.blockResultsReason ==
'test_sample_swap' for obj in created_members if obj.genomeType ==
'test_investigation_one' and obj.genomicWorkflowState ==
GenomicWorkflowState.AW0))
self.assertTrue(all(obj.blockResearch == 0 and obj.
blockResearchReason is None for obj in created_members if obj.
genomeType == 'aou_array' and obj.genomicWorkflowState ==
GenomicWorkflowState.AW0))
self.assertTrue(all(obj.blockResults == 0 and obj.
blockResultsReason is None for obj in created_members if obj.
genomeType == 'aou_array' and obj.genomicWorkflowState ==
GenomicWorkflowState.AW0))
with self.member_dao.session() as session:
session.query(GenomicSetMember).delete()
run_result = self.job_run_dao.get(1)
self.assertEqual(run_result.runStatus, GenomicSubProcessStatus.
COMPLETED)
self.assertEqual(run_result.runResult, GenomicSubProcessResult.SUCCESS)
for i in range(4):
self.data_generator.create_database_genomic_set_member(genomicSetId
=gen_set.id, biobankId='100153482', sampleId='21042005280',
genomeType='test_investigation_one' if i & 2 != 0 else
'aou_wgs', genomicWorkflowState=GenomicWorkflowState.AW1,
ai_an='Y' if i & 2 == 0 else 'N')
with GenomicJobController(GenomicJob.UPDATE_MEMBERS_BLOCKLISTS
) as controller:
controller.update_members_blocklists()
modified_members = self.member_dao.get_all()
self.assertTrue(all(obj.blockResearch == 1 and obj.
blockResearchReason is not None and obj.blockResearchReason ==
'aian' for obj in modified_members if obj.ai_an == 'Y' and obj.
genomicWorkflowState == GenomicWorkflowState.AW1))
self.assertTrue(all(obj.blockResults == 0 and obj.
blockResultsReason is None for obj in modified_members if obj.
ai_an == 'Y' and obj.genomicWorkflowState ==
GenomicWorkflowState.AW1))
self.assertTrue(all(obj.blockResearch == 1 and obj.
blockResearchReason is not None and obj.blockResearchReason ==
'test_sample_swap' for obj in modified_members if obj.
genomeType == 'test_investigation_one' and obj.
genomicWorkflowState == GenomicWorkflowState.AW1))
self.assertTrue(all(obj.blockResults == 1 and obj.
blockResultsReason is not None and obj.blockResultsReason ==
'test_sample_swap' for obj in modified_members if obj.
genomeType == 'test_investigation_one' and obj.
genomicWorkflowState == GenomicWorkflowState.AW1))
run_result = self.job_run_dao.get(2)
self.assertEqual(run_result.runStatus, GenomicSubProcessStatus.
COMPLETED)
self.assertEqual(run_result.runResult, GenomicSubProcessResult.SUCCESS)
def test_ingest_user_metrics_file(self):
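        # Each row in the user events CSV should land in user_event_metrics for the
        # matching participant and be tagged with the ingesting job run id.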
test_file = 'Genomic-Metrics-File-User-Events-Test.csv'
bucket_name = 'test_bucket'
sub_folder = 'user_events'
pids = []
file_ingester = GenomicFileIngester()
for _ in range(2):
pid = self.data_generator.create_database_participant()
pids.append(pid.participantId)
test_metrics_file = create_ingestion_test_file(test_file,
bucket_name, sub_folder)
test_file_path = f'{bucket_name}/{sub_folder}/{test_metrics_file}'
with open_cloud_file(test_file_path) as csv_file:
metrics_to_ingest = file_ingester._read_data_to_ingest(csv_file)
with GenomicJobController(GenomicJob.METRICS_FILE_INGEST
) as controller:
controller.ingest_metrics_file(metric_type='user_events',
file_path=test_file_path)
job_run_id = controller.job_run.id
metrics = self.user_event_metrics_dao.get_all()
for pid in pids:
file_metrics = list(filter(lambda x: int(x['participant_id'].
split('P')[-1]) == pid, metrics_to_ingest['rows']))
participant_ingested_metrics = list(filter(lambda x: x.
participant_id == pid, metrics))
self.assertEqual(len(file_metrics), len(
participant_ingested_metrics))
self.assertTrue(all(obj.run_id == job_run_id for obj in
participant_ingested_metrics))
@mock.patch(
'rdr_service.genomic.genomic_job_controller.GenomicJobController.execute_cloud_task'
)
def test_reconcile_pdr_data(self, mock_cloud_task):
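        # The first reconcile only rebuilds the genomic_job_run table; after seeding
        # rows in the other genomic tables, a second run should issue one rebuild
        # cloud task per affected table.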
with GenomicJobController(GenomicJob.RECONCILE_PDR_DATA) as controller:
controller.reconcile_pdr_data()
cloud_task_endpoint = 'rebuild_genomic_table_records_task'
first_run = self.job_run_dao.get_all()
self.assertEqual(mock_cloud_task.call_count, 1)
call_args = mock_cloud_task.call_args_list
self.assertEqual(len(call_args), 1)
self.assertEqual(call_args[0].args[0]['table'], self.job_run_dao.
model_type.__tablename__)
self.assertTrue(type(call_args[0].args[0]['ids']) is list)
self.assertEqual(call_args[0].args[0]['ids'], [obj.id for obj in
first_run])
self.assertEqual(call_args[0].args[1], cloud_task_endpoint)
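        # Seed one or two records in every other genomic table, timestamped after the
        # first run, so the next reconcile picks up each of them.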
participant = self.data_generator.create_database_participant()
gen_set = self.data_generator.create_database_genomic_set(
genomicSetName='.', genomicSetCriteria='.', genomicSetVersion=1)
plus_ten = clock.CLOCK.now() + datetime.timedelta(minutes=10)
plus_ten = plus_ten.replace(microsecond=0)
with FakeClock(plus_ten):
for i in range(2):
gen_member = (self.data_generator.
create_database_genomic_set_member(genomicSetId=gen_set
.id, biobankId='100153482', sampleId='21042005280',
genomeType='aou_wgs', genomicWorkflowState=
GenomicWorkflowState.AW1))
gen_processed_file = (self.data_generator.
create_database_genomic_file_processed(runId=first_run[
0].id, startTime=clock.CLOCK.now(), filePath=
f'test_file_path_{i}', bucketName='test_bucket',
fileName='test_file_name'))
self.data_generator.create_database_genomic_gc_validation_metrics(
genomicSetMemberId=gen_member.id,
genomicFileProcessedId=gen_processed_file.id)
manifest = (self.data_generator.
create_database_genomic_manifest_file(manifestTypeId=2,
filePath=f'test_file_path_{i}'))
self.data_generator.create_database_genomic_manifest_feedback(
inputManifestFileId=manifest.id, feedbackRecordCount=2)
self.data_generator.create_database_genomic_user_event_metrics(
participant_id=participant.participantId, event_name=
'test_event', run_id=1)
self.data_generator.create_database_genomic_informing_loop(
message_record_id=1, event_type=
'informing_loop_decision', module_type='gem',
participant_id=participant.participantId,
decision_value='maybe_later', event_authored_time=clock
.CLOCK.now())
self.data_generator.create_database_genomic_cvl_past_due(
cvl_site_id='co', email_notification_sent=0, sample_id=
'sample_test', results_type='hdr',
genomic_set_member_id=gen_member.id)
self.data_generator.create_database_genomic_appointment(
message_record_id=i, appointment_id=i, event_type=
'appointment_scheduled', module_type='hdr',
participant_id=participant.participantId,
event_authored_time=clock.CLOCK.now(), source='Color',
appointment_timestamp=format_datetime(clock.CLOCK.now()
), appointment_timezone='America/Los_Angeles', location
='123 address st', contact_number='17348675309',
language='en')
self.data_generator.create_database_genomic_member_report_state(
genomic_set_member_id=gen_member.id, participant_id=
participant.participantId, module='gem',
genomic_report_state=GenomicReportState.GEM_RPT_READY,
event_authored_time=clock.CLOCK.now())
self.data_generator.create_genomic_result_viewed(participant_id
=participant.participantId, event_type='result_viewed',
event_authored_time=clock.CLOCK.now(), module_type=
'gem', sample_id=gen_member.sampleId)
with GenomicJobController(GenomicJob.RECONCILE_PDR_DATA) as controller:
controller.reconcile_pdr_data()
affected_tables = ['genomic_set', 'genomic_set_member',
'genomic_job_run', 'genomic_file_processed',
'genomic_gc_validation_metrics', 'genomic_manifest_file',
'genomic_manifest_feedback', 'genomic_informing_loop',
'genomic_cvl_results_past_due', 'user_event_metrics',
'genomic_member_report_state', 'genomic_result_viewed',
'genomic_appointment_event']
num_calls = len(affected_tables) + 1
self.assertEqual(mock_cloud_task.call_count, num_calls)
call_args = mock_cloud_task.call_args_list
self.assertEqual(len(call_args), num_calls)
mock_tables = set([obj[0][0]['table'] for obj in call_args])
mock_endpoint = [obj[0][1] for obj in call_args]
        self.assertEqual(sorted(mock_tables), sorted(affected_tables))
        self.assertTrue(all(obj == cloud_task_endpoint for obj in mock_endpoint))
@mock.patch(
'rdr_service.genomic.genomic_job_controller.GenomicJobController.execute_cloud_task'
)
def test_retry_manifest_ingestions_if_deltas(self, mock_cloud_task):
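        # Retry ingestion should be a no-op (NO_FILES) while raw records and manifest
        # files are either missing or fully ingested, and should queue AW1/AW2
        # ingestion cloud tasks once member and metrics rows are deleted, leaving
        # deltas against the raw data.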
bucket_name = 'test-bucket'
aw1_file_name = (
'AW1_wgs_sample_manifests/RDR_AoU_SEQ_PKG-2104-026571.csv')
aw1_manifest_path = f'{bucket_name}/{aw1_file_name}'
aw2_file_name = (
'AW2_wgs_data_manifests/RDR_AoU_SEQ_DataManifest_04092021.csv')
aw2_manifest_path = f'{bucket_name}/{aw2_file_name}'
gen_set = self.data_generator.create_database_genomic_set(
genomicSetName='.', genomicSetCriteria='.', genomicSetVersion=1)
aw1_job_run = self.data_generator.create_database_genomic_job_run(jobId
=GenomicJob.AW1_MANIFEST, startTime=clock.CLOCK.now(), endTime=
clock.CLOCK.now(), runResult=GenomicSubProcessResult.SUCCESS)
aw2_job_run = self.data_generator.create_database_genomic_job_run(jobId
=GenomicJob.METRICS_INGESTION, startTime=clock.CLOCK.now(),
endTime=clock.CLOCK.now(), runResult=GenomicSubProcessResult.
SUCCESS)
with GenomicJobController(GenomicJob.RETRY_MANIFEST_INGESTIONS
) as controller:
controller.retry_manifest_ingestions()
job_run = self.job_run_dao.get(3)
self.assertEqual(job_run.jobId, GenomicJob.RETRY_MANIFEST_INGESTIONS)
self.assertEqual(job_run.runStatus, GenomicSubProcessStatus.COMPLETED)
self.assertEqual(job_run.runResult, GenomicSubProcessResult.NO_FILES)
self.assertEqual(mock_cloud_task.call_count, 0)
self.assertFalse(mock_cloud_task.call_count)
self.data_generator.create_database_genomic_aw1_raw(file_path=
aw1_manifest_path, package_id='PKG-2104-026571', biobank_id=
'A10001')
self.data_generator.create_database_genomic_aw2_raw(file_path=
aw2_manifest_path, biobank_id='A10001', sample_id='100001',
biobankidsampleid='A10001_100001')
aw1_manifest_file = (self.data_generator.
create_database_genomic_manifest_file(created=clock.CLOCK.now(),
modified=clock.CLOCK.now(), uploadDate=clock.CLOCK.now(),
manifestTypeId=GenomicManifestTypes.AW1, filePath=
aw1_manifest_path, fileName=aw1_file_name, bucketName=
bucket_name, recordCount=1, rdrProcessingComplete=1,
rdrProcessingCompleteDate=clock.CLOCK.now()))
aw2_manifest_file = (self.data_generator.
create_database_genomic_manifest_file(created=clock.CLOCK.now(),
modified=clock.CLOCK.now(), uploadDate=clock.CLOCK.now(),
manifestTypeId=GenomicManifestTypes.AW2, filePath=
aw2_manifest_path, fileName=aw2_file_name, bucketName=
bucket_name, recordCount=1, rdrProcessingComplete=1,
rdrProcessingCompleteDate=clock.CLOCK.now()))
aw1_file_processed = (self.data_generator.
create_database_genomic_file_processed(runId=aw1_job_run.id,
startTime=clock.CLOCK.now(), genomicManifestFileId=
aw1_manifest_file.id, filePath=f'/{aw1_manifest_path}',
bucketName=bucket_name, fileName=aw1_file_name))
aw2_file_processed = (self.data_generator.
create_database_genomic_file_processed(runId=aw2_job_run.id,
startTime=clock.CLOCK.now(), genomicManifestFileId=
aw2_manifest_file.id, filePath=f'/{aw2_manifest_path}',
bucketName=bucket_name, fileName=aw2_file_name))
gen_member = self.data_generator.create_database_genomic_set_member(
genomicSetId=gen_set.id, biobankId='100153482', sampleId=
'21042005280', genomeType='aou_wgs', genomicWorkflowState=
GenomicWorkflowState.AW1, aw1FileProcessedId=aw1_file_processed.id)
self.data_generator.create_database_genomic_gc_validation_metrics(
genomicSetMemberId=gen_member.id, genomicFileProcessedId=
aw2_file_processed.id)
with GenomicJobController(GenomicJob.RETRY_MANIFEST_INGESTIONS
) as controller:
controller.retry_manifest_ingestions()
job_run = self.job_run_dao.get(4)
self.assertEqual(job_run.jobId, GenomicJob.RETRY_MANIFEST_INGESTIONS)
self.assertEqual(job_run.runStatus, GenomicSubProcessStatus.COMPLETED)
self.assertEqual(job_run.runResult, GenomicSubProcessResult.NO_FILES)
self.assertEqual(mock_cloud_task.call_count, 0)
self.assertFalse(mock_cloud_task.call_count)
with self.member_dao.session() as session:
session.query(GenomicGCValidationMetrics).delete()
session.query(GenomicSetMember).delete()
with GenomicJobController(GenomicJob.RETRY_MANIFEST_INGESTIONS
) as controller:
controller.retry_manifest_ingestions()
job_run = self.job_run_dao.get(5)
self.assertEqual(job_run.jobId, GenomicJob.RETRY_MANIFEST_INGESTIONS)
self.assertEqual(job_run.runStatus, GenomicSubProcessStatus.COMPLETED)
self.assertEqual(job_run.runResult, GenomicSubProcessResult.SUCCESS)
self.assertEqual(mock_cloud_task.call_count, 2)
self.assertTrue(mock_cloud_task.call_count)
call_args = mock_cloud_task.call_args_list
self.assertEqual(len(call_args), 2)
cloud_task_endpoint = ['ingest_aw1_manifest_task',
'ingest_aw2_manifest_task']
mock_endpoint = [obj[0][1] for obj in call_args]
        self.assertTrue(all(obj in cloud_task_endpoint for obj in mock_endpoint))
mock_buckets = set([obj[0][0]['bucket_name'] for obj in call_args])
        self.assertEqual(len(mock_buckets), 1)
self.assertTrue(list(mock_buckets)[0] == bucket_name)
def test_calculate_informing_loop_ready_flags(self):
num_participants = 4
gen_set = self.data_generator.create_database_genomic_set(
genomicSetName='.', genomicSetCriteria='.', genomicSetVersion=1)
for num in range(num_participants):
plus_num = clock.CLOCK.now() + datetime.timedelta(minutes=num)
plus_num = plus_num.replace(microsecond=0)
with FakeClock(plus_num):
summary = (self.data_generator.
create_database_participant_summary(
consentForStudyEnrollment=1, consentForGenomicsROR=1))
stored_sample = (self.data_generator.
create_database_biobank_stored_sample(biobankId=summary
.biobankId, biobankOrderIdentifier=self.fake.pyint()))
collection_site = self.data_generator.create_database_site(
siteType='Clinic')
order = self.data_generator.create_database_biobank_order(
collectedSiteId=collection_site.siteId, participantId=
summary.participantId, finalizedTime=plus_num)
self.data_generator.create_database_biobank_order_identifier(
value=stored_sample.biobankOrderIdentifier,
biobankOrderId=order.biobankOrderId, system='1')
self.data_generator.create_database_biobank_order_identifier(
value=stored_sample.biobankOrderIdentifier,
biobankOrderId=order.biobankOrderId, system='2')
member = (self.data_generator.
create_database_genomic_set_member(genomicSetId=gen_set
.id, participantId=summary.participantId, genomeType=
config.GENOME_TYPE_WGS, qcStatus=GenomicQcStatus.PASS,
gcManifestSampleSource='Whole Blood', collectionTubeId=
stored_sample.biobankStoredSampleId))
self.data_generator.create_database_genomic_gc_validation_metrics(
genomicSetMemberId=member.id, sexConcordance='True',
drcFpConcordance='Pass', drcSexConcordance='Pass',
processingStatus='Pass')
members_for_ready_loop = (self.member_dao.
get_members_for_informing_loop_ready())
self.assertEqual(len(members_for_ready_loop), num_participants)
current_set_members = self.member_dao.get_all()
self.assertTrue(all(obj.informingLoopReadyFlag == 0 for obj in
current_set_members))
self.assertTrue(all(obj.informingLoopReadyFlagModified is None for
obj in current_set_members))
with GenomicJobController(GenomicJob.CALCULATE_INFORMING_LOOP_READY
) as controller:
controller.calculate_informing_loop_ready_flags()
members_for_ready_loop = (self.member_dao.
get_members_for_informing_loop_ready())
self.assertEqual(len(members_for_ready_loop), num_participants)
calculation_limit = 2
config.override_setting(config.CALCULATE_READY_FLAG_LIMIT, [
calculation_limit])
with GenomicJobController(GenomicJob.CALCULATE_INFORMING_LOOP_READY
) as controller:
controller.calculate_informing_loop_ready_flags()
current_set_members = self.member_dao.get_all()
self.assertTrue(any(obj.informingLoopReadyFlag == 1 for obj in
current_set_members))
self.assertTrue(any(obj.informingLoopReadyFlagModified is not None for
obj in current_set_members))
current_loops_set = [obj for obj in current_set_members if obj.
informingLoopReadyFlag == 1 and obj.
informingLoopReadyFlagModified is not None]
self.assertEqual(len(current_loops_set), calculation_limit)
members_for_ready_loop = (self.member_dao.
get_members_for_informing_loop_ready())
self.assertEqual(len(members_for_ready_loop), num_participants // 2)
with GenomicJobController(GenomicJob.CALCULATE_INFORMING_LOOP_READY
) as controller:
controller.calculate_informing_loop_ready_flags()
current_set_members = self.member_dao.get_all()
self.assertTrue(all(obj.informingLoopReadyFlag == 1 for obj in
current_set_members))
self.assertTrue(all(obj.informingLoopReadyFlagModified is not None for
obj in current_set_members))
members_for_ready_loop = (self.member_dao.
get_members_for_informing_loop_ready())
self.assertEqual(len(members_for_ready_loop), 0)
@mock.patch('rdr_service.services.email_service.EmailService.send_email')
def test_getting_results_withdrawn(self, email_mock):
num_participants = 4
result_withdrawal_dao = GenomicResultWithdrawalsDao()
gen_set = self.data_generator.create_database_genomic_set(
genomicSetName='.', genomicSetCriteria='.', genomicSetVersion=1)
gen_job_run = self.data_generator.create_database_genomic_job_run(jobId
=GenomicJob.AW1_MANIFEST, startTime=clock.CLOCK.now(),
runResult=GenomicSubProcessResult.SUCCESS)
pids = []
for num in range(num_participants):
summary = self.data_generator.create_database_participant_summary(
consentForStudyEnrollment=1, consentForGenomicsROR=1,
withdrawalStatus=WithdrawalStatus.EARLY_OUT)
self.data_generator.create_database_genomic_set_member(genomicSetId
=gen_set.id, participantId=summary.participantId,
genomeType=config.GENOME_TYPE_ARRAY, gemA1ManifestJobRunId=
gen_job_run.id if num % 2 == 0 else None)
self.data_generator.create_database_genomic_set_member(genomicSetId
=gen_set.id, participantId=summary.participantId,
genomeType=config.GENOME_TYPE_WGS, cvlW1ilHdrJobRunId=
gen_job_run.id)
pids.append(summary.participantId)
config.override_setting(config.RDR_GENOMICS_NOTIFICATION_EMAIL,
'[email protected]')
with GenomicJobController(GenomicJob.RESULTS_PIPELINE_WITHDRAWALS
) as controller:
controller.check_results_withdrawals()
self.assertEqual(email_mock.call_count, 2)
call_args = email_mock.call_args_list
self.assertTrue(any('GEM' in call.args[0].subject for call in
call_args))
self.assertTrue(any('HEALTH' in call.args[0].subject for call in
call_args))
job_runs = self.job_run_dao.get_all()
current_job_run = list(filter(lambda x: x.jobId == GenomicJob.
RESULTS_PIPELINE_WITHDRAWALS, job_runs))[0]
self.assertTrue(current_job_run.runResult ==
GenomicSubProcessResult.SUCCESS)
all_withdrawal_records = result_withdrawal_dao.get_all()
self.assertTrue(len(all_withdrawal_records) == len(pids))
self.assertTrue(all(obj.participant_id in pids for obj in
all_withdrawal_records))
array_results = list(filter(lambda x: x.array_results == 1,
all_withdrawal_records))
        self.assertEqual(len(array_results), 2)
cvl_results = list(filter(lambda x: x.cvl_results == 1,
all_withdrawal_records))
        self.assertEqual(len(cvl_results), num_participants)
with GenomicJobController(GenomicJob.RESULTS_PIPELINE_WITHDRAWALS
) as controller:
controller.check_results_withdrawals()
self.assertEqual(email_mock.call_count, 2)
job_runs = self.job_run_dao.get_all()
current_job_run = list(filter(lambda x: x.jobId == GenomicJob.
RESULTS_PIPELINE_WITHDRAWALS, job_runs))[1]
self.assertTrue(current_job_run.runResult ==
GenomicSubProcessResult.NO_RESULTS)
def test_gem_results_to_report_state(self):
num_participants = 8
gen_set = self.data_generator.create_database_genomic_set(
genomicSetName='.', genomicSetCriteria='.', genomicSetVersion=1)
gem_a2_job_run = self.data_generator.create_database_genomic_job_run(
jobId=GenomicJob.GEM_A2_MANIFEST, startTime=clock.CLOCK.now(),
runResult=GenomicSubProcessResult.SUCCESS)
pids_to_update, member_ids = [], []
for num in range(num_participants):
summary = self.data_generator.create_database_participant_summary(
consentForStudyEnrollment=1, consentForGenomicsROR=1,
withdrawalStatus=WithdrawalStatus.EARLY_OUT)
member = self.data_generator.create_database_genomic_set_member(
genomicSetId=gen_set.id, participantId=summary.
participantId, genomeType=config.GENOME_TYPE_ARRAY)
if num % 2 == 0:
member_ids.append(member.id)
pids_to_update.append(summary.participantId)
with GenomicJobController(GenomicJob.GEM_RESULT_REPORTS) as controller:
controller.gem_results_to_report_state()
current_job_runs = self.job_run_dao.get_all()
self.assertEqual(len(current_job_runs), 2)
current_job_run = list(filter(lambda x: x.jobId == GenomicJob.
GEM_RESULT_REPORTS, current_job_runs))[0]
self.assertTrue(current_job_run.runResult ==
GenomicSubProcessResult.NO_RESULTS)
current_members = self.member_dao.get_all()
for member in current_members:
if member.participantId in pids_to_update:
member.gemA2ManifestJobRunId = gem_a2_job_run.id
member.genomicWorkflowState = (GenomicWorkflowState.
GEM_RPT_READY)
self.member_dao.update(member)
with GenomicJobController(GenomicJob.GEM_RESULT_REPORTS) as controller:
controller.gem_results_to_report_state()
current_job_runs = self.job_run_dao.get_all()
self.assertEqual(len(current_job_runs), 3)
current_job_run = list(filter(lambda x: x.jobId == GenomicJob.
GEM_RESULT_REPORTS, current_job_runs))[1]
self.assertTrue(current_job_run.runResult ==
GenomicSubProcessResult.SUCCESS)
current_gem_report_states = self.report_state_dao.get_all()
self.assertEqual(len(current_gem_report_states), len(pids_to_update))
self.assertTrue(all(obj.event_type == 'result_ready' for obj in
current_gem_report_states))
self.assertTrue(all(obj.event_authored_time is not None for obj in
current_gem_report_states))
self.assertTrue(all(obj.module == 'gem' for obj in
current_gem_report_states))
self.assertTrue(all(obj.genomic_report_state == GenomicReportState.
GEM_RPT_READY for obj in current_gem_report_states))
self.assertTrue(all(obj.genomic_report_state_str ==
GenomicReportState.GEM_RPT_READY.name for obj in
current_gem_report_states))
self.assertTrue(all(obj.genomic_set_member_id in member_ids for obj in
current_gem_report_states))
with GenomicJobController(GenomicJob.GEM_RESULT_REPORTS) as controller:
controller.gem_results_to_report_state()
current_job_runs = self.job_run_dao.get_all()
self.assertEqual(len(current_job_runs), 4)
current_job_run = list(filter(lambda x: x.jobId == GenomicJob.
GEM_RESULT_REPORTS, current_job_runs))[2]
self.assertTrue(current_job_run.runResult ==
GenomicSubProcessResult.NO_RESULTS)
self.clear_table_after_test('genomic_member_report_state')
def test_reconcile_informing_loop(self):
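        # Informing-loop decisions recorded in user event metrics but missing from
        # genomic_informing_loop should be backfilled per module; the latest decision
        # per participant wins (gem 'yes', hdr/pgx 'no').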
event_dao = UserEventMetricsDao()
event_dao.truncate()
il_dao = GenomicInformingLoopDao()
for pid in range(8):
self.data_generator.create_database_participant(participantId=1 +
pid, biobankId=1 + pid)
self.data_generator.create_database_genomic_job_run(jobId=
GenomicJob.METRICS_FILE_INGEST, startTime=clock.CLOCK.now())
self.data_generator.create_database_genomic_set(genomicSetName=
'test', genomicSetCriteria='.', genomicSetVersion=1)
for b in ['aou_array', 'aou_wgs']:
for i in range(1, 9):
self.data_generator.create_database_genomic_set_member(
participantId=i, genomicSetId=1, biobankId=i,
collectionTubeId=100 + i, sampleId=10 + i, genomeType=b)
events = ['gem.informing_loop.started',
'gem.informing_loop.screen8_no',
'gem.informing_loop.screen8_yes', 'hdr.informing_loop.started',
'gem.informing_loop.screen3', 'pgx.informing_loop.screen8_no',
'hdr.informing_loop.screen10_no']
for p in range(4):
for i in range(len(events)):
self.data_generator.create_database_genomic_user_event_metrics(
created=clock.CLOCK.now(), modified=clock.CLOCK.now(),
participant_id=p + 1, created_at=datetime.datetime(2021,
12, 29, 0) + datetime.timedelta(hours=i), event_name=
events[i], run_id=1, ignore_flag=0)
decisions = [None, 'no', 'yes']
for p in range(3):
for i in range(2):
self.data_generator.create_database_genomic_informing_loop(
message_record_id=i, event_type=
'informing_loop_started' if i == 0 else
'informing_loop_decision', module_type='gem',
participant_id=p + 1, decision_value=decisions[i],
sample_id=100 + p, event_authored_time=datetime.
datetime(2021, 12, 29, 0) + datetime.timedelta(hours=i))
self.data_generator.create_database_genomic_user_event_metrics(created
=clock.CLOCK.now(), modified=clock.CLOCK.now(), participant_id=
6, created_at=datetime.datetime(2021, 12, 29, 0), event_name=
'gem.informing_loop.screen8_yes', run_id=1, ignore_flag=0)
genomic_pipeline.reconcile_informing_loop_responses()
pid_list = [1, 2, 3, 6]
new_il_values = il_dao.get_latest_il_for_pids(pid_list=pid_list,
module='gem')
for value in new_il_values:
self.assertEqual('yes', value.decision_value)
pid_list = [1, 2, 3, 4]
for module in ['hdr', 'pgx']:
new_il_values = il_dao.get_latest_il_for_pids(pid_list=pid_list,
module=module)
for value in new_il_values:
self.assertEqual('no', value.decision_value)
self.assertIsNotNone(value.created_from_metric_id)
def test_reconcile_message_broker_results_ready(self):
self.data_generator.create_database_genomic_set(genomicSetName=
'test', genomicSetCriteria='.', genomicSetVersion=1)
self.data_generator.create_database_genomic_job_run(jobId=
GenomicJob.METRICS_FILE_INGEST, startTime=clock.CLOCK.now())
for pid in range(7):
self.data_generator.create_database_participant(participantId=1 +
pid, biobankId=1 + pid)
for i in range(1, 6):
self.data_generator.create_database_genomic_set_member(
participantId=i, genomicSetId=1, biobankId=i,
collectionTubeId=100 + i, sampleId=10 + i, genomeType='aou_wgs'
)
if i < 4:
self.data_generator.create_database_genomic_user_event_metrics(
participant_id=i, created_at=datetime.datetime(2022, 10,
6, 0), event_name='pgx.result_ready', run_id=1)
if i == 4:
self.data_generator.create_database_genomic_user_event_metrics(
participant_id=i, created_at=datetime.datetime(2022, 10,
6, 0), event_name='hdr.result_ready.informative', run_id=1)
if i == 5:
self.data_generator.create_database_genomic_user_event_metrics(
participant_id=i, created_at=datetime.datetime(2022, 10,
6, 0), event_name='hdr.result_ready.uninformative',
run_id=1)
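        # Run the reconcile job and verify a report state record was created for each result_ready event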
genomic_cvl_pipeline.reconcile_message_broker_results_ready()
report_state_dao = GenomicMemberReportStateDao()
states = report_state_dao.get_all()
self.assertEqual(5, len(states))
pgx_records = [rec for rec in states if rec.module == 'pgx_v1']
        hdr_record_uninf = [rec for rec in states if rec.genomic_report_state ==
            GenomicReportState.HDR_RPT_UNINFORMATIVE][0]
hdr_record_pos = [rec for rec in states if rec.genomic_report_state ==
GenomicReportState.HDR_RPT_POSITIVE][0]
for pgx_record in pgx_records:
self.assertEqual(GenomicReportState.PGX_RPT_READY, pgx_record.
genomic_report_state)
self.assertEqual('PGX_RPT_READY', pgx_record.
genomic_report_state_str)
self.assertEqual(int(pgx_record.sample_id), pgx_record.
participant_id + 10)
self.assertEqual('result_ready', pgx_record.event_type)
self.assertEqual(datetime.datetime(2022, 10, 6, 0), pgx_record.
event_authored_time)
self.assertIsNotNone(pgx_record.created_from_metric_id)
self.assertEqual('HDR_RPT_UNINFORMATIVE', hdr_record_uninf.
genomic_report_state_str)
self.assertEqual(int(hdr_record_uninf.sample_id), hdr_record_uninf.
participant_id + 10)
self.assertEqual('result_ready', hdr_record_uninf.event_type)
self.assertEqual(datetime.datetime(2022, 10, 6, 0),
hdr_record_uninf.event_authored_time)
self.assertIsNotNone(hdr_record_uninf.created_from_metric_id)
self.assertEqual('HDR_RPT_POSITIVE', hdr_record_pos.
genomic_report_state_str)
self.assertEqual(int(hdr_record_pos.sample_id), hdr_record_pos.
participant_id + 10)
self.assertEqual('result_ready', hdr_record_pos.event_type)
self.assertEqual(datetime.datetime(2022, 10, 6, 0), hdr_record_pos.
event_authored_time)
self.assertIsNotNone(hdr_record_pos.created_from_metric_id)
def test_reconcile_message_broker_results_viewed(self):
self.data_generator.create_database_genomic_set(genomicSetName=
'test', genomicSetCriteria='.', genomicSetVersion=1)
self.data_generator.create_database_genomic_job_run(jobId=
GenomicJob.METRICS_FILE_INGEST, startTime=clock.CLOCK.now())
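        # Create participants, WGS set members, and one PGx and one HDR result-viewed event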
for pid in range(3):
self.data_generator.create_database_participant(participantId=1 +
pid, biobankId=1 + pid)
for i in range(1, 3):
self.data_generator.create_database_genomic_set_member(
participantId=i, genomicSetId=1, biobankId=i,
collectionTubeId=100 + i, sampleId=10 + i, genomeType='aou_wgs'
)
if i == 1:
self.data_generator.create_database_genomic_user_event_metrics(
participant_id=i, created_at=datetime.datetime(2022, 10,
6, 0), event_name='pgx.opened_at', run_id=1)
if i == 2:
self.data_generator.create_database_genomic_user_event_metrics(
participant_id=i, created_at=datetime.datetime(2022, 10,
6, 0), event_name='hdr.opened_at', run_id=1)
genomic_cvl_pipeline.reconcile_message_broker_results_viewed()
result_viewed_dao = GenomicResultViewedDao()
results = result_viewed_dao.get_all()
self.assertEqual(2, len(results))
for record in results:
if record.participant_id == 1:
self.assertEqual('pgx_v1', record.module_type)
else:
self.assertEqual('hdr_v1', record.module_type)
self.assertEqual(int(record.sample_id), record.participant_id + 10)
self.assertEqual('result_viewed', record.event_type)
self.assertEqual(datetime.datetime(2022, 10, 6, 0), record.
first_viewed)
self.assertIsNotNone(record.created_from_metric_id)
def test_ingest_appointment_metrics_file(self):
test_file = 'Genomic-Metrics-File-Appointment-Events-Test.json'
bucket_name = 'test_bucket'
sub_folder = 'appointment_events'
pids = []
for _ in range(4):
summary = self.data_generator.create_database_participant_summary()
pids.append(summary.participantId)
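        # Write the appointment events test JSON to the cloud bucket for the controller to ingest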
test_file_path = f'{bucket_name}/{sub_folder}/{test_file}'
appointment_data = test_data.load_test_data_json(
'Genomic-Metrics-File-Appointment-Events-Test.json')
appointment_data_str = json.dumps(appointment_data, indent=4)
with open_cloud_file(test_file_path, mode='wb') as cloud_file:
cloud_file.write(appointment_data_str.encode('utf-8'))
with GenomicJobController(GenomicJob.APPOINTMENT_METRICS_FILE_INGEST
) as controller:
controller.ingest_appointment_metrics_file(file_path=test_file_path
)
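        # All five events in the test file should be ingested with their details populated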
all_metrics = self.appointment_metrics_dao.get_all()
self.assertEqual(len(all_metrics), 5)
self.assertTrue(all(obj.participant_id in pids for obj in all_metrics))
self.assertTrue(all(obj.file_path == test_file_path for obj in
all_metrics))
self.assertTrue(all(obj.appointment_event is not None for obj in
all_metrics))
self.assertTrue(all(obj.created is not None for obj in all_metrics))
self.assertTrue(all(obj.modified is not None for obj in all_metrics))
self.assertTrue(all(obj.module_type is not None for obj in all_metrics)
)
self.assertTrue(all(obj.event_authored_time is not None for obj in
all_metrics))
self.assertTrue(all(obj.event_type is not None for obj in all_metrics))
current_job_runs = self.job_run_dao.get_all()
self.assertEqual(len(current_job_runs), 1)
current_job_run = current_job_runs[0]
self.assertTrue(current_job_run.jobId == GenomicJob.
APPOINTMENT_METRICS_FILE_INGEST)
self.assertTrue(current_job_run.runResult ==
GenomicSubProcessResult.SUCCESS)
self.clear_table_after_test('genomic_appointment_event_metrics')
def test_reconcile_appointments_with_metrics(self):
fake_date = parser.parse('2020-05-29T08:00:01-05:00')
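        # Create appointment events for the even-numbered participants and appointment metrics for all four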
for num in range(4):
summary = self.data_generator.create_database_participant_summary()
missing_json = {'event': 'appointment_updated',
'eventAuthoredTime': '2022-09-16T17:18:38Z',
'participantId': f'P{summary.participantId}', 'messageBody':
{'module_type': 'hdr', 'appointment_timestamp':
'2022-09-19T19:30:00+00:00', 'id': 55,
'appointment_timezone': 'America/Los_Angeles', 'location':
'CA', 'contact_number': '18043704252', 'language': 'en',
'source': 'Color'}}
if num % 2 == 0:
self.data_generator.create_database_genomic_appointment(
message_record_id=num, appointment_id=num, event_type=
'appointment_scheduled', module_type='hdr',
participant_id=summary.participantId,
event_authored_time=fake_date, source='Color',
                    appointment_timestamp=format_datetime(clock.CLOCK.now()),
                    appointment_timezone='America/Los_Angeles', location='123 address st',
                    contact_number='17348675309', language='en')
self.data_generator.create_database_genomic_appointment_metric(
participant_id=summary.participantId, appointment_event=
json.dumps(missing_json, indent=4) if num % 2 != 0 else
'foo', file_path='test_file_path', module_type='hdr',
event_authored_time=fake_date, event_type=
'appointment_updated' if num % 2 != 0 else
'appointment_scheduled')
current_events = self.appointment_event_dao.get_all()
self.assertEqual(len(current_events), 2)
current_metrics = self.appointment_metrics_dao.get_all()
self.assertEqual(len(current_metrics), 4)
self.assertTrue(all(obj.reconcile_job_run_id is None for obj in
current_metrics))
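        # Reconciling should create appointment events from the metrics that have no matching event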
with GenomicJobController(GenomicJob.APPOINTMENT_METRICS_RECONCILE
) as controller:
controller.reconcile_appointment_events_from_metrics()
job_run = self.job_run_dao.get_all()
self.assertEqual(len(job_run), 1)
self.assertTrue(job_run[0].jobId == GenomicJob.
APPOINTMENT_METRICS_RECONCILE)
current_events = self.appointment_event_dao.get_all()
self.assertEqual(len(current_events), 4)
scheduled = list(filter(lambda x: x.event_type ==
'appointment_scheduled', current_events))
self.assertEqual(len(scheduled), 2)
self.assertTrue(all(obj.created_from_metric_id is None for obj in
scheduled))
updated = list(filter(lambda x: x.event_type ==
'appointment_updated', current_events))
self.assertEqual(len(updated), 2)
self.assertTrue(all(obj.created_from_metric_id is not None for obj in
updated))
current_metrics = self.appointment_metrics_dao.get_all()
self.assertEqual(len(current_metrics), 4)
self.assertTrue(all(obj.reconcile_job_run_id is not None for obj in
current_metrics))
self.assertTrue(all(obj.reconcile_job_run_id == job_run[0].id for
obj in current_metrics))
self.clear_table_after_test('genomic_appointment_event_metrics')
@mock.patch('rdr_service.services.email_service.EmailService.send_email')
def test_check_appointments_gror_changed(self, email_mock):
fake_date = parser.parse('2022-09-01T13:43:23')
notified_dao = GenomicAppointmentEventNotifiedDao()
config.override_setting(config.GENOMIC_COLOR_PM_EMAIL, [
'[email protected]'])
num_participants = 4
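        # Two of the four participants get a GROR consent value other than 1 (changed consent)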
for num in range(num_participants):
gror = num if num > 1 else 1
summary = self.data_generator.create_database_participant_summary(
consentForStudyEnrollment=1, consentForGenomicsROR=gror)
self.data_generator.create_database_genomic_appointment(
message_record_id=num, appointment_id=num, event_type=
'appointment_scheduled', module_type='hdr', participant_id=
summary.participantId, event_authored_time=fake_date,
source='Color', appointment_timestamp=format_datetime(clock
.CLOCK.now()), appointment_timezone='America/Los_Angeles',
location='123 address st', contact_number='17348675309',
language='en')
changed_ppts = (self.appointment_event_dao.
get_appointments_gror_changed())
self.assertEqual(2, len(changed_ppts))
with GenomicJobController(GenomicJob.CHECK_APPOINTMENT_GROR_CHANGED
) as controller:
controller.check_appointments_gror_changed()
self.assertEqual(email_mock.call_count, 1)
notified_appointments = notified_dao.get_all()
self.assertEqual(2, len(notified_appointments))
summary = self.data_generator.create_database_participant_summary(
consentForStudyEnrollment=1, consentForGenomicsROR=2)
self.data_generator.create_database_genomic_appointment(
message_record_id=5, appointment_id=5, event_type=
'appointment_scheduled', module_type='hdr', participant_id=
            summary.participantId, event_authored_time=fake_date, source='Color',
            appointment_timestamp=format_datetime(clock.CLOCK.now()),
            appointment_timezone='America/Los_Angeles', location='123 address st',
            contact_number='17348675309', language='en')
changed_ppts = (self.appointment_event_dao.
get_appointments_gror_changed())
self.assertEqual(1, len(changed_ppts))
@mock.patch('rdr_service.services.email_service.EmailService.send_email')
def test_check_gcr_14day_escalation(self, email_mock):
fake_date = parser.parse('2022-09-01T13:43:23')
fake_date2 = parser.parse('2022-09-02T14:14:00')
fake_date3 = parser.parse('2022-09-03T15:15:00')
config.override_setting(config.GENOMIC_GCR_ESCALATION_EMAILS, [
'[email protected]'])
self.data_generator.create_database_genomic_set(genomicSetName=
'test', genomicSetCriteria='.', genomicSetVersion=1)
pids = []
for _ in range(6):
summary = self.data_generator.create_database_participant_summary(
consentForStudyEnrollment=1, consentForGenomicsROR=1)
set_member = (self.data_generator.
create_database_genomic_set_member(participantId=summary.
participantId, genomicSetId=1, biobankId=1001,
collectionTubeId=100, sampleId=10, genomeType='aou_wgs'))
self.data_generator.create_database_genomic_member_report_state(
participant_id=summary.participantId, genomic_report_state=
GenomicReportState.HDR_RPT_POSITIVE, genomic_set_member_id=
set_member.id, module='hdr_v1', event_authored_time=fake_date)
pids.append(summary.participantId)
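        # Appointment scenarios: pids[0] scheduled, pids[1] completed, pids[2] scheduled then cancelled; pids[3]-pids[5] have none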
self.data_generator.create_database_genomic_appointment(
message_record_id=101, appointment_id=102, event_type=
'appointment_scheduled', module_type='hdr', participant_id=pids
[0], event_authored_time=fake_date, source='Color',
appointment_timestamp=format_datetime(clock.CLOCK.now()),
appointment_timezone='America/Los_Angeles', location=
'123 address st', contact_number='17348675309', language='en')
self.data_generator.create_database_genomic_appointment(
message_record_id=102, appointment_id=103, event_type=
'appointment_completed', module_type='hdr', participant_id=pids
[1], event_authored_time=fake_date, source='Color',
appointment_timestamp=fake_date, appointment_timezone=
'America/Los_Angeles', location='123 address st',
contact_number='17348675309', language='en')
self.data_generator.create_database_genomic_appointment(
message_record_id=103, appointment_id=104, event_type=
'appointment_scheduled', module_type='hdr', participant_id=pids
[2], event_authored_time=fake_date2, source='Color',
appointment_timestamp=format_datetime(clock.CLOCK.now()),
appointment_timezone='America/Los_Angeles', location=
'123 address st', contact_number='17348675309', language='en')
self.data_generator.create_database_genomic_appointment(
message_record_id=104, appointment_id=104, event_type=
'appointment_cancelled', module_type='hdr', participant_id=pids
[2], event_authored_time=fake_date3, source='Color',
appointment_timestamp=format_datetime(clock.CLOCK.now()),
appointment_timezone='America/Los_Angeles', location=
'123 address st', contact_number='17348675309', language='en')
notified_dao = GenomicDefaultBaseDao(model_type=
GenomicGCROutreachEscalationNotified)
notified_dao.insert_bulk([{'participant_id': pids[4], 'created':
clock.CLOCK.now(), 'modified': clock.CLOCK.now(),
'message_sent': True}, {'participant_id': pids[5], 'created':
clock.CLOCK.now(), 'modified': clock.CLOCK.now(),
'message_sent': False}])
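        # pids[4] and pids[5] already have escalation-notified records; only pids[5] (message not sent) should still be escalated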
with clock.FakeClock(parser.parse('2022-11-1T05:15:00')):
escalated_participants = (self.report_state_dao.
get_hdr_result_positive_no_appointment(num_days=14))
results = [pid[0] for pid in escalated_participants]
self.assertIn(pids[2], results)
self.assertIn(pids[3], results)
self.assertIn(pids[5], results)
self.assertNotIn(pids[0], results)
self.assertNotIn(pids[1], results)
self.assertNotIn(pids[4], results)
with GenomicJobController(GenomicJob.CHECK_GCR_OUTREACH_ESCALATION
) as controller:
controller.check_gcr_escalation(controller.job_id)
self.assertEqual(email_mock.call_count, 3)
self.assertEqual(email_mock.call_args.args[0].subject,
'GCR Outreach 14 Day Escalation')
self.clear_table_after_test('genomic_gcr_outreach_escalation_notified')
<|reserved_special_token_0|>
@mock.patch('rdr_service.services.email_service.EmailService.send_email')
def test_check_gcr_ce_escalation(self, email_mock):
fake_date = parser.parse('2022-09-01T13:43:23')
fake_date2 = parser.parse('2022-09-02T14:14:00')
fake_date3 = parser.parse('2022-09-03T15:15:00')
config.override_setting(config.GENOMIC_GCR_ESCALATION_EMAILS, [
'[email protected]'])
self.data_generator.create_database_genomic_set(genomicSetName=
'test', genomicSetCriteria='.', genomicSetVersion=1)
pids = []
for _ in range(6):
summary = self.data_generator.create_database_participant_summary(
consentForStudyEnrollment=1, consentForGenomicsROR=1)
set_member = (self.data_generator.
create_database_genomic_set_member(participantId=summary.
participantId, genomicSetId=1, biobankId=1001,
collectionTubeId=100, sampleId=10, genomeType='aou_wgs',
participantOrigin='careevolution'))
self.data_generator.create_database_genomic_member_report_state(
participant_id=summary.participantId, genomic_report_state=
GenomicReportState.HDR_RPT_POSITIVE, genomic_set_member_id=
set_member.id, module='hdr_v1', event_authored_time=fake_date)
pids.append(summary.participantId)
self.data_generator.create_database_genomic_appointment(
message_record_id=101, appointment_id=102, event_type=
'appointment_scheduled', module_type='hdr', participant_id=pids
[0], event_authored_time=fake_date, source='Color',
appointment_timestamp=format_datetime(clock.CLOCK.now()),
appointment_timezone='America/Los_Angeles', location=
'123 address st', contact_number='17348675309', language='en')
self.data_generator.create_database_genomic_appointment(
message_record_id=102, appointment_id=103, event_type=
'appointment_completed', module_type='hdr', participant_id=pids
[1], event_authored_time=fake_date, source='Color',
appointment_timestamp=fake_date, appointment_timezone=
'America/Los_Angeles', location='123 address st',
contact_number='17348675309', language='en')
self.data_generator.create_database_genomic_appointment(
message_record_id=103, appointment_id=104, event_type=
'appointment_scheduled', module_type='hdr', participant_id=pids
[2], event_authored_time=fake_date2, source='Color',
appointment_timestamp=format_datetime(clock.CLOCK.now()),
appointment_timezone='America/Los_Angeles', location=
'123 address st', contact_number='17348675309', language='en')
self.data_generator.create_database_genomic_appointment(
message_record_id=104, appointment_id=104, event_type=
'appointment_cancelled', module_type='hdr', participant_id=pids
[2], event_authored_time=fake_date3, source='Color',
appointment_timestamp=format_datetime(clock.CLOCK.now()),
appointment_timezone='America/Los_Angeles', location=
'123 address st', contact_number='17348675309', language='en')
notified_dao = GenomicDefaultBaseDao(model_type=
GenomicGCROutreachEscalationNotified)
notified_dao.insert_bulk([{'participant_id': pids[4], 'created':
clock.CLOCK.now(), 'modified': clock.CLOCK.now(),
'message_sent': True}, {'participant_id': pids[5], 'created':
clock.CLOCK.now(), 'modified': clock.CLOCK.now(),
'message_sent': False}])
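        # The CE escalation query uses a 30-day window and filters to careevolution participants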
with clock.FakeClock(parser.parse('2022-11-1T05:15:00')):
escalated_participants = (self.report_state_dao.
get_hdr_result_positive_no_appointment(num_days=30,
participant_origin='careevolution'))
results = [pid[0] for pid in escalated_participants]
self.assertIn(pids[2], results)
self.assertIn(pids[3], results)
self.assertIn(pids[5], results)
self.assertNotIn(pids[0], results)
self.assertNotIn(pids[1], results)
self.assertNotIn(pids[4], results)
with GenomicJobController(GenomicJob.CHECK_GCR_CE_OUTREACH_ESCALATION
) as controller:
controller.check_gcr_escalation(controller.job_id)
self.assertEqual(email_mock.call_count, 3)
self.assertEqual(email_mock.call_args.args[0].subject,
'GCR Outreach 30 Day Escalation')
self.clear_table_after_test('genomic_gcr_outreach_escalation_notified')
@mock.patch(
'rdr_service.genomic.genomic_job_controller.GenomicJobController.execute_cloud_task'
)
def test_execute_auto_generation_from_last_run(self, cloud_task_mock):
with GenomicJobController(GenomicJob.PR_PR_WORKFLOW) as controller:
controller.job_result = GenomicSubProcessResult.ERROR
controller._end_run()
controller.execute_auto_generation_from_cloud_task()
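        # A job run that ended in ERROR should not trigger the manifest generation cloud task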
last_job_run_status = self.job_run_dao.get_last_run_status_for_job_id(
job_id=GenomicJob.PR_PR_WORKFLOW)
self.assertTrue(last_job_run_status is not None)
        self.assertTrue(last_job_run_status[0] == GenomicSubProcessResult.ERROR)
self.assertEqual(cloud_task_mock.called, False)
self.assertEqual(cloud_task_mock.call_count, 0)
with GenomicJobController(GenomicJob.PR_PR_WORKFLOW) as controller:
controller.job_result = GenomicSubProcessResult.SUCCESS
controller._end_run()
controller.execute_auto_generation_from_cloud_task()
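        # A SUCCESS run should queue the p0 manifest generation cloud task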
last_job_run_status = self.job_run_dao.get_last_run_status_for_job_id(
job_id=GenomicJob.PR_PR_WORKFLOW)
self.assertTrue(last_job_run_status is not None)
        self.assertTrue(last_job_run_status[0] == GenomicSubProcessResult.SUCCESS)
self.assertEqual(cloud_task_mock.called, True)
self.assertTrue(cloud_task_mock.call_args[1].get('payload').get(
'manifest_type') == 'p0')
self.assertTrue(cloud_task_mock.call_args[1].get('task_queue') ==
'genomic-generate-manifest')
all_job_runs = self.job_run_dao.get_all()
self.assertEqual(len(all_job_runs), 2)
self.assertTrue(all(obj.runResult in [GenomicSubProcessResult.
SUCCESS, GenomicSubProcessResult.ERROR] for obj in all_job_runs))
self.assertTrue(all(obj.jobId == GenomicJob.PR_PR_WORKFLOW for obj in
all_job_runs))
<|reserved_special_token_1|>
import datetime
import json
from dateutil import parser
import mock
from python_http_client.exceptions import ForbiddenError
from rdr_service import clock, config
from rdr_service.api_util import open_cloud_file
from rdr_service.clock import FakeClock
from rdr_service.dao.database_utils import format_datetime
from rdr_service.dao.genomics_dao import GenomicGcDataFileDao, GenomicGCValidationMetricsDao, GenomicIncidentDao, \
GenomicSetMemberDao, UserEventMetricsDao, GenomicJobRunDao, GenomicResultWithdrawalsDao, \
GenomicMemberReportStateDao, GenomicAppointmentEventMetricsDao, GenomicAppointmentEventDao, GenomicResultViewedDao, \
GenomicInformingLoopDao, GenomicAppointmentEventNotifiedDao, GenomicDefaultBaseDao
from rdr_service.dao.message_broker_dao import MessageBrokenEventDataDao
from rdr_service.genomic_enums import GenomicIncidentCode, GenomicJob, GenomicWorkflowState, GenomicSubProcessResult, \
GenomicSubProcessStatus, GenomicManifestTypes, GenomicQcStatus, GenomicReportState
from rdr_service.genomic.genomic_job_components import GenomicFileIngester
from rdr_service.genomic.genomic_job_controller import GenomicJobController
from rdr_service.model.genomics import GenomicGcDataFile, GenomicIncident, GenomicSetMember, GenomicGCValidationMetrics,\
GenomicGCROutreachEscalationNotified
from rdr_service.offline.genomics import genomic_pipeline, genomic_cvl_pipeline
from rdr_service.participant_enums import WithdrawalStatus
from tests import test_data
from tests.genomics_tests.test_genomic_utils import create_ingestion_test_file
from tests.helpers.unittest_base import BaseTestCase
class GenomicJobControllerTest(BaseTestCase):
def setUp(self):
super(GenomicJobControllerTest, self).setUp()
self.data_file_dao = GenomicGcDataFileDao()
self.event_data_dao = MessageBrokenEventDataDao()
self.incident_dao = GenomicIncidentDao()
self.member_dao = GenomicSetMemberDao()
self.metrics_dao = GenomicGCValidationMetricsDao()
self.user_event_metrics_dao = UserEventMetricsDao()
self.job_run_dao = GenomicJobRunDao()
self.report_state_dao = GenomicMemberReportStateDao()
self.appointment_event_dao = GenomicAppointmentEventDao()
self.appointment_metrics_dao = GenomicAppointmentEventMetricsDao()
def test_incident_with_long_message(self):
"""Make sure the length of incident messages doesn't cause issues when recording them"""
incident_message = "1" * (GenomicIncident.message.type.length + 20)
mock_slack_handler = mock.MagicMock()
job_controller = GenomicJobController(job_id=1)
job_controller.genomic_alert_slack = mock_slack_handler
job_controller.create_incident(message=incident_message, slack=True)
# Double check that the incident was saved successfully, with part of the message
incident: GenomicIncident = self.session.query(GenomicIncident).one()
self.assertTrue(incident_message.startswith(incident.message))
# Make sure Slack received the full message
mock_slack_handler.send_message_to_webhook.assert_called_with(
message_data={
'text': incident_message
}
)
def test_gvcf_files_ingestion(self):
job_controller = GenomicJobController(job_id=38)
bucket_name = "test_bucket"
file_path = "Wgs_sample_raw_data/SS_VCF_research/BCM_A100153482_21042005280_SIA0013441__1.hard-filtered.gvcf.gz"
file_path_md5 = "Wgs_sample_raw_data/SS_VCF_research/" \
"BCM_A100153482_21042005280_SIA0013441__1.hard-filtered.gvcf.gz.md5sum"
full_path = f'{bucket_name}/{file_path}'
full_path_md5 = f'{bucket_name}/{file_path_md5}'
gen_set = self.data_generator.create_database_genomic_set(
genomicSetName=".",
genomicSetCriteria=".",
genomicSetVersion=1
)
gen_member = self.data_generator.create_database_genomic_set_member(
genomicSetId=gen_set.id,
biobankId="100153482",
sampleId="21042005280",
genomeType="aou_wgs",
genomicWorkflowState=GenomicWorkflowState.AW1
)
gen_job_run = self.data_generator.create_database_genomic_job_run(
jobId=GenomicJob.AW1_MANIFEST,
startTime=clock.CLOCK.now(),
runResult=GenomicSubProcessResult.SUCCESS
)
gen_processed_file = self.data_generator.create_database_genomic_file_processed(
runId=gen_job_run.id,
startTime=clock.CLOCK.now(),
filePath='/test_file_path',
bucketName='test_bucket',
fileName='test_file_name',
)
self.data_generator.create_database_genomic_gc_validation_metrics(
genomicSetMemberId=gen_member.id,
genomicFileProcessedId=gen_processed_file.id
)
job_controller.ingest_data_files_into_gc_metrics(file_path_md5, bucket_name)
metrics = self.metrics_dao.get_metrics_by_member_id(gen_member.id)
self.assertIsNotNone(metrics.gvcfMd5Path)
self.assertEqual(metrics.gvcfMd5Path, full_path_md5)
job_controller.ingest_data_files_into_gc_metrics(file_path, bucket_name)
metrics = self.metrics_dao.get_metrics_by_member_id(gen_member.id)
self.assertIsNotNone(metrics.gvcfPath)
self.assertEqual(metrics.gvcfPath, full_path)
def test_gvcf_files_ingestion_create_incident(self):
bucket_name = "test_bucket"
file_path = "Wgs_sample_raw_data/SS_VCF_research/BCM_A100153482_21042005280_SIA0013441__1.hard-filtered.gvcf.gz"
gen_set = self.data_generator.create_database_genomic_set(
genomicSetName=".",
genomicSetCriteria=".",
genomicSetVersion=1
)
gen_member = self.data_generator.create_database_genomic_set_member(
genomicSetId=gen_set.id,
biobankId="111111111",
sampleId="222222222222",
genomeType="aou_wgs",
genomicWorkflowState=GenomicWorkflowState.AW1
)
gen_job_run = self.data_generator.create_database_genomic_job_run(
jobId=GenomicJob.AW1_MANIFEST,
startTime=clock.CLOCK.now(),
runResult=GenomicSubProcessResult.SUCCESS
)
gen_processed_file = self.data_generator.create_database_genomic_file_processed(
runId=gen_job_run.id,
startTime=clock.CLOCK.now(),
filePath='/test_file_path',
bucketName=bucket_name,
fileName='test_file_name',
)
self.data_generator.create_database_genomic_gc_validation_metrics(
genomicSetMemberId=gen_member.id,
genomicFileProcessedId=gen_processed_file.id
)
with GenomicJobController(GenomicJob.INGEST_DATA_FILES) as controller:
controller.ingest_data_files_into_gc_metrics(file_path, bucket_name)
incident = self.incident_dao.get(1)
self.assertIsNotNone(incident)
self.assertEqual(incident.code, GenomicIncidentCode.UNABLE_TO_FIND_METRIC.name)
self.assertEqual(incident.data_file_path, file_path)
self.assertEqual(incident.message, 'INGEST_DATA_FILES: Cannot find '
'genomics metric record for sample id: '
'21042005280')
def test_accession_data_files(self):
test_bucket_baylor = "fake-data-bucket-baylor"
test_idat_file = "fake-data-bucket-baylor/Genotyping_sample_raw_data/204027270091_R02C01_Grn.idat"
test_vcf_file = "fake-data-bucket-baylor/Genotyping_sample_raw_data/204027270091_R02C01.vcf.gz"
test_cram_file = "fake-data-bucket-baylor/Wgs_sample_raw_data/" \
"CRAMs_CRAIs/BCM_A100134256_21063006771_SIA0017196_1.cram"
test_files = [test_idat_file, test_vcf_file, test_cram_file]
test_time = datetime.datetime(2021, 7, 9, 14, 1, 1)
# run job controller method on each file
with clock.FakeClock(test_time):
for file_path in test_files:
with GenomicJobController(GenomicJob.ACCESSION_DATA_FILES) as controller:
controller.accession_data_files(file_path, test_bucket_baylor)
inserted_files = self.data_file_dao.get_all()
# idat
expected_idat = GenomicGcDataFile(
id=1,
created=test_time,
modified=test_time,
file_path=test_idat_file,
gc_site_id='jh',
bucket_name='fake-data-bucket-baylor',
file_prefix='Genotyping_sample_raw_data',
file_name='204027270091_R02C01_Grn.idat',
file_type='Grn.idat',
identifier_type='chipwellbarcode',
identifier_value='204027270091_R02C01',
ignore_flag=0,
)
# vcf
expected_vcf = GenomicGcDataFile(
id=2,
created=test_time,
modified=test_time,
file_path=test_vcf_file,
gc_site_id='jh',
bucket_name='fake-data-bucket-baylor',
file_prefix='Genotyping_sample_raw_data',
file_name='204027270091_R02C01.vcf.gz',
file_type='vcf.gz',
identifier_type='chipwellbarcode',
identifier_value='204027270091_R02C01',
ignore_flag=0,
)
# cram
expected_cram = GenomicGcDataFile(
id=3,
created=test_time,
modified=test_time,
file_path=test_cram_file,
gc_site_id='bcm',
bucket_name='fake-data-bucket-baylor',
file_prefix='Wgs_sample_raw_data/CRAMs_CRAIs',
file_name='BCM_A100134256_21063006771_SIA0017196_1.cram',
file_type='cram',
identifier_type='sample_id',
identifier_value='21063006771',
ignore_flag=0,
)
# obj mapping
expected_objs = {
0: expected_idat,
1: expected_vcf,
2: expected_cram
}
# verify test objects match expectations
for i in range(3):
self.assertEqual(expected_objs[i].bucket_name, inserted_files[i].bucket_name)
self.assertEqual(expected_objs[i].created, inserted_files[i].created)
self.assertEqual(expected_objs[i].file_name, inserted_files[i].file_name)
self.assertEqual(expected_objs[i].file_path, inserted_files[i].file_path)
self.assertEqual(expected_objs[i].file_prefix, inserted_files[i].file_prefix)
self.assertEqual(expected_objs[i].file_type, inserted_files[i].file_type)
self.assertEqual(expected_objs[i].gc_site_id, inserted_files[i].gc_site_id)
self.assertEqual(expected_objs[i].id, inserted_files[i].id)
self.assertEqual(expected_objs[i].identifier_type, inserted_files[i].identifier_type)
self.assertEqual(expected_objs[i].identifier_value, inserted_files[i].identifier_value)
self.assertEqual(expected_objs[i].ignore_flag, inserted_files[i].ignore_flag)
self.assertEqual(expected_objs[i].metadata, inserted_files[i].metadata)
self.assertEqual(expected_objs[i].modified, inserted_files[i].modified)
def test_updating_members_blocklists(self):
gen_set = self.data_generator.create_database_genomic_set(
genomicSetName=".",
genomicSetCriteria=".",
genomicSetVersion=1
)
ids_should_be_updated = []
# for just created and wf state query and MATCHES criteria
for i in range(4):
ids_should_be_updated.append(
self.data_generator.create_database_genomic_set_member(
genomicSetId=gen_set.id,
biobankId="100153482",
sampleId="21042005280",
genomeType='test_investigation_one' if i & 2 != 0 else 'aou_wgs',
genomicWorkflowState=GenomicWorkflowState.AW0,
ai_an='Y' if i & 2 == 0 else 'N'
).id
)
# for just created and wf state query and DOES NOT MATCH criteria
for i in range(2):
self.data_generator.create_database_genomic_set_member(
genomicSetId=gen_set.id,
biobankId="100153482",
sampleId="21042005280",
genomeType='aou_array',
genomicWorkflowState=GenomicWorkflowState.AW0,
ai_an='N'
)
with GenomicJobController(GenomicJob.UPDATE_MEMBERS_BLOCKLISTS) as controller:
controller.update_members_blocklists()
# current config json in base_config.json
created_members = self.member_dao.get_all()
blocklisted = list(filter(lambda x: x.blockResults == 1 or x.blockResearch == 1, created_members))
        self.assertEqual(sorted(ids_should_be_updated), sorted([obj.id for obj in blocklisted]))
# should be RESEARCH blocked
self.assertTrue(all(
obj.blockResearch == 1 and obj.blockResearchReason is not None and obj.blockResearchReason == 'aian'
for obj in created_members if obj.ai_an == 'Y' and obj.genomicWorkflowState == GenomicWorkflowState.AW0)
)
# should NOT be RESULTS blocked
self.assertTrue(all(
obj.blockResults == 0 and obj.blockResultsReason is None
for obj in created_members if obj.ai_an == 'Y' and obj.genomicWorkflowState == GenomicWorkflowState.AW0)
)
# should be RESEARCH blocked
self.assertTrue(all(
obj.blockResearch == 1 and obj.blockResearchReason is not None and obj.blockResearchReason == 'test_sample_swap'
for obj in created_members if obj.genomeType == 'test_investigation_one' and obj.genomicWorkflowState ==
GenomicWorkflowState.AW0)
)
# should be RESULTS blocked
self.assertTrue(all(
obj.blockResults == 1 and obj.blockResultsReason is not None and obj.blockResultsReason == 'test_sample_swap'
for obj in created_members if obj.genomeType == 'test_investigation_one' and obj.genomicWorkflowState ==
GenomicWorkflowState.AW0)
)
# should NOT be RESEARCH/RESULTS blocked
self.assertTrue(all(
obj.blockResearch == 0 and obj.blockResearchReason is None
for obj in created_members if obj.genomeType == 'aou_array' and obj.genomicWorkflowState ==
GenomicWorkflowState.AW0)
)
self.assertTrue(all(
obj.blockResults == 0 and obj.blockResultsReason is None
for obj in created_members if obj.genomeType == 'aou_array' and obj.genomicWorkflowState ==
GenomicWorkflowState.AW0)
)
# clear current set member records
with self.member_dao.session() as session:
session.query(GenomicSetMember).delete()
run_result = self.job_run_dao.get(1)
self.assertEqual(run_result.runStatus, GenomicSubProcessStatus.COMPLETED)
self.assertEqual(run_result.runResult, GenomicSubProcessResult.SUCCESS)
# for modified data query and MATCHES criteria
for i in range(4):
self.data_generator.create_database_genomic_set_member(
genomicSetId=gen_set.id,
biobankId="100153482",
sampleId="21042005280",
genomeType='test_investigation_one' if i & 2 != 0 else 'aou_wgs',
genomicWorkflowState=GenomicWorkflowState.AW1,
ai_an='Y' if i & 2 == 0 else 'N'
)
with GenomicJobController(GenomicJob.UPDATE_MEMBERS_BLOCKLISTS) as controller:
controller.update_members_blocklists()
modified_members = self.member_dao.get_all()
# should be RESEARCH blocked
self.assertTrue(all(
obj.blockResearch == 1 and obj.blockResearchReason is not None and obj.blockResearchReason == 'aian'
for obj in modified_members if obj.ai_an == 'Y' and obj.genomicWorkflowState == GenomicWorkflowState.AW1)
)
# should NOT be RESULTS blocked
self.assertTrue(all(
obj.blockResults == 0 and obj.blockResultsReason is None
for obj in modified_members if obj.ai_an == 'Y' and obj.genomicWorkflowState == GenomicWorkflowState.AW1)
)
# should be RESEARCH blocked
self.assertTrue(all(
obj.blockResearch == 1 and obj.blockResearchReason is not None and obj.blockResearchReason == 'test_sample_swap'
for obj in modified_members if obj.genomeType == 'test_investigation_one' and obj.genomicWorkflowState ==
GenomicWorkflowState.AW1)
)
# should be RESULTS blocked
self.assertTrue(all(
obj.blockResults == 1 and obj.blockResultsReason is not None and obj.blockResultsReason == 'test_sample_swap'
for obj in modified_members if obj.genomeType == 'test_investigation_one' and obj.genomicWorkflowState ==
GenomicWorkflowState.AW1)
)
run_result = self.job_run_dao.get(2)
self.assertEqual(run_result.runStatus, GenomicSubProcessStatus.COMPLETED)
self.assertEqual(run_result.runResult, GenomicSubProcessResult.SUCCESS)
def test_ingest_user_metrics_file(self):
test_file = 'Genomic-Metrics-File-User-Events-Test.csv'
bucket_name = 'test_bucket'
sub_folder = 'user_events'
pids = []
file_ingester = GenomicFileIngester()
for _ in range(2):
pid = self.data_generator.create_database_participant()
pids.append(pid.participantId)
test_metrics_file = create_ingestion_test_file(
test_file,
bucket_name,
sub_folder)
test_file_path = f'{bucket_name}/{sub_folder}/{test_metrics_file}'
with open_cloud_file(test_file_path) as csv_file:
metrics_to_ingest = file_ingester._read_data_to_ingest(csv_file)
with GenomicJobController(GenomicJob.METRICS_FILE_INGEST) as controller:
controller.ingest_metrics_file(
metric_type='user_events',
file_path=test_file_path,
)
job_run_id = controller.job_run.id
metrics = self.user_event_metrics_dao.get_all()
for pid in pids:
file_metrics = list(filter(lambda x: int(x['participant_id'].split('P')[-1]) == pid, metrics_to_ingest[
'rows']))
participant_ingested_metrics = list(filter(lambda x: x.participant_id == pid, metrics))
self.assertEqual(len(file_metrics), len(participant_ingested_metrics))
self.assertTrue(all(obj.run_id == job_run_id for obj in participant_ingested_metrics))
@mock.patch('rdr_service.genomic.genomic_job_controller.GenomicJobController.execute_cloud_task')
def test_reconcile_pdr_data(self, mock_cloud_task):
# init new job run in __enter__
with GenomicJobController(GenomicJob.RECONCILE_PDR_DATA) as controller:
controller.reconcile_pdr_data()
cloud_task_endpoint = 'rebuild_genomic_table_records_task'
first_run = self.job_run_dao.get_all()
self.assertEqual(mock_cloud_task.call_count, 1)
call_args = mock_cloud_task.call_args_list
self.assertEqual(len(call_args), 1)
self.assertEqual(call_args[0].args[0]['table'], self.job_run_dao.model_type.__tablename__)
self.assertTrue(type(call_args[0].args[0]['ids']) is list)
self.assertEqual(call_args[0].args[0]['ids'], [obj.id for obj in first_run])
self.assertEqual(call_args[0].args[1], cloud_task_endpoint)
participant = self.data_generator.create_database_participant()
gen_set = self.data_generator.create_database_genomic_set(
genomicSetName=".",
genomicSetCriteria=".",
genomicSetVersion=1
)
plus_ten = clock.CLOCK.now() + datetime.timedelta(minutes=10)
plus_ten = plus_ten.replace(microsecond=0)
with FakeClock(plus_ten):
for i in range(2):
gen_member = self.data_generator.create_database_genomic_set_member(
genomicSetId=gen_set.id,
biobankId="100153482",
sampleId="21042005280",
genomeType="aou_wgs",
genomicWorkflowState=GenomicWorkflowState.AW1
)
gen_processed_file = self.data_generator.create_database_genomic_file_processed(
runId=first_run[0].id,
startTime=clock.CLOCK.now(),
filePath=f'test_file_path_{i}',
bucketName='test_bucket',
fileName='test_file_name',
)
self.data_generator.create_database_genomic_gc_validation_metrics(
genomicSetMemberId=gen_member.id,
genomicFileProcessedId=gen_processed_file.id
)
manifest = self.data_generator.create_database_genomic_manifest_file(
manifestTypeId=2,
filePath=f'test_file_path_{i}'
)
self.data_generator.create_database_genomic_manifest_feedback(
inputManifestFileId=manifest.id,
feedbackRecordCount=2
)
self.data_generator.create_database_genomic_user_event_metrics(
participant_id=participant.participantId,
event_name='test_event',
run_id=1,
)
self.data_generator.create_database_genomic_informing_loop(
message_record_id=1,
event_type='informing_loop_decision',
module_type='gem',
participant_id=participant.participantId,
decision_value='maybe_later',
event_authored_time=clock.CLOCK.now()
)
self.data_generator.create_database_genomic_cvl_past_due(
cvl_site_id='co',
email_notification_sent=0,
sample_id='sample_test',
results_type='hdr',
genomic_set_member_id=gen_member.id
)
self.data_generator.create_database_genomic_appointment(
message_record_id=i,
appointment_id=i,
event_type='appointment_scheduled',
module_type='hdr',
participant_id=participant.participantId,
event_authored_time=clock.CLOCK.now(),
source='Color',
appointment_timestamp=format_datetime(clock.CLOCK.now()),
appointment_timezone='America/Los_Angeles',
location='123 address st',
contact_number='17348675309',
language='en'
)
self.data_generator.create_database_genomic_member_report_state(
genomic_set_member_id=gen_member.id,
participant_id=participant.participantId,
module='gem',
genomic_report_state=GenomicReportState.GEM_RPT_READY,
event_authored_time=clock.CLOCK.now()
)
self.data_generator.create_genomic_result_viewed(
participant_id=participant.participantId,
event_type='result_viewed',
event_authored_time=clock.CLOCK.now(),
module_type='gem',
sample_id=gen_member.sampleId
)
# gets new records that were created with last job run from above
with GenomicJobController(GenomicJob.RECONCILE_PDR_DATA) as controller:
controller.reconcile_pdr_data()
affected_tables = [
'genomic_set',
'genomic_set_member',
'genomic_job_run',
'genomic_file_processed',
'genomic_gc_validation_metrics',
'genomic_manifest_file',
'genomic_manifest_feedback',
'genomic_informing_loop',
'genomic_cvl_results_past_due',
'user_event_metrics',
'genomic_member_report_state',
'genomic_result_viewed',
'genomic_appointment_event'
]
num_calls = len(affected_tables) + 1
self.assertEqual(mock_cloud_task.call_count, num_calls)
call_args = mock_cloud_task.call_args_list
self.assertEqual(len(call_args), num_calls)
mock_tables = set([obj[0][0]['table'] for obj in call_args])
mock_endpoint = [obj[0][1] for obj in call_args]
        self.assertEqual(sorted(mock_tables), sorted(affected_tables))
self.assertTrue(all(obj for obj in mock_endpoint if obj == cloud_task_endpoint))
@mock.patch('rdr_service.genomic.genomic_job_controller.GenomicJobController.execute_cloud_task')
def test_retry_manifest_ingestions_if_deltas(self, mock_cloud_task):
bucket_name = "test-bucket"
aw1_file_name = "AW1_wgs_sample_manifests/RDR_AoU_SEQ_PKG-2104-026571.csv"
aw1_manifest_path = f"{bucket_name}/{aw1_file_name}"
aw2_file_name = "AW2_wgs_data_manifests/RDR_AoU_SEQ_DataManifest_04092021.csv"
aw2_manifest_path = f"{bucket_name}/{aw2_file_name}"
gen_set = self.data_generator.create_database_genomic_set(
genomicSetName=".",
genomicSetCriteria=".",
genomicSetVersion=1
)
# Create AW1 job_run
aw1_job_run = self.data_generator.create_database_genomic_job_run(
jobId=GenomicJob.AW1_MANIFEST,
startTime=clock.CLOCK.now(),
endTime=clock.CLOCK.now(),
runResult=GenomicSubProcessResult.SUCCESS
)
# Create AW2 job_run
aw2_job_run = self.data_generator.create_database_genomic_job_run(
jobId=GenomicJob.METRICS_INGESTION,
startTime=clock.CLOCK.now(),
endTime=clock.CLOCK.now(),
runResult=GenomicSubProcessResult.SUCCESS
)
# should have no data
with GenomicJobController(GenomicJob.RETRY_MANIFEST_INGESTIONS) as controller:
controller.retry_manifest_ingestions()
job_run = self.job_run_dao.get(3)
self.assertEqual(job_run.jobId, GenomicJob.RETRY_MANIFEST_INGESTIONS)
self.assertEqual(job_run.runStatus, GenomicSubProcessStatus.COMPLETED)
self.assertEqual(job_run.runResult, GenomicSubProcessResult.NO_FILES)
self.assertEqual(mock_cloud_task.call_count, 0)
self.assertFalse(mock_cloud_task.call_count)
# Create genomic_aw1_raw record
self.data_generator.create_database_genomic_aw1_raw(
file_path=aw1_manifest_path,
package_id="PKG-2104-026571",
biobank_id="A10001",
)
# Create genomic_aw2_raw record
self.data_generator.create_database_genomic_aw2_raw(
file_path=aw2_manifest_path,
biobank_id="A10001",
sample_id="100001",
biobankidsampleid="A10001_100001",
)
# Create AW1 genomic_manifest_file record
aw1_manifest_file = self.data_generator.create_database_genomic_manifest_file(
created=clock.CLOCK.now(),
modified=clock.CLOCK.now(),
uploadDate=clock.CLOCK.now(),
manifestTypeId=GenomicManifestTypes.AW1,
filePath=aw1_manifest_path,
fileName=aw1_file_name,
bucketName=bucket_name,
recordCount=1,
rdrProcessingComplete=1,
rdrProcessingCompleteDate=clock.CLOCK.now(),
)
# Create AW2 genomic_manifest_file record
aw2_manifest_file = self.data_generator.create_database_genomic_manifest_file(
created=clock.CLOCK.now(),
modified=clock.CLOCK.now(),
uploadDate=clock.CLOCK.now(),
manifestTypeId=GenomicManifestTypes.AW2,
filePath=aw2_manifest_path,
fileName=aw2_file_name,
bucketName=bucket_name,
recordCount=1,
rdrProcessingComplete=1,
rdrProcessingCompleteDate=clock.CLOCK.now(),
)
# Create AW1 file_processed
aw1_file_processed = self.data_generator.create_database_genomic_file_processed(
runId=aw1_job_run.id,
startTime=clock.CLOCK.now(),
genomicManifestFileId=aw1_manifest_file.id,
filePath=f"/{aw1_manifest_path}",
bucketName=bucket_name,
fileName=aw1_file_name,
)
# Create AW2 file_processed
aw2_file_processed = self.data_generator.create_database_genomic_file_processed(
runId=aw2_job_run.id,
startTime=clock.CLOCK.now(),
genomicManifestFileId=aw2_manifest_file.id,
filePath=f"/{aw2_manifest_path}",
bucketName=bucket_name,
fileName=aw2_file_name,
)
# genomic_set_member for AW1
gen_member = self.data_generator.create_database_genomic_set_member(
genomicSetId=gen_set.id,
biobankId="100153482",
sampleId="21042005280",
genomeType="aou_wgs",
genomicWorkflowState=GenomicWorkflowState.AW1,
aw1FileProcessedId=aw1_file_processed.id
)
# genomic_gc_validation_metrics for AW1
self.data_generator.create_database_genomic_gc_validation_metrics(
genomicSetMemberId=gen_member.id,
genomicFileProcessedId=aw2_file_processed.id
)
# one AW1/AW2 with no deltas
with GenomicJobController(GenomicJob.RETRY_MANIFEST_INGESTIONS) as controller:
controller.retry_manifest_ingestions()
job_run = self.job_run_dao.get(4)
self.assertEqual(job_run.jobId, GenomicJob.RETRY_MANIFEST_INGESTIONS)
self.assertEqual(job_run.runStatus, GenomicSubProcessStatus.COMPLETED)
self.assertEqual(job_run.runResult, GenomicSubProcessResult.NO_FILES)
self.assertEqual(mock_cloud_task.call_count, 0)
self.assertFalse(mock_cloud_task.call_count)
# empty tables resulting in deltas and cloud task calls
with self.member_dao.session() as session:
session.query(GenomicGCValidationMetrics).delete()
session.query(GenomicSetMember).delete()
with GenomicJobController(GenomicJob.RETRY_MANIFEST_INGESTIONS) as controller:
controller.retry_manifest_ingestions()
job_run = self.job_run_dao.get(5)
self.assertEqual(job_run.jobId, GenomicJob.RETRY_MANIFEST_INGESTIONS)
self.assertEqual(job_run.runStatus, GenomicSubProcessStatus.COMPLETED)
self.assertEqual(job_run.runResult, GenomicSubProcessResult.SUCCESS)
# one AW1/AW2 with deltas
self.assertEqual(mock_cloud_task.call_count, 2)
self.assertTrue(mock_cloud_task.call_count)
call_args = mock_cloud_task.call_args_list
self.assertEqual(len(call_args), 2)
cloud_task_endpoint = ['ingest_aw1_manifest_task', 'ingest_aw2_manifest_task']
mock_endpoint = [obj[0][1] for obj in call_args]
self.assertTrue(all(obj for obj in mock_endpoint if obj == cloud_task_endpoint))
mock_buckets = set([obj[0][0]['bucket_name'] for obj in call_args])
        self.assertEqual(len(mock_buckets), 1)
self.assertTrue(list(mock_buckets)[0] == bucket_name)
def test_calculate_informing_loop_ready_flags(self):
num_participants = 4
gen_set = self.data_generator.create_database_genomic_set(
genomicSetName=".",
genomicSetCriteria=".",
genomicSetVersion=1
)
for num in range(num_participants):
plus_num = clock.CLOCK.now() + datetime.timedelta(minutes=num)
plus_num = plus_num.replace(microsecond=0)
with FakeClock(plus_num):
summary = self.data_generator.create_database_participant_summary(
consentForStudyEnrollment=1,
consentForGenomicsROR=1
)
stored_sample = self.data_generator.create_database_biobank_stored_sample(
biobankId=summary.biobankId,
biobankOrderIdentifier=self.fake.pyint()
)
collection_site = self.data_generator.create_database_site(
siteType='Clinic'
)
order = self.data_generator.create_database_biobank_order(
collectedSiteId=collection_site.siteId,
participantId=summary.participantId,
finalizedTime=plus_num
)
self.data_generator.create_database_biobank_order_identifier(
value=stored_sample.biobankOrderIdentifier,
biobankOrderId=order.biobankOrderId,
system="1",
)
self.data_generator.create_database_biobank_order_identifier(
value=stored_sample.biobankOrderIdentifier,
biobankOrderId=order.biobankOrderId,
system="2",
)
member = self.data_generator.create_database_genomic_set_member(
genomicSetId=gen_set.id,
participantId=summary.participantId,
genomeType=config.GENOME_TYPE_WGS,
qcStatus=GenomicQcStatus.PASS,
gcManifestSampleSource='Whole Blood',
collectionTubeId=stored_sample.biobankStoredSampleId
)
self.data_generator.create_database_genomic_gc_validation_metrics(
genomicSetMemberId=member.id,
sexConcordance='True',
drcFpConcordance='Pass',
drcSexConcordance='Pass',
processingStatus='Pass'
)
members_for_ready_loop = self.member_dao.get_members_for_informing_loop_ready()
self.assertEqual(len(members_for_ready_loop), num_participants)
current_set_members = self.member_dao.get_all()
self.assertTrue(all(obj.informingLoopReadyFlag == 0 for obj in current_set_members))
self.assertTrue(all(obj.informingLoopReadyFlagModified is None for obj in current_set_members))
with GenomicJobController(GenomicJob.CALCULATE_INFORMING_LOOP_READY) as controller:
controller.calculate_informing_loop_ready_flags()
        # no calculation limit config is set, so the controller method returns without setting any flags
members_for_ready_loop = self.member_dao.get_members_for_informing_loop_ready()
self.assertEqual(len(members_for_ready_loop), num_participants)
calculation_limit = 2
config.override_setting(config.CALCULATE_READY_FLAG_LIMIT, [calculation_limit])
with GenomicJobController(GenomicJob.CALCULATE_INFORMING_LOOP_READY) as controller:
controller.calculate_informing_loop_ready_flags()
current_set_members = self.member_dao.get_all()
self.assertTrue(any(obj.informingLoopReadyFlag == 1 for obj in current_set_members))
self.assertTrue(any(obj.informingLoopReadyFlagModified is not None for obj in current_set_members))
current_loops_set = [obj for obj in current_set_members if obj.informingLoopReadyFlag == 1
and obj.informingLoopReadyFlagModified is not None]
self.assertEqual(len(current_loops_set), calculation_limit)
members_for_ready_loop = self.member_dao.get_members_for_informing_loop_ready()
self.assertEqual(len(members_for_ready_loop), num_participants // 2)
with GenomicJobController(GenomicJob.CALCULATE_INFORMING_LOOP_READY) as controller:
controller.calculate_informing_loop_ready_flags()
current_set_members = self.member_dao.get_all()
self.assertTrue(all(obj.informingLoopReadyFlag == 1 for obj in current_set_members))
self.assertTrue(all(obj.informingLoopReadyFlagModified is not None for obj in current_set_members))
members_for_ready_loop = self.member_dao.get_members_for_informing_loop_ready()
self.assertEqual(len(members_for_ready_loop), 0)
@mock.patch('rdr_service.services.email_service.EmailService.send_email')
def test_getting_results_withdrawn(self, email_mock):
num_participants = 4
result_withdrawal_dao = GenomicResultWithdrawalsDao()
gen_set = self.data_generator.create_database_genomic_set(
genomicSetName=".",
genomicSetCriteria=".",
genomicSetVersion=1
)
gen_job_run = self.data_generator.create_database_genomic_job_run(
jobId=GenomicJob.AW1_MANIFEST,
startTime=clock.CLOCK.now(),
runResult=GenomicSubProcessResult.SUCCESS
)
pids = []
for num in range(num_participants):
summary = self.data_generator.create_database_participant_summary(
consentForStudyEnrollment=1,
consentForGenomicsROR=1,
withdrawalStatus=WithdrawalStatus.EARLY_OUT
)
self.data_generator.create_database_genomic_set_member(
genomicSetId=gen_set.id,
participantId=summary.participantId,
genomeType=config.GENOME_TYPE_ARRAY,
gemA1ManifestJobRunId=gen_job_run.id if num % 2 == 0 else None
)
self.data_generator.create_database_genomic_set_member(
genomicSetId=gen_set.id,
participantId=summary.participantId,
genomeType=config.GENOME_TYPE_WGS,
cvlW1ilHdrJobRunId=gen_job_run.id
)
pids.append(summary.participantId)
config.override_setting(config.RDR_GENOMICS_NOTIFICATION_EMAIL, '[email protected]')
with GenomicJobController(GenomicJob.RESULTS_PIPELINE_WITHDRAWALS) as controller:
controller.check_results_withdrawals()
# mock checks should be two => 1 GEM 1 HEALTH
self.assertEqual(email_mock.call_count, 2)
call_args = email_mock.call_args_list
self.assertTrue(any('GEM' in call.args[0].subject for call in call_args))
self.assertTrue(any('HEALTH' in call.args[0].subject for call in call_args))
job_runs = self.job_run_dao.get_all()
current_job_run = list(filter(lambda x: x.jobId == GenomicJob.RESULTS_PIPELINE_WITHDRAWALS, job_runs))[0]
self.assertTrue(current_job_run.runResult == GenomicSubProcessResult.SUCCESS)
all_withdrawal_records = result_withdrawal_dao.get_all()
self.assertTrue(len(all_withdrawal_records) == len(pids))
self.assertTrue(all(obj.participant_id in pids for obj in all_withdrawal_records))
array_results = list(filter(lambda x: x.array_results == 1, all_withdrawal_records))
# should only be 2
        self.assertEqual(len(array_results), 2)
cvl_results = list(filter(lambda x: x.cvl_results == 1, all_withdrawal_records))
# should be 4 for num of participants
        self.assertEqual(len(cvl_results), num_participants)
with GenomicJobController(GenomicJob.RESULTS_PIPELINE_WITHDRAWALS) as controller:
controller.check_results_withdrawals()
# mock checks should still be two on account of no records
self.assertEqual(email_mock.call_count, 2)
job_runs = self.job_run_dao.get_all()
current_job_run = list(filter(lambda x: x.jobId == GenomicJob.RESULTS_PIPELINE_WITHDRAWALS, job_runs))[1]
self.assertTrue(current_job_run.runResult == GenomicSubProcessResult.NO_RESULTS)
def test_gem_results_to_report_state(self):
num_participants = 8
gen_set = self.data_generator.create_database_genomic_set(
genomicSetName=".",
genomicSetCriteria=".",
genomicSetVersion=1
)
gem_a2_job_run = self.data_generator.create_database_genomic_job_run(
jobId=GenomicJob.GEM_A2_MANIFEST,
startTime=clock.CLOCK.now(),
runResult=GenomicSubProcessResult.SUCCESS
)
pids_to_update, member_ids = [], []
for num in range(num_participants):
summary = self.data_generator.create_database_participant_summary(
consentForStudyEnrollment=1,
consentForGenomicsROR=1,
withdrawalStatus=WithdrawalStatus.EARLY_OUT
)
member = self.data_generator.create_database_genomic_set_member(
genomicSetId=gen_set.id,
participantId=summary.participantId,
genomeType=config.GENOME_TYPE_ARRAY
)
if num % 2 == 0:
member_ids.append(member.id)
pids_to_update.append(summary.participantId)
with GenomicJobController(GenomicJob.GEM_RESULT_REPORTS) as controller:
controller.gem_results_to_report_state()
current_job_runs = self.job_run_dao.get_all()
self.assertEqual(len(current_job_runs), 2)
current_job_run = list(filter(lambda x: x.jobId == GenomicJob.GEM_RESULT_REPORTS, current_job_runs))[0]
self.assertTrue(current_job_run.runResult == GenomicSubProcessResult.NO_RESULTS)
current_members = self.member_dao.get_all()
        # update 4 members so the next run returns them as ready for GEM report states
for member in current_members:
if member.participantId in pids_to_update:
member.gemA2ManifestJobRunId = gem_a2_job_run.id
member.genomicWorkflowState = GenomicWorkflowState.GEM_RPT_READY
self.member_dao.update(member)
with GenomicJobController(GenomicJob.GEM_RESULT_REPORTS) as controller:
controller.gem_results_to_report_state()
current_job_runs = self.job_run_dao.get_all()
self.assertEqual(len(current_job_runs), 3)
current_job_run = list(filter(lambda x: x.jobId == GenomicJob.GEM_RESULT_REPORTS, current_job_runs))[1]
self.assertTrue(current_job_run.runResult == GenomicSubProcessResult.SUCCESS)
current_gem_report_states = self.report_state_dao.get_all()
self.assertEqual(len(current_gem_report_states), len(pids_to_update))
self.assertTrue(all(obj.event_type == 'result_ready' for obj in current_gem_report_states))
self.assertTrue(all(obj.event_authored_time is not None for obj in current_gem_report_states))
self.assertTrue(all(obj.module == 'gem' for obj in current_gem_report_states))
self.assertTrue(
all(obj.genomic_report_state == GenomicReportState.GEM_RPT_READY for obj in current_gem_report_states)
)
self.assertTrue(
all(obj.genomic_report_state_str == GenomicReportState.GEM_RPT_READY.name for obj in
current_gem_report_states)
)
self.assertTrue(
all(obj.genomic_set_member_id in member_ids for obj in
current_gem_report_states)
)
        # report states already exist for these 4 members, so nothing new should be returned
with GenomicJobController(GenomicJob.GEM_RESULT_REPORTS) as controller:
controller.gem_results_to_report_state()
current_job_runs = self.job_run_dao.get_all()
self.assertEqual(len(current_job_runs), 4)
current_job_run = list(filter(lambda x: x.jobId == GenomicJob.GEM_RESULT_REPORTS, current_job_runs))[2]
self.assertTrue(current_job_run.runResult == GenomicSubProcessResult.NO_RESULTS)
self.clear_table_after_test('genomic_member_report_state')
def test_reconcile_informing_loop(self):
event_dao = UserEventMetricsDao()
event_dao.truncate() # for test suite
il_dao = GenomicInformingLoopDao()
for pid in range(8):
self.data_generator.create_database_participant(participantId=1 + pid, biobankId=1 + pid)
# Set up initial job run ID
self.data_generator.create_database_genomic_job_run(
jobId=GenomicJob.METRICS_FILE_INGEST,
startTime=clock.CLOCK.now()
)
# create genomic set
self.data_generator.create_database_genomic_set(
genomicSetName='test',
genomicSetCriteria='.',
genomicSetVersion=1
)
# insert set members
for b in ["aou_array", "aou_wgs"]:
for i in range(1, 9):
self.data_generator.create_database_genomic_set_member(
participantId=i,
genomicSetId=1,
biobankId=i,
collectionTubeId=100 + i,
sampleId=10 + i,
genomeType=b,
)
# Set up ingested metrics data
events = ['gem.informing_loop.started',
'gem.informing_loop.screen8_no',
'gem.informing_loop.screen8_yes',
'hdr.informing_loop.started',
'gem.informing_loop.screen3',
'pgx.informing_loop.screen8_no',
'hdr.informing_loop.screen10_no']
for p in range(4):
for i in range(len(events)):
self.data_generator.create_database_genomic_user_event_metrics(
created=clock.CLOCK.now(),
modified=clock.CLOCK.now(),
participant_id=p + 1,
created_at=datetime.datetime(2021, 12, 29, 00) + datetime.timedelta(hours=i),
event_name=events[i],
run_id=1,
ignore_flag=0,
)
# Set up informing loop from message broker records
decisions = [None, 'no', 'yes']
for p in range(3):
for i in range(2):
self.data_generator.create_database_genomic_informing_loop(
message_record_id=i,
event_type='informing_loop_started' if i == 0 else 'informing_loop_decision',
module_type='gem',
participant_id=p + 1,
decision_value=decisions[i],
sample_id=100 + p,
event_authored_time=datetime.datetime(2021, 12, 29, 00) + datetime.timedelta(hours=i)
)
# Test for no message but yes user event
self.data_generator.create_database_genomic_user_event_metrics(
created=clock.CLOCK.now(),
modified=clock.CLOCK.now(),
participant_id=6,
created_at=datetime.datetime(2021, 12, 29, 00),
event_name='gem.informing_loop.screen8_yes',
run_id=1,
ignore_flag=0,
)
# Run reconcile job
genomic_pipeline.reconcile_informing_loop_responses()
# Test mismatched GEM data ingested correctly
pid_list = [1, 2, 3, 6]
new_il_values = il_dao.get_latest_il_for_pids(
pid_list=pid_list,
module="gem"
)
for value in new_il_values:
self.assertEqual("yes", value.decision_value)
pid_list = [1, 2, 3, 4]
for module in ["hdr", "pgx"]:
new_il_values = il_dao.get_latest_il_for_pids(
pid_list=pid_list,
module=module
)
for value in new_il_values:
self.assertEqual("no", value.decision_value)
self.assertIsNotNone(value.created_from_metric_id)
def test_reconcile_message_broker_results_ready(self):
# Create Test Participants' data
# create genomic set
self.data_generator.create_database_genomic_set(
genomicSetName='test',
genomicSetCriteria='.',
genomicSetVersion=1
)
# Set up initial job run ID
self.data_generator.create_database_genomic_job_run(
jobId=GenomicJob.METRICS_FILE_INGEST,
startTime=clock.CLOCK.now()
)
for pid in range(7):
self.data_generator.create_database_participant(participantId=1 + pid, biobankId=1 + pid)
# insert set members and event metrics records
for i in range(1, 6):
self.data_generator.create_database_genomic_set_member(
participantId=i,
genomicSetId=1,
biobankId=i,
collectionTubeId=100 + i,
sampleId=10 + i,
genomeType="aou_wgs",
)
# 3 PGX records
if i < 4:
self.data_generator.create_database_genomic_user_event_metrics(
participant_id=i,
created_at=datetime.datetime(2022, 10, 6, 00),
event_name="pgx.result_ready",
run_id=1,
)
# 1 HDR Positive
if i == 4:
self.data_generator.create_database_genomic_user_event_metrics(
participant_id=i,
created_at=datetime.datetime(2022, 10, 6, 00),
event_name="hdr.result_ready.informative",
run_id=1,
)
# 1 HDR uninformative
if i == 5:
self.data_generator.create_database_genomic_user_event_metrics(
participant_id=i,
created_at=datetime.datetime(2022, 10, 6, 00),
event_name="hdr.result_ready.uninformative",
run_id=1,
)
# Run job
genomic_cvl_pipeline.reconcile_message_broker_results_ready()
# Test correct data inserted
report_state_dao = GenomicMemberReportStateDao()
states = report_state_dao.get_all()
self.assertEqual(5, len(states))
pgx_records = [rec for rec in states if rec.module == "pgx_v1"]
hdr_record_uninf = [rec for rec in states
if rec.genomic_report_state == GenomicReportState.HDR_RPT_UNINFORMATIVE][0]
hdr_record_pos = [rec for rec in states
if rec.genomic_report_state == GenomicReportState.HDR_RPT_POSITIVE][0]
for pgx_record in pgx_records:
self.assertEqual(GenomicReportState.PGX_RPT_READY, pgx_record.genomic_report_state)
self.assertEqual("PGX_RPT_READY", pgx_record.genomic_report_state_str)
self.assertEqual(int(pgx_record.sample_id), pgx_record.participant_id + 10)
self.assertEqual("result_ready", pgx_record.event_type)
self.assertEqual(datetime.datetime(2022, 10, 6, 00), pgx_record.event_authored_time)
self.assertIsNotNone(pgx_record.created_from_metric_id)
self.assertEqual("HDR_RPT_UNINFORMATIVE", hdr_record_uninf.genomic_report_state_str)
self.assertEqual(int(hdr_record_uninf.sample_id), hdr_record_uninf.participant_id + 10)
self.assertEqual("result_ready", hdr_record_uninf.event_type)
self.assertEqual(datetime.datetime(2022, 10, 6, 00), hdr_record_uninf.event_authored_time)
self.assertIsNotNone(hdr_record_uninf.created_from_metric_id)
self.assertEqual("HDR_RPT_POSITIVE", hdr_record_pos.genomic_report_state_str)
self.assertEqual(int(hdr_record_pos.sample_id), hdr_record_pos.participant_id + 10)
self.assertEqual("result_ready", hdr_record_pos.event_type)
self.assertEqual(datetime.datetime(2022, 10, 6, 00), hdr_record_pos.event_authored_time)
self.assertIsNotNone(hdr_record_pos.created_from_metric_id)
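# Verifies that pgx/hdr opened_at user event metrics are reconciled into
# genomic_result_viewed records.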
def test_reconcile_message_broker_results_viewed(self):
# Create Test Participants' data
# create genomic set
self.data_generator.create_database_genomic_set(
genomicSetName='test',
genomicSetCriteria='.',
genomicSetVersion=1
)
# Set up initial job run ID
self.data_generator.create_database_genomic_job_run(
jobId=GenomicJob.METRICS_FILE_INGEST,
startTime=clock.CLOCK.now()
)
for pid in range(3):
self.data_generator.create_database_participant(participantId=1 + pid, biobankId=1 + pid)
# insert set members and event metrics records
for i in range(1, 3):
self.data_generator.create_database_genomic_set_member(
participantId=i,
genomicSetId=1,
biobankId=i,
collectionTubeId=100 + i,
sampleId=10 + i,
genomeType="aou_wgs",
)
# 1 PGX Viewed
if i == 1:
self.data_generator.create_database_genomic_user_event_metrics(
participant_id=i,
created_at=datetime.datetime(2022, 10, 6, 00),
event_name="pgx.opened_at",
run_id=1,
)
# 1 HDR Viewed
if i == 2:
self.data_generator.create_database_genomic_user_event_metrics(
participant_id=i,
created_at=datetime.datetime(2022, 10, 6, 00),
event_name="hdr.opened_at",
run_id=1,
)
genomic_cvl_pipeline.reconcile_message_broker_results_viewed()
# Test correct data inserted
result_viewed_dao = GenomicResultViewedDao()
results = result_viewed_dao.get_all()
self.assertEqual(2, len(results))
for record in results:
if record.participant_id == 1:
self.assertEqual("pgx_v1", record.module_type)
else:
self.assertEqual("hdr_v1", record.module_type)
self.assertEqual(int(record.sample_id), record.participant_id + 10)
self.assertEqual("result_viewed", record.event_type)
self.assertEqual(datetime.datetime(2022, 10, 6, 00), record.first_viewed)
self.assertIsNotNone(record.created_from_metric_id)
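# Verifies ingestion of an appointment events JSON file from a cloud bucket into
# genomic_appointment_event_metrics.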
def test_ingest_appointment_metrics_file(self):
test_file = 'Genomic-Metrics-File-Appointment-Events-Test.json'
bucket_name = 'test_bucket'
sub_folder = 'appointment_events'
pids = []
for _ in range(4):
summary = self.data_generator.create_database_participant_summary()
pids.append(summary.participantId)
test_file_path = f'{bucket_name}/{sub_folder}/{test_file}'
appointment_data = test_data.load_test_data_json(
"Genomic-Metrics-File-Appointment-Events-Test.json")
appointment_data_str = json.dumps(appointment_data, indent=4)
with open_cloud_file(test_file_path, mode='wb') as cloud_file:
cloud_file.write(appointment_data_str.encode("utf-8"))
with GenomicJobController(GenomicJob.APPOINTMENT_METRICS_FILE_INGEST) as controller:
controller.ingest_appointment_metrics_file(
file_path=test_file_path,
)
all_metrics = self.appointment_metrics_dao.get_all()
# should be 5 metric records for what's in the json file
self.assertEqual(len(all_metrics), 5)
self.assertTrue(all(obj.participant_id in pids for obj in all_metrics))
self.assertTrue(all(obj.file_path == test_file_path for obj in all_metrics))
self.assertTrue(all(obj.appointment_event is not None for obj in all_metrics))
self.assertTrue(all(obj.created is not None for obj in all_metrics))
self.assertTrue(all(obj.modified is not None for obj in all_metrics))
self.assertTrue(all(obj.module_type is not None for obj in all_metrics))
self.assertTrue(all(obj.event_authored_time is not None for obj in all_metrics))
self.assertTrue(all(obj.event_type is not None for obj in all_metrics))
current_job_runs = self.job_run_dao.get_all()
self.assertEqual(len(current_job_runs), 1)
current_job_run = current_job_runs[0]
self.assertTrue(current_job_run.jobId == GenomicJob.APPOINTMENT_METRICS_FILE_INGEST)
self.assertTrue(current_job_run.runResult == GenomicSubProcessResult.SUCCESS)
self.clear_table_after_test('genomic_appointment_event_metrics')
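# Verifies that appointment metrics with no matching appointment event create the
# missing events and are stamped with the reconcile job run id.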
def test_reconcile_appointments_with_metrics(self):
fake_date = parser.parse('2020-05-29T08:00:01-05:00')
for num in range(4):
summary = self.data_generator.create_database_participant_summary()
missing_json = {
"event": "appointment_updated",
"eventAuthoredTime": "2022-09-16T17:18:38Z",
"participantId": f'P{summary.participantId}',
"messageBody": {
"module_type": "hdr",
"appointment_timestamp": "2022-09-19T19:30:00+00:00",
"id": 55,
"appointment_timezone": "America/Los_Angeles",
"location": "CA",
"contact_number": "18043704252",
"language": "en",
"source": "Color"
}
}
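# Even-numbered iterations create a scheduled appointment event plus a placeholder
# metric; odd-numbered iterations create only an 'appointment_updated' metric
# carrying the payload above.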
if num % 2 == 0:
self.data_generator.create_database_genomic_appointment(
message_record_id=num,
appointment_id=num,
event_type='appointment_scheduled',
module_type='hdr',
participant_id=summary.participantId,
event_authored_time=fake_date,
source='Color',
appointment_timestamp=format_datetime(clock.CLOCK.now()),
appointment_timezone='America/Los_Angeles',
location='123 address st',
contact_number='17348675309',
language='en'
)
self.data_generator.create_database_genomic_appointment_metric(
participant_id=summary.participantId,
appointment_event=json.dumps(missing_json, indent=4) if num % 2 != 0 else 'foo',
file_path='test_file_path',
module_type='hdr',
event_authored_time=fake_date,
event_type='appointment_updated' if num % 2 != 0 else 'appointment_scheduled'
)
current_events = self.appointment_event_dao.get_all()
# should be 2 initial appointment events
self.assertEqual(len(current_events), 2)
current_metrics = self.appointment_metrics_dao.get_all()
# should be 4 initial appointment metrics
self.assertEqual(len(current_metrics), 4)
self.assertTrue(all(obj.reconcile_job_run_id is None for obj in current_metrics))
with GenomicJobController(GenomicJob.APPOINTMENT_METRICS_RECONCILE) as controller:
controller.reconcile_appointment_events_from_metrics()
job_run = self.job_run_dao.get_all()
self.assertEqual(len(job_run), 1)
self.assertTrue(job_run[0].jobId == GenomicJob.APPOINTMENT_METRICS_RECONCILE)
current_events = self.appointment_event_dao.get_all()
# should be 4 appointment events: 2 initial + 2 added
self.assertEqual(len(current_events), 4)
scheduled = list(filter(lambda x: x.event_type == 'appointment_scheduled', current_events))
self.assertEqual(len(scheduled), 2)
self.assertTrue(all(obj.created_from_metric_id is None for obj in scheduled))
updated = list(filter(lambda x: x.event_type == 'appointment_updated', current_events))
self.assertEqual(len(updated), 2)
self.assertTrue(all(obj.created_from_metric_id is not None for obj in updated))
current_metrics = self.appointment_metrics_dao.get_all()
# should STILL be 4 appointment metric records (no new metrics created)
self.assertEqual(len(current_metrics), 4)
self.assertTrue(all(obj.reconcile_job_run_id is not None for obj in current_metrics))
self.assertTrue(all(obj.reconcile_job_run_id == job_run[0].id for obj in current_metrics))
self.clear_table_after_test('genomic_appointment_event_metrics')
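# Verifies that participants whose GROR consent changed after an appointment was
# scheduled trigger a single notification email and are recorded as notified.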
@mock.patch('rdr_service.services.email_service.EmailService.send_email')
def test_check_appointments_gror_changed(self, email_mock):
fake_date = parser.parse("2022-09-01T13:43:23")
notified_dao = GenomicAppointmentEventNotifiedDao()
config.override_setting(config.GENOMIC_COLOR_PM_EMAIL, ['[email protected]'])
num_participants = 4
for num in range(num_participants):
gror = num if num > 1 else 1
summary = self.data_generator.create_database_participant_summary(
consentForStudyEnrollment=1,
consentForGenomicsROR=gror
)
self.data_generator.create_database_genomic_appointment(
message_record_id=num,
appointment_id=num,
event_type='appointment_scheduled',
module_type='hdr',
participant_id=summary.participantId,
event_authored_time=fake_date,
source='Color',
appointment_timestamp=format_datetime(clock.CLOCK.now()),
appointment_timezone='America/Los_Angeles',
location='123 address st',
contact_number='17348675309',
language='en'
)
changed_ppts = self.appointment_event_dao.get_appointments_gror_changed()
self.assertEqual(2, len(changed_ppts))
with GenomicJobController(GenomicJob.CHECK_APPOINTMENT_GROR_CHANGED) as controller:
controller.check_appointments_gror_changed()
self.assertEqual(email_mock.call_count, 1)
notified_appointments = notified_dao.get_all()
self.assertEqual(2, len(notified_appointments))
# test that already-notified participants are not returned by the query
summary = self.data_generator.create_database_participant_summary(
consentForStudyEnrollment=1,
consentForGenomicsROR=2
)
self.data_generator.create_database_genomic_appointment(
message_record_id=5,
appointment_id=5,
event_type='appointment_scheduled',
module_type='hdr',
participant_id=summary.participantId,
event_authored_time=fake_date,
source='Color',
appointment_timestamp=format_datetime(clock.CLOCK.now()),
appointment_timezone='America/Los_Angeles',
location='123 address st',
contact_number='17348675309',
language='en'
)
changed_ppts = self.appointment_event_dao.get_appointments_gror_changed()
self.assertEqual(1, len(changed_ppts))
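# Verifies the 14-day GCR outreach escalation: HDR-positive participants without a
# scheduled or completed appointment are escalated and emailed.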
@mock.patch('rdr_service.services.email_service.EmailService.send_email')
def test_check_gcr_14day_escalation(self, email_mock):
fake_date = parser.parse("2022-09-01T13:43:23")
fake_date2 = parser.parse("2022-09-02T14:14:00")
fake_date3 = parser.parse("2022-09-03T15:15:00")
config.override_setting(config.GENOMIC_GCR_ESCALATION_EMAILS, ['[email protected]'])
self.data_generator.create_database_genomic_set(
genomicSetName='test',
genomicSetCriteria='.',
genomicSetVersion=1
)
pids = []
for _ in range(6):
summary = self.data_generator.create_database_participant_summary(
consentForStudyEnrollment=1,
consentForGenomicsROR=1
)
set_member = self.data_generator.create_database_genomic_set_member(
participantId=summary.participantId,
genomicSetId=1,
biobankId=1001,
collectionTubeId=100,
sampleId=10,
genomeType="aou_wgs",
)
self.data_generator.create_database_genomic_member_report_state(
participant_id=summary.participantId,
genomic_report_state=GenomicReportState.HDR_RPT_POSITIVE,
genomic_set_member_id=set_member.id,
module='hdr_v1',
event_authored_time=fake_date
)
pids.append(summary.participantId)
# Appointment scheduled in future: don't notify
self.data_generator.create_database_genomic_appointment(
message_record_id=101,
appointment_id=102,
event_type='appointment_scheduled',
module_type='hdr',
participant_id=pids[0],
event_authored_time=fake_date,
source='Color',
appointment_timestamp=format_datetime(clock.CLOCK.now()),
appointment_timezone='America/Los_Angeles',
location='123 address st',
contact_number='17348675309',
language='en'
)
# Appointment completed: don't notify
self.data_generator.create_database_genomic_appointment(
message_record_id=102,
appointment_id=103,
event_type='appointment_completed',
module_type='hdr',
participant_id=pids[1],
event_authored_time=fake_date,
source='Color',
appointment_timestamp=fake_date,
appointment_timezone='America/Los_Angeles',
location='123 address st',
contact_number='17348675309',
language='en'
)
# Appointment scheduled then canceled: notify
self.data_generator.create_database_genomic_appointment(
message_record_id=103,
appointment_id=104,
event_type='appointment_scheduled',
module_type='hdr',
participant_id=pids[2],
event_authored_time=fake_date2,
source='Color',
appointment_timestamp=format_datetime(clock.CLOCK.now()),
appointment_timezone='America/Los_Angeles',
location='123 address st',
contact_number='17348675309',
language='en'
)
self.data_generator.create_database_genomic_appointment(
message_record_id=104,
appointment_id=104,
event_type='appointment_cancelled',
module_type='hdr',
participant_id=pids[2],
event_authored_time=fake_date3,
source='Color',
appointment_timestamp=format_datetime(clock.CLOCK.now()),
appointment_timezone='America/Los_Angeles',
location='123 address st',
contact_number='17348675309',
language='en'
)
notified_dao = GenomicDefaultBaseDao(model_type=GenomicGCROutreachEscalationNotified)
notified_dao.insert_bulk([{
'participant_id': pids[4],
'created': clock.CLOCK.now(),
'modified': clock.CLOCK.now(),
'message_sent': True
},{
'participant_id': pids[5],
'created': clock.CLOCK.now(),
'modified': clock.CLOCK.now(),
'message_sent': False
}])
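# pids[4] was already notified (message_sent=True) and should be excluded;
# pids[5] (message_sent=False) should still be escalated.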
with clock.FakeClock(parser.parse('2022-11-1T05:15:00')):
escalated_participants = self.report_state_dao.get_hdr_result_positive_no_appointment(num_days=14)
results = [pid[0] for pid in escalated_participants]
self.assertIn(pids[2], results)
self.assertIn(pids[3], results)
self.assertIn(pids[5], results)
self.assertNotIn(pids[0], results)
self.assertNotIn(pids[1], results)
self.assertNotIn(pids[4], results)
with GenomicJobController(GenomicJob.CHECK_GCR_OUTREACH_ESCALATION) as controller:
controller.check_gcr_escalation(controller.job_id)
self.assertEqual(email_mock.call_count, 3)
self.assertEqual(email_mock.call_args.args[0].subject, 'GCR Outreach 14 Day Escalation')
self.clear_table_after_test('genomic_gcr_outreach_escalation_notified')
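# Verifies that a 403 ForbiddenError from send_email during escalation is recorded
# with message_sent=False and surfaced through the Slack alert handler.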
@mock.patch('rdr_service.services.email_service.EmailService.send_email')
def test_check_gcr_14day_escalation_error(self, email_mock):
email_mock.side_effect = ForbiddenError(mock.Mock(code=403))
mock_slack_handler = mock.MagicMock()
fake_date = parser.parse("2023-06-01T13:43:23")
config.override_setting(config.GENOMIC_GCR_ESCALATION_EMAILS, ['[email protected]'])
self.data_generator.create_database_genomic_set(
genomicSetName='test',
genomicSetCriteria='.',
genomicSetVersion=1
)
pids = []
for _ in range(2):
summary = self.data_generator.create_database_participant_summary(
consentForStudyEnrollment=1,
consentForGenomicsROR=1
)
set_member = self.data_generator.create_database_genomic_set_member(
participantId=summary.participantId,
genomicSetId=1,
biobankId=1001,
collectionTubeId=100,
sampleId=10,
genomeType="aou_wgs",
)
self.data_generator.create_database_genomic_member_report_state(
participant_id=summary.participantId,
genomic_report_state=GenomicReportState.HDR_RPT_POSITIVE,
genomic_set_member_id=set_member.id,
module='hdr_v1',
event_authored_time=fake_date
)
pids.append(summary.participantId)
self.data_generator.create_database_genomic_appointment(
message_record_id=102,
appointment_id=103,
event_type='appointment_completed',
module_type='hdr',
participant_id=pids[1],
event_authored_time=fake_date,
source='Color',
appointment_timestamp=fake_date,
appointment_timezone='America/Los_Angeles',
location='123 address st',
contact_number='17348675309',
language='en'
)
with GenomicJobController(GenomicJob.CHECK_GCR_OUTREACH_ESCALATION) as controller:
controller.genomic_alert_slack = mock_slack_handler
controller.check_gcr_escalation(controller.job_id)
notified_dao = GenomicDefaultBaseDao(model_type=GenomicGCROutreachEscalationNotified)
with notified_dao.session() as session:
notification = session.query(
GenomicGCROutreachEscalationNotified
).filter(
GenomicGCROutreachEscalationNotified.participant_id == pids[0]
).one()
self.assertEqual(email_mock.call_count, 1)
self.assertEqual(mock_slack_handler.send_message_to_webhook.call_count, 1)
self.assertEqual(False, notification.message_sent)
self.clear_table_after_test('genomic_gcr_outreach_escalation_notified')
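# Verifies the CareEvolution-specific GCR escalation, which uses a 30-day window and
# filters set members by participantOrigin='careevolution'.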
@mock.patch('rdr_service.services.email_service.EmailService.send_email')
def test_check_gcr_ce_escalation(self, email_mock):
fake_date = parser.parse("2022-09-01T13:43:23")
fake_date2 = parser.parse("2022-09-02T14:14:00")
fake_date3 = parser.parse("2022-09-03T15:15:00")
config.override_setting(config.GENOMIC_GCR_ESCALATION_EMAILS, ['[email protected]'])
self.data_generator.create_database_genomic_set(
genomicSetName='test',
genomicSetCriteria='.',
genomicSetVersion=1
)
pids = []
for _ in range(6):
summary = self.data_generator.create_database_participant_summary(
consentForStudyEnrollment=1,
consentForGenomicsROR=1
)
set_member = self.data_generator.create_database_genomic_set_member(
participantId=summary.participantId,
genomicSetId=1,
biobankId=1001,
collectionTubeId=100,
sampleId=10,
genomeType="aou_wgs",
participantOrigin='careevolution'
)
self.data_generator.create_database_genomic_member_report_state(
participant_id=summary.participantId,
genomic_report_state=GenomicReportState.HDR_RPT_POSITIVE,
genomic_set_member_id=set_member.id,
module='hdr_v1',
event_authored_time=fake_date
)
pids.append(summary.participantId)
# Appointment scheduled in future: don't notify
self.data_generator.create_database_genomic_appointment(
message_record_id=101,
appointment_id=102,
event_type='appointment_scheduled',
module_type='hdr',
participant_id=pids[0],
event_authored_time=fake_date,
source='Color',
appointment_timestamp=format_datetime(clock.CLOCK.now()),
appointment_timezone='America/Los_Angeles',
location='123 address st',
contact_number='17348675309',
language='en'
)
# Appointment completed: don't notify
self.data_generator.create_database_genomic_appointment(
message_record_id=102,
appointment_id=103,
event_type='appointment_completed',
module_type='hdr',
participant_id=pids[1],
event_authored_time=fake_date,
source='Color',
appointment_timestamp=fake_date,
appointment_timezone='America/Los_Angeles',
location='123 address st',
contact_number='17348675309',
language='en'
)
# Appointment scheduled then canceled: notify
self.data_generator.create_database_genomic_appointment(
message_record_id=103,
appointment_id=104,
event_type='appointment_scheduled',
module_type='hdr',
participant_id=pids[2],
event_authored_time=fake_date2,
source='Color',
appointment_timestamp=format_datetime(clock.CLOCK.now()),
appointment_timezone='America/Los_Angeles',
location='123 address st',
contact_number='17348675309',
language='en'
)
self.data_generator.create_database_genomic_appointment(
message_record_id=104,
appointment_id=104,
event_type='appointment_cancelled',
module_type='hdr',
participant_id=pids[2],
event_authored_time=fake_date3,
source='Color',
appointment_timestamp=format_datetime(clock.CLOCK.now()),
appointment_timezone='America/Los_Angeles',
location='123 address st',
contact_number='17348675309',
language='en'
)
notified_dao = GenomicDefaultBaseDao(model_type=GenomicGCROutreachEscalationNotified)
notified_dao.insert_bulk([{
'participant_id': pids[4],
'created': clock.CLOCK.now(),
'modified': clock.CLOCK.now(),
'message_sent': True
},{
'participant_id': pids[5],
'created': clock.CLOCK.now(),
'modified': clock.CLOCK.now(),
'message_sent': False
}])
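# As in the 14-day test: pids[4] (already notified) is excluded, while pids[5]
# (message_sent=False) remains eligible for escalation.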
with clock.FakeClock(parser.parse('2022-11-1T05:15:00')):
escalated_participants = self.report_state_dao.get_hdr_result_positive_no_appointment(
num_days=30,
participant_origin='careevolution'
)
results = [pid[0] for pid in escalated_participants]
self.assertIn(pids[2], results)
self.assertIn(pids[3], results)
self.assertIn(pids[5], results)
self.assertNotIn(pids[0], results)
self.assertNotIn(pids[1], results)
self.assertNotIn(pids[4], results)
with GenomicJobController(GenomicJob.CHECK_GCR_CE_OUTREACH_ESCALATION) as controller:
controller.check_gcr_escalation(controller.job_id)
self.assertEqual(email_mock.call_count, 3)
self.assertEqual(email_mock.call_args.args[0].subject, 'GCR Outreach 30 Day Escalation')
self.clear_table_after_test('genomic_gcr_outreach_escalation_notified')
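# Verifies that the p0 manifest-generation cloud task is queued only when the
# preceding PR_PR_WORKFLOW run ends with a SUCCESS result.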
@mock.patch('rdr_service.genomic.genomic_job_controller.GenomicJobController.execute_cloud_task')
def test_execute_auto_generation_from_last_run(self, cloud_task_mock):
with GenomicJobController(
GenomicJob.PR_PR_WORKFLOW
) as controller:
controller.job_result = GenomicSubProcessResult.ERROR
controller._end_run()
controller.execute_auto_generation_from_cloud_task()
last_job_run_status = self.job_run_dao.get_last_run_status_for_job_id(job_id=GenomicJob.PR_PR_WORKFLOW)
self.assertTrue(last_job_run_status is not None)
self.assertTrue(last_job_run_status[0] == GenomicSubProcessResult.ERROR)
# task SHOULD NOT be called
self.assertEqual(cloud_task_mock.called, False)
self.assertEqual(cloud_task_mock.call_count, 0)
with GenomicJobController(
GenomicJob.PR_PR_WORKFLOW
) as controller:
controller.job_result = GenomicSubProcessResult.SUCCESS
controller._end_run()
controller.execute_auto_generation_from_cloud_task()
last_job_run_status = self.job_run_dao.get_last_run_status_for_job_id(job_id=GenomicJob.PR_PR_WORKFLOW)
self.assertTrue(last_job_run_status is not None)
self.assertTrue(last_job_run_status[0] == GenomicSubProcessResult.SUCCESS)
# task SHOULD be called
self.assertEqual(cloud_task_mock.called, True)
self.assertTrue(cloud_task_mock.call_args[1].get('payload').get('manifest_type') == 'p0')
self.assertTrue(cloud_task_mock.call_args[1].get('task_queue') == 'genomic-generate-manifest')
all_job_runs = self.job_run_dao.get_all()
self.assertEqual(len(all_job_runs), 2)
self.assertTrue(all(obj.runResult in [GenomicSubProcessResult.SUCCESS, GenomicSubProcessResult.ERROR] for obj
in all_job_runs))
self.assertTrue(all(obj.jobId == GenomicJob.PR_PR_WORKFLOW for obj in all_job_runs))
is not None for obj in\n all_metrics))\n self.assertTrue(all(obj.event_type is not None for obj in all_metrics))\n current_job_runs = self.job_run_dao.get_all()\n self.assertEqual(len(current_job_runs), 1)\n current_job_run = current_job_runs[0]\n self.assertTrue(current_job_run.jobId == GenomicJob.\n APPOINTMENT_METRICS_FILE_INGEST)\n self.assertTrue(current_job_run.runResult ==\n GenomicSubProcessResult.SUCCESS)\n self.clear_table_after_test('genomic_appointment_event_metrics')\n\n def test_reconcile_appointments_with_metrics(self):\n fake_date = parser.parse('2020-05-29T08:00:01-05:00')\n for num in range(4):\n summary = self.data_generator.create_database_participant_summary()\n missing_json = {'event': 'appointment_updated',\n 'eventAuthoredTime': '2022-09-16T17:18:38Z',\n 'participantId': f'P{summary.participantId}', 'messageBody':\n {'module_type': 'hdr', 'appointment_timestamp':\n '2022-09-19T19:30:00+00:00', 'id': 55,\n 'appointment_timezone': 'America/Los_Angeles', 'location':\n 'CA', 'contact_number': '18043704252', 'language': 'en',\n 'source': 'Color'}}\n if num % 2 == 0:\n self.data_generator.create_database_genomic_appointment(\n message_record_id=num, appointment_id=num, event_type=\n 'appointment_scheduled', module_type='hdr',\n participant_id=summary.participantId,\n event_authored_time=fake_date, source='Color',\n appointment_timestamp=format_datetime(clock.CLOCK.now()\n ), appointment_timezone='America/Los_Angeles', location\n ='123 address st', contact_number='17348675309',\n language='en')\n self.data_generator.create_database_genomic_appointment_metric(\n participant_id=summary.participantId, appointment_event=\n json.dumps(missing_json, indent=4) if num % 2 != 0 else\n 'foo', file_path='test_file_path', module_type='hdr',\n event_authored_time=fake_date, event_type=\n 'appointment_updated' if num % 2 != 0 else\n 'appointment_scheduled')\n current_events = self.appointment_event_dao.get_all()\n self.assertEqual(len(current_events), 2)\n current_metrics = self.appointment_metrics_dao.get_all()\n self.assertEqual(len(current_metrics), 4)\n self.assertTrue(all(obj.reconcile_job_run_id is None for obj in\n current_metrics))\n with GenomicJobController(GenomicJob.APPOINTMENT_METRICS_RECONCILE\n ) as controller:\n controller.reconcile_appointment_events_from_metrics()\n job_run = self.job_run_dao.get_all()\n self.assertEqual(len(job_run), 1)\n self.assertTrue(job_run[0].jobId == GenomicJob.\n APPOINTMENT_METRICS_RECONCILE)\n current_events = self.appointment_event_dao.get_all()\n self.assertEqual(len(current_events), 4)\n scheduled = list(filter(lambda x: x.event_type ==\n 'appointment_scheduled', current_events))\n self.assertEqual(len(scheduled), 2)\n self.assertTrue(all(obj.created_from_metric_id is None for obj in\n scheduled))\n updated = list(filter(lambda x: x.event_type ==\n 'appointment_updated', current_events))\n self.assertEqual(len(updated), 2)\n self.assertTrue(all(obj.created_from_metric_id is not None for obj in\n updated))\n current_metrics = self.appointment_metrics_dao.get_all()\n self.assertEqual(len(current_metrics), 4)\n self.assertTrue(all(obj.reconcile_job_run_id is not None for obj in\n current_metrics))\n self.assertTrue(all(obj.reconcile_job_run_id == job_run[0].id for\n obj in current_metrics))\n self.clear_table_after_test('genomic_appointment_event_metrics')\n\n @mock.patch('rdr_service.services.email_service.EmailService.send_email')\n def test_check_appointments_gror_changed(self, email_mock):\n fake_date = 
parser.parse('2022-09-01T13:43:23')\n notified_dao = GenomicAppointmentEventNotifiedDao()\n config.override_setting(config.GENOMIC_COLOR_PM_EMAIL, [\n '[email protected]'])\n num_participants = 4\n for num in range(num_participants):\n gror = num if num > 1 else 1\n summary = self.data_generator.create_database_participant_summary(\n consentForStudyEnrollment=1, consentForGenomicsROR=gror)\n self.data_generator.create_database_genomic_appointment(\n message_record_id=num, appointment_id=num, event_type=\n 'appointment_scheduled', module_type='hdr', participant_id=\n summary.participantId, event_authored_time=fake_date,\n source='Color', appointment_timestamp=format_datetime(clock\n .CLOCK.now()), appointment_timezone='America/Los_Angeles',\n location='123 address st', contact_number='17348675309',\n language='en')\n changed_ppts = (self.appointment_event_dao.\n get_appointments_gror_changed())\n self.assertEqual(2, len(changed_ppts))\n with GenomicJobController(GenomicJob.CHECK_APPOINTMENT_GROR_CHANGED\n ) as controller:\n controller.check_appointments_gror_changed()\n self.assertEqual(email_mock.call_count, 1)\n notified_appointments = notified_dao.get_all()\n self.assertEqual(2, len(notified_appointments))\n summary = self.data_generator.create_database_participant_summary(\n consentForStudyEnrollment=1, consentForGenomicsROR=2)\n self.data_generator.create_database_genomic_appointment(\n message_record_id=5, appointment_id=5, event_type=\n 'appointment_scheduled', module_type='hdr', participant_id=\n summary.participantId, event_authored_time=fake_date, source=\n 'Color', appointment_timestamp=format_datetime(clock.CLOCK.now(\n )), appointment_timezone='America/Los_Angeles', location=\n '123 address st', contact_number='17348675309', language='en')\n changed_ppts = (self.appointment_event_dao.\n get_appointments_gror_changed())\n self.assertEqual(1, len(changed_ppts))\n\n @mock.patch('rdr_service.services.email_service.EmailService.send_email')\n def test_check_gcr_14day_escalation(self, email_mock):\n fake_date = parser.parse('2022-09-01T13:43:23')\n fake_date2 = parser.parse('2022-09-02T14:14:00')\n fake_date3 = parser.parse('2022-09-03T15:15:00')\n config.override_setting(config.GENOMIC_GCR_ESCALATION_EMAILS, [\n '[email protected]'])\n self.data_generator.create_database_genomic_set(genomicSetName=\n 'test', genomicSetCriteria='.', genomicSetVersion=1)\n pids = []\n for _ in range(6):\n summary = self.data_generator.create_database_participant_summary(\n consentForStudyEnrollment=1, consentForGenomicsROR=1)\n set_member = (self.data_generator.\n create_database_genomic_set_member(participantId=summary.\n participantId, genomicSetId=1, biobankId=1001,\n collectionTubeId=100, sampleId=10, genomeType='aou_wgs'))\n self.data_generator.create_database_genomic_member_report_state(\n participant_id=summary.participantId, genomic_report_state=\n GenomicReportState.HDR_RPT_POSITIVE, genomic_set_member_id=\n set_member.id, module='hdr_v1', event_authored_time=fake_date)\n pids.append(summary.participantId)\n self.data_generator.create_database_genomic_appointment(\n message_record_id=101, appointment_id=102, event_type=\n 'appointment_scheduled', module_type='hdr', participant_id=pids\n [0], event_authored_time=fake_date, source='Color',\n appointment_timestamp=format_datetime(clock.CLOCK.now()),\n appointment_timezone='America/Los_Angeles', location=\n '123 address st', contact_number='17348675309', language='en')\n self.data_generator.create_database_genomic_appointment(\n 
message_record_id=102, appointment_id=103, event_type=\n 'appointment_completed', module_type='hdr', participant_id=pids\n [1], event_authored_time=fake_date, source='Color',\n appointment_timestamp=fake_date, appointment_timezone=\n 'America/Los_Angeles', location='123 address st',\n contact_number='17348675309', language='en')\n self.data_generator.create_database_genomic_appointment(\n message_record_id=103, appointment_id=104, event_type=\n 'appointment_scheduled', module_type='hdr', participant_id=pids\n [2], event_authored_time=fake_date2, source='Color',\n appointment_timestamp=format_datetime(clock.CLOCK.now()),\n appointment_timezone='America/Los_Angeles', location=\n '123 address st', contact_number='17348675309', language='en')\n self.data_generator.create_database_genomic_appointment(\n message_record_id=104, appointment_id=104, event_type=\n 'appointment_cancelled', module_type='hdr', participant_id=pids\n [2], event_authored_time=fake_date3, source='Color',\n appointment_timestamp=format_datetime(clock.CLOCK.now()),\n appointment_timezone='America/Los_Angeles', location=\n '123 address st', contact_number='17348675309', language='en')\n notified_dao = GenomicDefaultBaseDao(model_type=\n GenomicGCROutreachEscalationNotified)\n notified_dao.insert_bulk([{'participant_id': pids[4], 'created':\n clock.CLOCK.now(), 'modified': clock.CLOCK.now(),\n 'message_sent': True}, {'participant_id': pids[5], 'created':\n clock.CLOCK.now(), 'modified': clock.CLOCK.now(),\n 'message_sent': False}])\n with clock.FakeClock(parser.parse('2022-11-1T05:15:00')):\n escalated_participants = (self.report_state_dao.\n get_hdr_result_positive_no_appointment(num_days=14))\n results = [pid[0] for pid in escalated_participants]\n self.assertIn(pids[2], results)\n self.assertIn(pids[3], results)\n self.assertIn(pids[5], results)\n self.assertNotIn(pids[0], results)\n self.assertNotIn(pids[1], results)\n self.assertNotIn(pids[4], results)\n with GenomicJobController(GenomicJob.CHECK_GCR_OUTREACH_ESCALATION\n ) as controller:\n controller.check_gcr_escalation(controller.job_id)\n self.assertEqual(email_mock.call_count, 3)\n self.assertEqual(email_mock.call_args.args[0].subject,\n 'GCR Outreach 14 Day Escalation')\n self.clear_table_after_test('genomic_gcr_outreach_escalation_notified')\n <mask token>\n <mask token>\n\n @mock.patch(\n 'rdr_service.genomic.genomic_job_controller.GenomicJobController.execute_cloud_task'\n )\n def test_execute_auto_generation_from_last_run(self, cloud_task_mock):\n with GenomicJobController(GenomicJob.PR_PR_WORKFLOW) as controller:\n controller.job_result = GenomicSubProcessResult.ERROR\n controller._end_run()\n controller.execute_auto_generation_from_cloud_task()\n last_job_run_status = self.job_run_dao.get_last_run_status_for_job_id(\n job_id=GenomicJob.PR_PR_WORKFLOW)\n self.assertTrue(last_job_run_status is not None)\n self.assertTrue(last_job_run_status[0] == GenomicSubProcessResult.ERROR\n )\n self.assertEqual(cloud_task_mock.called, False)\n self.assertEqual(cloud_task_mock.call_count, 0)\n with GenomicJobController(GenomicJob.PR_PR_WORKFLOW) as controller:\n controller.job_result = GenomicSubProcessResult.SUCCESS\n controller._end_run()\n controller.execute_auto_generation_from_cloud_task()\n last_job_run_status = self.job_run_dao.get_last_run_status_for_job_id(\n job_id=GenomicJob.PR_PR_WORKFLOW)\n self.assertTrue(last_job_run_status is not None)\n self.assertTrue(last_job_run_status[0] == GenomicSubProcessResult.\n SUCCESS)\n self.assertEqual(cloud_task_mock.called, 
True)\n self.assertTrue(cloud_task_mock.call_args[1].get('payload').get(\n 'manifest_type') == 'p0')\n self.assertTrue(cloud_task_mock.call_args[1].get('task_queue') ==\n 'genomic-generate-manifest')\n all_job_runs = self.job_run_dao.get_all()\n self.assertEqual(len(all_job_runs), 2)\n self.assertTrue(all(obj.runResult in [GenomicSubProcessResult.\n SUCCESS, GenomicSubProcessResult.ERROR] for obj in all_job_runs))\n self.assertTrue(all(obj.jobId == GenomicJob.PR_PR_WORKFLOW for obj in\n all_job_runs))\n",
"step-3": "<mask token>\n\n\nclass GenomicJobControllerTest(BaseTestCase):\n\n def setUp(self):\n super(GenomicJobControllerTest, self).setUp()\n self.data_file_dao = GenomicGcDataFileDao()\n self.event_data_dao = MessageBrokenEventDataDao()\n self.incident_dao = GenomicIncidentDao()\n self.member_dao = GenomicSetMemberDao()\n self.metrics_dao = GenomicGCValidationMetricsDao()\n self.user_event_metrics_dao = UserEventMetricsDao()\n self.job_run_dao = GenomicJobRunDao()\n self.report_state_dao = GenomicMemberReportStateDao()\n self.appointment_event_dao = GenomicAppointmentEventDao()\n self.appointment_metrics_dao = GenomicAppointmentEventMetricsDao()\n\n def test_incident_with_long_message(self):\n \"\"\"Make sure the length of incident messages doesn't cause issues when recording them\"\"\"\n incident_message = '1' * (GenomicIncident.message.type.length + 20)\n mock_slack_handler = mock.MagicMock()\n job_controller = GenomicJobController(job_id=1)\n job_controller.genomic_alert_slack = mock_slack_handler\n job_controller.create_incident(message=incident_message, slack=True)\n incident: GenomicIncident = self.session.query(GenomicIncident).one()\n self.assertTrue(incident_message.startswith(incident.message))\n mock_slack_handler.send_message_to_webhook.assert_called_with(\n message_data={'text': incident_message})\n <mask token>\n\n def test_gvcf_files_ingestion_create_incident(self):\n bucket_name = 'test_bucket'\n file_path = (\n 'Wgs_sample_raw_data/SS_VCF_research/BCM_A100153482_21042005280_SIA0013441__1.hard-filtered.gvcf.gz'\n )\n gen_set = self.data_generator.create_database_genomic_set(\n genomicSetName='.', genomicSetCriteria='.', genomicSetVersion=1)\n gen_member = self.data_generator.create_database_genomic_set_member(\n genomicSetId=gen_set.id, biobankId='111111111', sampleId=\n '222222222222', genomeType='aou_wgs', genomicWorkflowState=\n GenomicWorkflowState.AW1)\n gen_job_run = self.data_generator.create_database_genomic_job_run(jobId\n =GenomicJob.AW1_MANIFEST, startTime=clock.CLOCK.now(),\n runResult=GenomicSubProcessResult.SUCCESS)\n gen_processed_file = (self.data_generator.\n create_database_genomic_file_processed(runId=gen_job_run.id,\n startTime=clock.CLOCK.now(), filePath='/test_file_path',\n bucketName=bucket_name, fileName='test_file_name'))\n self.data_generator.create_database_genomic_gc_validation_metrics(\n genomicSetMemberId=gen_member.id, genomicFileProcessedId=\n gen_processed_file.id)\n with GenomicJobController(GenomicJob.INGEST_DATA_FILES) as controller:\n controller.ingest_data_files_into_gc_metrics(file_path, bucket_name\n )\n incident = self.incident_dao.get(1)\n self.assertIsNotNone(incident)\n self.assertEqual(incident.code, GenomicIncidentCode.\n UNABLE_TO_FIND_METRIC.name)\n self.assertEqual(incident.data_file_path, file_path)\n self.assertEqual(incident.message,\n 'INGEST_DATA_FILES: Cannot find genomics metric record for sample id: 21042005280'\n )\n <mask token>\n\n def test_updating_members_blocklists(self):\n gen_set = self.data_generator.create_database_genomic_set(\n genomicSetName='.', genomicSetCriteria='.', genomicSetVersion=1)\n ids_should_be_updated = []\n for i in range(4):\n ids_should_be_updated.append(self.data_generator.\n create_database_genomic_set_member(genomicSetId=gen_set.id,\n biobankId='100153482', sampleId='21042005280', genomeType=\n 'test_investigation_one' if i & 2 != 0 else 'aou_wgs',\n genomicWorkflowState=GenomicWorkflowState.AW0, ai_an='Y' if\n i & 2 == 0 else 'N').id)\n for i in range(2):\n 
self.data_generator.create_database_genomic_set_member(genomicSetId\n =gen_set.id, biobankId='100153482', sampleId='21042005280',\n genomeType='aou_array', genomicWorkflowState=\n GenomicWorkflowState.AW0, ai_an='N')\n with GenomicJobController(GenomicJob.UPDATE_MEMBERS_BLOCKLISTS\n ) as controller:\n controller.update_members_blocklists()\n created_members = self.member_dao.get_all()\n blocklisted = list(filter(lambda x: x.blockResults == 1 or x.\n blockResearch == 1, created_members))\n self.assertTrue(ids_should_be_updated.sort() == [obj.id for obj in\n blocklisted].sort())\n self.assertTrue(all(obj.blockResearch == 1 and obj.\n blockResearchReason is not None and obj.blockResearchReason ==\n 'aian' for obj in created_members if obj.ai_an == 'Y' and obj.\n genomicWorkflowState == GenomicWorkflowState.AW0))\n self.assertTrue(all(obj.blockResults == 0 and obj.\n blockResultsReason is None for obj in created_members if obj.\n ai_an == 'Y' and obj.genomicWorkflowState ==\n GenomicWorkflowState.AW0))\n self.assertTrue(all(obj.blockResearch == 1 and obj.\n blockResearchReason is not None and obj.blockResearchReason ==\n 'test_sample_swap' for obj in created_members if obj.genomeType ==\n 'test_investigation_one' and obj.genomicWorkflowState ==\n GenomicWorkflowState.AW0))\n self.assertTrue(all(obj.blockResults == 1 and obj.\n blockResultsReason is not None and obj.blockResultsReason ==\n 'test_sample_swap' for obj in created_members if obj.genomeType ==\n 'test_investigation_one' and obj.genomicWorkflowState ==\n GenomicWorkflowState.AW0))\n self.assertTrue(all(obj.blockResearch == 0 and obj.\n blockResearchReason is None for obj in created_members if obj.\n genomeType == 'aou_array' and obj.genomicWorkflowState ==\n GenomicWorkflowState.AW0))\n self.assertTrue(all(obj.blockResults == 0 and obj.\n blockResultsReason is None for obj in created_members if obj.\n genomeType == 'aou_array' and obj.genomicWorkflowState ==\n GenomicWorkflowState.AW0))\n with self.member_dao.session() as session:\n session.query(GenomicSetMember).delete()\n run_result = self.job_run_dao.get(1)\n self.assertEqual(run_result.runStatus, GenomicSubProcessStatus.\n COMPLETED)\n self.assertEqual(run_result.runResult, GenomicSubProcessResult.SUCCESS)\n for i in range(4):\n self.data_generator.create_database_genomic_set_member(genomicSetId\n =gen_set.id, biobankId='100153482', sampleId='21042005280',\n genomeType='test_investigation_one' if i & 2 != 0 else\n 'aou_wgs', genomicWorkflowState=GenomicWorkflowState.AW1,\n ai_an='Y' if i & 2 == 0 else 'N')\n with GenomicJobController(GenomicJob.UPDATE_MEMBERS_BLOCKLISTS\n ) as controller:\n controller.update_members_blocklists()\n modified_members = self.member_dao.get_all()\n self.assertTrue(all(obj.blockResearch == 1 and obj.\n blockResearchReason is not None and obj.blockResearchReason ==\n 'aian' for obj in modified_members if obj.ai_an == 'Y' and obj.\n genomicWorkflowState == GenomicWorkflowState.AW1))\n self.assertTrue(all(obj.blockResults == 0 and obj.\n blockResultsReason is None for obj in modified_members if obj.\n ai_an == 'Y' and obj.genomicWorkflowState ==\n GenomicWorkflowState.AW1))\n self.assertTrue(all(obj.blockResearch == 1 and obj.\n blockResearchReason is not None and obj.blockResearchReason ==\n 'test_sample_swap' for obj in modified_members if obj.\n genomeType == 'test_investigation_one' and obj.\n genomicWorkflowState == GenomicWorkflowState.AW1))\n self.assertTrue(all(obj.blockResults == 1 and obj.\n blockResultsReason is not None and 
obj.blockResultsReason ==\n 'test_sample_swap' for obj in modified_members if obj.\n genomeType == 'test_investigation_one' and obj.\n genomicWorkflowState == GenomicWorkflowState.AW1))\n run_result = self.job_run_dao.get(2)\n self.assertEqual(run_result.runStatus, GenomicSubProcessStatus.\n COMPLETED)\n self.assertEqual(run_result.runResult, GenomicSubProcessResult.SUCCESS)\n\n def test_ingest_user_metrics_file(self):\n test_file = 'Genomic-Metrics-File-User-Events-Test.csv'\n bucket_name = 'test_bucket'\n sub_folder = 'user_events'\n pids = []\n file_ingester = GenomicFileIngester()\n for _ in range(2):\n pid = self.data_generator.create_database_participant()\n pids.append(pid.participantId)\n test_metrics_file = create_ingestion_test_file(test_file,\n bucket_name, sub_folder)\n test_file_path = f'{bucket_name}/{sub_folder}/{test_metrics_file}'\n with open_cloud_file(test_file_path) as csv_file:\n metrics_to_ingest = file_ingester._read_data_to_ingest(csv_file)\n with GenomicJobController(GenomicJob.METRICS_FILE_INGEST\n ) as controller:\n controller.ingest_metrics_file(metric_type='user_events',\n file_path=test_file_path)\n job_run_id = controller.job_run.id\n metrics = self.user_event_metrics_dao.get_all()\n for pid in pids:\n file_metrics = list(filter(lambda x: int(x['participant_id'].\n split('P')[-1]) == pid, metrics_to_ingest['rows']))\n participant_ingested_metrics = list(filter(lambda x: x.\n participant_id == pid, metrics))\n self.assertEqual(len(file_metrics), len(\n participant_ingested_metrics))\n self.assertTrue(all(obj.run_id == job_run_id for obj in\n participant_ingested_metrics))\n\n @mock.patch(\n 'rdr_service.genomic.genomic_job_controller.GenomicJobController.execute_cloud_task'\n )\n def test_reconcile_pdr_data(self, mock_cloud_task):\n with GenomicJobController(GenomicJob.RECONCILE_PDR_DATA) as controller:\n controller.reconcile_pdr_data()\n cloud_task_endpoint = 'rebuild_genomic_table_records_task'\n first_run = self.job_run_dao.get_all()\n self.assertEqual(mock_cloud_task.call_count, 1)\n call_args = mock_cloud_task.call_args_list\n self.assertEqual(len(call_args), 1)\n self.assertEqual(call_args[0].args[0]['table'], self.job_run_dao.\n model_type.__tablename__)\n self.assertTrue(type(call_args[0].args[0]['ids']) is list)\n self.assertEqual(call_args[0].args[0]['ids'], [obj.id for obj in\n first_run])\n self.assertEqual(call_args[0].args[1], cloud_task_endpoint)\n participant = self.data_generator.create_database_participant()\n gen_set = self.data_generator.create_database_genomic_set(\n genomicSetName='.', genomicSetCriteria='.', genomicSetVersion=1)\n plus_ten = clock.CLOCK.now() + datetime.timedelta(minutes=10)\n plus_ten = plus_ten.replace(microsecond=0)\n with FakeClock(plus_ten):\n for i in range(2):\n gen_member = (self.data_generator.\n create_database_genomic_set_member(genomicSetId=gen_set\n .id, biobankId='100153482', sampleId='21042005280',\n genomeType='aou_wgs', genomicWorkflowState=\n GenomicWorkflowState.AW1))\n gen_processed_file = (self.data_generator.\n create_database_genomic_file_processed(runId=first_run[\n 0].id, startTime=clock.CLOCK.now(), filePath=\n f'test_file_path_{i}', bucketName='test_bucket',\n fileName='test_file_name'))\n self.data_generator.create_database_genomic_gc_validation_metrics(\n genomicSetMemberId=gen_member.id,\n genomicFileProcessedId=gen_processed_file.id)\n manifest = (self.data_generator.\n create_database_genomic_manifest_file(manifestTypeId=2,\n filePath=f'test_file_path_{i}'))\n 
self.data_generator.create_database_genomic_manifest_feedback(\n inputManifestFileId=manifest.id, feedbackRecordCount=2)\n self.data_generator.create_database_genomic_user_event_metrics(\n participant_id=participant.participantId, event_name=\n 'test_event', run_id=1)\n self.data_generator.create_database_genomic_informing_loop(\n message_record_id=1, event_type=\n 'informing_loop_decision', module_type='gem',\n participant_id=participant.participantId,\n decision_value='maybe_later', event_authored_time=clock\n .CLOCK.now())\n self.data_generator.create_database_genomic_cvl_past_due(\n cvl_site_id='co', email_notification_sent=0, sample_id=\n 'sample_test', results_type='hdr',\n genomic_set_member_id=gen_member.id)\n self.data_generator.create_database_genomic_appointment(\n message_record_id=i, appointment_id=i, event_type=\n 'appointment_scheduled', module_type='hdr',\n participant_id=participant.participantId,\n event_authored_time=clock.CLOCK.now(), source='Color',\n appointment_timestamp=format_datetime(clock.CLOCK.now()\n ), appointment_timezone='America/Los_Angeles', location\n ='123 address st', contact_number='17348675309',\n language='en')\n self.data_generator.create_database_genomic_member_report_state(\n genomic_set_member_id=gen_member.id, participant_id=\n participant.participantId, module='gem',\n genomic_report_state=GenomicReportState.GEM_RPT_READY,\n event_authored_time=clock.CLOCK.now())\n self.data_generator.create_genomic_result_viewed(participant_id\n =participant.participantId, event_type='result_viewed',\n event_authored_time=clock.CLOCK.now(), module_type=\n 'gem', sample_id=gen_member.sampleId)\n with GenomicJobController(GenomicJob.RECONCILE_PDR_DATA) as controller:\n controller.reconcile_pdr_data()\n affected_tables = ['genomic_set', 'genomic_set_member',\n 'genomic_job_run', 'genomic_file_processed',\n 'genomic_gc_validation_metrics', 'genomic_manifest_file',\n 'genomic_manifest_feedback', 'genomic_informing_loop',\n 'genomic_cvl_results_past_due', 'user_event_metrics',\n 'genomic_member_report_state', 'genomic_result_viewed',\n 'genomic_appointment_event']\n num_calls = len(affected_tables) + 1\n self.assertEqual(mock_cloud_task.call_count, num_calls)\n call_args = mock_cloud_task.call_args_list\n self.assertEqual(len(call_args), num_calls)\n mock_tables = set([obj[0][0]['table'] for obj in call_args])\n mock_endpoint = [obj[0][1] for obj in call_args]\n self.assertTrue([mock_tables].sort() == affected_tables.sort())\n self.assertTrue(all(obj for obj in mock_endpoint if obj ==\n cloud_task_endpoint))\n\n @mock.patch(\n 'rdr_service.genomic.genomic_job_controller.GenomicJobController.execute_cloud_task'\n )\n def test_retry_manifest_ingestions_if_deltas(self, mock_cloud_task):\n bucket_name = 'test-bucket'\n aw1_file_name = (\n 'AW1_wgs_sample_manifests/RDR_AoU_SEQ_PKG-2104-026571.csv')\n aw1_manifest_path = f'{bucket_name}/{aw1_file_name}'\n aw2_file_name = (\n 'AW2_wgs_data_manifests/RDR_AoU_SEQ_DataManifest_04092021.csv')\n aw2_manifest_path = f'{bucket_name}/{aw2_file_name}'\n gen_set = self.data_generator.create_database_genomic_set(\n genomicSetName='.', genomicSetCriteria='.', genomicSetVersion=1)\n aw1_job_run = self.data_generator.create_database_genomic_job_run(jobId\n =GenomicJob.AW1_MANIFEST, startTime=clock.CLOCK.now(), endTime=\n clock.CLOCK.now(), runResult=GenomicSubProcessResult.SUCCESS)\n aw2_job_run = self.data_generator.create_database_genomic_job_run(jobId\n =GenomicJob.METRICS_INGESTION, startTime=clock.CLOCK.now(),\n 
endTime=clock.CLOCK.now(), runResult=GenomicSubProcessResult.\n SUCCESS)\n with GenomicJobController(GenomicJob.RETRY_MANIFEST_INGESTIONS\n ) as controller:\n controller.retry_manifest_ingestions()\n job_run = self.job_run_dao.get(3)\n self.assertEqual(job_run.jobId, GenomicJob.RETRY_MANIFEST_INGESTIONS)\n self.assertEqual(job_run.runStatus, GenomicSubProcessStatus.COMPLETED)\n self.assertEqual(job_run.runResult, GenomicSubProcessResult.NO_FILES)\n self.assertEqual(mock_cloud_task.call_count, 0)\n self.assertFalse(mock_cloud_task.call_count)\n self.data_generator.create_database_genomic_aw1_raw(file_path=\n aw1_manifest_path, package_id='PKG-2104-026571', biobank_id=\n 'A10001')\n self.data_generator.create_database_genomic_aw2_raw(file_path=\n aw2_manifest_path, biobank_id='A10001', sample_id='100001',\n biobankidsampleid='A10001_100001')\n aw1_manifest_file = (self.data_generator.\n create_database_genomic_manifest_file(created=clock.CLOCK.now(),\n modified=clock.CLOCK.now(), uploadDate=clock.CLOCK.now(),\n manifestTypeId=GenomicManifestTypes.AW1, filePath=\n aw1_manifest_path, fileName=aw1_file_name, bucketName=\n bucket_name, recordCount=1, rdrProcessingComplete=1,\n rdrProcessingCompleteDate=clock.CLOCK.now()))\n aw2_manifest_file = (self.data_generator.\n create_database_genomic_manifest_file(created=clock.CLOCK.now(),\n modified=clock.CLOCK.now(), uploadDate=clock.CLOCK.now(),\n manifestTypeId=GenomicManifestTypes.AW2, filePath=\n aw2_manifest_path, fileName=aw2_file_name, bucketName=\n bucket_name, recordCount=1, rdrProcessingComplete=1,\n rdrProcessingCompleteDate=clock.CLOCK.now()))\n aw1_file_processed = (self.data_generator.\n create_database_genomic_file_processed(runId=aw1_job_run.id,\n startTime=clock.CLOCK.now(), genomicManifestFileId=\n aw1_manifest_file.id, filePath=f'/{aw1_manifest_path}',\n bucketName=bucket_name, fileName=aw1_file_name))\n aw2_file_processed = (self.data_generator.\n create_database_genomic_file_processed(runId=aw2_job_run.id,\n startTime=clock.CLOCK.now(), genomicManifestFileId=\n aw2_manifest_file.id, filePath=f'/{aw2_manifest_path}',\n bucketName=bucket_name, fileName=aw2_file_name))\n gen_member = self.data_generator.create_database_genomic_set_member(\n genomicSetId=gen_set.id, biobankId='100153482', sampleId=\n '21042005280', genomeType='aou_wgs', genomicWorkflowState=\n GenomicWorkflowState.AW1, aw1FileProcessedId=aw1_file_processed.id)\n self.data_generator.create_database_genomic_gc_validation_metrics(\n genomicSetMemberId=gen_member.id, genomicFileProcessedId=\n aw2_file_processed.id)\n with GenomicJobController(GenomicJob.RETRY_MANIFEST_INGESTIONS\n ) as controller:\n controller.retry_manifest_ingestions()\n job_run = self.job_run_dao.get(4)\n self.assertEqual(job_run.jobId, GenomicJob.RETRY_MANIFEST_INGESTIONS)\n self.assertEqual(job_run.runStatus, GenomicSubProcessStatus.COMPLETED)\n self.assertEqual(job_run.runResult, GenomicSubProcessResult.NO_FILES)\n self.assertEqual(mock_cloud_task.call_count, 0)\n self.assertFalse(mock_cloud_task.call_count)\n with self.member_dao.session() as session:\n session.query(GenomicGCValidationMetrics).delete()\n session.query(GenomicSetMember).delete()\n with GenomicJobController(GenomicJob.RETRY_MANIFEST_INGESTIONS\n ) as controller:\n controller.retry_manifest_ingestions()\n job_run = self.job_run_dao.get(5)\n self.assertEqual(job_run.jobId, GenomicJob.RETRY_MANIFEST_INGESTIONS)\n self.assertEqual(job_run.runStatus, GenomicSubProcessStatus.COMPLETED)\n self.assertEqual(job_run.runResult, 
GenomicSubProcessResult.SUCCESS)\n self.assertEqual(mock_cloud_task.call_count, 2)\n self.assertTrue(mock_cloud_task.call_count)\n call_args = mock_cloud_task.call_args_list\n self.assertEqual(len(call_args), 2)\n cloud_task_endpoint = ['ingest_aw1_manifest_task',\n 'ingest_aw2_manifest_task']\n mock_endpoint = [obj[0][1] for obj in call_args]\n self.assertTrue(all(obj for obj in mock_endpoint if obj ==\n cloud_task_endpoint))\n mock_buckets = set([obj[0][0]['bucket_name'] for obj in call_args])\n self.assertTrue(len(mock_buckets), 1)\n self.assertTrue(list(mock_buckets)[0] == bucket_name)\n\n def test_calculate_informing_loop_ready_flags(self):\n num_participants = 4\n gen_set = self.data_generator.create_database_genomic_set(\n genomicSetName='.', genomicSetCriteria='.', genomicSetVersion=1)\n for num in range(num_participants):\n plus_num = clock.CLOCK.now() + datetime.timedelta(minutes=num)\n plus_num = plus_num.replace(microsecond=0)\n with FakeClock(plus_num):\n summary = (self.data_generator.\n create_database_participant_summary(\n consentForStudyEnrollment=1, consentForGenomicsROR=1))\n stored_sample = (self.data_generator.\n create_database_biobank_stored_sample(biobankId=summary\n .biobankId, biobankOrderIdentifier=self.fake.pyint()))\n collection_site = self.data_generator.create_database_site(\n siteType='Clinic')\n order = self.data_generator.create_database_biobank_order(\n collectedSiteId=collection_site.siteId, participantId=\n summary.participantId, finalizedTime=plus_num)\n self.data_generator.create_database_biobank_order_identifier(\n value=stored_sample.biobankOrderIdentifier,\n biobankOrderId=order.biobankOrderId, system='1')\n self.data_generator.create_database_biobank_order_identifier(\n value=stored_sample.biobankOrderIdentifier,\n biobankOrderId=order.biobankOrderId, system='2')\n member = (self.data_generator.\n create_database_genomic_set_member(genomicSetId=gen_set\n .id, participantId=summary.participantId, genomeType=\n config.GENOME_TYPE_WGS, qcStatus=GenomicQcStatus.PASS,\n gcManifestSampleSource='Whole Blood', collectionTubeId=\n stored_sample.biobankStoredSampleId))\n self.data_generator.create_database_genomic_gc_validation_metrics(\n genomicSetMemberId=member.id, sexConcordance='True',\n drcFpConcordance='Pass', drcSexConcordance='Pass',\n processingStatus='Pass')\n members_for_ready_loop = (self.member_dao.\n get_members_for_informing_loop_ready())\n self.assertEqual(len(members_for_ready_loop), num_participants)\n current_set_members = self.member_dao.get_all()\n self.assertTrue(all(obj.informingLoopReadyFlag == 0 for obj in\n current_set_members))\n self.assertTrue(all(obj.informingLoopReadyFlagModified is None for\n obj in current_set_members))\n with GenomicJobController(GenomicJob.CALCULATE_INFORMING_LOOP_READY\n ) as controller:\n controller.calculate_informing_loop_ready_flags()\n members_for_ready_loop = (self.member_dao.\n get_members_for_informing_loop_ready())\n self.assertEqual(len(members_for_ready_loop), num_participants)\n calculation_limit = 2\n config.override_setting(config.CALCULATE_READY_FLAG_LIMIT, [\n calculation_limit])\n with GenomicJobController(GenomicJob.CALCULATE_INFORMING_LOOP_READY\n ) as controller:\n controller.calculate_informing_loop_ready_flags()\n current_set_members = self.member_dao.get_all()\n self.assertTrue(any(obj.informingLoopReadyFlag == 1 for obj in\n current_set_members))\n self.assertTrue(any(obj.informingLoopReadyFlagModified is not None for\n obj in current_set_members))\n current_loops_set = [obj for 
obj in current_set_members if obj.\n informingLoopReadyFlag == 1 and obj.\n informingLoopReadyFlagModified is not None]\n self.assertEqual(len(current_loops_set), calculation_limit)\n members_for_ready_loop = (self.member_dao.\n get_members_for_informing_loop_ready())\n self.assertEqual(len(members_for_ready_loop), num_participants // 2)\n with GenomicJobController(GenomicJob.CALCULATE_INFORMING_LOOP_READY\n ) as controller:\n controller.calculate_informing_loop_ready_flags()\n current_set_members = self.member_dao.get_all()\n self.assertTrue(all(obj.informingLoopReadyFlag == 1 for obj in\n current_set_members))\n self.assertTrue(all(obj.informingLoopReadyFlagModified is not None for\n obj in current_set_members))\n members_for_ready_loop = (self.member_dao.\n get_members_for_informing_loop_ready())\n self.assertEqual(len(members_for_ready_loop), 0)\n\n @mock.patch('rdr_service.services.email_service.EmailService.send_email')\n def test_getting_results_withdrawn(self, email_mock):\n num_participants = 4\n result_withdrawal_dao = GenomicResultWithdrawalsDao()\n gen_set = self.data_generator.create_database_genomic_set(\n genomicSetName='.', genomicSetCriteria='.', genomicSetVersion=1)\n gen_job_run = self.data_generator.create_database_genomic_job_run(jobId\n =GenomicJob.AW1_MANIFEST, startTime=clock.CLOCK.now(),\n runResult=GenomicSubProcessResult.SUCCESS)\n pids = []\n for num in range(num_participants):\n summary = self.data_generator.create_database_participant_summary(\n consentForStudyEnrollment=1, consentForGenomicsROR=1,\n withdrawalStatus=WithdrawalStatus.EARLY_OUT)\n self.data_generator.create_database_genomic_set_member(genomicSetId\n =gen_set.id, participantId=summary.participantId,\n genomeType=config.GENOME_TYPE_ARRAY, gemA1ManifestJobRunId=\n gen_job_run.id if num % 2 == 0 else None)\n self.data_generator.create_database_genomic_set_member(genomicSetId\n =gen_set.id, participantId=summary.participantId,\n genomeType=config.GENOME_TYPE_WGS, cvlW1ilHdrJobRunId=\n gen_job_run.id)\n pids.append(summary.participantId)\n config.override_setting(config.RDR_GENOMICS_NOTIFICATION_EMAIL,\n '[email protected]')\n with GenomicJobController(GenomicJob.RESULTS_PIPELINE_WITHDRAWALS\n ) as controller:\n controller.check_results_withdrawals()\n self.assertEqual(email_mock.call_count, 2)\n call_args = email_mock.call_args_list\n self.assertTrue(any('GEM' in call.args[0].subject for call in\n call_args))\n self.assertTrue(any('HEALTH' in call.args[0].subject for call in\n call_args))\n job_runs = self.job_run_dao.get_all()\n current_job_run = list(filter(lambda x: x.jobId == GenomicJob.\n RESULTS_PIPELINE_WITHDRAWALS, job_runs))[0]\n self.assertTrue(current_job_run.runResult ==\n GenomicSubProcessResult.SUCCESS)\n all_withdrawal_records = result_withdrawal_dao.get_all()\n self.assertTrue(len(all_withdrawal_records) == len(pids))\n self.assertTrue(all(obj.participant_id in pids for obj in\n all_withdrawal_records))\n array_results = list(filter(lambda x: x.array_results == 1,\n all_withdrawal_records))\n self.assertTrue(len(array_results), 2)\n cvl_results = list(filter(lambda x: x.cvl_results == 1,\n all_withdrawal_records))\n self.assertTrue(len(cvl_results), num_participants)\n with GenomicJobController(GenomicJob.RESULTS_PIPELINE_WITHDRAWALS\n ) as controller:\n controller.check_results_withdrawals()\n self.assertEqual(email_mock.call_count, 2)\n job_runs = self.job_run_dao.get_all()\n current_job_run = list(filter(lambda x: x.jobId == GenomicJob.\n RESULTS_PIPELINE_WITHDRAWALS, 
job_runs))[1]\n self.assertTrue(current_job_run.runResult ==\n GenomicSubProcessResult.NO_RESULTS)\n\n def test_gem_results_to_report_state(self):\n num_participants = 8\n gen_set = self.data_generator.create_database_genomic_set(\n genomicSetName='.', genomicSetCriteria='.', genomicSetVersion=1)\n gem_a2_job_run = self.data_generator.create_database_genomic_job_run(\n jobId=GenomicJob.GEM_A2_MANIFEST, startTime=clock.CLOCK.now(),\n runResult=GenomicSubProcessResult.SUCCESS)\n pids_to_update, member_ids = [], []\n for num in range(num_participants):\n summary = self.data_generator.create_database_participant_summary(\n consentForStudyEnrollment=1, consentForGenomicsROR=1,\n withdrawalStatus=WithdrawalStatus.EARLY_OUT)\n member = self.data_generator.create_database_genomic_set_member(\n genomicSetId=gen_set.id, participantId=summary.\n participantId, genomeType=config.GENOME_TYPE_ARRAY)\n if num % 2 == 0:\n member_ids.append(member.id)\n pids_to_update.append(summary.participantId)\n with GenomicJobController(GenomicJob.GEM_RESULT_REPORTS) as controller:\n controller.gem_results_to_report_state()\n current_job_runs = self.job_run_dao.get_all()\n self.assertEqual(len(current_job_runs), 2)\n current_job_run = list(filter(lambda x: x.jobId == GenomicJob.\n GEM_RESULT_REPORTS, current_job_runs))[0]\n self.assertTrue(current_job_run.runResult ==\n GenomicSubProcessResult.NO_RESULTS)\n current_members = self.member_dao.get_all()\n for member in current_members:\n if member.participantId in pids_to_update:\n member.gemA2ManifestJobRunId = gem_a2_job_run.id\n member.genomicWorkflowState = (GenomicWorkflowState.\n GEM_RPT_READY)\n self.member_dao.update(member)\n with GenomicJobController(GenomicJob.GEM_RESULT_REPORTS) as controller:\n controller.gem_results_to_report_state()\n current_job_runs = self.job_run_dao.get_all()\n self.assertEqual(len(current_job_runs), 3)\n current_job_run = list(filter(lambda x: x.jobId == GenomicJob.\n GEM_RESULT_REPORTS, current_job_runs))[1]\n self.assertTrue(current_job_run.runResult ==\n GenomicSubProcessResult.SUCCESS)\n current_gem_report_states = self.report_state_dao.get_all()\n self.assertEqual(len(current_gem_report_states), len(pids_to_update))\n self.assertTrue(all(obj.event_type == 'result_ready' for obj in\n current_gem_report_states))\n self.assertTrue(all(obj.event_authored_time is not None for obj in\n current_gem_report_states))\n self.assertTrue(all(obj.module == 'gem' for obj in\n current_gem_report_states))\n self.assertTrue(all(obj.genomic_report_state == GenomicReportState.\n GEM_RPT_READY for obj in current_gem_report_states))\n self.assertTrue(all(obj.genomic_report_state_str ==\n GenomicReportState.GEM_RPT_READY.name for obj in\n current_gem_report_states))\n self.assertTrue(all(obj.genomic_set_member_id in member_ids for obj in\n current_gem_report_states))\n with GenomicJobController(GenomicJob.GEM_RESULT_REPORTS) as controller:\n controller.gem_results_to_report_state()\n current_job_runs = self.job_run_dao.get_all()\n self.assertEqual(len(current_job_runs), 4)\n current_job_run = list(filter(lambda x: x.jobId == GenomicJob.\n GEM_RESULT_REPORTS, current_job_runs))[2]\n self.assertTrue(current_job_run.runResult ==\n GenomicSubProcessResult.NO_RESULTS)\n self.clear_table_after_test('genomic_member_report_state')\n <mask token>\n\n def test_reconcile_message_broker_results_ready(self):\n self.data_generator.create_database_genomic_set(genomicSetName=\n 'test', genomicSetCriteria='.', genomicSetVersion=1)\n 
self.data_generator.create_database_genomic_job_run(jobId=\n GenomicJob.METRICS_FILE_INGEST, startTime=clock.CLOCK.now())\n for pid in range(7):\n self.data_generator.create_database_participant(participantId=1 +\n pid, biobankId=1 + pid)\n for i in range(1, 6):\n self.data_generator.create_database_genomic_set_member(\n participantId=i, genomicSetId=1, biobankId=i,\n collectionTubeId=100 + i, sampleId=10 + i, genomeType='aou_wgs'\n )\n if i < 4:\n self.data_generator.create_database_genomic_user_event_metrics(\n participant_id=i, created_at=datetime.datetime(2022, 10,\n 6, 0), event_name='pgx.result_ready', run_id=1)\n if i == 4:\n self.data_generator.create_database_genomic_user_event_metrics(\n participant_id=i, created_at=datetime.datetime(2022, 10,\n 6, 0), event_name='hdr.result_ready.informative', run_id=1)\n if i == 5:\n self.data_generator.create_database_genomic_user_event_metrics(\n participant_id=i, created_at=datetime.datetime(2022, 10,\n 6, 0), event_name='hdr.result_ready.uninformative',\n run_id=1)\n genomic_cvl_pipeline.reconcile_message_broker_results_ready()\n report_state_dao = GenomicMemberReportStateDao()\n states = report_state_dao.get_all()\n self.assertEqual(5, len(states))\n pgx_records = [rec for rec in states if rec.module == 'pgx_v1']\n hdr_record_uninf = [rec for rec in states if rec.\n genomic_report_state == GenomicReportState.HDR_RPT_UNINFORMATIVE][0\n ]\n hdr_record_pos = [rec for rec in states if rec.genomic_report_state ==\n GenomicReportState.HDR_RPT_POSITIVE][0]\n for pgx_record in pgx_records:\n self.assertEqual(GenomicReportState.PGX_RPT_READY, pgx_record.\n genomic_report_state)\n self.assertEqual('PGX_RPT_READY', pgx_record.\n genomic_report_state_str)\n self.assertEqual(int(pgx_record.sample_id), pgx_record.\n participant_id + 10)\n self.assertEqual('result_ready', pgx_record.event_type)\n self.assertEqual(datetime.datetime(2022, 10, 6, 0), pgx_record.\n event_authored_time)\n self.assertIsNotNone(pgx_record.created_from_metric_id)\n self.assertEqual('HDR_RPT_UNINFORMATIVE', hdr_record_uninf.\n genomic_report_state_str)\n self.assertEqual(int(hdr_record_uninf.sample_id), hdr_record_uninf.\n participant_id + 10)\n self.assertEqual('result_ready', hdr_record_uninf.event_type)\n self.assertEqual(datetime.datetime(2022, 10, 6, 0),\n hdr_record_uninf.event_authored_time)\n self.assertIsNotNone(hdr_record_uninf.created_from_metric_id)\n self.assertEqual('HDR_RPT_POSITIVE', hdr_record_pos.\n genomic_report_state_str)\n self.assertEqual(int(hdr_record_pos.sample_id), hdr_record_pos.\n participant_id + 10)\n self.assertEqual('result_ready', hdr_record_pos.event_type)\n self.assertEqual(datetime.datetime(2022, 10, 6, 0), hdr_record_pos.\n event_authored_time)\n self.assertIsNotNone(hdr_record_pos.created_from_metric_id)\n <mask token>\n\n def test_ingest_appointment_metrics_file(self):\n test_file = 'Genomic-Metrics-File-Appointment-Events-Test.json'\n bucket_name = 'test_bucket'\n sub_folder = 'appointment_events'\n pids = []\n for _ in range(4):\n summary = self.data_generator.create_database_participant_summary()\n pids.append(summary.participantId)\n test_file_path = f'{bucket_name}/{sub_folder}/{test_file}'\n appointment_data = test_data.load_test_data_json(\n 'Genomic-Metrics-File-Appointment-Events-Test.json')\n appointment_data_str = json.dumps(appointment_data, indent=4)\n with open_cloud_file(test_file_path, mode='wb') as cloud_file:\n cloud_file.write(appointment_data_str.encode('utf-8'))\n with 
GenomicJobController(GenomicJob.APPOINTMENT_METRICS_FILE_INGEST\n ) as controller:\n controller.ingest_appointment_metrics_file(file_path=test_file_path\n )\n all_metrics = self.appointment_metrics_dao.get_all()\n self.assertEqual(len(all_metrics), 5)\n self.assertTrue(all(obj.participant_id in pids for obj in all_metrics))\n self.assertTrue(all(obj.file_path == test_file_path for obj in\n all_metrics))\n self.assertTrue(all(obj.appointment_event is not None for obj in\n all_metrics))\n self.assertTrue(all(obj.created is not None for obj in all_metrics))\n self.assertTrue(all(obj.modified is not None for obj in all_metrics))\n self.assertTrue(all(obj.module_type is not None for obj in all_metrics)\n )\n self.assertTrue(all(obj.event_authored_time is not None for obj in\n all_metrics))\n self.assertTrue(all(obj.event_type is not None for obj in all_metrics))\n current_job_runs = self.job_run_dao.get_all()\n self.assertEqual(len(current_job_runs), 1)\n current_job_run = current_job_runs[0]\n self.assertTrue(current_job_run.jobId == GenomicJob.\n APPOINTMENT_METRICS_FILE_INGEST)\n self.assertTrue(current_job_run.runResult ==\n GenomicSubProcessResult.SUCCESS)\n self.clear_table_after_test('genomic_appointment_event_metrics')\n\n def test_reconcile_appointments_with_metrics(self):\n fake_date = parser.parse('2020-05-29T08:00:01-05:00')\n for num in range(4):\n summary = self.data_generator.create_database_participant_summary()\n missing_json = {'event': 'appointment_updated',\n 'eventAuthoredTime': '2022-09-16T17:18:38Z',\n 'participantId': f'P{summary.participantId}', 'messageBody':\n {'module_type': 'hdr', 'appointment_timestamp':\n '2022-09-19T19:30:00+00:00', 'id': 55,\n 'appointment_timezone': 'America/Los_Angeles', 'location':\n 'CA', 'contact_number': '18043704252', 'language': 'en',\n 'source': 'Color'}}\n if num % 2 == 0:\n self.data_generator.create_database_genomic_appointment(\n message_record_id=num, appointment_id=num, event_type=\n 'appointment_scheduled', module_type='hdr',\n participant_id=summary.participantId,\n event_authored_time=fake_date, source='Color',\n appointment_timestamp=format_datetime(clock.CLOCK.now()\n ), appointment_timezone='America/Los_Angeles', location\n ='123 address st', contact_number='17348675309',\n language='en')\n self.data_generator.create_database_genomic_appointment_metric(\n participant_id=summary.participantId, appointment_event=\n json.dumps(missing_json, indent=4) if num % 2 != 0 else\n 'foo', file_path='test_file_path', module_type='hdr',\n event_authored_time=fake_date, event_type=\n 'appointment_updated' if num % 2 != 0 else\n 'appointment_scheduled')\n current_events = self.appointment_event_dao.get_all()\n self.assertEqual(len(current_events), 2)\n current_metrics = self.appointment_metrics_dao.get_all()\n self.assertEqual(len(current_metrics), 4)\n self.assertTrue(all(obj.reconcile_job_run_id is None for obj in\n current_metrics))\n with GenomicJobController(GenomicJob.APPOINTMENT_METRICS_RECONCILE\n ) as controller:\n controller.reconcile_appointment_events_from_metrics()\n job_run = self.job_run_dao.get_all()\n self.assertEqual(len(job_run), 1)\n self.assertTrue(job_run[0].jobId == GenomicJob.\n APPOINTMENT_METRICS_RECONCILE)\n current_events = self.appointment_event_dao.get_all()\n self.assertEqual(len(current_events), 4)\n scheduled = list(filter(lambda x: x.event_type ==\n 'appointment_scheduled', current_events))\n self.assertEqual(len(scheduled), 2)\n self.assertTrue(all(obj.created_from_metric_id is None for obj in\n 
scheduled))\n updated = list(filter(lambda x: x.event_type ==\n 'appointment_updated', current_events))\n self.assertEqual(len(updated), 2)\n self.assertTrue(all(obj.created_from_metric_id is not None for obj in\n updated))\n current_metrics = self.appointment_metrics_dao.get_all()\n self.assertEqual(len(current_metrics), 4)\n self.assertTrue(all(obj.reconcile_job_run_id is not None for obj in\n current_metrics))\n self.assertTrue(all(obj.reconcile_job_run_id == job_run[0].id for\n obj in current_metrics))\n self.clear_table_after_test('genomic_appointment_event_metrics')\n\n @mock.patch('rdr_service.services.email_service.EmailService.send_email')\n def test_check_appointments_gror_changed(self, email_mock):\n fake_date = parser.parse('2022-09-01T13:43:23')\n notified_dao = GenomicAppointmentEventNotifiedDao()\n config.override_setting(config.GENOMIC_COLOR_PM_EMAIL, [\n '[email protected]'])\n num_participants = 4\n for num in range(num_participants):\n gror = num if num > 1 else 1\n summary = self.data_generator.create_database_participant_summary(\n consentForStudyEnrollment=1, consentForGenomicsROR=gror)\n self.data_generator.create_database_genomic_appointment(\n message_record_id=num, appointment_id=num, event_type=\n 'appointment_scheduled', module_type='hdr', participant_id=\n summary.participantId, event_authored_time=fake_date,\n source='Color', appointment_timestamp=format_datetime(clock\n .CLOCK.now()), appointment_timezone='America/Los_Angeles',\n location='123 address st', contact_number='17348675309',\n language='en')\n changed_ppts = (self.appointment_event_dao.\n get_appointments_gror_changed())\n self.assertEqual(2, len(changed_ppts))\n with GenomicJobController(GenomicJob.CHECK_APPOINTMENT_GROR_CHANGED\n ) as controller:\n controller.check_appointments_gror_changed()\n self.assertEqual(email_mock.call_count, 1)\n notified_appointments = notified_dao.get_all()\n self.assertEqual(2, len(notified_appointments))\n summary = self.data_generator.create_database_participant_summary(\n consentForStudyEnrollment=1, consentForGenomicsROR=2)\n self.data_generator.create_database_genomic_appointment(\n message_record_id=5, appointment_id=5, event_type=\n 'appointment_scheduled', module_type='hdr', participant_id=\n summary.participantId, event_authored_time=fake_date, source=\n 'Color', appointment_timestamp=format_datetime(clock.CLOCK.now(\n )), appointment_timezone='America/Los_Angeles', location=\n '123 address st', contact_number='17348675309', language='en')\n changed_ppts = (self.appointment_event_dao.\n get_appointments_gror_changed())\n self.assertEqual(1, len(changed_ppts))\n\n @mock.patch('rdr_service.services.email_service.EmailService.send_email')\n def test_check_gcr_14day_escalation(self, email_mock):\n fake_date = parser.parse('2022-09-01T13:43:23')\n fake_date2 = parser.parse('2022-09-02T14:14:00')\n fake_date3 = parser.parse('2022-09-03T15:15:00')\n config.override_setting(config.GENOMIC_GCR_ESCALATION_EMAILS, [\n '[email protected]'])\n self.data_generator.create_database_genomic_set(genomicSetName=\n 'test', genomicSetCriteria='.', genomicSetVersion=1)\n pids = []\n for _ in range(6):\n summary = self.data_generator.create_database_participant_summary(\n consentForStudyEnrollment=1, consentForGenomicsROR=1)\n set_member = (self.data_generator.\n create_database_genomic_set_member(participantId=summary.\n participantId, genomicSetId=1, biobankId=1001,\n collectionTubeId=100, sampleId=10, genomeType='aou_wgs'))\n 
self.data_generator.create_database_genomic_member_report_state(\n participant_id=summary.participantId, genomic_report_state=\n GenomicReportState.HDR_RPT_POSITIVE, genomic_set_member_id=\n set_member.id, module='hdr_v1', event_authored_time=fake_date)\n pids.append(summary.participantId)\n self.data_generator.create_database_genomic_appointment(\n message_record_id=101, appointment_id=102, event_type=\n 'appointment_scheduled', module_type='hdr', participant_id=pids\n [0], event_authored_time=fake_date, source='Color',\n appointment_timestamp=format_datetime(clock.CLOCK.now()),\n appointment_timezone='America/Los_Angeles', location=\n '123 address st', contact_number='17348675309', language='en')\n self.data_generator.create_database_genomic_appointment(\n message_record_id=102, appointment_id=103, event_type=\n 'appointment_completed', module_type='hdr', participant_id=pids\n [1], event_authored_time=fake_date, source='Color',\n appointment_timestamp=fake_date, appointment_timezone=\n 'America/Los_Angeles', location='123 address st',\n contact_number='17348675309', language='en')\n self.data_generator.create_database_genomic_appointment(\n message_record_id=103, appointment_id=104, event_type=\n 'appointment_scheduled', module_type='hdr', participant_id=pids\n [2], event_authored_time=fake_date2, source='Color',\n appointment_timestamp=format_datetime(clock.CLOCK.now()),\n appointment_timezone='America/Los_Angeles', location=\n '123 address st', contact_number='17348675309', language='en')\n self.data_generator.create_database_genomic_appointment(\n message_record_id=104, appointment_id=104, event_type=\n 'appointment_cancelled', module_type='hdr', participant_id=pids\n [2], event_authored_time=fake_date3, source='Color',\n appointment_timestamp=format_datetime(clock.CLOCK.now()),\n appointment_timezone='America/Los_Angeles', location=\n '123 address st', contact_number='17348675309', language='en')\n notified_dao = GenomicDefaultBaseDao(model_type=\n GenomicGCROutreachEscalationNotified)\n notified_dao.insert_bulk([{'participant_id': pids[4], 'created':\n clock.CLOCK.now(), 'modified': clock.CLOCK.now(),\n 'message_sent': True}, {'participant_id': pids[5], 'created':\n clock.CLOCK.now(), 'modified': clock.CLOCK.now(),\n 'message_sent': False}])\n with clock.FakeClock(parser.parse('2022-11-1T05:15:00')):\n escalated_participants = (self.report_state_dao.\n get_hdr_result_positive_no_appointment(num_days=14))\n results = [pid[0] for pid in escalated_participants]\n self.assertIn(pids[2], results)\n self.assertIn(pids[3], results)\n self.assertIn(pids[5], results)\n self.assertNotIn(pids[0], results)\n self.assertNotIn(pids[1], results)\n self.assertNotIn(pids[4], results)\n with GenomicJobController(GenomicJob.CHECK_GCR_OUTREACH_ESCALATION\n ) as controller:\n controller.check_gcr_escalation(controller.job_id)\n self.assertEqual(email_mock.call_count, 3)\n self.assertEqual(email_mock.call_args.args[0].subject,\n 'GCR Outreach 14 Day Escalation')\n self.clear_table_after_test('genomic_gcr_outreach_escalation_notified')\n <mask token>\n <mask token>\n\n @mock.patch(\n 'rdr_service.genomic.genomic_job_controller.GenomicJobController.execute_cloud_task'\n )\n def test_execute_auto_generation_from_last_run(self, cloud_task_mock):\n with GenomicJobController(GenomicJob.PR_PR_WORKFLOW) as controller:\n controller.job_result = GenomicSubProcessResult.ERROR\n controller._end_run()\n controller.execute_auto_generation_from_cloud_task()\n last_job_run_status = 
self.job_run_dao.get_last_run_status_for_job_id(\n job_id=GenomicJob.PR_PR_WORKFLOW)\n self.assertTrue(last_job_run_status is not None)\n self.assertTrue(last_job_run_status[0] == GenomicSubProcessResult.ERROR\n )\n self.assertEqual(cloud_task_mock.called, False)\n self.assertEqual(cloud_task_mock.call_count, 0)\n with GenomicJobController(GenomicJob.PR_PR_WORKFLOW) as controller:\n controller.job_result = GenomicSubProcessResult.SUCCESS\n controller._end_run()\n controller.execute_auto_generation_from_cloud_task()\n last_job_run_status = self.job_run_dao.get_last_run_status_for_job_id(\n job_id=GenomicJob.PR_PR_WORKFLOW)\n self.assertTrue(last_job_run_status is not None)\n self.assertTrue(last_job_run_status[0] == GenomicSubProcessResult.\n SUCCESS)\n self.assertEqual(cloud_task_mock.called, True)\n self.assertTrue(cloud_task_mock.call_args[1].get('payload').get(\n 'manifest_type') == 'p0')\n self.assertTrue(cloud_task_mock.call_args[1].get('task_queue') ==\n 'genomic-generate-manifest')\n all_job_runs = self.job_run_dao.get_all()\n self.assertEqual(len(all_job_runs), 2)\n self.assertTrue(all(obj.runResult in [GenomicSubProcessResult.\n SUCCESS, GenomicSubProcessResult.ERROR] for obj in all_job_runs))\n self.assertTrue(all(obj.jobId == GenomicJob.PR_PR_WORKFLOW for obj in\n all_job_runs))\n",
"step-4": "<mask token>\n\n\nclass GenomicJobControllerTest(BaseTestCase):\n\n def setUp(self):\n super(GenomicJobControllerTest, self).setUp()\n self.data_file_dao = GenomicGcDataFileDao()\n self.event_data_dao = MessageBrokenEventDataDao()\n self.incident_dao = GenomicIncidentDao()\n self.member_dao = GenomicSetMemberDao()\n self.metrics_dao = GenomicGCValidationMetricsDao()\n self.user_event_metrics_dao = UserEventMetricsDao()\n self.job_run_dao = GenomicJobRunDao()\n self.report_state_dao = GenomicMemberReportStateDao()\n self.appointment_event_dao = GenomicAppointmentEventDao()\n self.appointment_metrics_dao = GenomicAppointmentEventMetricsDao()\n\n def test_incident_with_long_message(self):\n \"\"\"Make sure the length of incident messages doesn't cause issues when recording them\"\"\"\n incident_message = '1' * (GenomicIncident.message.type.length + 20)\n mock_slack_handler = mock.MagicMock()\n job_controller = GenomicJobController(job_id=1)\n job_controller.genomic_alert_slack = mock_slack_handler\n job_controller.create_incident(message=incident_message, slack=True)\n incident: GenomicIncident = self.session.query(GenomicIncident).one()\n self.assertTrue(incident_message.startswith(incident.message))\n mock_slack_handler.send_message_to_webhook.assert_called_with(\n message_data={'text': incident_message})\n\n def test_gvcf_files_ingestion(self):\n job_controller = GenomicJobController(job_id=38)\n bucket_name = 'test_bucket'\n file_path = (\n 'Wgs_sample_raw_data/SS_VCF_research/BCM_A100153482_21042005280_SIA0013441__1.hard-filtered.gvcf.gz'\n )\n file_path_md5 = (\n 'Wgs_sample_raw_data/SS_VCF_research/BCM_A100153482_21042005280_SIA0013441__1.hard-filtered.gvcf.gz.md5sum'\n )\n full_path = f'{bucket_name}/{file_path}'\n full_path_md5 = f'{bucket_name}/{file_path_md5}'\n gen_set = self.data_generator.create_database_genomic_set(\n genomicSetName='.', genomicSetCriteria='.', genomicSetVersion=1)\n gen_member = self.data_generator.create_database_genomic_set_member(\n genomicSetId=gen_set.id, biobankId='100153482', sampleId=\n '21042005280', genomeType='aou_wgs', genomicWorkflowState=\n GenomicWorkflowState.AW1)\n gen_job_run = self.data_generator.create_database_genomic_job_run(jobId\n =GenomicJob.AW1_MANIFEST, startTime=clock.CLOCK.now(),\n runResult=GenomicSubProcessResult.SUCCESS)\n gen_processed_file = (self.data_generator.\n create_database_genomic_file_processed(runId=gen_job_run.id,\n startTime=clock.CLOCK.now(), filePath='/test_file_path',\n bucketName='test_bucket', fileName='test_file_name'))\n self.data_generator.create_database_genomic_gc_validation_metrics(\n genomicSetMemberId=gen_member.id, genomicFileProcessedId=\n gen_processed_file.id)\n job_controller.ingest_data_files_into_gc_metrics(file_path_md5,\n bucket_name)\n metrics = self.metrics_dao.get_metrics_by_member_id(gen_member.id)\n self.assertIsNotNone(metrics.gvcfMd5Path)\n self.assertEqual(metrics.gvcfMd5Path, full_path_md5)\n job_controller.ingest_data_files_into_gc_metrics(file_path, bucket_name\n )\n metrics = self.metrics_dao.get_metrics_by_member_id(gen_member.id)\n self.assertIsNotNone(metrics.gvcfPath)\n self.assertEqual(metrics.gvcfPath, full_path)\n\n def test_gvcf_files_ingestion_create_incident(self):\n bucket_name = 'test_bucket'\n file_path = (\n 'Wgs_sample_raw_data/SS_VCF_research/BCM_A100153482_21042005280_SIA0013441__1.hard-filtered.gvcf.gz'\n )\n gen_set = self.data_generator.create_database_genomic_set(\n genomicSetName='.', genomicSetCriteria='.', genomicSetVersion=1)\n gen_member = 
self.data_generator.create_database_genomic_set_member(\n genomicSetId=gen_set.id, biobankId='111111111', sampleId=\n '222222222222', genomeType='aou_wgs', genomicWorkflowState=\n GenomicWorkflowState.AW1)\n gen_job_run = self.data_generator.create_database_genomic_job_run(jobId\n =GenomicJob.AW1_MANIFEST, startTime=clock.CLOCK.now(),\n runResult=GenomicSubProcessResult.SUCCESS)\n gen_processed_file = (self.data_generator.\n create_database_genomic_file_processed(runId=gen_job_run.id,\n startTime=clock.CLOCK.now(), filePath='/test_file_path',\n bucketName=bucket_name, fileName='test_file_name'))\n self.data_generator.create_database_genomic_gc_validation_metrics(\n genomicSetMemberId=gen_member.id, genomicFileProcessedId=\n gen_processed_file.id)\n with GenomicJobController(GenomicJob.INGEST_DATA_FILES) as controller:\n controller.ingest_data_files_into_gc_metrics(file_path, bucket_name\n )\n incident = self.incident_dao.get(1)\n self.assertIsNotNone(incident)\n self.assertEqual(incident.code, GenomicIncidentCode.\n UNABLE_TO_FIND_METRIC.name)\n self.assertEqual(incident.data_file_path, file_path)\n self.assertEqual(incident.message,\n 'INGEST_DATA_FILES: Cannot find genomics metric record for sample id: 21042005280'\n )\n\n def test_accession_data_files(self):\n test_bucket_baylor = 'fake-data-bucket-baylor'\n test_idat_file = (\n 'fake-data-bucket-baylor/Genotyping_sample_raw_data/204027270091_R02C01_Grn.idat'\n )\n test_vcf_file = (\n 'fake-data-bucket-baylor/Genotyping_sample_raw_data/204027270091_R02C01.vcf.gz'\n )\n test_cram_file = (\n 'fake-data-bucket-baylor/Wgs_sample_raw_data/CRAMs_CRAIs/BCM_A100134256_21063006771_SIA0017196_1.cram'\n )\n test_files = [test_idat_file, test_vcf_file, test_cram_file]\n test_time = datetime.datetime(2021, 7, 9, 14, 1, 1)\n with clock.FakeClock(test_time):\n for file_path in test_files:\n with GenomicJobController(GenomicJob.ACCESSION_DATA_FILES\n ) as controller:\n controller.accession_data_files(file_path,\n test_bucket_baylor)\n inserted_files = self.data_file_dao.get_all()\n expected_idat = GenomicGcDataFile(id=1, created=test_time, modified\n =test_time, file_path=test_idat_file, gc_site_id='jh',\n bucket_name='fake-data-bucket-baylor', file_prefix=\n 'Genotyping_sample_raw_data', file_name=\n '204027270091_R02C01_Grn.idat', file_type='Grn.idat',\n identifier_type='chipwellbarcode', identifier_value=\n '204027270091_R02C01', ignore_flag=0)\n expected_vcf = GenomicGcDataFile(id=2, created=test_time, modified=\n test_time, file_path=test_vcf_file, gc_site_id='jh',\n bucket_name='fake-data-bucket-baylor', file_prefix=\n 'Genotyping_sample_raw_data', file_name=\n '204027270091_R02C01.vcf.gz', file_type='vcf.gz',\n identifier_type='chipwellbarcode', identifier_value=\n '204027270091_R02C01', ignore_flag=0)\n expected_cram = GenomicGcDataFile(id=3, created=test_time, modified\n =test_time, file_path=test_cram_file, gc_site_id='bcm',\n bucket_name='fake-data-bucket-baylor', file_prefix=\n 'Wgs_sample_raw_data/CRAMs_CRAIs', file_name=\n 'BCM_A100134256_21063006771_SIA0017196_1.cram', file_type=\n 'cram', identifier_type='sample_id', identifier_value=\n '21063006771', ignore_flag=0)\n expected_objs = {(0): expected_idat, (1): expected_vcf, (2):\n expected_cram}\n for i in range(3):\n self.assertEqual(expected_objs[i].bucket_name, inserted_files[i\n ].bucket_name)\n self.assertEqual(expected_objs[i].created, inserted_files[i].\n created)\n self.assertEqual(expected_objs[i].file_name, inserted_files[i].\n file_name)\n 
self.assertEqual(expected_objs[i].file_path, inserted_files[i].\n file_path)\n self.assertEqual(expected_objs[i].file_prefix, inserted_files[i\n ].file_prefix)\n self.assertEqual(expected_objs[i].file_type, inserted_files[i].\n file_type)\n self.assertEqual(expected_objs[i].gc_site_id, inserted_files[i]\n .gc_site_id)\n self.assertEqual(expected_objs[i].id, inserted_files[i].id)\n self.assertEqual(expected_objs[i].identifier_type,\n inserted_files[i].identifier_type)\n self.assertEqual(expected_objs[i].identifier_value,\n inserted_files[i].identifier_value)\n self.assertEqual(expected_objs[i].ignore_flag, inserted_files[i\n ].ignore_flag)\n self.assertEqual(expected_objs[i].metadata, inserted_files[i].\n metadata)\n self.assertEqual(expected_objs[i].modified, inserted_files[i].\n modified)\n\n def test_updating_members_blocklists(self):\n gen_set = self.data_generator.create_database_genomic_set(\n genomicSetName='.', genomicSetCriteria='.', genomicSetVersion=1)\n ids_should_be_updated = []\n for i in range(4):\n ids_should_be_updated.append(self.data_generator.\n create_database_genomic_set_member(genomicSetId=gen_set.id,\n biobankId='100153482', sampleId='21042005280', genomeType=\n 'test_investigation_one' if i & 2 != 0 else 'aou_wgs',\n genomicWorkflowState=GenomicWorkflowState.AW0, ai_an='Y' if\n i & 2 == 0 else 'N').id)\n for i in range(2):\n self.data_generator.create_database_genomic_set_member(genomicSetId\n =gen_set.id, biobankId='100153482', sampleId='21042005280',\n genomeType='aou_array', genomicWorkflowState=\n GenomicWorkflowState.AW0, ai_an='N')\n with GenomicJobController(GenomicJob.UPDATE_MEMBERS_BLOCKLISTS\n ) as controller:\n controller.update_members_blocklists()\n created_members = self.member_dao.get_all()\n blocklisted = list(filter(lambda x: x.blockResults == 1 or x.\n blockResearch == 1, created_members))\n self.assertTrue(ids_should_be_updated.sort() == [obj.id for obj in\n blocklisted].sort())\n self.assertTrue(all(obj.blockResearch == 1 and obj.\n blockResearchReason is not None and obj.blockResearchReason ==\n 'aian' for obj in created_members if obj.ai_an == 'Y' and obj.\n genomicWorkflowState == GenomicWorkflowState.AW0))\n self.assertTrue(all(obj.blockResults == 0 and obj.\n blockResultsReason is None for obj in created_members if obj.\n ai_an == 'Y' and obj.genomicWorkflowState ==\n GenomicWorkflowState.AW0))\n self.assertTrue(all(obj.blockResearch == 1 and obj.\n blockResearchReason is not None and obj.blockResearchReason ==\n 'test_sample_swap' for obj in created_members if obj.genomeType ==\n 'test_investigation_one' and obj.genomicWorkflowState ==\n GenomicWorkflowState.AW0))\n self.assertTrue(all(obj.blockResults == 1 and obj.\n blockResultsReason is not None and obj.blockResultsReason ==\n 'test_sample_swap' for obj in created_members if obj.genomeType ==\n 'test_investigation_one' and obj.genomicWorkflowState ==\n GenomicWorkflowState.AW0))\n self.assertTrue(all(obj.blockResearch == 0 and obj.\n blockResearchReason is None for obj in created_members if obj.\n genomeType == 'aou_array' and obj.genomicWorkflowState ==\n GenomicWorkflowState.AW0))\n self.assertTrue(all(obj.blockResults == 0 and obj.\n blockResultsReason is None for obj in created_members if obj.\n genomeType == 'aou_array' and obj.genomicWorkflowState ==\n GenomicWorkflowState.AW0))\n with self.member_dao.session() as session:\n session.query(GenomicSetMember).delete()\n run_result = self.job_run_dao.get(1)\n self.assertEqual(run_result.runStatus, GenomicSubProcessStatus.\n 
COMPLETED)\n self.assertEqual(run_result.runResult, GenomicSubProcessResult.SUCCESS)\n for i in range(4):\n self.data_generator.create_database_genomic_set_member(genomicSetId\n =gen_set.id, biobankId='100153482', sampleId='21042005280',\n genomeType='test_investigation_one' if i & 2 != 0 else\n 'aou_wgs', genomicWorkflowState=GenomicWorkflowState.AW1,\n ai_an='Y' if i & 2 == 0 else 'N')\n with GenomicJobController(GenomicJob.UPDATE_MEMBERS_BLOCKLISTS\n ) as controller:\n controller.update_members_blocklists()\n modified_members = self.member_dao.get_all()\n self.assertTrue(all(obj.blockResearch == 1 and obj.\n blockResearchReason is not None and obj.blockResearchReason ==\n 'aian' for obj in modified_members if obj.ai_an == 'Y' and obj.\n genomicWorkflowState == GenomicWorkflowState.AW1))\n self.assertTrue(all(obj.blockResults == 0 and obj.\n blockResultsReason is None for obj in modified_members if obj.\n ai_an == 'Y' and obj.genomicWorkflowState ==\n GenomicWorkflowState.AW1))\n self.assertTrue(all(obj.blockResearch == 1 and obj.\n blockResearchReason is not None and obj.blockResearchReason ==\n 'test_sample_swap' for obj in modified_members if obj.\n genomeType == 'test_investigation_one' and obj.\n genomicWorkflowState == GenomicWorkflowState.AW1))\n self.assertTrue(all(obj.blockResults == 1 and obj.\n blockResultsReason is not None and obj.blockResultsReason ==\n 'test_sample_swap' for obj in modified_members if obj.\n genomeType == 'test_investigation_one' and obj.\n genomicWorkflowState == GenomicWorkflowState.AW1))\n run_result = self.job_run_dao.get(2)\n self.assertEqual(run_result.runStatus, GenomicSubProcessStatus.\n COMPLETED)\n self.assertEqual(run_result.runResult, GenomicSubProcessResult.SUCCESS)\n\n def test_ingest_user_metrics_file(self):\n test_file = 'Genomic-Metrics-File-User-Events-Test.csv'\n bucket_name = 'test_bucket'\n sub_folder = 'user_events'\n pids = []\n file_ingester = GenomicFileIngester()\n for _ in range(2):\n pid = self.data_generator.create_database_participant()\n pids.append(pid.participantId)\n test_metrics_file = create_ingestion_test_file(test_file,\n bucket_name, sub_folder)\n test_file_path = f'{bucket_name}/{sub_folder}/{test_metrics_file}'\n with open_cloud_file(test_file_path) as csv_file:\n metrics_to_ingest = file_ingester._read_data_to_ingest(csv_file)\n with GenomicJobController(GenomicJob.METRICS_FILE_INGEST\n ) as controller:\n controller.ingest_metrics_file(metric_type='user_events',\n file_path=test_file_path)\n job_run_id = controller.job_run.id\n metrics = self.user_event_metrics_dao.get_all()\n for pid in pids:\n file_metrics = list(filter(lambda x: int(x['participant_id'].\n split('P')[-1]) == pid, metrics_to_ingest['rows']))\n participant_ingested_metrics = list(filter(lambda x: x.\n participant_id == pid, metrics))\n self.assertEqual(len(file_metrics), len(\n participant_ingested_metrics))\n self.assertTrue(all(obj.run_id == job_run_id for obj in\n participant_ingested_metrics))\n\n @mock.patch(\n 'rdr_service.genomic.genomic_job_controller.GenomicJobController.execute_cloud_task'\n )\n def test_reconcile_pdr_data(self, mock_cloud_task):\n with GenomicJobController(GenomicJob.RECONCILE_PDR_DATA) as controller:\n controller.reconcile_pdr_data()\n cloud_task_endpoint = 'rebuild_genomic_table_records_task'\n first_run = self.job_run_dao.get_all()\n self.assertEqual(mock_cloud_task.call_count, 1)\n call_args = mock_cloud_task.call_args_list\n self.assertEqual(len(call_args), 1)\n self.assertEqual(call_args[0].args[0]['table'], 
self.job_run_dao.\n model_type.__tablename__)\n self.assertTrue(type(call_args[0].args[0]['ids']) is list)\n self.assertEqual(call_args[0].args[0]['ids'], [obj.id for obj in\n first_run])\n self.assertEqual(call_args[0].args[1], cloud_task_endpoint)\n participant = self.data_generator.create_database_participant()\n gen_set = self.data_generator.create_database_genomic_set(\n genomicSetName='.', genomicSetCriteria='.', genomicSetVersion=1)\n plus_ten = clock.CLOCK.now() + datetime.timedelta(minutes=10)\n plus_ten = plus_ten.replace(microsecond=0)\n with FakeClock(plus_ten):\n for i in range(2):\n gen_member = (self.data_generator.\n create_database_genomic_set_member(genomicSetId=gen_set\n .id, biobankId='100153482', sampleId='21042005280',\n genomeType='aou_wgs', genomicWorkflowState=\n GenomicWorkflowState.AW1))\n gen_processed_file = (self.data_generator.\n create_database_genomic_file_processed(runId=first_run[\n 0].id, startTime=clock.CLOCK.now(), filePath=\n f'test_file_path_{i}', bucketName='test_bucket',\n fileName='test_file_name'))\n self.data_generator.create_database_genomic_gc_validation_metrics(\n genomicSetMemberId=gen_member.id,\n genomicFileProcessedId=gen_processed_file.id)\n manifest = (self.data_generator.\n create_database_genomic_manifest_file(manifestTypeId=2,\n filePath=f'test_file_path_{i}'))\n self.data_generator.create_database_genomic_manifest_feedback(\n inputManifestFileId=manifest.id, feedbackRecordCount=2)\n self.data_generator.create_database_genomic_user_event_metrics(\n participant_id=participant.participantId, event_name=\n 'test_event', run_id=1)\n self.data_generator.create_database_genomic_informing_loop(\n message_record_id=1, event_type=\n 'informing_loop_decision', module_type='gem',\n participant_id=participant.participantId,\n decision_value='maybe_later', event_authored_time=clock\n .CLOCK.now())\n self.data_generator.create_database_genomic_cvl_past_due(\n cvl_site_id='co', email_notification_sent=0, sample_id=\n 'sample_test', results_type='hdr',\n genomic_set_member_id=gen_member.id)\n self.data_generator.create_database_genomic_appointment(\n message_record_id=i, appointment_id=i, event_type=\n 'appointment_scheduled', module_type='hdr',\n participant_id=participant.participantId,\n event_authored_time=clock.CLOCK.now(), source='Color',\n appointment_timestamp=format_datetime(clock.CLOCK.now()\n ), appointment_timezone='America/Los_Angeles', location\n ='123 address st', contact_number='17348675309',\n language='en')\n self.data_generator.create_database_genomic_member_report_state(\n genomic_set_member_id=gen_member.id, participant_id=\n participant.participantId, module='gem',\n genomic_report_state=GenomicReportState.GEM_RPT_READY,\n event_authored_time=clock.CLOCK.now())\n self.data_generator.create_genomic_result_viewed(participant_id\n =participant.participantId, event_type='result_viewed',\n event_authored_time=clock.CLOCK.now(), module_type=\n 'gem', sample_id=gen_member.sampleId)\n with GenomicJobController(GenomicJob.RECONCILE_PDR_DATA) as controller:\n controller.reconcile_pdr_data()\n affected_tables = ['genomic_set', 'genomic_set_member',\n 'genomic_job_run', 'genomic_file_processed',\n 'genomic_gc_validation_metrics', 'genomic_manifest_file',\n 'genomic_manifest_feedback', 'genomic_informing_loop',\n 'genomic_cvl_results_past_due', 'user_event_metrics',\n 'genomic_member_report_state', 'genomic_result_viewed',\n 'genomic_appointment_event']\n num_calls = len(affected_tables) + 1\n self.assertEqual(mock_cloud_task.call_count, 
num_calls)\n call_args = mock_cloud_task.call_args_list\n self.assertEqual(len(call_args), num_calls)\n mock_tables = set([obj[0][0]['table'] for obj in call_args])\n mock_endpoint = [obj[0][1] for obj in call_args]\n self.assertTrue([mock_tables].sort() == affected_tables.sort())\n self.assertTrue(all(obj for obj in mock_endpoint if obj ==\n cloud_task_endpoint))\n\n @mock.patch(\n 'rdr_service.genomic.genomic_job_controller.GenomicJobController.execute_cloud_task'\n )\n def test_retry_manifest_ingestions_if_deltas(self, mock_cloud_task):\n bucket_name = 'test-bucket'\n aw1_file_name = (\n 'AW1_wgs_sample_manifests/RDR_AoU_SEQ_PKG-2104-026571.csv')\n aw1_manifest_path = f'{bucket_name}/{aw1_file_name}'\n aw2_file_name = (\n 'AW2_wgs_data_manifests/RDR_AoU_SEQ_DataManifest_04092021.csv')\n aw2_manifest_path = f'{bucket_name}/{aw2_file_name}'\n gen_set = self.data_generator.create_database_genomic_set(\n genomicSetName='.', genomicSetCriteria='.', genomicSetVersion=1)\n aw1_job_run = self.data_generator.create_database_genomic_job_run(jobId\n =GenomicJob.AW1_MANIFEST, startTime=clock.CLOCK.now(), endTime=\n clock.CLOCK.now(), runResult=GenomicSubProcessResult.SUCCESS)\n aw2_job_run = self.data_generator.create_database_genomic_job_run(jobId\n =GenomicJob.METRICS_INGESTION, startTime=clock.CLOCK.now(),\n endTime=clock.CLOCK.now(), runResult=GenomicSubProcessResult.\n SUCCESS)\n with GenomicJobController(GenomicJob.RETRY_MANIFEST_INGESTIONS\n ) as controller:\n controller.retry_manifest_ingestions()\n job_run = self.job_run_dao.get(3)\n self.assertEqual(job_run.jobId, GenomicJob.RETRY_MANIFEST_INGESTIONS)\n self.assertEqual(job_run.runStatus, GenomicSubProcessStatus.COMPLETED)\n self.assertEqual(job_run.runResult, GenomicSubProcessResult.NO_FILES)\n self.assertEqual(mock_cloud_task.call_count, 0)\n self.assertFalse(mock_cloud_task.call_count)\n self.data_generator.create_database_genomic_aw1_raw(file_path=\n aw1_manifest_path, package_id='PKG-2104-026571', biobank_id=\n 'A10001')\n self.data_generator.create_database_genomic_aw2_raw(file_path=\n aw2_manifest_path, biobank_id='A10001', sample_id='100001',\n biobankidsampleid='A10001_100001')\n aw1_manifest_file = (self.data_generator.\n create_database_genomic_manifest_file(created=clock.CLOCK.now(),\n modified=clock.CLOCK.now(), uploadDate=clock.CLOCK.now(),\n manifestTypeId=GenomicManifestTypes.AW1, filePath=\n aw1_manifest_path, fileName=aw1_file_name, bucketName=\n bucket_name, recordCount=1, rdrProcessingComplete=1,\n rdrProcessingCompleteDate=clock.CLOCK.now()))\n aw2_manifest_file = (self.data_generator.\n create_database_genomic_manifest_file(created=clock.CLOCK.now(),\n modified=clock.CLOCK.now(), uploadDate=clock.CLOCK.now(),\n manifestTypeId=GenomicManifestTypes.AW2, filePath=\n aw2_manifest_path, fileName=aw2_file_name, bucketName=\n bucket_name, recordCount=1, rdrProcessingComplete=1,\n rdrProcessingCompleteDate=clock.CLOCK.now()))\n aw1_file_processed = (self.data_generator.\n create_database_genomic_file_processed(runId=aw1_job_run.id,\n startTime=clock.CLOCK.now(), genomicManifestFileId=\n aw1_manifest_file.id, filePath=f'/{aw1_manifest_path}',\n bucketName=bucket_name, fileName=aw1_file_name))\n aw2_file_processed = (self.data_generator.\n create_database_genomic_file_processed(runId=aw2_job_run.id,\n startTime=clock.CLOCK.now(), genomicManifestFileId=\n aw2_manifest_file.id, filePath=f'/{aw2_manifest_path}',\n bucketName=bucket_name, fileName=aw2_file_name))\n gen_member = 
self.data_generator.create_database_genomic_set_member(\n genomicSetId=gen_set.id, biobankId='100153482', sampleId=\n '21042005280', genomeType='aou_wgs', genomicWorkflowState=\n GenomicWorkflowState.AW1, aw1FileProcessedId=aw1_file_processed.id)\n self.data_generator.create_database_genomic_gc_validation_metrics(\n genomicSetMemberId=gen_member.id, genomicFileProcessedId=\n aw2_file_processed.id)\n with GenomicJobController(GenomicJob.RETRY_MANIFEST_INGESTIONS\n ) as controller:\n controller.retry_manifest_ingestions()\n job_run = self.job_run_dao.get(4)\n self.assertEqual(job_run.jobId, GenomicJob.RETRY_MANIFEST_INGESTIONS)\n self.assertEqual(job_run.runStatus, GenomicSubProcessStatus.COMPLETED)\n self.assertEqual(job_run.runResult, GenomicSubProcessResult.NO_FILES)\n self.assertEqual(mock_cloud_task.call_count, 0)\n self.assertFalse(mock_cloud_task.call_count)\n with self.member_dao.session() as session:\n session.query(GenomicGCValidationMetrics).delete()\n session.query(GenomicSetMember).delete()\n with GenomicJobController(GenomicJob.RETRY_MANIFEST_INGESTIONS\n ) as controller:\n controller.retry_manifest_ingestions()\n job_run = self.job_run_dao.get(5)\n self.assertEqual(job_run.jobId, GenomicJob.RETRY_MANIFEST_INGESTIONS)\n self.assertEqual(job_run.runStatus, GenomicSubProcessStatus.COMPLETED)\n self.assertEqual(job_run.runResult, GenomicSubProcessResult.SUCCESS)\n self.assertEqual(mock_cloud_task.call_count, 2)\n self.assertTrue(mock_cloud_task.call_count)\n call_args = mock_cloud_task.call_args_list\n self.assertEqual(len(call_args), 2)\n cloud_task_endpoint = ['ingest_aw1_manifest_task',\n 'ingest_aw2_manifest_task']\n mock_endpoint = [obj[0][1] for obj in call_args]\n self.assertTrue(all(obj for obj in mock_endpoint if obj ==\n cloud_task_endpoint))\n mock_buckets = set([obj[0][0]['bucket_name'] for obj in call_args])\n self.assertTrue(len(mock_buckets), 1)\n self.assertTrue(list(mock_buckets)[0] == bucket_name)\n\n def test_calculate_informing_loop_ready_flags(self):\n num_participants = 4\n gen_set = self.data_generator.create_database_genomic_set(\n genomicSetName='.', genomicSetCriteria='.', genomicSetVersion=1)\n for num in range(num_participants):\n plus_num = clock.CLOCK.now() + datetime.timedelta(minutes=num)\n plus_num = plus_num.replace(microsecond=0)\n with FakeClock(plus_num):\n summary = (self.data_generator.\n create_database_participant_summary(\n consentForStudyEnrollment=1, consentForGenomicsROR=1))\n stored_sample = (self.data_generator.\n create_database_biobank_stored_sample(biobankId=summary\n .biobankId, biobankOrderIdentifier=self.fake.pyint()))\n collection_site = self.data_generator.create_database_site(\n siteType='Clinic')\n order = self.data_generator.create_database_biobank_order(\n collectedSiteId=collection_site.siteId, participantId=\n summary.participantId, finalizedTime=plus_num)\n self.data_generator.create_database_biobank_order_identifier(\n value=stored_sample.biobankOrderIdentifier,\n biobankOrderId=order.biobankOrderId, system='1')\n self.data_generator.create_database_biobank_order_identifier(\n value=stored_sample.biobankOrderIdentifier,\n biobankOrderId=order.biobankOrderId, system='2')\n member = (self.data_generator.\n create_database_genomic_set_member(genomicSetId=gen_set\n .id, participantId=summary.participantId, genomeType=\n config.GENOME_TYPE_WGS, qcStatus=GenomicQcStatus.PASS,\n gcManifestSampleSource='Whole Blood', collectionTubeId=\n stored_sample.biobankStoredSampleId))\n 
self.data_generator.create_database_genomic_gc_validation_metrics(\n genomicSetMemberId=member.id, sexConcordance='True',\n drcFpConcordance='Pass', drcSexConcordance='Pass',\n processingStatus='Pass')\n members_for_ready_loop = (self.member_dao.\n get_members_for_informing_loop_ready())\n self.assertEqual(len(members_for_ready_loop), num_participants)\n current_set_members = self.member_dao.get_all()\n self.assertTrue(all(obj.informingLoopReadyFlag == 0 for obj in\n current_set_members))\n self.assertTrue(all(obj.informingLoopReadyFlagModified is None for\n obj in current_set_members))\n with GenomicJobController(GenomicJob.CALCULATE_INFORMING_LOOP_READY\n ) as controller:\n controller.calculate_informing_loop_ready_flags()\n members_for_ready_loop = (self.member_dao.\n get_members_for_informing_loop_ready())\n self.assertEqual(len(members_for_ready_loop), num_participants)\n calculation_limit = 2\n config.override_setting(config.CALCULATE_READY_FLAG_LIMIT, [\n calculation_limit])\n with GenomicJobController(GenomicJob.CALCULATE_INFORMING_LOOP_READY\n ) as controller:\n controller.calculate_informing_loop_ready_flags()\n current_set_members = self.member_dao.get_all()\n self.assertTrue(any(obj.informingLoopReadyFlag == 1 for obj in\n current_set_members))\n self.assertTrue(any(obj.informingLoopReadyFlagModified is not None for\n obj in current_set_members))\n current_loops_set = [obj for obj in current_set_members if obj.\n informingLoopReadyFlag == 1 and obj.\n informingLoopReadyFlagModified is not None]\n self.assertEqual(len(current_loops_set), calculation_limit)\n members_for_ready_loop = (self.member_dao.\n get_members_for_informing_loop_ready())\n self.assertEqual(len(members_for_ready_loop), num_participants // 2)\n with GenomicJobController(GenomicJob.CALCULATE_INFORMING_LOOP_READY\n ) as controller:\n controller.calculate_informing_loop_ready_flags()\n current_set_members = self.member_dao.get_all()\n self.assertTrue(all(obj.informingLoopReadyFlag == 1 for obj in\n current_set_members))\n self.assertTrue(all(obj.informingLoopReadyFlagModified is not None for\n obj in current_set_members))\n members_for_ready_loop = (self.member_dao.\n get_members_for_informing_loop_ready())\n self.assertEqual(len(members_for_ready_loop), 0)\n\n @mock.patch('rdr_service.services.email_service.EmailService.send_email')\n def test_getting_results_withdrawn(self, email_mock):\n num_participants = 4\n result_withdrawal_dao = GenomicResultWithdrawalsDao()\n gen_set = self.data_generator.create_database_genomic_set(\n genomicSetName='.', genomicSetCriteria='.', genomicSetVersion=1)\n gen_job_run = self.data_generator.create_database_genomic_job_run(jobId\n =GenomicJob.AW1_MANIFEST, startTime=clock.CLOCK.now(),\n runResult=GenomicSubProcessResult.SUCCESS)\n pids = []\n for num in range(num_participants):\n summary = self.data_generator.create_database_participant_summary(\n consentForStudyEnrollment=1, consentForGenomicsROR=1,\n withdrawalStatus=WithdrawalStatus.EARLY_OUT)\n self.data_generator.create_database_genomic_set_member(genomicSetId\n =gen_set.id, participantId=summary.participantId,\n genomeType=config.GENOME_TYPE_ARRAY, gemA1ManifestJobRunId=\n gen_job_run.id if num % 2 == 0 else None)\n self.data_generator.create_database_genomic_set_member(genomicSetId\n =gen_set.id, participantId=summary.participantId,\n genomeType=config.GENOME_TYPE_WGS, cvlW1ilHdrJobRunId=\n gen_job_run.id)\n pids.append(summary.participantId)\n config.override_setting(config.RDR_GENOMICS_NOTIFICATION_EMAIL,\n '[email 
protected]')\n with GenomicJobController(GenomicJob.RESULTS_PIPELINE_WITHDRAWALS\n ) as controller:\n controller.check_results_withdrawals()\n self.assertEqual(email_mock.call_count, 2)\n call_args = email_mock.call_args_list\n self.assertTrue(any('GEM' in call.args[0].subject for call in\n call_args))\n self.assertTrue(any('HEALTH' in call.args[0].subject for call in\n call_args))\n job_runs = self.job_run_dao.get_all()\n current_job_run = list(filter(lambda x: x.jobId == GenomicJob.\n RESULTS_PIPELINE_WITHDRAWALS, job_runs))[0]\n self.assertTrue(current_job_run.runResult ==\n GenomicSubProcessResult.SUCCESS)\n all_withdrawal_records = result_withdrawal_dao.get_all()\n self.assertTrue(len(all_withdrawal_records) == len(pids))\n self.assertTrue(all(obj.participant_id in pids for obj in\n all_withdrawal_records))\n array_results = list(filter(lambda x: x.array_results == 1,\n all_withdrawal_records))\n self.assertTrue(len(array_results), 2)\n cvl_results = list(filter(lambda x: x.cvl_results == 1,\n all_withdrawal_records))\n self.assertTrue(len(cvl_results), num_participants)\n with GenomicJobController(GenomicJob.RESULTS_PIPELINE_WITHDRAWALS\n ) as controller:\n controller.check_results_withdrawals()\n self.assertEqual(email_mock.call_count, 2)\n job_runs = self.job_run_dao.get_all()\n current_job_run = list(filter(lambda x: x.jobId == GenomicJob.\n RESULTS_PIPELINE_WITHDRAWALS, job_runs))[1]\n self.assertTrue(current_job_run.runResult ==\n GenomicSubProcessResult.NO_RESULTS)\n\n def test_gem_results_to_report_state(self):\n num_participants = 8\n gen_set = self.data_generator.create_database_genomic_set(\n genomicSetName='.', genomicSetCriteria='.', genomicSetVersion=1)\n gem_a2_job_run = self.data_generator.create_database_genomic_job_run(\n jobId=GenomicJob.GEM_A2_MANIFEST, startTime=clock.CLOCK.now(),\n runResult=GenomicSubProcessResult.SUCCESS)\n pids_to_update, member_ids = [], []\n for num in range(num_participants):\n summary = self.data_generator.create_database_participant_summary(\n consentForStudyEnrollment=1, consentForGenomicsROR=1,\n withdrawalStatus=WithdrawalStatus.EARLY_OUT)\n member = self.data_generator.create_database_genomic_set_member(\n genomicSetId=gen_set.id, participantId=summary.\n participantId, genomeType=config.GENOME_TYPE_ARRAY)\n if num % 2 == 0:\n member_ids.append(member.id)\n pids_to_update.append(summary.participantId)\n with GenomicJobController(GenomicJob.GEM_RESULT_REPORTS) as controller:\n controller.gem_results_to_report_state()\n current_job_runs = self.job_run_dao.get_all()\n self.assertEqual(len(current_job_runs), 2)\n current_job_run = list(filter(lambda x: x.jobId == GenomicJob.\n GEM_RESULT_REPORTS, current_job_runs))[0]\n self.assertTrue(current_job_run.runResult ==\n GenomicSubProcessResult.NO_RESULTS)\n current_members = self.member_dao.get_all()\n for member in current_members:\n if member.participantId in pids_to_update:\n member.gemA2ManifestJobRunId = gem_a2_job_run.id\n member.genomicWorkflowState = (GenomicWorkflowState.\n GEM_RPT_READY)\n self.member_dao.update(member)\n with GenomicJobController(GenomicJob.GEM_RESULT_REPORTS) as controller:\n controller.gem_results_to_report_state()\n current_job_runs = self.job_run_dao.get_all()\n self.assertEqual(len(current_job_runs), 3)\n current_job_run = list(filter(lambda x: x.jobId == GenomicJob.\n GEM_RESULT_REPORTS, current_job_runs))[1]\n self.assertTrue(current_job_run.runResult ==\n GenomicSubProcessResult.SUCCESS)\n current_gem_report_states = self.report_state_dao.get_all()\n 
self.assertEqual(len(current_gem_report_states), len(pids_to_update))\n self.assertTrue(all(obj.event_type == 'result_ready' for obj in\n current_gem_report_states))\n self.assertTrue(all(obj.event_authored_time is not None for obj in\n current_gem_report_states))\n self.assertTrue(all(obj.module == 'gem' for obj in\n current_gem_report_states))\n self.assertTrue(all(obj.genomic_report_state == GenomicReportState.\n GEM_RPT_READY for obj in current_gem_report_states))\n self.assertTrue(all(obj.genomic_report_state_str ==\n GenomicReportState.GEM_RPT_READY.name for obj in\n current_gem_report_states))\n self.assertTrue(all(obj.genomic_set_member_id in member_ids for obj in\n current_gem_report_states))\n with GenomicJobController(GenomicJob.GEM_RESULT_REPORTS) as controller:\n controller.gem_results_to_report_state()\n current_job_runs = self.job_run_dao.get_all()\n self.assertEqual(len(current_job_runs), 4)\n current_job_run = list(filter(lambda x: x.jobId == GenomicJob.\n GEM_RESULT_REPORTS, current_job_runs))[2]\n self.assertTrue(current_job_run.runResult ==\n GenomicSubProcessResult.NO_RESULTS)\n self.clear_table_after_test('genomic_member_report_state')\n\n def test_reconcile_informing_loop(self):\n event_dao = UserEventMetricsDao()\n event_dao.truncate()\n il_dao = GenomicInformingLoopDao()\n for pid in range(8):\n self.data_generator.create_database_participant(participantId=1 +\n pid, biobankId=1 + pid)\n self.data_generator.create_database_genomic_job_run(jobId=\n GenomicJob.METRICS_FILE_INGEST, startTime=clock.CLOCK.now())\n self.data_generator.create_database_genomic_set(genomicSetName=\n 'test', genomicSetCriteria='.', genomicSetVersion=1)\n for b in ['aou_array', 'aou_wgs']:\n for i in range(1, 9):\n self.data_generator.create_database_genomic_set_member(\n participantId=i, genomicSetId=1, biobankId=i,\n collectionTubeId=100 + i, sampleId=10 + i, genomeType=b)\n events = ['gem.informing_loop.started',\n 'gem.informing_loop.screen8_no',\n 'gem.informing_loop.screen8_yes', 'hdr.informing_loop.started',\n 'gem.informing_loop.screen3', 'pgx.informing_loop.screen8_no',\n 'hdr.informing_loop.screen10_no']\n for p in range(4):\n for i in range(len(events)):\n self.data_generator.create_database_genomic_user_event_metrics(\n created=clock.CLOCK.now(), modified=clock.CLOCK.now(),\n participant_id=p + 1, created_at=datetime.datetime(2021,\n 12, 29, 0) + datetime.timedelta(hours=i), event_name=\n events[i], run_id=1, ignore_flag=0)\n decisions = [None, 'no', 'yes']\n for p in range(3):\n for i in range(2):\n self.data_generator.create_database_genomic_informing_loop(\n message_record_id=i, event_type=\n 'informing_loop_started' if i == 0 else\n 'informing_loop_decision', module_type='gem',\n participant_id=p + 1, decision_value=decisions[i],\n sample_id=100 + p, event_authored_time=datetime.\n datetime(2021, 12, 29, 0) + datetime.timedelta(hours=i))\n self.data_generator.create_database_genomic_user_event_metrics(created\n =clock.CLOCK.now(), modified=clock.CLOCK.now(), participant_id=\n 6, created_at=datetime.datetime(2021, 12, 29, 0), event_name=\n 'gem.informing_loop.screen8_yes', run_id=1, ignore_flag=0)\n genomic_pipeline.reconcile_informing_loop_responses()\n pid_list = [1, 2, 3, 6]\n new_il_values = il_dao.get_latest_il_for_pids(pid_list=pid_list,\n module='gem')\n for value in new_il_values:\n self.assertEqual('yes', value.decision_value)\n pid_list = [1, 2, 3, 4]\n for module in ['hdr', 'pgx']:\n new_il_values = il_dao.get_latest_il_for_pids(pid_list=pid_list,\n module=module)\n 
for value in new_il_values:\n self.assertEqual('no', value.decision_value)\n self.assertIsNotNone(value.created_from_metric_id)\n\n def test_reconcile_message_broker_results_ready(self):\n self.data_generator.create_database_genomic_set(genomicSetName=\n 'test', genomicSetCriteria='.', genomicSetVersion=1)\n self.data_generator.create_database_genomic_job_run(jobId=\n GenomicJob.METRICS_FILE_INGEST, startTime=clock.CLOCK.now())\n for pid in range(7):\n self.data_generator.create_database_participant(participantId=1 +\n pid, biobankId=1 + pid)\n for i in range(1, 6):\n self.data_generator.create_database_genomic_set_member(\n participantId=i, genomicSetId=1, biobankId=i,\n collectionTubeId=100 + i, sampleId=10 + i, genomeType='aou_wgs'\n )\n if i < 4:\n self.data_generator.create_database_genomic_user_event_metrics(\n participant_id=i, created_at=datetime.datetime(2022, 10,\n 6, 0), event_name='pgx.result_ready', run_id=1)\n if i == 4:\n self.data_generator.create_database_genomic_user_event_metrics(\n participant_id=i, created_at=datetime.datetime(2022, 10,\n 6, 0), event_name='hdr.result_ready.informative', run_id=1)\n if i == 5:\n self.data_generator.create_database_genomic_user_event_metrics(\n participant_id=i, created_at=datetime.datetime(2022, 10,\n 6, 0), event_name='hdr.result_ready.uninformative',\n run_id=1)\n genomic_cvl_pipeline.reconcile_message_broker_results_ready()\n report_state_dao = GenomicMemberReportStateDao()\n states = report_state_dao.get_all()\n self.assertEqual(5, len(states))\n pgx_records = [rec for rec in states if rec.module == 'pgx_v1']\n hdr_record_uninf = [rec for rec in states if rec.\n genomic_report_state == GenomicReportState.HDR_RPT_UNINFORMATIVE][0\n ]\n hdr_record_pos = [rec for rec in states if rec.genomic_report_state ==\n GenomicReportState.HDR_RPT_POSITIVE][0]\n for pgx_record in pgx_records:\n self.assertEqual(GenomicReportState.PGX_RPT_READY, pgx_record.\n genomic_report_state)\n self.assertEqual('PGX_RPT_READY', pgx_record.\n genomic_report_state_str)\n self.assertEqual(int(pgx_record.sample_id), pgx_record.\n participant_id + 10)\n self.assertEqual('result_ready', pgx_record.event_type)\n self.assertEqual(datetime.datetime(2022, 10, 6, 0), pgx_record.\n event_authored_time)\n self.assertIsNotNone(pgx_record.created_from_metric_id)\n self.assertEqual('HDR_RPT_UNINFORMATIVE', hdr_record_uninf.\n genomic_report_state_str)\n self.assertEqual(int(hdr_record_uninf.sample_id), hdr_record_uninf.\n participant_id + 10)\n self.assertEqual('result_ready', hdr_record_uninf.event_type)\n self.assertEqual(datetime.datetime(2022, 10, 6, 0),\n hdr_record_uninf.event_authored_time)\n self.assertIsNotNone(hdr_record_uninf.created_from_metric_id)\n self.assertEqual('HDR_RPT_POSITIVE', hdr_record_pos.\n genomic_report_state_str)\n self.assertEqual(int(hdr_record_pos.sample_id), hdr_record_pos.\n participant_id + 10)\n self.assertEqual('result_ready', hdr_record_pos.event_type)\n self.assertEqual(datetime.datetime(2022, 10, 6, 0), hdr_record_pos.\n event_authored_time)\n self.assertIsNotNone(hdr_record_pos.created_from_metric_id)\n\n def test_reconcile_message_broker_results_viewed(self):\n self.data_generator.create_database_genomic_set(genomicSetName=\n 'test', genomicSetCriteria='.', genomicSetVersion=1)\n self.data_generator.create_database_genomic_job_run(jobId=\n GenomicJob.METRICS_FILE_INGEST, startTime=clock.CLOCK.now())\n for pid in range(3):\n self.data_generator.create_database_participant(participantId=1 +\n pid, biobankId=1 + pid)\n for i in range(1, 
3):\n self.data_generator.create_database_genomic_set_member(\n participantId=i, genomicSetId=1, biobankId=i,\n collectionTubeId=100 + i, sampleId=10 + i, genomeType='aou_wgs'\n )\n if i == 1:\n self.data_generator.create_database_genomic_user_event_metrics(\n participant_id=i, created_at=datetime.datetime(2022, 10,\n 6, 0), event_name='pgx.opened_at', run_id=1)\n if i == 2:\n self.data_generator.create_database_genomic_user_event_metrics(\n participant_id=i, created_at=datetime.datetime(2022, 10,\n 6, 0), event_name='hdr.opened_at', run_id=1)\n genomic_cvl_pipeline.reconcile_message_broker_results_viewed()\n result_viewed_dao = GenomicResultViewedDao()\n results = result_viewed_dao.get_all()\n self.assertEqual(2, len(results))\n for record in results:\n if record.participant_id == 1:\n self.assertEqual('pgx_v1', record.module_type)\n else:\n self.assertEqual('hdr_v1', record.module_type)\n self.assertEqual(int(record.sample_id), record.participant_id + 10)\n self.assertEqual('result_viewed', record.event_type)\n self.assertEqual(datetime.datetime(2022, 10, 6, 0), record.\n first_viewed)\n self.assertIsNotNone(record.created_from_metric_id)\n\n def test_ingest_appointment_metrics_file(self):\n test_file = 'Genomic-Metrics-File-Appointment-Events-Test.json'\n bucket_name = 'test_bucket'\n sub_folder = 'appointment_events'\n pids = []\n for _ in range(4):\n summary = self.data_generator.create_database_participant_summary()\n pids.append(summary.participantId)\n test_file_path = f'{bucket_name}/{sub_folder}/{test_file}'\n appointment_data = test_data.load_test_data_json(\n 'Genomic-Metrics-File-Appointment-Events-Test.json')\n appointment_data_str = json.dumps(appointment_data, indent=4)\n with open_cloud_file(test_file_path, mode='wb') as cloud_file:\n cloud_file.write(appointment_data_str.encode('utf-8'))\n with GenomicJobController(GenomicJob.APPOINTMENT_METRICS_FILE_INGEST\n ) as controller:\n controller.ingest_appointment_metrics_file(file_path=test_file_path\n )\n all_metrics = self.appointment_metrics_dao.get_all()\n self.assertEqual(len(all_metrics), 5)\n self.assertTrue(all(obj.participant_id in pids for obj in all_metrics))\n self.assertTrue(all(obj.file_path == test_file_path for obj in\n all_metrics))\n self.assertTrue(all(obj.appointment_event is not None for obj in\n all_metrics))\n self.assertTrue(all(obj.created is not None for obj in all_metrics))\n self.assertTrue(all(obj.modified is not None for obj in all_metrics))\n self.assertTrue(all(obj.module_type is not None for obj in all_metrics)\n )\n self.assertTrue(all(obj.event_authored_time is not None for obj in\n all_metrics))\n self.assertTrue(all(obj.event_type is not None for obj in all_metrics))\n current_job_runs = self.job_run_dao.get_all()\n self.assertEqual(len(current_job_runs), 1)\n current_job_run = current_job_runs[0]\n self.assertTrue(current_job_run.jobId == GenomicJob.\n APPOINTMENT_METRICS_FILE_INGEST)\n self.assertTrue(current_job_run.runResult ==\n GenomicSubProcessResult.SUCCESS)\n self.clear_table_after_test('genomic_appointment_event_metrics')\n\n def test_reconcile_appointments_with_metrics(self):\n fake_date = parser.parse('2020-05-29T08:00:01-05:00')\n for num in range(4):\n summary = self.data_generator.create_database_participant_summary()\n missing_json = {'event': 'appointment_updated',\n 'eventAuthoredTime': '2022-09-16T17:18:38Z',\n 'participantId': f'P{summary.participantId}', 'messageBody':\n {'module_type': 'hdr', 'appointment_timestamp':\n '2022-09-19T19:30:00+00:00', 'id': 55,\n 
'appointment_timezone': 'America/Los_Angeles', 'location':\n 'CA', 'contact_number': '18043704252', 'language': 'en',\n 'source': 'Color'}}\n if num % 2 == 0:\n self.data_generator.create_database_genomic_appointment(\n message_record_id=num, appointment_id=num, event_type=\n 'appointment_scheduled', module_type='hdr',\n participant_id=summary.participantId,\n event_authored_time=fake_date, source='Color',\n appointment_timestamp=format_datetime(clock.CLOCK.now()\n ), appointment_timezone='America/Los_Angeles', location\n ='123 address st', contact_number='17348675309',\n language='en')\n self.data_generator.create_database_genomic_appointment_metric(\n participant_id=summary.participantId, appointment_event=\n json.dumps(missing_json, indent=4) if num % 2 != 0 else\n 'foo', file_path='test_file_path', module_type='hdr',\n event_authored_time=fake_date, event_type=\n 'appointment_updated' if num % 2 != 0 else\n 'appointment_scheduled')\n current_events = self.appointment_event_dao.get_all()\n self.assertEqual(len(current_events), 2)\n current_metrics = self.appointment_metrics_dao.get_all()\n self.assertEqual(len(current_metrics), 4)\n self.assertTrue(all(obj.reconcile_job_run_id is None for obj in\n current_metrics))\n with GenomicJobController(GenomicJob.APPOINTMENT_METRICS_RECONCILE\n ) as controller:\n controller.reconcile_appointment_events_from_metrics()\n job_run = self.job_run_dao.get_all()\n self.assertEqual(len(job_run), 1)\n self.assertTrue(job_run[0].jobId == GenomicJob.\n APPOINTMENT_METRICS_RECONCILE)\n current_events = self.appointment_event_dao.get_all()\n self.assertEqual(len(current_events), 4)\n scheduled = list(filter(lambda x: x.event_type ==\n 'appointment_scheduled', current_events))\n self.assertEqual(len(scheduled), 2)\n self.assertTrue(all(obj.created_from_metric_id is None for obj in\n scheduled))\n updated = list(filter(lambda x: x.event_type ==\n 'appointment_updated', current_events))\n self.assertEqual(len(updated), 2)\n self.assertTrue(all(obj.created_from_metric_id is not None for obj in\n updated))\n current_metrics = self.appointment_metrics_dao.get_all()\n self.assertEqual(len(current_metrics), 4)\n self.assertTrue(all(obj.reconcile_job_run_id is not None for obj in\n current_metrics))\n self.assertTrue(all(obj.reconcile_job_run_id == job_run[0].id for\n obj in current_metrics))\n self.clear_table_after_test('genomic_appointment_event_metrics')\n\n @mock.patch('rdr_service.services.email_service.EmailService.send_email')\n def test_check_appointments_gror_changed(self, email_mock):\n fake_date = parser.parse('2022-09-01T13:43:23')\n notified_dao = GenomicAppointmentEventNotifiedDao()\n config.override_setting(config.GENOMIC_COLOR_PM_EMAIL, [\n '[email protected]'])\n num_participants = 4\n for num in range(num_participants):\n gror = num if num > 1 else 1\n summary = self.data_generator.create_database_participant_summary(\n consentForStudyEnrollment=1, consentForGenomicsROR=gror)\n self.data_generator.create_database_genomic_appointment(\n message_record_id=num, appointment_id=num, event_type=\n 'appointment_scheduled', module_type='hdr', participant_id=\n summary.participantId, event_authored_time=fake_date,\n source='Color', appointment_timestamp=format_datetime(clock\n .CLOCK.now()), appointment_timezone='America/Los_Angeles',\n location='123 address st', contact_number='17348675309',\n language='en')\n changed_ppts = (self.appointment_event_dao.\n get_appointments_gror_changed())\n self.assertEqual(2, len(changed_ppts))\n with 
GenomicJobController(GenomicJob.CHECK_APPOINTMENT_GROR_CHANGED\n ) as controller:\n controller.check_appointments_gror_changed()\n self.assertEqual(email_mock.call_count, 1)\n notified_appointments = notified_dao.get_all()\n self.assertEqual(2, len(notified_appointments))\n summary = self.data_generator.create_database_participant_summary(\n consentForStudyEnrollment=1, consentForGenomicsROR=2)\n self.data_generator.create_database_genomic_appointment(\n message_record_id=5, appointment_id=5, event_type=\n 'appointment_scheduled', module_type='hdr', participant_id=\n summary.participantId, event_authored_time=fake_date, source=\n 'Color', appointment_timestamp=format_datetime(clock.CLOCK.now(\n )), appointment_timezone='America/Los_Angeles', location=\n '123 address st', contact_number='17348675309', language='en')\n changed_ppts = (self.appointment_event_dao.\n get_appointments_gror_changed())\n self.assertEqual(1, len(changed_ppts))\n\n @mock.patch('rdr_service.services.email_service.EmailService.send_email')\n def test_check_gcr_14day_escalation(self, email_mock):\n fake_date = parser.parse('2022-09-01T13:43:23')\n fake_date2 = parser.parse('2022-09-02T14:14:00')\n fake_date3 = parser.parse('2022-09-03T15:15:00')\n config.override_setting(config.GENOMIC_GCR_ESCALATION_EMAILS, [\n '[email protected]'])\n self.data_generator.create_database_genomic_set(genomicSetName=\n 'test', genomicSetCriteria='.', genomicSetVersion=1)\n pids = []\n for _ in range(6):\n summary = self.data_generator.create_database_participant_summary(\n consentForStudyEnrollment=1, consentForGenomicsROR=1)\n set_member = (self.data_generator.\n create_database_genomic_set_member(participantId=summary.\n participantId, genomicSetId=1, biobankId=1001,\n collectionTubeId=100, sampleId=10, genomeType='aou_wgs'))\n self.data_generator.create_database_genomic_member_report_state(\n participant_id=summary.participantId, genomic_report_state=\n GenomicReportState.HDR_RPT_POSITIVE, genomic_set_member_id=\n set_member.id, module='hdr_v1', event_authored_time=fake_date)\n pids.append(summary.participantId)\n self.data_generator.create_database_genomic_appointment(\n message_record_id=101, appointment_id=102, event_type=\n 'appointment_scheduled', module_type='hdr', participant_id=pids\n [0], event_authored_time=fake_date, source='Color',\n appointment_timestamp=format_datetime(clock.CLOCK.now()),\n appointment_timezone='America/Los_Angeles', location=\n '123 address st', contact_number='17348675309', language='en')\n self.data_generator.create_database_genomic_appointment(\n message_record_id=102, appointment_id=103, event_type=\n 'appointment_completed', module_type='hdr', participant_id=pids\n [1], event_authored_time=fake_date, source='Color',\n appointment_timestamp=fake_date, appointment_timezone=\n 'America/Los_Angeles', location='123 address st',\n contact_number='17348675309', language='en')\n self.data_generator.create_database_genomic_appointment(\n message_record_id=103, appointment_id=104, event_type=\n 'appointment_scheduled', module_type='hdr', participant_id=pids\n [2], event_authored_time=fake_date2, source='Color',\n appointment_timestamp=format_datetime(clock.CLOCK.now()),\n appointment_timezone='America/Los_Angeles', location=\n '123 address st', contact_number='17348675309', language='en')\n self.data_generator.create_database_genomic_appointment(\n message_record_id=104, appointment_id=104, event_type=\n 'appointment_cancelled', module_type='hdr', participant_id=pids\n [2], event_authored_time=fake_date3, 
source='Color',\n appointment_timestamp=format_datetime(clock.CLOCK.now()),\n appointment_timezone='America/Los_Angeles', location=\n '123 address st', contact_number='17348675309', language='en')\n notified_dao = GenomicDefaultBaseDao(model_type=\n GenomicGCROutreachEscalationNotified)\n notified_dao.insert_bulk([{'participant_id': pids[4], 'created':\n clock.CLOCK.now(), 'modified': clock.CLOCK.now(),\n 'message_sent': True}, {'participant_id': pids[5], 'created':\n clock.CLOCK.now(), 'modified': clock.CLOCK.now(),\n 'message_sent': False}])\n with clock.FakeClock(parser.parse('2022-11-1T05:15:00')):\n escalated_participants = (self.report_state_dao.\n get_hdr_result_positive_no_appointment(num_days=14))\n results = [pid[0] for pid in escalated_participants]\n self.assertIn(pids[2], results)\n self.assertIn(pids[3], results)\n self.assertIn(pids[5], results)\n self.assertNotIn(pids[0], results)\n self.assertNotIn(pids[1], results)\n self.assertNotIn(pids[4], results)\n with GenomicJobController(GenomicJob.CHECK_GCR_OUTREACH_ESCALATION\n ) as controller:\n controller.check_gcr_escalation(controller.job_id)\n self.assertEqual(email_mock.call_count, 3)\n self.assertEqual(email_mock.call_args.args[0].subject,\n 'GCR Outreach 14 Day Escalation')\n self.clear_table_after_test('genomic_gcr_outreach_escalation_notified')\n <mask token>\n\n @mock.patch('rdr_service.services.email_service.EmailService.send_email')\n def test_check_gcr_ce_escalation(self, email_mock):\n fake_date = parser.parse('2022-09-01T13:43:23')\n fake_date2 = parser.parse('2022-09-02T14:14:00')\n fake_date3 = parser.parse('2022-09-03T15:15:00')\n config.override_setting(config.GENOMIC_GCR_ESCALATION_EMAILS, [\n '[email protected]'])\n self.data_generator.create_database_genomic_set(genomicSetName=\n 'test', genomicSetCriteria='.', genomicSetVersion=1)\n pids = []\n for _ in range(6):\n summary = self.data_generator.create_database_participant_summary(\n consentForStudyEnrollment=1, consentForGenomicsROR=1)\n set_member = (self.data_generator.\n create_database_genomic_set_member(participantId=summary.\n participantId, genomicSetId=1, biobankId=1001,\n collectionTubeId=100, sampleId=10, genomeType='aou_wgs',\n participantOrigin='careevolution'))\n self.data_generator.create_database_genomic_member_report_state(\n participant_id=summary.participantId, genomic_report_state=\n GenomicReportState.HDR_RPT_POSITIVE, genomic_set_member_id=\n set_member.id, module='hdr_v1', event_authored_time=fake_date)\n pids.append(summary.participantId)\n self.data_generator.create_database_genomic_appointment(\n message_record_id=101, appointment_id=102, event_type=\n 'appointment_scheduled', module_type='hdr', participant_id=pids\n [0], event_authored_time=fake_date, source='Color',\n appointment_timestamp=format_datetime(clock.CLOCK.now()),\n appointment_timezone='America/Los_Angeles', location=\n '123 address st', contact_number='17348675309', language='en')\n self.data_generator.create_database_genomic_appointment(\n message_record_id=102, appointment_id=103, event_type=\n 'appointment_completed', module_type='hdr', participant_id=pids\n [1], event_authored_time=fake_date, source='Color',\n appointment_timestamp=fake_date, appointment_timezone=\n 'America/Los_Angeles', location='123 address st',\n contact_number='17348675309', language='en')\n self.data_generator.create_database_genomic_appointment(\n message_record_id=103, appointment_id=104, event_type=\n 'appointment_scheduled', module_type='hdr', participant_id=pids\n [2], 
event_authored_time=fake_date2, source='Color',\n appointment_timestamp=format_datetime(clock.CLOCK.now()),\n appointment_timezone='America/Los_Angeles', location=\n '123 address st', contact_number='17348675309', language='en')\n self.data_generator.create_database_genomic_appointment(\n message_record_id=104, appointment_id=104, event_type=\n 'appointment_cancelled', module_type='hdr', participant_id=pids\n [2], event_authored_time=fake_date3, source='Color',\n appointment_timestamp=format_datetime(clock.CLOCK.now()),\n appointment_timezone='America/Los_Angeles', location=\n '123 address st', contact_number='17348675309', language='en')\n notified_dao = GenomicDefaultBaseDao(model_type=\n GenomicGCROutreachEscalationNotified)\n notified_dao.insert_bulk([{'participant_id': pids[4], 'created':\n clock.CLOCK.now(), 'modified': clock.CLOCK.now(),\n 'message_sent': True}, {'participant_id': pids[5], 'created':\n clock.CLOCK.now(), 'modified': clock.CLOCK.now(),\n 'message_sent': False}])\n with clock.FakeClock(parser.parse('2022-11-1T05:15:00')):\n escalated_participants = (self.report_state_dao.\n get_hdr_result_positive_no_appointment(num_days=30,\n participant_origin='careevolution'))\n results = [pid[0] for pid in escalated_participants]\n self.assertIn(pids[2], results)\n self.assertIn(pids[3], results)\n self.assertIn(pids[5], results)\n self.assertNotIn(pids[0], results)\n self.assertNotIn(pids[1], results)\n self.assertNotIn(pids[4], results)\n with GenomicJobController(GenomicJob.CHECK_GCR_CE_OUTREACH_ESCALATION\n ) as controller:\n controller.check_gcr_escalation(controller.job_id)\n self.assertEqual(email_mock.call_count, 3)\n self.assertEqual(email_mock.call_args.args[0].subject,\n 'GCR Outreach 30 Day Escalation')\n self.clear_table_after_test('genomic_gcr_outreach_escalation_notified')\n\n @mock.patch(\n 'rdr_service.genomic.genomic_job_controller.GenomicJobController.execute_cloud_task'\n )\n def test_execute_auto_generation_from_last_run(self, cloud_task_mock):\n with GenomicJobController(GenomicJob.PR_PR_WORKFLOW) as controller:\n controller.job_result = GenomicSubProcessResult.ERROR\n controller._end_run()\n controller.execute_auto_generation_from_cloud_task()\n last_job_run_status = self.job_run_dao.get_last_run_status_for_job_id(\n job_id=GenomicJob.PR_PR_WORKFLOW)\n self.assertTrue(last_job_run_status is not None)\n self.assertTrue(last_job_run_status[0] == GenomicSubProcessResult.ERROR\n )\n self.assertEqual(cloud_task_mock.called, False)\n self.assertEqual(cloud_task_mock.call_count, 0)\n with GenomicJobController(GenomicJob.PR_PR_WORKFLOW) as controller:\n controller.job_result = GenomicSubProcessResult.SUCCESS\n controller._end_run()\n controller.execute_auto_generation_from_cloud_task()\n last_job_run_status = self.job_run_dao.get_last_run_status_for_job_id(\n job_id=GenomicJob.PR_PR_WORKFLOW)\n self.assertTrue(last_job_run_status is not None)\n self.assertTrue(last_job_run_status[0] == GenomicSubProcessResult.\n SUCCESS)\n self.assertEqual(cloud_task_mock.called, True)\n self.assertTrue(cloud_task_mock.call_args[1].get('payload').get(\n 'manifest_type') == 'p0')\n self.assertTrue(cloud_task_mock.call_args[1].get('task_queue') ==\n 'genomic-generate-manifest')\n all_job_runs = self.job_run_dao.get_all()\n self.assertEqual(len(all_job_runs), 2)\n self.assertTrue(all(obj.runResult in [GenomicSubProcessResult.\n SUCCESS, GenomicSubProcessResult.ERROR] for obj in all_job_runs))\n self.assertTrue(all(obj.jobId == GenomicJob.PR_PR_WORKFLOW for obj in\n all_job_runs))\n",
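Editorial note: the step-4 fragment above ends with a test of execute_auto_generation_from_cloud_task whose assertions imply that the follow-on manifest-generation cloud task is dispatched only when the just-finished job run ended in GenomicSubProcessResult.SUCCESS, with payload manifest_type 'p0' on the 'genomic-generate-manifest' queue. The sketch below is a minimal, hypothetical illustration of that gating pattern inferred from those assertions, not the rdr_service implementation; SubProcessResult, queue_cloud_task, and execute_auto_generation_from_last_run are stand-in names.

# Editorial sketch only -- NOT the rdr_service implementation.
from enum import Enum


class SubProcessResult(Enum):
    SUCCESS = 1
    ERROR = 2


def execute_auto_generation_from_last_run(last_run_result, queue_cloud_task):
    # Mirrors the first half of the test above: an ERROR run queues nothing.
    if last_run_result is not SubProcessResult.SUCCESS:
        return False
    # Mirrors the second half: a SUCCESS run queues the p0 manifest-generation task.
    queue_cloud_task(
        payload={'manifest_type': 'p0'},
        task_queue='genomic-generate-manifest',
    )
    return True


if __name__ == '__main__':
    sent = []
    execute_auto_generation_from_last_run(SubProcessResult.ERROR, lambda **kw: sent.append(kw))
    assert sent == []  # nothing dispatched after an ERROR run
    execute_auto_generation_from_last_run(SubProcessResult.SUCCESS, lambda **kw: sent.append(kw))
    assert sent[0]['task_queue'] == 'genomic-generate-manifest'
    assert sent[0]['payload']['manifest_type'] == 'p0'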
"step-5": "import datetime\nimport json\n\nfrom dateutil import parser\nimport mock\nfrom python_http_client.exceptions import ForbiddenError\n\nfrom rdr_service import clock, config\nfrom rdr_service.api_util import open_cloud_file\nfrom rdr_service.clock import FakeClock\nfrom rdr_service.dao.database_utils import format_datetime\nfrom rdr_service.dao.genomics_dao import GenomicGcDataFileDao, GenomicGCValidationMetricsDao, GenomicIncidentDao, \\\n GenomicSetMemberDao, UserEventMetricsDao, GenomicJobRunDao, GenomicResultWithdrawalsDao, \\\n GenomicMemberReportStateDao, GenomicAppointmentEventMetricsDao, GenomicAppointmentEventDao, GenomicResultViewedDao, \\\n GenomicInformingLoopDao, GenomicAppointmentEventNotifiedDao, GenomicDefaultBaseDao\nfrom rdr_service.dao.message_broker_dao import MessageBrokenEventDataDao\nfrom rdr_service.genomic_enums import GenomicIncidentCode, GenomicJob, GenomicWorkflowState, GenomicSubProcessResult, \\\n GenomicSubProcessStatus, GenomicManifestTypes, GenomicQcStatus, GenomicReportState\nfrom rdr_service.genomic.genomic_job_components import GenomicFileIngester\nfrom rdr_service.genomic.genomic_job_controller import GenomicJobController\nfrom rdr_service.model.genomics import GenomicGcDataFile, GenomicIncident, GenomicSetMember, GenomicGCValidationMetrics,\\\n GenomicGCROutreachEscalationNotified\nfrom rdr_service.offline.genomics import genomic_pipeline, genomic_cvl_pipeline\nfrom rdr_service.participant_enums import WithdrawalStatus\nfrom tests import test_data\nfrom tests.genomics_tests.test_genomic_utils import create_ingestion_test_file\nfrom tests.helpers.unittest_base import BaseTestCase\n\n\nclass GenomicJobControllerTest(BaseTestCase):\n def setUp(self):\n super(GenomicJobControllerTest, self).setUp()\n self.data_file_dao = GenomicGcDataFileDao()\n self.event_data_dao = MessageBrokenEventDataDao()\n self.incident_dao = GenomicIncidentDao()\n self.member_dao = GenomicSetMemberDao()\n self.metrics_dao = GenomicGCValidationMetricsDao()\n self.user_event_metrics_dao = UserEventMetricsDao()\n self.job_run_dao = GenomicJobRunDao()\n self.report_state_dao = GenomicMemberReportStateDao()\n self.appointment_event_dao = GenomicAppointmentEventDao()\n self.appointment_metrics_dao = GenomicAppointmentEventMetricsDao()\n\n def test_incident_with_long_message(self):\n \"\"\"Make sure the length of incident messages doesn't cause issues when recording them\"\"\"\n incident_message = \"1\" * (GenomicIncident.message.type.length + 20)\n mock_slack_handler = mock.MagicMock()\n\n job_controller = GenomicJobController(job_id=1)\n job_controller.genomic_alert_slack = mock_slack_handler\n job_controller.create_incident(message=incident_message, slack=True)\n\n # Double check that the incident was saved successfully, with part of the message\n incident: GenomicIncident = self.session.query(GenomicIncident).one()\n self.assertTrue(incident_message.startswith(incident.message))\n\n # Make sure Slack received the full message\n mock_slack_handler.send_message_to_webhook.assert_called_with(\n message_data={\n 'text': incident_message\n }\n )\n\n def test_gvcf_files_ingestion(self):\n job_controller = GenomicJobController(job_id=38)\n bucket_name = \"test_bucket\"\n\n file_path = \"Wgs_sample_raw_data/SS_VCF_research/BCM_A100153482_21042005280_SIA0013441__1.hard-filtered.gvcf.gz\"\n file_path_md5 = \"Wgs_sample_raw_data/SS_VCF_research/\" \\\n \"BCM_A100153482_21042005280_SIA0013441__1.hard-filtered.gvcf.gz.md5sum\"\n\n full_path = f'{bucket_name}/{file_path}'\n full_path_md5 = 
f'{bucket_name}/{file_path_md5}'\n\n gen_set = self.data_generator.create_database_genomic_set(\n genomicSetName=\".\",\n genomicSetCriteria=\".\",\n genomicSetVersion=1\n )\n\n gen_member = self.data_generator.create_database_genomic_set_member(\n genomicSetId=gen_set.id,\n biobankId=\"100153482\",\n sampleId=\"21042005280\",\n genomeType=\"aou_wgs\",\n genomicWorkflowState=GenomicWorkflowState.AW1\n )\n\n gen_job_run = self.data_generator.create_database_genomic_job_run(\n jobId=GenomicJob.AW1_MANIFEST,\n startTime=clock.CLOCK.now(),\n runResult=GenomicSubProcessResult.SUCCESS\n )\n\n gen_processed_file = self.data_generator.create_database_genomic_file_processed(\n runId=gen_job_run.id,\n startTime=clock.CLOCK.now(),\n filePath='/test_file_path',\n bucketName='test_bucket',\n fileName='test_file_name',\n )\n\n self.data_generator.create_database_genomic_gc_validation_metrics(\n genomicSetMemberId=gen_member.id,\n genomicFileProcessedId=gen_processed_file.id\n )\n\n job_controller.ingest_data_files_into_gc_metrics(file_path_md5, bucket_name)\n\n metrics = self.metrics_dao.get_metrics_by_member_id(gen_member.id)\n\n self.assertIsNotNone(metrics.gvcfMd5Path)\n self.assertEqual(metrics.gvcfMd5Path, full_path_md5)\n\n job_controller.ingest_data_files_into_gc_metrics(file_path, bucket_name)\n\n metrics = self.metrics_dao.get_metrics_by_member_id(gen_member.id)\n\n self.assertIsNotNone(metrics.gvcfPath)\n self.assertEqual(metrics.gvcfPath, full_path)\n\n def test_gvcf_files_ingestion_create_incident(self):\n bucket_name = \"test_bucket\"\n file_path = \"Wgs_sample_raw_data/SS_VCF_research/BCM_A100153482_21042005280_SIA0013441__1.hard-filtered.gvcf.gz\"\n\n gen_set = self.data_generator.create_database_genomic_set(\n genomicSetName=\".\",\n genomicSetCriteria=\".\",\n genomicSetVersion=1\n )\n\n gen_member = self.data_generator.create_database_genomic_set_member(\n genomicSetId=gen_set.id,\n biobankId=\"111111111\",\n sampleId=\"222222222222\",\n genomeType=\"aou_wgs\",\n genomicWorkflowState=GenomicWorkflowState.AW1\n )\n\n gen_job_run = self.data_generator.create_database_genomic_job_run(\n jobId=GenomicJob.AW1_MANIFEST,\n startTime=clock.CLOCK.now(),\n runResult=GenomicSubProcessResult.SUCCESS\n )\n\n gen_processed_file = self.data_generator.create_database_genomic_file_processed(\n runId=gen_job_run.id,\n startTime=clock.CLOCK.now(),\n filePath='/test_file_path',\n bucketName=bucket_name,\n fileName='test_file_name',\n )\n\n self.data_generator.create_database_genomic_gc_validation_metrics(\n genomicSetMemberId=gen_member.id,\n genomicFileProcessedId=gen_processed_file.id\n )\n\n with GenomicJobController(GenomicJob.INGEST_DATA_FILES) as controller:\n controller.ingest_data_files_into_gc_metrics(file_path, bucket_name)\n\n incident = self.incident_dao.get(1)\n self.assertIsNotNone(incident)\n self.assertEqual(incident.code, GenomicIncidentCode.UNABLE_TO_FIND_METRIC.name)\n self.assertEqual(incident.data_file_path, file_path)\n self.assertEqual(incident.message, 'INGEST_DATA_FILES: Cannot find '\n 'genomics metric record for sample id: '\n '21042005280')\n\n def test_accession_data_files(self):\n test_bucket_baylor = \"fake-data-bucket-baylor\"\n test_idat_file = \"fake-data-bucket-baylor/Genotyping_sample_raw_data/204027270091_R02C01_Grn.idat\"\n test_vcf_file = \"fake-data-bucket-baylor/Genotyping_sample_raw_data/204027270091_R02C01.vcf.gz\"\n\n test_cram_file = \"fake-data-bucket-baylor/Wgs_sample_raw_data/\" \\\n \"CRAMs_CRAIs/BCM_A100134256_21063006771_SIA0017196_1.cram\"\n\n test_files 
= [test_idat_file, test_vcf_file, test_cram_file]\n\n test_time = datetime.datetime(2021, 7, 9, 14, 1, 1)\n\n # run job controller method on each file\n with clock.FakeClock(test_time):\n\n for file_path in test_files:\n with GenomicJobController(GenomicJob.ACCESSION_DATA_FILES) as controller:\n controller.accession_data_files(file_path, test_bucket_baylor)\n\n inserted_files = self.data_file_dao.get_all()\n\n # idat\n expected_idat = GenomicGcDataFile(\n id=1,\n created=test_time,\n modified=test_time,\n file_path=test_idat_file,\n gc_site_id='jh',\n bucket_name='fake-data-bucket-baylor',\n file_prefix='Genotyping_sample_raw_data',\n file_name='204027270091_R02C01_Grn.idat',\n file_type='Grn.idat',\n identifier_type='chipwellbarcode',\n identifier_value='204027270091_R02C01',\n ignore_flag=0,\n )\n\n # vcf\n expected_vcf = GenomicGcDataFile(\n id=2,\n created=test_time,\n modified=test_time,\n file_path=test_vcf_file,\n gc_site_id='jh',\n bucket_name='fake-data-bucket-baylor',\n file_prefix='Genotyping_sample_raw_data',\n file_name='204027270091_R02C01.vcf.gz',\n file_type='vcf.gz',\n identifier_type='chipwellbarcode',\n identifier_value='204027270091_R02C01',\n ignore_flag=0,\n )\n\n # cram\n expected_cram = GenomicGcDataFile(\n id=3,\n created=test_time,\n modified=test_time,\n file_path=test_cram_file,\n gc_site_id='bcm',\n bucket_name='fake-data-bucket-baylor',\n file_prefix='Wgs_sample_raw_data/CRAMs_CRAIs',\n file_name='BCM_A100134256_21063006771_SIA0017196_1.cram',\n file_type='cram',\n identifier_type='sample_id',\n identifier_value='21063006771',\n ignore_flag=0,\n )\n\n # obj mapping\n expected_objs = {\n 0: expected_idat,\n 1: expected_vcf,\n 2: expected_cram\n }\n\n # verify test objects match expectations\n for i in range(3):\n self.assertEqual(expected_objs[i].bucket_name, inserted_files[i].bucket_name)\n self.assertEqual(expected_objs[i].created, inserted_files[i].created)\n self.assertEqual(expected_objs[i].file_name, inserted_files[i].file_name)\n self.assertEqual(expected_objs[i].file_path, inserted_files[i].file_path)\n self.assertEqual(expected_objs[i].file_prefix, inserted_files[i].file_prefix)\n self.assertEqual(expected_objs[i].file_type, inserted_files[i].file_type)\n self.assertEqual(expected_objs[i].gc_site_id, inserted_files[i].gc_site_id)\n self.assertEqual(expected_objs[i].id, inserted_files[i].id)\n self.assertEqual(expected_objs[i].identifier_type, inserted_files[i].identifier_type)\n self.assertEqual(expected_objs[i].identifier_value, inserted_files[i].identifier_value)\n self.assertEqual(expected_objs[i].ignore_flag, inserted_files[i].ignore_flag)\n self.assertEqual(expected_objs[i].metadata, inserted_files[i].metadata)\n self.assertEqual(expected_objs[i].modified, inserted_files[i].modified)\n\n def test_updating_members_blocklists(self):\n\n gen_set = self.data_generator.create_database_genomic_set(\n genomicSetName=\".\",\n genomicSetCriteria=\".\",\n genomicSetVersion=1\n )\n\n ids_should_be_updated = []\n # for just created and wf state query and MATCHES criteria\n for i in range(4):\n ids_should_be_updated.append(\n self.data_generator.create_database_genomic_set_member(\n genomicSetId=gen_set.id,\n biobankId=\"100153482\",\n sampleId=\"21042005280\",\n genomeType='test_investigation_one' if i & 2 != 0 else 'aou_wgs',\n genomicWorkflowState=GenomicWorkflowState.AW0,\n ai_an='Y' if i & 2 == 0 else 'N'\n ).id\n )\n\n # for just created and wf state query and DOES NOT MATCH criteria\n for i in range(2):\n 
self.data_generator.create_database_genomic_set_member(\n genomicSetId=gen_set.id,\n biobankId=\"100153482\",\n sampleId=\"21042005280\",\n genomeType='aou_array',\n genomicWorkflowState=GenomicWorkflowState.AW0,\n ai_an='N'\n )\n\n with GenomicJobController(GenomicJob.UPDATE_MEMBERS_BLOCKLISTS) as controller:\n controller.update_members_blocklists()\n\n # current config json in base_config.json\n created_members = self.member_dao.get_all()\n\n blocklisted = list(filter(lambda x: x.blockResults == 1 or x.blockResearch == 1, created_members))\n self.assertTrue(ids_should_be_updated.sort() == [obj.id for obj in blocklisted].sort())\n\n # should be RESEARCH blocked\n self.assertTrue(all(\n obj.blockResearch == 1 and obj.blockResearchReason is not None and obj.blockResearchReason == 'aian'\n for obj in created_members if obj.ai_an == 'Y' and obj.genomicWorkflowState == GenomicWorkflowState.AW0)\n )\n\n # should NOT be RESULTS blocked\n self.assertTrue(all(\n obj.blockResults == 0 and obj.blockResultsReason is None\n for obj in created_members if obj.ai_an == 'Y' and obj.genomicWorkflowState == GenomicWorkflowState.AW0)\n )\n\n # should be RESEARCH blocked\n self.assertTrue(all(\n obj.blockResearch == 1 and obj.blockResearchReason is not None and obj.blockResearchReason == 'test_sample_swap'\n for obj in created_members if obj.genomeType == 'test_investigation_one' and obj.genomicWorkflowState ==\n GenomicWorkflowState.AW0)\n )\n\n # should be RESULTS blocked\n self.assertTrue(all(\n obj.blockResults == 1 and obj.blockResultsReason is not None and obj.blockResultsReason == 'test_sample_swap'\n for obj in created_members if obj.genomeType == 'test_investigation_one' and obj.genomicWorkflowState ==\n GenomicWorkflowState.AW0)\n )\n\n # should NOT be RESEARCH/RESULTS blocked\n self.assertTrue(all(\n obj.blockResearch == 0 and obj.blockResearchReason is None\n for obj in created_members if obj.genomeType == 'aou_array' and obj.genomicWorkflowState ==\n GenomicWorkflowState.AW0)\n )\n\n self.assertTrue(all(\n obj.blockResults == 0 and obj.blockResultsReason is None\n for obj in created_members if obj.genomeType == 'aou_array' and obj.genomicWorkflowState ==\n GenomicWorkflowState.AW0)\n )\n\n # clear current set member records\n with self.member_dao.session() as session:\n session.query(GenomicSetMember).delete()\n\n run_result = self.job_run_dao.get(1)\n\n self.assertEqual(run_result.runStatus, GenomicSubProcessStatus.COMPLETED)\n self.assertEqual(run_result.runResult, GenomicSubProcessResult.SUCCESS)\n\n # for modified data query and MATCHES criteria\n for i in range(4):\n self.data_generator.create_database_genomic_set_member(\n genomicSetId=gen_set.id,\n biobankId=\"100153482\",\n sampleId=\"21042005280\",\n genomeType='test_investigation_one' if i & 2 != 0 else 'aou_wgs',\n genomicWorkflowState=GenomicWorkflowState.AW1,\n ai_an='Y' if i & 2 == 0 else 'N'\n )\n\n with GenomicJobController(GenomicJob.UPDATE_MEMBERS_BLOCKLISTS) as controller:\n controller.update_members_blocklists()\n\n modified_members = self.member_dao.get_all()\n\n # should be RESEARCH blocked\n self.assertTrue(all(\n obj.blockResearch == 1 and obj.blockResearchReason is not None and obj.blockResearchReason == 'aian'\n for obj in modified_members if obj.ai_an == 'Y' and obj.genomicWorkflowState == GenomicWorkflowState.AW1)\n )\n\n # should NOT be RESULTS blocked\n self.assertTrue(all(\n obj.blockResults == 0 and obj.blockResultsReason is None\n for obj in modified_members if obj.ai_an == 'Y' and obj.genomicWorkflowState == 
GenomicWorkflowState.AW1)\n )\n\n # should be RESEARCH blocked\n self.assertTrue(all(\n obj.blockResearch == 1 and obj.blockResearchReason is not None and obj.blockResearchReason == 'test_sample_swap'\n for obj in modified_members if obj.genomeType == 'test_investigation_one' and obj.genomicWorkflowState ==\n GenomicWorkflowState.AW1)\n )\n\n # should be RESULTS blocked\n self.assertTrue(all(\n obj.blockResults == 1 and obj.blockResultsReason is not None and obj.blockResultsReason == 'test_sample_swap'\n for obj in modified_members if obj.genomeType == 'test_investigation_one' and obj.genomicWorkflowState ==\n GenomicWorkflowState.AW1)\n )\n\n run_result = self.job_run_dao.get(2)\n\n self.assertEqual(run_result.runStatus, GenomicSubProcessStatus.COMPLETED)\n self.assertEqual(run_result.runResult, GenomicSubProcessResult.SUCCESS)\n\n def test_ingest_user_metrics_file(self):\n test_file = 'Genomic-Metrics-File-User-Events-Test.csv'\n bucket_name = 'test_bucket'\n sub_folder = 'user_events'\n pids = []\n\n file_ingester = GenomicFileIngester()\n\n for _ in range(2):\n pid = self.data_generator.create_database_participant()\n pids.append(pid.participantId)\n\n test_metrics_file = create_ingestion_test_file(\n test_file,\n bucket_name,\n sub_folder)\n\n test_file_path = f'{bucket_name}/{sub_folder}/{test_metrics_file}'\n\n with open_cloud_file(test_file_path) as csv_file:\n metrics_to_ingest = file_ingester._read_data_to_ingest(csv_file)\n\n with GenomicJobController(GenomicJob.METRICS_FILE_INGEST) as controller:\n controller.ingest_metrics_file(\n metric_type='user_events',\n file_path=test_file_path,\n )\n\n job_run_id = controller.job_run.id\n metrics = self.user_event_metrics_dao.get_all()\n\n for pid in pids:\n file_metrics = list(filter(lambda x: int(x['participant_id'].split('P')[-1]) == pid, metrics_to_ingest[\n 'rows']))\n participant_ingested_metrics = list(filter(lambda x: x.participant_id == pid, metrics))\n\n self.assertEqual(len(file_metrics), len(participant_ingested_metrics))\n self.assertTrue(all(obj.run_id == job_run_id for obj in participant_ingested_metrics))\n\n @mock.patch('rdr_service.genomic.genomic_job_controller.GenomicJobController.execute_cloud_task')\n def test_reconcile_pdr_data(self, mock_cloud_task):\n\n # init new job run in __enter__\n with GenomicJobController(GenomicJob.RECONCILE_PDR_DATA) as controller:\n controller.reconcile_pdr_data()\n\n cloud_task_endpoint = 'rebuild_genomic_table_records_task'\n\n first_run = self.job_run_dao.get_all()\n\n self.assertEqual(mock_cloud_task.call_count, 1)\n call_args = mock_cloud_task.call_args_list\n\n self.assertEqual(len(call_args), 1)\n self.assertEqual(call_args[0].args[0]['table'], self.job_run_dao.model_type.__tablename__)\n\n self.assertTrue(type(call_args[0].args[0]['ids']) is list)\n self.assertEqual(call_args[0].args[0]['ids'], [obj.id for obj in first_run])\n self.assertEqual(call_args[0].args[1], cloud_task_endpoint)\n\n participant = self.data_generator.create_database_participant()\n\n gen_set = self.data_generator.create_database_genomic_set(\n genomicSetName=\".\",\n genomicSetCriteria=\".\",\n genomicSetVersion=1\n )\n\n plus_ten = clock.CLOCK.now() + datetime.timedelta(minutes=10)\n plus_ten = plus_ten.replace(microsecond=0)\n with FakeClock(plus_ten):\n for i in range(2):\n gen_member = self.data_generator.create_database_genomic_set_member(\n genomicSetId=gen_set.id,\n biobankId=\"100153482\",\n sampleId=\"21042005280\",\n genomeType=\"aou_wgs\",\n genomicWorkflowState=GenomicWorkflowState.AW1\n )\n\n 
gen_processed_file = self.data_generator.create_database_genomic_file_processed(\n runId=first_run[0].id,\n startTime=clock.CLOCK.now(),\n filePath=f'test_file_path_{i}',\n bucketName='test_bucket',\n fileName='test_file_name',\n )\n\n self.data_generator.create_database_genomic_gc_validation_metrics(\n genomicSetMemberId=gen_member.id,\n genomicFileProcessedId=gen_processed_file.id\n )\n\n manifest = self.data_generator.create_database_genomic_manifest_file(\n manifestTypeId=2,\n filePath=f'test_file_path_{i}'\n )\n\n self.data_generator.create_database_genomic_manifest_feedback(\n inputManifestFileId=manifest.id,\n feedbackRecordCount=2\n )\n\n self.data_generator.create_database_genomic_user_event_metrics(\n participant_id=participant.participantId,\n event_name='test_event',\n run_id=1,\n )\n\n self.data_generator.create_database_genomic_informing_loop(\n message_record_id=1,\n event_type='informing_loop_decision',\n module_type='gem',\n participant_id=participant.participantId,\n decision_value='maybe_later',\n event_authored_time=clock.CLOCK.now()\n )\n\n self.data_generator.create_database_genomic_cvl_past_due(\n cvl_site_id='co',\n email_notification_sent=0,\n sample_id='sample_test',\n results_type='hdr',\n genomic_set_member_id=gen_member.id\n )\n\n self.data_generator.create_database_genomic_appointment(\n message_record_id=i,\n appointment_id=i,\n event_type='appointment_scheduled',\n module_type='hdr',\n participant_id=participant.participantId,\n event_authored_time=clock.CLOCK.now(),\n source='Color',\n appointment_timestamp=format_datetime(clock.CLOCK.now()),\n appointment_timezone='America/Los_Angeles',\n location='123 address st',\n contact_number='17348675309',\n language='en'\n )\n\n self.data_generator.create_database_genomic_member_report_state(\n genomic_set_member_id=gen_member.id,\n participant_id=participant.participantId,\n module='gem',\n genomic_report_state=GenomicReportState.GEM_RPT_READY,\n event_authored_time=clock.CLOCK.now()\n )\n\n self.data_generator.create_genomic_result_viewed(\n participant_id=participant.participantId,\n event_type='result_viewed',\n event_authored_time=clock.CLOCK.now(),\n module_type='gem',\n sample_id=gen_member.sampleId\n )\n\n # gets new records that were created with last job run from above\n with GenomicJobController(GenomicJob.RECONCILE_PDR_DATA) as controller:\n controller.reconcile_pdr_data()\n\n affected_tables = [\n 'genomic_set',\n 'genomic_set_member',\n 'genomic_job_run',\n 'genomic_file_processed',\n 'genomic_gc_validation_metrics',\n 'genomic_manifest_file',\n 'genomic_manifest_feedback',\n 'genomic_informing_loop',\n 'genomic_cvl_results_past_due',\n 'user_event_metrics',\n 'genomic_member_report_state',\n 'genomic_result_viewed',\n 'genomic_appointment_event'\n ]\n\n num_calls = len(affected_tables) + 1\n\n self.assertEqual(mock_cloud_task.call_count, num_calls)\n call_args = mock_cloud_task.call_args_list\n self.assertEqual(len(call_args), num_calls)\n\n mock_tables = set([obj[0][0]['table'] for obj in call_args])\n mock_endpoint = [obj[0][1] for obj in call_args]\n\n self.assertTrue([mock_tables].sort() == affected_tables.sort())\n self.assertTrue(all(obj for obj in mock_endpoint if obj == cloud_task_endpoint))\n\n @mock.patch('rdr_service.genomic.genomic_job_controller.GenomicJobController.execute_cloud_task')\n def test_retry_manifest_ingestions_if_deltas(self, mock_cloud_task):\n\n bucket_name = \"test-bucket\"\n aw1_file_name = \"AW1_wgs_sample_manifests/RDR_AoU_SEQ_PKG-2104-026571.csv\"\n aw1_manifest_path 
= f\"{bucket_name}/{aw1_file_name}\"\n\n aw2_file_name = \"AW2_wgs_data_manifests/RDR_AoU_SEQ_DataManifest_04092021.csv\"\n aw2_manifest_path = f\"{bucket_name}/{aw2_file_name}\"\n\n gen_set = self.data_generator.create_database_genomic_set(\n genomicSetName=\".\",\n genomicSetCriteria=\".\",\n genomicSetVersion=1\n )\n\n # Create AW1 job_run\n aw1_job_run = self.data_generator.create_database_genomic_job_run(\n jobId=GenomicJob.AW1_MANIFEST,\n startTime=clock.CLOCK.now(),\n endTime=clock.CLOCK.now(),\n runResult=GenomicSubProcessResult.SUCCESS\n )\n\n # Create AW2 job_run\n aw2_job_run = self.data_generator.create_database_genomic_job_run(\n jobId=GenomicJob.METRICS_INGESTION,\n startTime=clock.CLOCK.now(),\n endTime=clock.CLOCK.now(),\n runResult=GenomicSubProcessResult.SUCCESS\n )\n\n # should have no data\n with GenomicJobController(GenomicJob.RETRY_MANIFEST_INGESTIONS) as controller:\n controller.retry_manifest_ingestions()\n\n job_run = self.job_run_dao.get(3)\n self.assertEqual(job_run.jobId, GenomicJob.RETRY_MANIFEST_INGESTIONS)\n self.assertEqual(job_run.runStatus, GenomicSubProcessStatus.COMPLETED)\n self.assertEqual(job_run.runResult, GenomicSubProcessResult.NO_FILES)\n\n self.assertEqual(mock_cloud_task.call_count, 0)\n self.assertFalse(mock_cloud_task.call_count)\n\n # Create genomic_aw1_raw record\n self.data_generator.create_database_genomic_aw1_raw(\n file_path=aw1_manifest_path,\n package_id=\"PKG-2104-026571\",\n biobank_id=\"A10001\",\n )\n\n # Create genomic_aw2_raw record\n self.data_generator.create_database_genomic_aw2_raw(\n file_path=aw2_manifest_path,\n biobank_id=\"A10001\",\n sample_id=\"100001\",\n biobankidsampleid=\"A10001_100001\",\n )\n\n # Create AW1 genomic_manifest_file record\n aw1_manifest_file = self.data_generator.create_database_genomic_manifest_file(\n created=clock.CLOCK.now(),\n modified=clock.CLOCK.now(),\n uploadDate=clock.CLOCK.now(),\n manifestTypeId=GenomicManifestTypes.AW1,\n filePath=aw1_manifest_path,\n fileName=aw1_file_name,\n bucketName=bucket_name,\n recordCount=1,\n rdrProcessingComplete=1,\n rdrProcessingCompleteDate=clock.CLOCK.now(),\n )\n\n # Create AW2 genomic_manifest_file record\n aw2_manifest_file = self.data_generator.create_database_genomic_manifest_file(\n created=clock.CLOCK.now(),\n modified=clock.CLOCK.now(),\n uploadDate=clock.CLOCK.now(),\n manifestTypeId=GenomicManifestTypes.AW2,\n filePath=aw2_manifest_path,\n fileName=aw2_file_name,\n bucketName=bucket_name,\n recordCount=1,\n rdrProcessingComplete=1,\n rdrProcessingCompleteDate=clock.CLOCK.now(),\n )\n\n # Create AW1 file_processed\n aw1_file_processed = self.data_generator.create_database_genomic_file_processed(\n runId=aw1_job_run.id,\n startTime=clock.CLOCK.now(),\n genomicManifestFileId=aw1_manifest_file.id,\n filePath=f\"/{aw1_manifest_path}\",\n bucketName=bucket_name,\n fileName=aw1_file_name,\n )\n\n # Create AW2 file_processed\n aw2_file_processed = self.data_generator.create_database_genomic_file_processed(\n runId=aw2_job_run.id,\n startTime=clock.CLOCK.now(),\n genomicManifestFileId=aw2_manifest_file.id,\n filePath=f\"/{aw2_manifest_path}\",\n bucketName=bucket_name,\n fileName=aw2_file_name,\n )\n\n # genomic_set_member for AW1\n gen_member = self.data_generator.create_database_genomic_set_member(\n genomicSetId=gen_set.id,\n biobankId=\"100153482\",\n sampleId=\"21042005280\",\n genomeType=\"aou_wgs\",\n genomicWorkflowState=GenomicWorkflowState.AW1,\n aw1FileProcessedId=aw1_file_processed.id\n )\n\n # genomic_gc_validation_metrics for AW1\n 
self.data_generator.create_database_genomic_gc_validation_metrics(\n genomicSetMemberId=gen_member.id,\n genomicFileProcessedId=aw2_file_processed.id\n )\n\n # one AW1/AW2 with no deltas\n with GenomicJobController(GenomicJob.RETRY_MANIFEST_INGESTIONS) as controller:\n controller.retry_manifest_ingestions()\n\n job_run = self.job_run_dao.get(4)\n self.assertEqual(job_run.jobId, GenomicJob.RETRY_MANIFEST_INGESTIONS)\n self.assertEqual(job_run.runStatus, GenomicSubProcessStatus.COMPLETED)\n self.assertEqual(job_run.runResult, GenomicSubProcessResult.NO_FILES)\n\n self.assertEqual(mock_cloud_task.call_count, 0)\n self.assertFalse(mock_cloud_task.call_count)\n\n # empty tables resulting in deltas and cloud task calls\n with self.member_dao.session() as session:\n session.query(GenomicGCValidationMetrics).delete()\n session.query(GenomicSetMember).delete()\n\n with GenomicJobController(GenomicJob.RETRY_MANIFEST_INGESTIONS) as controller:\n controller.retry_manifest_ingestions()\n\n job_run = self.job_run_dao.get(5)\n self.assertEqual(job_run.jobId, GenomicJob.RETRY_MANIFEST_INGESTIONS)\n self.assertEqual(job_run.runStatus, GenomicSubProcessStatus.COMPLETED)\n self.assertEqual(job_run.runResult, GenomicSubProcessResult.SUCCESS)\n\n # one AW1/AW2 with deltas\n self.assertEqual(mock_cloud_task.call_count, 2)\n self.assertTrue(mock_cloud_task.call_count)\n\n call_args = mock_cloud_task.call_args_list\n self.assertEqual(len(call_args), 2)\n\n cloud_task_endpoint = ['ingest_aw1_manifest_task', 'ingest_aw2_manifest_task']\n mock_endpoint = [obj[0][1] for obj in call_args]\n self.assertTrue(all(obj for obj in mock_endpoint if obj == cloud_task_endpoint))\n\n mock_buckets = set([obj[0][0]['bucket_name'] for obj in call_args])\n self.assertTrue(len(mock_buckets), 1)\n self.assertTrue(list(mock_buckets)[0] == bucket_name)\n\n def test_calculate_informing_loop_ready_flags(self):\n num_participants = 4\n gen_set = self.data_generator.create_database_genomic_set(\n genomicSetName=\".\",\n genomicSetCriteria=\".\",\n genomicSetVersion=1\n )\n\n for num in range(num_participants):\n plus_num = clock.CLOCK.now() + datetime.timedelta(minutes=num)\n plus_num = plus_num.replace(microsecond=0)\n with FakeClock(plus_num):\n summary = self.data_generator.create_database_participant_summary(\n consentForStudyEnrollment=1,\n consentForGenomicsROR=1\n )\n stored_sample = self.data_generator.create_database_biobank_stored_sample(\n biobankId=summary.biobankId,\n biobankOrderIdentifier=self.fake.pyint()\n )\n collection_site = self.data_generator.create_database_site(\n siteType='Clinic'\n )\n order = self.data_generator.create_database_biobank_order(\n collectedSiteId=collection_site.siteId,\n participantId=summary.participantId,\n finalizedTime=plus_num\n )\n self.data_generator.create_database_biobank_order_identifier(\n value=stored_sample.biobankOrderIdentifier,\n biobankOrderId=order.biobankOrderId,\n system=\"1\",\n )\n self.data_generator.create_database_biobank_order_identifier(\n value=stored_sample.biobankOrderIdentifier,\n biobankOrderId=order.biobankOrderId,\n system=\"2\",\n )\n member = self.data_generator.create_database_genomic_set_member(\n genomicSetId=gen_set.id,\n participantId=summary.participantId,\n genomeType=config.GENOME_TYPE_WGS,\n qcStatus=GenomicQcStatus.PASS,\n gcManifestSampleSource='Whole Blood',\n collectionTubeId=stored_sample.biobankStoredSampleId\n )\n self.data_generator.create_database_genomic_gc_validation_metrics(\n genomicSetMemberId=member.id,\n sexConcordance='True',\n 
drcFpConcordance='Pass',\n drcSexConcordance='Pass',\n processingStatus='Pass'\n )\n\n\n members_for_ready_loop = self.member_dao.get_members_for_informing_loop_ready()\n self.assertEqual(len(members_for_ready_loop), num_participants)\n\n current_set_members = self.member_dao.get_all()\n self.assertTrue(all(obj.informingLoopReadyFlag == 0 for obj in current_set_members))\n self.assertTrue(all(obj.informingLoopReadyFlagModified is None for obj in current_set_members))\n\n with GenomicJobController(GenomicJob.CALCULATE_INFORMING_LOOP_READY) as controller:\n controller.calculate_informing_loop_ready_flags()\n\n # no config object, controller method should return\n members_for_ready_loop = self.member_dao.get_members_for_informing_loop_ready()\n self.assertEqual(len(members_for_ready_loop), num_participants)\n\n calculation_limit = 2\n config.override_setting(config.CALCULATE_READY_FLAG_LIMIT, [calculation_limit])\n\n with GenomicJobController(GenomicJob.CALCULATE_INFORMING_LOOP_READY) as controller:\n controller.calculate_informing_loop_ready_flags()\n\n current_set_members = self.member_dao.get_all()\n self.assertTrue(any(obj.informingLoopReadyFlag == 1 for obj in current_set_members))\n self.assertTrue(any(obj.informingLoopReadyFlagModified is not None for obj in current_set_members))\n\n current_loops_set = [obj for obj in current_set_members if obj.informingLoopReadyFlag == 1\n and obj.informingLoopReadyFlagModified is not None]\n self.assertEqual(len(current_loops_set), calculation_limit)\n\n members_for_ready_loop = self.member_dao.get_members_for_informing_loop_ready()\n self.assertEqual(len(members_for_ready_loop), num_participants // 2)\n\n with GenomicJobController(GenomicJob.CALCULATE_INFORMING_LOOP_READY) as controller:\n controller.calculate_informing_loop_ready_flags()\n\n current_set_members = self.member_dao.get_all()\n self.assertTrue(all(obj.informingLoopReadyFlag == 1 for obj in current_set_members))\n self.assertTrue(all(obj.informingLoopReadyFlagModified is not None for obj in current_set_members))\n\n members_for_ready_loop = self.member_dao.get_members_for_informing_loop_ready()\n self.assertEqual(len(members_for_ready_loop), 0)\n\n @mock.patch('rdr_service.services.email_service.EmailService.send_email')\n def test_getting_results_withdrawn(self, email_mock):\n num_participants = 4\n result_withdrawal_dao = GenomicResultWithdrawalsDao()\n\n gen_set = self.data_generator.create_database_genomic_set(\n genomicSetName=\".\",\n genomicSetCriteria=\".\",\n genomicSetVersion=1\n )\n gen_job_run = self.data_generator.create_database_genomic_job_run(\n jobId=GenomicJob.AW1_MANIFEST,\n startTime=clock.CLOCK.now(),\n runResult=GenomicSubProcessResult.SUCCESS\n )\n\n pids = []\n for num in range(num_participants):\n summary = self.data_generator.create_database_participant_summary(\n consentForStudyEnrollment=1,\n consentForGenomicsROR=1,\n withdrawalStatus=WithdrawalStatus.EARLY_OUT\n )\n\n self.data_generator.create_database_genomic_set_member(\n genomicSetId=gen_set.id,\n participantId=summary.participantId,\n genomeType=config.GENOME_TYPE_ARRAY,\n gemA1ManifestJobRunId=gen_job_run.id if num % 2 == 0 else None\n )\n\n self.data_generator.create_database_genomic_set_member(\n genomicSetId=gen_set.id,\n participantId=summary.participantId,\n genomeType=config.GENOME_TYPE_WGS,\n cvlW1ilHdrJobRunId=gen_job_run.id\n )\n\n pids.append(summary.participantId)\n\n config.override_setting(config.RDR_GENOMICS_NOTIFICATION_EMAIL, '[email protected]')\n\n with 
GenomicJobController(GenomicJob.RESULTS_PIPELINE_WITHDRAWALS) as controller:\n controller.check_results_withdrawals()\n\n # mock checks should be two => 1 GEM 1 HEALTH\n self.assertEqual(email_mock.call_count, 2)\n call_args = email_mock.call_args_list\n\n self.assertTrue(any('GEM' in call.args[0].subject for call in call_args))\n self.assertTrue(any('HEALTH' in call.args[0].subject for call in call_args))\n\n job_runs = self.job_run_dao.get_all()\n current_job_run = list(filter(lambda x: x.jobId == GenomicJob.RESULTS_PIPELINE_WITHDRAWALS, job_runs))[0]\n self.assertTrue(current_job_run.runResult == GenomicSubProcessResult.SUCCESS)\n\n all_withdrawal_records = result_withdrawal_dao.get_all()\n\n self.assertTrue(len(all_withdrawal_records) == len(pids))\n self.assertTrue(all(obj.participant_id in pids for obj in all_withdrawal_records))\n\n array_results = list(filter(lambda x: x.array_results == 1, all_withdrawal_records))\n\n # should only be 2\n self.assertTrue(len(array_results), 2)\n\n cvl_results = list(filter(lambda x: x.cvl_results == 1, all_withdrawal_records))\n\n # should be 4 for num of participants\n self.assertTrue(len(cvl_results), num_participants)\n\n with GenomicJobController(GenomicJob.RESULTS_PIPELINE_WITHDRAWALS) as controller:\n controller.check_results_withdrawals()\n\n # mock checks should still be two on account of no records\n self.assertEqual(email_mock.call_count, 2)\n\n job_runs = self.job_run_dao.get_all()\n current_job_run = list(filter(lambda x: x.jobId == GenomicJob.RESULTS_PIPELINE_WITHDRAWALS, job_runs))[1]\n\n self.assertTrue(current_job_run.runResult == GenomicSubProcessResult.NO_RESULTS)\n\n def test_gem_results_to_report_state(self):\n num_participants = 8\n\n gen_set = self.data_generator.create_database_genomic_set(\n genomicSetName=\".\",\n genomicSetCriteria=\".\",\n genomicSetVersion=1\n )\n\n gem_a2_job_run = self.data_generator.create_database_genomic_job_run(\n jobId=GenomicJob.GEM_A2_MANIFEST,\n startTime=clock.CLOCK.now(),\n runResult=GenomicSubProcessResult.SUCCESS\n )\n\n pids_to_update, member_ids = [], []\n for num in range(num_participants):\n summary = self.data_generator.create_database_participant_summary(\n consentForStudyEnrollment=1,\n consentForGenomicsROR=1,\n withdrawalStatus=WithdrawalStatus.EARLY_OUT\n )\n\n member = self.data_generator.create_database_genomic_set_member(\n genomicSetId=gen_set.id,\n participantId=summary.participantId,\n genomeType=config.GENOME_TYPE_ARRAY\n )\n\n if num % 2 == 0:\n member_ids.append(member.id)\n pids_to_update.append(summary.participantId)\n\n with GenomicJobController(GenomicJob.GEM_RESULT_REPORTS) as controller:\n controller.gem_results_to_report_state()\n\n current_job_runs = self.job_run_dao.get_all()\n self.assertEqual(len(current_job_runs), 2)\n\n current_job_run = list(filter(lambda x: x.jobId == GenomicJob.GEM_RESULT_REPORTS, current_job_runs))[0]\n self.assertTrue(current_job_run.runResult == GenomicSubProcessResult.NO_RESULTS)\n\n current_members = self.member_dao.get_all()\n\n # 4 members updated correctly should return\n for member in current_members:\n if member.participantId in pids_to_update:\n member.gemA2ManifestJobRunId = gem_a2_job_run.id\n member.genomicWorkflowState = GenomicWorkflowState.GEM_RPT_READY\n self.member_dao.update(member)\n\n with GenomicJobController(GenomicJob.GEM_RESULT_REPORTS) as controller:\n controller.gem_results_to_report_state()\n\n current_job_runs = self.job_run_dao.get_all()\n self.assertEqual(len(current_job_runs), 3)\n\n current_job_run = 
list(filter(lambda x: x.jobId == GenomicJob.GEM_RESULT_REPORTS, current_job_runs))[1]\n self.assertTrue(current_job_run.runResult == GenomicSubProcessResult.SUCCESS)\n\n current_gem_report_states = self.report_state_dao.get_all()\n self.assertEqual(len(current_gem_report_states), len(pids_to_update))\n self.assertTrue(all(obj.event_type == 'result_ready' for obj in current_gem_report_states))\n self.assertTrue(all(obj.event_authored_time is not None for obj in current_gem_report_states))\n self.assertTrue(all(obj.module == 'gem' for obj in current_gem_report_states))\n self.assertTrue(\n all(obj.genomic_report_state == GenomicReportState.GEM_RPT_READY for obj in current_gem_report_states)\n )\n self.assertTrue(\n all(obj.genomic_report_state_str == GenomicReportState.GEM_RPT_READY.name for obj in\n current_gem_report_states)\n )\n self.assertTrue(\n all(obj.genomic_set_member_id in member_ids for obj in\n current_gem_report_states)\n )\n\n # 4 members inserted already should not return\n with GenomicJobController(GenomicJob.GEM_RESULT_REPORTS) as controller:\n controller.gem_results_to_report_state()\n\n current_job_runs = self.job_run_dao.get_all()\n self.assertEqual(len(current_job_runs), 4)\n\n current_job_run = list(filter(lambda x: x.jobId == GenomicJob.GEM_RESULT_REPORTS, current_job_runs))[2]\n self.assertTrue(current_job_run.runResult == GenomicSubProcessResult.NO_RESULTS)\n\n self.clear_table_after_test('genomic_member_report_state')\n\n def test_reconcile_informing_loop(self):\n event_dao = UserEventMetricsDao()\n event_dao.truncate() # for test suite\n il_dao = GenomicInformingLoopDao()\n\n for pid in range(8):\n self.data_generator.create_database_participant(participantId=1 + pid, biobankId=1 + pid)\n\n # Set up initial job run ID\n self.data_generator.create_database_genomic_job_run(\n jobId=GenomicJob.METRICS_FILE_INGEST,\n startTime=clock.CLOCK.now()\n )\n\n # create genomic set\n self.data_generator.create_database_genomic_set(\n genomicSetName='test',\n genomicSetCriteria='.',\n genomicSetVersion=1\n )\n # insert set members\n for b in [\"aou_array\", \"aou_wgs\"]:\n for i in range(1, 9):\n self.data_generator.create_database_genomic_set_member(\n participantId=i,\n genomicSetId=1,\n biobankId=i,\n collectionTubeId=100 + i,\n sampleId=10 + i,\n genomeType=b,\n )\n\n # Set up ingested metrics data\n events = ['gem.informing_loop.started',\n 'gem.informing_loop.screen8_no',\n 'gem.informing_loop.screen8_yes',\n 'hdr.informing_loop.started',\n 'gem.informing_loop.screen3',\n 'pgx.informing_loop.screen8_no',\n 'hdr.informing_loop.screen10_no']\n\n for p in range(4):\n for i in range(len(events)):\n self.data_generator.create_database_genomic_user_event_metrics(\n created=clock.CLOCK.now(),\n modified=clock.CLOCK.now(),\n participant_id=p + 1,\n created_at=datetime.datetime(2021, 12, 29, 00) + datetime.timedelta(hours=i),\n event_name=events[i],\n run_id=1,\n ignore_flag=0,\n )\n # Set up informing loop from message broker records\n decisions = [None, 'no', 'yes']\n for p in range(3):\n for i in range(2):\n self.data_generator.create_database_genomic_informing_loop(\n message_record_id=i,\n event_type='informing_loop_started' if i == 0 else 'informing_loop_decision',\n module_type='gem',\n participant_id=p + 1,\n decision_value=decisions[i],\n sample_id=100 + p,\n event_authored_time=datetime.datetime(2021, 12, 29, 00) + datetime.timedelta(hours=i)\n )\n\n # Test for no message but yes user event\n self.data_generator.create_database_genomic_user_event_metrics(\n 
created=clock.CLOCK.now(),\n modified=clock.CLOCK.now(),\n participant_id=6,\n created_at=datetime.datetime(2021, 12, 29, 00),\n event_name='gem.informing_loop.screen8_yes',\n run_id=1,\n ignore_flag=0,\n )\n\n # Run reconcile job\n genomic_pipeline.reconcile_informing_loop_responses()\n\n # Test mismatched GEM data ingested correctly\n pid_list = [1, 2, 3, 6]\n\n new_il_values = il_dao.get_latest_il_for_pids(\n pid_list=pid_list,\n module=\"gem\"\n )\n\n for value in new_il_values:\n self.assertEqual(\"yes\", value.decision_value)\n\n pid_list = [1, 2, 3, 4]\n for module in [\"hdr\", \"pgx\"]:\n new_il_values = il_dao.get_latest_il_for_pids(\n pid_list=pid_list,\n module=module\n )\n\n for value in new_il_values:\n self.assertEqual(\"no\", value.decision_value)\n self.assertIsNotNone(value.created_from_metric_id)\n\n def test_reconcile_message_broker_results_ready(self):\n # Create Test Participants' data\n # create genomic set\n self.data_generator.create_database_genomic_set(\n genomicSetName='test',\n genomicSetCriteria='.',\n genomicSetVersion=1\n )\n # Set up initial job run ID\n self.data_generator.create_database_genomic_job_run(\n jobId=GenomicJob.METRICS_FILE_INGEST,\n startTime=clock.CLOCK.now()\n )\n\n for pid in range(7):\n self.data_generator.create_database_participant(participantId=1 + pid, biobankId=1 + pid)\n\n # insert set members and event metrics records\n for i in range(1, 6):\n self.data_generator.create_database_genomic_set_member(\n participantId=i,\n genomicSetId=1,\n biobankId=i,\n collectionTubeId=100 + i,\n sampleId=10 + i,\n genomeType=\"aou_wgs\",\n )\n\n # 3 PGX records\n if i < 4:\n self.data_generator.create_database_genomic_user_event_metrics(\n participant_id=i,\n created_at=datetime.datetime(2022, 10, 6, 00),\n event_name=\"pgx.result_ready\",\n run_id=1,\n )\n\n # 1 HDR Positive\n if i == 4:\n self.data_generator.create_database_genomic_user_event_metrics(\n participant_id=i,\n created_at=datetime.datetime(2022, 10, 6, 00),\n event_name=\"hdr.result_ready.informative\",\n run_id=1,\n )\n\n # 1 HDR uninformative\n if i == 5:\n self.data_generator.create_database_genomic_user_event_metrics(\n participant_id=i,\n created_at=datetime.datetime(2022, 10, 6, 00),\n event_name=\"hdr.result_ready.uninformative\",\n run_id=1,\n )\n\n # Run job\n genomic_cvl_pipeline.reconcile_message_broker_results_ready()\n\n # Test correct data inserted\n report_state_dao = GenomicMemberReportStateDao()\n states = report_state_dao.get_all()\n\n self.assertEqual(5, len(states))\n\n pgx_records = [rec for rec in states if rec.module == \"pgx_v1\"]\n hdr_record_uninf = [rec for rec in states\n if rec.genomic_report_state == GenomicReportState.HDR_RPT_UNINFORMATIVE][0]\n\n hdr_record_pos = [rec for rec in states\n if rec.genomic_report_state == GenomicReportState.HDR_RPT_POSITIVE][0]\n\n for pgx_record in pgx_records:\n self.assertEqual(GenomicReportState.PGX_RPT_READY, pgx_record.genomic_report_state)\n self.assertEqual(\"PGX_RPT_READY\", pgx_record.genomic_report_state_str)\n self.assertEqual(int(pgx_record.sample_id), pgx_record.participant_id + 10)\n self.assertEqual(\"result_ready\", pgx_record.event_type)\n self.assertEqual(datetime.datetime(2022, 10, 6, 00), pgx_record.event_authored_time)\n self.assertIsNotNone(pgx_record.created_from_metric_id)\n\n self.assertEqual(\"HDR_RPT_UNINFORMATIVE\", hdr_record_uninf.genomic_report_state_str)\n self.assertEqual(int(hdr_record_uninf.sample_id), hdr_record_uninf.participant_id + 10)\n self.assertEqual(\"result_ready\", 
hdr_record_uninf.event_type)\n self.assertEqual(datetime.datetime(2022, 10, 6, 00), hdr_record_uninf.event_authored_time)\n self.assertIsNotNone(hdr_record_uninf.created_from_metric_id)\n\n self.assertEqual(\"HDR_RPT_POSITIVE\", hdr_record_pos.genomic_report_state_str)\n self.assertEqual(int(hdr_record_pos.sample_id), hdr_record_pos.participant_id + 10)\n self.assertEqual(\"result_ready\", hdr_record_pos.event_type)\n self.assertEqual(datetime.datetime(2022, 10, 6, 00), hdr_record_pos.event_authored_time)\n self.assertIsNotNone(hdr_record_pos.created_from_metric_id)\n\n def test_reconcile_message_broker_results_viewed(self):\n # Create Test Participants' data\n # create genomic set\n self.data_generator.create_database_genomic_set(\n genomicSetName='test',\n genomicSetCriteria='.',\n genomicSetVersion=1\n )\n # Set up initial job run ID\n self.data_generator.create_database_genomic_job_run(\n jobId=GenomicJob.METRICS_FILE_INGEST,\n startTime=clock.CLOCK.now()\n )\n\n for pid in range(3):\n self.data_generator.create_database_participant(participantId=1 + pid, biobankId=1 + pid)\n\n # insert set members and event metrics records\n for i in range(1, 3):\n self.data_generator.create_database_genomic_set_member(\n participantId=i,\n genomicSetId=1,\n biobankId=i,\n collectionTubeId=100 + i,\n sampleId=10 + i,\n genomeType=\"aou_wgs\",\n )\n\n # 1 PGX Viewed\n if i == 1:\n self.data_generator.create_database_genomic_user_event_metrics(\n participant_id=i,\n created_at=datetime.datetime(2022, 10, 6, 00),\n event_name=\"pgx.opened_at\",\n run_id=1,\n )\n\n # 1 HDR Viewed\n if i == 2:\n self.data_generator.create_database_genomic_user_event_metrics(\n participant_id=i,\n created_at=datetime.datetime(2022, 10, 6, 00),\n event_name=\"hdr.opened_at\",\n run_id=1,\n )\n\n genomic_cvl_pipeline.reconcile_message_broker_results_viewed()\n\n # Test correct data inserted\n result_viewed_dao = GenomicResultViewedDao()\n results = result_viewed_dao.get_all()\n\n self.assertEqual(2, len(results))\n\n for record in results:\n if record.participant_id == 1:\n self.assertEqual(\"pgx_v1\", record.module_type)\n else:\n self.assertEqual(\"hdr_v1\", record.module_type)\n self.assertEqual(int(record.sample_id), record.participant_id + 10)\n self.assertEqual(\"result_viewed\", record.event_type)\n self.assertEqual(datetime.datetime(2022, 10, 6, 00), record.first_viewed)\n self.assertIsNotNone(record.created_from_metric_id)\n\n def test_ingest_appointment_metrics_file(self):\n test_file = 'Genomic-Metrics-File-Appointment-Events-Test.json'\n bucket_name = 'test_bucket'\n sub_folder = 'appointment_events'\n pids = []\n\n for _ in range(4):\n summary = self.data_generator.create_database_participant_summary()\n pids.append(summary.participantId)\n\n test_file_path = f'{bucket_name}/{sub_folder}/{test_file}'\n\n appointment_data = test_data.load_test_data_json(\n \"Genomic-Metrics-File-Appointment-Events-Test.json\")\n appointment_data_str = json.dumps(appointment_data, indent=4)\n\n with open_cloud_file(test_file_path, mode='wb') as cloud_file:\n cloud_file.write(appointment_data_str.encode(\"utf-8\"))\n\n with GenomicJobController(GenomicJob.APPOINTMENT_METRICS_FILE_INGEST) as controller:\n controller.ingest_appointment_metrics_file(\n file_path=test_file_path,\n )\n\n all_metrics = self.appointment_metrics_dao.get_all()\n\n # should be 5 metric records for whats in json file\n self.assertEqual(len(all_metrics), 5)\n self.assertTrue(all((obj.participant_id in pids for obj in all_metrics)))\n 
self.assertTrue(all((obj.file_path == test_file_path for obj in all_metrics)))\n self.assertTrue(all((obj.appointment_event is not None for obj in all_metrics)))\n self.assertTrue(all((obj.created is not None for obj in all_metrics)))\n self.assertTrue(all((obj.modified is not None for obj in all_metrics)))\n self.assertTrue(all((obj.module_type is not None for obj in all_metrics)))\n self.assertTrue(all((obj.event_authored_time is not None for obj in all_metrics)))\n self.assertTrue(all((obj.event_type is not None for obj in all_metrics)))\n\n current_job_runs = self.job_run_dao.get_all()\n self.assertEqual(len(current_job_runs), 1)\n\n current_job_run = current_job_runs[0]\n self.assertTrue(current_job_run.jobId == GenomicJob.APPOINTMENT_METRICS_FILE_INGEST)\n self.assertTrue(current_job_run.runResult == GenomicSubProcessResult.SUCCESS)\n\n self.clear_table_after_test('genomic_appointment_event_metrics')\n\n def test_reconcile_appointments_with_metrics(self):\n fake_date = parser.parse('2020-05-29T08:00:01-05:00')\n\n for num in range(4):\n summary = self.data_generator.create_database_participant_summary()\n\n missing_json = {\n \"event\": \"appointment_updated\",\n \"eventAuthoredTime\": \"2022-09-16T17:18:38Z\",\n \"participantId\": f'P{summary.participantId}',\n \"messageBody\": {\n \"module_type\": \"hdr\",\n \"appointment_timestamp\": \"2022-09-19T19:30:00+00:00\",\n \"id\": 55,\n \"appointment_timezone\": \"America/Los_Angeles\",\n \"location\": \"CA\",\n \"contact_number\": \"18043704252\",\n \"language\": \"en\",\n \"source\": \"Color\"\n }\n }\n\n if num % 2 == 0:\n self.data_generator.create_database_genomic_appointment(\n message_record_id=num,\n appointment_id=num,\n event_type='appointment_scheduled',\n module_type='hdr',\n participant_id=summary.participantId,\n event_authored_time=fake_date,\n source='Color',\n appointment_timestamp=format_datetime(clock.CLOCK.now()),\n appointment_timezone='America/Los_Angeles',\n location='123 address st',\n contact_number='17348675309',\n language='en'\n )\n\n self.data_generator.create_database_genomic_appointment_metric(\n participant_id=summary.participantId,\n appointment_event=json.dumps(missing_json, indent=4) if num % 2 != 0 else 'foo',\n file_path='test_file_path',\n module_type='hdr',\n event_authored_time=fake_date,\n event_type='appointment_updated' if num % 2 != 0 else 'appointment_scheduled'\n )\n\n current_events = self.appointment_event_dao.get_all()\n # should be 2 initial appointment events\n self.assertEqual(len(current_events), 2)\n\n current_metrics = self.appointment_metrics_dao.get_all()\n # should be 4 initial appointment events\n self.assertEqual(len(current_metrics), 4)\n self.assertTrue(all(obj.reconcile_job_run_id is None for obj in current_metrics))\n\n with GenomicJobController(GenomicJob.APPOINTMENT_METRICS_RECONCILE) as controller:\n controller.reconcile_appointment_events_from_metrics()\n\n job_run = self.job_run_dao.get_all()\n self.assertEqual(len(job_run), 1)\n self.assertTrue(job_run[0].jobId == GenomicJob.APPOINTMENT_METRICS_RECONCILE)\n\n current_events = self.appointment_event_dao.get_all()\n # should be 4 appointment events 2 initial + 2 added\n self.assertEqual(len(current_events), 4)\n\n scheduled = list(filter(lambda x: x.event_type == 'appointment_scheduled', current_events))\n self.assertEqual(len(scheduled), 2)\n self.assertTrue(all(obj.created_from_metric_id is None for obj in scheduled))\n\n updated = list(filter(lambda x: x.event_type == 'appointment_updated', current_events))\n 
self.assertEqual(len(updated), 2)\n self.assertTrue(all(obj.created_from_metric_id is not None for obj in updated))\n\n current_metrics = self.appointment_metrics_dao.get_all()\n # should STILL be 4 initial appointment events\n self.assertEqual(len(current_metrics), 4)\n self.assertTrue(all(obj.reconcile_job_run_id is not None for obj in current_metrics))\n self.assertTrue(all(obj.reconcile_job_run_id == job_run[0].id for obj in current_metrics))\n\n self.clear_table_after_test('genomic_appointment_event_metrics')\n\n @mock.patch('rdr_service.services.email_service.EmailService.send_email')\n def test_check_appointments_gror_changed(self, email_mock):\n fake_date = parser.parse(\"2022-09-01T13:43:23\")\n notified_dao = GenomicAppointmentEventNotifiedDao()\n config.override_setting(config.GENOMIC_COLOR_PM_EMAIL, ['[email protected]'])\n num_participants = 4\n for num in range(num_participants):\n gror = num if num > 1 else 1\n summary = self.data_generator.create_database_participant_summary(\n consentForStudyEnrollment=1,\n consentForGenomicsROR=gror\n )\n self.data_generator.create_database_genomic_appointment(\n message_record_id=num,\n appointment_id=num,\n event_type='appointment_scheduled',\n module_type='hdr',\n participant_id=summary.participantId,\n event_authored_time=fake_date,\n source='Color',\n appointment_timestamp=format_datetime(clock.CLOCK.now()),\n appointment_timezone='America/Los_Angeles',\n location='123 address st',\n contact_number='17348675309',\n language='en'\n )\n\n changed_ppts = self.appointment_event_dao.get_appointments_gror_changed()\n self.assertEqual(2, len(changed_ppts))\n with GenomicJobController(GenomicJob.CHECK_APPOINTMENT_GROR_CHANGED) as controller:\n controller.check_appointments_gror_changed()\n\n self.assertEqual(email_mock.call_count, 1)\n notified_appointments = notified_dao.get_all()\n self.assertEqual(2, len(notified_appointments))\n\n # test notified not returned by query\n summary = self.data_generator.create_database_participant_summary(\n consentForStudyEnrollment=1,\n consentForGenomicsROR=2\n )\n self.data_generator.create_database_genomic_appointment(\n message_record_id=5,\n appointment_id=5,\n event_type='appointment_scheduled',\n module_type='hdr',\n participant_id=summary.participantId,\n event_authored_time=fake_date,\n source='Color',\n appointment_timestamp=format_datetime(clock.CLOCK.now()),\n appointment_timezone='America/Los_Angeles',\n location='123 address st',\n contact_number='17348675309',\n language='en'\n )\n\n changed_ppts = self.appointment_event_dao.get_appointments_gror_changed()\n self.assertEqual(1, len(changed_ppts))\n\n @mock.patch('rdr_service.services.email_service.EmailService.send_email')\n def test_check_gcr_14day_escalation(self, email_mock):\n fake_date = parser.parse(\"2022-09-01T13:43:23\")\n fake_date2 = parser.parse(\"2022-09-02T14:14:00\")\n fake_date3 = parser.parse(\"2022-09-03T15:15:00\")\n config.override_setting(config.GENOMIC_GCR_ESCALATION_EMAILS, ['[email protected]'])\n self.data_generator.create_database_genomic_set(\n genomicSetName='test',\n genomicSetCriteria='.',\n genomicSetVersion=1\n )\n pids = []\n for _ in range(6):\n summary = self.data_generator.create_database_participant_summary(\n consentForStudyEnrollment=1,\n consentForGenomicsROR=1\n )\n set_member = self.data_generator.create_database_genomic_set_member(\n participantId=summary.participantId,\n genomicSetId=1,\n biobankId=1001,\n collectionTubeId=100,\n sampleId=10,\n genomeType=\"aou_wgs\",\n )\n 
self.data_generator.create_database_genomic_member_report_state(\n participant_id=summary.participantId,\n genomic_report_state=GenomicReportState.HDR_RPT_POSITIVE,\n genomic_set_member_id=set_member.id,\n module='hdr_v1',\n event_authored_time=fake_date\n )\n pids.append(summary.participantId)\n\n # Appointment scheduled in future: don't notify\n self.data_generator.create_database_genomic_appointment(\n message_record_id=101,\n appointment_id=102,\n event_type='appointment_scheduled',\n module_type='hdr',\n participant_id=pids[0],\n event_authored_time=fake_date,\n source='Color',\n appointment_timestamp=format_datetime(clock.CLOCK.now()),\n appointment_timezone='America/Los_Angeles',\n location='123 address st',\n contact_number='17348675309',\n language='en'\n )\n\n # Appointment completed: don't notify\n self.data_generator.create_database_genomic_appointment(\n message_record_id=102,\n appointment_id=103,\n event_type='appointment_completed',\n module_type='hdr',\n participant_id=pids[1],\n event_authored_time=fake_date,\n source='Color',\n appointment_timestamp=fake_date,\n appointment_timezone='America/Los_Angeles',\n location='123 address st',\n contact_number='17348675309',\n language='en'\n )\n\n # Appointment scheduled then canceled: notify\n self.data_generator.create_database_genomic_appointment(\n message_record_id=103,\n appointment_id=104,\n event_type='appointment_scheduled',\n module_type='hdr',\n participant_id=pids[2],\n event_authored_time=fake_date2,\n source='Color',\n appointment_timestamp=format_datetime(clock.CLOCK.now()),\n appointment_timezone='America/Los_Angeles',\n location='123 address st',\n contact_number='17348675309',\n language='en'\n )\n self.data_generator.create_database_genomic_appointment(\n message_record_id=104,\n appointment_id=104,\n event_type='appointment_cancelled',\n module_type='hdr',\n participant_id=pids[2],\n event_authored_time=fake_date3,\n source='Color',\n appointment_timestamp=format_datetime(clock.CLOCK.now()),\n appointment_timezone='America/Los_Angeles',\n location='123 address st',\n contact_number='17348675309',\n language='en'\n )\n\n notified_dao = GenomicDefaultBaseDao(model_type=GenomicGCROutreachEscalationNotified)\n notified_dao.insert_bulk([{\n 'participant_id': pids[4],\n 'created': clock.CLOCK.now(),\n 'modified': clock.CLOCK.now(),\n 'message_sent': True\n },{\n 'participant_id': pids[5],\n 'created': clock.CLOCK.now(),\n 'modified': clock.CLOCK.now(),\n 'message_sent': False\n }])\n\n with clock.FakeClock(parser.parse('2022-11-1T05:15:00')):\n escalated_participants = self.report_state_dao.get_hdr_result_positive_no_appointment(num_days=14)\n results = [pid[0] for pid in escalated_participants]\n self.assertIn(pids[2], results)\n self.assertIn(pids[3], results)\n self.assertIn(pids[5], results)\n self.assertNotIn(pids[0], results)\n self.assertNotIn(pids[1], results)\n self.assertNotIn(pids[4], results)\n\n with GenomicJobController(GenomicJob.CHECK_GCR_OUTREACH_ESCALATION) as controller:\n controller.check_gcr_escalation(controller.job_id)\n\n self.assertEqual(email_mock.call_count, 3)\n self.assertEqual(email_mock.call_args.args[0].subject, 'GCR Outreach 14 Day Escalation')\n\n self.clear_table_after_test('genomic_gcr_outreach_escalation_notified')\n\n @mock.patch('rdr_service.services.email_service.EmailService.send_email')\n def test_check_gcr_14day_escalation_error(self, email_mock):\n email_mock.side_effect = ForbiddenError(mock.Mock(code=403))\n mock_slack_handler = mock.MagicMock()\n\n fake_date = 
parser.parse(\"2023-06-01T13:43:23\")\n\n config.override_setting(config.GENOMIC_GCR_ESCALATION_EMAILS, ['[email protected]'])\n self.data_generator.create_database_genomic_set(\n genomicSetName='test',\n genomicSetCriteria='.',\n genomicSetVersion=1\n )\n\n pids = []\n for _ in range(2):\n summary = self.data_generator.create_database_participant_summary(\n consentForStudyEnrollment=1,\n consentForGenomicsROR=1\n )\n set_member = self.data_generator.create_database_genomic_set_member(\n participantId=summary.participantId,\n genomicSetId=1,\n biobankId=1001,\n collectionTubeId=100,\n sampleId=10,\n genomeType=\"aou_wgs\",\n )\n self.data_generator.create_database_genomic_member_report_state(\n participant_id=summary.participantId,\n genomic_report_state=GenomicReportState.HDR_RPT_POSITIVE,\n genomic_set_member_id=set_member.id,\n module='hdr_v1',\n event_authored_time=fake_date\n )\n pids.append(summary.participantId)\n\n self.data_generator.create_database_genomic_appointment(\n message_record_id=102,\n appointment_id=103,\n event_type='appointment_completed',\n module_type='hdr',\n participant_id=pids[1],\n event_authored_time=fake_date,\n source='Color',\n appointment_timestamp=fake_date,\n appointment_timezone='America/Los_Angeles',\n location='123 address st',\n contact_number='17348675309',\n language='en'\n )\n\n with GenomicJobController(GenomicJob.CHECK_GCR_OUTREACH_ESCALATION) as controller:\n controller.genomic_alert_slack = mock_slack_handler\n controller.check_gcr_escalation(controller.job_id)\n\n notified_dao = GenomicDefaultBaseDao(model_type=GenomicGCROutreachEscalationNotified)\n with notified_dao.session() as session:\n notification = session.query(\n GenomicGCROutreachEscalationNotified\n ).filter(\n GenomicGCROutreachEscalationNotified.participant_id == pids[0]\n ).one()\n\n self.assertEqual(email_mock.call_count, 1)\n self.assertEqual(mock_slack_handler.send_message_to_webhook.call_count, 1)\n self.assertEqual(False, notification.message_sent)\n\n self.clear_table_after_test('genomic_gcr_outreach_escalation_notified')\n\n @mock.patch('rdr_service.services.email_service.EmailService.send_email')\n def test_check_gcr_ce_escalation(self, email_mock):\n fake_date = parser.parse(\"2022-09-01T13:43:23\")\n fake_date2 = parser.parse(\"2022-09-02T14:14:00\")\n fake_date3 = parser.parse(\"2022-09-03T15:15:00\")\n config.override_setting(config.GENOMIC_GCR_ESCALATION_EMAILS, ['[email protected]'])\n self.data_generator.create_database_genomic_set(\n genomicSetName='test',\n genomicSetCriteria='.',\n genomicSetVersion=1\n )\n pids = []\n for _ in range(6):\n summary = self.data_generator.create_database_participant_summary(\n consentForStudyEnrollment=1,\n consentForGenomicsROR=1\n )\n set_member = self.data_generator.create_database_genomic_set_member(\n participantId=summary.participantId,\n genomicSetId=1,\n biobankId=1001,\n collectionTubeId=100,\n sampleId=10,\n genomeType=\"aou_wgs\",\n participantOrigin='careevolution'\n )\n self.data_generator.create_database_genomic_member_report_state(\n participant_id=summary.participantId,\n genomic_report_state=GenomicReportState.HDR_RPT_POSITIVE,\n genomic_set_member_id=set_member.id,\n module='hdr_v1',\n event_authored_time=fake_date\n )\n pids.append(summary.participantId)\n\n # Appointment scheduled in future: don't notify\n self.data_generator.create_database_genomic_appointment(\n message_record_id=101,\n appointment_id=102,\n event_type='appointment_scheduled',\n module_type='hdr',\n participant_id=pids[0],\n 
event_authored_time=fake_date,\n source='Color',\n appointment_timestamp=format_datetime(clock.CLOCK.now()),\n appointment_timezone='America/Los_Angeles',\n location='123 address st',\n contact_number='17348675309',\n language='en'\n )\n\n # Appointment completed: don't notify\n self.data_generator.create_database_genomic_appointment(\n message_record_id=102,\n appointment_id=103,\n event_type='appointment_completed',\n module_type='hdr',\n participant_id=pids[1],\n event_authored_time=fake_date,\n source='Color',\n appointment_timestamp=fake_date,\n appointment_timezone='America/Los_Angeles',\n location='123 address st',\n contact_number='17348675309',\n language='en'\n )\n\n # Appointment scheduled then canceled: notify\n self.data_generator.create_database_genomic_appointment(\n message_record_id=103,\n appointment_id=104,\n event_type='appointment_scheduled',\n module_type='hdr',\n participant_id=pids[2],\n event_authored_time=fake_date2,\n source='Color',\n appointment_timestamp=format_datetime(clock.CLOCK.now()),\n appointment_timezone='America/Los_Angeles',\n location='123 address st',\n contact_number='17348675309',\n language='en'\n )\n self.data_generator.create_database_genomic_appointment(\n message_record_id=104,\n appointment_id=104,\n event_type='appointment_cancelled',\n module_type='hdr',\n participant_id=pids[2],\n event_authored_time=fake_date3,\n source='Color',\n appointment_timestamp=format_datetime(clock.CLOCK.now()),\n appointment_timezone='America/Los_Angeles',\n location='123 address st',\n contact_number='17348675309',\n language='en'\n )\n\n notified_dao = GenomicDefaultBaseDao(model_type=GenomicGCROutreachEscalationNotified)\n notified_dao.insert_bulk([{\n 'participant_id': pids[4],\n 'created': clock.CLOCK.now(),\n 'modified': clock.CLOCK.now(),\n 'message_sent': True\n },{\n 'participant_id': pids[5],\n 'created': clock.CLOCK.now(),\n 'modified': clock.CLOCK.now(),\n 'message_sent': False\n }])\n\n with clock.FakeClock(parser.parse('2022-11-1T05:15:00')):\n escalated_participants = self.report_state_dao.get_hdr_result_positive_no_appointment(\n num_days=30,\n participant_origin='careevolution'\n )\n results = [pid[0] for pid in escalated_participants]\n\n self.assertIn(pids[2], results)\n self.assertIn(pids[3], results)\n self.assertIn(pids[5], results)\n self.assertNotIn(pids[0], results)\n self.assertNotIn(pids[1], results)\n self.assertNotIn(pids[4], results)\n\n with GenomicJobController(GenomicJob.CHECK_GCR_CE_OUTREACH_ESCALATION) as controller:\n controller.check_gcr_escalation(controller.job_id)\n\n self.assertEqual(email_mock.call_count, 3)\n self.assertEqual(email_mock.call_args.args[0].subject, 'GCR Outreach 30 Day Escalation')\n\n self.clear_table_after_test('genomic_gcr_outreach_escalation_notified')\n\n @mock.patch('rdr_service.genomic.genomic_job_controller.GenomicJobController.execute_cloud_task')\n def test_execute_auto_generation_from_last_run(self, cloud_task_mock):\n\n with GenomicJobController(\n GenomicJob.PR_PR_WORKFLOW\n ) as controller:\n controller.job_result = GenomicSubProcessResult.ERROR\n controller._end_run()\n controller.execute_auto_generation_from_cloud_task()\n\n last_job_run_status = self.job_run_dao.get_last_run_status_for_job_id(job_id=GenomicJob.PR_PR_WORKFLOW)\n self.assertTrue(last_job_run_status is not None)\n self.assertTrue(last_job_run_status[0] == GenomicSubProcessResult.ERROR)\n\n # task SHOULD NOT be called\n self.assertEqual(cloud_task_mock.called, False)\n self.assertEqual(cloud_task_mock.call_count, 0)\n\n 
with GenomicJobController(\n GenomicJob.PR_PR_WORKFLOW\n ) as controller:\n controller.job_result = GenomicSubProcessResult.SUCCESS\n controller._end_run()\n controller.execute_auto_generation_from_cloud_task()\n\n last_job_run_status = self.job_run_dao.get_last_run_status_for_job_id(job_id=GenomicJob.PR_PR_WORKFLOW)\n self.assertTrue(last_job_run_status is not None)\n self.assertTrue(last_job_run_status[0] == GenomicSubProcessResult.SUCCESS)\n\n # task SHOULD be called\n self.assertEqual(cloud_task_mock.called, True)\n self.assertTrue(cloud_task_mock.call_args[1].get('payload').get('manifest_type') == 'p0')\n self.assertTrue(cloud_task_mock.call_args[1].get('task_queue') == 'genomic-generate-manifest')\n\n all_job_runs = self.job_run_dao.get_all()\n self.assertEqual(len(all_job_runs), 2)\n self.assertTrue(all(obj.runResult in [GenomicSubProcessResult.SUCCESS, GenomicSubProcessResult.ERROR] for obj\n in all_job_runs))\n self.assertTrue(all(obj.jobId == GenomicJob.PR_PR_WORKFLOW for obj in all_job_runs))\n\n",
"step-ids": [
9,
13,
17,
22,
25
]
}
|
[
9,
13,
17,
22,
25
] |
"""
Copyright (c) 2018, salesforce.com, inc.
All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
Graph Search Policy Network.
"""
from typing import List, NamedTuple, Union
import torch
import torch.nn as nn
import torch.nn.functional as F
import src.utils.ops as ops
from src.knowledge_graph import KnowledgeGraph, ActionSpace, Observation, Action
from src.utils.ops import var_cuda, zeros_var_cuda
class BucketActions(NamedTuple):
action_spaces: List[ActionSpace]
action_dists: List[torch.Tensor]
inv_offset: Union[List[int], None]
entropy: torch.Tensor
def pad_and_cat_action_space(
action_spaces: List[ActionSpace], inv_offset, kg: KnowledgeGraph
):
db_r_space, db_e_space, db_action_mask = [], [], []
forks = []
for acsp in action_spaces:
forks += acsp.forks
db_r_space.append(acsp.r_space)
db_e_space.append(acsp.e_space)
db_action_mask.append(acsp.action_mask)
r_space = ops.pad_and_cat(db_r_space, padding_value=kg.dummy_r)[inv_offset]
e_space = ops.pad_and_cat(db_e_space, padding_value=kg.dummy_e)[inv_offset]
action_mask = ops.pad_and_cat(db_action_mask, padding_value=0)[inv_offset]
action_space = ActionSpace(forks, r_space, e_space, action_mask)
return action_space
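# Illustrative note (added; not part of the original source): pad_and_cat_action_space
# merges the per-bucket action spaces back into one batch-aligned action space. Assuming,
# for example, two buckets whose r_space tensors have shapes [2, 5] and [3, 9],
# ops.pad_and_cat is expected to pad both to width 9 (with kg.dummy_r for relations,
# kg.dummy_e for entities, and 0 for the action mask) and concatenate them into a [5, 9]
# tensor, after which indexing with inv_offset restores the original ordering of the batch.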
class GraphWalkAgent(nn.Module):
def __init__(self, args):
super(GraphWalkAgent, self).__init__()
self.model = args.model
self.relation_only = args.relation_only
self.history_dim = args.history_dim
self.history_num_layers = args.history_num_layers
self.entity_dim = args.entity_dim
self.relation_dim = args.relation_dim
if self.relation_only:
self.action_dim = args.relation_dim
else:
self.action_dim = args.entity_dim + args.relation_dim
self.ff_dropout_rate = args.ff_dropout_rate
self.rnn_dropout_rate = args.rnn_dropout_rate
self.action_dropout_rate = args.action_dropout_rate
self.xavier_initialization = args.xavier_initialization
self.relation_only_in_path = args.relation_only_in_path
self.path = None
# Set policy network modules
self.define_modules()
self.initialize_modules()
# Fact network modules
self.fn = None
self.fn_kg = None
def transit(
self,
current_entity,
obs: Observation,
kg: KnowledgeGraph,
use_action_space_bucketing=True,
merge_aspace_batching_outcome=False,
) -> BucketActions:
"""
Compute the next action distribution based on
(a) the current node (entity) in KG and the query relation
(b) action history representation
:param current_entity: agent location (node) at step t.
:param obs: agent observation at step t.
e_s: source node
query_relation: query relation
last_step: If set, the agent is carrying out the last step.
last_r: label of edge traversed in the previous step
            seen_nodes: nodes seen on the paths
:param kg: Knowledge graph environment.
:param use_action_space_bucketing: If set, group the action space of different nodes
into buckets by their sizes.
        :param merge_aspace_batching_outcome: If set, merge the transition probability distributions
            generated for the different action space buckets into a single batch.
        :return A BucketActions named tuple.
            With aspace batching and without merging the outcomes:
                action_spaces: (Dynamic Batch) per-bucket padded action spaces
                action_dists: (Dynamic Batch) per-bucket distributions over actions
                inv_offset: Indices to set the dynamic batching output back to the original order.
                entropy: (Batch) entropy of action distribution.
            Else:
                action_spaces / action_dists: a single merged action space and distribution
                inv_offset: None
                entropy: (Batch) entropy of action distribution.
"""
# Representation of the current state (current node and other observations)
X = self.encode_history(
current_entity, obs.source_entity, kg, obs.query_relation
)
# MLP
X = self.W1(X)
X = F.relu(X)
X = self.W1Dropout(X)
X = self.W2(X)
X2 = self.W2Dropout(X)
def policy_nn_fun(X2, acs: ActionSpace):
A = self.get_action_embedding(Action(acs.r_space, acs.e_space), kg)
action_dist = F.softmax(
torch.squeeze(A @ torch.unsqueeze(X2, 2), 2)
- (1 - acs.action_mask) * ops.HUGE_INT,
dim=-1,
)
# action_dist = ops.weighted_softmax(torch.squeeze(A @ torch.unsqueeze(X2, 2), 2), action_mask)
return action_dist, ops.entropy(action_dist)
if use_action_space_bucketing:
action = self.do_it_with_bucketing(
X2,
current_entity,
kg,
merge_aspace_batching_outcome,
obs,
policy_nn_fun,
)
else:
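            # Note (added): the non-bucketing code path is currently disabled; the
            # do_it_without_bucketing helper is commented out below, so this branch
            # intentionally fails fast.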
assert False
action = self.do_it_without_bucketing(
X2, current_entity, kg, obs, policy_nn_fun
)
return action
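    # Usage sketch (assumption; the rollout loop lives outside this file): the environment
    # typically calls initialize_path(...) once with the start action, then at each step
    # calls transit(...) to obtain a BucketActions, samples an action from action_dists,
    # and feeds the chosen (r, e) pair back in via update_path(...).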
def encode_history(self, current_entity, e_s, kg, query_relation):
embedded_q_rel = kg.get_relation_embeddings(query_relation)
encoded_history = self.path[-1][0][-1, :, :]
if self.relation_only:
X = torch.cat([encoded_history, embedded_q_rel], dim=-1)
elif self.relation_only_in_path:
E_s = kg.get_entity_embeddings(e_s)
E = kg.get_entity_embeddings(current_entity)
X = torch.cat([E, encoded_history, E_s, embedded_q_rel], dim=-1)
else:
E = kg.get_entity_embeddings(current_entity)
X = torch.cat([E, encoded_history, embedded_q_rel], dim=-1)
return X
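    # Note (added for clarity): the concatenation above must line up with input_dim in
    # define_modules(): history_dim + relation_dim when relation_only,
    # history_dim + 2 * entity_dim + relation_dim when relation_only_in_path,
    # and history_dim + entity_dim + relation_dim otherwise.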
# def do_it_without_bucketing(self, X2, current_entity, kg, obs, policy_nn_fun):
# def get_action_space(e, obs, kg):
# r_space = kg.action_space["relation-space"][e]
# e_space = kg.action_space["entity-space"][e]
# action_mask = kg.action_space["action-mask"][e]
# return self.apply_action_masks(acsp, e, obs, kg)
#
# action_space = get_action_space(current_entity, obs, kg)
# action_dist, entropy = policy_nn_fun(X2, action_space)
# db_outcomes = [(action_space, action_dist)]
# inv_offset = None
# return db_outcomes, entropy, inv_offset
def do_it_with_bucketing(
self,
X2,
current_entity,
kg,
merge_aspace_batching_outcome,
obs: Observation,
policy_nn_fun,
):
entropy_list = []
references = []
        bucket_action_spaces, in_bucket_indices = self.get_action_space_in_buckets(
            current_entity, obs, kg
        )
        action_spaces = []
        action_dists = []
        for as_b, in_bucket in zip(bucket_action_spaces, in_bucket_indices):
            X2_b = X2[in_bucket, :]
            action_dist_b, entropy_b = policy_nn_fun(X2_b, as_b)
            references.extend(in_bucket)
            action_spaces.append(as_b)
            action_dists.append(action_dist_b)
            entropy_list.append(entropy_b)
        inv_offset = [i for i, _ in sorted(enumerate(references), key=lambda x: x[1])]
        entropy = torch.cat(entropy_list, dim=0)[inv_offset]
        action = BucketActions(action_spaces, action_dists, inv_offset, entropy)
        if merge_aspace_batching_outcome:
            action_space = pad_and_cat_action_space(
                bucket_action_spaces, inv_offset, kg
)
action_dist = ops.pad_and_cat(action.action_dists, padding_value=0)[
inv_offset
]
action = BucketActions([action_space], [action_dist], None, entropy)
return action
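    # Note (added): `references` records, bucket by bucket, which original batch index each
    # row of the concatenated outputs belongs to; inv_offset is the permutation that sorts
    # the outputs back into batch order. A tiny runnable illustration is sketched in the
    # __main__ block at the bottom of this file.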
def initialize_path(self, action: Action, kg: KnowledgeGraph):
# [batch_size, action_dim]
if self.relation_only_in_path:
init_action_embedding = kg.get_relation_embeddings(action.rel)
else:
init_action_embedding = self.get_action_embedding(action, kg)
init_action_embedding.unsqueeze_(1)
# [num_layers, batch_size, dim]
init_h = zeros_var_cuda(
[self.history_num_layers, len(init_action_embedding), self.history_dim]
)
init_c = zeros_var_cuda(
[self.history_num_layers, len(init_action_embedding), self.history_dim]
)
self.path = [self.path_encoder(init_action_embedding, (init_h, init_c))[1]]
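    # Note (added): self.path stores the LSTM state tuple (h, c) after every step;
    # encode_history reads self.path[-1][0][-1, :, :], i.e. the top layer's hidden state
    # of the most recent step, as the action-history representation.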
def update_path(self, action: Action, kg: KnowledgeGraph, offset=None):
"""
Once an action was selected, update the action history.
:param action (r, e): (Variable:batch) indices of the most recent action
- r is the most recently traversed edge;
- e is the destination entity.
        :param offset: (Variable:batch) if not None, re-index the stored path history with the given offset; used during search
        :param kg: Knowledge graph environment.
"""
def offset_path_history(p, offset):
for i, x in enumerate(p):
if type(x) is tuple:
new_tuple = tuple([_x[:, offset, :] for _x in x])
p[i] = new_tuple
else:
p[i] = x[offset, :]
# update action history
if self.relation_only_in_path:
action_embedding = kg.get_relation_embeddings(action.rel)
else:
action_embedding = self.get_action_embedding(action, kg)
if offset is not None:
offset_path_history(self.path, offset)
self.path.append(
self.path_encoder(action_embedding.unsqueeze(1), self.path[-1])[1]
)
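    # Note (added; based on the "used during search" hint above): offset_path_history
    # re-indexes the batch dimension of every stored (h, c) tuple, which is what a beam
    # search needs when hypotheses are expanded or re-ordered between steps.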
def get_action_space_in_buckets(
self,
current_entity: torch.Tensor,
obs: Observation,
kg: KnowledgeGraph,
collapse_entities=False,
):
"""
To compute the search operation in batch, we group the action spaces of different states
(i.e. the set of outgoing edges of different nodes) into buckets based on their sizes to
        reduce the memory consumed by padding.
For example, in large knowledge graphs, certain nodes may have thousands of outgoing
        edges while a long tail of nodes has only a few outgoing edges. If a batch
contains a node with 1000 outgoing edges while the rest of the nodes have a maximum of
5 outgoing edges, we need to pad the action spaces of all nodes to 1000, which consumes
lots of memory.
With the bucketing approach, each bucket is padded separately. In this case the node
with 1000 outgoing edges will be in its own bucket and the rest of the nodes will suffer
little from padding the action space to 5.
Once we grouped the action spaces in buckets, the policy network computation is carried
out for every bucket iteratively. Once all the computation is done, we concatenate the
results of all buckets and restore their original order in the batch. The computation
outside the policy network module is thus unaffected.
:return db_action_spaces:
            [((r_space_b0, e_space_b0), action_mask_b0),
             ((r_space_b1, e_space_b1), action_mask_b1),
             ...
             ((r_space_bn, e_space_bn), action_mask_bn)]
A list of action space tensor representations grouped in n buckets, s.t.
r_space_b0.size(0) + r_space_b1.size(0) + ... + r_space_bn.size(0) = e.size(0)
:return db_references:
[l_batch_refs0, l_batch_refs1, ..., l_batch_refsn]
l_batch_refsi stores the indices of the examples in bucket i in the current batch,
which is used later to restore the output results to the original order.
"""
db_action_spaces, db_references = [], []
assert not collapse_entities # NotImplementedError
bucket_ids, inbucket_ids = kg.get_bucket_and_inbucket_ids(current_entity)
for b_key in set(bucket_ids.tolist()):
inthisbucket_indices = (
torch.nonzero(bucket_ids.eq(b_key)).squeeze().tolist()
)
            if not isinstance(inthisbucket_indices, list):  # squeeze() yields a scalar when the bucket holds a single example
inthisbucket_indices = [inthisbucket_indices]
inbucket_ids_of_entities_inthisbucket = inbucket_ids[
inthisbucket_indices
].tolist()
bucket_action_space = kg.bucketid2ActionSpace[b_key]
e_b = current_entity[inthisbucket_indices]
obs_b = obs.get_slice(inthisbucket_indices)
as_bucket = bucket_action_space.get_slice(
inbucket_ids_of_entities_inthisbucket
)
action_mask = self.apply_action_masks(as_bucket, e_b, obs_b, kg)
action_space_b = ActionSpace(
as_bucket.forks, as_bucket.r_space, as_bucket.e_space, action_mask
)
db_action_spaces.append(action_space_b)
db_references.append(inthisbucket_indices)
return db_action_spaces, db_references
def apply_action_masks(
self, acsp: ActionSpace, e, obs: Observation, kg: KnowledgeGraph
):
r_space, e_space, action_mask = acsp.r_space, acsp.e_space, acsp.action_mask
e_s, q, e_t, last_step, last_r, seen_nodes = obs
# Prevent the agent from selecting the ground truth edge
ground_truth_edge_mask = self.get_ground_truth_edge_mask(
e, r_space, e_space, obs, kg
)
action_mask -= ground_truth_edge_mask
self.validate_action_mask(action_mask)
# Mask out false negatives in the final step
if last_step:
false_negative_mask = self.get_false_negative_mask(e_space, e_s, q, e_t, kg)
action_mask *= 1 - false_negative_mask
self.validate_action_mask(action_mask)
# Prevent the agent from stopping in the middle of a path
# stop_mask = (last_r == NO_OP_RELATION_ID).unsqueeze(1).float()
# action_mask = (1 - stop_mask) * action_mask + stop_mask * (r_space == NO_OP_RELATION_ID).float()
# Prevent loops
# Note: avoid duplicate removal of self-loops
# seen_nodes_b = seen_nodes[l_batch_refs]
# loop_mask_b = (((seen_nodes_b.unsqueeze(1) == e_space.unsqueeze(2)).sum(2) > 0) *
# (r_space != NO_OP_RELATION_ID)).float()
# action_mask *= (1 - loop_mask_b)
return action_mask
def get_ground_truth_edge_mask(
self, current_nodes, r_space, e_space, obs: Observation, kg: KnowledgeGraph
):
s_e = obs.source_entity
t_e = obs.target_entity
q = obs.query_relation
def build_mask(source_nodes, target_nodes, relation):
return (
(current_nodes == source_nodes).unsqueeze(1)
* (r_space == relation.unsqueeze(1))
* (e_space == target_nodes.unsqueeze(1))
)
mask = build_mask(s_e, t_e, q)
inv_q = kg.get_inv_relation_id(q)
inv_mask = build_mask(t_e, s_e, inv_q)
return ((mask + inv_mask) * (s_e.unsqueeze(1) != kg.dummy_e)).float()
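    # Note (added): the returned mask marks the query triple (e_s, q, e_t) and its inverse
    # (e_t, q^-1, e_s) in the action space; apply_action_masks subtracts it so the agent
    # cannot simply traverse the edge it is asked to predict. Rows whose source entity is
    # the dummy entity are zeroed out.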
def get_answer_mask(self, e_space, e_s, q, kg: KnowledgeGraph):
if kg.args.mask_test_false_negatives:
answer_vectors = kg.all_object_vectors
else:
answer_vectors = kg.train_object_vectors
answer_masks = []
for i in range(len(e_space)):
_e_s, _q = int(e_s[i]), int(q[i])
if not _e_s in answer_vectors or not _q in answer_vectors[_e_s]:
answer_vector = var_cuda(torch.LongTensor([[kg.num_entities]]))
else:
answer_vector = answer_vectors[_e_s][_q]
answer_mask = torch.sum(
e_space[i].unsqueeze(0) == answer_vector, dim=0
).long()
answer_masks.append(answer_mask)
answer_mask = torch.cat(answer_masks).view(len(e_space), -1)
return answer_mask
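    # Note (added): answer_mask is nonzero exactly where the candidate target entity is a
    # known correct answer for (e_s, q); depending on kg.args.mask_test_false_negatives the
    # known answers come from all triples or from the training triples only.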
def get_false_negative_mask(self, e_space, e_s, q, e_t, kg: KnowledgeGraph):
answer_mask = self.get_answer_mask(e_space, e_s, q, kg)
        # This is a trick applied during training where we convert a multi-answer prediction problem into several
# single-answer prediction problems. By masking out the other answers in the training set, we are forcing
# the agent to walk towards a particular answer.
# This trick does not affect inference on the test set: at inference time the ground truth answer will not
# appear in the answer mask. This can be checked by uncommenting the following assertion statement.
# Note that the assertion statement can trigger in the last batch if you're using a batch_size > 1 since
# we append dummy examples to the last batch to make it the required batch size.
# The assertion statement will also trigger in the dev set inference of NELL-995 since we randomly
# sampled the dev set from the training data.
# assert(float((answer_mask * (e_space == e_t.unsqueeze(1)).long()).sum()) == 0)
false_negative_mask = (
answer_mask * (e_space != e_t.unsqueeze(1)).long()
).float()
return false_negative_mask
def validate_action_mask(self, action_mask):
action_mask_min = action_mask.min()
action_mask_max = action_mask.max()
assert action_mask_min == 0 or action_mask_min == 1
assert action_mask_max == 0 or action_mask_max == 1
def get_action_embedding(self, action: Action, kg: KnowledgeGraph):
"""
Return (batch) action embedding which is the concatenation of the embeddings of
the traversed edge and the target node.
:param action (r, e):
(Variable:batch) indices of the most recent action
- r is the most recently traversed edge
- e is the destination entity.
        :param kg: Knowledge graph environment.
"""
relation_embedding = kg.get_relation_embeddings(action.rel)
if self.relation_only:
action_embedding = relation_embedding
else:
entity_embedding = kg.get_entity_embeddings(action.ent)
action_embedding = torch.cat([relation_embedding, entity_embedding], dim=-1)
return action_embedding
def define_modules(self):
if self.relation_only:
input_dim = self.history_dim + self.relation_dim
elif self.relation_only_in_path:
input_dim = self.history_dim + self.entity_dim * 2 + self.relation_dim
else:
input_dim = self.history_dim + self.entity_dim + self.relation_dim
self.W1 = nn.Linear(input_dim, self.action_dim)
self.W2 = nn.Linear(self.action_dim, self.action_dim)
self.W1Dropout = nn.Dropout(p=self.ff_dropout_rate)
self.W2Dropout = nn.Dropout(p=self.ff_dropout_rate)
if self.relation_only_in_path:
self.path_encoder = nn.LSTM(
input_size=self.relation_dim,
hidden_size=self.history_dim,
num_layers=self.history_num_layers,
batch_first=True,
)
else:
self.path_encoder = nn.LSTM(
input_size=self.action_dim,
hidden_size=self.history_dim,
num_layers=self.history_num_layers,
batch_first=True,
)
def initialize_modules(self):
if self.xavier_initialization:
nn.init.xavier_uniform_(self.W1.weight)
nn.init.xavier_uniform_(self.W2.weight)
for name, param in self.path_encoder.named_parameters():
if "bias" in name:
nn.init.constant_(param, 0.0)
elif "weight" in name:
nn.init.xavier_normal_(param)
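if __name__ == "__main__":
    # Minimal self-contained sketch (added; not part of the original module). It only
    # illustrates the inverse-offset trick used in do_it_with_bucketing to restore
    # per-bucket results to the original batch order; no KnowledgeGraph is required.
    references = [3, 0, 2, 1]  # original batch indices, concatenated bucket by bucket
    inv_offset = [i for i, _ in sorted(enumerate(references), key=lambda x: x[1])]
    bucketed_output = torch.tensor([30.0, 0.0, 20.0, 10.0])  # outputs in bucket order
    restored = bucketed_output[inv_offset]  # back to the original batch order
    assert restored.tolist() == [0.0, 10.0, 20.0, 30.0]
    print("inv_offset:", inv_offset, "->", restored.tolist())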
"step-5": "\"\"\"\n Copyright (c) 2018, salesforce.com, inc.\n All rights reserved.\n SPDX-License-Identifier: BSD-3-Clause\n For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause\n \n Graph Search Policy Network.\n\"\"\"\nfrom typing import List, NamedTuple, Union\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nimport src.utils.ops as ops\nfrom src.knowledge_graph import KnowledgeGraph, ActionSpace, Observation, Action\nfrom src.utils.ops import var_cuda, zeros_var_cuda\n\n\nclass BucketActions(NamedTuple):\n action_spaces: List[ActionSpace]\n action_dists: List[torch.Tensor]\n inv_offset: Union[List[int], None]\n entropy: torch.Tensor\n\n\ndef pad_and_cat_action_space(\n action_spaces: List[ActionSpace], inv_offset, kg: KnowledgeGraph\n):\n db_r_space, db_e_space, db_action_mask = [], [], []\n forks = []\n for acsp in action_spaces:\n forks += acsp.forks\n db_r_space.append(acsp.r_space)\n db_e_space.append(acsp.e_space)\n db_action_mask.append(acsp.action_mask)\n r_space = ops.pad_and_cat(db_r_space, padding_value=kg.dummy_r)[inv_offset]\n e_space = ops.pad_and_cat(db_e_space, padding_value=kg.dummy_e)[inv_offset]\n action_mask = ops.pad_and_cat(db_action_mask, padding_value=0)[inv_offset]\n action_space = ActionSpace(forks, r_space, e_space, action_mask)\n return action_space\n\n\nclass GraphWalkAgent(nn.Module):\n def __init__(self, args):\n super(GraphWalkAgent, self).__init__()\n self.model = args.model\n self.relation_only = args.relation_only\n\n self.history_dim = args.history_dim\n self.history_num_layers = args.history_num_layers\n self.entity_dim = args.entity_dim\n self.relation_dim = args.relation_dim\n if self.relation_only:\n self.action_dim = args.relation_dim\n else:\n self.action_dim = args.entity_dim + args.relation_dim\n self.ff_dropout_rate = args.ff_dropout_rate\n self.rnn_dropout_rate = args.rnn_dropout_rate\n self.action_dropout_rate = args.action_dropout_rate\n\n self.xavier_initialization = args.xavier_initialization\n\n self.relation_only_in_path = args.relation_only_in_path\n self.path = None\n\n # Set policy network modules\n self.define_modules()\n self.initialize_modules()\n\n # Fact network modules\n self.fn = None\n self.fn_kg = None\n\n def transit(\n self,\n current_entity,\n obs: Observation,\n kg: KnowledgeGraph,\n use_action_space_bucketing=True,\n merge_aspace_batching_outcome=False,\n ) -> BucketActions:\n \"\"\"\n Compute the next action distribution based on\n (a) the current node (entity) in KG and the query relation\n (b) action history representation\n :param current_entity: agent location (node) at step t.\n :param obs: agent observation at step t.\n e_s: source node\n query_relation: query relation\n last_step: If set, the agent is carrying out the last step.\n last_r: label of edge traversed in the previous step\n seen_nodes: notes seen on the paths\n :param kg: Knowledge graph environment.\n :param use_action_space_bucketing: If set, group the action space of different nodes \n into buckets by their sizes.\n :param merge_aspace_batch_outcome: If set, merge the transition probability distribution\n generated of different action space bucket into a single batch.\n :return\n With aspace batching and without merging the outcomes:\n db_outcomes: (Dynamic Batch) (action_space, action_dist)\n action_space: (Batch) padded possible action indices\n action_dist: (Batch) distribution over actions.\n inv_offset: Indices to set the dynamic batching output back to the 
original order.\n entropy: (Batch) entropy of action distribution.\n Else:\n action_dist: (Batch) distribution over actions.\n entropy: (Batch) entropy of action distribution.\n \"\"\"\n\n # Representation of the current state (current node and other observations)\n X = self.encode_history(\n current_entity, obs.source_entity, kg, obs.query_relation\n )\n\n # MLP\n X = self.W1(X)\n X = F.relu(X)\n X = self.W1Dropout(X)\n X = self.W2(X)\n X2 = self.W2Dropout(X)\n\n def policy_nn_fun(X2, acs: ActionSpace):\n A = self.get_action_embedding(Action(acs.r_space, acs.e_space), kg)\n action_dist = F.softmax(\n torch.squeeze(A @ torch.unsqueeze(X2, 2), 2)\n - (1 - acs.action_mask) * ops.HUGE_INT,\n dim=-1,\n )\n # action_dist = ops.weighted_softmax(torch.squeeze(A @ torch.unsqueeze(X2, 2), 2), action_mask)\n return action_dist, ops.entropy(action_dist)\n\n if use_action_space_bucketing:\n action = self.do_it_with_bucketing(\n X2,\n current_entity,\n kg,\n merge_aspace_batching_outcome,\n obs,\n policy_nn_fun,\n )\n else:\n assert False\n action = self.do_it_without_bucketing(\n X2, current_entity, kg, obs, policy_nn_fun\n )\n\n return action\n\n def encode_history(self, current_entity, e_s, kg, query_relation):\n embedded_q_rel = kg.get_relation_embeddings(query_relation)\n encoded_history = self.path[-1][0][-1, :, :]\n if self.relation_only:\n X = torch.cat([encoded_history, embedded_q_rel], dim=-1)\n elif self.relation_only_in_path:\n E_s = kg.get_entity_embeddings(e_s)\n E = kg.get_entity_embeddings(current_entity)\n X = torch.cat([E, encoded_history, E_s, embedded_q_rel], dim=-1)\n else:\n E = kg.get_entity_embeddings(current_entity)\n X = torch.cat([E, encoded_history, embedded_q_rel], dim=-1)\n return X\n\n # def do_it_without_bucketing(self, X2, current_entity, kg, obs, policy_nn_fun):\n # def get_action_space(e, obs, kg):\n # r_space = kg.action_space[\"relation-space\"][e]\n # e_space = kg.action_space[\"entity-space\"][e]\n # action_mask = kg.action_space[\"action-mask\"][e]\n # return self.apply_action_masks(acsp, e, obs, kg)\n #\n # action_space = get_action_space(current_entity, obs, kg)\n # action_dist, entropy = policy_nn_fun(X2, action_space)\n # db_outcomes = [(action_space, action_dist)]\n # inv_offset = None\n # return db_outcomes, entropy, inv_offset\n\n def do_it_with_bucketing(\n self,\n X2,\n current_entity,\n kg,\n merge_aspace_batching_outcome,\n obs: Observation,\n policy_nn_fun,\n ):\n entropy_list = []\n references = []\n buckect_action_spaces, inthis_bucket_indizes = self.get_action_space_in_buckets(\n current_entity, obs, kg\n )\n action_spaces = []\n action_dists = []\n\n for as_b, inthis_bucket in zip(buckect_action_spaces, inthis_bucket_indizes):\n X2_b = X2[inthis_bucket, :]\n action_dist_b, entropy_b = policy_nn_fun(X2_b, as_b)\n references.extend(inthis_bucket)\n action_spaces.append(as_b)\n action_dists.append(action_dist_b)\n entropy_list.append(entropy_b)\n inv_offset = [i for i, _ in sorted(enumerate(references), key=lambda x: x[1])]\n entropy = torch.cat(entropy_list, dim=0)[inv_offset]\n action = BucketActions(action_spaces, action_dists, inv_offset, entropy)\n\n if merge_aspace_batching_outcome:\n action_space = pad_and_cat_action_space(\n buckect_action_spaces, inv_offset, kg\n )\n action_dist = ops.pad_and_cat(action.action_dists, padding_value=0)[\n inv_offset\n ]\n action = BucketActions([action_space], [action_dist], None, entropy)\n return action\n\n def initialize_path(self, action: Action, kg: KnowledgeGraph):\n # [batch_size, action_dim]\n if 
self.relation_only_in_path:\n init_action_embedding = kg.get_relation_embeddings(action.rel)\n else:\n init_action_embedding = self.get_action_embedding(action, kg)\n init_action_embedding.unsqueeze_(1)\n # [num_layers, batch_size, dim]\n init_h = zeros_var_cuda(\n [self.history_num_layers, len(init_action_embedding), self.history_dim]\n )\n init_c = zeros_var_cuda(\n [self.history_num_layers, len(init_action_embedding), self.history_dim]\n )\n self.path = [self.path_encoder(init_action_embedding, (init_h, init_c))[1]]\n\n def update_path(self, action: Action, kg: KnowledgeGraph, offset=None):\n \"\"\"\n Once an action was selected, update the action history.\n :param action (r, e): (Variable:batch) indices of the most recent action\n - r is the most recently traversed edge;\n - e is the destination entity.\n :param offset: (Variable:batch) if None, adjust path history with the given offset, used for search\n :param KG: Knowledge graph environment.\n \"\"\"\n\n def offset_path_history(p, offset):\n for i, x in enumerate(p):\n if type(x) is tuple:\n new_tuple = tuple([_x[:, offset, :] for _x in x])\n p[i] = new_tuple\n else:\n p[i] = x[offset, :]\n\n # update action history\n if self.relation_only_in_path:\n action_embedding = kg.get_relation_embeddings(action.rel)\n else:\n action_embedding = self.get_action_embedding(action, kg)\n if offset is not None:\n offset_path_history(self.path, offset)\n\n self.path.append(\n self.path_encoder(action_embedding.unsqueeze(1), self.path[-1])[1]\n )\n\n def get_action_space_in_buckets(\n self,\n current_entity: torch.Tensor,\n obs: Observation,\n kg: KnowledgeGraph,\n collapse_entities=False,\n ):\n \"\"\"\n To compute the search operation in batch, we group the action spaces of different states\n (i.e. the set of outgoing edges of different nodes) into buckets based on their sizes to\n save the memory consumption of paddings.\n\n For example, in large knowledge graphs, certain nodes may have thousands of outgoing\n edges while a long tail of nodes only have a small amount of outgoing edges. If a batch\n contains a node with 1000 outgoing edges while the rest of the nodes have a maximum of\n 5 outgoing edges, we need to pad the action spaces of all nodes to 1000, which consumes\n lots of memory.\n\n With the bucketing approach, each bucket is padded separately. In this case the node\n with 1000 outgoing edges will be in its own bucket and the rest of the nodes will suffer\n little from padding the action space to 5.\n\n Once we grouped the action spaces in buckets, the policy network computation is carried\n out for every bucket iteratively. Once all the computation is done, we concatenate the\n results of all buckets and restore their original order in the batch. The computation\n outside the policy network module is thus unaffected.\n\n :return db_action_spaces:\n [((r_space_b0, r_space_b0), action_mask_b0),\n ((r_space_b1, r_space_b1), action_mask_b1),\n ...\n ((r_space_bn, r_space_bn), action_mask_bn)]\n\n A list of action space tensor representations grouped in n buckets, s.t.\n r_space_b0.size(0) + r_space_b1.size(0) + ... 
+ r_space_bn.size(0) = e.size(0)\n\n :return db_references:\n [l_batch_refs0, l_batch_refs1, ..., l_batch_refsn]\n l_batch_refsi stores the indices of the examples in bucket i in the current batch,\n which is used later to restore the output results to the original order.\n \"\"\"\n db_action_spaces, db_references = [], []\n assert not collapse_entities # NotImplementedError\n bucket_ids, inbucket_ids = kg.get_bucket_and_inbucket_ids(current_entity)\n\n for b_key in set(bucket_ids.tolist()):\n inthisbucket_indices = (\n torch.nonzero(bucket_ids.eq(b_key)).squeeze().tolist()\n )\n if not isinstance(inthisbucket_indices, list): # TODO(tilo) wtf!\n inthisbucket_indices = [inthisbucket_indices]\n\n inbucket_ids_of_entities_inthisbucket = inbucket_ids[\n inthisbucket_indices\n ].tolist()\n\n bucket_action_space = kg.bucketid2ActionSpace[b_key]\n\n e_b = current_entity[inthisbucket_indices]\n obs_b = obs.get_slice(inthisbucket_indices)\n\n as_bucket = bucket_action_space.get_slice(\n inbucket_ids_of_entities_inthisbucket\n )\n action_mask = self.apply_action_masks(as_bucket, e_b, obs_b, kg)\n action_space_b = ActionSpace(\n as_bucket.forks, as_bucket.r_space, as_bucket.e_space, action_mask\n )\n db_action_spaces.append(action_space_b)\n db_references.append(inthisbucket_indices)\n\n return db_action_spaces, db_references\n\n def apply_action_masks(\n self, acsp: ActionSpace, e, obs: Observation, kg: KnowledgeGraph\n ):\n r_space, e_space, action_mask = acsp.r_space, acsp.e_space, acsp.action_mask\n e_s, q, e_t, last_step, last_r, seen_nodes = obs\n\n # Prevent the agent from selecting the ground truth edge\n ground_truth_edge_mask = self.get_ground_truth_edge_mask(\n e, r_space, e_space, obs, kg\n )\n action_mask -= ground_truth_edge_mask\n self.validate_action_mask(action_mask)\n\n # Mask out false negatives in the final step\n if last_step:\n false_negative_mask = self.get_false_negative_mask(e_space, e_s, q, e_t, kg)\n action_mask *= 1 - false_negative_mask\n self.validate_action_mask(action_mask)\n\n # Prevent the agent from stopping in the middle of a path\n # stop_mask = (last_r == NO_OP_RELATION_ID).unsqueeze(1).float()\n # action_mask = (1 - stop_mask) * action_mask + stop_mask * (r_space == NO_OP_RELATION_ID).float()\n # Prevent loops\n # Note: avoid duplicate removal of self-loops\n # seen_nodes_b = seen_nodes[l_batch_refs]\n # loop_mask_b = (((seen_nodes_b.unsqueeze(1) == e_space.unsqueeze(2)).sum(2) > 0) *\n # (r_space != NO_OP_RELATION_ID)).float()\n # action_mask *= (1 - loop_mask_b)\n return action_mask\n\n def get_ground_truth_edge_mask(\n self, current_nodes, r_space, e_space, obs: Observation, kg: KnowledgeGraph\n ):\n s_e = obs.source_entity\n t_e = obs.target_entity\n q = obs.query_relation\n\n def build_mask(source_nodes, target_nodes, relation):\n return (\n (current_nodes == source_nodes).unsqueeze(1)\n * (r_space == relation.unsqueeze(1))\n * (e_space == target_nodes.unsqueeze(1))\n )\n\n mask = build_mask(s_e, t_e, q)\n inv_q = kg.get_inv_relation_id(q)\n inv_mask = build_mask(t_e, s_e, inv_q)\n return ((mask + inv_mask) * (s_e.unsqueeze(1) != kg.dummy_e)).float()\n\n def get_answer_mask(self, e_space, e_s, q, kg: KnowledgeGraph):\n if kg.args.mask_test_false_negatives:\n answer_vectors = kg.all_object_vectors\n else:\n answer_vectors = kg.train_object_vectors\n answer_masks = []\n for i in range(len(e_space)):\n _e_s, _q = int(e_s[i]), int(q[i])\n if not _e_s in answer_vectors or not _q in answer_vectors[_e_s]:\n answer_vector = 
var_cuda(torch.LongTensor([[kg.num_entities]]))\n else:\n answer_vector = answer_vectors[_e_s][_q]\n answer_mask = torch.sum(\n e_space[i].unsqueeze(0) == answer_vector, dim=0\n ).long()\n answer_masks.append(answer_mask)\n answer_mask = torch.cat(answer_masks).view(len(e_space), -1)\n return answer_mask\n\n def get_false_negative_mask(self, e_space, e_s, q, e_t, kg: KnowledgeGraph):\n answer_mask = self.get_answer_mask(e_space, e_s, q, kg)\n # This is a trick applied during training where we convert a multi-answer predction problem into several\n # single-answer prediction problems. By masking out the other answers in the training set, we are forcing\n # the agent to walk towards a particular answer.\n # This trick does not affect inference on the test set: at inference time the ground truth answer will not\n # appear in the answer mask. This can be checked by uncommenting the following assertion statement.\n # Note that the assertion statement can trigger in the last batch if you're using a batch_size > 1 since\n # we append dummy examples to the last batch to make it the required batch size.\n # The assertion statement will also trigger in the dev set inference of NELL-995 since we randomly\n # sampled the dev set from the training data.\n # assert(float((answer_mask * (e_space == e_t.unsqueeze(1)).long()).sum()) == 0)\n false_negative_mask = (\n answer_mask * (e_space != e_t.unsqueeze(1)).long()\n ).float()\n return false_negative_mask\n\n def validate_action_mask(self, action_mask):\n action_mask_min = action_mask.min()\n action_mask_max = action_mask.max()\n assert action_mask_min == 0 or action_mask_min == 1\n assert action_mask_max == 0 or action_mask_max == 1\n\n def get_action_embedding(self, action: Action, kg: KnowledgeGraph):\n \"\"\"\n Return (batch) action embedding which is the concatenation of the embeddings of\n the traversed edge and the target node.\n\n :param action (r, e):\n (Variable:batch) indices of the most recent action\n - r is the most recently traversed edge\n - e is the destination entity.\n :param kg: Knowledge graph enviroment.\n \"\"\"\n relation_embedding = kg.get_relation_embeddings(action.rel)\n if self.relation_only:\n action_embedding = relation_embedding\n else:\n entity_embedding = kg.get_entity_embeddings(action.ent)\n action_embedding = torch.cat([relation_embedding, entity_embedding], dim=-1)\n return action_embedding\n\n def define_modules(self):\n if self.relation_only:\n input_dim = self.history_dim + self.relation_dim\n elif self.relation_only_in_path:\n input_dim = self.history_dim + self.entity_dim * 2 + self.relation_dim\n else:\n input_dim = self.history_dim + self.entity_dim + self.relation_dim\n self.W1 = nn.Linear(input_dim, self.action_dim)\n self.W2 = nn.Linear(self.action_dim, self.action_dim)\n self.W1Dropout = nn.Dropout(p=self.ff_dropout_rate)\n self.W2Dropout = nn.Dropout(p=self.ff_dropout_rate)\n if self.relation_only_in_path:\n self.path_encoder = nn.LSTM(\n input_size=self.relation_dim,\n hidden_size=self.history_dim,\n num_layers=self.history_num_layers,\n batch_first=True,\n )\n else:\n self.path_encoder = nn.LSTM(\n input_size=self.action_dim,\n hidden_size=self.history_dim,\n num_layers=self.history_num_layers,\n batch_first=True,\n )\n\n def initialize_modules(self):\n if self.xavier_initialization:\n nn.init.xavier_uniform_(self.W1.weight)\n nn.init.xavier_uniform_(self.W2.weight)\n for name, param in self.path_encoder.named_parameters():\n if \"bias\" in name:\n nn.init.constant_(param, 0.0)\n elif \"weight\" in 
name:\n nn.init.xavier_normal_(param)\n",
"step-ids": [
10,
13,
16,
18,
20
]
}
|
[
10,
13,
16,
18,
20
] |
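Side note on the record above: it groups per-node action spaces into size buckets and later restores the original batch order through an inverse offset. The following standalone sketch (illustrative names only, no torch, not taken from the record) shows that bucket-and-restore pattern in plain Python.

def bucket_by_size(action_lists):
    # Group example indices by the length of their action list.
    buckets = {}
    for idx, actions in enumerate(action_lists):
        buckets.setdefault(len(actions), []).append((idx, actions))
    return buckets

def process_in_buckets(action_lists, fn):
    references, outputs = [], []          # original indices / results, in bucket order
    for _, items in sorted(bucket_by_size(action_lists).items()):
        for idx, actions in items:
            references.append(idx)
            outputs.append(fn(actions))
    # Inverse offset: for each original position, where its result ended up.
    inv_offset = [i for i, _ in sorted(enumerate(references), key=lambda x: x[1])]
    return [outputs[i] for i in inv_offset]

if __name__ == "__main__":
    batch = [[1, 2, 3], [4], [5, 6], [7]]
    print(process_in_buckets(batch, sum))  # [6, 4, 11, 7] -- original order preserved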
# -*- coding: utf-8 -*-
"""
Created on Sun Sep 19 17:15:58 2021
@author: Professional
"""
#son = int(input("Biror son kiriting: ") )
#print(son, "ning kvadrati", son*son, "ga teng")
#print (son, "ning kubi", son*son*son, "ga teng")
#yosh = int(input("Yoshingiz nechida: "))
#print("Siz", 2021 - yosh, "yilda tug'ilgansz")
a = int(input("Birinchi sonni kiriting: "))
b = int(input("Ikkinchi sonni kiriting: "))
print("yig'indisi ", a + b)
print("ayirmasi ", a - b)
print("bo'linmasi ", a/b)
print("ko'paytmasi ", a*b)
|
normal
|
{
"blob_id": "0d32fe36f71ffb3df56738664c5dbd0b8ae585e3",
"index": 3303,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(\"yig'indisi \", a + b)\nprint('ayirmasi ', a - b)\nprint(\"bo'linmasi \", a / b)\nprint(\"ko'paytmasi \", a * b)\n",
"step-3": "<mask token>\na = int(input('Birinchi sonni kiriting: '))\nb = int(input('Ikkinchi sonni kiriting: '))\nprint(\"yig'indisi \", a + b)\nprint('ayirmasi ', a - b)\nprint(\"bo'linmasi \", a / b)\nprint(\"ko'paytmasi \", a * b)\n",
"step-4": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sun Sep 19 17:15:58 2021\r\n\r\n@author: Professional\r\n\"\"\"\r\n\r\n#son = int(input(\"Biror son kiriting: \") )\r\n#print(son, \"ning kvadrati\", son*son, \"ga teng\")\r\n#print (son, \"ning kubi\", son*son*son, \"ga teng\")\r\n\r\n#yosh = int(input(\"Yoshingiz nechida: \"))\r\n#print(\"Siz\", 2021 - yosh, \"yilda tug'ilgansz\")\r\n\r\na = int(input(\"Birinchi sonni kiriting: \"))\r\nb = int(input(\"Ikkinchi sonni kiriting: \"))\r\nprint(\"yig'indisi \", a + b)\r\nprint(\"ayirmasi \", a - b)\r\nprint(\"bo'linmasi \", a/b)\r\nprint(\"ko'paytmasi \", a*b)",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
from datetime import *
import datetime
import time
time_one = datetime.time(1, 2, 3)
print("Time One :: ", time_one)
time_two = datetime.time(hour=23, minute=59, second=59, microsecond=99)
print("Time Two :: ", time_two)
date_one = datetime.date(month=3, year=2019, day=31)
print("Date One :: ", date_one)
today = datetime.date.today()
print("Today :: ", today, today.timetuple())
print("Difference Between Time :: ", timedelta(time_two.second) - timedelta(time_one.second))
print("Today :: ", datetime.date.today())
print("Time.asctime() :: ", time.asctime())
now = time.gmtime()
print("time.asctime(time.gmtime) :: ", time.asctime(now))
start = time.time()
time.sleep(3)
stop = time.time()
print(stop - start)
|
normal
|
{
"blob_id": "1ed7dba63db38e53a1dc5fac3c36f0dd98075c1f",
"index": 4305,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint('Time One :: ', time_one)\n<mask token>\nprint('Time Two :: ', time_two)\n<mask token>\nprint('Date One :: ', date_one)\n<mask token>\nprint('Today :: ', today, today.timetuple())\nprint('Difference Between Time :: ', timedelta(time_two.second) - timedelta\n (time_one.second))\nprint('Today :: ', datetime.date.today())\nprint('Time.asctime() :: ', time.asctime())\n<mask token>\nprint('time.asctime(time.gmtime) :: ', time.asctime(now))\n<mask token>\ntime.sleep(3)\n<mask token>\nprint(stop - start)\n",
"step-3": "<mask token>\ntime_one = datetime.time(1, 2, 3)\nprint('Time One :: ', time_one)\ntime_two = datetime.time(hour=23, minute=59, second=59, microsecond=99)\nprint('Time Two :: ', time_two)\ndate_one = datetime.date(month=3, year=2019, day=31)\nprint('Date One :: ', date_one)\ntoday = datetime.date.today()\nprint('Today :: ', today, today.timetuple())\nprint('Difference Between Time :: ', timedelta(time_two.second) - timedelta\n (time_one.second))\nprint('Today :: ', datetime.date.today())\nprint('Time.asctime() :: ', time.asctime())\nnow = time.gmtime()\nprint('time.asctime(time.gmtime) :: ', time.asctime(now))\nstart = time.time()\ntime.sleep(3)\nstop = time.time()\nprint(stop - start)\n",
"step-4": "from datetime import *\nimport datetime\nimport time\ntime_one = datetime.time(1, 2, 3)\nprint('Time One :: ', time_one)\ntime_two = datetime.time(hour=23, minute=59, second=59, microsecond=99)\nprint('Time Two :: ', time_two)\ndate_one = datetime.date(month=3, year=2019, day=31)\nprint('Date One :: ', date_one)\ntoday = datetime.date.today()\nprint('Today :: ', today, today.timetuple())\nprint('Difference Between Time :: ', timedelta(time_two.second) - timedelta\n (time_one.second))\nprint('Today :: ', datetime.date.today())\nprint('Time.asctime() :: ', time.asctime())\nnow = time.gmtime()\nprint('time.asctime(time.gmtime) :: ', time.asctime(now))\nstart = time.time()\ntime.sleep(3)\nstop = time.time()\nprint(stop - start)\n",
"step-5": "from datetime import *\nimport datetime\nimport time\ntime_one = datetime.time(1, 2, 3)\nprint(\"Time One :: \", time_one)\n\ntime_two = datetime.time(hour=23, minute=59, second=59, microsecond=99)\nprint(\"Time Two :: \", time_two)\n\ndate_one = datetime.date(month=3, year=2019, day=31)\nprint(\"Date One :: \", date_one)\n\ntoday = datetime.date.today()\nprint(\"Today :: \", today, today.timetuple())\n\nprint(\"Difference Between Time :: \", timedelta(time_two.second) - timedelta(time_one.second))\nprint(\"Today :: \", datetime.date.today())\n\nprint(\"Time.asctime() :: \", time.asctime())\nnow = time.gmtime()\nprint(\"time.asctime(time.gmtime) :: \", time.asctime(now))\n\nstart = time.time()\ntime.sleep(3)\nstop = time.time()\nprint(stop - start)",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
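Side note on the record above: timedelta(time_two.second) - timedelta(time_one.second) passes seconds into timedelta's first positional parameter, which is days, so it does not measure the gap between the two times. A minimal sketch of the usual approach (anchoring both times to the same date) follows; it is illustrative only and not part of the record.

# datetime.time objects cannot be subtracted directly, and timedelta(x)
# interprets a bare positional x as days, not seconds. Anchoring both times
# to the same date gives the intended difference.
import datetime

t1 = datetime.time(1, 2, 3)
t2 = datetime.time(hour=23, minute=59, second=59, microsecond=99)
anchor = datetime.date.today()
delta = datetime.datetime.combine(anchor, t2) - datetime.datetime.combine(anchor, t1)
print("Difference between times ::", delta)        # 22:57:56.000099
print("In seconds ::", delta.total_seconds())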
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
for c1, c2 in zip(s1, s2):
ans += c1 + c2
print(ans)
<|reserved_special_token_1|>
s1 = 'パトカー'
s2 = 'タクシー'
ans = ''
for c1, c2 in zip(s1, s2):
ans += c1 + c2
print(ans)
<|reserved_special_token_1|>
# coding: utf-8
# 02. 「パトカー」+「タクシー」=「パタトクカシーー」
# 「パトカー」+「タクシー」の文字を先頭から交互に連結して文字列「パタトクカシーー」を得よ.
s1 = "パトカー"
s2 = "タクシー"
ans = ""
for c1, c2 in zip(s1, s2):
ans += c1 + c2
print(ans)
#パタトクカシーー
|
flexible
|
{
"blob_id": "4d7e30714ae209e1d09d895dadf7a19928fe253f",
"index": 6623,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor c1, c2 in zip(s1, s2):\n ans += c1 + c2\nprint(ans)\n",
"step-3": "s1 = 'パトカー'\ns2 = 'タクシー'\nans = ''\nfor c1, c2 in zip(s1, s2):\n ans += c1 + c2\nprint(ans)\n",
"step-4": "# coding: utf-8\n\n# 02. 「パトカー」+「タクシー」=「パタトクカシーー」\n# 「パトカー」+「タクシー」の文字を先頭から交互に連結して文字列「パタトクカシーー」を得よ.\n\ns1 = \"パトカー\"\ns2 = \"タクシー\"\n\nans = \"\"\nfor c1, c2 in zip(s1, s2):\n ans += c1 + c2\n\nprint(ans)\n#パタトクカシーー\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
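For reference, the character interleaving shown in the record above can also be written with itertools; this is only an equivalent alternative, not the record's own solution.

from itertools import chain

s1 = "パトカー"
s2 = "タクシー"
print("".join(chain.from_iterable(zip(s1, s2))))  # パタトクカシーー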
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def qsub(job, exe_path, queue=QUEUE_NAME):
o_path = job['o_path'] if job['o_path'] is not None else '/dev/null'
e_path = job['e_path'] if job['e_path'] is not None else '/dev/null'
for p in [o_path, e_path]:
if p == '/dev/null':
continue
if exists(p):
remove(p)
else:
makedirs(dirname(p), exist_ok=True)
cmd = ['qsub', '-q', queue, '-o', o_path, '-e', e_path, '-N', job[
'name'], exe_path]
for key in job:
if '--' in key:
cmd += [key, job[key]]
if 'test_dummy' in queue:
dummy_qsub(cmd)
else:
try:
sp.check_output(cmd, stderr=sp.STDOUT)
except sp.CalledProcessError as e:
print('returncode', e.returncode)
print('output', e.output)
raise
<|reserved_special_token_1|>
<|reserved_special_token_0|>
QUEUE_NAME = 'fact_medium'
def qsub(job, exe_path, queue=QUEUE_NAME):
o_path = job['o_path'] if job['o_path'] is not None else '/dev/null'
e_path = job['e_path'] if job['e_path'] is not None else '/dev/null'
for p in [o_path, e_path]:
if p == '/dev/null':
continue
if exists(p):
remove(p)
else:
makedirs(dirname(p), exist_ok=True)
cmd = ['qsub', '-q', queue, '-o', o_path, '-e', e_path, '-N', job[
'name'], exe_path]
for key in job:
if '--' in key:
cmd += [key, job[key]]
if 'test_dummy' in queue:
dummy_qsub(cmd)
else:
try:
sp.check_output(cmd, stderr=sp.STDOUT)
except sp.CalledProcessError as e:
print('returncode', e.returncode)
print('output', e.output)
raise
<|reserved_special_token_1|>
import subprocess as sp
from .dummy_qsub import dummy_qsub
from os.path import exists
from os import makedirs
from os import remove
from os.path import dirname
QUEUE_NAME = 'fact_medium'
def qsub(job, exe_path, queue=QUEUE_NAME):
o_path = job['o_path'] if job['o_path'] is not None else '/dev/null'
e_path = job['e_path'] if job['e_path'] is not None else '/dev/null'
for p in [o_path, e_path]:
if p == '/dev/null':
continue
if exists(p):
remove(p)
else:
makedirs(dirname(p), exist_ok=True)
cmd = ['qsub', '-q', queue, '-o', o_path, '-e', e_path, '-N', job[
'name'], exe_path]
for key in job:
if '--' in key:
cmd += [key, job[key]]
if 'test_dummy' in queue:
dummy_qsub(cmd)
else:
try:
sp.check_output(cmd, stderr=sp.STDOUT)
except sp.CalledProcessError as e:
print('returncode', e.returncode)
print('output', e.output)
raise
|
flexible
|
{
"blob_id": "427d3d386d4b8a998a0b61b8c59984c6003f5d7b",
"index": 6975,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef qsub(job, exe_path, queue=QUEUE_NAME):\n o_path = job['o_path'] if job['o_path'] is not None else '/dev/null'\n e_path = job['e_path'] if job['e_path'] is not None else '/dev/null'\n for p in [o_path, e_path]:\n if p == '/dev/null':\n continue\n if exists(p):\n remove(p)\n else:\n makedirs(dirname(p), exist_ok=True)\n cmd = ['qsub', '-q', queue, '-o', o_path, '-e', e_path, '-N', job[\n 'name'], exe_path]\n for key in job:\n if '--' in key:\n cmd += [key, job[key]]\n if 'test_dummy' in queue:\n dummy_qsub(cmd)\n else:\n try:\n sp.check_output(cmd, stderr=sp.STDOUT)\n except sp.CalledProcessError as e:\n print('returncode', e.returncode)\n print('output', e.output)\n raise\n",
"step-3": "<mask token>\nQUEUE_NAME = 'fact_medium'\n\n\ndef qsub(job, exe_path, queue=QUEUE_NAME):\n o_path = job['o_path'] if job['o_path'] is not None else '/dev/null'\n e_path = job['e_path'] if job['e_path'] is not None else '/dev/null'\n for p in [o_path, e_path]:\n if p == '/dev/null':\n continue\n if exists(p):\n remove(p)\n else:\n makedirs(dirname(p), exist_ok=True)\n cmd = ['qsub', '-q', queue, '-o', o_path, '-e', e_path, '-N', job[\n 'name'], exe_path]\n for key in job:\n if '--' in key:\n cmd += [key, job[key]]\n if 'test_dummy' in queue:\n dummy_qsub(cmd)\n else:\n try:\n sp.check_output(cmd, stderr=sp.STDOUT)\n except sp.CalledProcessError as e:\n print('returncode', e.returncode)\n print('output', e.output)\n raise\n",
"step-4": "import subprocess as sp\nfrom .dummy_qsub import dummy_qsub\nfrom os.path import exists\nfrom os import makedirs\nfrom os import remove\nfrom os.path import dirname\nQUEUE_NAME = 'fact_medium'\n\n\ndef qsub(job, exe_path, queue=QUEUE_NAME):\n o_path = job['o_path'] if job['o_path'] is not None else '/dev/null'\n e_path = job['e_path'] if job['e_path'] is not None else '/dev/null'\n for p in [o_path, e_path]:\n if p == '/dev/null':\n continue\n if exists(p):\n remove(p)\n else:\n makedirs(dirname(p), exist_ok=True)\n cmd = ['qsub', '-q', queue, '-o', o_path, '-e', e_path, '-N', job[\n 'name'], exe_path]\n for key in job:\n if '--' in key:\n cmd += [key, job[key]]\n if 'test_dummy' in queue:\n dummy_qsub(cmd)\n else:\n try:\n sp.check_output(cmd, stderr=sp.STDOUT)\n except sp.CalledProcessError as e:\n print('returncode', e.returncode)\n print('output', e.output)\n raise\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
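A hypothetical usage sketch for the qsub() helper in the record above. The job-dict shape is inferred from the keys that function reads ('name', 'o_path', 'e_path', plus any '--flag' entries it forwards); the paths, flags, and executable below are made up.

job = {
    "name": "example_job",
    "o_path": "/tmp/example_job/stdout.log",
    "e_path": "/tmp/example_job/stderr.log",
    "--input": "/data/run_001.h5",     # any key containing '--' is forwarded verbatim
    "--threshold": "5",
}

# Command the helper assembles before submission (reproduced here for illustration):
cmd = ["qsub", "-q", "fact_medium",
       "-o", job["o_path"], "-e", job["e_path"],
       "-N", job["name"], "/home/user/analysis.sh"]
for key in job:
    if "--" in key:
        cmd += [key, job[key]]
print(" ".join(cmd))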
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class CriteriaForm(forms.Form):
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class CriteriaForm(forms.Form):
query = forms.CharField(widget=forms.Textarea)
<|reserved_special_token_1|>
from django import forms
class CriteriaForm(forms.Form):
query = forms.CharField(widget=forms.Textarea)
|
flexible
|
{
"blob_id": "b6529dc77d89cdf2d49c689dc583b78c94e31c4d",
"index": 4716,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass CriteriaForm(forms.Form):\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass CriteriaForm(forms.Form):\n query = forms.CharField(widget=forms.Textarea)\n",
"step-4": "from django import forms\n\n\nclass CriteriaForm(forms.Form):\n query = forms.CharField(widget=forms.Textarea)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
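A hypothetical view using the CriteriaForm from the record above; the module path, view name, and template are illustrative assumptions, not part of the record.

from django.shortcuts import render

from .forms import CriteriaForm  # assumed module layout


def search(request):
    # Bind the form on POST, otherwise render it empty.
    form = CriteriaForm(request.POST or None)
    results = []
    if request.method == "POST" and form.is_valid():
        query = form.cleaned_data["query"]
        results = query.split()  # placeholder for the real search logic
    return render(request, "search.html", {"form": form, "results": results})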
<|reserved_special_token_0|>
def load_stop_word():
stop_word = set()
with open('data/stop_word.txt', 'r', encoding='utf-8') as file:
for line in file.readlines():
stop_word.add(line.strip())
return stop_word
<|reserved_special_token_0|>
def write_dict(word_dict):
file = open('data/vocab_unigram.txt', 'w', encoding='utf-8')
file.write('[UNK]' + '\t' + '0' + '\n')
for k, v in word_dict.items():
file.write(k + '\t' + str(v) + '\n')
file.close()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def load_stop_word():
stop_word = set()
with open('data/stop_word.txt', 'r', encoding='utf-8') as file:
for line in file.readlines():
stop_word.add(line.strip())
return stop_word
def get_goods_title_dict(stop_word_dict):
cursor = db.cursor()
cursor.execute('select goods_name FROM goods')
data = cursor.fetchall()
goods_name_dict = dict()
idx = 1
for line in data:
title = line[0].strip().lower()
for c in title:
if c.strip() == '':
continue
if c in stop_word_dict:
continue
if c not in goods_name_dict:
goods_name_dict[c] = idx
idx = idx + 1
cursor.execute('select goods_name FROM goods where is_onsell=1')
data = cursor.fetchall()
regexp = '[0-9a-z]+'
pattern = re.compile(regexp)
for line in data:
title = line[0].strip().lower()
match_res = pattern.findall(title)
print(title, match_res)
for item in match_res:
if item not in goods_name_dict:
goods_name_dict[item] = idx
idx = idx + 1
return goods_name_dict
def write_dict(word_dict):
file = open('data/vocab_unigram.txt', 'w', encoding='utf-8')
file.write('[UNK]' + '\t' + '0' + '\n')
for k, v in word_dict.items():
file.write(k + '\t' + str(v) + '\n')
file.close()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def load_stop_word():
stop_word = set()
with open('data/stop_word.txt', 'r', encoding='utf-8') as file:
for line in file.readlines():
stop_word.add(line.strip())
return stop_word
def get_goods_title_dict(stop_word_dict):
cursor = db.cursor()
cursor.execute('select goods_name FROM goods')
data = cursor.fetchall()
goods_name_dict = dict()
idx = 1
for line in data:
title = line[0].strip().lower()
for c in title:
if c.strip() == '':
continue
if c in stop_word_dict:
continue
if c not in goods_name_dict:
goods_name_dict[c] = idx
idx = idx + 1
cursor.execute('select goods_name FROM goods where is_onsell=1')
data = cursor.fetchall()
regexp = '[0-9a-z]+'
pattern = re.compile(regexp)
for line in data:
title = line[0].strip().lower()
match_res = pattern.findall(title)
print(title, match_res)
for item in match_res:
if item not in goods_name_dict:
goods_name_dict[item] = idx
idx = idx + 1
return goods_name_dict
def write_dict(word_dict):
file = open('data/vocab_unigram.txt', 'w', encoding='utf-8')
file.write('[UNK]' + '\t' + '0' + '\n')
for k, v in word_dict.items():
file.write(k + '\t' + str(v) + '\n')
file.close()
if __name__ == '__main__':
stop_word_dict = load_stop_word()
goods_name_dict = get_goods_title_dict(stop_word_dict)
write_dict(goods_name_dict)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
db = MySQLdb.connect(host='wonderfulloffline.mysql.rds.aliyuncs.com', port=
3306, user='wonderfull_ai', password='868wxRHrPaTKkjvC', db=
'wonderfull_ai_online', charset='utf8')
def load_stop_word():
stop_word = set()
with open('data/stop_word.txt', 'r', encoding='utf-8') as file:
for line in file.readlines():
stop_word.add(line.strip())
return stop_word
def get_goods_title_dict(stop_word_dict):
cursor = db.cursor()
cursor.execute('select goods_name FROM goods')
data = cursor.fetchall()
goods_name_dict = dict()
idx = 1
for line in data:
title = line[0].strip().lower()
for c in title:
if c.strip() == '':
continue
if c in stop_word_dict:
continue
if c not in goods_name_dict:
goods_name_dict[c] = idx
idx = idx + 1
cursor.execute('select goods_name FROM goods where is_onsell=1')
data = cursor.fetchall()
regexp = '[0-9a-z]+'
pattern = re.compile(regexp)
for line in data:
title = line[0].strip().lower()
match_res = pattern.findall(title)
print(title, match_res)
for item in match_res:
if item not in goods_name_dict:
goods_name_dict[item] = idx
idx = idx + 1
return goods_name_dict
def write_dict(word_dict):
file = open('data/vocab_unigram.txt', 'w', encoding='utf-8')
file.write('[UNK]' + '\t' + '0' + '\n')
for k, v in word_dict.items():
file.write(k + '\t' + str(v) + '\n')
file.close()
if __name__ == '__main__':
stop_word_dict = load_stop_word()
goods_name_dict = get_goods_title_dict(stop_word_dict)
write_dict(goods_name_dict)
<|reserved_special_token_1|>
#!/usr/bin/env python
# encoding=utf-8
import MySQLdb
import re
# 打开数据库连接
db = MySQLdb.connect(host='wonderfulloffline.mysql.rds.aliyuncs.com',port=3306,user='wonderfull_ai',password='868wxRHrPaTKkjvC', db='wonderfull_ai_online', charset='utf8' )
def load_stop_word():
stop_word=set()
with open("data/stop_word.txt","r",encoding="utf-8") as file:
for line in file.readlines():
stop_word.add(line.strip())
return stop_word
# 使用cursor()方法获取操作游标
def get_goods_title_dict(stop_word_dict):
cursor = db.cursor()
# 使用execute方法执行SQL语句
cursor.execute("select goods_name FROM goods")
# 使用 fetchone() 方法获取一条数据
data = cursor.fetchall()
goods_name_dict=dict()
idx=1
for line in data:
title = line[0].strip().lower()
for c in title:
if(c.strip()==''):
continue
if(c in stop_word_dict):
continue
if(c not in goods_name_dict):
goods_name_dict[c]=idx
idx=idx+1
cursor.execute("select goods_name FROM goods where is_onsell=1")
data = cursor.fetchall()
regexp = r"[0-9a-z]+"
pattern = re.compile(regexp)
for line in data:
title = line[0].strip().lower()
match_res = pattern.findall(title)
print(title,match_res)
for item in match_res:
if (item not in goods_name_dict):
goods_name_dict[item] = idx
idx = idx + 1
# 关闭数据库连接
# db.close()
return goods_name_dict
def write_dict(word_dict):
file=open("data/vocab_unigram.txt","w",encoding="utf-8")
file.write("[UNK]"+"\t"+"0"+"\n")
for k,v in word_dict.items():
# print(k,v)
file.write(k+"\t"+str(v)+"\n")
file.close()
if __name__ == '__main__':
stop_word_dict=load_stop_word()
goods_name_dict=get_goods_title_dict(stop_word_dict)
# print(goods_name_dict)
write_dict(goods_name_dict)
|
flexible
|
{
"blob_id": "4942b20a8e4f58c52b82800fb4c59db169cd8048",
"index": 3562,
"step-1": "<mask token>\n\n\ndef load_stop_word():\n stop_word = set()\n with open('data/stop_word.txt', 'r', encoding='utf-8') as file:\n for line in file.readlines():\n stop_word.add(line.strip())\n return stop_word\n\n\n<mask token>\n\n\ndef write_dict(word_dict):\n file = open('data/vocab_unigram.txt', 'w', encoding='utf-8')\n file.write('[UNK]' + '\\t' + '0' + '\\n')\n for k, v in word_dict.items():\n file.write(k + '\\t' + str(v) + '\\n')\n file.close()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef load_stop_word():\n stop_word = set()\n with open('data/stop_word.txt', 'r', encoding='utf-8') as file:\n for line in file.readlines():\n stop_word.add(line.strip())\n return stop_word\n\n\ndef get_goods_title_dict(stop_word_dict):\n cursor = db.cursor()\n cursor.execute('select goods_name FROM goods')\n data = cursor.fetchall()\n goods_name_dict = dict()\n idx = 1\n for line in data:\n title = line[0].strip().lower()\n for c in title:\n if c.strip() == '':\n continue\n if c in stop_word_dict:\n continue\n if c not in goods_name_dict:\n goods_name_dict[c] = idx\n idx = idx + 1\n cursor.execute('select goods_name FROM goods where is_onsell=1')\n data = cursor.fetchall()\n regexp = '[0-9a-z]+'\n pattern = re.compile(regexp)\n for line in data:\n title = line[0].strip().lower()\n match_res = pattern.findall(title)\n print(title, match_res)\n for item in match_res:\n if item not in goods_name_dict:\n goods_name_dict[item] = idx\n idx = idx + 1\n return goods_name_dict\n\n\ndef write_dict(word_dict):\n file = open('data/vocab_unigram.txt', 'w', encoding='utf-8')\n file.write('[UNK]' + '\\t' + '0' + '\\n')\n for k, v in word_dict.items():\n file.write(k + '\\t' + str(v) + '\\n')\n file.close()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef load_stop_word():\n stop_word = set()\n with open('data/stop_word.txt', 'r', encoding='utf-8') as file:\n for line in file.readlines():\n stop_word.add(line.strip())\n return stop_word\n\n\ndef get_goods_title_dict(stop_word_dict):\n cursor = db.cursor()\n cursor.execute('select goods_name FROM goods')\n data = cursor.fetchall()\n goods_name_dict = dict()\n idx = 1\n for line in data:\n title = line[0].strip().lower()\n for c in title:\n if c.strip() == '':\n continue\n if c in stop_word_dict:\n continue\n if c not in goods_name_dict:\n goods_name_dict[c] = idx\n idx = idx + 1\n cursor.execute('select goods_name FROM goods where is_onsell=1')\n data = cursor.fetchall()\n regexp = '[0-9a-z]+'\n pattern = re.compile(regexp)\n for line in data:\n title = line[0].strip().lower()\n match_res = pattern.findall(title)\n print(title, match_res)\n for item in match_res:\n if item not in goods_name_dict:\n goods_name_dict[item] = idx\n idx = idx + 1\n return goods_name_dict\n\n\ndef write_dict(word_dict):\n file = open('data/vocab_unigram.txt', 'w', encoding='utf-8')\n file.write('[UNK]' + '\\t' + '0' + '\\n')\n for k, v in word_dict.items():\n file.write(k + '\\t' + str(v) + '\\n')\n file.close()\n\n\nif __name__ == '__main__':\n stop_word_dict = load_stop_word()\n goods_name_dict = get_goods_title_dict(stop_word_dict)\n write_dict(goods_name_dict)\n",
"step-4": "<mask token>\ndb = MySQLdb.connect(host='wonderfulloffline.mysql.rds.aliyuncs.com', port=\n 3306, user='wonderfull_ai', password='868wxRHrPaTKkjvC', db=\n 'wonderfull_ai_online', charset='utf8')\n\n\ndef load_stop_word():\n stop_word = set()\n with open('data/stop_word.txt', 'r', encoding='utf-8') as file:\n for line in file.readlines():\n stop_word.add(line.strip())\n return stop_word\n\n\ndef get_goods_title_dict(stop_word_dict):\n cursor = db.cursor()\n cursor.execute('select goods_name FROM goods')\n data = cursor.fetchall()\n goods_name_dict = dict()\n idx = 1\n for line in data:\n title = line[0].strip().lower()\n for c in title:\n if c.strip() == '':\n continue\n if c in stop_word_dict:\n continue\n if c not in goods_name_dict:\n goods_name_dict[c] = idx\n idx = idx + 1\n cursor.execute('select goods_name FROM goods where is_onsell=1')\n data = cursor.fetchall()\n regexp = '[0-9a-z]+'\n pattern = re.compile(regexp)\n for line in data:\n title = line[0].strip().lower()\n match_res = pattern.findall(title)\n print(title, match_res)\n for item in match_res:\n if item not in goods_name_dict:\n goods_name_dict[item] = idx\n idx = idx + 1\n return goods_name_dict\n\n\ndef write_dict(word_dict):\n file = open('data/vocab_unigram.txt', 'w', encoding='utf-8')\n file.write('[UNK]' + '\\t' + '0' + '\\n')\n for k, v in word_dict.items():\n file.write(k + '\\t' + str(v) + '\\n')\n file.close()\n\n\nif __name__ == '__main__':\n stop_word_dict = load_stop_word()\n goods_name_dict = get_goods_title_dict(stop_word_dict)\n write_dict(goods_name_dict)\n",
"step-5": "#!/usr/bin/env python\r\n# encoding=utf-8\r\nimport MySQLdb\r\nimport re\r\n\r\n# 打开数据库连接\r\ndb = MySQLdb.connect(host='wonderfulloffline.mysql.rds.aliyuncs.com',port=3306,user='wonderfull_ai',password='868wxRHrPaTKkjvC', db='wonderfull_ai_online', charset='utf8' )\r\n\r\ndef load_stop_word():\r\n stop_word=set()\r\n with open(\"data/stop_word.txt\",\"r\",encoding=\"utf-8\") as file:\r\n for line in file.readlines():\r\n stop_word.add(line.strip())\r\n return stop_word\r\n\r\n# 使用cursor()方法获取操作游标\r\ndef get_goods_title_dict(stop_word_dict):\r\n cursor = db.cursor()\r\n # 使用execute方法执行SQL语句\r\n cursor.execute(\"select goods_name FROM goods\")\r\n # 使用 fetchone() 方法获取一条数据\r\n data = cursor.fetchall()\r\n goods_name_dict=dict()\r\n idx=1\r\n\r\n for line in data:\r\n title = line[0].strip().lower()\r\n for c in title:\r\n if(c.strip()==''):\r\n continue\r\n if(c in stop_word_dict):\r\n continue\r\n if(c not in goods_name_dict):\r\n goods_name_dict[c]=idx\r\n idx=idx+1\r\n\r\n cursor.execute(\"select goods_name FROM goods where is_onsell=1\")\r\n data = cursor.fetchall()\r\n regexp = r\"[0-9a-z]+\"\r\n pattern = re.compile(regexp)\r\n for line in data:\r\n title = line[0].strip().lower()\r\n match_res = pattern.findall(title)\r\n print(title,match_res)\r\n for item in match_res:\r\n if (item not in goods_name_dict):\r\n goods_name_dict[item] = idx\r\n idx = idx + 1\r\n\r\n # 关闭数据库连接\r\n # db.close()\r\n return goods_name_dict\r\n\r\ndef write_dict(word_dict):\r\n file=open(\"data/vocab_unigram.txt\",\"w\",encoding=\"utf-8\")\r\n file.write(\"[UNK]\"+\"\\t\"+\"0\"+\"\\n\")\r\n for k,v in word_dict.items():\r\n # print(k,v)\r\n file.write(k+\"\\t\"+str(v)+\"\\n\")\r\n file.close()\r\n\r\nif __name__ == '__main__':\r\n stop_word_dict=load_stop_word()\r\n goods_name_dict=get_goods_title_dict(stop_word_dict)\r\n # print(goods_name_dict)\r\n write_dict(goods_name_dict)",
"step-ids": [
2,
3,
4,
5,
7
]
}
|
[
2,
3,
4,
5,
7
] |
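Standalone sketch of the vocabulary-building idea in the record above, with the database query replaced by an in-memory list; the titles and stop words are made up, and "[UNK]" keeps index 0 as in the record.

import re

titles = ["Apple iPhone 12 保护壳", "Nike Air Max 270", "USB-C 数据线 1m"]
stop_words = {"-", "/"}

vocab = {"[UNK]": 0}
for title in titles:
    for ch in title.lower():
        if ch.strip() == "" or ch in stop_words:
            continue
        vocab.setdefault(ch, len(vocab))        # one entry per character

for title in titles:
    for token in re.findall(r"[0-9a-z]+", title.lower()):
        vocab.setdefault(token, len(vocab))     # whole alphanumeric runs as extra entries

for word, idx in vocab.items():
    print(f"{word}\t{idx}")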
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
for i in range(2):
renderer.set_drawing_axis(i)
renderer.draw_ws_obstacles()
renderer.draw_ws_point(source, color='k', shape='o')
renderer.background_matrix_eval = False
renderer.draw_ws_img(U[-1] if i == 0 else U_e, interpolate='none',
color_style=plt.cm.gray)
renderer.show()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
ROWS = 1
COLS = 2
heat_diffusion.NB_POINTS = 101
heat_diffusion.TIME_FACTOR = 50
heat_diffusion.ALGORITHM = 'forward'
iterations = 10
workspace = Workspace()
source = [0, 0]
renderer = WorkspaceDrawer(workspace, rows=ROWS, cols=COLS)
U = heat_diffusion.heat_diffusion(workspace, source, iterations)
U_e = heat_diffusion.compare_with_kernel(U[-1], 0.00902, workspace)
for i in range(2):
renderer.set_drawing_axis(i)
renderer.draw_ws_obstacles()
renderer.draw_ws_point(source, color='k', shape='o')
renderer.background_matrix_eval = False
renderer.draw_ws_img(U[-1] if i == 0 else U_e, interpolate='none',
color_style=plt.cm.gray)
renderer.show()
<|reserved_special_token_1|>
from demos_common_imports import *
from pyrieef.geometry.workspace import *
from pyrieef.geometry import heat_diffusion
from pyrieef.rendering.workspace_planar import WorkspaceDrawer
import matplotlib.pyplot as plt
ROWS = 1
COLS = 2
heat_diffusion.NB_POINTS = 101
heat_diffusion.TIME_FACTOR = 50
heat_diffusion.ALGORITHM = 'forward'
iterations = 10
workspace = Workspace()
source = [0, 0]
renderer = WorkspaceDrawer(workspace, rows=ROWS, cols=COLS)
U = heat_diffusion.heat_diffusion(workspace, source, iterations)
U_e = heat_diffusion.compare_with_kernel(U[-1], 0.00902, workspace)
for i in range(2):
renderer.set_drawing_axis(i)
renderer.draw_ws_obstacles()
renderer.draw_ws_point(source, color='k', shape='o')
renderer.background_matrix_eval = False
renderer.draw_ws_img(U[-1] if i == 0 else U_e, interpolate='none',
color_style=plt.cm.gray)
renderer.show()
<|reserved_special_token_1|>
#!/usr/bin/env python
# Copyright (c) 2019, University of Stuttgart
# All rights reserved.
#
# Permission to use, copy, modify, and distribute this software for any purpose
# with or without fee is hereby granted, provided that the above copyright
# notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH
# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
# AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,
# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
# OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
# PERFORMANCE OF THIS SOFTWARE.
#
# Jim Mainprice on Wed January 22 2019
from demos_common_imports import *
from pyrieef.geometry.workspace import *
from pyrieef.geometry import heat_diffusion
from pyrieef.rendering.workspace_planar import WorkspaceDrawer
import matplotlib.pyplot as plt
ROWS = 1
COLS = 2
heat_diffusion.NB_POINTS = 101
heat_diffusion.TIME_FACTOR = 50
heat_diffusion.ALGORITHM = "forward"
iterations = 10
workspace = Workspace()
source = [0, 0]
renderer = WorkspaceDrawer(workspace, rows=ROWS, cols=COLS)
U = heat_diffusion.heat_diffusion(workspace, source, iterations)
U_e = heat_diffusion.compare_with_kernel(U[-1], 9.020E-03, workspace)
for i in range(2):
renderer.set_drawing_axis(i)
renderer.draw_ws_obstacles()
renderer.draw_ws_point(source, color='k', shape='o')
renderer.background_matrix_eval = False
renderer.draw_ws_img(
U[-1] if i == 0 else U_e,
interpolate="none", color_style=plt.cm.gray)
renderer.show()
|
flexible
|
{
"blob_id": "007cce815f3ad4e47593ff00ff2e73d5d9961d9e",
"index": 3211,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor i in range(2):\n renderer.set_drawing_axis(i)\n renderer.draw_ws_obstacles()\n renderer.draw_ws_point(source, color='k', shape='o')\n renderer.background_matrix_eval = False\n renderer.draw_ws_img(U[-1] if i == 0 else U_e, interpolate='none',\n color_style=plt.cm.gray)\nrenderer.show()\n",
"step-3": "<mask token>\nROWS = 1\nCOLS = 2\nheat_diffusion.NB_POINTS = 101\nheat_diffusion.TIME_FACTOR = 50\nheat_diffusion.ALGORITHM = 'forward'\niterations = 10\nworkspace = Workspace()\nsource = [0, 0]\nrenderer = WorkspaceDrawer(workspace, rows=ROWS, cols=COLS)\nU = heat_diffusion.heat_diffusion(workspace, source, iterations)\nU_e = heat_diffusion.compare_with_kernel(U[-1], 0.00902, workspace)\nfor i in range(2):\n renderer.set_drawing_axis(i)\n renderer.draw_ws_obstacles()\n renderer.draw_ws_point(source, color='k', shape='o')\n renderer.background_matrix_eval = False\n renderer.draw_ws_img(U[-1] if i == 0 else U_e, interpolate='none',\n color_style=plt.cm.gray)\nrenderer.show()\n",
"step-4": "from demos_common_imports import *\nfrom pyrieef.geometry.workspace import *\nfrom pyrieef.geometry import heat_diffusion\nfrom pyrieef.rendering.workspace_planar import WorkspaceDrawer\nimport matplotlib.pyplot as plt\nROWS = 1\nCOLS = 2\nheat_diffusion.NB_POINTS = 101\nheat_diffusion.TIME_FACTOR = 50\nheat_diffusion.ALGORITHM = 'forward'\niterations = 10\nworkspace = Workspace()\nsource = [0, 0]\nrenderer = WorkspaceDrawer(workspace, rows=ROWS, cols=COLS)\nU = heat_diffusion.heat_diffusion(workspace, source, iterations)\nU_e = heat_diffusion.compare_with_kernel(U[-1], 0.00902, workspace)\nfor i in range(2):\n renderer.set_drawing_axis(i)\n renderer.draw_ws_obstacles()\n renderer.draw_ws_point(source, color='k', shape='o')\n renderer.background_matrix_eval = False\n renderer.draw_ws_img(U[-1] if i == 0 else U_e, interpolate='none',\n color_style=plt.cm.gray)\nrenderer.show()\n",
"step-5": "#!/usr/bin/env python\n\n# Copyright (c) 2019, University of Stuttgart\n# All rights reserved.\n#\n# Permission to use, copy, modify, and distribute this software for any purpose\n# with or without fee is hereby granted, provided that the above copyright\n# notice and this permission notice appear in all copies.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH\n# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY\n# AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,\n# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM\n# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR\n# OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR\n# PERFORMANCE OF THIS SOFTWARE.\n#\n# Jim Mainprice on Wed January 22 2019\n\nfrom demos_common_imports import *\nfrom pyrieef.geometry.workspace import *\nfrom pyrieef.geometry import heat_diffusion\nfrom pyrieef.rendering.workspace_planar import WorkspaceDrawer\nimport matplotlib.pyplot as plt\n\nROWS = 1\nCOLS = 2\nheat_diffusion.NB_POINTS = 101\nheat_diffusion.TIME_FACTOR = 50\nheat_diffusion.ALGORITHM = \"forward\"\niterations = 10\nworkspace = Workspace()\nsource = [0, 0]\nrenderer = WorkspaceDrawer(workspace, rows=ROWS, cols=COLS)\nU = heat_diffusion.heat_diffusion(workspace, source, iterations)\nU_e = heat_diffusion.compare_with_kernel(U[-1], 9.020E-03, workspace)\nfor i in range(2):\n renderer.set_drawing_axis(i)\n renderer.draw_ws_obstacles()\n renderer.draw_ws_point(source, color='k', shape='o')\n renderer.background_matrix_eval = False\n renderer.draw_ws_img(\n U[-1] if i == 0 else U_e,\n interpolate=\"none\", color_style=plt.cm.gray)\nrenderer.show()\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
class TemplateTestBCAlgorithm:
@staticmethod
@abstractmethod
def list_to_backend_type(data: List) ->TTensor:
"""
Convert list to backend specific type
:param data: List of data.
:return: Converted data.
"""
@staticmethod
@abstractmethod
def get_backend() ->BiasCorrectionAlgoBackend:
"""
Get backend specific BiasCorrectionAlgoBackend
:return BiasCorrectionAlgoBackend: Backend specific BiasCorrectionAlgoBackend
"""
@staticmethod
def fn_to_type(tensor):
return tensor
@staticmethod
@abstractmethod
def get_transform_fn():
"""
Get transformation function for dataset.
"""
def get_dataset(self, input_size: Tuple):
"""
Return backend specific random dataset.
:param model: The model for which the dataset is being created.
"""
return StaticDatasetMock(input_size, self.fn_to_type)
<|reserved_special_token_0|>
@staticmethod
@abstractmethod
def check_bias(model: TModel, ref_biases: Dict):
"""
Checks biases values.
"""
@staticmethod
def map_references(ref_biases: Dict) ->Dict[str, List]:
"""
Returns backend-specific reference.
"""
return ref_biases
@staticmethod
def get_quantization_algorithm():
return PostTrainingQuantization(subset_size=1, fast_bias_correction
=False, advanced_parameters=AdvancedQuantizationParameters(
overflow_fix=OverflowFix.DISABLE))
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class TemplateTestBCAlgorithm:
@staticmethod
@abstractmethod
def list_to_backend_type(data: List) ->TTensor:
"""
Convert list to backend specific type
:param data: List of data.
:return: Converted data.
"""
@staticmethod
@abstractmethod
def get_backend() ->BiasCorrectionAlgoBackend:
"""
Get backend specific BiasCorrectionAlgoBackend
:return BiasCorrectionAlgoBackend: Backend specific BiasCorrectionAlgoBackend
"""
@staticmethod
def fn_to_type(tensor):
return tensor
@staticmethod
@abstractmethod
def get_transform_fn():
"""
Get transformation function for dataset.
"""
def get_dataset(self, input_size: Tuple):
"""
Return backend specific random dataset.
:param model: The model for which the dataset is being created.
"""
return StaticDatasetMock(input_size, self.fn_to_type)
@staticmethod
@abstractmethod
def backend_specific_model(model: TModel, tmp_dir: str):
"""
Return backend specific model.
"""
@staticmethod
@abstractmethod
def check_bias(model: TModel, ref_biases: Dict):
"""
Checks biases values.
"""
@staticmethod
def map_references(ref_biases: Dict) ->Dict[str, List]:
"""
Returns backend-specific reference.
"""
return ref_biases
@staticmethod
def get_quantization_algorithm():
return PostTrainingQuantization(subset_size=1, fast_bias_correction
=False, advanced_parameters=AdvancedQuantizationParameters(
overflow_fix=OverflowFix.DISABLE))
@pytest.mark.parametrize('model_cls, ref_biases', ((
MultipleConvTestModel, {'/conv_1/Conv': [0.6658976, -0.70563036],
'/conv_2/Conv': [-0.307696, -0.42806846, 0.44965455],
'/conv_3/Conv': [-0.0033792169, 1.0661412], '/conv_4/Conv': [-
0.6941606, 0.9958957, 0.6081058]}), (ConvTestModel, {'/conv/Conv':
[0.11085186, 1.0017344]})))
def test_update_bias(self, model_cls, ref_biases, tmpdir):
model = self.backend_specific_model(model_cls(), tmpdir)
dataset = Dataset(self.get_dataset(model_cls.INPUT_SIZE), self.
get_transform_fn())
quantization_algorithm = self.get_quantization_algorithm()
quantized_model = quantization_algorithm.apply(model, dataset=dataset)
mapped_ref_biases = self.map_references(ref_biases)
self.check_bias(quantized_model, mapped_ref_biases)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
TModel = TypeVar('TModel')
TTensor = TypeVar('TTensor')
class TemplateTestBCAlgorithm:
@staticmethod
@abstractmethod
def list_to_backend_type(data: List) ->TTensor:
"""
Convert list to backend specific type
:param data: List of data.
:return: Converted data.
"""
@staticmethod
@abstractmethod
def get_backend() ->BiasCorrectionAlgoBackend:
"""
Get backend specific BiasCorrectionAlgoBackend
:return BiasCorrectionAlgoBackend: Backend specific BiasCorrectionAlgoBackend
"""
@staticmethod
def fn_to_type(tensor):
return tensor
@staticmethod
@abstractmethod
def get_transform_fn():
"""
Get transformation function for dataset.
"""
def get_dataset(self, input_size: Tuple):
"""
Return backend specific random dataset.
:param model: The model for which the dataset is being created.
"""
return StaticDatasetMock(input_size, self.fn_to_type)
@staticmethod
@abstractmethod
def backend_specific_model(model: TModel, tmp_dir: str):
"""
Return backend specific model.
"""
@staticmethod
@abstractmethod
def check_bias(model: TModel, ref_biases: Dict):
"""
Checks biases values.
"""
@staticmethod
def map_references(ref_biases: Dict) ->Dict[str, List]:
"""
Returns backend-specific reference.
"""
return ref_biases
@staticmethod
def get_quantization_algorithm():
return PostTrainingQuantization(subset_size=1, fast_bias_correction
=False, advanced_parameters=AdvancedQuantizationParameters(
overflow_fix=OverflowFix.DISABLE))
@pytest.mark.parametrize('model_cls, ref_biases', ((
MultipleConvTestModel, {'/conv_1/Conv': [0.6658976, -0.70563036],
'/conv_2/Conv': [-0.307696, -0.42806846, 0.44965455],
'/conv_3/Conv': [-0.0033792169, 1.0661412], '/conv_4/Conv': [-
0.6941606, 0.9958957, 0.6081058]}), (ConvTestModel, {'/conv/Conv':
[0.11085186, 1.0017344]})))
def test_update_bias(self, model_cls, ref_biases, tmpdir):
model = self.backend_specific_model(model_cls(), tmpdir)
dataset = Dataset(self.get_dataset(model_cls.INPUT_SIZE), self.
get_transform_fn())
quantization_algorithm = self.get_quantization_algorithm()
quantized_model = quantization_algorithm.apply(model, dataset=dataset)
mapped_ref_biases = self.map_references(ref_biases)
self.check_bias(quantized_model, mapped_ref_biases)
<|reserved_special_token_1|>
from abc import abstractmethod
from typing import Dict, List, Tuple, TypeVar
import pytest
from nncf.data import Dataset
from nncf.quantization.advanced_parameters import AdvancedQuantizationParameters
from nncf.quantization.advanced_parameters import OverflowFix
from nncf.quantization.algorithms.bias_correction.backend import BiasCorrectionAlgoBackend
from nncf.quantization.algorithms.post_training.algorithm import PostTrainingQuantization
from tests.post_training.test_templates.helpers import ConvTestModel
from tests.post_training.test_templates.helpers import MultipleConvTestModel
from tests.post_training.test_templates.helpers import StaticDatasetMock
TModel = TypeVar('TModel')
TTensor = TypeVar('TTensor')
class TemplateTestBCAlgorithm:
@staticmethod
@abstractmethod
def list_to_backend_type(data: List) ->TTensor:
"""
Convert list to backend specific type
:param data: List of data.
:return: Converted data.
"""
@staticmethod
@abstractmethod
def get_backend() ->BiasCorrectionAlgoBackend:
"""
Get backend specific BiasCorrectionAlgoBackend
:return BiasCorrectionAlgoBackend: Backend specific BiasCorrectionAlgoBackend
"""
@staticmethod
def fn_to_type(tensor):
return tensor
@staticmethod
@abstractmethod
def get_transform_fn():
"""
Get transformation function for dataset.
"""
def get_dataset(self, input_size: Tuple):
"""
Return backend specific random dataset.
:param model: The model for which the dataset is being created.
"""
return StaticDatasetMock(input_size, self.fn_to_type)
@staticmethod
@abstractmethod
def backend_specific_model(model: TModel, tmp_dir: str):
"""
Return backend specific model.
"""
@staticmethod
@abstractmethod
def check_bias(model: TModel, ref_biases: Dict):
"""
Checks biases values.
"""
@staticmethod
def map_references(ref_biases: Dict) ->Dict[str, List]:
"""
Returns backend-specific reference.
"""
return ref_biases
@staticmethod
def get_quantization_algorithm():
return PostTrainingQuantization(subset_size=1, fast_bias_correction
=False, advanced_parameters=AdvancedQuantizationParameters(
overflow_fix=OverflowFix.DISABLE))
@pytest.mark.parametrize('model_cls, ref_biases', ((
MultipleConvTestModel, {'/conv_1/Conv': [0.6658976, -0.70563036],
'/conv_2/Conv': [-0.307696, -0.42806846, 0.44965455],
'/conv_3/Conv': [-0.0033792169, 1.0661412], '/conv_4/Conv': [-
0.6941606, 0.9958957, 0.6081058]}), (ConvTestModel, {'/conv/Conv':
[0.11085186, 1.0017344]})))
def test_update_bias(self, model_cls, ref_biases, tmpdir):
model = self.backend_specific_model(model_cls(), tmpdir)
dataset = Dataset(self.get_dataset(model_cls.INPUT_SIZE), self.
get_transform_fn())
quantization_algorithm = self.get_quantization_algorithm()
quantized_model = quantization_algorithm.apply(model, dataset=dataset)
mapped_ref_biases = self.map_references(ref_biases)
self.check_bias(quantized_model, mapped_ref_biases)
<|reserved_special_token_1|>
# Copyright (c) 2023 Intel Corporation
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from abc import abstractmethod
from typing import Dict, List, Tuple, TypeVar
import pytest
from nncf.data import Dataset
from nncf.quantization.advanced_parameters import AdvancedQuantizationParameters
from nncf.quantization.advanced_parameters import OverflowFix
from nncf.quantization.algorithms.bias_correction.backend import BiasCorrectionAlgoBackend
from nncf.quantization.algorithms.post_training.algorithm import PostTrainingQuantization
from tests.post_training.test_templates.helpers import ConvTestModel
from tests.post_training.test_templates.helpers import MultipleConvTestModel
from tests.post_training.test_templates.helpers import StaticDatasetMock
TModel = TypeVar("TModel")
TTensor = TypeVar("TTensor")
class TemplateTestBCAlgorithm:
@staticmethod
@abstractmethod
def list_to_backend_type(data: List) -> TTensor:
"""
Convert list to backend specific type
:param data: List of data.
:return: Converted data.
"""
@staticmethod
@abstractmethod
def get_backend() -> BiasCorrectionAlgoBackend:
"""
Get backend specific BiasCorrectionAlgoBackend
:return BiasCorrectionAlgoBackend: Backend specific BiasCorrectionAlgoBackend
"""
@staticmethod
def fn_to_type(tensor):
return tensor
@staticmethod
@abstractmethod
def get_transform_fn():
"""
Get transformation function for dataset.
"""
def get_dataset(self, input_size: Tuple):
"""
Return backend specific random dataset.
:param model: The model for which the dataset is being created.
"""
return StaticDatasetMock(input_size, self.fn_to_type)
@staticmethod
@abstractmethod
def backend_specific_model(model: TModel, tmp_dir: str):
"""
Return backend specific model.
"""
@staticmethod
@abstractmethod
def check_bias(model: TModel, ref_biases: Dict):
"""
Checks biases values.
"""
@staticmethod
def map_references(ref_biases: Dict) -> Dict[str, List]:
"""
Returns backend-specific reference.
"""
return ref_biases
@staticmethod
def get_quantization_algorithm():
return PostTrainingQuantization(
subset_size=1,
fast_bias_correction=False,
advanced_parameters=AdvancedQuantizationParameters(overflow_fix=OverflowFix.DISABLE),
)
@pytest.mark.parametrize(
"model_cls, ref_biases",
(
(
MultipleConvTestModel,
{
"/conv_1/Conv": [0.6658976, -0.70563036],
"/conv_2/Conv": [-0.307696, -0.42806846, 0.44965455],
"/conv_3/Conv": [-0.0033792169, 1.0661412],
"/conv_4/Conv": [-0.6941606, 0.9958957, 0.6081058],
# Disabled latest layer due to backends differences
# "/conv_5/Conv": [0.07476559, -0.75797373],
},
),
(ConvTestModel, {"/conv/Conv": [0.11085186, 1.0017344]}),
),
)
def test_update_bias(self, model_cls, ref_biases, tmpdir):
model = self.backend_specific_model(model_cls(), tmpdir)
dataset = Dataset(self.get_dataset(model_cls.INPUT_SIZE), self.get_transform_fn())
quantization_algorithm = self.get_quantization_algorithm()
quantized_model = quantization_algorithm.apply(model, dataset=dataset)
mapped_ref_biases = self.map_references(ref_biases)
self.check_bias(quantized_model, mapped_ref_biases)
|
flexible
|
{
"blob_id": "de88e2d2cf165b35f247ea89300c91b3c8c07fea",
"index": 7844,
"step-1": "<mask token>\n\n\nclass TemplateTestBCAlgorithm:\n\n @staticmethod\n @abstractmethod\n def list_to_backend_type(data: List) ->TTensor:\n \"\"\"\n Convert list to backend specific type\n\n :param data: List of data.\n\n :return: Converted data.\n \"\"\"\n\n @staticmethod\n @abstractmethod\n def get_backend() ->BiasCorrectionAlgoBackend:\n \"\"\"\n Get backend specific BiasCorrectionAlgoBackend\n\n :return BiasCorrectionAlgoBackend: Backend specific BiasCorrectionAlgoBackend\n \"\"\"\n\n @staticmethod\n def fn_to_type(tensor):\n return tensor\n\n @staticmethod\n @abstractmethod\n def get_transform_fn():\n \"\"\"\n Get transformation function for dataset.\n \"\"\"\n\n def get_dataset(self, input_size: Tuple):\n \"\"\"\n Return backend specific random dataset.\n\n :param model: The model for which the dataset is being created.\n \"\"\"\n return StaticDatasetMock(input_size, self.fn_to_type)\n <mask token>\n\n @staticmethod\n @abstractmethod\n def check_bias(model: TModel, ref_biases: Dict):\n \"\"\"\n Checks biases values.\n \"\"\"\n\n @staticmethod\n def map_references(ref_biases: Dict) ->Dict[str, List]:\n \"\"\"\n Returns backend-specific reference.\n \"\"\"\n return ref_biases\n\n @staticmethod\n def get_quantization_algorithm():\n return PostTrainingQuantization(subset_size=1, fast_bias_correction\n =False, advanced_parameters=AdvancedQuantizationParameters(\n overflow_fix=OverflowFix.DISABLE))\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass TemplateTestBCAlgorithm:\n\n @staticmethod\n @abstractmethod\n def list_to_backend_type(data: List) ->TTensor:\n \"\"\"\n Convert list to backend specific type\n\n :param data: List of data.\n\n :return: Converted data.\n \"\"\"\n\n @staticmethod\n @abstractmethod\n def get_backend() ->BiasCorrectionAlgoBackend:\n \"\"\"\n Get backend specific BiasCorrectionAlgoBackend\n\n :return BiasCorrectionAlgoBackend: Backend specific BiasCorrectionAlgoBackend\n \"\"\"\n\n @staticmethod\n def fn_to_type(tensor):\n return tensor\n\n @staticmethod\n @abstractmethod\n def get_transform_fn():\n \"\"\"\n Get transformation function for dataset.\n \"\"\"\n\n def get_dataset(self, input_size: Tuple):\n \"\"\"\n Return backend specific random dataset.\n\n :param model: The model for which the dataset is being created.\n \"\"\"\n return StaticDatasetMock(input_size, self.fn_to_type)\n\n @staticmethod\n @abstractmethod\n def backend_specific_model(model: TModel, tmp_dir: str):\n \"\"\"\n Return backend specific model.\n \"\"\"\n\n @staticmethod\n @abstractmethod\n def check_bias(model: TModel, ref_biases: Dict):\n \"\"\"\n Checks biases values.\n \"\"\"\n\n @staticmethod\n def map_references(ref_biases: Dict) ->Dict[str, List]:\n \"\"\"\n Returns backend-specific reference.\n \"\"\"\n return ref_biases\n\n @staticmethod\n def get_quantization_algorithm():\n return PostTrainingQuantization(subset_size=1, fast_bias_correction\n =False, advanced_parameters=AdvancedQuantizationParameters(\n overflow_fix=OverflowFix.DISABLE))\n\n @pytest.mark.parametrize('model_cls, ref_biases', ((\n MultipleConvTestModel, {'/conv_1/Conv': [0.6658976, -0.70563036],\n '/conv_2/Conv': [-0.307696, -0.42806846, 0.44965455],\n '/conv_3/Conv': [-0.0033792169, 1.0661412], '/conv_4/Conv': [-\n 0.6941606, 0.9958957, 0.6081058]}), (ConvTestModel, {'/conv/Conv':\n [0.11085186, 1.0017344]})))\n def test_update_bias(self, model_cls, ref_biases, tmpdir):\n model = self.backend_specific_model(model_cls(), tmpdir)\n dataset = Dataset(self.get_dataset(model_cls.INPUT_SIZE), self.\n get_transform_fn())\n quantization_algorithm = self.get_quantization_algorithm()\n quantized_model = quantization_algorithm.apply(model, dataset=dataset)\n mapped_ref_biases = self.map_references(ref_biases)\n self.check_bias(quantized_model, mapped_ref_biases)\n",
"step-3": "<mask token>\nTModel = TypeVar('TModel')\nTTensor = TypeVar('TTensor')\n\n\nclass TemplateTestBCAlgorithm:\n\n @staticmethod\n @abstractmethod\n def list_to_backend_type(data: List) ->TTensor:\n \"\"\"\n Convert list to backend specific type\n\n :param data: List of data.\n\n :return: Converted data.\n \"\"\"\n\n @staticmethod\n @abstractmethod\n def get_backend() ->BiasCorrectionAlgoBackend:\n \"\"\"\n Get backend specific BiasCorrectionAlgoBackend\n\n :return BiasCorrectionAlgoBackend: Backend specific BiasCorrectionAlgoBackend\n \"\"\"\n\n @staticmethod\n def fn_to_type(tensor):\n return tensor\n\n @staticmethod\n @abstractmethod\n def get_transform_fn():\n \"\"\"\n Get transformation function for dataset.\n \"\"\"\n\n def get_dataset(self, input_size: Tuple):\n \"\"\"\n Return backend specific random dataset.\n\n :param model: The model for which the dataset is being created.\n \"\"\"\n return StaticDatasetMock(input_size, self.fn_to_type)\n\n @staticmethod\n @abstractmethod\n def backend_specific_model(model: TModel, tmp_dir: str):\n \"\"\"\n Return backend specific model.\n \"\"\"\n\n @staticmethod\n @abstractmethod\n def check_bias(model: TModel, ref_biases: Dict):\n \"\"\"\n Checks biases values.\n \"\"\"\n\n @staticmethod\n def map_references(ref_biases: Dict) ->Dict[str, List]:\n \"\"\"\n Returns backend-specific reference.\n \"\"\"\n return ref_biases\n\n @staticmethod\n def get_quantization_algorithm():\n return PostTrainingQuantization(subset_size=1, fast_bias_correction\n =False, advanced_parameters=AdvancedQuantizationParameters(\n overflow_fix=OverflowFix.DISABLE))\n\n @pytest.mark.parametrize('model_cls, ref_biases', ((\n MultipleConvTestModel, {'/conv_1/Conv': [0.6658976, -0.70563036],\n '/conv_2/Conv': [-0.307696, -0.42806846, 0.44965455],\n '/conv_3/Conv': [-0.0033792169, 1.0661412], '/conv_4/Conv': [-\n 0.6941606, 0.9958957, 0.6081058]}), (ConvTestModel, {'/conv/Conv':\n [0.11085186, 1.0017344]})))\n def test_update_bias(self, model_cls, ref_biases, tmpdir):\n model = self.backend_specific_model(model_cls(), tmpdir)\n dataset = Dataset(self.get_dataset(model_cls.INPUT_SIZE), self.\n get_transform_fn())\n quantization_algorithm = self.get_quantization_algorithm()\n quantized_model = quantization_algorithm.apply(model, dataset=dataset)\n mapped_ref_biases = self.map_references(ref_biases)\n self.check_bias(quantized_model, mapped_ref_biases)\n",
"step-4": "from abc import abstractmethod\nfrom typing import Dict, List, Tuple, TypeVar\nimport pytest\nfrom nncf.data import Dataset\nfrom nncf.quantization.advanced_parameters import AdvancedQuantizationParameters\nfrom nncf.quantization.advanced_parameters import OverflowFix\nfrom nncf.quantization.algorithms.bias_correction.backend import BiasCorrectionAlgoBackend\nfrom nncf.quantization.algorithms.post_training.algorithm import PostTrainingQuantization\nfrom tests.post_training.test_templates.helpers import ConvTestModel\nfrom tests.post_training.test_templates.helpers import MultipleConvTestModel\nfrom tests.post_training.test_templates.helpers import StaticDatasetMock\nTModel = TypeVar('TModel')\nTTensor = TypeVar('TTensor')\n\n\nclass TemplateTestBCAlgorithm:\n\n @staticmethod\n @abstractmethod\n def list_to_backend_type(data: List) ->TTensor:\n \"\"\"\n Convert list to backend specific type\n\n :param data: List of data.\n\n :return: Converted data.\n \"\"\"\n\n @staticmethod\n @abstractmethod\n def get_backend() ->BiasCorrectionAlgoBackend:\n \"\"\"\n Get backend specific BiasCorrectionAlgoBackend\n\n :return BiasCorrectionAlgoBackend: Backend specific BiasCorrectionAlgoBackend\n \"\"\"\n\n @staticmethod\n def fn_to_type(tensor):\n return tensor\n\n @staticmethod\n @abstractmethod\n def get_transform_fn():\n \"\"\"\n Get transformation function for dataset.\n \"\"\"\n\n def get_dataset(self, input_size: Tuple):\n \"\"\"\n Return backend specific random dataset.\n\n :param model: The model for which the dataset is being created.\n \"\"\"\n return StaticDatasetMock(input_size, self.fn_to_type)\n\n @staticmethod\n @abstractmethod\n def backend_specific_model(model: TModel, tmp_dir: str):\n \"\"\"\n Return backend specific model.\n \"\"\"\n\n @staticmethod\n @abstractmethod\n def check_bias(model: TModel, ref_biases: Dict):\n \"\"\"\n Checks biases values.\n \"\"\"\n\n @staticmethod\n def map_references(ref_biases: Dict) ->Dict[str, List]:\n \"\"\"\n Returns backend-specific reference.\n \"\"\"\n return ref_biases\n\n @staticmethod\n def get_quantization_algorithm():\n return PostTrainingQuantization(subset_size=1, fast_bias_correction\n =False, advanced_parameters=AdvancedQuantizationParameters(\n overflow_fix=OverflowFix.DISABLE))\n\n @pytest.mark.parametrize('model_cls, ref_biases', ((\n MultipleConvTestModel, {'/conv_1/Conv': [0.6658976, -0.70563036],\n '/conv_2/Conv': [-0.307696, -0.42806846, 0.44965455],\n '/conv_3/Conv': [-0.0033792169, 1.0661412], '/conv_4/Conv': [-\n 0.6941606, 0.9958957, 0.6081058]}), (ConvTestModel, {'/conv/Conv':\n [0.11085186, 1.0017344]})))\n def test_update_bias(self, model_cls, ref_biases, tmpdir):\n model = self.backend_specific_model(model_cls(), tmpdir)\n dataset = Dataset(self.get_dataset(model_cls.INPUT_SIZE), self.\n get_transform_fn())\n quantization_algorithm = self.get_quantization_algorithm()\n quantized_model = quantization_algorithm.apply(model, dataset=dataset)\n mapped_ref_biases = self.map_references(ref_biases)\n self.check_bias(quantized_model, mapped_ref_biases)\n",
"step-5": "# Copyright (c) 2023 Intel Corporation\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# http://www.apache.org/licenses/LICENSE-2.0\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom abc import abstractmethod\nfrom typing import Dict, List, Tuple, TypeVar\n\nimport pytest\n\nfrom nncf.data import Dataset\nfrom nncf.quantization.advanced_parameters import AdvancedQuantizationParameters\nfrom nncf.quantization.advanced_parameters import OverflowFix\nfrom nncf.quantization.algorithms.bias_correction.backend import BiasCorrectionAlgoBackend\nfrom nncf.quantization.algorithms.post_training.algorithm import PostTrainingQuantization\nfrom tests.post_training.test_templates.helpers import ConvTestModel\nfrom tests.post_training.test_templates.helpers import MultipleConvTestModel\nfrom tests.post_training.test_templates.helpers import StaticDatasetMock\n\nTModel = TypeVar(\"TModel\")\nTTensor = TypeVar(\"TTensor\")\n\n\nclass TemplateTestBCAlgorithm:\n @staticmethod\n @abstractmethod\n def list_to_backend_type(data: List) -> TTensor:\n \"\"\"\n Convert list to backend specific type\n\n :param data: List of data.\n\n :return: Converted data.\n \"\"\"\n\n @staticmethod\n @abstractmethod\n def get_backend() -> BiasCorrectionAlgoBackend:\n \"\"\"\n Get backend specific BiasCorrectionAlgoBackend\n\n :return BiasCorrectionAlgoBackend: Backend specific BiasCorrectionAlgoBackend\n \"\"\"\n\n @staticmethod\n def fn_to_type(tensor):\n return tensor\n\n @staticmethod\n @abstractmethod\n def get_transform_fn():\n \"\"\"\n Get transformation function for dataset.\n \"\"\"\n\n def get_dataset(self, input_size: Tuple):\n \"\"\"\n Return backend specific random dataset.\n\n :param model: The model for which the dataset is being created.\n \"\"\"\n return StaticDatasetMock(input_size, self.fn_to_type)\n\n @staticmethod\n @abstractmethod\n def backend_specific_model(model: TModel, tmp_dir: str):\n \"\"\"\n Return backend specific model.\n \"\"\"\n\n @staticmethod\n @abstractmethod\n def check_bias(model: TModel, ref_biases: Dict):\n \"\"\"\n Checks biases values.\n \"\"\"\n\n @staticmethod\n def map_references(ref_biases: Dict) -> Dict[str, List]:\n \"\"\"\n Returns backend-specific reference.\n \"\"\"\n return ref_biases\n\n @staticmethod\n def get_quantization_algorithm():\n return PostTrainingQuantization(\n subset_size=1,\n fast_bias_correction=False,\n advanced_parameters=AdvancedQuantizationParameters(overflow_fix=OverflowFix.DISABLE),\n )\n\n @pytest.mark.parametrize(\n \"model_cls, ref_biases\",\n (\n (\n MultipleConvTestModel,\n {\n \"/conv_1/Conv\": [0.6658976, -0.70563036],\n \"/conv_2/Conv\": [-0.307696, -0.42806846, 0.44965455],\n \"/conv_3/Conv\": [-0.0033792169, 1.0661412],\n \"/conv_4/Conv\": [-0.6941606, 0.9958957, 0.6081058],\n # Disabled latest layer due to backends differences\n # \"/conv_5/Conv\": [0.07476559, -0.75797373],\n },\n ),\n (ConvTestModel, {\"/conv/Conv\": [0.11085186, 1.0017344]}),\n ),\n )\n def test_update_bias(self, model_cls, ref_biases, tmpdir):\n model = self.backend_specific_model(model_cls(), tmpdir)\n dataset = Dataset(self.get_dataset(model_cls.INPUT_SIZE), 
self.get_transform_fn())\n\n quantization_algorithm = self.get_quantization_algorithm()\n quantized_model = quantization_algorithm.apply(model, dataset=dataset)\n\n mapped_ref_biases = self.map_references(ref_biases)\n self.check_bias(quantized_model, mapped_ref_biases)\n",
"step-ids": [
9,
11,
12,
13,
14
]
}
|
[
9,
11,
12,
13,
14
] |
from __future__ import with_statement
from fabric.api import *
from fabric.colors import *
from fabric.utils import puts
from fabric.context_managers import shell_env
env.hosts = ['[email protected]']
def deploy():
"deploys the project to the server"
with prefix('source /srv/django-envs/tweetset/bin/activate'):
with shell_env(DJANGO_SETTINGS_MODULE='tweetset.settings.production'):
with cd('/srv/django-projects/tweetset'):
puts(magenta("[Pulling changes]"))
run('git pull origin master')
puts(magenta("[Installing packages]"))
run('pip install -r requirements.txt')
with cd('/srv/django-projects/tweetset/tweetset'):
puts(magenta("[Migrating apps]"))
run('python manage.py migrate --no-initial-data')
puts(magenta("[Collecting static files]"))
run('python manage.py collectstatic --noinput')
puts(magenta("[Touching wsgi.py]"))
run('touch /srv/django-projects/tweetset/tweetset/tweetset/wsgi.py')
|
normal
|
{
"blob_id": "6111c9730c556ab3ab95f7685ffa135a2bbeb2ca",
"index": 5950,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef deploy():\n \"\"\"deploys the project to the server\"\"\"\n with prefix('source /srv/django-envs/tweetset/bin/activate'):\n with shell_env(DJANGO_SETTINGS_MODULE='tweetset.settings.production'):\n with cd('/srv/django-projects/tweetset'):\n puts(magenta('[Pulling changes]'))\n run('git pull origin master')\n puts(magenta('[Installing packages]'))\n run('pip install -r requirements.txt')\n with cd('/srv/django-projects/tweetset/tweetset'):\n puts(magenta('[Migrating apps]'))\n run('python manage.py migrate --no-initial-data')\n puts(magenta('[Collecting static files]'))\n run('python manage.py collectstatic --noinput')\n puts(magenta('[Touching wsgi.py]'))\n run('touch /srv/django-projects/tweetset/tweetset/tweetset/wsgi.py'\n )\n",
"step-3": "<mask token>\nenv.hosts = ['[email protected]']\n\n\ndef deploy():\n \"\"\"deploys the project to the server\"\"\"\n with prefix('source /srv/django-envs/tweetset/bin/activate'):\n with shell_env(DJANGO_SETTINGS_MODULE='tweetset.settings.production'):\n with cd('/srv/django-projects/tweetset'):\n puts(magenta('[Pulling changes]'))\n run('git pull origin master')\n puts(magenta('[Installing packages]'))\n run('pip install -r requirements.txt')\n with cd('/srv/django-projects/tweetset/tweetset'):\n puts(magenta('[Migrating apps]'))\n run('python manage.py migrate --no-initial-data')\n puts(magenta('[Collecting static files]'))\n run('python manage.py collectstatic --noinput')\n puts(magenta('[Touching wsgi.py]'))\n run('touch /srv/django-projects/tweetset/tweetset/tweetset/wsgi.py'\n )\n",
"step-4": "from __future__ import with_statement\nfrom fabric.api import *\nfrom fabric.colors import *\nfrom fabric.utils import puts\nfrom fabric.context_managers import shell_env\nenv.hosts = ['[email protected]']\n\n\ndef deploy():\n \"\"\"deploys the project to the server\"\"\"\n with prefix('source /srv/django-envs/tweetset/bin/activate'):\n with shell_env(DJANGO_SETTINGS_MODULE='tweetset.settings.production'):\n with cd('/srv/django-projects/tweetset'):\n puts(magenta('[Pulling changes]'))\n run('git pull origin master')\n puts(magenta('[Installing packages]'))\n run('pip install -r requirements.txt')\n with cd('/srv/django-projects/tweetset/tweetset'):\n puts(magenta('[Migrating apps]'))\n run('python manage.py migrate --no-initial-data')\n puts(magenta('[Collecting static files]'))\n run('python manage.py collectstatic --noinput')\n puts(magenta('[Touching wsgi.py]'))\n run('touch /srv/django-projects/tweetset/tweetset/tweetset/wsgi.py'\n )\n",
"step-5": "from __future__ import with_statement\nfrom fabric.api import *\nfrom fabric.colors import *\nfrom fabric.utils import puts\nfrom fabric.context_managers import shell_env\n\nenv.hosts = ['[email protected]']\n\ndef deploy():\n \"deploys the project to the server\"\n with prefix('source /srv/django-envs/tweetset/bin/activate'):\n with shell_env(DJANGO_SETTINGS_MODULE='tweetset.settings.production'):\n with cd('/srv/django-projects/tweetset'):\n puts(magenta(\"[Pulling changes]\"))\n run('git pull origin master')\n\n puts(magenta(\"[Installing packages]\"))\n run('pip install -r requirements.txt')\n\n with cd('/srv/django-projects/tweetset/tweetset'):\n puts(magenta(\"[Migrating apps]\"))\n run('python manage.py migrate --no-initial-data')\n\n puts(magenta(\"[Collecting static files]\"))\n run('python manage.py collectstatic --noinput')\n\n puts(magenta(\"[Touching wsgi.py]\"))\n run('touch /srv/django-projects/tweetset/tweetset/tweetset/wsgi.py')\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
def function_addrs(r2):
"""
Yield a list of all the function's start addresses.
"""
for addr in r2.cmdj('aflqj'):
yield int(addr, 16)
<|reserved_special_token_0|>
def basic_block_coverage(r2, translation_blocks):
"""
Calculate the basic block coverage based on the covered TBs.
Returns a set of *covered* basic block start addresses
"""
covered_bbs = set()
for func_addr in function_addrs(r2):
graph = r2.cmdj('agj 0x%x' % func_addr)
assert len(graph) == 1
graph = graph[0]
for tb_start_addr, tb_end_addr in translation_blocks:
for bb in graph['blocks']:
bb_start_addr = bb['offset']
bb_end_addr = bb_start_addr + bb['size']
if (bb_end_addr >= tb_start_addr >= bb_start_addr or
bb_start_addr <= tb_end_addr <= bb_end_addr):
covered_bbs.add(bb_start_addr)
return covered_bbs
def render_functions(r2, covered_bbs, output_dir):
"""
Renders SVG graphs of each of the functions in the program. Basic blocks
that were executed by S2E are coloured green.
The resulting SVG images are written to `output_dir`.
"""
for func_addr in function_addrs(r2):
func_name = r2.cmdj('agj 0x%x' % func_addr)[0]['name']
dot_str = r2.cmd('ag 0x%x' % func_addr)
dot = pydot.graph_from_dot_data(dot_str)
if not dot:
continue
else:
dot = dot[0]
for node in dot.get_nodes():
node_name = node.get_name()
try:
if node_name.startswith('"'):
node_name = node_name[1:-1]
node_addr = int(node_name, 16)
except ValueError:
continue
if node_addr in covered_bbs:
node.set_fillcolor('darkolivegreen2')
svg_path = os.path.join(output_dir, '%s_0x%x.svg' % (func_name,
func_addr))
with open(svg_path, 'wb') as f:
svg = dot.create_svg()
f.write(svg)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def function_addrs(r2):
"""
Yield a list of all the function's start addresses.
"""
for addr in r2.cmdj('aflqj'):
yield int(addr, 16)
<|reserved_special_token_0|>
def basic_block_coverage(r2, translation_blocks):
"""
Calculate the basic block coverage based on the covered TBs.
Returns a set of *covered* basic block start addresses
"""
covered_bbs = set()
for func_addr in function_addrs(r2):
graph = r2.cmdj('agj 0x%x' % func_addr)
assert len(graph) == 1
graph = graph[0]
for tb_start_addr, tb_end_addr in translation_blocks:
for bb in graph['blocks']:
bb_start_addr = bb['offset']
bb_end_addr = bb_start_addr + bb['size']
if (bb_end_addr >= tb_start_addr >= bb_start_addr or
bb_start_addr <= tb_end_addr <= bb_end_addr):
covered_bbs.add(bb_start_addr)
return covered_bbs
def render_functions(r2, covered_bbs, output_dir):
"""
Renders SVG graphs of each of the functions in the program. Basic blocks
that were executed by S2E are coloured green.
The resulting SVG images are written to `output_dir`.
"""
for func_addr in function_addrs(r2):
func_name = r2.cmdj('agj 0x%x' % func_addr)[0]['name']
dot_str = r2.cmd('ag 0x%x' % func_addr)
dot = pydot.graph_from_dot_data(dot_str)
if not dot:
continue
else:
dot = dot[0]
for node in dot.get_nodes():
node_name = node.get_name()
try:
if node_name.startswith('"'):
node_name = node_name[1:-1]
node_addr = int(node_name, 16)
except ValueError:
continue
if node_addr in covered_bbs:
node.set_fillcolor('darkolivegreen2')
svg_path = os.path.join(output_dir, '%s_0x%x.svg' % (func_name,
func_addr))
with open(svg_path, 'wb') as f:
svg = dot.create_svg()
f.write(svg)
def generate_graph(s2e_output_dir, s2e_num, project_name):
"""
Generate the PNG graph for the analysis in the output_dir
"""
s2e_env_path = S2E_settings.S2E_ENVIRONMENT_FOLDER_PATH
output_dir = os.path.join(s2e_output_dir, 'functions')
os.makedirs(output_dir)
if not os.path.isfile(os.path.join(s2e_env_path, 's2e.yaml')):
print('ERROR: %s is not an S2E environment' % s2e_env_path)
return
project_path = os.path.join(s2e_env_path, 'projects', project_name)
if not os.path.isdir(project_path):
print('ERROR: %s is not a valid project' % project_name)
return
if not os.path.isdir(output_dir):
print('ERROR: %s is not a valid output directory' % output_dir)
return
s2e_last_path = os.path.join(project_path, 's2e-last')
if not os.path.isdir(s2e_last_path):
print('ERROR: %s has no s2e-last' % project_name)
return
tb_coverage_files = glob.glob(os.path.join(s2e_last_path, '*',
'tbcoverage-*.json')) + glob.glob(os.path.join(s2e_last_path,
'tbcoverage-*.json'))
if not tb_coverage_files:
print(
'ERROR: No translation block coverage files found in s2e-last. Did you enable the ``TranslationBlockCoverage`` plugin in s2e-config.lua?'
)
return
covered_tbs = set()
for tb_coverage_file in tb_coverage_files:
tb_coverage_data = parse_tb_file(tb_coverage_file, project_name)
if not tb_coverage_data:
continue
covered_tbs.update((start, end) for start, end, _ in tb_coverage_data)
r2 = r2pipe.open(os.path.join(project_path, project_name))
r2.cmd('aaa')
covered_bbs = basic_block_coverage(r2, covered_tbs)
render_functions(r2, covered_bbs, output_dir)
base_path = os.path.join(project_name, 's2e-out-%d' % s2e_num, 'functions')
return [[file_[0:-4], os.path.join(base_path, file_)] for file_ in os.
listdir(output_dir)]
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def function_addrs(r2):
"""
Yield a list of all the function's start addresses.
"""
for addr in r2.cmdj('aflqj'):
yield int(addr, 16)
def parse_tb_file(path, module):
"""
Parse a translation block coverage file generated by S2E's
``TranslationBlockCoverage`` plugin.
"""
with open(path, 'r') as f:
try:
tb_coverage_data = json.load(f)
except Exception:
print('WARN: Failed to parse translation block JSON file %s' % path
)
return None
if not tb_coverage_data:
print('WARN: Translation block JSON file %s is empty' % path)
return None
if module not in tb_coverage_data:
print('WARN: Target %s not found in translation block JSON file %s' %
(module, path))
return None
return tb_coverage_data[module]
def basic_block_coverage(r2, translation_blocks):
"""
Calculate the basic block coverage based on the covered TBs.
Returns a set of *covered* basic block start addresses
"""
covered_bbs = set()
for func_addr in function_addrs(r2):
graph = r2.cmdj('agj 0x%x' % func_addr)
assert len(graph) == 1
graph = graph[0]
for tb_start_addr, tb_end_addr in translation_blocks:
for bb in graph['blocks']:
bb_start_addr = bb['offset']
bb_end_addr = bb_start_addr + bb['size']
if (bb_end_addr >= tb_start_addr >= bb_start_addr or
bb_start_addr <= tb_end_addr <= bb_end_addr):
covered_bbs.add(bb_start_addr)
return covered_bbs
def render_functions(r2, covered_bbs, output_dir):
"""
Renders SVG graphs of each of the functions in the program. Basic blocks
that were executed by S2E are coloured green.
The resulting SVG images are written to `output_dir`.
"""
for func_addr in function_addrs(r2):
func_name = r2.cmdj('agj 0x%x' % func_addr)[0]['name']
dot_str = r2.cmd('ag 0x%x' % func_addr)
dot = pydot.graph_from_dot_data(dot_str)
if not dot:
continue
else:
dot = dot[0]
for node in dot.get_nodes():
node_name = node.get_name()
try:
if node_name.startswith('"'):
node_name = node_name[1:-1]
node_addr = int(node_name, 16)
except ValueError:
continue
if node_addr in covered_bbs:
node.set_fillcolor('darkolivegreen2')
svg_path = os.path.join(output_dir, '%s_0x%x.svg' % (func_name,
func_addr))
with open(svg_path, 'wb') as f:
svg = dot.create_svg()
f.write(svg)
def generate_graph(s2e_output_dir, s2e_num, project_name):
"""
Generate the PNG graph for the analysis in the output_dir
"""
s2e_env_path = S2E_settings.S2E_ENVIRONMENT_FOLDER_PATH
output_dir = os.path.join(s2e_output_dir, 'functions')
os.makedirs(output_dir)
if not os.path.isfile(os.path.join(s2e_env_path, 's2e.yaml')):
print('ERROR: %s is not an S2E environment' % s2e_env_path)
return
project_path = os.path.join(s2e_env_path, 'projects', project_name)
if not os.path.isdir(project_path):
print('ERROR: %s is not a valid project' % project_name)
return
if not os.path.isdir(output_dir):
print('ERROR: %s is not a valid output directory' % output_dir)
return
s2e_last_path = os.path.join(project_path, 's2e-last')
if not os.path.isdir(s2e_last_path):
print('ERROR: %s has no s2e-last' % project_name)
return
tb_coverage_files = glob.glob(os.path.join(s2e_last_path, '*',
'tbcoverage-*.json')) + glob.glob(os.path.join(s2e_last_path,
'tbcoverage-*.json'))
if not tb_coverage_files:
print(
'ERROR: No translation block coverage files found in s2e-last. Did you enable the ``TranslationBlockCoverage`` plugin in s2e-config.lua?'
)
return
covered_tbs = set()
for tb_coverage_file in tb_coverage_files:
tb_coverage_data = parse_tb_file(tb_coverage_file, project_name)
if not tb_coverage_data:
continue
covered_tbs.update((start, end) for start, end, _ in tb_coverage_data)
r2 = r2pipe.open(os.path.join(project_path, project_name))
r2.cmd('aaa')
covered_bbs = basic_block_coverage(r2, covered_tbs)
render_functions(r2, covered_bbs, output_dir)
base_path = os.path.join(project_name, 's2e-out-%d' % s2e_num, 'functions')
return [[file_[0:-4], os.path.join(base_path, file_)] for file_ in os.
listdir(output_dir)]
<|reserved_special_token_1|>
<|reserved_special_token_0|>
from __future__ import print_function
import glob
import json
import os
import pydot
import r2pipe
import s2e_web.S2E_settings as S2E_settings
def function_addrs(r2):
"""
Yield a list of all the function's start addresses.
"""
for addr in r2.cmdj('aflqj'):
yield int(addr, 16)
def parse_tb_file(path, module):
"""
Parse a translation block coverage file generated by S2E's
``TranslationBlockCoverage`` plugin.
"""
with open(path, 'r') as f:
try:
tb_coverage_data = json.load(f)
except Exception:
print('WARN: Failed to parse translation block JSON file %s' % path
)
return None
if not tb_coverage_data:
print('WARN: Translation block JSON file %s is empty' % path)
return None
if module not in tb_coverage_data:
print('WARN: Target %s not found in translation block JSON file %s' %
(module, path))
return None
return tb_coverage_data[module]
def basic_block_coverage(r2, translation_blocks):
"""
Calculate the basic block coverage based on the covered TBs.
Returns a set of *covered* basic block start addresses
"""
covered_bbs = set()
for func_addr in function_addrs(r2):
graph = r2.cmdj('agj 0x%x' % func_addr)
assert len(graph) == 1
graph = graph[0]
for tb_start_addr, tb_end_addr in translation_blocks:
for bb in graph['blocks']:
bb_start_addr = bb['offset']
bb_end_addr = bb_start_addr + bb['size']
if (bb_end_addr >= tb_start_addr >= bb_start_addr or
bb_start_addr <= tb_end_addr <= bb_end_addr):
covered_bbs.add(bb_start_addr)
return covered_bbs
def render_functions(r2, covered_bbs, output_dir):
"""
Renders SVG graphs of each of the functions in the program. Basic blocks
that were executed by S2E are coloured green.
The resulting SVG images are written to `output_dir`.
"""
for func_addr in function_addrs(r2):
func_name = r2.cmdj('agj 0x%x' % func_addr)[0]['name']
dot_str = r2.cmd('ag 0x%x' % func_addr)
dot = pydot.graph_from_dot_data(dot_str)
if not dot:
continue
else:
dot = dot[0]
for node in dot.get_nodes():
node_name = node.get_name()
try:
if node_name.startswith('"'):
node_name = node_name[1:-1]
node_addr = int(node_name, 16)
except ValueError:
continue
if node_addr in covered_bbs:
node.set_fillcolor('darkolivegreen2')
svg_path = os.path.join(output_dir, '%s_0x%x.svg' % (func_name,
func_addr))
with open(svg_path, 'wb') as f:
svg = dot.create_svg()
f.write(svg)
def generate_graph(s2e_output_dir, s2e_num, project_name):
"""
Generate the PNG graph for the analysis in the output_dir
"""
s2e_env_path = S2E_settings.S2E_ENVIRONMENT_FOLDER_PATH
output_dir = os.path.join(s2e_output_dir, 'functions')
os.makedirs(output_dir)
if not os.path.isfile(os.path.join(s2e_env_path, 's2e.yaml')):
print('ERROR: %s is not an S2E environment' % s2e_env_path)
return
project_path = os.path.join(s2e_env_path, 'projects', project_name)
if not os.path.isdir(project_path):
print('ERROR: %s is not a valid project' % project_name)
return
if not os.path.isdir(output_dir):
print('ERROR: %s is not a valid output directory' % output_dir)
return
s2e_last_path = os.path.join(project_path, 's2e-last')
if not os.path.isdir(s2e_last_path):
print('ERROR: %s has no s2e-last' % project_name)
return
tb_coverage_files = glob.glob(os.path.join(s2e_last_path, '*',
'tbcoverage-*.json')) + glob.glob(os.path.join(s2e_last_path,
'tbcoverage-*.json'))
if not tb_coverage_files:
print(
'ERROR: No translation block coverage files found in s2e-last. Did you enable the ``TranslationBlockCoverage`` plugin in s2e-config.lua?'
)
return
covered_tbs = set()
for tb_coverage_file in tb_coverage_files:
tb_coverage_data = parse_tb_file(tb_coverage_file, project_name)
if not tb_coverage_data:
continue
covered_tbs.update((start, end) for start, end, _ in tb_coverage_data)
r2 = r2pipe.open(os.path.join(project_path, project_name))
r2.cmd('aaa')
covered_bbs = basic_block_coverage(r2, covered_tbs)
render_functions(r2, covered_bbs, output_dir)
base_path = os.path.join(project_name, 's2e-out-%d' % s2e_num, 'functions')
return [[file_[0:-4], os.path.join(base_path, file_)] for file_ in os.
listdir(output_dir)]
<|reserved_special_token_1|>
"""
Copyright (C) Adrian Herrera, 2017
You will need to install r2pipe and pydot:
```
pip install r2pipe pydot
```
"""
from __future__ import print_function
import glob
import json
import os
import pydot
import r2pipe
import s2e_web.S2E_settings as S2E_settings
def function_addrs(r2):
"""
Yield a list of all the function's start addresses.
"""
for addr in r2.cmdj('aflqj'):
yield int(addr, 16)
def parse_tb_file(path, module):
"""
Parse a translation block coverage file generated by S2E's
``TranslationBlockCoverage`` plugin.
"""
with open(path, 'r') as f:
try:
tb_coverage_data = json.load(f)
except Exception:
print('WARN: Failed to parse translation block JSON file %s' % path)
return None
if not tb_coverage_data:
print('WARN: Translation block JSON file %s is empty' % path)
return None
if module not in tb_coverage_data:
print('WARN: Target %s not found in translation block JSON file %s' %
(module, path))
return None
return tb_coverage_data[module]
def basic_block_coverage(r2, translation_blocks):
"""
Calculate the basic block coverage based on the covered TBs.
Returns a set of *covered* basic block start addresses
"""
covered_bbs = set()
for func_addr in function_addrs(r2):
graph = r2.cmdj('agj 0x%x' % func_addr)
assert len(graph) == 1
graph = graph[0]
for tb_start_addr, tb_end_addr in translation_blocks:
for bb in graph['blocks']:
bb_start_addr = bb['offset']
bb_end_addr = bb_start_addr + bb['size']
# Check if the translation block falls within a basic block OR
# a basic block falls within a translation block
if (bb_end_addr >= tb_start_addr >= bb_start_addr or
bb_start_addr <= tb_end_addr <= bb_end_addr):
covered_bbs.add(bb_start_addr)
return covered_bbs
def render_functions(r2, covered_bbs, output_dir):
"""
Renders SVG graphs of each of the functions in the program. Basic blocks
that were executed by S2E are coloured green.
The resulting SVG images are written to `output_dir`.
"""
for func_addr in function_addrs(r2):
# Get the function name
func_name = r2.cmdj('agj 0x%x' % func_addr)[0]['name']
dot_str = r2.cmd('ag 0x%x' % func_addr)
dot = pydot.graph_from_dot_data(dot_str)
if not dot:
continue
else:
dot = dot[0]
for node in dot.get_nodes():
node_name = node.get_name()
try:
# XXX This is very hacky - need something more robust
if node_name.startswith('"'):
node_name = node_name[1:-1]
node_addr = int(node_name, 16)
except ValueError:
# Node name is not a hex string
continue
if node_addr in covered_bbs:
node.set_fillcolor('darkolivegreen2')
svg_path = os.path.join(output_dir, '%s_0x%x.svg' % (func_name, func_addr))
with open(svg_path, 'wb') as f:
svg = dot.create_svg()
f.write(svg)
def generate_graph(s2e_output_dir, s2e_num, project_name):
"""
Generate the PNG graph for the analysis in the output_dir
"""
s2e_env_path = S2E_settings.S2E_ENVIRONMENT_FOLDER_PATH
output_dir = os.path.join(s2e_output_dir, 'functions')
os.makedirs(output_dir)
# Check that the given S2E environment is legitimate
if not os.path.isfile(os.path.join(s2e_env_path, 's2e.yaml')):
print('ERROR: %s is not an S2E environment' % s2e_env_path)
return
# Check that the given project exists in the environment
project_path = os.path.join(s2e_env_path, 'projects', project_name)
if not os.path.isdir(project_path):
print('ERROR: %s is not a valid project' % project_name)
return
# Check that the output directory exists
if not os.path.isdir(output_dir):
print('ERROR: %s is not a valid output directory' % output_dir)
return
# Check that the project has been executed at least once
s2e_last_path = os.path.join(project_path, 's2e-last')
if not os.path.isdir(s2e_last_path):
print('ERROR: %s has no s2e-last' % project_name)
return
# Get all the TB coverage files
tb_coverage_files = glob.glob(os.path.join(s2e_last_path, '*', 'tbcoverage-*.json')) + \
glob.glob(os.path.join(s2e_last_path, 'tbcoverage-*.json'))
if not tb_coverage_files:
print('ERROR: No translation block coverage files found in s2e-last. '
'Did you enable the ``TranslationBlockCoverage`` plugin in '
's2e-config.lua?')
return
# Parse the TB coverage files
covered_tbs = set()
for tb_coverage_file in tb_coverage_files:
# XXX A project can have a different name to the target program
tb_coverage_data = parse_tb_file(tb_coverage_file, project_name)
if not tb_coverage_data:
continue
covered_tbs.update((start, end) for start, end, _ in tb_coverage_data)
# Open the program in Radare and do the initial analysis
# XXX A project can have a different name to the target program
r2 = r2pipe.open(os.path.join(project_path, project_name))
r2.cmd('aaa')
# Calculate the basic block coverage and render the information as a set
# of PNG images for each function
covered_bbs = basic_block_coverage(r2, covered_tbs)
render_functions(r2, covered_bbs, output_dir)
base_path = os.path.join(project_name, 's2e-out-%d' % s2e_num, 'functions')
return [[file_[0:-4], os.path.join(base_path, file_)] for file_ in os.listdir(output_dir)]
|
flexible
|
{
"blob_id": "2aee4af2e5a5c3f59dde4d9dd46f8d124a32fb27",
"index": 2590,
"step-1": "<mask token>\n\n\ndef function_addrs(r2):\n \"\"\"\n Yield a list of all the function's start addresses.\n \"\"\"\n for addr in r2.cmdj('aflqj'):\n yield int(addr, 16)\n\n\n<mask token>\n\n\ndef basic_block_coverage(r2, translation_blocks):\n \"\"\"\n Calculate the basic block coverage based on the covered TBs.\n\n Returns a set of *covered* basic block start addresses\n \"\"\"\n covered_bbs = set()\n for func_addr in function_addrs(r2):\n graph = r2.cmdj('agj 0x%x' % func_addr)\n assert len(graph) == 1\n graph = graph[0]\n for tb_start_addr, tb_end_addr in translation_blocks:\n for bb in graph['blocks']:\n bb_start_addr = bb['offset']\n bb_end_addr = bb_start_addr + bb['size']\n if (bb_end_addr >= tb_start_addr >= bb_start_addr or \n bb_start_addr <= tb_end_addr <= bb_end_addr):\n covered_bbs.add(bb_start_addr)\n return covered_bbs\n\n\ndef render_functions(r2, covered_bbs, output_dir):\n \"\"\"\n Renders SVG graphs of each of the functions in the program. Basic blocks\n that were executed by S2E are coloured green.\n\n The resulting SVG images are written to `output_dir`.\n \"\"\"\n for func_addr in function_addrs(r2):\n func_name = r2.cmdj('agj 0x%x' % func_addr)[0]['name']\n dot_str = r2.cmd('ag 0x%x' % func_addr)\n dot = pydot.graph_from_dot_data(dot_str)\n if not dot:\n continue\n else:\n dot = dot[0]\n for node in dot.get_nodes():\n node_name = node.get_name()\n try:\n if node_name.startswith('\"'):\n node_name = node_name[1:-1]\n node_addr = int(node_name, 16)\n except ValueError:\n continue\n if node_addr in covered_bbs:\n node.set_fillcolor('darkolivegreen2')\n svg_path = os.path.join(output_dir, '%s_0x%x.svg' % (func_name,\n func_addr))\n with open(svg_path, 'wb') as f:\n svg = dot.create_svg()\n f.write(svg)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef function_addrs(r2):\n \"\"\"\n Yield a list of all the function's start addresses.\n \"\"\"\n for addr in r2.cmdj('aflqj'):\n yield int(addr, 16)\n\n\n<mask token>\n\n\ndef basic_block_coverage(r2, translation_blocks):\n \"\"\"\n Calculate the basic block coverage based on the covered TBs.\n\n Returns a set of *covered* basic block start addresses\n \"\"\"\n covered_bbs = set()\n for func_addr in function_addrs(r2):\n graph = r2.cmdj('agj 0x%x' % func_addr)\n assert len(graph) == 1\n graph = graph[0]\n for tb_start_addr, tb_end_addr in translation_blocks:\n for bb in graph['blocks']:\n bb_start_addr = bb['offset']\n bb_end_addr = bb_start_addr + bb['size']\n if (bb_end_addr >= tb_start_addr >= bb_start_addr or \n bb_start_addr <= tb_end_addr <= bb_end_addr):\n covered_bbs.add(bb_start_addr)\n return covered_bbs\n\n\ndef render_functions(r2, covered_bbs, output_dir):\n \"\"\"\n Renders SVG graphs of each of the functions in the program. Basic blocks\n that were executed by S2E are coloured green.\n\n The resulting SVG images are written to `output_dir`.\n \"\"\"\n for func_addr in function_addrs(r2):\n func_name = r2.cmdj('agj 0x%x' % func_addr)[0]['name']\n dot_str = r2.cmd('ag 0x%x' % func_addr)\n dot = pydot.graph_from_dot_data(dot_str)\n if not dot:\n continue\n else:\n dot = dot[0]\n for node in dot.get_nodes():\n node_name = node.get_name()\n try:\n if node_name.startswith('\"'):\n node_name = node_name[1:-1]\n node_addr = int(node_name, 16)\n except ValueError:\n continue\n if node_addr in covered_bbs:\n node.set_fillcolor('darkolivegreen2')\n svg_path = os.path.join(output_dir, '%s_0x%x.svg' % (func_name,\n func_addr))\n with open(svg_path, 'wb') as f:\n svg = dot.create_svg()\n f.write(svg)\n\n\ndef generate_graph(s2e_output_dir, s2e_num, project_name):\n \"\"\"\n Generate the PNG graph for the analysis in the output_dir\n \"\"\"\n s2e_env_path = S2E_settings.S2E_ENVIRONMENT_FOLDER_PATH\n output_dir = os.path.join(s2e_output_dir, 'functions')\n os.makedirs(output_dir)\n if not os.path.isfile(os.path.join(s2e_env_path, 's2e.yaml')):\n print('ERROR: %s is not an S2E environment' % s2e_env_path)\n return\n project_path = os.path.join(s2e_env_path, 'projects', project_name)\n if not os.path.isdir(project_path):\n print('ERROR: %s is not a valid project' % project_name)\n return\n if not os.path.isdir(output_dir):\n print('ERROR: %s is not a valid output directory' % output_dir)\n return\n s2e_last_path = os.path.join(project_path, 's2e-last')\n if not os.path.isdir(s2e_last_path):\n print('ERROR: %s has no s2e-last' % project_name)\n return\n tb_coverage_files = glob.glob(os.path.join(s2e_last_path, '*',\n 'tbcoverage-*.json')) + glob.glob(os.path.join(s2e_last_path,\n 'tbcoverage-*.json'))\n if not tb_coverage_files:\n print(\n 'ERROR: No translation block coverage files found in s2e-last. 
Did you enable the ``TranslationBlockCoverage`` plugin in s2e-config.lua?'\n )\n return\n covered_tbs = set()\n for tb_coverage_file in tb_coverage_files:\n tb_coverage_data = parse_tb_file(tb_coverage_file, project_name)\n if not tb_coverage_data:\n continue\n covered_tbs.update((start, end) for start, end, _ in tb_coverage_data)\n r2 = r2pipe.open(os.path.join(project_path, project_name))\n r2.cmd('aaa')\n covered_bbs = basic_block_coverage(r2, covered_tbs)\n render_functions(r2, covered_bbs, output_dir)\n base_path = os.path.join(project_name, 's2e-out-%d' % s2e_num, 'functions')\n return [[file_[0:-4], os.path.join(base_path, file_)] for file_ in os.\n listdir(output_dir)]\n",
"step-3": "<mask token>\n\n\ndef function_addrs(r2):\n \"\"\"\n Yield a list of all the function's start addresses.\n \"\"\"\n for addr in r2.cmdj('aflqj'):\n yield int(addr, 16)\n\n\ndef parse_tb_file(path, module):\n \"\"\"\n Parse a translation block coverage file generated by S2E's\n ``TranslationBlockCoverage`` plugin.\n \"\"\"\n with open(path, 'r') as f:\n try:\n tb_coverage_data = json.load(f)\n except Exception:\n print('WARN: Failed to parse translation block JSON file %s' % path\n )\n return None\n if not tb_coverage_data:\n print('WARN: Translation block JSON file %s is empty' % path)\n return None\n if module not in tb_coverage_data:\n print('WARN: Target %s not found in translation block JSON file %s' %\n (module, path))\n return None\n return tb_coverage_data[module]\n\n\ndef basic_block_coverage(r2, translation_blocks):\n \"\"\"\n Calculate the basic block coverage based on the covered TBs.\n\n Returns a set of *covered* basic block start addresses\n \"\"\"\n covered_bbs = set()\n for func_addr in function_addrs(r2):\n graph = r2.cmdj('agj 0x%x' % func_addr)\n assert len(graph) == 1\n graph = graph[0]\n for tb_start_addr, tb_end_addr in translation_blocks:\n for bb in graph['blocks']:\n bb_start_addr = bb['offset']\n bb_end_addr = bb_start_addr + bb['size']\n if (bb_end_addr >= tb_start_addr >= bb_start_addr or \n bb_start_addr <= tb_end_addr <= bb_end_addr):\n covered_bbs.add(bb_start_addr)\n return covered_bbs\n\n\ndef render_functions(r2, covered_bbs, output_dir):\n \"\"\"\n Renders SVG graphs of each of the functions in the program. Basic blocks\n that were executed by S2E are coloured green.\n\n The resulting SVG images are written to `output_dir`.\n \"\"\"\n for func_addr in function_addrs(r2):\n func_name = r2.cmdj('agj 0x%x' % func_addr)[0]['name']\n dot_str = r2.cmd('ag 0x%x' % func_addr)\n dot = pydot.graph_from_dot_data(dot_str)\n if not dot:\n continue\n else:\n dot = dot[0]\n for node in dot.get_nodes():\n node_name = node.get_name()\n try:\n if node_name.startswith('\"'):\n node_name = node_name[1:-1]\n node_addr = int(node_name, 16)\n except ValueError:\n continue\n if node_addr in covered_bbs:\n node.set_fillcolor('darkolivegreen2')\n svg_path = os.path.join(output_dir, '%s_0x%x.svg' % (func_name,\n func_addr))\n with open(svg_path, 'wb') as f:\n svg = dot.create_svg()\n f.write(svg)\n\n\ndef generate_graph(s2e_output_dir, s2e_num, project_name):\n \"\"\"\n Generate the PNG graph for the analysis in the output_dir\n \"\"\"\n s2e_env_path = S2E_settings.S2E_ENVIRONMENT_FOLDER_PATH\n output_dir = os.path.join(s2e_output_dir, 'functions')\n os.makedirs(output_dir)\n if not os.path.isfile(os.path.join(s2e_env_path, 's2e.yaml')):\n print('ERROR: %s is not an S2E environment' % s2e_env_path)\n return\n project_path = os.path.join(s2e_env_path, 'projects', project_name)\n if not os.path.isdir(project_path):\n print('ERROR: %s is not a valid project' % project_name)\n return\n if not os.path.isdir(output_dir):\n print('ERROR: %s is not a valid output directory' % output_dir)\n return\n s2e_last_path = os.path.join(project_path, 's2e-last')\n if not os.path.isdir(s2e_last_path):\n print('ERROR: %s has no s2e-last' % project_name)\n return\n tb_coverage_files = glob.glob(os.path.join(s2e_last_path, '*',\n 'tbcoverage-*.json')) + glob.glob(os.path.join(s2e_last_path,\n 'tbcoverage-*.json'))\n if not tb_coverage_files:\n print(\n 'ERROR: No translation block coverage files found in s2e-last. 
Did you enable the ``TranslationBlockCoverage`` plugin in s2e-config.lua?'\n )\n return\n covered_tbs = set()\n for tb_coverage_file in tb_coverage_files:\n tb_coverage_data = parse_tb_file(tb_coverage_file, project_name)\n if not tb_coverage_data:\n continue\n covered_tbs.update((start, end) for start, end, _ in tb_coverage_data)\n r2 = r2pipe.open(os.path.join(project_path, project_name))\n r2.cmd('aaa')\n covered_bbs = basic_block_coverage(r2, covered_tbs)\n render_functions(r2, covered_bbs, output_dir)\n base_path = os.path.join(project_name, 's2e-out-%d' % s2e_num, 'functions')\n return [[file_[0:-4], os.path.join(base_path, file_)] for file_ in os.\n listdir(output_dir)]\n",
"step-4": "<mask token>\nfrom __future__ import print_function\nimport glob\nimport json\nimport os\nimport pydot\nimport r2pipe\nimport s2e_web.S2E_settings as S2E_settings\n\n\ndef function_addrs(r2):\n \"\"\"\n Yield a list of all the function's start addresses.\n \"\"\"\n for addr in r2.cmdj('aflqj'):\n yield int(addr, 16)\n\n\ndef parse_tb_file(path, module):\n \"\"\"\n Parse a translation block coverage file generated by S2E's\n ``TranslationBlockCoverage`` plugin.\n \"\"\"\n with open(path, 'r') as f:\n try:\n tb_coverage_data = json.load(f)\n except Exception:\n print('WARN: Failed to parse translation block JSON file %s' % path\n )\n return None\n if not tb_coverage_data:\n print('WARN: Translation block JSON file %s is empty' % path)\n return None\n if module not in tb_coverage_data:\n print('WARN: Target %s not found in translation block JSON file %s' %\n (module, path))\n return None\n return tb_coverage_data[module]\n\n\ndef basic_block_coverage(r2, translation_blocks):\n \"\"\"\n Calculate the basic block coverage based on the covered TBs.\n\n Returns a set of *covered* basic block start addresses\n \"\"\"\n covered_bbs = set()\n for func_addr in function_addrs(r2):\n graph = r2.cmdj('agj 0x%x' % func_addr)\n assert len(graph) == 1\n graph = graph[0]\n for tb_start_addr, tb_end_addr in translation_blocks:\n for bb in graph['blocks']:\n bb_start_addr = bb['offset']\n bb_end_addr = bb_start_addr + bb['size']\n if (bb_end_addr >= tb_start_addr >= bb_start_addr or \n bb_start_addr <= tb_end_addr <= bb_end_addr):\n covered_bbs.add(bb_start_addr)\n return covered_bbs\n\n\ndef render_functions(r2, covered_bbs, output_dir):\n \"\"\"\n Renders SVG graphs of each of the functions in the program. Basic blocks\n that were executed by S2E are coloured green.\n\n The resulting SVG images are written to `output_dir`.\n \"\"\"\n for func_addr in function_addrs(r2):\n func_name = r2.cmdj('agj 0x%x' % func_addr)[0]['name']\n dot_str = r2.cmd('ag 0x%x' % func_addr)\n dot = pydot.graph_from_dot_data(dot_str)\n if not dot:\n continue\n else:\n dot = dot[0]\n for node in dot.get_nodes():\n node_name = node.get_name()\n try:\n if node_name.startswith('\"'):\n node_name = node_name[1:-1]\n node_addr = int(node_name, 16)\n except ValueError:\n continue\n if node_addr in covered_bbs:\n node.set_fillcolor('darkolivegreen2')\n svg_path = os.path.join(output_dir, '%s_0x%x.svg' % (func_name,\n func_addr))\n with open(svg_path, 'wb') as f:\n svg = dot.create_svg()\n f.write(svg)\n\n\ndef generate_graph(s2e_output_dir, s2e_num, project_name):\n \"\"\"\n Generate the PNG graph for the analysis in the output_dir\n \"\"\"\n s2e_env_path = S2E_settings.S2E_ENVIRONMENT_FOLDER_PATH\n output_dir = os.path.join(s2e_output_dir, 'functions')\n os.makedirs(output_dir)\n if not os.path.isfile(os.path.join(s2e_env_path, 's2e.yaml')):\n print('ERROR: %s is not an S2E environment' % s2e_env_path)\n return\n project_path = os.path.join(s2e_env_path, 'projects', project_name)\n if not os.path.isdir(project_path):\n print('ERROR: %s is not a valid project' % project_name)\n return\n if not os.path.isdir(output_dir):\n print('ERROR: %s is not a valid output directory' % output_dir)\n return\n s2e_last_path = os.path.join(project_path, 's2e-last')\n if not os.path.isdir(s2e_last_path):\n print('ERROR: %s has no s2e-last' % project_name)\n return\n tb_coverage_files = glob.glob(os.path.join(s2e_last_path, '*',\n 'tbcoverage-*.json')) + glob.glob(os.path.join(s2e_last_path,\n 'tbcoverage-*.json'))\n if not tb_coverage_files:\n 
print(\n 'ERROR: No translation block coverage files found in s2e-last. Did you enable the ``TranslationBlockCoverage`` plugin in s2e-config.lua?'\n )\n return\n covered_tbs = set()\n for tb_coverage_file in tb_coverage_files:\n tb_coverage_data = parse_tb_file(tb_coverage_file, project_name)\n if not tb_coverage_data:\n continue\n covered_tbs.update((start, end) for start, end, _ in tb_coverage_data)\n r2 = r2pipe.open(os.path.join(project_path, project_name))\n r2.cmd('aaa')\n covered_bbs = basic_block_coverage(r2, covered_tbs)\n render_functions(r2, covered_bbs, output_dir)\n base_path = os.path.join(project_name, 's2e-out-%d' % s2e_num, 'functions')\n return [[file_[0:-4], os.path.join(base_path, file_)] for file_ in os.\n listdir(output_dir)]\n",
"step-5": "\"\"\"\nCopyright (C) Adrian Herrera, 2017\n\nYou will need to install r2pipe and pydot:\n\n```\npip install r2pipe pydot\n```\n\"\"\"\n\nfrom __future__ import print_function\n\nimport glob\nimport json\nimport os\n\nimport pydot\nimport r2pipe\nimport s2e_web.S2E_settings as S2E_settings\n\n\ndef function_addrs(r2):\n \"\"\"\n Yield a list of all the function's start addresses.\n \"\"\"\n for addr in r2.cmdj('aflqj'):\n yield int(addr, 16)\n\n\ndef parse_tb_file(path, module):\n \"\"\"\n Parse a translation block coverage file generated by S2E's\n ``TranslationBlockCoverage`` plugin.\n \"\"\"\n with open(path, 'r') as f:\n try:\n tb_coverage_data = json.load(f)\n except Exception:\n print('WARN: Failed to parse translation block JSON file %s' % path)\n return None\n\n if not tb_coverage_data:\n print('WARN: Translation block JSON file %s is empty' % path)\n return None\n\n if module not in tb_coverage_data:\n print('WARN: Target %s not found in translation block JSON file %s' %\n (module, path))\n return None\n\n return tb_coverage_data[module]\n\n\ndef basic_block_coverage(r2, translation_blocks):\n \"\"\"\n Calculate the basic block coverage based on the covered TBs.\n\n Returns a set of *covered* basic block start addresses\n \"\"\"\n covered_bbs = set()\n\n for func_addr in function_addrs(r2):\n graph = r2.cmdj('agj 0x%x' % func_addr)\n assert len(graph) == 1\n graph = graph[0]\n\n for tb_start_addr, tb_end_addr in translation_blocks:\n for bb in graph['blocks']:\n bb_start_addr = bb['offset']\n bb_end_addr = bb_start_addr + bb['size']\n\n # Check if the translation block falls within a basic block OR\n # a basic block falls within a translation block\n if (bb_end_addr >= tb_start_addr >= bb_start_addr or\n bb_start_addr <= tb_end_addr <= bb_end_addr):\n covered_bbs.add(bb_start_addr)\n\n return covered_bbs\n\n\ndef render_functions(r2, covered_bbs, output_dir):\n \"\"\"\n Renders SVG graphs of each of the functions in the program. 
Basic blocks\n that were executed by S2E are coloured green.\n\n The resulting SVG images are written to `output_dir`.\n \"\"\"\n for func_addr in function_addrs(r2):\n # Get the function name\n func_name = r2.cmdj('agj 0x%x' % func_addr)[0]['name']\n\n dot_str = r2.cmd('ag 0x%x' % func_addr)\n dot = pydot.graph_from_dot_data(dot_str)\n if not dot:\n continue\n else:\n dot = dot[0]\n\n for node in dot.get_nodes():\n node_name = node.get_name()\n try:\n # XXX This is very hacky - need something more robust\n if node_name.startswith('\"'):\n node_name = node_name[1:-1]\n node_addr = int(node_name, 16)\n except ValueError:\n # Node name is not a hex string\n continue\n\n if node_addr in covered_bbs:\n node.set_fillcolor('darkolivegreen2')\n\n svg_path = os.path.join(output_dir, '%s_0x%x.svg' % (func_name, func_addr))\n with open(svg_path, 'wb') as f:\n svg = dot.create_svg()\n f.write(svg)\n\n\ndef generate_graph(s2e_output_dir, s2e_num, project_name):\n \"\"\"\n Generate the PNG graph for the analysis in the output_dir\n \"\"\"\n\n s2e_env_path = S2E_settings.S2E_ENVIRONMENT_FOLDER_PATH\n output_dir = os.path.join(s2e_output_dir, 'functions')\n os.makedirs(output_dir)\n\n # Check that the given S2E environment is legitimate\n if not os.path.isfile(os.path.join(s2e_env_path, 's2e.yaml')):\n print('ERROR: %s is not an S2E environment' % s2e_env_path)\n return\n\n # Check that the given project exists in the environment\n project_path = os.path.join(s2e_env_path, 'projects', project_name)\n if not os.path.isdir(project_path):\n print('ERROR: %s is not a valid project' % project_name)\n return\n\n # Check that the output directory exists\n if not os.path.isdir(output_dir):\n print('ERROR: %s is not a valid output directory' % output_dir)\n return\n\n # Check that the project has been executed at least once\n s2e_last_path = os.path.join(project_path, 's2e-last')\n if not os.path.isdir(s2e_last_path):\n print('ERROR: %s has no s2e-last' % project_name)\n return\n\n # Get all the TB coverage files\n tb_coverage_files = glob.glob(os.path.join(s2e_last_path, '*', 'tbcoverage-*.json')) + \\\n glob.glob(os.path.join(s2e_last_path, 'tbcoverage-*.json'))\n if not tb_coverage_files:\n print('ERROR: No translation block coverage files found in s2e-last. '\n 'Did you enable the ``TranslationBlockCoverage`` plugin in '\n 's2e-config.lua?')\n return\n\n # Parse the TB coverage files\n covered_tbs = set()\n for tb_coverage_file in tb_coverage_files:\n # XXX A project can have a different name to the target program\n tb_coverage_data = parse_tb_file(tb_coverage_file, project_name)\n if not tb_coverage_data:\n continue\n\n covered_tbs.update((start, end) for start, end, _ in tb_coverage_data)\n\n # Open the program in Radare and do the initial analysis\n # XXX A project can have a different name to the target program\n r2 = r2pipe.open(os.path.join(project_path, project_name))\n r2.cmd('aaa')\n\n # Calculate the basic block coverage and render the information as a set\n # of PNG images for each function\n covered_bbs = basic_block_coverage(r2, covered_tbs)\n render_functions(r2, covered_bbs, output_dir)\n\n base_path = os.path.join(project_name, 's2e-out-%d' % s2e_num, 'functions')\n return [[file_[0:-4], os.path.join(base_path, file_)] for file_ in os.listdir(output_dir)]\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
import bluetooth
import serial
import struct
# Definition of Bluetooth rfcomm socket
bd_addr = "98:D3:37:00:8D:39" # The address from the HC-05 sensor
port = 1
sock = bluetooth.BluetoothSocket(bluetooth.RFCOMM)
sock.connect((bd_addr,port))
# Definition of Serial port
ser = serial.Serial("/dev/ttyACM0", 57600)
def BT_DRIVING():
    # Forward each byte received over Bluetooth to the serial port.
    while True:
        data = ord(sock.recv(1024))
        packet = struct.pack('!B', data)
        ser.write(packet)
def BT_SIGNAL():
    # Wait for a control byte from the Bluetooth socket and report it.
    while True:
        data = ord(sock.recv(1024))
        if data == 24:
            return 24
        elif data == 25:
            return 25
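# Each helper below writes one fixed command byte (10-24) to the serial port.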
def FR30():
string = 10
string = struct.pack('!B',string)
ser.write(string)
def FR15():
string = 11
string = struct.pack('!B',string)
ser.write(string)
def FS00():
string = 12
string = struct.pack('!B',string)
ser.write(string)
def FL15():
string = 13
string = struct.pack('!B',string)
ser.write(string)
def FL30():
string = 14
string = struct.pack('!B',string)
ser.write(string)
def HR30():
string = 15
string = struct.pack('!B',string)
ser.write(string)
def HR15():
string = 16
string = struct.pack('!B',string)
ser.write(string)
def HS00():
string = 17
string = struct.pack('!B',string)
ser.write(string)
def HL15():
string = 18
string = struct.pack('!B',string)
ser.write(string)
def HL30():
string = 19
string = struct.pack('!B',string)
ser.write(string)
def BR30():
string = 20
string = struct.pack('!B',string)
ser.write(string)
def BR15():
string = 21
string = struct.pack('!B',string)
ser.write(string)
def BS00():
string = 22
string = struct.pack('!B',string)
ser.write(string)
def BL15():
string = 23
string = struct.pack('!B',string)
ser.write(string)
def BL30():
string = 24
string = struct.pack('!B',string)
ser.write(string)
|
normal
|
{
"blob_id": "605c78795b5a072d330d44a150f26ad410d9d084",
"index": 2962,
"step-1": "import bluetooth\nimport serial\nimport struct\n\n# Definition of Bluetooth rfcomm socket\nbd_addr = \"98:D3:37:00:8D:39\" # The address from the HC-05 sensor\nport = 1\nsock = bluetooth.BluetoothSocket(bluetooth.RFCOMM)\nsock.connect((bd_addr,port))\n\n# Definition of Serial port\nser = serial.Serial(\"/dev/ttyACM0\", 57600)\n\ndef BT_DRIVING():\n while True:\n data = ord(sock.recv(1024))\n String = ' '\n String = struct.pack('!B',data)\n ser.write(string)\n\ndef BT_SIGNAL():\n while True:\n data = ord(sock.recv(1024))\n String = ' '\n String = struct.pack('!B', data)\n if String == 24:\n return 24\n elif String = 25:\n return 25:\n\ndef FR30():\n string = 10\n string = struct.pack('!B',string)\n ser.write(string)\ndef FR15():\n string = 11\n string = struct.pack('!B',string)\n ser.write(string)\ndef FS00():\n string = 12\n string = struct.pack('!B',string)\n ser.write(string)\ndef FL15():\n string = 13\n string = struct.pack('!B',string)\n ser.write(string)\ndef FL30():\n string = 14\n string = struct.pack('!B',string)\n ser.write(string)\n\n\ndef HR30():\n string = 15\n string = struct.pack('!B',string)\n ser.write(string)\ndef HR15():\n string = 16\n string = struct.pack('!B',string)\n ser.write(string)\ndef HS00():\n string = 17\n string = struct.pack('!B',string)\n ser.write(string)\ndef HL15():\n string = 18\n string = struct.pack('!B',string)\n ser.write(string)\ndef HL30():\n string = 19\n string = struct.pack('!B',string)\n ser.write(string)\n\n\ndef BR30():\n string = 20\n string = struct.pack('!B',string)\n ser.write(string)\ndef BR15():\n string = 21\n string = struct.pack('!B',string)\n ser.write(string)\ndef BS00():\n string = 22\n string = struct.pack('!B',string)\n ser.write(string)\ndef BL15():\n string = 23\n string = struct.pack('!B',string)\n ser.write(string)\ndef BL30():\n string = 24\n string = struct.pack('!B',string)\n ser.write(string)\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
<|reserved_special_token_0|>
@with_tempfile
def test_invalid_call(path):
assert_raises(ValueError, create_sibling_github, 'bogus', dataset=path)
ds = Dataset(path).create()
assert_raises(gh.BadCredentialsException, ds.create_sibling_github,
'bogus', github_user='')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@with_tempfile
def test_invalid_call(path):
assert_raises(ValueError, create_sibling_github, 'bogus', dataset=path)
ds = Dataset(path).create()
assert_raises(gh.BadCredentialsException, ds.create_sibling_github,
'bogus', github_user='')
@with_tempfile
def test_dont_trip_over_missing_subds(path):
ds1 = Dataset(opj(path, 'ds1')).create()
ds2 = Dataset(opj(path, 'ds2')).create()
subds2 = ds1.install(source=ds2.path, path='subds2')
assert_true(subds2.is_installed())
assert_in('subds2', ds1.get_subdatasets())
subds2.uninstall(remove_handles=True, remove_history=True)
assert_in('subds2', ds1.get_subdatasets())
assert_false(subds2.is_installed())
ds1.save(files=['subds2'])
assert_raises(gh.BadCredentialsException, ds1.create_sibling_github,
'bogus', recursive=True, github_user='')
assert_not_in('github', ds1.repo.get_remotes())
ds1.repo.add_remote('github', 'http://nothere')
assert_raises(ValueError, ds1.create_sibling_github, 'bogus', recursive
=True, github_user='')
assert_raises(gh.BadCredentialsException, ds1.create_sibling_github,
'bogus', recursive=True, github_user='', existing='reconfigure')
assert_equal(ds1.create_sibling_github('bogus', recursive=True,
github_user='', existing='skip'), [])
<|reserved_special_token_1|>
<|reserved_special_token_0|>
try:
import github as gh
except ImportError:
assert_raises(MissingExternalDependency, create_sibling_github, 'some')
raise SkipTest
@with_tempfile
def test_invalid_call(path):
assert_raises(ValueError, create_sibling_github, 'bogus', dataset=path)
ds = Dataset(path).create()
assert_raises(gh.BadCredentialsException, ds.create_sibling_github,
'bogus', github_user='')
@with_tempfile
def test_dont_trip_over_missing_subds(path):
ds1 = Dataset(opj(path, 'ds1')).create()
ds2 = Dataset(opj(path, 'ds2')).create()
subds2 = ds1.install(source=ds2.path, path='subds2')
assert_true(subds2.is_installed())
assert_in('subds2', ds1.get_subdatasets())
subds2.uninstall(remove_handles=True, remove_history=True)
assert_in('subds2', ds1.get_subdatasets())
assert_false(subds2.is_installed())
ds1.save(files=['subds2'])
assert_raises(gh.BadCredentialsException, ds1.create_sibling_github,
'bogus', recursive=True, github_user='')
assert_not_in('github', ds1.repo.get_remotes())
ds1.repo.add_remote('github', 'http://nothere')
assert_raises(ValueError, ds1.create_sibling_github, 'bogus', recursive
=True, github_user='')
assert_raises(gh.BadCredentialsException, ds1.create_sibling_github,
'bogus', recursive=True, github_user='', existing='reconfigure')
assert_equal(ds1.create_sibling_github('bogus', recursive=True,
github_user='', existing='skip'), [])
<|reserved_special_token_1|>
<|reserved_special_token_0|>
from os.path import join as opj
from datalad.api import create_sibling_github
from datalad.api import Dataset
from datalad.support.exceptions import MissingExternalDependency
from datalad.tests.utils import with_tempfile
from nose.tools import assert_raises, assert_in, assert_true, assert_false, assert_not_in, assert_equal
from nose import SkipTest
try:
import github as gh
except ImportError:
assert_raises(MissingExternalDependency, create_sibling_github, 'some')
raise SkipTest
@with_tempfile
def test_invalid_call(path):
assert_raises(ValueError, create_sibling_github, 'bogus', dataset=path)
ds = Dataset(path).create()
assert_raises(gh.BadCredentialsException, ds.create_sibling_github,
'bogus', github_user='')
@with_tempfile
def test_dont_trip_over_missing_subds(path):
ds1 = Dataset(opj(path, 'ds1')).create()
ds2 = Dataset(opj(path, 'ds2')).create()
subds2 = ds1.install(source=ds2.path, path='subds2')
assert_true(subds2.is_installed())
assert_in('subds2', ds1.get_subdatasets())
subds2.uninstall(remove_handles=True, remove_history=True)
assert_in('subds2', ds1.get_subdatasets())
assert_false(subds2.is_installed())
ds1.save(files=['subds2'])
assert_raises(gh.BadCredentialsException, ds1.create_sibling_github,
'bogus', recursive=True, github_user='')
assert_not_in('github', ds1.repo.get_remotes())
ds1.repo.add_remote('github', 'http://nothere')
assert_raises(ValueError, ds1.create_sibling_github, 'bogus', recursive
=True, github_user='')
assert_raises(gh.BadCredentialsException, ds1.create_sibling_github,
'bogus', recursive=True, github_user='', existing='reconfigure')
assert_equal(ds1.create_sibling_github('bogus', recursive=True,
github_user='', existing='skip'), [])
<|reserved_special_token_1|>
# ex: set sts=4 ts=4 sw=4 noet:
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
#
# See COPYING file distributed along with the datalad package for the
# copyright and license terms.
#
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
"""Test create publication target github"""
from os.path import join as opj
# this must work with and without pygithub
from datalad.api import create_sibling_github
from datalad.api import Dataset
from datalad.support.exceptions import MissingExternalDependency
from datalad.tests.utils import with_tempfile
from nose.tools import assert_raises, assert_in, assert_true, assert_false, \
assert_not_in, assert_equal
from nose import SkipTest
try:
import github as gh
except ImportError:
# make sure that the command complains too
assert_raises(MissingExternalDependency, create_sibling_github, 'some')
raise SkipTest
@with_tempfile
def test_invalid_call(path):
# no dataset
assert_raises(ValueError, create_sibling_github, 'bogus', dataset=path)
ds = Dataset(path).create()
# no user
assert_raises(gh.BadCredentialsException, ds.create_sibling_github, 'bogus', github_user='')
@with_tempfile
def test_dont_trip_over_missing_subds(path):
ds1 = Dataset(opj(path, 'ds1')).create()
ds2 = Dataset(opj(path, 'ds2')).create()
subds2 = ds1.install(source=ds2.path, path='subds2')
assert_true(subds2.is_installed())
assert_in('subds2', ds1.get_subdatasets())
subds2.uninstall(remove_handles=True, remove_history=True)
assert_in('subds2', ds1.get_subdatasets())
assert_false(subds2.is_installed())
# this will deinit the submodule
ds1.save(files=['subds2'])
# see if it wants to talk to github (and fail), or if it trips over something
# before
assert_raises(gh.BadCredentialsException, ds1.create_sibling_github, 'bogus', recursive=True, github_user='')
# inject remote config prior run
assert_not_in('github', ds1.repo.get_remotes())
# fail on existing
ds1.repo.add_remote('github', 'http://nothere')
assert_raises(ValueError, ds1.create_sibling_github, 'bogus', recursive=True, github_user='')
# talk to github when existing is OK
assert_raises(gh.BadCredentialsException, ds1.create_sibling_github, 'bogus', recursive=True, github_user='', existing='reconfigure')
# return happy emptiness when all is skipped
assert_equal(ds1.create_sibling_github('bogus', recursive=True, github_user='', existing='skip'), [])
|
flexible
|
{
"blob_id": "035043460805b7fe92e078e05708d368130e3527",
"index": 8965,
"step-1": "<mask token>\n\n\n@with_tempfile\ndef test_invalid_call(path):\n assert_raises(ValueError, create_sibling_github, 'bogus', dataset=path)\n ds = Dataset(path).create()\n assert_raises(gh.BadCredentialsException, ds.create_sibling_github,\n 'bogus', github_user='')\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\n@with_tempfile\ndef test_invalid_call(path):\n assert_raises(ValueError, create_sibling_github, 'bogus', dataset=path)\n ds = Dataset(path).create()\n assert_raises(gh.BadCredentialsException, ds.create_sibling_github,\n 'bogus', github_user='')\n\n\n@with_tempfile\ndef test_dont_trip_over_missing_subds(path):\n ds1 = Dataset(opj(path, 'ds1')).create()\n ds2 = Dataset(opj(path, 'ds2')).create()\n subds2 = ds1.install(source=ds2.path, path='subds2')\n assert_true(subds2.is_installed())\n assert_in('subds2', ds1.get_subdatasets())\n subds2.uninstall(remove_handles=True, remove_history=True)\n assert_in('subds2', ds1.get_subdatasets())\n assert_false(subds2.is_installed())\n ds1.save(files=['subds2'])\n assert_raises(gh.BadCredentialsException, ds1.create_sibling_github,\n 'bogus', recursive=True, github_user='')\n assert_not_in('github', ds1.repo.get_remotes())\n ds1.repo.add_remote('github', 'http://nothere')\n assert_raises(ValueError, ds1.create_sibling_github, 'bogus', recursive\n =True, github_user='')\n assert_raises(gh.BadCredentialsException, ds1.create_sibling_github,\n 'bogus', recursive=True, github_user='', existing='reconfigure')\n assert_equal(ds1.create_sibling_github('bogus', recursive=True,\n github_user='', existing='skip'), [])\n",
"step-3": "<mask token>\ntry:\n import github as gh\nexcept ImportError:\n assert_raises(MissingExternalDependency, create_sibling_github, 'some')\n raise SkipTest\n\n\n@with_tempfile\ndef test_invalid_call(path):\n assert_raises(ValueError, create_sibling_github, 'bogus', dataset=path)\n ds = Dataset(path).create()\n assert_raises(gh.BadCredentialsException, ds.create_sibling_github,\n 'bogus', github_user='')\n\n\n@with_tempfile\ndef test_dont_trip_over_missing_subds(path):\n ds1 = Dataset(opj(path, 'ds1')).create()\n ds2 = Dataset(opj(path, 'ds2')).create()\n subds2 = ds1.install(source=ds2.path, path='subds2')\n assert_true(subds2.is_installed())\n assert_in('subds2', ds1.get_subdatasets())\n subds2.uninstall(remove_handles=True, remove_history=True)\n assert_in('subds2', ds1.get_subdatasets())\n assert_false(subds2.is_installed())\n ds1.save(files=['subds2'])\n assert_raises(gh.BadCredentialsException, ds1.create_sibling_github,\n 'bogus', recursive=True, github_user='')\n assert_not_in('github', ds1.repo.get_remotes())\n ds1.repo.add_remote('github', 'http://nothere')\n assert_raises(ValueError, ds1.create_sibling_github, 'bogus', recursive\n =True, github_user='')\n assert_raises(gh.BadCredentialsException, ds1.create_sibling_github,\n 'bogus', recursive=True, github_user='', existing='reconfigure')\n assert_equal(ds1.create_sibling_github('bogus', recursive=True,\n github_user='', existing='skip'), [])\n",
"step-4": "<mask token>\nfrom os.path import join as opj\nfrom datalad.api import create_sibling_github\nfrom datalad.api import Dataset\nfrom datalad.support.exceptions import MissingExternalDependency\nfrom datalad.tests.utils import with_tempfile\nfrom nose.tools import assert_raises, assert_in, assert_true, assert_false, assert_not_in, assert_equal\nfrom nose import SkipTest\ntry:\n import github as gh\nexcept ImportError:\n assert_raises(MissingExternalDependency, create_sibling_github, 'some')\n raise SkipTest\n\n\n@with_tempfile\ndef test_invalid_call(path):\n assert_raises(ValueError, create_sibling_github, 'bogus', dataset=path)\n ds = Dataset(path).create()\n assert_raises(gh.BadCredentialsException, ds.create_sibling_github,\n 'bogus', github_user='')\n\n\n@with_tempfile\ndef test_dont_trip_over_missing_subds(path):\n ds1 = Dataset(opj(path, 'ds1')).create()\n ds2 = Dataset(opj(path, 'ds2')).create()\n subds2 = ds1.install(source=ds2.path, path='subds2')\n assert_true(subds2.is_installed())\n assert_in('subds2', ds1.get_subdatasets())\n subds2.uninstall(remove_handles=True, remove_history=True)\n assert_in('subds2', ds1.get_subdatasets())\n assert_false(subds2.is_installed())\n ds1.save(files=['subds2'])\n assert_raises(gh.BadCredentialsException, ds1.create_sibling_github,\n 'bogus', recursive=True, github_user='')\n assert_not_in('github', ds1.repo.get_remotes())\n ds1.repo.add_remote('github', 'http://nothere')\n assert_raises(ValueError, ds1.create_sibling_github, 'bogus', recursive\n =True, github_user='')\n assert_raises(gh.BadCredentialsException, ds1.create_sibling_github,\n 'bogus', recursive=True, github_user='', existing='reconfigure')\n assert_equal(ds1.create_sibling_github('bogus', recursive=True,\n github_user='', existing='skip'), [])\n",
"step-5": "# ex: set sts=4 ts=4 sw=4 noet:\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the datalad package for the\n# copyright and license terms.\n#\n# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\"\"\"Test create publication target github\"\"\"\n\nfrom os.path import join as opj\n# this must with with and without pygithub\nfrom datalad.api import create_sibling_github\nfrom datalad.api import Dataset\nfrom datalad.support.exceptions import MissingExternalDependency\nfrom datalad.tests.utils import with_tempfile\nfrom nose.tools import assert_raises, assert_in, assert_true, assert_false, \\\n assert_not_in, assert_equal\nfrom nose import SkipTest\n\n\ntry:\n import github as gh\nexcept ImportError:\n # make sure that the command complains too\n assert_raises(MissingExternalDependency, create_sibling_github, 'some')\n raise SkipTest\n\n\n@with_tempfile\ndef test_invalid_call(path):\n # no dataset\n assert_raises(ValueError, create_sibling_github, 'bogus', dataset=path)\n ds = Dataset(path).create()\n # no user\n assert_raises(gh.BadCredentialsException, ds.create_sibling_github, 'bogus', github_user='')\n\n\n@with_tempfile\ndef test_dont_trip_over_missing_subds(path):\n ds1 = Dataset(opj(path, 'ds1')).create()\n ds2 = Dataset(opj(path, 'ds2')).create()\n subds2 = ds1.install(source=ds2.path, path='subds2')\n assert_true(subds2.is_installed())\n assert_in('subds2', ds1.get_subdatasets())\n subds2.uninstall(remove_handles=True, remove_history=True)\n assert_in('subds2', ds1.get_subdatasets())\n assert_false(subds2.is_installed())\n # this will deinit the submodule\n ds1.save(files=['subds2'])\n # see if it wants to talk to github (and fail), or if it trips over something\n # before\n assert_raises(gh.BadCredentialsException, ds1.create_sibling_github, 'bogus', recursive=True, github_user='')\n # inject remote config prior run\n assert_not_in('github', ds1.repo.get_remotes())\n # fail on existing\n ds1.repo.add_remote('github', 'http://nothere')\n assert_raises(ValueError, ds1.create_sibling_github, 'bogus', recursive=True, github_user='')\n # talk to github when existing is OK\n assert_raises(gh.BadCredentialsException, ds1.create_sibling_github, 'bogus', recursive=True, github_user='', existing='reconfigure')\n # return happy emptiness when all is skipped\n assert_equal(ds1.create_sibling_github('bogus', recursive=True, github_user='', existing='skip'), [])\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
# Stubs for binascii
# Based on http://docs.python.org/3.2/library/binascii.html
import sys
from typing import Union, Text
if sys.version_info < (3,):
# Python 2 accepts unicode ascii pretty much everywhere.
_Bytes = Text
_Ascii = Text
else:
# But since Python 3.3 ASCII-only unicode strings are accepted by the
# a2b_* functions.
_Bytes = bytes
_Ascii = Union[bytes, str]
def a2b_uu(string: _Ascii) -> bytes: ...
if sys.version_info >= (3, 7):
def b2a_uu(data: _Bytes, *, backtick: bool = ...) -> bytes: ...
else:
def b2a_uu(data: _Bytes) -> bytes: ...
def a2b_base64(string: _Ascii) -> bytes: ...
if sys.version_info >= (3, 6):
def b2a_base64(data: _Bytes, *, newline: bool = ...) -> bytes: ...
else:
def b2a_base64(data: _Bytes) -> bytes: ...
def a2b_qp(string: _Ascii, header: bool = ...) -> bytes: ...
def b2a_qp(data: _Bytes, quotetabs: bool = ..., istext: bool = ..., header: bool = ...) -> bytes: ...
def a2b_hqx(string: _Ascii) -> bytes: ...
def rledecode_hqx(data: _Bytes) -> bytes: ...
def rlecode_hqx(data: _Bytes) -> bytes: ...
def b2a_hqx(data: _Bytes) -> bytes: ...
def crc_hqx(data: _Bytes, crc: int) -> int: ...
def crc32(data: _Bytes, crc: int = ...) -> int: ...
def b2a_hex(data: _Bytes) -> bytes: ...
def hexlify(data: _Bytes) -> bytes: ...
def a2b_hex(hexstr: _Ascii) -> bytes: ...
def unhexlify(hexlify: _Ascii) -> bytes: ...
class Error(Exception): ...
class Incomplete(Exception): ...
|
normal
|
{
"blob_id": "9ba74c7ecbd20c59883aff4efdc7e0369ff65daf",
"index": 5267,
"step-1": "<mask token>\n\n\ndef a2b_base64(string: _Ascii) ->bytes:\n ...\n\n\n<mask token>\n\n\ndef a2b_qp(string: _Ascii, header: bool=...) ->bytes:\n ...\n\n\ndef b2a_qp(data: _Bytes, quotetabs: bool=..., istext: bool=..., header:\n bool=...) ->bytes:\n ...\n\n\ndef a2b_hqx(string: _Ascii) ->bytes:\n ...\n\n\ndef rledecode_hqx(data: _Bytes) ->bytes:\n ...\n\n\ndef rlecode_hqx(data: _Bytes) ->bytes:\n ...\n\n\ndef b2a_hqx(data: _Bytes) ->bytes:\n ...\n\n\ndef crc_hqx(data: _Bytes, crc: int) ->int:\n ...\n\n\n<mask token>\n\n\ndef b2a_hex(data: _Bytes) ->bytes:\n ...\n\n\ndef hexlify(data: _Bytes) ->bytes:\n ...\n\n\ndef a2b_hex(hexstr: _Ascii) ->bytes:\n ...\n\n\n<mask token>\n\n\nclass Error(Exception):\n ...\n\n\nclass Incomplete(Exception):\n ...\n",
"step-2": "<mask token>\n\n\ndef a2b_base64(string: _Ascii) ->bytes:\n ...\n\n\n<mask token>\n\n\ndef a2b_qp(string: _Ascii, header: bool=...) ->bytes:\n ...\n\n\ndef b2a_qp(data: _Bytes, quotetabs: bool=..., istext: bool=..., header:\n bool=...) ->bytes:\n ...\n\n\ndef a2b_hqx(string: _Ascii) ->bytes:\n ...\n\n\ndef rledecode_hqx(data: _Bytes) ->bytes:\n ...\n\n\ndef rlecode_hqx(data: _Bytes) ->bytes:\n ...\n\n\ndef b2a_hqx(data: _Bytes) ->bytes:\n ...\n\n\ndef crc_hqx(data: _Bytes, crc: int) ->int:\n ...\n\n\ndef crc32(data: _Bytes, crc: int=...) ->int:\n ...\n\n\ndef b2a_hex(data: _Bytes) ->bytes:\n ...\n\n\ndef hexlify(data: _Bytes) ->bytes:\n ...\n\n\ndef a2b_hex(hexstr: _Ascii) ->bytes:\n ...\n\n\ndef unhexlify(hexlify: _Ascii) ->bytes:\n ...\n\n\nclass Error(Exception):\n ...\n\n\nclass Incomplete(Exception):\n ...\n",
"step-3": "<mask token>\n\n\ndef a2b_uu(string: _Ascii) ->bytes:\n ...\n\n\n<mask token>\n\n\ndef a2b_base64(string: _Ascii) ->bytes:\n ...\n\n\n<mask token>\n\n\ndef a2b_qp(string: _Ascii, header: bool=...) ->bytes:\n ...\n\n\ndef b2a_qp(data: _Bytes, quotetabs: bool=..., istext: bool=..., header:\n bool=...) ->bytes:\n ...\n\n\ndef a2b_hqx(string: _Ascii) ->bytes:\n ...\n\n\ndef rledecode_hqx(data: _Bytes) ->bytes:\n ...\n\n\ndef rlecode_hqx(data: _Bytes) ->bytes:\n ...\n\n\ndef b2a_hqx(data: _Bytes) ->bytes:\n ...\n\n\ndef crc_hqx(data: _Bytes, crc: int) ->int:\n ...\n\n\ndef crc32(data: _Bytes, crc: int=...) ->int:\n ...\n\n\ndef b2a_hex(data: _Bytes) ->bytes:\n ...\n\n\ndef hexlify(data: _Bytes) ->bytes:\n ...\n\n\ndef a2b_hex(hexstr: _Ascii) ->bytes:\n ...\n\n\ndef unhexlify(hexlify: _Ascii) ->bytes:\n ...\n\n\nclass Error(Exception):\n ...\n\n\nclass Incomplete(Exception):\n ...\n",
"step-4": "<mask token>\nif sys.version_info < (3,):\n _Bytes = Text\n _Ascii = Text\nelse:\n _Bytes = bytes\n _Ascii = Union[bytes, str]\n\n\ndef a2b_uu(string: _Ascii) ->bytes:\n ...\n\n\nif sys.version_info >= (3, 7):\n\n def b2a_uu(data: _Bytes, *, backtick: bool=...) ->bytes:\n ...\nelse:\n\n def b2a_uu(data: _Bytes) ->bytes:\n ...\n\n\ndef a2b_base64(string: _Ascii) ->bytes:\n ...\n\n\nif sys.version_info >= (3, 6):\n\n def b2a_base64(data: _Bytes, *, newline: bool=...) ->bytes:\n ...\nelse:\n\n def b2a_base64(data: _Bytes) ->bytes:\n ...\n\n\ndef a2b_qp(string: _Ascii, header: bool=...) ->bytes:\n ...\n\n\ndef b2a_qp(data: _Bytes, quotetabs: bool=..., istext: bool=..., header:\n bool=...) ->bytes:\n ...\n\n\ndef a2b_hqx(string: _Ascii) ->bytes:\n ...\n\n\ndef rledecode_hqx(data: _Bytes) ->bytes:\n ...\n\n\ndef rlecode_hqx(data: _Bytes) ->bytes:\n ...\n\n\ndef b2a_hqx(data: _Bytes) ->bytes:\n ...\n\n\ndef crc_hqx(data: _Bytes, crc: int) ->int:\n ...\n\n\ndef crc32(data: _Bytes, crc: int=...) ->int:\n ...\n\n\ndef b2a_hex(data: _Bytes) ->bytes:\n ...\n\n\ndef hexlify(data: _Bytes) ->bytes:\n ...\n\n\ndef a2b_hex(hexstr: _Ascii) ->bytes:\n ...\n\n\ndef unhexlify(hexlify: _Ascii) ->bytes:\n ...\n\n\nclass Error(Exception):\n ...\n\n\nclass Incomplete(Exception):\n ...\n",
"step-5": "# Stubs for binascii\n\n# Based on http://docs.python.org/3.2/library/binascii.html\n\nimport sys\nfrom typing import Union, Text\n\nif sys.version_info < (3,):\n # Python 2 accepts unicode ascii pretty much everywhere.\n _Bytes = Text\n _Ascii = Text\nelse:\n # But since Python 3.3 ASCII-only unicode strings are accepted by the\n # a2b_* functions.\n _Bytes = bytes\n _Ascii = Union[bytes, str]\n\ndef a2b_uu(string: _Ascii) -> bytes: ...\nif sys.version_info >= (3, 7):\n def b2a_uu(data: _Bytes, *, backtick: bool = ...) -> bytes: ...\nelse:\n def b2a_uu(data: _Bytes) -> bytes: ...\ndef a2b_base64(string: _Ascii) -> bytes: ...\nif sys.version_info >= (3, 6):\n def b2a_base64(data: _Bytes, *, newline: bool = ...) -> bytes: ...\nelse:\n def b2a_base64(data: _Bytes) -> bytes: ...\ndef a2b_qp(string: _Ascii, header: bool = ...) -> bytes: ...\ndef b2a_qp(data: _Bytes, quotetabs: bool = ..., istext: bool = ..., header: bool = ...) -> bytes: ...\ndef a2b_hqx(string: _Ascii) -> bytes: ...\ndef rledecode_hqx(data: _Bytes) -> bytes: ...\ndef rlecode_hqx(data: _Bytes) -> bytes: ...\ndef b2a_hqx(data: _Bytes) -> bytes: ...\ndef crc_hqx(data: _Bytes, crc: int) -> int: ...\ndef crc32(data: _Bytes, crc: int = ...) -> int: ...\ndef b2a_hex(data: _Bytes) -> bytes: ...\ndef hexlify(data: _Bytes) -> bytes: ...\ndef a2b_hex(hexstr: _Ascii) -> bytes: ...\ndef unhexlify(hexlify: _Ascii) -> bytes: ...\n\nclass Error(Exception): ...\nclass Incomplete(Exception): ...\n",
"step-ids": [
13,
15,
16,
17,
19
]
}
|
[
13,
15,
16,
17,
19
] |
from psycopg2 import extras as ex
import psycopg2 as pg
import json
import datetime
import os
from functools import reduce
data_list = [{'projectName': '伊犁哈萨克自治州友谊医院开发区分院保洁服务项目', 'pingmu': '服务', 'purUnit': '新疆伊犁哈萨克自治州友谊医院', 'adminiArea': '新疆维吾尔自治区', 'bulletTime': '2020年09月02日 19:20', 'obtBidTime': '2020年09月02日至2020年09月09日每日上午:00:00 至 12:00\xa0\xa0下午:12:00 至 23:59(北京时间,法定节假日除外)', 'bidDocPrice': '¥500', 'obtBidLoc': '伊宁市经济合作区福安·西城国际1416室', 'staBidTime': '', 'staLoc': '伊宁市海棠路3号州财政局办公楼附楼1层州政府采购中心 一楼招标厅', 'budget': '¥807.000000万元(人民币)', 'proContact': '胡川', 'proPhone': '18690293446', 'purAddress': '伊宁市斯大林街92号', 'purUnitPhone': '0999-8024023', 'agentName': '新疆诚成工程项目管理有限公司', 'agentAddress': '详见公告正文', 'agentPhone': '18690293446'}
, {'projectName': '旅顺口医疗区医用氧气管道检修采购项目', 'pingmu': '服务/维修和保养服务/其他维修和保养服务', 'purUnit': '中国人民解放军联勤保障部队第九六七医院', 'adminiArea': '大连市', 'bulletTime': '2020年09月02日 19:52', 'obtBidTime': '2020年09月02日至2020年09月07日每日上午:8:30 至 11:30\xa0\xa0下午:13:00 至 16:30(北京时间,法定节假日除外)', 'budget': '¥0.000000万元(人民币)', 'proContact': '廖大成,尹辉', 'proPhone': '0411-80841295 0411-80841296', 'purAddress': '辽宁省大连市西岗区胜利路80号', 'purUnitPhone': '廖大成,尹辉 0411-80841295 0411-80841296', 'agentName': '中国人民解放军联勤保障部队第九六七医院', 'agentAddress': '辽宁省大连市西岗区胜利路80号', 'agentPhone': '廖大成,尹辉 0411-80841295 0411-80841296', 'appendix': '{"2.报价书氧气管道检修.docx": "http://www.ccgp.gov.cn/oss/download?uuid=88FCEC822374C5002F6DD48B15DC44", "3.货物指标及要求氧气管道检修.docx": "http://www.ccgp.gov.cn/oss/download?uuid=2773DFCD00839B5E034DA43339EDF1"}'}
]
dict_tmp={}
values_list = []
result = []
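# values_list accumulates the rows that prosql() bulk-inserts via execute_values.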
def processJson(dic):
dicobj = json.loads(dic)
print(dicobj)
for k,v in dicobj.items():
dict_tmp = {}
dict_tmp["file_name"] = k
dict_tmp["urls"] =v
print(k)
print(v)
result.append(dict_tmp)
# dict_tmp.clear()
return result
def procesV():
for i in data_list:
if "appendix" in i.keys():
appendix = i["appendix"]
if appendix != "":
fj = processJson(i["appendix"])
print(fj)
fjs = json.dumps(fj,ensure_ascii=False)
values_list.append(("testtest",fjs))
def prosql():
    # A bare %s placeholder after VALUES; execute_values expands it into row tuples.
hostname = '172.18.11.26'
username = 'postgres'
password = 'postgres_cnhis@#$'
database = 'ai'
conn = pg.connect(database=database, user=username, password=password, host=hostname, port="5432")
cursor = conn.cursor()
procesV()
sql = '''insert into ho_sysnc_third_customer_data("purchased_project_name","fj_json")
values %s
'''
    # The page_size argument defaults to 100 and caps the number of rows per statement;
    # if len(argslist) exceeds page_size, the function runs at most len(argslist)/page_size + 1 statements.
ex.execute_values(cursor, sql, values_list, page_size=10000)
conn.commit()
conn.close()
cursor.close()
if __name__ =='__main__':
prosql()
# procesV()
|
normal
|
{
"blob_id": "e9af8f7830be7db3ca57b0a24de48ef7fcb08d6c",
"index": 8453,
"step-1": "<mask token>\n\n\ndef processJson(dic):\n dicobj = json.loads(dic)\n print(dicobj)\n for k, v in dicobj.items():\n dict_tmp = {}\n dict_tmp['file_name'] = k\n dict_tmp['urls'] = v\n print(k)\n print(v)\n result.append(dict_tmp)\n return result\n\n\ndef procesV():\n for i in data_list:\n if 'appendix' in i.keys():\n appendix = i['appendix']\n if appendix != '':\n fj = processJson(i['appendix'])\n print(fj)\n fjs = json.dumps(fj, ensure_ascii=False)\n values_list.append(('testtest', fjs))\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef processJson(dic):\n dicobj = json.loads(dic)\n print(dicobj)\n for k, v in dicobj.items():\n dict_tmp = {}\n dict_tmp['file_name'] = k\n dict_tmp['urls'] = v\n print(k)\n print(v)\n result.append(dict_tmp)\n return result\n\n\ndef procesV():\n for i in data_list:\n if 'appendix' in i.keys():\n appendix = i['appendix']\n if appendix != '':\n fj = processJson(i['appendix'])\n print(fj)\n fjs = json.dumps(fj, ensure_ascii=False)\n values_list.append(('testtest', fjs))\n\n\ndef prosql():\n hostname = '172.18.11.26'\n username = 'postgres'\n password = 'postgres_cnhis@#$'\n database = 'ai'\n conn = pg.connect(database=database, user=username, password=password,\n host=hostname, port='5432')\n cursor = conn.cursor()\n procesV()\n sql = \"\"\"insert into ho_sysnc_third_customer_data(\"purchased_project_name\",\"fj_json\")\n values %s\n \"\"\"\n ex.execute_values(cursor, sql, values_list, page_size=10000)\n conn.commit()\n conn.close()\n cursor.close()\n\n\nif __name__ == '__main__':\n prosql()\n",
"step-3": "<mask token>\ndata_list = [{'projectName': '伊犁哈萨克自治州友谊医院开发区分院保洁服务项目', 'pingmu': '服务',\n 'purUnit': '新疆伊犁哈萨克自治州友谊医院', 'adminiArea': '新疆维吾尔自治区', 'bulletTime':\n '2020年09月02日 19:20', 'obtBidTime':\n '2020年09月02日至2020年09月09日每日上午:00:00 至 12:00\\xa0\\xa0下午:12:00 至 23:59(北京时间,法定节假日除外)'\n , 'bidDocPrice': '¥500', 'obtBidLoc': '伊宁市经济合作区福安·西城国际1416室',\n 'staBidTime': '', 'staLoc': '伊宁市海棠路3号州财政局办公楼附楼1层州政府采购中心 一楼招标厅',\n 'budget': '¥807.000000万元(人民币)', 'proContact': '胡川', 'proPhone':\n '18690293446', 'purAddress': '伊宁市斯大林街92号', 'purUnitPhone':\n '0999-8024023', 'agentName': '新疆诚成工程项目管理有限公司', 'agentAddress': '详见公告正文',\n 'agentPhone': '18690293446'}, {'projectName': '旅顺口医疗区医用氧气管道检修采购项目',\n 'pingmu': '服务/维修和保养服务/其他维修和保养服务', 'purUnit': '中国人民解放军联勤保障部队第九六七医院',\n 'adminiArea': '大连市', 'bulletTime': '2020年09月02日 19:52', 'obtBidTime':\n '2020年09月02日至2020年09月07日每日上午:8:30 至 11:30\\xa0\\xa0下午:13:00 至 16:30(北京时间,法定节假日除外)'\n , 'budget': '¥0.000000万元(人民币)', 'proContact': '廖大成,尹辉', 'proPhone':\n '0411-80841295 0411-80841296', 'purAddress': '辽宁省大连市西岗区胜利路80号',\n 'purUnitPhone': '廖大成,尹辉 0411-80841295 0411-80841296', 'agentName':\n '中国人民解放军联勤保障部队第九六七医院', 'agentAddress': '辽宁省大连市西岗区胜利路80号', 'agentPhone':\n '廖大成,尹辉 0411-80841295 0411-80841296', 'appendix':\n '{\"2.报价书氧气管道检修.docx\": \"http://www.ccgp.gov.cn/oss/download?uuid=88FCEC822374C5002F6DD48B15DC44\", \"3.货物指标及要求氧气管道检修.docx\": \"http://www.ccgp.gov.cn/oss/download?uuid=2773DFCD00839B5E034DA43339EDF1\"}'\n }]\ndict_tmp = {}\nvalues_list = []\nresult = []\n\n\ndef processJson(dic):\n dicobj = json.loads(dic)\n print(dicobj)\n for k, v in dicobj.items():\n dict_tmp = {}\n dict_tmp['file_name'] = k\n dict_tmp['urls'] = v\n print(k)\n print(v)\n result.append(dict_tmp)\n return result\n\n\ndef procesV():\n for i in data_list:\n if 'appendix' in i.keys():\n appendix = i['appendix']\n if appendix != '':\n fj = processJson(i['appendix'])\n print(fj)\n fjs = json.dumps(fj, ensure_ascii=False)\n values_list.append(('testtest', fjs))\n\n\ndef prosql():\n hostname = '172.18.11.26'\n username = 'postgres'\n password = 'postgres_cnhis@#$'\n database = 'ai'\n conn = pg.connect(database=database, user=username, password=password,\n host=hostname, port='5432')\n cursor = conn.cursor()\n procesV()\n sql = \"\"\"insert into ho_sysnc_third_customer_data(\"purchased_project_name\",\"fj_json\")\n values %s\n \"\"\"\n ex.execute_values(cursor, sql, values_list, page_size=10000)\n conn.commit()\n conn.close()\n cursor.close()\n\n\nif __name__ == '__main__':\n prosql()\n",
"step-4": "from psycopg2 import extras as ex\nimport psycopg2 as pg\nimport json\nimport datetime\nimport os\nfrom functools import reduce\ndata_list = [{'projectName': '伊犁哈萨克自治州友谊医院开发区分院保洁服务项目', 'pingmu': '服务',\n 'purUnit': '新疆伊犁哈萨克自治州友谊医院', 'adminiArea': '新疆维吾尔自治区', 'bulletTime':\n '2020年09月02日 19:20', 'obtBidTime':\n '2020年09月02日至2020年09月09日每日上午:00:00 至 12:00\\xa0\\xa0下午:12:00 至 23:59(北京时间,法定节假日除外)'\n , 'bidDocPrice': '¥500', 'obtBidLoc': '伊宁市经济合作区福安·西城国际1416室',\n 'staBidTime': '', 'staLoc': '伊宁市海棠路3号州财政局办公楼附楼1层州政府采购中心 一楼招标厅',\n 'budget': '¥807.000000万元(人民币)', 'proContact': '胡川', 'proPhone':\n '18690293446', 'purAddress': '伊宁市斯大林街92号', 'purUnitPhone':\n '0999-8024023', 'agentName': '新疆诚成工程项目管理有限公司', 'agentAddress': '详见公告正文',\n 'agentPhone': '18690293446'}, {'projectName': '旅顺口医疗区医用氧气管道检修采购项目',\n 'pingmu': '服务/维修和保养服务/其他维修和保养服务', 'purUnit': '中国人民解放军联勤保障部队第九六七医院',\n 'adminiArea': '大连市', 'bulletTime': '2020年09月02日 19:52', 'obtBidTime':\n '2020年09月02日至2020年09月07日每日上午:8:30 至 11:30\\xa0\\xa0下午:13:00 至 16:30(北京时间,法定节假日除外)'\n , 'budget': '¥0.000000万元(人民币)', 'proContact': '廖大成,尹辉', 'proPhone':\n '0411-80841295 0411-80841296', 'purAddress': '辽宁省大连市西岗区胜利路80号',\n 'purUnitPhone': '廖大成,尹辉 0411-80841295 0411-80841296', 'agentName':\n '中国人民解放军联勤保障部队第九六七医院', 'agentAddress': '辽宁省大连市西岗区胜利路80号', 'agentPhone':\n '廖大成,尹辉 0411-80841295 0411-80841296', 'appendix':\n '{\"2.报价书氧气管道检修.docx\": \"http://www.ccgp.gov.cn/oss/download?uuid=88FCEC822374C5002F6DD48B15DC44\", \"3.货物指标及要求氧气管道检修.docx\": \"http://www.ccgp.gov.cn/oss/download?uuid=2773DFCD00839B5E034DA43339EDF1\"}'\n }]\ndict_tmp = {}\nvalues_list = []\nresult = []\n\n\ndef processJson(dic):\n dicobj = json.loads(dic)\n print(dicobj)\n for k, v in dicobj.items():\n dict_tmp = {}\n dict_tmp['file_name'] = k\n dict_tmp['urls'] = v\n print(k)\n print(v)\n result.append(dict_tmp)\n return result\n\n\ndef procesV():\n for i in data_list:\n if 'appendix' in i.keys():\n appendix = i['appendix']\n if appendix != '':\n fj = processJson(i['appendix'])\n print(fj)\n fjs = json.dumps(fj, ensure_ascii=False)\n values_list.append(('testtest', fjs))\n\n\ndef prosql():\n hostname = '172.18.11.26'\n username = 'postgres'\n password = 'postgres_cnhis@#$'\n database = 'ai'\n conn = pg.connect(database=database, user=username, password=password,\n host=hostname, port='5432')\n cursor = conn.cursor()\n procesV()\n sql = \"\"\"insert into ho_sysnc_third_customer_data(\"purchased_project_name\",\"fj_json\")\n values %s\n \"\"\"\n ex.execute_values(cursor, sql, values_list, page_size=10000)\n conn.commit()\n conn.close()\n cursor.close()\n\n\nif __name__ == '__main__':\n prosql()\n",
"step-5": "from psycopg2 import extras as ex\nimport psycopg2 as pg\nimport json\nimport datetime\nimport os\nfrom functools import reduce\n\n\ndata_list = [{'projectName': '伊犁哈萨克自治州友谊医院开发区分院保洁服务项目', 'pingmu': '服务', 'purUnit': '新疆伊犁哈萨克自治州友谊医院', 'adminiArea': '新疆维吾尔自治区', 'bulletTime': '2020年09月02日 19:20', 'obtBidTime': '2020年09月02日至2020年09月09日每日上午:00:00 至 12:00\\xa0\\xa0下午:12:00 至 23:59(北京时间,法定节假日除外)', 'bidDocPrice': '¥500', 'obtBidLoc': '伊宁市经济合作区福安·西城国际1416室', 'staBidTime': '', 'staLoc': '伊宁市海棠路3号州财政局办公楼附楼1层州政府采购中心 一楼招标厅', 'budget': '¥807.000000万元(人民币)', 'proContact': '胡川', 'proPhone': '18690293446', 'purAddress': '伊宁市斯大林街92号', 'purUnitPhone': '0999-8024023', 'agentName': '新疆诚成工程项目管理有限公司', 'agentAddress': '详见公告正文', 'agentPhone': '18690293446'}\n , {'projectName': '旅顺口医疗区医用氧气管道检修采购项目', 'pingmu': '服务/维修和保养服务/其他维修和保养服务', 'purUnit': '中国人民解放军联勤保障部队第九六七医院', 'adminiArea': '大连市', 'bulletTime': '2020年09月02日 19:52', 'obtBidTime': '2020年09月02日至2020年09月07日每日上午:8:30 至 11:30\\xa0\\xa0下午:13:00 至 16:30(北京时间,法定节假日除外)', 'budget': '¥0.000000万元(人民币)', 'proContact': '廖大成,尹辉', 'proPhone': '0411-80841295 0411-80841296', 'purAddress': '辽宁省大连市西岗区胜利路80号', 'purUnitPhone': '廖大成,尹辉 0411-80841295 0411-80841296', 'agentName': '中国人民解放军联勤保障部队第九六七医院', 'agentAddress': '辽宁省大连市西岗区胜利路80号', 'agentPhone': '廖大成,尹辉 0411-80841295 0411-80841296', 'appendix': '{\"2.报价书氧气管道检修.docx\": \"http://www.ccgp.gov.cn/oss/download?uuid=88FCEC822374C5002F6DD48B15DC44\", \"3.货物指标及要求氧气管道检修.docx\": \"http://www.ccgp.gov.cn/oss/download?uuid=2773DFCD00839B5E034DA43339EDF1\"}'}\n ]\n\n\ndict_tmp={}\nvalues_list = []\nresult = []\ndef processJson(dic):\n dicobj = json.loads(dic)\n print(dicobj)\n for k,v in dicobj.items():\n dict_tmp = {}\n dict_tmp[\"file_name\"] = k\n dict_tmp[\"urls\"] =v\n print(k)\n print(v)\n result.append(dict_tmp)\n # dict_tmp.clear()\n return result\n\ndef procesV():\n for i in data_list:\n if \"appendix\" in i.keys():\n appendix = i[\"appendix\"]\n if appendix != \"\":\n fj = processJson(i[\"appendix\"])\n print(fj)\n fjs = json.dumps(fj,ensure_ascii=False)\n values_list.append((\"testtest\",fjs))\n\ndef prosql():\n # values 后面直接%s\n hostname = '172.18.11.26'\n username = 'postgres'\n password = 'postgres_cnhis@#$'\n database = 'ai'\n conn = pg.connect(database=database, user=username, password=password, host=hostname, port=\"5432\")\n cursor = conn.cursor()\n procesV()\n sql = '''insert into ho_sysnc_third_customer_data(\"purchased_project_name\",\"fj_json\")\n values %s\n '''\n # 其中函数中的page_size参数默认为100,表示每个statement包含的最大条目数,\n # 如果传过来的argslist长度大于page_size,则该函数最多执行len(argslist)/page_size + 1次。\n ex.execute_values(cursor, sql, values_list, page_size=10000)\n conn.commit()\n\n conn.close()\n cursor.close()\n\n\n\n\nif __name__ =='__main__':\n prosql()\n # procesV()\n",
"step-ids": [
2,
4,
5,
6,
7
]
}
|
[
2,
4,
5,
6,
7
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
species(label='C=C([CH]C)C(=C)[CH]C(24182)', structure=SMILES(
'[CH2]C(=CC)C([CH2])=CC'), E0=(249.687, 'kJ/mol'), modes=[
HarmonicOscillator(frequencies=([325, 375, 415, 465, 420, 450, 1700,
1750, 2750, 2770, 2790, 2810, 2830, 2850, 1350, 1400, 1450, 1500, 700,
800, 1000, 1100, 1350, 1400, 900, 1100, 3000, 3033.33, 3066.67, 3100,
415, 465, 780, 850, 1435, 1475, 900, 1100, 2995, 3025, 975, 1000, 1300,
1375, 400, 500, 1630, 1680, 180], 'cm^-1')), HinderedRotor(inertia=(
0.735277, 'amu*angstrom^2'), symmetry=1, barrier=(16.9055, 'kJ/mol'),
semiclassical=False), HinderedRotor(inertia=(0.0632434,
'amu*angstrom^2'), symmetry=1, barrier=(29.514, 'kJ/mol'),
semiclassical=False), HinderedRotor(inertia=(0.737545, 'amu*angstrom^2'
), symmetry=1, barrier=(16.9576, 'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.732781, 'amu*angstrom^2'), symmetry=1, barrier
=(16.8481, 'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(
0.739219, 'amu*angstrom^2'), symmetry=1, barrier=(16.9961, 'kJ/mol'),
semiclassical=False)], spinMultiplicity=3, opticalIsomers=1,
molecularWeight=(108.181, 'amu'), energyTransferModel=
SingleExponentialDown(alpha0=(3.5886, 'kJ/mol'), T0=(300, 'K'), n=0.85),
thermo=NASA(polynomials=[NASAPolynomial(coeffs=[-0.384005, 0.0840749, -
5.09991e-05, 5.50851e-09, 4.14197e-12, 30198.9, 28.4131], Tmin=(100,
'K'), Tmax=(1039.09, 'K')), NASAPolynomial(coeffs=[18.1326, 0.0354522,
-1.35159e-05, 2.44392e-09, -1.69358e-13, 25127.7, -67.5143], Tmin=(
1039.09, 'K'), Tmax=(5000, 'K'))], Tmin=(100, 'K'), Tmax=(5000, 'K'),
E0=(249.687, 'kJ/mol'), Cp0=(33.2579, 'J/(mol*K)'), CpInf=(461.453,
'J/(mol*K)'), comment=
'Thermo group additivity estimation: group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + radical(Allyl_P) + radical(Allyl_P)'
))
species(label='CH3CHCCH2(18175)', structure=SMILES('C=C=CC'), E0=(145.615,
'kJ/mol'), modes=[HarmonicOscillator(frequencies=([2950, 3100, 1380,
975, 1025, 1650, 540, 610, 2055, 2750, 2800, 2850, 1350, 1500, 750,
1050, 1375, 1000, 3010, 987.5, 1337.5, 450, 1655], 'cm^-1')),
HinderedRotor(inertia=(0.759584, 'amu*angstrom^2'), symmetry=1, barrier
=(17.4643, 'kJ/mol'), semiclassical=False)], spinMultiplicity=1,
opticalIsomers=1, molecularWeight=(54.0904, 'amu'), collisionModel=
TransportData(shapeIndex=2, epsilon=(2996.71, 'J/mol'), sigma=(5.18551,
'angstroms'), dipoleMoment=(0, 'C*m'), polarizability=(0, 'angstroms^3'
), rotrelaxcollnum=0, comment=
'Epsilon & sigma estimated with Tc=468.08 K, Pc=48.77 bar (from Joback method)'
), energyTransferModel=SingleExponentialDown(alpha0=(3.5886, 'kJ/mol'),
T0=(300, 'K'), n=0.85), thermo=NASA(polynomials=[NASAPolynomial(coeffs=
[2.74635, 0.0218189, 8.22353e-06, -2.14768e-08, 8.55624e-12, 17563.6,
12.7381], Tmin=(100, 'K'), Tmax=(1025.6, 'K')), NASAPolynomial(coeffs=[
6.82078, 0.0192338, -7.45622e-06, 1.36536e-09, -9.53195e-14, 16028, -
10.4333], Tmin=(1025.6, 'K'), Tmax=(5000, 'K'))], Tmin=(100, 'K'), Tmax
=(5000, 'K'), E0=(145.615, 'kJ/mol'), Cp0=(33.2579, 'J/(mol*K)'), CpInf
=(228.648, 'J/(mol*K)'), label='CH3CHCCH2', comment=
'Thermo library: DFT_QCI_thermo'))
species(label='[CH2]C1([CH]C)CC1=CC(25275)', structure=SMILES(
'[CH2]C1([CH]C)CC1=CC'), E0=(462.221, 'kJ/mol'), spinMultiplicity=1,
opticalIsomers=1, molecularWeight=(108.181, 'amu'), energyTransferModel
=SingleExponentialDown(alpha0=(3.5886, 'kJ/mol'), T0=(300, 'K'), n=0.85
), thermo=NASA(polynomials=[NASAPolynomial(coeffs=[0.263258, 0.0692237,
-2.26363e-05, -1.35463e-08, 8.13734e-12, 55737.7, 31.4039], Tmin=(100,
'K'), Tmax=(1105.46, 'K')), NASAPolynomial(coeffs=[15.171, 0.0400578, -
1.66801e-05, 3.13624e-09, -2.2049e-13, 50927.8, -48.8594], Tmin=(
1105.46, 'K'), Tmax=(5000, 'K'))], Tmin=(100, 'K'), Tmax=(5000, 'K'),
E0=(462.221, 'kJ/mol'), Cp0=(33.2579, 'J/(mol*K)'), CpInf=(465.61,
'J/(mol*K)'), comment=
'Thermo group additivity estimation: group(Cs-(Cds-Cds)CsCsCs) + group(Cs-CsCsHH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsCs) + group(Cds-CdsCsH) + ring(Methylene_cyclopropane) + radical(Neopentyl) + radical(Cs_S)'
))
species(label='C=[C][CH]C(18176)', structure=SMILES('[CH2][C]=CC'), E0=(
361.056, 'kJ/mol'), modes=[HarmonicOscillator(frequencies=([1685, 370,
2750, 2800, 2850, 1350, 1500, 750, 1050, 1375, 1000, 3000, 3100, 440,
815, 1455, 1000, 3010, 987.5, 1337.5, 450, 1655], 'cm^-1')),
HinderedRotor(inertia=(0.352622, 'amu*angstrom^2'), symmetry=1, barrier
=(8.10748, 'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(
0.828631, 'amu*angstrom^2'), symmetry=1, barrier=(19.0519, 'kJ/mol'),
semiclassical=False)], spinMultiplicity=3, opticalIsomers=1,
molecularWeight=(54.0904, 'amu'), energyTransferModel=
SingleExponentialDown(alpha0=(3.5886, 'kJ/mol'), T0=(300, 'K'), n=0.85),
thermo=NASA(polynomials=[NASAPolynomial(coeffs=[2.42015, 0.030446, -
1.69076e-05, 4.64684e-09, -5.12013e-13, 43485.7, 14.8304], Tmin=(100,
'K'), Tmax=(2065.83, 'K')), NASAPolynomial(coeffs=[10.7464, 0.014324, -
5.20136e-06, 8.69079e-10, -5.48385e-14, 40045.6, -31.3799], Tmin=(
2065.83, 'K'), Tmax=(5000, 'K'))], Tmin=(100, 'K'), Tmax=(5000, 'K'),
E0=(361.056, 'kJ/mol'), Cp0=(33.2579, 'J/(mol*K)'), CpInf=(274.378,
'J/(mol*K)'), comment=
'Thermo library: DFT_QCI_thermo + radical(Cds_S) + radical(Allyl_P)'))
species(label='[CH2]C(=CC)C(C)=[C]C(25412)', structure=SMILES(
'[CH2]C(=CC)C(C)=[C]C'), E0=(336.03, 'kJ/mol'), modes=[
HarmonicOscillator(frequencies=([325, 375, 415, 465, 420, 450, 1700,
1750, 1685, 370, 2750, 2762.5, 2775, 2787.5, 2800, 2812.5, 2825, 2837.5,
2850, 1350, 1380, 1410, 1440, 1470, 1500, 700, 750, 800, 1000, 1050,
1100, 1350, 1375, 1400, 900, 1000, 1100, 3000, 3100, 440, 815, 1455,
1000, 3010, 987.5, 1337.5, 450, 1655, 222.04], 'cm^-1')), HinderedRotor
(inertia=(0.395973, 'amu*angstrom^2'), symmetry=1, barrier=(13.8694,
'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.396086,
'amu*angstrom^2'), symmetry=1, barrier=(13.8683, 'kJ/mol'),
semiclassical=False), HinderedRotor(inertia=(0.395737, 'amu*angstrom^2'
), symmetry=1, barrier=(13.8691, 'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.395039, 'amu*angstrom^2'), symmetry=1, barrier
=(13.8689, 'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(
0.395901, 'amu*angstrom^2'), symmetry=1, barrier=(13.8689, 'kJ/mol'),
semiclassical=False)], spinMultiplicity=3, opticalIsomers=1,
molecularWeight=(108.181, 'amu'), energyTransferModel=
SingleExponentialDown(alpha0=(3.5886, 'kJ/mol'), T0=(300, 'K'), n=0.85),
thermo=NASA(polynomials=[NASAPolynomial(coeffs=[-0.116365, 0.0876489, -
7.20737e-05, 3.21805e-08, -5.96317e-12, 40565.5, 28.3373], Tmin=(100,
'K'), Tmax=(1264.63, 'K')), NASAPolynomial(coeffs=[14.5979, 0.041109, -
1.68732e-05, 3.08148e-09, -2.10818e-13, 36843.8, -46.1055], Tmin=(
1264.63, 'K'), Tmax=(5000, 'K'))], Tmin=(100, 'K'), Tmax=(5000, 'K'),
E0=(336.03, 'kJ/mol'), Cp0=(33.2579, 'J/(mol*K)'), CpInf=(461.453,
'J/(mol*K)'), comment=
'Thermo group additivity estimation: group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + radical(Cds_S) + radical(Allyl_P)'
))
species(label='[CH2]C(=[C]C)C(C)=CC(25413)', structure=SMILES(
'[CH2]C(=[C]C)C(C)=CC'), E0=(336.03, 'kJ/mol'), modes=[
HarmonicOscillator(frequencies=([325, 375, 415, 465, 420, 450, 1700,
1750, 1685, 370, 2750, 2762.5, 2775, 2787.5, 2800, 2812.5, 2825, 2837.5,
2850, 1350, 1380, 1410, 1440, 1470, 1500, 700, 750, 800, 1000, 1050,
1100, 1350, 1375, 1400, 900, 1000, 1100, 3000, 3100, 440, 815, 1455,
1000, 3010, 987.5, 1337.5, 450, 1655, 222.04], 'cm^-1')), HinderedRotor
(inertia=(0.395973, 'amu*angstrom^2'), symmetry=1, barrier=(13.8694,
'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.396086,
'amu*angstrom^2'), symmetry=1, barrier=(13.8683, 'kJ/mol'),
semiclassical=False), HinderedRotor(inertia=(0.395737, 'amu*angstrom^2'
), symmetry=1, barrier=(13.8691, 'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.395039, 'amu*angstrom^2'), symmetry=1, barrier
=(13.8689, 'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(
0.395901, 'amu*angstrom^2'), symmetry=1, barrier=(13.8689, 'kJ/mol'),
semiclassical=False)], spinMultiplicity=3, opticalIsomers=1,
molecularWeight=(108.181, 'amu'), energyTransferModel=
SingleExponentialDown(alpha0=(3.5886, 'kJ/mol'), T0=(300, 'K'), n=0.85),
thermo=NASA(polynomials=[NASAPolynomial(coeffs=[-0.116365, 0.0876489, -
7.20737e-05, 3.21805e-08, -5.96317e-12, 40565.5, 28.3373], Tmin=(100,
'K'), Tmax=(1264.63, 'K')), NASAPolynomial(coeffs=[14.5979, 0.041109, -
1.68732e-05, 3.08148e-09, -2.10818e-13, 36843.8, -46.1055], Tmin=(
1264.63, 'K'), Tmax=(5000, 'K'))], Tmin=(100, 'K'), Tmax=(5000, 'K'),
E0=(336.03, 'kJ/mol'), Cp0=(33.2579, 'J/(mol*K)'), CpInf=(461.453,
'J/(mol*K)'), comment=
'Thermo group additivity estimation: group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + radical(Allyl_P) + radical(Cds_S)'
))
species(label='[CH2]C(=CC)[C](C)C=C(24605)', structure=SMILES(
'[CH2]C=C(C)C([CH2])=CC'), E0=(216.244, 'kJ/mol'), modes=[
HarmonicOscillator(frequencies=([325, 375, 415, 465, 420, 450, 1700,
1750, 2750, 2770, 2790, 2810, 2830, 2850, 1350, 1400, 1450, 1500, 700,
800, 1000, 1100, 1350, 1400, 900, 1100, 3000, 3033.33, 3066.67, 3100,
415, 465, 780, 850, 1435, 1475, 900, 1100, 2995, 3025, 975, 1000, 1300,
1375, 400, 500, 1630, 1680, 180], 'cm^-1')), HinderedRotor(inertia=(
0.712083, 'amu*angstrom^2'), symmetry=1, barrier=(16.3722, 'kJ/mol'),
semiclassical=False), HinderedRotor(inertia=(0.555659, 'amu*angstrom^2'
), symmetry=1, barrier=(96.3851, 'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.0202512, 'amu*angstrom^2'), symmetry=1,
barrier=(16.3711, 'kJ/mol'), semiclassical=False), HinderedRotor(
inertia=(0.712008, 'amu*angstrom^2'), symmetry=1, barrier=(16.3705,
'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(4.19211,
'amu*angstrom^2'), symmetry=1, barrier=(96.3849, 'kJ/mol'),
semiclassical=False)], spinMultiplicity=3, opticalIsomers=1,
molecularWeight=(108.181, 'amu'), energyTransferModel=
SingleExponentialDown(alpha0=(3.5886, 'kJ/mol'), T0=(300, 'K'), n=0.85),
thermo=NASA(polynomials=[NASAPolynomial(coeffs=[-0.0883175, 0.0775021,
-3.58132e-05, -7.55711e-09, 8.27771e-12, 26166.1, 29.3215], Tmin=(100,
'K'), Tmax=(1017.17, 'K')), NASAPolynomial(coeffs=[16.4341, 0.0376674,
-1.41425e-05, 2.53759e-09, -1.75328e-13, 21504.4, -57.0638], Tmin=(
1017.17, 'K'), Tmax=(5000, 'K'))], Tmin=(100, 'K'), Tmax=(5000, 'K'),
E0=(216.244, 'kJ/mol'), Cp0=(33.2579, 'J/(mol*K)'), CpInf=(461.453,
'J/(mol*K)'), comment=
'Thermo group additivity estimation: group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + radical(Allyl_P) + radical(C=CC=CCJ)'
))
species(label='[CH2][C](C=C)C(C)=CC(24606)', structure=SMILES(
'[CH2]C=C([CH2])C(C)=CC'), E0=(216.244, 'kJ/mol'), spinMultiplicity=1,
opticalIsomers=1, molecularWeight=(108.181, 'amu'), energyTransferModel
=SingleExponentialDown(alpha0=(3.5886, 'kJ/mol'), T0=(300, 'K'), n=0.85
), thermo=NASA(polynomials=[NASAPolynomial(coeffs=[-0.0883175,
0.0775021, -3.58132e-05, -7.55711e-09, 8.27771e-12, 26166.1, 29.3215],
Tmin=(100, 'K'), Tmax=(1017.17, 'K')), NASAPolynomial(coeffs=[16.4341,
0.0376674, -1.41425e-05, 2.53759e-09, -1.75328e-13, 21504.4, -57.0638],
Tmin=(1017.17, 'K'), Tmax=(5000, 'K'))], Tmin=(100, 'K'), Tmax=(5000,
'K'), E0=(216.244, 'kJ/mol'), Cp0=(33.2579, 'J/(mol*K)'), CpInf=(
461.453, 'J/(mol*K)'), comment=
'Thermo group additivity estimation: group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + radical(Allyl_P) + radical(C=CC=CCJ)'
))
species(label='[CH2]C(=CC)[C]1CC1C(25414)', structure=SMILES(
'[CH2]C(=CC)[C]1CC1C'), E0=(289.9, 'kJ/mol'), spinMultiplicity=1,
opticalIsomers=1, molecularWeight=(108.181, 'amu'), energyTransferModel
=SingleExponentialDown(alpha0=(3.5886, 'kJ/mol'), T0=(300, 'K'), n=0.85
), thermo=NASA(polynomials=[NASAPolynomial(coeffs=[0.71289, 0.0520158,
3.84829e-05, -8.55933e-08, 3.61457e-11, 35003.5, 26.4903], Tmin=(100,
'K'), Tmax=(968.714, 'K')), NASAPolynomial(coeffs=[16.7686, 0.0352996,
-1.24057e-05, 2.26286e-09, -1.62921e-13, 29566.5, -62.466], Tmin=(
968.714, 'K'), Tmax=(5000, 'K'))], Tmin=(100, 'K'), Tmax=(5000, 'K'),
E0=(289.9, 'kJ/mol'), Cp0=(33.2579, 'J/(mol*K)'), CpInf=(465.61,
'J/(mol*K)'), comment=
'Thermo group additivity estimation: group(Cs-CsCsCsH) + group(Cs-(Cds-Cds)CsCsH) + group(Cs-CsCsHH) + group(Cs-CsHHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsCs) + group(Cds-CdsCsH) + ring(Cyclopropane) + radical(Allyl_T) + radical(Allyl_P)'
))
species(label='[CH2][C]1C(=CC)CC1C(25415)', structure=SMILES(
'[CH2]C1=C([CH]C)CC1C'), E0=(304.572, 'kJ/mol'), spinMultiplicity=1,
opticalIsomers=1, molecularWeight=(108.181, 'amu'), energyTransferModel
=SingleExponentialDown(alpha0=(3.5886, 'kJ/mol'), T0=(300, 'K'), n=0.85
), thermo=NASA(polynomials=[NASAPolynomial(coeffs=[0.583091, 0.0531885,
4.0938e-05, -9.08388e-08, 3.83549e-11, 36774.2, 26.4705], Tmin=(100,
'K'), Tmax=(972.301, 'K')), NASAPolynomial(coeffs=[18.2947, 0.0339462,
-1.21014e-05, 2.24934e-09, -1.64353e-13, 30795.4, -71.5147], Tmin=(
972.301, 'K'), Tmax=(5000, 'K'))], Tmin=(100, 'K'), Tmax=(5000, 'K'),
E0=(304.572, 'kJ/mol'), Cp0=(33.2579, 'J/(mol*K)'), CpInf=(465.61,
'J/(mol*K)'), comment=
'Thermo group additivity estimation: group(Cs-(Cds-Cds)CsCsH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsCs) + group(Cds-CdsCsCs) + ring(Cyclobutene) + radical(Allyl_P) + radical(Allyl_S)'
))
species(label='CH2(S)(23)', structure=SMILES('[CH2]'), E0=(419.862,
'kJ/mol'), modes=[HarmonicOscillator(frequencies=([1369.36, 2789.41,
2993.36], 'cm^-1'))], spinMultiplicity=1, opticalIsomers=1,
molecularWeight=(14.0266, 'amu'), collisionModel=TransportData(
shapeIndex=2, epsilon=(1197.29, 'J/mol'), sigma=(3.8, 'angstroms'),
dipoleMoment=(0, 'C*m'), polarizability=(0, 'angstroms^3'),
rotrelaxcollnum=0.0, comment='GRI-Mech'), energyTransferModel=
SingleExponentialDown(alpha0=(3.5886, 'kJ/mol'), T0=(300, 'K'), n=0.85),
thermo=NASA(polynomials=[NASAPolynomial(coeffs=[4.19195, -0.00230793,
8.0509e-06, -6.60123e-09, 1.95638e-12, 50484.3, -0.754589], Tmin=(200,
'K'), Tmax=(1000, 'K')), NASAPolynomial(coeffs=[2.28556, 0.00460255, -
1.97412e-06, 4.09548e-10, -3.34695e-14, 50922.4, 8.67684], Tmin=(1000,
'K'), Tmax=(3000, 'K'))], Tmin=(200, 'K'), Tmax=(3000, 'K'), E0=(
419.862, 'kJ/mol'), Cp0=(33.2579, 'J/(mol*K)'), CpInf=(58.2013,
'J/(mol*K)'), label='CH2(S)', comment=
'Thermo library: Klippenstein_Glarborg2016'))
species(label='[CH2]C(=C)C([CH2])=CC(25416)', structure=SMILES(
'[CH2]C(=C)C([CH2])=CC'), E0=(285.713, 'kJ/mol'), modes=[
HarmonicOscillator(frequencies=([325, 375, 415, 465, 420, 450, 1700,
1750, 2950, 3100, 1380, 975, 1025, 1650, 2750, 2800, 2850, 1350, 1500,
750, 1050, 1375, 1000, 3000, 3033.33, 3066.67, 3100, 415, 465, 780, 850,
1435, 1475, 900, 1100, 3010, 987.5, 1337.5, 450, 1655, 311.383],
'cm^-1')), HinderedRotor(inertia=(0.327475, 'amu*angstrom^2'), symmetry
=1, barrier=(22.5291, 'kJ/mol'), semiclassical=False), HinderedRotor(
inertia=(0.327466, 'amu*angstrom^2'), symmetry=1, barrier=(22.5294,
'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.327318,
'amu*angstrom^2'), symmetry=1, barrier=(22.5272, 'kJ/mol'),
semiclassical=False), HinderedRotor(inertia=(0.327483, 'amu*angstrom^2'
), symmetry=1, barrier=(22.5297, 'kJ/mol'), semiclassical=False)],
spinMultiplicity=3, opticalIsomers=1, molecularWeight=(94.1543, 'amu'),
energyTransferModel=SingleExponentialDown(alpha0=(3.5886, 'kJ/mol'), T0
=(300, 'K'), n=0.85), thermo=NASA(polynomials=[NASAPolynomial(coeffs=[
0.335271, 0.0676667, -2.76626e-05, -1.62749e-08, 1.21982e-11, 34506.8,
24.024], Tmin=(100, 'K'), Tmax=(980.594, 'K')), NASAPolynomial(coeffs=[
17.5531, 0.0266059, -9.47854e-06, 1.70194e-09, -1.19937e-13, 29727.4, -
65.8563], Tmin=(980.594, 'K'), Tmax=(5000, 'K'))], Tmin=(100, 'K'),
Tmax=(5000, 'K'), E0=(285.713, 'kJ/mol'), Cp0=(33.2579, 'J/(mol*K)'),
CpInf=(390.78, 'J/(mol*K)'), comment=
'Thermo group additivity estimation: group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-CdsCsH) + group(Cds-CdsHH) + radical(Allyl_P) + radical(Allyl_P)'
))
species(label='C=C([CH]C)C[C]=CC(24184)', structure=SMILES(
'[CH2]C(=CC)C[C]=CC'), E0=(366.985, 'kJ/mol'), modes=[
HarmonicOscillator(frequencies=([2995, 3025, 975, 1000, 1300, 1375, 400,
500, 1630, 1680, 2750, 2770, 2790, 2810, 2830, 2850, 1350, 1400, 1450,
1500, 700, 800, 1000, 1100, 1350, 1400, 900, 1100, 1685, 370, 350, 440,
435, 1725, 2750, 2850, 1437.5, 1250, 1305, 750, 350, 3000, 3100, 440,
815, 1455, 1000, 180, 579.702], 'cm^-1')), HinderedRotor(inertia=(
0.147406, 'amu*angstrom^2'), symmetry=1, barrier=(3.38916, 'kJ/mol'),
semiclassical=False), HinderedRotor(inertia=(0.64226, 'amu*angstrom^2'),
symmetry=1, barrier=(14.7668, 'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.64164, 'amu*angstrom^2'), symmetry=1, barrier=
(14.7526, 'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(
0.643937, 'amu*angstrom^2'), symmetry=1, barrier=(14.8054, 'kJ/mol'),
semiclassical=False), HinderedRotor(inertia=(0.145327, 'amu*angstrom^2'
), symmetry=1, barrier=(3.34136, 'kJ/mol'), semiclassical=False)],
spinMultiplicity=3, opticalIsomers=1, molecularWeight=(108.181, 'amu'),
collisionModel=TransportData(shapeIndex=2, epsilon=(3683.66, 'J/mol'),
sigma=(6.4482, 'angstroms'), dipoleMoment=(0, 'C*m'), polarizability=(0,
'angstroms^3'), rotrelaxcollnum=0, comment=
'Epsilon & sigma estimated with Tc=575.38 K, Pc=31.18 bar (from Joback method)'
), energyTransferModel=SingleExponentialDown(alpha0=(3.5886, 'kJ/mol'),
T0=(300, 'K'), n=0.85), thermo=NASA(polynomials=[NASAPolynomial(coeffs=
[0.29648, 0.0786067, -5.42868e-05, 1.96375e-08, -2.97459e-12, 44273.2,
31.2372], Tmin=(100, 'K'), Tmax=(1490.43, 'K')), NASAPolynomial(coeffs=
[13.9025, 0.0420909, -1.75363e-05, 3.199e-09, -2.17227e-13, 40217.5, -
39.8334], Tmin=(1490.43, 'K'), Tmax=(5000, 'K'))], Tmin=(100, 'K'),
Tmax=(5000, 'K'), E0=(366.985, 'kJ/mol'), Cp0=(33.2579, 'J/(mol*K)'),
CpInf=(461.453, 'J/(mol*K)'), comment=
'Thermo group additivity estimation: group(Cs-(Cds-Cds)(Cds-Cds)HH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsCs) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + radical(Cds_S) + radical(Allyl_P)'
))
species(label='CC=C1CCC1=CC(25269)', structure=SMILES('CC=C1CCC1=CC'), E0=(
114.107, 'kJ/mol'), spinMultiplicity=1, opticalIsomers=1,
molecularWeight=(108.181, 'amu'), energyTransferModel=
SingleExponentialDown(alpha0=(3.5886, 'kJ/mol'), T0=(300, 'K'), n=0.85),
thermo=NASA(polynomials=[NASAPolynomial(coeffs=[0.677799, 0.0585738,
5.80411e-06, -4.1598e-08, 1.78951e-11, 13856, 25.5085], Tmin=(100, 'K'),
Tmax=(1034.79, 'K')), NASAPolynomial(coeffs=[13.4814, 0.0415234, -
1.65073e-05, 3.07348e-09, -2.16896e-13, 9469.28, -45.0922], Tmin=(
1034.79, 'K'), Tmax=(5000, 'K'))], Tmin=(100, 'K'), Tmax=(5000, 'K'),
E0=(114.107, 'kJ/mol'), Cp0=(33.2579, 'J/(mol*K)'), CpInf=(473.925,
'J/(mol*K)'), comment=
'Thermo group additivity estimation: group(Cs-(Cds-Cds)CsHH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + ring(12methylenecyclobutane)'
))
species(label='CH2(19)', structure=SMILES('[CH2]'), E0=(381.563, 'kJ/mol'),
modes=[HarmonicOscillator(frequencies=([1032.72, 2936.3, 3459], 'cm^-1'
))], spinMultiplicity=3, opticalIsomers=1, molecularWeight=(14.0266,
'amu'), collisionModel=TransportData(shapeIndex=2, epsilon=(1197.29,
'J/mol'), sigma=(3.8, 'angstroms'), dipoleMoment=(0, 'C*m'),
polarizability=(0, 'angstroms^3'), rotrelaxcollnum=0.0, comment=
'GRI-Mech'), energyTransferModel=SingleExponentialDown(alpha0=(3.5886,
'kJ/mol'), T0=(300, 'K'), n=0.85), thermo=NASA(polynomials=[
NASAPolynomial(coeffs=[3.8328, 0.000224446, 4.68033e-06, -6.04743e-09,
2.59009e-12, 45920.8, 1.40666], Tmin=(200, 'K'), Tmax=(1000, 'K')),
NASAPolynomial(coeffs=[3.16229, 0.00281798, -7.56235e-07, 5.05446e-11,
5.65236e-15, 46099.1, 4.77656], Tmin=(1000, 'K'), Tmax=(3000, 'K'))],
Tmin=(200, 'K'), Tmax=(3000, 'K'), E0=(381.563, 'kJ/mol'), Cp0=(33.2579,
'J/(mol*K)'), CpInf=(58.2013, 'J/(mol*K)'), label='CH2', comment=
'Thermo library: Klippenstein_Glarborg2016'))
species(label='[CH2]C([C]=CC)=CC(25417)', structure=SMILES(
'[CH2]C([C]=CC)=CC'), E0=(334.774, 'kJ/mol'), modes=[HarmonicOscillator
(frequencies=([350, 440, 435, 1725, 1685, 370, 2750, 2770, 2790, 2810,
2830, 2850, 1350, 1400, 1450, 1500, 700, 800, 1000, 1100, 1350, 1400,
900, 1100, 3000, 3100, 440, 815, 1455, 1000, 2995, 3025, 975, 1000,
1300, 1375, 400, 500, 1630, 1680, 180], 'cm^-1')), HinderedRotor(
inertia=(0.7606, 'amu*angstrom^2'), symmetry=1, barrier=(17.4877,
'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.760854,
'amu*angstrom^2'), symmetry=1, barrier=(17.4935, 'kJ/mol'),
semiclassical=False), HinderedRotor(inertia=(0.760586, 'amu*angstrom^2'
), symmetry=1, barrier=(17.4874, 'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(2.15146, 'amu*angstrom^2'), symmetry=1, barrier=
(49.4663, 'kJ/mol'), semiclassical=False)], spinMultiplicity=3,
opticalIsomers=1, molecularWeight=(94.1543, 'amu'), energyTransferModel
=SingleExponentialDown(alpha0=(3.5886, 'kJ/mol'), T0=(300, 'K'), n=0.85
), thermo=NASA(polynomials=[NASAPolynomial(coeffs=[0.352604, 0.0734369,
-5.91187e-05, 2.57941e-08, -4.60694e-12, 40400.9, 25.1788], Tmin=(100,
'K'), Tmax=(1327.42, 'K')), NASAPolynomial(coeffs=[14.2321, 0.0316126,
-1.18565e-05, 2.05761e-09, -1.36512e-13, 36716.1, -45.7131], Tmin=(
1327.42, 'K'), Tmax=(5000, 'K'))], Tmin=(100, 'K'), Tmax=(5000, 'K'),
E0=(334.774, 'kJ/mol'), Cp0=(33.2579, 'J/(mol*K)'), CpInf=(390.78,
'J/(mol*K)'), comment=
'Thermo group additivity estimation: group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + group(Cds-Cds(Cds-Cds)H) + radical(C=CJC=C) + radical(Allyl_P)'
))
species(label='[CH2]C1([CH]C)C(=C)C1C(25296)', structure=SMILES(
'[CH2]C1([CH]C)C(=C)C1C'), E0=(466.494, 'kJ/mol'), spinMultiplicity=1,
opticalIsomers=1, molecularWeight=(108.181, 'amu'), energyTransferModel
=SingleExponentialDown(alpha0=(3.5886, 'kJ/mol'), T0=(300, 'K'), n=0.85
), thermo=NASA(polynomials=[NASAPolynomial(coeffs=[0.29276, 0.0655305,
-4.50464e-06, -3.74661e-08, 1.7759e-11, 56253.7, 30.0992], Tmin=(100,
'K'), Tmax=(1027.4, 'K')), NASAPolynomial(coeffs=[16.6435, 0.0372633, -
1.49065e-05, 2.81296e-09, -2.01072e-13, 51026, -58.316], Tmin=(1027.4,
'K'), Tmax=(5000, 'K'))], Tmin=(100, 'K'), Tmax=(5000, 'K'), E0=(
466.494, 'kJ/mol'), Cp0=(33.2579, 'J/(mol*K)'), CpInf=(465.61,
'J/(mol*K)'), comment=
'Thermo group additivity estimation: group(Cs-(Cds-Cds)CsCsCs) + group(Cs-(Cds-Cds)CsCsH) + group(Cs-CsCsHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + group(Cds-CdsCsCs) + group(Cds-CdsHH) + ring(Methylene_cyclopropane) + radical(Neopentyl) + radical(Cs_S)'
))
species(label='H(3)', structure=SMILES('[H]'), E0=(211.792, 'kJ/mol'),
spinMultiplicity=1, opticalIsomers=1, molecularWeight=(1.00794, 'amu'),
collisionModel=TransportData(shapeIndex=0, epsilon=(1205.6, 'J/mol'),
sigma=(2.05, 'angstroms'), dipoleMoment=(0, 'C*m'), polarizability=(0,
'angstroms^3'), rotrelaxcollnum=0.0, comment='GRI-Mech'),
energyTransferModel=SingleExponentialDown(alpha0=(3.5886, 'kJ/mol'), T0
=(300, 'K'), n=0.85), thermo=NASA(polynomials=[NASAPolynomial(coeffs=[
2.5, 9.24385e-15, -1.3678e-17, 6.66185e-21, -1.00107e-24, 25472.7, -
0.459566], Tmin=(100, 'K'), Tmax=(3459.6, 'K')), NASAPolynomial(coeffs=
[2.5, 9.20456e-12, -3.58608e-15, 6.15199e-19, -3.92042e-23, 25472.7, -
0.459566], Tmin=(3459.6, 'K'), Tmax=(5000, 'K'))], Tmin=(100, 'K'),
Tmax=(5000, 'K'), E0=(211.792, 'kJ/mol'), Cp0=(20.7862, 'J/(mol*K)'),
CpInf=(20.7862, 'J/(mol*K)'), label='H', comment=
'Thermo library: BurkeH2O2'))
species(label='[CH2]C(=CC)C(=C)C=C(24604)', structure=SMILES(
'[CH2]C(=CC)C(=C)C=C'), E0=(242.677, 'kJ/mol'), modes=[
HarmonicOscillator(frequencies=([325, 375, 415, 465, 420, 450, 1700,
1750, 2950, 3000, 3050, 3100, 1330, 1430, 900, 1050, 1000, 1050, 1600,
1700, 2750, 2800, 2850, 1350, 1500, 750, 1050, 1375, 1000, 3000, 3100,
440, 815, 1455, 1000, 2995, 3025, 975, 1000, 1300, 1375, 400, 500, 1630,
1680, 181.962, 683.313], 'cm^-1')), HinderedRotor(inertia=(0.669842,
'amu*angstrom^2'), symmetry=1, barrier=(19.1337, 'kJ/mol'),
semiclassical=False), HinderedRotor(inertia=(0.0582339,
'amu*angstrom^2'), symmetry=1, barrier=(19.1767, 'kJ/mol'),
semiclassical=False), HinderedRotor(inertia=(0.83204, 'amu*angstrom^2'),
symmetry=1, barrier=(19.1302, 'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(4.52237, 'amu*angstrom^2'), symmetry=1, barrier=
(104.569, 'kJ/mol'), semiclassical=False)], spinMultiplicity=2,
opticalIsomers=1, molecularWeight=(107.173, 'amu'), energyTransferModel
=SingleExponentialDown(alpha0=(3.5886, 'kJ/mol'), T0=(300, 'K'), n=0.85
), thermo=NASA(polynomials=[NASAPolynomial(coeffs=[0.293043, 0.0682771,
-2.00337e-05, -2.05401e-08, 1.21516e-11, 29332.3, 27.0261], Tmin=(100,
'K'), Tmax=(1018.57, 'K')), NASAPolynomial(coeffs=[15.7386, 0.0358123,
-1.37404e-05, 2.51366e-09, -1.76142e-13, 24723.4, -54.9529], Tmin=(
1018.57, 'K'), Tmax=(5000, 'K'))], Tmin=(100, 'K'), Tmax=(5000, 'K'),
E0=(242.677, 'kJ/mol'), Cp0=(33.2579, 'J/(mol*K)'), CpInf=(440.667,
'J/(mol*K)'), comment=
'Thermo group additivity estimation: group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-Cds(Cds-Cds)(Cds-Cds)) + group(Cds-CdsCsH) + group(Cds-Cds(Cds-Cds)H) + group(Cds-CdsHH) + group(Cds-CdsHH) + radical(Allyl_P)'
))
species(label='[CH2]CC(=C)C([CH2])=CC(25418)', structure=SMILES(
'[CH2]CC(=C)C([CH2])=CC'), E0=(316.814, 'kJ/mol'), modes=[
HarmonicOscillator(frequencies=([3010, 987.5, 1337.5, 450, 1655, 2750,
2800, 2850, 1350, 1500, 750, 1050, 1375, 1000, 2950, 3100, 1380, 975,
1025, 1650, 325, 375, 415, 465, 420, 450, 1700, 1750, 2750, 2850,
1437.5, 1250, 1305, 750, 350, 3000, 3033.33, 3066.67, 3100, 415, 465,
780, 850, 1435, 1475, 900, 1100, 180, 180], 'cm^-1')), HinderedRotor(
inertia=(0.0368535, 'amu*angstrom^2'), symmetry=1, barrier=(17.9864,
'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.00736317,
'amu*angstrom^2'), symmetry=1, barrier=(3.60618, 'kJ/mol'),
semiclassical=False), HinderedRotor(inertia=(0.781153, 'amu*angstrom^2'
), symmetry=1, barrier=(17.9602, 'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.779478, 'amu*angstrom^2'), symmetry=1, barrier
=(17.9217, 'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(
0.781104, 'amu*angstrom^2'), symmetry=1, barrier=(17.9591, 'kJ/mol'),
semiclassical=False)], spinMultiplicity=3, opticalIsomers=1,
molecularWeight=(108.181, 'amu'), energyTransferModel=
SingleExponentialDown(alpha0=(3.5886, 'kJ/mol'), T0=(300, 'K'), n=0.85),
thermo=NASA(polynomials=[NASAPolynomial(coeffs=[-0.348925, 0.0836004, -
5.1879e-05, 7.14877e-09, 3.44908e-12, 38270.9, 31.5928], Tmin=(100, 'K'
), Tmax=(1044.14, 'K')), NASAPolynomial(coeffs=[17.9255, 0.0352115, -
1.34219e-05, 2.42456e-09, -1.67785e-13, 33276.3, -63.0036], Tmin=(
1044.14, 'K'), Tmax=(5000, 'K'))], Tmin=(100, 'K'), Tmax=(5000, 'K'),
E0=(316.814, 'kJ/mol'), Cp0=(33.2579, 'J/(mol*K)'), CpInf=(461.453,
'J/(mol*K)'), comment=
'Thermo group additivity estimation: group(Cs-(Cds-Cds)CsHH) + group(Cs-CsHHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-CdsCsH) + group(Cds-CdsHH) + radical(RCCJ) + radical(Allyl_P)'
))
species(label='[CH]=C(CC)C([CH2])=CC(25419)', structure=SMILES(
'[CH]=C(CC)C([CH2])=CC'), E0=(358.664, 'kJ/mol'), modes=[
HarmonicOscillator(frequencies=([3120, 650, 792.5, 1650, 3010, 987.5,
1337.5, 450, 1655, 2750, 2770, 2790, 2810, 2830, 2850, 1350, 1400, 1450,
1500, 700, 800, 1000, 1100, 1350, 1400, 900, 1100, 325, 375, 415, 465,
420, 450, 1700, 1750, 2750, 2850, 1437.5, 1250, 1305, 750, 350, 3000,
3100, 440, 815, 1455, 1000, 180], 'cm^-1')), HinderedRotor(inertia=(
0.701639, 'amu*angstrom^2'), symmetry=1, barrier=(16.1321, 'kJ/mol'),
semiclassical=False), HinderedRotor(inertia=(0.344302, 'amu*angstrom^2'
), symmetry=1, barrier=(16.1602, 'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.0492932, 'amu*angstrom^2'), symmetry=1,
barrier=(16.1378, 'kJ/mol'), semiclassical=False), HinderedRotor(
inertia=(0.702005, 'amu*angstrom^2'), symmetry=1, barrier=(16.1405,
'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.702379,
'amu*angstrom^2'), symmetry=1, barrier=(16.1491, 'kJ/mol'),
semiclassical=False)], spinMultiplicity=3, opticalIsomers=1,
molecularWeight=(108.181, 'amu'), energyTransferModel=
SingleExponentialDown(alpha0=(3.5886, 'kJ/mol'), T0=(300, 'K'), n=0.85),
thermo=NASA(polynomials=[NASAPolynomial(coeffs=[-0.468616, 0.0864938, -
5.84569e-05, 1.27697e-08, 1.75707e-12, 43308.4, 30.6389], Tmin=(100,
'K'), Tmax=(1047.28, 'K')), NASAPolynomial(coeffs=[18.4195, 0.034593, -
1.31104e-05, 2.35762e-09, -1.62637e-13, 38242.2, -66.6572], Tmin=(
1047.28, 'K'), Tmax=(5000, 'K'))], Tmin=(100, 'K'), Tmax=(5000, 'K'),
E0=(358.664, 'kJ/mol'), Cp0=(33.2579, 'J/(mol*K)'), CpInf=(461.453,
'J/(mol*K)'), comment=
'Thermo group additivity estimation: group(Cs-(Cds-Cds)CsHH) + group(Cs-CsHHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-CdsCsH) + group(Cds-CdsHH) + radical(Allyl_P) + radical(Cds_P)'
))
species(label='[CH2]C(=[C]C)C(=C)CC(25420)', structure=SMILES(
'[CH2]C(=[C]C)C(=C)CC'), E0=(349.41, 'kJ/mol'), modes=[
HarmonicOscillator(frequencies=([1685, 370, 2750, 2770, 2790, 2810,
2830, 2850, 1350, 1400, 1450, 1500, 700, 800, 1000, 1100, 1350, 1400,
900, 1100, 2950, 3100, 1380, 975, 1025, 1650, 325, 375, 415, 465, 420,
450, 1700, 1750, 2750, 2850, 1437.5, 1250, 1305, 750, 350, 3000, 3100,
440, 815, 1455, 1000, 180, 180], 'cm^-1')), HinderedRotor(inertia=(
0.159905, 'amu*angstrom^2'), symmetry=1, barrier=(15.9368, 'kJ/mol'),
semiclassical=False), HinderedRotor(inertia=(0.693159, 'amu*angstrom^2'
), symmetry=1, barrier=(15.9371, 'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.693127, 'amu*angstrom^2'), symmetry=1, barrier
=(15.9364, 'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(
0.693165, 'amu*angstrom^2'), symmetry=1, barrier=(15.9372, 'kJ/mol'),
semiclassical=False), HinderedRotor(inertia=(0.0150632,
'amu*angstrom^2'), symmetry=1, barrier=(15.9371, 'kJ/mol'),
semiclassical=False)], spinMultiplicity=3, opticalIsomers=1,
molecularWeight=(108.181, 'amu'), energyTransferModel=
SingleExponentialDown(alpha0=(3.5886, 'kJ/mol'), T0=(300, 'K'), n=0.85),
thermo=NASA(polynomials=[NASAPolynomial(coeffs=[-0.583231, 0.089245, -
7.16619e-05, 3.00631e-08, -5.07891e-12, 42198.9, 31.1306], Tmin=(100,
'K'), Tmax=(1412.15, 'K')), NASAPolynomial(coeffs=[19.0319, 0.0336833,
-1.2643e-05, 2.20036e-09, -1.46165e-13, 36659.1, -70.2702], Tmin=(
1412.15, 'K'), Tmax=(5000, 'K'))], Tmin=(100, 'K'), Tmax=(5000, 'K'),
E0=(349.41, 'kJ/mol'), Cp0=(33.2579, 'J/(mol*K)'), CpInf=(461.453,
'J/(mol*K)'), comment=
'Thermo group additivity estimation: group(Cs-(Cds-Cds)CsHH) + group(Cs-CsHHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-CdsCsH) + group(Cds-CdsHH) + radical(Allyl_P) + radical(Cds_S)'
))
species(label='[CH]=C([CH]C)C(C)=CC(25421)', structure=SMILES(
'[CH]C(=CC)C(C)=CC'), E0=(317.373, 'kJ/mol'), modes=[HarmonicOscillator
(frequencies=([325, 375, 415, 465, 420, 450, 1700, 1750, 2750, 2762.5,
2775, 2787.5, 2800, 2812.5, 2825, 2837.5, 2850, 1350, 1380, 1410, 1440,
1470, 1500, 700, 750, 800, 1000, 1050, 1100, 1350, 1375, 1400, 900,
1000, 1100, 2995, 3025, 975, 1000, 1300, 1375, 400, 500, 1630, 1680,
200, 800, 1200, 1600], 'cm^-1')), HinderedRotor(inertia=(0.156089,
'amu*angstrom^2'), symmetry=1, barrier=(3.5888, 'kJ/mol'),
semiclassical=False), HinderedRotor(inertia=(0.156089, 'amu*angstrom^2'
), symmetry=1, barrier=(3.5888, 'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089, 'amu*angstrom^2'), symmetry=1, barrier
=(3.5888, 'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(
0.156089, 'amu*angstrom^2'), symmetry=1, barrier=(3.5888, 'kJ/mol'),
semiclassical=False), HinderedRotor(inertia=(0.156089, 'amu*angstrom^2'
), symmetry=1, barrier=(3.5888, 'kJ/mol'), semiclassical=False)],
spinMultiplicity=3, opticalIsomers=1, molecularWeight=(108.181, 'amu'),
energyTransferModel=SingleExponentialDown(alpha0=(3.5886, 'kJ/mol'), T0
=(300, 'K'), n=0.85), thermo=NASA(polynomials=[NASAPolynomial(coeffs=[-
0.247945, 0.0873521, -6.16843e-05, 2.31486e-08, -3.62747e-12, 38328.8,
29.1665], Tmin=(100, 'K'), Tmax=(1460.93, 'K')), NASAPolynomial(coeffs=
[15.297, 0.0447902, -1.7984e-05, 3.20673e-09, -2.14924e-13, 33786.8, -
51.7212], Tmin=(1460.93, 'K'), Tmax=(5000, 'K'))], Tmin=(100, 'K'),
Tmax=(5000, 'K'), E0=(317.373, 'kJ/mol'), Cp0=(33.2579, 'J/(mol*K)'),
CpInf=(461.453, 'J/(mol*K)'), comment=
'Thermo group additivity estimation: group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + radical(AllylJ2_triplet)'
))
species(label='[CH2][C](C=C)C(=C)CC(24623)', structure=SMILES(
'[CH2]C(C=C)=C([CH2])CC'), E0=(228.159, 'kJ/mol'), spinMultiplicity=1,
opticalIsomers=1, molecularWeight=(108.181, 'amu'), energyTransferModel
=SingleExponentialDown(alpha0=(3.5886, 'kJ/mol'), T0=(300, 'K'), n=0.85
), thermo=NASA(polynomials=[NASAPolynomial(coeffs=[-0.0497728,
0.0733281, -1.6094e-05, -3.35123e-08, 1.88363e-11, 27601.1, 30.4448],
Tmin=(100, 'K'), Tmax=(975.095, 'K')), NASAPolynomial(coeffs=[18.3695,
0.0342638, -1.21408e-05, 2.16747e-09, -1.52112e-13, 22274, -66.8493],
Tmin=(975.095, 'K'), Tmax=(5000, 'K'))], Tmin=(100, 'K'), Tmax=(5000,
'K'), E0=(228.159, 'kJ/mol'), Cp0=(33.2579, 'J/(mol*K)'), CpInf=(
461.453, 'J/(mol*K)'), comment=
'Thermo group additivity estimation: group(Cs-(Cds-Cds)CsHH) + group(Cs-CsHHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsCs) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-Cds(Cds-Cds)H) + group(Cds-CdsHH) + radical(C=CC=CCJ) + radical(Allyl_P)'
))
species(label='C[CH][C]1CCC1=CC(25422)', structure=SMILES(
'C[CH]C1CCC=1[CH]C'), E0=(303.292, 'kJ/mol'), spinMultiplicity=1,
opticalIsomers=1, molecularWeight=(108.181, 'amu'), energyTransferModel
=SingleExponentialDown(alpha0=(3.5886, 'kJ/mol'), T0=(300, 'K'), n=0.85
), thermo=NASA(polynomials=[NASAPolynomial(coeffs=[0.788866, 0.0500701,
4.22235e-05, -8.64809e-08, 3.53174e-11, 36611.5, 25.2586], Tmin=(100,
'K'), Tmax=(987.239, 'K')), NASAPolynomial(coeffs=[16.2187, 0.0373502,
-1.4111e-05, 2.65357e-09, -1.92503e-13, 31138.2, -61.2734], Tmin=(
987.239, 'K'), Tmax=(5000, 'K'))], Tmin=(100, 'K'), Tmax=(5000, 'K'),
E0=(303.292, 'kJ/mol'), Cp0=(33.2579, 'J/(mol*K)'), CpInf=(465.61,
'J/(mol*K)'), comment=
'Thermo group additivity estimation: group(Cs-(Cds-Cds)CsHH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + group(Cds-CdsCsCs) + group(Cds-CdsCsCs) + ring(Cyclobutene) + radical(Allyl_S) + radical(Allyl_S)'
))
species(label='[CH2][C]1C(=C)C(C)C1C(25423)', structure=SMILES(
'[CH2]C1=C([CH2])C(C)C1C'), E0=(305.852, 'kJ/mol'), spinMultiplicity=1,
opticalIsomers=1, molecularWeight=(108.181, 'amu'), energyTransferModel
=SingleExponentialDown(alpha0=(3.5886, 'kJ/mol'), T0=(300, 'K'), n=0.85
), thermo=NASA(polynomials=[NASAPolynomial(coeffs=[0.377097, 0.0563026,
3.9705e-05, -9.53284e-08, 4.14811e-11, 36937, 26.2973], Tmin=(100, 'K'),
Tmax=(959.735, 'K')), NASAPolynomial(coeffs=[20.4056, 0.0304853, -
1.006e-05, 1.83774e-09, -1.35603e-13, 30437.2, -83.3398], Tmin=(959.735,
'K'), Tmax=(5000, 'K'))], Tmin=(100, 'K'), Tmax=(5000, 'K'), E0=(
305.852, 'kJ/mol'), Cp0=(33.2579, 'J/(mol*K)'), CpInf=(465.61,
'J/(mol*K)'), comment=
'Thermo group additivity estimation: group(Cs-(Cds-Cds)CsCsH) + group(Cs-(Cds-Cds)CsCsH) + group(Cs-CsHHH) + group(Cs-CsHHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsCs) + group(Cds-CdsCsCs) + ring(Cyclobutene) + radical(Allyl_P) + radical(Allyl_P)'
))
species(label='C=CC(=C)C(C)=CC(24616)', structure=SMILES('C=CC(=C)C(C)=CC'),
E0=(91.1774, 'kJ/mol'), spinMultiplicity=1, opticalIsomers=1,
molecularWeight=(108.181, 'amu'), energyTransferModel=
SingleExponentialDown(alpha0=(3.5886, 'kJ/mol'), T0=(300, 'K'), n=0.85),
thermo=NASA(polynomials=[NASAPolynomial(coeffs=[0.236638, 0.0713806, -
3.04205e-05, -5.26762e-09, 5.54498e-12, 11111.2, 26.9518], Tmin=(100,
'K'), Tmax=(1093.32, 'K')), NASAPolynomial(coeffs=[14.1536, 0.040705, -
1.6104e-05, 2.93544e-09, -2.02595e-13, 6858.32, -46.9636], Tmin=(
1093.32, 'K'), Tmax=(5000, 'K'))], Tmin=(100, 'K'), Tmax=(5000, 'K'),
E0=(91.1774, 'kJ/mol'), Cp0=(33.2579, 'J/(mol*K)'), CpInf=(465.61,
'J/(mol*K)'), comment=
'Thermo group additivity estimation: group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-Cds(Cds-Cds)(Cds-Cds)) + group(Cds-CdsCsH) + group(Cds-Cds(Cds-Cds)H) + group(Cds-CdsHH) + group(Cds-CdsHH)'
))
species(label='C=[C]C(C)C(=C)[CH]C(24183)', structure=SMILES(
'[CH2]C(=CC)C(C)[C]=C'), E0=(369.44, 'kJ/mol'), modes=[
HarmonicOscillator(frequencies=([1685, 370, 3010, 987.5, 1337.5, 450,
1655, 2750, 2770, 2790, 2810, 2830, 2850, 1350, 1400, 1450, 1500, 700,
800, 1000, 1100, 1350, 1400, 900, 1100, 2950, 3100, 1380, 975, 1025,
1650, 1380, 1390, 370, 380, 2900, 435, 350, 440, 435, 1725, 3000, 3100,
440, 815, 1455, 1000, 345.333, 347.343], 'cm^-1')), HinderedRotor(
inertia=(0.119405, 'amu*angstrom^2'), symmetry=1, barrier=(9.93037,
'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.281457,
'amu*angstrom^2'), symmetry=1, barrier=(24.022, 'kJ/mol'),
semiclassical=False), HinderedRotor(inertia=(0.116909, 'amu*angstrom^2'
), symmetry=1, barrier=(9.94809, 'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.117447, 'amu*angstrom^2'), symmetry=1, barrier
=(9.9744, 'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(
0.116555, 'amu*angstrom^2'), symmetry=1, barrier=(9.93684, 'kJ/mol'),
semiclassical=False)], spinMultiplicity=3, opticalIsomers=1,
molecularWeight=(108.181, 'amu'), collisionModel=TransportData(
shapeIndex=2, epsilon=(3625.33, 'J/mol'), sigma=(6.4092, 'angstroms'),
dipoleMoment=(0, 'C*m'), polarizability=(0, 'angstroms^3'),
rotrelaxcollnum=0, comment=
'Epsilon & sigma estimated with Tc=566.27 K, Pc=31.24 bar (from Joback method)'
), energyTransferModel=SingleExponentialDown(alpha0=(3.5886, 'kJ/mol'),
T0=(300, 'K'), n=0.85), thermo=NASA(polynomials=[NASAPolynomial(coeffs=
[0.299693, 0.0839308, -6.74533e-05, 3.06742e-08, -6.02582e-12, 44564.4,
29.0122], Tmin=(100, 'K'), Tmax=(1163.73, 'K')), NASAPolynomial(coeffs=
[10.857, 0.0476425, -2.06788e-05, 3.8782e-09, -2.69295e-13, 42107.3, -
23.5217], Tmin=(1163.73, 'K'), Tmax=(5000, 'K'))], Tmin=(100, 'K'),
Tmax=(5000, 'K'), E0=(369.44, 'kJ/mol'), Cp0=(33.2579, 'J/(mol*K)'),
CpInf=(461.453, 'J/(mol*K)'), comment=
'Thermo group additivity estimation: group(Cs-(Cds-Cds)(Cds-Cds)CsH) + group(Cs-CsHHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsCs) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + group(Cds-CdsHH) + radical(Allyl_P) + radical(Cds_S)'
))
species(label='C=C1C(=CC)CC1C(25265)', structure=SMILES('C=C1C(=CC)CC1C'),
E0=(118.381, 'kJ/mol'), spinMultiplicity=1, opticalIsomers=1,
molecularWeight=(108.181, 'amu'), energyTransferModel=
SingleExponentialDown(alpha0=(3.5886, 'kJ/mol'), T0=(300, 'K'), n=0.85),
thermo=NASA(polynomials=[NASAPolynomial(coeffs=[0.689924, 0.0550304,
2.3689e-05, -6.56265e-08, 2.77602e-11, 14372.8, 24.9628], Tmin=(100,
'K'), Tmax=(993.204, 'K')), NASAPolynomial(coeffs=[15.3775, 0.0380508,
-1.43595e-05, 2.66472e-09, -1.90565e-13, 9375.16, -56.2678], Tmin=(
993.204, 'K'), Tmax=(5000, 'K'))], Tmin=(100, 'K'), Tmax=(5000, 'K'),
E0=(118.381, 'kJ/mol'), Cp0=(33.2579, 'J/(mol*K)'), CpInf=(473.925,
'J/(mol*K)'), comment=
'Thermo group additivity estimation: group(Cs-(Cds-Cds)CsCsH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-CsHHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-CdsCsH) + group(Cds-CdsHH) + ring(12methylenecyclobutane)'
))
species(label='CHCH3(T)(95)', structure=SMILES('[CH]C'), E0=(343.893,
'kJ/mol'), modes=[HarmonicOscillator(frequencies=([2750, 2800, 2850,
1350, 1500, 750, 1050, 1375, 1000, 592.414, 4000], 'cm^-1')),
HinderedRotor(inertia=(0.00438699, 'amu*angstrom^2'), symmetry=1,
barrier=(26.7685, 'kJ/mol'), semiclassical=False)], spinMultiplicity=3,
opticalIsomers=1, molecularWeight=(28.0532, 'amu'), energyTransferModel
=SingleExponentialDown(alpha0=(3.5886, 'kJ/mol'), T0=(300, 'K'), n=0.85
), thermo=NASA(polynomials=[NASAPolynomial(coeffs=[3.82363, -
0.000909515, 3.2138e-05, -3.7348e-08, 1.3309e-11, 41371.4, 7.10948],
Tmin=(100, 'K'), Tmax=(960.812, 'K')), NASAPolynomial(coeffs=[4.30487,
0.00943069, -3.27559e-06, 5.95121e-10, -4.27307e-14, 40709.1, 1.84202],
Tmin=(960.812, 'K'), Tmax=(5000, 'K'))], Tmin=(100, 'K'), Tmax=(5000,
'K'), E0=(343.893, 'kJ/mol'), Cp0=(33.2579, 'J/(mol*K)'), CpInf=(
128.874, 'J/(mol*K)'), label='CHCH3(T)', comment=
'Thermo library: DFT_QCI_thermo'))
species(label='[CH2]C([C]=C)=CC(24774)', structure=SMILES(
'[CH2]C([C]=C)=CC'), E0=(370.8, 'kJ/mol'), modes=[HarmonicOscillator(
frequencies=([1685, 370, 2750, 2800, 2850, 1350, 1500, 750, 1050, 1375,
1000, 3010, 987.5, 1337.5, 450, 1655, 2950, 3100, 1380, 975, 1025, 1650,
350, 440, 435, 1725, 3000, 3100, 440, 815, 1455, 1000, 180], 'cm^-1')),
HinderedRotor(inertia=(1.17315, 'amu*angstrom^2'), symmetry=1, barrier=
(26.9731, 'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(
1.17496, 'amu*angstrom^2'), symmetry=1, barrier=(27.0146, 'kJ/mol'),
semiclassical=False), HinderedRotor(inertia=(1.1727, 'amu*angstrom^2'),
symmetry=1, barrier=(26.9626, 'kJ/mol'), semiclassical=False)],
spinMultiplicity=3, opticalIsomers=1, molecularWeight=(80.1277, 'amu'),
energyTransferModel=SingleExponentialDown(alpha0=(3.5886, 'kJ/mol'), T0
=(300, 'K'), n=0.85), thermo=NASA(polynomials=[NASAPolynomial(coeffs=[
1.0818, 0.0569416, -3.56598e-05, 4.1841e-09, 3.20998e-12, 44708.4,
20.7527], Tmin=(100, 'K'), Tmax=(982.69, 'K')), NASAPolynomial(coeffs=[
12.9204, 0.0239405, -8.46845e-06, 1.46434e-09, -9.91425e-14, 41648.3, -
39.886], Tmin=(982.69, 'K'), Tmax=(5000, 'K'))], Tmin=(100, 'K'), Tmax=
(5000, 'K'), E0=(370.8, 'kJ/mol'), Cp0=(33.2579, 'J/(mol*K)'), CpInf=(
320.107, 'J/(mol*K)'), comment=
'Thermo group additivity estimation: group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-CdsCsH) + group(Cds-Cds(Cds-Cds)H) + group(Cds-CdsHH) + radical(C=CJC=C) + radical(Allyl_P)'
))
species(label='[CH]=C([CH]C)C(=C)CC(25424)', structure=SMILES(
'[CH]C(=CC)C(=C)CC'), E0=(330.753, 'kJ/mol'), modes=[HarmonicOscillator
(frequencies=([2750, 2850, 1437.5, 1250, 1305, 750, 350, 2950, 3100,
1380, 975, 1025, 1650, 3010, 987.5, 1337.5, 450, 1655, 2750, 2770, 2790,
2810, 2830, 2850, 1350, 1400, 1450, 1500, 700, 800, 1000, 1100, 1350,
1400, 900, 1100, 325, 375, 415, 465, 420, 450, 1700, 1750, 200, 800,
1066.67, 1333.33, 1600], 'cm^-1')), HinderedRotor(inertia=(0.156089,
'amu*angstrom^2'), symmetry=1, barrier=(3.5888, 'kJ/mol'),
semiclassical=False), HinderedRotor(inertia=(0.156089, 'amu*angstrom^2'
), symmetry=1, barrier=(3.5888, 'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089, 'amu*angstrom^2'), symmetry=1, barrier
=(3.5888, 'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(
0.156089, 'amu*angstrom^2'), symmetry=1, barrier=(3.5888, 'kJ/mol'),
semiclassical=False), HinderedRotor(inertia=(0.156089, 'amu*angstrom^2'
), symmetry=1, barrier=(3.5888, 'kJ/mol'), semiclassical=False)],
spinMultiplicity=3, opticalIsomers=1, molecularWeight=(108.181, 'amu'),
energyTransferModel=SingleExponentialDown(alpha0=(3.5886, 'kJ/mol'), T0
=(300, 'K'), n=0.85), thermo=NASA(polynomials=[NASAPolynomial(coeffs=[-
0.442166, 0.0858934, -5.1432e-05, 9.5936e-09, 1.54315e-12, 39950.3,
30.9724], Tmin=(100, 'K'), Tmax=(1106.5, 'K')), NASAPolynomial(coeffs=[
16.3579, 0.0427111, -1.66841e-05, 2.99222e-09, -2.04007e-13, 35158.1, -
56.633], Tmin=(1106.5, 'K'), Tmax=(5000, 'K'))], Tmin=(100, 'K'), Tmax=
(5000, 'K'), E0=(330.753, 'kJ/mol'), Cp0=(33.2579, 'J/(mol*K)'), CpInf=
(461.453, 'J/(mol*K)'), comment=
'Thermo group additivity estimation: group(Cs-(Cds-Cds)CsHH) + group(Cs-CsHHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-CdsCsH) + group(Cds-CdsHH) + radical(AllylJ2_triplet)'
))
species(label='C=CC(=C)C(=C)CC(24630)', structure=SMILES('C=CC(=C)C(=C)CC'),
E0=(104.558, 'kJ/mol'), spinMultiplicity=1, opticalIsomers=1,
molecularWeight=(108.181, 'amu'), energyTransferModel=
SingleExponentialDown(alpha0=(3.5886, 'kJ/mol'), T0=(300, 'K'), n=0.85),
thermo=NASA(polynomials=[NASAPolynomial(coeffs=[0.296747, 0.0670054, -
1.0269e-05, -3.13536e-08, 1.59568e-11, 12721.3, 27.8384], Tmin=(100,
'K'), Tmax=(1010.3, 'K')), NASAPolynomial(coeffs=[15.6889, 0.0379462, -
1.44599e-05, 2.64736e-09, -1.86033e-13, 7984.11, -54.6302], Tmin=(
1010.3, 'K'), Tmax=(5000, 'K'))], Tmin=(100, 'K'), Tmax=(5000, 'K'), E0
=(104.558, 'kJ/mol'), Cp0=(33.2579, 'J/(mol*K)'), CpInf=(465.61,
'J/(mol*K)'), comment=
'Thermo group additivity estimation: group(Cs-(Cds-Cds)CsHH) + group(Cs-CsHHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-Cds(Cds-Cds)(Cds-Cds)) + group(Cds-Cds(Cds-Cds)H) + group(Cds-CdsHH) + group(Cds-CdsHH) + group(Cds-CdsHH)'
))
species(label='C=C1C(=C)C(C)C1C(25274)', structure=SMILES(
'C=C1C(=C)C(C)C1C'), E0=(122.654, 'kJ/mol'), spinMultiplicity=1,
opticalIsomers=1, molecularWeight=(108.181, 'amu'), energyTransferModel
=SingleExponentialDown(alpha0=(3.5886, 'kJ/mol'), T0=(300, 'K'), n=0.85
), thermo=NASA(polynomials=[NASAPolynomial(coeffs=[0.691732, 0.0515838,
4.13669e-05, -8.96066e-08, 3.77135e-11, 14890, 23.0693], Tmin=(100, 'K'
), Tmax=(969.873, 'K')), NASAPolynomial(coeffs=[17.4573, 0.0342784, -
1.20439e-05, 2.21718e-09, -1.61071e-13, 9199.74, -69.8715], Tmin=(
969.873, 'K'), Tmax=(5000, 'K'))], Tmin=(100, 'K'), Tmax=(5000, 'K'),
E0=(122.654, 'kJ/mol'), Cp0=(33.2579, 'J/(mol*K)'), CpInf=(473.925,
'J/(mol*K)'), comment=
'Thermo group additivity estimation: group(Cs-(Cds-Cds)CsCsH) + group(Cs-(Cds-Cds)CsCsH) + group(Cs-CsHHH) + group(Cs-CsHHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-CdsHH) + group(Cds-CdsHH) + ring(12methylenecyclobutane)'
))
species(label='N2', structure=SMILES('N#N'), E0=(-8.69489, 'kJ/mol'),
spinMultiplicity=1, opticalIsomers=1, molecularWeight=(28.0135, 'amu'),
collisionModel=TransportData(shapeIndex=1, epsilon=(810.913, 'J/mol'),
sigma=(3.621, 'angstroms'), dipoleMoment=(0, 'C*m'), polarizability=(
1.76, 'angstroms^3'), rotrelaxcollnum=4.0, comment=
'PrimaryTransportLibrary'), energyTransferModel=SingleExponentialDown(
alpha0=(3.5886, 'kJ/mol'), T0=(300, 'K'), n=0.85), thermo=NASA(
polynomials=[NASAPolynomial(coeffs=[3.61263, -0.00100893, 2.49898e-06,
-1.43376e-09, 2.58636e-13, -1051.1, 2.6527], Tmin=(100, 'K'), Tmax=(
1817.04, 'K')), NASAPolynomial(coeffs=[2.9759, 0.00164141, -7.19722e-07,
1.25378e-10, -7.91526e-15, -1025.84, 5.53757], Tmin=(1817.04, 'K'),
Tmax=(5000, 'K'))], Tmin=(100, 'K'), Tmax=(5000, 'K'), E0=(-8.69489,
'kJ/mol'), Cp0=(29.1007, 'J/(mol*K)'), CpInf=(37.4151, 'J/(mol*K)'),
label='N2', comment='Thermo library: BurkeH2O2'))
species(label='Ne', structure=SMILES('[Ne]'), E0=(-6.19738, 'kJ/mol'),
spinMultiplicity=1, opticalIsomers=1, molecularWeight=(20.1797, 'amu'),
collisionModel=TransportData(shapeIndex=0, epsilon=(1235.53, 'J/mol'),
sigma=(3.758e-10, 'm'), dipoleMoment=(0, 'C*m'), polarizability=(0,
'angstroms^3'), rotrelaxcollnum=0, comment=
'Epsilon & sigma estimated with fixed Lennard Jones Parameters. This is the fallback method! Try improving transport databases!'
), energyTransferModel=SingleExponentialDown(alpha0=(3.5886, 'kJ/mol'),
T0=(300, 'K'), n=0.85), thermo=NASA(polynomials=[NASAPolynomial(coeffs=
[2.5, 0, 0, 0, 0, -745.375, 3.35532], Tmin=(200, 'K'), Tmax=(1000, 'K')
), NASAPolynomial(coeffs=[2.5, 0, 0, 0, 0, -745.375, 3.35532], Tmin=(
1000, 'K'), Tmax=(6000, 'K'))], Tmin=(200, 'K'), Tmax=(6000, 'K'), E0=(
-6.19738, 'kJ/mol'), Cp0=(20.7862, 'J/(mol*K)'), CpInf=(20.7862,
'J/(mol*K)'), label='Ne', comment='Thermo library: primaryThermoLibrary'))
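# --- Illustrative sketch (not part of the generated network file) ----------
# Every species entry above carries thermo=NASA(polynomials=[...]) with two
# 7-coefficient NASA polynomials. Assuming the standard NASA-7 convention
# used by RMG/Arkane, heat capacity and enthalpy follow from the coefficients
# a1..a7 as sketched below; names here are illustrative, not part of the file.
R_J = 8.314462618  # J/(mol*K), molar gas constant

def nasa_cp(coeffs, T):
    """Cp(T) in J/(mol*K) from one 7-coefficient NASA polynomial."""
    a1, a2, a3, a4, a5, _a6, _a7 = coeffs
    return R_J * (a1 + a2*T + a3*T**2 + a4*T**3 + a5*T**4)

def nasa_h(coeffs, T):
    """H(T) in J/mol from one 7-coefficient NASA polynomial."""
    a1, a2, a3, a4, a5, a6, _a7 = coeffs
    return R_J * T * (a1 + a2*T/2 + a3*T**2/3 + a4*T**3/4 + a5*T**4/5 + a6/T)

# Example: the low-temperature polynomial of N2 above, evaluated at 500 K
# (inside its Tmin=100 K, Tmax=1817.04 K range).
n2_low = [3.61263, -0.00100893, 2.49898e-06, -1.43376e-09, 2.58636e-13, -1051.1, 2.6527]
# nasa_cp(n2_low, 500.0)  # ~29.7 J/(mol*K), between the listed Cp0 and CpInf
# ----------------------------------------------------------------------------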
transitionState(label='TS1', E0=(291.23, 'kJ/mol'), spinMultiplicity=1,
opticalIsomers=1)
transitionState(label='TS2', E0=(462.221, 'kJ/mol'), spinMultiplicity=1,
opticalIsomers=1)
transitionState(label='TS3', E0=(538.699, 'kJ/mol'), spinMultiplicity=1,
opticalIsomers=1)
transitionState(label='TS4', E0=(497.951, 'kJ/mol'), spinMultiplicity=1,
opticalIsomers=1)
transitionState(label='TS5', E0=(380.338, 'kJ/mol'), spinMultiplicity=1,
opticalIsomers=1)
transitionState(label='TS6', E0=(399.474, 'kJ/mol'), spinMultiplicity=1,
opticalIsomers=1)
transitionState(label='TS7', E0=(350.103, 'kJ/mol'), spinMultiplicity=1,
opticalIsomers=1)
transitionState(label='TS8', E0=(722.113, 'kJ/mol'), spinMultiplicity=1,
opticalIsomers=1)
transitionState(label='TS9', E0=(343.259, 'kJ/mol'), spinMultiplicity=1,
opticalIsomers=1)
transitionState(label='TS10', E0=(380.132, 'kJ/mol'), spinMultiplicity=1,
opticalIsomers=1)
transitionState(label='TS11', E0=(705.575, 'kJ/mol'), spinMultiplicity=1,
opticalIsomers=1)
transitionState(label='TS12', E0=(537.022, 'kJ/mol'), spinMultiplicity=1,
opticalIsomers=1)
transitionState(label='TS13', E0=(257.971, 'kJ/mol'), spinMultiplicity=1,
opticalIsomers=1)
transitionState(label='TS14', E0=(716.337, 'kJ/mol'), spinMultiplicity=1,
opticalIsomers=1)
transitionState(label='TS15', E0=(466.494, 'kJ/mol'), spinMultiplicity=1,
opticalIsomers=1)
transitionState(label='TS16', E0=(454.469, 'kJ/mol'), spinMultiplicity=1,
opticalIsomers=1)
transitionState(label='TS17', E0=(430.619, 'kJ/mol'), spinMultiplicity=1,
opticalIsomers=1)
transitionState(label='TS18', E0=(503.849, 'kJ/mol'), spinMultiplicity=1,
opticalIsomers=1)
transitionState(label='TS19', E0=(393.718, 'kJ/mol'), spinMultiplicity=1,
opticalIsomers=1)
transitionState(label='TS20', E0=(361.682, 'kJ/mol'), spinMultiplicity=1,
opticalIsomers=1)
transitionState(label='TS21', E0=(350.103, 'kJ/mol'), spinMultiplicity=1,
opticalIsomers=1)
transitionState(label='TS22', E0=(380.132, 'kJ/mol'), spinMultiplicity=1,
opticalIsomers=1)
transitionState(label='TS23', E0=(375.044, 'kJ/mol'), spinMultiplicity=1,
opticalIsomers=1)
transitionState(label='TS24', E0=(274.66, 'kJ/mol'), spinMultiplicity=1,
opticalIsomers=1)
transitionState(label='TS25', E0=(463.915, 'kJ/mol'), spinMultiplicity=1,
opticalIsomers=1)
transitionState(label='TS26', E0=(257.971, 'kJ/mol'), spinMultiplicity=1,
opticalIsomers=1)
transitionState(label='TS27', E0=(714.692, 'kJ/mol'), spinMultiplicity=1,
opticalIsomers=1)
transitionState(label='TS28', E0=(375.062, 'kJ/mol'), spinMultiplicity=1,
opticalIsomers=1)
transitionState(label='TS29', E0=(258.055, 'kJ/mol'), spinMultiplicity=1,
opticalIsomers=1)
transitionState(label='TS30', E0=(257.971, 'kJ/mol'), spinMultiplicity=1,
opticalIsomers=1)
reaction(label='reaction1', reactants=['C=C([CH]C)C(=C)[CH]C(24182)'],
products=['CH3CHCCH2(18175)', 'CH3CHCCH2(18175)'], transitionState=
'TS1', kinetics=Arrhenius(A=(5000000000000.0, 's^-1'), n=0, Ea=(41.5431,
'kJ/mol'), T0=(1, 'K'), Tmin=(300, 'K'), Tmax=(1500, 'K'), comment=
"""Exact match found for rate rule [RJJ]
Euclidian distance = 0
family: 1,4_Linear_birad_scission
Ea raised from 0.0 to 41.5 kJ/mol to match endothermicity of reaction."""
))
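# --- Illustrative sketch (uses only E0 values declared in this network) -----
# The note in reaction1 says Ea was "raised ... to match endothermicity".
# With the ground-state energies listed in this file that is simply
#   Ea(reaction1) = E0(products) - E0(reactant)
#                 = 2 * 145.615 kJ/mol - 249.687 kJ/mol = 41.543 kJ/mol,
# which matches Ea=(41.5431,'kJ/mol') and the E0=(291.23,'kJ/mol') of TS1.
e0_reactant = 249.687           # kJ/mol, C=C([CH]C)C(=C)[CH]C(24182)
e0_products = 2 * 145.615       # kJ/mol, two CH3CHCCH2(18175) fragments
ea_reaction1 = e0_products - e0_reactant   # -> 41.543 kJ/mol
# ----------------------------------------------------------------------------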
reaction(label='reaction2', reactants=['C=C([CH]C)C(=C)[CH]C(24182)'],
products=['[CH2]C1([CH]C)CC1=CC(25275)'], transitionState='TS2',
kinetics=Arrhenius(A=(3360000000.0, 's^-1'), n=0.84, Ea=(212.534,
'kJ/mol'), T0=(1, 'K'), Tmin=(300, 'K'), Tmax=(2500, 'K'), comment=
"""Estimated using template [R4_S_D;doublebond_intra_HNd;radadd_intra_cs2H] for rate rule [R4_S_(Cd)_D;doublebond_intra_HNd;radadd_intra_cs2H]
Euclidian distance = 2.0
Multiplied by reaction path degeneracy 2.0
family: Intra_R_Add_Exocyclic
Ea raised from 210.2 to 212.5 kJ/mol to match endothermicity of reaction."""
))
reaction(label='reaction3', reactants=['CH3CHCCH2(18175)',
'C=[C][CH]C(18176)'], products=['C=C([CH]C)C(=C)[CH]C(24182)'],
transitionState='TS3', kinetics=Arrhenius(A=(0.00086947, 'm^3/(mol*s)'),
n=2.67356, Ea=(32.0272, 'kJ/mol'), T0=(1, 'K'), comment=
"""Estimated using an average for rate rule [Ca_Cds-HH;CJ]
Euclidian distance = 0
family: R_Addition_MultipleBond"""
))
reaction(label='reaction4', reactants=['[CH2]C(=CC)C(C)=[C]C(25412)'],
products=['C=C([CH]C)C(=C)[CH]C(24182)'], transitionState='TS4',
kinetics=Arrhenius(A=(7740000000.0, 's^-1'), n=1.08, Ea=(161.921,
'kJ/mol'), T0=(1, 'K'), Tmin=(300, 'K'), Tmax=(1500, 'K'), comment=
"""From training reaction 198 used for R3H_DS;Cd_rad_out_Cs;Cs_H_out_2H
Exact match found for rate rule [R3H_DS;Cd_rad_out_Cs;Cs_H_out_2H]
Euclidian distance = 0
Multiplied by reaction path degeneracy 3.0
family: intra_H_migration"""
))
reaction(label='reaction5', reactants=['[CH2]C(=[C]C)C(C)=CC(25413)'],
products=['C=C([CH]C)C(=C)[CH]C(24182)'], transitionState='TS5',
kinetics=Arrhenius(A=(111300, 's^-1'), n=2.23, Ea=(44.3086, 'kJ/mol'),
T0=(1, 'K'), comment=
"""Estimated using template [R4H_DSS;Cd_rad_out_single;Cs_H_out] for rate rule [R4H_DSS;Cd_rad_out_Cs;Cs_H_out_2H]
Euclidian distance = 2.2360679775
Multiplied by reaction path degeneracy 3.0
family: intra_H_migration"""
))
reaction(label='reaction6', reactants=['C=C([CH]C)C(=C)[CH]C(24182)'],
products=['[CH2]C(=CC)[C](C)C=C(24605)'], transitionState='TS6',
kinetics=Arrhenius(A=(1600000.0, 's^-1'), n=1.81, Ea=(149.787, 'kJ/mol'
), T0=(1, 'K'), comment=
"""From training reaction 101 used for R4H_SDS;C_rad_out_2H;Cs_H_out_2H
Exact match found for rate rule [R4H_SDS;C_rad_out_2H;Cs_H_out_2H]
Euclidian distance = 0
Multiplied by reaction path degeneracy 6.0
family: intra_H_migration"""
))
reaction(label='reaction7', reactants=['C=C([CH]C)C(=C)[CH]C(24182)'],
products=['[CH2][C](C=C)C(C)=CC(24606)'], transitionState='TS7',
kinetics=Arrhenius(A=(6660000.0, 's^-1'), n=1.64, Ea=(100.416, 'kJ/mol'
), T0=(1, 'K'), comment=
"""From training reaction 96 used for R5H_SS(D)MS;C_rad_out_2H;Cs_H_out_2H
Exact match found for rate rule [R5H_SS(D)MS;C_rad_out_2H;Cs_H_out_2H]
Euclidian distance = 0
Multiplied by reaction path degeneracy 6.0
family: intra_H_migration"""
))
reaction(label='reaction8', reactants=['C=[C][CH]C(18176)',
'C=[C][CH]C(18176)'], products=['C=C([CH]C)C(=C)[CH]C(24182)'],
transitionState='TS8', kinetics=Arrhenius(A=(3730380.0, 'm^3/(mol*s)'),
n=0.027223, Ea=(0, 'kJ/mol'), T0=(1, 'K'), comment=
"""Estimated using an average for rate rule [Y_rad;Y_rad]
Euclidian distance = 0
family: R_Recombination
Ea raised from -14.4 to 0 kJ/mol."""
))
reaction(label='reaction9', reactants=['C=C([CH]C)C(=C)[CH]C(24182)'],
products=['[CH2]C(=CC)[C]1CC1C(25414)'], transitionState='TS9',
kinetics=Arrhenius(A=(7367860000000.0, 's^-1'), n=-0.105173, Ea=(
93.5715, 'kJ/mol'), T0=(1, 'K'), Tmin=(303.03, 'K'), Tmax=(2000, 'K'),
comment=
"""Estimated using template [R3_D;doublebond_intra;radadd_intra_cs2H] for rate rule [R3_D;doublebond_intra_secDe_HNd;radadd_intra_cs2H]
Euclidian distance = 2.0
Multiplied by reaction path degeneracy 2.0
family: Intra_R_Add_Endocyclic"""
))
reaction(label='reaction10', reactants=['C=C([CH]C)C(=C)[CH]C(24182)'],
products=['[CH2][C]1C(=CC)CC1C(25415)'], transitionState='TS10',
kinetics=Arrhenius(A=(643734000.0, 's^-1'), n=0.926191, Ea=(130.445,
'kJ/mol'), T0=(1, 'K'), comment=
"""Estimated using an average for rate rule [R4_S_D;doublebond_intra;radadd_intra_cs2H]
Euclidian distance = 0
Multiplied by reaction path degeneracy 2.0
family: Intra_R_Add_Endocyclic"""
))
reaction(label='reaction11', reactants=['CH2(S)(23)',
'[CH2]C(=C)C([CH2])=CC(25416)'], products=[
'C=C([CH]C)C(=C)[CH]C(24182)'], transitionState='TS11', kinetics=
Arrhenius(A=(79400000000000.0, 'cm^3/(mol*s)', '*|/', 0.25), n=-0.324,
Ea=(0, 'kJ/mol'), T0=(1, 'K'), comment=
"""From training reaction 4 used for carbene;Cd_pri
Exact match found for rate rule [carbene;Cd_pri]
Euclidian distance = 0
Multiplied by reaction path degeneracy 4.0
family: 1,2_Insertion_carbene
Ea raised from -3.9 to 0 kJ/mol."""
))
reaction(label='reaction23', reactants=['C=C([CH]C)C[C]=CC(24184)'],
products=['C=C([CH]C)C(=C)[CH]C(24182)'], transitionState='TS12',
kinetics=Arrhenius(A=(1748420000.0, 's^-1'), n=1.084, Ea=(170.038,
'kJ/mol'), T0=(1, 'K'), comment=
"""Estimated using average of templates [cCsCJ;CdsJ;C] + [cCs(-HH)CJ;CJ;C] for rate rule [cCs(-HH)CJ;CdsJ;C]
Euclidian distance = 1.0
family: 1,2_shiftC"""
))
reaction(label='reaction13', reactants=['C=C([CH]C)C(=C)[CH]C(24182)'],
products=['CC=C1CCC1=CC(25269)'], transitionState='TS13', kinetics=
Arrhenius(A=(1620000000000.0, 's^-1'), n=-0.305, Ea=(8.28432, 'kJ/mol'),
T0=(1, 'K'), Tmin=(600, 'K'), Tmax=(2000, 'K'), comment=
"""From training reaction 2 used for R4_SSS;C_rad_out_2H;Cpri_rad_out_2H
Exact match found for rate rule [R4_SSS;C_rad_out_2H;Cpri_rad_out_2H]
Euclidian distance = 0
family: Birad_recombination"""
))
reaction(label='reaction14', reactants=['CH2(19)',
'[CH2]C([C]=CC)=CC(25417)'], products=['C=C([CH]C)C(=C)[CH]C(24182)'],
transitionState='TS14', kinetics=Arrhenius(A=(1067320.0, 'm^3/(mol*s)'),
n=0.472793, Ea=(0, 'kJ/mol'), T0=(1, 'K'), comment=
"""Estimated using template [Y_rad;Birad] for rate rule [Cd_rad/OneDe;Birad]
Euclidian distance = 3.0
family: Birad_R_Recombination
Ea raised from -3.5 to 0 kJ/mol."""
))
reaction(label='reaction15', reactants=['C=C([CH]C)C(=C)[CH]C(24182)'],
products=['[CH2]C1([CH]C)C(=C)C1C(25296)'], transitionState='TS15',
kinetics=Arrhenius(A=(67265800000.0, 's^-1'), n=0.535608, Ea=(216.807,
'kJ/mol'), T0=(1, 'K'), comment=
"""Estimated using average of templates [R4_S_D;doublebond_intra;radadd_intra_csHNd] + [R4_S_D;doublebond_intra_HNd;radadd_intra_cs] for rate rule [R4_S_(Cd)_D;doublebond_intra_HNd;radadd_intra_csHNd]
Euclidian distance = 2.2360679775
Multiplied by reaction path degeneracy 2.0
family: Intra_R_Add_Exocyclic
Ea raised from 214.2 to 216.8 kJ/mol to match endothermicity of reaction."""
))
reaction(label='reaction16', reactants=['H(3)',
'[CH2]C(=CC)C(=C)C=C(24604)'], products=['C=C([CH]C)C(=C)[CH]C(24182)'],
transitionState='TS16', kinetics=Arrhenius(A=(231000000.0,
'cm^3/(mol*s)'), n=1.64, Ea=(0, 'kJ/mol'), T0=(1, 'K'), Tmin=(300, 'K'),
Tmax=(1500, 'K'), comment=
"""From training reaction 2544 used for Cds-HH_Cds-CdH;HJ
Exact match found for rate rule [Cds-HH_Cds-CdH;HJ]
Euclidian distance = 0
family: R_Addition_MultipleBond
Ea raised from -2.0 to 0 kJ/mol."""
))
reaction(label='reaction17', reactants=['[CH2]CC(=C)C([CH2])=CC(25418)'],
products=['C=C([CH]C)C(=C)[CH]C(24182)'], transitionState='TS17',
kinetics=Arrhenius(A=(1720000.0, 's^-1'), n=1.99, Ea=(113.805, 'kJ/mol'
), T0=(1, 'K'), comment=
"""From training reaction 84 used for R2H_S;C_rad_out_2H;Cs_H_out_H/Cd
Exact match found for rate rule [R2H_S;C_rad_out_2H;Cs_H_out_H/Cd]
Euclidian distance = 0
Multiplied by reaction path degeneracy 2.0
family: intra_H_migration"""
))
reaction(label='reaction18', reactants=['[CH]=C(CC)C([CH2])=CC(25419)'],
products=['C=C([CH]C)C(=C)[CH]C(24182)'], transitionState='TS18',
kinetics=Arrhenius(A=(18460000000.0, 's^-1'), n=0.74, Ea=(145.185,
'kJ/mol'), T0=(1, 'K'), Tmin=(300, 'K'), Tmax=(1500, 'K'), comment=
"""From training reaction 194 used for R3H_DS;Cd_rad_out_singleH;Cs_H_out_H/NonDeC
Exact match found for rate rule [R3H_DS;Cd_rad_out_singleH;Cs_H_out_H/NonDeC]
Euclidian distance = 0
Multiplied by reaction path degeneracy 2.0
family: intra_H_migration"""
))
reaction(label='reaction19', reactants=['[CH2]C(=[C]C)C(=C)CC(25420)'],
products=['C=C([CH]C)C(=C)[CH]C(24182)'], transitionState='TS19',
kinetics=Arrhenius(A=(74200, 's^-1'), n=2.23, Ea=(44.3086, 'kJ/mol'),
T0=(1, 'K'), comment=
"""Estimated using template [R4H_DSS;Cd_rad_out_single;Cs_H_out_1H] for rate rule [R4H_DSS;Cd_rad_out_Cs;Cs_H_out_H/NonDeC]
Euclidian distance = 2.2360679775
Multiplied by reaction path degeneracy 2.0
family: intra_H_migration"""
))
reaction(label='reaction20', reactants=['[CH]=C([CH]C)C(C)=CC(25421)'],
products=['C=C([CH]C)C(=C)[CH]C(24182)'], transitionState='TS20',
kinetics=Arrhenius(A=(111300, 's^-1'), n=2.23, Ea=(44.3086, 'kJ/mol'),
T0=(1, 'K'), comment=
"""Estimated using template [R4H_DSS;Cd_rad_out_singleH;Cs_H_out] for rate rule [R4H_DSS;Cd_rad_out_singleH;Cs_H_out_2H]
Euclidian distance = 1.0
Multiplied by reaction path degeneracy 3.0
family: intra_H_migration"""
))
reaction(label='reaction21', reactants=['C=C([CH]C)C(=C)[CH]C(24182)'],
products=['[CH2][C](C=C)C(=C)CC(24623)'], transitionState='TS21',
kinetics=Arrhenius(A=(6660000.0, 's^-1'), n=1.64, Ea=(100.416, 'kJ/mol'
), T0=(1, 'K'), comment=
"""Estimated using template [R5H_SS(D)MS;C_rad_out_single;Cs_H_out_2H] for rate rule [R5H_SS(D)MS;C_rad_out_H/NonDeC;Cs_H_out_2H]
Euclidian distance = 2.0
Multiplied by reaction path degeneracy 6.0
family: intra_H_migration"""
))
reaction(label='reaction22', reactants=['C=C([CH]C)C(=C)[CH]C(24182)'],
products=['C[CH][C]1CCC1=CC(25422)'], transitionState='TS22', kinetics=
Arrhenius(A=(321867000.0, 's^-1'), n=0.926191, Ea=(130.445, 'kJ/mol'),
T0=(1, 'K'), comment=
"""Estimated using an average for rate rule [R4_S_D;doublebond_intra;radadd_intra_cs2H]
Euclidian distance = 0
family: Intra_R_Add_Endocyclic"""
))
reaction(label='reaction23', reactants=['C=C([CH]C)C(=C)[CH]C(24182)'],
products=['[CH2][C]1C(=C)C(C)C1C(25423)'], transitionState='TS23',
kinetics=Arrhenius(A=(516207000.0, 's^-1'), n=0.911389, Ea=(125.357,
'kJ/mol'), T0=(1, 'K'), comment=
"""Estimated using an average for rate rule [R4_S_D;doublebond_intra;radadd_intra_csHCs]
Euclidian distance = 0
family: Intra_R_Add_Endocyclic"""
))
reaction(label='reaction24', reactants=['C=C([CH]C)C(=C)[CH]C(24182)'],
products=['C=CC(=C)C(C)=CC(24616)'], transitionState='TS24', kinetics=
Arrhenius(A=(12756600000.0, 's^-1'), n=0.137, Ea=(24.9733, 'kJ/mol'),
T0=(1, 'K'), comment=
"""Estimated using template [R5;Y_rad;XH_Rrad] for rate rule [R5radEndo;Y_rad;XH_Rrad]
Euclidian distance = 1.0
Multiplied by reaction path degeneracy 6.0
family: Intra_Disproportionation"""
))
reaction(label='reaction24', reactants=['C=[C]C(C)C(=C)[CH]C(24183)'],
products=['C=C([CH]C)C(=C)[CH]C(24182)'], transitionState='TS25',
kinetics=Arrhenius(A=(866000000000.0, 's^-1'), n=0.438, Ea=(94.4747,
'kJ/mol'), T0=(1, 'K'), comment=
"""From training reaction 5 used for cCs(-HC)CJ;CdsJ;C
Exact match found for rate rule [cCs(-HC)CJ;CdsJ;C]
Euclidian distance = 0
family: 1,2_shiftC"""
))
reaction(label='reaction26', reactants=['C=C([CH]C)C(=C)[CH]C(24182)'],
products=['C=C1C(=CC)CC1C(25265)'], transitionState='TS26', kinetics=
Arrhenius(A=(3240000000000.0, 's^-1'), n=-0.305, Ea=(8.28432, 'kJ/mol'),
T0=(1, 'K'), Tmin=(600, 'K'), Tmax=(2000, 'K'), comment=
"""Estimated using template [R4_SSS;C_rad_out_2H;Cpri_rad_out_single] for rate rule [R4_SSS;C_rad_out_2H;Cpri_rad_out_H/NonDeC]
Euclidian distance = 2.0
Multiplied by reaction path degeneracy 2.0
family: Birad_recombination"""
))
reaction(label='reaction27', reactants=['CHCH3(T)(95)',
'[CH2]C([C]=C)=CC(24774)'], products=['C=C([CH]C)C(=C)[CH]C(24182)'],
transitionState='TS27', kinetics=Arrhenius(A=(1067320.0, 'm^3/(mol*s)'),
n=0.472793, Ea=(0, 'kJ/mol'), T0=(1, 'K'), comment=
"""Estimated using template [Y_rad;Birad] for rate rule [Cd_rad/OneDe;Birad]
Euclidian distance = 3.0
family: Birad_R_Recombination
Ea raised from -3.5 to 0 kJ/mol."""
))
reaction(label='reaction28', reactants=['[CH]=C([CH]C)C(=C)CC(25424)'],
products=['C=C([CH]C)C(=C)[CH]C(24182)'], transitionState='TS28',
kinetics=Arrhenius(A=(74200, 's^-1'), n=2.23, Ea=(44.3086, 'kJ/mol'),
T0=(1, 'K'), comment=
"""Estimated using template [R4H_DSS;Cd_rad_out_singleH;Cs_H_out_1H] for rate rule [R4H_DSS;Cd_rad_out_singleH;Cs_H_out_H/NonDeC]
Euclidian distance = 1.0
Multiplied by reaction path degeneracy 2.0
family: intra_H_migration"""
))
reaction(label='reaction29', reactants=['C=C([CH]C)C(=C)[CH]C(24182)'],
products=['C=CC(=C)C(=C)CC(24630)'], transitionState='TS29', kinetics=
Arrhenius(A=(19260000000.0, 's^-1'), n=0.137, Ea=(8.368, 'kJ/mol'), T0=
(1, 'K'), Tmin=(300, 'K'), Tmax=(1500, 'K'), comment=
"""Estimated using template [R5;Y_rad_NDe;XH_Rrad] for rate rule [R5radEndo;Y_rad_NDe;XH_Rrad]
Euclidian distance = 1.0
Multiplied by reaction path degeneracy 6.0
family: Intra_Disproportionation"""
))
reaction(label='reaction30', reactants=['C=C([CH]C)C(=C)[CH]C(24182)'],
products=['C=C1C(=C)C(C)C1C(25274)'], transitionState='TS30', kinetics=
Arrhenius(A=(1620000000000.0, 's^-1'), n=-0.305, Ea=(8.28432, 'kJ/mol'),
T0=(1, 'K'), comment=
"""Estimated using template [R4_SSS;C_rad_out_single;Cpri_rad_out_single] for rate rule [R4_SSS;C_rad_out_H/NonDeC;Cpri_rad_out_H/NonDeC]
Euclidian distance = 2.82842712475
family: Birad_recombination"""
))
network(
    label = '4267',
    isomers = [
        'C=C([CH]C)C(=C)[CH]C(24182)',
    ],
    reactants = [
        ('CH3CHCCH2(18175)', 'CH3CHCCH2(18175)'),
    ],
    bathGas = {
        'N2': 0.5,
        'Ne': 0.5,
    },
)
pressureDependence(
    label = '4267',
    Tmin = (300,'K'),
    Tmax = (2000,'K'),
    Tcount = 8,
    Tlist = ([302.47,323.145,369.86,455.987,609.649,885.262,1353.64,1896.74],'K'),
    Pmin = (0.01,'bar'),
    Pmax = (100,'bar'),
    Pcount = 5,
    Plist = ([0.0125282,0.0667467,1,14.982,79.8202],'bar'),
    maximumGrainSize = (0.5,'kcal/mol'),
    minimumGrainCount = 250,
    method = 'modified strong collision',
    interpolationModel = ('Chebyshev', 6, 4),
    activeKRotor = True,
    activeJRotor = True,
    rmgmode = True,
)
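# Illustrative sketch, added for readability (assumption: this helper is not part
# of the RMG-generated network file itself). The kinetics entries in this file use
# RMG's modified Arrhenius form k(T) = A*(T/T0)^n*exp(-Ea/(R*T)); the function
# below simply evaluates that expression, with the TS23 parameters above as defaults.
def _example_arrhenius_k(T, A=5.16207e+08, n=0.911389, Ea=125.357e3, T0=1.0):
    """Return k(T) in s^-1 for Ea in J/mol and T, T0 in K."""
    import math
    R = 8.314462618  # molar gas constant, J/(mol*K)
    return A * (T / T0)**n * math.exp(-Ea / (R * T))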
species(
label = 'C=C([CH]C)C(=C)[CH]C(24182)',
structure = SMILES('[CH2]C(=CC)C([CH2])=CC'),
E0 = (249.687,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([325,375,415,465,420,450,1700,1750,2750,2770,2790,2810,2830,2850,1350,1400,1450,1500,700,800,1000,1100,1350,1400,900,1100,3000,3033.33,3066.67,3100,415,465,780,850,1435,1475,900,1100,2995,3025,975,1000,1300,1375,400,500,1630,1680,180],'cm^-1')),
HinderedRotor(inertia=(0.735277,'amu*angstrom^2'), symmetry=1, barrier=(16.9055,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.0632434,'amu*angstrom^2'), symmetry=1, barrier=(29.514,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.737545,'amu*angstrom^2'), symmetry=1, barrier=(16.9576,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.732781,'amu*angstrom^2'), symmetry=1, barrier=(16.8481,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.739219,'amu*angstrom^2'), symmetry=1, barrier=(16.9961,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (108.181,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.384005,0.0840749,-5.09991e-05,5.50851e-09,4.14197e-12,30198.9,28.4131], Tmin=(100,'K'), Tmax=(1039.09,'K')), NASAPolynomial(coeffs=[18.1326,0.0354522,-1.35159e-05,2.44392e-09,-1.69358e-13,25127.7,-67.5143], Tmin=(1039.09,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(249.687,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(461.453,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + radical(Allyl_P) + radical(Allyl_P)"""),
)
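# Illustrative sketch, added for readability (assumption: this helper is not part
# of the generated file). Each thermo entry above stores two 7-coefficient NASA
# polynomials; within a polynomial's valid temperature range,
#   Cp/R = a1 + a2*T + a3*T^2 + a4*T^3 + a5*T^4,
# with a6 and a7 fixing the enthalpy and entropy offsets.
def _example_nasa_cp(T, coeffs):
    """Heat capacity in J/(mol*K) from one NASA-7 coefficient set at T in K."""
    a1, a2, a3, a4, a5, a6, a7 = coeffs
    R = 8.314462618  # molar gas constant, J/(mol*K)
    return R * (a1 + a2*T + a3*T**2 + a4*T**3 + a5*T**4)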
species(
label = 'CH3CHCCH2(18175)',
structure = SMILES('C=C=CC'),
E0 = (145.615,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([2950,3100,1380,975,1025,1650,540,610,2055,2750,2800,2850,1350,1500,750,1050,1375,1000,3010,987.5,1337.5,450,1655],'cm^-1')),
HinderedRotor(inertia=(0.759584,'amu*angstrom^2'), symmetry=1, barrier=(17.4643,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (54.0904,'amu'),
collisionModel = TransportData(shapeIndex=2, epsilon=(2996.71,'J/mol'), sigma=(5.18551,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=0, comment="""Epsilon & sigma estimated with Tc=468.08 K, Pc=48.77 bar (from Joback method)"""),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[2.74635,0.0218189,8.22353e-06,-2.14768e-08,8.55624e-12,17563.6,12.7381], Tmin=(100,'K'), Tmax=(1025.6,'K')), NASAPolynomial(coeffs=[6.82078,0.0192338,-7.45622e-06,1.36536e-09,-9.53195e-14,16028,-10.4333], Tmin=(1025.6,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(145.615,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(228.648,'J/(mol*K)'), label="""CH3CHCCH2""", comment="""Thermo library: DFT_QCI_thermo"""),
)
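# Illustrative sketch (assumption: standard RMG form of SingleExponentialDown;
# helper not part of the generated file). The energy-transfer model attached to
# every species here gives the average energy transferred in a deactivating
# collision as <dE_down>(T) = alpha0*(T/T0)^n, i.e. 3.5886 kJ/mol at 300 K.
def _example_dE_down(T, alpha0=3.5886e3, T0=300.0, n=0.85):
    """Average downward energy transfer in J/mol at temperature T in K."""
    return alpha0 * (T / T0)**n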
species(
label = '[CH2]C1([CH]C)CC1=CC(25275)',
structure = SMILES('[CH2]C1([CH]C)CC1=CC'),
E0 = (462.221,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (108.181,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.263258,0.0692237,-2.26363e-05,-1.35463e-08,8.13734e-12,55737.7,31.4039], Tmin=(100,'K'), Tmax=(1105.46,'K')), NASAPolynomial(coeffs=[15.171,0.0400578,-1.66801e-05,3.13624e-09,-2.2049e-13,50927.8,-48.8594], Tmin=(1105.46,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(462.221,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(465.61,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)CsCsCs) + group(Cs-CsCsHH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsCs) + group(Cds-CdsCsH) + ring(Methylene_cyclopropane) + radical(Neopentyl) + radical(Cs_S)"""),
)
species(
label = 'C=[C][CH]C(18176)',
structure = SMILES('[CH2][C]=CC'),
E0 = (361.056,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([1685,370,2750,2800,2850,1350,1500,750,1050,1375,1000,3000,3100,440,815,1455,1000,3010,987.5,1337.5,450,1655],'cm^-1')),
HinderedRotor(inertia=(0.352622,'amu*angstrom^2'), symmetry=1, barrier=(8.10748,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.828631,'amu*angstrom^2'), symmetry=1, barrier=(19.0519,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (54.0904,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[2.42015,0.030446,-1.69076e-05,4.64684e-09,-5.12013e-13,43485.7,14.8304], Tmin=(100,'K'), Tmax=(2065.83,'K')), NASAPolynomial(coeffs=[10.7464,0.014324,-5.20136e-06,8.69079e-10,-5.48385e-14,40045.6,-31.3799], Tmin=(2065.83,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(361.056,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(274.378,'J/(mol*K)'), comment="""Thermo library: DFT_QCI_thermo + radical(Cds_S) + radical(Allyl_P)"""),
)
species(
label = '[CH2]C(=CC)C(C)=[C]C(25412)',
structure = SMILES('[CH2]C(=CC)C(C)=[C]C'),
E0 = (336.03,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([325,375,415,465,420,450,1700,1750,1685,370,2750,2762.5,2775,2787.5,2800,2812.5,2825,2837.5,2850,1350,1380,1410,1440,1470,1500,700,750,800,1000,1050,1100,1350,1375,1400,900,1000,1100,3000,3100,440,815,1455,1000,3010,987.5,1337.5,450,1655,222.04],'cm^-1')),
HinderedRotor(inertia=(0.395973,'amu*angstrom^2'), symmetry=1, barrier=(13.8694,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.396086,'amu*angstrom^2'), symmetry=1, barrier=(13.8683,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.395737,'amu*angstrom^2'), symmetry=1, barrier=(13.8691,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.395039,'amu*angstrom^2'), symmetry=1, barrier=(13.8689,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.395901,'amu*angstrom^2'), symmetry=1, barrier=(13.8689,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (108.181,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.116365,0.0876489,-7.20737e-05,3.21805e-08,-5.96317e-12,40565.5,28.3373], Tmin=(100,'K'), Tmax=(1264.63,'K')), NASAPolynomial(coeffs=[14.5979,0.041109,-1.68732e-05,3.08148e-09,-2.10818e-13,36843.8,-46.1055], Tmin=(1264.63,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(336.03,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(461.453,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + radical(Cds_S) + radical(Allyl_P)"""),
)
species(
label = '[CH2]C(=[C]C)C(C)=CC(25413)',
structure = SMILES('[CH2]C(=[C]C)C(C)=CC'),
E0 = (336.03,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([325,375,415,465,420,450,1700,1750,1685,370,2750,2762.5,2775,2787.5,2800,2812.5,2825,2837.5,2850,1350,1380,1410,1440,1470,1500,700,750,800,1000,1050,1100,1350,1375,1400,900,1000,1100,3000,3100,440,815,1455,1000,3010,987.5,1337.5,450,1655,222.04],'cm^-1')),
HinderedRotor(inertia=(0.395973,'amu*angstrom^2'), symmetry=1, barrier=(13.8694,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.396086,'amu*angstrom^2'), symmetry=1, barrier=(13.8683,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.395737,'amu*angstrom^2'), symmetry=1, barrier=(13.8691,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.395039,'amu*angstrom^2'), symmetry=1, barrier=(13.8689,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.395901,'amu*angstrom^2'), symmetry=1, barrier=(13.8689,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (108.181,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.116365,0.0876489,-7.20737e-05,3.21805e-08,-5.96317e-12,40565.5,28.3373], Tmin=(100,'K'), Tmax=(1264.63,'K')), NASAPolynomial(coeffs=[14.5979,0.041109,-1.68732e-05,3.08148e-09,-2.10818e-13,36843.8,-46.1055], Tmin=(1264.63,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(336.03,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(461.453,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + radical(Allyl_P) + radical(Cds_S)"""),
)
species(
label = '[CH2]C(=CC)[C](C)C=C(24605)',
structure = SMILES('[CH2]C=C(C)C([CH2])=CC'),
E0 = (216.244,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([325,375,415,465,420,450,1700,1750,2750,2770,2790,2810,2830,2850,1350,1400,1450,1500,700,800,1000,1100,1350,1400,900,1100,3000,3033.33,3066.67,3100,415,465,780,850,1435,1475,900,1100,2995,3025,975,1000,1300,1375,400,500,1630,1680,180],'cm^-1')),
HinderedRotor(inertia=(0.712083,'amu*angstrom^2'), symmetry=1, barrier=(16.3722,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.555659,'amu*angstrom^2'), symmetry=1, barrier=(96.3851,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.0202512,'amu*angstrom^2'), symmetry=1, barrier=(16.3711,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.712008,'amu*angstrom^2'), symmetry=1, barrier=(16.3705,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(4.19211,'amu*angstrom^2'), symmetry=1, barrier=(96.3849,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (108.181,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.0883175,0.0775021,-3.58132e-05,-7.55711e-09,8.27771e-12,26166.1,29.3215], Tmin=(100,'K'), Tmax=(1017.17,'K')), NASAPolynomial(coeffs=[16.4341,0.0376674,-1.41425e-05,2.53759e-09,-1.75328e-13,21504.4,-57.0638], Tmin=(1017.17,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(216.244,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(461.453,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + radical(Allyl_P) + radical(C=CC=CCJ)"""),
)
species(
label = '[CH2][C](C=C)C(C)=CC(24606)',
structure = SMILES('[CH2]C=C([CH2])C(C)=CC'),
E0 = (216.244,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (108.181,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.0883175,0.0775021,-3.58132e-05,-7.55711e-09,8.27771e-12,26166.1,29.3215], Tmin=(100,'K'), Tmax=(1017.17,'K')), NASAPolynomial(coeffs=[16.4341,0.0376674,-1.41425e-05,2.53759e-09,-1.75328e-13,21504.4,-57.0638], Tmin=(1017.17,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(216.244,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(461.453,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + radical(Allyl_P) + radical(C=CC=CCJ)"""),
)
species(
label = '[CH2]C(=CC)[C]1CC1C(25414)',
structure = SMILES('[CH2]C(=CC)[C]1CC1C'),
E0 = (289.9,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (108.181,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.71289,0.0520158,3.84829e-05,-8.55933e-08,3.61457e-11,35003.5,26.4903], Tmin=(100,'K'), Tmax=(968.714,'K')), NASAPolynomial(coeffs=[16.7686,0.0352996,-1.24057e-05,2.26286e-09,-1.62921e-13,29566.5,-62.466], Tmin=(968.714,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(289.9,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(465.61,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-CsCsCsH) + group(Cs-(Cds-Cds)CsCsH) + group(Cs-CsCsHH) + group(Cs-CsHHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsCs) + group(Cds-CdsCsH) + ring(Cyclopropane) + radical(Allyl_T) + radical(Allyl_P)"""),
)
species(
label = '[CH2][C]1C(=CC)CC1C(25415)',
structure = SMILES('[CH2]C1=C([CH]C)CC1C'),
E0 = (304.572,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (108.181,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.583091,0.0531885,4.0938e-05,-9.08388e-08,3.83549e-11,36774.2,26.4705], Tmin=(100,'K'), Tmax=(972.301,'K')), NASAPolynomial(coeffs=[18.2947,0.0339462,-1.21014e-05,2.24934e-09,-1.64353e-13,30795.4,-71.5147], Tmin=(972.301,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(304.572,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(465.61,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)CsCsH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsCs) + group(Cds-CdsCsCs) + ring(Cyclobutene) + radical(Allyl_P) + radical(Allyl_S)"""),
)
species(
label = 'CH2(S)(23)',
structure = SMILES('[CH2]'),
E0 = (419.862,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([1369.36,2789.41,2993.36],'cm^-1')),
],
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (14.0266,'amu'),
collisionModel = TransportData(shapeIndex=2, epsilon=(1197.29,'J/mol'), sigma=(3.8,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=0.0, comment="""GRI-Mech"""),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[4.19195,-0.00230793,8.0509e-06,-6.60123e-09,1.95638e-12,50484.3,-0.754589], Tmin=(200,'K'), Tmax=(1000,'K')), NASAPolynomial(coeffs=[2.28556,0.00460255,-1.97412e-06,4.09548e-10,-3.34695e-14,50922.4,8.67684], Tmin=(1000,'K'), Tmax=(3000,'K'))], Tmin=(200,'K'), Tmax=(3000,'K'), E0=(419.862,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(58.2013,'J/(mol*K)'), label="""CH2(S)""", comment="""Thermo library: Klippenstein_Glarborg2016"""),
)
species(
label = '[CH2]C(=C)C([CH2])=CC(25416)',
structure = SMILES('[CH2]C(=C)C([CH2])=CC'),
E0 = (285.713,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([325,375,415,465,420,450,1700,1750,2950,3100,1380,975,1025,1650,2750,2800,2850,1350,1500,750,1050,1375,1000,3000,3033.33,3066.67,3100,415,465,780,850,1435,1475,900,1100,3010,987.5,1337.5,450,1655,311.383],'cm^-1')),
HinderedRotor(inertia=(0.327475,'amu*angstrom^2'), symmetry=1, barrier=(22.5291,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.327466,'amu*angstrom^2'), symmetry=1, barrier=(22.5294,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.327318,'amu*angstrom^2'), symmetry=1, barrier=(22.5272,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.327483,'amu*angstrom^2'), symmetry=1, barrier=(22.5297,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (94.1543,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.335271,0.0676667,-2.76626e-05,-1.62749e-08,1.21982e-11,34506.8,24.024], Tmin=(100,'K'), Tmax=(980.594,'K')), NASAPolynomial(coeffs=[17.5531,0.0266059,-9.47854e-06,1.70194e-09,-1.19937e-13,29727.4,-65.8563], Tmin=(980.594,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(285.713,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(390.78,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-CdsCsH) + group(Cds-CdsHH) + radical(Allyl_P) + radical(Allyl_P)"""),
)
species(
label = 'C=C([CH]C)C[C]=CC(24184)',
structure = SMILES('[CH2]C(=CC)C[C]=CC'),
E0 = (366.985,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([2995,3025,975,1000,1300,1375,400,500,1630,1680,2750,2770,2790,2810,2830,2850,1350,1400,1450,1500,700,800,1000,1100,1350,1400,900,1100,1685,370,350,440,435,1725,2750,2850,1437.5,1250,1305,750,350,3000,3100,440,815,1455,1000,180,579.702],'cm^-1')),
HinderedRotor(inertia=(0.147406,'amu*angstrom^2'), symmetry=1, barrier=(3.38916,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.64226,'amu*angstrom^2'), symmetry=1, barrier=(14.7668,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.64164,'amu*angstrom^2'), symmetry=1, barrier=(14.7526,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.643937,'amu*angstrom^2'), symmetry=1, barrier=(14.8054,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.145327,'amu*angstrom^2'), symmetry=1, barrier=(3.34136,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (108.181,'amu'),
collisionModel = TransportData(shapeIndex=2, epsilon=(3683.66,'J/mol'), sigma=(6.4482,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=0, comment="""Epsilon & sigma estimated with Tc=575.38 K, Pc=31.18 bar (from Joback method)"""),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.29648,0.0786067,-5.42868e-05,1.96375e-08,-2.97459e-12,44273.2,31.2372], Tmin=(100,'K'), Tmax=(1490.43,'K')), NASAPolynomial(coeffs=[13.9025,0.0420909,-1.75363e-05,3.199e-09,-2.17227e-13,40217.5,-39.8334], Tmin=(1490.43,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(366.985,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(461.453,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)(Cds-Cds)HH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsCs) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + radical(Cds_S) + radical(Allyl_P)"""),
)
species(
label = 'CC=C1CCC1=CC(25269)',
structure = SMILES('CC=C1CCC1=CC'),
E0 = (114.107,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (108.181,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.677799,0.0585738,5.80411e-06,-4.1598e-08,1.78951e-11,13856,25.5085], Tmin=(100,'K'), Tmax=(1034.79,'K')), NASAPolynomial(coeffs=[13.4814,0.0415234,-1.65073e-05,3.07348e-09,-2.16896e-13,9469.28,-45.0922], Tmin=(1034.79,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(114.107,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(473.925,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)CsHH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + ring(12methylenecyclobutane)"""),
)
species(
label = 'CH2(19)',
structure = SMILES('[CH2]'),
E0 = (381.563,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([1032.72,2936.3,3459],'cm^-1')),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (14.0266,'amu'),
collisionModel = TransportData(shapeIndex=2, epsilon=(1197.29,'J/mol'), sigma=(3.8,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=0.0, comment="""GRI-Mech"""),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[3.8328,0.000224446,4.68033e-06,-6.04743e-09,2.59009e-12,45920.8,1.40666], Tmin=(200,'K'), Tmax=(1000,'K')), NASAPolynomial(coeffs=[3.16229,0.00281798,-7.56235e-07,5.05446e-11,5.65236e-15,46099.1,4.77656], Tmin=(1000,'K'), Tmax=(3000,'K'))], Tmin=(200,'K'), Tmax=(3000,'K'), E0=(381.563,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(58.2013,'J/(mol*K)'), label="""CH2""", comment="""Thermo library: Klippenstein_Glarborg2016"""),
)
species(
label = '[CH2]C([C]=CC)=CC(25417)',
structure = SMILES('[CH2]C([C]=CC)=CC'),
E0 = (334.774,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([350,440,435,1725,1685,370,2750,2770,2790,2810,2830,2850,1350,1400,1450,1500,700,800,1000,1100,1350,1400,900,1100,3000,3100,440,815,1455,1000,2995,3025,975,1000,1300,1375,400,500,1630,1680,180],'cm^-1')),
HinderedRotor(inertia=(0.7606,'amu*angstrom^2'), symmetry=1, barrier=(17.4877,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.760854,'amu*angstrom^2'), symmetry=1, barrier=(17.4935,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.760586,'amu*angstrom^2'), symmetry=1, barrier=(17.4874,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(2.15146,'amu*angstrom^2'), symmetry=1, barrier=(49.4663,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (94.1543,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.352604,0.0734369,-5.91187e-05,2.57941e-08,-4.60694e-12,40400.9,25.1788], Tmin=(100,'K'), Tmax=(1327.42,'K')), NASAPolynomial(coeffs=[14.2321,0.0316126,-1.18565e-05,2.05761e-09,-1.36512e-13,36716.1,-45.7131], Tmin=(1327.42,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(334.774,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(390.78,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + group(Cds-Cds(Cds-Cds)H) + radical(C=CJC=C) + radical(Allyl_P)"""),
)
species(
label = '[CH2]C1([CH]C)C(=C)C1C(25296)',
structure = SMILES('[CH2]C1([CH]C)C(=C)C1C'),
E0 = (466.494,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (108.181,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.29276,0.0655305,-4.50464e-06,-3.74661e-08,1.7759e-11,56253.7,30.0992], Tmin=(100,'K'), Tmax=(1027.4,'K')), NASAPolynomial(coeffs=[16.6435,0.0372633,-1.49065e-05,2.81296e-09,-2.01072e-13,51026,-58.316], Tmin=(1027.4,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(466.494,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(465.61,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)CsCsCs) + group(Cs-(Cds-Cds)CsCsH) + group(Cs-CsCsHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + group(Cds-CdsCsCs) + group(Cds-CdsHH) + ring(Methylene_cyclopropane) + radical(Neopentyl) + radical(Cs_S)"""),
)
species(
label = 'H(3)',
structure = SMILES('[H]'),
E0 = (211.792,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (1.00794,'amu'),
collisionModel = TransportData(shapeIndex=0, epsilon=(1205.6,'J/mol'), sigma=(2.05,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=0.0, comment="""GRI-Mech"""),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[2.5,9.24385e-15,-1.3678e-17,6.66185e-21,-1.00107e-24,25472.7,-0.459566], Tmin=(100,'K'), Tmax=(3459.6,'K')), NASAPolynomial(coeffs=[2.5,9.20456e-12,-3.58608e-15,6.15199e-19,-3.92042e-23,25472.7,-0.459566], Tmin=(3459.6,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(211.792,'kJ/mol'), Cp0=(20.7862,'J/(mol*K)'), CpInf=(20.7862,'J/(mol*K)'), label="""H""", comment="""Thermo library: BurkeH2O2"""),
)
species(
label = '[CH2]C(=CC)C(=C)C=C(24604)',
structure = SMILES('[CH2]C(=CC)C(=C)C=C'),
E0 = (242.677,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([325,375,415,465,420,450,1700,1750,2950,3000,3050,3100,1330,1430,900,1050,1000,1050,1600,1700,2750,2800,2850,1350,1500,750,1050,1375,1000,3000,3100,440,815,1455,1000,2995,3025,975,1000,1300,1375,400,500,1630,1680,181.962,683.313],'cm^-1')),
HinderedRotor(inertia=(0.669842,'amu*angstrom^2'), symmetry=1, barrier=(19.1337,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.0582339,'amu*angstrom^2'), symmetry=1, barrier=(19.1767,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.83204,'amu*angstrom^2'), symmetry=1, barrier=(19.1302,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(4.52237,'amu*angstrom^2'), symmetry=1, barrier=(104.569,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 2,
opticalIsomers = 1,
molecularWeight = (107.173,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.293043,0.0682771,-2.00337e-05,-2.05401e-08,1.21516e-11,29332.3,27.0261], Tmin=(100,'K'), Tmax=(1018.57,'K')), NASAPolynomial(coeffs=[15.7386,0.0358123,-1.37404e-05,2.51366e-09,-1.76142e-13,24723.4,-54.9529], Tmin=(1018.57,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(242.677,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(440.667,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-Cds(Cds-Cds)(Cds-Cds)) + group(Cds-CdsCsH) + group(Cds-Cds(Cds-Cds)H) + group(Cds-CdsHH) + group(Cds-CdsHH) + radical(Allyl_P)"""),
)
species(
label = '[CH2]CC(=C)C([CH2])=CC(25418)',
structure = SMILES('[CH2]CC(=C)C([CH2])=CC'),
E0 = (316.814,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([3010,987.5,1337.5,450,1655,2750,2800,2850,1350,1500,750,1050,1375,1000,2950,3100,1380,975,1025,1650,325,375,415,465,420,450,1700,1750,2750,2850,1437.5,1250,1305,750,350,3000,3033.33,3066.67,3100,415,465,780,850,1435,1475,900,1100,180,180],'cm^-1')),
HinderedRotor(inertia=(0.0368535,'amu*angstrom^2'), symmetry=1, barrier=(17.9864,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.00736317,'amu*angstrom^2'), symmetry=1, barrier=(3.60618,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.781153,'amu*angstrom^2'), symmetry=1, barrier=(17.9602,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.779478,'amu*angstrom^2'), symmetry=1, barrier=(17.9217,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.781104,'amu*angstrom^2'), symmetry=1, barrier=(17.9591,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (108.181,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.348925,0.0836004,-5.1879e-05,7.14877e-09,3.44908e-12,38270.9,31.5928], Tmin=(100,'K'), Tmax=(1044.14,'K')), NASAPolynomial(coeffs=[17.9255,0.0352115,-1.34219e-05,2.42456e-09,-1.67785e-13,33276.3,-63.0036], Tmin=(1044.14,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(316.814,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(461.453,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)CsHH) + group(Cs-CsHHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-CdsCsH) + group(Cds-CdsHH) + radical(RCCJ) + radical(Allyl_P)"""),
)
species(
label = '[CH]=C(CC)C([CH2])=CC(25419)',
structure = SMILES('[CH]=C(CC)C([CH2])=CC'),
E0 = (358.664,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([3120,650,792.5,1650,3010,987.5,1337.5,450,1655,2750,2770,2790,2810,2830,2850,1350,1400,1450,1500,700,800,1000,1100,1350,1400,900,1100,325,375,415,465,420,450,1700,1750,2750,2850,1437.5,1250,1305,750,350,3000,3100,440,815,1455,1000,180],'cm^-1')),
HinderedRotor(inertia=(0.701639,'amu*angstrom^2'), symmetry=1, barrier=(16.1321,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.344302,'amu*angstrom^2'), symmetry=1, barrier=(16.1602,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.0492932,'amu*angstrom^2'), symmetry=1, barrier=(16.1378,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.702005,'amu*angstrom^2'), symmetry=1, barrier=(16.1405,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.702379,'amu*angstrom^2'), symmetry=1, barrier=(16.1491,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (108.181,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.468616,0.0864938,-5.84569e-05,1.27697e-08,1.75707e-12,43308.4,30.6389], Tmin=(100,'K'), Tmax=(1047.28,'K')), NASAPolynomial(coeffs=[18.4195,0.034593,-1.31104e-05,2.35762e-09,-1.62637e-13,38242.2,-66.6572], Tmin=(1047.28,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(358.664,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(461.453,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)CsHH) + group(Cs-CsHHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-CdsCsH) + group(Cds-CdsHH) + radical(Allyl_P) + radical(Cds_P)"""),
)
species(
label = '[CH2]C(=[C]C)C(=C)CC(25420)',
structure = SMILES('[CH2]C(=[C]C)C(=C)CC'),
E0 = (349.41,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([1685,370,2750,2770,2790,2810,2830,2850,1350,1400,1450,1500,700,800,1000,1100,1350,1400,900,1100,2950,3100,1380,975,1025,1650,325,375,415,465,420,450,1700,1750,2750,2850,1437.5,1250,1305,750,350,3000,3100,440,815,1455,1000,180,180],'cm^-1')),
HinderedRotor(inertia=(0.159905,'amu*angstrom^2'), symmetry=1, barrier=(15.9368,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.693159,'amu*angstrom^2'), symmetry=1, barrier=(15.9371,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.693127,'amu*angstrom^2'), symmetry=1, barrier=(15.9364,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.693165,'amu*angstrom^2'), symmetry=1, barrier=(15.9372,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.0150632,'amu*angstrom^2'), symmetry=1, barrier=(15.9371,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (108.181,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.583231,0.089245,-7.16619e-05,3.00631e-08,-5.07891e-12,42198.9,31.1306], Tmin=(100,'K'), Tmax=(1412.15,'K')), NASAPolynomial(coeffs=[19.0319,0.0336833,-1.2643e-05,2.20036e-09,-1.46165e-13,36659.1,-70.2702], Tmin=(1412.15,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(349.41,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(461.453,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)CsHH) + group(Cs-CsHHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-CdsCsH) + group(Cds-CdsHH) + radical(Allyl_P) + radical(Cds_S)"""),
)
species(
label = '[CH]=C([CH]C)C(C)=CC(25421)',
structure = SMILES('[CH]C(=CC)C(C)=CC'),
E0 = (317.373,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([325,375,415,465,420,450,1700,1750,2750,2762.5,2775,2787.5,2800,2812.5,2825,2837.5,2850,1350,1380,1410,1440,1470,1500,700,750,800,1000,1050,1100,1350,1375,1400,900,1000,1100,2995,3025,975,1000,1300,1375,400,500,1630,1680,200,800,1200,1600],'cm^-1')),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (108.181,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.247945,0.0873521,-6.16843e-05,2.31486e-08,-3.62747e-12,38328.8,29.1665], Tmin=(100,'K'), Tmax=(1460.93,'K')), NASAPolynomial(coeffs=[15.297,0.0447902,-1.7984e-05,3.20673e-09,-2.14924e-13,33786.8,-51.7212], Tmin=(1460.93,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(317.373,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(461.453,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + radical(AllylJ2_triplet)"""),
)
species(
label = '[CH2][C](C=C)C(=C)CC(24623)',
structure = SMILES('[CH2]C(C=C)=C([CH2])CC'),
E0 = (228.159,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (108.181,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.0497728,0.0733281,-1.6094e-05,-3.35123e-08,1.88363e-11,27601.1,30.4448], Tmin=(100,'K'), Tmax=(975.095,'K')), NASAPolynomial(coeffs=[18.3695,0.0342638,-1.21408e-05,2.16747e-09,-1.52112e-13,22274,-66.8493], Tmin=(975.095,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(228.159,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(461.453,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)CsHH) + group(Cs-CsHHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsCs) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-Cds(Cds-Cds)H) + group(Cds-CdsHH) + radical(C=CC=CCJ) + radical(Allyl_P)"""),
)
species(
label = 'C[CH][C]1CCC1=CC(25422)',
structure = SMILES('C[CH]C1CCC=1[CH]C'),
E0 = (303.292,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (108.181,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.788866,0.0500701,4.22235e-05,-8.64809e-08,3.53174e-11,36611.5,25.2586], Tmin=(100,'K'), Tmax=(987.239,'K')), NASAPolynomial(coeffs=[16.2187,0.0373502,-1.4111e-05,2.65357e-09,-1.92503e-13,31138.2,-61.2734], Tmin=(987.239,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(303.292,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(465.61,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)CsHH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + group(Cds-CdsCsCs) + group(Cds-CdsCsCs) + ring(Cyclobutene) + radical(Allyl_S) + radical(Allyl_S)"""),
)
species(
label = '[CH2][C]1C(=C)C(C)C1C(25423)',
structure = SMILES('[CH2]C1=C([CH2])C(C)C1C'),
E0 = (305.852,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (108.181,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.377097,0.0563026,3.9705e-05,-9.53284e-08,4.14811e-11,36937,26.2973], Tmin=(100,'K'), Tmax=(959.735,'K')), NASAPolynomial(coeffs=[20.4056,0.0304853,-1.006e-05,1.83774e-09,-1.35603e-13,30437.2,-83.3398], Tmin=(959.735,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(305.852,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(465.61,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)CsCsH) + group(Cs-(Cds-Cds)CsCsH) + group(Cs-CsHHH) + group(Cs-CsHHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsCs) + group(Cds-CdsCsCs) + ring(Cyclobutene) + radical(Allyl_P) + radical(Allyl_P)"""),
)
species(
label = 'C=CC(=C)C(C)=CC(24616)',
structure = SMILES('C=CC(=C)C(C)=CC'),
E0 = (91.1774,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (108.181,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.236638,0.0713806,-3.04205e-05,-5.26762e-09,5.54498e-12,11111.2,26.9518], Tmin=(100,'K'), Tmax=(1093.32,'K')), NASAPolynomial(coeffs=[14.1536,0.040705,-1.6104e-05,2.93544e-09,-2.02595e-13,6858.32,-46.9636], Tmin=(1093.32,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(91.1774,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(465.61,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-Cds(Cds-Cds)(Cds-Cds)) + group(Cds-CdsCsH) + group(Cds-Cds(Cds-Cds)H) + group(Cds-CdsHH) + group(Cds-CdsHH)"""),
)
species(
label = 'C=[C]C(C)C(=C)[CH]C(24183)',
structure = SMILES('[CH2]C(=CC)C(C)[C]=C'),
E0 = (369.44,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([1685,370,3010,987.5,1337.5,450,1655,2750,2770,2790,2810,2830,2850,1350,1400,1450,1500,700,800,1000,1100,1350,1400,900,1100,2950,3100,1380,975,1025,1650,1380,1390,370,380,2900,435,350,440,435,1725,3000,3100,440,815,1455,1000,345.333,347.343],'cm^-1')),
HinderedRotor(inertia=(0.119405,'amu*angstrom^2'), symmetry=1, barrier=(9.93037,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.281457,'amu*angstrom^2'), symmetry=1, barrier=(24.022,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.116909,'amu*angstrom^2'), symmetry=1, barrier=(9.94809,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.117447,'amu*angstrom^2'), symmetry=1, barrier=(9.9744,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.116555,'amu*angstrom^2'), symmetry=1, barrier=(9.93684,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (108.181,'amu'),
collisionModel = TransportData(shapeIndex=2, epsilon=(3625.33,'J/mol'), sigma=(6.4092,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=0, comment="""Epsilon & sigma estimated with Tc=566.27 K, Pc=31.24 bar (from Joback method)"""),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.299693,0.0839308,-6.74533e-05,3.06742e-08,-6.02582e-12,44564.4,29.0122], Tmin=(100,'K'), Tmax=(1163.73,'K')), NASAPolynomial(coeffs=[10.857,0.0476425,-2.06788e-05,3.8782e-09,-2.69295e-13,42107.3,-23.5217], Tmin=(1163.73,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(369.44,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(461.453,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)(Cds-Cds)CsH) + group(Cs-CsHHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsCs) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + group(Cds-CdsHH) + radical(Allyl_P) + radical(Cds_S)"""),
)
species(
label = 'C=C1C(=CC)CC1C(25265)',
structure = SMILES('C=C1C(=CC)CC1C'),
E0 = (118.381,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (108.181,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.689924,0.0550304,2.3689e-05,-6.56265e-08,2.77602e-11,14372.8,24.9628], Tmin=(100,'K'), Tmax=(993.204,'K')), NASAPolynomial(coeffs=[15.3775,0.0380508,-1.43595e-05,2.66472e-09,-1.90565e-13,9375.16,-56.2678], Tmin=(993.204,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(118.381,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(473.925,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)CsCsH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-CsHHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-CdsCsH) + group(Cds-CdsHH) + ring(12methylenecyclobutane)"""),
)
species(
label = 'CHCH3(T)(95)',
structure = SMILES('[CH]C'),
E0 = (343.893,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([2750,2800,2850,1350,1500,750,1050,1375,1000,592.414,4000],'cm^-1')),
HinderedRotor(inertia=(0.00438699,'amu*angstrom^2'), symmetry=1, barrier=(26.7685,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (28.0532,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[3.82363,-0.000909515,3.2138e-05,-3.7348e-08,1.3309e-11,41371.4,7.10948], Tmin=(100,'K'), Tmax=(960.812,'K')), NASAPolynomial(coeffs=[4.30487,0.00943069,-3.27559e-06,5.95121e-10,-4.27307e-14,40709.1,1.84202], Tmin=(960.812,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(343.893,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(128.874,'J/(mol*K)'), label="""CHCH3(T)""", comment="""Thermo library: DFT_QCI_thermo"""),
)
species(
label = '[CH2]C([C]=C)=CC(24774)',
structure = SMILES('[CH2]C([C]=C)=CC'),
E0 = (370.8,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([1685,370,2750,2800,2850,1350,1500,750,1050,1375,1000,3010,987.5,1337.5,450,1655,2950,3100,1380,975,1025,1650,350,440,435,1725,3000,3100,440,815,1455,1000,180],'cm^-1')),
HinderedRotor(inertia=(1.17315,'amu*angstrom^2'), symmetry=1, barrier=(26.9731,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(1.17496,'amu*angstrom^2'), symmetry=1, barrier=(27.0146,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(1.1727,'amu*angstrom^2'), symmetry=1, barrier=(26.9626,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (80.1277,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[1.0818,0.0569416,-3.56598e-05,4.1841e-09,3.20998e-12,44708.4,20.7527], Tmin=(100,'K'), Tmax=(982.69,'K')), NASAPolynomial(coeffs=[12.9204,0.0239405,-8.46845e-06,1.46434e-09,-9.91425e-14,41648.3,-39.886], Tmin=(982.69,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(370.8,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(320.107,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-CdsCsH) + group(Cds-Cds(Cds-Cds)H) + group(Cds-CdsHH) + radical(C=CJC=C) + radical(Allyl_P)"""),
)
species(
label = '[CH]=C([CH]C)C(=C)CC(25424)',
structure = SMILES('[CH]C(=CC)C(=C)CC'),
E0 = (330.753,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([2750,2850,1437.5,1250,1305,750,350,2950,3100,1380,975,1025,1650,3010,987.5,1337.5,450,1655,2750,2770,2790,2810,2830,2850,1350,1400,1450,1500,700,800,1000,1100,1350,1400,900,1100,325,375,415,465,420,450,1700,1750,200,800,1066.67,1333.33,1600],'cm^-1')),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (108.181,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.442166,0.0858934,-5.1432e-05,9.5936e-09,1.54315e-12,39950.3,30.9724], Tmin=(100,'K'), Tmax=(1106.5,'K')), NASAPolynomial(coeffs=[16.3579,0.0427111,-1.66841e-05,2.99222e-09,-2.04007e-13,35158.1,-56.633], Tmin=(1106.5,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(330.753,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(461.453,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)CsHH) + group(Cs-CsHHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-CdsCsH) + group(Cds-CdsHH) + radical(AllylJ2_triplet)"""),
)
species(
label = 'C=CC(=C)C(=C)CC(24630)',
structure = SMILES('C=CC(=C)C(=C)CC'),
E0 = (104.558,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (108.181,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.296747,0.0670054,-1.0269e-05,-3.13536e-08,1.59568e-11,12721.3,27.8384], Tmin=(100,'K'), Tmax=(1010.3,'K')), NASAPolynomial(coeffs=[15.6889,0.0379462,-1.44599e-05,2.64736e-09,-1.86033e-13,7984.11,-54.6302], Tmin=(1010.3,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(104.558,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(465.61,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)CsHH) + group(Cs-CsHHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-Cds(Cds-Cds)(Cds-Cds)) + group(Cds-Cds(Cds-Cds)H) + group(Cds-CdsHH) + group(Cds-CdsHH) + group(Cds-CdsHH)"""),
)
species(
label = 'C=C1C(=C)C(C)C1C(25274)',
structure = SMILES('C=C1C(=C)C(C)C1C'),
E0 = (122.654,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (108.181,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.691732,0.0515838,4.13669e-05,-8.96066e-08,3.77135e-11,14890,23.0693], Tmin=(100,'K'), Tmax=(969.873,'K')), NASAPolynomial(coeffs=[17.4573,0.0342784,-1.20439e-05,2.21718e-09,-1.61071e-13,9199.74,-69.8715], Tmin=(969.873,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(122.654,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(473.925,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)CsCsH) + group(Cs-(Cds-Cds)CsCsH) + group(Cs-CsHHH) + group(Cs-CsHHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-CdsHH) + group(Cds-CdsHH) + ring(12methylenecyclobutane)"""),
)
species(
label = 'N2',
structure = SMILES('N#N'),
E0 = (-8.69489,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (28.0135,'amu'),
collisionModel = TransportData(shapeIndex=1, epsilon=(810.913,'J/mol'), sigma=(3.621,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(1.76,'angstroms^3'), rotrelaxcollnum=4.0, comment="""PrimaryTransportLibrary"""),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[3.61263,-0.00100893,2.49898e-06,-1.43376e-09,2.58636e-13,-1051.1,2.6527], Tmin=(100,'K'), Tmax=(1817.04,'K')), NASAPolynomial(coeffs=[2.9759,0.00164141,-7.19722e-07,1.25378e-10,-7.91526e-15,-1025.84,5.53757], Tmin=(1817.04,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(-8.69489,'kJ/mol'), Cp0=(29.1007,'J/(mol*K)'), CpInf=(37.4151,'J/(mol*K)'), label="""N2""", comment="""Thermo library: BurkeH2O2"""),
)
species(
label = 'Ne',
structure = SMILES('[Ne]'),
E0 = (-6.19738,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (20.1797,'amu'),
collisionModel = TransportData(shapeIndex=0, epsilon=(1235.53,'J/mol'), sigma=(3.758e-10,'m'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=0, comment="""Epsilon & sigma estimated with fixed Lennard Jones Parameters. This is the fallback method! Try improving transport databases!"""),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[2.5,0,0,0,0,-745.375,3.35532], Tmin=(200,'K'), Tmax=(1000,'K')), NASAPolynomial(coeffs=[2.5,0,0,0,0,-745.375,3.35532], Tmin=(1000,'K'), Tmax=(6000,'K'))], Tmin=(200,'K'), Tmax=(6000,'K'), E0=(-6.19738,'kJ/mol'), Cp0=(20.7862,'J/(mol*K)'), CpInf=(20.7862,'J/(mol*K)'), label="""Ne""", comment="""Thermo library: primaryThermoLibrary"""),
)
transitionState(
label = 'TS1',
E0 = (291.23,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS2',
E0 = (462.221,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS3',
E0 = (538.699,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS4',
E0 = (497.951,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS5',
E0 = (380.338,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS6',
E0 = (399.474,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS7',
E0 = (350.103,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS8',
E0 = (722.113,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS9',
E0 = (343.259,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS10',
E0 = (380.132,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS11',
E0 = (705.575,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS12',
E0 = (537.022,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS13',
E0 = (257.971,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS14',
E0 = (716.337,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS15',
E0 = (466.494,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS16',
E0 = (454.469,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS17',
E0 = (430.619,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS18',
E0 = (503.849,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS19',
E0 = (393.718,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS20',
E0 = (361.682,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS21',
E0 = (350.103,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS22',
E0 = (380.132,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS23',
E0 = (375.044,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS24',
E0 = (274.66,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS25',
E0 = (463.915,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS26',
E0 = (257.971,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS27',
E0 = (714.692,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS28',
E0 = (375.062,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS29',
E0 = (258.055,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS30',
E0 = (257.971,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
reaction(
label = 'reaction1',
reactants = ['C=C([CH]C)C(=C)[CH]C(24182)'],
products = ['CH3CHCCH2(18175)', 'CH3CHCCH2(18175)'],
transitionState = 'TS1',
kinetics = Arrhenius(A=(5e+12,'s^-1'), n=0, Ea=(41.5431,'kJ/mol'), T0=(1,'K'), Tmin=(300,'K'), Tmax=(1500,'K'), comment="""Exact match found for rate rule [RJJ]
Euclidian distance = 0
family: 1,4_Linear_birad_scission
Ea raised from 0.0 to 41.5 kJ/mol to match endothermicity of reaction."""),
)
reaction(
label = 'reaction2',
reactants = ['C=C([CH]C)C(=C)[CH]C(24182)'],
products = ['[CH2]C1([CH]C)CC1=CC(25275)'],
transitionState = 'TS2',
kinetics = Arrhenius(A=(3.36e+09,'s^-1'), n=0.84, Ea=(212.534,'kJ/mol'), T0=(1,'K'), Tmin=(300,'K'), Tmax=(2500,'K'), comment="""Estimated using template [R4_S_D;doublebond_intra_HNd;radadd_intra_cs2H] for rate rule [R4_S_(Cd)_D;doublebond_intra_HNd;radadd_intra_cs2H]
Euclidian distance = 2.0
Multiplied by reaction path degeneracy 2.0
family: Intra_R_Add_Exocyclic
Ea raised from 210.2 to 212.5 kJ/mol to match endothermicity of reaction."""),
)
reaction(
label = 'reaction3',
reactants = ['CH3CHCCH2(18175)', 'C=[C][CH]C(18176)'],
products = ['C=C([CH]C)C(=C)[CH]C(24182)'],
transitionState = 'TS3',
kinetics = Arrhenius(A=(0.00086947,'m^3/(mol*s)'), n=2.67356, Ea=(32.0272,'kJ/mol'), T0=(1,'K'), comment="""Estimated using an average for rate rule [Ca_Cds-HH;CJ]
Euclidian distance = 0
family: R_Addition_MultipleBond"""),
)
reaction(
label = 'reaction4',
reactants = ['[CH2]C(=CC)C(C)=[C]C(25412)'],
products = ['C=C([CH]C)C(=C)[CH]C(24182)'],
transitionState = 'TS4',
kinetics = Arrhenius(A=(7.74e+09,'s^-1'), n=1.08, Ea=(161.921,'kJ/mol'), T0=(1,'K'), Tmin=(300,'K'), Tmax=(1500,'K'), comment="""From training reaction 198 used for R3H_DS;Cd_rad_out_Cs;Cs_H_out_2H
Exact match found for rate rule [R3H_DS;Cd_rad_out_Cs;Cs_H_out_2H]
Euclidian distance = 0
Multiplied by reaction path degeneracy 3.0
family: intra_H_migration"""),
)
reaction(
label = 'reaction5',
reactants = ['[CH2]C(=[C]C)C(C)=CC(25413)'],
products = ['C=C([CH]C)C(=C)[CH]C(24182)'],
transitionState = 'TS5',
kinetics = Arrhenius(A=(111300,'s^-1'), n=2.23, Ea=(44.3086,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [R4H_DSS;Cd_rad_out_single;Cs_H_out] for rate rule [R4H_DSS;Cd_rad_out_Cs;Cs_H_out_2H]
Euclidian distance = 2.2360679775
Multiplied by reaction path degeneracy 3.0
family: intra_H_migration"""),
)
reaction(
label = 'reaction6',
reactants = ['C=C([CH]C)C(=C)[CH]C(24182)'],
products = ['[CH2]C(=CC)[C](C)C=C(24605)'],
transitionState = 'TS6',
kinetics = Arrhenius(A=(1.6e+06,'s^-1'), n=1.81, Ea=(149.787,'kJ/mol'), T0=(1,'K'), comment="""From training reaction 101 used for R4H_SDS;C_rad_out_2H;Cs_H_out_2H
Exact match found for rate rule [R4H_SDS;C_rad_out_2H;Cs_H_out_2H]
Euclidian distance = 0
Multiplied by reaction path degeneracy 6.0
family: intra_H_migration"""),
)
reaction(
label = 'reaction7',
reactants = ['C=C([CH]C)C(=C)[CH]C(24182)'],
products = ['[CH2][C](C=C)C(C)=CC(24606)'],
transitionState = 'TS7',
kinetics = Arrhenius(A=(6.66e+06,'s^-1'), n=1.64, Ea=(100.416,'kJ/mol'), T0=(1,'K'), comment="""From training reaction 96 used for R5H_SS(D)MS;C_rad_out_2H;Cs_H_out_2H
Exact match found for rate rule [R5H_SS(D)MS;C_rad_out_2H;Cs_H_out_2H]
Euclidian distance = 0
Multiplied by reaction path degeneracy 6.0
family: intra_H_migration"""),
)
reaction(
label = 'reaction8',
reactants = ['C=[C][CH]C(18176)', 'C=[C][CH]C(18176)'],
products = ['C=C([CH]C)C(=C)[CH]C(24182)'],
transitionState = 'TS8',
kinetics = Arrhenius(A=(3.73038e+06,'m^3/(mol*s)'), n=0.027223, Ea=(0,'kJ/mol'), T0=(1,'K'), comment="""Estimated using an average for rate rule [Y_rad;Y_rad]
Euclidian distance = 0
family: R_Recombination
Ea raised from -14.4 to 0 kJ/mol."""),
)
reaction(
label = 'reaction9',
reactants = ['C=C([CH]C)C(=C)[CH]C(24182)'],
products = ['[CH2]C(=CC)[C]1CC1C(25414)'],
transitionState = 'TS9',
kinetics = Arrhenius(A=(7.36786e+12,'s^-1'), n=-0.105173, Ea=(93.5715,'kJ/mol'), T0=(1,'K'), Tmin=(303.03,'K'), Tmax=(2000,'K'), comment="""Estimated using template [R3_D;doublebond_intra;radadd_intra_cs2H] for rate rule [R3_D;doublebond_intra_secDe_HNd;radadd_intra_cs2H]
Euclidian distance = 2.0
Multiplied by reaction path degeneracy 2.0
family: Intra_R_Add_Endocyclic"""),
)
reaction(
label = 'reaction10',
reactants = ['C=C([CH]C)C(=C)[CH]C(24182)'],
products = ['[CH2][C]1C(=CC)CC1C(25415)'],
transitionState = 'TS10',
kinetics = Arrhenius(A=(6.43734e+08,'s^-1'), n=0.926191, Ea=(130.445,'kJ/mol'), T0=(1,'K'), comment="""Estimated using an average for rate rule [R4_S_D;doublebond_intra;radadd_intra_cs2H]
Euclidian distance = 0
Multiplied by reaction path degeneracy 2.0
family: Intra_R_Add_Endocyclic"""),
)
reaction(
label = 'reaction11',
reactants = ['CH2(S)(23)', '[CH2]C(=C)C([CH2])=CC(25416)'],
products = ['C=C([CH]C)C(=C)[CH]C(24182)'],
transitionState = 'TS11',
kinetics = Arrhenius(A=(7.94e+13,'cm^3/(mol*s)','*|/',0.25), n=-0.324, Ea=(0,'kJ/mol'), T0=(1,'K'), comment="""From training reaction 4 used for carbene;Cd_pri
Exact match found for rate rule [carbene;Cd_pri]
Euclidian distance = 0
Multiplied by reaction path degeneracy 4.0
family: 1,2_Insertion_carbene
Ea raised from -3.9 to 0 kJ/mol."""),
)
reaction(
label = 'reaction12',
reactants = ['C=C([CH]C)C[C]=CC(24184)'],
products = ['C=C([CH]C)C(=C)[CH]C(24182)'],
transitionState = 'TS12',
kinetics = Arrhenius(A=(1.74842e+09,'s^-1'), n=1.084, Ea=(170.038,'kJ/mol'), T0=(1,'K'), comment="""Estimated using average of templates [cCsCJ;CdsJ;C] + [cCs(-HH)CJ;CJ;C] for rate rule [cCs(-HH)CJ;CdsJ;C]
Euclidian distance = 1.0
family: 1,2_shiftC"""),
)
reaction(
label = 'reaction13',
reactants = ['C=C([CH]C)C(=C)[CH]C(24182)'],
products = ['CC=C1CCC1=CC(25269)'],
transitionState = 'TS13',
kinetics = Arrhenius(A=(1.62e+12,'s^-1'), n=-0.305, Ea=(8.28432,'kJ/mol'), T0=(1,'K'), Tmin=(600,'K'), Tmax=(2000,'K'), comment="""From training reaction 2 used for R4_SSS;C_rad_out_2H;Cpri_rad_out_2H
Exact match found for rate rule [R4_SSS;C_rad_out_2H;Cpri_rad_out_2H]
Euclidian distance = 0
family: Birad_recombination"""),
)
reaction(
label = 'reaction14',
reactants = ['CH2(19)', '[CH2]C([C]=CC)=CC(25417)'],
products = ['C=C([CH]C)C(=C)[CH]C(24182)'],
transitionState = 'TS14',
kinetics = Arrhenius(A=(1.06732e+06,'m^3/(mol*s)'), n=0.472793, Ea=(0,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [Y_rad;Birad] for rate rule [Cd_rad/OneDe;Birad]
Euclidian distance = 3.0
family: Birad_R_Recombination
Ea raised from -3.5 to 0 kJ/mol."""),
)
reaction(
label = 'reaction15',
reactants = ['C=C([CH]C)C(=C)[CH]C(24182)'],
products = ['[CH2]C1([CH]C)C(=C)C1C(25296)'],
transitionState = 'TS15',
kinetics = Arrhenius(A=(6.72658e+10,'s^-1'), n=0.535608, Ea=(216.807,'kJ/mol'), T0=(1,'K'), comment="""Estimated using average of templates [R4_S_D;doublebond_intra;radadd_intra_csHNd] + [R4_S_D;doublebond_intra_HNd;radadd_intra_cs] for rate rule [R4_S_(Cd)_D;doublebond_intra_HNd;radadd_intra_csHNd]
Euclidian distance = 2.2360679775
Multiplied by reaction path degeneracy 2.0
family: Intra_R_Add_Exocyclic
Ea raised from 214.2 to 216.8 kJ/mol to match endothermicity of reaction."""),
)
reaction(
label = 'reaction16',
reactants = ['H(3)', '[CH2]C(=CC)C(=C)C=C(24604)'],
products = ['C=C([CH]C)C(=C)[CH]C(24182)'],
transitionState = 'TS16',
kinetics = Arrhenius(A=(2.31e+08,'cm^3/(mol*s)'), n=1.64, Ea=(0,'kJ/mol'), T0=(1,'K'), Tmin=(300,'K'), Tmax=(1500,'K'), comment="""From training reaction 2544 used for Cds-HH_Cds-CdH;HJ
Exact match found for rate rule [Cds-HH_Cds-CdH;HJ]
Euclidian distance = 0
family: R_Addition_MultipleBond
Ea raised from -2.0 to 0 kJ/mol."""),
)
reaction(
label = 'reaction17',
reactants = ['[CH2]CC(=C)C([CH2])=CC(25418)'],
products = ['C=C([CH]C)C(=C)[CH]C(24182)'],
transitionState = 'TS17',
kinetics = Arrhenius(A=(1.72e+06,'s^-1'), n=1.99, Ea=(113.805,'kJ/mol'), T0=(1,'K'), comment="""From training reaction 84 used for R2H_S;C_rad_out_2H;Cs_H_out_H/Cd
Exact match found for rate rule [R2H_S;C_rad_out_2H;Cs_H_out_H/Cd]
Euclidian distance = 0
Multiplied by reaction path degeneracy 2.0
family: intra_H_migration"""),
)
reaction(
label = 'reaction18',
reactants = ['[CH]=C(CC)C([CH2])=CC(25419)'],
products = ['C=C([CH]C)C(=C)[CH]C(24182)'],
transitionState = 'TS18',
kinetics = Arrhenius(A=(1.846e+10,'s^-1'), n=0.74, Ea=(145.185,'kJ/mol'), T0=(1,'K'), Tmin=(300,'K'), Tmax=(1500,'K'), comment="""From training reaction 194 used for R3H_DS;Cd_rad_out_singleH;Cs_H_out_H/NonDeC
Exact match found for rate rule [R3H_DS;Cd_rad_out_singleH;Cs_H_out_H/NonDeC]
Euclidian distance = 0
Multiplied by reaction path degeneracy 2.0
family: intra_H_migration"""),
)
reaction(
label = 'reaction19',
reactants = ['[CH2]C(=[C]C)C(=C)CC(25420)'],
products = ['C=C([CH]C)C(=C)[CH]C(24182)'],
transitionState = 'TS19',
kinetics = Arrhenius(A=(74200,'s^-1'), n=2.23, Ea=(44.3086,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [R4H_DSS;Cd_rad_out_single;Cs_H_out_1H] for rate rule [R4H_DSS;Cd_rad_out_Cs;Cs_H_out_H/NonDeC]
Euclidian distance = 2.2360679775
Multiplied by reaction path degeneracy 2.0
family: intra_H_migration"""),
)
reaction(
label = 'reaction20',
reactants = ['[CH]=C([CH]C)C(C)=CC(25421)'],
products = ['C=C([CH]C)C(=C)[CH]C(24182)'],
transitionState = 'TS20',
kinetics = Arrhenius(A=(111300,'s^-1'), n=2.23, Ea=(44.3086,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [R4H_DSS;Cd_rad_out_singleH;Cs_H_out] for rate rule [R4H_DSS;Cd_rad_out_singleH;Cs_H_out_2H]
Euclidian distance = 1.0
Multiplied by reaction path degeneracy 3.0
family: intra_H_migration"""),
)
reaction(
label = 'reaction21',
reactants = ['C=C([CH]C)C(=C)[CH]C(24182)'],
products = ['[CH2][C](C=C)C(=C)CC(24623)'],
transitionState = 'TS21',
kinetics = Arrhenius(A=(6.66e+06,'s^-1'), n=1.64, Ea=(100.416,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [R5H_SS(D)MS;C_rad_out_single;Cs_H_out_2H] for rate rule [R5H_SS(D)MS;C_rad_out_H/NonDeC;Cs_H_out_2H]
Euclidian distance = 2.0
Multiplied by reaction path degeneracy 6.0
family: intra_H_migration"""),
)
reaction(
label = 'reaction22',
reactants = ['C=C([CH]C)C(=C)[CH]C(24182)'],
products = ['C[CH][C]1CCC1=CC(25422)'],
transitionState = 'TS22',
kinetics = Arrhenius(A=(3.21867e+08,'s^-1'), n=0.926191, Ea=(130.445,'kJ/mol'), T0=(1,'K'), comment="""Estimated using an average for rate rule [R4_S_D;doublebond_intra;radadd_intra_cs2H]
Euclidian distance = 0
family: Intra_R_Add_Endocyclic"""),
)
reaction(
label = 'reaction23',
reactants = ['C=C([CH]C)C(=C)[CH]C(24182)'],
products = ['[CH2][C]1C(=C)C(C)C1C(25423)'],
transitionState = 'TS23',
kinetics = Arrhenius(A=(5.16207e+08,'s^-1'), n=0.911389, Ea=(125.357,'kJ/mol'), T0=(1,'K'), comment="""Estimated using an average for rate rule [R4_S_D;doublebond_intra;radadd_intra_csHCs]
Euclidian distance = 0
family: Intra_R_Add_Endocyclic"""),
)
reaction(
label = 'reaction24',
reactants = ['C=C([CH]C)C(=C)[CH]C(24182)'],
products = ['C=CC(=C)C(C)=CC(24616)'],
transitionState = 'TS24',
kinetics = Arrhenius(A=(1.27566e+10,'s^-1'), n=0.137, Ea=(24.9733,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [R5;Y_rad;XH_Rrad] for rate rule [R5radEndo;Y_rad;XH_Rrad]
Euclidian distance = 1.0
Multiplied by reaction path degeneracy 6.0
family: Intra_Disproportionation"""),
)
reaction(
label = 'reaction25',
reactants = ['C=[C]C(C)C(=C)[CH]C(24183)'],
products = ['C=C([CH]C)C(=C)[CH]C(24182)'],
transitionState = 'TS25',
kinetics = Arrhenius(A=(8.66e+11,'s^-1'), n=0.438, Ea=(94.4747,'kJ/mol'), T0=(1,'K'), comment="""From training reaction 5 used for cCs(-HC)CJ;CdsJ;C
Exact match found for rate rule [cCs(-HC)CJ;CdsJ;C]
Euclidian distance = 0
family: 1,2_shiftC"""),
)
reaction(
label = 'reaction26',
reactants = ['C=C([CH]C)C(=C)[CH]C(24182)'],
products = ['C=C1C(=CC)CC1C(25265)'],
transitionState = 'TS26',
kinetics = Arrhenius(A=(3.24e+12,'s^-1'), n=-0.305, Ea=(8.28432,'kJ/mol'), T0=(1,'K'), Tmin=(600,'K'), Tmax=(2000,'K'), comment="""Estimated using template [R4_SSS;C_rad_out_2H;Cpri_rad_out_single] for rate rule [R4_SSS;C_rad_out_2H;Cpri_rad_out_H/NonDeC]
Euclidian distance = 2.0
Multiplied by reaction path degeneracy 2.0
family: Birad_recombination"""),
)
reaction(
label = 'reaction27',
reactants = ['CHCH3(T)(95)', '[CH2]C([C]=C)=CC(24774)'],
products = ['C=C([CH]C)C(=C)[CH]C(24182)'],
transitionState = 'TS27',
kinetics = Arrhenius(A=(1.06732e+06,'m^3/(mol*s)'), n=0.472793, Ea=(0,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [Y_rad;Birad] for rate rule [Cd_rad/OneDe;Birad]
Euclidian distance = 3.0
family: Birad_R_Recombination
Ea raised from -3.5 to 0 kJ/mol."""),
)
reaction(
label = 'reaction28',
reactants = ['[CH]=C([CH]C)C(=C)CC(25424)'],
products = ['C=C([CH]C)C(=C)[CH]C(24182)'],
transitionState = 'TS28',
kinetics = Arrhenius(A=(74200,'s^-1'), n=2.23, Ea=(44.3086,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [R4H_DSS;Cd_rad_out_singleH;Cs_H_out_1H] for rate rule [R4H_DSS;Cd_rad_out_singleH;Cs_H_out_H/NonDeC]
Euclidian distance = 1.0
Multiplied by reaction path degeneracy 2.0
family: intra_H_migration"""),
)
reaction(
label = 'reaction29',
reactants = ['C=C([CH]C)C(=C)[CH]C(24182)'],
products = ['C=CC(=C)C(=C)CC(24630)'],
transitionState = 'TS29',
kinetics = Arrhenius(A=(1.926e+10,'s^-1'), n=0.137, Ea=(8.368,'kJ/mol'), T0=(1,'K'), Tmin=(300,'K'), Tmax=(1500,'K'), comment="""Estimated using template [R5;Y_rad_NDe;XH_Rrad] for rate rule [R5radEndo;Y_rad_NDe;XH_Rrad]
Euclidian distance = 1.0
Multiplied by reaction path degeneracy 6.0
family: Intra_Disproportionation"""),
)
reaction(
label = 'reaction30',
reactants = ['C=C([CH]C)C(=C)[CH]C(24182)'],
products = ['C=C1C(=C)C(C)C1C(25274)'],
transitionState = 'TS30',
kinetics = Arrhenius(A=(1.62e+12,'s^-1'), n=-0.305, Ea=(8.28432,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [R4_SSS;C_rad_out_single;Cpri_rad_out_single] for rate rule [R4_SSS;C_rad_out_H/NonDeC;Cpri_rad_out_H/NonDeC]
Euclidian distance = 2.82842712475
family: Birad_recombination"""),
)
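# Descriptive note (comment only, not part of the generated model): the network
# block below gathers the channels explored above into a single pressure-dependent
# network, with the isomer C=C([CH]C)C(=C)[CH]C(24182) and the bimolecular entry
# channel CH3CHCCH2(18175) + CH3CHCCH2(18175), in an equimolar N2/Ne bath gas
# (the 0.5 values give the bath-gas composition).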
network(
label = '4267',
isomers = [
'C=C([CH]C)C(=C)[CH]C(24182)',
],
reactants = [
('CH3CHCCH2(18175)', 'CH3CHCCH2(18175)'),
],
bathGas = {
'N2': 0.5,
'Ne': 0.5,
},
)
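# Descriptive note (comment only): the pressureDependence block configures how the
# network above is solved. Rates are evaluated on an 8-point temperature list
# between Tmin = 300 K and Tmax = 2000 K and a 5-point pressure list between
# 0.01 and 100 bar, with energy grains no larger than 0.5 kcal/mol (at least 250
# grains), using the modified strong collision approximation. The results are then
# fitted to a Chebyshev interpolation model, where (6, 4) are the numbers of
# temperature and pressure basis functions used for the fit.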
pressureDependence(
label = '4267',
Tmin = (300,'K'),
Tmax = (2000,'K'),
Tcount = 8,
Tlist = ([302.47,323.145,369.86,455.987,609.649,885.262,1353.64,1896.74],'K'),
Pmin = (0.01,'bar'),
Pmax = (100,'bar'),
Pcount = 5,
Plist = ([0.0125282,0.0667467,1,14.982,79.8202],'bar'),
maximumGrainSize = (0.5,'kcal/mol'),
minimumGrainCount = 250,
method = 'modified strong collision',
interpolationModel = ('Chebyshev', 6, 4),
activeKRotor = True,
activeJRotor = True,
rmgmode = True,
)
for R5H_SS(D)MS;C_rad_out_2H;Cs_H_out_2H\nExact match found for rate rule [R5H_SS(D)MS;C_rad_out_2H;Cs_H_out_2H]\nEuclidian distance = 0\nMultiplied by reaction path degeneracy 6.0\nfamily: intra_H_migration\"\"\"\n ))\nreaction(label='reaction8', reactants=['C=[C][CH]C(18176)',\n 'C=[C][CH]C(18176)'], products=['C=C([CH]C)C(=C)[CH]C(24182)'],\n transitionState='TS8', kinetics=Arrhenius(A=(3730380.0, 'm^3/(mol*s)'),\n n=0.027223, Ea=(0, 'kJ/mol'), T0=(1, 'K'), comment=\n \"\"\"Estimated using an average for rate rule [Y_rad;Y_rad]\nEuclidian distance = 0\nfamily: R_Recombination\nEa raised from -14.4 to 0 kJ/mol.\"\"\"\n ))\nreaction(label='reaction9', reactants=['C=C([CH]C)C(=C)[CH]C(24182)'],\n products=['[CH2]C(=CC)[C]1CC1C(25414)'], transitionState='TS9',\n kinetics=Arrhenius(A=(7367860000000.0, 's^-1'), n=-0.105173, Ea=(\n 93.5715, 'kJ/mol'), T0=(1, 'K'), Tmin=(303.03, 'K'), Tmax=(2000, 'K'),\n comment=\n \"\"\"Estimated using template [R3_D;doublebond_intra;radadd_intra_cs2H] for rate rule [R3_D;doublebond_intra_secDe_HNd;radadd_intra_cs2H]\nEuclidian distance = 2.0\nMultiplied by reaction path degeneracy 2.0\nfamily: Intra_R_Add_Endocyclic\"\"\"\n ))\nreaction(label='reaction10', reactants=['C=C([CH]C)C(=C)[CH]C(24182)'],\n products=['[CH2][C]1C(=CC)CC1C(25415)'], transitionState='TS10',\n kinetics=Arrhenius(A=(643734000.0, 's^-1'), n=0.926191, Ea=(130.445,\n 'kJ/mol'), T0=(1, 'K'), comment=\n \"\"\"Estimated using an average for rate rule [R4_S_D;doublebond_intra;radadd_intra_cs2H]\nEuclidian distance = 0\nMultiplied by reaction path degeneracy 2.0\nfamily: Intra_R_Add_Endocyclic\"\"\"\n ))\nreaction(label='reaction11', reactants=['CH2(S)(23)',\n '[CH2]C(=C)C([CH2])=CC(25416)'], products=[\n 'C=C([CH]C)C(=C)[CH]C(24182)'], transitionState='TS11', kinetics=\n Arrhenius(A=(79400000000000.0, 'cm^3/(mol*s)', '*|/', 0.25), n=-0.324,\n Ea=(0, 'kJ/mol'), T0=(1, 'K'), comment=\n \"\"\"From training reaction 4 used for carbene;Cd_pri\nExact match found for rate rule [carbene;Cd_pri]\nEuclidian distance = 0\nMultiplied by reaction path degeneracy 4.0\nfamily: 1,2_Insertion_carbene\nEa raised from -3.9 to 0 kJ/mol.\"\"\"\n ))\nreaction(label='reaction23', reactants=['C=C([CH]C)C[C]=CC(24184)'],\n products=['C=C([CH]C)C(=C)[CH]C(24182)'], transitionState='TS12',\n kinetics=Arrhenius(A=(1748420000.0, 's^-1'), n=1.084, Ea=(170.038,\n 'kJ/mol'), T0=(1, 'K'), comment=\n \"\"\"Estimated using average of templates [cCsCJ;CdsJ;C] + [cCs(-HH)CJ;CJ;C] for rate rule [cCs(-HH)CJ;CdsJ;C]\nEuclidian distance = 1.0\nfamily: 1,2_shiftC\"\"\"\n ))\nreaction(label='reaction13', reactants=['C=C([CH]C)C(=C)[CH]C(24182)'],\n products=['CC=C1CCC1=CC(25269)'], transitionState='TS13', kinetics=\n Arrhenius(A=(1620000000000.0, 's^-1'), n=-0.305, Ea=(8.28432, 'kJ/mol'),\n T0=(1, 'K'), Tmin=(600, 'K'), Tmax=(2000, 'K'), comment=\n \"\"\"From training reaction 2 used for R4_SSS;C_rad_out_2H;Cpri_rad_out_2H\nExact match found for rate rule [R4_SSS;C_rad_out_2H;Cpri_rad_out_2H]\nEuclidian distance = 0\nfamily: Birad_recombination\"\"\"\n ))\nreaction(label='reaction14', reactants=['CH2(19)',\n '[CH2]C([C]=CC)=CC(25417)'], products=['C=C([CH]C)C(=C)[CH]C(24182)'],\n transitionState='TS14', kinetics=Arrhenius(A=(1067320.0, 'm^3/(mol*s)'),\n n=0.472793, Ea=(0, 'kJ/mol'), T0=(1, 'K'), comment=\n \"\"\"Estimated using template [Y_rad;Birad] for rate rule [Cd_rad/OneDe;Birad]\nEuclidian distance = 3.0\nfamily: Birad_R_Recombination\nEa raised from -3.5 to 0 kJ/mol.\"\"\"\n ))\nreaction(label='reaction15', 
reactants=['C=C([CH]C)C(=C)[CH]C(24182)'],\n products=['[CH2]C1([CH]C)C(=C)C1C(25296)'], transitionState='TS15',\n kinetics=Arrhenius(A=(67265800000.0, 's^-1'), n=0.535608, Ea=(216.807,\n 'kJ/mol'), T0=(1, 'K'), comment=\n \"\"\"Estimated using average of templates [R4_S_D;doublebond_intra;radadd_intra_csHNd] + [R4_S_D;doublebond_intra_HNd;radadd_intra_cs] for rate rule [R4_S_(Cd)_D;doublebond_intra_HNd;radadd_intra_csHNd]\nEuclidian distance = 2.2360679775\nMultiplied by reaction path degeneracy 2.0\nfamily: Intra_R_Add_Exocyclic\nEa raised from 214.2 to 216.8 kJ/mol to match endothermicity of reaction.\"\"\"\n ))\nreaction(label='reaction16', reactants=['H(3)',\n '[CH2]C(=CC)C(=C)C=C(24604)'], products=['C=C([CH]C)C(=C)[CH]C(24182)'],\n transitionState='TS16', kinetics=Arrhenius(A=(231000000.0,\n 'cm^3/(mol*s)'), n=1.64, Ea=(0, 'kJ/mol'), T0=(1, 'K'), Tmin=(300, 'K'),\n Tmax=(1500, 'K'), comment=\n \"\"\"From training reaction 2544 used for Cds-HH_Cds-CdH;HJ\nExact match found for rate rule [Cds-HH_Cds-CdH;HJ]\nEuclidian distance = 0\nfamily: R_Addition_MultipleBond\nEa raised from -2.0 to 0 kJ/mol.\"\"\"\n ))\nreaction(label='reaction17', reactants=['[CH2]CC(=C)C([CH2])=CC(25418)'],\n products=['C=C([CH]C)C(=C)[CH]C(24182)'], transitionState='TS17',\n kinetics=Arrhenius(A=(1720000.0, 's^-1'), n=1.99, Ea=(113.805, 'kJ/mol'\n ), T0=(1, 'K'), comment=\n \"\"\"From training reaction 84 used for R2H_S;C_rad_out_2H;Cs_H_out_H/Cd\nExact match found for rate rule [R2H_S;C_rad_out_2H;Cs_H_out_H/Cd]\nEuclidian distance = 0\nMultiplied by reaction path degeneracy 2.0\nfamily: intra_H_migration\"\"\"\n ))\nreaction(label='reaction18', reactants=['[CH]=C(CC)C([CH2])=CC(25419)'],\n products=['C=C([CH]C)C(=C)[CH]C(24182)'], transitionState='TS18',\n kinetics=Arrhenius(A=(18460000000.0, 's^-1'), n=0.74, Ea=(145.185,\n 'kJ/mol'), T0=(1, 'K'), Tmin=(300, 'K'), Tmax=(1500, 'K'), comment=\n \"\"\"From training reaction 194 used for R3H_DS;Cd_rad_out_singleH;Cs_H_out_H/NonDeC\nExact match found for rate rule [R3H_DS;Cd_rad_out_singleH;Cs_H_out_H/NonDeC]\nEuclidian distance = 0\nMultiplied by reaction path degeneracy 2.0\nfamily: intra_H_migration\"\"\"\n ))\nreaction(label='reaction19', reactants=['[CH2]C(=[C]C)C(=C)CC(25420)'],\n products=['C=C([CH]C)C(=C)[CH]C(24182)'], transitionState='TS19',\n kinetics=Arrhenius(A=(74200, 's^-1'), n=2.23, Ea=(44.3086, 'kJ/mol'),\n T0=(1, 'K'), comment=\n \"\"\"Estimated using template [R4H_DSS;Cd_rad_out_single;Cs_H_out_1H] for rate rule [R4H_DSS;Cd_rad_out_Cs;Cs_H_out_H/NonDeC]\nEuclidian distance = 2.2360679775\nMultiplied by reaction path degeneracy 2.0\nfamily: intra_H_migration\"\"\"\n ))\nreaction(label='reaction20', reactants=['[CH]=C([CH]C)C(C)=CC(25421)'],\n products=['C=C([CH]C)C(=C)[CH]C(24182)'], transitionState='TS20',\n kinetics=Arrhenius(A=(111300, 's^-1'), n=2.23, Ea=(44.3086, 'kJ/mol'),\n T0=(1, 'K'), comment=\n \"\"\"Estimated using template [R4H_DSS;Cd_rad_out_singleH;Cs_H_out] for rate rule [R4H_DSS;Cd_rad_out_singleH;Cs_H_out_2H]\nEuclidian distance = 1.0\nMultiplied by reaction path degeneracy 3.0\nfamily: intra_H_migration\"\"\"\n ))\nreaction(label='reaction21', reactants=['C=C([CH]C)C(=C)[CH]C(24182)'],\n products=['[CH2][C](C=C)C(=C)CC(24623)'], transitionState='TS21',\n kinetics=Arrhenius(A=(6660000.0, 's^-1'), n=1.64, Ea=(100.416, 'kJ/mol'\n ), T0=(1, 'K'), comment=\n \"\"\"Estimated using template [R5H_SS(D)MS;C_rad_out_single;Cs_H_out_2H] for rate rule [R5H_SS(D)MS;C_rad_out_H/NonDeC;Cs_H_out_2H]\nEuclidian distance = 2.0\nMultiplied by 
reaction path degeneracy 6.0\nfamily: intra_H_migration\"\"\"\n ))\nreaction(label='reaction22', reactants=['C=C([CH]C)C(=C)[CH]C(24182)'],\n products=['C[CH][C]1CCC1=CC(25422)'], transitionState='TS22', kinetics=\n Arrhenius(A=(321867000.0, 's^-1'), n=0.926191, Ea=(130.445, 'kJ/mol'),\n T0=(1, 'K'), comment=\n \"\"\"Estimated using an average for rate rule [R4_S_D;doublebond_intra;radadd_intra_cs2H]\nEuclidian distance = 0\nfamily: Intra_R_Add_Endocyclic\"\"\"\n ))\nreaction(label='reaction23', reactants=['C=C([CH]C)C(=C)[CH]C(24182)'],\n products=['[CH2][C]1C(=C)C(C)C1C(25423)'], transitionState='TS23',\n kinetics=Arrhenius(A=(516207000.0, 's^-1'), n=0.911389, Ea=(125.357,\n 'kJ/mol'), T0=(1, 'K'), comment=\n \"\"\"Estimated using an average for rate rule [R4_S_D;doublebond_intra;radadd_intra_csHCs]\nEuclidian distance = 0\nfamily: Intra_R_Add_Endocyclic\"\"\"\n ))\nreaction(label='reaction24', reactants=['C=C([CH]C)C(=C)[CH]C(24182)'],\n products=['C=CC(=C)C(C)=CC(24616)'], transitionState='TS24', kinetics=\n Arrhenius(A=(12756600000.0, 's^-1'), n=0.137, Ea=(24.9733, 'kJ/mol'),\n T0=(1, 'K'), comment=\n \"\"\"Estimated using template [R5;Y_rad;XH_Rrad] for rate rule [R5radEndo;Y_rad;XH_Rrad]\nEuclidian distance = 1.0\nMultiplied by reaction path degeneracy 6.0\nfamily: Intra_Disproportionation\"\"\"\n ))\nreaction(label='reaction24', reactants=['C=[C]C(C)C(=C)[CH]C(24183)'],\n products=['C=C([CH]C)C(=C)[CH]C(24182)'], transitionState='TS25',\n kinetics=Arrhenius(A=(866000000000.0, 's^-1'), n=0.438, Ea=(94.4747,\n 'kJ/mol'), T0=(1, 'K'), comment=\n \"\"\"From training reaction 5 used for cCs(-HC)CJ;CdsJ;C\nExact match found for rate rule [cCs(-HC)CJ;CdsJ;C]\nEuclidian distance = 0\nfamily: 1,2_shiftC\"\"\"\n ))\nreaction(label='reaction26', reactants=['C=C([CH]C)C(=C)[CH]C(24182)'],\n products=['C=C1C(=CC)CC1C(25265)'], transitionState='TS26', kinetics=\n Arrhenius(A=(3240000000000.0, 's^-1'), n=-0.305, Ea=(8.28432, 'kJ/mol'),\n T0=(1, 'K'), Tmin=(600, 'K'), Tmax=(2000, 'K'), comment=\n \"\"\"Estimated using template [R4_SSS;C_rad_out_2H;Cpri_rad_out_single] for rate rule [R4_SSS;C_rad_out_2H;Cpri_rad_out_H/NonDeC]\nEuclidian distance = 2.0\nMultiplied by reaction path degeneracy 2.0\nfamily: Birad_recombination\"\"\"\n ))\nreaction(label='reaction27', reactants=['CHCH3(T)(95)',\n '[CH2]C([C]=C)=CC(24774)'], products=['C=C([CH]C)C(=C)[CH]C(24182)'],\n transitionState='TS27', kinetics=Arrhenius(A=(1067320.0, 'm^3/(mol*s)'),\n n=0.472793, Ea=(0, 'kJ/mol'), T0=(1, 'K'), comment=\n \"\"\"Estimated using template [Y_rad;Birad] for rate rule [Cd_rad/OneDe;Birad]\nEuclidian distance = 3.0\nfamily: Birad_R_Recombination\nEa raised from -3.5 to 0 kJ/mol.\"\"\"\n ))\nreaction(label='reaction28', reactants=['[CH]=C([CH]C)C(=C)CC(25424)'],\n products=['C=C([CH]C)C(=C)[CH]C(24182)'], transitionState='TS28',\n kinetics=Arrhenius(A=(74200, 's^-1'), n=2.23, Ea=(44.3086, 'kJ/mol'),\n T0=(1, 'K'), comment=\n \"\"\"Estimated using template [R4H_DSS;Cd_rad_out_singleH;Cs_H_out_1H] for rate rule [R4H_DSS;Cd_rad_out_singleH;Cs_H_out_H/NonDeC]\nEuclidian distance = 1.0\nMultiplied by reaction path degeneracy 2.0\nfamily: intra_H_migration\"\"\"\n ))\nreaction(label='reaction29', reactants=['C=C([CH]C)C(=C)[CH]C(24182)'],\n products=['C=CC(=C)C(=C)CC(24630)'], transitionState='TS29', kinetics=\n Arrhenius(A=(19260000000.0, 's^-1'), n=0.137, Ea=(8.368, 'kJ/mol'), T0=\n (1, 'K'), Tmin=(300, 'K'), Tmax=(1500, 'K'), comment=\n \"\"\"Estimated using template [R5;Y_rad_NDe;XH_Rrad] for rate rule 
[R5radEndo;Y_rad_NDe;XH_Rrad]\nEuclidian distance = 1.0\nMultiplied by reaction path degeneracy 6.0\nfamily: Intra_Disproportionation\"\"\"\n ))\nreaction(label='reaction30', reactants=['C=C([CH]C)C(=C)[CH]C(24182)'],\n products=['C=C1C(=C)C(C)C1C(25274)'], transitionState='TS30', kinetics=\n Arrhenius(A=(1620000000000.0, 's^-1'), n=-0.305, Ea=(8.28432, 'kJ/mol'),\n T0=(1, 'K'), comment=\n \"\"\"Estimated using template [R4_SSS;C_rad_out_single;Cpri_rad_out_single] for rate rule [R4_SSS;C_rad_out_H/NonDeC;Cpri_rad_out_H/NonDeC]\nEuclidian distance = 2.82842712475\nfamily: Birad_recombination\"\"\"\n ))\nnetwork(label='4267', isomers=['C=C([CH]C)C(=C)[CH]C(24182)'], reactants=[(\n 'CH3CHCCH2(18175)', 'CH3CHCCH2(18175)')], bathGas={'N2': 0.5, 'Ne': 0.5})\npressureDependence(label='4267', Tmin=(300, 'K'), Tmax=(2000, 'K'), Tcount=\n 8, Tlist=([302.47, 323.145, 369.86, 455.987, 609.649, 885.262, 1353.64,\n 1896.74], 'K'), Pmin=(0.01, 'bar'), Pmax=(100, 'bar'), Pcount=5, Plist=\n ([0.0125282, 0.0667467, 1, 14.982, 79.8202], 'bar'), maximumGrainSize=(\n 0.5, 'kcal/mol'), minimumGrainCount=250, method=\n 'modified strong collision', interpolationModel=('Chebyshev', 6, 4),\n activeKRotor=True, activeJRotor=True, rmgmode=True)\n",
"step-3": "species(\n label = 'C=C([CH]C)C(=C)[CH]C(24182)',\n structure = SMILES('[CH2]C(=CC)C([CH2])=CC'),\n E0 = (249.687,'kJ/mol'),\n modes = [\n HarmonicOscillator(frequencies=([325,375,415,465,420,450,1700,1750,2750,2770,2790,2810,2830,2850,1350,1400,1450,1500,700,800,1000,1100,1350,1400,900,1100,3000,3033.33,3066.67,3100,415,465,780,850,1435,1475,900,1100,2995,3025,975,1000,1300,1375,400,500,1630,1680,180],'cm^-1')),\n HinderedRotor(inertia=(0.735277,'amu*angstrom^2'), symmetry=1, barrier=(16.9055,'kJ/mol'), semiclassical=False),\n HinderedRotor(inertia=(0.0632434,'amu*angstrom^2'), symmetry=1, barrier=(29.514,'kJ/mol'), semiclassical=False),\n HinderedRotor(inertia=(0.737545,'amu*angstrom^2'), symmetry=1, barrier=(16.9576,'kJ/mol'), semiclassical=False),\n HinderedRotor(inertia=(0.732781,'amu*angstrom^2'), symmetry=1, barrier=(16.8481,'kJ/mol'), semiclassical=False),\n HinderedRotor(inertia=(0.739219,'amu*angstrom^2'), symmetry=1, barrier=(16.9961,'kJ/mol'), semiclassical=False),\n ],\n spinMultiplicity = 3,\n opticalIsomers = 1,\n molecularWeight = (108.181,'amu'),\n energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),\n thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.384005,0.0840749,-5.09991e-05,5.50851e-09,4.14197e-12,30198.9,28.4131], Tmin=(100,'K'), Tmax=(1039.09,'K')), NASAPolynomial(coeffs=[18.1326,0.0354522,-1.35159e-05,2.44392e-09,-1.69358e-13,25127.7,-67.5143], Tmin=(1039.09,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(249.687,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(461.453,'J/(mol*K)'), comment=\"\"\"Thermo group additivity estimation: group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + radical(Allyl_P) + radical(Allyl_P)\"\"\"),\n)\n\nspecies(\n label = 'CH3CHCCH2(18175)',\n structure = SMILES('C=C=CC'),\n E0 = (145.615,'kJ/mol'),\n modes = [\n HarmonicOscillator(frequencies=([2950,3100,1380,975,1025,1650,540,610,2055,2750,2800,2850,1350,1500,750,1050,1375,1000,3010,987.5,1337.5,450,1655],'cm^-1')),\n HinderedRotor(inertia=(0.759584,'amu*angstrom^2'), symmetry=1, barrier=(17.4643,'kJ/mol'), semiclassical=False),\n ],\n spinMultiplicity = 1,\n opticalIsomers = 1,\n molecularWeight = (54.0904,'amu'),\n collisionModel = TransportData(shapeIndex=2, epsilon=(2996.71,'J/mol'), sigma=(5.18551,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=0, comment=\"\"\"Epsilon & sigma estimated with Tc=468.08 K, Pc=48.77 bar (from Joback method)\"\"\"),\n energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),\n thermo = NASA(polynomials=[NASAPolynomial(coeffs=[2.74635,0.0218189,8.22353e-06,-2.14768e-08,8.55624e-12,17563.6,12.7381], Tmin=(100,'K'), Tmax=(1025.6,'K')), NASAPolynomial(coeffs=[6.82078,0.0192338,-7.45622e-06,1.36536e-09,-9.53195e-14,16028,-10.4333], Tmin=(1025.6,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(145.615,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(228.648,'J/(mol*K)'), label=\"\"\"CH3CHCCH2\"\"\", comment=\"\"\"Thermo library: DFT_QCI_thermo\"\"\"),\n)\n\nspecies(\n label = '[CH2]C1([CH]C)CC1=CC(25275)',\n structure = SMILES('[CH2]C1([CH]C)CC1=CC'),\n E0 = (462.221,'kJ/mol'),\n spinMultiplicity = 1,\n opticalIsomers = 1,\n molecularWeight = (108.181,'amu'),\n energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),\n thermo = 
NASA(polynomials=[NASAPolynomial(coeffs=[0.263258,0.0692237,-2.26363e-05,-1.35463e-08,8.13734e-12,55737.7,31.4039], Tmin=(100,'K'), Tmax=(1105.46,'K')), NASAPolynomial(coeffs=[15.171,0.0400578,-1.66801e-05,3.13624e-09,-2.2049e-13,50927.8,-48.8594], Tmin=(1105.46,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(462.221,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(465.61,'J/(mol*K)'), comment=\"\"\"Thermo group additivity estimation: group(Cs-(Cds-Cds)CsCsCs) + group(Cs-CsCsHH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsCs) + group(Cds-CdsCsH) + ring(Methylene_cyclopropane) + radical(Neopentyl) + radical(Cs_S)\"\"\"),\n)\n\nspecies(\n label = 'C=[C][CH]C(18176)',\n structure = SMILES('[CH2][C]=CC'),\n E0 = (361.056,'kJ/mol'),\n modes = [\n HarmonicOscillator(frequencies=([1685,370,2750,2800,2850,1350,1500,750,1050,1375,1000,3000,3100,440,815,1455,1000,3010,987.5,1337.5,450,1655],'cm^-1')),\n HinderedRotor(inertia=(0.352622,'amu*angstrom^2'), symmetry=1, barrier=(8.10748,'kJ/mol'), semiclassical=False),\n HinderedRotor(inertia=(0.828631,'amu*angstrom^2'), symmetry=1, barrier=(19.0519,'kJ/mol'), semiclassical=False),\n ],\n spinMultiplicity = 3,\n opticalIsomers = 1,\n molecularWeight = (54.0904,'amu'),\n energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),\n thermo = NASA(polynomials=[NASAPolynomial(coeffs=[2.42015,0.030446,-1.69076e-05,4.64684e-09,-5.12013e-13,43485.7,14.8304], Tmin=(100,'K'), Tmax=(2065.83,'K')), NASAPolynomial(coeffs=[10.7464,0.014324,-5.20136e-06,8.69079e-10,-5.48385e-14,40045.6,-31.3799], Tmin=(2065.83,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(361.056,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(274.378,'J/(mol*K)'), comment=\"\"\"Thermo library: DFT_QCI_thermo + radical(Cds_S) + radical(Allyl_P)\"\"\"),\n)\n\nspecies(\n label = '[CH2]C(=CC)C(C)=[C]C(25412)',\n structure = SMILES('[CH2]C(=CC)C(C)=[C]C'),\n E0 = (336.03,'kJ/mol'),\n modes = [\n HarmonicOscillator(frequencies=([325,375,415,465,420,450,1700,1750,1685,370,2750,2762.5,2775,2787.5,2800,2812.5,2825,2837.5,2850,1350,1380,1410,1440,1470,1500,700,750,800,1000,1050,1100,1350,1375,1400,900,1000,1100,3000,3100,440,815,1455,1000,3010,987.5,1337.5,450,1655,222.04],'cm^-1')),\n HinderedRotor(inertia=(0.395973,'amu*angstrom^2'), symmetry=1, barrier=(13.8694,'kJ/mol'), semiclassical=False),\n HinderedRotor(inertia=(0.396086,'amu*angstrom^2'), symmetry=1, barrier=(13.8683,'kJ/mol'), semiclassical=False),\n HinderedRotor(inertia=(0.395737,'amu*angstrom^2'), symmetry=1, barrier=(13.8691,'kJ/mol'), semiclassical=False),\n HinderedRotor(inertia=(0.395039,'amu*angstrom^2'), symmetry=1, barrier=(13.8689,'kJ/mol'), semiclassical=False),\n HinderedRotor(inertia=(0.395901,'amu*angstrom^2'), symmetry=1, barrier=(13.8689,'kJ/mol'), semiclassical=False),\n ],\n spinMultiplicity = 3,\n opticalIsomers = 1,\n molecularWeight = (108.181,'amu'),\n energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),\n thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.116365,0.0876489,-7.20737e-05,3.21805e-08,-5.96317e-12,40565.5,28.3373], Tmin=(100,'K'), Tmax=(1264.63,'K')), NASAPolynomial(coeffs=[14.5979,0.041109,-1.68732e-05,3.08148e-09,-2.10818e-13,36843.8,-46.1055], Tmin=(1264.63,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(336.03,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(461.453,'J/(mol*K)'), comment=\"\"\"Thermo group additivity estimation: 
group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + radical(Cds_S) + radical(Allyl_P)\"\"\"),\n)\n\nspecies(\n label = '[CH2]C(=[C]C)C(C)=CC(25413)',\n structure = SMILES('[CH2]C(=[C]C)C(C)=CC'),\n E0 = (336.03,'kJ/mol'),\n modes = [\n HarmonicOscillator(frequencies=([325,375,415,465,420,450,1700,1750,1685,370,2750,2762.5,2775,2787.5,2800,2812.5,2825,2837.5,2850,1350,1380,1410,1440,1470,1500,700,750,800,1000,1050,1100,1350,1375,1400,900,1000,1100,3000,3100,440,815,1455,1000,3010,987.5,1337.5,450,1655,222.04],'cm^-1')),\n HinderedRotor(inertia=(0.395973,'amu*angstrom^2'), symmetry=1, barrier=(13.8694,'kJ/mol'), semiclassical=False),\n HinderedRotor(inertia=(0.396086,'amu*angstrom^2'), symmetry=1, barrier=(13.8683,'kJ/mol'), semiclassical=False),\n HinderedRotor(inertia=(0.395737,'amu*angstrom^2'), symmetry=1, barrier=(13.8691,'kJ/mol'), semiclassical=False),\n HinderedRotor(inertia=(0.395039,'amu*angstrom^2'), symmetry=1, barrier=(13.8689,'kJ/mol'), semiclassical=False),\n HinderedRotor(inertia=(0.395901,'amu*angstrom^2'), symmetry=1, barrier=(13.8689,'kJ/mol'), semiclassical=False),\n ],\n spinMultiplicity = 3,\n opticalIsomers = 1,\n molecularWeight = (108.181,'amu'),\n energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),\n thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.116365,0.0876489,-7.20737e-05,3.21805e-08,-5.96317e-12,40565.5,28.3373], Tmin=(100,'K'), Tmax=(1264.63,'K')), NASAPolynomial(coeffs=[14.5979,0.041109,-1.68732e-05,3.08148e-09,-2.10818e-13,36843.8,-46.1055], Tmin=(1264.63,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(336.03,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(461.453,'J/(mol*K)'), comment=\"\"\"Thermo group additivity estimation: group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + radical(Allyl_P) + radical(Cds_S)\"\"\"),\n)\n\nspecies(\n label = '[CH2]C(=CC)[C](C)C=C(24605)',\n structure = SMILES('[CH2]C=C(C)C([CH2])=CC'),\n E0 = (216.244,'kJ/mol'),\n modes = [\n HarmonicOscillator(frequencies=([325,375,415,465,420,450,1700,1750,2750,2770,2790,2810,2830,2850,1350,1400,1450,1500,700,800,1000,1100,1350,1400,900,1100,3000,3033.33,3066.67,3100,415,465,780,850,1435,1475,900,1100,2995,3025,975,1000,1300,1375,400,500,1630,1680,180],'cm^-1')),\n HinderedRotor(inertia=(0.712083,'amu*angstrom^2'), symmetry=1, barrier=(16.3722,'kJ/mol'), semiclassical=False),\n HinderedRotor(inertia=(0.555659,'amu*angstrom^2'), symmetry=1, barrier=(96.3851,'kJ/mol'), semiclassical=False),\n HinderedRotor(inertia=(0.0202512,'amu*angstrom^2'), symmetry=1, barrier=(16.3711,'kJ/mol'), semiclassical=False),\n HinderedRotor(inertia=(0.712008,'amu*angstrom^2'), symmetry=1, barrier=(16.3705,'kJ/mol'), semiclassical=False),\n HinderedRotor(inertia=(4.19211,'amu*angstrom^2'), symmetry=1, barrier=(96.3849,'kJ/mol'), semiclassical=False),\n ],\n spinMultiplicity = 3,\n opticalIsomers = 1,\n molecularWeight = (108.181,'amu'),\n energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),\n thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.0883175,0.0775021,-3.58132e-05,-7.55711e-09,8.27771e-12,26166.1,29.3215], Tmin=(100,'K'), Tmax=(1017.17,'K')), 
NASAPolynomial(coeffs=[16.4341,0.0376674,-1.41425e-05,2.53759e-09,-1.75328e-13,21504.4,-57.0638], Tmin=(1017.17,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(216.244,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(461.453,'J/(mol*K)'), comment=\"\"\"Thermo group additivity estimation: group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + radical(Allyl_P) + radical(C=CC=CCJ)\"\"\"),\n)\n\nspecies(\n label = '[CH2][C](C=C)C(C)=CC(24606)',\n structure = SMILES('[CH2]C=C([CH2])C(C)=CC'),\n E0 = (216.244,'kJ/mol'),\n spinMultiplicity = 1,\n opticalIsomers = 1,\n molecularWeight = (108.181,'amu'),\n energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),\n thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.0883175,0.0775021,-3.58132e-05,-7.55711e-09,8.27771e-12,26166.1,29.3215], Tmin=(100,'K'), Tmax=(1017.17,'K')), NASAPolynomial(coeffs=[16.4341,0.0376674,-1.41425e-05,2.53759e-09,-1.75328e-13,21504.4,-57.0638], Tmin=(1017.17,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(216.244,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(461.453,'J/(mol*K)'), comment=\"\"\"Thermo group additivity estimation: group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + radical(Allyl_P) + radical(C=CC=CCJ)\"\"\"),\n)\n\nspecies(\n label = '[CH2]C(=CC)[C]1CC1C(25414)',\n structure = SMILES('[CH2]C(=CC)[C]1CC1C'),\n E0 = (289.9,'kJ/mol'),\n spinMultiplicity = 1,\n opticalIsomers = 1,\n molecularWeight = (108.181,'amu'),\n energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),\n thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.71289,0.0520158,3.84829e-05,-8.55933e-08,3.61457e-11,35003.5,26.4903], Tmin=(100,'K'), Tmax=(968.714,'K')), NASAPolynomial(coeffs=[16.7686,0.0352996,-1.24057e-05,2.26286e-09,-1.62921e-13,29566.5,-62.466], Tmin=(968.714,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(289.9,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(465.61,'J/(mol*K)'), comment=\"\"\"Thermo group additivity estimation: group(Cs-CsCsCsH) + group(Cs-(Cds-Cds)CsCsH) + group(Cs-CsCsHH) + group(Cs-CsHHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsCs) + group(Cds-CdsCsH) + ring(Cyclopropane) + radical(Allyl_T) + radical(Allyl_P)\"\"\"),\n)\n\nspecies(\n label = '[CH2][C]1C(=CC)CC1C(25415)',\n structure = SMILES('[CH2]C1=C([CH]C)CC1C'),\n E0 = (304.572,'kJ/mol'),\n spinMultiplicity = 1,\n opticalIsomers = 1,\n molecularWeight = (108.181,'amu'),\n energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),\n thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.583091,0.0531885,4.0938e-05,-9.08388e-08,3.83549e-11,36774.2,26.4705], Tmin=(100,'K'), Tmax=(972.301,'K')), NASAPolynomial(coeffs=[18.2947,0.0339462,-1.21014e-05,2.24934e-09,-1.64353e-13,30795.4,-71.5147], Tmin=(972.301,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(304.572,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(465.61,'J/(mol*K)'), comment=\"\"\"Thermo group additivity estimation: group(Cs-(Cds-Cds)CsCsH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsCs) + group(Cds-CdsCsCs) + ring(Cyclobutene) + radical(Allyl_P) + 
radical(Allyl_S)\"\"\"),\n)\n\nspecies(\n label = 'CH2(S)(23)',\n structure = SMILES('[CH2]'),\n E0 = (419.862,'kJ/mol'),\n modes = [\n HarmonicOscillator(frequencies=([1369.36,2789.41,2993.36],'cm^-1')),\n ],\n spinMultiplicity = 1,\n opticalIsomers = 1,\n molecularWeight = (14.0266,'amu'),\n collisionModel = TransportData(shapeIndex=2, epsilon=(1197.29,'J/mol'), sigma=(3.8,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=0.0, comment=\"\"\"GRI-Mech\"\"\"),\n energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),\n thermo = NASA(polynomials=[NASAPolynomial(coeffs=[4.19195,-0.00230793,8.0509e-06,-6.60123e-09,1.95638e-12,50484.3,-0.754589], Tmin=(200,'K'), Tmax=(1000,'K')), NASAPolynomial(coeffs=[2.28556,0.00460255,-1.97412e-06,4.09548e-10,-3.34695e-14,50922.4,8.67684], Tmin=(1000,'K'), Tmax=(3000,'K'))], Tmin=(200,'K'), Tmax=(3000,'K'), E0=(419.862,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(58.2013,'J/(mol*K)'), label=\"\"\"CH2(S)\"\"\", comment=\"\"\"Thermo library: Klippenstein_Glarborg2016\"\"\"),\n)\n\nspecies(\n label = '[CH2]C(=C)C([CH2])=CC(25416)',\n structure = SMILES('[CH2]C(=C)C([CH2])=CC'),\n E0 = (285.713,'kJ/mol'),\n modes = [\n HarmonicOscillator(frequencies=([325,375,415,465,420,450,1700,1750,2950,3100,1380,975,1025,1650,2750,2800,2850,1350,1500,750,1050,1375,1000,3000,3033.33,3066.67,3100,415,465,780,850,1435,1475,900,1100,3010,987.5,1337.5,450,1655,311.383],'cm^-1')),\n HinderedRotor(inertia=(0.327475,'amu*angstrom^2'), symmetry=1, barrier=(22.5291,'kJ/mol'), semiclassical=False),\n HinderedRotor(inertia=(0.327466,'amu*angstrom^2'), symmetry=1, barrier=(22.5294,'kJ/mol'), semiclassical=False),\n HinderedRotor(inertia=(0.327318,'amu*angstrom^2'), symmetry=1, barrier=(22.5272,'kJ/mol'), semiclassical=False),\n HinderedRotor(inertia=(0.327483,'amu*angstrom^2'), symmetry=1, barrier=(22.5297,'kJ/mol'), semiclassical=False),\n ],\n spinMultiplicity = 3,\n opticalIsomers = 1,\n molecularWeight = (94.1543,'amu'),\n energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),\n thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.335271,0.0676667,-2.76626e-05,-1.62749e-08,1.21982e-11,34506.8,24.024], Tmin=(100,'K'), Tmax=(980.594,'K')), NASAPolynomial(coeffs=[17.5531,0.0266059,-9.47854e-06,1.70194e-09,-1.19937e-13,29727.4,-65.8563], Tmin=(980.594,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(285.713,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(390.78,'J/(mol*K)'), comment=\"\"\"Thermo group additivity estimation: group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-CdsCsH) + group(Cds-CdsHH) + radical(Allyl_P) + radical(Allyl_P)\"\"\"),\n)\n\nspecies(\n label = 'C=C([CH]C)C[C]=CC(24184)',\n structure = SMILES('[CH2]C(=CC)C[C]=CC'),\n E0 = (366.985,'kJ/mol'),\n modes = [\n HarmonicOscillator(frequencies=([2995,3025,975,1000,1300,1375,400,500,1630,1680,2750,2770,2790,2810,2830,2850,1350,1400,1450,1500,700,800,1000,1100,1350,1400,900,1100,1685,370,350,440,435,1725,2750,2850,1437.5,1250,1305,750,350,3000,3100,440,815,1455,1000,180,579.702],'cm^-1')),\n HinderedRotor(inertia=(0.147406,'amu*angstrom^2'), symmetry=1, barrier=(3.38916,'kJ/mol'), semiclassical=False),\n HinderedRotor(inertia=(0.64226,'amu*angstrom^2'), symmetry=1, barrier=(14.7668,'kJ/mol'), semiclassical=False),\n HinderedRotor(inertia=(0.64164,'amu*angstrom^2'), symmetry=1, barrier=(14.7526,'kJ/mol'), 
semiclassical=False),\n HinderedRotor(inertia=(0.643937,'amu*angstrom^2'), symmetry=1, barrier=(14.8054,'kJ/mol'), semiclassical=False),\n HinderedRotor(inertia=(0.145327,'amu*angstrom^2'), symmetry=1, barrier=(3.34136,'kJ/mol'), semiclassical=False),\n ],\n spinMultiplicity = 3,\n opticalIsomers = 1,\n molecularWeight = (108.181,'amu'),\n collisionModel = TransportData(shapeIndex=2, epsilon=(3683.66,'J/mol'), sigma=(6.4482,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=0, comment=\"\"\"Epsilon & sigma estimated with Tc=575.38 K, Pc=31.18 bar (from Joback method)\"\"\"),\n energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),\n thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.29648,0.0786067,-5.42868e-05,1.96375e-08,-2.97459e-12,44273.2,31.2372], Tmin=(100,'K'), Tmax=(1490.43,'K')), NASAPolynomial(coeffs=[13.9025,0.0420909,-1.75363e-05,3.199e-09,-2.17227e-13,40217.5,-39.8334], Tmin=(1490.43,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(366.985,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(461.453,'J/(mol*K)'), comment=\"\"\"Thermo group additivity estimation: group(Cs-(Cds-Cds)(Cds-Cds)HH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsCs) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + radical(Cds_S) + radical(Allyl_P)\"\"\"),\n)\n\nspecies(\n label = 'CC=C1CCC1=CC(25269)',\n structure = SMILES('CC=C1CCC1=CC'),\n E0 = (114.107,'kJ/mol'),\n spinMultiplicity = 1,\n opticalIsomers = 1,\n molecularWeight = (108.181,'amu'),\n energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),\n thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.677799,0.0585738,5.80411e-06,-4.1598e-08,1.78951e-11,13856,25.5085], Tmin=(100,'K'), Tmax=(1034.79,'K')), NASAPolynomial(coeffs=[13.4814,0.0415234,-1.65073e-05,3.07348e-09,-2.16896e-13,9469.28,-45.0922], Tmin=(1034.79,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(114.107,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(473.925,'J/(mol*K)'), comment=\"\"\"Thermo group additivity estimation: group(Cs-(Cds-Cds)CsHH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + ring(12methylenecyclobutane)\"\"\"),\n)\n\nspecies(\n label = 'CH2(19)',\n structure = SMILES('[CH2]'),\n E0 = (381.563,'kJ/mol'),\n modes = [\n HarmonicOscillator(frequencies=([1032.72,2936.3,3459],'cm^-1')),\n ],\n spinMultiplicity = 3,\n opticalIsomers = 1,\n molecularWeight = (14.0266,'amu'),\n collisionModel = TransportData(shapeIndex=2, epsilon=(1197.29,'J/mol'), sigma=(3.8,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=0.0, comment=\"\"\"GRI-Mech\"\"\"),\n energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),\n thermo = NASA(polynomials=[NASAPolynomial(coeffs=[3.8328,0.000224446,4.68033e-06,-6.04743e-09,2.59009e-12,45920.8,1.40666], Tmin=(200,'K'), Tmax=(1000,'K')), NASAPolynomial(coeffs=[3.16229,0.00281798,-7.56235e-07,5.05446e-11,5.65236e-15,46099.1,4.77656], Tmin=(1000,'K'), Tmax=(3000,'K'))], Tmin=(200,'K'), Tmax=(3000,'K'), E0=(381.563,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(58.2013,'J/(mol*K)'), label=\"\"\"CH2\"\"\", comment=\"\"\"Thermo library: Klippenstein_Glarborg2016\"\"\"),\n)\n\nspecies(\n label = '[CH2]C([C]=CC)=CC(25417)',\n structure = SMILES('[CH2]C([C]=CC)=CC'),\n E0 = 
(334.774,'kJ/mol'),\n modes = [\n HarmonicOscillator(frequencies=([350,440,435,1725,1685,370,2750,2770,2790,2810,2830,2850,1350,1400,1450,1500,700,800,1000,1100,1350,1400,900,1100,3000,3100,440,815,1455,1000,2995,3025,975,1000,1300,1375,400,500,1630,1680,180],'cm^-1')),\n HinderedRotor(inertia=(0.7606,'amu*angstrom^2'), symmetry=1, barrier=(17.4877,'kJ/mol'), semiclassical=False),\n HinderedRotor(inertia=(0.760854,'amu*angstrom^2'), symmetry=1, barrier=(17.4935,'kJ/mol'), semiclassical=False),\n HinderedRotor(inertia=(0.760586,'amu*angstrom^2'), symmetry=1, barrier=(17.4874,'kJ/mol'), semiclassical=False),\n HinderedRotor(inertia=(2.15146,'amu*angstrom^2'), symmetry=1, barrier=(49.4663,'kJ/mol'), semiclassical=False),\n ],\n spinMultiplicity = 3,\n opticalIsomers = 1,\n molecularWeight = (94.1543,'amu'),\n energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),\n thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.352604,0.0734369,-5.91187e-05,2.57941e-08,-4.60694e-12,40400.9,25.1788], Tmin=(100,'K'), Tmax=(1327.42,'K')), NASAPolynomial(coeffs=[14.2321,0.0316126,-1.18565e-05,2.05761e-09,-1.36512e-13,36716.1,-45.7131], Tmin=(1327.42,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(334.774,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(390.78,'J/(mol*K)'), comment=\"\"\"Thermo group additivity estimation: group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + group(Cds-Cds(Cds-Cds)H) + radical(C=CJC=C) + radical(Allyl_P)\"\"\"),\n)\n\nspecies(\n label = '[CH2]C1([CH]C)C(=C)C1C(25296)',\n structure = SMILES('[CH2]C1([CH]C)C(=C)C1C'),\n E0 = (466.494,'kJ/mol'),\n spinMultiplicity = 1,\n opticalIsomers = 1,\n molecularWeight = (108.181,'amu'),\n energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),\n thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.29276,0.0655305,-4.50464e-06,-3.74661e-08,1.7759e-11,56253.7,30.0992], Tmin=(100,'K'), Tmax=(1027.4,'K')), NASAPolynomial(coeffs=[16.6435,0.0372633,-1.49065e-05,2.81296e-09,-2.01072e-13,51026,-58.316], Tmin=(1027.4,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(466.494,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(465.61,'J/(mol*K)'), comment=\"\"\"Thermo group additivity estimation: group(Cs-(Cds-Cds)CsCsCs) + group(Cs-(Cds-Cds)CsCsH) + group(Cs-CsCsHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + group(Cds-CdsCsCs) + group(Cds-CdsHH) + ring(Methylene_cyclopropane) + radical(Neopentyl) + radical(Cs_S)\"\"\"),\n)\n\nspecies(\n label = 'H(3)',\n structure = SMILES('[H]'),\n E0 = (211.792,'kJ/mol'),\n spinMultiplicity = 1,\n opticalIsomers = 1,\n molecularWeight = (1.00794,'amu'),\n collisionModel = TransportData(shapeIndex=0, epsilon=(1205.6,'J/mol'), sigma=(2.05,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=0.0, comment=\"\"\"GRI-Mech\"\"\"),\n energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),\n thermo = NASA(polynomials=[NASAPolynomial(coeffs=[2.5,9.24385e-15,-1.3678e-17,6.66185e-21,-1.00107e-24,25472.7,-0.459566], Tmin=(100,'K'), Tmax=(3459.6,'K')), NASAPolynomial(coeffs=[2.5,9.20456e-12,-3.58608e-15,6.15199e-19,-3.92042e-23,25472.7,-0.459566], Tmin=(3459.6,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(211.792,'kJ/mol'), Cp0=(20.7862,'J/(mol*K)'), CpInf=(20.7862,'J/(mol*K)'), label=\"\"\"H\"\"\", comment=\"\"\"Thermo library: 
BurkeH2O2\"\"\"),\n)\n\nspecies(\n label = '[CH2]C(=CC)C(=C)C=C(24604)',\n structure = SMILES('[CH2]C(=CC)C(=C)C=C'),\n E0 = (242.677,'kJ/mol'),\n modes = [\n HarmonicOscillator(frequencies=([325,375,415,465,420,450,1700,1750,2950,3000,3050,3100,1330,1430,900,1050,1000,1050,1600,1700,2750,2800,2850,1350,1500,750,1050,1375,1000,3000,3100,440,815,1455,1000,2995,3025,975,1000,1300,1375,400,500,1630,1680,181.962,683.313],'cm^-1')),\n HinderedRotor(inertia=(0.669842,'amu*angstrom^2'), symmetry=1, barrier=(19.1337,'kJ/mol'), semiclassical=False),\n HinderedRotor(inertia=(0.0582339,'amu*angstrom^2'), symmetry=1, barrier=(19.1767,'kJ/mol'), semiclassical=False),\n HinderedRotor(inertia=(0.83204,'amu*angstrom^2'), symmetry=1, barrier=(19.1302,'kJ/mol'), semiclassical=False),\n HinderedRotor(inertia=(4.52237,'amu*angstrom^2'), symmetry=1, barrier=(104.569,'kJ/mol'), semiclassical=False),\n ],\n spinMultiplicity = 2,\n opticalIsomers = 1,\n molecularWeight = (107.173,'amu'),\n energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),\n thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.293043,0.0682771,-2.00337e-05,-2.05401e-08,1.21516e-11,29332.3,27.0261], Tmin=(100,'K'), Tmax=(1018.57,'K')), NASAPolynomial(coeffs=[15.7386,0.0358123,-1.37404e-05,2.51366e-09,-1.76142e-13,24723.4,-54.9529], Tmin=(1018.57,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(242.677,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(440.667,'J/(mol*K)'), comment=\"\"\"Thermo group additivity estimation: group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-Cds(Cds-Cds)(Cds-Cds)) + group(Cds-CdsCsH) + group(Cds-Cds(Cds-Cds)H) + group(Cds-CdsHH) + group(Cds-CdsHH) + radical(Allyl_P)\"\"\"),\n)\n\nspecies(\n label = '[CH2]CC(=C)C([CH2])=CC(25418)',\n structure = SMILES('[CH2]CC(=C)C([CH2])=CC'),\n E0 = (316.814,'kJ/mol'),\n modes = [\n HarmonicOscillator(frequencies=([3010,987.5,1337.5,450,1655,2750,2800,2850,1350,1500,750,1050,1375,1000,2950,3100,1380,975,1025,1650,325,375,415,465,420,450,1700,1750,2750,2850,1437.5,1250,1305,750,350,3000,3033.33,3066.67,3100,415,465,780,850,1435,1475,900,1100,180,180],'cm^-1')),\n HinderedRotor(inertia=(0.0368535,'amu*angstrom^2'), symmetry=1, barrier=(17.9864,'kJ/mol'), semiclassical=False),\n HinderedRotor(inertia=(0.00736317,'amu*angstrom^2'), symmetry=1, barrier=(3.60618,'kJ/mol'), semiclassical=False),\n HinderedRotor(inertia=(0.781153,'amu*angstrom^2'), symmetry=1, barrier=(17.9602,'kJ/mol'), semiclassical=False),\n HinderedRotor(inertia=(0.779478,'amu*angstrom^2'), symmetry=1, barrier=(17.9217,'kJ/mol'), semiclassical=False),\n HinderedRotor(inertia=(0.781104,'amu*angstrom^2'), symmetry=1, barrier=(17.9591,'kJ/mol'), semiclassical=False),\n ],\n spinMultiplicity = 3,\n opticalIsomers = 1,\n molecularWeight = (108.181,'amu'),\n energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),\n thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.348925,0.0836004,-5.1879e-05,7.14877e-09,3.44908e-12,38270.9,31.5928], Tmin=(100,'K'), Tmax=(1044.14,'K')), NASAPolynomial(coeffs=[17.9255,0.0352115,-1.34219e-05,2.42456e-09,-1.67785e-13,33276.3,-63.0036], Tmin=(1044.14,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(316.814,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(461.453,'J/(mol*K)'), comment=\"\"\"Thermo group additivity estimation: group(Cs-(Cds-Cds)CsHH) + group(Cs-CsHHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-Cds(Cds-Cds)Cs) + 
group(Cds-Cds(Cds-Cds)Cs) + group(Cds-CdsCsH) + group(Cds-CdsHH) + radical(RCCJ) + radical(Allyl_P)\"\"\"),\n)\n\nspecies(\n label = '[CH]=C(CC)C([CH2])=CC(25419)',\n structure = SMILES('[CH]=C(CC)C([CH2])=CC'),\n E0 = (358.664,'kJ/mol'),\n modes = [\n HarmonicOscillator(frequencies=([3120,650,792.5,1650,3010,987.5,1337.5,450,1655,2750,2770,2790,2810,2830,2850,1350,1400,1450,1500,700,800,1000,1100,1350,1400,900,1100,325,375,415,465,420,450,1700,1750,2750,2850,1437.5,1250,1305,750,350,3000,3100,440,815,1455,1000,180],'cm^-1')),\n HinderedRotor(inertia=(0.701639,'amu*angstrom^2'), symmetry=1, barrier=(16.1321,'kJ/mol'), semiclassical=False),\n HinderedRotor(inertia=(0.344302,'amu*angstrom^2'), symmetry=1, barrier=(16.1602,'kJ/mol'), semiclassical=False),\n HinderedRotor(inertia=(0.0492932,'amu*angstrom^2'), symmetry=1, barrier=(16.1378,'kJ/mol'), semiclassical=False),\n HinderedRotor(inertia=(0.702005,'amu*angstrom^2'), symmetry=1, barrier=(16.1405,'kJ/mol'), semiclassical=False),\n HinderedRotor(inertia=(0.702379,'amu*angstrom^2'), symmetry=1, barrier=(16.1491,'kJ/mol'), semiclassical=False),\n ],\n spinMultiplicity = 3,\n opticalIsomers = 1,\n molecularWeight = (108.181,'amu'),\n energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),\n thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.468616,0.0864938,-5.84569e-05,1.27697e-08,1.75707e-12,43308.4,30.6389], Tmin=(100,'K'), Tmax=(1047.28,'K')), NASAPolynomial(coeffs=[18.4195,0.034593,-1.31104e-05,2.35762e-09,-1.62637e-13,38242.2,-66.6572], Tmin=(1047.28,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(358.664,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(461.453,'J/(mol*K)'), comment=\"\"\"Thermo group additivity estimation: group(Cs-(Cds-Cds)CsHH) + group(Cs-CsHHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-CdsCsH) + group(Cds-CdsHH) + radical(Allyl_P) + radical(Cds_P)\"\"\"),\n)\n\nspecies(\n label = '[CH2]C(=[C]C)C(=C)CC(25420)',\n structure = SMILES('[CH2]C(=[C]C)C(=C)CC'),\n E0 = (349.41,'kJ/mol'),\n modes = [\n HarmonicOscillator(frequencies=([1685,370,2750,2770,2790,2810,2830,2850,1350,1400,1450,1500,700,800,1000,1100,1350,1400,900,1100,2950,3100,1380,975,1025,1650,325,375,415,465,420,450,1700,1750,2750,2850,1437.5,1250,1305,750,350,3000,3100,440,815,1455,1000,180,180],'cm^-1')),\n HinderedRotor(inertia=(0.159905,'amu*angstrom^2'), symmetry=1, barrier=(15.9368,'kJ/mol'), semiclassical=False),\n HinderedRotor(inertia=(0.693159,'amu*angstrom^2'), symmetry=1, barrier=(15.9371,'kJ/mol'), semiclassical=False),\n HinderedRotor(inertia=(0.693127,'amu*angstrom^2'), symmetry=1, barrier=(15.9364,'kJ/mol'), semiclassical=False),\n HinderedRotor(inertia=(0.693165,'amu*angstrom^2'), symmetry=1, barrier=(15.9372,'kJ/mol'), semiclassical=False),\n HinderedRotor(inertia=(0.0150632,'amu*angstrom^2'), symmetry=1, barrier=(15.9371,'kJ/mol'), semiclassical=False),\n ],\n spinMultiplicity = 3,\n opticalIsomers = 1,\n molecularWeight = (108.181,'amu'),\n energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),\n thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.583231,0.089245,-7.16619e-05,3.00631e-08,-5.07891e-12,42198.9,31.1306], Tmin=(100,'K'), Tmax=(1412.15,'K')), NASAPolynomial(coeffs=[19.0319,0.0336833,-1.2643e-05,2.20036e-09,-1.46165e-13,36659.1,-70.2702], Tmin=(1412.15,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(349.41,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), 
CpInf=(461.453,'J/(mol*K)'), comment=\"\"\"Thermo group additivity estimation: group(Cs-(Cds-Cds)CsHH) + group(Cs-CsHHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-CdsCsH) + group(Cds-CdsHH) + radical(Allyl_P) + radical(Cds_S)\"\"\"),\n)\n\nspecies(\n label = '[CH]=C([CH]C)C(C)=CC(25421)',\n structure = SMILES('[CH]C(=CC)C(C)=CC'),\n E0 = (317.373,'kJ/mol'),\n modes = [\n HarmonicOscillator(frequencies=([325,375,415,465,420,450,1700,1750,2750,2762.5,2775,2787.5,2800,2812.5,2825,2837.5,2850,1350,1380,1410,1440,1470,1500,700,750,800,1000,1050,1100,1350,1375,1400,900,1000,1100,2995,3025,975,1000,1300,1375,400,500,1630,1680,200,800,1200,1600],'cm^-1')),\n HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),\n HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),\n HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),\n HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),\n HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),\n ],\n spinMultiplicity = 3,\n opticalIsomers = 1,\n molecularWeight = (108.181,'amu'),\n energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),\n thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.247945,0.0873521,-6.16843e-05,2.31486e-08,-3.62747e-12,38328.8,29.1665], Tmin=(100,'K'), Tmax=(1460.93,'K')), NASAPolynomial(coeffs=[15.297,0.0447902,-1.7984e-05,3.20673e-09,-2.14924e-13,33786.8,-51.7212], Tmin=(1460.93,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(317.373,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(461.453,'J/(mol*K)'), comment=\"\"\"Thermo group additivity estimation: group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + radical(AllylJ2_triplet)\"\"\"),\n)\n\nspecies(\n label = '[CH2][C](C=C)C(=C)CC(24623)',\n structure = SMILES('[CH2]C(C=C)=C([CH2])CC'),\n E0 = (228.159,'kJ/mol'),\n spinMultiplicity = 1,\n opticalIsomers = 1,\n molecularWeight = (108.181,'amu'),\n energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),\n thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.0497728,0.0733281,-1.6094e-05,-3.35123e-08,1.88363e-11,27601.1,30.4448], Tmin=(100,'K'), Tmax=(975.095,'K')), NASAPolynomial(coeffs=[18.3695,0.0342638,-1.21408e-05,2.16747e-09,-1.52112e-13,22274,-66.8493], Tmin=(975.095,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(228.159,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(461.453,'J/(mol*K)'), comment=\"\"\"Thermo group additivity estimation: group(Cs-(Cds-Cds)CsHH) + group(Cs-CsHHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsCs) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-Cds(Cds-Cds)H) + group(Cds-CdsHH) + radical(C=CC=CCJ) + radical(Allyl_P)\"\"\"),\n)\n\nspecies(\n label = 'C[CH][C]1CCC1=CC(25422)',\n structure = SMILES('C[CH]C1CCC=1[CH]C'),\n E0 = (303.292,'kJ/mol'),\n spinMultiplicity = 1,\n opticalIsomers = 1,\n molecularWeight = (108.181,'amu'),\n energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),\n thermo = 
NASA(polynomials=[NASAPolynomial(coeffs=[0.788866,0.0500701,4.22235e-05,-8.64809e-08,3.53174e-11,36611.5,25.2586], Tmin=(100,'K'), Tmax=(987.239,'K')), NASAPolynomial(coeffs=[16.2187,0.0373502,-1.4111e-05,2.65357e-09,-1.92503e-13,31138.2,-61.2734], Tmin=(987.239,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(303.292,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(465.61,'J/(mol*K)'), comment=\"\"\"Thermo group additivity estimation: group(Cs-(Cds-Cds)CsHH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + group(Cds-CdsCsCs) + group(Cds-CdsCsCs) + ring(Cyclobutene) + radical(Allyl_S) + radical(Allyl_S)\"\"\"),\n)\n\nspecies(\n label = '[CH2][C]1C(=C)C(C)C1C(25423)',\n structure = SMILES('[CH2]C1=C([CH2])C(C)C1C'),\n E0 = (305.852,'kJ/mol'),\n spinMultiplicity = 1,\n opticalIsomers = 1,\n molecularWeight = (108.181,'amu'),\n energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),\n thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.377097,0.0563026,3.9705e-05,-9.53284e-08,4.14811e-11,36937,26.2973], Tmin=(100,'K'), Tmax=(959.735,'K')), NASAPolynomial(coeffs=[20.4056,0.0304853,-1.006e-05,1.83774e-09,-1.35603e-13,30437.2,-83.3398], Tmin=(959.735,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(305.852,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(465.61,'J/(mol*K)'), comment=\"\"\"Thermo group additivity estimation: group(Cs-(Cds-Cds)CsCsH) + group(Cs-(Cds-Cds)CsCsH) + group(Cs-CsHHH) + group(Cs-CsHHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsCs) + group(Cds-CdsCsCs) + ring(Cyclobutene) + radical(Allyl_P) + radical(Allyl_P)\"\"\"),\n)\n\nspecies(\n label = 'C=CC(=C)C(C)=CC(24616)',\n structure = SMILES('C=CC(=C)C(C)=CC'),\n E0 = (91.1774,'kJ/mol'),\n spinMultiplicity = 1,\n opticalIsomers = 1,\n molecularWeight = (108.181,'amu'),\n energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),\n thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.236638,0.0713806,-3.04205e-05,-5.26762e-09,5.54498e-12,11111.2,26.9518], Tmin=(100,'K'), Tmax=(1093.32,'K')), NASAPolynomial(coeffs=[14.1536,0.040705,-1.6104e-05,2.93544e-09,-2.02595e-13,6858.32,-46.9636], Tmin=(1093.32,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(91.1774,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(465.61,'J/(mol*K)'), comment=\"\"\"Thermo group additivity estimation: group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-Cds(Cds-Cds)(Cds-Cds)) + group(Cds-CdsCsH) + group(Cds-Cds(Cds-Cds)H) + group(Cds-CdsHH) + group(Cds-CdsHH)\"\"\"),\n)\n\nspecies(\n label = 'C=[C]C(C)C(=C)[CH]C(24183)',\n structure = SMILES('[CH2]C(=CC)C(C)[C]=C'),\n E0 = (369.44,'kJ/mol'),\n modes = [\n HarmonicOscillator(frequencies=([1685,370,3010,987.5,1337.5,450,1655,2750,2770,2790,2810,2830,2850,1350,1400,1450,1500,700,800,1000,1100,1350,1400,900,1100,2950,3100,1380,975,1025,1650,1380,1390,370,380,2900,435,350,440,435,1725,3000,3100,440,815,1455,1000,345.333,347.343],'cm^-1')),\n HinderedRotor(inertia=(0.119405,'amu*angstrom^2'), symmetry=1, barrier=(9.93037,'kJ/mol'), semiclassical=False),\n HinderedRotor(inertia=(0.281457,'amu*angstrom^2'), symmetry=1, barrier=(24.022,'kJ/mol'), semiclassical=False),\n HinderedRotor(inertia=(0.116909,'amu*angstrom^2'), symmetry=1, barrier=(9.94809,'kJ/mol'), semiclassical=False),\n HinderedRotor(inertia=(0.117447,'amu*angstrom^2'), symmetry=1, barrier=(9.9744,'kJ/mol'), semiclassical=False),\n 
HinderedRotor(inertia=(0.116555,'amu*angstrom^2'), symmetry=1, barrier=(9.93684,'kJ/mol'), semiclassical=False),\n ],\n spinMultiplicity = 3,\n opticalIsomers = 1,\n molecularWeight = (108.181,'amu'),\n collisionModel = TransportData(shapeIndex=2, epsilon=(3625.33,'J/mol'), sigma=(6.4092,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=0, comment=\"\"\"Epsilon & sigma estimated with Tc=566.27 K, Pc=31.24 bar (from Joback method)\"\"\"),\n energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),\n thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.299693,0.0839308,-6.74533e-05,3.06742e-08,-6.02582e-12,44564.4,29.0122], Tmin=(100,'K'), Tmax=(1163.73,'K')), NASAPolynomial(coeffs=[10.857,0.0476425,-2.06788e-05,3.8782e-09,-2.69295e-13,42107.3,-23.5217], Tmin=(1163.73,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(369.44,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(461.453,'J/(mol*K)'), comment=\"\"\"Thermo group additivity estimation: group(Cs-(Cds-Cds)(Cds-Cds)CsH) + group(Cs-CsHHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsCs) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + group(Cds-CdsHH) + radical(Allyl_P) + radical(Cds_S)\"\"\"),\n)\n\nspecies(\n label = 'C=C1C(=CC)CC1C(25265)',\n structure = SMILES('C=C1C(=CC)CC1C'),\n E0 = (118.381,'kJ/mol'),\n spinMultiplicity = 1,\n opticalIsomers = 1,\n molecularWeight = (108.181,'amu'),\n energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),\n thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.689924,0.0550304,2.3689e-05,-6.56265e-08,2.77602e-11,14372.8,24.9628], Tmin=(100,'K'), Tmax=(993.204,'K')), NASAPolynomial(coeffs=[15.3775,0.0380508,-1.43595e-05,2.66472e-09,-1.90565e-13,9375.16,-56.2678], Tmin=(993.204,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(118.381,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(473.925,'J/(mol*K)'), comment=\"\"\"Thermo group additivity estimation: group(Cs-(Cds-Cds)CsCsH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-CsHHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-CdsCsH) + group(Cds-CdsHH) + ring(12methylenecyclobutane)\"\"\"),\n)\n\nspecies(\n label = 'CHCH3(T)(95)',\n structure = SMILES('[CH]C'),\n E0 = (343.893,'kJ/mol'),\n modes = [\n HarmonicOscillator(frequencies=([2750,2800,2850,1350,1500,750,1050,1375,1000,592.414,4000],'cm^-1')),\n HinderedRotor(inertia=(0.00438699,'amu*angstrom^2'), symmetry=1, barrier=(26.7685,'kJ/mol'), semiclassical=False),\n ],\n spinMultiplicity = 3,\n opticalIsomers = 1,\n molecularWeight = (28.0532,'amu'),\n energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),\n thermo = NASA(polynomials=[NASAPolynomial(coeffs=[3.82363,-0.000909515,3.2138e-05,-3.7348e-08,1.3309e-11,41371.4,7.10948], Tmin=(100,'K'), Tmax=(960.812,'K')), NASAPolynomial(coeffs=[4.30487,0.00943069,-3.27559e-06,5.95121e-10,-4.27307e-14,40709.1,1.84202], Tmin=(960.812,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(343.893,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(128.874,'J/(mol*K)'), label=\"\"\"CHCH3(T)\"\"\", comment=\"\"\"Thermo library: DFT_QCI_thermo\"\"\"),\n)\n\nspecies(\n label = '[CH2]C([C]=C)=CC(24774)',\n structure = SMILES('[CH2]C([C]=C)=CC'),\n E0 = (370.8,'kJ/mol'),\n modes = [\n 
HarmonicOscillator(frequencies=([1685,370,2750,2800,2850,1350,1500,750,1050,1375,1000,3010,987.5,1337.5,450,1655,2950,3100,1380,975,1025,1650,350,440,435,1725,3000,3100,440,815,1455,1000,180],'cm^-1')),\n HinderedRotor(inertia=(1.17315,'amu*angstrom^2'), symmetry=1, barrier=(26.9731,'kJ/mol'), semiclassical=False),\n HinderedRotor(inertia=(1.17496,'amu*angstrom^2'), symmetry=1, barrier=(27.0146,'kJ/mol'), semiclassical=False),\n HinderedRotor(inertia=(1.1727,'amu*angstrom^2'), symmetry=1, barrier=(26.9626,'kJ/mol'), semiclassical=False),\n ],\n spinMultiplicity = 3,\n opticalIsomers = 1,\n molecularWeight = (80.1277,'amu'),\n energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),\n thermo = NASA(polynomials=[NASAPolynomial(coeffs=[1.0818,0.0569416,-3.56598e-05,4.1841e-09,3.20998e-12,44708.4,20.7527], Tmin=(100,'K'), Tmax=(982.69,'K')), NASAPolynomial(coeffs=[12.9204,0.0239405,-8.46845e-06,1.46434e-09,-9.91425e-14,41648.3,-39.886], Tmin=(982.69,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(370.8,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(320.107,'J/(mol*K)'), comment=\"\"\"Thermo group additivity estimation: group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-CdsCsH) + group(Cds-Cds(Cds-Cds)H) + group(Cds-CdsHH) + radical(C=CJC=C) + radical(Allyl_P)\"\"\"),\n)\n\nspecies(\n label = '[CH]=C([CH]C)C(=C)CC(25424)',\n structure = SMILES('[CH]C(=CC)C(=C)CC'),\n E0 = (330.753,'kJ/mol'),\n modes = [\n HarmonicOscillator(frequencies=([2750,2850,1437.5,1250,1305,750,350,2950,3100,1380,975,1025,1650,3010,987.5,1337.5,450,1655,2750,2770,2790,2810,2830,2850,1350,1400,1450,1500,700,800,1000,1100,1350,1400,900,1100,325,375,415,465,420,450,1700,1750,200,800,1066.67,1333.33,1600],'cm^-1')),\n HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),\n HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),\n HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),\n HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),\n HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),\n ],\n spinMultiplicity = 3,\n opticalIsomers = 1,\n molecularWeight = (108.181,'amu'),\n energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),\n thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.442166,0.0858934,-5.1432e-05,9.5936e-09,1.54315e-12,39950.3,30.9724], Tmin=(100,'K'), Tmax=(1106.5,'K')), NASAPolynomial(coeffs=[16.3579,0.0427111,-1.66841e-05,2.99222e-09,-2.04007e-13,35158.1,-56.633], Tmin=(1106.5,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(330.753,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(461.453,'J/(mol*K)'), comment=\"\"\"Thermo group additivity estimation: group(Cs-(Cds-Cds)CsHH) + group(Cs-CsHHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-CdsCsH) + group(Cds-CdsHH) + radical(AllylJ2_triplet)\"\"\"),\n)\n\nspecies(\n label = 'C=CC(=C)C(=C)CC(24630)',\n structure = SMILES('C=CC(=C)C(=C)CC'),\n E0 = (104.558,'kJ/mol'),\n spinMultiplicity = 1,\n opticalIsomers = 1,\n molecularWeight = (108.181,'amu'),\n energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),\n thermo = 
NASA(polynomials=[NASAPolynomial(coeffs=[0.296747,0.0670054,-1.0269e-05,-3.13536e-08,1.59568e-11,12721.3,27.8384], Tmin=(100,'K'), Tmax=(1010.3,'K')), NASAPolynomial(coeffs=[15.6889,0.0379462,-1.44599e-05,2.64736e-09,-1.86033e-13,7984.11,-54.6302], Tmin=(1010.3,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(104.558,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(465.61,'J/(mol*K)'), comment=\"\"\"Thermo group additivity estimation: group(Cs-(Cds-Cds)CsHH) + group(Cs-CsHHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-Cds(Cds-Cds)(Cds-Cds)) + group(Cds-Cds(Cds-Cds)H) + group(Cds-CdsHH) + group(Cds-CdsHH) + group(Cds-CdsHH)\"\"\"),\n)\n\nspecies(\n label = 'C=C1C(=C)C(C)C1C(25274)',\n structure = SMILES('C=C1C(=C)C(C)C1C'),\n E0 = (122.654,'kJ/mol'),\n spinMultiplicity = 1,\n opticalIsomers = 1,\n molecularWeight = (108.181,'amu'),\n energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),\n thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.691732,0.0515838,4.13669e-05,-8.96066e-08,3.77135e-11,14890,23.0693], Tmin=(100,'K'), Tmax=(969.873,'K')), NASAPolynomial(coeffs=[17.4573,0.0342784,-1.20439e-05,2.21718e-09,-1.61071e-13,9199.74,-69.8715], Tmin=(969.873,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(122.654,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(473.925,'J/(mol*K)'), comment=\"\"\"Thermo group additivity estimation: group(Cs-(Cds-Cds)CsCsH) + group(Cs-(Cds-Cds)CsCsH) + group(Cs-CsHHH) + group(Cs-CsHHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-CdsHH) + group(Cds-CdsHH) + ring(12methylenecyclobutane)\"\"\"),\n)\n\nspecies(\n label = 'N2',\n structure = SMILES('N#N'),\n E0 = (-8.69489,'kJ/mol'),\n spinMultiplicity = 1,\n opticalIsomers = 1,\n molecularWeight = (28.0135,'amu'),\n collisionModel = TransportData(shapeIndex=1, epsilon=(810.913,'J/mol'), sigma=(3.621,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(1.76,'angstroms^3'), rotrelaxcollnum=4.0, comment=\"\"\"PrimaryTransportLibrary\"\"\"),\n energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),\n thermo = NASA(polynomials=[NASAPolynomial(coeffs=[3.61263,-0.00100893,2.49898e-06,-1.43376e-09,2.58636e-13,-1051.1,2.6527], Tmin=(100,'K'), Tmax=(1817.04,'K')), NASAPolynomial(coeffs=[2.9759,0.00164141,-7.19722e-07,1.25378e-10,-7.91526e-15,-1025.84,5.53757], Tmin=(1817.04,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(-8.69489,'kJ/mol'), Cp0=(29.1007,'J/(mol*K)'), CpInf=(37.4151,'J/(mol*K)'), label=\"\"\"N2\"\"\", comment=\"\"\"Thermo library: BurkeH2O2\"\"\"),\n)\n\nspecies(\n label = 'Ne',\n structure = SMILES('[Ne]'),\n E0 = (-6.19738,'kJ/mol'),\n spinMultiplicity = 1,\n opticalIsomers = 1,\n molecularWeight = (20.1797,'amu'),\n collisionModel = TransportData(shapeIndex=0, epsilon=(1235.53,'J/mol'), sigma=(3.758e-10,'m'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=0, comment=\"\"\"Epsilon & sigma estimated with fixed Lennard Jones Parameters. This is the fallback method! 
Try improving transport databases!\"\"\"),\n energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),\n thermo = NASA(polynomials=[NASAPolynomial(coeffs=[2.5,0,0,0,0,-745.375,3.35532], Tmin=(200,'K'), Tmax=(1000,'K')), NASAPolynomial(coeffs=[2.5,0,0,0,0,-745.375,3.35532], Tmin=(1000,'K'), Tmax=(6000,'K'))], Tmin=(200,'K'), Tmax=(6000,'K'), E0=(-6.19738,'kJ/mol'), Cp0=(20.7862,'J/(mol*K)'), CpInf=(20.7862,'J/(mol*K)'), label=\"\"\"Ne\"\"\", comment=\"\"\"Thermo library: primaryThermoLibrary\"\"\"),\n)\n\ntransitionState(\n label = 'TS1',\n E0 = (291.23,'kJ/mol'),\n spinMultiplicity = 1,\n opticalIsomers = 1,\n)\n\ntransitionState(\n label = 'TS2',\n E0 = (462.221,'kJ/mol'),\n spinMultiplicity = 1,\n opticalIsomers = 1,\n)\n\ntransitionState(\n label = 'TS3',\n E0 = (538.699,'kJ/mol'),\n spinMultiplicity = 1,\n opticalIsomers = 1,\n)\n\ntransitionState(\n label = 'TS4',\n E0 = (497.951,'kJ/mol'),\n spinMultiplicity = 1,\n opticalIsomers = 1,\n)\n\ntransitionState(\n label = 'TS5',\n E0 = (380.338,'kJ/mol'),\n spinMultiplicity = 1,\n opticalIsomers = 1,\n)\n\ntransitionState(\n label = 'TS6',\n E0 = (399.474,'kJ/mol'),\n spinMultiplicity = 1,\n opticalIsomers = 1,\n)\n\ntransitionState(\n label = 'TS7',\n E0 = (350.103,'kJ/mol'),\n spinMultiplicity = 1,\n opticalIsomers = 1,\n)\n\ntransitionState(\n label = 'TS8',\n E0 = (722.113,'kJ/mol'),\n spinMultiplicity = 1,\n opticalIsomers = 1,\n)\n\ntransitionState(\n label = 'TS9',\n E0 = (343.259,'kJ/mol'),\n spinMultiplicity = 1,\n opticalIsomers = 1,\n)\n\ntransitionState(\n label = 'TS10',\n E0 = (380.132,'kJ/mol'),\n spinMultiplicity = 1,\n opticalIsomers = 1,\n)\n\ntransitionState(\n label = 'TS11',\n E0 = (705.575,'kJ/mol'),\n spinMultiplicity = 1,\n opticalIsomers = 1,\n)\n\ntransitionState(\n label = 'TS12',\n E0 = (537.022,'kJ/mol'),\n spinMultiplicity = 1,\n opticalIsomers = 1,\n)\n\ntransitionState(\n label = 'TS13',\n E0 = (257.971,'kJ/mol'),\n spinMultiplicity = 1,\n opticalIsomers = 1,\n)\n\ntransitionState(\n label = 'TS14',\n E0 = (716.337,'kJ/mol'),\n spinMultiplicity = 1,\n opticalIsomers = 1,\n)\n\ntransitionState(\n label = 'TS15',\n E0 = (466.494,'kJ/mol'),\n spinMultiplicity = 1,\n opticalIsomers = 1,\n)\n\ntransitionState(\n label = 'TS16',\n E0 = (454.469,'kJ/mol'),\n spinMultiplicity = 1,\n opticalIsomers = 1,\n)\n\ntransitionState(\n label = 'TS17',\n E0 = (430.619,'kJ/mol'),\n spinMultiplicity = 1,\n opticalIsomers = 1,\n)\n\ntransitionState(\n label = 'TS18',\n E0 = (503.849,'kJ/mol'),\n spinMultiplicity = 1,\n opticalIsomers = 1,\n)\n\ntransitionState(\n label = 'TS19',\n E0 = (393.718,'kJ/mol'),\n spinMultiplicity = 1,\n opticalIsomers = 1,\n)\n\ntransitionState(\n label = 'TS20',\n E0 = (361.682,'kJ/mol'),\n spinMultiplicity = 1,\n opticalIsomers = 1,\n)\n\ntransitionState(\n label = 'TS21',\n E0 = (350.103,'kJ/mol'),\n spinMultiplicity = 1,\n opticalIsomers = 1,\n)\n\ntransitionState(\n label = 'TS22',\n E0 = (380.132,'kJ/mol'),\n spinMultiplicity = 1,\n opticalIsomers = 1,\n)\n\ntransitionState(\n label = 'TS23',\n E0 = (375.044,'kJ/mol'),\n spinMultiplicity = 1,\n opticalIsomers = 1,\n)\n\ntransitionState(\n label = 'TS24',\n E0 = (274.66,'kJ/mol'),\n spinMultiplicity = 1,\n opticalIsomers = 1,\n)\n\ntransitionState(\n label = 'TS25',\n E0 = (463.915,'kJ/mol'),\n spinMultiplicity = 1,\n opticalIsomers = 1,\n)\n\ntransitionState(\n label = 'TS26',\n E0 = (257.971,'kJ/mol'),\n spinMultiplicity = 1,\n opticalIsomers = 1,\n)\n\ntransitionState(\n label = 'TS27',\n E0 = 
(714.692,'kJ/mol'),\n spinMultiplicity = 1,\n opticalIsomers = 1,\n)\n\ntransitionState(\n label = 'TS28',\n E0 = (375.062,'kJ/mol'),\n spinMultiplicity = 1,\n opticalIsomers = 1,\n)\n\ntransitionState(\n label = 'TS29',\n E0 = (258.055,'kJ/mol'),\n spinMultiplicity = 1,\n opticalIsomers = 1,\n)\n\ntransitionState(\n label = 'TS30',\n E0 = (257.971,'kJ/mol'),\n spinMultiplicity = 1,\n opticalIsomers = 1,\n)\n\nreaction(\n label = 'reaction1',\n reactants = ['C=C([CH]C)C(=C)[CH]C(24182)'],\n products = ['CH3CHCCH2(18175)', 'CH3CHCCH2(18175)'],\n transitionState = 'TS1',\n kinetics = Arrhenius(A=(5e+12,'s^-1'), n=0, Ea=(41.5431,'kJ/mol'), T0=(1,'K'), Tmin=(300,'K'), Tmax=(1500,'K'), comment=\"\"\"Exact match found for rate rule [RJJ]\nEuclidian distance = 0\nfamily: 1,4_Linear_birad_scission\nEa raised from 0.0 to 41.5 kJ/mol to match endothermicity of reaction.\"\"\"),\n)\n\nreaction(\n label = 'reaction2',\n reactants = ['C=C([CH]C)C(=C)[CH]C(24182)'],\n products = ['[CH2]C1([CH]C)CC1=CC(25275)'],\n transitionState = 'TS2',\n kinetics = Arrhenius(A=(3.36e+09,'s^-1'), n=0.84, Ea=(212.534,'kJ/mol'), T0=(1,'K'), Tmin=(300,'K'), Tmax=(2500,'K'), comment=\"\"\"Estimated using template [R4_S_D;doublebond_intra_HNd;radadd_intra_cs2H] for rate rule [R4_S_(Cd)_D;doublebond_intra_HNd;radadd_intra_cs2H]\nEuclidian distance = 2.0\nMultiplied by reaction path degeneracy 2.0\nfamily: Intra_R_Add_Exocyclic\nEa raised from 210.2 to 212.5 kJ/mol to match endothermicity of reaction.\"\"\"),\n)\n\nreaction(\n label = 'reaction3',\n reactants = ['CH3CHCCH2(18175)', 'C=[C][CH]C(18176)'],\n products = ['C=C([CH]C)C(=C)[CH]C(24182)'],\n transitionState = 'TS3',\n kinetics = Arrhenius(A=(0.00086947,'m^3/(mol*s)'), n=2.67356, Ea=(32.0272,'kJ/mol'), T0=(1,'K'), comment=\"\"\"Estimated using an average for rate rule [Ca_Cds-HH;CJ]\nEuclidian distance = 0\nfamily: R_Addition_MultipleBond\"\"\"),\n)\n\nreaction(\n label = 'reaction4',\n reactants = ['[CH2]C(=CC)C(C)=[C]C(25412)'],\n products = ['C=C([CH]C)C(=C)[CH]C(24182)'],\n transitionState = 'TS4',\n kinetics = Arrhenius(A=(7.74e+09,'s^-1'), n=1.08, Ea=(161.921,'kJ/mol'), T0=(1,'K'), Tmin=(300,'K'), Tmax=(1500,'K'), comment=\"\"\"From training reaction 198 used for R3H_DS;Cd_rad_out_Cs;Cs_H_out_2H\nExact match found for rate rule [R3H_DS;Cd_rad_out_Cs;Cs_H_out_2H]\nEuclidian distance = 0\nMultiplied by reaction path degeneracy 3.0\nfamily: intra_H_migration\"\"\"),\n)\n\nreaction(\n label = 'reaction5',\n reactants = ['[CH2]C(=[C]C)C(C)=CC(25413)'],\n products = ['C=C([CH]C)C(=C)[CH]C(24182)'],\n transitionState = 'TS5',\n kinetics = Arrhenius(A=(111300,'s^-1'), n=2.23, Ea=(44.3086,'kJ/mol'), T0=(1,'K'), comment=\"\"\"Estimated using template [R4H_DSS;Cd_rad_out_single;Cs_H_out] for rate rule [R4H_DSS;Cd_rad_out_Cs;Cs_H_out_2H]\nEuclidian distance = 2.2360679775\nMultiplied by reaction path degeneracy 3.0\nfamily: intra_H_migration\"\"\"),\n)\n\nreaction(\n label = 'reaction6',\n reactants = ['C=C([CH]C)C(=C)[CH]C(24182)'],\n products = ['[CH2]C(=CC)[C](C)C=C(24605)'],\n transitionState = 'TS6',\n kinetics = Arrhenius(A=(1.6e+06,'s^-1'), n=1.81, Ea=(149.787,'kJ/mol'), T0=(1,'K'), comment=\"\"\"From training reaction 101 used for R4H_SDS;C_rad_out_2H;Cs_H_out_2H\nExact match found for rate rule [R4H_SDS;C_rad_out_2H;Cs_H_out_2H]\nEuclidian distance = 0\nMultiplied by reaction path degeneracy 6.0\nfamily: intra_H_migration\"\"\"),\n)\n\nreaction(\n label = 'reaction7',\n reactants = ['C=C([CH]C)C(=C)[CH]C(24182)'],\n products = ['[CH2][C](C=C)C(C)=CC(24606)'],\n 
transitionState = 'TS7',\n kinetics = Arrhenius(A=(6.66e+06,'s^-1'), n=1.64, Ea=(100.416,'kJ/mol'), T0=(1,'K'), comment=\"\"\"From training reaction 96 used for R5H_SS(D)MS;C_rad_out_2H;Cs_H_out_2H\nExact match found for rate rule [R5H_SS(D)MS;C_rad_out_2H;Cs_H_out_2H]\nEuclidian distance = 0\nMultiplied by reaction path degeneracy 6.0\nfamily: intra_H_migration\"\"\"),\n)\n\nreaction(\n label = 'reaction8',\n reactants = ['C=[C][CH]C(18176)', 'C=[C][CH]C(18176)'],\n products = ['C=C([CH]C)C(=C)[CH]C(24182)'],\n transitionState = 'TS8',\n kinetics = Arrhenius(A=(3.73038e+06,'m^3/(mol*s)'), n=0.027223, Ea=(0,'kJ/mol'), T0=(1,'K'), comment=\"\"\"Estimated using an average for rate rule [Y_rad;Y_rad]\nEuclidian distance = 0\nfamily: R_Recombination\nEa raised from -14.4 to 0 kJ/mol.\"\"\"),\n)\n\nreaction(\n label = 'reaction9',\n reactants = ['C=C([CH]C)C(=C)[CH]C(24182)'],\n products = ['[CH2]C(=CC)[C]1CC1C(25414)'],\n transitionState = 'TS9',\n kinetics = Arrhenius(A=(7.36786e+12,'s^-1'), n=-0.105173, Ea=(93.5715,'kJ/mol'), T0=(1,'K'), Tmin=(303.03,'K'), Tmax=(2000,'K'), comment=\"\"\"Estimated using template [R3_D;doublebond_intra;radadd_intra_cs2H] for rate rule [R3_D;doublebond_intra_secDe_HNd;radadd_intra_cs2H]\nEuclidian distance = 2.0\nMultiplied by reaction path degeneracy 2.0\nfamily: Intra_R_Add_Endocyclic\"\"\"),\n)\n\nreaction(\n label = 'reaction10',\n reactants = ['C=C([CH]C)C(=C)[CH]C(24182)'],\n products = ['[CH2][C]1C(=CC)CC1C(25415)'],\n transitionState = 'TS10',\n kinetics = Arrhenius(A=(6.43734e+08,'s^-1'), n=0.926191, Ea=(130.445,'kJ/mol'), T0=(1,'K'), comment=\"\"\"Estimated using an average for rate rule [R4_S_D;doublebond_intra;radadd_intra_cs2H]\nEuclidian distance = 0\nMultiplied by reaction path degeneracy 2.0\nfamily: Intra_R_Add_Endocyclic\"\"\"),\n)\n\nreaction(\n label = 'reaction11',\n reactants = ['CH2(S)(23)', '[CH2]C(=C)C([CH2])=CC(25416)'],\n products = ['C=C([CH]C)C(=C)[CH]C(24182)'],\n transitionState = 'TS11',\n kinetics = Arrhenius(A=(7.94e+13,'cm^3/(mol*s)','*|/',0.25), n=-0.324, Ea=(0,'kJ/mol'), T0=(1,'K'), comment=\"\"\"From training reaction 4 used for carbene;Cd_pri\nExact match found for rate rule [carbene;Cd_pri]\nEuclidian distance = 0\nMultiplied by reaction path degeneracy 4.0\nfamily: 1,2_Insertion_carbene\nEa raised from -3.9 to 0 kJ/mol.\"\"\"),\n)\n\nreaction(\n label = 'reaction23',\n reactants = ['C=C([CH]C)C[C]=CC(24184)'],\n products = ['C=C([CH]C)C(=C)[CH]C(24182)'],\n transitionState = 'TS12',\n kinetics = Arrhenius(A=(1.74842e+09,'s^-1'), n=1.084, Ea=(170.038,'kJ/mol'), T0=(1,'K'), comment=\"\"\"Estimated using average of templates [cCsCJ;CdsJ;C] + [cCs(-HH)CJ;CJ;C] for rate rule [cCs(-HH)CJ;CdsJ;C]\nEuclidian distance = 1.0\nfamily: 1,2_shiftC\"\"\"),\n)\n\nreaction(\n label = 'reaction13',\n reactants = ['C=C([CH]C)C(=C)[CH]C(24182)'],\n products = ['CC=C1CCC1=CC(25269)'],\n transitionState = 'TS13',\n kinetics = Arrhenius(A=(1.62e+12,'s^-1'), n=-0.305, Ea=(8.28432,'kJ/mol'), T0=(1,'K'), Tmin=(600,'K'), Tmax=(2000,'K'), comment=\"\"\"From training reaction 2 used for R4_SSS;C_rad_out_2H;Cpri_rad_out_2H\nExact match found for rate rule [R4_SSS;C_rad_out_2H;Cpri_rad_out_2H]\nEuclidian distance = 0\nfamily: Birad_recombination\"\"\"),\n)\n\nreaction(\n label = 'reaction14',\n reactants = ['CH2(19)', '[CH2]C([C]=CC)=CC(25417)'],\n products = ['C=C([CH]C)C(=C)[CH]C(24182)'],\n transitionState = 'TS14',\n kinetics = Arrhenius(A=(1.06732e+06,'m^3/(mol*s)'), n=0.472793, Ea=(0,'kJ/mol'), T0=(1,'K'), comment=\"\"\"Estimated using template 
[Y_rad;Birad] for rate rule [Cd_rad/OneDe;Birad]\nEuclidian distance = 3.0\nfamily: Birad_R_Recombination\nEa raised from -3.5 to 0 kJ/mol.\"\"\"),\n)\n\nreaction(\n label = 'reaction15',\n reactants = ['C=C([CH]C)C(=C)[CH]C(24182)'],\n products = ['[CH2]C1([CH]C)C(=C)C1C(25296)'],\n transitionState = 'TS15',\n kinetics = Arrhenius(A=(6.72658e+10,'s^-1'), n=0.535608, Ea=(216.807,'kJ/mol'), T0=(1,'K'), comment=\"\"\"Estimated using average of templates [R4_S_D;doublebond_intra;radadd_intra_csHNd] + [R4_S_D;doublebond_intra_HNd;radadd_intra_cs] for rate rule [R4_S_(Cd)_D;doublebond_intra_HNd;radadd_intra_csHNd]\nEuclidian distance = 2.2360679775\nMultiplied by reaction path degeneracy 2.0\nfamily: Intra_R_Add_Exocyclic\nEa raised from 214.2 to 216.8 kJ/mol to match endothermicity of reaction.\"\"\"),\n)\n\nreaction(\n label = 'reaction16',\n reactants = ['H(3)', '[CH2]C(=CC)C(=C)C=C(24604)'],\n products = ['C=C([CH]C)C(=C)[CH]C(24182)'],\n transitionState = 'TS16',\n kinetics = Arrhenius(A=(2.31e+08,'cm^3/(mol*s)'), n=1.64, Ea=(0,'kJ/mol'), T0=(1,'K'), Tmin=(300,'K'), Tmax=(1500,'K'), comment=\"\"\"From training reaction 2544 used for Cds-HH_Cds-CdH;HJ\nExact match found for rate rule [Cds-HH_Cds-CdH;HJ]\nEuclidian distance = 0\nfamily: R_Addition_MultipleBond\nEa raised from -2.0 to 0 kJ/mol.\"\"\"),\n)\n\nreaction(\n label = 'reaction17',\n reactants = ['[CH2]CC(=C)C([CH2])=CC(25418)'],\n products = ['C=C([CH]C)C(=C)[CH]C(24182)'],\n transitionState = 'TS17',\n kinetics = Arrhenius(A=(1.72e+06,'s^-1'), n=1.99, Ea=(113.805,'kJ/mol'), T0=(1,'K'), comment=\"\"\"From training reaction 84 used for R2H_S;C_rad_out_2H;Cs_H_out_H/Cd\nExact match found for rate rule [R2H_S;C_rad_out_2H;Cs_H_out_H/Cd]\nEuclidian distance = 0\nMultiplied by reaction path degeneracy 2.0\nfamily: intra_H_migration\"\"\"),\n)\n\nreaction(\n label = 'reaction18',\n reactants = ['[CH]=C(CC)C([CH2])=CC(25419)'],\n products = ['C=C([CH]C)C(=C)[CH]C(24182)'],\n transitionState = 'TS18',\n kinetics = Arrhenius(A=(1.846e+10,'s^-1'), n=0.74, Ea=(145.185,'kJ/mol'), T0=(1,'K'), Tmin=(300,'K'), Tmax=(1500,'K'), comment=\"\"\"From training reaction 194 used for R3H_DS;Cd_rad_out_singleH;Cs_H_out_H/NonDeC\nExact match found for rate rule [R3H_DS;Cd_rad_out_singleH;Cs_H_out_H/NonDeC]\nEuclidian distance = 0\nMultiplied by reaction path degeneracy 2.0\nfamily: intra_H_migration\"\"\"),\n)\n\nreaction(\n label = 'reaction19',\n reactants = ['[CH2]C(=[C]C)C(=C)CC(25420)'],\n products = ['C=C([CH]C)C(=C)[CH]C(24182)'],\n transitionState = 'TS19',\n kinetics = Arrhenius(A=(74200,'s^-1'), n=2.23, Ea=(44.3086,'kJ/mol'), T0=(1,'K'), comment=\"\"\"Estimated using template [R4H_DSS;Cd_rad_out_single;Cs_H_out_1H] for rate rule [R4H_DSS;Cd_rad_out_Cs;Cs_H_out_H/NonDeC]\nEuclidian distance = 2.2360679775\nMultiplied by reaction path degeneracy 2.0\nfamily: intra_H_migration\"\"\"),\n)\n\nreaction(\n label = 'reaction20',\n reactants = ['[CH]=C([CH]C)C(C)=CC(25421)'],\n products = ['C=C([CH]C)C(=C)[CH]C(24182)'],\n transitionState = 'TS20',\n kinetics = Arrhenius(A=(111300,'s^-1'), n=2.23, Ea=(44.3086,'kJ/mol'), T0=(1,'K'), comment=\"\"\"Estimated using template [R4H_DSS;Cd_rad_out_singleH;Cs_H_out] for rate rule [R4H_DSS;Cd_rad_out_singleH;Cs_H_out_2H]\nEuclidian distance = 1.0\nMultiplied by reaction path degeneracy 3.0\nfamily: intra_H_migration\"\"\"),\n)\n\nreaction(\n label = 'reaction21',\n reactants = ['C=C([CH]C)C(=C)[CH]C(24182)'],\n products = ['[CH2][C](C=C)C(=C)CC(24623)'],\n transitionState = 'TS21',\n kinetics = 
Arrhenius(A=(6.66e+06,'s^-1'), n=1.64, Ea=(100.416,'kJ/mol'), T0=(1,'K'), comment=\"\"\"Estimated using template [R5H_SS(D)MS;C_rad_out_single;Cs_H_out_2H] for rate rule [R5H_SS(D)MS;C_rad_out_H/NonDeC;Cs_H_out_2H]\nEuclidian distance = 2.0\nMultiplied by reaction path degeneracy 6.0\nfamily: intra_H_migration\"\"\"),\n)\n\nreaction(\n label = 'reaction22',\n reactants = ['C=C([CH]C)C(=C)[CH]C(24182)'],\n products = ['C[CH][C]1CCC1=CC(25422)'],\n transitionState = 'TS22',\n kinetics = Arrhenius(A=(3.21867e+08,'s^-1'), n=0.926191, Ea=(130.445,'kJ/mol'), T0=(1,'K'), comment=\"\"\"Estimated using an average for rate rule [R4_S_D;doublebond_intra;radadd_intra_cs2H]\nEuclidian distance = 0\nfamily: Intra_R_Add_Endocyclic\"\"\"),\n)\n\nreaction(\n label = 'reaction23',\n reactants = ['C=C([CH]C)C(=C)[CH]C(24182)'],\n products = ['[CH2][C]1C(=C)C(C)C1C(25423)'],\n transitionState = 'TS23',\n kinetics = Arrhenius(A=(5.16207e+08,'s^-1'), n=0.911389, Ea=(125.357,'kJ/mol'), T0=(1,'K'), comment=\"\"\"Estimated using an average for rate rule [R4_S_D;doublebond_intra;radadd_intra_csHCs]\nEuclidian distance = 0\nfamily: Intra_R_Add_Endocyclic\"\"\"),\n)\n\nreaction(\n label = 'reaction24',\n reactants = ['C=C([CH]C)C(=C)[CH]C(24182)'],\n products = ['C=CC(=C)C(C)=CC(24616)'],\n transitionState = 'TS24',\n kinetics = Arrhenius(A=(1.27566e+10,'s^-1'), n=0.137, Ea=(24.9733,'kJ/mol'), T0=(1,'K'), comment=\"\"\"Estimated using template [R5;Y_rad;XH_Rrad] for rate rule [R5radEndo;Y_rad;XH_Rrad]\nEuclidian distance = 1.0\nMultiplied by reaction path degeneracy 6.0\nfamily: Intra_Disproportionation\"\"\"),\n)\n\nreaction(\n label = 'reaction24',\n reactants = ['C=[C]C(C)C(=C)[CH]C(24183)'],\n products = ['C=C([CH]C)C(=C)[CH]C(24182)'],\n transitionState = 'TS25',\n kinetics = Arrhenius(A=(8.66e+11,'s^-1'), n=0.438, Ea=(94.4747,'kJ/mol'), T0=(1,'K'), comment=\"\"\"From training reaction 5 used for cCs(-HC)CJ;CdsJ;C\nExact match found for rate rule [cCs(-HC)CJ;CdsJ;C]\nEuclidian distance = 0\nfamily: 1,2_shiftC\"\"\"),\n)\n\nreaction(\n label = 'reaction26',\n reactants = ['C=C([CH]C)C(=C)[CH]C(24182)'],\n products = ['C=C1C(=CC)CC1C(25265)'],\n transitionState = 'TS26',\n kinetics = Arrhenius(A=(3.24e+12,'s^-1'), n=-0.305, Ea=(8.28432,'kJ/mol'), T0=(1,'K'), Tmin=(600,'K'), Tmax=(2000,'K'), comment=\"\"\"Estimated using template [R4_SSS;C_rad_out_2H;Cpri_rad_out_single] for rate rule [R4_SSS;C_rad_out_2H;Cpri_rad_out_H/NonDeC]\nEuclidian distance = 2.0\nMultiplied by reaction path degeneracy 2.0\nfamily: Birad_recombination\"\"\"),\n)\n\nreaction(\n label = 'reaction27',\n reactants = ['CHCH3(T)(95)', '[CH2]C([C]=C)=CC(24774)'],\n products = ['C=C([CH]C)C(=C)[CH]C(24182)'],\n transitionState = 'TS27',\n kinetics = Arrhenius(A=(1.06732e+06,'m^3/(mol*s)'), n=0.472793, Ea=(0,'kJ/mol'), T0=(1,'K'), comment=\"\"\"Estimated using template [Y_rad;Birad] for rate rule [Cd_rad/OneDe;Birad]\nEuclidian distance = 3.0\nfamily: Birad_R_Recombination\nEa raised from -3.5 to 0 kJ/mol.\"\"\"),\n)\n\nreaction(\n label = 'reaction28',\n reactants = ['[CH]=C([CH]C)C(=C)CC(25424)'],\n products = ['C=C([CH]C)C(=C)[CH]C(24182)'],\n transitionState = 'TS28',\n kinetics = Arrhenius(A=(74200,'s^-1'), n=2.23, Ea=(44.3086,'kJ/mol'), T0=(1,'K'), comment=\"\"\"Estimated using template [R4H_DSS;Cd_rad_out_singleH;Cs_H_out_1H] for rate rule [R4H_DSS;Cd_rad_out_singleH;Cs_H_out_H/NonDeC]\nEuclidian distance = 1.0\nMultiplied by reaction path degeneracy 2.0\nfamily: intra_H_migration\"\"\"),\n)\n\nreaction(\n label = 'reaction29',\n reactants = 
['C=C([CH]C)C(=C)[CH]C(24182)'],\n products = ['C=CC(=C)C(=C)CC(24630)'],\n transitionState = 'TS29',\n kinetics = Arrhenius(A=(1.926e+10,'s^-1'), n=0.137, Ea=(8.368,'kJ/mol'), T0=(1,'K'), Tmin=(300,'K'), Tmax=(1500,'K'), comment=\"\"\"Estimated using template [R5;Y_rad_NDe;XH_Rrad] for rate rule [R5radEndo;Y_rad_NDe;XH_Rrad]\nEuclidian distance = 1.0\nMultiplied by reaction path degeneracy 6.0\nfamily: Intra_Disproportionation\"\"\"),\n)\n\nreaction(\n label = 'reaction30',\n reactants = ['C=C([CH]C)C(=C)[CH]C(24182)'],\n products = ['C=C1C(=C)C(C)C1C(25274)'],\n transitionState = 'TS30',\n kinetics = Arrhenius(A=(1.62e+12,'s^-1'), n=-0.305, Ea=(8.28432,'kJ/mol'), T0=(1,'K'), comment=\"\"\"Estimated using template [R4_SSS;C_rad_out_single;Cpri_rad_out_single] for rate rule [R4_SSS;C_rad_out_H/NonDeC;Cpri_rad_out_H/NonDeC]\nEuclidian distance = 2.82842712475\nfamily: Birad_recombination\"\"\"),\n)\n\nnetwork(\n label = '4267',\n isomers = [\n 'C=C([CH]C)C(=C)[CH]C(24182)',\n ],\n reactants = [\n ('CH3CHCCH2(18175)', 'CH3CHCCH2(18175)'),\n ],\n bathGas = {\n 'N2': 0.5,\n 'Ne': 0.5,\n },\n)\n\npressureDependence(\n label = '4267',\n Tmin = (300,'K'),\n Tmax = (2000,'K'),\n Tcount = 8,\n Tlist = ([302.47,323.145,369.86,455.987,609.649,885.262,1353.64,1896.74],'K'),\n Pmin = (0.01,'bar'),\n Pmax = (100,'bar'),\n Pcount = 5,\n Plist = ([0.0125282,0.0667467,1,14.982,79.8202],'bar'),\n maximumGrainSize = (0.5,'kcal/mol'),\n minimumGrainCount = 250,\n method = 'modified strong collision',\n interpolationModel = ('Chebyshev', 6, 4),\n activeKRotor = True,\n activeJRotor = True,\n rmgmode = True,\n)\n\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
# (c) Simen Sommerfeldt, @sisomm, [email protected] Licensed as CC-BY-SA
import os
import argparse,time
import pygame
import paho.mqtt.client as paho
parser = argparse.ArgumentParser()
parser.add_argument("-s","--server", default="127.0.0.1", help="The IP address of the MQTT server")
parser.add_argument("-v", "--verbosity", type=int, choices=[0, 1], default=0,
help="increase output verbosity")
args = parser.parse_args()
def task_laugh():
pygame.mixer.music.load("../sounds/witchlaugh.wav")
pygame.mixer.music.play()
def task_goodbye():
pygame.mixer.music.load("../sounds/despicable.wav")
pygame.mixer.music.play()
def task_hello():
pygame.mixer.music.load("../sounds/mday.wav")
pygame.mixer.music.play()
def task_doh():
print("SOUNDPLAYER DOH!")
pygame.mixer.music.load("../sounds/doh.wav")
pygame.mixer.music.play()
def on_message(mosq, obj, msg):
    # paho-mqtt delivers the payload as bytes under Python 3; decode it once so the
    # string comparisons below behave the same on Python 2 and 3
    payload = msg.payload.decode() if isinstance(msg.payload, bytes) else msg.payload
    print("SOUNDPLAYER: Message received on topic "+msg.topic+" with payload "+payload)
    print(len(payload))
    if payload == "GOODBYE":
        task_goodbye()
    if payload == "HELLO":
        task_hello()
    if payload == "DOH":
        task_doh()
    if payload == "LAUGH":
        task_laugh()
print("SOUNDPLAYER: Connecting")
mypid = os.getpid()
client = paho.Client("sound_broker_"+str(mypid))
client.connect(args.server)
connect_time=time.time()
client.on_message = on_message
client.subscribe('/raspberry/1/incoming',0)
pygame.mixer.init()
try:
while client.loop()==0:
pass
except KeyboardInterrupt:
print('SOUNDPLAYER: Interrupt')
client.unsubscribe("/raspberry/1/incoming")
client.disconnect()
|
normal
|
{
"blob_id": "9852d2a15047b110c7f374fd75e531c60c954724",
"index": 3920,
"step-1": "<mask token>\n\n\ndef task_goodbye():\n pygame.mixer.music.load('../sounds/despicable.wav')\n pygame.mixer.music.play()\n\n\ndef task_hello():\n pygame.mixer.music.load('../sounds/mday.wav')\n pygame.mixer.music.play()\n\n\ndef task_doh():\n print('SOUNDPLAYER DOH!')\n pygame.mixer.music.load('../sounds/doh.wav')\n pygame.mixer.music.play()\n\n\ndef on_message(mosq, obj, msg):\n print('SOUNDPLAYER: Message received on topic ' + msg.topic +\n ' with payload ' + msg.payload)\n print(len(msg.payload))\n if msg.payload == 'GOODBYE':\n task_goodbye()\n if msg.payload == 'HELLO':\n task_hello()\n if msg.payload == 'DOH':\n task_doh()\n if msg.payload == 'LAUGH':\n task_laugh()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef task_laugh():\n pygame.mixer.music.load('../sounds/witchlaugh.wav')\n pygame.mixer.music.play()\n\n\ndef task_goodbye():\n pygame.mixer.music.load('../sounds/despicable.wav')\n pygame.mixer.music.play()\n\n\ndef task_hello():\n pygame.mixer.music.load('../sounds/mday.wav')\n pygame.mixer.music.play()\n\n\ndef task_doh():\n print('SOUNDPLAYER DOH!')\n pygame.mixer.music.load('../sounds/doh.wav')\n pygame.mixer.music.play()\n\n\ndef on_message(mosq, obj, msg):\n print('SOUNDPLAYER: Message received on topic ' + msg.topic +\n ' with payload ' + msg.payload)\n print(len(msg.payload))\n if msg.payload == 'GOODBYE':\n task_goodbye()\n if msg.payload == 'HELLO':\n task_hello()\n if msg.payload == 'DOH':\n task_doh()\n if msg.payload == 'LAUGH':\n task_laugh()\n\n\n<mask token>\n",
"step-3": "<mask token>\nparser.add_argument('-s', '--server', default='127.0.0.1', help=\n 'The IP address of the MQTT server')\nparser.add_argument('-v', '--verbosity', type=int, choices=[0, 1], default=\n 0, help='increase output verbosity')\n<mask token>\n\n\ndef task_laugh():\n pygame.mixer.music.load('../sounds/witchlaugh.wav')\n pygame.mixer.music.play()\n\n\ndef task_goodbye():\n pygame.mixer.music.load('../sounds/despicable.wav')\n pygame.mixer.music.play()\n\n\ndef task_hello():\n pygame.mixer.music.load('../sounds/mday.wav')\n pygame.mixer.music.play()\n\n\ndef task_doh():\n print('SOUNDPLAYER DOH!')\n pygame.mixer.music.load('../sounds/doh.wav')\n pygame.mixer.music.play()\n\n\ndef on_message(mosq, obj, msg):\n print('SOUNDPLAYER: Message received on topic ' + msg.topic +\n ' with payload ' + msg.payload)\n print(len(msg.payload))\n if msg.payload == 'GOODBYE':\n task_goodbye()\n if msg.payload == 'HELLO':\n task_hello()\n if msg.payload == 'DOH':\n task_doh()\n if msg.payload == 'LAUGH':\n task_laugh()\n\n\nprint('SOUNDPLAYER: Connecting')\n<mask token>\nclient.connect(args.server)\n<mask token>\nclient.subscribe('/raspberry/1/incoming', 0)\npygame.mixer.init()\ntry:\n while client.loop() == 0:\n pass\nexcept KeyboardInterrupt:\n print('SOUNDPLAYER: Interrupt')\n client.unsubscribe('/raspberry/1/incoming')\n client.disconnect()\n",
"step-4": "import os\nimport argparse, time\nimport pygame\nimport paho.mqtt.client as paho\nparser = argparse.ArgumentParser()\nparser.add_argument('-s', '--server', default='127.0.0.1', help=\n 'The IP address of the MQTT server')\nparser.add_argument('-v', '--verbosity', type=int, choices=[0, 1], default=\n 0, help='increase output verbosity')\nargs = parser.parse_args()\n\n\ndef task_laugh():\n pygame.mixer.music.load('../sounds/witchlaugh.wav')\n pygame.mixer.music.play()\n\n\ndef task_goodbye():\n pygame.mixer.music.load('../sounds/despicable.wav')\n pygame.mixer.music.play()\n\n\ndef task_hello():\n pygame.mixer.music.load('../sounds/mday.wav')\n pygame.mixer.music.play()\n\n\ndef task_doh():\n print('SOUNDPLAYER DOH!')\n pygame.mixer.music.load('../sounds/doh.wav')\n pygame.mixer.music.play()\n\n\ndef on_message(mosq, obj, msg):\n print('SOUNDPLAYER: Message received on topic ' + msg.topic +\n ' with payload ' + msg.payload)\n print(len(msg.payload))\n if msg.payload == 'GOODBYE':\n task_goodbye()\n if msg.payload == 'HELLO':\n task_hello()\n if msg.payload == 'DOH':\n task_doh()\n if msg.payload == 'LAUGH':\n task_laugh()\n\n\nprint('SOUNDPLAYER: Connecting')\nmypid = os.getpid()\nclient = paho.Client('sound_broker_' + str(mypid))\nclient.connect(args.server)\nconnect_time = time.time()\nclient.on_message = on_message\nclient.subscribe('/raspberry/1/incoming', 0)\npygame.mixer.init()\ntry:\n while client.loop() == 0:\n pass\nexcept KeyboardInterrupt:\n print('SOUNDPLAYER: Interrupt')\n client.unsubscribe('/raspberry/1/incoming')\n client.disconnect()\n",
"step-5": "# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4\n# (c) Simen Sommerfeldt, @sisomm, [email protected] Licensed as CC-BY-SA\n\nimport os\nimport argparse,time\nimport pygame\nimport paho.mqtt.client as paho\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"-s\",\"--server\", default=\"127.0.0.1\", help=\"The IP address of the MQTT server\")\nparser.add_argument(\"-v\", \"--verbosity\", type=int, choices=[0, 1], default=0,\n help=\"increase output verbosity\")\nargs = parser.parse_args()\n\ndef task_laugh():\n pygame.mixer.music.load(\"../sounds/witchlaugh.wav\")\n pygame.mixer.music.play()\n \ndef task_goodbye():\n pygame.mixer.music.load(\"../sounds/despicable.wav\")\n pygame.mixer.music.play()\n\ndef task_hello():\n pygame.mixer.music.load(\"../sounds/mday.wav\")\n pygame.mixer.music.play()\n\ndef task_doh():\n print(\"SOUNDPLAYER DOH!\")\n pygame.mixer.music.load(\"../sounds/doh.wav\") \n pygame.mixer.music.play()\n\ndef on_message(mosq, obj, msg):\n\n print(\"SOUNDPLAYER: Message received on topic \"+msg.topic+\" with payload \"+msg.payload)\n print(len(msg.payload));\n if(msg.payload==\"GOODBYE\"):\n task_goodbye()\n\n if(msg.payload==\"HELLO\"):\n task_hello()\n\n if(msg.payload==\"DOH\"):\n task_doh()\n\n if(msg.payload==\"LAUGH\"):\n task_laugh()\n\nprint(\"SOUNDPLAYER: Connecting\")\nmypid = os.getpid()\nclient = paho.Client(\"sound_broker_\"+str(mypid))\nclient.connect(args.server)\nconnect_time=time.time()\nclient.on_message = on_message\nclient.subscribe('/raspberry/1/incoming',0)\n\npygame.mixer.init()\n\ntry:\n while client.loop()==0:\n pass\n\nexcept KeyboardInterrupt:\n print('SOUNDPLAYER: Interrupt')\n client.unsubscribe(\"/raspberry/1/incoming\")\n client.disconnect()\n\n",
"step-ids": [
4,
5,
6,
8,
9
]
}
|
[
4,
5,
6,
8,
9
] |
import math
z = 1j
# math.cos and math.sin only accept real arguments, so the real and
# imaginary parts of z are evaluated separately here.
cosinus_real = math.cos(z.real)
cosinus_imaginary = math.cos(z.imag)
sinus_real = math.sin(z.real)
sinus_imag = math.sin(z.imag)
print(cosinus_real)
print(cosinus_imaginary)
print(sinus_real)
print(sinus_imag)
|
normal
|
{
"blob_id": "7ea608b73f592cffc7723b4319cf1a87b3e9b443",
"index": 4220,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(cosinus_real)\nprint(cosinus_imaginary)\nprint(sinus_real)\nprint(sinus_imag)\n",
"step-3": "<mask token>\nz = 1.0j\ncosinus_real = math.cos(z.real)\ncosinus_imaginary = math.cos(z.imag)\nsinus_real = math.sin(z.real)\nsinus_imag = math.sin(z.imag)\nprint(cosinus_real)\nprint(cosinus_imaginary)\nprint(sinus_real)\nprint(sinus_imag)\n",
"step-4": "import math\nz = 1.0j\ncosinus_real = math.cos(z.real)\ncosinus_imaginary = math.cos(z.imag)\nsinus_real = math.sin(z.real)\nsinus_imag = math.sin(z.imag)\nprint(cosinus_real)\nprint(cosinus_imaginary)\nprint(sinus_real)\nprint(sinus_imag)\n",
"step-5": "import math\n\nz = 1j\n\n\ncosinus_real = math.cos(z.real)\ncosinus_imaginary = math.cos(z.imag)\nsinus_real = math.sin(z.real)\nsinus_imag = math.sin(z.imag)\n\nprint (cosinus_real)\nprint (cosinus_imaginary)\nprint (sinus_real)\nprint (sinus_imag)\n\n\n\n\n\n\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
class CUtil:
@staticmethod
def generate_board(initial_board, grid_size):
board_dictionary = dict()
iterator = 0
board_identifiers = CUtil.__generate_board_identifiers(grid_size)
for row in initial_board:
for data in row:
identifier = board_identifiers[iterator]
board_dictionary[identifier] = str(data)
if data == 0:
board_dictionary[identifier] = '123456789'
iterator += 1
return board_dictionary
@staticmethod
def generate_constraint_dictionary(grid_size):
identifiers = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
board_size = grid_size * grid_size
rows_selected = columns_selected = identifiers[:board_size]
board_identifiers = CUtil.__generate_board_identifiers(grid_size)
generated_grids = CUtil.__generate_grids(rows_selected, grid_size)
constraints = dict()
for board_identifier in board_identifiers:
rows = CUtil.__others_in_row(board_identifier, rows_selected)
columns = CUtil.__others_in_columns(board_identifier,
columns_selected)
grids = CUtil.__others_in_grid(board_identifier, generated_grids)
constraints[board_identifier] = set(rows + columns + grids)
return constraints
<|reserved_special_token_0|>
<|reserved_special_token_0|>
@staticmethod
def __others_in_columns(board_identifier, identifiers):
column_identifier = board_identifier[1]
others = []
for identifier in identifiers:
new_element = identifier + column_identifier
if new_element != board_identifier:
others.append(new_element)
return others
@staticmethod
def __others_in_grid(board_identifier, grids):
selected_grid = []
for index, grid in enumerate(grids):
for element in grid:
if element == board_identifier:
selected_grid = list(grid)
break
selected_grid.remove(board_identifier)
return selected_grid
@staticmethod
def __generate_grids(identifiers, grid_size):
split_identifiers = []
for i in range(grid_size):
start = i * grid_size
end = grid_size * (i + 1)
selected = identifiers[start:end]
split_identifiers.append(list(selected))
grids = []
for row in split_identifiers:
for column in split_identifiers:
inner_grid = []
for identifier_row in row:
for identifier_column in column:
inner_grid.append(identifier_row + identifier_column)
grids.append(inner_grid)
return grids
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class CUtil:
@staticmethod
def generate_board(initial_board, grid_size):
board_dictionary = dict()
iterator = 0
board_identifiers = CUtil.__generate_board_identifiers(grid_size)
for row in initial_board:
for data in row:
identifier = board_identifiers[iterator]
board_dictionary[identifier] = str(data)
if data == 0:
board_dictionary[identifier] = '123456789'
iterator += 1
return board_dictionary
@staticmethod
def generate_constraint_dictionary(grid_size):
identifiers = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
board_size = grid_size * grid_size
rows_selected = columns_selected = identifiers[:board_size]
board_identifiers = CUtil.__generate_board_identifiers(grid_size)
generated_grids = CUtil.__generate_grids(rows_selected, grid_size)
constraints = dict()
for board_identifier in board_identifiers:
rows = CUtil.__others_in_row(board_identifier, rows_selected)
columns = CUtil.__others_in_columns(board_identifier,
columns_selected)
grids = CUtil.__others_in_grid(board_identifier, generated_grids)
constraints[board_identifier] = set(rows + columns + grids)
return constraints
<|reserved_special_token_0|>
<|reserved_special_token_0|>
@staticmethod
def __others_in_columns(board_identifier, identifiers):
column_identifier = board_identifier[1]
others = []
for identifier in identifiers:
new_element = identifier + column_identifier
if new_element != board_identifier:
others.append(new_element)
return others
@staticmethod
def __others_in_grid(board_identifier, grids):
selected_grid = []
for index, grid in enumerate(grids):
for element in grid:
if element == board_identifier:
selected_grid = list(grid)
break
selected_grid.remove(board_identifier)
return selected_grid
@staticmethod
def __generate_grids(identifiers, grid_size):
split_identifiers = []
for i in range(grid_size):
start = i * grid_size
end = grid_size * (i + 1)
selected = identifiers[start:end]
split_identifiers.append(list(selected))
grids = []
for row in split_identifiers:
for column in split_identifiers:
inner_grid = []
for identifier_row in row:
for identifier_column in column:
inner_grid.append(identifier_row + identifier_column)
grids.append(inner_grid)
return grids
<|reserved_special_token_0|>
@staticmethod
def __testing9x9():
string = 'ABCDEFGHI'
output_string = ''
for letter1 in string:
for letter2 in string:
output_string += letter1 + letter2 + ' '
output_string += '\n'
print(output_string)
<|reserved_special_token_1|>
class CUtil:
@staticmethod
def generate_board(initial_board, grid_size):
board_dictionary = dict()
iterator = 0
board_identifiers = CUtil.__generate_board_identifiers(grid_size)
for row in initial_board:
for data in row:
identifier = board_identifiers[iterator]
board_dictionary[identifier] = str(data)
if data == 0:
board_dictionary[identifier] = '123456789'
iterator += 1
return board_dictionary
@staticmethod
def generate_constraint_dictionary(grid_size):
identifiers = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
board_size = grid_size * grid_size
rows_selected = columns_selected = identifiers[:board_size]
board_identifiers = CUtil.__generate_board_identifiers(grid_size)
generated_grids = CUtil.__generate_grids(rows_selected, grid_size)
constraints = dict()
for board_identifier in board_identifiers:
rows = CUtil.__others_in_row(board_identifier, rows_selected)
columns = CUtil.__others_in_columns(board_identifier,
columns_selected)
grids = CUtil.__others_in_grid(board_identifier, generated_grids)
constraints[board_identifier] = set(rows + columns + grids)
return constraints
<|reserved_special_token_0|>
@staticmethod
def __others_in_row(board_identifier, identifiers):
row_identifier = board_identifier[0]
others = []
for identifier in identifiers:
new_element = row_identifier + identifier
if new_element != board_identifier:
others.append(new_element)
return others
@staticmethod
def __others_in_columns(board_identifier, identifiers):
column_identifier = board_identifier[1]
others = []
for identifier in identifiers:
new_element = identifier + column_identifier
if new_element != board_identifier:
others.append(new_element)
return others
@staticmethod
def __others_in_grid(board_identifier, grids):
selected_grid = []
for index, grid in enumerate(grids):
for element in grid:
if element == board_identifier:
selected_grid = list(grid)
break
selected_grid.remove(board_identifier)
return selected_grid
@staticmethod
def __generate_grids(identifiers, grid_size):
split_identifiers = []
for i in range(grid_size):
start = i * grid_size
end = grid_size * (i + 1)
selected = identifiers[start:end]
split_identifiers.append(list(selected))
grids = []
for row in split_identifiers:
for column in split_identifiers:
inner_grid = []
for identifier_row in row:
for identifier_column in column:
inner_grid.append(identifier_row + identifier_column)
grids.append(inner_grid)
return grids
<|reserved_special_token_0|>
@staticmethod
def __testing9x9():
string = 'ABCDEFGHI'
output_string = ''
for letter1 in string:
for letter2 in string:
output_string += letter1 + letter2 + ' '
output_string += '\n'
print(output_string)
<|reserved_special_token_1|>
class CUtil:
@staticmethod
def generate_board(initial_board, grid_size):
board_dictionary = dict()
iterator = 0
board_identifiers = CUtil.__generate_board_identifiers(grid_size)
for row in initial_board:
for data in row:
identifier = board_identifiers[iterator]
board_dictionary[identifier] = str(data)
if data == 0:
board_dictionary[identifier] = '123456789'
iterator += 1
return board_dictionary
@staticmethod
def generate_constraint_dictionary(grid_size):
identifiers = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
board_size = grid_size * grid_size
rows_selected = columns_selected = identifiers[:board_size]
board_identifiers = CUtil.__generate_board_identifiers(grid_size)
generated_grids = CUtil.__generate_grids(rows_selected, grid_size)
constraints = dict()
for board_identifier in board_identifiers:
rows = CUtil.__others_in_row(board_identifier, rows_selected)
columns = CUtil.__others_in_columns(board_identifier,
columns_selected)
grids = CUtil.__others_in_grid(board_identifier, generated_grids)
constraints[board_identifier] = set(rows + columns + grids)
return constraints
@staticmethod
def constraints_as_tuple(constraints):
constraints_tuples = []
for key, values in constraints.items():
for value in values:
constraints_tuples.append((key, value))
return constraints_tuples
@staticmethod
def __others_in_row(board_identifier, identifiers):
row_identifier = board_identifier[0]
others = []
for identifier in identifiers:
new_element = row_identifier + identifier
if new_element != board_identifier:
others.append(new_element)
return others
@staticmethod
def __others_in_columns(board_identifier, identifiers):
column_identifier = board_identifier[1]
others = []
for identifier in identifiers:
new_element = identifier + column_identifier
if new_element != board_identifier:
others.append(new_element)
return others
@staticmethod
def __others_in_grid(board_identifier, grids):
selected_grid = []
for index, grid in enumerate(grids):
for element in grid:
if element == board_identifier:
selected_grid = list(grid)
break
selected_grid.remove(board_identifier)
return selected_grid
@staticmethod
def __generate_grids(identifiers, grid_size):
split_identifiers = []
for i in range(grid_size):
start = i * grid_size
end = grid_size * (i + 1)
selected = identifiers[start:end]
split_identifiers.append(list(selected))
grids = []
for row in split_identifiers:
for column in split_identifiers:
inner_grid = []
for identifier_row in row:
for identifier_column in column:
inner_grid.append(identifier_row + identifier_column)
grids.append(inner_grid)
return grids
@staticmethod
def __generate_board_identifiers(grid_size):
identifiers = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
board_size = grid_size * grid_size
rows_selected = columns_selected = identifiers[:board_size]
board = []
for row_identifier in rows_selected:
for column_identifier in columns_selected:
board.append(row_identifier + column_identifier)
return board
@staticmethod
def __testing9x9():
string = 'ABCDEFGHI'
output_string = ''
for letter1 in string:
for letter2 in string:
output_string += letter1 + letter2 + ' '
output_string += '\n'
print(output_string)
<|reserved_special_token_1|>
class CUtil:
# Returns a dictionary containing the cell UID as they key and the data for the cell as the value
# Ex: 'AA': 2, 'AB': 4 ....
@staticmethod
def generate_board(initial_board, grid_size):
board_dictionary = dict()
iterator = 0
board_identifiers = CUtil.__generate_board_identifiers(grid_size)
for row in initial_board:
for data in row:
identifier = board_identifiers[iterator]
board_dictionary[identifier] = str(data)
if data == 0:
board_dictionary[identifier] = "123456789"
iterator += 1
return board_dictionary
# returns a dictionary containing possible constraints for each cell
# Ex: 'AA': 'AB', 'AC' ....
@staticmethod
def generate_constraint_dictionary(grid_size):
identifiers = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
board_size = grid_size * grid_size
rows_selected = columns_selected = identifiers[:board_size]
board_identifiers = CUtil.__generate_board_identifiers(grid_size)
generated_grids = CUtil.__generate_grids(rows_selected, grid_size)
constraints = dict()
for board_identifier in board_identifiers:
rows = CUtil.__others_in_row(board_identifier, rows_selected)
columns = CUtil.__others_in_columns(board_identifier, columns_selected)
grids = CUtil.__others_in_grid(board_identifier, generated_grids)
constraints[board_identifier] = set(rows + columns + grids)
return constraints
# returns a tuple containing possible constraints for each cell
# Ex: ('AA', 'AB'), ('AA', AC') ....
@staticmethod
def constraints_as_tuple(constraints):
constraints_tuples = []
for key, values in constraints.items():
for value in values:
constraints_tuples.append((key, value))
return constraints_tuples
@staticmethod
def __others_in_row(board_identifier, identifiers):
# if 'AB' then get just 'A', because that's the row
row_identifier = board_identifier[0]
others = []
for identifier in identifiers:
new_element = row_identifier + identifier
if new_element != board_identifier:
others.append(new_element)
return others
@staticmethod
def __others_in_columns(board_identifier, identifiers):
# if 'AB' then get just 'B', because that's the columns
column_identifier = board_identifier[1]
others = []
for identifier in identifiers:
new_element = identifier + column_identifier
if new_element != board_identifier:
others.append(new_element)
return others
@staticmethod
def __others_in_grid(board_identifier, grids):
        # find the grid that contains this identifier and return its other members
selected_grid = []
for index, grid in enumerate(grids):
for element in grid:
if element == board_identifier:
selected_grid = list(grid)
break
selected_grid.remove(board_identifier)
return selected_grid
@staticmethod
def __generate_grids(identifiers, grid_size):
split_identifiers = []
for i in range(grid_size):
start = i * grid_size
end = grid_size * (i + 1)
selected = identifiers[start:end]
split_identifiers.append(list(selected))
grids = []
for row in split_identifiers:
# ["A", "B", "C"]
for column in split_identifiers:
# ["A", "B", "C"]
inner_grid = []
for identifier_row in row:
for identifier_column in column:
inner_grid.append(identifier_row + identifier_column)
grids.append(inner_grid)
return grids
@staticmethod
def __generate_board_identifiers(grid_size):
identifiers = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
board_size = grid_size * grid_size
rows_selected = columns_selected = identifiers[:board_size]
board = []
for row_identifier in rows_selected:
for column_identifier in columns_selected:
board.append(row_identifier + column_identifier)
return board
@staticmethod
def __testing9x9():
string = "ABCDEFGHI"
output_string = ""
for letter1 in string:
for letter2 in string:
output_string += letter1 + letter2 + " "
output_string += "\n"
print(output_string)
|
flexible
|
{
"blob_id": "929e6deeb017fd338c63439f689d05331b016d0f",
"index": 1951,
"step-1": "class CUtil:\n\n @staticmethod\n def generate_board(initial_board, grid_size):\n board_dictionary = dict()\n iterator = 0\n board_identifiers = CUtil.__generate_board_identifiers(grid_size)\n for row in initial_board:\n for data in row:\n identifier = board_identifiers[iterator]\n board_dictionary[identifier] = str(data)\n if data == 0:\n board_dictionary[identifier] = '123456789'\n iterator += 1\n return board_dictionary\n\n @staticmethod\n def generate_constraint_dictionary(grid_size):\n identifiers = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'\n board_size = grid_size * grid_size\n rows_selected = columns_selected = identifiers[:board_size]\n board_identifiers = CUtil.__generate_board_identifiers(grid_size)\n generated_grids = CUtil.__generate_grids(rows_selected, grid_size)\n constraints = dict()\n for board_identifier in board_identifiers:\n rows = CUtil.__others_in_row(board_identifier, rows_selected)\n columns = CUtil.__others_in_columns(board_identifier,\n columns_selected)\n grids = CUtil.__others_in_grid(board_identifier, generated_grids)\n constraints[board_identifier] = set(rows + columns + grids)\n return constraints\n <mask token>\n <mask token>\n\n @staticmethod\n def __others_in_columns(board_identifier, identifiers):\n column_identifier = board_identifier[1]\n others = []\n for identifier in identifiers:\n new_element = identifier + column_identifier\n if new_element != board_identifier:\n others.append(new_element)\n return others\n\n @staticmethod\n def __others_in_grid(board_identifier, grids):\n selected_grid = []\n for index, grid in enumerate(grids):\n for element in grid:\n if element == board_identifier:\n selected_grid = list(grid)\n break\n selected_grid.remove(board_identifier)\n return selected_grid\n\n @staticmethod\n def __generate_grids(identifiers, grid_size):\n split_identifiers = []\n for i in range(grid_size):\n start = i * grid_size\n end = grid_size * (i + 1)\n selected = identifiers[start:end]\n split_identifiers.append(list(selected))\n grids = []\n for row in split_identifiers:\n for column in split_identifiers:\n inner_grid = []\n for identifier_row in row:\n for identifier_column in column:\n inner_grid.append(identifier_row + identifier_column)\n grids.append(inner_grid)\n return grids\n <mask token>\n <mask token>\n",
"step-2": "class CUtil:\n\n @staticmethod\n def generate_board(initial_board, grid_size):\n board_dictionary = dict()\n iterator = 0\n board_identifiers = CUtil.__generate_board_identifiers(grid_size)\n for row in initial_board:\n for data in row:\n identifier = board_identifiers[iterator]\n board_dictionary[identifier] = str(data)\n if data == 0:\n board_dictionary[identifier] = '123456789'\n iterator += 1\n return board_dictionary\n\n @staticmethod\n def generate_constraint_dictionary(grid_size):\n identifiers = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'\n board_size = grid_size * grid_size\n rows_selected = columns_selected = identifiers[:board_size]\n board_identifiers = CUtil.__generate_board_identifiers(grid_size)\n generated_grids = CUtil.__generate_grids(rows_selected, grid_size)\n constraints = dict()\n for board_identifier in board_identifiers:\n rows = CUtil.__others_in_row(board_identifier, rows_selected)\n columns = CUtil.__others_in_columns(board_identifier,\n columns_selected)\n grids = CUtil.__others_in_grid(board_identifier, generated_grids)\n constraints[board_identifier] = set(rows + columns + grids)\n return constraints\n <mask token>\n <mask token>\n\n @staticmethod\n def __others_in_columns(board_identifier, identifiers):\n column_identifier = board_identifier[1]\n others = []\n for identifier in identifiers:\n new_element = identifier + column_identifier\n if new_element != board_identifier:\n others.append(new_element)\n return others\n\n @staticmethod\n def __others_in_grid(board_identifier, grids):\n selected_grid = []\n for index, grid in enumerate(grids):\n for element in grid:\n if element == board_identifier:\n selected_grid = list(grid)\n break\n selected_grid.remove(board_identifier)\n return selected_grid\n\n @staticmethod\n def __generate_grids(identifiers, grid_size):\n split_identifiers = []\n for i in range(grid_size):\n start = i * grid_size\n end = grid_size * (i + 1)\n selected = identifiers[start:end]\n split_identifiers.append(list(selected))\n grids = []\n for row in split_identifiers:\n for column in split_identifiers:\n inner_grid = []\n for identifier_row in row:\n for identifier_column in column:\n inner_grid.append(identifier_row + identifier_column)\n grids.append(inner_grid)\n return grids\n <mask token>\n\n @staticmethod\n def __testing9x9():\n string = 'ABCDEFGHI'\n output_string = ''\n for letter1 in string:\n for letter2 in string:\n output_string += letter1 + letter2 + ' '\n output_string += '\\n'\n print(output_string)\n",
"step-3": "class CUtil:\n\n @staticmethod\n def generate_board(initial_board, grid_size):\n board_dictionary = dict()\n iterator = 0\n board_identifiers = CUtil.__generate_board_identifiers(grid_size)\n for row in initial_board:\n for data in row:\n identifier = board_identifiers[iterator]\n board_dictionary[identifier] = str(data)\n if data == 0:\n board_dictionary[identifier] = '123456789'\n iterator += 1\n return board_dictionary\n\n @staticmethod\n def generate_constraint_dictionary(grid_size):\n identifiers = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'\n board_size = grid_size * grid_size\n rows_selected = columns_selected = identifiers[:board_size]\n board_identifiers = CUtil.__generate_board_identifiers(grid_size)\n generated_grids = CUtil.__generate_grids(rows_selected, grid_size)\n constraints = dict()\n for board_identifier in board_identifiers:\n rows = CUtil.__others_in_row(board_identifier, rows_selected)\n columns = CUtil.__others_in_columns(board_identifier,\n columns_selected)\n grids = CUtil.__others_in_grid(board_identifier, generated_grids)\n constraints[board_identifier] = set(rows + columns + grids)\n return constraints\n <mask token>\n\n @staticmethod\n def __others_in_row(board_identifier, identifiers):\n row_identifier = board_identifier[0]\n others = []\n for identifier in identifiers:\n new_element = row_identifier + identifier\n if new_element != board_identifier:\n others.append(new_element)\n return others\n\n @staticmethod\n def __others_in_columns(board_identifier, identifiers):\n column_identifier = board_identifier[1]\n others = []\n for identifier in identifiers:\n new_element = identifier + column_identifier\n if new_element != board_identifier:\n others.append(new_element)\n return others\n\n @staticmethod\n def __others_in_grid(board_identifier, grids):\n selected_grid = []\n for index, grid in enumerate(grids):\n for element in grid:\n if element == board_identifier:\n selected_grid = list(grid)\n break\n selected_grid.remove(board_identifier)\n return selected_grid\n\n @staticmethod\n def __generate_grids(identifiers, grid_size):\n split_identifiers = []\n for i in range(grid_size):\n start = i * grid_size\n end = grid_size * (i + 1)\n selected = identifiers[start:end]\n split_identifiers.append(list(selected))\n grids = []\n for row in split_identifiers:\n for column in split_identifiers:\n inner_grid = []\n for identifier_row in row:\n for identifier_column in column:\n inner_grid.append(identifier_row + identifier_column)\n grids.append(inner_grid)\n return grids\n <mask token>\n\n @staticmethod\n def __testing9x9():\n string = 'ABCDEFGHI'\n output_string = ''\n for letter1 in string:\n for letter2 in string:\n output_string += letter1 + letter2 + ' '\n output_string += '\\n'\n print(output_string)\n",
"step-4": "class CUtil:\n\n @staticmethod\n def generate_board(initial_board, grid_size):\n board_dictionary = dict()\n iterator = 0\n board_identifiers = CUtil.__generate_board_identifiers(grid_size)\n for row in initial_board:\n for data in row:\n identifier = board_identifiers[iterator]\n board_dictionary[identifier] = str(data)\n if data == 0:\n board_dictionary[identifier] = '123456789'\n iterator += 1\n return board_dictionary\n\n @staticmethod\n def generate_constraint_dictionary(grid_size):\n identifiers = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'\n board_size = grid_size * grid_size\n rows_selected = columns_selected = identifiers[:board_size]\n board_identifiers = CUtil.__generate_board_identifiers(grid_size)\n generated_grids = CUtil.__generate_grids(rows_selected, grid_size)\n constraints = dict()\n for board_identifier in board_identifiers:\n rows = CUtil.__others_in_row(board_identifier, rows_selected)\n columns = CUtil.__others_in_columns(board_identifier,\n columns_selected)\n grids = CUtil.__others_in_grid(board_identifier, generated_grids)\n constraints[board_identifier] = set(rows + columns + grids)\n return constraints\n\n @staticmethod\n def constraints_as_tuple(constraints):\n constraints_tuples = []\n for key, values in constraints.items():\n for value in values:\n constraints_tuples.append((key, value))\n return constraints_tuples\n\n @staticmethod\n def __others_in_row(board_identifier, identifiers):\n row_identifier = board_identifier[0]\n others = []\n for identifier in identifiers:\n new_element = row_identifier + identifier\n if new_element != board_identifier:\n others.append(new_element)\n return others\n\n @staticmethod\n def __others_in_columns(board_identifier, identifiers):\n column_identifier = board_identifier[1]\n others = []\n for identifier in identifiers:\n new_element = identifier + column_identifier\n if new_element != board_identifier:\n others.append(new_element)\n return others\n\n @staticmethod\n def __others_in_grid(board_identifier, grids):\n selected_grid = []\n for index, grid in enumerate(grids):\n for element in grid:\n if element == board_identifier:\n selected_grid = list(grid)\n break\n selected_grid.remove(board_identifier)\n return selected_grid\n\n @staticmethod\n def __generate_grids(identifiers, grid_size):\n split_identifiers = []\n for i in range(grid_size):\n start = i * grid_size\n end = grid_size * (i + 1)\n selected = identifiers[start:end]\n split_identifiers.append(list(selected))\n grids = []\n for row in split_identifiers:\n for column in split_identifiers:\n inner_grid = []\n for identifier_row in row:\n for identifier_column in column:\n inner_grid.append(identifier_row + identifier_column)\n grids.append(inner_grid)\n return grids\n\n @staticmethod\n def __generate_board_identifiers(grid_size):\n identifiers = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'\n board_size = grid_size * grid_size\n rows_selected = columns_selected = identifiers[:board_size]\n board = []\n for row_identifier in rows_selected:\n for column_identifier in columns_selected:\n board.append(row_identifier + column_identifier)\n return board\n\n @staticmethod\n def __testing9x9():\n string = 'ABCDEFGHI'\n output_string = ''\n for letter1 in string:\n for letter2 in string:\n output_string += letter1 + letter2 + ' '\n output_string += '\\n'\n print(output_string)\n",
"step-5": "class CUtil:\n\n # Returns a dictionary containing the cell UID as they key and the data for the cell as the value\n # Ex: 'AA': 2, 'AB': 4 ....\n @staticmethod\n def generate_board(initial_board, grid_size):\n board_dictionary = dict()\n iterator = 0\n board_identifiers = CUtil.__generate_board_identifiers(grid_size)\n\n for row in initial_board:\n for data in row:\n identifier = board_identifiers[iterator]\n board_dictionary[identifier] = str(data)\n if data == 0:\n board_dictionary[identifier] = \"123456789\"\n iterator += 1\n\n return board_dictionary\n\n # returns a dictionary containing possible constraints for each cell\n # Ex: 'AA': 'AB', 'AC' ....\n @staticmethod\n def generate_constraint_dictionary(grid_size):\n identifiers = \"ABCDEFGHIJKLMNOPQRSTUVWXYZ\"\n board_size = grid_size * grid_size\n rows_selected = columns_selected = identifiers[:board_size]\n\n board_identifiers = CUtil.__generate_board_identifiers(grid_size)\n generated_grids = CUtil.__generate_grids(rows_selected, grid_size)\n\n constraints = dict()\n\n for board_identifier in board_identifiers:\n rows = CUtil.__others_in_row(board_identifier, rows_selected)\n columns = CUtil.__others_in_columns(board_identifier, columns_selected)\n grids = CUtil.__others_in_grid(board_identifier, generated_grids)\n\n constraints[board_identifier] = set(rows + columns + grids)\n\n return constraints\n\n # returns a tuple containing possible constraints for each cell\n # Ex: ('AA', 'AB'), ('AA', AC') ....\n @staticmethod\n def constraints_as_tuple(constraints):\n constraints_tuples = []\n for key, values in constraints.items():\n for value in values:\n constraints_tuples.append((key, value))\n\n return constraints_tuples\n\n @staticmethod\n def __others_in_row(board_identifier, identifiers):\n # if 'AB' then get just 'A', because that's the row\n row_identifier = board_identifier[0]\n others = []\n\n for identifier in identifiers:\n new_element = row_identifier + identifier\n if new_element != board_identifier:\n others.append(new_element)\n\n return others\n\n @staticmethod\n def __others_in_columns(board_identifier, identifiers):\n # if 'AB' then get just 'B', because that's the columns\n column_identifier = board_identifier[1]\n others = []\n\n for identifier in identifiers:\n new_element = identifier + column_identifier\n if new_element != board_identifier:\n others.append(new_element)\n\n return others\n\n @staticmethod\n def __others_in_grid(board_identifier, grids):\n # if 'AB' then get just 'B', because that's the columns\n selected_grid = []\n for index, grid in enumerate(grids):\n for element in grid:\n if element == board_identifier:\n selected_grid = list(grid)\n break\n\n selected_grid.remove(board_identifier)\n return selected_grid\n\n @staticmethod\n def __generate_grids(identifiers, grid_size):\n split_identifiers = []\n for i in range(grid_size):\n start = i * grid_size\n end = grid_size * (i + 1)\n selected = identifiers[start:end]\n split_identifiers.append(list(selected))\n\n grids = []\n for row in split_identifiers:\n # [\"A\", \"B\", \"C\"]\n for column in split_identifiers:\n # [\"A\", \"B\", \"C\"]\n inner_grid = []\n for identifier_row in row:\n for identifier_column in column:\n inner_grid.append(identifier_row + identifier_column)\n\n grids.append(inner_grid)\n\n return grids\n\n @staticmethod\n def __generate_board_identifiers(grid_size):\n identifiers = \"ABCDEFGHIJKLMNOPQRSTUVWXYZ\"\n board_size = grid_size * grid_size\n\n rows_selected = columns_selected = identifiers[:board_size]\n board = 
[]\n for row_identifier in rows_selected:\n for column_identifier in columns_selected:\n board.append(row_identifier + column_identifier)\n\n return board\n\n @staticmethod\n def __testing9x9():\n string = \"ABCDEFGHI\"\n\n output_string = \"\"\n for letter1 in string:\n for letter2 in string:\n output_string += letter1 + letter2 + \" \"\n\n output_string += \"\\n\"\n\n print(output_string)",
"step-ids": [
6,
7,
8,
10,
11
]
}
|
[
6,
7,
8,
10,
11
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
def count_singlekey(inputDict, keyword):
countDict = {}
for abName, abInfo in inputDict.iteritems():
if countDict.has_key(abInfo[keyword]):
countDict[abInfo[keyword]][1] += 1
else:
countDict[abInfo[keyword]] = [abName, 1]
return countDict
<|reserved_special_token_0|>
<|reserved_special_token_1|>
def count_singlekey(inputDict, keyword):
countDict = {}
for abName, abInfo in inputDict.iteritems():
if countDict.has_key(abInfo[keyword]):
countDict[abInfo[keyword]][1] += 1
else:
countDict[abInfo[keyword]] = [abName, 1]
return countDict
def count_multikey(inputDict, keywords):
keywords.sort()
keywords = tuple(keywords)
countDict = {}
for abName, abInfo in inputDict.iteritems():
combinedKey = []
for k in keywords:
combinedKey.append(abInfo[k])
combinedKey = tuple(combinedKey)
if countDict.has_key(combinedKey):
countDict[combinedKey][1] += 1
else:
countDict[combinedKey] = [abName, 1]
return countDict
<|reserved_special_token_1|>
def count_singlekey(inputDict, keyword):
# sample input
# inputDict = {
# abName1: { dna: 'atgc', protein: 'x' }
# abName2: { dna: 'ctga', protein: 'y' }
# }
countDict = {}
for abName, abInfo in inputDict.iteritems():
if countDict.has_key(abInfo[keyword]):
countDict[abInfo[keyword]][1] += 1
else:
countDict[abInfo[keyword]] = [abName, 1]
return countDict
def count_multikey(inputDict, keywords):
# sample input
# inputDict = {
# abName1: { dna: 'atgc', protein: 'x' }
# abName2: { dna: 'ctga', protein: 'y' }
# }
#keywords = list(keywords)
keywords.sort()
keywords = tuple(keywords)
countDict = {}
for abName, abInfo in inputDict.iteritems():
combinedKey = []
for k in keywords:
combinedKey.append(abInfo[k])
combinedKey = tuple(combinedKey)
if countDict.has_key(combinedKey):
countDict[combinedKey][1] += 1
else:
countDict[combinedKey] = [abName, 1]
return countDict
|
flexible
|
{
"blob_id": "b164dc8183c0dc460aa20883553fc73acd1e45ec",
"index": 7828,
"step-1": "<mask token>\n",
"step-2": "def count_singlekey(inputDict, keyword):\n countDict = {}\n for abName, abInfo in inputDict.iteritems():\n if countDict.has_key(abInfo[keyword]):\n countDict[abInfo[keyword]][1] += 1\n else:\n countDict[abInfo[keyword]] = [abName, 1]\n return countDict\n\n\n<mask token>\n",
"step-3": "def count_singlekey(inputDict, keyword):\n countDict = {}\n for abName, abInfo in inputDict.iteritems():\n if countDict.has_key(abInfo[keyword]):\n countDict[abInfo[keyword]][1] += 1\n else:\n countDict[abInfo[keyword]] = [abName, 1]\n return countDict\n\n\ndef count_multikey(inputDict, keywords):\n keywords.sort()\n keywords = tuple(keywords)\n countDict = {}\n for abName, abInfo in inputDict.iteritems():\n combinedKey = []\n for k in keywords:\n combinedKey.append(abInfo[k])\n combinedKey = tuple(combinedKey)\n if countDict.has_key(combinedKey):\n countDict[combinedKey][1] += 1\n else:\n countDict[combinedKey] = [abName, 1]\n return countDict\n",
"step-4": "def count_singlekey(inputDict, keyword):\n # sample input\n # inputDict = {\n # abName1: { dna: 'atgc', protein: 'x' }\n # abName2: { dna: 'ctga', protein: 'y' }\n # }\n\n countDict = {}\n for abName, abInfo in inputDict.iteritems():\n if countDict.has_key(abInfo[keyword]):\n countDict[abInfo[keyword]][1] += 1\n else:\n countDict[abInfo[keyword]] = [abName, 1]\n return countDict\n\n\ndef count_multikey(inputDict, keywords):\n # sample input\n # inputDict = {\n # abName1: { dna: 'atgc', protein: 'x' }\n # abName2: { dna: 'ctga', protein: 'y' }\n # }\n #keywords = list(keywords)\n keywords.sort()\n keywords = tuple(keywords)\n countDict = {}\n for abName, abInfo in inputDict.iteritems():\n combinedKey = []\n for k in keywords:\n combinedKey.append(abInfo[k])\n combinedKey = tuple(combinedKey)\n if countDict.has_key(combinedKey):\n countDict[combinedKey][1] += 1\n else:\n countDict[combinedKey] = [abName, 1]\n return countDict\n\n\n\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |