metadata | text
---|---|
{
"source": "jphacks/SD_1809",
"score": 2
} |
#### File: SD_1809/whisper/app.py
```python
from __future__ import unicode_literals
import errno
import os
import sys
import tempfile
import concurrent.futures as futures
import json
import re
from argparse import ArgumentParser
from flask import Flask, request, abort
from linebot import (
LineBotApi, WebhookHandler
)
from linebot.exceptions import (
LineBotApiError, InvalidSignatureError
)
from linebot.models import (
MessageEvent, TextMessage, TextSendMessage,
SourceUser, SourceGroup, SourceRoom,
TemplateSendMessage, ConfirmTemplate, MessageAction,
ButtonsTemplate, ImageCarouselTemplate, ImageCarouselColumn, URIAction,
PostbackAction, DatetimePickerAction,
CameraAction, CameraRollAction, LocationAction,
CarouselTemplate, CarouselColumn, PostbackEvent,
StickerMessage, StickerSendMessage, LocationMessage, LocationSendMessage,
ImageMessage, VideoMessage, AudioMessage, FileMessage,
UnfollowEvent, FollowEvent, JoinEvent, LeaveEvent, BeaconEvent,
FlexSendMessage, BubbleContainer, ImageComponent, BoxComponent,
TextComponent, SpacerComponent, IconComponent, ButtonComponent,
SeparatorComponent, QuickReply, QuickReplyButton
)
app = Flask(__name__)
# get channel_secret and channel_access_token from your environment variable
channel_secret = os.getenv('LINE_CHANNEL_SECRET', None)
channel_access_token = os.getenv('LINE_CHANNEL_ACCESS_TOKEN', None)
if channel_secret is None:
print('Specify LINE_CHANNEL_SECRET as environment variable.')
sys.exit(1)
if channel_access_token is None:
print('Specify LINE_CHANNEL_ACCESS_TOKEN as environment variable.')
sys.exit(1)
print(channel_secret, file=sys.stderr)
print(channel_access_token, file=sys.stderr)
line_bot_api = LineBotApi(channel_access_token)
handler = WebhookHandler(channel_secret)
static_tmp_path = os.path.join(os.path.dirname(__file__), 'static', 'tmp')
# ========================= whisper独自のフィールド ========================
from UserData import UserData
from PlantAnimator import PlantAnimator
from beaconWhisperEvent import BeaconWhisperEvent
# ここでimport出来ないときは、pip install clova-cek-sdk をたたくこと
import cek
from flask import jsonify
user_data = UserData()
plant_animator = PlantAnimator(user_data, line_bot_api)
beacon_whisper_event = BeaconWhisperEvent(line_bot_api,user_data)
# user_idでエラーをはく場合は、下のidベタ打ちを採用してください
# user_id = "U70418518785e805318db128d8014710e"
user_id = user_data.json_data["user_id"]
# =========================================================================
# =========================Clova用のフィールド==============================
# application_id : lineのClovaアプリ?でスキルを登録した際のExtension_IDを入れる
clova = cek.Clova(
application_id = "com.clovatalk.whisper",
default_language = "ja",
debug_mode = False
)
# =========================================================================
# function for create tmp dir for download content
def make_static_tmp_dir():
try:
os.makedirs(static_tmp_path)
except OSError as exc:
if exc.errno == errno.EEXIST and os.path.isdir(static_tmp_path):
pass
else:
raise
@app.route("/callback", methods=['POST'])
def callback():
# get X-Line-Signature header value
signature = request.headers['X-Line-Signature']
# get request body as text
body = request.get_data(as_text=True)
app.logger.info("Request body: " + body)
# handle webhook body
try:
handler.handle(body, signature)
except LineBotApiError as e:
print("Got exception from LINE Messaging API: %s\n" % e.message)
for m in e.error.details:
print(" %s: %s" % (m.property, m.message))
print("\n")
except InvalidSignatureError:
abort(400)
return 'OK'
# /clova に対してのPOSTリクエストを受け付けるサーバーを立てる
@app.route('/clova', methods=['POST'])
def my_service():
body_dict = clova.route(body=request.data, header=request.headers)
response = jsonify(body_dict)
response.headers['Content-Type'] = 'application/json; charset=UTF-8'
return response
# 以下はcallback用のhandler
# ユーザにフォローされた時のイベント
@handler.add(FollowEvent)
def follow_event(event):
global user_id
user_id = event.source.user_id
user_data.set_user_id(user_id)
line_bot_api.reply_message(
event.reply_token, TextSendMessage(text="初めまして。whisperです!\nよろしくね(^^♪"))
@handler.add(MessageEvent, message=TextMessage)
def handle_text_message(event):
print("text message")
text = event.message.text
split_msg = re.split(r'[\ | ]', text)
reply_texts = create_reply(split_msg, event, source="text")
if reply_texts is not None:
reply_texts = (reply_texts,) if isinstance(reply_texts, str) else reply_texts
msgs = [TextSendMessage(text=s) for s in reply_texts]
line_bot_api.reply_message(event.reply_token, msgs)
@handler.add(MessageEvent, message=LocationMessage)
def handle_location_message(event):
line_bot_api.reply_message(
event.reply_token,
LocationSendMessage(
title=event.message.title, address=event.message.address,
latitude=event.message.latitude, longitude=event.message.longitude
)
)
@handler.add(MessageEvent, message=StickerMessage)
def handle_sticker_message(event):
line_bot_api.reply_message(
event.reply_token,
StickerSendMessage(
package_id=event.message.package_id,
sticker_id=event.message.sticker_id)
)
# Other Message Type
@handler.add(MessageEvent, message=(ImageMessage, VideoMessage, AudioMessage))
def handle_content_message(event):
if isinstance(event.message, ImageMessage):
ext = 'jpg'
elif isinstance(event.message, VideoMessage):
ext = 'mp4'
elif isinstance(event.message, AudioMessage):
ext = 'm4a'
else:
return
message_content = line_bot_api.get_message_content(event.message.id)
with tempfile.NamedTemporaryFile(dir=static_tmp_path, prefix=ext + '-', delete=False) as tf:
for chunk in message_content.iter_content():
tf.write(chunk)
tempfile_path = tf.name
dist_path = tempfile_path + '.' + ext
dist_name = os.path.basename(dist_path)
os.rename(tempfile_path, dist_path)
line_bot_api.reply_message(
event.reply_token, [
TextSendMessage(text='Save content.'),
TextSendMessage(text=request.host_url + os.path.join('static', 'tmp', dist_name))
])
@handler.add(MessageEvent, message=FileMessage)
def handle_file_message(event):
message_content = line_bot_api.get_message_content(event.message.id)
with tempfile.NamedTemporaryFile(dir=static_tmp_path, prefix='file-', delete=False) as tf:
for chunk in message_content.iter_content():
tf.write(chunk)
tempfile_path = tf.name
dist_path = tempfile_path + '-' + event.message.file_name
dist_name = os.path.basename(dist_path)
os.rename(tempfile_path, dist_path)
line_bot_api.reply_message(
event.reply_token, [
TextSendMessage(text='Save file.'),
TextSendMessage(text=request.host_url + os.path.join('static', 'tmp', dist_name))
])
@handler.add(UnfollowEvent)
def handle_unfollow():
app.logger.info("Got Unfollow event")
@handler.add(JoinEvent)
def handle_join(event):
line_bot_api.reply_message(
event.reply_token,
TextSendMessage(text='Joined this ' + event.source.type))
@handler.add(LeaveEvent)
def handle_leave():
app.logger.info("Got leave event")
@handler.add(PostbackEvent)
def handle_postback(event):
if event.postback.data == 'ping':
line_bot_api.reply_message(
event.reply_token, TextSendMessage(text='pong'))
elif event.postback.data == 'datetime_postback':
line_bot_api.reply_message(
event.reply_token, TextSendMessage(text=event.postback.params['datetime']))
elif event.postback.data == 'date_postback':
line_bot_api.reply_message(
event.reply_token, TextSendMessage(text=event.postback.params['date']))
elif event.postback.data in ('set_beacon_on', 'set_beacon_off'):
# ビーコンを使うかどうかを設定するときの"YES", "No"を押したときの挙動を設定
beacon_whisper_event.set_beacon(event)
else:
# 植物の名前を消すときにはワンクッション挟んであげる
data = event.postback.data.split()
if data[0] == 'delete_plant':
plant_animator.delete_plant(data[1])
elif data[0] == 'delete_plant_cancel':
line_bot_api.reply_message(
event.reply_token,
TextSendMessage(
text='ありがとう^^'
)
)
# ビーコンがかざされたときに呼ばれる処理
@handler.add(BeaconEvent)
def handle_beacon(event):
if plant_animator.listen_beacon_span():
beacon_whisper_event.activation_msg(event)
if user_data.json_data['use_line_beacon'] == 1:
# ビーコンがエコモード中ならずっと家にいたと判断して挨拶はしない
if plant_animator.check_beacon_eco_time() == False:
line_bot_api.reply_message(
event.reply_token,
TextSendMessage(
text='おかえりなさい!'
))
plant_animator.listen_beacon(user_data.json_data['use_line_beacon'])
#--------------------------------------
# メッセージを生成するメソッドへのディスパッチャ
#--------------------------------------
lines = (
"植物の呼び出し", " ハロー `植物の名前`",
"植物の登録:", " 登録 `植物の名前`",
"植物の削除", " 削除 `植物の名前`",
"会話の終了", ' またね')
help_msg = os.linesep.join(lines)
def create_reply(split_text, event=None, source=None):
"""
テキストとして受け取ったメッセージとclovaから受け取ったメッセージを同列に扱うために
応答メッセージ生成へのディスパッチ部分を抜き出す
input: string[]
output: None or iterable<string>
"""
def decorate_text(plant, text):
return plant.display_name + ": " + text
text = split_text[0]
if text == 'bye':
if isinstance(event.source, SourceGroup):
line_bot_api.reply_message(
event.reply_token, TextSendMessage(text='またね、今までありがとう'))
line_bot_api.leave_group(event.source.group_id)
elif isinstance(event.source, SourceRoom):
line_bot_api.reply_message(
event.reply_token, TextSendMessage(text='またね、今までありがとう'))
line_bot_api.leave_room(event.source.room_id)
else:
line_bot_api.reply_message(
event.reply_token,
TextSendMessage(text="この会話から退出させることはできません"))
# ユーザからビーコンの設定を行う
elif text in {'beacon', 'ビーコン'}:
return beacon_whisper_event.config_beacon_msg(event)
elif text in {"help", "ヘルプ"}:
return help_msg
elif text in {'またね', 'じゃあね', 'バイバイ'}:
plant = plant_animator.plant
text = plant_animator.disconnect()
if source == "text":
text = decorate_text(plant, text)
return text
# 植物の生成を行う
elif text in {'登録', 'ようこそ'}:
if len(split_text) == 2:
name = split_text[1]
return plant_animator.register_plant(name)
elif len(split_text) == 1:
return "名前が設定されていません"
else:
return "メッセージが不正です", "例:登録 `植物の名前`"
# ランダムに呼び出す
elif text == "誰かを呼んで":
reply = plant_animator.clova_random_connect()
if source == "text":
reply = decorate_text(plant_animator.plant, reply)
return reply
# 植物との接続命令
elif split_text[0] in {'ハロー', 'hello', 'こんにちは', 'こんばんは', 'おはよう', 'ごきげんよう'}:
if len(split_text) == 2:
reply = plant_animator.connect(split_text[1])
if source == "text":
reply = decorate_text(plant_animator.plant, reply)
return reply
elif len(split_text) == 1:
return "植物が選択されていません"
else:
return "メッセージが不正です:", "例:ハロー `植物の名前`"
# 植物を削除するときの命令
elif split_text[0] == '削除':
if len(split_text) == 2:
return plant_animator.delete_plant(split_text[1])
elif len(split_text) == 1:
return "植物が選択されていません"
else:
return "メッセージが不正です:" , "例:削除 `植物の名前`"
# 植物を削除するときの命令
# if split_msg[1] is not None:
# confirm_template = ConfirmTemplate(text= split_msg[1] +"の情報を削除します\n本当によろしいですか?\n", actions=[
# PostbackAction(label='Yes', data='delete_plant '+ split_msg[1], displayText='はい'),
# PostbackAction(label='No', data='delete_plant_cancel '+ split_msg[1], displayText='いいえ'),
# ])
# template_message = TemplateSendMessage(
# alt_text='Confirm alt text', template=confirm_template)
# line_bot_api.reply_message(event.reply_token, template_message)
# else:
# line_bot_api.reply_message(
# event.reply_token,
# TextSendMessage(
# text='植物が選択されていません'
# )
# )
else:
text = plant_animator.communicate(text)
if source == "text":
if plant_animator.connecting():
text = decorate_text(plant_animator.plant, text)
else:
text = [text, help_msg]
return text
# line_bot_api.reply_message(
# event.reply_token, TextSendMessage(text=event.message.text))
#--------------------------------------
# メッセージを生成するメソッドへのディスパッチャ end
#--------------------------------------
# 以下にClova用のイベントを書き込む
# 起動時の処理
@clova.handle.launch
def launch_request_handler(clova_request):
welcome_japanese = cek.Message(message="おかえりなさい!", language="ja")
response = clova.response([welcome_japanese])
return response
@clova.handle.default
def no_response(clova_request):
text = plant_animator.communicate("hogehoge")
if plant_animator.connecting():
text = "%s: よくわかんないや" % plant_animator.plant.display_name
return clova.response(text)
# Communicateの発火箇所
# debugのために、defaultにしているが本来は
# @clova.handle.intent("Communication") と書いて、Clova アプリの方でインテントを設定しておく必要がある
# ToDo: Connect処理を設定してあげないと不親切、LINE Clavaアプリで予冷応答を細かく設定(今回は時間が足りないかも)
# @clova.handle.default
# @clova.handle.intent("AskStatus")
# def communication(clova_request):
# msg = plant_animator.communicate("調子はどう?", None)
# if msg is None:
# msg = "誰ともお話ししていません"
# message_japanese = cek.Message(message=msg, language="ja")
# response = clova.response([message_japanese])
# return response
# @clova.handle.intent("AskWater")
# def ask_water(clova_request):
# msg = plant_animator.communicate("水はいる?", None)
# if msg is None:
# msg = "誰ともお話ししていません"
# message_japanese = cek.Message(message=msg, language="ja")
# response = clova.response([message_japanese])
# return response
# @clova.handle.intent("AskLuminous")
# def ask_luminous(clova_request):
# msg = plant_animator.communicate("日当たりはどう?", None)
# if msg is None:
# msg = "誰ともお話ししていません"
# message_japanese = cek.Message(message=msg, language="ja")
# response = clova.response([message_japanese])
# return response
#--------------------------
# start Clova setting
#--------------------------
def define_clova_handler(intent, text):
@clova.handle.intent(intent)
def handler(clova_request):
# バグがあるかもしれない
# textの形式次第で
print("clova intent = %s" % intent)
msg = create_reply([text], source="clova")
# msg = plant_animator.communicate(text, None)
if msg is None:
msg = "誰ともお話ししていません"
message_japanese = cek.Message(message=msg, language="ja")
response = clova.response([message_japanese])
return response
return handler
with open("data/clova_setting.json") as f:
js = json.load(f)
intent_text_dict = js["intent_text_dict"]
# Clovaに対するイベントハンドラを設定
for k, v in intent_text_dict.items():
define_clova_handler(k, v)
#-------------------------------
# end Clova setting
#-------------------------------
import time
# should be modified when required
def update():
plant_animator.update()
def main_loop(clock_span):
while 1:
time.sleep(clock_span)
update()
if __name__ == "__main__":
arg_parser = ArgumentParser(
usage='Usage: python ' + __file__ + ' [--port <port>] [--help]'
)
arg_parser.add_argument('-p', '--port', type=int, default=8000, help='port')
arg_parser.add_argument('-d', '--debug', default=False, help='debug')
options = arg_parser.parse_args()
# create tmp dir for download content
make_static_tmp_dir()
def push_message(msg):
line_bot_api.push_message(user_id, TextSendMessage(text=msg))
plant_animator.push_message = push_message
with futures.ThreadPoolExecutor(2) as executor:
executor.submit(app.run, debug=options.debug, port=options.port)
executor.submit(main_loop, 0.9)
```
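The `define_clova_handler` helper above registers one Clova intent handler for every entry of `data/clova_setting.json`, passing `intent` and `text` in as parameters so each generated handler keeps its own values. A minimal, library-free sketch of that config-driven registration pattern (the `register` decorator and the config dict below are illustrative stand-ins, not the cek API):
```python
# Illustrative sketch of config-driven handler registration (no cek/Flask needed).
# Passing intent/text as function parameters, as define_clova_handler does,
# avoids the classic late-binding pitfall of closing over loop variables.
registry = {}

def register(intent):
    def wrap(func):
        registry[intent] = func
        return func
    return wrap

def define_handler(intent, text):
    @register(intent)
    def handler():
        # app.py would call create_reply([text], source="clova") here
        return "reply for %s: %s" % (intent, text)
    return handler

intent_text_dict = {"AskStatus": "調子はどう?", "AskWater": "水はいる?"}  # hypothetical config
for k, v in intent_text_dict.items():
    define_handler(k, v)

print(registry["AskWater"]())  # -> reply for AskWater: 水はいる?
```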
#### File: SD_1809/whisper/PseudoPlant.py
```python
from Plant import Plant
class PseudoPlant(Plant):
def __init__(self, display_name, name, speech_center):
self.display_name = display_name
self.name = name
self.__speech_center = speech_center
self.dry = True
self.dark = True
def report_wether_forecast(self, postal_code):
return self.__speech_center.report_weather_forecast(postal_code)
def needWater(self):
return self.dry
def needLuminesity(self):
return self.dark
import SpeechCenter
dis_name = "ダミー001"
name = "ダミー001"
kls = SpeechCenter.ExampleResponce
center = kls()
ex = center.examples
ex["調子はどう?"] = kls.respond_health
ex["水はいる?"] = kls.respond_water_demand
ex["日当たりはどう?"] = kls.respond_light_demand
ex["気温はどう?"] = kls.respond_temperture
plant = PseudoPlant(dis_name, name, center)
```
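The module-level block above wires `ExampleResponce.examples` so that each known phrase maps to a static responder that later receives the plant inside `make_response`. A small self-contained sketch of that phrase-to-responder dispatch (the stub class and helper names below are illustrative, not the project's API):
```python
# Stand-in sketch of the phrase -> responder dispatch built above (stubs only).
class DummyPlant:
    dry = True

def respond_water_demand(plant):
    return "水が欲しいよ!" if plant.dry else "大丈夫だよ"

def respond_unknown(plant):
    return "よくわかんないや"

examples = {"水はいる?": respond_water_demand}

def make_response(plant, user_text):
    return examples.get(user_text, respond_unknown)(plant)

print(make_response(DummyPlant(), "水はいる?"))  # -> 水が欲しいよ!
print(make_response(DummyPlant(), "hogehoge"))   # -> よくわかんないや
```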
#### File: SD_1809/whisper/SpeechCenter.py
```python
import numpy as np
from WeatherForecast import WeatherForecast
from ResponseDict import Instance as R
from Plant import Plant
import random
def sample_one(*args):
return random.sample(args, 1)[0]
class SpeechCenter:
def make_response(self, plant):
raise NotImplementedError()
class ExampleResponce(SpeechCenter):
def __init__(self):
self.examples = {}
# 植物の状態に応じたテキストを生成します
# TODO: ユーザーテキストが無い時のテキスト生成
def make_response(self, plant, user_text=None):
if user_text is None:
return ""
elif user_text in self.examples:
return self.examples[user_text](plant)
else:
ret = sample_one("..?", "なに言っているの?", "よくわかんないや")
return ret
def report_weather_forecast(self, postal_code):
weather = WeatherForecast.get_weather(postal_code)
if WeatherForecast.calc_average(weather) > 0:
return "今日は天気がいいから外に出して"
else:
return "今日はあまり天気が良くないね"
def say_nice_to_meet_you(self, plant: Plant):
return "はじめまして!"
def say_hello(self, plant: Plant):
return sample_one( "なに?", "呼んだ?")
def respond_see_you(self, plant: Plant):
return sample_one( "またね", "じゃあね", "バイバイ")
@staticmethod
def make_self_introduce(plant: Plant):
return sample_one(*R.IamPlant) % plant.display_name
@staticmethod
def respond_health(plant : Plant):
response_msg = ""
plant.sense_condition()
need_water = plant.needWater()
need_light = plant.needLuminesity()
if need_water:
response_msg += "水が欲しいよ!"
if need_light:
response_msg += "光が欲しいよ"
if not need_light and not need_water:
response_msg += "元気だよ!"
if np.random.randint(0, 10) < 2:
response_msg += "\nいつもありがとう(^^)"
return response_msg
@staticmethod
def respond_water_demand(plant):
plant.sense_condition()
response_msg = ""
if plant.needWater():
response_msg += sample_one("水が欲しいよ!", "うん!", "のどが渇いたな")
else:
response_msg += sample_one("もう十分だよ", "いらないよー", "大丈夫だよ、ありがとう")
return response_msg
@staticmethod
def respond_light_demand(plant):
response_msg = ""
plant.sense_condition()
if plant.needLuminesity():
response_msg += sample_one("少し暗いかな", "明るいところに行きたいな", "光が欲しいよ")
else:
response_msg += sample_one("ちょうどいいよ!", "気持ちいいよ!", "十分だよ")
return response_msg
@staticmethod
def respond_temperture(plant):
response_msg = ""
temp = plant.getTemperture()
if temp == 0:
response_msg += "今日は寒すぎるよ"
elif temp == 1:
response_msg += "今日はきもちいいね!"
elif temp == 2:
response_msg += "今日は暑いね"
return response_msg
``` |
{
"source": "jphacks/SP_1803",
"score": 3
} |
#### File: SP_1803/AI/server.py
```python
from flask import Flask, request, make_response, jsonify
import cv2 as cv
import numpy as np
from keras.models import load_model
from keras.preprocessing import image
import os
app = Flask(__name__)
app.config['JSON_AS_ASCII'] = False
app.config['MAX_CONTENT_LENGTH'] = 100 * 1024 * 1024
confThreshold = 0.5
nmsThreshold = 0.4
inpWidth = 416
inpHeight = 416
count = 0
send_data = []
categories = ["cute_m","cute_w", "disgusting_m", "disgusting_w", "good_m","good_w", "interesting_m", "interesting_w"]
# Yolo関連のモデルの読み込み
modelConfiguration = "yolov3-openimages.cfg"
modelWeights = "yolov3-openimages.weights"
net = cv.dnn.readNetFromDarknet(modelConfiguration, modelWeights)
net.setPreferableBackend(cv.dnn.DNN_BACKEND_OPENCV)
net.setPreferableTarget(cv.dnn.DNN_TARGET_CPU)
def getOutputsNames(net):
layersNames = net.getLayerNames()
return [layersNames[i[0] - 1] for i in net.getUnconnectedOutLayers()]
def load_image(img_path, show=False):
img = image.load_img(img_path, target_size=(64, 64))
img_tensor = image.img_to_array(img) # (height, width, channels)
img_tensor = np.expand_dims(img_tensor, axis=0) # (1, height, width, channels), add a dimension because the model expects this shape: (batch_size, height, width, channels)
img_tensor /= 255. # imshow expects values in the range [0, 1]
return img_tensor
def drawPred(conf, left, top, right, bottom, frame):
print('start clasta')
model = load_model("model.ep2999.h5")
tmp = {}
print('call clasta')
dst = frame[top:bottom, left:right]
cv.imwrite('huga' + '.jpg', dst)
new_image = load_image('huga' + '.jpg')
pred = model.predict(new_image)
for pre in pred:
y = pre.argmax()
tmp = {
'category': categories[y],
'probability': float(pre[y]),
'x1': left,
'y1': top,
'x2': right,
'y2': bottom
}
send_data.append(tmp)
print(send_data)
def postprocess(frame, outs):
frameHeight = frame.shape[0]
frameWidth = frame.shape[1]
confidences = []
boxes = []
print('call postprocess')
for out in outs:
for detection in out:
scores = detection[5:]
classId = np.argmax(scores)
confidence = scores[classId]
if confidence > confThreshold:
center_x = int(detection[0] * frameWidth)
center_y = int(detection[1] * frameHeight)
width = int(detection[2] * frameWidth)
height = int(detection[3] * frameHeight)
left = int(center_x - width / 2)
top = int(center_y - height / 2)
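# Detections are normalized to [0, 1]; e.g. on a 416x416 frame a detection of
# (0.5, 0.5, 0.25, 0.25) decodes to a 104x104 box centred at (208, 208),
# i.e. left = top = 156 (illustrative numbers, not real model output).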
confidences.append(float(confidence))
boxes.append([left, top, width, height])
indices = cv.dnn.NMSBoxes(boxes, confidences, confThreshold, nmsThreshold)
print(indices)
# Yoloで出力されるボックスの位置を出す
for i in indices:
i = i[0]
box = boxes[i]
left = box[0]
top = box[1]
width = box[2]
height = box[3]
drawPred(confidences[i], left, top, left + width, top + height, frame)
@app.route('/')# , methods=['POST'])
def hello():
# del send_data[:]
# if 'image' not in request.files:
# make_response(jsonify({'result': 'uploadFile is required.'}))
# print('ready')
# file = request.files['image']
# print('file get')
filename = 'dog.jpg'
print('name get')
# file.save(filename)
print('save done')
# Yoloを用いたネットワークの構築
im = cv.imread(filename)
print('open')
blob = cv.dnn.blobFromImage(im, 1 / 255, (inpWidth, inpHeight), [0, 0, 0], 1, crop=False)
net.setInput(blob)
print('make net')
outs = net.forward(getOutputsNames(net))
postprocess(im, outs)
print('done')
if len(send_data) == 0:
ooo = {
'category': 'none',
'probability': 0,
'x1': 0,
'y1': 0,
'x2': 0,
'y2': 0
}
send_data.append(ooo)
return jsonify({'data': send_data})
if __name__ == '__main__':
app.run(debug=True, host='0.0.0.0', port=5000)
``` |
{
"source": "jphacks/SP_1804",
"score": 2
} |
#### File: SP_1804/okusurikun/main.py
```python
from flask import Flask, request, abort
from linebot import (
LineBotApi, WebhookHandler
)
from linebot.exceptions import (
InvalidSignatureError
)
from linebot.models import (
MessageEvent, TextMessage, TextSendMessage,
)
import os
app = Flask(__name__)
#環境変数取得
YOUR_CHANNEL_ACCESS_TOKEN = os.environ["YOUR_CHANNEL_ACCESS_TOKEN"]
YOUR_CHANNEL_SECRET = os.environ["YOUR_CHANNEL_SECRET"]
line_bot_api = LineBotApi(YOUR_CHANNEL_ACCESS_TOKEN)
handler = WebhookHandler(YOUR_CHANNEL_SECRET)
kusuri_list=[]
@app.route("/callback", methods=['POST'])
def callback():
# get X-Line-Signature header value
signature = request.headers['X-Line-Signature']
# get request body as text
body = request.get_data(as_text=True)
app.logger.info("Request body: " + body)
# handle webhook body
try:
handler.handle(body, signature)
except InvalidSignatureError:
abort(400)
return 'OK'
@handler.add(MessageEvent, message=TextMessage)
def handle_message(event):
if '薬登録' in event.message.text:
content = '朝'
elif 'A,B,C' in event.message.text:
content='昼'
elif 'B,C,D' in event.message.text:
content='夜'
elif 'C,D,E' in event.message.text:
content='了解'
else:
content = '死ね老害'
line_bot_api.reply_message(
event.reply_token,
TextSendMessage(text=content)
)
if __name__ == "__main__":
# app.run()
port = int(os.getenv("PORT", 5000))
app.run(host="0.0.0.0", port=port)
``` |
{
"source": "jphacks/SP_1906",
"score": 2
} |
#### File: SP_1906/pika/models.py
```python
from django.db import models
# Create your models here.
class Tree(models.Model):
date = models.DateTimeField(auto_now_add=True)
name = models.CharField(max_length=20)
CHOICES = (
(1, 'クール'),
(2, 'かわいい'),
(3, 'おしゃれ'),
(4, 'ばえ'),
(5, '読める!'),
(6, 'おもしろい'),
)
look = models.IntegerField(choices=CHOICES)
data = models.TextField(max_length=5000)
lighted = models.BooleanField(default=False)
def __str__(self):
return self.name + ", " +str(self.look) + ", " +str(self.lighted)
``` |
{
"source": "jphacks/TH_1604_3",
"score": 3
} |
#### File: TH_1604_3/controllers/default_controller.py
```python
import flask
import hashlib
import requests
import os
from datetime import datetime
from models.models import db
from models.models import User
from models.models import Posts
from models.models import Connection
from decorator import decorator
from sqlalchemy.sql import select
from sqlalchemy.orm import session
'''
I don't really understand how Connexion works with Flask, so I'm using their example
https://github.com/zalando/connexion/blob/master/examples/basicauth/app.py
'''
def check_auth(username: str, password: str):
select_stmt = select([User]).where(User.username == username)
q = db.session.query(User).\
select_entity_from(select_stmt)
users = q.all()
'''This function is called to check if a username /
password combination is valid.'''
return len(users)==1 and users[0].password == hashlib.sha256(password.encode()).hexdigest()
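# i.e. User.password is expected to hold the hex SHA-256 digest of the plaintext,
# so a row stored with hashlib.sha256("secret".encode()).hexdigest() matches
# check_auth(username, "secret") ("secret" is only an illustrative value).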
def authenticate():
'''Sends a 401 response that enables basic auth'''
resp = flask.jsonify({"code": 401, "message": "You need to be authenticated"})
resp.headers['WWW-Authenticate'] = 'Basic realm="Login Required"'
resp.status_code = 401
return resp
@decorator
def requires_auth(f: callable, *args, **kwargs):
auth = flask.request.authorization
if not auth or not check_auth(auth.username, auth.password):
return authenticate()
return f(*args, **kwargs)
def name_to_id(username):
select_stmt = select([User]).where(User.username == username)
q = db.session.query(User).\
select_entity_from(select_stmt)
users = q.all()
if len(users)==1:
return users[0].id
else:
raise ValueError
def following(user_id):
q = db.session.query(Connection.target_id).filter(Connection.user_id==user_id)
return [r for r, in q.all()]
@requires_auth
def status_annotate_post(id, body) -> str:
return 'do some magic!'
@requires_auth
def status_home_tsurailine_get() -> str:
user_id = name_to_id(flask.request.authorization.username)
users = following(user_id)+[user_id]
q = db.session.query(Posts).filter(Posts.user_id.in_(users))
return flask.jsonify([{"status_id":p.id,"user_id":p.user_id, "tsurami":float(p.tsurami),"timestamp":p.timestamp} for p in q.all()])
@requires_auth
def status_update_post(file) -> str:
res = requests.post(url='https://api.projectoxford.ai/emotion/v1.0/recognize',
data=file.read(),
headers={'Content-Type': 'application/octet-stream','Ocp-Apim-Subscription-Key': os.environ['EMOTIONAPI_KEY']})
if res.status_code==200:
data = res.json()
if len(data)>0:
score = float(0)
for face in data:
candidate_score = (face['scores']['fear']+face['scores']['neutral']+face['scores']['sadness'])/float(3)
if candidate_score > score:
score = candidate_score
post = Posts(name_to_id(flask.request.authorization.username),score,datetime.utcnow())
db.session.add(post)
db.session.commit()
return flask.jsonify({"status_id":post.id})
resp = flask.jsonify({"code": 400, "message": "Bad request"})
resp.status_code = 400
return resp
@requires_auth
def status_user_user_id_get(userId) -> str:
return 'do some magic!'
``` |
{
"source": "jphacks/TK_1621",
"score": 3
} |
#### File: TK_1621/client/app.py
```python
import websocket
import thread
import time
import base64
import json
import uuid
from lib.camera import Camera
from lib.acceleration import Acceralation
from lib import jtalk
from lib import ir_sensor
def on_message(ws, message):
print message
res = json.loads(message)
text = res["text"].encode('utf-8')
# TODO 超音波センサで動くようにする
distance = ir_sensor.read_distance()
jtalk.speak("%-3.2fメートル%s" % (distance, text))
ws.close()
def on_error(ws, error):
print error
ws.close()
def on_close(ws):
print "### closed ###"
def on_open(ws):
camera = Camera()
filepath = uuid.uuid4()
filename = '%s.jpg' % filepath
camera.snapshot(filename)
file = open('images/'+filename, "rb").read()
file_data = base64.b64encode(file)
ws.send(json.dumps({'upload_file': file_data}))
time.sleep(0.2)
if __name__ == "__main__":
ws_url = "ws://jphacksserver.herokuapp.com/"
websocket.enableTrace(True)
accel = Acceralation()
while True:
if accel.permit_snapshot():
ws = websocket.WebSocketApp(ws_url,
on_message=on_message,
on_error=on_error,
on_close=on_close)
ws.on_open = on_open
ws.run_forever()
time.sleep(10.0)
```
#### File: client/lib/camera.py
```python
import cv2
class Camera:
"""WEBカメラの設定
"""
def __init__(self, use_last=True, camera_num=0,
image_width=1280, image_height=720, fps=30):
self.img_path = "./images/"
self.camera_num = camera_num
self.image_width = image_width
self.image_height = image_height
self.fps = fps
self.capture = cv2.VideoCapture(self.camera_num)
self.set_capture()
# ================================
# キャプチャーの用意
# ================================
def set_capture(self):
self.capture.set(3, self.image_width)
self.capture.set(4, self.image_height)
self.capture.set(5, self.fps)
if self.capture.isOpened() is False:
raise IOError('Camera cannot open.')
print 'finish setting camera'
# ================================
# スナップショットを撮影
# ================================
def snapshot(self, name):
ret, image = self.capture.read()
if not ret:
raise IOError("Cannnot shot")
cv2.imwrite(self.img_path+name, image)
```
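`set_capture` above configures the capture through the raw property ids 3, 4 and 5. A minimal sketch of the same setup using OpenCV's named constants, which map to those ids (written as Python 3, unlike the Python 2 file above):
```python
import cv2

# CAP_PROP_FRAME_WIDTH == 3, CAP_PROP_FRAME_HEIGHT == 4, CAP_PROP_FPS == 5,
# so this mirrors the capture.set(3/4/5, ...) calls in Camera.set_capture.
capture = cv2.VideoCapture(0)
capture.set(cv2.CAP_PROP_FRAME_WIDTH, 1280)
capture.set(cv2.CAP_PROP_FRAME_HEIGHT, 720)
capture.set(cv2.CAP_PROP_FPS, 30)
if not capture.isOpened():
    raise IOError('Camera cannot open.')
```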
#### File: TK_1621/client/mqtt_client.py
```python
import thread
import time
import base64
import json
import uuid
from lib.camera import Camera
from lib.acceleration import Acceralation
from lib import jtalk
from lib import ir_sensor
import paho.mqtt.client as mqtt
host = 'beam.soracom.io'
port = 1883
topic = "sh8@github/jphacks"
sub_topic = topic + '/result'
pub_topic = topic + '/image'
def on_connect(client, userdata, flags, respons_code):
client.subscribe(sub_topic)
def on_message(client, userdata, msg):
print msg
res = json.loads(msg.payload)
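# msg.payload is expected to be a JSON document of the form {"text": "..."}
# published back on the <topic>/result topic by the recognition side.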
text = res["text"].encode('utf-8')
try:
distance = ir_sensor.read_distance()
if distance == "close":
jtalk.speak("近くの%s" % text)
elif distance == "far":
jtalk.speak("遠くの%s" % text)
else:
jtalk.speak("%-3.2fメートル前方に%s" % (distance, text))
except UnboundLocalError:
print 'エラーが発生しました'
def on_publish(client, userdata, mid):
print("publish: {0}".format(mid))
if __name__ == "__main__":
client = mqtt.Client(protocol=mqtt.MQTTv311)
client.on_connect = on_connect
client.on_message = on_message
client.on_publish = on_publish
client.connect(host, port=port, keepalive=60)
client.loop_start()
accel = Acceralation()
camera = Camera()
while True:
if accel.permit_snapshot():
filepath = uuid.uuid4()
filename = '%s.jpg' % filepath
camera.snapshot(filename)
file = open('images/'+filename, "rb").read()
file_data = base64.b64encode(file)
client.publish(pub_topic, file_data, 0)
time.sleep(10.0)
``` |
{
"source": "jphacks/TK_1712",
"score": 3
} |
#### File: TK_1712/Photo2Keywords/merged_photo2keyword.py
```python
from base64 import b64encode
from os import makedirs
from os.path import join, basename
from sys import argv
import json
import requests
from goolabs import GoolabsAPI
import pprint
ENDPOINT_URL = 'https://vision.googleapis.com/v1/images:annotate'
RESULTS_DIR = 'jsons'
makedirs(RESULTS_DIR, exist_ok=True)
# gooAPIが発行してくれたAPI ID
with open('apikey.json', 'r') as f:
api_data = json.load(f)
app_id = api_data['keyword_api_key']
api = GoolabsAPI(app_id)
def make_image_data_list(image_filenames):
"""
image_filenames is a list of filename strings
Returns a list of dicts formatted as the Vision API
needs them to be
"""
img_requests = []
for imgname in image_filenames:
with open(imgname, 'rb') as f:
ctxt = b64encode(f.read()).decode()
img_requests.append({
'image': {'content': ctxt},
'features': [{
'type': 'TEXT_DETECTION',
'maxResults': 1
}]
})
return img_requests
def make_image_data(image_filenames):
"""Returns the image data lists as bytes"""
imgdict = make_image_data_list(image_filenames)
return json.dumps({"requests": imgdict}).encode()
def request_ocr(api_key, image_filenames):
response = requests.post(ENDPOINT_URL,
data=make_image_data(image_filenames),
params={'key': api_key},
headers={'Content-Type': 'application/json'})
return response
def target_sentence_to_keywords_list(target_sentence):
target_sentence = target_sentence.replace('\n', '')
# print("target_sentence = ", end="")
# print(target_sentence)
# key:キーワード、value:関連度合い の辞書を作る
sample_response = api.keyword(
title="photo01", body=target_sentence, max_num=5)
# pprintで、整形された状態でprintできる(sample_responseは辞書型のデータ)
# print("sample_response = ", end="")
# pprint.pprint(sample_response)
# max_num個のキーワードをリスト型にして出力。
keywords_dic_list = sample_response["keywords"]
print("keywords_dic_list = ", end="")
print(keywords_dic_list)
if __name__ == '__main__':
# google cloud vision のAPIキー
with open('apikey.json', 'r') as f:
api_data = json.load(f)
api_key = api_data['google_cloud_vision_api_key']
# 画像ファイル読み込み
image_filenames = argv[1:]
if not api_key or not image_filenames:
print("""
Please supply an api key, then one or more image filenames
$ python cloudvisreq.py api_key image1.jpg image2.png""")
else:
response = request_ocr(api_key, image_filenames)
if response.status_code != 200 or response.json().get('error'):
print(response.text)
else:
for idx, resp in enumerate(response.json()['responses']):
# save to JSON file
imgname = image_filenames[idx]
jpath = join(RESULTS_DIR, basename(imgname) + '.json')
with open(jpath, 'w') as f:
datatxt = json.dumps(resp, indent=2)
print("Wrote", len(datatxt), "bytes to", jpath)
f.write(datatxt)
# print the plaintext to screen for convenience
print("---------------------------------------------")
t = resp['textAnnotations'][0]
print(" Text:")
print(t['description'])
target_sentence_to_keywords_list(
'Python is a very useful language.') # ここの引数に本当は t['description'] を代入したい
``` |
{
"source": "jphacks/TK_1713",
"score": 3
} |
#### File: src/movie_processing/devide.py
```python
import cv2
from PIL import Image
import numpy as np
import os
import pandas as pd
from tqdm import tqdm
import matplotlib.pyplot as plt
import mojimoji
import extract_text
import google_ocr
def whitehist(rects, mask, n):
text_area = extract_text.cut_out_text(rects, mask, 100000)
text_area = cv2.resize(text_area, (150, 150))
m_sum = np.sum(text_area/255, axis=0)
if max(m_sum) != 0:
m_sum /= max(m_sum)
return m_sum
def isChange(x, y):
if np.corrcoef(x, y)[0, 1] < 0.6 or np.sum(y) == 0:
return True
return False
def find_devide_point(dirId, n):
dirpath = "images{0:02d}".format(dirId)
df = pd.DataFrame(index=[], columns=['id', 'time', 'text', 'state'])
imId = 1
state = 0 # text: exist = 1, none = 0
y = np.zeros(150)
pbar = tqdm(total=120)
cnt = 0
hists = np.array([])
before_text = ""
while(os.path.isfile(dirpath+"/image{}.jpg".format(imId))):
pbar.update(1)
path = dirpath+"/image{}.jpg".format(imId)
img = cv2.imread(path)
mask = extract_text.extract_white(img)
rects = extract_text.get_rects(mask)
height, width = img.shape[:2]
rects = [rect for rect in rects
if rect[2] * rect[3] > height * width / n]
# textが存在しない場合
if not rects:
if state:
state = 0
y = np.zeros(150)
series = pd.Series([imId-1, (imId-1)*0.5, before_text, -1], index=df.columns)
df = df.append(series, ignore_index=True)
imId += 1
continue
x = whitehist(rects, mask, n)
min_x = min(rects, key=(lambda x: x[0]))
min_y = min(rects, key=(lambda x: x[1]))
max_w = max(rects, key=(lambda x: x[0] + x[2]))
max_h = max(rects, key=(lambda x: x[1] + x[3]))
max_rect = np.array([min_x[0], min_y[1], max_w[0] - min_x[0] + max_w[2],
max_h[1] - min_y[1] + max_h[3]])
# 画面がホワイトアウトした場合
if max_rect[2] * max_rect[3] >= height * width:
if state:
state = 0
y = x
series = pd.Series([imId-1, (imId-1)*0.5, before_text, -1], index=df.columns)
df = df.append(series, ignore_index=True)
imId += 1
continue
if isChange(x, y):
cnt += 1
text = google_ocr.detect_text(dirId, imId)
text = text.replace(" ", "").replace("\n", "").replace(u' ', "").replace("\t", "")
if mojimoji.zen_to_han(text) == mojimoji.zen_to_han(before_text):
imId += 1
y = x
continue
if state == 0:
if text == "":
imId += 1
y = x
before_text = text
continue
state = 1
y = x
series = pd.Series([imId, imId*0.5, text, 1],
index=df.columns)
df = df.append(series, ignore_index=True)
before_text = text
else:
state = 1
series = pd.Series([imId-1, (imId-1)*0.5, before_text, -1],
index=df.columns)
df = df.append(series, ignore_index=True)
y = x
before_text = text
if text:
series = pd.Series([imId, imId*0.5, text, 1],
index=df.columns)
df = df.append(series, ignore_index=True)
y = x
imId += 1
datadir = "data"
if not os.path.isdir(datadir):
os.makedirs(datadir)
df.to_csv(datadir+"/"+dirpath+".csv")
pbar.close()
print(cnt)
if __name__ == "__main__":
find_devide_point(0, 10000)
```
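`isChange` above reports a slide change when the correlation between the current and previous white-pixel column histograms drops below 0.6, or when the previous histogram is empty. A small self-contained sketch of that test on toy vectors (the numbers are illustrative, not real frame data):
```python
import numpy as np

def is_change(x, y, threshold=0.6):
    # Same criterion as isChange: weak correlation, or an all-zero previous histogram.
    return np.corrcoef(x, y)[0, 1] < threshold or np.sum(y) == 0

profile = np.array([0.0, 0.5, 1.0, 0.5, 0.0])
inverted = 1.0 - profile                      # roughly the opposite layout
print(is_change(profile, profile * 0.9))      # False: correlation is 1.0
print(is_change(profile, inverted))           # True: correlation is -1.0
```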
#### File: src/movie_processing/extract_text.py
```python
import cv2
import numpy as np
def extract_white(image):
lower_white = np.array([240, 240, 240])
upper_white = np.array([255, 255, 255])
img_mask = cv2.inRange(image, lower_white, upper_white)
_, img_mask = cv2.threshold(img_mask, 130, 255, cv2.THRESH_BINARY)
return img_mask
def get_rects(mask):
contours, _ = cv2.findContours(mask, cv2.RETR_TREE,
cv2.CHAIN_APPROX_SIMPLE)[-2:]
rects = []
for contour in contours:
approx = cv2.convexHull(contour)
rect = cv2.boundingRect(approx)
rects.append(np.array(rect))
return rects
def draw_rects(rects, img, n):
height, width = img.shape[:2]
for rect in rects:
if rect[2]*rect[3]>height*width/n:
cv2.rectangle(img, tuple(rect[0:2]), tuple(rect[0:2]+rect[2:4]),
(0,0,255), thickness=2)
return img
def cut_out_text(rects, img, n):
height, width = img.shape[:2]
new_rects = [rect for rect in rects
if rect[2] * rect[3] > height * width / n]
if new_rects:
min_x = min(new_rects, key=(lambda x: x[0]))
min_y = min(new_rects, key=(lambda x: x[1]))
max_w = max(new_rects, key=(lambda x: x[0] + x[2]))
max_h = max(new_rects, key=(lambda x: x[1] + x[3]))
max_rect = np.array([min_x[0], min_y[1], max_w[0] - min_x[0] + max_w[2],
max_h[1] - min_y[1] + max_h[3]])
text_img = img[max_rect[1]:max_rect[1]+max_rect[3],
max_rect[0]:max_rect[0]+max_rect[2]]
else:
text_img = img
return text_img
def draw_max_rect(rects, img, n):
height, width = img.shape[:2]
new_rects = [rect for rect in rects
if rect[2] * rect[3] > height * width / n]
if new_rects:
min_x = min(new_rects, key=(lambda x: x[0]))
min_y = min(new_rects, key=(lambda x: x[1]))
max_w = max(new_rects, key=(lambda x: x[0] + x[2]))
max_h = max(new_rects, key=(lambda x: x[1] + x[3]))
max_rect = np.array([min_x[0], min_y[1], max_w[0] - min_x[0] + max_w[2],
max_h[1] - min_y[1] + max_h[3]])
cv2.rectangle(img, tuple(max_rect[0:2]), tuple(max_rect[0:2]+max_rect[2:4]),
(0,0,255), thickness=2)
return img
def run(img):
paper_mask = extract_white(img)
rects = get_rects(paper_mask)
fin_img = draw_max_rect(rects, img, 10000)
return fin_img
if __name__ == "__main__":
image = cv2.imread('images00/image65.jpg')
fin_img = run(image)
cv2.imwrite('text00/image65.jpg', fin_img)
cv2.imshow("TEXT", fin_img)
while(1):
if cv2.waitKey(10) > 0:
break
```
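`cut_out_text` and `draw_max_rect` both merge the per-contour `(x, y, w, h)` rects into a single enclosing rectangle. A short self-contained sketch of that union on two toy rects (the values are illustrative):
```python
import numpy as np

# Union of (x, y, w, h) rects, as computed in cut_out_text / draw_max_rect.
rects = [np.array([10, 20, 30, 40]), np.array([25, 5, 50, 30])]
min_x = min(rects, key=lambda r: r[0])
min_y = min(rects, key=lambda r: r[1])
max_w = max(rects, key=lambda r: r[0] + r[2])
max_h = max(rects, key=lambda r: r[1] + r[3])
max_rect = np.array([min_x[0], min_y[1],
                     max_w[0] - min_x[0] + max_w[2],
                     max_h[1] - min_y[1] + max_h[3]])
print(max_rect)  # [10  5 65 55] -> spans x 10..75, y 5..60
```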
#### File: src/movie_processing/google_ocr.py
```python
import base64
import json
import os
import requests
def detect_text(dirId, imId):
jsondir = "json{0:02d}".format(dirId)
if not os.path.isdir(jsondir):
os.makedirs(jsondir)
cache = jsondir + "/image{}.json".format(imId)
if os.path.isfile(cache):
with open(cache, 'r') as f:
result = json.load(f)
if 'textAnnotations' in result['responses'][0]:
return (result['responses'][0]['textAnnotations'][0]['description'])
else:
return ""
path = "images{0:02d}".format(dirId) + "/image{}.jpg".format(imId)
with open(path, 'rb') as image_file:
content = base64.b64encode(image_file.read())
content = content.decode('utf-8')
api_key = "hogehoge"
url = "https://vision.googleapis.com/v1/images:annotate?key=" + api_key
headers = { 'Content-Type': 'application/json' }
request_body = {
'requests': [
{
'image': {
'content': content
},
'features': [
{
'type': "TEXT_DETECTION",
'maxResults': 10
}
]
}
]
}
response = requests.post(
url,
data=json.dumps(request_body),
headers=headers
)
result = response.json()
with open(cache, 'w') as f:
json.dump(result, f)
if 'textAnnotations' in result['responses'][0]:
return (result['responses'][0]['textAnnotations'][0]['description'])
else:
return ""
if __name__ == '__main__':
detect_text(0, 1461)
``` |
{
"source": "jphacks/TK_1804",
"score": 3
} |
#### File: TK_1804/src/gui.py
```python
from tkinter import *
from tkinter import ttk
from tkinter import messagebox as tkMessageBox
from tkinter import filedialog as tkFileDialog
import os
import subprocess
def play():
args = ["python", "./src/process.py", filename.get()]
subprocess.call(args)
print('Play %s' % filename.get())
def pick():
print('pick an audio file')
fTyp = [('wavファイル', '*.wav'), ('mp3ファイル', '*.mp3')]
iDir = '/home/Documents'
filename.set(tkFileDialog.askopenfilename(filetypes = fTyp, initialdir = iDir))
print(filename.get())
root = Tk()
root.title('surroundify')
frame1 = ttk.Frame(root)
frame2 = ttk.Frame(frame1)
filename = StringVar()
logo = PhotoImage(file = './src/assets/logo.gif')
canvas1 = Canvas(frame1, width=500, height=500, bg='#15738c')
canvas1.create_image(250, 250, image=logo)
entry1 = ttk.Entry(frame2, textvariable=filename)
button1 = ttk.Button(frame2, text='pick an audio file', command=pick)
button2 = ttk.Button(frame2, text='play', command=play)
frame1.grid(row=0, column=0, sticky=(N,E,S,W))
canvas1.grid(row=1, column=1, sticky=E)
frame2.grid(row=1, column=2, sticky=W)
entry1.grid(row=1, column=1, sticky=E)
button1.grid(row=1, column=2, sticky=W)
button2.grid(row=2, column=1, sticky=S)
for child in frame1.winfo_children():
child.grid_configure(padx=5, pady=5)
root.mainloop()
```
#### File: TK_1804/src/process.py
```python
import copy
from multiprocessing import Process, Array, Value
import ctypes
import sys
import numpy as np
import audioop
import cv2
import readchar
import dlib
from camera.head_degree import HeadDegree
from audio.music import Music
from camera.head_vector import HeadVector
from camera.select_speakers import SelectSpeakers
CHUNK_SIZE = 1024
def init_select_speaker():
face_landmark_path = './src/camera/shape_predictor_68_face_landmarks.dat'
K = [3.805259604807149003e+02,0.0,3.067605479328022398e+02,
0.0,3.692700763302592577e+02,2.792470548132930048e+02,
0.0, 0.0, 1.0]
D = [-6.480026610793842012e-01,4.518886105418712940e-01,2.383686615865462672e-03,5.527650471881409219e-03,-1.457046727587593127e-01]
object_pts = np.float32([[6.825897, 6.760612, 4.402142],
[1.330353, 7.122144, 6.903745],
[-1.330353, 7.122144, 6.903745],
[-6.825897, 6.760612, 4.402142],
[5.311432, 5.485328, 3.987654],
[1.789930, 5.393625, 4.413414],
[-1.789930, 5.393625, 4.413414],
[-5.311432, 5.485328, 3.987654],
[2.005628, 1.409845, 6.165652],
[-2.005628, 1.409845, 6.165652],
[2.774015, -2.080775, 5.048531],
[-2.774015, -2.080775, 5.048531],
[0.000000, -3.116408, 6.097667],
[0.000000, -7.415691, 4.070434]])
reprojectsrc = np.float32([[10.0, 10.0, 10.0],
[10.0, 10.0, -10.0],
[10.0, -10.0, -10.0],
[10.0, -10.0, 10.0],
[-10.0, 10.0, 10.0],
[-10.0, 10.0, -10.0],
[-10.0, -10.0, -10.0],
[-10.0, -10.0, 10.0]])
line_pairs = [[0, 1], [1, 2], [2, 3], [3, 0],
[4, 5], [5, 6], [6, 7], [7, 4],
[0, 4], [1, 5], [2, 6], [3, 7]]
select_speaker = SelectSpeakers(K, D, object_pts, reprojectsrc, line_pairs, face_landmark_path)
return select_speaker
def play_music(shared_music_l_volumes, shared_music_r_volumes):
print("Run play_music")
music = Music()
print("END play_music")
src_frames = music.stream_in.read(CHUNK_SIZE, exception_on_overflow=False)
while src_frames != '':
# バイト列を取得
# [L0, R0, L1, R1, L2, R2, ...]
src_frames = music.stream_in.read(CHUNK_SIZE, exception_on_overflow=False)
# L, Rに分割
l_frames = audioop.tomono(src_frames, music.width, 1, 0)
r_frames = audioop.tomono(src_frames, music.width, 0, 1)
music.volumes = [shared_music_l_volumes, shared_music_r_volumes]
# 顔認識側から受け取る値
six_ch_frames = music.set_6ch_audio(l_frames, r_frames, music.volumes)
# 6chオーディオをstream_outに渡す
# [FL0, FR0, CT0, BA0, RL0, RR0, ...]
music.stream_out.write(six_ch_frames)
music.stop()
def assign_speaker(shared_music_l_volumes, shared_music_r_volumes, direction):
print("Run assign_speaker")
select_speaker = init_select_speaker()
# 顔認識
head = HeadVector()
head_degree = HeadDegree()
while(True):
# デバックモード
if direction.value == 0:
all_flames = select_speaker.estimate_head_orientation(head, head_degree)
if all_flames is not None:
l_volumes, r_volumes = all_flames[0], all_flames[1]
elif direction.value == -1:
l_volumes, r_volumes = np.array([0, 0, 0, 0, 0]), np.array([0, 0, 0, 0, 0])
elif direction.value == 9:
l_volumes, r_volumes = np.array([1, 1, 0, 0, 0.5]), np.array([0, 0, 1, 1, 0.5])
elif direction.value == 1:
l_volumes, r_volumes = np.array([0, 0, 0, 0, 1]), np.array([0, 0.5, 0.5, 0, 0])
elif direction.value == 2:
l_volumes, r_volumes = np.array([0.5, 0, 0, 0, 0.5]), np.array([0, 0, 0.75, 0.25, 0])
elif direction.value == 3:
l_volumes, r_volumes = np.array([1, 0, 0, 0, 0]), np.array([0, 0, 0, 1, 0])
elif direction.value == 4:
l_volumes, r_volumes = np.array([0.25, 0.75, 0, 0, 0]), np.array([0, 0, 0, 0.5, 0.5])
elif direction.value == 5:
l_volumes, r_volumes = np.array([0, 0.5, 0.5, 0, 0]), np.array([0, 0, 0, 0, 1])
elif direction.value == 6:
l_volumes, r_volumes = np.array([1, 0, 0, 0, 0]), np.array([0, 0, 0, 0, 0])
elif direction.value == 7:
l_volumes, r_volumes = np.array([0, 1, 0, 0, 0]), np.array([0, 0, 0, 0, 0])
elif direction.value == 8:
l_volumes, r_volumes = np.array([0, 0, 0, 0, 0]), np.array([0, 0, 1, 0, 0])
elif direction.value == -2:
l_volumes, r_volumes = np.array([0, 0, 0, 0, 0]), np.array([0, 0, 0, 1, 0])
elif direction.value == -3:
l_volumes, r_volumes = np.array([0, 0, 0, 0, 0]), np.array([0, 0, 0, 0, 1])
for i in range(5):
shared_music_l_volumes[i], shared_music_r_volumes[i] = l_volumes[i], r_volumes[i]
def start():
l_volumes, r_volumes = np.array([1, 0, 0, 0, 0]), np.array([0, 0, 0, 1, 0])
shared_music_l_volumes, shared_music_r_volumes = Array("f", l_volumes), Array("f", r_volumes)
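# Array("f", ...) and Value('i', ...) live in shared memory, so the volume vectors
# written by the assign_speaker process are visible immediately in play_music.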
# デバックモード
direction = Value('i', 0)
music_process = Process(target=play_music, args=[shared_music_l_volumes, shared_music_r_volumes])
speaker_process = Process(target=assign_speaker, args=[shared_music_l_volumes, shared_music_r_volumes, direction])
music_process.start()
speaker_process.start()
while(True):
kb = readchar.readchar()
if kb == 'q':
direction.value = -1
elif kb == 's':
direction.value = 0
elif kb == 'a':
direction.value = 9
elif kb == '1':
direction.value = 1
elif kb == '2':
direction.value = 2
elif kb == '3':
direction.value = 3
elif kb == '4':
direction.value = 4
elif kb == '5':
direction.value = 5
elif kb == 'z':
direction.value = 6
elif kb == 'x':
direction.value = 7
elif kb == 'c':
direction.value = 8
elif kb == 'v':
direction.value = -2
elif kb == 'b':
direction.value = -3
if __name__ == '__main__':
start()
``` |
{
"source": "jphacks/TK_1810",
"score": 2
} |
#### File: ml/server/estimator.py
```python
import sys, os
from pathlib import Path
import cv2
import numpy as np
import torch
from torchvision import datasets
from torch.autograd import Variable
from keras.applications.inception_resnet_v2 import preprocess_input
from keras.preprocessing.image import load_img, img_to_array
import tensorflow as tf
# detector
sys.path.append(os.environ['DETECTOR_PATH'])
from models import *
from utils.utils import *
from utils.datasets import *
# nima
sys.path.append(os.environ['REGRESSOR_PATH'])
from nima_models import NimaModel
class InstaScoreEstimator:
# food detector
detector_path = Path(os.environ['DETECTOR_PATH'])
config_path = detector_path / "config/mymodel.cfg"
weights_path = detector_path / "result/normal_finetuning_aug_full_strong/35.pkl"
img_size = 416
img_shape= (img_size, img_size)
class_path = detector_path / "data/coco.names"
conf_thresh = 0.7
nms_thres = 0.4
classes = load_classes(class_path)
Tensor = torch.FloatTensor
# nima
regressor_path = Path(os.environ['REGRESSOR_PATH'])
nima_weight_path = regressor_path / 'weights/inception_weights.h5'
nima_img_size = 224
def __init__(self):
# food detector
self.detector = Darknet(self.config_path, img_size=self.img_size)
model_wts = torch.load(self.weights_path)
self.detector.load_state_dict(model_wts)
self.detector.eval()
if torch.cuda.is_available():
self.detector = self.detector.cuda()
# nima
self.regressor = NimaModel(img_size=self.nima_img_size)
self.regressor.load_weights(self.nima_weight_path)
def predict(self, img_path):
img = cv2.imread(img_path)
img = img[:, :, ::-1]
h, w, c = img.shape
img_area = h*w
# run dish detector
bbox, bbox_area = self.detector.predict(img, self.conf_thresh, self.nms_thres)
# run nima
img = load_img(img_path, target_size=(224, 224))
img_arr = img_to_array(img)
img_arr = np.expand_dims(img_arr, axis=0)
img_arr = preprocess_input(img_arr)
instagenic_scores = self.regressor.predict(img_arr)
# calculate instagrammable score
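# argmax over the five NIMA output bins gives a class index 0-4; +1 maps it to a
# 1-5 rating and the division by 5 normalises the final score into (0.2, 1.0].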
score = np.argmax(instagenic_scores) + 1.
score /= 5.
return bbox, bbox_area, img_area, score
``` |
{
"source": "jphacks/TK_1811",
"score": 2
} |
#### File: src/picturesender/picture_sender.py
```python
from flask import Flask, request, abort
from linebot import (
LineBotApi, WebhookHandler
)
from linebot.exceptions import (
InvalidSignatureError,LineBotApiError,
)
from linebot.models import (
MessageEvent, TextMessage, TextSendMessage, Error, ImageSendMessage
)
from linebot.__about__ import __version__
from linebot.http_client import HttpClient, RequestsHttpClient
import os,sys
import json
import requests
AWS_S3_BUCKET_NAME = 'pictures'
app = Flask(__name__)
line_bot_api = LineBotApi(os.environ["CHANNEL_ACCESS_TOKEN"])
handler = WebhookHandler(os.environ["CHANNEL_SECRET"])
EC2_ADDR = 'tawashi.biz:5050'
@app.route('/')
def connect_test():
return "access success!"
@app.route("/callback",methods=['POST'])
def callback():
signature = request.headers['X-Line-Signature']
body = request.get_data(as_text=True)
app.logger.info("Request body: "+ body)
try:
handler.handle(body, signature)
except InvalidSignatureError:
abort(400)
return 'OK'
# messageが送られてきたら...
@handler.add(MessageEvent, message=TextMessage)
def handle_message(event):
'''
self._post('/v2/bot/message/reply', data=json.dumps(data), timeout=timeout)
'''
if(event.message.text == "写真ちょうだい"):
send_images(event)
elif(event.message.text == "写真リセットして"):
reset_images(event)
else:
echo(event)
def echo(event):
reply_token = event.reply_token
messages = TextSendMessage(text=event.message.text)
app.logger.info("DEBUG:" + event.message.text)
line_bot_api.reply_message(reply_token, messages)
def send_images(event):
reply_token = event.reply_token
#filename_l = get_images_from_server()
messages = [ImageSendMessage(
original_content_url="https://tawashi.biz/images/1.jpg", #JPEG 最大画像サイズ:240×240 最大ファイルサイズ:1MB(注意:仕様が変わっていた)
preview_image_url="https://tawashi.biz/images/1.jpg" #JPEG 最大画像サイズ:1024×1024 最大ファイルサイズ:1MB(注意:仕様が変わっていた)
),
ImageSendMessage(
original_content_url="https://tawashi.biz/images/2.jpg", #JPEG 最大画像サイズ:240×240 最大ファイルサイズ:1MB(注意:仕様が変わっていた)
preview_image_url="https://tawashi.biz/images/2.jpg" #JPEG 最大画像サイズ:1024×1024 最大ファイルサイズ:1MB(注意:仕様が変わっていた)
),
ImageSendMessage(
original_content_url="https://tawashi.biz/images/3.jpg", #JPEG 最大画像サイズ:240×240 最大ファイルサイズ:1MB(注意:仕様が変わっていた)
preview_image_url="https://tawashi.biz/images/3.jpg" #JPEG 最大画像サイズ:1024×1024 最大ファイルサイズ:1MB(注意:仕様が変わっていた)
)]
#messages = TextSendMessage(text=ret_str)
line_bot_api.reply_message(reply_token, messages)
def reset_images(event):
delete_images_on_server()
reply_token = event.reply_token
messages = TextSendMessage(text='写真を全て削除しました')
app.logger.info("DEBUG:" + event.message.text)
line_bot_api.reply_message(reply_token, messages)
def get_images_from_server():
url_items = 'http://' + EC2_ADDR +'/get_list'
r_json = requests.get(url_items).json()
ls = r_json["data"]
return ls
def delete_images_on_server():
url_items = 'https://' + EC2_ADDR +'/delete'
requests.post(url_items)
if __name__ == "__main__":
port = int(os.getenv("PORT",5000))
app.run(debug=False,host="0.0.0.0",port=port)
```
#### File: src/rpi_side/get_audio.py
```python
import pyaudio
import threading
import numpy as np
from pathlib import Path
from take_picture import AutoTakePictures
#from matplotlib import pyplot as plt
chunk = 1024*2
FORMAT = pyaudio.paInt16
CHANNELS = 1
RATE = 16000
RECORD_SECONDS = 1
STOCK_FILE_NUM = 20
p = pyaudio.PyAudio()
picture = AutoTakePictures()
stream = p.open(
format = FORMAT,
channels = CHANNELS,
rate = RATE,
input = True,
output = True,
frames_per_buffer = chunk,
)
max_data = []
def audio_trans(input):
frames = (np.frombuffer(input,dtype="int16"))
max_data.append(max(frames))
return max_data
# callback per term
def sendlog():
global max_data
if len(max_data) != 0:
#print(max_data)
mic_mean = int(sum(max_data)/len(max_data))
max_data = []
print(mic_mean)
# over level 15000
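# 15000 is an empirical threshold on 16-bit signed samples (full scale 32767),
# i.e. roughly 46% of maximum amplitude.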
if mic_mean > 15000:
picture.capture()
file_numbers = list(Path(picture.save_dir).glob("*.jpg")).__len__()
if file_numbers >= STOCK_FILE_NUM:
picture.sendServer(server="172.16.58.3",user="ec2-user",key="jphack2018-01.pem")
picture.delete()
# thread per 1
t = threading.Timer(1,sendlog)
t.start()
# thread
t = threading.Thread(target = sendlog)
t.start()
while stream.is_active():
input = stream.read(chunk)
input = audio_trans(input)
stream.stop_stream()
stream.close()
p.terminate()
``` |
{
"source": "jphacks/TK_1814",
"score": 2
} |
#### File: TK_1814/server/admin.py
```python
from flask import Response, redirect
from flask_admin.contrib import sqla
from werkzeug.exceptions import HTTPException
from flask_admin import Admin
from model import db, User, Promise, Motion
class AuthException(HTTPException):
def __init__(self, message):
super().__init__(message, Response(
message, 401,
{'WWW-Authenticate': 'Basic realm="Login Required"'}
))
class ModelView(sqla.ModelView):
column_display_pk = True
def is_accessible(self):
from app import admin_basic_auth
if not admin_basic_auth.authenticate():
raise AuthException('Not authenticated. Refresh the page.')
else:
return True
def inaccessible_callback(self, name, **kwargs):
from app import admin_basic_auth
return redirect(admin_basic_auth.challenge())
def init_admin(app_obj):
admin = Admin(app_obj, name='Pinky', template_mode='bootstrap3')
admin.add_view(ModelView(User, db.session))
admin.add_view(ModelView(Promise, db.session))
admin.add_view(ModelView(Motion, db.session))
```
#### File: server/views/archive.py
```python
from flask import Blueprint, jsonify
from database import session
from model import User, Promise
from sqlalchemy import or_
from itertools import groupby
from config import HOST_TOP
app = Blueprint('archive_bp', __name__)
@app.route('/archive/<user_id>', methods=['GET'])
def get_(user_id):
results = []
promises = session.query(Promise, User).filter(
or_(Promise.master_user_id == user_id, Promise.slave_user_id == user_id),
or_(User.id == Promise.master_user_id, User.id == Promise.slave_user_id),
Promise.is_done == True,
User.id != user_id
).all()
promises.sort(key=lambda tmp_promise: tmp_promise[1].id)
for user_id, promise_list in groupby(promises, key=lambda tmp_promise: tmp_promise[1].id):
user = [tmp_promise_user[1] for tmp_promise_user in promises if tmp_promise_user[1].id == user_id][0]
results.append({
'count': len(list(promise_list)),
'img': '{}/{}'.format(HOST_TOP, user.profile),
'name': user.name,
'user_id': user.id
})
session.close()
return jsonify({'results': results}), 200
``` |
{
"source": "jphacks/TK_1819",
"score": 3
} |
#### File: TK_1819/raspberrypi/get.py
```python
import RPi.GPIO as GPIO
import urllib.request
import time
import pygame
url = 'https://hack-api.herokuapp.com/trashkan/1/status'
req = urllib.request.Request(url)
led = [22, 10, 9, 11, 5, 6, 13, 2, 3, 4, 17, 27]
pygame.init()
GPIO.setmode(GPIO.BCM)
for i in led:
GPIO.setup(i, GPIO.OUT)
GPIO.output(i, GPIO.LOW)
def blink(soundnum):
print("blink start")
if soundnum == 2:
sound = pygame.mixer.Sound("wav/piano1.wav")
else:
sound = pygame.mixer.Sound("wav/decision5.wav")
sound.play()
for i in range(10):
for j in led:
GPIO.output(j, GPIO.HIGH)
time.sleep(0.1)
GPIO.output(j, GPIO.LOW)
print("blink end")
try:
while True:
with urllib.request.urlopen(req)as res:
body = int(res.read())
print(body)
if body != 0:
blink(body)
time.sleep(1)
except urllib.error.URLError as err:
print(err.reason)
GPIO.cleanup()
except urllib.error.HTTPError as err:
print(err.code)
GPIO.cleanup()
except:
GPIO.cleanup()
``` |
{
"source": "jphacks/TK_1905",
"score": 2
} |
#### File: main/forms/signin.py
```python
from django.forms import Form
from django.forms.fields import EmailField, CharField
from django.forms.widgets import TextInput, PasswordInput
from django.contrib.auth import authenticate
from main.models import User
from .base import BootstrapFormMixin
class SigninForm(BootstrapFormMixin, Form):
email = EmailField(
max_length=256,
required=True,
widget=TextInput(attrs={'placeholder': 'メールアドレス'}))
password = CharField(
max_length=256,
required=True,
widget=PasswordInput(attrs={'placeholder': 'パスワード'}))
def clean_email(self):
email = self.cleaned_data['email']
if User.objects.filter(email=email).count() == 0:
self.add_error('password', 'メールアドレスかパスワードが正しくありません。')
return email
def get_authenticated_user(self):
user = authenticate(
username=self.cleaned_data['email'],
password=self.cleaned_data['password'])
if user is None:
self.add_error('password', 'メールアドレスかパスワードが正しくありません。')
return user
```
#### File: main/forms/signup.py
```python
from django.forms import Form
from django.forms.fields import EmailField, CharField
from django.forms.widgets import TextInput, PasswordInput
from main.models import User
from .base import BootstrapFormMixin
class SignupForm(BootstrapFormMixin, Form):
name = CharField(
max_length=256,
required=True,
widget=TextInput(attrs={'placeholder': 'ユーザーネーム'}))
email = EmailField(
max_length=256,
required=True,
widget=TextInput(attrs={'placeholder': 'メールアドレス'}))
password = CharField(
min_length=8,
max_length=256,
required=True,
widget=PasswordInput(attrs={'placeholder': 'パスワード(8文字以上)'}))
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def create_user(self):
if 'name' in self.cleaned_data:
name = self.cleaned_data['name']
else:
name = None
email = self.cleaned_data['email']
password = self.cleaned_data['password']
user = None
try:
if name:
user = User.objects.create_user(email, password, name=name)
else:
user = User.objects.create_user(email, password)
except:
self.add_error('password', 'ユーザーの作成に失敗しました.')
return user
def clean_email(self):
email = self.cleaned_data['email']
if User.objects.filter(email=email).count() > 0:
self.add_error('email', 'そのメールアドレスは既に使われています.')
return email
```
#### File: management/commands/image2base64.py
```python
import base64
from django.core.management.base import BaseCommand
class Command(BaseCommand):
help = 'image2base64'
def add_arguments(self, parser):
parser.add_argument(
dest='img_path',
help='img_path'
)
def handle(self, *args, **options):
print('image2base64')
img_path = options['img_path']
print(img_path)
with open(img_path, 'rb') as f:
            img_b64 = base64.encodebytes(f.read())
img_b64_str = img_b64.decode('utf8')
print(repr(img_b64_str))
base64.b64decode(img_b64_str.encode())
```
#### File: main/serializers/sentence.py
```python
from rest_framework import serializers
from main.models import Sentence
class SentenceSerializer(serializers.ModelSerializer):
spoken_count = serializers.SerializerMethodField(read_only=True)
class Meta:
model = Sentence
fields = ('content_jp', 'content_en', 'score', 'spoken_count', )
def __init__(self, *args, **kwargs):
user = kwargs.pop("user", None)
super().__init__(*args, **kwargs)
self.user = user
self.fields['score'].read_only = True
def get_spoken_count(self, obj):
# return obj.usersentence_set.all().count()
return obj.usersentence_set.filter(text__user=self.user).count()
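# Hedged usage sketch (not part of the original file): the view is expected to pass the
# requesting user so that spoken_count is scoped to that user's texts. `sentence` and
# `request` below are assumed to come from a DRF view.
#
# data = SentenceSerializer(sentence, user=request.user).data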
```
#### File: main/serializers/user_text.py
```python
from rest_framework import serializers
from main.models import UserText, Text
class UserTextSerializer(serializers.ModelSerializer):
content = serializers.CharField(write_only=True)
class Meta:
model = UserText
fields = ('id', 'text', 'content', )
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.fields['id'].read_only = True
self.fields['text'].read_only = True
def create(self, validated_data):
content = validated_data.pop("content")
text, _ = Text.objects.get_or_create(content=content)
validated_data["text"] = text
return super().create(validated_data)
def update(self, instance, validated_data):
return super().update(instance, validated_data)
```
#### File: server/main/tests.py
```python
from django.test import TestCase
from main.utils import split_text
class GoogleUtilsTests(TestCase):
def test_google_split_text(self):
test_cases = [
("チームVolareですね僕達は出かけるeducationでテック学習ですねすごいものは英語の学習アプリを消すねただの英語の学習アプリじゃなくて自分の普段の会話から勝手にできる英語学習アプリですで駄目になったら薬をやっていざ日常と喋れないことがあると思うんですけどなんで喋れないかって言うとあの広大な辞書に載ってる家で自分が普段使ってる言葉とちょっと思うんですよねってことはあのアプリを作って自分が普段こうやってベラベラ喋ってるなよ全部取っといてそこから英語の住所とかを生成することで自分が普段喋ってる言葉を最低限それをそれさえ子にできれば普段でも喋れるわけですからね何もわからないとかちょっとできるとかそういうことを一緒になってないような子でもできるそういうことかが進化した今のテクノロジーを使えばできるというそういうアプリ",
[
'チームVolareですね', '僕達は出かけるeducationでテック学習ですね',
'すごいものは英語の学習アプリを消すね', 'ただの英語の学習アプリじゃなくて',
'自分の普段の会話から勝手にできる英語学習アプリですで', '駄目になったら', '薬をやって',
'いざ日常と喋れないことがあると思うんですけど', 'なんで喋れないかって言うと',
'あの広大な辞書に載ってる家で自分が普段使ってる言葉とちょっと思うんですよねって', 'ことはあのアプリを作って',
'自分が普段こうやって', 'ベラベラ喋ってるなよ全部取っといて',
'そこから英語の住所とかを生成することで自分が普段喋ってる言葉を最低限それをそれさえ子にできれば普段でも喋れるわけですからね',
'何もわからないとかちょっとできるとかそういうことを一緒になってない', 'ような子でもできるそういうことかが',
'進化した今のテクノロジーを使えばできるというそういうアプリ'
]),
("スペースがあるじゃないですかやって絶対あるスペースでわかってかつ一番最後にあの同士が来るって言うも翼くん同士じゃないか一番最後に何が来る一番最初に何が来ると決まってるので次々出るんですよ",
[
'スペースがあるじゃないですかやって', '絶対あるスペースでわかって',
'かつ一番最後にあの同士が来るって言うも翼くん同士じゃないか',
'一番最後に何が来る一番最初に何が来ると決まってるので次々出るんですよ'
]),
("誰がその中の問題も悪化したら問題モードに移行して日本語出てきて英語単語帳みたいな感じでセンスみたいな感じで帰ってペラペラペラオみたいな感じで",
[
'誰がその中の問題も悪化したら', '問題モードに移行して', '日本語出てきて',
'英語単語帳みたいな感じでセンスみたいな感じで帰って', 'ペラペラペラオみたいな感じで'
]),
("ファブをこいつの真ん中に LINE してるってのやりました漫画になってるよね待って待って さっき家に帰りました",
[
'ファブをこいつの真ん中にLINEしてるってのやりました',
'漫画になってるよね待って待って',
'さっき家に帰りました'
]),
# ("英語の学習アプリですねただの英語の学習アプリじゃなくて自分の普段の会話から勝手にできる英語学習アプリです",
# [
# '英語の学習アプリですね','ただの英語の学習アプリじゃなくて自分の普段の会話から勝手にできる英語学習アプリです'
# ])
]
for test_case in test_cases:
texts = split_text(test_case[0])
print(texts)
try:
self.assertTrue(test_case[1] == texts)
except AssertionError as e:
try:
for i in range(len(texts)):
print(f"実際: {texts[i]}\n正解: {test_case[1][i]}")
print("-" * 24)
except Exception:
pass
raise e
print("OK")
```
#### File: main/utils/funcs.py
```python
import uuid
from datetime import date, datetime, timedelta
import numpy as np
from django.utils import timezone
def json_serial(obj):
if isinstance(obj, (datetime, date)):
return obj.isoformat()
elif isinstance(obj, (uuid.UUID)):
return str(obj)
elif isinstance(obj, np.ndarray):
return obj.tolist()
raise TypeError("Type %s not serializable" % type(obj))
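# Hedged usage sketch (assumed, not from the original file): json_serial is meant to be the
# `default` hook of json.dumps, so datetimes, UUIDs and numpy arrays serialize cleanly.
#
# import json
# json.dumps({'id': uuid.uuid4(), 'at': datetime.now()}, default=json_serial)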
def get_next_minute_datetime(d):
return timezone.localtime(d + timedelta(minutes=1) - timedelta(
seconds=d.second) - timedelta(microseconds=d.microsecond))
def get_prev_minute_datetime(d):
return timezone.localtime(d - timedelta(seconds=d.second) -
timedelta(microseconds=d.microsecond))
```
#### File: utils/googleutils/language.py
```python
import six
from google.cloud import language
from google.cloud.language import enums
from google.cloud.language import types
def _split_text_with_rules(tokens,
eof_roles=["です", "ね"],
eof_words=["か", "て", "たら"]):
texts = []
sentence = ""
end_flag = False
prev_token = None
for i, token in enumerate(tokens):
content = token.text.content
part_of_speech_tag = enums.PartOfSpeech.Tag(token.part_of_speech.tag)
if end_flag and part_of_speech_tag.name == "VERB": #動詞
end_flag = True
elif end_flag and part_of_speech_tag.name != "PRT": #修飾詞
texts.append(sentence)
sentence = ""
end_flag = False
print(i, part_of_speech_tag.name, content,
token.dependency_edge.head_token_index)
sentence += content
if content == "た" and prev_token == "まし":
end_flag = True
for eof_word in eof_words:
if eof_word == content:
print("point2")
end_flag = True
for eof_role in eof_roles:
if eof_role in content:
print("************point1************")
end_flag = True
kaketasaki_token = tokens[token.dependency_edge.head_token_index]
# if enums.PartOfSpeech.Tag(kaketasaki_token.part_of_speech.tag).name != "NOUN":
# for eof_word in eof_words:
# if eof_word == content:
# print("point2")
# end_flag = True
if kaketasaki_token == kaketasaki_token.dependency_edge.head_token_index:
            texts.append(sentence)
sentence = ""
end_flag = False
prev_token = content
texts.append(sentence)
return texts
def split_text(text):
client = language.LanguageServiceClient()
if isinstance(text, six.binary_type):
text = text.decode('utf-8')
# Instantiates a plain text document.
document = types.Document(content=text,
type=enums.Document.Type.PLAIN_TEXT)
res = client.analyze_syntax(document)
# print("analyze_syntax:", res)
# sentence_texts = []
# for sentence in res.sentences:
# text_content = sentence.text.content
# print(text_content)
# sentence_texts.append(text_content)
# tokens = res.tokens
# for token in tokens:
# # print(token)
# part_of_speech_tag = enums.PartOfSpeech.Tag(token.part_of_speech.tag)
# print(part_of_speech_tag.name, token.text.content,
# token.dependency_edge.head_token_index)
return _split_text_with_rules(res.tokens)
```
#### File: views/web/signup.py
```python
from django.urls import reverse_lazy
from django.views.generic import FormView
from django.contrib.auth import login
from main.forms import SignupForm
class SignupView(FormView):
template_name = 'main/signup.html'
form_class = SignupForm
success_url = reverse_lazy('main:index')
def form_valid(self, form):
user = form.create_user()
if user is None:
return super().form_invalid(form)
login(self.request, user)
return super().form_valid(form)
``` |
{
"source": "jphacks/TK_1912",
"score": 2
} |
#### File: TK_1912/Myapp/jetson.py
```python
from flask import Flask, render_template, request, session, redirect, jsonify
from lib.user_management import Management
from lib.quest_management import questMap
from lib.answer_management import AnswerManage
import urllib.error
import urllib.request
import os
import requests
app = Flask(__name__)
app.secret_key = b'random string...'
AREA = ''
username = ''
def download_img(url, dst, path):
os.makedirs(dst, exist_ok=True)
dst_path = os.path.join(dst, path)
r = requests.get(url, stream=True)
if r.status_code == 200:
with open(dst_path, 'wb') as f:
f.write(r.content)
# login page access
@app.route('/<point>', methods=['GET'])
def login(point):
global AREA
if point != 'favicon.ico':
AREA = point
return render_template('login.html',
title='Adachi Quest')
@app.route('/', methods=['GET'])
def login_redirect():
return render_template('login.html',
title='Adachi Quest')
@app.route('/jetson/<path>/<answer>', methods=['GET'])
def index_post(path, answer):
global AREA, username
    # Display the quests for this area
am = AnswerManage(area=AREA)
data = am.acquisitionQuest()
url = 'http://x.x.x.x:5000/static/data/' + username + '/' + data['name'] + '/' + answer + '/' + path
dst = './static/data/' + username + '/' + data['name']
download_img(url, dst, 'result.jpg')
return render_template('jetson.html',
title='Adachi Quest',
message='Hello',
user=username,
password='',
flg=True,
quest=data,
file=path,
answer=answer)
@app.route('/login', methods=['GET'])
def login_get():
return redirect('/')
@app.route('/takephoto', methods=['POST'])
def takephoto_post():
global AREA
id = request.form.get('id')
pswd = request.form.get('pass')
answer = request.form.get('answer')
am = AnswerManage(area=AREA)
data = am.acquisitionQuest()
return render_template('takephoto.html',
title='Adachi Quest',
message='Hello',
user=id,
                           password=pswd,
quest=data,
answer=answer)
@app.route('/login', methods=['POST'])
def login_post():
global AREA, username
id = request.form.get('id')
username = id
pswd = request.form.get('pass')
    manager = Management(user_id=id, password=pswd)
    # True only when the user and password match
    flg = manager.checkLogin()
    # Display the quests for this area
am = AnswerManage(area=AREA)
data = am.acquisitionQuest()
if flg:
return render_template('jetson.html',
title='Adachi Quest',
quest=data,
user=id,
                               password=pswd,
flg=False)
else:
return render_template('login.html',
title='Adachi Quest',
message='パスワードが異なるか、登録がありません。')
@app.route('/signup', methods=['POST'])
def signup_post():
id = request.form.get('name')
pswd = request.form.get('password')
    manager = Management(user_id=id, password=pswd)
    # False if the user is already registered
flg = manager.signup()
return str(flg)
@app.route('/logout', methods=['GET'])
def logout():
session.pop('id', None)
return redirect('/')
if __name__ == "__main__":
app.run(host='0.0.0.0', port=8000, debug=True, use_reloader=True, use_debugger=False)
```
#### File: Myapp/lib/TakePhoto.py
```python
import cv2
import time
import numpy as np
import os
import math
from numpy import linalg as LA
from collections import Counter
import glob
import sys
sys.path.append('./lib')
from tf_pose.estimator import TfPoseEstimator
# Pose estimation using tf-pose-estimation (tf-openpose)
class tfOpenpose:
def __init__(self):
        # Keypoint indices returned by OpenPose
self.point = {"Nose":0, "Neck":1, "RShoulder":2,"RElbow":3,"RWrist":4,
"LShoulder":5, "LElbow":6, "LWrist":7, "MidHip":8, "RHip":9,
"RKnee":10, "RAnkle":11,"LHip":12, "LKnee":13, "LAnkle":14,
"REye":15, "LEye":16, "REar":17, "LEar":18, "LBigToe":19,
"LSmallToe":20, "LHeel":21, "RBigToe":22, "RSmallToe":23, "RHeel":24,
"Background":25}
self.width = 320
self.height = 176
    # Load the model data
def setting(self):
model = './lib/tf-pose-estimation/models/graph/mobilenet_v2_small/graph_opt.pb'
self.e = TfPoseEstimator(model, target_size=(self.width, self.height))
    # Settings for recording video.
def captureSetting(self, filename, user, area):
self.user = user
self.area = area
self.root = './static/data/'+self.user+'/'+self.area
os.makedirs(self.root, exist_ok=True)
self.cam = cv2.VideoCapture(0)
# CamWidth = int(self.cam.get(cv2.CAP_PROP_FRAME_WIDTH) )
# CamHeight = int(self.cam.get(cv2.CAP_PROP_FRAME_HEIGHT) )
fps = self.cam.get(cv2.CAP_PROP_FPS)
fourcc = cv2.VideoWriter_fourcc(*'MJPG')
self.save_video = self.root+'/'+filename+'.avi'
self.writer = cv2.VideoWriter(self.save_video, fourcc, fps, (self.width, self.height))
    # Split the recorded video into frames and run pose estimation on each one
def MovieToImage(self):
save_root = './static/data/'+self.user+'/'+self.area
os.makedirs(save_root, exist_ok=True)
for i in ['A', 'B', 'C', 'NG']:
os.makedirs(save_root + '/' + i, exist_ok=True)
        # Read the video
cap = cv2.VideoCapture(self.save_video)
num = 0
labels = []
print('~~~~~~~~~~~~スタート~~~~~~~~~~~~~~')
while cap.isOpened():
ret, ori_image = cap.read()
if ret == True:
                # Skeleton estimation with OpenPose
humans = self.e.inference(ori_image, resize_to_default=(self.width > 0 and self.height > 0), upsample_size=4.0)
image, center = TfPoseEstimator.draw_humans(ori_image, humans, imgcopy=False)
                # Pose classification with the custom rule-based algorithm
image, label = self.detect_pose(center=center, image=image)
labels.append(label)
cv2.imwrite(save_root+'/'+label+"/picture{:0=3}".format(num)+".jpg", ori_image)
num += 1
else:
break
counter = Counter(np.array(labels))
print(counter)
print(counter.keys())
        # Use the most frequent prediction
acc_label = counter.most_common()[0][0]
        # If the most frequent label is NG and another label exists, fall back to the next most frequent one
if (acc_label == 'NG') and (len(counter.keys()) != 1):
acc_label = counter.most_common()[1][0]
filename = sorted(glob.glob(save_root+'/'+acc_label+'/*'))
return acc_label, filename[0]
    # Record video while running real-time skeleton estimation with OpenPose.
def takeMovie(self):
fps_time = 0
start = time.time()
while True:
ret_val, image = self.cam.read()
reImage = cv2.resize(image, (self.width, self.height))
end = time.time()
            # Start recording and real-time skeleton estimation after 7 seconds
if (end - start) > 7:
self.writer.write(reImage)
                # Skeleton estimation with OpenPose
humans = self.e.inference(image, resize_to_default=(self.width > 0 and self.height > 0), upsample_size=4.0)
image, center = TfPoseEstimator.draw_humans(image, humans, imgcopy=False)
                # Pose classification with the custom rule-based algorithm
image, label = self.detect_pose(center=center, image=image)
                # Overlay the FPS on the frame
cv2.putText(image,
"FPS: %f" % (1.0 / (time.time() - fps_time)),
(10, 10), cv2.FONT_HERSHEY_SIMPLEX, 1,
(0, 255, 0), 1)
else:
                # Show a countdown during the first 7 seconds
image = self.TextComposition(text=int((7 - (end - start) )), bg=image)
cv2.imshow('tf-pose-estimation result', image)
fps_time = time.time()
            ##### TODO: judge a correct answer when the current pose matches one of the three target poses
if cv2.waitKey(1) & 0xFF == ord('q') or end - start > 15:
break
self.cam.release()
self.writer.release()
cv2.destroyAllWindows()
    # Composite a foreground image onto the background
def ImageComposition(self, fg, bg, cX=0, cY=0):
hImg, wImg = fg.shape[:2]
hBack, wBack = bg.shape[:2]
if (cX == 0) and (cY == 0):
cX, cY = int(wBack/2), int(hBack/2)
halfX, halfY = int(wImg/2), int(hImg/2)
yUp, xLeft = (cY-halfY), (cX-halfX)
yDown, xRight = (cY+halfY), (cX+halfX)
bg[yUp:yDown, xLeft:xRight] = fg
return bg
    # Draw the countdown text onto the background
def TextComposition(self, text, bg, cX=0, cY=0):
hBack, wBack = bg.shape[:2]
if (cX == 0) and (cY == 0):
cX, cY = int(wBack/2), int(hBack/2)
cv2.putText(bg,
"%d" % text,
(cX-100, cY+100), cv2.FONT_HERSHEY_SIMPLEX, 10,
(255,255,255), 8)
return bg
    # Combine the results of the individual rule-based pose detectors
def detect_pose(self, center, image):
if self.detect_A(center, image):
label = 'A'
elif self.detect_B(center, image):
label = 'B'
elif self.detect_C(center, image):
label = 'C'
else :
label = 'NG'
cv2.putText(image,
label,
(10, 120), cv2.FONT_HERSHEY_SIMPLEX, 4,
(255, 0, 0), 3)
# print('angle_center:', angle_center, 'angle_right:', angle_right, 'angle_left:', angle_left)
return image, label
    # Pose A: both arms spread wide
def detect_A(self, center, image):
if self.is_included_point(center, ['Neck', 'RWrist', 'LWrist', 'RElbow', 'LElbow', 'RShoulder', 'LShoulder', 'Nose']):
angle_center = self.CalculationAngle(center=center[self.point['Neck']],
p1=center[self.point['RWrist']],
p2=center[self.point['LWrist']])
angle_right = self.CalculationAngle(center=center[self.point['RElbow']],
p1=center[self.point['RShoulder']],
p2=center[self.point['RWrist']])
angle_left = self.CalculationAngle(center=center[self.point['LElbow']],
p1=center[self.point['LShoulder']],
p2=center[self.point['LWrist']])
if angle_center > 90 and angle_right > 135 and angle_left > 135 and abs(center[self.point['RWrist']][1] - center[self.point['LWrist']][1]) < 30:
return True
return False
    # Pose B: one hand raised
def detect_B(self, center, image):
if self.is_included_point(center, ['Neck', 'RWrist', 'LWrist', 'RElbow', 'LElbow', 'RShoulder', 'LShoulder', 'Nose']):
if (center[self.point['RWrist']][1] < center[self.point['Nose']][1]) ^ (center[self.point['LWrist']][1] < center[self.point['Nose']][1]):
if center[self.point['RWrist']][1] < center[self.point['LWrist']][1]:
angle_elbow = self.CalculationAngle(center=center[self.point['RElbow']],
p1=center[self.point['RWrist']],
p2=center[self.point['RShoulder']])
else:
angle_elbow = self.CalculationAngle(center=center[self.point['LElbow']],
p1=center[self.point['LWrist']],
p2=center[self.point['LShoulder']] )
if angle_elbow > 135:
return True
return False
    # Pose C: elbows tucked in and pointing down, hands raised
def detect_C(self, center, image):
if self.is_included_point(center, ['Neck', 'RWrist', 'LWrist', 'RElbow', 'LElbow', 'RShoulder', 'LShoulder', 'Nose']):
angle_center = self.CalculationAngle(center=center[self.point['Neck']],
p1=center[self.point['RElbow']],
p2=center[self.point['LElbow']])
angle_right = self.CalculationAngle(center=center[self.point['RElbow']],
p1=center[self.point['RShoulder']],
p2=center[self.point['RWrist']])
angle_left = self.CalculationAngle(center=center[self.point['LElbow']],
p1=center[self.point['LShoulder']],
p2=center[self.point['LWrist']])
if 60 < angle_center < 160 and angle_right < 100 and angle_left < 100 and abs(center[self.point['RWrist']][1] - center[self.point['LWrist']][1]) < 30:
return True
return False
    # Check that the specified keypoints exist in the skeleton estimated by OpenPose
def is_included_point(self, dic, name_list):
ans = True
for name in name_list:
if not self.point[name] in dic:
ans = False
return ans
    # Compute the angle formed by three points (p1 and p2 around center)
def CalculationAngle(self, p1, p2, center):
nparr_p1 = np.array([p1[0], p1[1]])
nparr_p2 = np.array([p2[0], p2[1]])
nparr_center = np.array([center[0], center[1]])
vec_p1 = nparr_p1 - nparr_center
vec_p2 = nparr_p2 - nparr_center
i = np.inner(vec_p1, vec_p2)
n = LA.norm(vec_p1) * LA.norm(vec_p2)
c = i / n
angle = np.rad2deg(np.arccos(np.clip(c, -1.0, 1.0)))
return angle
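# Worked example for CalculationAngle (illustrative, not part of the original file): with
# center=(0, 0), p1=(1, 0) and p2=(0, 1), the two vectors are (1, 0) and (0, 1), their inner
# product is 0 and both norms are 1, so cos(angle) = 0 and the method returns 90.0 degrees.
# The detect_A/B/C rules above compare such joint angles against fixed thresholds
# (e.g. an elbow angle greater than 135 degrees is treated as a straightened arm).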
```
#### File: Myapp/lib/user_management.py
```python
import pandas as pd
import numpy as np
import os
# Manages login credentials
class Management:
def __init__(self, user_id, password):
self.user = user_id
self.password = password
self.file_name = './lib/User/userManagement.csv'
    # Check whether the user is registered; register the username if it is unregistered and unused
def signup(self):
if os.path.exists(self.file_name):
df = pd.read_csv(self.file_name)
users = list(df['User'])
passwords = list(df['Password'])
if self.user in users:
flg = False
else:
self.registration(user=users, password=passwords)
flg = True
else:
self.registration(user=[], password=[])
flg = True
return flg
    # Check whether the user is registered.
def checkLogin(self):
if os.path.exists(self.file_name):
df = pd.read_csv(self.file_name)
flg = self.checkUserPassword(df=df)
return flg
else:
return False
    # Persist the registered users
def save_data(self, df):
df.to_csv(self.file_name, index=False)
    # Register a new user
def registration(self, user, password):
user.append(self.user)
password.append(self.password)
data = np.array([user, password]).T
        print('signup:', data)
df = pd.DataFrame(data=data, columns=['User','Password'])
self.save_data(df=df)
    # Check whether the username is already in use and the password matches.
def checkUserPassword(self, df):
users = list(df['User'])
if self.user in users:
password = list(df[df['User'] == self.user]['Password'])[0]
if password == self.password:
return True
else:
return False
else:
return False
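# Minimal usage sketch (assumed, not part of the original file). It writes to the relative
# path ./lib/User/userManagement.csv, so it only makes sense when run from the Myapp directory.
if __name__ == '__main__':
    demo = Management(user_id='demo_user', password='demo_pass')
    print('signup ok:', demo.signup())      # True when the username was free and is now stored
    print('login ok:', demo.checkLogin())   # True when the stored password matches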
``` |
{
"source": "jphacks/TK_1913",
"score": 3
} |
#### File: TK_1913/sensor/btxmt.py
```python
from time import sleep
import bluetooth
class BTClient:
def __init__(self, addr, port):
"""Initial setting.
Args:
addr (str): Server mac address
port (int): Server port number
"""
self.__addr = addr
self.__port = port
self.__sock = bluetooth.BluetoothSocket(bluetooth.RFCOMM)
def connect(self):
"""Connect server.
"""
while True:
try:
self.__sock.connect((self.__addr, self.__port))
sleep(1)
break
except bluetooth.btcommon.BluetoothError:
self.__sock = bluetooth.BluetoothSocket(bluetooth.RFCOMM)
sleep(1)
except Exception as e:
print(e)
break
def disconnect(self):
"""Disconnect.
"""
self.__sock.close()
def send(self, msg):
"""Send `msg` to server.
Args:
msg (str): Message sent to server
"""
self.__sock.send(msg)
class BTServer:
def __init__(self, port):
"""Initial setting.
Args:
port (int): Server (listen) port number
"""
self.__server_sock = bluetooth.BluetoothSocket(bluetooth.RFCOMM)
self.__server_sock.bind(('', port))
self.__server_sock.listen(1)
def accept(self):
"""Listening for connection.
Returns:
str: Client mac address
"""
client_sock, client_addr = self.__server_sock.accept()
self.__client_sock = client_sock
return client_addr
def disconnect(self):
"""Disconnect.
"""
self.__client_sock.close()
self.__server_sock.close()
def recv(self, recv_size):
"""Send `msg` to server.
Returns:
bytes: Message recieved from client
"""
return self.__client_sock.recv(recv_size)
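# Hedged usage sketch (assumed; the MAC address, port and payload are placeholders, not taken
# from the project). One process acts as the RFCOMM server and another as the client:
#
#   server = BTServer(port=1)
#   addr = server.accept()                 # blocks until a client connects
#   data = server.recv(1024)
#
#   client = BTClient(addr='AA:BB:CC:DD:EE:FF', port=1)
#   client.connect()
#   client.send('pressure,101300')
#   client.disconnect()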
```
#### File: TK_1913/server/normalize.py
```python
import csv
import math
import numpy as np
from scipy.optimize import curve_fit
SeaLevelPressure = 101000
def sin(x, a, b, c, d):
return a*np.sin(b*x+c)+d
# return a*x**6+b*x**5+c*x**4+d*x**3+e*x**2+f*x+g
def pressure_to_height(pressure):
height = ((SeaLevelPressure/pressure)**(1/5.275)-1)*(15+273.15)/0.0065
return height
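# Worked example (illustrative): with SeaLevelPressure = 101000 Pa and pressure = 100000 Pa,
# ((101000/100000)**(1/5.275) - 1) * (15 + 273.15) / 0.0065 is roughly 84 m, i.e. a drop of
# about 1 kPa corresponds to roughly 80-90 m of altitude under this barometric formula.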
def height_to_angle(diff_height):
angle = math.asin(diff_height)
return angle
def normalize(csv_file):
with open(csv_file, 'r') as f:
reader = csv.reader(f)
height_list = []
angle_list = []
normalized_list = []
time_list = []
for raw in reader:
# angle_list.append(height_to_angle(pressure_to_height(float(raw[1])), pressure_to_height(float(raw[2]))))
height_list.append(float(raw[2]) - float(raw[1]))
time_list.append(raw[0])
if max(height_list) < 0:
            max_length = 0
else:
max_length = max(height_list)
for height in height_list:
if height < 0:
height = 0
angle_list.append(height_to_angle(height/max_length))
# print(angle)
# max_angle = math.pi/2
# min_angle = min(angle_list)
for index, angle in enumerate(angle_list):
if 0 <= math.pi/2 - angle <= 2*math.pi/9:
normalized_list.append([time_list[index], 9*(math.pi/2-angle)/(4*math.pi)])
elif 2*math.pi/9 <= math.pi/2 - angle <= math.pi/2:
normalized_list.append([time_list[index], 9*(math.pi/2-angle)/(5*math.pi)-1/10])
elif math.pi/2 - angle < 0:
normalized_list.append([time_list[index], 0])
else:
normalized_list.append([time_list[index], 1])
transposed_list = np.array(normalized_list).T
transposed_list = transposed_list.astype(np.float64)
transposed_list[0] = transposed_list[0] - transposed_list[0][0]
        param, _ = curve_fit(sin, transposed_list[0], transposed_list[1])
for transposed_data_index, transposed_data in enumerate(transposed_list[0]):
normalized_list[transposed_data_index][1] = sin(transposed_data, param[0], param[1], param[2], param[3])
with open('normalized_data/' + csv_file.split('/')[1], 'w') as wf:
writer = csv.writer(wf)
for normalized_data in normalized_list:
writer.writerow(normalized_data)
return normalized_list
def main():
return
if __name__ == '__main__':
main()
``` |
{
"source": "jphacks/TK_1918",
"score": 2
} |
#### File: TK_1918/src/GazeEstimate.py
```python
import os
import sys
import time
import math
import json
import base64
import requests
import cv2
import numpy
## Settings ###################################################################
endPoint = 'http://a8b88762ef01211e9950f0eacce6e863-2021028779.ap-northeast-1.elb.amazonaws.com' # for JPHACKS 2019
proxies = {}
#proxies = {'http': 'http://proxygate2.nic.nec.co.jp:8080', 'https': 'http://proxygate2.nic.nec.co.jp:8080'}
displayFlag = False
###############################################################################
# Send Request
def sendRequest(frame):
imgGray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
height, width = imgGray.shape
imgRawArray = numpy.reshape(imgGray, (-1))
imgRaw = imgRawArray.tobytes()
# Set URL
url = endPoint + "/v1/query/gaze_byRAW"
# Set request parameter
reqPara = {
'width' : width,
'height' : height,
'raw_b64' : base64.b64encode(imgRaw).decode('utf-8')
}
# Send the request
headers = {'Content-Type' : 'application/json'}
params = {}
data = json.dumps(reqPara).encode('utf-8')
res = requests.post(url, params=params, data=data, headers=headers, proxies=proxies, timeout=10)
# Get response
if res.status_code == 200:
return res.json()
else:
print('## Error! ##')
print(res.text)
return []
def estimateGaze(img):
results = sendRequest(img)
# Show result
print(json.dumps(results, indent=4))
    if displayFlag:
width = img.shape[1]
gazeLen = width / 5
gazeColor = (0,255,0)
eyesColor = (255,0,0)
for result in results:
reye = result['reye']
leye = result['leye']
gaze = result['gaze']
# Show the result
#cv2.circle(img, (int(reye[0]), int(reye[1])), 15, eyesColor, thickness=2)
#cv2.circle(img, (int(leye[0]), int(leye[1])), 15, eyesColor, thickness=2)
center = ((reye[0]+leye[0])/2, (reye[1]+leye[1])/2)
gazeTop = (center[0] + gazeLen * math.sin(math.radians(gaze[0])), center[1] + gazeLen * math.sin(math.radians(gaze[1])))
#cv2.arrowedLine(img, (int(center[0]), int(center[1])), (int(gazeTop[0]), int(gazeTop[1])), gazeColor, thickness=2)
#cv2.imwrite("gaze_output.png",img)
#cv2.imshow('image', img)
#cv2.waitKey(0)
            #cv2.destroyAllWindows()
    return results
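# Hedged usage sketch (assumed, not part of the original file): 'face.jpg' is a placeholder
# image path, and the call only succeeds when the gaze endpoint above is reachable.
if __name__ == '__main__':
    test_img = cv2.imread('face.jpg')
    if test_img is not None:
        estimateGaze(test_img)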
``` |
{
"source": "jphalip/datacatalog-tag-engine",
"score": 2
} |
#### File: datacatalog-tag-engine/load_testing/clone_table_standalone.py
```python
from google.cloud import bigquery
from google.cloud.exceptions import NotFound
def make_copies(src_table, dest_project, dest_dataset, num_copies):
bq = bigquery.client.Client()
bq.create_dataset(dest_project + '.' + dest_dataset, exists_ok=True)
src_table = bq.get_table(src_table)
src_table_id = src_table.table_id
dest_dataset_ref = bq.dataset(dest_dataset, project=dest_project)
config = bigquery.job.CopyJobConfig()
config.write_disposition = "WRITE_TRUNCATE"
for i in range(0, num_copies):
dest_table = src_table_id + '_' + str(i)
#try:
#bq.get_table(dest_project + '.' + dest_dataset + '.' + dest_table)
#print("Table {} already exists.".format(dest_table))
#except NotFound:
dest_table_ref = dest_dataset_ref.table(dest_table)
print('attempting to create ' + dest_table_ref.table_id)
job = bq.copy_table(src_table, dest_table_ref, location='us-central1', job_config=config)
job.result()
print('finished creating ' + dest_table_ref.table_id)
if __name__ == '__main__':
src_table = 'warehouse-337221.austin_311_source.austin_311_service_requests'
dest_project = 'warehouse-337221'
dest_dataset = 'austin_311_100k'
num_copies = 100000
make_copies(src_table, dest_project, dest_dataset, num_copies)
```
#### File: datacatalog-tag-engine/load_testing/load_tables_standalone.py
```python
from google.cloud import bigquery
def load_tables(src_uri, dest_project, dest_dataset, table_prefix, start_index, end_index):
client = bigquery.client.Client()
client.create_dataset(dest_project + '.' + dest_dataset, exists_ok=True)
dest_dataset_ref = client.dataset(dest_dataset, project=dest_project)
config = bigquery.LoadJobConfig(source_format=bigquery.SourceFormat.AVRO)
for i in range(start_index, end_index):
dest_table = table_prefix + '_' + str(i)
dest_table_ref = dest_dataset_ref.table(dest_table)
load_job = client.load_table_from_uri(src_uri, dest_table_ref, job_config=config)
load_job.result()
print('loaded {}'.format(dest_table_ref.table_id))
if __name__ == '__main__':
src_uri = 'gs://austin_311/austin_311_service_requests_sample.avro'
dest_project = 'warehouse-337221'
dest_dataset = 'austin_311_500k'
table_prefix = 'austin_311_service_requests'
start_index = 252799
end_index = 252800
load_tables(src_uri, dest_project, dest_dataset, table_prefix, start_index, end_index)
```
#### File: datacatalog-tag-engine/load_testing/tag_checks.py
```python
import json
import time
import argparse
from datetime import timedelta
from datetime import datetime
from google.cloud import datacatalog
from google.cloud import bigquery
dc = datacatalog.DataCatalogClient()
def search_catalog(bq_project, bq_dataset, tag_template_project, tag_template1, tag_template2=None):
scope = datacatalog.SearchCatalogRequest.Scope()
scope.include_project_ids.append(bq_project)
scope.include_project_ids.append(tag_template_project)
request = datacatalog.SearchCatalogRequest()
request.scope = scope
parent = 'parent:' + bq_project + '.' + bq_dataset
if tag_template2 != None:
tag = 'tag:' + tag_template_project + '.' + tag_template1 + ' or tag:' + tag_template_project + '.' + tag_template2
else:
tag = 'tag:' + tag_template_project + '.' + tag_template1
query = parent + ' ' + tag
print('query string: ' + query)
request.query = query
request.page_size = 1
num_results = 0
start_time = time.time()
for result in dc.search_catalog(request):
#print('result: ' + str(result))
num_results += 1
print(str(num_results))
print('tagged assets count: ' + str(num_results))
end_time = time.time()
run_time = (end_time - start_time)
td = timedelta(seconds=run_time)
print('search catalog runtime:', td)
def list_tags(bq_project, bq_dataset, tag_template_project, tag_template):
with open('logs/list_' + bq_dataset + '.out', 'w') as log:
log.write('started scanning ' + bq_dataset + ' at ' + datetime.now().strftime('%m/%d/%Y, %H:%M:%S') + '\n')
bq = bigquery.Client()
sql = 'select table_name from ' + bq_project + '.' + bq_dataset + '.INFORMATION_SCHEMA.TABLES'
query_job = bq.query(sql)
rows = query_job.result()
for row in rows:
table_name = row.table_name
#print('processing ' + table_name)
resource = '//bigquery.googleapis.com/projects/' + bq_project + '/datasets/' + bq_dataset + '/tables/' + table_name
#print("resource: " + resource)
request = datacatalog.LookupEntryRequest()
request.linked_resource=resource
entry = dc.lookup_entry(request)
page_result = dc.list_tags(parent=entry.name)
found_tag = False
for tag in page_result:
index = tag.template.rfind('/')
attached_template = tag.template[index+1:]
#print('attached template: ' + attached_template)
if attached_template == tag_template:
found_tag = True
break
if found_tag != True:
log.write(table_name + ' is untagged \n')
log.write('finished scanning at ' + datetime.now().strftime('%m/%d/%Y, %H:%M:%S') + '\n')
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='runs tag_checks.py')
parser.add_argument('option', help='Choose search or list')
args = parser.parse_args()
bq_project = 'warehouse-337221'
bq_dataset = 'austin_311_50k'
tag_template_project = 'tag-engine-vanilla-337221'
tag_template1 = 'data_governance_50k'
#tag_template2 = 'data_governance'
if args.option == 'search':
search_catalog(bq_project, bq_dataset, tag_template_project, tag_template1)
if args.option == 'list':
list_tags(bq_project, bq_dataset, tag_template_project, tag_template1)
```
#### File: jphalip/datacatalog-tag-engine/main.py
```python
from flask import Flask, render_template, request, redirect, url_for, jsonify, json
import datetime, configparser
from google.cloud import firestore
import DataCatalogUtils as dc
import TagEngineUtils as te
import Resources as res
import constants
import JobManager as jobm
import TaskManager as taskm
from google.cloud import tasks_v2
from google.protobuf import timestamp_pb2
import datetime, time
config = configparser.ConfigParser()
config.read("tagengine.ini")
app = Flask(__name__)
teu = te.TagEngineUtils()
# handles create requests from API and on-demand update requests from API (i.e. config contains refresh_mode = ON_DEMAND)
jm = jobm.JobManager(config['DEFAULT']['TAG_ENGINE_PROJECT'], config['DEFAULT']['QUEUE_REGION'], config['DEFAULT']['INJECTOR_QUEUE'], "/_split_work")
tm = taskm.TaskManager(config['DEFAULT']['TAG_ENGINE_PROJECT'], config['DEFAULT']['QUEUE_REGION'], config['DEFAULT']['WORK_QUEUE'], "/_run_task")
##################### UI METHODS #################
@app.route("/")
def homepage():
exists, settings = teu.read_default_settings()
if exists:
template_id = settings['template_id']
project_id = settings['project_id']
region = settings['region']
else:
template_id = "{your_template_id}"
project_id = "{your_project_id}"
region = "{your_region}"
# [END default_settings]
# [START render_template]
return render_template(
'index.html',
template_id=template_id,
project_id=project_id,
region=region)
@app.route("/default_settings<int:saved>", methods=["GET"])
def default_settings(saved):
exists, settings = teu.read_default_settings()
if exists:
template_id = settings['template_id']
project_id = settings['project_id']
region = settings['region']
else:
template_id = "{your_template_id}"
project_id = "{your_project_id}"
region = "{your_region}"
# [END default_settings]
# [START render_template]
return render_template(
'default_settings.html',
template_id=template_id,
project_id=project_id,
region=region,
settings=saved)
# [END render_template]
@app.route("/coverage_settings<int:saved>")
def coverage_settings(saved):
exists, settings = teu.read_coverage_settings()
if exists:
project_ids = settings['project_ids']
datasets = settings['excluded_datasets']
tables = settings['excluded_tables']
else:
project_ids = "{projectA}, {projectB}, {projectC}"
datasets = "{project.dataset1}, {project.dataset2}, {project.dataset3}"
tables = "{project.dataset.table1}, {project.dataset.table2}, {project.dataset.view3}"
# [END report_settings]
# [START render_template]
return render_template(
'coverage_settings.html',
project_ids=project_ids,
datasets=datasets,
tables=tables,
settings=saved)
# [END render_template]
@app.route("/tag_history_settings<int:saved>", methods=["GET"])
def tag_history_settings(saved):
enabled, settings = teu.read_tag_history_settings()
if enabled:
enabled = settings['enabled']
project_id = settings['project_id']
region = settings['region']
dataset = settings['dataset']
else:
project_id = "{your_project_id}"
region = "{your_region}"
dataset = "{your_dataset}"
# [END tag_history_settings]
# [START render_template]
return render_template(
'tag_history_settings.html',
enabled=enabled,
project_id=project_id,
region=region,
dataset=dataset,
settings=saved)
# [END render_template]
@app.route("/tag_stream_settings<int:saved>", methods=["GET"])
def tag_stream_settings(saved):
enabled, settings = teu.read_tag_stream_settings()
if enabled:
enabled = settings['enabled']
project_id = settings['project_id']
topic = settings['topic']
else:
project_id = "{your_project_id}"
topic = "{your_topic}"
# [END tag_stream_settings]
# [START render_template]
return render_template(
'tag_stream_settings.html',
enabled=enabled,
project_id=project_id,
topic=topic,
settings=saved)
# [END render_template]
@app.route("/set_default", methods=['POST'])
def set_default():
template_id = request.form['template_id'].rstrip()
project_id = request.form['project_id'].rstrip()
region = request.form['region'].rstrip()
if template_id == "{your_template_id}":
template_id = None
if project_id == "{your_project_id}":
project_id = None
if region == "{your_region}":
region = None
if template_id != None or project_id != None or region != None:
teu.write_default_settings(template_id, project_id, region)
return default_settings(1)
@app.route("/set_tag_history", methods=['POST'])
def set_tag_history():
enabled = request.form['enabled'].rstrip()
project_id = request.form['project_id'].rstrip()
region = request.form['region'].rstrip()
dataset = request.form['dataset'].rstrip()
print("enabled: " + enabled)
print("project_id: " + project_id)
print("region: " + region)
print("dataset: " + dataset)
if enabled == "on":
enabled = True
else:
enabled = False
if project_id == "{your_project_id}":
project_id = None
if region == "{your_region}":
region = None
if dataset == "{your_dataset}":
dataset = None
    # can't be enabled if any of the required fields is NULL
if enabled and (project_id == None or region == None or dataset == None):
enabled = False
if project_id != None or region != None or dataset != None:
teu.write_tag_history_settings(enabled, project_id, region, dataset)
return tag_history_settings(1)
else:
return tag_history_settings(0)
@app.route("/set_tag_stream", methods=['POST'])
def set_tag_stream():
enabled = request.form['enabled'].rstrip()
project_id = request.form['project_id'].rstrip()
topic = request.form['topic'].rstrip()
print("enabled: " + enabled)
print("project_id: " + project_id)
print("topic: " + topic)
if enabled == "on":
enabled = True
else:
enabled = False
if project_id == "{your_project_id}":
project_id = None
if topic == "{your_topic}":
topic = None
    # can't be enabled if either of the required fields is NULL
if enabled and (project_id == None or topic == None):
enabled = False
if project_id != None or topic != None:
teu.write_tag_stream_settings(enabled, project_id, topic)
return tag_stream_settings(1)
else:
return tag_stream_settings(0)
@app.route("/set_coverage", methods=['POST'])
def set_coverage():
project_ids = request.form['project_ids'].rstrip()
datasets = request.form['datasets'].rstrip()
tables = request.form['tables'].rstrip()
print("project_ids: " + project_ids)
print("datasets: " + datasets)
print("tables: " + tables)
if project_ids == "{projectA}, {projectB}, {projectC}":
project_ids = None
if datasets == "{project.dataset1}, {project.dataset2}, {project.dataset3}":
datasets = None
if tables == "{project.dataset.table1}, {project.dataset.table2}, {project.dataset.view3}":
tables = None
if project_ids != None or datasets != None or tables != None:
teu.write_coverage_settings(project_ids, datasets, tables)
return coverage_settings(1)
@app.route("/coverage_report")
def coverage_report():
summary_report, detailed_report = teu.generate_coverage_report()
print('summary_report: ' + str(summary_report))
print('detailed_report: ' + str(detailed_report))
exists, settings = teu.read_coverage_settings()
project_ids = settings['project_ids']
return render_template(
"coverage_report.html",
project_ids=project_ids,
report_headers=summary_report,
report_data=detailed_report)
# TO DO: re-implement this method using the DC API
@app.route("/coverage_details<string:res>", methods=['GET'])
def coverage_details(res):
print("res: " + res)
project_id = res.split('.')[0]
resource = res.split('.')[1]
tag_configs = teu.read_tag_configs_on_res(res)
return render_template(
'view_tags_on_res.html',
resource=res,
project_id=project_id,
tag_configs=tag_configs)
# [START search_template]
@app.route('/search_template', methods=['POST'])
def search_template():
template_id = request.form['template_id']
project_id = request.form['project_id']
region = request.form['region']
dcu = dc.DataCatalogUtils(template_id, project_id, region)
fields = dcu.get_template()
print("fields: " + str(fields))
# [END search_template]
# [START render_template]
return render_template(
'tag_template.html',
template_id=template_id,
project_id=project_id,
region=region,
fields=fields)
# [END render_template]
@app.route('/choose_action', methods=['GET'])
def choose_action():
template_id = request.args.get('template_id')
project_id = request.args.get('project_id')
region = request.args.get('region')
dcu = dc.DataCatalogUtils(template_id, project_id, region)
fields = dcu.get_template()
print("fields: " + str(fields))
# [END search_template]
# [START render_template]
return render_template(
'tag_template.html',
template_id=template_id,
project_id=project_id,
region=region,
fields=fields)
# [END render_template]
# [START view_configs]
@app.route('/view_configs', methods=['GET'])
def view_configs():
template_id = request.args.get('template_id')
project_id = request.args.get('project_id')
region = request.args.get('region')
print("template_id: " + str(template_id))
print("project_id: " + str(project_id))
print("region: " + str(region))
dcu = dc.DataCatalogUtils(template_id, project_id, region)
template_fields = dcu.get_template()
history_enabled, history_settings = teu.read_tag_history_settings()
stream_enabled, stream_settings = teu.read_tag_stream_settings()
tag_configs = teu.read_tag_configs(template_id, project_id, region)
return render_template(
'view_configs.html',
template_id=template_id,
project_id=project_id,
region=region,
tag_configs=tag_configs)
# [END render_template]
# [START display_selected_action]
@app.route('/display_selected_action', methods=['POST'])
def display_selected_action():
template_id = request.form['template_id']
project_id = request.form['project_id']
region = request.form['region']
action = request.form['action']
print("template_id: " + str(template_id))
print("project_id: " + str(project_id))
print("region: " + str(region))
print("action: " + str(action))
dcu = dc.DataCatalogUtils(template_id, project_id, region)
template_fields = dcu.get_template()
history_enabled, history_settings = teu.read_tag_history_settings()
stream_enabled, stream_settings = teu.read_tag_stream_settings()
if action == "View and Edit Configs":
tag_configs = teu.read_tag_configs(template_id, project_id, region)
return render_template(
'view_configs.html',
template_id=template_id,
project_id=project_id,
region=region,
tag_configs=tag_configs)
elif action == "Create Static Config":
return render_template(
'static_config.html',
template_id=template_id,
project_id=project_id,
region=region,
fields=template_fields,
current_time=datetime.datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S'),
display_tag_history=history_enabled,
display_tag_stream=stream_enabled)
elif action == "Create Dynamic Config":
return render_template(
'dynamic_config.html',
template_id=template_id,
project_id=project_id,
region=region,
fields=template_fields,
display_tag_history=history_enabled,
display_tag_stream=stream_enabled)
else:
# this option is currently hidden as tag propagation is in alpha
return render_template(
'propagation_tag.html',
template_id=template_id,
project_id=project_id,
region=region,
fields=template_fields,
display_tag_history=history_enabled,
display_tag_stream=stream_enabled)
# [END render_template]
@app.route('/update_config', methods=['POST'])
def update_config():
template_id = request.form['template_id']
project_id = request.form['project_id']
region = request.form['region']
tag_uuid = request.form['tag_uuid']
config_type = request.form['config_type']
print("template_id: " + str(template_id))
print("project_id: " + str(project_id))
print("region: " + str(region))
print("tag_uuid: " + str(tag_uuid))
print("config_type: " + str(config_type))
tag_config = teu.read_tag_config(tag_uuid)
print("tag_config: " + str(tag_config))
dcu = dc.DataCatalogUtils(template_id, project_id, region)
template_fields = dcu.get_template()
print("fields: " + str(template_fields))
enabled, settings = teu.read_tag_history_settings()
if enabled:
tag_history = 1
else:
tag_history = 0
print("tag_history: " + str(tag_history))
enabled, settings = teu.read_tag_stream_settings()
if enabled:
tag_stream = 1
else:
tag_stream = 0
print("tag_stream: " + str(tag_stream))
if config_type == "STATIC":
# [END update_tag]
# [START render_template]
return render_template(
'update_static_config.html',
template_id=template_id,
project_id=project_id,
region=region,
fields=template_fields,
tag_config=tag_config,
current_time=datetime.datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S'),
display_tag_history_option=tag_history,
display_tag_stream_option=tag_stream)
else:
# [END display_action]
# [START render_template]
return render_template(
'update_dynamic_config.html',
template_id=template_id,
project_id=project_id,
region=region,
fields=template_fields,
tag_config=tag_config,
display_tag_history_option=tag_history,
display_tag_stream_option=tag_stream)
# [END render_template]
@app.route('/process_update_static_config', methods=['POST'])
def process_update_static_config():
template_id = request.form['template_id']
project_id = request.form['project_id']
region = request.form['region']
old_tag_uuid = request.form['tag_uuid']
included_uris = request.form['included_uris'].rstrip()
excluded_uris = request.form['excluded_uris'].rstrip()
action = request.form['action']
update_status = 0
#print("action: " + str(action))
dcu = dc.DataCatalogUtils(template_id, project_id, region)
template_fields = dcu.get_template()
if action == "Submit Tag":
fields = []
selected_fields = request.form.getlist("selected")
#print("selected_fields: " + str(selected_fields))
for selected_field in selected_fields:
selected_value = request.form.get(selected_field)
selected_type = request.form.get(selected_field + "_datatype")
print(selected_field + ", " + selected_value + ", " + selected_type)
for template_field in template_fields:
if template_field['field_id'] != selected_field:
continue
is_required = template_field['is_required']
field = {'field_id': selected_field, 'field_value': selected_value, 'field_type': selected_type, 'is_required': is_required}
fields.append(field)
break
#print('fields: ' + str(fields))
tag_history = False
if "tag_history" in request.form:
tag_history_option = request.form.get("tag_history")
if tag_history_option == "selected":
tag_history = True
tag_stream = False
if "tag_stream" in request.form:
tag_stream_option = request.form.get("tag_stream")
if tag_stream_option == "selected":
tag_stream = True
template_exists, template_uuid = teu.read_tag_template(template_id, project_id, region)
new_tag_uuid = teu.update_tag_config(old_tag_uuid, 'STATIC', 'ACTIVE', fields, included_uris, excluded_uris,\
template_uuid, None, None, None, tag_history, tag_stream)
#print('new_tag_uuid: ' + new_tag_uuid)
if isinstance(new_tag_uuid, str):
job_uuid = jm.create_job(new_tag_uuid)
else:
job_uuid = None
if job_uuid != None:
job_creation = constants.SUCCESS
else:
job_creation = constants.ERROR
template_fields = dcu.get_template()
tag_configs = teu.read_tag_configs(template_id, project_id, region)
print('template_fields: ' + str(template_fields))
# [END process_update_static_tag]
# [START render_template]
return render_template(
'view_configs.html',
template_id=template_id,
project_id=project_id,
region=region,
fields=template_fields,
tag_configs=tag_configs,
status=job_creation)
@app.route('/process_update_dynamic_config', methods=['POST'])
def process_update_dynamic_config():
template_id = request.form['template_id']
project_id = request.form['project_id']
region = request.form['region']
old_tag_uuid = request.form['tag_uuid']
included_uris = request.form['included_uris'].rstrip()
excluded_uris = request.form['excluded_uris'].rstrip()
refresh_mode = request.form['refresh_mode']
refresh_frequency = request.form['refresh_frequency'].rstrip()
refresh_unit = request.form['refresh_unit']
action = request.form['action']
update_status = 0
#print('old_tag_uuid: ' + old_tag_uuid)
#print("action: " + str(action))
dcu = dc.DataCatalogUtils(template_id, project_id, region)
template_fields = dcu.get_template()
if action == "Submit Tag":
fields = []
selected_fields = request.form.getlist("selected")
print("selected_fields: " + str(selected_fields))
for selected_field in selected_fields:
query_expression = request.form.get(selected_field)
print("query_expression: " + query_expression)
selected_field_type = request.form.get(selected_field + "_datatype")
print(selected_field + ", " + query_expression + ", " + selected_field_type)
for template_field in template_fields:
if template_field['field_id'] != selected_field:
continue
is_required = template_field['is_required']
field = {'field_id': selected_field, 'query_expression': query_expression, 'field_type': selected_field_type,\
'is_required': is_required}
fields.append(field)
break
print('fields: ' + str(fields))
tag_history = False
if "tag_history" in request.form:
tag_history_option = request.form.get("tag_history")
if tag_history_option == "selected":
tag_history = True
tag_stream = False
if "tag_stream" in request.form:
tag_stream_option = request.form.get("tag_stream")
if tag_stream_option == "selected":
tag_stream = True
template_exists, template_uuid = teu.read_tag_template(template_id, project_id, region)
new_tag_uuid = teu.update_tag_config(old_tag_uuid, 'DYNAMIC', 'PENDING', fields, included_uris, excluded_uris,\
template_uuid, refresh_mode, refresh_frequency, refresh_unit, \
tag_history, tag_stream)
job_uuid = jm.create_job(new_tag_uuid)
if job_uuid != None:
job_creation = constants.SUCCESS
else:
job_creation = constants.ERROR
template_fields = dcu.get_template()
print('template_fields: ' + str(template_fields))
tag_configs = teu.read_tag_configs(template_id, project_id, region)
# [END process_update_dynamic_config]
# [START render_template]
return render_template(
'view_configs.html',
template_id=template_id,
project_id=project_id,
region=region,
fields=template_fields,
tag_configs=tag_configs,
status=job_creation)
@app.route('/process_static_config', methods=['POST'])
def process_static_config():
template_id = request.form['template_id']
project_id = request.form['project_id']
region = request.form['region']
included_uris = request.form['included_uris'].rstrip()
excluded_uris = request.form['excluded_uris'].rstrip()
action = request.form['action']
print('included_uris: ' + included_uris)
print('excluded_uris: ' + excluded_uris)
dcu = dc.DataCatalogUtils(template_id, project_id, region)
template = dcu.get_template()
if action == "Cancel Changes":
return render_template(
'tag_template.html',
template_id=template_id,
project_id=project_id,
region=region,
fields=template)
fields = []
selected_fields = request.form.getlist("selected")
print("selected_fields: " + str(selected_fields))
for selected_field in selected_fields:
selected_value = request.form.get(selected_field)
selected_type = request.form.get(selected_field + "_datatype")
print(selected_field + ", " + selected_value + ", " + selected_type)
for template_field in template:
if template_field['field_id'] != selected_field:
continue
is_required = template_field['is_required']
field = {'field_id': selected_field, 'field_value': selected_value, 'field_type': selected_type, 'is_required': is_required}
fields.append(field)
break
print('fields: ' + str(fields))
tag_history_option = False
tag_history_enabled = "OFF"
if "tag_history" in request.form:
tag_history = request.form.get("tag_history")
if tag_history == "selected":
tag_history_option = True
tag_history_enabled = "ON"
tag_stream_option = False
tag_stream_enabled = "OFF"
if "tag_stream" in request.form:
tag_stream = request.form.get("tag_stream")
if tag_stream == "selected":
tag_stream_option = True
tag_stream_enabled = "ON"
template_uuid = teu.write_tag_template(template_id, project_id, region)
tag_uuid, included_uris_hash = teu.write_static_config('PENDING', fields, included_uris, excluded_uris, template_uuid,\
tag_history_option, tag_stream_option)
if isinstance(tag_uuid, str):
job_uuid = jm.create_job(tag_uuid)
if job_uuid != None:
job_creation = constants.SUCCESS
else:
job_creation = constants.ERROR
# [END process_static_tag]
# [START render_template]
return render_template(
'submitted_static_config.html',
template_id=template_id,
project_id=project_id,
region=region,
fields=fields,
included_uris=included_uris,
excluded_uris=excluded_uris,
tag_history=tag_history_enabled,
tag_stream=tag_stream_enabled,
status=job_creation)
# [END render_template]
@app.route('/process_dynamic_config', methods=['POST'])
def process_dynamic_config():
template_id = request.form['template_id']
project_id = request.form['project_id']
region = request.form['region']
included_uris = request.form['included_uris'].rstrip()
excluded_uris = request.form['excluded_uris'].rstrip()
refresh_mode = request.form['refresh_mode']
refresh_frequency = request.form['refresh_frequency']
refresh_unit = request.form['refresh_unit']
action = request.form['action']
#print('included_uris: ' + included_uris)
#print('excluded_uris: ' + excluded_uris)
#print('refresh_mode: ' + refresh_mode)
#print('refresh_frequency: ' + refresh_frequency)
#print('refresh_unit: ' + refresh_unit)
dcu = dc.DataCatalogUtils(template_id, project_id, region)
template = dcu.get_template()
if action == "Cancel Changes":
return render_template(
'tag_template.html',
template_id=template_id,
project_id=project_id,
region=region,
fields=template)
fields = []
selected_fields = request.form.getlist("selected")
#print("selected_fields: " + str(selected_fields))
for selected_field in selected_fields:
query_expression = request.form.get(selected_field)
#print("query_expression: " + query_expression)
selected_field_type = request.form.get(selected_field + "_datatype")
#print("selected_field_type: " + selected_field_type)
print(selected_field + ", " + query_expression + ", " + selected_field_type)
for template_field in template:
if template_field['field_id'] != selected_field:
continue
is_required = template_field['is_required']
field = {'field_id': selected_field, 'query_expression': query_expression, 'field_type': selected_field_type,\
'is_required': is_required}
fields.append(field)
break
#print('fields: ' + str(fields))
tag_history_option = False
tag_history_enabled = "OFF"
if "tag_history" in request.form:
tag_history = request.form.get("tag_history")
if tag_history == "selected":
tag_history_option = True
tag_history_enabled = "ON"
tag_stream_option = False
tag_stream_enabled = "OFF"
if "tag_stream" in request.form:
tag_stream = request.form.get("tag_stream")
if tag_stream == "selected":
tag_stream_option = True
tag_stream_enabled = "ON"
template_uuid = teu.write_tag_template(template_id, project_id, region)
tag_uuid, included_uris_hash = teu.write_dynamic_config('PENDING', fields, included_uris, excluded_uris, template_uuid,\
refresh_mode, refresh_frequency, refresh_unit, \
tag_history_option, tag_stream_option)
if isinstance(tag_uuid, str):
job_uuid = jm.create_job(tag_uuid)
else:
job_uuid = None
if job_uuid != None:
job_creation = constants.SUCCESS
else:
job_creation = constants.ERROR
# [END process_dynamic_tag]
# [START render_template]
return render_template(
'submitted_dynamic_config.html',
template_id=template_id,
project_id=project_id,
region=region,
fields=fields,
included_uris=included_uris,
included_uris_hash=included_uris_hash,
excluded_uris=excluded_uris,
refresh_mode=refresh_mode,
refresh_frequency=refresh_frequency,
refresh_unit=refresh_unit,
tag_history=tag_history_enabled,
tag_stream=tag_stream_enabled,
status=job_creation)
# [END render_template]
##################### API METHODS #################
"""
Args:
template_id: tag template to use
project_id: tag template's Google Cloud project
region: tag template's region
included_uris: The included_uris value
excluded_uris: The excluded_uris value (optional)
fields: list of selected fields containing field name, field type and query expression
refresh_mode: AUTO or ON_DEMAND
refresh_frequency: positive integer
refresh_unit: minutes or hours
tag_history: true if tag history is on, false otherwise
tag_stream: true if tag stream is on, false otherwise
Returns:
job_uuid
"""
@app.route("/dynamic_create", methods=['POST'])
def dynamic_create():
json = request.get_json(force=True)
print('json: ' + str(json))
template_id = json['template_id']
project_id = json['project_id']
region = json['region']
print('template_id: ' + template_id)
print('project_id: ' + project_id)
print('region: ' + region)
template_uuid = teu.write_tag_template(template_id, project_id, region)
fields = json['fields']
excluded_uris = json['excluded_uris']
included_uris = json['included_uris']
refresh_mode = json['refresh_mode']
if 'refresh_frequency' in json:
refresh_frequency = json['refresh_frequency']
else:
refresh_frequency = ''
if 'refresh_unit' in json:
refresh_unit = json['refresh_unit']
else:
refresh_unit = ''
tag_history = json['tag_history']
tag_stream = json['tag_stream']
tag_uuid, included_uris_hash = teu.write_dynamic_config('PENDING', fields, included_uris, excluded_uris, template_uuid,\
refresh_mode, refresh_frequency, refresh_unit, \
tag_history, tag_stream)
if isinstance(tag_uuid, str):
job_uuid = jm.create_job(tag_uuid)
else:
job_uuid = None
return jsonify(job_uuid=job_uuid)
"""
Args:
template_id: tag template to use
project_id: tag template's Google Cloud project
region: tag template's region
included_uris: The included_uris value
excluded_uris: The excluded_uris value (optional)
fields: list of selected fields containing field name, field type and query expression
tag_history: true if tag history is on, false otherwise
tag_stream: true if tag stream is on, false otherwise
Returns:
job_uuid
"""
@app.route("/static_create", methods=['POST'])
def static_create():
json = request.get_json(force=True)
print('json: ' + str(json))
template_id = json['template_id']
project_id = json['project_id']
region = json['region']
print('template_id: ' + template_id)
print('project_id: ' + project_id)
print('region: ' + region)
template_uuid = teu.write_tag_template(template_id, project_id, region)
fields = json['fields']
excluded_uris = json['excluded_uris']
included_uris = json['included_uris']
tag_history = json['tag_history']
tag_stream = json['tag_stream']
tag_uuid, included_uris_hash = teu.write_static_config('PENDING', fields, included_uris, excluded_uris, template_uuid,\
tag_history, tag_stream)
if isinstance(tag_uuid, str):
job_uuid = jm.create_job(tag_uuid)
else:
job_uuid = None
return jsonify(job_uuid=job_uuid)
"""
Args:
template_id: tag template to use
project_id: tag template's Google Cloud project
region: tag template's region
included_uris: tag config's included uris or
included_uris_hash: tag config's md5 hash value (in place of the included_uris)
Note: caller must provide either the included_uris_hash or included_uris
Returns:
job_uuid = unique identifier for job
"""
@app.route("/dynamic_ondemand_update", methods=['POST'])
def dynamic_ondemand_update():
json = request.get_json(force=True)
template_id = json['template_id']
project_id = json['project_id']
region = json['region']
template_exists, template_uuid = teu.read_tag_template(template_id, project_id, region)
if not template_exists:
print("tag_template " + template_id + " doesn't exist")
resp = jsonify(success=False)
return resp
if 'included_uris' in json:
included_uris = json['included_uris']
success, tag_config = teu.lookup_tag_config_by_included_uris(template_uuid, included_uris, None)
elif 'included_uris_hash' in json:
included_uris_hash = json['included_uris_hash']
success, tag_config = teu.lookup_tag_config_by_included_uris(template_uuid, None, included_uris_hash)
else:
resp = jsonify(success=False, message="Request is missing required parameter included_uris or included_uris_hash.")
return resp
if success != True:
print("tag config not found " + str(tag_config))
resp = jsonify(success=False, message="Tag config not found.")
return resp
# process the update request
if tag_config['refresh_mode'] == 'AUTO':
print("tag config == AUTO" + str(tag_config))
resp = jsonify(success=False, message="Tag config has refresh_mode='AUTO'. Update config to refresh_mode='ON-DEMAND' prior to calling this method.")
return resp
if isinstance(tag_config['tag_uuid'], str):
job_uuid = jm.create_job(tag_config['tag_uuid'])
else:
job_uuid = None
return jsonify(job_uuid=job_uuid)
#[END dynamic_ondemand_update]
"""
Args:
job_uuid = unique identifier for job
Returns:
job_status = one of (PENDING, RUNNING, COMPLETE, ERROR)
task_count = number of tasks associated with this job
tasks_ran = number of tasks that have run
tasks_completed = number of tasks which have completed
tasks_failed = number of tasks which have failed
"""
@app.route("/get_job_status", methods=['POST'])
def get_job_status():
json = request.get_json(force=True)
job_uuid = json['job_uuid']
job = jm.get_job_status(job_uuid)
if job is None:
return jsonify(success=False, message="job_uuid " + job_uuid + " cannot be found.")
else:
return jsonify(success=True, job_status=job['job_status'], task_count=job['task_count'], tasks_ran=job['tasks_ran'],\
tasks_completed=job['tasks_completed'], tasks_failed=job['tasks_failed'])
"""
Method called by Cloud Scheduler entry to update the tags from all active dynamic configs which are set to auto
Args:
None
Returns:
True if the request succeeded, False otherwise
"""
@app.route("/dynamic_auto_update", methods=['POST'])
def dynamic_auto_update():
try:
print('*** enter dynamic_auto_update ***')
jobs = []
configs = teu.read_ready_configs()
for tag_uuid in configs:
if isinstance(tag_uuid, str):
teu.update_config_status(tag_uuid, 'PENDING')
teu.increment_version_next_run(tag_uuid)
job_uuid = jm.create_job(tag_uuid)
jobs.append(job_uuid)
print('created jobs: ' + str(jobs))
resp = jsonify(success=True, job_ids=json.dumps(jobs))
except Exception as e:
print('failed dynamic_auto_update {}'.format(e))
resp = jsonify(success=False, message='failed dynamic_auto_update ' + str(e))
return resp
################ INTERNAL PROCESSING METHODS #################
@app.route("/_split_work", methods=['POST'])
def _split_work():
print('*** enter _split_work ***')
json = request.get_json(force=True)
job_uuid = json['job_uuid']
tag_uuid = json['tag_uuid']
config = teu.read_tag_config(tag_uuid)
uris = res.Resources.get_resources(config['included_uris'], config['excluded_uris'])
print('uris: ' + str(uris))
jm.record_num_tasks(job_uuid, len(uris))
jm.update_job_running(job_uuid)
tm.create_work_tasks(job_uuid, tag_uuid, uris)
teu.update_config_status(tag_uuid, 'RUNNING')
resp = jsonify(success=True)
return resp
@app.route("/_run_task", methods=['POST'])
def _run_task():
print('*** enter _run_task ***')
json = request.get_json(force=True)
job_uuid = json['job_uuid']
tag_uuid = json['tag_uuid']
uri = json['uri']
task_uuid = json['task_uuid']
print('task_uuid: ' + task_uuid)
tm.update_task_status(task_uuid, 'RUNNING')
# retrieve tag config and template
tag_config = teu.read_tag_config(tag_uuid)
tem_config = teu.read_template_config(tag_config['template_uuid'])
dcu = dc.DataCatalogUtils(tem_config['template_id'], tem_config['project_id'], tem_config['region'])
creation_status = constants.ERROR
if tag_config['config_type'] == 'DYNAMIC':
creation_status = dcu.create_update_dynamic_configs(tag_config['fields'], None, None, uri, tag_config['tag_uuid'], \
tag_config['template_uuid'], tag_config['tag_history'], \
tag_config['tag_stream'])
if tag_config['config_type'] == 'STATIC':
creation_status = dcu.create_update_static_configs(tag_config['fields'], None, None, uri, tag_config['tag_uuid'], \
tag_config['template_uuid'], tag_config['tag_history'], \
tag_config['tag_stream'])
if creation_status == constants.SUCCESS:
tm.update_task_status(task_uuid, 'COMPLETED')
is_success, is_failed, pct_complete = jm.calculate_job_completion(job_uuid)
if is_success:
teu.update_config_status(tag_uuid, 'ACTIVE')
teu.update_scheduling_status(tag_uuid, 'READY')
elif is_failed:
teu.update_config_status(tag_uuid, 'ERROR')
else:
teu.update_config_status(tag_uuid, 'PROCESSING: {}% complete'.format(pct_complete))
resp = jsonify(success=True)
else:
tm.update_task_status(task_uuid, 'FAILED')
jm.update_job_failed(job_uuid)
teu.update_config_status(tag_uuid, 'ERROR')
resp = jsonify(success=False)
return resp
#[END _run_task]
################ TAG PROPAGATION METHODS #################
@app.route("/propagation_settings<int:saved>")
def propagation_settings(saved):
exists, settings = teu.read_propagation_settings()
if exists:
source_project_ids = settings['source_project_ids']
dest_project_ids = settings['dest_project_ids']
excluded_datasets = settings['excluded_datasets']
job_frequency = settings['job_frequency']
else:
source_project_ids = "{projectA}, {projectB}, {projectC}"
dest_project_ids = "{projectD}, {projectE}, {projectF}"
excluded_datasets = "{projectA}.{dataset1}, {projectB}.{dataset2}"
job_frequency = "24"
# [END propagation_settings]
# [START render_template]
return render_template(
'propagation_settings.html',
source_project_ids=source_project_ids,
dest_project_ids=dest_project_ids,
excluded_datasets=excluded_datasets,
job_frequency=job_frequency,
settings=saved)
# [END render_template]
@app.route("/set_propagation", methods=['POST'])
def set_propagation():
source_project_ids = request.form['source_project_ids'].rstrip()
dest_project_ids = request.form['dest_project_ids'].rstrip()
excluded_datasets = request.form['excluded_datasets'].rstrip()
job_frequency = request.form['job_frequency'].rstrip()
if source_project_ids == "{projectA}, {projectB}, {projectC}":
source_project_ids = None
if dest_project_ids == "{projectD}, {projectE}, {projectF}":
dest_project_ids = None
if excluded_datasets == "{projectA}.{dataset1}, {projectB}.{dataset2}":
excluded_datasets = None
if source_project_ids != None or dest_project_ids != None:
teu.write_propagation_settings(source_project_ids, dest_project_ids, excluded_datasets, job_frequency)
return propagation_settings(1)
@app.route("/propagation_report", methods=['GET', 'POST'])
def propagation_report():
exists, settings = teu.read_propagation_settings()
method = request.method
if method == 'POST':
run_propagation()
if exists == True:
source_project_ids = settings['source_project_ids']
dest_project_ids = settings['dest_project_ids']
excluded_datasets = settings['excluded_datasets']
project_ids = source_project_ids
project_list = dest_project_ids.split(",")
for dest_project in project_list:
if dest_project not in project_ids:
project_ids = project_ids + ", " + dest_project
report_data, last_run = teu.generate_propagation_report()
if last_run is not None:
last_run = last_run.strftime('%Y-%m-%d %H:%M:%S')
#print('last_run: ' + str(last_run))
else:
last_run = 'Never'
return render_template(
"propagation_report.html",
project_ids=project_ids,
report_data=report_data,
last_run=last_run)
else:
return render_template(
"propagation_settings.html")
@app.route("/run_propagation", methods=['POST'])
def run_propagation():
exists, settings = teu.read_propagation_settings()
if exists == True:
source_project_ids = settings['source_project_ids']
dest_project_ids = settings['dest_project_ids']
excluded_datasets = settings['excluded_datasets']
teu.run_propagation_job(source_project_ids, dest_project_ids, excluded_datasets)
resp = jsonify(success=True)
else:
resp = jsonify(success=False)
return resp
@app.route("/propagated_details", methods=['POST'])
def propagated_details():
template_uuid = request.form['template_uuid']
view_tag_uuid = request.form['view_tag_uuid']
source_res = request.form['source_res']
view_res = request.form['view_res']
print("template_uuid: " + template_uuid)
print("view_tag_uuid: " + view_tag_uuid)
propagated_tag_config = teu.read_propagated_config(view_tag_uuid)
template_config = teu.read_template_config(template_uuid)
source_res_list = propagated_tag_config['source_res']
source_res_full = ','.join(source_res_list)
view_res_full = propagated_tag_config['view_res']
# construct included_uris from propagated_tag_config
if 'cols' in propagated_tag_config.keys():
included_uris = ""
for col in propagated_tag_config['cols']:
if col != "":
included_uris = included_uris + view_res + "/" + col + ", "
else:
included_uris = included_uris + view_res + ", "
included_uris = included_uris[0:-2]
print("included_uris: " + included_uris)
else:
included_uris = 'bigquery/project/' + propagated_tag_config['view_res']
print("included_uris: " + included_uris)
return render_template(
'view_propagated_tag_on_res.html',
source_res_full=source_res_full,
view_res_full=view_res_full,
template_id=template_config['template_id'],
propagated_tag_config=propagated_tag_config,
included_uris=included_uris)
@app.route('/update_propagated_tag', methods=['POST'])
def update_propagated_tag():
template_uuid = request.form['template_uuid']
tag_uuid = request.form['tag_uuid']
config_type = request.form['config_type']
print("template_uuid: " + str(template_uuid))
print("tag_uuid: " + str(tag_uuid))
print("config_type: " + str(config_type))
propagated_tag_config = teu.read_propagated_tag_config(tag_uuid)
print("propagated_tag_config: " + str(propagated_tag_config))
view_res = propagated_tag_config['view_res'].replace('/datasets', '').replace('/tables', '')
source_res_list = propagated_tag_config['source_res']
source_res = ','.join(source_res_list)
source_res = source_res.replace('/datasets', '').replace('/tables', '')
# construct included_uris from propagated_tag_config
if 'cols' in propagated_tag_config.keys():
included_uris = ""
for col in propagated_tag_config['cols']:
if col != "":
included_uris = included_uris + view_res + "/" + col + ", "
else:
included_uris = included_uris + view_res + ", "
included_uris = included_uris[0:-2]
print("included_uris: " + included_uris)
else:
included_uris = 'bigquery/project/' + propagated_tag_config['view_res']
template_config = teu.read_template_config(template_uuid)
template_id = template_config['template_id']
project_id = template_config['project_id']
region = template_config['region']
dcu = dc.DataCatalogUtils(template_id, project_id, region)
template_fields = dcu.get_template()
print("fields: " + str(template_fields))
if config_type == "STATIC":
# [END update_tag]
# [START render_template]
return render_template(
'override_static_config.html',
template_id=template_id,
project_id=project_id,
region=region,
view_res=view_res,
source_res=source_res,
fields=template_fields,
propagated_tag_config=propagated_tag_config,
included_uris=included_uris,
current_time=datetime.datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S'))
else:
# [END display_action]
# [START render_template]
return render_template(
'override_dynamic_config.html',
template_id=template_id,
project_id=project_id,
region=region,
view_res=view_res,
source_res=source_res,
fields=template_fields,
propagated_tag_config=propagated_tag_config,
included_uris=included_uris)
# [END render_template]
@app.route('/override_propagated_dynamic_tag', methods=['POST'])
def override_propagated_dynamic_tag():
template_id = request.form['template_id']
project_id = request.form['project_id']
region = request.form['region']
tag_uuid = request.form['tag_uuid']
included_uris = request.form['included_uris'].rstrip()
refresh_frequency = request.form['refresh_frequency'].rstrip()
action = request.form['action']
print('tag_uuid: ' + tag_uuid)
print("action: " + str(action))
dcu = dc.DataCatalogUtils(template_id, project_id, region)
template_fields = dcu.get_template()
if action != "Cancel Changes":
fields = []
selected_fields = request.form.getlist("selected")
#print("selected_fields: " + str(selected_fields))
for selected_field in selected_fields:
query_expression = request.form.get(selected_field)
#print("query_expression: " + query_expression)
selected_field_type = request.form.get(selected_field + "_datatype")
print(selected_field + ", " + query_expression + ", " + selected_field_type)
for template_field in template_fields:
if template_field['field_id'] != selected_field:
continue
is_required = template_field['is_required']
field = {'field_id': selected_field, 'query_expression': query_expression, 'field_type': selected_field_type, 'is_required': is_required}
fields.append(field)
break
#print('fields: ' + str(fields))
template_exists, template_uuid = teu.read_tag_template(template_id, project_id, region)
# TO DO: process included_uris changes
# for now, assume columns is empty
columns = []
if action == 'Fork Tag and Save Changes':
config_status = 'CONFLICT AND FORKED'
else:
config_status = 'PROPAGATED AND FORKED'
propagated_tag_config = teu.fork_propagated_tag(tag_uuid, config_status, fields, refresh_frequency)
source_res = propagated_tag_config['source_res']
view_res = propagated_tag_config['view_res']
fields = propagated_tag_config['fields']
source_tag_uuid = propagated_tag_config['source_tag_uuid']
view_tag_uuid = propagated_tag_config['view_tag_uuid']
template_uuid = propagated_tag_config['template_uuid']
print('source_res: ' + str(source_res))
print('view_res: ' + view_res)
print('fields: ' + str(fields))
print('source_tag_uuid: ' + str(source_tag_uuid))
print('view_tag_uuid: ' + view_tag_uuid)
print('template_uuid: ' + template_uuid)
update_status = dcu.create_update_dynamic_propagated_tag(config_status, source_res, view_res, columns, fields, source_tag_uuid, view_tag_uuid, template_uuid)
if update_status == constants.SUCCESS:
print('override_propagated_dynamic_tags SUCCEEDED.')
else:
print('override_propagated_dynamic_tags FAILED.')
else:
propagated_tag_config = teu.read_propagated_config(tag_uuid)
view_res = propagated_tag_config['view_res'].replace('/datasets', '').replace('/tables', '')
included_uris = 'bigquery/project/' + propagated_tag_config['view_res']
source_res_list = propagated_tag_config['source_res']
source_res_full = ','.join(source_res_list)
source_res_full = source_res_full.replace('/datasets', '').replace('/tables', '')
# [END override_propagated_dynamic_tag]
# [START render_template]
return render_template(
'view_propagated_tag_on_res.html',
source_res_full=source_res_full,
view_res=view_res,
included_uris=included_uris,
template_id=template_id,
propagated_tag_config=propagated_tag_config)
@app.route('/override_propagated_static_tag', methods=['POST'])
def override_propagated_static_tag():
template_id = request.form['template_id']
project_id = request.form['project_id']
region = request.form['region']
tag_uuid = request.form['tag_uuid']
included_uris = request.form['included_uris'].rstrip()
action = request.form['action']
print('tag_uuid: ' + tag_uuid)
print("action: " + str(action))
dcu = dc.DataCatalogUtils(template_id, project_id, region)
template_fields = dcu.get_template()
if action != "Cancel Changes":
fields = []
selected_fields = request.form.getlist("selected")
print("selected_fields: " + str(selected_fields))
for selected_field in selected_fields:
selected_value = request.form.get(selected_field)
selected_type = request.form.get(selected_field + "_datatype")
print(selected_field + ", " + selected_value + ", " + selected_type)
for template_field in template_fields:
if template_field['field_id'] != selected_field:
continue
is_required = template_field['is_required']
field = {'field_id': selected_field, 'field_value': selected_value, 'field_type': selected_type, 'is_required': is_required}
fields.append(field)
break
#print('fields: ' + str(fields))
template_exists, template_uuid = teu.read_tag_template(template_id, project_id, region)
# TO DO: process included_uris changes (compare values to cols)
# for now assume that columns is empty
columns = []
if action == 'Fork Tag and Save Changes':
config_status = 'CONFLICT AND FORKED'
else:
config_status = 'PROPAGATED AND FORKED'
propagated_tag_config = teu.fork_propagated_tag(tag_uuid, config_status, fields, refresh_frequency=None)
source_res = propagated_tag_config['source_res']
view_res = propagated_tag_config['view_res']
fields = propagated_tag_config['fields']
source_tag_uuid = propagated_tag_config['source_tag_uuid']
view_tag_uuid = propagated_tag_config['view_tag_uuid']
template_uuid = propagated_tag_config['template_uuid']
update_status = dcu.create_update_static_propagated_tag(config_status, source_res, view_res, columns, fields, source_tag_uuid, view_tag_uuid, template_uuid)
if update_status == constants.SUCCESS:
print('override_propagated_static_tags SUCCEEDED.')
else:
print('override_propagated_static_tags FAILED.')
else:
propagated_tag_config = teu.read_propagated_config(tag_uuid)
view_res = propagated_tag_config['view_res'].replace('/datasets', '').replace('/tables', '')
included_uris = 'bigquery/project/' + propagated_tag_config['view_res']
source_res_list = propagated_tag_config['source_res']
source_res_full = ','.join(source_res_list)
source_res_full = source_res_full.replace('/datasets', '').replace('/tables', '')
# [END override_propagated_static_tag]
# [START render_template]
return render_template(
'view_propagated_tag_on_res.html',
source_res_full=source_res_full,
view_res=view_res,
included_uris=included_uris,
template_id=template_id,
propagated_tag_config=propagated_tag_config)
###########################################################
@app.route("/ping", methods=['GET'])
def ping():
return "Tag Engine is alive"
#[END ping]
@app.errorhandler(500)
def server_error(e):
# Log the error and stacktrace.
#logging.exception('An error occurred during a request.')
return 'An internal error occurred: ' + str(e), 500
# [END app]
if __name__ == "__main__":
app.run()
``` |
{
"source": "jphalip/django-react-djangocon2015",
"score": 2
} |
#### File: django-react-djangocon2015/project/runner.py
```python
from subprocess import Popen, PIPE
from django.test.runner import DiscoverRunner
from django.conf import settings
from react.render_server import render_server
TEST_REACT_SERVER_HOST = getattr(settings, 'TEST_REACT_SERVER_HOST', '127.0.0.1')
TEST_REACT_SERVER_PORT = getattr(settings, 'TEST_REACT_SERVER_PORT', 9008)
class CustomTestRunner(DiscoverRunner):
"""
Same as the default Django test runner, except it also runs our node
server as a subprocess so we can render React components.
"""
def setup_test_environment(self, **kwargs):
# Start the test node server
self.node_server = Popen(
[
'node',
'react-server.js',
'--host=%s' % TEST_REACT_SERVER_HOST,
'--port=%s' % TEST_REACT_SERVER_PORT
],
stdout=PIPE
)
# Wait until the server is ready before proceeding
self.node_server.stdout.readline()
# Point the renderer to our new test server
settings.REACT = {
'RENDER': True,
'RENDER_URL': 'http://%s:%s' % (
TEST_REACT_SERVER_HOST, TEST_REACT_SERVER_PORT
),
}
super(CustomTestRunner, self).setup_test_environment(**kwargs)
def teardown_test_environment(self, **kwargs):
# Kill the node server
self.node_server.terminate()
super(CustomTestRunner, self).teardown_test_environment(**kwargs)
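# To activate this runner, point Django's TEST_RUNNER setting at it
# (the dotted path assumes this module lives at project/runner.py, as above):
#   TEST_RUNNER = 'project.runner.CustomTestRunner'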
``` |
{
"source": "jphalip/fabric",
"score": 2
} |
#### File: fabric/integration/test_contrib.py
```python
import os
import re
from fabric.api import run, local
from fabric.contrib import files, project
from utils import Integration
def tildify(path):
home = run("echo ~", quiet=True).stdout.strip()
return path.replace('~', home)
def expect(path):
assert files.exists(tildify(path))
def expect_contains(path, value):
assert files.contains(tildify(path), value)
def escape(path):
return path.replace(' ', r'\ ')
class FileCleaner(Integration):
def setup(self):
self.local = []
self.remote = []
def teardown(self):
super(FileCleaner, self).teardown()
for created in self.local:
os.unlink(created)
for created in self.remote:
run("rm %s" % escape(created))
class TestTildeExpansion(FileCleaner):
def test_append(self):
for target in ('~/append_test', '~/append_test with spaces'):
self.remote.append(target)
files.append(target, ['line'])
expect(target)
def test_exists(self):
for target in ('~/exists_test', '~/exists test with space'):
self.remote.append(target)
run("touch %s" % escape(target))
expect(target)
def test_sed(self):
for target in ('~/sed_test', '~/sed test with space'):
self.remote.append(target)
run("echo 'before' > %s" % escape(target))
files.sed(target, 'before', 'after')
expect_contains(target, 'after')
def test_upload_template(self):
for i, target in enumerate((
'~/upload_template_test',
'~/upload template test with space'
)):
src = "source%s" % i
local("touch %s" % src)
self.local.append(src)
self.remote.append(target)
files.upload_template(src, target)
expect(target)
class TestIsLink(FileCleaner):
# TODO: add more of these. meh.
def test_is_link_is_true_on_symlink(self):
self.remote.extend(['/tmp/foo', '/tmp/bar'])
run("touch /tmp/foo")
run("ln -s /tmp/foo /tmp/bar")
assert files.is_link('/tmp/bar')
def test_is_link_is_false_on_non_link(self):
self.remote.append('/tmp/biz')
run("touch /tmp/biz")
assert not files.is_link('/tmp/biz')
rsync_sources = (
'integration/',
'integration/test_contrib.py',
'integration/test_operations.py',
'integration/utils.py'
)
class TestRsync(Integration):
def rsync(self, id_, **kwargs):
remote = '/tmp/rsync-test-%s/' % id_
if files.exists(remote):
run("rm -rf %s" % remote)
return project.rsync_project(
remote_dir=remote,
local_dir='integration',
ssh_opts='-o StrictHostKeyChecking=no',
capture=True,
**kwargs
)
def test_existing_default_args(self):
"""
Rsync uses -v by default
"""
r = self.rsync(1)
for x in rsync_sources:
assert re.search(r'^%s$' % x, r.stdout, re.M), "'%s' was not found in '%s'" % (x, r.stdout)
def test_overriding_default_args(self):
"""
The default_opts kwarg can be used to override the defaults, e.g. to drop -v
"""
r = self.rsync(2, default_opts='-pthrz')
for x in rsync_sources:
assert not re.search(r'^%s$' % x, r.stdout, re.M), "'%s' was found in '%s'" % (x, r.stdout)
class TestUploadTemplate(FileCleaner):
def test_allows_pty_disable(self):
src = "source_file"
target = "remote_file"
local("touch %s" % src)
self.local.append(src)
self.remote.append(target)
# Just make sure it doesn't asplode. meh.
files.upload_template(src, target, pty=False)
expect(target)
``` |
{
"source": "jphalip/glamkit-feincmstools",
"score": 2
} |
#### File: management/commands/repair_tree.py
```python
from itertools import count
from optparse import make_option
from django.core.management.base import LabelCommand
from django.db.models.loading import get_model
from mptt.models import MPTTModel
class Parser(object):
def __init__(self, model):
self.tree = {}
self.parsed = []
self.found = []
self.counter = count(1)
self.model = model
self.build_tree()
def parse_item(self, item):
if item in self.parsed:
return self.parsed[self.parsed.index(item)].level
if item.parent:
if item.parent not in self.parsed:
print 'Uh-oh, encountered a child %s with unparsed parent %s.' % (item, item.parent)
else:
item.parent = self.parsed[self.parsed.index(item.parent)]
item.level = self.parse_item(item.parent) + 1
item.tree_id = item.parent.tree_id
else:
item.tree_id = self.counter.next()
item.level = 0
if item.tree_id not in self.tree:
self.tree[item.tree_id] = [item,item]
else:
self.tree[item.tree_id].insert(
self.tree[item.tree_id].index(
item.parent,
self.tree[item.tree_id].index(item.parent) + 1),
item)
self.tree[item.tree_id].insert(
self.tree[item.tree_id].index(item),
item)
self.parsed.append(item)
return item.level
def build_tree(self):
for item in self.model.objects.order_by('lft', 'tree_id'):
self.parse_item(item)
for subtree in self.tree.values():
for idx, item in enumerate(subtree, 1):
if item not in self.found:
item.lft = idx
self.found.append(item)
else:
item.rght = idx
def save(self):
for item in self.found:
item.save()
class Command(LabelCommand):
args = '<app.Model app.Model ...>'
label = 'app.Model'
help = 'Repair a corrupt MPTT tree for specified model (in app.Model format).'
def handle_label(self, arg, **options):
verbosity = int(options.get('verbosity', 1))
assert len(arg.split('.')) == 2, 'Arguments must be in app.Model format.'
model = get_model(*arg.split('.'))
assert issubclass(model, MPTTModel), 'The model must be an MPTT model.'
parser = Parser(model)
parser.save()
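# Usage (run from the project root; the app.Model labels below are illustrative):
#   python manage.py repair_tree pages.Page blog.Category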
```
#### File: feincmstools/templatetags/feincmstools_tags.py
```python
import os
from django import template
from feincms.templatetags.feincms_tags import feincms_render_content
register = template.Library()
@register.filter
def is_parent_of(page1, page2):
"""
Determines whether a given page is the parent of another page
Example:
{% if page|is_parent_of:feincms_page %} ... {% endif %}
"""
if page1 is None:
return False
return (page1.tree_id == page2.tree_id and
page1.lft < page2.lft and
page1.rght > page2.rght)
@register.filter
def is_equal_or_parent_of(page1, page2):
return (page1.tree_id == page2.tree_id and
page1.lft <= page2.lft and
page1.rght >= page2.rght)
@register.filter
def is_sibling_of(page1, page2):
"""
Determines whether a given page is a sibling of another page
{% if page|is_sibling_of:feincms_page %} ... {% endif %}
"""
if page1 is None or page2 is None:
return False
return (page1.parent_id == page2.parent_id)
@register.filter
def get_extension(filename):
""" Return the extension from a file name """
return os.path.splitext(filename)[1][1:]
@register.assignment_tag(takes_context=True)
def feincms_render_content_as(context, content, request=None):
return feincms_render_content(context, content, request)
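# Template usage sketch for the assignment tag above ('content' and 'request'
# are whatever names the surrounding template context provides; illustrative):
#   {% load feincmstools_tags %}
#   {% feincms_render_content_as content request as rendered %}
#   {{ rendered }}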
```
#### File: glamkit-feincmstools/feincmstools/widgets.py
```python
from django.forms.widgets import Widget
class HierarchicalSlugWidget(Widget):
def render(self, name, value, attrs=None):
if value is not None:
value = value.rsplit('/', 1)[-1]
return super(HierarchicalSlugWidget, self).render(name, value, attrs)
``` |
{
"source": "jphalip/media-bias",
"score": 3
} |
#### File: media-bias/code/plotting.py
```python
from __future__ import division
import math
from datetime import datetime
import matplotlib.pyplot as plt
from matplotlib.patches import Patch
import seaborn as sns
def plot_channel_stats(stats, topics, channels, fig_height=8, y_center=False, title=None):
"""
Plots bar charts for the given channel stats.
A separate subplot is generated for each given topic.
"""
fig, axes = plt.subplots(nrows=int(math.ceil(topics.shape[0]/2)), ncols=2, figsize=(8,fig_height))
fig.subplots_adjust(hspace=.5)
for i, topic in topics.iterrows():
ax = fig.axes[i]
# If requested, center all axes around 0
if y_center:
# Calculate the approximate amplitude of the given stats values
amplitude = math.ceil(stats.abs().values.max()*10)/10
ax.set_ylim(-amplitude, amplitude)
# If we have negative values, grey out the negative space for better contrast
if stats.values.min() < 0:
ax.axhspan(0, ax.get_ylim()[0], facecolor='0.2', alpha=0.15)
color = channels.sort_values('title').color
ax.bar(range(len(stats.index)), stats[topic.slug], tick_label=stats.index, color=color, align='center')
ax.set_title(topic.title, size=11)
# Hide potential last empty subplot
if topics.shape[0] % 2:
fig.axes[-1].axis('off')
# Optional title at the top
if title is not None:
multiline = '\n' in title
y = 1. if multiline else .96
plt.suptitle(title, size=14, y=y)
plt.show()
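# Usage sketch (illustrative inputs: `stats` is a DataFrame indexed by channel
# title with one column per topic slug, `topics` has 'slug'/'title' columns and
# `channels` has 'title'/'color' columns):
#   plot_channel_stats(stats, topics, channels, y_center=True,
#                      title='Average sentiment per channel')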
def plot_compressed_channel_stats(stats, color=None, y_center=False, title=None):
"""
Similar to plot_channel_stats except everything is represented
in a single plot (i.e. no subplots).
"""
plt.figure(figsize=(6,4))
ax = plt.gca()
# If requested, center all axes around 0
if y_center:
# Calculate the approximate amplitude of the given stats values
amplitude = math.ceil(stats.abs().values.max()*10)/10
ax.set_ylim(-amplitude, amplitude)
# If we have negative values, grey out the negative space
# for better contrast
if stats.values.min() < 0:
ax.axhspan(0, ax.get_ylim()[0], facecolor='0.2', alpha=0.15)
# The actual plot
stats.plot(kind='bar', color=color, width=0.6, ax=ax)
# Presentation cleanup
plt.xlabel('')
plt.xticks(rotation=0)
plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
# Optional title at the top
if title is not None:
plt.title(title)
plt.show()
def plot_sentiment_series(videos, topics, channels, start_date=None, title=None):
"""
Plot linear timeseries of sentiment scores for the given videos:
One separate subplot is generated for each topic. Each subplot
has one timeseries for each channel, and one timeseries for the
average values across all channells.
"""
fig, axes = plt.subplots(nrows=topics.shape[0], ncols=1, figsize=(8,4*topics.shape[0]))
fig.subplots_adjust(hspace=.3)
# Resample rule: 2-week buckets
resample_rule = '2W'
# Calculate the approximate amplitude of the given sentiment values
amplitude = math.ceil(videos.sentiment_score.abs().max()*10)/10
for i, topic in topics.reset_index().iterrows():
ax = fig.axes[i]
# Grey out the negative sentiment area
ax.axhspan(0, -1, facecolor='0.2', alpha=0.15)
# Plot a timeseries for the average sentiment across all channels
topic_mask = videos[topic.slug]
if start_date is not None:
topic_mask = topic_mask & (videos.published_at >= start_date)
ts = videos[topic_mask].set_index('published_at').resample(resample_rule)['sentiment_score'].mean().interpolate()
sns.tsplot(ts, ts.index, color='#fcef99', linewidth=6, ax=ax)
# Plot a separate time-series for each channel
for _, channel in channels.iterrows():
channel_mask = topic_mask & (videos.channel==channel.title)
ts = videos[channel_mask].set_index('published_at').resample(resample_rule)['sentiment_score'].mean().interpolate()
if len(ts) > 1:
sns.tsplot(ts, ts.index, color=channel['color'], linewidth=1, ax=ax)
# Format x-axis labels as dates
xvalues = ax.xaxis.get_majorticklocs()
xlabels = [datetime.utcfromtimestamp(x/1e9).strftime("%Y.%m") for x in xvalues]
ax.set_xticklabels(xlabels)
# A little extra presentation cleanup
ax.set_xlabel('')
ax.set_title(topic['title'], size=11)
ax.set_ylim(-amplitude,amplitude)
# Add legend
handles = [Patch(color='#fcef99', label='Average')]
for _, channel in channels.iterrows():
handles.append(Patch(color=channel['color'], label=channel['title']))
ax.legend(handles=handles, fontsize=8)
# Optional title at the top
if title is not None:
plt.suptitle(title, size=14, y=.92)
plt.show()
``` |
{
"source": "jphanwebstaurant/google-ads-python",
"score": 3
} |
#### File: google-ads-python/CustomerMatch/hash_customer_list.py
```python
import hashlib

import pandas as pd  # required for the DataFrame operations below
# NOTE: df_customer_list_narrow is assumed to be defined by an earlier preprocessing step.
# TEST
# hashed_object = hashlib.sha256(b"Nobody inspects the spammish repetition").hexdigest()
# print(hashed_object)
# create hash function
def hash_me(text):
return hashlib.sha256(text.encode('utf-8')).hexdigest()
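# Example (illustrative): hash_me('[email protected]') returns the 64-character
# hex SHA-256 digest of the UTF-8 encoded string. Note that Google's Customer
# Match guidelines expect values such as emails and names to be normalized
# (lowercased, whitespace trimmed) before hashing.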
# create copy of data
df_hashed = df_customer_list_narrow.copy()
# create list of rfm category names
rfm_hash_list = df_hashed['rfm_category'].unique().tolist()
# create a dataframe for each rfm category name
DataFrameDict = {elem: pd.DataFrame for elem in rfm_hash_list}
# create hashed version of columns
df_hashed['email_hash'] = df_hashed['Email'].apply(hash_me)
df_hashed['phone_hash'] = df_hashed['Phone'].apply(hash_me)
df_hashed['firstname_hash'] = df_hashed['First Name'].apply(hash_me)
df_hashed['lastname_hash'] = df_hashed['Last Name'].apply(hash_me)
# drop columns
df_hashed = df_hashed.drop(columns=['Email', 'First Name', 'Last Name', 'Phone'])
# rename columns
df_hashed.rename(columns={'email_hash': 'Email',
'firstname_hash': 'First Name',
'lastname_hash': 'Last Name',
'phone_hash': 'Phone'}, inplace=True)
# reorder columns for dictionary
df_hashed = df_hashed[['Email', 'First Name', 'Last Name', 'Country', 'Zip', 'Phone', 'rfm_category']]
# insert all data related to each dictionary name
for key in DataFrameDict.keys():
DataFrameDict[key] = df_hashed.iloc[:, :-1][df_hashed.rfm_category == key]
# reorder columns and delete rfm category
df_hashed = df_hashed[['Email', 'First Name', 'Last Name', 'Country', 'Zip', 'Phone']]
# export each category to its own csv file
for rfm in rfm_hash_list:
df_name = rfm
filename = r'C:\Users\jphan\Repositories\CustomerMatching\hashed_csv_files\{}.csv'.format(rfm)
DataFrameDict[df_name].to_csv(path_or_buf=filename, index=False)
########################################################################################################################
```
#### File: proto/services/account_link_service_pb2_grpc.py
```python
import grpc
from google.ads.google_ads.v6.proto.resources import account_link_pb2 as google_dot_ads_dot_googleads__v6_dot_proto_dot_resources_dot_account__link__pb2
from google.ads.google_ads.v6.proto.services import account_link_service_pb2 as google_dot_ads_dot_googleads__v6_dot_proto_dot_services_dot_account__link__service__pb2
class AccountLinkServiceStub(object):
"""This service allows management of links between Google Ads accounts and other
accounts.
"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.GetAccountLink = channel.unary_unary(
'/google.ads.googleads.v6.services.AccountLinkService/GetAccountLink',
request_serializer=google_dot_ads_dot_googleads__v6_dot_proto_dot_services_dot_account__link__service__pb2.GetAccountLinkRequest.SerializeToString,
response_deserializer=google_dot_ads_dot_googleads__v6_dot_proto_dot_resources_dot_account__link__pb2.AccountLink.FromString,
)
self.CreateAccountLink = channel.unary_unary(
'/google.ads.googleads.v6.services.AccountLinkService/CreateAccountLink',
request_serializer=google_dot_ads_dot_googleads__v6_dot_proto_dot_services_dot_account__link__service__pb2.CreateAccountLinkRequest.SerializeToString,
response_deserializer=google_dot_ads_dot_googleads__v6_dot_proto_dot_services_dot_account__link__service__pb2.CreateAccountLinkResponse.FromString,
)
self.MutateAccountLink = channel.unary_unary(
'/google.ads.googleads.v6.services.AccountLinkService/MutateAccountLink',
request_serializer=google_dot_ads_dot_googleads__v6_dot_proto_dot_services_dot_account__link__service__pb2.MutateAccountLinkRequest.SerializeToString,
response_deserializer=google_dot_ads_dot_googleads__v6_dot_proto_dot_services_dot_account__link__service__pb2.MutateAccountLinkResponse.FromString,
)
class AccountLinkServiceServicer(object):
"""This service allows management of links between Google Ads accounts and other
accounts.
"""
def GetAccountLink(self, request, context):
"""Returns the account link in full detail.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def CreateAccountLink(self, request, context):
"""Creates an account link.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def MutateAccountLink(self, request, context):
"""Creates or removes an account link.
From V5, create is not supported through
AccountLinkService.MutateAccountLink. Please use
AccountLinkService.CreateAccountLink instead.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_AccountLinkServiceServicer_to_server(servicer, server):
rpc_method_handlers = {
'GetAccountLink': grpc.unary_unary_rpc_method_handler(
servicer.GetAccountLink,
request_deserializer=google_dot_ads_dot_googleads__v6_dot_proto_dot_services_dot_account__link__service__pb2.GetAccountLinkRequest.FromString,
response_serializer=google_dot_ads_dot_googleads__v6_dot_proto_dot_resources_dot_account__link__pb2.AccountLink.SerializeToString,
),
'CreateAccountLink': grpc.unary_unary_rpc_method_handler(
servicer.CreateAccountLink,
request_deserializer=google_dot_ads_dot_googleads__v6_dot_proto_dot_services_dot_account__link__service__pb2.CreateAccountLinkRequest.FromString,
response_serializer=google_dot_ads_dot_googleads__v6_dot_proto_dot_services_dot_account__link__service__pb2.CreateAccountLinkResponse.SerializeToString,
),
'MutateAccountLink': grpc.unary_unary_rpc_method_handler(
servicer.MutateAccountLink,
request_deserializer=google_dot_ads_dot_googleads__v6_dot_proto_dot_services_dot_account__link__service__pb2.MutateAccountLinkRequest.FromString,
response_serializer=google_dot_ads_dot_googleads__v6_dot_proto_dot_services_dot_account__link__service__pb2.MutateAccountLinkResponse.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'google.ads.googleads.v6.services.AccountLinkService', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
# This class is part of an EXPERIMENTAL API.
class AccountLinkService(object):
"""This service allows management of links between Google Ads accounts and other
accounts.
"""
@staticmethod
def GetAccountLink(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/google.ads.googleads.v6.services.AccountLinkService/GetAccountLink',
google_dot_ads_dot_googleads__v6_dot_proto_dot_services_dot_account__link__service__pb2.GetAccountLinkRequest.SerializeToString,
google_dot_ads_dot_googleads__v6_dot_proto_dot_resources_dot_account__link__pb2.AccountLink.FromString,
options, channel_credentials,
call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def CreateAccountLink(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/google.ads.googleads.v6.services.AccountLinkService/CreateAccountLink',
google_dot_ads_dot_googleads__v6_dot_proto_dot_services_dot_account__link__service__pb2.CreateAccountLinkRequest.SerializeToString,
google_dot_ads_dot_googleads__v6_dot_proto_dot_services_dot_account__link__service__pb2.CreateAccountLinkResponse.FromString,
options, channel_credentials,
call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def MutateAccountLink(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/google.ads.googleads.v6.services.AccountLinkService/MutateAccountLink',
google_dot_ads_dot_googleads__v6_dot_proto_dot_services_dot_account__link__service__pb2.MutateAccountLinkRequest.SerializeToString,
google_dot_ads_dot_googleads__v6_dot_proto_dot_services_dot_account__link__service__pb2.MutateAccountLinkResponse.FromString,
options, channel_credentials,
call_credentials, compression, wait_for_ready, timeout, metadata)
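# Usage sketch for the generated stub above (assumes an already-authenticated
# grpc.Channel named `channel`; production code normally goes through the
# GoogleAdsClient wrapper rather than these raw stubs, and the resource name
# below is illustrative):
#   stub = AccountLinkServiceStub(channel)
#   req = google_dot_ads_dot_googleads__v6_dot_proto_dot_services_dot_account__link__service__pb2.GetAccountLinkRequest(
#       resource_name='customers/1234567890/accountLinks/9876543210')
#   account_link = stub.GetAccountLink(req)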
```
#### File: proto/services/ad_group_extension_setting_service_pb2_grpc.py
```python
import grpc
from google.ads.google_ads.v6.proto.resources import ad_group_extension_setting_pb2 as google_dot_ads_dot_googleads__v6_dot_proto_dot_resources_dot_ad__group__extension__setting__pb2
from google.ads.google_ads.v6.proto.services import ad_group_extension_setting_service_pb2 as google_dot_ads_dot_googleads__v6_dot_proto_dot_services_dot_ad__group__extension__setting__service__pb2
class AdGroupExtensionSettingServiceStub(object):
"""Proto file describing the AdGroupExtensionSetting service.
Service to manage ad group extension settings.
"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.GetAdGroupExtensionSetting = channel.unary_unary(
'/google.ads.googleads.v6.services.AdGroupExtensionSettingService/GetAdGroupExtensionSetting',
request_serializer=google_dot_ads_dot_googleads__v6_dot_proto_dot_services_dot_ad__group__extension__setting__service__pb2.GetAdGroupExtensionSettingRequest.SerializeToString,
response_deserializer=google_dot_ads_dot_googleads__v6_dot_proto_dot_resources_dot_ad__group__extension__setting__pb2.AdGroupExtensionSetting.FromString,
)
self.MutateAdGroupExtensionSettings = channel.unary_unary(
'/google.ads.googleads.v6.services.AdGroupExtensionSettingService/MutateAdGroupExtensionSettings',
request_serializer=google_dot_ads_dot_googleads__v6_dot_proto_dot_services_dot_ad__group__extension__setting__service__pb2.MutateAdGroupExtensionSettingsRequest.SerializeToString,
response_deserializer=google_dot_ads_dot_googleads__v6_dot_proto_dot_services_dot_ad__group__extension__setting__service__pb2.MutateAdGroupExtensionSettingsResponse.FromString,
)
class AdGroupExtensionSettingServiceServicer(object):
"""Proto file describing the AdGroupExtensionSetting service.
Service to manage ad group extension settings.
"""
def GetAdGroupExtensionSetting(self, request, context):
"""Returns the requested ad group extension setting in full detail.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def MutateAdGroupExtensionSettings(self, request, context):
"""Creates, updates, or removes ad group extension settings. Operation
statuses are returned.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_AdGroupExtensionSettingServiceServicer_to_server(servicer, server):
rpc_method_handlers = {
'GetAdGroupExtensionSetting': grpc.unary_unary_rpc_method_handler(
servicer.GetAdGroupExtensionSetting,
request_deserializer=google_dot_ads_dot_googleads__v6_dot_proto_dot_services_dot_ad__group__extension__setting__service__pb2.GetAdGroupExtensionSettingRequest.FromString,
response_serializer=google_dot_ads_dot_googleads__v6_dot_proto_dot_resources_dot_ad__group__extension__setting__pb2.AdGroupExtensionSetting.SerializeToString,
),
'MutateAdGroupExtensionSettings': grpc.unary_unary_rpc_method_handler(
servicer.MutateAdGroupExtensionSettings,
request_deserializer=google_dot_ads_dot_googleads__v6_dot_proto_dot_services_dot_ad__group__extension__setting__service__pb2.MutateAdGroupExtensionSettingsRequest.FromString,
response_serializer=google_dot_ads_dot_googleads__v6_dot_proto_dot_services_dot_ad__group__extension__setting__service__pb2.MutateAdGroupExtensionSettingsResponse.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'google.ads.googleads.v6.services.AdGroupExtensionSettingService', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
# This class is part of an EXPERIMENTAL API.
class AdGroupExtensionSettingService(object):
"""Proto file describing the AdGroupExtensionSetting service.
Service to manage ad group extension settings.
"""
@staticmethod
def GetAdGroupExtensionSetting(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/google.ads.googleads.v6.services.AdGroupExtensionSettingService/GetAdGroupExtensionSetting',
google_dot_ads_dot_googleads__v6_dot_proto_dot_services_dot_ad__group__extension__setting__service__pb2.GetAdGroupExtensionSettingRequest.SerializeToString,
google_dot_ads_dot_googleads__v6_dot_proto_dot_resources_dot_ad__group__extension__setting__pb2.AdGroupExtensionSetting.FromString,
options, channel_credentials,
call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def MutateAdGroupExtensionSettings(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/google.ads.googleads.v6.services.AdGroupExtensionSettingService/MutateAdGroupExtensionSettings',
google_dot_ads_dot_googleads__v6_dot_proto_dot_services_dot_ad__group__extension__setting__service__pb2.MutateAdGroupExtensionSettingsRequest.SerializeToString,
google_dot_ads_dot_googleads__v6_dot_proto_dot_services_dot_ad__group__extension__setting__service__pb2.MutateAdGroupExtensionSettingsResponse.FromString,
options, channel_credentials,
call_credentials, compression, wait_for_ready, timeout, metadata)
```
#### File: proto/services/ad_group_service_pb2_grpc.py
```python
import grpc
from google.ads.google_ads.v6.proto.resources import ad_group_pb2 as google_dot_ads_dot_googleads__v6_dot_proto_dot_resources_dot_ad__group__pb2
from google.ads.google_ads.v6.proto.services import ad_group_service_pb2 as google_dot_ads_dot_googleads__v6_dot_proto_dot_services_dot_ad__group__service__pb2
class AdGroupServiceStub(object):
"""Proto file describing the Ad Group service.
Service to manage ad groups.
"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.GetAdGroup = channel.unary_unary(
'/google.ads.googleads.v6.services.AdGroupService/GetAdGroup',
request_serializer=google_dot_ads_dot_googleads__v6_dot_proto_dot_services_dot_ad__group__service__pb2.GetAdGroupRequest.SerializeToString,
response_deserializer=google_dot_ads_dot_googleads__v6_dot_proto_dot_resources_dot_ad__group__pb2.AdGroup.FromString,
)
self.MutateAdGroups = channel.unary_unary(
'/google.ads.googleads.v6.services.AdGroupService/MutateAdGroups',
request_serializer=google_dot_ads_dot_googleads__v6_dot_proto_dot_services_dot_ad__group__service__pb2.MutateAdGroupsRequest.SerializeToString,
response_deserializer=google_dot_ads_dot_googleads__v6_dot_proto_dot_services_dot_ad__group__service__pb2.MutateAdGroupsResponse.FromString,
)
class AdGroupServiceServicer(object):
"""Proto file describing the Ad Group service.
Service to manage ad groups.
"""
def GetAdGroup(self, request, context):
"""Returns the requested ad group in full detail.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def MutateAdGroups(self, request, context):
"""Creates, updates, or removes ad groups. Operation statuses are returned.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_AdGroupServiceServicer_to_server(servicer, server):
rpc_method_handlers = {
'GetAdGroup': grpc.unary_unary_rpc_method_handler(
servicer.GetAdGroup,
request_deserializer=google_dot_ads_dot_googleads__v6_dot_proto_dot_services_dot_ad__group__service__pb2.GetAdGroupRequest.FromString,
response_serializer=google_dot_ads_dot_googleads__v6_dot_proto_dot_resources_dot_ad__group__pb2.AdGroup.SerializeToString,
),
'MutateAdGroups': grpc.unary_unary_rpc_method_handler(
servicer.MutateAdGroups,
request_deserializer=google_dot_ads_dot_googleads__v6_dot_proto_dot_services_dot_ad__group__service__pb2.MutateAdGroupsRequest.FromString,
response_serializer=google_dot_ads_dot_googleads__v6_dot_proto_dot_services_dot_ad__group__service__pb2.MutateAdGroupsResponse.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'google.ads.googleads.v6.services.AdGroupService', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
# This class is part of an EXPERIMENTAL API.
class AdGroupService(object):
"""Proto file describing the Ad Group service.
Service to manage ad groups.
"""
@staticmethod
def GetAdGroup(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/google.ads.googleads.v6.services.AdGroupService/GetAdGroup',
google_dot_ads_dot_googleads__v6_dot_proto_dot_services_dot_ad__group__service__pb2.GetAdGroupRequest.SerializeToString,
google_dot_ads_dot_googleads__v6_dot_proto_dot_resources_dot_ad__group__pb2.AdGroup.FromString,
options, channel_credentials,
call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def MutateAdGroups(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/google.ads.googleads.v6.services.AdGroupService/MutateAdGroups',
google_dot_ads_dot_googleads__v6_dot_proto_dot_services_dot_ad__group__service__pb2.MutateAdGroupsRequest.SerializeToString,
google_dot_ads_dot_googleads__v6_dot_proto_dot_services_dot_ad__group__service__pb2.MutateAdGroupsResponse.FromString,
options, channel_credentials,
call_credentials, compression, wait_for_ready, timeout, metadata)
```
#### File: proto/services/batch_job_service_pb2_grpc.py
```python
import grpc
from google.ads.google_ads.v6.proto.resources import batch_job_pb2 as google_dot_ads_dot_googleads__v6_dot_proto_dot_resources_dot_batch__job__pb2
from google.ads.google_ads.v6.proto.services import batch_job_service_pb2 as google_dot_ads_dot_googleads__v6_dot_proto_dot_services_dot_batch__job__service__pb2
from google.longrunning import operations_pb2 as google_dot_longrunning_dot_operations__pb2
class BatchJobServiceStub(object):
"""Proto file describing the BatchJobService.
Service to manage batch jobs.
"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.MutateBatchJob = channel.unary_unary(
'/google.ads.googleads.v6.services.BatchJobService/MutateBatchJob',
request_serializer=google_dot_ads_dot_googleads__v6_dot_proto_dot_services_dot_batch__job__service__pb2.MutateBatchJobRequest.SerializeToString,
response_deserializer=google_dot_ads_dot_googleads__v6_dot_proto_dot_services_dot_batch__job__service__pb2.MutateBatchJobResponse.FromString,
)
self.GetBatchJob = channel.unary_unary(
'/google.ads.googleads.v6.services.BatchJobService/GetBatchJob',
request_serializer=google_dot_ads_dot_googleads__v6_dot_proto_dot_services_dot_batch__job__service__pb2.GetBatchJobRequest.SerializeToString,
response_deserializer=google_dot_ads_dot_googleads__v6_dot_proto_dot_resources_dot_batch__job__pb2.BatchJob.FromString,
)
self.ListBatchJobResults = channel.unary_unary(
'/google.ads.googleads.v6.services.BatchJobService/ListBatchJobResults',
request_serializer=google_dot_ads_dot_googleads__v6_dot_proto_dot_services_dot_batch__job__service__pb2.ListBatchJobResultsRequest.SerializeToString,
response_deserializer=google_dot_ads_dot_googleads__v6_dot_proto_dot_services_dot_batch__job__service__pb2.ListBatchJobResultsResponse.FromString,
)
self.RunBatchJob = channel.unary_unary(
'/google.ads.googleads.v6.services.BatchJobService/RunBatchJob',
request_serializer=google_dot_ads_dot_googleads__v6_dot_proto_dot_services_dot_batch__job__service__pb2.RunBatchJobRequest.SerializeToString,
response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString,
)
self.AddBatchJobOperations = channel.unary_unary(
'/google.ads.googleads.v6.services.BatchJobService/AddBatchJobOperations',
request_serializer=google_dot_ads_dot_googleads__v6_dot_proto_dot_services_dot_batch__job__service__pb2.AddBatchJobOperationsRequest.SerializeToString,
response_deserializer=google_dot_ads_dot_googleads__v6_dot_proto_dot_services_dot_batch__job__service__pb2.AddBatchJobOperationsResponse.FromString,
)
class BatchJobServiceServicer(object):
"""Proto file describing the BatchJobService.
Service to manage batch jobs.
"""
def MutateBatchJob(self, request, context):
"""Mutates a batch job.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def GetBatchJob(self, request, context):
"""Returns the batch job.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ListBatchJobResults(self, request, context):
"""Returns the results of the batch job. The job must be done.
Supports standard list paging.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def RunBatchJob(self, request, context):
"""Runs the batch job.
The Operation.metadata field type is BatchJobMetadata. When finished, the
long running operation will not contain errors or a response. Instead, use
ListBatchJobResults to get the results of the job.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def AddBatchJobOperations(self, request, context):
"""Add operations to the batch job.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_BatchJobServiceServicer_to_server(servicer, server):
rpc_method_handlers = {
'MutateBatchJob': grpc.unary_unary_rpc_method_handler(
servicer.MutateBatchJob,
request_deserializer=google_dot_ads_dot_googleads__v6_dot_proto_dot_services_dot_batch__job__service__pb2.MutateBatchJobRequest.FromString,
response_serializer=google_dot_ads_dot_googleads__v6_dot_proto_dot_services_dot_batch__job__service__pb2.MutateBatchJobResponse.SerializeToString,
),
'GetBatchJob': grpc.unary_unary_rpc_method_handler(
servicer.GetBatchJob,
request_deserializer=google_dot_ads_dot_googleads__v6_dot_proto_dot_services_dot_batch__job__service__pb2.GetBatchJobRequest.FromString,
response_serializer=google_dot_ads_dot_googleads__v6_dot_proto_dot_resources_dot_batch__job__pb2.BatchJob.SerializeToString,
),
'ListBatchJobResults': grpc.unary_unary_rpc_method_handler(
servicer.ListBatchJobResults,
request_deserializer=google_dot_ads_dot_googleads__v6_dot_proto_dot_services_dot_batch__job__service__pb2.ListBatchJobResultsRequest.FromString,
response_serializer=google_dot_ads_dot_googleads__v6_dot_proto_dot_services_dot_batch__job__service__pb2.ListBatchJobResultsResponse.SerializeToString,
),
'RunBatchJob': grpc.unary_unary_rpc_method_handler(
servicer.RunBatchJob,
request_deserializer=google_dot_ads_dot_googleads__v6_dot_proto_dot_services_dot_batch__job__service__pb2.RunBatchJobRequest.FromString,
response_serializer=google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString,
),
'AddBatchJobOperations': grpc.unary_unary_rpc_method_handler(
servicer.AddBatchJobOperations,
request_deserializer=google_dot_ads_dot_googleads__v6_dot_proto_dot_services_dot_batch__job__service__pb2.AddBatchJobOperationsRequest.FromString,
response_serializer=google_dot_ads_dot_googleads__v6_dot_proto_dot_services_dot_batch__job__service__pb2.AddBatchJobOperationsResponse.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'google.ads.googleads.v6.services.BatchJobService', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
# This class is part of an EXPERIMENTAL API.
class BatchJobService(object):
"""Proto file describing the BatchJobService.
Service to manage batch jobs.
"""
@staticmethod
def MutateBatchJob(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/google.ads.googleads.v6.services.BatchJobService/MutateBatchJob',
google_dot_ads_dot_googleads__v6_dot_proto_dot_services_dot_batch__job__service__pb2.MutateBatchJobRequest.SerializeToString,
google_dot_ads_dot_googleads__v6_dot_proto_dot_services_dot_batch__job__service__pb2.MutateBatchJobResponse.FromString,
options, channel_credentials,
call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def GetBatchJob(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/google.ads.googleads.v6.services.BatchJobService/GetBatchJob',
google_dot_ads_dot_googleads__v6_dot_proto_dot_services_dot_batch__job__service__pb2.GetBatchJobRequest.SerializeToString,
google_dot_ads_dot_googleads__v6_dot_proto_dot_resources_dot_batch__job__pb2.BatchJob.FromString,
options, channel_credentials,
call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def ListBatchJobResults(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/google.ads.googleads.v6.services.BatchJobService/ListBatchJobResults',
google_dot_ads_dot_googleads__v6_dot_proto_dot_services_dot_batch__job__service__pb2.ListBatchJobResultsRequest.SerializeToString,
google_dot_ads_dot_googleads__v6_dot_proto_dot_services_dot_batch__job__service__pb2.ListBatchJobResultsResponse.FromString,
options, channel_credentials,
call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def RunBatchJob(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/google.ads.googleads.v6.services.BatchJobService/RunBatchJob',
google_dot_ads_dot_googleads__v6_dot_proto_dot_services_dot_batch__job__service__pb2.RunBatchJobRequest.SerializeToString,
google_dot_longrunning_dot_operations__pb2.Operation.FromString,
options, channel_credentials,
call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def AddBatchJobOperations(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/google.ads.googleads.v6.services.BatchJobService/AddBatchJobOperations',
google_dot_ads_dot_googleads__v6_dot_proto_dot_services_dot_batch__job__service__pb2.AddBatchJobOperationsRequest.SerializeToString,
google_dot_ads_dot_googleads__v6_dot_proto_dot_services_dot_batch__job__service__pb2.AddBatchJobOperationsResponse.FromString,
options, channel_credentials,
call_credentials, compression, wait_for_ready, timeout, metadata)
```
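For orientation, a hedged sketch of how the generated `add_BatchJobServiceServicer_to_server` helper above is typically wired into a local gRPC server. The port is an arbitrary placeholder and the no-op servicer simply inherits the UNIMPLEMENTED responses; this is an illustration, not part of the google-ads-python package itself.
```python
# Sketch only: host the generated BatchJobService servicer on a local gRPC
# server. The port is a placeholder; a real servicer would override the RPCs.
from concurrent import futures
import grpc

from google.ads.google_ads.v6.proto.services import batch_job_service_pb2_grpc


class NoopBatchJobService(batch_job_service_pb2_grpc.BatchJobServiceServicer):
    """Inherits the default UNIMPLEMENTED behaviour for every RPC."""


def serve():
    server = grpc.server(futures.ThreadPoolExecutor(max_workers=4))
    batch_job_service_pb2_grpc.add_BatchJobServiceServicer_to_server(
        NoopBatchJobService(), server
    )
    server.add_insecure_port('[::]:50051')  # placeholder port
    server.start()
    server.wait_for_termination()
```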
#### File: proto/services/campaign_feed_service_pb2_grpc.py
```python
import grpc
from google.ads.google_ads.v6.proto.resources import campaign_feed_pb2 as google_dot_ads_dot_googleads__v6_dot_proto_dot_resources_dot_campaign__feed__pb2
from google.ads.google_ads.v6.proto.services import campaign_feed_service_pb2 as google_dot_ads_dot_googleads__v6_dot_proto_dot_services_dot_campaign__feed__service__pb2
class CampaignFeedServiceStub(object):
"""Proto file describing the CampaignFeed service.
Service to manage campaign feeds.
"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.GetCampaignFeed = channel.unary_unary(
'/google.ads.googleads.v6.services.CampaignFeedService/GetCampaignFeed',
request_serializer=google_dot_ads_dot_googleads__v6_dot_proto_dot_services_dot_campaign__feed__service__pb2.GetCampaignFeedRequest.SerializeToString,
response_deserializer=google_dot_ads_dot_googleads__v6_dot_proto_dot_resources_dot_campaign__feed__pb2.CampaignFeed.FromString,
)
self.MutateCampaignFeeds = channel.unary_unary(
'/google.ads.googleads.v6.services.CampaignFeedService/MutateCampaignFeeds',
request_serializer=google_dot_ads_dot_googleads__v6_dot_proto_dot_services_dot_campaign__feed__service__pb2.MutateCampaignFeedsRequest.SerializeToString,
response_deserializer=google_dot_ads_dot_googleads__v6_dot_proto_dot_services_dot_campaign__feed__service__pb2.MutateCampaignFeedsResponse.FromString,
)
class CampaignFeedServiceServicer(object):
"""Proto file describing the CampaignFeed service.
Service to manage campaign feeds.
"""
def GetCampaignFeed(self, request, context):
"""Returns the requested campaign feed in full detail.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def MutateCampaignFeeds(self, request, context):
"""Creates, updates, or removes campaign feeds. Operation statuses are
returned.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_CampaignFeedServiceServicer_to_server(servicer, server):
rpc_method_handlers = {
'GetCampaignFeed': grpc.unary_unary_rpc_method_handler(
servicer.GetCampaignFeed,
request_deserializer=google_dot_ads_dot_googleads__v6_dot_proto_dot_services_dot_campaign__feed__service__pb2.GetCampaignFeedRequest.FromString,
response_serializer=google_dot_ads_dot_googleads__v6_dot_proto_dot_resources_dot_campaign__feed__pb2.CampaignFeed.SerializeToString,
),
'MutateCampaignFeeds': grpc.unary_unary_rpc_method_handler(
servicer.MutateCampaignFeeds,
request_deserializer=google_dot_ads_dot_googleads__v6_dot_proto_dot_services_dot_campaign__feed__service__pb2.MutateCampaignFeedsRequest.FromString,
response_serializer=google_dot_ads_dot_googleads__v6_dot_proto_dot_services_dot_campaign__feed__service__pb2.MutateCampaignFeedsResponse.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'google.ads.googleads.v6.services.CampaignFeedService', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
# This class is part of an EXPERIMENTAL API.
class CampaignFeedService(object):
"""Proto file describing the CampaignFeed service.
Service to manage campaign feeds.
"""
@staticmethod
def GetCampaignFeed(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/google.ads.googleads.v6.services.CampaignFeedService/GetCampaignFeed',
google_dot_ads_dot_googleads__v6_dot_proto_dot_services_dot_campaign__feed__service__pb2.GetCampaignFeedRequest.SerializeToString,
google_dot_ads_dot_googleads__v6_dot_proto_dot_resources_dot_campaign__feed__pb2.CampaignFeed.FromString,
options, channel_credentials,
call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def MutateCampaignFeeds(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/google.ads.googleads.v6.services.CampaignFeedService/MutateCampaignFeeds',
google_dot_ads_dot_googleads__v6_dot_proto_dot_services_dot_campaign__feed__service__pb2.MutateCampaignFeedsRequest.SerializeToString,
google_dot_ads_dot_googleads__v6_dot_proto_dot_services_dot_campaign__feed__service__pb2.MutateCampaignFeedsResponse.FromString,
options, channel_credentials,
call_credentials, compression, wait_for_ready, timeout, metadata)
```
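A complementary, hypothetical client-side sketch for the stub above: the target address, TLS setup, and resource name are placeholders, and real Google Ads calls additionally require OAuth2 credentials and developer-token metadata that are omitted here.
```python
# Hypothetical client usage of CampaignFeedServiceStub; values are placeholders
# and the authentication metadata required by the live API is omitted.
import grpc

from google.ads.google_ads.v6.proto.services import (
    campaign_feed_service_pb2,
    campaign_feed_service_pb2_grpc,
)

channel = grpc.secure_channel(
    'googleads.googleapis.com:443', grpc.ssl_channel_credentials()
)
stub = campaign_feed_service_pb2_grpc.CampaignFeedServiceStub(channel)
request = campaign_feed_service_pb2.GetCampaignFeedRequest(
    resource_name='customers/1234567890/campaignFeeds/111~222'  # made-up name
)
campaign_feed = stub.GetCampaignFeed(request, timeout=30)
```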
#### File: proto/services/extension_feed_item_service_pb2_grpc.py
```python
import grpc
from google.ads.google_ads.v6.proto.resources import extension_feed_item_pb2 as google_dot_ads_dot_googleads__v6_dot_proto_dot_resources_dot_extension__feed__item__pb2
from google.ads.google_ads.v6.proto.services import extension_feed_item_service_pb2 as google_dot_ads_dot_googleads__v6_dot_proto_dot_services_dot_extension__feed__item__service__pb2
class ExtensionFeedItemServiceStub(object):
"""Proto file describing the ExtensionFeedItem service.
Service to manage extension feed items.
"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.GetExtensionFeedItem = channel.unary_unary(
'/google.ads.googleads.v6.services.ExtensionFeedItemService/GetExtensionFeedItem',
request_serializer=google_dot_ads_dot_googleads__v6_dot_proto_dot_services_dot_extension__feed__item__service__pb2.GetExtensionFeedItemRequest.SerializeToString,
response_deserializer=google_dot_ads_dot_googleads__v6_dot_proto_dot_resources_dot_extension__feed__item__pb2.ExtensionFeedItem.FromString,
)
self.MutateExtensionFeedItems = channel.unary_unary(
'/google.ads.googleads.v6.services.ExtensionFeedItemService/MutateExtensionFeedItems',
request_serializer=google_dot_ads_dot_googleads__v6_dot_proto_dot_services_dot_extension__feed__item__service__pb2.MutateExtensionFeedItemsRequest.SerializeToString,
response_deserializer=google_dot_ads_dot_googleads__v6_dot_proto_dot_services_dot_extension__feed__item__service__pb2.MutateExtensionFeedItemsResponse.FromString,
)
class ExtensionFeedItemServiceServicer(object):
"""Proto file describing the ExtensionFeedItem service.
Service to manage extension feed items.
"""
def GetExtensionFeedItem(self, request, context):
"""Returns the requested extension feed item in full detail.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def MutateExtensionFeedItems(self, request, context):
"""Creates, updates, or removes extension feed items. Operation
statuses are returned.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_ExtensionFeedItemServiceServicer_to_server(servicer, server):
rpc_method_handlers = {
'GetExtensionFeedItem': grpc.unary_unary_rpc_method_handler(
servicer.GetExtensionFeedItem,
request_deserializer=google_dot_ads_dot_googleads__v6_dot_proto_dot_services_dot_extension__feed__item__service__pb2.GetExtensionFeedItemRequest.FromString,
response_serializer=google_dot_ads_dot_googleads__v6_dot_proto_dot_resources_dot_extension__feed__item__pb2.ExtensionFeedItem.SerializeToString,
),
'MutateExtensionFeedItems': grpc.unary_unary_rpc_method_handler(
servicer.MutateExtensionFeedItems,
request_deserializer=google_dot_ads_dot_googleads__v6_dot_proto_dot_services_dot_extension__feed__item__service__pb2.MutateExtensionFeedItemsRequest.FromString,
response_serializer=google_dot_ads_dot_googleads__v6_dot_proto_dot_services_dot_extension__feed__item__service__pb2.MutateExtensionFeedItemsResponse.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'google.ads.googleads.v6.services.ExtensionFeedItemService', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
# This class is part of an EXPERIMENTAL API.
class ExtensionFeedItemService(object):
"""Proto file describing the ExtensionFeedItem service.
Service to manage extension feed items.
"""
@staticmethod
def GetExtensionFeedItem(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/google.ads.googleads.v6.services.ExtensionFeedItemService/GetExtensionFeedItem',
google_dot_ads_dot_googleads__v6_dot_proto_dot_services_dot_extension__feed__item__service__pb2.GetExtensionFeedItemRequest.SerializeToString,
google_dot_ads_dot_googleads__v6_dot_proto_dot_resources_dot_extension__feed__item__pb2.ExtensionFeedItem.FromString,
options, channel_credentials,
call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def MutateExtensionFeedItems(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/google.ads.googleads.v6.services.ExtensionFeedItemService/MutateExtensionFeedItems',
google_dot_ads_dot_googleads__v6_dot_proto_dot_services_dot_extension__feed__item__service__pb2.MutateExtensionFeedItemsRequest.SerializeToString,
google_dot_ads_dot_googleads__v6_dot_proto_dot_services_dot_extension__feed__item__service__pb2.MutateExtensionFeedItemsResponse.FromString,
options, channel_credentials,
call_credentials, compression, wait_for_ready, timeout, metadata)
```
#### File: proto/services/gender_view_service_pb2_grpc.py
```python
import grpc
from google.ads.google_ads.v6.proto.resources import gender_view_pb2 as google_dot_ads_dot_googleads__v6_dot_proto_dot_resources_dot_gender__view__pb2
from google.ads.google_ads.v6.proto.services import gender_view_service_pb2 as google_dot_ads_dot_googleads__v6_dot_proto_dot_services_dot_gender__view__service__pb2
class GenderViewServiceStub(object):
"""Proto file describing the Gender View service.
Service to manage gender views.
"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.GetGenderView = channel.unary_unary(
'/google.ads.googleads.v6.services.GenderViewService/GetGenderView',
request_serializer=google_dot_ads_dot_googleads__v6_dot_proto_dot_services_dot_gender__view__service__pb2.GetGenderViewRequest.SerializeToString,
response_deserializer=google_dot_ads_dot_googleads__v6_dot_proto_dot_resources_dot_gender__view__pb2.GenderView.FromString,
)
class GenderViewServiceServicer(object):
"""Proto file describing the Gender View service.
Service to manage gender views.
"""
def GetGenderView(self, request, context):
"""Returns the requested gender view in full detail.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_GenderViewServiceServicer_to_server(servicer, server):
rpc_method_handlers = {
'GetGenderView': grpc.unary_unary_rpc_method_handler(
servicer.GetGenderView,
request_deserializer=google_dot_ads_dot_googleads__v6_dot_proto_dot_services_dot_gender__view__service__pb2.GetGenderViewRequest.FromString,
response_serializer=google_dot_ads_dot_googleads__v6_dot_proto_dot_resources_dot_gender__view__pb2.GenderView.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'google.ads.googleads.v6.services.GenderViewService', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
# This class is part of an EXPERIMENTAL API.
class GenderViewService(object):
"""Proto file describing the Gender View service.
Service to manage gender views.
"""
@staticmethod
def GetGenderView(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/google.ads.googleads.v6.services.GenderViewService/GetGenderView',
google_dot_ads_dot_googleads__v6_dot_proto_dot_services_dot_gender__view__service__pb2.GetGenderViewRequest.SerializeToString,
google_dot_ads_dot_googleads__v6_dot_proto_dot_resources_dot_gender__view__pb2.GenderView.FromString,
options, channel_credentials,
call_credentials, compression, wait_for_ready, timeout, metadata)
```
#### File: proto/services/paid_organic_search_term_view_service_pb2_grpc.py
```python
import grpc
from google.ads.google_ads.v6.proto.resources import paid_organic_search_term_view_pb2 as google_dot_ads_dot_googleads__v6_dot_proto_dot_resources_dot_paid__organic__search__term__view__pb2
from google.ads.google_ads.v6.proto.services import paid_organic_search_term_view_service_pb2 as google_dot_ads_dot_googleads__v6_dot_proto_dot_services_dot_paid__organic__search__term__view__service__pb2
class PaidOrganicSearchTermViewServiceStub(object):
"""Proto file describing the Paid Organic Search Term View service.
Service to fetch paid organic search term views.
"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.GetPaidOrganicSearchTermView = channel.unary_unary(
'/google.ads.googleads.v6.services.PaidOrganicSearchTermViewService/GetPaidOrganicSearchTermView',
request_serializer=google_dot_ads_dot_googleads__v6_dot_proto_dot_services_dot_paid__organic__search__term__view__service__pb2.GetPaidOrganicSearchTermViewRequest.SerializeToString,
response_deserializer=google_dot_ads_dot_googleads__v6_dot_proto_dot_resources_dot_paid__organic__search__term__view__pb2.PaidOrganicSearchTermView.FromString,
)
class PaidOrganicSearchTermViewServiceServicer(object):
"""Proto file describing the Paid Organic Search Term View service.
Service to fetch paid organic search term views.
"""
def GetPaidOrganicSearchTermView(self, request, context):
"""Returns the requested paid organic search term view in full detail.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_PaidOrganicSearchTermViewServiceServicer_to_server(servicer, server):
rpc_method_handlers = {
'GetPaidOrganicSearchTermView': grpc.unary_unary_rpc_method_handler(
servicer.GetPaidOrganicSearchTermView,
request_deserializer=google_dot_ads_dot_googleads__v6_dot_proto_dot_services_dot_paid__organic__search__term__view__service__pb2.GetPaidOrganicSearchTermViewRequest.FromString,
response_serializer=google_dot_ads_dot_googleads__v6_dot_proto_dot_resources_dot_paid__organic__search__term__view__pb2.PaidOrganicSearchTermView.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'google.ads.googleads.v6.services.PaidOrganicSearchTermViewService', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
# This class is part of an EXPERIMENTAL API.
class PaidOrganicSearchTermViewService(object):
"""Proto file describing the Paid Organic Search Term View service.
Service to fetch paid organic search term views.
"""
@staticmethod
def GetPaidOrganicSearchTermView(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/google.ads.googleads.v6.services.PaidOrganicSearchTermViewService/GetPaidOrganicSearchTermView',
google_dot_ads_dot_googleads__v6_dot_proto_dot_services_dot_paid__organic__search__term__view__service__pb2.GetPaidOrganicSearchTermViewRequest.SerializeToString,
google_dot_ads_dot_googleads__v6_dot_proto_dot_resources_dot_paid__organic__search__term__view__pb2.PaidOrganicSearchTermView.FromString,
options, channel_credentials,
call_credentials, compression, wait_for_ready, timeout, metadata)
```
#### File: proto/services/reach_plan_service_pb2_grpc.py
```python
import grpc
from google.ads.google_ads.v6.proto.services import reach_plan_service_pb2 as google_dot_ads_dot_googleads__v6_dot_proto_dot_services_dot_reach__plan__service__pb2
class ReachPlanServiceStub(object):
"""Proto file describing the reach plan service.
Reach Plan Service gives users information about audience size that can
be reached through advertisement on YouTube. In particular,
GenerateReachForecast provides estimated number of people of specified
demographics that can be reached by an ad in a given market by a campaign of
certain duration with a defined budget.
"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.ListPlannableLocations = channel.unary_unary(
'/google.ads.googleads.v6.services.ReachPlanService/ListPlannableLocations',
request_serializer=google_dot_ads_dot_googleads__v6_dot_proto_dot_services_dot_reach__plan__service__pb2.ListPlannableLocationsRequest.SerializeToString,
response_deserializer=google_dot_ads_dot_googleads__v6_dot_proto_dot_services_dot_reach__plan__service__pb2.ListPlannableLocationsResponse.FromString,
)
self.ListPlannableProducts = channel.unary_unary(
'/google.ads.googleads.v6.services.ReachPlanService/ListPlannableProducts',
request_serializer=google_dot_ads_dot_googleads__v6_dot_proto_dot_services_dot_reach__plan__service__pb2.ListPlannableProductsRequest.SerializeToString,
response_deserializer=google_dot_ads_dot_googleads__v6_dot_proto_dot_services_dot_reach__plan__service__pb2.ListPlannableProductsResponse.FromString,
)
self.GenerateProductMixIdeas = channel.unary_unary(
'/google.ads.googleads.v6.services.ReachPlanService/GenerateProductMixIdeas',
request_serializer=google_dot_ads_dot_googleads__v6_dot_proto_dot_services_dot_reach__plan__service__pb2.GenerateProductMixIdeasRequest.SerializeToString,
response_deserializer=google_dot_ads_dot_googleads__v6_dot_proto_dot_services_dot_reach__plan__service__pb2.GenerateProductMixIdeasResponse.FromString,
)
self.GenerateReachForecast = channel.unary_unary(
'/google.ads.googleads.v6.services.ReachPlanService/GenerateReachForecast',
request_serializer=google_dot_ads_dot_googleads__v6_dot_proto_dot_services_dot_reach__plan__service__pb2.GenerateReachForecastRequest.SerializeToString,
response_deserializer=google_dot_ads_dot_googleads__v6_dot_proto_dot_services_dot_reach__plan__service__pb2.GenerateReachForecastResponse.FromString,
)
class ReachPlanServiceServicer(object):
"""Proto file describing the reach plan service.
Reach Plan Service gives users information about audience size that can
be reached through advertisement on YouTube. In particular,
GenerateReachForecast provides estimated number of people of specified
demographics that can be reached by an ad in a given market by a campaign of
certain duration with a defined budget.
"""
def ListPlannableLocations(self, request, context):
"""Returns the list of plannable locations (for example, countries & DMAs).
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ListPlannableProducts(self, request, context):
"""Returns the list of per-location plannable YouTube ad formats with allowed
targeting.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def GenerateProductMixIdeas(self, request, context):
"""Generates a product mix ideas given a set of preferences. This method
helps the advertiser to obtain a good mix of ad formats and budget
allocations based on its preferences.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def GenerateReachForecast(self, request, context):
"""Generates a reach forecast for a given targeting / product mix.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_ReachPlanServiceServicer_to_server(servicer, server):
rpc_method_handlers = {
'ListPlannableLocations': grpc.unary_unary_rpc_method_handler(
servicer.ListPlannableLocations,
request_deserializer=google_dot_ads_dot_googleads__v6_dot_proto_dot_services_dot_reach__plan__service__pb2.ListPlannableLocationsRequest.FromString,
response_serializer=google_dot_ads_dot_googleads__v6_dot_proto_dot_services_dot_reach__plan__service__pb2.ListPlannableLocationsResponse.SerializeToString,
),
'ListPlannableProducts': grpc.unary_unary_rpc_method_handler(
servicer.ListPlannableProducts,
request_deserializer=google_dot_ads_dot_googleads__v6_dot_proto_dot_services_dot_reach__plan__service__pb2.ListPlannableProductsRequest.FromString,
response_serializer=google_dot_ads_dot_googleads__v6_dot_proto_dot_services_dot_reach__plan__service__pb2.ListPlannableProductsResponse.SerializeToString,
),
'GenerateProductMixIdeas': grpc.unary_unary_rpc_method_handler(
servicer.GenerateProductMixIdeas,
request_deserializer=google_dot_ads_dot_googleads__v6_dot_proto_dot_services_dot_reach__plan__service__pb2.GenerateProductMixIdeasRequest.FromString,
response_serializer=google_dot_ads_dot_googleads__v6_dot_proto_dot_services_dot_reach__plan__service__pb2.GenerateProductMixIdeasResponse.SerializeToString,
),
'GenerateReachForecast': grpc.unary_unary_rpc_method_handler(
servicer.GenerateReachForecast,
request_deserializer=google_dot_ads_dot_googleads__v6_dot_proto_dot_services_dot_reach__plan__service__pb2.GenerateReachForecastRequest.FromString,
response_serializer=google_dot_ads_dot_googleads__v6_dot_proto_dot_services_dot_reach__plan__service__pb2.GenerateReachForecastResponse.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'google.ads.googleads.v6.services.ReachPlanService', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
# This class is part of an EXPERIMENTAL API.
class ReachPlanService(object):
"""Proto file describing the reach plan service.
Reach Plan Service gives users information about audience size that can
be reached through advertisement on YouTube. In particular,
GenerateReachForecast provides estimated number of people of specified
demographics that can be reached by an ad in a given market by a campaign of
certain duration with a defined budget.
"""
@staticmethod
def ListPlannableLocations(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/google.ads.googleads.v6.services.ReachPlanService/ListPlannableLocations',
google_dot_ads_dot_googleads__v6_dot_proto_dot_services_dot_reach__plan__service__pb2.ListPlannableLocationsRequest.SerializeToString,
google_dot_ads_dot_googleads__v6_dot_proto_dot_services_dot_reach__plan__service__pb2.ListPlannableLocationsResponse.FromString,
options, channel_credentials,
call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def ListPlannableProducts(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/google.ads.googleads.v6.services.ReachPlanService/ListPlannableProducts',
google_dot_ads_dot_googleads__v6_dot_proto_dot_services_dot_reach__plan__service__pb2.ListPlannableProductsRequest.SerializeToString,
google_dot_ads_dot_googleads__v6_dot_proto_dot_services_dot_reach__plan__service__pb2.ListPlannableProductsResponse.FromString,
options, channel_credentials,
call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def GenerateProductMixIdeas(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/google.ads.googleads.v6.services.ReachPlanService/GenerateProductMixIdeas',
google_dot_ads_dot_googleads__v6_dot_proto_dot_services_dot_reach__plan__service__pb2.GenerateProductMixIdeasRequest.SerializeToString,
google_dot_ads_dot_googleads__v6_dot_proto_dot_services_dot_reach__plan__service__pb2.GenerateProductMixIdeasResponse.FromString,
options, channel_credentials,
call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def GenerateReachForecast(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/google.ads.googleads.v6.services.ReachPlanService/GenerateReachForecast',
google_dot_ads_dot_googleads__v6_dot_proto_dot_services_dot_reach__plan__service__pb2.GenerateReachForecastRequest.SerializeToString,
google_dot_ads_dot_googleads__v6_dot_proto_dot_services_dot_reach__plan__service__pb2.GenerateReachForecastResponse.FromString,
options, channel_credentials,
call_credentials, compression, wait_for_ready, timeout, metadata)
```
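As with the other services, the ReachPlanService stub can be driven through an existing channel. The sketch below only relies on the request type defined in the module above; channel construction and Google Ads authentication are left to the caller, and the `plannable_locations`, `id`, and `name` response field names are assumptions.
```python
# Hedged sketch: list plannable locations via the generated stub. Channel
# construction and auth metadata are assumed to be handled by the caller.
import grpc

from google.ads.google_ads.v6.proto.services import (
    reach_plan_service_pb2,
    reach_plan_service_pb2_grpc,
)


def list_plannable_locations(channel: grpc.Channel):
    stub = reach_plan_service_pb2_grpc.ReachPlanServiceStub(channel)
    response = stub.ListPlannableLocations(
        reach_plan_service_pb2.ListPlannableLocationsRequest()
    )
    for location in response.plannable_locations:  # field names assumed
        print(location.id, location.name)
    return response
```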
#### File: google-ads-python/tests/config_test.py
```python
import mock
import os
import yaml
from pyfakefs.fake_filesystem_unittest import TestCase as FileTestCase
from google.ads.google_ads import config
class ConfigTest(FileTestCase):
def setUp(self):
self.setUpPyfakefs()
self.developer_token = "abc123"
self.client_id = "client_id_123456789"
self.client_secret = "client_secret_987654321"
self.refresh_token = "<PASSWORD>"
self.login_customer_id = "1234567890"
self.path_to_private_key_file = "/test/path/to/config.json"
self.delegated_account = "<EMAIL>"
self.endpoint = "www.testendpoint.com"
def test_load_from_yaml_file(self):
file_path = os.path.join(os.path.expanduser("~"), "google-ads.yaml")
self.fs.create_file(
file_path,
contents=yaml.safe_dump(
{
"developer_token": self.developer_token,
"client_id": self.client_id,
"client_secret": self.client_secret,
"refresh_token": self.refresh_token,
}
),
)
result = config.load_from_yaml_file()
self.assertEqual(result["developer_token"], self.developer_token)
self.assertEqual(result["client_id"], self.client_id)
self.assertEqual(result["client_secret"], self.client_secret)
self.assertEqual(result["refresh_token"], self.refresh_token)
def test_load_from_yaml_file_missing_required_key(self):
file_path = os.path.join(os.path.expanduser("~"), "google-ads.yaml")
# save a YAML file without a required developer_token key
self.fs.create_file(
file_path,
contents=yaml.safe_dump(
{
"client_id": self.client_id,
"client_secret": self.client_secret,
"refresh_token": self.refresh_token,
}
),
)
self.assertRaises(ValueError, config.load_from_yaml_file)
def test_load_from_yaml_file_with_path(self):
custom_path = os.path.expanduser("/test/custom/path")
file_path = os.path.join(custom_path, "google-ads.yaml")
self.fs.create_file(
file_path,
contents=yaml.safe_dump(
{
"developer_token": self.developer_token,
"client_id": self.client_id,
"client_secret": self.client_secret,
"refresh_token": self.refresh_token,
}
),
)
result = config.load_from_yaml_file(path=file_path)
self.assertEqual(result["developer_token"], self.developer_token)
self.assertEqual(result["client_id"], self.client_id)
self.assertEqual(result["client_secret"], self.client_secret)
self.assertEqual(result["refresh_token"], self.refresh_token)
def test_load_from_yaml_file_login_cid_int(self):
login_cid_int = 1234567890
file_path = os.path.join(os.path.expanduser("~"), "google-ads.yaml")
self.fs.create_file(
file_path,
contents=yaml.safe_dump(
{
"login_customer_id": login_cid_int,
"developer_token": self.developer_token,
"client_id": self.client_id,
"client_secret": self.client_secret,
"refresh_token": self.refresh_token,
}
),
)
result = config.load_from_yaml_file()
self.assertEqual(result["developer_token"], self.developer_token)
self.assertEqual(result["client_id"], self.client_id)
self.assertEqual(result["client_secret"], self.client_secret)
self.assertEqual(result["refresh_token"], self.refresh_token)
def test_parse_yaml_document_to_dict(self):
yaml_doc = (
"client_id: {}\n"
"client_secret: {}\n"
"developer_token: {}\n"
"refresh_token: {}\n".format(
self.client_id,
self.client_secret,
self.developer_token,
self.refresh_token,
)
)
result = config.parse_yaml_document_to_dict(yaml_doc)
self.assertEqual(result["developer_token"], self.developer_token)
self.assertEqual(result["client_id"], self.client_id)
self.assertEqual(result["client_secret"], self.client_secret)
self.assertEqual(result["refresh_token"], self.refresh_token)
def test_parse_yaml_document_to_dict_missing_required_key(self):
# YAML document is missing the required developer_token key
yaml_doc = (
"client_id: {}\n"
"client_secret: {}\n"
"refresh_token: {}\n".format(
self.client_id,
self.client_secret,
                self.refresh_token,
)
)
self.assertRaises(
ValueError, config.parse_yaml_document_to_dict, yaml_doc
)
def test_load_from_dict(self):
config_data = {
"developer_token": self.developer_token,
"client_id": self.client_id,
"client_secret": self.client_secret,
"refresh_token": self.refresh_token,
}
self.assertEqual(config.load_from_dict(config_data), config_data)
def test_load_from_dict_error(self):
config_data = 111
self.assertRaises(ValueError, config.load_from_dict, config_data)
def test_load_from_env(self):
environ = {
"GOOGLE_ADS_DEVELOPER_TOKEN": self.developer_token,
"GOOGLE_ADS_CLIENT_ID": self.client_id,
"GOOGLE_ADS_CLIENT_SECRET": self.client_secret,
"GOOGLE_ADS_REFRESH_TOKEN": self.refresh_token,
"GOOGLE_ADS_LOGGING": '{"test": true}',
"GOOGLE_ADS_ENDPOINT": self.endpoint,
"GOOGLE_ADS_LOGIN_CUSTOMER_ID": self.login_customer_id,
"GOOGLE_ADS_PATH_TO_PRIVATE_KEY_FILE": self.path_to_private_key_file,
"GOOGLE_ADS_DELEGATED_ACCOUNT": self.delegated_account,
}
with mock.patch("os.environ", environ):
result = config.load_from_env()
self.assertEqual(
result,
{
"developer_token": self.developer_token,
"client_id": self.client_id,
"client_secret": self.client_secret,
"refresh_token": self.refresh_token,
"logging": {"test": True},
"endpoint": self.endpoint,
"login_customer_id": self.login_customer_id,
"path_to_private_key_file": self.path_to_private_key_file,
"delegated_account": self.delegated_account,
},
)
def test_load_from_env_missing_required_key(self):
# environ is missing required developer_token key
environ = {
"GOOGLE_ADS_CLIENT_ID": self.client_id,
"GOOGLE_ADS_CLIENT_SECRET": self.client_secret,
"GOOGLE_ADS_REFRESH_TOKEN": self.refresh_token,
"GOOGLE_ADS_LOGGING": '{"test": true}',
"GOOGLE_ADS_ENDPOINT": self.endpoint,
"GOOGLE_ADS_LOGIN_CUSTOMER_ID": self.login_customer_id,
"GOOGLE_ADS_PATH_TO_PRIVATE_KEY_FILE": self.path_to_private_key_file,
"GOOGLE_ADS_DELEGATED_ACCOUNT": self.delegated_account,
}
with mock.patch("os.environ", environ):
self.assertRaises(ValueError, config.load_from_env)
    def test_validate_dict_invalid_config(self):
config_data = {"invalid": "config"}
self.assertRaises(ValueError, config.validate_dict, config_data)
def test_validate_dict(self):
config_data = {key: "test" for key in config._REQUIRED_KEYS}
try:
config.validate_dict(config_data)
except ValueError as ex:
self.fail("test_validate_dict failed unexpectedly: {}".format(ex))
def test_validate_dict_with_invalid_login_cid(self):
config_data = {key: "test" for key in config._REQUIRED_KEYS}
config_data["login_customer_id"] = "123-456-5789"
self.assertRaises(ValueError, config.validate_dict, config_data)
def test_validate_dict_with_valid_login_cid(self):
config_data = {key: "test" for key in config._REQUIRED_KEYS}
config_data["login_customer_id"] = "1234567893"
try:
config.validate_dict(config_data)
except ValueError as ex:
self.fail(
"test_validate_dict_with_login_cid failed unexpectedly: "
"{}".format(ex)
)
def test_validate_login_customer_id_invalid(self):
self.assertRaises(
ValueError, config.validate_login_customer_id, "123-456-7890"
)
def test_validate_login_customer_id_too_short(self):
self.assertRaises(ValueError, config.validate_login_customer_id, "123")
def test_get_oauth2_installed_app_keys(self):
self.assertEqual(
config.get_oauth2_installed_app_keys(),
config._OAUTH2_INSTALLED_APP_KEYS,
)
def test_get_oauth2_service_account_keys(self):
self.assertEqual(
config.get_oauth2_service_account_keys(),
config._OAUTH2_SERVICE_ACCOUNT_KEYS,
)
def test_convert_login_customer_id_to_str_with_int(self):
config_data = {"login_customer_id": 1234567890}
expected = {"login_customer_id": "1234567890"}
self.assertEqual(
config.convert_login_customer_id_to_str(config_data), expected
)
def test_parse_login_customer_id_with_str(self):
config_data = {"login_customer_id": "1234567890"}
self.assertEqual(
config.convert_login_customer_id_to_str(config_data), config_data
)
def test_parse_login_customer_id_with_none(self):
config_data = {"not_login_customer_id": 1234567890}
self.assertEqual(
config.convert_login_customer_id_to_str(config_data), config_data
)
```
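To connect the tests above to everyday use, here is a hedged sketch that builds the same four required keys as a dict and loads them either directly or from the default `~/google-ads.yaml` location. The credential values are placeholders, and only the functions exercised by the tests are used.
```python
# Hypothetical usage sketch of the config module exercised by the tests above.
# All credential values are placeholders.
import os
import yaml
from google.ads.google_ads import config

config_data = {
    "developer_token": "INSERT_DEVELOPER_TOKEN",
    "client_id": "INSERT_CLIENT_ID",
    "client_secret": "INSERT_CLIENT_SECRET",
    "refresh_token": "INSERT_REFRESH_TOKEN",
}

# Either validate an in-memory dict...
loaded = config.load_from_dict(config_data)

# ...or write it to ~/google-ads.yaml and let the default loader pick it up.
path = os.path.join(os.path.expanduser("~"), "google-ads.yaml")
with open(path, "w") as fh:
    yaml.safe_dump(config_data, fh)
loaded_from_file = config.load_from_yaml_file()
```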
#### File: google-ads-python/tests/oauth2_test.py
```python
import mock
from unittest import TestCase
from google.ads.google_ads import oauth2
class OAuth2Tests(TestCase):
def setUp(self):
self.client_id = "client_id_123456789"
self.client_secret = "client_secret_987654321"
self.refresh_token = "<PASSWORD>"
self.path_to_private_key_file = "/path/to/file"
self.subject = "<EMAIL>"
self.token_uri = oauth2._DEFAULT_TOKEN_URI
self.scopes = oauth2._SERVICE_ACCOUNT_SCOPES
def test_get_installed_app_credentials(self):
mock_credentials = mock.Mock()
mock_request = mock.Mock()
with mock.patch.object(
oauth2, "InstalledAppCredentials", return_value=mock_credentials
) as mock_initializer, mock.patch.object(
oauth2, "Request", return_value=mock_request
) as mock_request_class:
result = oauth2.get_installed_app_credentials(
self.client_id, self.client_secret, self.refresh_token
)
mock_initializer.assert_called_once_with(
None,
client_id=self.client_id,
client_secret=self.client_secret,
refresh_token=self.refresh_token,
token_uri=self.token_uri,
)
mock_request_class.assert_called_once()
result.refresh.assert_called_once_with(mock_request)
def test_get_service_account_credentials(self):
mock_credentials = mock.Mock()
mock_request = mock.Mock()
with mock.patch.object(
oauth2.ServiceAccountCreds,
"from_service_account_file",
return_value=mock_credentials,
) as mock_initializer, mock.patch.object(
oauth2, "Request", return_value=mock_request
) as mock_request_class:
result = oauth2.get_service_account_credentials(
self.path_to_private_key_file, self.subject
)
mock_initializer.assert_called_once_with(
self.path_to_private_key_file,
subject=self.subject,
scopes=self.scopes,
)
mock_request_class.assert_called_once()
result.refresh.assert_called_once_with(mock_request)
def test_get_credentials_installed_application(self):
mock_config = {
"client_id": self.client_id,
"client_secret": self.client_secret,
"refresh_token": self.refresh_token,
}
with mock.patch.object(
oauth2, "get_installed_app_credentials", return_value=None
) as mock_initializer:
oauth2.get_credentials(mock_config)
mock_initializer.assert_called_once_with(
self.client_id, self.client_secret, self.refresh_token
)
def test_get_credentials_installed_application_bad_config(self):
# using a config that is missing the refresh_token key
mock_config = {
"client_id": self.client_id,
"client_secret": self.client_secret,
}
self.assertRaises(ValueError, oauth2.get_credentials, mock_config)
    def test_get_credentials_service_account(self):
mock_config = {
"path_to_private_key_file": self.path_to_private_key_file,
"delegated_account": self.subject,
}
with mock.patch.object(
oauth2, "get_service_account_credentials", return_value=None
) as mock_initializer:
oauth2.get_credentials(mock_config)
mock_initializer.assert_called_once_with(
self.path_to_private_key_file, self.subject
)
``` |
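For reference, a hedged sketch of the two call paths the tests above cover: `get_credentials` dispatches to the installed-app flow when `client_id`/`client_secret`/`refresh_token` are present, and to the service-account flow when `path_to_private_key_file` is present. All values are placeholders.
```python
# Hedged sketch of driving the oauth2 helpers under test; values are placeholders.
from google.ads.google_ads import oauth2

installed_app_config = {
    "client_id": "INSERT_CLIENT_ID",
    "client_secret": "INSERT_CLIENT_SECRET",
    "refresh_token": "INSERT_REFRESH_TOKEN",
}
creds = oauth2.get_credentials(installed_app_config)

service_account_config = {
    "path_to_private_key_file": "/path/to/key.json",   # placeholder path
    "delegated_account": "user@example.com",            # placeholder account
}
sa_creds = oauth2.get_credentials(service_account_config)
```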
{
"source": "jphart/weatherstation",
"score": 3
} |
#### File: weatherstation/receiver/weather_receiver.py
```python
import serial
import json
from influxdb import InfluxDBClient
from datetime import datetime
LOCATION_ID = 'l'
TEMPERATURE_ID = 't'
HUMIDITY_ID = 'h'
DEW_POINT_ID = 'd'
PRESSURE_ID = 'p'
def buildInfluxDBMessage(sensor_message):
"""
    Builds an InfluxDB JSON body from a sensor message.
"""
json_body = [{
"measurement" : "weather_reading",
"time" : datetime.now().isoformat(),
"tags" :
{
"location" : sensor_message[LOCATION_ID],
},
"fields" :
{
"humidity" : sensor_message[HUMIDITY_ID],
"temperature" : sensor_message[TEMPERATURE_ID],
"pressure" : sensor_message[PRESSURE_ID],
"dew_point" : sensor_message[DEW_POINT_ID]
}
}]
return json_body
ser = serial.Serial(
port='/dev/ttyS0',
baudrate=1200,
parity=serial.PARITY_NONE,
stopbits=serial.STOPBITS_ONE,
bytesize=serial.EIGHTBITS,
timeout=30
)
client = InfluxDBClient(host='localhost', port=8086)
client.switch_database('weather')
while True:
message = ser.readline()
print(message)
print("\n\n")
try:
jsonmsg = json.loads(message)
json_body = buildInfluxDBMessage(jsonmsg)
print(json.dumps(json_body,indent=2))
print("Write points: {0}".format(json_body))
client.write_points(json_body)
except json.decoder.JSONDecodeError as ex:
print(ex.msg)
``` |
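For context, the receiver above expects each serial line to be a JSON object keyed by the single-letter constants at the top of the file. The sketch below shows a hypothetical payload a sensor node might emit; the values and units are invented for illustration.
```python
# Hypothetical sensor-side payload matching the single-letter keys the
# receiver expects; values and units are invented for illustration.
import json

sensor_message = {
    "l": "garden",   # LOCATION_ID
    "t": 18.4,       # TEMPERATURE_ID (assumed °C)
    "h": 67.0,       # HUMIDITY_ID (assumed %)
    "d": 12.1,       # DEW_POINT_ID (assumed °C)
    "p": 1013.2,     # PRESSURE_ID (assumed hPa)
}
line = json.dumps(sensor_message) + "\n"  # what ser.readline() would return
```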
{
"source": "jp-harvey/pymapd",
"score": 2
} |
#### File: pymapd/pymapd/connection.py
```python
from collections import namedtuple
import base64
import pandas as pd
import pyarrow as pa
import ctypes
from sqlalchemy.engine.url import make_url
from thrift.protocol import TBinaryProtocol, TJSONProtocol
from thrift.transport import TSocket, TSSLSocket, THttpClient, TTransport
from thrift.transport.TSocket import TTransportException
from omnisci.thrift.OmniSci import Client, TCreateParams
from omnisci.common.ttypes import TDeviceType
from omnisci.thrift.ttypes import TOmniSciException, TFileType
from .cursor import Cursor
from .exceptions import _translate_exception, OperationalError
from ._parsers import _parse_tdf_gpu, _bind_parameters, _extract_column_details
from ._loaders import _build_input_rows
from ._transforms import change_dashboard_sources
from .ipc import load_buffer, shmdt
from ._pandas_loaders import build_row_desc, _serialize_arrow_payload
from . import _pandas_loaders
from ._mutators import set_tdf, get_tdf
from types import MethodType
from ._samlutils import get_saml_response
from packaging.version import Version
ConnectionInfo = namedtuple(
"ConnectionInfo",
[
'user',
'password',
'host',
'port',
'dbname',
'protocol',
'bin_cert_validate',
'bin_ca_certs',
],
)
def connect(
uri=None,
user=None,
    password=None,
host=None,
port=6274,
dbname=None,
protocol='binary',
sessionid=None,
bin_cert_validate=None,
bin_ca_certs=None,
idpurl=None,
idpformusernamefield='username',
idpformpasswordfield='password',
idpsslverify=True,
):
"""
Create a new Connection.
Parameters
----------
uri: str
user: str
password: str
host: str
port: int
dbname: str
protocol: {'binary', 'http', 'https'}
sessionid: str
bin_cert_validate: bool, optional, binary encrypted connection only
Whether to continue if there is any certificate error
bin_ca_certs: str, optional, binary encrypted connection only
Path to the CA certificate file
idpurl : str
EXPERIMENTAL Enable SAML authentication by providing
the logon page of the SAML Identity Provider.
idpformusernamefield: str
The HTML form ID for the username, defaults to 'username'.
idpformpasswordfield: str
The HTML form ID for the password, defaults to 'password'.
idpsslverify: str
Enable / disable certificate checking, defaults to True.
Returns
-------
conn: Connection
Examples
--------
You can either pass a string ``uri``, all the individual components,
or an existing sessionid excluding user, password, and database
>>> connect('mapd://admin:HyperInteractive@localhost:6274/omnisci?'
... 'protocol=binary')
Connection(mapd://mapd:***@localhost:6274/mapd?protocol=binary)
>>> connect(user='admin', password='<PASSWORD>', host='localhost',
... port=6274, dbname='omnisci')
>>> connect(user='admin', password='<PASSWORD>', host='localhost',
... port=443, idpurl='https://sso.localhost/logon',
protocol='https')
>>> connect(sessionid='XihlkjhdasfsadSDoasdllMweieisdpo', host='localhost',
... port=6273, protocol='http')
"""
return Connection(
uri=uri,
user=user,
password=password,
host=host,
port=port,
dbname=dbname,
protocol=protocol,
sessionid=sessionid,
bin_cert_validate=bin_cert_validate,
bin_ca_certs=bin_ca_certs,
idpurl=idpurl,
idpformusernamefield=idpformusernamefield,
idpformpasswordfield=idpformpasswordfield,
idpsslverify=idpsslverify,
)
def _parse_uri(uri):
"""
Parse connection string
Parameters
----------
uri: str
a URI containing connection information
Returns
-------
info: ConnectionInfo
Notes
------
The URI may include information on
- user
- password
- host
- port
- dbname
- protocol
- bin_cert_validate
- bin_ca_certs
"""
url = make_url(uri)
user = url.username
password = url.password
host = url.host
port = url.port
dbname = url.database
protocol = url.query.get('protocol', 'binary')
bin_cert_validate = url.query.get('bin_cert_validate', None)
bin_ca_certs = url.query.get('bin_ca_certs', None)
return ConnectionInfo(
user,
password,
host,
port,
dbname,
protocol,
bin_cert_validate,
bin_ca_certs,
)
class Connection:
"""Connect to your OmniSci database."""
def __init__(
self,
uri=None,
user=None,
        password=None,
host=None,
port=6274,
dbname=None,
protocol='binary',
sessionid=None,
bin_cert_validate=None,
bin_ca_certs=None,
idpurl=None,
idpformusernamefield='username',
idpformpasswordfield='password',
idpsslverify=True,
):
self.sessionid = None
if sessionid is not None:
if any([user, password, uri, dbname, idpurl]):
raise TypeError(
"Cannot specify sessionid with user, password,"
" dbname, uri, or idpurl"
)
if uri is not None:
if not all(
[
user is None,
                    password is None,
host is None,
port == 6274,
dbname is None,
protocol == 'binary',
bin_cert_validate is None,
bin_ca_certs is None,
idpurl is None,
]
):
raise TypeError("Cannot specify both URI and other arguments")
(
user,
password,
host,
port,
dbname,
protocol,
bin_cert_validate,
bin_ca_certs,
) = _parse_uri(uri)
if host is None:
raise TypeError("`host` parameter is required.")
if protocol != 'binary' and not all(
[bin_cert_validate is None, bin_ca_certs is None]
):
raise TypeError(
"Cannot specify bin_cert_validate or bin_ca_certs,"
" without binary protocol"
)
if protocol in ("http", "https"):
if not host.startswith(protocol):
# the THttpClient expects http[s]://localhost
host = '{0}://{1}'.format(protocol, host)
transport = THttpClient.THttpClient("{}:{}".format(host, port))
proto = TJSONProtocol.TJSONProtocol(transport)
socket = None
elif protocol == "binary":
if any([bin_cert_validate is not None, bin_ca_certs]):
socket = TSSLSocket.TSSLSocket(
host,
port,
validate=(bin_cert_validate),
ca_certs=bin_ca_certs,
)
else:
socket = TSocket.TSocket(host, port)
transport = TTransport.TBufferedTransport(socket)
proto = TBinaryProtocol.TBinaryProtocolAccelerated(transport)
else:
raise ValueError(
"`protocol` should be one of"
" ['http', 'https', 'binary'],"
" got {} instead".format(protocol),
)
self._user = user
self._password = password
self._host = host
self._port = port
self._dbname = dbname
self._transport = transport
self._protocol = protocol
self._socket = socket
self._closed = 0
self._tdf = None
self._rbc = None
try:
self._transport.open()
except TTransportException as e:
if e.NOT_OPEN:
err = OperationalError("Could not connect to database")
raise err from e
else:
raise
self._client = Client(proto)
try:
# If a sessionid was passed, we should validate it
if sessionid:
self._session = sessionid
self.get_tables()
self.sessionid = sessionid
else:
if idpurl:
self._user = ''
self._password = get_saml_response(
username=user,
password=password,
idpurl=idpurl,
userformfield=idpformusernamefield,
passwordformfield=idpformpasswordfield,
sslverify=idpsslverify,
)
self._dbname = ''
self._idpsslverify = idpsslverify
user = self._user
password = self._password
dbname = self._dbname
self._session = self._client.connect(user, password, dbname)
except TOmniSciException as e:
raise _translate_exception(e) from e
except TTransportException:
raise ValueError(
f"Connection failed with port {port} and "
f"protocol '{protocol}'. Try port 6274 for "
"protocol == binary or 6273, 6278 or 443 for "
"http[s]"
)
# if OmniSci version <4.6, raise RuntimeError, as data import can be
# incorrect for columnar date loads
# Caused by https://github.com/omnisci/pymapd/pull/188
semver = self._client.get_version()
if Version(semver.split("-")[0]) < Version("4.6"):
raise RuntimeError(
f"Version {semver} of OmniSci detected. "
"Please use pymapd <0.11. See release notes "
"for more details."
)
def __repr__(self):
tpl = (
'Connection(omnisci://{user}:***@{host}:{port}/{dbname}?'
'protocol={protocol})'
)
return tpl.format(
user=self._user,
host=self._host,
port=self._port,
dbname=self._dbname,
protocol=self._protocol,
)
def __del__(self):
self.close()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.close()
@property
def closed(self):
return self._closed
def close(self):
"""Disconnect from the database unless created with sessionid"""
if not self.sessionid and not self._closed:
try:
self._client.disconnect(self._session)
except (TOmniSciException, AttributeError, TypeError):
pass
self._closed = 1
self._rbc = None
def commit(self):
"""This is a noop, as OmniSci does not provide transactions.
Implemented to comply with the DBI specification.
"""
return None
def execute(self, operation, parameters=None):
"""Execute a SQL statement
Parameters
----------
operation: str
            A SQL statement to execute
Returns
-------
c: Cursor
"""
c = Cursor(self)
return c.execute(operation, parameters=parameters)
def cursor(self):
"""Create a new :class:`Cursor` object attached to this connection."""
return Cursor(self)
def select_ipc_gpu(
self,
operation,
parameters=None,
device_id=0,
first_n=-1,
release_memory=True,
):
"""Execute a ``SELECT`` operation using GPU memory.
Parameters
----------
operation: str
A SQL statement
parameters: dict, optional
Parameters to insert into a parametrized query
device_id: int
GPU to return results to
first_n: int, optional
Number of records to return
release_memory: bool, optional
            Call ``self.deallocate_ipc_gpu(df)`` after the DataFrame is created
Returns
-------
gdf: cudf.GpuDataFrame
Notes
-----
This method requires ``cudf`` and ``libcudf`` to be installed.
An ``ImportError`` is raised if those aren't available.
This method requires the Python code to be executed on the same machine
        where OmniSci is running.
"""
try:
from cudf.comm.gpuarrow import GpuArrowReader # noqa
from cudf.core.dataframe import DataFrame # noqa
except ImportError:
raise ImportError(
"The 'cudf' package is required for `select_ipc_gpu`"
)
self.register_runtime_udfs()
if parameters is not None:
operation = str(_bind_parameters(operation, parameters))
tdf = self._client.sql_execute_gdf(
self._session,
operation.strip(),
device_id=device_id,
first_n=first_n,
)
self._tdf = tdf
df = _parse_tdf_gpu(tdf)
# Deallocate TDataFrame at OmniSci instance
if release_memory:
self.deallocate_ipc_gpu(df)
return df
def select_ipc(
self, operation, parameters=None, first_n=-1, release_memory=True
):
"""Execute a ``SELECT`` operation using CPU shared memory
Parameters
----------
operation: str
A SQL select statement
parameters: dict, optional
Parameters to insert for a parametrized query
first_n: int, optional
Number of records to return
release_memory: bool, optional
            Call ``self.deallocate_ipc(df)`` after the DataFrame is created
Returns
-------
df: pandas.DataFrame
Notes
-----
This method requires the Python code to be executed on the same machine
        where OmniSci is running.
"""
self.register_runtime_udfs()
if parameters is not None:
operation = str(_bind_parameters(operation, parameters))
tdf = self._client.sql_execute_df(
self._session,
operation.strip(),
device_type=0,
device_id=0,
first_n=first_n,
)
self._tdf = tdf
df_buf = load_buffer(tdf.df_handle, tdf.df_size)
reader = pa.ipc.open_stream(df_buf[0])
tbl = reader.read_all()
df = tbl.to_pandas()
# this is needed to modify the df object for deallocate_df to work
df.set_tdf = MethodType(set_tdf, df)
df.get_tdf = MethodType(get_tdf, df)
# Because deallocate_df can be called any time in future, keep tdf
# from OmniSciDB so that it can be used whenever deallocate_df called
df.set_tdf(tdf)
# free shared memory from Python
# https://github.com/omnisci/pymapd/issues/46
# https://github.com/omnisci/pymapd/issues/31
free_df = shmdt(ctypes.cast(df_buf[1], ctypes.c_void_p)) # noqa
# Deallocate TDataFrame at OmniSci instance
if release_memory:
self.deallocate_ipc(df)
return df
def deallocate_ipc_gpu(self, df, device_id=0):
"""Deallocate a DataFrame using GPU memory.
Parameters
----------
        device_id: int
GPU which contains TDataFrame
"""
tdf = df.get_tdf()
result = self._client.deallocate_df(
session=self._session,
df=tdf,
device_type=TDeviceType.GPU,
device_id=device_id,
)
return result
def deallocate_ipc(self, df, device_id=0):
"""Deallocate a DataFrame using CPU shared memory.
Parameters
----------
device_id: int
GPU which contains TDataFrame
"""
tdf = df.get_tdf()
result = self._client.deallocate_df(
session=self._session,
df=tdf,
device_type=TDeviceType.CPU,
device_id=device_id,
)
return result
# --------------------------------------------------------------------------
# Convenience methods
# --------------------------------------------------------------------------
def get_tables(self):
"""List all the tables in the database
Examples
--------
>>> con.get_tables()
['flights_2008_10k', 'stocks']
"""
return self._client.get_tables(self._session)
def get_table_details(self, table_name):
"""Get the column names and data types associated with a table.
Parameters
----------
table_name: str
Returns
-------
details: List[tuples]
Examples
--------
>>> con.get_table_details('stocks')
[ColumnDetails(name='date_', type='STR', nullable=True, precision=0,
scale=0, comp_param=32, encoding='DICT'),
ColumnDetails(name='trans', type='STR', nullable=True, precision=0,
scale=0, comp_param=32, encoding='DICT'),
...
]
"""
details = self._client.get_table_details(self._session, table_name)
return _extract_column_details(details.row_desc)
def create_table(self, table_name, data, preserve_index=False):
"""Create a table from a pandas.DataFrame
Parameters
----------
table_name: str
data: DataFrame
preserve_index: bool, default False
Whether to create a column in the table for the DataFrame index
"""
row_desc = build_row_desc(data, preserve_index=preserve_index)
self._client.create_table(
self._session,
table_name,
row_desc,
TFileType.DELIMITED,
TCreateParams(False),
)
def load_table(
self,
table_name,
data,
method='infer',
preserve_index=False,
create='infer',
):
"""Load data into a table
Parameters
----------
table_name: str
data: pyarrow.Table, pandas.DataFrame, or iterable of tuples
method: {'infer', 'columnar', 'rows', 'arrow'}
Method to use for loading the data. Three options are available
1. ``pyarrow`` and Apache Arrow loader
2. columnar loader
3. row-wise loader
The Arrow loader is typically the fastest, followed by the
columnar loader, followed by the row-wise loader. If a DataFrame
or ``pyarrow.Table`` is passed and ``pyarrow`` is installed, the
Arrow-based loader will be used. If arrow isn't available, the
            columnar loader is used. Finally, if ``data`` is an iterable of
            tuples, the row-wise loader is used.
preserve_index: bool, default False
Whether to keep the index when loading a pandas DataFrame
create: {"infer", True, False}
Whether to issue a CREATE TABLE before inserting the data.
* infer: check to see if the table already exists, and create
a table if it does not
* True: attempt to create the table, without checking if it exists
* False: do not attempt to create the table
See Also
--------
load_table_arrow
load_table_columnar
"""
if create not in ['infer', True, False]:
raise ValueError(
f"Unexpected value for create: '{create}'. "
"Expected one of {'infer', True, False}"
)
if create == 'infer':
# ask the database if we already exist, creating if not
create = table_name not in set(
self._client.get_tables(self._session)
)
if create:
self.create_table(table_name, data)
if method == 'infer':
if (
isinstance(data, pd.DataFrame)
or isinstance(data, pa.Table)
or isinstance(data, pa.RecordBatch)
): # noqa
return self.load_table_arrow(table_name, data)
elif isinstance(data, pd.DataFrame):
return self.load_table_columnar(table_name, data)
elif method == 'arrow':
return self.load_table_arrow(table_name, data)
elif method == 'columnar':
return self.load_table_columnar(table_name, data)
elif method != 'rows':
raise TypeError(
"Method must be one of {{'infer', 'arrow', "
"'columnar', 'rows'}}. Got {} instead".format(method)
)
if isinstance(data, pd.DataFrame):
# We need to convert a Pandas dataframe to a list of tuples before
# loading row wise
data = data.itertuples(index=preserve_index, name=None)
input_data = _build_input_rows(data)
self._client.load_table(self._session, table_name, input_data)
def load_table_rowwise(self, table_name, data):
"""Load data into a table row-wise
Parameters
----------
table_name: str
data: Iterable of tuples
Each element of `data` should be a row to be inserted
See Also
--------
load_table
load_table_arrow
load_table_columnar
Examples
--------
>>> data = [(1, 'a'), (2, 'b'), (3, 'c')]
>>> con.load_table('bar', data)
"""
input_data = _build_input_rows(data)
self._client.load_table(self._session, table_name, input_data)
def load_table_columnar(
self,
table_name,
data,
preserve_index=False,
chunk_size_bytes=0,
col_names_from_schema=False,
):
"""Load a pandas DataFrame to the database using OmniSci's Thrift-based
columnar format
Parameters
----------
table_name: str
data: DataFrame
preserve_index: bool, default False
Whether to include the index of a pandas DataFrame when writing.
chunk_size_bytes: integer, default 0
Chunk the loading of columns to prevent large Thrift requests. A
value of 0 means do not chunk and send the dataframe as a single
request
col_names_from_schema: bool, default False
Read the existing table schema to determine the column names. This
will read the schema of an existing table in OmniSci and match
those names to the column names of the dataframe. This is for
user convenience when loading from data that is unordered,
especially handy when a table has a large number of columns.
Examples
--------
>>> df = pd.DataFrame({"a": [1, 2, 3], "b": ['d', 'e', 'f']})
>>> con.load_table_columnar('foo', df, preserve_index=False)
See Also
--------
load_table
load_table_arrow
load_table_rowwise
Notes
-----
Use ``pymapd >= 0.11.0`` while running with ``omnisci >= 4.6.0`` in
order to avoid loading inconsistent values into a DATE column.
"""
if not isinstance(data, pd.DataFrame):
raise TypeError('Unknown type {}'.format(type(data)))
table_details = self.get_table_details(table_name)
# Validate that there are the same number of columns in the table
# as there are in the dataframe. No point trying to load the data
# if this is not the case
if len(table_details) != len(data.columns):
raise ValueError(
'Number of columns in dataframe ({}) does not \
match number of columns in OmniSci table \
({})'.format(
len(data.columns), len(table_details)
)
)
col_names = (
[i.name for i in table_details]
if col_names_from_schema
else list(data)
)
col_types = table_details
input_cols = _pandas_loaders.build_input_columnar(
data,
preserve_index=preserve_index,
chunk_size_bytes=chunk_size_bytes,
col_types=col_types,
col_names=col_names,
)
for cols in input_cols:
self._client.load_table_binary_columnar(
self._session, table_name, cols
)
def load_table_arrow(self, table_name, data, preserve_index=False):
"""Load a pandas.DataFrame or a pyarrow Table or RecordBatch to the
database using Arrow columnar format for interchange
Parameters
----------
table_name: str
data: pandas.DataFrame, pyarrow.RecordBatch, pyarrow.Table
preserve_index: bool, default False
Whether to include the index of a pandas DataFrame when writing.
Examples
--------
>>> df = pd.DataFrame({"a": [1, 2, 3], "b": ['d', 'e', 'f']})
>>> con.load_table_arrow('foo', df, preserve_index=False)
See Also
--------
load_table
load_table_columnar
load_table_rowwise
"""
metadata = self.get_table_details(table_name)
payload = _serialize_arrow_payload(
data, metadata, preserve_index=preserve_index
)
self._client.load_table_binary_arrow(
self._session, table_name, payload.to_pybytes()
)
def render_vega(self, vega, compression_level=1):
"""Render vega data on the database backend,
returning the image as a PNG.
Parameters
----------
vega: dict
The vega specification to render.
compression_level: int
The level of compression for the rendered PNG. Ranges from
0 (low compression, faster) to 9 (high compression, slower).
"""
result = self._client.render_vega(
self._session,
widget_id=None,
vega_json=vega,
compression_level=compression_level,
nonce=None,
)
rendered_vega = RenderedVega(result)
return rendered_vega
def get_dashboard(self, dashboard_id):
"""Return the dashboard object of a specific dashboard
Examples
--------
>>> con.get_dashboard(123)
"""
dashboard = self._client.get_dashboard(
session=self._session, dashboard_id=dashboard_id
)
return dashboard
def get_dashboards(self):
"""List all the dashboards in the database
Examples
--------
>>> con.get_dashboards()
"""
dashboards = self._client.get_dashboards(session=self._session)
return dashboards
def duplicate_dashboard(
self, dashboard_id, new_name=None, source_remap=None
):
"""
Duplicate an existing dashboard, returning the new dashboard id.
Parameters
----------
dashboard_id: int
The id of the dashboard to duplicate
new_name: str
The name for the new dashboard
source_remap: dict
EXPERIMENTAL
A dictionary remapping table names. The old table name(s)
should be keys of the dict, with each value being another
dict with a 'name' key holding the new table value. This
structure can be used later to support changing column
names.
Examples
--------
>>> source_remap = {'oldtablename1': {'name': 'newtablename1'}, \
'oldtablename2': {'name': 'newtablename2'}}
>>> newdash = con.duplicate_dashboard(12345, "new dash", source_remap)
"""
source_remap = source_remap or {}
d = self._client.get_dashboard(
session=self._session, dashboard_id=dashboard_id
)
newdashname = new_name or '{0} (Copy)'.format(d.dashboard_name)
d = change_dashboard_sources(d, source_remap) if source_remap else d
new_dashboard_id = self._client.create_dashboard(
session=self._session,
dashboard_name=newdashname,
dashboard_state=d.dashboard_state,
image_hash='',
dashboard_metadata=d.dashboard_metadata,
)
return new_dashboard_id
def __call__(self, *args, **kwargs):
"""Runtime UDF decorator.
The connection object can be applied to a Python function as
decorator that will add the function to the pending registration
list.
"""
try:
from rbc.omniscidb import RemoteOmnisci
except ImportError:
raise ImportError("The 'rbc' package is required for `__call__`")
if self._rbc is None:
self._rbc = RemoteOmnisci(
user=self._user,
password=self._password,
host=self._host,
port=self._port,
dbname=self._dbname,
)
self._rbc._session_id = self.sessionid
return self._rbc(*args, **kwargs)
def register_runtime_udfs(self):
"""Register any bending Runtime UDF functions in OmniSci server.
If no Runtime UDFs have been defined, the call to this method
is noop.
"""
if self._rbc is not None:
self._rbc.register()
class RenderedVega:
def __init__(self, render_result):
self._render_result = render_result
self.image_data = base64.b64encode(render_result.image).decode()
def _repr_mimebundle_(self, include=None, exclude=None):
return {
'image/png': self.image_data,
'text/html': (
'<img src="data:image/png;base64,{}" '
'alt="OmniSci Vega">'.format(self.image_data)
),
}
```
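The loader dispatch above is easiest to see from the caller's side. Below is a minimal sketch of the three code paths; the connection parameters mirror those used in the test suite, while the ``foo`` table and its contents are made up for illustration:
```python
import pandas as pd
from pymapd import connect

con = connect(user='admin', password='HyperInteractive',
              host='localhost', dbname='omnisci')

df = pd.DataFrame({"a": [1, 2, 3], "b": ['d', 'e', 'f']})

# method='infer' picks the Arrow loader for a DataFrame when pyarrow is
# installed, otherwise the Thrift columnar loader; create='infer' only
# issues CREATE TABLE if 'foo' does not already exist.
con.load_table('foo', df, method='infer', create='infer')

# The loaders can also be forced explicitly:
con.load_table('foo', df, method='columnar')
con.load_table('foo', [(4, 'g'), (5, 'h')], method='rows')
```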
#### File: pymapd/pymapd/_utils.py
```python
import datetime
import numpy as np
import pandas as pd
from enum import Enum
class TimePrecision(Enum):
SECONDS = 0
MILLISECONDS = 3
MICROSECONDS = 6
NANOSECONDS = 9
def seconds_to_time(seconds):
"""Convert seconds since midnight to a datetime.time"""
m, s = divmod(seconds, 60)
h, m = divmod(m, 60)
return datetime.time(h, m, s)
def time_to_seconds(time):
"""Convert a datetime.time to seconds since midnight"""
if time is None:
return None
return 3600 * time.hour + 60 * time.minute + time.second
def datetime_to_seconds(arr, precision):
"""Convert an array of datetime64[ns] to seconds since the UNIX epoch"""
p = TimePrecision(precision)
if arr.dtype != np.dtype('datetime64[ns]'):
if arr.dtype == 'int64':
# The user has passed a unix timestamp already
return arr
if not (
arr.dtype == 'object'
or str(arr.dtype).startswith('datetime64[ns,')
):
raise TypeError(
f"Invalid dtype '{arr.dtype}', expected one of: "
"datetime64[ns], int64 (UNIX epoch), "
"or object (string)"
)
# Convert to datetime64[ns] from string
# Or from datetime with timezone information
# Return timestamp in 'UTC'
arr = pd.to_datetime(arr, utc=True)
return arr.view('i8') // 10 ** 9 # ns -> s since epoch
else:
if p == TimePrecision.SECONDS:
return arr.view('i8') // 10 ** 9
elif p == TimePrecision.MILLISECONDS:
return arr.view('i8') // 10 ** 6
elif p == TimePrecision.MICROSECONDS:
return arr.view('i8') // 10 ** 3
elif p == TimePrecision.NANOSECONDS:
return arr.view('i8')
def datetime_in_precisions(epoch, precision):
"""Convert epoch time value into s, ms, us, ns"""
p = TimePrecision(precision)
if p == TimePrecision.SECONDS:
return np.datetime64(epoch, 's').astype(datetime.datetime)
elif p == TimePrecision.MILLISECONDS:
return np.datetime64(epoch, 'ms')
elif p == TimePrecision.MICROSECONDS:
return np.datetime64(epoch, 'us')
elif p == TimePrecision.NANOSECONDS:
return np.datetime64(epoch, 'ns')
else:
raise TypeError("Invalid timestamp precision: {}".format(precision))
def date_to_seconds(arr):
"""Converts date into seconds"""
return arr.apply(lambda x: np.datetime64(x, "s").astype(int))
mapd_to_slot = {
'BOOL': 'int_col',
'BOOLEAN': 'int_col',
'SMALLINT': 'int_col',
'INT': 'int_col',
'INTEGER': 'int_col',
'BIGINT': 'int_col',
'FLOAT': 'real_col',
'DECIMAL': 'int_col',
'DOUBLE': 'real_col',
'TIMESTAMP': 'int_col',
'DATE': 'int_col',
'TIME': 'int_col',
'STR': 'str_col',
'POINT': 'str_col',
'LINESTRING': 'str_col',
'POLYGON': 'str_col',
'MULTIPOLYGON': 'str_col',
'TINYINT': 'int_col',
'GEOMETRY': 'str_col',
'GEOGRAPHY': 'str_col',
}
mapd_to_na = {
'BOOL': -128,
'BOOLEAN': -128,
'SMALLINT': -32768,
'INT': -2147483648,
'INTEGER': -2147483648,
'BIGINT': -9223372036854775808,
'FLOAT': 0,
'DECIMAL': 0,
'DOUBLE': 0,
'TIMESTAMP': -9223372036854775808,
'DATE': -9223372036854775808,
'TIME': -9223372036854775808,
'STR': '',
'POINT': '',
'LINESTRING': '',
'POLYGON': '',
'MULTIPOLYGON': '',
'TINYINT': -128,
'GEOMETRY': '',
'GEOGRAPHY': '',
}
```
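The conversion helpers above round-trip cleanly, which is handy when checking TIME and TIMESTAMP handling. A minimal sketch, with arbitrarily chosen values and assuming the module is importable as ``pymapd._utils`` (per the file path):
```python
import datetime
from pymapd._utils import seconds_to_time, time_to_seconds, datetime_in_precisions

t = seconds_to_time(3723)
assert t == datetime.time(1, 2, 3)   # 3723 s past midnight is 01:02:03
assert time_to_seconds(t) == 3723    # and back again

# An epoch value is widened according to the column's precision code (0 = seconds).
assert datetime_in_precisions(0, 0) == datetime.datetime(1970, 1, 1, 0, 0)
```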
#### File: pymapd/tests/test_connection.py
```python
import pytest
from omnisci.thrift.ttypes import TColumnType
from omnisci.common.ttypes import TTypeInfo
from pymapd import OperationalError, connect
from pymapd.connection import _parse_uri, ConnectionInfo
from pymapd.exceptions import Error
from pymapd._parsers import ColumnDetails, _extract_column_details
@pytest.mark.usefixtures("mapd_server")
class TestConnect:
def test_host_specified(self):
with pytest.raises(TypeError):
connect(user='foo')
def test_raises_right_exception(self):
with pytest.raises(OperationalError):
connect(host='localhost', protocol='binary', port=1234)
def test_close(self):
conn = connect(
user='admin',
password='<PASSWORD>',
host='localhost',
dbname='omnisci',
)
assert conn.closed == 0
conn.close()
assert conn.closed == 1
def test_commit_noop(self, con):
result = con.commit() # it worked
assert result is None
def test_bad_protocol(self, mock_client):
with pytest.raises(ValueError) as m:
connect(
user='user',
host='localhost',
dbname='dbname',
protocol='fake-proto',
)
assert m.match('fake-proto')
def test_session_logon_success(self):
conn = connect(
user='admin',
password='<PASSWORD>',
host='localhost',
dbname='omnisci',
)
sessionid = conn._session
connnew = connect(sessionid=sessionid, host='localhost')
assert connnew._session == sessionid
def test_session_logon_failure(self):
sessionid = 'ILoveDancingOnTables'
with pytest.raises(Error):
connect(sessionid=sessionid, host='localhost', protocol='binary')
def test_bad_binary_encryption_params(self):
with pytest.raises(TypeError):
connect(
user='admin',
host='localhost',
dbname='omnisci',
protocol='http',
validate=False,
)
class TestURI:
def test_parse_uri(self):
uri = (
'omnisci://admin:HyperInteractive@localhost:6274/omnisci?'
'protocol=binary'
)
result = _parse_uri(uri)
expected = ConnectionInfo(
"admin",
"HyperInteractive",
"localhost",
6274,
"omnisci",
"binary",
None,
None,
)
assert result == expected
def test_both_raises(self):
uri = (
'omnisci://admin:HyperInteractive@localhost:6274/omnisci?'
'protocol=binary'
)
with pytest.raises(TypeError):
connect(uri=uri, user='my user')
class TestExtras:
def test_extract_row_details(self):
data = [
TColumnType(
col_name='date_',
col_type=TTypeInfo(
type=6,
encoding=4,
nullable=True,
is_array=False,
precision=0,
scale=0,
comp_param=32,
),
is_reserved_keyword=False,
src_name='',
),
TColumnType(
col_name='trans',
col_type=TTypeInfo(
type=6,
encoding=4,
nullable=True,
is_array=False,
precision=0,
scale=0,
comp_param=32,
),
is_reserved_keyword=False,
src_name='',
),
TColumnType(
col_name='symbol',
col_type=TTypeInfo(
type=6,
encoding=4,
nullable=True,
is_array=False,
precision=0,
scale=0,
comp_param=32,
),
is_reserved_keyword=False,
src_name='',
),
TColumnType(
col_name='qty',
col_type=TTypeInfo(
type=1,
encoding=0,
nullable=True,
is_array=False,
precision=0,
scale=0,
comp_param=0,
),
is_reserved_keyword=False,
src_name='',
),
TColumnType(
col_name='price',
col_type=TTypeInfo(
type=3,
encoding=0,
nullable=True,
is_array=False,
precision=0,
scale=0,
comp_param=0,
),
is_reserved_keyword=False,
src_name='',
),
TColumnType(
col_name='vol',
col_type=TTypeInfo(
type=3,
encoding=0,
nullable=True,
is_array=False,
precision=0,
scale=0,
comp_param=0,
),
is_reserved_keyword=False,
src_name='',
),
]
result = _extract_column_details(data)
expected = [
ColumnDetails(
name='date_',
type='STR',
nullable=True,
precision=0,
scale=0,
comp_param=32,
encoding='DICT',
is_array=False,
),
ColumnDetails(
name='trans',
type='STR',
nullable=True,
precision=0,
scale=0,
comp_param=32,
encoding='DICT',
is_array=False,
),
ColumnDetails(
name='symbol',
type='STR',
nullable=True,
precision=0,
scale=0,
comp_param=32,
encoding='DICT',
is_array=False,
),
ColumnDetails(
name='qty',
type='INT',
nullable=True,
precision=0,
scale=0,
comp_param=0,
encoding='NONE',
is_array=False,
),
ColumnDetails(
name='price',
type='FLOAT',
nullable=True,
precision=0,
scale=0,
comp_param=0,
encoding='NONE',
is_array=False,
),
ColumnDetails(
name='vol',
type='FLOAT',
nullable=True,
precision=0,
scale=0,
comp_param=0,
encoding='NONE',
is_array=False,
),
]
assert result == expected
``` |
{
"source": "jp-harvey/pyomnisci-1",
"score": 3
} |
#### File: pyomnisci-1/tests/test_data_no_nulls_gpu.py
```python
import pytest
import pandas as pd
from .conftest import _tests_table_no_nulls, no_gpu
@pytest.mark.usefixtures("mapd_server")
class TestGPUDataNoNulls:
@pytest.mark.skipif(no_gpu(), reason="No GPU available")
@pytest.mark.parametrize('method', ["rows", "columnar", "arrow", "infer"])
def test_select_ipc_gpu(self, con, method):
df_in = _tests_table_no_nulls(10000)
# keep columns that pass, make issues for ones that don't
df_in_limited = df_in[
["smallint_", "int_", "bigint_", "float_", "double_"]
].copy()
con.execute("drop table if exists test_data_no_nulls_gpu;")
con.load_table("test_data_no_nulls_gpu", df_in_limited, method=method)
df_gdf = con.select_ipc_gpu("select * from test_data_no_nulls_gpu")
# validate dtypes are exactly the same
assert pd.DataFrame.equals(df_in_limited.dtypes, df_gdf.dtypes)
# bring gdf local to CPU, do comparison
df_gdf_cpu_copy = df_gdf.to_pandas()
df_in_limited.sort_values(
by=['smallint_', 'int_', 'bigint_'], inplace=True
)
df_in_limited.reset_index(drop=True, inplace=True)
df_gdf_cpu_copy.sort_values(
by=['smallint_', 'int_', 'bigint_'], inplace=True
)
df_gdf_cpu_copy.reset_index(drop=True, inplace=True)
assert pd.DataFrame.equals(df_in_limited, df_gdf_cpu_copy)
con.execute("drop table if exists test_data_no_nulls_gpu;")
``` |
{
"source": "jphaser/bilalcoin",
"score": 2
} |
#### File: bilalcoin/users/views.py
```python
from __future__ import absolute_import
import requests
from django.conf import settings
from django.contrib import messages
from django.contrib.auth import get_user_model
from django.contrib.auth.mixins import LoginRequiredMixin
from django.contrib.messages.views import SuccessMessageMixin
from django.core.mail import EmailMessage, send_mail
from django.http import Http404
from django.shortcuts import get_object_or_404, redirect, render
from django.template.loader import get_template, render_to_string
from django.urls import reverse
from django.utils import timezone
from django.utils.translation import gettext_lazy as _
from django.views.generic import CreateView, DetailView, RedirectView, UpdateView
from .forms import UserPersonalForm, UserProfileForm, UserVerifyForm
from .models import UserProfile, UserVerify
User = get_user_model()
def home(request, *args, **kwargs):
username = str(kwargs.get('username'))
try:
user = User.objects.get(username=username)
request.session['ref_profile'] = user.id
print('user', user.id)
except User.DoesNotExist:
pass
# print(request.session.get_expiry_age())
return render(request, 'pages/home.html', {})
class UserDetailView(LoginRequiredMixin, DetailView):
model = User
slug_field = "username"
slug_url_kwarg = "username"
# def get_context_data(self, **kwargs):
# context = super().get_context_data(**kwargs)
# context["crypto"] = get_crypto_data()
# return context
user_detail_view = UserDetailView.as_view()
class UserUpdateView(LoginRequiredMixin, SuccessMessageMixin, UpdateView):
model = UserProfile
form_class = UserProfileForm
# second_form_class = UserProfileForm
template_name = 'users/user_form.html'
success_message = _("Your personal information was successfully updated")
slug_field = "username"
slug_url_kwarg = "username"
def get_success_url(self):
return self.request.user.get_absolute_url() # type: ignore [union-attr]
def get_object(self):
username = self.kwargs.get('username')
if username is None:
raise Http404
return get_object_or_404(UserProfile, user__username__iexact=username)
def get(self, request, *args, **kwargs):
self.user = request.user
return super().get(request, *args, **kwargs)
# def get_context_data(self, **kwargs):
# context = super().get_context_data(**kwargs)
# context["profileform"] = self.form_class(self.request.POST, self.request.FILES, instance=self.request.user)
# return context
def form_valid(self, form):
form.save()
time = timezone.now()
userdata = self.request.user
title = "User Data Update"
msg = f"{userdata.username} just updated his personal details at {time}"
message = get_template('mail/admin-mail.html').render(context={"user_username": userdata.username, "title": title, "time": time, "message": msg})
recepient = str(userdata.email)
frm = settings.EMAIL_HOST_USER
mail = EmailMessage(
title,
#f"{self.request.user.username} just updated his profile at {self.created}",
message,
frm,
[recepient],
)
mail.content_subtype = "html"
mail.send()
return super().form_valid(form)
def form_invalid(self, form):
messages.error(self.request, "The form was not submitted successfully. Check your information!")
return super().form_invalid(form)
user_update_view = UserUpdateView.as_view()
class UserRedirectView(LoginRequiredMixin, RedirectView):
permanent = False
def get_redirect_url(self):
return reverse("users:detail", kwargs={"username": self.request.user.username})
user_redirect_view = UserRedirectView.as_view()
class UserVerifyCreateView(LoginRequiredMixin, SuccessMessageMixin, UpdateView):
model = UserVerify
form_class = UserVerifyForm
template_name = 'users/verify.html'
slug_field = "username"
slug_url_kwarg = "username"
success_message = _("Verification information was successfully created")
def get_success_url(self):
return self.request.user.get_absolute_url() # type: ignore [union-attr]
def get_object(self):
username = self.kwargs.get('username')
if username is None:
raise Http404
return get_object_or_404(UserVerify, user__username__iexact=username)
def get(self, request, *args, **kwargs):
self.user = request.user
return super().get(request, *args, **kwargs)
def form_valid(self, form):
form.save()
time = timezone.now()
title = "New Verification Request"
msg = f"{self.request.user.username} just submited informations for his profile verification at {time}"
message = get_template('mail/admin-mail.html').render(context={"user_username": self.request.user.username, "title": title, "time": time, "message": msg})
recepient = self.request.user.email
sender = settings.EMAIL_HOST_USER
mail = EmailMessage(
title,
message,
sender,
[recepient]
)
mail.content_subtype = "html"
mail.send()
return super().form_valid(form)
def form_invalid(self, form):
messages.error(self.request, "The form was not submitted successfully. Check your information!")
return super().form_invalid(form)
user_verify_view = UserVerifyCreateView.as_view()
``` |
{
"source": "jphaser/encryptfinance",
"score": 2
} |
#### File: encryptfinance/users/models.py
```python
from __future__ import absolute_import
import datetime
import os
import random
import uuid
from decimal import Decimal
from countries_plus.models import Country
from django.contrib.auth.models import AbstractUser
from django.core.validators import MaxValueValidator, MinValueValidator, RegexValidator
from django.db.models import (
CASCADE,
BooleanField,
CharField,
DateField,
DateTimeField,
DecimalField,
EmailField,
FileField,
ForeignKey,
ImageField,
OneToOneField,
TextField,
URLField,
UUIDField,
)
from django.urls import reverse
from django.utils import timezone
from django.utils.translation import gettext_lazy as _
from model_utils.models import TimeStampedModel
from ..utils.ref_code import ref_generator
from ..utils.validators import (
validate_uploaded_image_extension,
validate_uploaded_pdf_extension,
)
def get_filename_ext(filepath):
base_name = os.path.basename(filepath)
name, ext = os.path.splitext(base_name)
return name, ext
def idcard_image(instance, filename):
new_filename = random.randint(1, 3910209312)
name, ext = get_filename_ext(filename)
final_filename = "{new_filename}{ext}".format(new_filename=new_filename, ext=ext)
return "idcard/{new_filename}/{final_filename}".format(
new_filename=new_filename, final_filename=final_filename
)
def profile_image(instance, filename):
new_filename = random.randint(1, 3910209312)
name, ext = get_filename_ext(filename)
final_filename = "{new_filename}{ext}".format(new_filename=new_filename, ext=ext)
return "profile/{new_filename}/{final_filename}".format(
new_filename=new_filename, final_filename=final_filename
)
def testimonial_image(instance, filename):
new_filename = random.randint(1, 3910209312)
name, ext = get_filename_ext(filename)
final_filename = "{new_filename}{ext}".format(new_filename=new_filename, ext=ext)
return "testimonial/{new_filename}/{final_filename}".format(
new_filename=new_filename, final_filename=final_filename
)
def bank_statement(instance, filename):
new_filename = random.randint(1, 3910209312)
name, ext = get_filename_ext(filename)
final_filename = "{new_filename}{ext}".format(new_filename=new_filename, ext=ext)
return "statement/{new_filename}/{final_filename}".format(
new_filename=new_filename, final_filename=final_filename
)
SSN_REGEX = "^(?!666|000|9\\d{2})\\d{3}-(?!00)\\d{2}-(?!0{4}\\d{4}$)"
NUM_REGEX = "^[0-9]*$"
class User(AbstractUser):
"""Default user for encryptfinance."""
#: First and last name do not cover name patterns around the globe
middle_name = CharField(
_("Middle Name"), blank=True, null=True, max_length=255, help_text="Not compulsary, But good practive to differentciate owners"
)
balance = DecimalField(_("Current Balance"), max_digits=20, null=True, decimal_places=2, default=0.00)
unique_id = UUIDField(editable=False, default=uuid.uuid1)
member_since = DateField(default=datetime.datetime.now)
is_verified = BooleanField(default=False)
has_deposited = BooleanField(default=False)
deposit_date = DateField(default=datetime.datetime.now, null=True, blank=True)
def get_initials(self):
fname = self.first_name[0].upper()
lname = self.last_name[0].upper()
return f"{fname} {lname}"
@property
def plan(self):
if self.balance > 0.00 and self.balance <= 1000.00:
return "DAILY PLAN"
elif self.balance > 1000.00 and self.balance <= 4000.00:
return "SILVER PLAN"
elif self.balance > 4000.00 and self.balance <= 50000.00:
return "GOLD PLAN"
elif self.balance > 50000.00 and self.balance <= 100000.00:
return "DIAMOND PLAN"
elif self.balance == 0.00:
return "UNSUBSCRIBED"
@property
def rate(self):
if self.plan == "DAILY PLAN":
return Decimal(0.2)
elif self.plan == "SILVER PLAN":
return Decimal(0.55)
elif self.plan == "GOLD PLAN":
return Decimal(0.7)
elif self.plan == "DIAMOND PLAN":
return Decimal(0.85)
elif self.plan == "UNSUBSCRIBED":
return Decimal(0.00)
@property
def days(self):
if self.plan == "DAILY PLAN":
return 1
elif self.plan == "SILVER PLAN":
return 7
elif self.plan == "GOLD PLAN":
return 14
elif self.plan == "DIAMOND PLAN":
return 30
elif self.plan == "UNSUBSCRIBED":
return 0
def withdrawal_date(self):
if self.plan == "BRONZE PLAN":
days = 1
if self.deposit_date:
return self.deposit_date + datetime.timedelta(days=days)
elif self.plan == "SILVER PLAN":
days = 7
if self.deposit_date:
return self.deposit_date + datetime.timedelta(days=days)
elif self.plan == "GOLD PLAN":
days = 14
if self.deposit_date:
return self.deposit_date + datetime.timedelta(days=days)
elif self.plan == "DIAMOND PLAN":
days = 30
if self.deposit_date:
return self.deposit_date + datetime.timedelta(days=days)
elif self.plan == "UNSUBSCRIBED":
days = 0
if self.deposit_date:
return self.deposit_date + datetime.timedelta(days=days)
def can_withdraw(self):
if self.plan == "BRONZE PLAN":
days = 1
terminate_date = self.deposit_date + datetime.timedelta(days=days)
if timezone.now().date() > terminate_date:
return True
elif self.plan == "SILVER PLAN":
days = 14
terminate_date = self.deposit_date + datetime.timedelta(days=days)
if timezone.now().date() > terminate_date:
return True
elif self.plan == "GOLD PLAN":
days = 60
terminate_date = self.deposit_date + datetime.timedelta(days=days)
if timezone.now().date() > terminate_date:
return True
elif self.plan == "DIAMOND PLAN":
days = 90
terminate_date = self.deposit_date + datetime.timedelta(days=days)
if timezone.now().date() > terminate_date:
return True
elif self.plan == "UNSUBSCRIBED":
return True
def profit(self):
if self.balance > 0:
return Decimal(self.balance) * Decimal(self.rate)
else:
return Decimal(0.00) * Decimal(self.rate)
def get_absolute_url(self):
"""Get url for user's detail view.
Returns:
str: URL for user detail.
"""
return reverse("users:detail", kwargs={"username": self.username})
class UserProfile(TimeStampedModel):
BANKS = (
("", "Select Bank"),
("Arvest Bank", "Arvest Bank"),
("Ally Financial", "Ally Financial"),
("American Express", "American Express"),
("Amarillos National Bank", "Amarillos National Bank"),
("Apple bank for Savings", "Apple bank for Savings"),
("Bank of Hawaii", "Bank of Hawaii"),
("Bank of Hope", "Bank of Hope"),
("Bank United", "Bank United"),
("BOA", "Bank of America"),
("Bank United", "Bank United"),
("Brown Brothers Harriman & Co", "Brown Brothers Harriman & Co"),
("Barclays", "Barclays"),
("BMO Harris Bank", "BMO Harris Bank"),
("Bank OZK", "Bank OZK"),
("BBVA Compass", "BBVA Compass"),
("BNP Paribas", "BNP Paribas"),
("BOK Financial Corporation", "BOK Financial Corporation"),
("Cathay Bank", "Cathay Bank"),
("Chartway Federal Credit Union", "Chartway Federal Credit Union"),
("Capital One", "Capital One"),
("Capital City Bank", "Capital City Bank"),
("Chase Bank", "Chase Bank"),
("Charles Schwab Corporation", "Charles Schwab Corporation"),
("CG", "CitiGroup"),
("Credit Suisse", "Credit Suisse"),
("Comerica", "Comerica"),
("CIT Group", "CIT Group"),
("CapitalCity Bank", "CapitalCity Bank"),
("Credit Union Page", "Credit Union Page"),
("Citizens Federal Bank", "Citizens Federal Bank"),
("Chemical Financial Corporation", "Chemical Financial Corporation"),
("Discover Financial", "Discover Finacial"),
("Deutsche Bank", "Deutsche Bank"),
("Douglas County Bank & Trust", "Douglas County Bank & Trust "),
("Dime Savings Bank of Williamsburgh", "Dime Savings Bank of Williamsburgh"),
("East West Bank", "East West Bank"),
("Flagster Bank", "Flagster Bank"),
("First National of Nebraska", "First National of Nebraska"),
("FirstBank Holding Co", "FirstBank Holding Co"),
("First Capital Bank", "First Capital Bank"),
("First Commercial Bank", "First Commercial Bank"),
(
"First Federal Savings Bank of Indiana",
"First Federal Savings Bank of Indiana",
),
("First Guaranty Bank of Florida", "First Guaranty Bank of Florida"),
("First Line Direct", "First Line Direct"),
("First USA Bank", "First USA Bank"),
("Fifth Third Bank", "Fifth Third Bank"),
("First Citizens BancShares", "First Citizens BancShares"),
("Fulton Financial Corporation", "Fulton Financial Corporation"),
("First Hawaiian Bank", "First Hawaiian Bank"),
("First Horizon National Corporation", "First Horizon National Corporation"),
("Frost Bank", "Frost Bank"),
("First Midwest Bank", "First Midwest Bank"),
("Goldman Sachs", "Goldman Sachs"),
("Grandeur Financials", "Grandeur Financials"),
("HSBC Bank USA", "HSBC Bank USA"),
("Home BancShares Conway", "Home BancShares Conway"),
("Huntington Bancshares", "Huntington Bancshares"),
("Investors Bank", "Investors Bank"),
("Íntercity State Bank", "Íntercity State Bank"),
("KeyCorp", "KeyCorp"),
("MB Financial", "MB Financial"),
("Mizuho Financial Group", "Mizuho Financial Group"),
("Midfirst Bank", "Midfirst Bank"),
("M&T Bank", "M&T Bank"),
("MUFG Union Bank ", "MUFG Union Bank"),
("<NAME>", "<NAME>"),
("Northern Trust", "Northern Trust"),
("New York Community Bank", "New York Community Bank"),
("Old National Bank", "Old National Bank"),
("Pacwest Bancorp", "Pacwest Bancorp"),
("Pinnacle Financial Partners", "Pinnacle Financial Partners"),
("PNC Financial Services", "PNC Financial Services"),
("Raymond James Financial", "Raymond James Financial"),
("RBC Bank", "RBC Bank"),
("Region Financial Corporation", "Region Financial Corporation"),
("Satander Bank", "Satander Bank"),
("Synovus Columbus", "Synovus Columbus"),
("Synchrony Financial", "Synchrony Financial"),
("<NAME>", "<NAME>"),
("Simmons Bank", "Simmons Bank"),
("South State Bank", "South State Bank"),
("Stifel St. Louise", "Stifel St. Louise"),
("Suntrust Bank", "Suntrust Bank"),
("TCF Financial Corporation", "TCF Financial Corporation"),
("TD Bank", "TD Bank"),
("The Bank of New York Mellon", "The Bank of New York Mellon"),
("Texas Capital Bank", "Texas Capital Bank"),
("UMB Financial Corporation", "UMB Financial Corporation"),
("Utrecht-America", "Utrecht-America"),
("United Bank", "United Bank"),
("USAA", "USAA"),
("U.S Bank", "U.S Bank"),
("UBS", "UBS"),
("Valley National Bank", "Valley National Bank"),
("Washington Federal", "Washington Federal"),
("Western Alliance Banorporation", "Western Alliance Bancorporation"),
("Wintrust Financial", "Wintrust Finacial"),
("Webster Bank", "Webster Bank"),
("Wells Fargo", "Wells Fargo"),
("Zions Bancorporation", "Zions Bancorporation"),
("Other Bank", "Other Bank"),
)
user = OneToOneField(User, on_delete=CASCADE, related_name="userprofile")
# code = CharField(max_length=7, blank=True)
recommended_by = ForeignKey(User, on_delete=CASCADE, blank=True, null=True, related_name='ref_by')
passport = FileField(
_("User Profile Passport"),
upload_to=profile_image,
validators=[validate_uploaded_image_extension],
null=True,
blank=False,
)
bank = CharField(
_("Your Bank Name"), max_length=250, blank=True, null=True, choices=BANKS
)
account_no = CharField(
_("Recipient Account Number"),
max_length=13,
null=True,
blank=False,
validators=[
RegexValidator(
regex=NUM_REGEX,
message="Must Contain Numbers Only",
code="Invalid_input, Only Integers",
)
],
)
routing_no = CharField(
_("Recipient Routing Number"),
max_length=13,
null=True,
blank=True,
help_text="must be the recipients 9 digits routing number",
validators=[
RegexValidator(
regex=NUM_REGEX,
message="Must Contain Numbers Only",
code="Invalid_input, Only Integers",
)
],
)
nationality = ForeignKey(to=Country, on_delete=CASCADE, null=True)
phone = CharField(
_("Contact 10 digit Phone Number"),
max_length=10,
null=True,
blank=True,
unique=True,
help_text="Example: 1234567890 (10 digits only)",
validators=[
RegexValidator(
regex=NUM_REGEX,
message="Must Contain Numbers Only",
code="Invalid_input, Only Integers",
)
],
)
fk_name = 'user'
@property
def country_code(self):
if self.nationality:
country_code = self.nationality.phone
return country_code
def international_number(self):
return f"{self.country_code}{self.phone}"
class Meta:
verbose_name = "User Profile"
verbose_name_plural = "User Profiles"
ordering = ["-modified"]
def get_recommended_profiles(self):
qs = UserProfile.objects.all()
# empty recommended lists
my_recs = []
for profile in qs:
if profile.recommended_by == self.user:
my_recs.append(profile)
return my_recs
# def save(self, *args, **kwargs):
# if self.code == '':
# code = ref_generator()
# self.code = code
# super().save(*args, **kwargs)
def __str__(self):
return (
self.user.username
if self.user.get_full_name() == ""
else self.user.get_full_name()
)
class UserVerify(TimeStampedModel):
PASSPORT = "PASSPORT"
ID_CARD = "ID_CARD"
DRIVERS_LICENSE = "DRIVERS_LICENSE"
ID_TYPE = (
(PASSPORT, "PASSPORT"),
(ID_CARD, "ID CARD"),
(DRIVERS_LICENSE, "DRIVERS LICENSE"),
)
user = OneToOneField(to=User, on_delete=CASCADE, related_name="userverify")
id_type = CharField(
choices=ID_TYPE, default=PASSPORT, max_length=15, null=True, blank=True
)
id_front = FileField(
_("ID Card Front"),
upload_to=idcard_image,
validators=[validate_uploaded_image_extension],
null=True,
blank=False,
help_text="Must be SVG, PNG or JPG files",
)
id_back = FileField(
_("ID Card Back"),
upload_to=idcard_image,
validators=[validate_uploaded_image_extension],
null=True,
blank=False,
help_text="Must be SVG, PNG or JPG files",
)
# bank_statement = FileField(
# _("Last 4 Months Bank Statement"),
# validators=[validate_uploaded_pdf_extension],
# upload_to=bank_statement,
# null=True,
# blank=True,
# help_text="Must be PDF or JPG files",
# )
ssn = CharField(
_("US SSN"),
max_length=16,
null=True,
blank=True,
unique=True,
help_text="Must be valid Social Security Number. *** US Citizens Only",
validators=[
RegexValidator(
regex=NUM_REGEX,
message="Must Contain Numbers Only",
code="Invalid_input, Only Integers",
)
],
)
class Meta:
verbose_name = "User Verify"
verbose_name_plural = "User Verifies"
ordering = ["-modified"]
def __str__(self):
return (
self.user.username
if self.user.get_full_name() == ""
else self.user.get_full_name()
)
class Testimonial(TimeStampedModel):
name = CharField(_("Testimonial Giver's Name"), max_length=500, null=True, blank=False)
desc = TextField(_("Testimonial description"), max_length=1200, null=True, blank=False)
pic = ImageField(
_("Testimonial Sender Image"),
upload_to=testimonial_image,
null=True,
blank=False,
help_text="Must be Image files",
)
class Meta:
verbose_name = "Testimonial"
verbose_name_plural = "Testimonials"
ordering = ["-modified"]
def __str__(self):
return self.name
``` |
{
"source": "jphayek/OEIS-Python",
"score": 4
} |
#### File: jphayek/OEIS-Python/oeis.py
```python
import argparse
import numpy as np
import matplotlib.pyplot as plt
import math
__version__ = "0.0.1"
def parse_args():
parser = argparse.ArgumentParser(description="Print a sweet sequence")
parser.add_argument(
"sequence",
metavar="S",
type=str,
help="Define the sequence to run (e.g.: A181391)",
)
parser.add_argument(
"--limit",
type=int,
default=20,
help="Define the limit of the sequence, (default: 20)",
)
parser.add_argument(
"--plot", action="store_true", help="Print a sweet sweet sweet graph"
)
parser.add_argument(
"--start",
type=int,
default=0,
help="Define the starting point of the sequence (default: 0)",
)
return parser.parse_args()
def A181391(start=0, limit=20, plot=False):
sequence = [0]
last_pos = {}
for i in range(start + limit):
new_value = i - last_pos.get(sequence[i], i)
sequence.append(new_value)
last_pos[sequence[i]] = i
if plot:
colors = []
for i in range(start, start + limit):
colors.append(np.random.rand())
plt.scatter(
range(start, start + limit),
sequence[start : start + limit],
s=50,
c=colors,
alpha=0.5,
)
plt.show()
return sequence[start : start + limit]
def A006577(n):
if n==1: return 0
x=0
while True:
if n % 2 == 0: n //= 2
else: n = 3*n + 1
x+=1
if n<2: break
return x
print([A006577(n) for n in range(1, 101)])
def A115020():
result = []
for n in range(100, 0, -7):
if n >= 0:
result.append(n)
return result
def A000010(n):
numbers = []
i = 0
for i in range(n):
if math.gcd(i, n) == 1:
numbers.append(i)
return len(numbers)
def A000040(start=0, end=999, plot=False):
result = []
resultIndex = []
i=0
for val in range(start, end + 1):
if val > 1:
for n in range(2, val):
if (val % n) == 0:
break
else:
result.append(val)
resultIndex.append(i)
i=i+1
if plot:
plt.plot(resultIndex,result)
plt.ylabel('some numbers')
plt.show()
else:
return result
def _partitions(n):
if n == 0:
return []
if n==1:
return [[1]]
liste=[[n]]
for i in range(1,n):
for p in _partitions(n-i):
if [i]+p==sorted([i]+p):
liste.append([i]+p)
return liste
def partitions(n):
return len(_partitions(n))
def affiche(n):
listes=_partitions(n)
for i in range(0,len(listes)):
print(listes[i])
def main():
args = parse_args()
if args.sequence == "A181391":
return A181391(args.start, args.limit, args.plot)
elif args.sequence == "A115020":
return A115020()[args.start : args.start + args.limit]
elif args.sequence == "A000040":
return A000040(args.start, args.limit, args.plot)
elif args.sequence == "A000010":
return [A000010(x) for x in range(1, args.limit)]
elif args.sequence == "A006577":
return [A006577(n) for n in range(1, 101)]
if args.sequence == "A000041":
print(affiche(args.start))
print(partitions(args.start))
if __name__=="__main__":
main()
```
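A few sanity checks, with expected values derived by tracing the implementations above (they also agree with the corresponding OEIS entries):
```python
from oeis import A181391, A000010, A006577, partitions

# Van Eck's sequence: each term is the gap since its predecessor last appeared.
assert A181391(0, 10) == [0, 0, 1, 0, 2, 0, 2, 2, 1, 6]

# Euler's totient (phi(1) == 1 by convention).
assert [A000010(n) for n in (1, 9, 10)] == [1, 6, 4]

# Collatz step counts: 27 famously takes 111 halving/tripling steps to reach 1.
assert A006577(27) == 111

# Integer partitions: 4 = 4 = 3+1 = 2+2 = 2+1+1 = 1+1+1+1.
assert partitions(4) == 5
```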
#### File: OEIS-Python/tests/test_A000010.py
```python
from oeis import A000010
def test_phi():
assert [A000010(x) for x in range(1, 10)] == [1, 1, 2, 2, 4, 2, 6, 4, 6]
```
#### File: OEIS-Python/tests/test_A000040.py
```python
from oeis import A000040
def test_prime():
assert A000040(0, 9) == [2, 3, 5, 7]
``` |
{
"source": "jphayek/Spaces-Python",
"score": 3
} |
#### File: jphayek/Spaces-Python/spaces.py
```python
"Prend en parametre un dossier et remplace dans ce dossier tous les noms des fichiers contenant des espace en underscore."
import argparse
from pathlib import Path
__version__ = "0.0.1"
def parse_args():
parser = argparse.ArgumentParser(description="Searches and replaces in filenames.")
parser.add_argument("path", help="Root directory in which to search for files.")
parser.add_argument("--search", default=" ", help="Character to search.")
parser.add_argument(
"--replace", default="_", help="Character to use as a replacement."
)
parser.add_argument(
"--recurse",
action="store_true",
help="Give to replace recursively in directories.",
)
return parser.parse_args()
def fix_names(path, search=" ", replace="_", recurse=False):
files = [
file
for file in Path(path).glob("*" if not recurse else "**/*")
if file.is_file()
]
for file in files:
new_name = file.name.replace(search, replace)
file.rename(file.parent / new_name)
def main():
args = parse_args()
fix_names(args.path, args.search, args.replace, args.recurse)
if __name__ == "__main__":
main()
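# Example with a hypothetical directory: turn "my file.txt" into "my_file.txt"
# for every file directly under /tmp/docs, or recursively with --recurse:
#   python spaces.py /tmp/docs
#   python spaces.py /tmp/docs --search " " --replace "_" --recurse
# or from Python: fix_names("/tmp/docs", search=" ", replace="_", recurse=True)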
``` |
{
"source": "jphdotam/Cophy",
"score": 2
} |
#### File: Code/Data/plots.py
```python
import numpy as np
import peakutils
from sklearn.metrics import auc
from scipy.signal import savgol_filter
WINDOW_LEN = 17 # Default 17
#from Code.UI.label import SAMPLE_FREQ
SAMPLE_FREQ = 200
MAX_RR_INTERVAL = SAMPLE_FREQ // 4 # //3 -> 180
MIN_PEAK_INTERVAL_OVER_SAMPLE = SAMPLE_FREQ * 6 # 6 -> 10 bpm (NB need to take into account port open etc.)
def find_peaks(trace):
min_peaks = len(trace) / MIN_PEAK_INTERVAL_OVER_SAMPLE
peaks = peakutils.indexes(trace, min_dist=MAX_RR_INTERVAL)
if len(peaks) < min_peaks:
print(f"Found {len(peaks)} but expected at least {min_peaks} - using thresh 0.2 ->")
peaks = peakutils.indexes(trace, thres=0.2, min_dist=MAX_RR_INTERVAL)
print(len(peaks))
if len(peaks) < min_peaks:
print(f"Found {len(peaks)} but expected at least {min_peaks} - using thresh 0.05 ->")
peaks = peakutils.indexes(trace, thres=0.05, min_dist=MAX_RR_INTERVAL)
print(len(peaks))
return peaks
def pdpa(labelui, peaks, clip_vals=(0, 4)):
df = labelui.TxtSdyFile.df
pd = np.array(df['pd'])
pa = np.array(df['pa'])
time = np.array(df['time'])
x, y = [], []
for index_from in range(len(peaks) - 1):
index_to = index_from + 1
beat_pa = pa[peaks[index_from]:peaks[index_to]]
beat_pd = pd[peaks[index_from]:peaks[index_to]]
beat_time = time[peaks[index_from]:peaks[index_to]]
auc_pa = auc(x=beat_time, y=beat_pa)
auc_pd = auc(x=beat_time, y=beat_pd)
pdpa = auc_pd / auc_pa
if clip_vals:
pdpa = max(pdpa, clip_vals[0])
pdpa = min(pdpa, clip_vals[1])
x.append(beat_time[-1])
y.append(pdpa)
return {'x': x, 'y': y}
def pdpa_filtered(labelui, pdpa):
x, y = pdpa['x'], pdpa['y']
try:
y_filtered = savgol_filter(y, window_length=WINDOW_LEN, polyorder=3)
except ValueError:
print(f"Insufficient data to plot PdPa - try changing Pa channel if using SDY file?")
y_filtered = np.array([1] * len(x))
return {'x': x, 'y': y_filtered}
def microvascular_resistance(labelui, peaks, flow_mean_or_peak='peak'):
df = labelui.TxtSdyFile.df
pd = np.array(df['pd'])
flow = np.array(df['flow'])
time = np.array(df['time'])
x, y = [], []
for index_from in range(len(peaks) - 1):
index_to = index_from + 1
beat_flow = flow[peaks[index_from]:peaks[index_to]]
beat_pd = pd[peaks[index_from]:peaks[index_to]]
beat_time = time[peaks[index_from]:peaks[index_to]]
mean_pd = np.mean(beat_pd)
x.append(beat_time[-1])
if flow_mean_or_peak == 'mean':
mean_flow = np.mean(beat_flow)
resistance = mean_pd / mean_flow
elif flow_mean_or_peak == 'peak':
peak_flow = max(beat_flow)
resistance = mean_pd / peak_flow
else:
raise ValueError(f"flow_mean_or_peak must be mean or peak, not {flow_mean_or_peak}")
y.append(resistance)
return {'x': x, 'y': y}
def stenosis_resistance(labelui, peaks, flow_mean_or_peak='peak'):
df = labelui.TxtSdyFile.df
pa = np.array(df['pa'])
pd = np.array(df['pd'])
flow = np.array(df['flow'])
time = np.array(df['time'])
x, y = [], []
for index_from in range(len(peaks) - 1):
index_to = index_from + 1
beat_flow = flow[peaks[index_from]:peaks[index_to]]
beat_pa = pa[peaks[index_from]:peaks[index_to]]
beat_pd = pd[peaks[index_from]:peaks[index_to]]
beat_time = time[peaks[index_from]:peaks[index_to]]
mean_pa = np.mean(beat_pa)
mean_pd = np.mean(beat_pd)
delta_p = mean_pa - mean_pd
x.append(beat_time[-1])
if flow_mean_or_peak == 'mean':
mean_flow = np.mean(beat_flow)
resistance = delta_p / mean_flow
elif flow_mean_or_peak == 'peak':
peak_flow = max(beat_flow)
resistance = delta_p / peak_flow
else:
raise ValueError(f"flow_mean_or_peak must be mean or peak, not {flow_mean_or_peak}")
y.append(resistance)
return {'x': x, 'y': y}
def filtered_resistance(resistance):
x, y = resistance['x'], resistance['y']
try:
y_filtered = savgol_filter(y, window_length=WINDOW_LEN, polyorder=3)
except ValueError:
print(f"Insufficient data to plot resistance - try changing Pa channel if using SDY file?")
y_filtered = np.array([1] * len(x))
return {'x': x, 'y': y_filtered}
```
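``find_peaks`` can be exercised without patient data by feeding it a synthetic trace. A minimal sketch, assuming the package is importable as ``Code.Data.plots``; the signal itself is made up:
```python
import numpy as np
from Code.Data.plots import find_peaks, SAMPLE_FREQ

# Ten seconds of a fake 60 bpm "pressure" trace sampled at SAMPLE_FREQ (200 Hz):
# one dominant peak per second, so roughly ten peaks are expected.
t = np.arange(0, 10, 1 / SAMPLE_FREQ)
trace = 100 + 20 * np.sin(2 * np.pi * 1.0 * t)

peaks = find_peaks(trace)
print(len(peaks), peaks[:3])  # ~10 peaks, spaced ~SAMPLE_FREQ samples apart
```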
#### File: Code/Data/TxtFile.py
```python
import os
import re
import numpy as np
import pandas as pd
class TxtFile:
def __init__(self, studypath, pd_offset=None):
self.studypath = studypath
self.pd_offset = pd_offset
self.patient_id = None
self.study_date = None
self.export_date = None
self.df = None
self.load_data()
def load_data(self):
"""The heading columns are so variable it's almost unbelievable... And sometimes the RWave and Timestamp columns
are reversed in order! Easiest is just to test for all the possibilities and hard code it (ugh)
Stores the parsed dataframe on self.df."""
with open(self.studypath) as f:
"""First find the row with the RWave in it; this is our column headings"""
lines = f.readlines()
self.patient_id = re.search("Patient: ([A-Za-z0-9]*),", lines[0])
self.patient_id = self.patient_id.group(1) if self.patient_id else "?"
self.study_date = re.search("Study date: ([0-9/]*),", lines[0])
self.study_date = self.study_date.group(1) if self.study_date else "?"
self.export_date = re.search("Export date: ([0-9/]*)", lines[0])
self.export_date = self.export_date.group(1) if self.export_date else "?"
for i_line, line in enumerate(lines):
if "RWave" in line:
heading_line = line
heading_line_number = i_line
break
else: # If didn't break
raise ValueError("Failed to find heading row")
if heading_line == "Time Pa Pd ECG IPV Pv RWave Tm\n":
names = ['time', 'pa', 'pd', 'ecg', 'flow', 'pv', 'rwave', 'timestamp']
numeric_cols = 5
elif heading_line == "Time[s] Pa[mmHg] Pa_Trans[mmHg] Pd[mmHg] ECG[V] IPV[cm/s] Pv[mmHg] TimeStamp[s] RWave\n":
names = ['time', 'pa', 'pa_trans', 'pd', 'ecg', 'flow', 'pv', 'timestamp', 'rwave']
numeric_cols = 7
elif heading_line == "Time Pa Pd ECG IPV Pv RWave \n" or heading_line == "Time Pa Pd ECG IPV Pv RWave\n":
names = ['time', 'pa', 'pd', 'ecg', 'flow', 'pv', 'rwave']
numeric_cols = 5
else:
raise AttributeError(f"Unable to process data format {heading_line} in file {self.studypath}")
df = pd.read_csv(self.studypath, skiprows=heading_line_number + 1, sep='\t', header=None,
names=names, dtype=np.object_, index_col=False)
df = df.stack().str.replace(',', '.').unstack()
# Don't try to convert the 'rwave' column to numeric, it's full of crap
df.iloc[:, 0:numeric_cols] = df.iloc[:, 0:numeric_cols].apply(pd.to_numeric)
if self.pd_offset:
new_pd = np.concatenate((np.array(df.pd[self.pd_offset:]), np.zeros(self.pd_offset)))
df.pd = new_pd
# new_flow = np.concatenate((np.array(df.flow[self.pd_offset:]), np.zeros(self.pd_offset)))
# df.flow = new_flow
self.df = df
``` |
{
"source": "jphdotam/DFDC",
"score": 2
} |
#### File: skp/etl/6_extract_face_rois_to_numpy.py
```python
import argparse
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('mode', type=str)
parser.add_argument('--start', type=int, default=0)
parser.add_argument('--end', type=int, default=-1)
parser.add_argument('--save-failures', type=str, default='./bad_videos.txt')
return parser.parse_args()
import pandas as pd
import numpy as np
import glob
import os, os.path as osp
#import skvideo.io
from scipy.ndimage.interpolation import zoom
from torch.utils.data import DataLoader
from tqdm import tqdm
from helper import *
args = parse_args()
FACEROIS = '../../data/dfdc/jph/face_roi/'
ORIGINAL = '../../data/dfdc/videos/'
SAVEDIR = '../../data/dfdc/mini-numpy/'
NUMFRAMES = 30
if not osp.exists(SAVEDIR): os.makedirs(SAVEDIR)
metadatas = [osp.join(_, 'metadata.json') for _ in glob.glob(osp.join(ORIGINAL, '*'))]
meta_df = pd.concat([load_metadata(_) for _ in tqdm(metadatas, total=len(metadatas))])
fake_df = meta_df[meta_df['video_label'] == 'FAKE']
# Make a mapping from FAKE to REAL
fake2real = {
osp.join(fake_df['train_part'].iloc[_], fake_df['filename'].iloc[_]) : osp.join(fake_df['train_part'].iloc[_], fake_df['original'].iloc[_])
for _ in range(len(fake_df))
}
# Make a mapping from video to label
vid2label = {meta_df['filename'].iloc[_] : meta_df['video_label'].iloc[_] for _ in range(len(meta_df))}
roi_pickles = glob.glob(osp.join(FACEROIS, '*.pickle'))
roi_dicts = [load_roi(p) for p in tqdm(roi_pickles, total=len(roi_pickles))]
#########
# REALS #
#########
real_rois = {}
for _ in roi_dicts:
real_rois.update({k : v for k,v in _.items() if vid2label[k.split('/')[-1]] == 'REAL'})
if args.mode == 'real':
real_keys = np.sort([*real_rois])
if args.end == -1:
real_keys = real_keys[args.start:]
else:
real_keys = real_keys[args.start:args.end+1]
filenames = [osp.join(ORIGINAL, _) for _ in real_keys]
dataset = VideoFirstFramesDataset(videos=filenames, NUMFRAMES=NUMFRAMES)
loader = DataLoader(dataset, batch_size=1, shuffle=False, num_workers=1)
bad_videos = []
for ind, vid in tqdm(enumerate(loader), total=len(loader)):
try:
v = loader.dataset.videos[ind].replace(ORIGINAL, '')
if type(vid) != list:
print('{} failed !'.format(v))
bad_videos.append(v)
continue
vid, start = vid
faces = get_faces(vid[0].numpy(), real_rois[v], start, NUMFRAMES=NUMFRAMES)
if len(faces) == 0:
bad_videos.append(v)
# print('0 faces detected for {} !'.format(v))
for ind, f in enumerate(faces):
if f.shape[1] > 256:
scale = 256./f.shape[1]
f = zoom(f, [1, scale,scale, 1], order=1, prefilter=False)
assert f.shape[1:3] == (256, 256)
train_part = int(v.split('/')[-2].split('_')[-1])
filename = v.split('/')[-1].replace('.mp4', '')
filename = osp.join(SAVEDIR, '{:02d}_{}_{}.mp4'.format(train_part, filename, ind))
np.save(filename, f.astype('uint8'))
#skvideo.io.vwrite(filename, f.astype('uint8'))
except Exception as e:
print(e)
bad_videos.append(v)
#########
# FAKES #
#########
if args.mode == 'fake':
fake_keys = np.sort([*fake2real])
if args.end == -1:
fake_keys = fake_keys[args.start:]
else:
fake_keys = fake_keys[args.start:args.end+1]
fake_rois = {k : real_rois[fake2real[k]] for k in fake_keys}
filenames = [osp.join(ORIGINAL, _) for _ in [*fake_rois]]
dataset = VideoFirstFramesDataset(videos=filenames, NUMFRAMES=NUMFRAMES)
loader = DataLoader(dataset, batch_size=1, shuffle=False, num_workers=1)
bad_videos = []
for ind, vid in tqdm(enumerate(loader), total=len(loader)):
try:
v = loader.dataset.videos[ind].replace(ORIGINAL, '')
if type(vid) != list:
print('{} failed !'.format(v))
bad_videos.append(v)
continue
vid, start = vid
faces = get_faces(vid[0].numpy(), real_rois[fake2real[v]], start, NUMFRAMES=NUMFRAMES)
if len(faces) == 0:
bad_videos.append(v)
# print('0 faces detected for {} !'.format(v))
for ind, f in enumerate(faces):
if f.shape[1] > 256:
scale = 256./f.shape[1]
f = zoom(f, [1, scale,scale, 1], order=1, prefilter=False)
assert f.shape[1:3] == (256, 256)
train_part = int(v.split('/')[-2].split('_')[-1])
filename = v.split('/')[-1].replace('.mp4', '')
filename = osp.join(SAVEDIR, '{:02d}_{}_{}.mp4'.format(train_part, filename, ind))
np.save(filename, f.astype('uint8'))
#skvideo.io.vwrite(filename, f.astype('uint8'))
except Exception as e:
print(e)
bad_videos.append(v)
with open(args.save_failures, 'a') as f:
for bv in bad_videos:
f.write('{}\n'.format(bv))
```
#### File: skp/etl/pair_real_with_fake.py
```python
import pandas as pd
import numpy as np
import os, os.path as osp
import cv2
from tqdm import tqdm
DATADIR = '/home/ianpan/ufrc/deepfake/data/dfdc/'
SAVEDIR = osp.join(DATADIR, 'pairs/')
if not osp.exists(SAVEDIR): os.makedirs(SAVEDIR)
df = pd.read_csv(osp.join(DATADIR, 'train_manyfaces_with_splits.csv'))
# Get 10th frame
df['frame_index'] = [int(_.split('/')[-1].split('-')[0].replace('FRAME', '')) for _ in df['imgfile']]
df = df[df['frame_index'] == 10]
df['face_number'] = [int(_.split('/')[-1].split('-')[1].split('.')[0].replace('FACE', '')) for _ in df['imgfile']]
df = df[df['label'] == 1]
def join_images(x, y):
xh, xw = x.shape[:2]
yh, yw = y.shape[:2]
ratio = xh/yh
y = cv2.resize(y, (int(yw*ratio), int(xh)))
return np.hstack((x,y))
RESIZE_H = 150
for orig,_df in tqdm(df.groupby('original'), total=len(df['original'].unique())):
for face_num, face_df in _df.groupby('face_number'):
# Load in original face
original_facefile = face_df['imgfile'].iloc[0].replace(face_df['filename'].iloc[0].replace('.mp4', ''), orig.replace('.mp4', ''))
original_face = cv2.imread(osp.join(DATADIR, original_facefile))
if type(original_face) == type(None):
print('{} not found ! Skipping ...'.format(original_facefile))
continue
for fake_face in face_df['imgfile']:
ff = cv2.imread(osp.join(DATADIR, fake_face))
joined_image = join_images(original_face, ff)
h, w = joined_image.shape[:2]
ratio = RESIZE_H/h
joined_image = cv2.resize(joined_image, (int(w*ratio), int(h*ratio)))
tmp_save_dir = osp.join(SAVEDIR, face_df['folder'].iloc[0], orig.replace('.mp4', ''))
if not osp.exists(tmp_save_dir): os.makedirs(tmp_save_dir)
savefile = '{}_{}.png'.format(fake_face.split('/')[-2], face_num)
cv2.imwrite(osp.join(tmp_save_dir, savefile), joined_image)
```
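``join_images`` rescales the fake-face crop to the real crop's height before stacking the pair side by side. A quick shape check with dummy arrays (the helper is re-declared so the snippet stands alone):
```python
import numpy as np
import cv2

def join_images(x, y):
    # same logic as in the script above: match heights, then stack horizontally
    xh, xw = x.shape[:2]
    yh, yw = y.shape[:2]
    ratio = xh / yh
    y = cv2.resize(y, (int(yw * ratio), int(xh)))
    return np.hstack((x, y))

real = np.zeros((100, 50, 3), dtype=np.uint8)
fake = np.zeros((200, 80, 3), dtype=np.uint8)
print(join_images(real, fake).shape)  # (100, 90, 3): 50 px + 80 * (100 / 200) px
```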
#### File: factory/evaluate/metrics.py
```python
from sklearn.metrics import log_loss as _log_loss
from sklearn.metrics import roc_auc_score, accuracy_score
import numpy as np
# dict key should match name of function
def log_loss(y_true, y_prob, **kwargs):
return {'log_loss': _log_loss(y_true, y_prob, eps=1e-7)}
def auc(y_true, y_prob, **kwargs):
return {'auc': roc_auc_score(y_true, y_prob)}
def accuracy(y_true, y_prob, **kwargs):
return {'accuracy': accuracy_score(y_true, (y_prob >= 0.5).astype('float32'))}
```
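Each metric helper returns a one-entry dict keyed by its own function name, so results can be merged straight into a single report dict. A toy check with made-up predictions, assuming the package is importable as ``factory.evaluate.metrics``:
```python
import numpy as np
from factory.evaluate.metrics import log_loss, auc, accuracy

y_true = np.array([0, 0, 1, 1])
y_prob = np.array([0.1, 0.4, 0.35, 0.8])

results = {}
for fn in (log_loss, auc, accuracy):
    results.update(fn(y_true, y_prob))

print(results)  # {'log_loss': ..., 'auc': 0.75, 'accuracy': 0.75}
```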
#### File: cnn3d/export/export_utils.py
```python
import cv2
import math
import numpy as np
import skimage.measure
from PIL import Image
TWO_FRAME_OVERLAP = False
MIN_FRAMES_FOR_FACE = 30
MAX_FRAMES_FOR_FACE = 300
def load_video(filename, every_n_frames=None, specific_frames=None, to_rgb=True, rescale=None, inc_pil=False,
max_frames=None):
"""Loads a video.
Called by:
1) The finding faces algorithm where it pulls a frame every FACE_FRAMES frames up to MAX_FRAMES_TO_LOAD at a scale of FACEDETECTION_DOWNSAMPLE, and then half that if there's a CUDA memory error.
2) The inference loop where it pulls EVERY frame up to a certain amount which it the last needed frame for each face for that video"""
assert every_n_frames or specific_frames, "Must supply either every n_frames or specific_frames"
assert bool(every_n_frames) != bool(
specific_frames), "Supply either 'every_n_frames' or 'specific_frames', not both"
cap = cv2.VideoCapture(filename)
n_frames_in = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
width_in = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
height_in = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
width_out = int(width_in * rescale) if rescale else width_in
height_out = int(height_in * rescale) if rescale else height_in
if max_frames:
n_frames_in = min(n_frames_in, max_frames)
if every_n_frames:
n_frames_out = n_frames_in // every_n_frames
specific_frames = [i * every_n_frames for i in range(n_frames_out)]
else:
n_frames_out = len(specific_frames)
out_pil = []
out_video = np.empty((n_frames_out, height_out, width_out, 3), np.dtype('uint8'))
i_frame_in = 0
i_frame_out = 0
ret = True
while (i_frame_in < n_frames_in and ret):
ret, frame_in = cap.read()
if i_frame_in not in specific_frames:
i_frame_in += 1
continue
try:
if rescale:
frame_in = cv2.resize(frame_in, (width_out, height_out))
if to_rgb:
frame_in = cv2.cvtColor(frame_in, cv2.COLOR_BGR2RGB)
except Exception as e:
print(f"Error for frame {i_frame_in} for video {filename}: {e}; using 0s")
frame_in = np.zeros((height_out, width_out, 3))
out_video[i_frame_out] = frame_in
i_frame_out += 1
if inc_pil:
try: # https://www.kaggle.com/zaharch/public-test-errors
pil_img = Image.fromarray(frame_in)
except Exception as e:
print(f"Using a blank frame for video {filename} frame {i_frame_in} as error {e}")
pil_img = Image.fromarray(np.zeros((224, 224, 3), dtype=np.uint8)) # Use a blank frame
out_pil.append(pil_img)
i_frame_in += 1
cap.release()
if inc_pil:
return out_video, out_pil
else:
return out_video
def get_roi_for_each_face(faces_by_frame, video_shape, temporal_upsample, downsample_for_calcs=1, upsample=1):
if not downsample_for_calcs:
downsample_for_calcs = 1
# Create boolean face array
frames_video, rows_video, cols_video, channels_video = video_shape
frames_video = math.ceil(frames_video)
if downsample_for_calcs != 1:
boolean_face_3d = np.zeros(
(frames_video, rows_video // downsample_for_calcs, cols_video // downsample_for_calcs),
dtype=np.bool) # Remove colour channel
else:
boolean_face_3d = np.zeros((frames_video, rows_video, cols_video), dtype=np.bool) # Remove colour channel
for i_frame, faces in enumerate(faces_by_frame):
if faces is not None: # May not be a face in the frame
for face in faces:
left, top, right, bottom = face
if downsample_for_calcs != 1:
left = round(left / downsample_for_calcs)
top = round(top / downsample_for_calcs)
right = round(right / downsample_for_calcs)
bottom = round(bottom / downsample_for_calcs)
boolean_face_3d[i_frame, int(top):int(bottom), int(left):int(right)] = True
# Replace blank frames if face(s) in neighbouring frames with overlap
for i_frame, frame in enumerate(boolean_face_3d):
if i_frame == 0 or i_frame == frames_video - 1: # Can't do this for 1st or last frame
continue
if True not in frame:
neighbour_overlap = boolean_face_3d[i_frame - 1] & boolean_face_3d[i_frame + 1]
boolean_face_3d[i_frame] = neighbour_overlap
# Find faces through time
id_face_3d, n_faces = skimage.measure.label(boolean_face_3d, return_num=True)
# Iterate over faces in video
rois = []
for i_face in range(1, n_faces + 1):
# Find the first and last frame containing the face
frames = np.where(np.any(id_face_3d == i_face, axis=(1, 2)) == True)
starting_frame, ending_frame = frames[0].min(), frames[0].max()
# Iterate over the frames with faces in and find the min/max cols/rows (bounding box)
cols, rows = [], []
for i_frame in range(starting_frame, ending_frame + 1):
rs = np.where(np.any(id_face_3d[i_frame] == i_face, axis=1) == True)
rows.append((rs[0].min(), rs[0].max()))
cs = np.where(np.any(id_face_3d[i_frame] == i_face, axis=0) == True)
cols.append((cs[0].min(), cs[0].max()))
frame_from, frame_to = starting_frame * temporal_upsample, ((ending_frame + 1) * temporal_upsample) - 1
rows_from, rows_to = np.array(rows)[:, 0].min(), np.array(rows)[:, 1].max()
cols_from, cols_to = np.array(cols)[:, 0].min(), np.array(cols)[:, 1].max()
frame_to = min(frame_to, frame_from + MAX_FRAMES_FOR_FACE)
if frame_to - frame_from >= MIN_FRAMES_FOR_FACE:
rois.append(((frame_from, frame_to),
(int(rows_from * upsample * downsample_for_calcs),
int(rows_to * upsample * downsample_for_calcs)),
(int(cols_from * upsample * downsample_for_calcs),
int(cols_to * upsample * downsample_for_calcs))))
return np.array(rois)
def get_frame_rois_for_valid_faces(faces_by_frame, video_shape, temporal_upsample, downsample_for_calcs=1, upsample=1,
n_frames_per_face=5, min_frames_for_face=100):
if not downsample_for_calcs:
downsample_for_calcs = 1
# Create boolean face array
frames_video, rows_video, cols_video, channels_video = video_shape
frames_video = math.ceil(frames_video)
if downsample_for_calcs != 1:
boolean_face_3d = np.zeros(
(frames_video, rows_video // downsample_for_calcs, cols_video // downsample_for_calcs),
dtype=np.bool) # Remove colour channel
else:
boolean_face_3d = np.zeros((frames_video, rows_video, cols_video), dtype=np.bool) # Remove colour channel
for i_frame, faces in enumerate(faces_by_frame):
if faces is not None: # May not be a face in the frame
for face in faces:
left, top, right, bottom = face
if downsample_for_calcs != 1:
left = round(left / downsample_for_calcs)
top = round(top / downsample_for_calcs)
right = round(right / downsample_for_calcs)
bottom = round(bottom / downsample_for_calcs)
boolean_face_3d[i_frame, int(top):int(bottom), int(left):int(right)] = True
# Replace blank frames if face(s) in neighbouring frames with overlap
for i_frame, frame in enumerate(boolean_face_3d):
if i_frame == 0 or i_frame == frames_video - 1: # Can't do this for 1st or last frame
continue
if True not in frame:
if TWO_FRAME_OVERLAP:
if i_frame > 1:
pre_overlap = boolean_face_3d[i_frame - 1] | boolean_face_3d[i_frame - 2]
else:
pre_overlap = boolean_face_3d[i_frame - 1]
if i_frame < frames_video - 2:
post_overlap = boolean_face_3d[i_frame + 1] | boolean_face_3d[i_frame + 2]
else:
post_overlap = boolean_face_3d[i_frame + 1]
neighbour_overlap = pre_overlap & post_overlap
else:
neighbour_overlap = boolean_face_3d[i_frame - 1] & boolean_face_3d[i_frame + 1]
boolean_face_3d[i_frame] = neighbour_overlap
# Find faces through time
id_face_3d, n_faces = skimage.measure.label(boolean_face_3d, return_num=True)
list_of_frame_roi_dicts = []
for i_face in range(1, n_faces + 1):
frame_roi = {}
frames = np.where(np.any(id_face_3d == i_face, axis=(1, 2)) == True)
starting_frame, ending_frame = frames[0].min(), frames[0].max()
# Skip faces with not enough frames
face_length_in_frames = ((ending_frame + 1) * temporal_upsample) - (starting_frame * temporal_upsample)
if face_length_in_frames <= min_frames_for_face:
# print(f"Skipping video as {face_length_in_frames} < {min_frames_for_face} frames for this face"
# f"From {(starting_frame * temporal_upsample)} minus {((ending_frame + 1) * temporal_upsample)}")
continue
frame_numbers = [int(round(f)) for f in np.linspace(starting_frame, ending_frame, n_frames_per_face)]
for i_frame in frame_numbers:
rs = np.where(np.any(id_face_3d[i_frame] == i_face, axis=1) == True)
cs = np.where(np.any(id_face_3d[i_frame] == i_face, axis=0) == True)
# frame_roi[i_frame] = rs[0].min(), rs[0].max(), cs[0].min(), cs[0].max()
frame_roi[i_frame * temporal_upsample] = (int(rs[0].min() * upsample * downsample_for_calcs),
int(rs[0].max() * upsample * downsample_for_calcs),
int(cs[0].min() * upsample * downsample_for_calcs),
int(cs[0].max() * upsample * downsample_for_calcs))
# print(f"ROIS are {frame_roi[i_frame * temporal_upsample]}")
list_of_frame_roi_dicts.append(frame_roi)
return list_of_frame_roi_dicts
# # Iterate over faces in video
# rois = []
# for i_face in range(1, n_faces + 1):
# # Find the first and last frame containing the face
# frames = np.where(np.any(id_face_3d == i_face, axis=(1, 2)) == True)
# starting_frame, ending_frame = frames[0].min(), frames[0].max()
#
# # Iterate over the frames with faces in and find the min/max cols/rows (bounding box)
# cols, rows = [], []
# for i_frame in range(starting_frame, ending_frame + 1):
# rs = np.where(np.any(id_face_3d[i_frame] == i_face, axis=1) == True)
# rows.append((rs[0].min(), rs[0].max()))
# cs = np.where(np.any(id_face_3d[i_frame] == i_face, axis=0) == True)
# cols.append((cs[0].min(), cs[0].max()))
# frame_from, frame_to = starting_frame * temporal_upsample, ((ending_frame + 1) * temporal_upsample) - 1
# rows_from, rows_to = np.array(rows)[:, 0].min(), np.array(rows)[:, 1].max()
# cols_from, cols_to = np.array(cols)[:, 0].min(), np.array(cols)[:, 1].max()
#
# frame_to = min(frame_to, frame_from + MAX_FRAMES_FOR_FACE)
#
# if frame_to - frame_from >= MIN_FRAMES_FOR_FACE:
# rois.append(((frame_from, frame_to),
# (int(rows_from * upsample * downsample_for_calcs), int(rows_to * upsample * downsample_for_calcs)),
# (int(cols_from * upsample * downsample_for_calcs), int(cols_to * upsample * downsample_for_calcs))))
    # return np.array(rois)  # unreachable leftover from the duplicated block above; 'rois' is not defined in this function
```
#### File: cnn3d/training/datasets_video.py
```python
import os
import cv2
import math
import random
import numpy as np
from glob import glob
import torch
from torch.utils.data import Dataset, DataLoader
class RebalancedVideoDataset(Dataset):
"""This ensures that a every epoch classes are balanced without repetition, classes with more examples
use their full range of videos.
The requested id maps directly 1:1 with the smallest class, whereas larger classes get a video chosen
with the corresponding range. E.g. if class1 has videos 1:100, and class 2 has videos 101:500, then
requesting dataset[5] always results in the 5th video of class one, but dataset[105] will randomly yield
one of 5 videos in range 125 - 130."""
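    # Illustrative sketch of the index mapping (assuming class sizes of 100 and 400 as in the docstring):
    #   dataset[5]   -> id_cls = 0, id_in_cls_bal = 5  -> always the same video of the smaller class
    #   dataset[105] -> id_cls = 1, id_in_cls_bal = 5  -> random.randint over the proportional slice
    #                   [ceil(5/100 * 400), floor(6/100 * 400 - eps)] of the larger class (see __getitem__)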
def __init__(self, video_dir, train_or_test, label_per_frame, transforms, framewise_transforms, i3d_norm, test_videos=None,
test_proportion=0.25, file_ext=".mp4", max_frames=64, bce_labels=False, alt_aug=False):
self.video_dir = video_dir
self.train_or_test = train_or_test
self.label_per_frame = label_per_frame
self.test_videos = test_videos
self.test_proportion = test_proportion
self.file_ext = file_ext
self.i3d_norm = i3d_norm
self.max_frames = max_frames
self.transforms = transforms
self.framewise_transforms = framewise_transforms
self.bce_labels = bce_labels
self.alt_aug = alt_aug
self.classes = self.get_classes()
self.n_classes = len(self.classes)
self.videos_by_class = self.get_videos_by_class()
self.n_by_class = self.get_n_by_class()
self.n_smallest_class = self.get_n_smallest_class()
self.n_balanced = self.get_n_balanced()
self.n_unbalanced = self.get_n_unbalanced()
self.c = self.n_classes # FastAI
self.summary()
def get_classes(self):
return os.listdir(self.video_dir)
def get_videos_by_class(self):
videos_by_class = {}
for cls in self.classes:
videos_for_class = []
videopaths = glob(os.path.join(self.video_dir, cls, f"*{self.file_ext}"))
for videopath in videopaths:
is_test = self.train_or_test == 'test'
video_chunk_id = os.path.basename(videopath).split('_', 1)[0]
in_test = video_chunk_id in self.test_videos
if is_test == in_test:
videos_for_class.append(videopath)
videos_by_class[cls] = videos_for_class
return videos_by_class
def get_n_by_class(self):
n_by_class = {}
for cls, videos in self.videos_by_class.items():
n_by_class[cls] = len(videos)
return n_by_class
def get_n_smallest_class(self):
return min([len(videos) for videos in self.videos_by_class.values()])
def get_n_balanced(self):
return self.get_n_smallest_class() * self.n_classes
def get_n_unbalanced(self):
return sum([len(videos) for videos in self.videos_by_class.values()])
def summary(self):
print(f"{self.train_or_test.upper()}:"
f"Loaded {self.n_unbalanced} samples across classes '{', '.join(self.classes)}'; effective sample size of {self.n_balanced}")
def load_video(self, filename, every_n_frames, to_rgb, rescale=None):
cap = cv2.VideoCapture(filename)
frameCount = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
frameWidth = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
frameHeight = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
if rescale:
out_video = np.zeros(
(math.ceil(frameCount / every_n_frames), int(frameHeight * rescale), int(frameWidth * rescale), 3),
np.dtype('uint8'))
else:
out_video = np.zeros((math.ceil(frameCount / every_n_frames), frameHeight, frameWidth, 3),
np.dtype('uint8'))
i_frame = 0
ret = True
while (i_frame * every_n_frames < frameCount and ret):
            cap.set(cv2.CAP_PROP_POS_FRAMES, i_frame * every_n_frames)  # seek to the next sampled frame (POS_FRAMES, not the read-only frame count)
ret, frame = cap.read()
if rescale:
frame = cv2.resize(frame, (0, 0), fx=rescale, fy=rescale)
if to_rgb:
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
out_video[i_frame] = frame
i_frame += 1
cap.release()
return out_video
def __len__(self):
return self.n_balanced
def __getitem__(self, idx):
# Get the class
id_cls = idx // self.n_smallest_class
cls = self.classes[id_cls]
# Get the video within the class
n_cls = self.n_by_class[cls]
id_in_cls_bal = idx % self.n_smallest_class
id_in_cls_from = math.ceil((id_in_cls_bal / self.n_smallest_class) * n_cls)
id_in_cls_to = max(id_in_cls_from,
math.floor((((
id_in_cls_bal + 1) / self.n_smallest_class) * n_cls) - 0.0001)) # Small epsilon to make sure whole numbers round down (so math.ceil != math.floor)
id_in_cls = random.randint(id_in_cls_from, id_in_cls_to)
# Load the video
videoname = self.videos_by_class[cls][id_in_cls]
video = self.load_video(filename=videoname, every_n_frames=1, to_rgb=True)
if self.alt_aug:
frame_incrementer = random.randint(1, 2) # 1 for no aug, 2 for 1
else:
frame_incrementer = 1
max_frames = self.max_frames * frame_incrementer
if self.train_or_test == 'test':
starting_frame = 0
elif self.train_or_test == 'train':
max_starting_frame = len(video) - max_frames
try:
starting_frame = random.randint(0, max_starting_frame)
except ValueError:
print(f"Problem reading {idx} -> {videoname}")
raise Exception()
else:
raise ValueError(f"train_or_test must be 'train' or 'test', not {self.train_or_test}")
video = video[starting_frame:starting_frame + max_frames:frame_incrementer]
label_name = os.path.basename(os.path.dirname(videoname))
label_id = self.classes.index(label_name)
if self.label_per_frame:
label_id = label_id * len(video) # Label for each frame
if self.transforms:
if self.framewise_transforms:
seed = random.randint(0, 99999)
video_aug = []
for frame in video:
random.seed(seed)
video_aug.append(self.transforms(image=frame)['image'])
video_aug = np.array(video_aug)
video = video_aug
else:
video = self.transforms(video)
if type(video) == list: # Transforms may return a list
video = np.array(video)
x = torch.from_numpy(video.transpose([3, 0, 1, 2])).float()
if self.i3d_norm:
x = (x / 255.) * 2 - 1
y = torch.tensor(label_id, dtype=torch.float)
        if self.bce_labels:  # BCELoss expects a batch_size * 1 shape, not just batch_size
y = y.unsqueeze(-1)
else:
y = y.long()
return x, y
``` |
{
"source": "jphdotam/EAE-ECG-autoencoder",
"score": 2
} |
#### File: EAE-ECG-autoencoder/lib/config.py
```python
import os
import yaml
def load_config(configpath):
with open(configpath) as f:
cfg = yaml.safe_load(f)
experiment_id = os.path.splitext(os.path.basename(configpath))[0]
cfg['experiment_id'] = experiment_id
model_dir = cfg['output']['model_dir']
if model_dir:
model_dir = os.path.join(model_dir, experiment_id)
if not os.path.exists(model_dir):
os.makedirs(model_dir)
log_dir = cfg['output']['log_dir']
if log_dir:
log_dir = os.path.join(log_dir, experiment_id)
if not os.path.exists(log_dir):
os.makedirs(log_dir)
vis_dir = cfg['output']['vis_dir']
if vis_dir:
vis_dir = os.path.join(vis_dir, experiment_id)
if not os.path.exists(vis_dir):
os.makedirs(vis_dir)
return cfg
```
#### File: EAE-ECG-autoencoder/lib/logging.py
```python
import os
from torch.utils.tensorboard import SummaryWriter
def get_summary_writer(cfg):
if not cfg['output']['use_tensorboard']:
return None
else:
log_dir = os.path.join(cfg['output']['log_dir'], cfg['experiment_id'])
return SummaryWriter(log_dir=log_dir)
```
#### File: EAE-ECG-autoencoder/lib/models.py
```python
import torch
import torch.nn as nn
from lib.unet1d import UNet1D
def load_model(cfg, load_model_only=False):
modeltype = cfg['training']['model']
in_channels = len(cfg['data']['input_channels'])
out_channels = cfg['data']['n_output_channels']
dp = cfg['training']['data_parallel']
if modeltype == 'unet1d':
model = UNet1D(in_channels, out_channels)
else:
raise ValueError(f"Unknown model type {modeltype}")
if dp:
model = nn.DataParallel(model).to(cfg['training']['device'])
m = model.module
else:
m = model
if load_model_only:
return model
modelpath = cfg['resume'].get('path', None)
config_epoch = cfg['resume'].get('epoch', None)
if modelpath:
state = torch.load(modelpath)
m.load_state_dict(state['state_dict'])
starting_epoch = state['epoch']
if config_epoch:
print(f"WARNING: Loaded model trained for {starting_epoch - 1} epochs but config explicitly overrides to {config_epoch}")
starting_epoch = config_epoch
else:
starting_epoch = 1
state = {}
return model, starting_epoch, state
```
#### File: EAE-ECG-autoencoder/lib/txtfile.py
```python
import pandas as pd
from io import StringIO
import pickle
import scipy.signal as signal
import scipy
import pywt # conda install pywavelets
from math import sqrt, log2
from statsmodels.robust import mad
import numpy as np
class TxtFile:
def __init__(self, filepath, verbose=False):
self.filepath = filepath
self.labelpath = filepath + '.label'
self.channels, self.sample_freq, self.data = self.load_file()
self.labels = self.load_labels()
if verbose: print("Channels: {}".format(self.channels))
def load_file(self):
with open(self.filepath) as f:
channels, sample_freq = self.load_channels(f)
_ = self._read_until(f, "[Data]")
data = f.read()
data = pd.read_table(StringIO(data), names=channels, sep=',')
# data = self.filter_data(data)
return channels, sample_freq, data
@staticmethod
def filter_data(data, type='wavelet', sample_freq=1000, savgol_filter=True):
def waveletSmooth(x, wavelet="coif5", level=1):
# Thanks to http://connor-johnson.com/2016/01/24/using-pywavelets-to-remove-high-frequency-noise/
coefficients = pywt.wavedec(x, wavelet, mode="per") # coefficients
sigma = mad(coefficients[-level]) # sigma for thresholding
uthresh = sigma * np.sqrt(2 * np.log(len(x))) # thresholding value
coefficients[1:] = (pywt.threshold(coefficient, value=uthresh, mode="soft") for coefficient in
coefficients[1:]) # threshold the coefficients
y = pywt.waverec(coefficients, wavelet, mode="per") # reconstruct
return y
if type == 'fir':
nyq_rate = sample_freq / 2
width = 5 / nyq_rate
ripple_db = 100
N, beta = signal.kaiserord(ripple_db, width)
cutoff_hz = 30
taps = signal.firwin(N, cutoff_hz / nyq_rate, window=("kaiser", beta))
filtered_data = signal.lfilter(taps, 1.0, data)
return filtered_data
elif type == 'fft':
fft = scipy.fft(data)
bandpass_fft = fft[:]
for i in range(len(fft)):
if i >= 6000:
fft[i] = 0
ifft = scipy.ifft(bandpass_fft)
data = ifft.real
if savgol_filter:
data = signal.savgol_filter(data, window_length=21, polyorder=1)
return data
elif type == 'wavelet':
data = waveletSmooth(data)
return data
else:
print("UNKNOWN FILTER: {}".format(type))
def load_channels(self, file):
channels = []
line = self._read_until(file, "Channels exported")
sample_freq = int(self._read_until(file, "Sample Rate").rsplit(' ', 1)[-1].rsplit('Hz')[0])
n_channels = int(line.split(' ')[-1])
for n_channel in range(n_channels):
line = self._read_until(file, "Label:")
channel_name = line.split(': ')[-1].rstrip()
channels.append(channel_name)
return channels, sample_freq
def load_labels(self):
try:
with open(self.labelpath, 'rb') as f:
labels = pickle.load(f)
except FileNotFoundError:
print("Label file not found, creating new label file")
labels = {}
with open(self.labelpath, 'wb') as f:
pickle.dump(labels, f)
return labels
def save_labels(self):
with open(self.labelpath, 'wb') as f:
pickle.dump(self.labels, f)
def get_labels_by_type(self, labeltype):
ranges = []
for label in self.labels:
if label['type'] == labeltype:
                ranges.append(label)
        return ranges
@staticmethod
def get_labels_from_textfile(textfile):
"""Used by the bard_ui to get the number of labels in the file preview"""
labelfile = textfile + ".label"
try:
with open(labelfile, 'rb') as f:
labels = pickle.load(f)
return labels
except (FileNotFoundError, EOFError):
return []
@staticmethod
def _read_until(file, string):
line = file.readline()
while string not in line:
line = file.readline()
return line
if __name__ == "__main__":
import matplotlib.pyplot as plt
txtfile = TxtFile("D:/Box/His ML/Data/Completed Cases Until Feb 2020/ECGs- His Project/H001/H001_2_2_SH_RNC_10.txt")
``` |
{
"source": "jphdotam/T1T2",
"score": 2
} |
#### File: jphdotam/T1T2/2d_export_labels.py
```python
import os
import multiprocessing
from collections import defaultdict
from glob import glob
from tqdm import tqdm
from lib.cfg import load_config
from lib.export import export_label
# CONFIG = "./experiments/034_mini.yaml"
CONFIG = "./experiments/036_mini.yaml"
EXCLUDED_FILES_PATH = "./data/blacklist.txt"
# Load config
cfg, model_dir = load_config(CONFIG)
source_path_data_trainval = cfg['export']['dicom_path_trainval']
source_path_label_trainval = cfg['export']['label_path_trainval']
output_training_data_dir = os.path.join(cfg['data']['npz_path_trainval'])
sequences = cfg['export']['source_channels']
label_classes = cfg['export']['label_classes']
gaussian_sigma = cfg['export']['gaussian_sigma']
# Excluded files
with open(EXCLUDED_FILES_PATH) as f:
excluded_files = f.read().splitlines()
def export_label_helper(paths):
dicom_path, label_paths = paths
for label_path in label_paths:
output_dir = os.path.join(output_training_data_dir,
os.path.basename(os.path.dirname(os.path.dirname(os.path.dirname(label_path)))))
os.makedirs(output_dir, exist_ok=True)
try:
export_label(dicom_path, label_path, 'npz', sequences, label_classes, output_dir, gaussian_sigma)
except Exception as e:
print(f"Failed {label_path}: {e}")
def blacklisted(label_path, excluded_files):
study_dir = os.path.basename(os.path.dirname(label_path))
npy_name = os.path.basename(label_path).split('.npy')[0] + '.npy'
is_blacklisted = f"{study_dir}/{npy_name}" in excluded_files
if is_blacklisted:
print(f"Skipping {label_path} - excluded")
return True
else:
return False
if __name__ == "__main__":
if not os.path.exists(output_training_data_dir):
os.makedirs(output_training_data_dir)
labelpaths_human = glob(os.path.join(source_path_label_trainval, "**", "*npy_HUMAN.pickle"), recursive=True)
labelpaths_auto = glob(os.path.join(source_path_label_trainval, 'auto', "**", "*npy_AUTO.pickle"), recursive=True)
dicompaths = glob(os.path.join(source_path_data_trainval, "**/*.npy"), recursive=True)
print(f"{len(dicompaths)} source files - found {len(labelpaths_human)} human labels and {len(labelpaths_auto)} auto labels")
labels_by_seq = defaultdict(list)
for labelpaths in (labelpaths_human, labelpaths_auto):
for labelpath in labelpaths:
seq_id = f"{os.path.basename(os.path.dirname(labelpath))}__{os.path.basename(labelpath).split('.npy')[0]}"
labels_by_seq[seq_id].append(labelpath)
labelled_dicoms = defaultdict(list)
for dicom_path in dicompaths:
seq_id = f"{os.path.basename(os.path.dirname(dicom_path))}__{os.path.basename(dicom_path).split('.npy')[0]}"
if seq_id in labels_by_seq:
for label_path in labels_by_seq[seq_id]:
labelled_dicoms[dicom_path].append(label_path)
N_WORKERS = multiprocessing.cpu_count() - 4 #// 2
with multiprocessing.Pool(N_WORKERS) as p:
for _ in tqdm(p.imap(export_label_helper, labelled_dicoms.items()), total=len(labelled_dicoms)):
pass
```
#### File: T1T2/lib/bullseye.py
```python
import copy
import math
import scipy
import scipy.spatial
import numpy as np
from skimage import measure
def mask2sectors(endo_mask, epi_mask, rv_mask, rvi_mask, num_sectors):
"""
Split myocardium to num_sectors sectors
Input :
endo_mask : [RO, E1], mask for endo
epi_mask : [RO, E1], mask for epi
rv_mask : [RO, E1], mask for rv
        rvi_mask : [RO, E1], mask for rv insertion point, can be None; if not None, rv_mask is not used
Output :
sectors : [RO, E1] sector mask, sector 1 is labelled as value 1
"""
def get_angle(a, b):
# angle from a to b (rotate a to b)
        # positive angle for counter-clockwise
# 0-360 degrees
v1_theta = math.atan2(a[1], a[0])
v2_theta = math.atan2(b[1], b[0])
r = (v2_theta - v1_theta) * (180.0 / math.pi)
if r < 0:
r += 360.0
return r
def img_to_xy(rvi_, _, e1_):
return rvi_[1], e1_ - 1 - rvi_[0]
img_height, img_width = endo_mask.shape
# find lv center
endo_pts = np.argwhere(endo_mask > 0)
lv_center = np.mean(endo_pts, axis=0)
lv_center2 = img_to_xy(lv_center, img_height, img_width)
# find rv center
if rv_mask is not None:
rv_pts = np.argwhere(rv_mask > 0)
rv_center = np.mean(rv_pts, axis=0)
else:
if rvi_mask is None:
raise ValueError("Both rv_mask and rvi_mask are None")
rvi_pts = np.argwhere(rvi_mask > 0)
rvi_pt = np.mean(rvi_pts, axis=0)
dist = np.linalg.norm(rvi_pt - lv_center)
if rvi_pt[1] < lv_center[1]:
rv_center = lv_center
rv_center[1] -= 2 * dist
rv_center[0] += dist
else:
rv_center = lv_center
rv_center[0] -= 2 * dist
rv_center[1] -= dist
rv_center2 = img_to_xy(rv_center, img_height, img_width)
rv_vec = (rv_center2[0] - lv_center2[0], rv_center2[1] - lv_center2[1])
# find rvi
if rvi_mask is None:
num_rv_pts = rv_pts.shape[0]
rvi = np.zeros((1, 2))
max_angle = 0
for pt in range(num_rv_pts):
pt2 = img_to_xy((rv_pts[pt, 0], rv_pts[pt, 1]), img_height, img_width)
rv_pt_vec = (pt2[0] - lv_center2[0], pt2[1] - lv_center2[1])
rv_rvi_angle = get_angle(rv_pt_vec, rv_vec)
if 180 >= rv_rvi_angle > max_angle:
max_angle = rv_rvi_angle
rvi[0, 0] = rv_pts[pt, 0]
rvi[0, 1] = rv_pts[pt, 1]
else:
rvi = np.argwhere(rvi_mask > 0)
rvi2 = img_to_xy((rvi[0, 0], rvi[0, 1]), img_height, img_width)
# split endo/epi to sectors
rvi_vec = (rvi2[0] - lv_center2[0], rvi2[1] - lv_center2[1])
rv_rvi_angle = get_angle(rv_vec, rvi_vec)
delta_rvi_angle = 360 / num_sectors
sectors = np.zeros(endo_mask.shape)
myo_mask = epi_mask - endo_mask
myo_pts = np.argwhere(myo_mask > 0)
n_myo_pts = myo_pts.shape[0]
angle_myo_pts = np.zeros(n_myo_pts)
for n in range(n_myo_pts):
myo_pts_xy = img_to_xy(myo_pts[n, :], img_height, img_width)
angle_myo_pts[n] = get_angle(rvi_vec, (myo_pts_xy[0] - lv_center2[0], myo_pts_xy[1] - lv_center2[1]))
if rv_rvi_angle >= 180: # rotate rvi clock wise
angle_myo_pts[n] = 360 - angle_myo_pts[n]
sector_no = np.floor(angle_myo_pts[n] / delta_rvi_angle) + 1
if sector_no == 1:
sectors[myo_pts[n, 0], myo_pts[n, 1]] = sector_no
else:
sectors[myo_pts[n, 0], myo_pts[n, 1]] = num_sectors + 2 - sector_no
return sectors
def smooth_contours(contour_x, contour_y, n_components=24, circularise=False, n_pts=2000):
""" takes contour_x,contour_y the cartesian coordinates of a contour,
then procdues a smoothed more circular contour smoothed_contour_x,smoothed_contour_y"""
if n_components is None:
        n_components = 12  # slightly arbitrary number, but seems to work well
npts = n_pts + 1
contour_pts = np.transpose(np.stack([contour_x, contour_y]))
if circularise:
# get the contour points that form a convex hull
hull = scipy.spatial.ConvexHull(contour_pts)
to_sample = hull.vertices
else:
to_sample = range(0, len(contour_x))
    # wrap around circle
to_sample = np.hstack([to_sample, to_sample[0]])
sample_pts = contour_pts[to_sample, :]
# sample each curve at uniform distances according to arc length parameterisation
dist_between_pts = np.diff(sample_pts, axis=0)
cumulative_distance = np.sqrt(dist_between_pts[:, 0] ** 2 + dist_between_pts[:, 1] ** 2)
cumulative_distance = np.insert(cumulative_distance, 0, 0, axis=0)
cumulative_distance = np.cumsum(cumulative_distance)
cumulative_distance = cumulative_distance / cumulative_distance[-1]
contour_x = np.interp(np.linspace(0, 1, npts), cumulative_distance, sample_pts[:, 0], period=360)
contour_y = np.interp(np.linspace(0, 1, npts), cumulative_distance, sample_pts[:, 1], period=360)
contour_x = contour_x[:-1]
contour_y = contour_y[:-1]
# smooth out contour by keeping the lowest nkeep Fourier components
n = len(contour_x)
n_filt = n - n_components - 1
f = np.fft.fft(contour_x)
f[int(n / 2 + 1 - n_filt / 2):int(n / 2 + n_filt / 2)] = 0.0
smoothed_contour_x = np.abs(np.fft.ifft(f))
f = np.fft.fft(contour_y)
f[int(n / 2 + 1 - n_filt / 2):int(n / 2 + n_filt / 2)] = 0.0
smoothed_contour_y = np.abs(np.fft.ifft(f))
return smoothed_contour_x, smoothed_contour_y
def extract_contours(preds, thres=0.75, smoothing=True, num_components_smoothing=24, circular=False, n_pts=2000):
"""Extract contours from segmentation mask or probability map
Inputs:
        preds : [RO E1], input mask or probability map
        thres : threshold to extract contours, a 2D marching cube extraction is performed
smoothing : True or False, if true, contours are smoothed
num_components_smoothing : number of fft components kept after smoothing
circular : True or False, if true, contours are kept to approx. circle
Outputs:
contours : a list of contours, every contour is a nx2 numpy array
"""
contours = measure.find_contours(preds, thres)
len_contours = list()
for n, contour in enumerate(contours):
len_contours.append(contours[n].shape[0])
if smoothing:
s_c = copy.deepcopy(contours)
for n, contour in enumerate(contours):
sc_x, sc_y = smooth_contours(contour[:, 0],
contour[:, 1],
n_components=num_components_smoothing,
circularise=circular,
n_pts=n_pts)
s_c[n] = np.zeros((sc_x.shape[0], 2))
s_c[n][:, 0] = sc_x
s_c[n][:, 1] = sc_y
contours = copy.deepcopy(s_c)
return contours, len_contours
def extract_epi_contours(preds, thres=0.75, smoothing=True, num_components_smoothing=24, circular=False, n_pts=2000):
"""Extract myocardium epi contours from segmentation mask or probability map
Inputs:
        preds : [RO E1], input mask or probability map
        thres : threshold to extract contours, a 2D marching cube extraction is performed
smoothing : True or False, if true, contours are smoothed
num_components_smoothing : number of fft components kept after smoothing
circular : True or False, if true, contours are kept to approx. circle
Outputs:
epi : a nx2 numpy array for epi contour
"""
contours, len_contour = extract_contours(preds, thres, smoothing, num_components_smoothing, circular, n_pts)
num_c = len(contours)
epi = None
if num_c == 0:
return epi
if num_c == 1:
epi = contours[0]
return epi
if num_c > 1:
# find the longest contours as epi
c_len = np.zeros([num_c])
for n, contour in enumerate(contours):
c_len[n] = len_contour[n]
c_ind = np.argsort(c_len)
epi = contours[c_ind[-1]]
return epi
def compute_bullseye_sector_mask_for_slice(endo_mask, epi_mask, rv_mask, rvi_mask, num_sectors=None):
"""
Compute sector masks for single slice
Input :
endo_mask, epi_mask, rv_mask, rvi_mask : [RO, E1]
rvi_mask can be all zeros. In this case, rv_mask is used
num_sectors : 6, but should be for 4 apex
Output :
sectors : [RO, E1], sector mask. For 6 sectors, its values are 1, 2, 3, 4, 5, 6. background is 0.
sectors_32 : [RO, E1], sector mask for endo and epi.
For 6 EPI sectors, its values are 1-6. background is 0.
For ENDO sectors, it is 7-12
"""
rvi_pt = np.argwhere(rvi_mask > 0)
has_rvi = True
if (rvi_pt is None) or (rvi_pt.shape[0] == 0):
print("Cannot find rvi point, image must be in CMR view ... ")
endo_mask = np.transpose(endo_mask, [1, 0, 2])
epi_mask = np.transpose(epi_mask, [1, 0, 2])
rv_mask = np.transpose(rv_mask, [1, 0, 2])
has_rvi = False
img_height, img_width = endo_mask.shape
# refine epi
m = np.zeros((img_height, img_width))
m[np.where(epi_mask > 0)] = 1
m[np.where(endo_mask > 0)] = 1
epi_mask_2 = m
# get contours
contours_endo = extract_epi_contours(endo_mask,
thres=0.5,
smoothing=True,
num_components_smoothing=36,
circular=False,
n_pts=2000)
contours_epi = extract_epi_contours(epi_mask_2,
thres=0.95,
smoothing=True,
num_components_smoothing=36,
circular=False,
n_pts=2000)
# split sectors
rvi_pt = np.argwhere(rvi_mask > 0)
if rvi_pt is None:
raise ValueError("Cannot find rv insertion point")
# split 16 sectors
sectors = mask2sectors(endo_mask, epi_mask, rv_mask, rvi_mask, num_sectors)
# split 32 sectors
endo_kd = scipy.spatial.KDTree(contours_endo)
epi_kd = scipy.spatial.KDTree(contours_epi)
myo = np.copy(sectors)
max_myo = np.max(myo)
pts = np.where(myo > 0)
n_pts = pts[0].shape[0]
pts_2 = np.zeros((n_pts, 2))
pts_2[:, 0] = pts[0]
pts_2[:, 1] = pts[1]
d_endo, i_endo = endo_kd.query(pts_2)
d_epi, i_epi = epi_kd.query(pts_2)
for p in range(n_pts):
if d_epi[p] > d_endo[p]:
myo[pts[0][p], pts[1][p]] = myo[pts[0][p], pts[1][p]] + max_myo
sectors_32 = myo
if (rvi_pt is None) or (rvi_pt.shape[0] == 0):
sectors = np.transpose(sectors, [1, 0, 2])
sectors_32 = np.transpose(sectors_32, [1, 0, 2])
return sectors, sectors_32
```
#### File: T1T2/lib/dataset.py
```python
import os
import math
import random
import hashlib
import skimage.io
import skimage.measure
import numpy as np
from glob import glob
from tqdm import tqdm
import torch
from torch.utils.data import Dataset
def load_npy_file(npy_path):
npy = np.load(npy_path)
t1w, t2w, pd, t1, t2 = np.transpose(npy, (2, 0, 1))
return t1w, t2w, pd, t1, t2
class T1T2Dataset(Dataset):
def __init__(self, cfg, train_or_test, transforms, fold=1):
self.cfg = cfg
self.train_or_test = train_or_test
self.transforms = transforms
self.fold = fold
self.n_folds = cfg['training']['n_folds']
self.mixed_precision = cfg['training'].get('mixed_precision', False)
self.data_dir = cfg['data']['npz_path_trainval']
self.dates = self.load_dates()
self.sequences = self.load_sequences()
def load_dates(self):
"""Get each unique date in the PNG directory and split into train/test using seeding for reproducibility"""
def get_train_test_for_date(date):
randnum = int(hashlib.md5(str.encode(date)).hexdigest(), 16) / 16 ** 32
test_fold = math.floor(randnum * self.n_folds) + 1
if test_fold == self.fold:
return 'test'
else:
return 'train'
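        # For instance (sketch): the md5 digest of a date string, scaled into [0, 1), always maps the
        # same date to the same fold, so the train/test split is reproducible across runs and machines.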
assert self.train_or_test in ('train', 'test')
images = sorted(glob(os.path.join(self.data_dir, f"**/*__combined.npz"), recursive=True))
dates = list({os.path.basename(i).split('__')[0] for i in images})
dates = [d for d in dates if get_train_test_for_date(d) == self.train_or_test]
return dates
def load_sequences(self):
"""Get a list of tuples of (imgpath, labpath)"""
sequences = []
for date in sorted(self.dates):
imgpaths = sorted(glob(os.path.join(self.data_dir, f"**/{date}__*__combined.npz"), recursive=True)) # Get all images
sequences.extend(imgpaths)
print(f"{self.train_or_test.upper():<5} FOLD {self.fold}: Loaded {len(sequences)} over {len(self.dates)} dates")
return sequences
def __len__(self):
return len(self.sequences)
def __getitem__(self, idx):
n_channels_keep_img = len(self.cfg['export']['source_channels']) # May have exported more channels to make PNG
imgpath = self.sequences[idx]
img = np.load(imgpath)['dicom']
lab = np.load(imgpath)['label']
imglab = np.dstack((img, lab))
trans = self.transforms(image=imglab)['image']
imglab = trans.transpose([2, 0, 1])
img = imglab[:n_channels_keep_img]
lab = imglab[n_channels_keep_img:]
# BELOW CURRENTLY NOT NEEDED AS WE ARE NOT NORMALISING SO LABELS SHOULD STILL BE VALID
# Scale between 0 and 1, as normalisation will have denormalised, and possibly some augs too, e.g. brightness
# lab = (lab - lab.min())
# lab = lab / (lab.max() + 1e-8)
x = torch.from_numpy(img).float()
y = torch.from_numpy(lab).float()
if self.mixed_precision:
x = x.half()
y = y.half()
else:
x = x.float()
y = y.float()
return x, y, imgpath
def get_numpy_paths_for_sequence(self, sequence_tuple):
npy_root = self.cfg['export']['npydir']
imgpath = sequence_tuple
datefolder, studyfolder, npyname, _ext = os.path.basename(imgpath).split('__')
return os.path.join(npy_root, datefolder, studyfolder, npyname + '.npy')
```
#### File: T1T2/lib/labeling.py
```python
import os
import re
import pickle
import numpy as np
from glob import glob
from tqdm import tqdm
from collections import defaultdict
try:
import pydicom
except ImportError:
pass
REGEX_HUI = "(.*)_([0-9]{3,})_([0-9]{5,})_([0-9]{5,})_([0-9]{2,})_([0-9]{8})-([0-9]{6,})_([0-9]{1,})_?(.*)?"
REGEX_PETER = "(.*)_([0-9]{4,})_([0-9]{4,})_([0-9]{4,})_([0-9]{1,})_([0-9]{8})-([0-9]{6})"
def load_pickle(path):
with open(path, 'rb') as f:
return pickle.load(f)
def save_pickle(path, data):
with open(path, 'wb') as f:
pickle.dump(data, f)
def dicom_to_img(dicom):
if type(dicom) == str:
dcm = pydicom.dcmread(dicom)
else:
dcm = dicom
window_min = max(0, dcm.WindowCenter - dcm.WindowWidth)
frame = dcm.pixel_array - window_min
frame = frame / dcm.WindowWidth
frame = np.clip(frame, 0, 1)
frame = (frame * 255).astype(np.uint8)
return frame
def get_studies_peter(path):
sequences = {}
numpy_paths = glob(os.path.join(path, "**", "*.npy"), recursive=True)
for numpy_path in numpy_paths:
# PETER FORMAT
# DIR:
# 20200313 \ T1T2_141613_25752396_25752404_256_20200711-135533 \ ...
# ^ date ^seq ^scanr ^sid ^pid ^meas_id ^datetime
#
#
# ... T1_T2_PD_SLC0_CON0_PHS0_REP0_SET0_AVE0_1.npy
# ... T1_T2_PD_SLC1_CON0_PHS0_REP0_SET0_AVE0_2.npy
# ... T1_T2_PD_SLC2_CON0_PHS0_REP0_SET0_AVE0_3.npy
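        # For instance (sketch), for the directory name above re.findall(REGEX_PETER, dirname)[0]
        # should yield ('T1T2', '141613', '25752396', '25752404', '256', '20200711', '135533'),
        # i.e. seq_name, scanner_id, study_id, patient_id, meas_id, date, time.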
dirname = os.path.basename(os.path.dirname(numpy_path))
matches = re.findall(REGEX_PETER, dirname)[0] # Returns a list of len 1, with a tuple of 7
assert len(matches) == 7, f"Expected 7 matches but got {len(matches)}: {matches}"
seq_name, scanner_id, study_id, patient_id, meas_id, date, time = matches
run_id = os.path.splitext(os.path.basename(numpy_path))[0].rsplit('_', 1)[1]
human_report_path = numpy_path + '_HUMAN.pickle'
auto_report_path = numpy_path + '_AUTO.pickle'
if os.path.exists(human_report_path+'.invalid') or os.path.exists(auto_report_path+'.invalid'):
reported = 'invalid'
elif os.path.exists(human_report_path):
reported = 'human'
elif os.path.exists(auto_report_path):
reported = 'auto'
else:
reported = 'no'
scan_id = f"{scanner_id}_{patient_id}_{study_id}_{meas_id}_{date}-{time} - {run_id}"
assert scan_id not in sequences, f"Found clashing ID {scan_id}"
sequences[scan_id] = {
'numpy_path': numpy_path,
'human_report_path': human_report_path,
'auto_report_path': auto_report_path,
'reported': reported,
}
return sequences
``` |
{
"source": "jpherkness/ETA-Detroit-Database-Generator",
"score": 3
} |
#### File: jpherkness/ETA-Detroit-Database-Generator/dataset.py
```python
class DataSet(object):
all_routes = {}
all_stop_orders = {}
all_stop_locations = {}
    # all_routes -> company -> route
def saveRoute(self, route):
company = route.company
if company not in self.all_routes:
self.all_routes[company] = []
self.all_routes[company].append(route.__dict__)
# all_stop_orders -> company -> route_id -> direction -> stopOrder
def saveStopOrder(self, stopOrder):
company = stopOrder.company
route_id = stopOrder.route_id
direction = stopOrder.direction
if company not in self.all_stop_orders:
self.all_stop_orders[company] = {}
if route_id not in self.all_stop_orders[company]:
self.all_stop_orders[company][route_id] = {}
if direction not in self.all_stop_orders[company][route_id]:
self.all_stop_orders[company][route_id][direction] = []
self.all_stop_orders[company][route_id][direction].append(stopOrder.__dict__)
# all_stop_locations -> company -> route_id -> direction -> stopLocation
def saveStopLocation(self, stopLocation):
company = stopLocation.company
route_id = stopLocation.route_id
direction = stopLocation.direction
if company not in self.all_stop_locations:
self.all_stop_locations[company] = {}
if route_id not in self.all_stop_locations[company]:
self.all_stop_locations[company][route_id] = {}
if direction not in self.all_stop_locations[company][route_id]:
self.all_stop_locations[company][route_id][direction] = []
self.all_stop_locations[company][route_id][direction].append(stopLocation.__dict__)
```
#### File: jpherkness/ETA-Detroit-Database-Generator/models.py
```python
class Route(object):
def __init__(self, company, route_id, route_name, route_number, direction1, direction2, days_active):
self.company = company
self.route_id = route_id
self.route_name = normalizeName(route_name)
self.route_number = route_number
self.direction1 = normalizeDirection(direction1)
self.direction2 = normalizeDirection(direction2)
self.days_active = days_active
class StopOrder(object):
def __init__(self, company, route_id, direction, stop_id, stop_name, stop_order, stop_day):
self.company = company
self.route_id = route_id
self.direction = normalizeDirection(direction)
self.stop_id = stop_id
self.stop_name = normalizeName(stop_name)
self.stop_order = stop_order
self.stop_day = stop_day
class StopLocation(object):
def __init__(self, company, route_id, direction, stop_id, stop_name, latitude, longitude):
self.company = company
self.route_id = route_id
self.direction = normalizeDirection(direction)
self.stop_id = stop_id
self.stop_name = normalizeName(stop_name)
self.latitude = latitude
self.longitude = longitude
def normalizeName(name):
return name.title()
def normalizeDirection(direction):
return direction.lower()
```
#### File: jpherkness/ETA-Detroit-Database-Generator/smart.py
```python
import json
import csv
import requests
import os
import database
import difflib
import sqlite3
import fb
from models import *
class Smart(object):
current_path = os.path.dirname(os.path.abspath(__file__))
company = "SmartBus"
data = None
def __init__(self, data):
self.data = data
def load_data(self):
self.load_smart_data()
def load_smart_data(self):
print("*************************************************************")
print("********** IMPORTING SMARTBUS ROUTES AND STOPS **********")
print("*************************************************************")
routes_request = requests.get("http://www.smartbus.org/desktopmodules/SMART.Endpoint/Proxy.ashx?method=getroutesforselect").json()
for route in routes_request:
route_id = route["Value"]
route_number = route_id
route_name = route["Text"].replace(route["Value"] + " - ", "")
# If reflex already has this route, ignore it
skip = False
if "reflex" in self.data.all_routes:
for reflex_route in self.data.all_routes["reflex"]:
if reflex_route["route_id"] == route_id:
skip = True
break
if skip:
continue
# Get both possible directions for the route
direction_request = requests.get("http://www.smartbus.org/desktopmodules/SMART.Endpoint/Proxy.ashx?method=getdirectionbyroute&routeid=" + route_id).json()
direction1 = direction_request[0]
direction2 = direction_request[1]
# Add the days that the route is active
days = requests.get("http://www.smartbus.org/desktopmodules/SMART.Endpoint/Proxy.ashx?method=getservicedaysforschedules&routeid=" + route_id).json()
days_array = []
for day in days:
days_array.append(day["Text"])
self.load_stop_orders(day["Text"], day["Value"], direction1, route_id)
self.load_stop_orders(day["Text"], day["Value"], direction2, route_id)
days_active = ",".join(days_array)
# Add the route to the sqlite database and firebase
new_route = Route(self.company, route_id, route_name, route_number, direction1, direction2, days_active)
database.insert_route(new_route)
self.data.saveRoute(new_route)
# Load all stop locations for both direction1 and direction2
self.load_all_stops(route_id, direction1)
self.load_all_stops(route_id, direction2)
print("IMPORTED ROUTE:", route_name, "(" + route_number + ")")
def load_stop_orders(self, stop_day, day_code, direction, route_id):
stops_request = requests.get("http://www.smartbus.org/DesktopModules/SMART.Schedules/ScheduleService.ashx?route="+ route_id +"&scheduleday="+ day_code +"&direction="+ direction).json()
# sorts stops by name
# stops_request = sorted(stops_request, key=lambda stop: stop["Name"])
stop_order = 1
for stop in stops_request:
# set derived stop properties
stop_name = stop["Name"]
# Add the stop order
new_stop_order = StopOrder(self.company, route_id, direction, None, stop_name, stop_order, stop_day)
database.insert_stop_order(new_stop_order)
self.data.saveStopOrder(new_stop_order)
# Update the stop order counter
stop_order = stop_order + 1
def load_all_stops(self, route_id, direction):
stops_request = requests.get("http://www.smartbus.org/desktopmodules/SMART.Endpoint/Proxy.ashx?method=getstopsbyrouteanddirection&routeid=" + route_id + "&d=" + direction).json()
for stop in stops_request:
# Set derived stop properties
stop_id = stop["StopId"]
stop_name = stop["Name"]
latitude = stop["Latitude"]
longitude = stop["Longitude"]
# Add the stop location
new_stop_location = StopLocation(self.company, route_id, direction, stop_id, stop_name, latitude, longitude)
database.insert_stop_location(new_stop_location)
self.data.saveStopLocation(new_stop_location)
# Creates a csv file with the contents of the array at the specified file path
def export_array_to_csv(array, file_name):
with open(file_name, "w") as f:
w = csv.writer(f)
# If there are no items, then nothing to write...
if len(array) <= 0:
return
# Write the keys as the first row
keys = list(array[0].keys())
keys.sort()
w.writerow(keys)
# Write each row to correspond with the keys
for obj in array:
row = []
for key in keys:
row.append(obj[key])
w.writerow(row)
print("EXPORTED:", current_path + "/" + file_name)
def update_smart_stop_ids():
connection = database.connect()
c = connection.cursor()
for stop in c.execute('select * from stop_orders'):
get_matching_stop_id_within_bounds(stop[1], stop[2], stop[4])
def get_matching_stop_id_within_bounds(route_id, direction, stop_name):
connection = database.connect()
c = connection.cursor()
#search_name = "".join(sorted(stop_name.replace("&", "+"))).lstrip()
search_name = "".join(sorted(stop_name.replace("+", "&").split(" "))).replace(" ", "")
current_best_delta = 0
current_best_name = ""
current_best_stop_id = None
for location in c.execute('select * from stop_locations'):
original_name = location[4]
match_name = "".join(sorted(original_name.split(" "))).replace(" ", "")
delta = difflib.SequenceMatcher(None, search_name, match_name).ratio()
#print(" ", search_name, match_name, delta)
if delta > current_best_delta:
current_best_delta = delta
current_best_name = original_name
current_best_stop_id = location[3]
print(stop_name.ljust(30), "->" , current_best_name.ljust(30),current_best_stop_id.ljust(30), current_best_delta)
# All routes can be found here:
# http://www.smartbus.org/desktopmodules/SMART.Endpoint/Proxy.ashx?method=getroutesforselect
#
# Both directions for a specific route can be found here:
# http://www.smartbus.org/desktopmodules/SMART.Endpoint/Proxy.ashx?method=getdirectionbyroute&routeid=140
# The directions are not always NORTHBOUND and SOUTHBOUND, they might be EASTBOUND and WESTBOUND
#
# Ordered route stops (stop name and schedules) can be found here:
# http://www.smartbus.org/DesktopModules/SMART.Schedules/ScheduleService.ashx?route=140&scheduleday=2&direction=NORTHBOUND
#
# All route stops (longitude, latitude, stop name, and stop id) can be found here:
# http://www.smartbus.org/desktopmodules/SMART.Endpoint/Proxy.ashx?method=getstopsbyrouteanddirection&routeid=140&d=Northbound
# The stop names that this endpoint returns prevents us from ever matching the ordered stops to a specific location
# (DEARBORN TRANSIT CTR vs DEARBORN TRANSIT CENTER) as well as many other naming issues.
# Currently, I don't see a way to match the ordered stops to their location without using a percentage based string comparison algorithm.
#
# A detailed description of a route can be found here:
# http://www.smartbus.org/desktopmodules/SMART.Endpoint/Proxy.ashx?method=getroutebyid&routeid=140
#
#
# Questions
# 1. Do you want the output as a mongodb or csv?
# 2. Should I create a new table for just the stop orders
# Routes
# Stops
# Schedules
# Order
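# Minimal sketch of chaining these endpoints with requests (route/direction values are illustrative only):
#   routes = requests.get(".../Proxy.ashx?method=getroutesforselect").json()
#   route_id = routes[0]["Value"]
#   directions = requests.get(".../Proxy.ashx?method=getdirectionbyroute&routeid=" + route_id).json()
#   stops = requests.get(".../Proxy.ashx?method=getstopsbyrouteanddirection&routeid=" + route_id + "&d=" + directions[0]).json()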
``` |
{
"source": "jphetphoumy/Web-vuln",
"score": 3
} |
#### File: web1/backend/main.py
```python
import subprocess
from flask import Flask, request
from flask_cors import CORS
app = Flask(__name__)
app.config['CORS_HEADER'] = 'Content-Type'
CORS(app)
@app.route('/')
def home():
return 'Hello world'
@app.route('/nslookup', methods=['POST'])
def nslookup():
if request.method == 'POST':
content = request.json
domain = content['domain']
result = subprocess.run("nslookup %s" % domain, shell=True, check=True, stdout=subprocess.PIPE)
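        # Note: shell=True with unsanitised user input allows command injection; this repo appears to be
        # a deliberately vulnerable demo, so the call is left as-is.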
print(result.stdout.decode('utf-8'))
return {
'domain': domain,
'data': '{}'.format(result.stdout.decode('utf-8').replace('\n', '<br>'))
}
else:
return {
            'data': 'GET requests are not accepted'
}
if __name__ == "__main__":
app.run(host='0.0.0.0', port=3000)
``` |
{
"source": "jphgoodwin/pydriller",
"score": 2
} |
#### File: pydriller/tests/test_repository_mining.py
```python
import logging
from datetime import datetime
import tempfile
import os
import shutil
import platform
import pytest
from pydriller import RepositoryMining, GitRepository
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(message)s',
level=logging.INFO)
# It should fail when no URLs are specified
def test_no_url():
with pytest.raises(Exception):
list(RepositoryMining().traverse_commits())
# It should fail when URL is not a string or a List
def test_badly_formatted_repo_url():
with pytest.raises(Exception):
list(RepositoryMining(path_to_repo=set('repo')).traverse_commits())
def test_simple_url():
assert len(list(RepositoryMining(
path_to_repo="test-repos/test1").traverse_commits())) == 5
def test_two_local_urls():
urls = ["test-repos/test1", "test-repos/test3"]
assert len(list(RepositoryMining(
path_to_repo=urls).traverse_commits())) == 11
def test_simple_remote_url():
dt2 = datetime(2018, 10, 20)
assert len(list(RepositoryMining(
path_to_repo="https://github.com/ishepard/pydriller.git",
to=dt2).traverse_commits())) == 159
def test_two_remote_urls():
urls = ["https://github.com/mauricioaniche/repodriller.git",
"https://github.com/ishepard/pydriller"]
dt2 = datetime(2018, 10, 20)
assert len(list(RepositoryMining(path_to_repo=urls,
to=dt2).traverse_commits())) == 518
def test_2_identical_local_urls():
urls = ["test-repos/test1", "test-repos/test1"]
assert len(list(RepositoryMining(
path_to_repo=urls).traverse_commits())) == 10
def test_both_local_and_remote_urls():
dt2 = datetime(2018, 10, 20)
assert len(list(RepositoryMining(
path_to_repo=["test-repos/test1",
"https://github.com/ishepard/pydriller.git"],
to=dt2).traverse_commits())) == 164
def test_both_local_and_remote_urls_list():
dt2 = datetime(2018, 10, 20)
urls = ["test-repos/test1",
"https://github.com/mauricioaniche/repodriller.git",
"test-repos/test3",
"https://github.com/ishepard/pydriller.git"]
assert len(list(RepositoryMining(path_to_repo=urls,
to=dt2).traverse_commits())) == 529
def test_badly_formatted_url():
with pytest.raises(Exception):
list(RepositoryMining(
path_to_repo='https://github.com/ishepard.git/test')
.traverse_commits())
with pytest.raises(Exception):
list(RepositoryMining(path_to_repo='test').traverse_commits())
def test_diff_histogram():
# without histogram
commit = list(RepositoryMining('test-repos/test13',
single="93df8676e6fab70d9677e94fd0f6b17db095e890").traverse_commits())[0]
mod = commit.modifications[0]
gr = GitRepository('test-repos/test13')
diff = gr.parse_diff(mod.diff)
assert len(diff['added']) == 11
assert (3, ' if (path == null)') in diff['added']
assert (5, ' log.error("Icon path is null");') in diff['added']
assert (6, ' return null;') in diff['added']
assert (8, '') in diff['added']
assert (9, ' java.net.URL imgURL = GuiImporter.class.getResource(path);') in diff['added']
assert (10, '') in diff['added']
assert (11, ' if (imgURL == null)') in diff['added']
assert (12, ' {') in diff['added']
assert (14, ' return null;') in diff['added']
assert (16, ' else') in diff['added']
assert (17, ' return new ImageIcon(imgURL);') in diff['added']
assert len(diff['deleted']) == 7
assert (3, ' java.net.URL imgURL = GuiImporter.class.getResource(path);') in diff['deleted']
assert (4, '') in diff['deleted']
assert (5, ' if (imgURL != null)') in diff['deleted']
assert (7, ' return new ImageIcon(imgURL);') in diff['deleted']
assert (9, ' else') in diff['deleted']
assert (10, ' {') in diff['deleted']
assert (13, ' return null;') in diff['deleted']
# with histogram
commit = list(RepositoryMining('test-repos/test13',
single="93df8676e6fab70d9677e94fd0f6b17db095e890",
histogram_diff=True).traverse_commits())[0]
mod = commit.modifications[0]
gr = GitRepository('test-repos/test13')
diff = gr.parse_diff(mod.diff)
assert (4, ' {') in diff["added"]
assert (5, ' log.error("Icon path is null");') in diff["added"]
assert (6, ' return null;') in diff["added"]
assert (7, ' }') in diff["added"]
assert (8, '') in diff["added"]
assert (11, ' if (imgURL == null)') in diff["added"]
assert (12, ' {') in diff["added"]
assert (13, ' log.error("Couldn\'t find icon: " + imgURL);') in diff["added"]
assert (14, ' return null;') in diff["added"]
assert (17, ' return new ImageIcon(imgURL);') in diff["added"]
assert (6, ' {') in diff["deleted"]
assert (7, ' return new ImageIcon(imgURL);') in diff["deleted"]
assert (10, ' {') in diff["deleted"]
assert (11, ' log.error("Couldn\'t find icon: " + imgURL);') in diff["deleted"]
assert (12, ' }') in diff["deleted"]
assert (13, ' return null;') in diff["deleted"]
def test_ignore_add_whitespaces():
commit = list(RepositoryMining('test-repos/test14',
single="338a74ceae164784e216555d930210371279ba8e").traverse_commits())[0]
assert len(commit.modifications) == 1
commit = list(RepositoryMining('test-repos/test14',
skip_whitespaces=True,
single="338a74ceae164784e216555d930210371279ba8e").traverse_commits())[0]
assert len(commit.modifications) == 0
def test_ignore_add_whitespaces_and_modified_normal_line():
gr = GitRepository('test-repos/test14')
commit = list(RepositoryMining('test-repos/test14',
single="52716ef1f11e07308b5df1b313aec5496d5e91ce").traverse_commits())[0]
assert len(commit.modifications) == 1
parsed_normal_diff = gr.parse_diff(commit.modifications[0].diff)
commit = list(RepositoryMining('test-repos/test14',
skip_whitespaces=True,
single="52716ef1f11e07308b5df1b313aec5496d5e91ce").traverse_commits())[0]
assert len(commit.modifications) == 1
parsed_wo_whitespaces_diff = gr.parse_diff(commit.modifications[0].diff)
assert len(parsed_normal_diff['added']) == 2
assert len(parsed_wo_whitespaces_diff['added']) == 1
assert len(parsed_normal_diff['deleted']) == 1
assert len(parsed_wo_whitespaces_diff['deleted']) == 0
def test_ignore_deleted_whitespaces():
commit = list(RepositoryMining('test-repos/test14',
single="e6e429f6b485e18fb856019d9953370fd5420b20").traverse_commits())[0]
assert len(commit.modifications) == 1
commit = list(RepositoryMining('test-repos/test14',
skip_whitespaces=True,
single="e6e429f6b485e18fb856019d9953370fd5420b20").traverse_commits())[0]
assert len(commit.modifications) == 0
def test_ignore_add_whitespaces_and_changed_file():
commit = list(RepositoryMining('test-repos/test14',
single="532068e9d64b8a86e07eea93de3a57bf9e5b4ae0").traverse_commits())[0]
assert len(commit.modifications) == 2
commit = list(RepositoryMining('test-repos/test14',
skip_whitespaces=True,
single="532068e9d64b8a86e07eea93de3a57bf9e5b4ae0").traverse_commits())[0]
assert len(commit.modifications) == 1
@pytest.mark.skipif(platform.system() == "Windows", reason="Sometimes Windows give an error 'Handle is not valid' in this test, though it works anyway outside the test.")
def test_clone_repo_to():
tmp_folder = tempfile.TemporaryDirectory()
dt2 = datetime(2018, 10, 20)
url = "https://github.com/ishepard/pydriller.git"
assert len(list(RepositoryMining(
path_to_repo=url,
to=dt2,
clone_repo_to=tmp_folder.name).traverse_commits())) == 159
``` |
{
"source": "jphgxq/bagua",
"score": 2
} |
#### File: bagua/bagua/bagua_define.py
```python
import enum
from typing import List
import sys
if sys.version_info >= (3, 9):
from typing import TypedDict # pytype: disable=not-supported-yet
else:
from typing_extensions import TypedDict # pytype: disable=not-supported-yet
from pydantic import BaseModel
class TensorDtype(str, enum.Enum):
F32 = "f32"
F16 = "f16"
U8 = "u8"
class TensorDeclaration(TypedDict):
name: str
num_elements: int
dtype: TensorDtype
def get_tensor_declaration_bytes(td: TensorDeclaration) -> int:
dtype_unit_size = {
TensorDtype.F32.value: 4,
TensorDtype.F16.value: 2,
TensorDtype.U8.value: 1,
}
return td["num_elements"] * dtype_unit_size[td["dtype"]]
class BaguaHyperparameter(BaseModel):
"""
    Structure holding all bagua hyperparameters
"""
buckets: List[List[TensorDeclaration]] = []
is_hierarchical_reduce: bool = False
def update(self, param_dict: dict):
tmp = self.dict()
tmp.update(param_dict)
for key, value in param_dict.items():
if key in tmp:
self.__dict__[key] = value
return self
``` |
{
"source": "jphilip/django-treewidget",
"score": 2
} |
#### File: example/exampleapp/models.py
```python
from __future__ import unicode_literals
from django.db import models
from mptt.models import MPTTModel
from treebeard.mp_tree import MP_Node
from treebeard.al_tree import AL_Node
from treebeard.ns_tree import NS_Node
from treewidget.fields import TreeForeignKey, TreeManyToManyField
from django.utils.encoding import python_2_unicode_compatible
# django-mptt
@python_2_unicode_compatible
class Mptt(MPTTModel):
name = models.CharField(max_length=32)
parent = TreeForeignKey(
'self', blank=True, null=True, on_delete=models.CASCADE, settings={'filtered': True})
def __str__(self):
return self.name
# django-treebeard
@python_2_unicode_compatible
class Treebeardmp(MP_Node):
name = models.CharField(max_length=32)
def __str__(self):
return '%s' % self.name
@python_2_unicode_compatible
class Treebeardal(AL_Node):
name = models.CharField(max_length=32)
parent = models.ForeignKey('self', related_name='children_set', null=True,
db_index=True, on_delete=models.CASCADE)
sib_order = models.PositiveIntegerField()
def __str__(self):
return '%s' % self.name
@python_2_unicode_compatible
class Treebeardns(NS_Node):
name = models.CharField(max_length=32)
def __str__(self):
return '%s' % self.name
class Example(models.Model):
mptt = TreeForeignKey(Mptt, on_delete=models.CASCADE)
treebeardmp = TreeForeignKey(Treebeardmp, on_delete=models.CASCADE,
settings={'show_buttons': True, 'filtered': True})
treebeardal = TreeForeignKey(Treebeardal, on_delete=models.CASCADE,
settings={'search': True, 'dnd': True, 'sort': True})
treebeardns = TreeForeignKey(Treebeardns, on_delete=models.CASCADE,
settings={'dnd': True})
mptt_many = TreeManyToManyField(Mptt, related_name='example_many',
settings={'show_buttons': True, 'search': True, 'dnd': True})
treebeardmp_many = TreeManyToManyField(Treebeardmp, related_name='example_many')
treebeardal_many = TreeManyToManyField(Treebeardal, related_name='example_many')
treebeardns_many = TreeManyToManyField(Treebeardns, related_name='example_many')
``` |
{
"source": "JPhilipp/QuestML",
"score": 3
} |
#### File: script/Python/class_quest_handler.py
```python
import xml.dom.minidom
from xml.dom.minidom import *
import cgi
import string
import re
import random
class QuestHandler:
states = {}
def getOutput(self):
xhtml = ""
self.storeStates()
random.seed()
pathXml = "./qml/quest/" + self.states['quest'] + ".xml"
qmlDoc = xml.dom.minidom.parse(pathXml)
content = self.getContent(qmlDoc)
header = self.getHeader(qmlDoc)
footer = self.getFooter()
xhtml = header + content + footer
xhtml = xhtml.encode("latin-1")
return xhtml
def storeStates(self):
form = cgi.FieldStorage()
self.states['quest'] = 'test'
self.states['station'] = 'start'
if form:
for key in form.keys():
if not key == 'submitForm':
s = self.decodeField(form[key])
thisKey = key
if string.find(thisKey, 'qmlInput') == 0:
s = "'" + s + "'"
if thisKey != 'qmlInput':
thisKey = string.replace(thisKey, 'qmlInput', '')
self.states[thisKey] = s
def decodeField(self, field):
if isinstance( field, type([]) ):
return map(self.decodeField, field)
elif hasattr(field, "file") and field.file:
return (field.filename, field.file)
else:
return field.value
def getContent(self, qmlDoc):
xhtml = ''
stationNode = self.getElementById(qmlDoc.documentElement, self.states['station'])
xhtml += '<div class="content">\n'
mainContent = self.getMainContent(stationNode, qmlDoc)
mainContent = self.getIncludeContent(stationNode, qmlDoc, mainContent)
xhtml += mainContent
xhtml += '</div>\n'
xhtml = string.replace(xhtml, '<br></br>', '<br />')
return xhtml
def getIncludeContent(self, stationNode, qmlDoc, mainContent):
targetId = stationNode.getAttribute('id')
for stationNode in qmlDoc.documentElement.getElementsByTagName('station'):
mainContent = self.getIncludeContentOf(stationNode, targetId, mainContent, qmlDoc)
return mainContent
def getIncludeContentOf(self, stationNode, targetNeeded, mainContent, qmlDoc):
for includeNode in stationNode.getElementsByTagName('include'):
for inNode in includeNode.getElementsByTagName('in'):
target = inNode.getAttribute('station')
isTarget = (target == targetNeeded or target == '*' )
if not isTarget:
regex = string.replace(target, '*', '.*')
pattern = re.compile(regex)
isTarget = re.search(pattern, targetNeeded)
if isTarget:
includeContent = self.getMainContent(stationNode, qmlDoc)
process = inNode.getAttribute('process')
if (not process) or process == '':
process = 'before'
if process == 'before':
mainContent = includeContent + mainContent
elif process == 'after':
mainContent += includeContent
else: # if process == 'exclusive':
mainContent = includeContent
return mainContent
def getMainContent(self, topNode, qmlDoc):
xhtml = ''
nodes = topNode.childNodes
for node in nodes:
sClass = ''
thisName = node.nodeName
if thisName == 'if':
if self.checkElement(node):
xhtml += self.getMainContent(node, qmlDoc)
break
elif thisName == 'else':
xhtml += self.getMainContent(node, qmlDoc)
if thisName == 'text':
classValue = node.getAttribute('class')
if classValue:
sClass += ' class="' + classValue + '"'
if self.checkElement(node):
xhtml += '<span' + sClass + '>' + self.getNewInnerXml(node) + '</span>\n'
elif thisName == 'image':
classValue = node.getAttribute('class')
if classValue:
sClass += ' class="' + classValue + '"'
else:
sClass = ' class="qmlImage"'
if self.checkElement(node):
imageSource = node.getAttribute('source')
altValue = node.getAttribute('text')
xhtml += '<div' + sClass + '><img src="/qml/' + imageSource + '" alt="' + altValue + '" /></div>\n'
elif thisName == 'state' or thisName == 'number' or thisName == 'string':
stateName = node.getAttribute('name')
stateValue = node.getAttribute('value')
if thisName == 'state' and stateValue == '':
stateValue = '1'
if thisName == 'string':
stateValue = "'" + stateValue + "'"
stateValue = self.getEval(stateValue)
if thisName == 'string':
stateValue = "'" + str(stateValue) + "'"
self.states[stateName] = str(stateValue)
elif thisName == 'embed':
xhtml += '<iframe src="' + node.getAttribute('source') + '"></iframe>'
elif thisName == 'choose':
self.states['station'] = node.getAttribute("station")
stationNode = self.getElementById(qmlDoc.documentElement, self.states['station'])
xhtml += self.getMainContent(stationNode, qmlDoc)
elif thisName == 'choice' or thisName == 'input':
if self.checkElement(node):
xhtml += self.getChoice(node, thisName)
return xhtml
def getChoice(self, node, thisName):
xhtml = ''
sClass = ''
classValue = node.getAttribute('class')
if classValue:
sClass += ' class="' + classValue + '"'
innerXml = self.getNewInnerXml(node)
innerXml = string.replace(innerXml, '"', '"')
xhtml += '<form action="/cgi-bin/index.py" method="post"' + sClass +'><div>\n'
xhtml += self.getHiddenStates()
xhtml += '<input type="hidden" name="station" value="' + node.getAttribute("station") + '" />\n'
if thisName == 'input':
xhtml += self.getInput(node)
xhtml += '<input type="submit" name="submitForm" value="' + innerXml + '" class="submit" />\n'
xhtml += '</div></form>\n'
return xhtml
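    # Illustration (hypothetical QML input, not taken from the original data):
    # a <choice station="cave">Enter the cave</choice> element is rendered
    # roughly as
    #   <form action="/cgi-bin/index.py" method="post"><div>
    #   ...one hidden <input> per stored state (see getHiddenStates)...
    #   <input type="hidden" name="station" value="cave" />
    #   <input type="submit" name="submitForm" value="Enter the cave" class="submit" />
    #   </div></form>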
def getInput(self, node):
xhtml = ''
inputName = 'qmlInput' + node.getAttribute('name')
xhtml += '<input type="text" name="' + inputName + '" /> '
return xhtml
def checkElement(self, element):
evalString = element.getAttribute('check')
returnValue = self.getEval(evalString)
return returnValue
def getEval(self, evalString):
returnValue = 1
if evalString != '':
evalString = self.replaceStates(evalString, 0)
returnValue = self.safeEval(evalString)
return returnValue
def safeEval(self, s):
s = string.replace(s, "__", "")
s = string.replace(s, "file", "")
s = string.replace(s, "eval", "")
# return rexec.RExec.r_eval(rexec.RExec(), s)
return eval(s)
def replaceStates(self, s, cutQuotation):
for key in self.states.keys():
keyValue = self.states[key]
if cutQuotation:
quotationLeft = string.find(keyValue, "'") == 0
quotationRight = string.rfind(keyValue, "'") == len(keyValue) - 1
if quotationLeft and quotationRight:
keyValue = keyValue[1:-1]
s = string.replace(s, '[' + key + ']', keyValue)
s = string.replace(s, 'true', '1')
s = string.replace(s, 'false', '0')
pattern = re.compile(r'\[.*?\]')
s = pattern.sub('0', s)
s = string.replace(s, ' lower ', ' < ')
s = string.replace(s, ' greater ', ' > ')
s = string.replace(s, ' = ', ' == ')
s = string.replace(s, '\'{', '{')
s = string.replace(s, '}\'', '}')
s = string.replace(s, '{states ', 'self.qml_states(')
s = string.replace(s, '{random ', 'self.qml_random(')
s = string.replace(s, '{lower ', 'self.qml_lower(')
s = string.replace(s, '{upper ', 'self.qml_upper(')
s = string.replace(s, '{contains ', 'self.qml_contains(')
s = string.replace(s, '}', ')')
return s
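    # Illustration (hypothetical states and check string, not from the
    # original QML sources): with self.states == {'gold': '3'}, the check
    #     "[gold] greater 2 and {random 1, 6} = 1"
    # is rewritten by replaceStates() into the Python expression
    #     "3 > 2 and self.qml_random(1, 6) == 1"
    # which getEval()/safeEval() then evaluate.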
def getHiddenStates(self):
xhtml = ''
for key in self.states.keys():
if key != 'lastStation':
thisName = key
if key == 'station':
thisName = 'lastStation'
xhtml += '<input type="hidden" name="' + thisName + '" '
xhtml += 'value="' + str(self.states[key]) + '" />\n'
return xhtml
def getNewInnerXml(self, topNode):
s = ''
for subNode in topNode.childNodes:
if subNode.nodeType == Node.TEXT_NODE:
s += self.replaceStates(subNode.data, 1)
elif subNode.nodeType == Node.ELEMENT_NODE:
newName = self.getNewElementName(subNode)
classValue = subNode.getAttribute('class')
if classValue and classValue != '':
classValue = ' class="' + classValue + '"'
innerXml = self.getNewInnerXml(subNode)
s += '<' + newName + classValue +'>' + innerXml
s += '</' + newName + '>'
return s
def getNewElementName(self, node):
oldName = node.nodeName
if oldName == 'emphasis':
newName = 'em'
elif oldName == 'poem':
newName = 'pre'
elif oldName == 'image':
newName = 'img'
elif oldName == 'break':
newName = 'br'
else:
newName = oldName
return newName
# general XML
def getElementById(self, topNode, targetId, idName = 'id'):
foundNode = 0
nodes = topNode.childNodes
for node in nodes:
if node.nodeType == Node.ELEMENT_NODE:
thisId = node.getAttribute(idName)
if thisId == targetId:
foundNode = node
break
return foundNode
# more
def getHeader(self, qmlDoc):
xhtml = ''
xhtml += """\
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "DTD/xhtml1-strict.dtd">
<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en" lang="en">
<head>
<meta http-equiv="Content-Type" content="text/html; charset=ISO-8859-1" />
<title>QML</title>
"""
xhtml += self.getHeaderStyle(qmlDoc)
xhtml += """\
</head>
<body>
"""
return xhtml
def getHeaderStyle(self, qmlDoc):
xhtml = ''
xhtml += '\n<style media="screen" type="text/css"><!--\n'
xhtml += '.submit\n'
xhtml += '{\n'
xhtml += 'background-color: transparent;\n'
xhtml += 'color: inherit;\n'
xhtml += 'border: 0;\n'
xhtml += 'text-decoration: underline;\n'
xhtml += 'cursor: pointer;\n'
xhtml += 'margin: 0;\n'
xhtml += 'padding: 0;\n'
xhtml += 'text-align: left;\n'
xhtml += 'font-family: inherit;\n'
xhtml += '}\n'
xhtml += self.getQmlStyle(qmlDoc)
xhtml += '--></style>\n'
xhtml = string.replace(xhtml, '\n', '\n ')
xhtml += '\n'
return xhtml
def getQmlStyle(self, qmlDoc):
css = ''
backgroundColor = ''
backgroundImage = ''
backgroundRepeat = ''
fontColor = ''
fontFamily = ''
fontSize = ''
contentLeft = ''
contentTop = ''
contentWidth = ''
contentPosition = ''
classStyle = ''
topNodes = qmlDoc.documentElement.getElementsByTagName('style')
if topNodes:
for node in topNodes[0].childNodes:
thisName = node.nodeName
if thisName == 'background':
backgroundColor = node.getAttribute('color')
backgroundImage = node.getAttribute('image')
backgroundRepeat = node.getAttribute('repeat')
elif thisName == 'font':
fontColor = node.getAttribute('color')
fontFamily = node.getAttribute('family')
fontSize = node.getAttribute('size')
elif thisName == 'content':
contentLeft = node.getAttribute('left')
contentTop = node.getAttribute('top')
contentWidth = node.getAttribute('width')
elif thisName == 'class':
classStyle += self.getClassStyle(node, topNodes[0])
if backgroundColor != '':
backgroundColor = 'background-color: ' + backgroundColor + ';\n'
if backgroundImage != '':
backgroundImage = 'background-image: url(/qml/' + backgroundImage + ');\n'
if backgroundRepeat != '':
backgroundRepeat = 'background-repeat: ' + backgroundRepeat + ';\n'
if fontColor != '':
fontColor = 'color: ' + fontColor + ';\n'
if fontFamily != '':
fontFamily = 'font-family: ' + fontFamily + ';\n'
if fontSize != '':
fontSize = 'font-size: ' + fontSize + ';\n'
if contentLeft != '':
contentLeft = 'left: ' + contentLeft + ';\n'
if contentTop != '':
contentTop = 'top: ' + contentTop + ';\n'
if contentLeft != '' or contentTop != '':
contentPosition = 'position: absolute;\n'
if contentWidth != '':
contentWidth = 'width: ' + contentWidth + ';\n'
css += 'body\n{\n'
css += backgroundColor
css += backgroundImage
css += backgroundRepeat
css += fontColor
css += fontFamily
css += fontSize
css += '}\n'
css += '.content\n{\n'
css += contentPosition
css += contentLeft
css += contentTop
css += contentWidth
css += '}\n'
css += classStyle
return css
def getClassStyle(self, node, topNode):
css = ''
sName = node.getAttribute('name')
css += '.' + sName + '\n{\n'
css += self.getClassStyleContent(topNode, sName)
css += '}\n'
return css
def getClassStyleContent(self, topNode, wantedName):
css = ''
for node in topNode.childNodes:
if node.nodeName == 'class':
name = node.getAttribute('name')
if name == wantedName:
sInherits = node.getAttribute('inherits')
if sInherits and sInherits != '':
css += self.getClassStyleContent(topNode, sInherits)
thisCss = node.getAttribute('style')
thisCss = self.replaceStates(thisCss, 1)
thisCss = string.replace(thisCss, "'", '')
thisCss = string.replace(thisCss, 'url(', 'url(/qml/')
css += thisCss + '\n'
return css
def getFooter(self):
xhtml = """\
</body>
</html>
"""
return xhtml
    # QML Inline functions
    def qml_states(self, wanted):
        s = ''
        separator = ', '
        for key in self.states.keys():
            if self.states[key]:
                if string.find(key, wanted) == 0:
                    s += key[len(wanted) + 1:] + separator
        if len(s) > 0:
            s = s[:-len(separator)]
        return s

    def qml_random(self, min, max):
        return random.randrange(min, max)

    def qml_lower(self, s):
        return string.lower(s)

    def qml_upper(self, s):
        return string.upper(s)

    def qml_contains(self, sAll, sSub):
        return (string.find(sAll, sSub) != -1)
``` |
{
"source": "jphines/pipelines",
"score": 2
} |
#### File: unit_tests/tests/test_train.py
```python
import unittest
import os
import signal
from unittest.mock import patch, call, Mock, MagicMock, mock_open, ANY
from botocore.exceptions import ClientError
from train.src import train
from common import _utils
required_args = [
'--region', 'us-west-2',
'--role', 'arn:aws:iam::123456789012:user/Development/product_1234/*',
'--image', 'test-image',
'--channels', '[{"ChannelName": "train", "DataSource": {"S3DataSource":{"S3Uri": "s3://fake-bucket/data","S3DataType":"S3Prefix","S3DataDistributionType": "FullyReplicated"}},"ContentType":"","CompressionType": "None","RecordWrapperType":"None","InputMode": "File"}]',
'--instance_type', 'ml.m4.xlarge',
'--instance_count', '1',
'--volume_size', '50',
'--max_run_time', '3600',
'--model_artifact_path', 'test-path',
'--model_artifact_url_output_path', '/tmp/model_artifact_url_output_path',
'--job_name_output_path', '/tmp/job_name_output_path',
'--training_image_output_path', '/tmp/training_image_output_path',
]
class TrainTestCase(unittest.TestCase):
@classmethod
def setUpClass(cls):
parser = train.create_parser()
cls.parser = parser
def test_create_parser(self):
self.assertIsNotNone(self.parser)
def test_main(self):
# Mock out all of utils except parser
train._utils = MagicMock()
train._utils.add_default_client_arguments = _utils.add_default_client_arguments
# Set some static returns
train._utils.create_training_job.return_value = 'job-name'
train._utils.get_image_from_job.return_value = 'training-image'
train._utils.get_model_artifacts_from_job.return_value = 'model-artifacts'
train.main(required_args)
# Check if correct requests were created and triggered
train._utils.create_training_job.assert_called()
train._utils.wait_for_training_job.assert_called()
train._utils.print_logs_for_job.assert_called()
# Check the file outputs
train._utils.write_output.assert_has_calls([
call('/tmp/model_artifact_url_output_path', 'model-artifacts'),
call('/tmp/job_name_output_path', 'job-name'),
call('/tmp/training_image_output_path', 'training-image')
])
def test_create_training_job(self):
mock_client = MagicMock()
mock_args = self.parser.parse_args(required_args + ['--job_name', 'test-job'])
response = _utils.create_training_job(mock_client, vars(mock_args))
mock_client.create_training_job.assert_called_once_with(
AlgorithmSpecification={'TrainingImage': 'test-image', 'TrainingInputMode': 'File'},
EnableInterContainerTrafficEncryption=False,
EnableManagedSpotTraining=False,
EnableNetworkIsolation=True,
HyperParameters={},
InputDataConfig=[{'ChannelName': 'train',
'DataSource': {'S3DataSource': {'S3Uri': 's3://fake-bucket/data', 'S3DataType': 'S3Prefix', 'S3DataDistributionType': 'FullyReplicated'}},
'ContentType': '',
'CompressionType': 'None',
'RecordWrapperType': 'None',
'InputMode': 'File'
}],
OutputDataConfig={'KmsKeyId': '', 'S3OutputPath': 'test-path'},
ResourceConfig={'InstanceType': 'ml.m4.xlarge', 'InstanceCount': 1, 'VolumeSizeInGB': 50, 'VolumeKmsKeyId': ''},
RoleArn='arn:aws:iam::123456789012:user/Development/product_1234/*',
StoppingCondition={'MaxRuntimeInSeconds': 3600},
Tags=[],
TrainingJobName='test-job'
)
self.assertEqual(response, 'test-job')
def test_main_stop_training_job(self):
train._utils = MagicMock()
train._utils.create_training_job.return_value = 'job-name'
try:
os.kill(os.getpid(), signal.SIGTERM)
finally:
train._utils.stop_training_job.assert_called_once_with(ANY, 'job-name')
train._utils.get_image_from_job.assert_not_called()
def test_utils_stop_training_job(self):
mock_sm_client = MagicMock()
mock_sm_client.stop_training_job.return_value = None
response = _utils.stop_training_job(mock_sm_client, 'FakeJobName')
mock_sm_client.stop_training_job.assert_called_once_with(
TrainingJobName='FakeJobName'
)
self.assertEqual(response, None)
def test_sagemaker_exception_in_create_training_job(self):
mock_client = MagicMock()
mock_exception = ClientError({"Error": {"Message": "SageMaker broke"}}, "create_training_job")
mock_client.create_training_job.side_effect = mock_exception
mock_args = self.parser.parse_args(required_args)
with self.assertRaises(Exception):
response = _utils.create_training_job(mock_client, vars(mock_args))
def test_wait_for_training_job(self):
mock_client = MagicMock()
mock_client.describe_training_job.side_effect = [
{"TrainingJobStatus": "Starting"},
{"TrainingJobStatus": "InProgress"},
{"TrainingJobStatus": "Downloading"},
{"TrainingJobStatus": "Completed"},
{"TrainingJobStatus": "Should not be called"}
]
_utils.wait_for_training_job(mock_client, 'training-job', 0)
self.assertEqual(mock_client.describe_training_job.call_count, 4)
def test_wait_for_failed_job(self):
mock_client = MagicMock()
mock_client.describe_training_job.side_effect = [
{"TrainingJobStatus": "Starting"},
{"TrainingJobStatus": "InProgress"},
{"TrainingJobStatus": "Downloading"},
{"TrainingJobStatus": "Failed", "FailureReason": "Something broke lol"},
{"TrainingJobStatus": "Should not be called"}
]
with self.assertRaises(Exception):
_utils.wait_for_training_job(mock_client, 'training-job', 0)
self.assertEqual(mock_client.describe_training_job.call_count, 4)
def test_get_model_artifacts_from_job(self):
mock_client = MagicMock()
mock_client.describe_training_job.return_value = {"ModelArtifacts": {"S3ModelArtifacts": "s3://path/"}}
self.assertEqual(_utils.get_model_artifacts_from_job(mock_client, 'training-job'), 's3://path/')
def test_get_image_from_defined_job(self):
mock_client = MagicMock()
mock_client.describe_training_job.return_value = {"AlgorithmSpecification": {"TrainingImage": "training-image-url"}}
self.assertEqual(_utils.get_image_from_job(mock_client, 'training-job'), "training-image-url")
def test_get_image_from_algorithm_job(self):
mock_client = MagicMock()
mock_client.describe_training_job.return_value = {"AlgorithmSpecification": {"AlgorithmName": "my-algorithm"}}
mock_client.describe_algorithm.return_value = {"TrainingSpecification": {"TrainingImage": "training-image-url"}}
self.assertEqual(_utils.get_image_from_job(mock_client, 'training-job'), "training-image-url")
def test_reasonable_required_args(self):
response = _utils.create_training_job_request(vars(self.parser.parse_args(required_args)))
# Ensure all of the optional arguments have reasonable default values
self.assertFalse(response['EnableManagedSpotTraining'])
self.assertDictEqual(response['HyperParameters'], {})
self.assertNotIn('VpcConfig', response)
self.assertNotIn('MetricDefinitions', response)
self.assertEqual(response['Tags'], [])
self.assertEqual(response['AlgorithmSpecification']['TrainingInputMode'], 'File')
self.assertEqual(response['OutputDataConfig']['S3OutputPath'], 'test-path')
def test_metric_definitions(self):
metric_definition_args = self.parser.parse_args(required_args + ['--metric_definitions', '{"metric1": "regexval1", "metric2": "regexval2"}'])
response = _utils.create_training_job_request(vars(metric_definition_args))
self.assertIn('MetricDefinitions', response['AlgorithmSpecification'])
response_metric_definitions = response['AlgorithmSpecification']['MetricDefinitions']
self.assertEqual(response_metric_definitions, [{
'Name': "metric1",
'Regex': "regexval1"
}, {
'Name': "metric2",
'Regex': "regexval2"
}])
def test_no_defined_image(self):
# Pass the image to pass the parser
no_image_args = required_args.copy()
image_index = no_image_args.index('--image')
        # Cut out --image and its associated value
no_image_args = no_image_args[:image_index] + no_image_args[image_index+2:]
parsed_args = self.parser.parse_args(no_image_args)
with self.assertRaises(Exception):
_utils.create_training_job_request(vars(parsed_args))
def test_first_party_algorithm(self):
algorithm_name_args = self.parser.parse_args(required_args + ['--algorithm_name', 'first-algorithm'])
# Should not throw an exception
response = _utils.create_training_job_request(vars(algorithm_name_args))
self.assertIn('TrainingImage', response['AlgorithmSpecification'])
self.assertNotIn('AlgorithmName', response['AlgorithmSpecification'])
def test_known_algorithm_key(self):
# This passes an algorithm that is a known NAME of an algorithm
known_algorithm_args = required_args + ['--algorithm_name', 'seq2seq modeling']
image_index = required_args.index('--image')
        # Cut out --image and its associated value
known_algorithm_args = known_algorithm_args[:image_index] + known_algorithm_args[image_index+2:]
parsed_args = self.parser.parse_args(known_algorithm_args)
# Patch get_image_uri
_utils.get_image_uri = MagicMock()
_utils.get_image_uri.return_value = "seq2seq-url"
response = _utils.create_training_job_request(vars(parsed_args))
_utils.get_image_uri.assert_called_with('us-west-2', 'seq2seq')
self.assertEqual(response['AlgorithmSpecification']['TrainingImage'], "seq2seq-url")
def test_known_algorithm_value(self):
# This passes an algorithm that is a known SageMaker algorithm name
known_algorithm_args = required_args + ['--algorithm_name', 'seq2seq']
image_index = required_args.index('--image')
        # Cut out --image and its associated value
known_algorithm_args = known_algorithm_args[:image_index] + known_algorithm_args[image_index+2:]
parsed_args = self.parser.parse_args(known_algorithm_args)
# Patch get_image_uri
_utils.get_image_uri = MagicMock()
_utils.get_image_uri.return_value = "seq2seq-url"
response = _utils.create_training_job_request(vars(parsed_args))
_utils.get_image_uri.assert_called_with('us-west-2', 'seq2seq')
self.assertEqual(response['AlgorithmSpecification']['TrainingImage'], "seq2seq-url")
def test_unknown_algorithm(self):
known_algorithm_args = required_args + ['--algorithm_name', 'unknown algorithm']
image_index = required_args.index('--image')
        # Cut out --image and its associated value
known_algorithm_args = known_algorithm_args[:image_index] + known_algorithm_args[image_index+2:]
parsed_args = self.parser.parse_args(known_algorithm_args)
# Patch get_image_uri
_utils.get_image_uri = MagicMock()
_utils.get_image_uri.return_value = "unknown-url"
response = _utils.create_training_job_request(vars(parsed_args))
# Should just place the algorithm name in regardless
_utils.get_image_uri.assert_not_called()
self.assertEqual(response['AlgorithmSpecification']['AlgorithmName'], "unknown algorithm")
def test_no_channels(self):
no_channels_args = required_args.copy()
channels_index = required_args.index('--channels')
# Replace the value after the flag with an empty list
no_channels_args[channels_index + 1] = '[]'
parsed_args = self.parser.parse_args(no_channels_args)
with self.assertRaises(Exception):
_utils.create_training_job_request(vars(parsed_args))
def test_valid_hyperparameters(self):
hyperparameters_str = '{"hp1": "val1", "hp2": "val2", "hp3": "val3"}'
good_args = self.parser.parse_args(required_args + ['--hyperparameters', hyperparameters_str])
response = _utils.create_training_job_request(vars(good_args))
self.assertIn('hp1', response['HyperParameters'])
self.assertIn('hp2', response['HyperParameters'])
self.assertIn('hp3', response['HyperParameters'])
self.assertEqual(response['HyperParameters']['hp1'], "val1")
self.assertEqual(response['HyperParameters']['hp2'], "val2")
self.assertEqual(response['HyperParameters']['hp3'], "val3")
def test_empty_hyperparameters(self):
hyperparameters_str = '{}'
good_args = self.parser.parse_args(required_args + ['--hyperparameters', hyperparameters_str])
response = _utils.create_training_job_request(vars(good_args))
self.assertEqual(response['HyperParameters'], {})
def test_object_hyperparameters(self):
hyperparameters_str = '{"hp1": {"innerkey": "innerval"}}'
invalid_args = self.parser.parse_args(required_args + ['--hyperparameters', hyperparameters_str])
with self.assertRaises(Exception):
_utils.create_training_job_request(vars(invalid_args))
def test_vpc_configuration(self):
required_vpc_args = self.parser.parse_args(required_args + ['--vpc_security_group_ids', 'sg1,sg2', '--vpc_subnets', 'subnet1,subnet2'])
response = _utils.create_training_job_request(vars(required_vpc_args))
self.assertIn('VpcConfig', response)
self.assertIn('sg1', response['VpcConfig']['SecurityGroupIds'])
self.assertIn('sg2', response['VpcConfig']['SecurityGroupIds'])
self.assertIn('subnet1', response['VpcConfig']['Subnets'])
self.assertIn('subnet2', response['VpcConfig']['Subnets'])
def test_training_mode(self):
required_vpc_args = self.parser.parse_args(required_args + ['--training_input_mode', 'Pipe'])
response = _utils.create_training_job_request(vars(required_vpc_args))
self.assertEqual(response['AlgorithmSpecification']['TrainingInputMode'], 'Pipe')
def test_spot_bad_args(self):
no_max_wait_args = self.parser.parse_args(required_args + ['--spot_instance', 'True'])
no_checkpoint_args = self.parser.parse_args(required_args + ['--spot_instance', 'True', '--max_wait_time', '3600'])
no_s3_uri_args = self.parser.parse_args(required_args + ['--spot_instance', 'True', '--max_wait_time', '3600', '--checkpoint_config', '{}'])
for arg in [no_max_wait_args, no_checkpoint_args, no_s3_uri_args]:
with self.assertRaises(Exception):
_utils.create_training_job_request(vars(arg))
def test_spot_lesser_wait_time(self):
args = self.parser.parse_args(required_args + ['--spot_instance', 'True', '--max_wait_time', '3599', '--checkpoint_config', '{"S3Uri": "s3://fake-uri/", "LocalPath": "local-path"}'])
with self.assertRaises(Exception):
_utils.create_training_job_request(vars(args))
def test_spot_good_args(self):
good_args = self.parser.parse_args(required_args + ['--spot_instance', 'True', '--max_wait_time', '3600', '--checkpoint_config', '{"S3Uri": "s3://fake-uri/"}'])
response = _utils.create_training_job_request(vars(good_args))
self.assertTrue(response['EnableManagedSpotTraining'])
self.assertEqual(response['StoppingCondition']['MaxWaitTimeInSeconds'], 3600)
self.assertEqual(response['CheckpointConfig']['S3Uri'], 's3://fake-uri/')
def test_spot_local_path(self):
args = self.parser.parse_args(required_args + ['--spot_instance', 'True', '--max_wait_time', '3600', '--checkpoint_config', '{"S3Uri": "s3://fake-uri/", "LocalPath": "local-path"}'])
response = _utils.create_training_job_request(vars(args))
self.assertEqual(response['CheckpointConfig']['S3Uri'], 's3://fake-uri/')
self.assertEqual(response['CheckpointConfig']['LocalPath'], 'local-path')
def test_tags(self):
args = self.parser.parse_args(required_args + ['--tags', '{"key1": "val1", "key2": "val2"}'])
response = _utils.create_training_job_request(vars(args))
self.assertIn({'Key': 'key1', 'Value': 'val1'}, response['Tags'])
self.assertIn({'Key': 'key2', 'Value': 'val2'}, response['Tags'])
``` |
{
"source": "jphinning/Sorting-Algorithms-Visualizer",
"score": 3
} |
#### File: Sorting-Algorithms-Visualizer/algorithms/countingSort.py
```python
from display import handleDrawing


def countingSort(array, *args):
    size = len(array)
    A = array.copy()
    # Histogram of the values
    C = [0] * (max(A) + 1)
    for i in range(size):
        C[A[i]] += 1
    # Prefix sums: C[v] now holds the number of elements <= v
    for i in range(1, len(C)):
        C[i] += C[i - 1]
    # Place the elements from the back to keep the sort stable
    for i in range(0, size):
        handleDrawing(array, C[A[size - i - 1]] - 1, -1, size - i - 1, -1)
        array[C[A[size - i - 1]] - 1] = A[size - i - 1]
        C[A[size - i - 1]] -= 1
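# Hypothetical walk-through (example data, not part of the original visualizer):
#   array = [3, 1, 2, 1]
#   value counts   C = [0, 2, 1, 1]
#   prefix sums    C = [0, 2, 3, 4]
#   placing from the back: 1 -> slot 1, 2 -> slot 2, 1 -> slot 0, 3 -> slot 3
#   sorted result      [1, 1, 2, 3]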
```
#### File: Sorting-Algorithms-Visualizer/algorithms/stoogeSort.py
```python
from display import handleDrawing
from math import floor


def stoogeSort(arr, l, h):
    if l >= h:
        return
    # Swap the first and last elements if they are out of order
    if arr[l] > arr[h]:
        middle = floor((h + l) / 2)
        handleDrawing(arr, l, h, middle, -1)
        t = arr[l]
        arr[l] = arr[h]
        arr[h] = t
    # Recurse on the first two thirds, the last two thirds,
    # then the first two thirds again
    if h - l + 1 > 2:
        t = (h - l + 1) // 3
        stoogeSort(arr, l, h - t)
        stoogeSort(arr, l + t, h)
        stoogeSort(arr, l, h - t)
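# Note (added for context, not from the original source): stooge sort runs in
# O(n^(log 3 / log 1.5)) ~= O(n^2.71) time, i.e. far slower even than the
# O(n^2) elementary sorts.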
``` |
{
"source": "jphkun/aeroframe_2",
"score": 2
} |
#### File: aeroframe_2/std_run/run.py
```python
import logging
import json
import argparse
import aeroframe_2.fileio.settings as Settings
import aeroframe_2.deformation.functions as aeroDef
import pytornado.stdfun.run as cfd
import pytornado.fileio as io
import os
import pickle
# import SU2_CFD
logging.basicConfig(level=logging.DEBUG)
__prog_name__ = "Aeroframe2.0"
logger = logging.getLogger(__prog_name__+"."+__name__)
def save_to_pkl(path, act_dea, lattice, vlmdata):
    if act_dea == "activated":
        name_lattice = "/lattice_defActivated.pkl"
        name_vlmdata = "data_defActivated.pkl"
    elif act_dea == "deactivated":
        name_lattice = "/lattice_defDeactivated.pkl"
        name_vlmdata = "data_defDeactivated.pkl"
    else:
        logger.error("activation or deactivation not specified")
    with open(path + name_lattice, "wb") as la:
        var = [lattice.p,  # 0
               lattice.v,  # 1
               lattice.c,  # 2
               lattice.n,  # 3
               lattice.a,  # 4
               lattice.bound_leg_midpoints]  # 5
        pickle.dump(var, la)
        la.close()
    with open(path + name_vlmdata, "wb") as d:
        pickle.dump(vlmdata, d)
        d.close()
def virtual_cli(dir_path,setting_file_path):
parser = argparse.ArgumentParser(description='Replicates a command-line')
args = parser.parse_args()
args.clean = True
args.clean_only = False
args.cpacs2json = None
args.debug = False
args.list_example_from_db = False
args.make_example = False
args.make_example_from_db = None
args.run = setting_file_path
args.verbose = True
os.chdir(dir_path + "/CFD")
return args
def deform_mesh(settings, lattice, file):
    logger.info(settings.settings["deformation"])
    if settings.settings["deformation"]:
        # Deforms the mesh and uploads the deformed one into the code
        logger.info("===== Mesh deformation function activated =====")
        mesh_def = aeroDef.Mesh_Def(lattice, file)
        mesh_def.deformation(settings)
        lattice.p = mesh_def.f_p
        lattice.v = mesh_def.f_v  # turns everything down
        lattice.c = mesh_def.f_c
        lattice.bound_leg_midpoints = mesh_def.f_b  # turns everything up
        lattice.n = mesh_def.f_n
        lattice.a = mesh_def.f_a
    else:
        logger.info("===== Mesh deformation function deactivated =====")
    return lattice
def main():
# Starts simulation
logger.info(f"{__prog_name__} started")
# for construction purposes
dir_path = "/home/cfse2/Documents/aeroframe_2/test/static/Optimale_withDLR/"
# /home/cfse2/Documents/aeroframe_2/test/static/Optimale_withDLR
# "/home/user/Documents/aeroframe_2/test/static/3_OptimaleNoFlaps"
#/home/user/Documents/aeroframe_2/test/static/1_OptimaleWingtipON
# "/home/cfse2/Documents/aeroframe_2/test/static/"
# "/home/user/Documents/aeroframe_2/test/static/"
name = "aeroframe2.0_case1.json"
aeroframe_settings_path = dir_path + name
logger.debug(f"Settings file is located: {aeroframe_settings_path}")
with open(aeroframe_settings_path) as json_file:
dictionary = json.load(json_file)
simulation = Settings.settings(dictionary)
# Checks if JSON corresponds to what is implemented in this code
simulation.verify()
if simulation.CFD_solver == "Pytornado":
settings_file = dir_path + "/CFD/settings/Optimale.json"
with open(settings_file, "r") as fp:
settings = json.load(fp)
# Command line simulation
setting_file_path = 'settings/Optimale.json'
# cfd.standard_run(args)
args = virtual_cli(dir_path,setting_file_path)
        # Builds the CFD mesh
lattice, vlmdata, settings, aircraft, cur_state, state = cfd.meshing(args)
# Deforms CFD mesh
file_path = dir_path + simulation.def_file_path
logger.debug(file_path)
deform_mesh(settings,lattice,file_path)
# Computes CFD solution
cfd.solver(lattice, vlmdata, settings, aircraft, cur_state, state)
if settings.settings["deformation"]:
save_to_pkl(dir_path, "activated", lattice, vlmdata)
else:
save_to_pkl(dir_path, "deactivated", lattice, vlmdata)
# TODO cfd Mesh
# TODO structure mesh
if __name__ == "__main__":
main()
# -TODO get p
# -TODO get v
# -TODO get c
# -TODO get b
# -TODO get n
# -TODO get a
# -TODO get what is in between. it looks like something wrong lives here
# -TODO get RHS
# -TODO get Downwash
```
#### File: aeroframe_2/deformation/RBF_test.py
```python
import numpy as np
import numpy.linalg as LA
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
xs = np.array([[0,0,0],
[0,1,0],
[0,2,0],
[0,3,0]])
n = xs.shape
# Plots line
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.scatter(xs[:,0],xs[:,1],xs[:,2], label='beam')
def phi(x1, x2, eps):
    # Radial basis function of the distance r between two points
    norm = np.linalg.norm(x1 - x2)
    r = norm
    # Gaussian:      np.exp(-(eps*r)**2)
    # thin plate:    r**2 * np.log(r)
    # multiquadric:  (1 + (eps*r)**2)**0.5
    return (1 + (eps*r)**2)**0.5
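# Sketch of the standard RBF transfer this script exercises (summary added for
# clarity; notation assumed, not taken from the original comments):
#   M[i, j]  = phi(xs_i, xs_j)    structural-structural coupling matrix
#   As[i, j] = phi(xs_i, xa_j)    structural-aerodynamic coupling matrix
#   H_s      = As.T @ inv(M)      so that deltaZa = H_s @ deltaZs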
# print(xs)
# print("\n")
# P = np.ones((1,n[0]))
# P = np.concatenate((P,xs.T))
# # 4xN
# print("P")
# print(P)
# print("\n")
# p_shape = P.shape
eps = 1
M = np.empty((n[0], n[0]))
for i in range(n[0]):
    for j in range(n[0]):
        M[i, j] = phi(xs[i], xs[j], eps)
print("M")
print(M)
print("\n")
# zeros = np.zeros((p_shape[0],p_shape[0]))
# Css1 = np.concatenate((zeros, P.T),axis=0)
# print(Css1)
# Css2 = np.concatenate((P, M),axis=0)
# Css = np.concatenate((Css1,Css2),axis=1)
# np.set_printoptions(precision=1)
# print("Css")
# print(Css)
# print("\n")
deltaZs = np.array([0.0,
0.1,
0.2,
0.4])
deltaZs = 0.5*deltaZs
deltaTxs = np.array([0.0,
0.1,
0.2,
0.4])
deltaTxs = 0.5*deltaTxs
eigs = np.linalg.eig(M)
print("eigs")
print(eigs[0])
print("\n")
invM = np.linalg.inv(M)
print("inv")
print(invM)
print("\n")
# _lambda = np.matmul(invM, deltaXs)
# print("Lambda")
# print(_lambda)
# print(_lambda.shape)
# print("\n")
# Plots surface points
xa = np.array([[-1,0,0],
[-1,1,0],
[-1,2,0],
[-1,3,0],
[-1,0.5,0],
[-1,1.5,0],
[-1,2.5,0],
[-1,3.5,0],
[ 1,0,0],
[ 1,1,0],
[ 1,2,0],
[ 1,3,0],
[ 1,0.5,0],
[ 1,1.5,0],
[ 1,2.5,0],
[ 1,3.5,0],
[ 0,0,0],
[ 0,1,0],
[ 0,2,0],
[ 0,3,0],
[ 0,0.5,0],
[ 0,1.5,0],
[ 0,2.5,0],
[ 0,3.5,0]])
ax.scatter(xa[:,0],xa[:,1],xa[:,2], label='surface')
m = xa.shape
# print("xa")
# print(xa)
# print("\n")
# Q = np.ones((1,m[0]))
# Q = np.concatenate((Q,xa.T))
# # 4xN
# print("Q")
# print(Q)
# q_shape = Q.shape
# print(q_shape)
# print("\n")
eps = 1
As = np.empty((n[0], m[0]))
for i in range(n[0]):
    for j in range(m[0]):
        # Couple structural node i with aerodynamic node j
        As[i, j] = phi(xs[i], xa[j], eps)
print("A")
print(As)
As_shape = As.shape
# print(k_shape)
# print("\n")
# As = np.concatenate((Q.T, K.T),axis=1)
# print("As")
# print(As.shape)
# print(As)
H_s = np.matmul(As.T,invM)
print("H_s")
print(H_s)
print("\n")
deltaZa = np.matmul(H_s,deltaZs)
deltaTxa = np.matmul(H_s,deltaTxs)
print("Delta X")
print(deltaZa)
print("Delta T")
print(deltaTxa)
print("\n")
def tranferRotation(p, b, deltaTxa):
    # Finds the two closest beam points
    # Computes the beam direction vector
    # Computes the distance from the point to the beam line
    # Multiplies that distance by the local twist deltaTxa
    # WARNING: we need to know whether the point lies to the left or to the
    # right of the line
    # Finds the two closest points
    N = len(b)
    dist = np.empty(N)
    for i in range(N):
        dist[i] = np.linalg.norm(p - b[i])
    index1 = np.argmin(dist)
    print("index1", index1)
    print("dist 1 = ", dist)
    dist[index1] = 1e15
    index2 = np.argmin(dist)
    print("index2", index2)
    print("dist 2 = ", dist)
    # Computes the line director vector
    u = b[index1] - b[index2]
    AB = p - b[index1]
    crossProduct = np.cross(AB, u)
    # Avoid a division by zero when the two beam points coincide
    if LA.norm(u) == 0:
        u = 1e10
    d = LA.norm(crossProduct) / LA.norm(u)
    print("p = ", p)
    print("b1 = ", b[index1])
    print("b2 = ", b[index2])
    print("d = ", d)
    print("cross", crossProduct)
    print("\n")
    if p[0] < b[index1, 0]:
        dz = d * deltaTxa
    else:
        dz = -d * deltaTxa
    return dz
N = len(xa)
dz = np.empty(N)
for i in range(N):
    dz[i] = tranferRotation(xa[i], xs, deltaTxa[i])
ax.scatter(xa[:,0],
xa[:,1],
xa[:,2]+deltaZa[:]+dz[:], label='surface deformed')
val = 3
ax.set_xlim(-val,val)
ax.set_ylim(-val,val)
ax.set_zlim(-val,val)
ax.legend()
plt.show()
``` |
{
"source": "jphkun/aeroframe",
"score": 2
} |
#### File: unit/data/test_datafields.py
```python
import numpy as np
import pytest

from aeroframe.data.datafields import get_total_num_points


@pytest.fixture
def example_def_field():
    """
    Example random deformation field
    """
    def_field = {}
    def_field['Wing'] = np.random.rand(20, 9)
    def_field['Fuselage'] = np.random.rand(20, 9)
    def_field['HorizTail'] = np.random.rand(20, 9)
    return def_field


def test_get_total_num(example_def_field):
    """
    Test function 'get_total_num_points()'
    """
    n_tot = get_total_num_points(example_def_field)
    # 3 components with 20 rows each -> 60 points in total
    assert n_tot == 60
``` |
{
"source": "jphkun/CEASIOMpy",
"score": 2
} |
#### File: wrappers/cfd/su2.py
```python
from collections import defaultdict
from os.path import join
import os
from pathlib import Path
import numpy as np
from aeroframe.templates.wrappers import AeroWrapper
from aeroframe.interpol.translate import get_deformed_mesh
from ceasiompy.SU2Run.su2run import run_SU2_fsi
class Wrapper(AeroWrapper):
def __init__(self, root_path, shared, settings):
super().__init__(root_path, shared, settings)
# SU2 specific files
self.paths = {}
self.paths['d_calc'] = join(self.root_path, '..', 'temp')
self.paths['f_config'] = join(self.paths['d_calc'], 'ToolInput.cfg')
self.paths['f_loads'] = join(self.paths['d_calc'], 'force.csv')
self.paths['f_mesh'] = join(self.paths['d_calc'], 'ToolInput.su2')
self.paths['f_disp'] = join(self.paths['d_calc'], 'disp.dat')
# Make the working directory if it does not exist
Path(self.paths['d_calc']).mkdir(parents=True, exist_ok=True)
self.first_iteration = True
self.undeformed_mesh = None
def _get_su2_load_array(self):
"""Return the load files as a array"""
su2_load_array = np.genfromtxt(
self.paths['f_loads'],
delimiter=',',
dtype=None,
skip_header=1,
encoding='latin1'
)
return su2_load_array
    def _get_load_fields(self, use_undeformed_POA=True):
        """
        Return AeroFrame load fields from SU2 results

        Args:
            :use_undeformed_POA: (bool) If True, the *undeformed* points of
                                 attack are used

        Returns:
            :load_fields: (dict) AeroFrame load fields
        """
su2_load_array = self._get_su2_load_array()
load_fields = defaultdict(list)
for row in su2_load_array:
row = tuple(row)
xyz_fxyz = np.concatenate((row[1:7], [0, 0, 0]))
load_fields[str(row[-1])].append(xyz_fxyz)
for component_uid, value in load_fields.items():
value = np.array(value, dtype=float)
# Replace the deformed POA
if not self.first_iteration and use_undeformed_POA:
value[:, 0:3] = self.undeformed_mesh[component_uid]
load_fields[component_uid] = value
return load_fields
def _write_su2_disp_file(self):
"""Write the SU2 displacement file"""
# Fetch the FEM deformation fields
def_fields = self.shared.structure.def_fields
# TODO: make work for multiple wings
orig_mesh = self.undeformed_mesh['Wing']
def_field = self.shared.structure.def_fields['Wing']
def_mesh = get_deformed_mesh(orig_mesh, def_field)
# Indices and displacements at discrete mesh points
num_mesh_points = orig_mesh.shape[0]
idx = np.arange(start=0, stop=num_mesh_points, step=1).reshape((num_mesh_points, 1))
u_xyz = def_mesh - orig_mesh
# Write the displacement file
header = f'{num_mesh_points}\t2\t1\t0'
output_data = np.block([idx, orig_mesh+u_xyz])
fmt = ['%d'] + ['%.10e' for _ in range(3)]
np.savetxt(
self.paths['f_disp'],
output_data,
fmt=fmt,
delimiter='\t',
header=header
)
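        # Sketch of the resulting file (illustrative values only):
        #     # 400    2    1    0
        #     0    1.0000000000e+00    2.0000000000e+00    3.0000000000e+00
        #     ...
        # i.e. a header with the number of points, then one tab-separated row
        # per mesh point: point index followed by the deformed x, y, z.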
def run_analysis(self, turn_off_deform=False):
"""
Run the PyTornado analysis
Args:
:turn_off_deform: Flag which can be used to turn off all deformations
"""
# Hint: If there is no displacement file, no deformation will be
# taken into account
if turn_off_deform:
if os.path.exists(self.paths['f_disp']):
pass
# os.remove(self.paths['f_disp'])
else:
self._write_su2_disp_file()
# ----- Run the SU2 analysis -----
run_SU2_fsi(
config_path=self.paths['f_config'],
wkdir=self.paths['d_calc'],
)
        # Get the undeformed mesh in the first iteration
if self.first_iteration:
load_fields = self._get_load_fields()
self.undeformed_mesh = {}
for component_uid, value in load_fields.items():
self.undeformed_mesh[component_uid] = value[:, 0:3]
# ----- Share load data -----
self.shared.cfd.load_fields = self._get_load_fields()
self.first_iteration = False
def clean(self):
"""Clean method"""
pass
```
#### File: func/AoutFunc/outputbalancegen.py
```python
import numpy as np
import math
import matplotlib as mpl
from matplotlib import rcParams
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt

from ceasiompy.utils.ceasiomlogger import get_logger

log = get_logger(__file__.split('.')[0])
#=============================================================================
# CLASSES
#=============================================================================
"""All classes are defined inside the classes folder and into the
InputClasses/Uconventional folder"""
#=============================================================================
# FUNCTIONS
#=============================================================================
def output_txt(bout, mw, bi, ec, NAME):
""" The function generates the output text file for the unconventional
balance analysis.
INPUT
(class) bout --Arg.: BalanceOutput class.
(class) mw --Arg.: MassesWeights class.
(class) bi --Arg.: BalanceInputs class.
##======= Classes are defined in the InputClasses folder =======##
(char) NAME --Arg.: Name of the Aircraft
OUTPUT
    (file) Balance_module.out --Out.: Text file containing all the
                                      information estimated from the code.
"""
out_name = 'ToolOutput/' + NAME + '/' + NAME\
+ '_Balance_unc_module.out'
OutputTextFile = open(out_name, 'w')
OutputTextFile.write('###############################################')
OutputTextFile.write('\n###### AIRCRAFT BALANCE ESTIMATION MODULE #####')
OutputTextFile.write('\n##### OUTPUTS #####')
OutputTextFile.write('\n###############################################')
OutputTextFile.write('\n-----------------------------------------------')
OutputTextFile.write('\nInput data used -------------------------------')
OutputTextFile.write('\n-----------------------------------------------')
if bi.USER_CASE:
OutputTextFile.write('\nUser case option: True')
OutputTextFile.write('\nChosen Fuel Percentage: ' + str(bi.F_PERC))
OutputTextFile.write('\nChosen Payload Percentage: ' + str(bi.P_PERC))
else:
OutputTextFile.write('\nUser case option: False')
OutputTextFile.write('\nEngine in the back: ' + str(not ec.WING_MOUNTED))
OutputTextFile.write('\n')
OutputTextFile.write('\n-----------------------------------------------')
OutputTextFile.write('\nMass data -------------------------------------')
OutputTextFile.write('\n-----------------------------------------------')
OutputTextFile.write('\nMaximum payload mass [kg]: '\
+ str(int(mw.mass_payload)))
OutputTextFile.write('\nMaximum fuel mass with no passengers [kg]: '\
+ str(int(mw.mass_fuel_tot)))
OutputTextFile.write('\nMaximum take off mass [kg]: '\
+ str(int(mw.maximum_take_off_mass)))
OutputTextFile.write('\nOperating empty mass [kg]: '\
+ str(int(mw.operating_empty_mass)))
OutputTextFile.write('\nMaximum fuel mass with max passengers [kg]: '\
+ str(int(mw.mass_fuel_maxpass)))
OutputTextFile.write('\n')
OutputTextFile.write('\n-----------------------------------------------')
OutputTextFile.write('\nResults ---------------------------------------')
OutputTextFile.write('\n-----------------------------------------------')
OutputTextFile.write('\n')
OutputTextFile.write('\nCenter of Gravity coordinates -------------'\
+ '----')
OutputTextFile.write('\nMax Payload configuration -----------------'\
+ '----')
OutputTextFile.write('\n[x, y, z]: '+str(bout.center_of_gravity))
OutputTextFile.write('\n-------------------------------------------'\
+ '----')
OutputTextFile.write('\nZero Fuel configuration -------------------'\
+ '----')
OutputTextFile.write('\n[x, y, z]: '+str(bout.cg_zfm))
OutputTextFile.write('\n-------------------------------------------'\
+ '----')
OutputTextFile.write('\nZero Payload configuration ----------------'\
+ '----')
OutputTextFile.write('\n[x, y, z]: '+str(bout.cg_zpm))
OutputTextFile.write('\n-------------------------------------------'\
+ '----')
OutputTextFile.write('\nOEM configuration -------------------------'\
+ '----')
OutputTextFile.write('\n[x, y, z]: '+str(bout.cg_oem))
OutputTextFile.write('\n-------------------------------------------'\
+ '----')
if bi.USER_CASE:
OutputTextFile.write('\nUser configuration --------------------'\
+ '--------')
OutputTextFile.write('\n[x, y, z]: '+str(bout.cg_user))
OutputTextFile.write('\n---------------------------------------'\
+ '--------')
if bi.USER_EN_PLACEMENT:
OutputTextFile.write('\n---------------- Engine Inertia ------------'\
+'---')
OutputTextFile.write('\nRoll moment, Ixx [kgm^2]: '\
+ str(int(round(bout.Ixxen))))
OutputTextFile.write('\nPitch moment, Iyy [kgm^2]: '\
+ str(int(round(bout.Iyyen))))
OutputTextFile.write('\nYaw moment, Izz [kgm^2]: '
+ str(int(round(bout.Izzen))))
OutputTextFile.write('\nIxy moment [kgm^2]: '\
+ str(int(round(bout.Ixyen))))
OutputTextFile.write('\nIyz moment [kgm^2]: '\
+ str(int(round(bout.Iyzen))))
OutputTextFile.write('\nIxz moment [kgm^2]: '\
+ str(int(round(bout.Ixzen))))
OutputTextFile.write('\n-------------------------------------------'\
+ '----')
OutputTextFile.write('\nMoment of Inertia -------------------------'\
+ '----')
OutputTextFile.write('\n-----------------------------------------------')
OutputTextFile.write('\nLumped mass Inertia -----------------------'\
+ '----')
OutputTextFile.write('\nMax Payload configuration -----------------'\
+ '----')
OutputTextFile.write('\nRoll moment, Ixx [kgm^2]: '\
+ str(int(round(bout.Ixx_lump))))
OutputTextFile.write('\nPitch moment, Iyy [kgm^2]: '\
+ str(int(round(bout.Iyy_lump))))
OutputTextFile.write('\nYaw moment, Izz [kgm^2]: '\
+ str(int(round(bout.Izz_lump))))
OutputTextFile.write('\nIxy moment [kgm^2]: ' + str(bout.Ixy_lump))
OutputTextFile.write('\nIyz moment [kgm^2]: ' + str(bout.Iyz_lump))
OutputTextFile.write('\nIxz moment [kgm^2]: ' + str(bout.Ixz_lump))
OutputTextFile.write('\n-------------------------------------------'\
+ '----')
OutputTextFile.write('\nZero Fuel configuration -------------------'\
+ '----')
OutputTextFile.write('\nRoll moment, Ixx [kgm^2]: '\
+ str(int(round(bout.Ixx_lump_zfm))))
OutputTextFile.write('\nPitch moment, Iyy [kgm^2]: '\
+ str(int(round(bout.Iyy_lump_zfm))))
OutputTextFile.write('\nYaw moment, Izz [kgm^2]: '\
+ str(int(round(bout.Izz_lump_zfm))))
OutputTextFile.write('\nIxy moment [kgm^2]: '\
+ str(bout.Ixy_lump_zfm))
OutputTextFile.write('\nIyz moment [kgm^2]: '\
+ str(bout.Iyz_lump_zfm))
OutputTextFile.write('\nIxz moment [kgm^2]: '\
+ str(bout.Ixz_lump_zfm))
OutputTextFile.write('\n-------------------------------------------'\
+ '----')
OutputTextFile.write('\nZero Payload configuration ----------------'\
+ '----')
OutputTextFile.write('\nRoll moment, Ixx [kgm^2]: '\
+ str(int(round(bout.Ixx_lump_zpm))))
OutputTextFile.write('\nPitch moment, Iyy [kgm^2]: '\
+ str(int(round(bout.Iyy_lump_zpm))))
OutputTextFile.write('\nYaw moment, Izz [kgm^2]: '\
+ str(int(round(bout.Izz_lump_zpm))))
OutputTextFile.write('\nIxy moment [kgm^2]: '\
+ str(bout.Ixy_lump_zpm))
OutputTextFile.write('\nIyz moment [kgm^2]: '\
+ str(bout.Iyz_lump_zpm))
OutputTextFile.write('\nIxz moment [kgm^2]: '\
+ str(bout.Ixz_lump_zpm))
OutputTextFile.write('\n-------------------------------------------'\
+ '----')
OutputTextFile.write('\nOEM configuration -------------------------'\
+ '----')
OutputTextFile.write('\nRoll moment, Ixx [kgm^2]: '\
+ str(int(round(bout.Ixx_lump_oem))))
OutputTextFile.write('\nPitch moment, Iyy [kgm^2]: '\
+ str(int(round(bout.Iyy_lump_oem))))
OutputTextFile.write('\nYaw moment, Izz [kgm^2]: '\
+ str(int(round(bout.Izz_lump_oem))))
OutputTextFile.write('\nIxy moment [kgm^2]: '\
+ str(bout.Ixy_lump_oem))
OutputTextFile.write('\nIyz moment [kgm^2]: '\
+ str(bout.Iyz_lump_oem))
OutputTextFile.write('\nIxz moment [kgm^2]: '\
+ str(bout.Ixz_lump_oem))
OutputTextFile.write('\n-------------------------------------------'\
+ '----')
if bi.USER_CASE:
OutputTextFile.write('\nUser configuration --------------------'\
+ '--------')
OutputTextFile.write('\nRoll moment, Ixx [kgm^2]: '\
+ str(int(round(bout.Ixx_lump_user))))
OutputTextFile.write('\nPitch moment, Iyy [kgm^2]: '\
+ str(int(round(bout.Iyy_lump_user))))
OutputTextFile.write('\nYaw moment, Izz [kgm^2]: '\
+ str(int(round(bout.Izz_lump_user))))
OutputTextFile.write('\nIxy moment [kgm^2]: '\
+ str(bout.Ixy_lump_oem))
OutputTextFile.write('\nIyz moment [kgm^2]: '\
+ str(bout.Iyz_lump_oem))
OutputTextFile.write('\nIxz moment [kgm^2]: '\
+ str(bout.Ixz_lump_oem))
OutputTextFile.write('\n---------------------------------------'\
+ '--------')
### Closing Text File
OutputTextFile.close()
return()
#=============================================================================
# PLOTS
#=============================================================================
### AIRCRAFT NODES PLOT ------------------------------------------------------
def aircraft_nodes_unc_plot(fx, fy, fz, wx, wy, wz, NAME):
"""The function generates the plot of the aircraft nodes.
INPUT
(float_array) fx --Arg.: Array containing the x-coordinates
of the fuselage nodes.
(float_array) fy --Arg.: Array containing the y-coordinates
of the fuselage nodes.
(float_array) fz --Arg.: Array containing the z-coordinates
of the fuselage nodes.
(float_array) wx --Arg.: Array containing the x-coordinates
of the wing nodes.
(float_array) wy --Arg.: Array containing the y-coordinates
of the wing nodes.
(float_array) wz --Arg.: Array containing the z-coordinates
of the wing nodes.
(char) NAME --Arg.: Aircraft name.
OUTPUT
(file)Aircraft_Nodes.png --Out.: Png file containing all the
aircraft nodes plot.
"""
fig = plt.figure()
mpl.rcParams.update({'font.size': 20})
ax = fig.add_subplot(111, projection = '3d')
ax.plot([fx[0]], [fy[0]], [fz[0]], c = 'g', marker = 'o',\
label = 'Fuselage nodes', markersize = 10)
ax.plot([wx[0]], [wy[0]], [wz[0]], c = 'b', marker = 'o',\
label = 'Wing nodes', markersize = 10)
s1 = ax.scatter(fx, fy, fz, c = 'g', marker = 'o',\
s = 100*np.ones((np.max(np.shape(fx)))))
s2 = ax.scatter(wx, wy, wz, c = 'b', marker = 'o',\
s = 100*np.ones((np.max(np.shape(wx)))))
ax.set_ylim3d(np.min(wy)-5, np.max(wy)+5)
ax.set_xlim3d(np.min(fx)-10, np.max(fx)+10)
ax.set_zlim3d(np.min(wz)-5, np.max(wz)+5)
ax.set_xlabel('X Label')
ax.set_ylabel('Y Label')
ax.set_zlabel('Z Label')
ax.legend(numpoints = 1, loc = 'upper right')
FIG_NAME = 'ToolOutput/' + NAME + '/' + NAME + '_Aircraft_Nodes.png'
fig.savefig(FIG_NAME, dpi = 500)
return()
def aircraft_nodes_bwb_plot(wx, wy, wz, NAME):
"""The function generates the plot of the aircraft nodes.
INPUT
(float_array) wx --Arg.: Array containing the x-coordinates
of the wing nodes.
(float_array) wy --Arg.: Array containing the y-coordinates
of the wing nodes.
(float_array) wz --Arg.: Array containing the z-coordinates
of the wing nodes.
(char) NAME --Arg.: Aircraft name.
OUTPUT
(file)Aircraft_Nodes.png --Out.: Png file containing all the
aircraft nodes plot.
"""
fig = plt.figure()
mpl.rcParams.update({'font.size': 20})
ax = fig.add_subplot(111, projection = '3d')
ax.plot([wx[0]], [wy[0]], [wz[0]], c = 'b', marker = 'o',\
label='Wing nodes', markersize = 10)
s = ax.scatter(wx, wy, wz, c = 'b', marker = 'o',\
s = 100*np.ones((np.max(np.shape(wx)))))
ax.set_ylim3d(np.min(wy)-5, np.max(wy)+5)
ax.set_xlim3d(np.min(wx)-10, np.max(wx)+10)
ax.set_zlim3d(np.min(wz)-5, np.max(wz)+5)
ax.set_xlabel('X Label')
ax.set_ylabel('Y Label')
ax.set_zlabel('Z Label')
ax.legend(numpoints = 1, loc = 'upper right')
FIG_NAME = 'ToolOutput/' + NAME + '/' + NAME + '_Aircraft_Nodes.png'
fig.savefig(FIG_NAME, dpi = 500)
return()
### AIRCRAFT CoG PLOT --------------------------------------------------------
def aircraft_cog_unc_plot(cg, bi, ed, afg, awg, NAME):
""" The function generates the plot of the unconventional aircrafy
center og gravity and the nodes used to evaluate it.
INPUT
(float_array) cg --Arg.: Center of gravity global coordinates [m].
(class) bi --Arg.: BalanceInputs class.
(class) ed --Arg.: EnineData class.
##======= Classes are defined in the Input_classes folder =======##
(class) afg --Arg.: AircraftFuseGeometry class.
(class) awg --Arg.: AircraftWingGeometry class.
##========== Classes are defined in the classes folder ==========##
(char) NAME --Arg.: Aircraft name.
OUTPUT
(file)Aircraft_Cog.png --Out.: Png file containing the center of gravity
and the nodes used to evaluate it.
"""
fig = plt.figure()
fig.patch.set_facecolor('w')
mpl.rcParams.update({'font.size': 12})
cx = cg[0]
cy = cg[1]
cz = cg[2]
ax = fig.add_subplot(111, projection = '3d')
wx = []
wy = []
wz = []
fx = afg.fuse_center_seg_point[:,:,0]
fy = afg.fuse_center_seg_point[:,:,1]
fz = afg.fuse_center_seg_point[:,:,2]
for i in range(1,awg.wing_nb+1):
for j in range(1,np.max(awg.wing_seg_nb)+1):
if awg.wing_center_seg_point[j-1,i-1,0] != 0.0:
wx.append(awg.wing_center_seg_point[j-1,i-1,0])
wy.append(awg.wing_center_seg_point[j-1,i-1,1])
wz.append(awg.wing_center_seg_point[j-1,i-1,2])
wplot1 = wx[0]
wplot2 = wy[0]
wplot3 = wz[0]
if bi.USER_EN_PLACEMENT:
ax.plot([ed.EN_PLACEMENT[0,0]],\
[ed.EN_PLACEMENT[0,1]],\
[ed.EN_PLACEMENT[0,2]],'ok',\
label='Engine nodes', markersize = 8)
ex = ed.EN_PLACEMENT[:,0]
ey = ed.EN_PLACEMENT[:,1]
ez = ed.EN_PLACEMENT[:,2]
ax.scatter([ex], [ey], [ez], c = 'k', marker ='o',\
s = 80*np.ones((np.max(np.shape(ex)))))
ax.plot([afg.fuse_center_seg_point[0,0,0]],\
[afg.fuse_center_seg_point[0,0,1]],
[afg.fuse_center_seg_point[0,0,2]],'og',\
label='Fuselage nodes', markersize = 8)
ax.plot([wplot1], [wplot2], [wplot3],'ob',label = 'Wing nodes',\
markersize = 8)
ax.plot([cx], [cy], [cz],'xr', label = 'Center of Gravity',\
markersize = 14)
s1=ax.scatter([fx], [fy], [fz], c ='g' , marker = 'o',\
s = 80*np.ones((np.max(np.shape(fx)))))
ax.scatter([wx], [wy], [wz], c = 'b', marker = 'o',\
s = 80*np.ones((np.max(np.shape(wx)))))
ax.scatter([cx], [cy], [cz], c = 'r', marker = 'x', s = 80)
ax.set_ylim(np.min(wy)-5, np.max(wy)+5)
ax.set_xlim(np.min(fx)-10, np.max(fx)+10)
ax.set_zlim3d(np.min(wz)-5, np.max(wz)+5)
ax.set_xlabel('X')
ax.set_ylabel('Y')
ax.set_zlabel('Z')
ax.legend(loc = 'upper right', bbox_to_anchor = (0.5, 1.0),\
fancybox = True, shadow = True, ncol = 1, numpoints = 1)
FIG_NAME = 'ToolOutput/' + NAME + '/' + NAME + '_Aircraft_Cog.png'
fig.savefig(FIG_NAME, dpi = 500)
return()
def aircraft_cog_bwb_plot(cg, bi, ed, awg, NAME):
""" The function that generate the plot of the blended wing body
center of gravity and the nodes used to evaluate it.
INPUT
(float_array) cg --Arg.: Center of gravity global coordinates [m].
(class) bi --Arg.: BalanceInputs class.
(class) ed --Arg.: EngineInputs class.
(class) awg --Arg.: AircraftWingGeometry class.
##======= Class is defined in the InputClasses folder =======##
(char) NAME --Arg.: Aircraft name.
OUTPUT
(file)Aircraft_Cog.png --Out.: Png file containing the center of gravity.
"""
fig = plt.figure()
fig.patch.set_facecolor('w')
mpl.rcParams.update({'font.size': 12})
cx = cg[0]
cy = cg[1]
cz = cg[2]
ax = fig.add_subplot(111, projection = '3d')
wx = []
wy = []
wz = []
for i in range(1,awg.wing_nb+1):
for j in range(1,np.max(awg.wing_seg_nb)+1):
if awg.wing_center_seg_point[j-1,i-1,0] != 0.0:
wx.append(awg.wing_center_seg_point[j-1,i-1,0])
wy.append(awg.wing_center_seg_point[j-1,i-1,1])
wz.append(awg.wing_center_seg_point[j-1,i-1,2])
wplot1 = wx[0]
wplot2 = wy[0]
wplot3 = wz[0]
if bi.USER_EN_PLACEMENT:
ax.plot([ed.EN_PLACEMENT[0,0]],\
[ed.EN_PLACEMENT[0,1]],\
[ed.EN_PLACEMENT[0,2]],'ok',\
label = 'Engine nodes', markersize = 8)
ex = ed.EN_PLACEMENT[:,0]
ey = ed.EN_PLACEMENT[:,1]
ez = ed.EN_PLACEMENT[:,2]
ax.scatter([ex], [ey], [ez], c = 'k', marker = 'o',\
s = 80*np.ones((np.max(np.shape(ex)))))
ax.plot([wplot1], [wplot2], [wplot3], 'ob',label = 'Wing nodes',\
markersize =10)
ax.plot([cx], [cy], [cz], 'xr', label = 'Center of Gravity',\
markersize = 10)
ax.scatter([wx], [wy], [wz], c = 'b', marker = 'o',\
s = 80*np.ones((np.max(np.shape(wx)))))
ax.scatter([cx], [cy], [cz], c = 'r', marker = 'x', s = 80)
ax.set_ylim3d(np.min(wy)-5, np.max(wy)+5)
ax.set_xlim3d(np.min(wx)-10, np.max(wx)+10)
ax.set_zlim3d(np.min(wz)-5, np.max(wz)+5)
ax.set_xlabel('X')
ax.set_ylabel('Y')
ax.set_zlabel('Z')
ax.legend(loc = 'upper right', bbox_to_anchor = (1.1, 1.1),\
fancybox = True, shadow = True, ncol = 1, numpoints = 1)
FIG_NAME = 'ToolOutput/' + NAME + '/' + NAME + '_Aircraft_Cog.png'
fig.savefig(FIG_NAME, dpi = 500)
return()
#=============================================================================
# MAIN
#=============================================================================
if __name__ == '__main__':
log.warning('###########################################################')
log.warning('# ERROR NOT A STANDALONE PROGRAM, RUN Balance_unc_main.py #')
log.warning('###########################################################')
```
#### File: func/Inertia/lumpedmassesinertia.py
```python
import numpy as np
import math
from ceasiompy.utils.ceasiomlogger import get_logger
from ceasiompy.utils.cpacsfunctions import open_tixi, open_tigl, close_tixi
log = get_logger(__file__.split('.')[0])
#=============================================================================
# CLASSES
#=============================================================================
"""All classes are defined inside the classes and into
the InputClasses/Unconventional folder."""
#=============================================================================
# FUNCTIONS
#=============================================================================
def fuselage_inertia(SPACING, center_of_gravity, mass_seg_i, afg,\
cpacs_in):
"""Thefunction evaluates the inertia of the fuselage using the lumped
masses method.
INPUT
(float) SPACING --Arg.: Maximum distance between fuselage nodes [m].
(float_array) center_of_gravity --Arg.: x,y,z coordinates of the CoG.
(float_array) mass_seg_i --Arg.: Mass of each segment of each
component of the aircraft.
(class) afg --Arg.: AircraftFuseGeometry class.
##======= Class is defined in the InputClasses folder =======##
(char) cpacs_in --Arg.: Cpacs xml file location.
OUTPUT
(float) sfx --Out.: Lumped nodes x-coordinate [m].
(float) sfy --Out.: Lumped nodes y-coordinate [m].
(float) sfz --Out.: Lumped nodes z-coordinate [m].
    (float) Ixx --Out.: Moment of inertia with respect to the x-axis [kgm^2].
    (float) Iyy --Out.: Moment of inertia with respect to the y-axis [kgm^2].
    (float) Izz --Out.: Moment of inertia with respect to the z-axis [kgm^2].
"""
tixi = open_tixi(cpacs_in)
tigl = open_tigl(tixi)
sfx = []
sfy = []
sfz = []
Ixx = 0
Iyy = 0
Izz = 0
Ixy = 0
Iyz = 0
Ixz = 0
log.info('-------------------------------------------------------------')
log.info('---- Evaluating fuselage nodes for lumped masses inertia ----')
log.info('-------------------------------------------------------------')
for f in range(1,afg.fus_nb+1):
for i in afg.f_seg_sec[:,f-1,2]:
fx = []
fy = []
fz = []
#Number of subdivisions along the longitudinal axis
subd_l = math.ceil((afg.fuse_seg_length[int(i)-1][f-1] / SPACING))
#Number of subdivisions along the perimeter
SUBD_C0 = math.ceil((afg.fuse_sec_per[int(i)-1][f-1] / SPACING))
#Number of subdivisions along the radial axis
subd_r = math.ceil(((afg.fuse_sec_width[int(i)-1][f-1]/2)/ SPACING))
if subd_l == 0:
subd_l = 1.0
if SUBD_C0 == 0:
SUBD_C0 = 1.0
if subd_r == 0:
subd_r = 1.0
eta = 1.0 / (subd_l)
zeta = 1.0 / (SUBD_C0)
D0 = np.sqrt(np.arange(subd_r*SUBD_C0) / float(subd_r*SUBD_C0))
D = np.array([t for t in (D0 - (D0[-1] - 0.98)) if not t < 0])
(xc,yc,zc)=afg.fuse_center_section_point[int(i)-1][f-1][:]
for j in range(int(subd_l) + 1):
et = j * eta
for k in range(int(SUBD_C0) + 1):
ze = k * zeta
(x0,y0,z0) = tigl.fuselageGetPoint(f, int(i), et, ze)
fx.append(x0)
fy.append(y0)
fz.append(z0)
sfx.append(x0)
sfy.append(y0)
sfz.append(z0)
if subd_r > 0.0:
deltar = np.sqrt((y0-yc)**2 + (z0-zc)**2)*D
theta = np.pi * (3 - np.sqrt(5)) * np.arange(len(D))
x = np.zeros(np.shape(deltar)) + x0
y = yc + deltar*np.cos(theta)
z = zc + deltar*np.sin(theta)
fx.extend(x)
fy.extend(y)
fz.extend(z)
sfx.extend(x)
sfy.extend(y)
sfz.extend(z)
M = mass_seg_i[int(i)-1,f-1]/np.max(np.shape(fx))
fcx = (fx-(np.zeros((np.shape(fx))) + center_of_gravity[0]))
fcy = (fy-(np.zeros((np.shape(fx))) + center_of_gravity[1]))
fcz = (fz-(np.zeros((np.shape(fx))) + center_of_gravity[2]))
Ixx += np.sum(M * np.add(fcy**2, fcz**2))
Iyy += np.sum(M * np.add(fcx**2, fcz**2))
Izz += np.sum(M * np.add(fcx**2, fcy**2))
Ixy += np.sum(M * fcx * fcy)
Iyz += np.sum(M * fcy * fcz)
Ixz += np.sum(M * fcx * fcz)
return(sfx, sfy, sfz, Ixx, Iyy, Izz, Ixy, Iyz, Ixz)
###==================================== WINGS ===============================##
def wing_inertia(subd_c, SPACING, fuse, center_of_gravity,
mass_seg_i, awg, cpacs_in):
"""The function evaluates the inertia of the wings using the lumped
masses method.
INPUT
(float) subd_c --Arg.: Number of subdivisions along the perimeter
on each surface, total number of points for
each section subd_c * 2
(float) SPACING --Arg.: Maximum distance between wing nodes along
the span [m].
(float) fuse --Arg.: Number of fuselages.
(float_array) center_of_gravity --Arg.: x,y,z coordinates of the CoG.
(float_array) mass_seg_i --Arg.: Mass of each segment of each
component of the aircraft.
(class) awg --Arg.: AircraftWingGeometry class.
##======= Class is defined in the InputClasses folder =======##
(char) cpacs_in --Arg.: Cpacs xml file location.
OUTPUT
(float) swx --Out.: Lumped nodes x-coordinate [m].
(float) swy --Out.: Lumped nodes y-coordinate [m].
(float) swz --Out.: Lumped nodes z-coordinate [m].
    (float) Ixx --Out.: Moment of inertia with respect to the x-axis [kgm^2].
    (float) Iyy --Out.: Moment of inertia with respect to the y-axis [kgm^2].
    (float) Izz --Out.: Moment of inertia with respect to the z-axis [kgm^2].
"""
tixi = open_tixi(cpacs_in)
tigl = open_tigl(tixi)
log.info('-------------------------------------------------------------')
log.info('------ Evaluating wing nodes for lumped masses inertia ------')
log.info('-------------------------------------------------------------')
Ixx = 0
Iyy = 0
Izz = 0
Ixy = 0
Iyz = 0
Ixz = 0
swx = []
swy = []
swz = []
a = 0
for w in range(1,awg.w_nb+1):
DEN = 0.0
for d in range(int(subd_c+2)):
DEN = DEN + d
zeta = 1.0/DEN
for i in awg.w_seg_sec[:,w-1,2]:
if i == 0.0:
break
wx = []
wy = []
wz = []
#Number of subdivisions along the longitudinal axis
subd_l = math.ceil((awg.wing_seg_length[int(i)-1][w+a-1]/SPACING))
if subd_l == 0:
subd_l = 1
eta = 1.0/subd_l
et = 0.0
(xc,yc,zc) = awg.wing_center_seg_point[int(i)-1][w+a-1][:]
for j in range(int(subd_l)+1):
et = j * eta
(xle,yle,zle) = tigl.wingGetLowerPoint(w,int(i),et,0.0)
(xle2,yle2,zle2) = tigl.wingGetLowerPoint(w,int(i),et,1.0)
if xle < xle2:
ZLE = 0.0
ze = 0.0
else:
ZLE = 1.0
ze = 1.0
wx.extend((xle,xle2))
wy.extend((yle,yle2))
wz.extend((zle,zle2))
swx.extend((xle,xle2))
swy.extend((yle,yle2))
swz.extend((zle,zle2))
for k in range(int(subd_c) + 1):
if ZLE == 0.0:
ze += float(k)*zeta
elif ZLE == 1.0:
ze -= float(k)*zeta
(xl,yl,zl) = tigl.wingGetLowerPoint(w,int(i),et,ze)
(xu,yu,zu) = tigl.wingGetUpperPoint(w,int(i),et,ze)
wx.extend((xl,xu))
wy.extend((yl,yu))
wz.extend((zl,zu))
swx.extend((xl,xu))
swy.extend((yl,yu))
swz.extend((zl,zu))
M = mass_seg_i[int(i)-1,fuse+w+a-1]/np.max(np.shape(wx))
wcx = (wx-(np.zeros((np.shape(wx))) + center_of_gravity[0]))
wcy = (wy-(np.zeros((np.shape(wy))) + center_of_gravity[1]))
wcz = (wz-(np.zeros((np.shape(wz))) + center_of_gravity[2]))
Ixx += np.sum(M * np.add(wcy**2, wcz**2))
Iyy += np.sum(M * np.add(wcx**2, wcz**2))
Izz += np.sum(M * np.add(wcx**2, wcy**2))
Ixy += np.sum(M * wcx * wcy)
Iyz += np.sum(M * wcy * wcz)
Ixz += np.sum(M * wcx * wcz)
if awg.wing_sym[int(w)-1] != 0:
if awg.wing_sym[int(w)-1] == 1: # x-y plane
symy = 1 + np.zeros(np.shape(wy))
symx = 1 + np.zeros(np.shape(wx))
symz = -1 + np.zeros(np.shape(wz))
elif awg.wing_sym[int(w)-1] == 2: # x-z plane
symy = -1 + np.zeros(np.shape(wy))
symx = 1 + np.zeros(np.shape(wx))
symz = 1 + np.zeros(np.shape(wz))
elif awg.wing_sym[int(w)-1] == 3: # y-z plane
symy = 1 + np.zeros(np.shape(wy))
symx = -1 + np.zeros(np.shape(wx))
symz = 1 + np.zeros(np.shape(wz))
wx_t = wx * symx
wy_t = wy * symy
wz_t = wz * symz
swx.extend(wx_t)
swy.extend(wy_t)
swz.extend(wz_t)
M = mass_seg_i[int(i)-1,fuse+w+a-1]/np.max(np.shape(wx_t))
wcx_t=(wx_t-(np.zeros((np.shape(wx_t))) + center_of_gravity[0]))
wcy_t=(wy_t-(np.zeros((np.shape(wy_t))) + center_of_gravity[1]))
wcz_t=(wz_t-(np.zeros((np.shape(wz_t))) + center_of_gravity[2]))
Ixx += np.sum(M * np.add(wcy_t**2,wcz_t**2))
Iyy += np.sum(M * np.add(wcx_t**2,wcz_t**2))
Izz += np.sum(M * np.add(wcx_t**2,wcy_t**2))
Ixy += np.sum(M * wcx_t * wcy_t)
Iyz += np.sum(M * wcy_t * wcz_t)
Ixz += np.sum(M * wcx_t * wcz_t)
if awg.wing_sym[int(w) - 1] != 0:
a += 1
return(swx, swy, swz, Ixx, Iyy, Izz, Ixy, Iyz, Ixz)
#=============================================================================
def engine_inertia(center_of_gravity, EngineData):
"""The function evaluates the inertia of the fuselage using the lumped
masses method.
INPUT
(float_array) center_of_gravity --Arg.: x,y,z coordinates of the CoG.
(class) EngineData --Arg.: EngineData class.
##======= Class is defined in the InputClasses folder =======##
OUTPUT
(float) Ixx --Out.: Moment of inertia with respect to the x-axis [kgm^2].
(float) Iyy --Out.: Moment of inertia with respect to the y-axis [kgm^2].
(float) Izz --Out.: Moment of inertia with respect to the z-axis [kgm^2].
(float) Ixy, Iyz, Ixz --Out.: Products of inertia [kgm^2].
"""
Ixx = 0
Iyy = 0
Izz = 0
Ixy = 0
Iyz = 0
Ixz = 0
for e in range(0,EngineData.NE):
cx = EngineData.EN_PLACEMENT[e,0] - center_of_gravity[0]
cy = EngineData.EN_PLACEMENT[e,1] - center_of_gravity[1]
cz = EngineData.EN_PLACEMENT[e,2] - center_of_gravity[2]
Ixx += EngineData.en_mass * np.add(cy**2,cz**2)
Iyy += EngineData.en_mass * np.add(cx**2,cz**2)
Izz += EngineData.en_mass * np.add(cx**2,cy**2)
Ixy += EngineData.en_mass * cx * cy
Iyz += EngineData.en_mass * cy * cz
Ixz += EngineData.en_mass * cx * cz
return(Ixx, Iyy, Izz, Ixy, Iyz, Ixz)
#==============================================================================
# MAIN
#==============================================================================
if __name__ == '__main__':
log.warning('#########################################################')
log.warning('# ERROR NOT A STANDALONE PROGRAM, RUN Balanceuncmain.py #')
log.warning('#########################################################')
```
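All three functions above apply the same lumped-masses formula: each node of mass m at offset (cx, cy, cz) from the centre of gravity adds m*(cy^2 + cz^2) to Ixx, and similarly for the other components. A minimal, self-contained sketch of that accumulation, using hypothetical node coordinates and masses (not read from a CPACS file):

```python
import numpy as np

# Hypothetical lumped nodes: offsets from the CoG [m] and the mass lumped at each node [kg]
nodes = np.array([[ 1.0,  0.5,  0.2],
                  [ 2.0, -0.5,  0.1],
                  [ 3.0,  0.0, -0.3]])
masses = np.full(len(nodes), 150.0)

cx, cy, cz = nodes[:, 0], nodes[:, 1], nodes[:, 2]
Ixx = np.sum(masses * (cy**2 + cz**2))   # moment of inertia about x [kg m^2]
Iyy = np.sum(masses * (cx**2 + cz**2))   # moment of inertia about y [kg m^2]
Izz = np.sum(masses * (cx**2 + cy**2))   # moment of inertia about z [kg m^2]
Ixy = np.sum(masses * cx * cy)           # product of inertia xy [kg m^2]
print(Ixx, Iyy, Izz, Ixy)
```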
#### File: func/Inertia/uncinertia.py
```python
from . import lumpedmassesinertia
from ceasiompy.utils.ceasiomlogger import get_logger
from ceasiompy.utils import cpacsfunctions as cpf
log = get_logger(__file__.split('.')[0])
#=============================================================================
# CLASSES
#=============================================================================
"""All classes are defined inside the classes and into
the InputClasses/Unconventional folder."""
#=============================================================================
# FUNCTIONS
#=============================================================================
def check_rounding(I, I2):
"""Evaluation of the rounding digit for the inertia evaluation
ARGUMENTS
(float) I --Arg.: Yaw moment of inertia with Max Payload.
(float) I2 --Arg.: Ixy moment of inertia with Max Payload.
RETURN
(int) rd --Out.: Number of rounded digits.
"""
ex = False
rd = 0
rd2 = 0
while not ex:
if round(I,rd) == 0:
ex = True
else:
rd -= 1
if round(I2,rd2) != 0:
rd2 -= 1
rd += 5
if rd2 > rd:
rd = rd2
return(rd)
def unc_inertia_eval(awg, afg, bout, bi, mw, ed, out_xml):
""" Unconventional aircraft Moment of Inertia analysis main function.
It divides the defined cases and evaluates them by calling the
function in the with_fuse_geom subfolder.
Source: An introduction to mechanics, 2nd ed., <NAME>
and <NAME>, Cambridge University Press.
ARGUMENTS
(class) awg --Arg.: AircraftWingGeometry class.
(class) afg --Arg.: AircraftFuseGeometry class.
(class) bout --Arg.: BalanceOutputs class.
(class) bi --Arg.: BalanceInputs class.
(class) mw --Arg.: MassesWeights class.
(class) ed --Arg.: EngineData class.
##======= Classes are defined in the InputClasses folder =======##
RETURN
(float_array) fx --Out.: Array containing the x-coordinates
of the fuselage nodes.
(float_array) fy --Out.: Array containing the y-coordinates
of the fuselage nodes.
(float_array) fz --Out.: Array containing the z-coordinates
of the fuselage nodes.
(float_array) wx --Out.: Array containing the x-coordinates
of the wing nodes.
(float_array) wy --Out.: Array containing the y-coordinates
of the wing nodes.
(float_array) wz --Out.: Array containing the z-coordinates
of the wing nodes.
(class) bout --Out.: Updated BalanceOutputs class.
"""
center_of_gravity_seg = []
mass_component = []
log.info('---------- Inertia Evaluation ---------')
if bi.USER_EN_PLACEMENT:
(bout.Ixxen, bout.Iyyen, bout.Izzen, bout.Ixyen, bout.Iyzen,\
bout.Ixzen) = lumpedmassesinertia.engine_inertia(\
bout.center_of_gravity, ed)
else:
(bout.Ixxen, bout.Iyyen, bout.Izzen, bout.Ixyen, bout.Iyzen,\
bout.Ixzen) = (0, 0, 0, 0, 0, 0)
# Max Payload Configuration
log.info('------------ Lumped mass Inertia ------------')
log.info('--------- Max Payload configuration ---------')
(fx, fy, fz, Ixxf, Iyyf, Izzf, Ixyf, Iyzf, Ixzf)\
= lumpedmassesinertia.fuselage_inertia(\
bi.SPACING_FUSE, bout.center_of_gravity, mw.mass_seg_i,\
afg, out_xml)
(wx, wy, wz, Ixxw, Iyyw, Izzw, Ixyw, Iyzw, Ixzw)\
= lumpedmassesinertia.wing_inertia(\
bi.WPP, bi.SPACING_WING, afg.fuse_nb, bout.center_of_gravity,\
mw.mass_seg_i, awg, out_xml)
rd = check_rounding(Izzf + Izzw + bout.Izzen,\
Ixyf + Ixyw + bout.Ixyen)
bout.Ixx_lump = round(Ixxf + Ixxw + bout.Ixxen,rd)
bout.Iyy_lump = round(Iyyf + Iyyw + bout.Iyyen,rd)
bout.Izz_lump = round(Izzf + Izzw + bout.Izzen,rd)
bout.Ixy_lump = round(Ixyf + Ixyw + bout.Ixyen,rd)
bout.Iyz_lump = round(Iyzf + Iyzw + bout.Iyzen,rd)
bout.Ixz_lump = round(Ixzf + Ixzw + bout.Ixzen,rd)
# Zero Fuel Configuration
log.info('---------- Zero Fuel configuration ----------')
(fx, fy, fz, Ixxf2, Iyyf2, Izzf2, Ixyf2, Iyzf2, Ixzf2)\
= lumpedmassesinertia.fuselage_inertia(bi.SPACING_FUSE,\
bout.cg_zfm, mw.ms_zfm, afg, out_xml)
(wx, wy, wz, Ixxw2, Iyyw2, Izzw2, Ixyw2, Iyzw2, Ixzw2)\
= lumpedmassesinertia.wing_inertia(bi.WPP, bi.SPACING_WING,\
afg.fuse_nb, bout.cg_zfm, mw.ms_zfm, awg, out_xml)
bout.Ixx_lump_zfm = round(Ixxf2 + Ixxw2 + bout.Ixxen,rd)
bout.Iyy_lump_zfm = round(Iyyf2 + Iyyw2 + bout.Iyyen,rd)
bout.Izz_lump_zfm = round(Izzf2 + Izzw2 + bout.Izzen,rd)
bout.Ixy_lump_zfm = round(Ixyf2 + Ixyw2 + bout.Ixyen,rd)
bout.Iyz_lump_zfm = round(Iyzf2 + Iyzw2 + bout.Iyzen,rd)
bout.Ixz_lump_zfm = round(Ixzf2 + Ixzw2 + bout.Ixzen,rd)
# Zero Payload Configuration
log.info('--------- Zero Payload configuration --------')
(fx, fy, fz, Ixxf3, Iyyf3, Izzf3, Ixyf3, Iyzf3, Ixzf3)\
= lumpedmassesinertia.fuselage_inertia(bi.SPACING_FUSE,\
bout.cg_zpm, mw.ms_zpm, afg, out_xml)
(wx, wy, wz, Ixxw3, Iyyw3, Izzw3, Ixyw3, Iyzw3, Ixzw3)\
= lumpedmassesinertia.wing_inertia(bi.WPP, bi.SPACING_WING,\
afg.fuse_nb, bout.cg_zpm, mw.ms_zpm, awg, out_xml)
bout.Ixx_lump_zpm = round(Ixxf3 + Ixxw3 + bout.Ixxen,rd)
bout.Iyy_lump_zpm = round(Iyyf3 + Iyyw3 + bout.Iyyen,rd)
bout.Izz_lump_zpm = round(Izzf3 + Izzw3 + bout.Izzen,rd)
bout.Ixy_lump_zpm = round(Ixyf3 + Ixyw3 + bout.Ixyen,rd)
bout.Iyz_lump_zpm = round(Iyzf3 + Iyzw3 + bout.Iyzen,rd)
bout.Ixz_lump_zpm = round(Ixzf3 + Ixzw3 + bout.Ixzen,rd)
# OEM Configuration
log.info('------------- OEM configuration -------------')
(fx, fy, fz, Ixxf4, Iyyf4, Izzf4, Ixyf4, Iyzf4, Ixzf4)\
= lumpedmassesinertia.fuselage_inertia(bi.SPACING_FUSE,\
bout.cg_oem, mw.ms_oem, afg, out_xml)
(wx, wy, wz, Ixxw4, Iyyw4, Izzw4, Ixyw4, Iyzw4, Ixzw4)\
= lumpedmassesinertia.wing_inertia(bi.WPP, bi.SPACING_WING,\
afg.fuse_nb, bout.cg_oem, mw.ms_oem, awg, out_xml)
bout.Ixx_lump_oem = round(Ixxf4 + Ixxw4 + bout.Ixxen,rd)
bout.Iyy_lump_oem = round(Iyyf4 + Iyyw4 + bout.Iyyen,rd)
bout.Izz_lump_oem = round(Izzf4 + Izzw4 + bout.Izzen,rd)
bout.Ixy_lump_oem = round(Ixyf4 + Ixyw4 + bout.Ixyen,rd)
bout.Iyz_lump_oem = round(Iyzf4 + Iyzw4 + bout.Iyzen,rd)
bout.Ixz_lump_oem = round(Ixzf4 + Ixzw4 + bout.Ixzen,rd)
# User Configuration
if bi.USER_CASE:
log.info('------------- User configuration ------------')
(fx, fy, fz, Ixxfu, Iyyfu, Izzfu, Ixyfu, Iyzfu, Ixzfu)\
= lumpedmassesinertia.fuselage_inertia(bi.SPACING_FUSE,\
bout.cg_user, mw.ms_user, afg, out_xml)
(wx, wy, wz, Ixxwu, Iyywu, Izzwu, Ixywu, Iyzwu, Ixzwu)\
= lumpedmassesinertia.wing_inertia(bi.WPP, bi.SPACING_WING,\
afg.fuse_nb, bout.cg_user, mw.ms_user, awg, out_xml)
bout.Ixx_lump_user = round(Ixxfu + Ixxwu + bout.Ixxen,rd)
bout.Iyy_lump_user = round(Iyyfu + Iyywu + bout.Iyyen,rd)
bout.Izz_lump_user = round(Izzfu + Izzwu + bout.Izzen,rd)
bout.Ixy_lump_user = round(Ixyfu + Ixywu + bout.Ixyen,rd)
bout.Iyz_lump_user = round(Iyzfu + Iyzwu + bout.Iyzen,rd)
bout.Ixz_lump_user = round(Ixzfu + Ixzwu + bout.Ixzen,rd)
bout.Ixxen = round(bout.Ixxen, rd)
bout.Iyyen = round(bout.Iyyen, rd)
bout.Izzen = round(bout.Izzen, rd)
bout.Ixyen = round(bout.Ixyen, rd)
bout.Iyzen = round(bout.Iyzen, rd)
bout.Ixzen = round(bout.Ixzen, rd)
return(bout, fx, fy, fz, wx, wy, wz)
#=============================================================================
def bwb_inertia_eval(awg, bout, bi, mw, ed, out_xml):
""" Blended wing Body aircraft Moment of Inertia analysis main function.
It divides the defined cases and evaluates them by calling the
function in the no_fuse_geom subfolder.
Source: An introduction to mechanics, 2nd ed., <NAME>
and <NAME>, Cambridge University Press.
ARGUMENTS
(class) awg --Arg.: AircraftWingGeometry class.
(class) bout --Arg.: BalanceOutputs class.
(class) bi --Arg.: BalanceInputs class.
(class) mw --Arg.: MassesWeights class.
(class) ed --Arg.: EngineData class.
##======= Classes are defined in the InputClasses folder =======##
RETURN
(float_array) wx --Out.: Array containing the x-coordinates
of the wing nodes.
(float_array) wy --Out.: Array containing the y-coordinates
of the wing nodes.
(float_array) wz --Out.: Array containing the z-coordinates
of the wing nodes.
(class) bout --Out.: Updated BalanceOutputs class.
"""
center_of_gravity_seg = []
mass_component = []
log.info('---------- Inertia Evaluation ---------')
if bi.USER_EN_PLACEMENT:
(bout.Ixxen, bout.Iyyen, bout.Izzen, bout.Ixyen, bout.Iyzen,\
bout.Ixzen) = lumpedmassesinertia.engine_inertia(\
bout.center_of_gravity, ed)
else:
(bout.Ixxen, bout.Iyyen, bout.Izzen, bout.Ixyen, bout.Iyzen,\
bout.Ixzen) = (0, 0, 0, 0, 0, 0)
# Max Payload configuration
(wx, wy, wz, Ixxw, Iyyw, Izzw, Ixyw, Iyzw, Ixzw)\
= lumpedmassesinertia.wing_inertia(bi.WPP, bi.SPACING_WING, 0,\
bout.center_of_gravity, mw.mass_seg_i, awg, out_xml)
rd = check_rounding(Izzw + bout.Izzen,\
Ixyw + bout.Ixyen)
bout.Ixx_lump = round(Ixxw + bout.Ixxen,rd)
bout.Iyy_lump = round(Iyyw + bout.Iyyen,rd)
bout.Izz_lump = round(Izzw + bout.Izzen,rd)
bout.Ixy_lump = round(Ixyw + bout.Ixyen,rd)
bout.Iyz_lump = round(Iyzw + bout.Iyzen,rd)
bout.Ixz_lump = round(Ixzw + bout.Ixzen,rd)
# Zero Fuel Configuration
(wx, wy, wz, Ixxw2, Iyyw2, Izzw2, Ixyw2, Iyzw2, Ixzw2)\
= lumpedmassesinertia.wing_inertia(bi.WPP, bi.SPACING_WING, 0,\
bout.cg_zfm, mw.ms_zfm, awg, out_xml)
bout.Ixx_lump_zfm = round(Ixxw2 + bout.Ixxen,rd)
bout.Iyy_lump_zfm = round(Iyyw2 + bout.Iyyen,rd)
bout.Izz_lump_zfm = round(Izzw2 + bout.Izzen,rd)
bout.Ixy_lump_zfm = round(Ixyw2 + bout.Ixyen,rd)
bout.Iyz_lump_zfm = round(Iyzw2 + bout.Iyzen,rd)
bout.Ixz_lump_zfm = round(Ixzw2 + bout.Ixzen,rd)
# Zero Payload Configuration
(wx, wy, wz, Ixxw3, Iyyw3, Izzw3, Ixyw3, Iyzw3, Ixzw3)\
= lumpedmassesinertia.wing_inertia(bi.WPP, bi.SPACING_WING, 0,\
bout.cg_zpm, mw.ms_zpm, awg, out_xml)
bout.Ixx_lump_zpm = round(Ixxw3 + bout.Ixxen,rd)
bout.Iyy_lump_zpm = round(Iyyw3 + bout.Iyyen,rd)
bout.Izz_lump_zpm = round(Izzw3 + bout.Izzen,rd)
bout.Ixy_lump_zpm = round(Ixyw3 + bout.Ixyen,rd)
bout.Iyz_lump_zpm = round(Iyzw3 + bout.Iyzen,rd)
bout.Ixz_lump_zpm = round(Ixzw3 + bout.Ixzen,rd)
# OEM configuration
(wx, wy, wz, Ixxw4, Iyyw4, Izzw4, Ixyw4, Iyzw4, Ixzw4)\
= lumpedmassesinertia.wing_inertia(bi.WPP, bi.SPACING_WING, 0,\
bout.cg_oem, mw.ms_oem, awg, out_xml)
bout.Ixx_lump_oem = round(Ixxw4 + bout.Ixxen,rd)
bout.Iyy_lump_oem = round(Iyyw4 + bout.Iyyen,rd)
bout.Izz_lump_oem = round(Izzw4 + bout.Izzen,rd)
bout.Ixy_lump_oem = round(Ixyw4 + bout.Ixyen,rd)
bout.Iyz_lump_oem = round(Iyzw4 + bout.Iyzen,rd)
bout.Ixz_lump_oem = round(Ixzw4 + bout.Ixzen,rd)
# User configuration
if bi.USER_CASE:
(wx, wy, wz, Ixxwu, Iyywu, Izzwu, Ixywu, Iyzwu, Ixzwu)\
= lumpedmassesinertia.wing_inertia(bi.WPP, bi.SPACING_WING,\
0, bout.cg_user, mw.ms_user, awg, out_xml)
bout.Ixx_lump_user = round(Ixxwu + bout.Ixxen,rd)
bout.Iyy_lump_user = round(Iyywu + bout.Iyyen,rd)
bout.Izz_lump_user = round(Izzwu + bout.Izzen,rd)
bout.Ixy_lump_user = round(Ixywu + bout.Ixyen,rd)
bout.Iyz_lump_user = round(Iyzwu + bout.Iyzen,rd)
bout.Ixz_lump_user = round(Ixzwu + bout.Ixzen,rd)
bout.Ixxen = round(bout.Ixxen, rd)
bout.Iyyen = round(bout.Iyyen, rd)
bout.Izzen = round(bout.Izzen, rd)
bout.Ixyen = round(bout.Ixyen, rd)
bout.Iyzen = round(bout.Iyzen, rd)
bout.Ixzen = round(bout.Ixzen, rd)
return(bout, wx, wy, wz)
#=============================================================================
# MAIN
#=============================================================================
if __name__ == '__main__':
log.warning('#########################################################')
log.warning('# ERROR NOT A STANDALONE PROGRAM, RUN balanceuncmain.py #')
log.warning('#########################################################')
```
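`check_rounding` walks the rounding digit down until the first argument rounds to zero, backs off by five digits, and keeps the larger of the two counts, so large inertia sums are reported with a consistent number of significant figures. A small usage sketch, assuming `check_rounding` from the module above is in scope and with made-up values:

```python
# Hypothetical inertia values [kg m^2]
Izz_total = 48_731_254.6   # yaw moment of inertia, Max Payload
Ixy_total = 3_812.4        # xy product of inertia, Max Payload

rd = check_rounding(Izz_total, Ixy_total)   # number of rounding digits (negative here)
Izz_rounded = round(Izz_total, rd)          # same rounding applied to every inertia component
```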
#### File: ModuleTemplate/func/subfunc.py
```python
from ceasiompy.utils.ceasiomlogger import get_logger
log = get_logger(__file__.split('.')[0])
#==============================================================================
# CLASSES
#==============================================================================
#==============================================================================
# FUNCTIONS
#==============================================================================
def my_subfunc(arg_a, arg_b):
""" Function to clacluate ...
Function 'my_subfunc' is a subfunction of ModuleTemplate which returns...
Source:
* Reference paper or book, with author and date
Args:
arg_a (str): Argument 1
arg_b (str): Argument 2
Returns:
new_arg (str): Output argument
.. warning::
Example of warning
"""
new_arg = arg_a + ' and ' + arg_b
return new_arg
#==============================================================================
# MAIN
#==============================================================================
if __name__ == '__main__':
log.info('Nothing to execute!')
```
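`my_subfunc` simply joins its two string arguments with ' and '; a quick usage example, assuming the function above is importable:

```python
result = my_subfunc('wing', 'fuselage')
print(result)   # -> 'wing and fuselage'
```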
#### File: func/AinFunc/getdatafromcpacs.py
```python
from ceasiompy.utils.ceasiomlogger import get_logger
from ceasiompy.utils import cpacsfunctions as cpf
from ceasiompy.utils.cpacsfunctions import open_tixi, open_tigl, close_tixi, \
add_uid, create_branch
log = get_logger(__file__.split('.')[0])
#=============================================================================
# CLASSES
#=============================================================================
"""All classes are defined inside the classes folder in the
range_output_class script and into the Input_classes/Conventional
folder inside the range_user_input.py script."""
#=============================================================================
# FUNCTIONS
#=============================================================================
def get_data(mw, ri, cpacs_in):
""" The function extracts from the xml file the required input data,
the code will use the default value when they are missing.
INPUT
(class) mw --Arg.: MassesWeight class updated
(class) ri --Arg.: RangeInput class updated
##======= Classes are defined in the Input_classes folder =======##
(char) opt --Arg.: Cpacs or input option
(char) cpacs_in --Arg.: Relative location of the xml file in the
ToolInput folder (cpacs option) or
relative location of the temp. xml file in
the ToolOutput folder (input option).
OUTPUT
(class) mw --Out.: MassesWeight class updated.
(class) ri --Out.: RangeInput class updated.
(file) cpacs_in --Out.: Updated cpacs file.
"""
log.info('CPACS file path check')
# path definition ========================================================
# Opening CPACS file
tixi = open_tixi(cpacs_in)
TSPEC_PATH = '/cpacs/toolspecific/CEASIOMpy'
W_PATH = TSPEC_PATH + '/weight'
C_PATH = W_PATH + '/crew'
P_PATH = C_PATH + '/pilots'
CC_PATH = C_PATH + '/cabinCrewMembers'
PASS_PATH = W_PATH + '/passengers'
FMP_PATH = PASS_PATH + '/fuelMassMaxpass/mass'
PROP_PATH = TSPEC_PATH + '/propulsion'
RANGE_PATH = TSPEC_PATH + '/ranges'
MASS_PATH = '/cpacs/vehicles/aircraft/model/analyses/massBreakdown'
DM_PATH = MASS_PATH + '/designMasses'
MTOM_PATH = DM_PATH + '/mTOM/mass'
F_PATH = MASS_PATH + '/fuel/massDescription/mass'
OEM_PATH = MASS_PATH + '/mOEM/massDescription/mass'
PAY_PATH = MASS_PATH + '/payload/massDescription/mass'
F1_PATH = '/cpacs/vehicles/fuels/fuel'
F2_PATH = TSPEC_PATH + '/fuels'
TSFC_PATH = PROP_PATH + '/tSFC'
create_branch(tixi, TSFC_PATH, False)
create_branch(tixi, RANGE_PATH, False)
create_branch(tixi, P_PATH, False)
create_branch(tixi, F1_PATH, False)
create_branch(tixi, F2_PATH, False)
add_uid(tixi, F1_PATH, 'kerosene')
# Compulsory path checks =================================================
if not tixi.checkElement(TSPEC_PATH):
raise Exception('Missing required toolspecific path.')
elif not tixi.checkElement(CC_PATH + '/cabinCrewMemberNb'):
raise Exception('Missing required cabinCrewMemberNb path.')
elif not tixi.checkElement(MASS_PATH):
raise Exception('Missing required massBreakdown path.')
elif not tixi.checkElement(DM_PATH):
raise Exception('Missing required designMasses path.')
elif not tixi.checkElement(MTOM_PATH):
raise Exception('Missing required mTOM/mass path.')
elif not tixi.checkElement(F_PATH):
raise Exception('Missing required fuel/massDescription/mass path.')
elif not tixi.checkElement(FMP_PATH):
raise Exception('Missing required fuelMassMaxpass/mass path.')
elif not tixi.checkElement(OEM_PATH):
raise Exception('Missing required mOEM/massDescription/mass path.')
elif not tixi.checkElement(PAY_PATH):
raise Exception('Missing required payload/massDescription/mass path.')
else:
log.info('All paths correctly defined in the toolinput.xml file, '\
+ 'beginning data extraction.')
# Gathering data =========================================================
## TOOLSPECIFIC ----------------------------------------------------------
if not tixi.checkElement(RANGE_PATH + '/lDRatio'):
tixi.createElement(RANGE_PATH, 'lDRatio')
tixi.updateDoubleElement(RANGE_PATH + '/lDRatio',\
ri.LD, '%g')
else:
temp = tixi.getIntegerElement(RANGE_PATH + '/lDRatio')
if temp != ri.LD and temp > 0:
ri.LD = temp
if not tixi.checkElement(RANGE_PATH + '/cruiseSpeed'):
tixi.createElement(RANGE_PATH, 'cruiseSpeed')
tixi.updateDoubleElement(RANGE_PATH + '/cruiseSpeed',\
ri.CRUISE_SPEED, '%g')
else:
temp = tixi.getIntegerElement(RANGE_PATH + '/cruiseSpeed')
if temp != ri.CRUISE_SPEED and temp > 0:
ri.CRUISE_SPEED = temp
if not tixi.checkElement(RANGE_PATH + '/loiterTime'):
tixi.createElement(RANGE_PATH, 'loiterTime')
tixi.updateDoubleElement(RANGE_PATH + '/loiterTime',\
ri.LOITER_TIME, '%g')
else:
temp = tixi.getIntegerElement(RANGE_PATH + '/loiterTime')
if temp != ri.LOITER_TIME and temp > 0:
ri.LOITER_TIME = temp
if not tixi.checkElement(TSPEC_PATH + '/geometry/winglet'):
tixi.createElement(TSPEC_PATH + '/geometry', 'winglet')
tixi.updateIntegerElement(TSPEC_PATH + '/geometry/winglet',\
ri.WINGLET, '%i')
else:
temp = tixi.getIntegerElement(TSPEC_PATH + '/geometry/winglet')
if temp != ri.WINGLET:
ri.WINGLET = temp
if not tixi.checkElement(P_PATH + '/pilotNb'):
tixi.createElement(P_PATH, 'pilotNb')
tixi.updateIntegerElement(P_PATH + '/pilotNb',\
ri.pilot_nb, '%i')
else:
temp = tixi.getIntegerElement(P_PATH + '/pilotNb')
if temp != ri.pilot_nb and temp > 0:
ri.pilot_nb = temp
# Pilots user input data
if not tixi.checkElement(P_PATH + '/pilotMass'):
tixi.createElement(P_PATH, 'pilotMass')
tixi.updateDoubleElement(P_PATH + '/pilotMass',\
ri.MASS_PILOT, '%g')
else:
temp = tixi.getDoubleElement(P_PATH + '/pilotMass')
if temp != ri.MASS_PILOT and temp > 0:
ri.MASS_PILOT = temp
# Cabin crew user input data
if not tixi.checkElement(CC_PATH + '/cabinCrewMemberMass'):
tixi.createElement(CC_PATH, 'cabinCrewMemberMass')
tixi.updateDoubleElement(CC_PATH + '/cabinCrewMemberMass',\
ri.MASS_CABIN_CREW, '%g')
else:
temp = tixi.getDoubleElement(CC_PATH + '/cabinCrewMemberMass')
if temp != ri.MASS_CABIN_CREW and temp > 0:
ri.MASS_CABIN_CREW = temp
# Passengers input
if not tixi.checkElement(PASS_PATH + '/passMass'):
tixi.createElement(PASS_PATH, 'passMass')
tixi.updateDoubleElement(PASS_PATH + '/passMass',\
ri.MASS_PASS, '%g')
else:
temp = tixi.getDoubleElement(PASS_PATH+ '/passMass')
if temp != ri.MASS_PASS and temp > 0:
ri.MASS_PASS = temp
# Propulsion and Fuel
if not tixi.checkElement(PROP_PATH + '/turboprop'):
create_branch(tixi, PROP_PATH, False)
tixi.createElement(PROP_PATH, 'turboprop')
if ri.TURBOPROP:
tixi.updateTextElement(PROP_PATH + '/turboprop', 'True')
else:
tixi.updateTextElement(PROP_PATH + '/turboprop', 'False')
else:
temp = tixi.getTextElement(PROP_PATH + '/turboprop')
if temp == 'False':
ri.TURBOPROP = False
else:
ri.TURBOPROP = True
if not tixi.checkElement(F2_PATH + '/resFuelPerc'):
tixi.createElement(F2_PATH, 'resFuelPerc')
tixi.updateDoubleElement(F2_PATH + '/resFuelPerc',\
ri.RES_FUEL_PERC, '%g')
else:
temp = tixi.getDoubleElement(F2_PATH + '/resFuelPerc')
if temp != ri.RES_FUEL_PERC and temp > 0:
ri.RES_FUEL_PERC = temp
if not tixi.checkElement(TSFC_PATH + '/tsfcCruise'):
tixi.createElement(TSFC_PATH, 'tsfcCruise')
tixi.updateDoubleElement(TSFC_PATH + '/tsfcCruise',\
ri.TSFC_CRUISE, '%g')
else:
temp = tixi.getDoubleElement(TSFC_PATH + '/tsfcCruise')
if temp != ri.TSFC_CRUISE and temp > 0:
ri.TSFC_CRUISE = temp
if not tixi.checkElement(TSFC_PATH + '/tsfcLoiter'):
tixi.createElement(TSFC_PATH, 'tsfcLoiter')
tixi.updateDoubleElement(TSFC_PATH + '/tsfcLoiter',\
ri.TSFC_LOITER, '%g')
else:
temp = tixi.getDoubleElement(TSFC_PATH + '/tsfcLoiter')
if temp != ri.TSFC_LOITER and temp > 0:
ri.TSFC_LOITER = temp
## REQUIRED DATA =========================================================
# Cabin Crew
ri.cabin_crew_nb = tixi.getIntegerElement(CC_PATH + '/cabinCrewMemberNb')
# Fuel
mw.mass_fuel_maxpass = tixi.getDoubleElement(FMP_PATH)
## REQUIRED MASSBREAKDOWN DATA ===========================================
mw.maximum_take_off_mass = tixi.getDoubleElement(MTOM_PATH)
mw.operating_empty_mass = tixi.getDoubleElement(OEM_PATH)
mw.mass_payload = tixi.getDoubleElement(PAY_PATH)
mw.mass_fuel_max = tixi.getDoubleElement(F_PATH)
log.info('Data from CPACS file successfully extracted')
# Saving and closing the cpacs file ======================================
tixi.saveDocument(cpacs_in)
close_tixi(tixi, cpacs_in)
return(mw, ri)
#=============================================================================
# MAIN
#=============================================================================
if __name__ == '__main__':
log.warning('##########################################################')
log.warning('#### ERROR NOT A STANDALONE PROGRAM, RUN rangemain.py ####')
log.warning('##########################################################')
```
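Every optional toolspecific element above follows the same read-or-create pattern: create the element with the class default if it is missing, otherwise let a stored positive value override the default. A condensed, hypothetical helper sketching that pattern with the same tixi calls used in the file (the helper name is illustrative, not part of the module):

```python
def get_or_create_double(tixi, parent_xpath, element, default):
    """Return the stored value if present and positive, otherwise create the element with the default."""
    xpath = parent_xpath + '/' + element
    if not tixi.checkElement(xpath):
        tixi.createElement(parent_xpath, element)
        tixi.updateDoubleElement(xpath, default, '%g')
        return default
    value = tixi.getDoubleElement(xpath)
    return value if value > 0 else default

# e.g. ri.MASS_PILOT = get_or_create_double(tixi, P_PATH, 'pilotMass', ri.MASS_PILOT)
```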
#### File: ceasiompy/StabilityDynamic/dynamicstabilityState.py
```python
import os
import sys
import time
import math
import numpy as np
from numpy import log as ln
from numpy import linalg # For eigenvalues and eigenvectors
import matplotlib as mpl, cycler
import matplotlib.patheffects
import matplotlib.pyplot as plt
from matplotlib.lines import Line2D
from matplotlib.patches import Patch
from scipy import signal # For transfer functions
import ceasiompy.utils.cpacsfunctions as cpsf
import ceasiompy.utils.apmfunctions as apmf
import ceasiompy.utils.moduleinterfaces as mi
from ceasiompy.StabilityDynamic.func_dynamic import plot_sp_level_a, plot_sp_level_b, plot_sp_level_c,\
get_unic, interpolation, get_index, trim_derivative,\
speed_derivative_at_trim, adimensionalise,\
speed_derivative_at_trim_lat, concise_derivative_longi, concise_derivative_lat,\
longi_root_identification, direc_root_identification,\
check_sign_longi, check_sign_lat,\
short_period_damping_rating, short_period_frequency_rating, cap_rating, \
phugoid_rating, roll_rating, spiral_rating, dutch_roll_rating, plot_splane,\
longi_mode_characteristic, direc_mode_characteristic, trim_condition
from ceasiompy.utils.standardatmosphere import get_atmosphere, plot_atmosphere
from ceasiompy.SkinFriction.skinfriction import get_largest_wing_dim
from ceasiompy.utils.ceasiomlogger import get_logger
log = get_logger(__file__.split('.')[0])
MODULE_DIR = os.path.dirname(os.path.abspath(__file__))
MODULE_NAME = os.path.basename(os.getcwd())
DYNAMIC_ANALYSIS_XPATH = '/cpacs/toolspecific/CEASIOMpy/stability/dynamic'
#==============================================================================
# Classes
#========================================================================
#==============================================================================
# FUNCTIONS
#==============================================================================
def dynamic_stability_analysis(cpacs_path, cpacs_out_path):
"""Function to analyse a full Aeromap
Function 'dynamic_stability_analysis' analyses longitudinal dynamic
stability and directionnal dynamic.
Args:
cpacs_path (str): Path to CPACS file
cpacs_out_path (str):Path to CPACS output file
plot (boolean): Choise to plot graph or not
Returns: (#TODO put that in the documentation)
* Adrvertisements certifying if the aircraft is stable or Not
* In case of longitudinal dynamic UNstability or unvalid test on data:
- Plot cms VS aoa for constant Alt, Mach and different aos
- Plot cms VS aoa for const alt and aos and different mach
- plot cms VS aoa for constant mach, AOS and different altitudes
* In case of directionnal dynamic UNstability or unvalid test on data:
- Pcot cml VS aos for constant Alt, Mach and different aoa
- Plot cml VS aos for const alt and aoa and different mach
- plot cml VS aos for constant mach, AOA and different altitudes
* Plot one graph of cruising angles of attack for different mach and altitudes
Make the following tests:
* Check the CPACS path
* For longitudinal dynamic stability analysis:
- If there is more than one angle of attack for a given altitude, mach, aos
- If cml values are only zeros for a given altitude, mach, aos
- If there one aoa value which is repeated for a given altitude, mach, aos
* For directionnal dynamic stability analysis:
- If there is more than one angle of sideslip for a given altitude, mach, aoa
- If cms values are only zeros for a given altitude, mach, aoa
- If there one aos value which is repeated for a given altitude, mach, aoa
"""
# XPATH definition
aeromap_uid_xpath = DYNAMIC_ANALYSIS_XPATH + '/aeroMapUid'
aircraft_class_xpath = DYNAMIC_ANALYSIS_XPATH + '/class' # Classes 1 2 3 4 small, heavy ...
aircraft_cathegory_xpath = DYNAMIC_ANALYSIS_XPATH + '/category' # flight phase A B C
selected_mass_config_xpath = DYNAMIC_ANALYSIS_XPATH + '/massConfiguration'
longi_analysis_xpath = DYNAMIC_ANALYSIS_XPATH + '/instabilityModes/longitudinal'
direc_analysis_xpath = DYNAMIC_ANALYSIS_XPATH + '/instabilityModes/lateralDirectional'
show_plot_xpath = DYNAMIC_ANALYSIS_XPATH + '/showPlots'
save_plot_xpath = DYNAMIC_ANALYSIS_XPATH + '/savePlots'
model_xpath = '/cpacs/vehicles/aircraft/model'
ref_area_xpath = model_xpath + '/reference/area'
ref_length_xpath = model_xpath + '/reference/length'
flight_qualities_case_xpath = model_xpath + '/analyses/flyingQualities/fqCase'
masses_location_xpath = model_xpath + '/analyses/massBreakdown/designMasses'
# aircraft_class_xpath = flight_qualities_case_xpath + '/class' # Classes 1 2 3 4 small, heavy ...
# aircraft_cathegory_xpath = flight_qualities_case_xpath + '/cathegory' # flight phase A B C
# Ask user flight path angles : gamma_e
thrust_available = None # Thrust data are not available
flight_path_angle_deg = [0] # [-15,-10,-5,0,5,10,15] # The user should have the choice to select them !!!!!!!!!!!!!!!!!!!!
flight_path_angle = [angle *(np.pi/180) for angle in flight_path_angle_deg] # flight_path_angle in [rad]
tixi = cpsf.open_tixi(cpacs_path)
# Get aeromap uid
aeromap_uid = cpsf.get_value(tixi, aeromap_uid_xpath )
log.info('The following aeroMap will be analysed: ' + aeromap_uid)
# Mass configuration: (Maximum landing mass, Maximum ramp mass (the maximum weight authorised for the ground handling), Take off mass, Zero Fuel mass)
mass_config = cpsf.get_value(tixi, selected_mass_config_xpath)
log.info('The aircraft mass configuration used for analysis is: ' + mass_config)
# Analyses to do : longitudinal / Lateral-Directional
longitudinal_analysis = cpsf.get_value(tixi,longi_analysis_xpath)
lateral_directional_analysis = False
# lateral_directional_analysis = cpsf.get_value(tixi, direc_analysis_xpath )
# Plots configuration with Setting GUI
show_plots = cpsf.get_value_or_default(tixi,show_plot_xpath,False)
save_plots = cpsf.get_value_or_default(tixi,save_plot_xpath,False)
mass_config_xpath = masses_location_xpath + '/' + mass_config
if tixi.checkElement(mass_config_xpath):
mass_xpath = mass_config_xpath + '/mass'
I_xx_xpath = mass_config_xpath + '/massInertia/Jxx'
I_yy_xpath = mass_config_xpath + '/massInertia/Jyy'
I_zz_xpath = mass_config_xpath + '/massInertia/Jzz'
I_xz_xpath = mass_config_xpath + '/massInertia/Jxz'
else :
raise ValueError('The mass configuration : {} is not defined in the CPACS file !!!'.format(mass_config))
s = cpsf.get_value(tixi,ref_area_xpath) # Wing area: s, for non-dimensionalisation of aero data.
mac = cpsf.get_value(tixi,ref_length_xpath) # Ref length for non-dimensionalisation, mean aerodynamic chord: mac
# TODO: check that
b= s/mac
# TODO: find a way to get that
xh = 10 # Distance aircraft cg - ac of the horizontal tail plane.
m = cpsf.get_value(tixi,mass_xpath) # aircraft mass dimensional
I_xx = cpsf.get_value(tixi,I_xx_xpath) # X inertia dimensional
I_yy = cpsf.get_value(tixi,I_yy_xpath) # Y inertia dimensional
I_zz = cpsf.get_value(tixi,I_zz_xpath) # Z inertia dimensional
I_xz = cpsf.get_value(tixi,I_xz_xpath) # XZ inertia dimensional
aircraft_class = cpsf.get_value(tixi,aircraft_class_xpath ) # aircraft class 1 2 3 4
flight_phase = cpsf.get_string_vector(tixi, aircraft_cathegory_xpath)[0] # Flight phase A B C
Coeffs = apmf.get_aeromap(tixi,aeromap_uid) # Warning: Empty uID found! This might lead to unknown errors!
alt_list = Coeffs.alt
mach_list = Coeffs.mach
aoa_list = Coeffs.aoa
aos_list = Coeffs.aos
cl_list = Coeffs.cl
cd_list = Coeffs.cd
cs_list = Coeffs.cs
cml_list = Coeffs.cml
cms_list = Coeffs.cms
cmd_list = Coeffs.cmd
dcsdrstar_list = Coeffs.dcsdrstar
dcsdpstar_list = Coeffs.dcsdpstar
dcldqstar_list = Coeffs.dcldqstar
dcmsdqstar_list = Coeffs.dcmsdqstar
dcddqstar_list = Coeffs.dcddqstar
dcmldqstar_list = Coeffs.dcmldqstar
dcmddpstar_list = Coeffs.dcmddpstar
dcmldpstar_list = Coeffs.dcmldpstar
dcmldrstar_list = Coeffs.dcmldrstar
dcmddrstar_list = Coeffs.dcmddrstar
# All different values, each with only one occurrence
alt_unic = get_unic(alt_list)
mach_unic = get_unic(mach_list)
aos_unic = get_unic(aos_list)
aoa_unic = get_unic(aoa_list)
# TODO get from CPACS
incrementalMap = False
for alt in alt_unic:
idx_alt = [i for i in range(len(alt_list)) if alt_list[i] == alt]
Atm = get_atmosphere(alt)
g = Atm.grav
a = Atm.sos
rho = Atm.dens
for mach in mach_unic:
print('Mach : ' , mach)
idx_mach = [i for i in range(len(mach_list)) if mach_list[i] == mach]
u0,m_adim,i_xx,i_yy,i_zz,i_xz = adimensionalise(a,mach,rho,s,b,mac,m,I_xx,I_yy,I_zz,I_xz) # u0 is V0 in Cook
# Hyp: trim condition when: ( beta = 0 and dCm/dalpha = 0) OR ( aos=0 and dcms/daoa = 0 )
if 0 not in aos_unic :
log.warning('The aircraft cannot be trimmed (requiring symmetric flight condition) as beta is never equal to 0 for Alt = {}, mach = {}'.format(alt,mach))
else:
idx_aos = [i for i in range(len(aos_list)) if aos_list[i] == 0]
find_index = get_index(idx_alt, idx_mach, idx_aos)
# If there is only one data point at (alt, mach, aos) then do not perform the stability analysis
if len(find_index) <= 1:
log.warning('Not enough data at : Alt = {} , mach = {}, aos = 0, can not perform stability analysis'.format(alt,mach))
# If there are at least 2 data points at (alt, mach, aos) then perform the stability analysis
else:
# Calculate trim conditions
cms = []
aoa = []
cl = []
for index in find_index:
cms.append(cms_list[index])
aoa.append(aoa_list[index]*np.pi/180)
cl.append(cl_list[index])
cl_required = (m*g)/(0.5*rho*u0**2*s)
(trim_aoa , idx_trim_before, idx_trim_after, ratio) = trim_condition(alt, mach, cl_required, cl, aoa,)
if trim_aoa:
trim_aoa_deg = trim_aoa *180/np.pi
trim_cms = interpolation(cms, idx_trim_before, idx_trim_after, ratio)
pitch_moment_derivative_rad = (cms[idx_trim_after] - cms[idx_trim_before]) / (aoa[idx_trim_after] - aoa[idx_trim_before])
pitch_moment_derivative_deg = pitch_moment_derivative_rad / (180/np.pi)
# Find incremental cms
if incrementalMap :
for index, mach_number in enumerate(mach_unic,0):
if mach_number == mach :
mach_index = index
dcms_before = dcms_list[ mach_index*len(aoa_unic) + idx_trim_before]
dcms_after = dcms_list[ mach_index*len(aoa_unic) + idx_trim_after]
dcms = dcms_before + ratio*(dcms_after - dcms_before)
trim_elevator = - trim_cms / dcms # Trim elevator deflection in [°]
else:
dcms = None
trim_elevator = None
else:
trim_aoa_deg = None
trim_cms = None
pitch_moment_derivative_deg = None
dcms = None
trim_elevator = None
# Longitudinal dynamic stability,
# Stability analysis
if longitudinal_analysis and trim_cms:
cl = []
cd = []
dcldqstar = []
dcddqstar = []
dcmsdqstar = []
for index in find_index:
cl.append(cl_list[index])
cd.append(cd_list[index])
dcldqstar.append(dcldqstar_list[index])
dcddqstar.append(dcddqstar_list[index])
dcmsdqstar.append(dcmsdqstar_list[index])
# Trim variables
cd0 = interpolation(cd, idx_trim_before, idx_trim_after, ratio) # Drag coeff at trim
cl0 =interpolation(cl, idx_trim_before, idx_trim_after, ratio) # Lift coeff at trim
cl_dividedby_cd_trim = cl0/cd0 # cl/cd ratio at trim, at trim aoa
# Lift & drag coefficient derivative with respect to AOA at trimm
cl_alpha0 = (cl[idx_trim_after] - cl[idx_trim_before]) / (aoa[idx_trim_after] - aoa[idx_trim_before])
cd_alpha0 = (cd[idx_trim_after] - cd[idx_trim_before]) / (aoa[idx_trim_after] - aoa[idx_trim_before])
print(idx_trim_before, idx_trim_after, ratio)
dcddqstar0 = interpolation(dcddqstar, idx_trim_before, idx_trim_after, ratio) # x_q
dcldqstar0 = interpolation(dcldqstar, idx_trim_before, idx_trim_after, ratio) # z_q
dcmsdqstar0 = interpolation(dcmsdqstar, idx_trim_before, idx_trim_after, ratio) # m_q
cm_alpha0 = trim_cms
# Speed derivatives if there is at least 2 distinct mach values
if len(mach_unic) >=2 :
dcddm0 =speed_derivative_at_trim(cd_list, mach, mach_list, mach_unic, idx_alt, aoa_list, aos_list, idx_trim_before, idx_trim_after, ratio)
if dcddm0 == None :
dcddm0 = 0
log.warning('Not enough data to determine dcddm or (Cd_mach) at trim condition at Alt = {}, mach = {}, aoa = {}, aos = 0. Assumption: dcddm = 0'.format(alt,mach,round(trim_aoa_deg,2)))
dcldm0 =speed_derivative_at_trim (cl_list, mach, mach_list, mach_unic, idx_alt, aoa_list, aos_list, idx_trim_before, idx_trim_after, ratio)
if dcldm0 == None :
dcldm0 = 0
log.warning('Not enough data to determine dcldm (Cl_mach) at trim condition at Alt = {}, mach = {}, aoa = {}, aos = 0. Assumption: dcldm = 0'.format(alt,mach,round(trim_aoa_deg,2)))
else :
dcddm0 = 0
dcldm0 = 0
log.warning('Not enough data to determine dcddm (Cd_mach) and dcldm (Cl_mach) at trim condition at Alt = {}, mach = {}, aoa = {}, aos = 0. Assumption: dcddm = dcldm = 0'.format(alt,mach,round(trim_aoa_deg,2)))
# Controls Derivatives to be found in the CPACS (To be calculated)
dcddeta0 = 0
dcldeta0 = 0
dcmsdeta0 = 0
dcddtau0 = 0
dcldtau0 = 0
dcmsdtau0 = 0
# Translation CEASIOM -> Theory
Ue = u0*np.cos(trim_aoa) # *np.cos(aos) as aos = 0 at trim, cos(aos)=1
We = u0*np.sin(trim_aoa) # *np.cos(aos) as aos = 0 at trim, cos(aos)=1
# Dimensionless state-space variables,
# In generalised body axes coordinates ,
# simplifications: Ue=V0, We=0, sin(Theta_e)=0 cos(Theta_e)=0
if thrust_available: # If power data
X_u = -(2*cd0 + mach*dcddm0) + 1/(0.5*rho*s*a**2) * dtaudm0 # dtaudm0: dimensional thrust derivative at trim conditions (not defined here, must come from engine data), P340 <NAME>
else: # Glider Mode
X_u = -(2*cd0 + mach*dcddm0)
Z_u = -(2*cl0 + mach*dcldm0)
M_u = 0 # Negligible for subsonic conditions or better with P289 Yechout (cm_u+2cm0)
X_w = (cl0 - cd_alpha0 )
Z_w = -(cl_alpha0 + cd0)
M_w = cm_alpha0
X_q = dcddqstar0 # Normally almost = 0
Z_q = dcldqstar0
M_q = - dcmsdqstar0
X_dotw = 0 # Negligible
Z_dotw = 1/3 * M_q/u0 / (xh/mac) # Rule of thumb: M_alpha_dot = 1/3 Mq (not true for the 747: Caughey P83, M_alpha_dot = 1/6 Mq)
M_dotw = 1/3 * M_q /u0 # Rule of thumb: M_alpha_dot = 1/3 Mq
# Controls:
X_eta = dcddeta0 # To be found from the cpacs file, and defined by the user!
Z_eta = dcldeta0 # To be found from the cpacs file, and defined by the user!
M_eta = dcmsdeta0 # To be found from the cpacs file, and defined by the user!
X_tau = dcddtau0 # To be found from the cpacs file, and defined by the user!
Z_tau = dcldtau0 # To be found from the cpacs file, and defined by the user!
M_tau = dcmsdtau0 # To be found from the cpacs file, and defined by the user!
# ----------------- Translation CEASIOM -> Theory END -----------------------------------
# Sign check (Ref: Thomas Yechout Book, P304)
check_sign_longi(cd_alpha0,M_w,cl_alpha0,M_dotw,Z_dotw,M_q,Z_q,M_eta,Z_eta)
# Lateral-Directional
if lateral_directional_analysis:
cml = [] # N
cmd = [] # L
aos = []
aoa = [] # For Ue We
cs = [] # For y_v
dcsdpstar = [] # y_p
dcmddpstar = [] # l_p
dcmldpstar = [] # n_p
dcsdrstar = [] # y_r
dcmldrstar = [] # n_r
dcmddrstar = [] # l_r
for index in find_index:
cml.append(cml_list[index]) # N , N_v
cmd.append(cmd_list[index]) # L , L_v
aos.append(aos_list[index]*np.pi/180)
aoa.append(aoa_list[index]) # For Ue We
cs.append(cs_list[index])
dcsdpstar.append(dcsdpstar_list[index]) # y_p
dcmddpstar.append(dcmddpstar_list[index]) # l_p
dcmldpstar.append(dcmldpstar_list[index]) # n_p
dcsdrstar.append(dcsdrstar_list[index]) # y_r
dcmldrstar.append(dcmldrstar_list[index]) # n_r
dcmddrstar.append(dcmddrstar_list[index]) # l_r
# Trim condition calculation
# Speed derivatives: y_v / l_v / n_v / must be divided by speed given the hypothesis v = Beta*U
if len(aos_unic) >=2 :
print('Mach : ', mach, ' and idx_mach : ', idx_mach)
cs_beta0 = speed_derivative_at_trim_lat(cs_list , aos_list, aos_unic, idx_alt, idx_mach, aoa_list, idx_trim_before, idx_trim_after, ratio)# y_v
if cs_beta0 == None :
cs_beta0 = 0
log.warning('Not enough data to determine cs_beta (Y_v) at trim condition at Alt = {}, mach = {}, aoa = {}, aos = 0. Assumption: cs_beta = 0'.format(alt,mach,round(trim_aoa_deg,2)))
cmd_beta0 = speed_derivative_at_trim_lat(cmd_list , aos_list, aos_unic, idx_alt, idx_mach, aoa_list, idx_trim_before, idx_trim_after, ratio)# l_v
if cmd_beta0 ==None :
cmd_beta0 = 0
log.warning('Not enough data to determine cmd_beta (L_v) at trim condition at Alt = {}, mach = {}, aoa = {}, aos = 0. Assumption: cmd_beta = 0'.format(alt,mach,round(trim_aoa_deg,2)))
cml_beta0 = speed_derivative_at_trim_lat(cml_list , aos_list, aos_unic, idx_alt, idx_mach, aoa_list, idx_trim_before, idx_trim_after, ratio)# n_v
if cml_beta0 == None :
cml_beta0 = 0
log.warning('Not enough data to determine cml_beta (N_v) at trim condition at Alt = {}, mach = {}, aoa = {}, aos = 0. Assumption: cml_beta = 0'.format(alt,mach,round(trim_aoa_deg,2)))
else :
cs_beta0 = 0
cmd_beta0 = 0
cml_beta0 = 0
log.warning('Not enough data to determine cs_beta (Y_v), cmd_beta (L_v) and cml_beta (N_v) at trim condition at Alt = {}, mach = {}, aoa = {}, aos = 0. Assumption: cs_beta = cmd_beta = cml_beta = 0'.format(alt,mach,round(trim_aoa_deg,2)))
dcsdpstar0 = interpolation(dcsdpstar, idx_trim_before, idx_trim_after, ratio) # y_p
dcmddpstar0 = interpolation(dcmddpstar, idx_trim_before, idx_trim_after, ratio) # l_p
dcmldpstar0 = interpolation(dcmldpstar, idx_trim_before, idx_trim_after, ratio) # n_p
dcsdrstar0 =interpolation(dcsdrstar, idx_trim_before, idx_trim_after, ratio) # y_r
dcmldrstar0 = interpolation(dcmldrstar, idx_trim_before, idx_trim_after, ratio) # n_r
dcmddrstar0 = interpolation(dcmddrstar, idx_trim_before, idx_trim_after, ratio) # l_r
# TODO: calculate that and find in the cpacs
dcsdxi0 = 0
dcmddxi0 = 0
dcmldxi0 = 0
dcsdzeta0 = 0
dcmddzeta0 = 0
dcmldzeta0 = 0
# Translation CEASIOM -> Theory
Y_v = cs_beta0
L_v = cmd_beta0
N_v = cml_beta0
Y_p = -dcsdpstar0*mac/b
L_p = -dcmddpstar0*mac/b
N_p = dcmldpstar0*mac/b
Y_r = dcsdrstar0*mac/b
N_r = -dcmldrstar0*mac/b # mac/b :Because coefficients in ceasiom are nondimensionalised by the mac instead of the span
L_r = dcmddrstar0*mac/b
# Controls:
# Ailerons
Y_xi = dcsdxi0 # To be found from the cpacs file, and defined by the user!
L_xi = dcmddxi0 # To be found from the cpacs file, and defined by the user!
N_xi = dcmldxi0 # To be found from the cpacs file, and defined by the user!
# Rudder
Y_zeta = dcsdzeta0 # To be found from the cpacs file, and defined by the user!
L_zeta = dcmddzeta0 # To be found from the cpacs file, and defined by the user!
N_zeta = dcmldzeta0 # To be found from the cpacs file, and defined by the user!
Ue = u0*np.cos(trim_aoa) # *np.cos(aos) as aos = 0 at trim, cos(aos)=1
We = u0*np.sin(trim_aoa) # *np.cos(aos) as aos = 0 at trim, cos(aos)=1
# Sign check (Ref: Thomas Yechout Book, P304)
check_sign_lat(Y_v,L_v,N_v,Y_p,L_p,Y_r,L_r,N_r,L_xi,Y_zeta,L_zeta,N_zeta)
if trim_aoa :
for angles in flight_path_angle:
theta_e = angles + trim_aoa
if longitudinal_analysis :
(A_longi, B_longi, x_u,z_u,m_u,x_w,z_w,m_w, x_q,z_q,m_q,x_theta,z_theta,m_theta,x_eta,z_eta,m_eta, x_tau,z_tau,m_tau)\
= concise_derivative_longi(X_u,Z_u,M_u,X_w,Z_w,M_w,\
X_q,Z_q,M_q,X_dotw,Z_dotw,M_dotw,X_eta,Z_eta,M_eta,\
X_tau,Z_tau,M_tau, g, theta_e, u0,We,Ue,mac,m_adim,i_yy)
C_longi = np.identity(4)
D_longi = np.zeros((4,2))
# Identify longitudinal roots
if longi_root_identification(A_longi)[0] == None : # If longitudinal roots are not complex conjugate, raise a warning and plot the roots
eg_value_longi = longi_root_identification(A_longi)[1]
log.warning('Longi : characteristic equation roots are not complex conjugate : {}'.format(eg_value_longi))
legend = ['Root1', 'Root2', 'Root3', 'Root4']
plot_title = 'S-plane longitudinal characteristic equation roots at (Alt = {}, Mach= {}, trimmed at aoa = {}°)'.format(alt,mach,trim_aoa)
plot_splane(eg_value_longi, plot_title,legend,show_plots,save_plots)
else: # Longitudinal roots are complex conjugate
(sp1, sp2, ph1, ph2, eg_value_longi , eg_vector_longi, eg_vector_longi_magnitude)\
= longi_root_identification(A_longi)
legend = ['sp1', 'sp2', 'ph1', 'ph2']
plot_title = 'S-plane longitudinal characteristic equation roots at (Alt = {}, Mach= {}, trimmed at aoa = {}°)'.format(alt,mach,trim_aoa)
plot_splane(eg_value_longi, plot_title,legend,show_plots,save_plots)
# Mode parameters: damping ratio, frequency, CAP, time to double amplitude
Z_w_dimensional = Z_w*(0.5*rho*s*u0**2) # Z_w* (0.5*rho*s*u0**2) is the dimensional form of Z_w, Z_w = -(cl_alpha0 + cd0) P312 Yechout
z_alpha = Z_w_dimensional * u0 /m # alpha = w/u0 hence, z_alpha = Z_w_dimensional * u0 [Newton/rad/Kg : m/s^2 /rad]
load_factor = - z_alpha/g # number of g's/rad (1g/rad 2g/rad 3g/rad)
(sp_freq, sp_damp, sp_cap, ph_freq, ph_damp, ph_t2)\
= longi_mode_characteristic(sp1,sp2,ph1,ph2,load_factor)
# Rating
sp_damp_rate = short_period_damping_rating(aircraft_class,sp_damp)
sp_freq_rate = short_period_frequency_rating(flight_phase,aircraft_class,sp_freq, load_factor)
# Plot SP freq vs Load factor
legend = 'Alt = {}, Mach= {}, trim aoa = {}°'.format(alt,mach,trim_aoa)
if flight_phase == 'A' :
plot_sp_level_a([load_factor], [sp_freq], legend, show_plots,save_plots)
elif flight_phase == 'B' :
plot_sp_level_b([load_factor], [sp_freq], legend, show_plots,save_plots) # NOTE: assumed same arguments as plot_sp_level_a; x_axis/y_axis were undefined
else:
plot_sp_level_c([load_factor], [sp_freq], legend, show_plots,save_plots) # NOTE: assumed same arguments as plot_sp_level_a; x_axis/y_axis were undefined
sp_cap_rate = cap_rating(flight_phase, sp_cap, sp_damp)
ph_rate = phugoid_rating(ph_damp, ph_t2)
# Raise warning if unstable mode in the log file
if sp_damp_rate == None :
log.warning('ShortPeriod UNstable at Alt = {}, Mach = {} , due to DampRatio = {} '.format(alt,mach,round(sp_damp, 4)))
if sp_freq_rate == None :
log.warning('ShortPeriod UNstable at Alt = {}, Mach = {} , due to UnDampedFreq = {} rad/s '.format(alt,mach,round(sp_freq, 4)))
if sp_cap_rate == None :
log.warning('ShortPeriod UNstable at Alt = {}, Mach = {} , with CAP evaluation, DampRatio = {} , CAP = {} '.format(alt,mach,round(sp_damp, 4),round(sp_cap, 4)))
if ph_rate == None :
log.warning('Phugoid UNstable at Alt = {}, Mach = {} , DampRatio = {} , UnDampedFreq = {} rad/s'.format(alt,mach,round(ph_damp, 4),round(ph_freq, 4)))
# TODO
# Compute numerator TF for (Alt, mach, flight_path_angle, aoa_trim, aos=0)
if lateral_directional_analysis:
(A_direc, B_direc,y_v,l_v,n_v,y_p,y_phi,y_psi,l_p,l_phi,l_psi,n_p,y_r,l_r,n_r,n_phi,n_psi, y_xi,l_xi,n_xi, y_zeta,l_zeta,n_zeta)\
= concise_derivative_lat(Y_v,L_v,N_v,Y_p,L_p,N_p,Y_r,L_r,N_r,\
Y_xi,L_xi,N_xi, Y_zeta,L_zeta,N_zeta,\
g, b, theta_e, u0,We,Ue,m_adim,i_xx,i_zz,i_xz )
C_direc = np.identity(5)
D_direc = np.zeros((5,2))
if direc_root_identification(A_direc)[0] == None: # If lateral-directional roots are not complex conjugate, raise a warning and plot the roots
eg_value_direc = direc_root_identification(A_direc)[1]
log.warning('Lat-Dir : characteristic equation roots are not complex conjugate : {}'.format(eg_value_direc))
legend = ['Root1', 'Root2', 'Root3', 'Root4']
plot_title = 'S-plane lateral characteristic equation roots at (Alt = {}, Mach= {}, trimmed at aoa = {}°)'.format(alt,mach,trim_aoa)
plot_splane(eg_value_direc, plot_title,legend,show_plots,save_plots)
else: # Lateral-directional roots are correctly identified
(roll, spiral, dr1, dr2, eg_value_direc, eg_vector_direc, eg_vector_direc_magnitude)\
= direc_root_identification(A_direc)
legend = ['roll', 'spiral', 'dr1', 'dr2']
plot_title = 'S-plane lateral characteristic equation roots at (Alt = {}, Mach= {}, trimmed at aoa = {}°)'.format(alt,mach,trim_aoa)
plot_splane(eg_value_direc, plot_title,legend,show_plots,save_plots)
(roll_timecst, spiral_timecst, spiral_t2, dr_freq, dr_damp, dr_damp_freq) = direc_mode_characteristic(roll,spiral,dr1,dr2)
# Rating
roll_rate = roll_rating(flight_phase, aircraft_class, roll_timecst)
spiral_rate = spiral_rating(flight_phase, spiral_timecst, spiral_t2)
dr_rate = dutch_roll_rating(flight_phase, aircraft_class, dr_damp, dr_freq, dr_damp_freq)
# Raise warning in the log file if unstable mode
if roll_rate == None :
log.warning('Roll mode UNstable at Alt = {}, Mach = {} , due to roll root = {}, roll time constant = {} s'.format(alt,mach,round(roll, 4), round(roll_timecst, 4)))
if spiral_rate == None :
log.warning('Spiral mode UNstable at Alt = {}, Mach = {} , spiral root = {}, time_double_ampl = {}'.format(alt,mach,round(spiral, 4), round(spiral_t2, 4)))
if dr_rate == None :
log.warning('Dutch Roll UNstable at Alt = {}, Mach = {} , Damping Ratio = {} , frequency = {} rad/s '.format(alt,mach,round(dr_damp, 4),round(dr_freq, 4)))
# TODO: Save those value if code works
# Save Parameters for the flight conditions
# # xpath definition
# flight_case_uid = 'alt= mach= aoa= flightPathAngle'
# flight_case_xpath = model_xpath + '/analyses/flightDynamics/flightCases/flightCase'
# flight_case_uid_xpath = flight_case_xpath + '/flightCaseUID'
# trim_result_xpath = flight_case_uid_xpath + '/trimResult'
# linear_model_xpath = flight_case_uid_xpath + '/linearModel'
#
# flying_qality_uid_xpath = model_xpath + '/analyses/flyingQualities/fqCase'
# tf_longi_xpath = flying_qality_uid_xpath +'/longitudinal' # TF longi path
# tf_lat_xpath = flying_qality_uid_xpath + '/lateral' # TF lateral path
# parameters_xpath = flying_qality_uid_xpath + '/charParameters' # stability parameters dmaping etc..
# ratings_xpath = flying_qality_uid_xpath + '/ratings'
#
# # Flight case branche and UID
# cpsf.create_branch(tixi, flight_case_uid_xpath )
# tixi.updateTextElement(flight_case_uid_xpath, flight_case_uid )
# # Save trim results (alt, mach, aoa_trim)
# cpsf.create_branch(tixi,trim_result_xpath)
# tixi.updateDoubleElement(trim_result_xpath + '/altitude', mach, '%g')
# tixi.updateDoubleElement(trim_result_xpath + '/mach', mach, '%g')
# tixi.updateDoubleElement(trim_result_xpath + '/alpha', mach, '%g')
# # Save linerarisation matrixes
# cpsf.create_branch(tixi,linear_model_xpath )
# tixi.addFloatVector(linear_model_xpath + '/aLon', A_longi, '%g') # SHould be an arrayy!!!!!!
# tixi.addFloatVector(linear_model_xpath + '/bLon', B_longi, '%g')
# tixi.addFloatVector(linear_model_xpath + '/cLon', C_longi, '%g')
# tixi.addFloatVector(linear_model_xpath + '/dLon', D_longi, '%g')
# tixi.addFloatVector(linear_model_xpath + '/aLat', A_direc, '%g')
# tixi.addFloatVector(linear_model_xpath + '/bLat', B_direc, '%g')
# tixi.addFloatVector(linear_model_xpath + '/cLat', C_direc, '%g')
# tixi.addFloatVector(linear_model_xpath + '/dLat', D_direc, '%g')
# # Flying qualities branche and UID
# cpsf.create_branch(tixi, flying_qality_uid_xpath )
# tixi.updateTextElement(flying_qality_uid_xpath , flight_case_uid ) # Set UID
# tixi.updateIntegerElement(flying_qality_uid_xpath + '/class', aircraft_class, '%i') # Aircraft calss : 1 2 3
# tixi.updateTextElement(flying_qality_uid_xpath + '/category', flight_phase) # Aircraft calss : A B C
# # TF longi
# cpsf.create_branch(tixi, tf_longi_xpath )
# tixi.addFloatVector(tf_longi_xpath+'/denLon', delta_longi, '%g') # DEN Longi TF
# # TF lateral
# cpsf.create_branch(tixi, tf_lat_xpath )
# tixi.addFloatVector(tf_lat_xpath+'/denLat', delta_direc, '%g') # DEN Lateral-direction TF
# # Parameters
# cpsf.create_branch(tixi, parameters_xpath)
# tixi.updateDoubleElement(parameters_xpath + '/shortPeriod/nAlpha', load_factor, '%g') # Short period load factor
# tixi.updateDoubleElement(parameters_xpath + '/shortPeriod/spFrequency', sp_freq, '%g') # Short period frequency
# tixi.updateDoubleElement(parameters_xpath + '/shortPeriod/spDamping', sp_damp, '%g') # Short period dmaping
# tixi.updateDoubleElement(parameters_xpath + '/shortPeriod/cap', sp_cap, '%g') # Short period CAP
# tixi.updateDoubleElement(parameters_xpath + '/phugoid/phDamping', ph_damp, '%g') # Phugoid Damping
# tixi.updateDoubleElement(parameters_xpath + '/phugoid/phDoublingTime', ph_t2, '%g') #Phugoid Time to double amplitudes
# tixi.updateTextElement(parameters_xpath + '/rollSpiral', 'normal') # No coupling between roll and spiral mode
# tixi.updateDoubleElement(parameters_xpath + '/eiglat/dutchRollFrequency', dr_freq, '%g')
# tixi.updateDoubleElement(parameters_xpath + '/eiglat/dutchRollDamping', dr_damp, '%g')
# tixi.updateDoubleElement(parameters_xpath + '/eiglat/rollTimeConstant', roll_timecst, '%g')
# tixi.updateDoubleElement(parameters_xpath + '/eiglat/spiralDoublingTime', spiral_t2, '%g')
# # Parameters' rate
# cpsf.create_branch(tixi, ratings_xpath)
# tixi.updateIntegerElement(ratings_xpath + '/shortPeriod/spFrequency', sp_freq_rate, '%i') # Short period frequency
# tixi.updateIntegerElement(ratings_xpath + '/shortPeriod/spDamping', sp_damp_rate, '%i') # Short period dmaping
# tixi.updateIntegerElement(ratings_xpath + '/shortPeriod/cap', sp_cap_rate, '%i') # Short period CAP
# tixi.updateIntegerElement(ratings_xpath + '/phugoid/phDamping', ph_rate, '%i') # Phugoid Damping
# tixi.updateIntegerElement(ratings_xpath + '/phugoid/phDoublingTime', ph_rate, '%i') #Phugoid Time to double amplitudes
# tixi.updateTextElement(ratings_xpath + '/rollSpiral', 'normal') # No coubling between roll and spiral mode
# tixi.updateIntegerElement(ratings_xpath + '/eiglat/dutchRollFrequency', dr_rate, '%i')
# tixi.updateIntegerElement(ratings_xpath + '/eiglat/dutchRollDamping', dr_rate, '%i')
# tixi.updateIntegerElement(ratings_xpath + '/eiglat/rollTimeConstant', roll_rate, '%i')
# tixi.updateIntegerElement(ratings_xpath + '/eiglat/spiralDoublingTime', spiral_rate, '%i')
# TODO : compute TF polynoms from Cook (P 423 424) and save them using the following x_path
# # Xpath of longitudinal transfter function polynoms
# num_tf_elev_theta_xpath = flight_qualities_case_xpath + '/longitudinal/numThe'# numerator of TF pitch angle theta due to elevator deflection
# den_tf_longi_xpath = flight_qualities_case_xpath + '/longitudinal/denLon' # denominator of longitudinal motion
# # Xpath of lateral-directional transfter function polynoms of 5th order system
# num_tf_ail_phi_xpath = flight_qualities_case_xpath +'lateral/numPhiDas' # numerator of TF of aileron impact to bank angle, roll angle phi
# num_tf_ail_r_xpath = flight_qualities_case_xpath +'lateral/numRDas' # numerator of TF of aileron impact to yaw rate : r
# num_tf_ail_beta_xpath = flight_qualities_case_xpath +'lateral/numBetaDas' # numerator of TF of aileron impact to sideslip angle : beta
# num_tf_rud_r_xpath = flight_qualities_case_xpath +'lateral/numRDrp' # numerator of TF of rudder impact to yaw rate : r
# num_tf_rud_beta_xpath = flight_qualities_case_xpath +'lateral/numBetaDrp' # numerator of TF of rudder impact to sideslip angle : beta
# den_tf_latdir_xpath = flight_qualities_case_xpath + '/lateral/denLat' # denominator of longitudinal motion
if __name__ == '__main__':
log.info('----- Start of ' + MODULE_NAME + ' -----')
cpacs_path = mi.get_toolinput_file_path(MODULE_NAME)
cpacs_out_path = mi.get_tooloutput_file_path(MODULE_NAME)
# Call the function which checks if the inputs are well defined
mi.check_cpacs_input_requirements(cpacs_path)
# Call the main function for dynamic stability analysis
dynamic_stability_analysis(cpacs_path, cpacs_out_path)
log.info('----- End of ' + MODULE_NAME + ' -----')
```
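The concise longitudinal derivatives end up in the matrices A_longi, B_longi, C_longi (identity) and D_longi (zeros) of a standard linear state-space model. A minimal sketch, with placeholder matrices of the same shapes (4 states, 2 control inputs, not real aircraft derivatives), showing how scipy.signal (already imported in the module) can wrap them and how the characteristic-equation roots are obtained:

```python
import numpy as np
from scipy import signal

# Placeholder matrices, NOT real aircraft derivatives (4 states, 2 control inputs)
A = np.diag([-1.0, -0.5, -0.2, -0.05])
B = np.zeros((4, 2))
B[1, 0] = 0.1
C = np.identity(4)
D = np.zeros((4, 2))

sys_longi = signal.StateSpace(A, B, C, D)   # linear model x' = Ax + Bu, y = Cx + Du
roots = np.linalg.eigvals(A)                # characteristic-equation roots (cf. longi_root_identification)
print(roots)
```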
#### File: ceasiompy/SUMOAutoMesh/sumoautomesh.py
```python
import os
import sys
import math
import shutil
import platform
import ceasiompy.utils.ceasiompyfunctions as ceaf
import ceasiompy.utils.cpacsfunctions as cpsf
from ceasiompy.utils.ceasiomlogger import get_logger
log = get_logger(__file__.split('.')[0])
MODULE_DIR = os.path.dirname(os.path.abspath(__file__))
#==============================================================================
# CLASSES
#==============================================================================
#==============================================================================
# FUNCTIONS
#==============================================================================
def add_mesh_parameters(sumo_file_path, refine_level=0.0):
""" Function to add mesh parameter options in SUMO geometry (.smx file)
Function 'add_mesh_parameters' is used to add meshing parameters to the SUMO
geometry (.smx file) in order to get finer meshes. The only user input is
the refinement level. A level of 0 corresponds to the default (close to the
values obtained with the SUMO GUI); increasing the refinement level by 1
yields approximately twice as many cells in the mesh. Float values are also
accepted (e.g. refine_level=2.4).
Source :
* sumo source code
Args:
sumo_file_path (str): Path to the SUMO geometry (.smx)
refine_level (float): Mesh refinement level (0 = default)
"""
refine_ratio = 0.6 # to get approx. double mesh cell when +1 on "refine_level"
refine_factor = refine_ratio**refine_level
log.info('Refinement factor is {}'.format(refine_factor))
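# Worked example of the relation above (illustrative values only, following the
# "approx. double the cells per +1 level" note on refine_ratio):
#   refine_level = 0  ->  refine_factor = 0.6**0 = 1.0   (default edge lengths)
#   refine_level = 1  ->  refine_factor = 0.6**1 = 0.6   (~2x more cells)
#   refine_level = 2  ->  refine_factor = 0.6**2 = 0.36  (~4x more cells)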
# Open SUMO (.smx) with tixi library
sumo = cpsf.open_tixi(sumo_file_path)
ROOT_XPATH = '/Assembly'
# Get all Body (fuselage) and apply mesh parameters
if sumo.checkElement(ROOT_XPATH):
body_cnt = sumo.getNamedChildrenCount(ROOT_XPATH, 'BodySkeleton')
log.info(str(body_cnt) + ' body has been found.')
else:
body_cnt = 0
log.warning('No Fuselage has been found in this SUMO file!')
for i_body in range(body_cnt):
body_xpath = ROOT_XPATH + '/BodySkeleton[' + str(i_body+1) + ']'
circ_list = []
min_radius = 10e6
# Go through every Body frame (fuselage section)
frame_cnt = sumo.getNamedChildrenCount(body_xpath, 'BodyFrame')
for i_sec in range(frame_cnt):
frame_xpath = body_xpath + '/BodyFrame[' + str(i_sec+1) + ']'
# Estimate circumference and add to the list
height = sumo.getDoubleAttribute(frame_xpath,'height')
width = sumo.getDoubleAttribute(frame_xpath,'width')
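# The section circumference is approximated from the quadratic mean of the
# frame height and width (the cross section is treated as ellipse-like).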
circ = 2 * math.pi * math.sqrt((height**2 + width**2) / 2)
circ_list.append(circ)
# Get overall min radius (semi-minor axis for an ellipse)
min_radius = min(min_radius,height,width)
mean_circ = sum(circ_list) / len(circ_list)
# Calculate mesh parameters from inputs and geometry
maxlen = (0.08 * mean_circ) * refine_factor
minlen = min(0.1* maxlen, min_radius/4) * refine_factor # in SUMO, it is min_radius/2, but sometimes it leads to meshing errors
# Add mesh parameters in the XML file (.smx)
meshcrit_xpath = body_xpath + '/MeshCriterion'
if not sumo.checkElement(meshcrit_xpath):
sumo.addTextElement(body_xpath, 'MeshCriterion','')
sumo.addTextAttribute(meshcrit_xpath, 'defaults', 'false')
sumo.addTextAttribute(meshcrit_xpath, 'maxlen', str(maxlen))
sumo.addTextAttribute(meshcrit_xpath, 'minlen', str(minlen))
sumo.addTextAttribute(meshcrit_xpath, 'maxphi', '30')
sumo.addTextAttribute(meshcrit_xpath, 'maxstretch', '6')
sumo.addTextAttribute(meshcrit_xpath, 'nvmax', '1073741824')
sumo.addTextAttribute(meshcrit_xpath, 'xcoarse', 'false')
# Change fuselage caps
cap_cnt = sumo.getNamedChildrenCount(body_xpath, 'Cap')
for i_cap in range(cap_cnt):
cap_xpath = body_xpath + '/Cap[1]'
sumo.removeElement(cap_xpath)
sumo.addTextElementAtIndex(body_xpath,'Cap','',1)
cap1_xpath = body_xpath + '/Cap[1]'
sumo.addTextAttribute(cap1_xpath, 'height', '0')
sumo.addTextAttribute(cap1_xpath, 'shape', 'LongCap')
sumo.addTextAttribute(cap1_xpath, 'side', 'south')
cap2_xpath = body_xpath + '/Cap[2]'
sumo.addTextElementAtIndex(body_xpath,'Cap','',2)
sumo.addTextAttribute(cap2_xpath, 'height', '0')
sumo.addTextAttribute(cap2_xpath, 'shape', 'LongCap')
sumo.addTextAttribute(cap2_xpath, 'side', 'north')
# Go through every Wing and apply mesh parameters
if sumo.checkElement(ROOT_XPATH):
wing_cnt = sumo.getNamedChildrenCount(ROOT_XPATH, 'WingSkeleton')
log.info(str(wing_cnt) + ' wing(s) has been found.')
else:
wing_cnt = 0
log.warning('No wing has been found in this SUMO file!')
for i_wing in range(wing_cnt):
wing_xpath = ROOT_XPATH + '/WingSkeleton[' + str(i_wing+1) + ']'
chord_list = []
# Go through every WingSection
section_cnt = sumo.getNamedChildrenCount(wing_xpath, 'WingSection')
for i_sec in range(section_cnt):
section_xpath = wing_xpath + '/WingSection[' + str(i_sec+1) + ']'
chord_length = sumo.getDoubleAttribute(section_xpath,'chord')
chord_list.append(chord_length)
# In SUMO refChord is calculated from Area and Span, but this is not
# trivial to get those value for each wing from the .smx file
ref_chord = sum(chord_list) / len(chord_list)
# Calculate mesh parameter from inputs and geometry
maxlen = (0.15 * ref_chord) * refine_factor
minlen = (0.08* maxlen) * refine_factor # in sumo it is 0.08*maxlen or 0.7*min leading edge radius...?
if refine_level > 1:
lerfactor = 1 / (2.0 + 0.5 * (refine_level-1))
terfactor = 1 / (2.0 + 0.5 * (refine_level-1))
else:
# correspond to the default value in SUMO
lerfactor = 1 / 2.0
terfactor = 1 / 2.0
# Add mesh parameters in the XML file (.smx)
meshcrit_xpath = wing_xpath + '/WingCriterion'
if not sumo.checkElement(meshcrit_xpath):
sumo.addTextElement(wing_xpath, 'WingCriterion','')
sumo.addTextAttribute(meshcrit_xpath, 'defaults', 'false')
sumo.addTextAttribute(meshcrit_xpath, 'maxlen', str(maxlen))
sumo.addTextAttribute(meshcrit_xpath, 'minlen', str(minlen))
sumo.addTextAttribute(meshcrit_xpath, 'lerfactor', str(lerfactor))
sumo.addTextAttribute(meshcrit_xpath, 'terfactor', str(terfactor))
sumo.addTextAttribute(meshcrit_xpath, 'maxphi', '30')
sumo.addTextAttribute(meshcrit_xpath, 'maxstretch', '6')
sumo.addTextAttribute(meshcrit_xpath, 'nvmax', '1073741824')
sumo.addTextAttribute(meshcrit_xpath, 'xcoarse', 'false')
cpsf.close_tixi(sumo, sumo_file_path)
def create_SU2_mesh(cpacs_path,cpacs_out_path):
""" Function to create a simple SU2 mesh form an SUMO file (.smx)
Function 'create_mesh' is used to generate an unstructured mesh with SUMO
(which integrage Tetgen for the volume mesh) using a SUMO (.smx) geometry
file as input.
Meshing option could be change manually (only in the script for now)
Source :
* sumo help, tetgen help (in the folder /doc)
Args:
cpacs_path (str): Path to the CPACS file
cpacs_out_path (str): Path to the output CPACS file
"""
tixi = cpsf.open_tixi(cpacs_path)
wkdir = ceaf.get_wkdir_or_create_new(tixi)
sumo_dir = os.path.join(wkdir,'SUMO')
if not os.path.isdir(sumo_dir):
os.mkdir(sumo_dir)
su2_mesh_path = os.path.join(sumo_dir,'ToolOutput.su2')
meshdir = os.path.join(wkdir,'MESH')
if not os.path.isdir(meshdir):
os.mkdir(meshdir)
original_dir = os.getcwd()
os.chdir(sumo_dir)
sumo_file_xpath = '/cpacs/toolspecific/CEASIOMpy/filesPath/sumoFilePath'
sumo_file_path = cpsf.get_value_or_default(tixi,sumo_file_xpath,'')
if sumo_file_path == '':
raise ValueError('No SUMO file to use to create a mesh')
# Set mesh parameters
log.info('Mesh parameter will be set')
refine_level_xpath = '/cpacs/toolspecific/CEASIOMpy/mesh/sumoOptions/refinementLevel'
refine_level = cpsf.get_value_or_default(tixi,refine_level_xpath,0.0)
log.info('Refinement level is {}'.format(refine_level))
add_mesh_parameters(sumo_file_path,refine_level)
# Check current Operating System
current_os = platform.system()
if current_os == 'Darwin':
log.info('Your OS is Mac\n\n')
log.info('!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!')
log.info('On MacOS the mesh has to be generated manually.')
log.info('To create a SU2Mesh you have to :')
log.info('Open the .smx geometry that you will find there:')
log.info(sumo_file_path)
log.info('Click on the button "Mesh"')
log.info('Click on "Create Mesh"')
log.info('Click on "Volume Mesh"')
log.info('Click on "Run"')
log.info('When the mesh generation is completed, click on "Close"')
log.info('Go to the Menu "Mesh" -> "Save volume mesh..."')
log.info('Choose "SU2 (*.su2)" as File Type')
log.info('Copy/Paste the following line as File Name')
log.info(su2_mesh_path)
log.info('Click on "Save"')
log.info('You can now close SUMO, your workflow will continue.')
log.info('More information: https://ceasiompy.readthedocs.io/en/latest/user_guide/modules/SUMOAutoMesh/index.html')
log.info('!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n\n')
# For now, I did not find a way to run "sumo -batch" on Mac...
# The command just open SUMO GUI, the mesh has to be generate and save manually
command = ['open','/Applications/SUMO/dwfsumo.app/']
os.system(' '.join(command))
elif current_os == 'Linux':
log.info('Your OS is Linux')
# Check if SUMO is installed
soft_dict = ceaf.get_install_path(['sumo'])
# Run SUMO in batch
output = '-output=su2'
options = '-tetgen-options=pq1.16VY' # See Tetgen help for more options, maybe transform that as an input
# Command line to run: sumo -batch -output=su2 -tetgen-options=pq1.16VY ToolOutput.smx
command = [soft_dict['sumo'],'-batch',output,options,sumo_file_path]
os.system(' '.join(command))
elif current_os == 'Windows':
log.info('Your OS is Windows')
# TODO: develop this part
log.warning('OS not supported yet by SUMOAutoMesh!')
raise OSError('OS not supported yet!')
else:
raise OSError('OS not recognized!')
# Copy the mesh in the MESH directory
aircraft_name = cpsf.aircraft_name(tixi)
su2_mesh_name = aircraft_name + '_baseline.su2'
su2_mesh_new_path = os.path.join(meshdir,su2_mesh_name)
shutil.copyfile(su2_mesh_path, su2_mesh_new_path)
if os.path.isfile(su2_mesh_new_path):
log.info('An SU2 Mesh has been correctly generated.')
su2_mesh_xpath = '/cpacs/toolspecific/CEASIOMpy/filesPath/su2Mesh'
cpsf.create_branch(tixi,su2_mesh_xpath)
tixi.updateTextElement(su2_mesh_xpath,su2_mesh_new_path)
os.remove(su2_mesh_path)
else:
raise ValueError('No SU2 Mesh file has been generated!')
cpsf.close_tixi(tixi, cpacs_out_path)
os.chdir(original_dir)
#==============================================================================
# MAIN
#==============================================================================
if __name__ == '__main__':
log.info('----- Start of ' + os.path.basename(__file__) + ' -----')
cpacs_path = os.path.join(MODULE_DIR,'ToolInput','ToolInput.xml')
cpacs_out_path = os.path.join(MODULE_DIR,'ToolOutput','ToolOutput.xml')
create_SU2_mesh(cpacs_path,cpacs_out_path)
log.info('----- End of ' + os.path.basename(__file__) + ' -----')
```
#### File: func/AoutFunc/cpacsweightupdate.py
```python
import ceasiompy.utils.cpacsfunctions as cpsf
from ceasiompy.utils.ceasiomlogger import get_logger
log = get_logger(__file__.split('.')[0])
#=============================================================================
# CLASSES
#=============================================================================
"""
The InsideDimensions class can be found in the InputClasses folder, inside
the weightconvclass.py script.
"""
#=============================================================================
# FUNCTIONS
#=============================================================================
def cpacs_update(mw, out, cpacs_path, cpacs_out_path):
""" The function updates the cpacs file after the Weight analysis.
Args:
mw (class) : MassesWeights class
out (class) : WeightOutput class
cpacs_path (str) : Path to the CPACS file
cpacs_out_path (str) : Path to the output CPACS file
"""
tixi = cpsf.open_tixi(cpacs_out_path) # (because it has been modified somewhere else, TODO: change that)
# Path definition
WEIGHT_XPATH = '/cpacs/toolspecific/CEASIOMpy/weight'
CREW_XPATH = WEIGHT_XPATH + '/crew'
PASS_XPATH = WEIGHT_XPATH + '/passengers'
# Path update
if not tixi.checkElement(CREW_XPATH+'/cabinCrewMembers/cabinCrewMemberNb'):
cpsf.create_branch(tixi,CREW_XPATH+'/cabinCrewMembers/cabinCrewMemberNb')
tixi.updateDoubleElement(CREW_XPATH+'/cabinCrewMembers/cabinCrewMemberNb',\
out.cabin_crew_nb, '%g')
if not tixi.checkElement(PASS_XPATH + '/passNb'):
tixi.createElement(PASS_XPATH, 'passNb')
tixi.updateIntegerElement(PASS_XPATH + '/passNb', out.pass_nb, '%i')
if not tixi.checkElement(PASS_XPATH + '/rowNb'):
tixi.createElement(PASS_XPATH, 'rowNb')
tixi.updateIntegerElement(PASS_XPATH + '/rowNb', out.row_nb, '%i')
if not tixi.checkElement(PASS_XPATH + '/aisleNb'):
tixi.createElement(PASS_XPATH, 'aisleNb')
tixi.updateIntegerElement(PASS_XPATH + '/aisleNb', out.aisle_nb, '%i')
if not tixi.checkElement(PASS_XPATH + '/toiletNb'):
tixi.createElement(PASS_XPATH, 'toiletNb')
tixi.updateIntegerElement(PASS_XPATH + '/toiletNb', out.toilet_nb, '%i')
if not tixi.checkElement(PASS_XPATH + '/abreastNb'):
tixi.createElement(PASS_XPATH, 'abreastNb')
tixi.updateIntegerElement(PASS_XPATH + '/abreastNb', out.abreast_nb, '%i')
if not tixi.checkElement(PASS_XPATH + '/fuelMassMaxpass'):
tixi.createElement(PASS_XPATH, 'fuelMassMaxpass')
FMP_XPATH = PASS_XPATH + '/fuelMassMaxpass'
if not tixi.checkElement(FMP_XPATH + '/description'):
tixi.createElement(FMP_XPATH, 'description')
tixi.updateTextElement(FMP_XPATH + '/description', 'Maximum amount of '\
+ 'fuel with maximum payload [kg]')
if not tixi.checkElement(FMP_XPATH + '/mass'):
tixi.createElement(FMP_XPATH, 'mass')
tixi.updateDoubleElement(FMP_XPATH + '/mass', mw.mass_fuel_maxpass, '%g')
# CPACS MASS BREAKDOWN UPDATE
# Path creation
MB_XPATH = '/cpacs/vehicles/aircraft/model/analyses/massBreakdown'
if tixi.checkElement(MB_XPATH):
tixi.removeElement(MB_XPATH)
MD_XPATH = MB_XPATH + '/designMasses'
MTOM_XPATH = MD_XPATH + '/mTOM'
MZFM_XPATH = MD_XPATH + '/mZFM'
MF_XPATH = MB_XPATH + '/fuel/massDescription'
OEM_XPATH = MB_XPATH + '/mOEM/massDescription'
PAY_XPATH = MB_XPATH + '/payload/massDescription'
MC_XPATH = MB_XPATH + '/payload/mCargo'
OIM_XPATH = MB_XPATH + '/mOEM/mOperatorItems/mCrewMembers/massDescription'
cpsf.create_branch(tixi, MTOM_XPATH + '/mass', False)
cpsf.create_branch(tixi, MZFM_XPATH + '/mass', False)
cpsf.create_branch(tixi, MF_XPATH + '/mass', False)
cpsf.create_branch(tixi, OEM_XPATH + '/mass', False)
cpsf.create_branch(tixi, PAY_XPATH + '/mass', False)
cpsf.create_branch(tixi, MC_XPATH, False)
cpsf.create_branch(tixi, OIM_XPATH + '/mass', False)
# DESIGN MASSES
cpsf.add_uid(tixi, MTOM_XPATH, 'MTOM')
tixi.createElement(MTOM_XPATH, 'name')
tixi.updateTextElement(MTOM_XPATH + '/name', 'Maximum take-off mass')
tixi.createElement(MTOM_XPATH, 'description')
tixi.updateTextElement(MTOM_XPATH + '/description', 'Maximum '\
+ 'take off mass [kg], CoG coordinate [m] and '\
+ 'moment of inertia.')
tixi.updateDoubleElement(MTOM_XPATH + '/mass', mw.maximum_take_off_mass, '%g')
# MZFM
cpsf.add_uid(tixi, MZFM_XPATH, 'MZFM')
tixi.createElement(MZFM_XPATH, 'name')
tixi.updateTextElement(MZFM_XPATH + '/name', 'Maximum zero fuel mass')
tixi.createElement(MZFM_XPATH, 'description')
tixi.updateTextElement(MZFM_XPATH + '/description', 'Maximum '\
+ 'zero fuel mass [kg] and corresponding CoG '\
+ 'coordinate [m], moment of inertia.')
tixi.updateDoubleElement(MZFM_XPATH + '/mass', mw.zero_fuel_mass, '%g')
# FUEL MASS
cpsf.add_uid(tixi, MF_XPATH, 'MFM')
tixi.createElement(MF_XPATH, 'name')
tixi.updateTextElement(MF_XPATH + '/name', 'Max fuel mass')
tixi.createElement(MF_XPATH, 'description')
tixi.updateTextElement(MF_XPATH + '/description', 'Maximum fuel mass [kg]')
tixi.updateDoubleElement(MF_XPATH + '/mass', mw.mass_fuel_max, '%g')
# OEM
cpsf.add_uid(tixi, OEM_XPATH, 'OEM')
tixi.createElement(OEM_XPATH, 'name')
tixi.updateTextElement(OEM_XPATH + '/name', 'Operating empty mass')
tixi.createElement(OEM_XPATH, 'description')
tixi.updateTextElement(OEM_XPATH + '/description', 'Operating empty'\
+ ' mass [kg] and related inertia [kgm^2].')
tixi.updateDoubleElement(OEM_XPATH + '/mass', mw.operating_empty_mass, '%g')
tixi.updateDoubleElement(OIM_XPATH + '/mass', mw.mass_crew, '%g')
cpsf.add_uid(tixi, OIM_XPATH, 'massCrew')
# PAYLOAD MASS AND FUEL WITH MAX PAYLOAD
cpsf.add_uid(tixi, PAY_XPATH, 'MPM')
tixi.createElement(PAY_XPATH, 'name')
tixi.updateTextElement(PAY_XPATH + '/name', 'Max payload mass')
tixi.createElement(PAY_XPATH, 'description')
tixi.updateTextElement(PAY_XPATH + '/description', 'Maximum '\
+ 'payload mass [kg].')
tixi.updateDoubleElement(PAY_XPATH + '/mass', mw.mass_payload, '%g')
if mw.mass_cargo:
tixi.createElement(MC_XPATH, 'massCargo')
tixi.updateDoubleElement(MC_XPATH + '/massCargo', mw.mass_cargo, '%g')
cpsf.close_tixi(tixi, cpacs_out_path)
#=============================================================================
# MAIN
#=============================================================================
if __name__ == '__main__':
log.warning('###########################################################')
log.warning('#### ERROR NOT A STANDALONE PROGRAM, RUN weightmain.py ####')
log.warning('###########################################################')
```
#### File: func/Passengers/passengers.py
```python
import math
from ceasiompy.utils.ceasiomlogger import get_logger
log = get_logger(__file__.split('.')[0])
#=============================================================================
# CLASSES
#=============================================================================
"""
The InsideDimensions class can be found in the InputClasses folder, inside
the weightconvclass.py script.
"""
#=============================================================================
# FUNCTIONS
#=============================================================================
def estimate_passengers(PASS_PER_TOILET, cabin_length, fuse_width, ind):
""" The function evaluates the maximum number of passengers that can sit in
the airplane, taking into account also the necessity of common space for
big airplanes and a number of toilets in relation with the number
of passengers.
Args:
PASS_PER_TOILET(int): Number of passengers per toilet [-]
cabin_length (float): Cabin length [m]
fuse_width (float): Fuselage width [m]
ind (class): InsideDimensions class [-]
Returns:
pass_nb (int): Number of passengers [-]
row_nb (int): Number of seat rows [-]
abreast_nb (int): Number of seat abreasts [-]
aisle_nb (int): Number of aisles [-]
toilet_nb (int): Number of toilets [-]
ind (class): InsideDimensions class updated [-]
"""
cabin_width = fuse_width * (1-(ind.fuse_thick/100))
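# The cabin-width thresholds (m) below select the number of aisles and the
# maximum number of consecutive seats allowed in a single block.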
if cabin_width < 4.89:
aisle_nb = 1
max_abreasts = 3
max_ab2 = 3
elif cabin_width < 7.6 :
aisle_nb = 2
max_abreasts = 4
max_ab2 = 5
else:
aisle_nb = 3
max_abreasts = 4
max_ab2 = 7
abreast_nb = math.floor((fuse_width/(1 + (ind.fuse_thick/100))\
- aisle_nb*ind.aisle_width)/ind.seat_width)
if (int(round(abreast_nb/2.0,0) - max_ab2) > 0):
add = int(round(abreast_nb/2.0,0) - max_ab2)
log.warning('Configuration with ' + str(max_abreasts + add)\
+ ' consecutive seats')
if ((max_abreasts + add >= 3 and aisle_nb == 1)\
or ((max_abreasts + add >= 5 and aisle_nb > 1))):
log.warning('Reducing it to ' + str(max_abreasts)\
+ ' and increasing the seat width')
while add != 0:
ind.seat_width = ind.seat_width + 0.01*(add)
abreast_nb = math.floor((fuse_width/(1 + (ind.fuse_thick/100))\
- aisle_nb*ind.aisle_width)/ind.seat_width)
add = int(round(abreast_nb/2.0,0) - max_ab2)
log.warning('Seats width increased to [m]:' + str(ind.seat_width))
if ind.seat_width < 0.4:
log.warning('Seats width less than 0.4 m, seats are too small')
check_width= ind.aisle_width*aisle_nb + ind.seat_width*abreast_nb
if round(abs(cabin_width-check_width),1) > 0.01:
if check_width > cabin_width:
log.warning('Not enough lateral space')
log.info('It is possible to modify the seat width by: '\
+ str(round((cabin_width-check_width) / (abreast_nb),2)\
- 0.01) + ' m')
log.info('or')
log.info('It is possible to modify the aisle width by: '\
+ str(round((cabin_width-check_width) / (aisle_nb),2)\
- 0.01) + ' m')
log.info('or')
log.info('It is possible to modify both seat and '\
+ 'aisle width by: ' + str(round((cabin_width-check_width)\
/ (abreast_nb+aisle_nb),2) - 0.01) + ' m')
row_nb = round((cabin_length) / ind.seat_length,0)
pass_nb = abreast_nb * row_nb
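# Iteratively balance the number of toilets against the number of seat rows:
# adding toilets consumes cabin length, which reduces the number of rows (and
# hence passengers), which in turn changes the required toilet count. The loop
# stops once the two values are consistent, or after a few attempts.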
control = 0
count = 0
a = 0
while control == 0:
count += 1
if a != 1:
toilet_nb = round(pass_nb / PASS_PER_TOILET,1)
a = 0
Tot_T_L = int(round((toilet_nb / 2.0),0))
row_nb = round((cabin_length-Tot_T_L*ind.toilet_length)/ind.seat_length,0)
pass_nb = abreast_nb * row_nb
if abs(int(toilet_nb) - int(round(pass_nb / PASS_PER_TOILET,0))) == 0:
control = 1
elif abs(int(toilet_nb) - int(round(pass_nb / PASS_PER_TOILET,0))) <= 1:
if (int(toilet_nb) % 2 == 0 and\
int(toilet_nb) > int(round(pass_nb / PASS_PER_TOILET,0))):
control = 2
elif (int(toilet_nb) % 2 != 0 and\
int(toilet_nb) < int(round(pass_nb / PASS_PER_TOILET,0))):
toilet_nb = int(round(pass_nb / PASS_PER_TOILET,0))
control = 3
elif count > 5:
control = 4
elif (abs(int(toilet_nb) - int(round(pass_nb / PASS_PER_TOILET,0)))\
% 2 == 0):
toilet_nb = abs(int(toilet_nb)\
+ int(round(pass_nb / PASS_PER_TOILET,0))) / 2
a = 1
elif count > 10:
control = 5
check_length = row_nb*ind.seat_length + round((toilet_nb/2),0)*ind.toilet_length
if round(abs((cabin_length) - (check_length))/(row_nb),2) > 0.01:
if check_length > cabin_length:
log.warning('------------------- Warning -----------------')
log.warning(' Not enough longitudinal space --------------')
log.warning(' Reduce seat length -------------------------')
log.info('It is possible to modify the seat length by: '+\
str(round((cabin_length - check_length) / (row_nb),2)\
- 0.01) + ' m')
log.info('------------ Seating estimation -------------')
log.info(' Nb of abreasts: ' + str(abreast_nb))
log.info(' Nb of row: ' + str(row_nb))
log.info(' Nb of passengers: ' + str(pass_nb))
log.info(' Nb of Toilets: ' + str(int(toilet_nb)))
ind.cabin_width = cabin_width
ind.cabin_area = cabin_length * cabin_width
return (int(pass_nb), int(row_nb), int(abreast_nb),\
int(aisle_nb), int(toilet_nb), ind)
#==============================================================================
# MAIN
#==============================================================================
if __name__ == '__main__':
log.warning('###########################################################')
log.warning('#### ERROR NOT A STANDALONE PROGRAM, RUN weightmain.py ####')
log.warning('###########################################################')
```
#### File: ceasiompy/WorkflowCreator/workflowcreator.py
```python
import os
import sys
import shutil
import tkinter as tk
from tkinter import ttk
from tkinter import messagebox, filedialog
import ceasiompy.utils.workflowfunctions as wkf
import ceasiompy.utils.ceasiompyfunctions as ceaf
import ceasiompy.utils.cpacsfunctions as cpsf
import ceasiompy.utils.moduleinterfaces as mi
from ceasiompy.Optimisation.optimisation import routine_launcher
from ceasiompy.utils.ceasiomlogger import get_logger
log = get_logger(__file__.split('.')[0])
import ceasiompy.__init__
LIB_DIR = os.path.dirname(ceasiompy.__init__.__file__)
MODULE_DIR = os.path.dirname(os.path.abspath(__file__))
MODULE_NAME = os.path.basename(os.getcwd())
# ==============================================================================
# CLASSES
# ==============================================================================
class WorkflowOptions:
""" Class to pass option of the workflow """
def __init__(self):
cpacs_path = mi.get_toolinput_file_path(MODULE_NAME)
if os.path.isfile(cpacs_path):
self.cpacs_path = cpacs_path
else:
self.cpacs_path = ''
self.optim_method = 'None' # 'None', 'Optim', 'DoE'
self.module_pre = []
self.module_optim = []
self.module_post = []
class Tab(tk.Frame):
""" Class to create tab in the WorkflowCreator GUI """
def __init__(self, master, name, **kwargs):
tk.Frame.__init__(self, master, **kwargs)
self.name = name
# Get list of available modules
self.modules_list = mi.get_submodule_list()
self.modules_list.sort()
self.modules_list.remove('SettingsGUI')
self.modules_list.insert(0,'SettingsGUI')
self.modules_list.remove('CPACSUpdater')
self.modules_list.remove('WorkflowCreator')
self.modules_list.remove('utils')
try:
self.modules_list.remove('WKDIR')
except:
log.info('No WKDIR yet.')
self.selected_list = []
row_pos = 0
if name == 'Optim':
label_optim = tk.Label(self, text='Optimisation method')
label_optim.grid(column=0, row=0, columnspan=1,pady=10)
# The Combobox is used directly as the variable
optim_choice = ['None', 'DoE', 'Optim']
self.optim_choice_CB = ttk.Combobox(self, values=optim_choice, width=15)
self.optim_choice_CB.grid(column=4, row=row_pos)
row_pos += 1
# ListBox with all available modules
tk.Label(self, text='Available modules').grid(column=0, row=row_pos, pady=5)
self.LB_modules = tk.Listbox(self, selectmode=tk.SINGLE, width=25, height=len(self.modules_list))
item_count = len(self.modules_list)
self.LB_modules.grid(column=0, row=row_pos+1, columnspan=3, rowspan=15, padx=10, pady=3)
for item in self.modules_list:
self.LB_modules.insert(tk.END, item)
# Button
addButton = tk.Button(self, text=' Add > ', command=self._add)
addButton.grid(column=4, row=row_pos+1)
removeButton = tk.Button(self, text='< Remove', command=self._remove)
removeButton.grid(column=4, row=row_pos+2)
upButton = tk.Button(self, text=' Up ^ ', command=self._up)
upButton.grid(column=4, row=row_pos+3)
downButton = tk.Button(self, text=' Down v ', command=self._down)
downButton.grid(column=4, row=row_pos+4)
# ListBox with all selected modules
tk.Label(self, text='Selected modules').grid(column=5, row=row_pos)
self.LB_selected = tk.Listbox(self, selectmode=tk.SINGLE, width=25, height=len(self.modules_list))
self.LB_selected.grid(column=5, row=row_pos+1, columnspan=3, rowspan=15, padx=10, pady=3)
for item in self.selected_list:
self.LB_selected.insert(tk.END, item)
row_pos += (item_count + 1)
def _add(self, event=None):
""" Function of the button add: to pass a module from Available module
list to Selected module list"""
try:
select_item = [self.LB_modules.get(i) for i in self.LB_modules.curselection()]
self.selected_list.append(select_item[0])
self.LB_selected.insert(tk.END, select_item)
except IndexError:
self.selected_item = None
def _remove(self, event=None):
""" Function of the button remove: to remove a module from the Selected
module list"""
sel = self.LB_selected.curselection()
for index in sel[::-1]:
self.LB_selected.delete(index)
def _up(self, event=None):
""" Function of the button up: to move upward a module in the Selected
module list"""
pos_list = self.LB_selected.curselection()
if not pos_list:
return
for pos in pos_list:
if pos == 0:
continue
item = self.LB_selected.get(pos)
self.LB_selected.delete(pos)
self.LB_selected.insert(pos - 1, item)
def _down(self, event=None):
""" Function of the button down: to move downward a module in the
Selected module list."""
pos_list = self.LB_selected.curselection()
if not pos_list:
return
for pos in pos_list:
if pos == self.LB_selected.size():
continue
item = self.LB_selected.get(pos)
self.LB_selected.delete(pos)
self.LB_selected.insert(pos + 1, item)
class WorkFlowGUI(tk.Frame):
def __init__(self, master=None, **kwargs):
tk.Frame.__init__(self, master, **kwargs)
self.pack(fill=tk.BOTH)
self.Options = WorkflowOptions()
space_label = tk.Label(self, text=' ')
space_label.grid(column=0, row=0)
# Input CPACS file
self.label = tk.Label(self, text=' Input CPACS file')
self.label.grid(column=0, row=1)
self.path_var = tk.StringVar()
self.path_var.set(self.Options.cpacs_path)
value_entry = tk.Entry(self, textvariable=self.path_var, width= 45)
value_entry.grid(column=1, row=1)
self.browse_button = tk.Button(self, text="Browse", command=self._browse_file)
self.browse_button.grid(column=2, row=1, pady=5)
# Notebook for tabs
self.tabs = ttk.Notebook(self)
self.tabs.grid(column=0, row=2, columnspan=3,padx=10,pady=10)
self.TabPre = Tab(self, 'Pre')
self.TabOptim = Tab(self, 'Optim')
self.TabPost = Tab(self, 'Post')
self.tabs.add(self.TabPre, text=self.TabPre.name)
self.tabs.add(self.TabOptim, text=self.TabOptim.name)
self.tabs.add(self.TabPost, text=self.TabPost.name)
# General buttons
self.close_button = tk.Button(self, text='Save & Quit', command=self._save_quit)
self.close_button.grid(column=2, row=3)
def _browse_file(self):
cpacs_template_dir = os.path.join(MODULE_DIR,'..','..','test','CPACSfiles')
self.filename = filedialog.askopenfilename(initialdir = cpacs_template_dir, title = "Select a CPACS file" )
self.path_var.set(self.filename)
def _save_quit(self):
self.Options.optim_method = self.TabOptim.optim_choice_CB.get()
self.Options.module_pre = [item[0] for item in self.TabPre.LB_selected.get(0, tk.END)]
self.Options.module_optim = [item[0] for item in self.TabOptim.LB_selected.get(0, tk.END)]
self.Options.module_post = [item[0] for item in self.TabPost.LB_selected.get(0, tk.END)]
self.Options.cpacs_path = self.path_var.get()
if self.path_var.get() == '':
messagebox.showerror('ValueError', 'You must select an input CPACS file!')
raise TypeError('No CPACS file has been defined!')
self.quit()
# ==============================================================================
# MAIN
# ==============================================================================
def create_wf_gui():
""" Create a GUI with Tkinter to fill the workflow to run
Args:
cpacs_path (str): Path to the CPACS file
cpacs_out_path (str): Path to the output CPACS file
module_list (list): List of module to inclue in the GUI
"""
root = tk.Tk()
root.title('Workflow Creator')
root.geometry('475x495+400+300')
my_gui = WorkFlowGUI()
my_gui.mainloop()
disg = my_gui.Options
root.iconify() # Not a great solution, but the only way to make it close on Mac
root.destroy()
return disg
if __name__ == '__main__':
log.info('----- Start of ' + os.path.basename(__file__) + ' -----')
cpacs_path_out = mi.get_tooloutput_file_path(MODULE_NAME)
gui = False
if len(sys.argv) > 1:
if sys.argv[1] == '-gui':
gui = True
else:
print(' ')
print('Not valid argument!')
print('You can use the option -gui to run this module with a user interface.')
print(' ')
sys.exit()
if gui:
Opt = create_wf_gui()
else:
####### USER INPUT ########
### Available Module:
# Settings: 'SettingsGUI'
# Geometry and mesh: 'CPACSCreator','CPACS2SUMO','SUMOAutoMesh'
# Weight and balance: 'WeightConventional','WeightUnconventional','BalanceConventional','BalanceUnconventional'
# Aerodynamics: 'CLCalculator','PyTornado','SkinFriction','PlotAeroCoefficients','SU2MeshDef','SU2Run'
# Mission analysis: 'Range','StabilityStatic'
Opt = WorkflowOptions()
# These options can be modified here if WorkflowCreator is used without GUI
# Opt.cpacs_path = '../../test/CPACSfiles/simpletest_cpacs.xml'
Opt.module_pre = []
Opt.module_optim = ['WeightConventional', 'PyTornado']
Opt.optim_method = 'DoE' # DoE, Optim, None
Opt.module_post = []
# Copy ToolInput.xml in ToolInput dir if not already there
cpacs_path = mi.get_toolinput_file_path(MODULE_NAME)
if not Opt.cpacs_path == cpacs_path:
shutil.copy(Opt.cpacs_path, cpacs_path)
Opt.cpacs_path = cpacs_path
# Create a new wkdir
tixi = cpsf.open_tixi(Opt.cpacs_path)
wkdir = ceaf.get_wkdir_or_create_new(tixi)
cpsf.close_tixi(tixi, Opt.cpacs_path)
# Run Pre-otimisation workflow
if Opt.module_pre:
wkf.run_subworkflow(Opt.module_pre, Opt.cpacs_path)
if not Opt.module_optim and not Opt.module_post:
shutil.copy(mi.get_tooloutput_file_path(Opt.module_pre[-1]), cpacs_path_out)
# Run Optimisation workflow
if Opt.module_optim:
if Opt.module_pre:
wkf.copy_module_to_module(Opt.module_pre[-1], 'out', 'Optimisation', 'in')
else:
wkf.copy_module_to_module('WorkflowCreator', 'in', 'Optimisation', 'in')
if Opt.optim_method != 'None':
routine_launcher(Opt)
else:
log.warning('No optimization method has been selected!')
log.warning('The modules will be run as a simple workflow')
wkf.run_subworkflow(Opt.module_optim)
if not Opt.module_post:
shutil.copy(mi.get_tooloutput_file_path(Opt.module_optim[-1]), cpacs_path_out)
# Run Post-optimisation workflow
if Opt.module_post:
if Opt.module_optim:
wkf.copy_module_to_module(Opt.module_optim[-1], 'out', Opt.module_post[0], 'in')
elif Opt.module_pre:
wkf.copy_module_to_module(Opt.module_pre[-1], 'out', Opt.module_post[0], 'in')
else:
wkf.copy_module_to_module('WorkflowCreator', 'in', Opt.module_post[0], 'in')
# wkf.copy_module_to_module('CPACSUpdater','out',Opt.module_post[0],'in') useful?
wkf.run_subworkflow(Opt.module_post)
shutil.copy(mi.get_tooloutput_file_path(Opt.module_post[-1]), cpacs_path_out)
log.info('----- End of ' + os.path.basename(__file__) + ' -----')
``` |
{
"source": "JPhlpL/ALQ-Computer-Laboratory-Monitoring-and-Controlling-Electrical-Appliances-Simulation-using-Raspberry",
"score": 3
} |
#### File: JPhlpL/ALQ-Computer-Laboratory-Monitoring-and-Controlling-Electrical-Appliances-Simulation-using-Raspberry/ledoff3.py
```python
import time
import RPi.GPIO as GPIO
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
GPIO.setup(27, GPIO.OUT)
def trigger ():
GPIO.output(27, GPIO.HIGH)
try:
trigger()
except KeyboardInterrupt:
print ("Quit")
GPIO.cleanup()
``` |
{
"source": "jphoulihan/pizzapy",
"score": 3
} |
#### File: jphoulihan/pizzapy/nearest_neighbour.py
```python
def fastest_route_by_order_num(routes, size):
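# Greedy walk over the routes matrix: starting from location 0, take the first
# destination in the current row that has a non-zero travel time and has not
# been visited yet, record it, and continue from that destination. This assumes
# each row lists its candidate hops in ascending travel-time order
# (nearest-neighbour heuristic), so the first valid entry is the quickest one.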
fast_rut = [0]
fast_rut_obj = []
for row in range(size):
for col in range(size):
if routes[row][col].time_taken != 0 and routes[row][col].col not in fast_rut: # ignores the case where origin and destination are the same, and only inserts columns not already in the fast list
fast_rut.append(routes[row][col].col)
fast_rut_obj.append(routes[row][col])
row = routes[row][col].col
col = 0 #traverse row again
return fast_rut, fast_rut_obj
```
#### File: jphoulihan/pizzapy/route.py
```python
class Route:
def __init__(self, row, col, time_taken):
self.row = row
self.col = col
self.time_taken = time_taken
```
#### File: jphoulihan/pizzapy/total_time.py
```python
import datetime
def total_time(fast_rut_obj):
total_secs = sum(x.time_taken for x in fast_rut_obj[1:])
print('\n\n')
print(total_secs)
conversion = datetime.timedelta(seconds=total_secs)
converted_time = str(conversion)
print('\n\n__Total Delivery Time__\n', converted_time)
``` |
{
"source": "jphppd/pre-commit-hooks",
"score": 3
} |
#### File: hooks/python/yaml_check_syntax.py
```python
import argparse
import sys
from typing import Any
from typing import Generator
from typing import NamedTuple
from typing import Optional
from typing import Sequence
import ruamel.yaml
YAML = ruamel.yaml.YAML(typ='safe')
def _exhaust(gen: Generator[str, None, None]) -> None:
for _ in gen:
pass
def _parse_unsafe(*args: Any, **kwargs: Any) -> None:
_exhaust(YAML.parse(*args, **kwargs))
def _load_all(*args: Any, **kwargs: Any) -> None:
_exhaust(YAML.load_all(*args, **kwargs))
class Key(NamedTuple):
multi: bool
unsafe: bool
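# Dispatch table: the (multi, unsafe) flag pair selects which ruamel.yaml
# loader to run. Unsafe mode only parses for syntax, so it uses the same
# parser whether or not multiple documents are allowed.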
LOAD_FNS = {
Key(multi=False, unsafe=False): YAML.load,
Key(multi=False, unsafe=True): _parse_unsafe,
Key(multi=True, unsafe=False): _load_all,
Key(multi=True, unsafe=True): _parse_unsafe,
}
def main(argv: Optional[Sequence[str]] = None) -> int:
parser = argparse.ArgumentParser()
parser.add_argument(
'-m',
'--multi',
'--allow-multiple-documents',
action='store_true',
)
parser.add_argument(
'--unsafe',
action='store_true',
help=(
'Instead of loading the files, simply parse them for syntax. '
'A syntax-only check enables extensions and unsafe constructs '
'which would otherwise be forbidden. Using this option removes '
'all guarantees of portability to other yaml implementations. '
'Implies --allow-multiple-documents'
),
)
parser.add_argument('filenames', nargs='*', help='Filenames to check.')
args = parser.parse_args(argv)
load_fn = LOAD_FNS[Key(multi=args.multi, unsafe=args.unsafe)]
retval = 0
for filename in args.filenames:
try:
with open(filename, encoding='UTF-8') as file_handler:
load_fn(file_handler)
except ruamel.yaml.YAMLError as exc:
print(exc)
retval = 1
return retval
if __name__ == '__main__':
sys.exit(main())
``` |
{
"source": "jphtmt/mongo_query",
"score": 3
} |
#### File: jphtmt/mongo_query/mongodb_distinct.py
```python
import sys
from pymongo import MongoClient
from mongodb import connect_info
def data_distinct():
while 1:
ip, port = connect_info()
while 1:
client = MongoClient(host=ip, port=port)
for i in range(len(client.database_names())):
print "%i.%s " % (i + 1, client.database_names()[i])
num = int(raw_input("选择一个database,返回上一步输入 0,退出输入 999:"))
if num != 0 and num != 999:
db = client[client.database_names()[num - 1]]
while 1:
print "Document:"
for i in range(len(db.collection_names())):
print "%i.%s " % (i + 1, db.collection_names()[i])
num = int(raw_input("选择一个DOCUMENT,返回上一步输入 0:"))
if num != 0:
while 1:
collection = db[db.collection_names()[num - 1]]
print "1. IMEI " \
"2. IMSI " \
"3. cellularIP " \
"4. cpuABI " \
"5. sdkVersion " \
"6. wifiMacAddress " \
"7. IDFA " \
"8. displayRom "
choose_num = int(raw_input("输入要筛选的字段,返回上一步输入 0,退出输入 999:"))
if choose_num != 0 and choose_num != 999:
# query the selected field and print its distinct values
distinct_info = None
if choose_num == 1:
distinct_info = collection.distinct('IMEI')
elif choose_num == 2:
distinct_info = collection.distinct('IMSI')
elif choose_num == 3:
distinct_info = collection.distinct('cellularIP')
elif choose_num == 4:
distinct_info = collection.distinct('cpuABI')
elif choose_num == 5:
distinct_info = collection.distinct('sdkVersion')
elif choose_num == 6:
distinct_info = collection.distinct('wifiMacAddress')
elif choose_num == 7:
distinct_info = collection.distinct('IDFA')
elif choose_num == 8:
distinct_info = collection.distinct('displayRom')
if distinct_info is not None:
print distinct_info
elif choose_num == 0:
break
elif choose_num == 999:
sys.exit(0)
else:
break
elif num == 0:
break
elif num == 999:
sys.exit(0)
if __name__ == '__main__':
data_distinct()
``` |
{
"source": "jphuart/swatplus-automatic-workflow",
"score": 2
} |
#### File: editor_api/actions/create_databases.py
```python
from helpers.executable_api import ExecutableApi, Unbuffered
from database.datasets.setup import SetupDatasetsDatabase
from database.output.setup import SetupOutputDatabase
from database.project.setup import SetupProjectDatabase
from database.project.config import Project_config
from database import soils
import sys
import argparse
import os.path
class CreateDatasetsDb(ExecutableApi):
def __init__(self, db_file):
self.__abort = False
SetupDatasetsDatabase.init(db_file)
def create(self, version):
SetupDatasetsDatabase.create_tables()
SetupDatasetsDatabase.initialize_data(version)
class CreateOutputDb(ExecutableApi):
def __init__(self, db_file):
self.__abort = False
SetupOutputDatabase.init(db_file)
def create(self):
SetupOutputDatabase.create_tables()
class CreateProjectDb(ExecutableApi):
def __init__(self, db_file, datasets_db_file, project_name, editor_version):
self.__abort = False
self.project_db = db_file
self.reference_db = datasets_db_file
self.project_name = project_name
self.editor_version = editor_version
SetupProjectDatabase.init(db_file, datasets_db_file)
def create(self):
SetupProjectDatabase.create_tables()
SetupProjectDatabase.initialize_data("demo")
base_path = os.path.dirname(self.project_db)
rel_project_db = os.path.relpath(self.project_db, base_path)
rel_reference_db = os.path.relpath(self.reference_db, base_path)
print("\n\t> project_db\t\t: {}".format(self.project_db))
print("\t> base_path : {}".format(base_path))
print("\t> rel_project_db : {}".format(rel_project_db))
print("\t> rel_reference_db : {}\n".format(rel_reference_db))
Project_config.get_or_create_default(
editor_version=self.editor_version,
project_name=self.project_name,
#project_db=rel_project_db,
reference_db=rel_reference_db
)
if __name__ == '__main__':
sys.stdout = Unbuffered(sys.stdout)
parser = argparse.ArgumentParser(description="Create SWAT+ project, datasets, or output databases")
parser.add_argument("db_type", type=str, help="which database: datasets, output, project")
parser.add_argument("db_file", type=str, help="full path of SQLite database file")
parser.add_argument("db_file2", type=str, help="full path of SQLite database file", nargs="?")
parser.add_argument("project_name", type=str, help="project name", nargs="?")
parser.add_argument("editor_version", type=str, help="editor version", nargs="?")
args = parser.parse_args()
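# Example invocation (hypothetical file paths, following the positional
# arguments defined above):
#   python create_databases.py project project.sqlite swatplus_datasets.sqlite demo_project 2.0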
if args.db_type == "datasets":
api = CreateDatasetsDb(args.db_file)
api.create(args.editor_version)
elif args.db_type == "output":
api = CreateOutputDb(args.db_file)
api.create()
elif args.db_type == "project":
project_name = "demo" if args.project_name is None else args.project_name
editor_version = "api" if args.editor_version is None else args.editor_version
api = CreateProjectDb(args.db_file, args.db_file2, project_name, editor_version)
api.create()
elif args.db_type == "ssurgo_soils":
soils.db.init(args.db_file)
api = soils.ImportSoils()
api.ssurgo(args.db_file2)
```
#### File: editor_api/actions/write_files.py
```python
from helpers.executable_api import ExecutableApi, Unbuffered
from database import lib as db_lib
from database.project import base as project_base
from database.project.setup import SetupProjectDatabase
from database.project.config import Project_config
from database.project.config import File_cio as project_file_cio, File_cio_classification
from database.project.climate import Weather_file
from fileio import connect, exco, dr, recall, climate, channel, aquifer, hydrology, reservoir, hru, lum, soils, init, routing_unit, regions, simulation, hru_parm_db, config, ops, structural, decision_table, basin, change
from helpers import utils
import sys
import argparse
import os.path
from datetime import datetime
from shutil import copyfile
NULL_FILE = "null"
class WriteFiles(ExecutableApi):
def __init__(self, project_db_file):
self.__abort = False
SetupProjectDatabase.init(project_db_file)
self.project_db = project_base.db
try:
config = Project_config.get()
input_files_dir = utils.full_path(project_db_file, config.input_files_dir).replace("\\","/")
if not os.path.exists(input_files_dir):
sys.exit('The input files directory {dir} does not exist. Please select a valid path and try again.'.format(dir=input_files_dir))
weather_data_dir = None
if config.weather_data_dir is not None:
weather_data_dir = utils.full_path(project_db_file, config.weather_data_dir).replace("\\","/")
if not os.path.exists(weather_data_dir):
sys.exit('Weather data directory {dir} does not exist.'.format(dir=weather_data_dir))
self.__dir = input_files_dir
self.__weather_dir = weather_data_dir
self.__version = config.editor_version
self.__current_progress = 0
self.__is_lte = config.is_lte
except Project_config.DoesNotExist:
sys.exit('Could not retrieve project configuration from database')
def write(self):
try:
step = 3
small_step = 1
big_step = 5
bigger_step = 10
total = 0
self.write_simulation(total, step)
total += step
self.write_climate(total, bigger_step)
total += bigger_step
self.copy_weather_files(total, step)
total += step
self.write_connect(total, step)
total += step
self.write_channel(total, step)
total += step
self.write_reservoir(total, step)
total += step
self.write_routing_unit(total, step)
total += step
self.write_hru(total, bigger_step)
total += bigger_step
self.write_dr(total, small_step)
total += small_step
self.write_aquifer(total, small_step)
total += small_step
self.write_herd(total, small_step)
total += small_step
self.write_water_rights(total, small_step)
total += small_step
self.write_link(total, small_step)
total += small_step
self.write_basin(total, small_step)
total += small_step
self.write_hydrology(total, step)
total += step
self.write_exco(total, step)
total += step
self.write_recall(total, step)
total += step
self.write_structural(total, step)
total += step
self.write_parm_db(total, step)
total += step
self.write_ops(total, step)
total += step
self.write_lum(total, step)
total += step
self.write_chg(total, step)
total += step
self.write_init(total, step)
total += step
self.write_soils(total, bigger_step)
total += bigger_step
self.write_decision_table(total, step)
total += step
self.write_regions(total, step)
total += step
self.update_file_status(total, "file.cio")
config.File_cio(os.path.join(self.__dir, "file.cio"), self.__version).write()
Project_config.update(input_files_last_written=datetime.now(), swat_last_run=None, output_last_imported=None).execute()
except ValueError as err:
sys.exit(err)
def get_file_names(self, section, num_required):
file_names = []
try:
c = File_cio_classification.get(File_cio_classification.name == section)
m = project_file_cio.select().where(project_file_cio.classification == c).order_by(project_file_cio.order_in_class)
file_names = [v.file_name for v in m]
except File_cio_classification.DoesNotExist:
pass
except project_file_cio.DoesNotExist:
pass
if len(file_names) < num_required:
raise ValueError(
"{section} file names not available in the project database nor the SWAT+ datasets database.".format(
section=section))
return file_names
def copy_weather_files(self, start_prog, allocated_prog):
if self.__weather_dir is not None and self.__dir != self.__weather_dir:
self.copy_weather_file("hmd.cli", start_prog)
self.copy_weather_file("pcp.cli", start_prog)
self.copy_weather_file("slr.cli", start_prog)
self.copy_weather_file("tmp.cli", start_prog)
self.copy_weather_file("wnd.cli", start_prog)
query = Weather_file.select()
num_files = query.count()
if num_files > 0:
prog_step = round((allocated_prog) / num_files)
prog = start_prog
for wf in query:
self.copy_weather_file(wf.filename, prog)
prog += prog_step
def copy_weather_file(self, file_name, prog):
try:
self.emit_progress(prog, "Copying weather file {}...".format(file_name))
copyfile(os.path.join(self.__weather_dir, file_name), os.path.join(self.__dir, file_name))
except IOError as err:
print("\n\t ! {0} was not copied\n\t was {1}.txt in the data?".format(
os.path.basename(file_name), os.path.basename(file_name).split(".")[0]))
# print(err)
def write_simulation(self, start_prog, allocated_prog):
num_files = 4
files = self.get_file_names("simulation", num_files)
prog_step = round(allocated_prog / num_files)
prog = start_prog
time_sim_file = files[0].strip()
if time_sim_file != NULL_FILE:
self.update_file_status(start_prog, time_sim_file)
simulation.Time_sim(os.path.join(self.__dir, time_sim_file), self.__version).write()
prog += prog_step
print_prt_file = files[1].strip()
if print_prt_file != NULL_FILE:
self.update_file_status(start_prog, print_prt_file)
simulation.Print_prt(os.path.join(self.__dir, print_prt_file), self.__version).write()
prog += prog_step
object_prt_file = files[2].strip()
if object_prt_file != NULL_FILE:
self.update_file_status(start_prog, object_prt_file)
simulation.Object_prt(os.path.join(self.__dir, object_prt_file), self.__version).write()
prog += prog_step
object_cnt_file = files[3].strip()
if object_cnt_file != NULL_FILE:
self.update_file_status(start_prog, object_cnt_file)
simulation.Object_cnt(os.path.join(self.__dir, object_cnt_file), self.__version).write()
def write_climate(self, start_prog, allocated_prog):
num_files = 8
files = self.get_file_names("climate", num_files)
prog_step = round(allocated_prog / num_files)
prog = start_prog
weather_sta_file = files[0].strip()
if weather_sta_file != NULL_FILE:
self.update_file_status(prog, weather_sta_file)
climate.Weather_sta_cli(os.path.join(self.__dir, weather_sta_file), self.__version).write()
prog += prog_step
weather_wgn_file = files[1].strip()
if weather_wgn_file != NULL_FILE:
self.update_file_status(prog, weather_wgn_file)
climate.Weather_wgn_cli(os.path.join(self.__dir, weather_wgn_file), self.__version).write()
def write_connect(self, start_prog, allocated_prog):
num_files = 13
files = self.get_file_names("connect", num_files)
prog_step = round(allocated_prog / num_files)
prog = start_prog
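# Note: the indices into 'files' follow the order of the connect classification
# in file.cio; slots for connect object types not handled by this writer are
# simply skipped.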
hru_con_file = files[0].strip()
if hru_con_file != NULL_FILE:
self.update_file_status(prog, hru_con_file)
connect.Hru_con(os.path.join(self.__dir, hru_con_file), self.__version).write()
prog += prog_step
hru_lte_con_file = files[1].strip()
if hru_lte_con_file != NULL_FILE:
self.update_file_status(prog, hru_lte_con_file)
connect.Hru_lte_con(os.path.join(self.__dir, hru_lte_con_file), self.__version).write()
prog += prog_step
rout_unit_con_file = files[2].strip()
if rout_unit_con_file != NULL_FILE:
self.update_file_status(prog, rout_unit_con_file)
connect.Rout_unit_con(os.path.join(self.__dir, rout_unit_con_file), self.__version).write()
prog += prog_step
aquifer_con_file = files[4].strip()
if aquifer_con_file != NULL_FILE:
self.update_file_status(prog, aquifer_con_file)
connect.Aquifer_con(os.path.join(self.__dir, aquifer_con_file), self.__version).write()
prog += prog_step
channel_con_file = files[6].strip()
if channel_con_file != NULL_FILE:
self.update_file_status(prog, channel_con_file)
connect.Channel_con(os.path.join(self.__dir, channel_con_file), self.__version).write()
prog += prog_step
reservoir_con_file = files[7].strip()
if reservoir_con_file != NULL_FILE:
self.update_file_status(prog, reservoir_con_file)
connect.Reservoir_con(os.path.join(self.__dir, reservoir_con_file), self.__version).write()
prog += prog_step
recall_con_file = files[8].strip()
if recall_con_file != NULL_FILE:
self.update_file_status(prog, recall_con_file)
connect.Recall_con(os.path.join(self.__dir, recall_con_file), self.__version).write()
prog += prog_step
exco_con_file = files[9].strip()
if exco_con_file != NULL_FILE:
self.update_file_status(prog, exco_con_file)
connect.Exco_con(os.path.join(self.__dir, exco_con_file), self.__version).write()
prog += prog_step
delratio_con_file = files[10].strip()
if delratio_con_file != NULL_FILE:
self.update_file_status(prog, delratio_con_file)
connect.Delratio_con(os.path.join(self.__dir, delratio_con_file), self.__version).write()
prog += prog_step
chandeg_con_file = files[12].strip()
if chandeg_con_file != NULL_FILE:
self.update_file_status(prog, chandeg_con_file)
connect.Chandeg_con(os.path.join(self.__dir, chandeg_con_file), self.__version).write()
def write_channel(self, start_prog, allocated_prog):
num_files = 7
files = self.get_file_names("channel", num_files)
prog_step = round(allocated_prog / num_files)
prog = start_prog
channel_cha_file = files[1].strip()
if channel_cha_file != NULL_FILE:
self.update_file_status(start_prog, channel_cha_file)
channel.Channel_cha(os.path.join(self.__dir, channel_cha_file), self.__version).write()
prog += prog_step
initial_cha_file = files[0].strip()
if initial_cha_file != NULL_FILE:
self.update_file_status(prog, initial_cha_file)
channel.Initial_cha(os.path.join(self.__dir, initial_cha_file), self.__version).write()
prog += prog_step
hydrology_cha_file = files[2].strip()
if hydrology_cha_file != NULL_FILE:
self.update_file_status(prog, hydrology_cha_file)
channel.Hydrology_cha(os.path.join(self.__dir, hydrology_cha_file), self.__version).write()
prog += prog_step
sediment_cha_file = files[3].strip()
if sediment_cha_file != NULL_FILE:
self.update_file_status(prog, sediment_cha_file)
channel.Sediment_cha(os.path.join(self.__dir, sediment_cha_file), self.__version).write()
prog += prog_step
nutrients_cha_file = files[4].strip()
if nutrients_cha_file != NULL_FILE:
self.update_file_status(prog, nutrients_cha_file)
channel.Nutrients_cha(os.path.join(self.__dir, nutrients_cha_file), self.__version).write()
prog += prog_step
channel_lte_cha_file = files[5].strip()
if channel_lte_cha_file != NULL_FILE:
self.update_file_status(prog, channel_lte_cha_file)
channel.Channel_lte_cha(os.path.join(self.__dir, channel_lte_cha_file), self.__version).write()
prog += prog_step
hyd_sed_lte_cha_file = files[6].strip()
if hyd_sed_lte_cha_file != NULL_FILE:
self.update_file_status(prog, hyd_sed_lte_cha_file)
channel.Hyd_sed_lte_cha(os.path.join(self.__dir, hyd_sed_lte_cha_file), self.__version).write()
def write_reservoir(self, start_prog, allocated_prog):
num_files = 8
files = self.get_file_names("reservoir", num_files)
prog_step = round(allocated_prog / num_files)
prog = start_prog
initial_res_file = files[0].strip()
if initial_res_file != NULL_FILE:
self.update_file_status(prog, initial_res_file)
reservoir.Initial_res(os.path.join(self.__dir, initial_res_file), self.__version).write()
prog += prog_step
reservoir_res_file = files[1].strip()
if reservoir_res_file != NULL_FILE:
self.update_file_status(prog, reservoir_res_file)
reservoir.Reservoir_res(os.path.join(self.__dir, reservoir_res_file), self.__version).write()
prog += prog_step
hydrology_res_file = files[2].strip()
if hydrology_res_file != NULL_FILE:
self.update_file_status(prog, hydrology_res_file)
reservoir.Hydrology_res(os.path.join(self.__dir, hydrology_res_file), self.__version).write()
prog += prog_step
sediment_res_file = files[3].strip()
if sediment_res_file != NULL_FILE:
self.update_file_status(prog, sediment_res_file)
reservoir.Sediment_res(os.path.join(self.__dir, sediment_res_file), self.__version).write()
prog += prog_step
nutrients_res_file = files[4].strip()
if nutrients_res_file != NULL_FILE:
self.update_file_status(prog, nutrients_res_file)
reservoir.Nutrients_res(os.path.join(self.__dir, nutrients_res_file), self.__version).write()
prog += prog_step
weir_res_file = files[5].strip()
if weir_res_file != NULL_FILE:
self.update_file_status(prog, weir_res_file)
reservoir.Weir_res(os.path.join(self.__dir, weir_res_file), self.__version).write()
prog += prog_step
wetland_wet_file = files[6].strip()
if wetland_wet_file != NULL_FILE:
self.update_file_status(prog, wetland_wet_file)
reservoir.Wetland_wet(os.path.join(self.__dir, wetland_wet_file), self.__version).write()
prog += prog_step
hydrology_wet_file = files[7].strip()
if hydrology_wet_file != NULL_FILE:
self.update_file_status(prog, hydrology_wet_file)
reservoir.Hydrology_wet(os.path.join(self.__dir, hydrology_wet_file), self.__version).write()
def write_routing_unit(self, start_prog, allocated_prog):
num_files = 4
files = self.get_file_names("routing_unit", num_files)
prog_step = round(allocated_prog / num_files)
prog = start_prog
prog += prog_step
rout_unit_def_file = files[0].strip()
if rout_unit_def_file != NULL_FILE:
self.update_file_status(prog, rout_unit_def_file)
routing_unit.Rout_unit_def(os.path.join(self.__dir, rout_unit_def_file), self.__version).write()
prog += prog_step
rout_unit_ele_file = files[1].strip()
if rout_unit_ele_file != NULL_FILE:
self.update_file_status(prog, rout_unit_ele_file)
routing_unit.Rout_unit_ele(os.path.join(self.__dir, rout_unit_ele_file), self.__version).write()
rout_unit_ru_file = files[2].strip()
if rout_unit_ru_file != NULL_FILE:
self.update_file_status(prog, rout_unit_ru_file)
routing_unit.Rout_unit(os.path.join(self.__dir, rout_unit_ru_file), self.__version).write()
prog += prog_step
rout_unit_dr_file = files[3].strip()
if rout_unit_dr_file != NULL_FILE:
self.update_file_status(prog, rout_unit_dr_file)
routing_unit.Rout_unit_dr(os.path.join(self.__dir, rout_unit_dr_file), self.__version).write()
def write_hru(self, start_prog, allocated_prog):
num_files = 2
files = self.get_file_names("hru", num_files)
prog_step = round(allocated_prog / num_files)
prog = start_prog
hru_data_file = files[0].strip()
if hru_data_file != NULL_FILE:
self.update_file_status(prog, hru_data_file)
hru.Hru_data_hru(os.path.join(self.__dir, hru_data_file), self.__version).write()
prog += prog_step
hru_lte_hru_file = files[1].strip()
if hru_lte_hru_file != NULL_FILE:
self.update_file_status(prog, hru_lte_hru_file)
hru.Hru_lte_hru(os.path.join(self.__dir, hru_lte_hru_file), self.__version).write()
def write_dr(self, start_prog, allocated_prog):
num_files = 6
files = self.get_file_names("dr", num_files)
prog_step = round(allocated_prog / num_files)
prog = start_prog
delratio_del_file = files[0].strip()
if delratio_del_file != NULL_FILE:
self.update_file_status(prog, delratio_del_file)
dr.Delratio_del(os.path.join(self.__dir, delratio_del_file), self.__version).write()
prog += prog_step
dr_om_file = files[1].strip()
if dr_om_file != NULL_FILE:
self.update_file_status(prog, dr_om_file)
dr.Dr_om_del(os.path.join(self.__dir, dr_om_file), self.__version).write()
def write_aquifer(self, start_prog, allocated_prog):
num_files = 2
files = self.get_file_names("aquifer", num_files)
prog_step = round(allocated_prog / num_files)
prog = start_prog
aquifer_aqu_file = files[1].strip()
if aquifer_aqu_file != NULL_FILE:
self.update_file_status(start_prog, aquifer_aqu_file)
aquifer.Aquifer_aqu(os.path.join(self.__dir, aquifer_aqu_file), self.__version).write()
prog += prog_step
initial_aqu_file = files[0].strip()
if initial_aqu_file != NULL_FILE:
self.update_file_status(prog, initial_aqu_file)
aquifer.Initial_aqu(os.path.join(self.__dir, initial_aqu_file), self.__version).write()
def write_herd(self, start_prog, allocated_prog):
pass
def write_water_rights(self, start_prog, allocated_prog):
pass
def write_link(self, start_prog, allocated_prog):
pass
"""num_files = 2
files = self.get_file_names("link", num_files)
prog_step = round(allocated_prog / num_files)
prog = start_prog
chan_aqu_lin_file = files[1].strip()
if chan_aqu_lin_file != NULL_FILE:
self.update_file_status(start_prog, chan_aqu_lin_file)
aquifer.Chan_aqu_lin(os.path.join(self.__dir, chan_aqu_lin_file), self.__version).write()"""
def write_basin(self, start_prog, allocated_prog):
num_files = 2
files = self.get_file_names("basin", num_files)
prog_step = round(allocated_prog / num_files)
prog = start_prog
codes_bsn_file = files[0].strip()
if codes_bsn_file != NULL_FILE:
self.update_file_status(prog, codes_bsn_file)
basin.Codes_bsn(os.path.join(self.__dir, codes_bsn_file), self.__version).write()
prog += prog_step
parameters_bsn_file = files[1].strip()
if parameters_bsn_file != NULL_FILE:
self.update_file_status(prog, parameters_bsn_file)
basin.Parameters_bsn(os.path.join(self.__dir, parameters_bsn_file), self.__version).write()
def write_hydrology(self, start_prog, allocated_prog):
num_files = 3
files = self.get_file_names("hydrology", num_files)
prog_step = round(allocated_prog / num_files)
prog = start_prog
hydrology_hyd_file = files[0].strip()
if hydrology_hyd_file != NULL_FILE:
self.update_file_status(prog, hydrology_hyd_file)
hydrology.Hydrology_hyd(os.path.join(self.__dir, hydrology_hyd_file), self.__version).write()
prog += prog_step
topography_hyd_file = files[1].strip()
if topography_hyd_file != NULL_FILE:
self.update_file_status(prog, topography_hyd_file)
hydrology.Topography_hyd(os.path.join(self.__dir, topography_hyd_file), self.__version).write()
prog += prog_step
field_fld_file = files[2].strip()
if field_fld_file != NULL_FILE:
self.update_file_status(prog, field_fld_file)
hydrology.Field_fld(os.path.join(self.__dir, field_fld_file), self.__version).write()
def write_exco(self, start_prog, allocated_prog):
num_files = 6
files = self.get_file_names("exco", num_files)
prog_step = round(allocated_prog / num_files)
prog = start_prog
exco_exc_file = files[0].strip()
if exco_exc_file != NULL_FILE:
self.update_file_status(prog, exco_exc_file)
exco.Exco_exc(os.path.join(self.__dir, exco_exc_file), self.__version).write()
prog += prog_step
exco_om_file = files[1].strip()
if exco_om_file != NULL_FILE:
self.update_file_status(prog, exco_om_file)
exco.Exco_om_exc(os.path.join(self.__dir, exco_om_file), self.__version).write()
prog += prog_step
exco_pest_file = files[2].strip()
if exco_pest_file != NULL_FILE:
self.update_file_status(prog, exco_pest_file)
exco.Exco_pest_exc(os.path.join(self.__dir, exco_pest_file), self.__version).write()
prog += prog_step
exco_path_file = files[3].strip()
if exco_path_file != NULL_FILE:
self.update_file_status(prog, exco_path_file)
exco.Exco_path_exc(os.path.join(self.__dir, exco_path_file), self.__version).write()
prog += prog_step
exco_hmet_file = files[4].strip()
if exco_hmet_file != NULL_FILE:
self.update_file_status(prog, exco_hmet_file)
exco.Exco_hmet_exc(os.path.join(self.__dir, exco_hmet_file), self.__version).write()
prog += prog_step
exco_salt_file = files[5].strip()
if exco_salt_file != NULL_FILE:
self.update_file_status(prog, exco_salt_file)
exco.Exco_salt_exc(os.path.join(self.__dir, exco_salt_file), self.__version).write()
def write_recall(self, start_prog, allocated_prog):
num_files = 1
files = self.get_file_names("recall", num_files)
prog_step = round(allocated_prog / num_files)
prog = start_prog
recall_rec_file = files[0].strip()
if recall_rec_file != NULL_FILE:
self.update_file_status(prog, recall_rec_file)
recall.Recall_rec(os.path.join(self.__dir, recall_rec_file), self.__version).write()
def write_structural(self, start_prog, allocated_prog):
num_files = 5
files = self.get_file_names("structural", num_files)
prog_step = round(allocated_prog / num_files)
prog = start_prog
tiledrain_str_file = files[0].strip()
if tiledrain_str_file != NULL_FILE:
self.update_file_status(prog, tiledrain_str_file)
structural.Tiledrain_str(os.path.join(self.__dir, tiledrain_str_file), self.__version).write()
prog += prog_step
septic_str_file = files[1].strip()
if septic_str_file != NULL_FILE:
self.update_file_status(prog, septic_str_file)
structural.Septic_str(os.path.join(self.__dir, septic_str_file), self.__version).write()
prog += prog_step
filterstrip_str_file = files[2].strip()
if filterstrip_str_file != NULL_FILE:
self.update_file_status(prog, filterstrip_str_file)
structural.Filterstrip_str(os.path.join(self.__dir, filterstrip_str_file), self.__version).write()
prog += prog_step
grassedww_str_file = files[3].strip()
if grassedww_str_file != NULL_FILE:
self.update_file_status(prog, grassedww_str_file)
structural.Grassedww_str(os.path.join(self.__dir, grassedww_str_file), self.__version).write()
prog += prog_step
bmpuser_str_file = files[4].strip()
if bmpuser_str_file != NULL_FILE:
self.update_file_status(prog, bmpuser_str_file)
structural.Bmpuser_str(os.path.join(self.__dir, bmpuser_str_file), self.__version).write()
def write_parm_db(self, start_prog, allocated_prog):
num_files = 10
files = self.get_file_names("hru_parm_db", num_files)
prog_step = round(allocated_prog / num_files)
prog = start_prog
plants_plt_file = files[0].strip()
if plants_plt_file != NULL_FILE:
self.update_file_status(prog, plants_plt_file)
hru_parm_db.Plants_plt(os.path.join(self.__dir, plants_plt_file), self.__version).write()
prog += prog_step
fertilizer_frt_file = files[1].strip()
if fertilizer_frt_file != NULL_FILE:
self.update_file_status(prog, fertilizer_frt_file)
hru_parm_db.Fertilizer_frt(os.path.join(self.__dir, fertilizer_frt_file), self.__version).write()
prog += prog_step
tillage_til_file = files[2].strip()
if tillage_til_file != NULL_FILE:
self.update_file_status(prog, tillage_til_file)
hru_parm_db.Tillage_til(os.path.join(self.__dir, tillage_til_file), self.__version).write()
prog += prog_step
pesticide_pst_file = files[3].strip()
if pesticide_pst_file != NULL_FILE:
self.update_file_status(prog, pesticide_pst_file)
hru_parm_db.Pesticide_pst(os.path.join(self.__dir, pesticide_pst_file), self.__version).write()
prog += prog_step
pathogens_pth_file = files[4].strip()
if pathogens_pth_file != NULL_FILE:
self.update_file_status(prog, pathogens_pth_file)
hru_parm_db.Pathogens_pth(os.path.join(self.__dir, pathogens_pth_file), self.__version).write()
prog += prog_step
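# files[5] (metals) and files[6] (salts) are skipped; no writer is invoked for them here.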
urban_urb_file = files[7].strip()
if urban_urb_file != NULL_FILE:
self.update_file_status(prog, urban_urb_file)
hru_parm_db.Urban_urb(os.path.join(self.__dir, urban_urb_file), self.__version).write()
prog += prog_step
septic_sep_file = files[8].strip()
if septic_sep_file != NULL_FILE:
self.update_file_status(prog, septic_sep_file)
hru_parm_db.Septic_sep(os.path.join(self.__dir, septic_sep_file), self.__version).write()
prog += prog_step
snow_sno_file = files[9].strip()
if snow_sno_file != NULL_FILE:
self.update_file_status(prog, snow_sno_file)
hru_parm_db.Snow_sno(os.path.join(self.__dir, snow_sno_file), self.__version).write()
def write_ops(self, start_prog, allocated_prog):
num_files = 6
files = self.get_file_names("ops", num_files)
prog_step = round(allocated_prog / num_files)
prog = start_prog
harv_ops_file = files[0].strip()
if harv_ops_file != NULL_FILE:
self.update_file_status(prog, harv_ops_file)
ops.Harv_ops(os.path.join(self.__dir, harv_ops_file), self.__version).write()
prog += prog_step
graze_ops_file = files[1].strip()
if graze_ops_file != NULL_FILE:
self.update_file_status(prog, graze_ops_file)
ops.Graze_ops(os.path.join(self.__dir, graze_ops_file), self.__version).write()
prog += prog_step
irr_ops_file = files[2].strip()
if irr_ops_file != NULL_FILE:
self.update_file_status(prog, irr_ops_file)
ops.Irr_ops(os.path.join(self.__dir, irr_ops_file), self.__version).write()
prog += prog_step
chem_app_ops_file = files[3].strip()
if chem_app_ops_file != NULL_FILE:
self.update_file_status(prog, chem_app_ops_file)
ops.Chem_app_ops(os.path.join(self.__dir, chem_app_ops_file), self.__version).write()
prog += prog_step
fire_ops_file = files[4].strip()
if fire_ops_file != NULL_FILE:
self.update_file_status(prog, fire_ops_file)
ops.Fire_ops(os.path.join(self.__dir, fire_ops_file), self.__version).write()
prog += prog_step
sweep_ops_file = files[5].strip()
if sweep_ops_file != NULL_FILE:
self.update_file_status(prog, sweep_ops_file)
ops.Sweep_ops(os.path.join(self.__dir, sweep_ops_file), self.__version).write()
def write_lum(self, start_prog, allocated_prog):
num_files = 5
files = self.get_file_names("lum", num_files)
prog_step = round(allocated_prog / num_files)
prog = start_prog
landuse_lum_file = files[0].strip()
if landuse_lum_file != NULL_FILE:
self.update_file_status(prog, landuse_lum_file)
lum.Landuse_lum(os.path.join(self.__dir, landuse_lum_file), self.__version).write()
prog += prog_step
management_sch_file = files[1].strip()
if management_sch_file != NULL_FILE:
self.update_file_status(prog, management_sch_file)
lum.Management_sch(os.path.join(self.__dir, management_sch_file), self.__version).write()
prog += prog_step
cntable_lum_file = files[2].strip()
if cntable_lum_file != NULL_FILE:
self.update_file_status(prog, cntable_lum_file)
lum.Cntable_lum(os.path.join(self.__dir, cntable_lum_file), self.__version).write()
prog += prog_step
cons_prac_lum_file = files[3].strip()
if cons_prac_lum_file != NULL_FILE:
self.update_file_status(prog, cons_prac_lum_file)
lum.Cons_prac_lum(os.path.join(self.__dir, cons_prac_lum_file), self.__version).write()
prog += prog_step
ovn_table_lum_file = files[4].strip()
if ovn_table_lum_file != NULL_FILE:
self.update_file_status(prog, ovn_table_lum_file)
lum.Ovn_table_lum(os.path.join(self.__dir, ovn_table_lum_file), self.__version).write()
def write_chg(self, start_prog, allocated_prog):
num_files = 9
files = self.get_file_names("chg", num_files)
prog_step = round(allocated_prog / num_files)
prog = start_prog
cal_parms_cal_file = files[0].strip()
if cal_parms_cal_file != NULL_FILE:
self.update_file_status(prog, cal_parms_cal_file)
change.Cal_parms_cal(os.path.join(self.__dir, cal_parms_cal_file), self.__version).write()
prog += prog_step
calibration_cal_file = files[1].strip()
if calibration_cal_file != NULL_FILE:
self.update_file_status(prog, calibration_cal_file)
change.Calibration_cal(os.path.join(self.__dir, calibration_cal_file), self.__version).write()
prog += prog_step
codes_sft_file = files[2].strip()
if codes_sft_file != NULL_FILE:
self.update_file_status(prog, codes_sft_file)
change.Codes_sft(os.path.join(self.__dir, codes_sft_file), self.__version).write()
prog += prog_step
wb_parms_sft_file = files[3].strip()
if wb_parms_sft_file != NULL_FILE:
self.update_file_status(prog, wb_parms_sft_file)
change.Wb_parms_sft(os.path.join(self.__dir, wb_parms_sft_file), self.__version).write()
prog += prog_step
water_balance_sft_file = files[4].strip()
if water_balance_sft_file != NULL_FILE:
self.update_file_status(prog, water_balance_sft_file)
change.Water_balance_sft(os.path.join(self.__dir, water_balance_sft_file), self.__version).write()
prog += prog_step
ch_sed_budget_sft_file = files[5].strip()
if ch_sed_budget_sft_file != NULL_FILE:
self.update_file_status(prog, ch_sed_budget_sft_file)
change.Ch_sed_budget_sft(os.path.join(self.__dir, ch_sed_budget_sft_file), self.__version).write()
prog += prog_step
chsed_parms_sft_file = files[6].strip()
if chsed_parms_sft_file != NULL_FILE:
self.update_file_status(prog, chsed_parms_sft_file)
change.Ch_sed_parms_sft(os.path.join(self.__dir, chsed_parms_sft_file), self.__version).write()
prog += prog_step
plant_parms_sft_file = files[7].strip()
if plant_parms_sft_file != NULL_FILE:
self.update_file_status(prog, plant_parms_sft_file)
change.Plant_parms_sft(os.path.join(self.__dir, plant_parms_sft_file), self.__version).write()
prog += prog_step
plant_gro_sft_file = files[8].strip()
if plant_gro_sft_file != NULL_FILE:
self.update_file_status(prog, plant_gro_sft_file)
change.Plant_gro_sft(os.path.join(self.__dir, plant_gro_sft_file), self.__version).write()
def write_init(self, start_prog, allocated_prog):
num_files = 7
files = self.get_file_names("init", num_files)
prog_step = round(allocated_prog / num_files)
prog = start_prog
initial_plt_file = files[0].strip()
if initial_plt_file != NULL_FILE:
self.update_file_status(start_prog, initial_plt_file)
init.Plant_ini(os.path.join(self.__dir, initial_plt_file), self.__version).write()
prog += prog_step
soil_plant_ini_file = files[1].strip()
if soil_plant_ini_file != NULL_FILE:
self.update_file_status(prog, soil_plant_ini_file)
init.Soil_plant_ini(os.path.join(self.__dir, soil_plant_ini_file), self.__version).write()
prog += prog_step
om_water_ini_file = files[2].strip()
if om_water_ini_file != NULL_FILE:
self.update_file_status(prog, om_water_ini_file)
init.Om_water_ini(os.path.join(self.__dir, om_water_ini_file), self.__version).write()
prog += prog_step
pest_hru_ini_file = files[3].strip()
if pest_hru_ini_file != NULL_FILE:
self.update_file_status(prog, pest_hru_ini_file)
init.Pest_hru_ini(os.path.join(self.__dir, pest_hru_ini_file), self.__version).write()
prog += prog_step
pest_water_ini_file = files[4].strip()
if pest_water_ini_file != NULL_FILE:
self.update_file_status(prog, pest_water_ini_file)
init.Pest_water_ini(os.path.join(self.__dir, pest_water_ini_file), self.__version).write()
prog += prog_step
path_hru_ini_file = files[5].strip()
if path_hru_ini_file != NULL_FILE:
self.update_file_status(prog, path_hru_ini_file)
init.Path_hru_ini(os.path.join(self.__dir, path_hru_ini_file), self.__version).write()
prog += prog_step
path_water_ini_file = files[6].strip()
if path_water_ini_file != NULL_FILE:
self.update_file_status(prog, path_water_ini_file)
init.Path_water_ini(os.path.join(self.__dir, path_water_ini_file), self.__version).write()
def write_soils(self, start_prog, allocated_prog):
num_files = 3
files = self.get_file_names("soils", num_files)
prog_step = round(allocated_prog / num_files)
prog = start_prog
nutrients_sol_file = files[1].strip()
if nutrients_sol_file != NULL_FILE:
self.update_file_status(prog, nutrients_sol_file)
soils.Nutrients_sol(os.path.join(self.__dir, nutrients_sol_file), self.__version).write()
prog += prog_step
soils_sol_file = files[0].strip()
if soils_sol_file != NULL_FILE:
self.update_file_status(prog, soils_sol_file)
soils.Soils_sol(os.path.join(self.__dir, soils_sol_file), self.__version).write()
if self.__is_lte:
prog += prog_step
soils_lte_sol_file = files[2].strip()
if soils_lte_sol_file != NULL_FILE:
self.update_file_status(prog, soils_lte_sol_file)
soils.Soils_lte_sol(os.path.join(self.__dir, soils_lte_sol_file), self.__version).write()
def write_decision_table(self, start_prog, allocated_prog):
num_files = 4
files = self.get_file_names("decision_table", num_files)
prog_step = round(allocated_prog / num_files)
prog = start_prog
lum_dtl_file = files[0].strip()
if lum_dtl_file != NULL_FILE:
self.update_file_status(start_prog, lum_dtl_file)
decision_table.D_table_dtl(os.path.join(self.__dir, lum_dtl_file), self.__version).write()
prog += prog_step
res_rel_dtl_file = files[1].strip()
if res_rel_dtl_file != NULL_FILE:
self.update_file_status(prog, res_rel_dtl_file)
decision_table.D_table_dtl(os.path.join(self.__dir, res_rel_dtl_file), self.__version).write()
prog += prog_step
scen_lu_dtl_file = files[2].strip()
if scen_lu_dtl_file != NULL_FILE:
self.update_file_status(prog, scen_lu_dtl_file)
decision_table.D_table_dtl(os.path.join(self.__dir, scen_lu_dtl_file), self.__version).write()
prog += prog_step
flo_con_dtl_file = files[3].strip()
if flo_con_dtl_file != NULL_FILE:
self.update_file_status(prog, flo_con_dtl_file)
decision_table.D_table_dtl(os.path.join(self.__dir, flo_con_dtl_file), self.__version).write()
def write_regions(self, start_prog, allocated_prog):
num_files = 17
files = self.get_file_names("regions", num_files)
prog_step = round(allocated_prog / num_files)
prog = start_prog
ls_unit_ele_file = files[0].strip()
if ls_unit_ele_file != NULL_FILE:
self.update_file_status(start_prog, ls_unit_ele_file)
regions.Ls_unit_ele(os.path.join(self.__dir, ls_unit_ele_file), self.__version).write()
prog += prog_step
ls_unit_def_file = files[1].strip()
if ls_unit_def_file != NULL_FILE:
self.update_file_status(prog, ls_unit_def_file)
regions.Ls_unit_def(os.path.join(self.__dir, ls_unit_def_file), self.__version).write()
prog += prog_step
aqu_catunit_ele_file = files[8].strip()
if aqu_catunit_ele_file != NULL_FILE:
self.update_file_status(prog, aqu_catunit_ele_file)
regions.Aqu_catunit_ele(os.path.join(self.__dir, aqu_catunit_ele_file), self.__version).write()
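# Report progress and the name of the file currently being written.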
def update_file_status(self, prog, file_name):
self.emit_progress(prog, "Writing {name}...".format(name=file_name))
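# Command-line entry point: write the SWAT+ text input files from the given project database.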
if __name__ == '__main__':
sys.stdout = Unbuffered(sys.stdout)
parser = argparse.ArgumentParser(description="Write SWAT+ text files from database.")
parser.add_argument("project_db_file", type=str, help="full path of project SQLite database file")
args = parser.parse_args()
api = WriteFiles(args.project_db_file)
api.write()
```
#### File: database/datasets/setup.py
```python
from peewee import *
from . import base, definitions, hru_parm_db, lum, ops, structural, decision_table, basin, climate, soils, init, change
from fileio import hru_parm_db as files_parmdb
from fileio import lum as files_lum
from fileio import ops as files_ops
from fileio import structural as files_str
from fileio import decision_table as files_dtable
from fileio import basin as files_basin
from fileio import change as files_change
from fileio import soils as files_soils
from database import lib as db_lib
source_data_path = "../data/source-data/"
def val_exists(val):
return val is not None and val != ''
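# Creates and populates the SWAT+ datasets database (swatplus_datasets.sqlite).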
class SetupDatasetsDatabase():
@staticmethod
def init(datasets_db: str = None):
base.db.init(datasets_db, pragmas={'journal_mode': 'off'})
@staticmethod
def create_tables():
base.db.create_tables([definitions.Tropical_bounds, definitions.Version, definitions.File_cio_classification, definitions.File_cio, definitions.Print_prt, definitions.Print_prt_object])
base.db.create_tables([hru_parm_db.Plants_plt, hru_parm_db.Fertilizer_frt, hru_parm_db.Tillage_til, hru_parm_db.Pesticide_pst, hru_parm_db.Pathogens_pth, hru_parm_db.Urban_urb, hru_parm_db.Septic_sep, hru_parm_db.Snow_sno, soils.Soil, soils.Soil_layer, soils.Soils_lte_sol, climate.Wgn, climate.Wgn_mon])
base.db.create_tables([basin.Codes_bsn, basin.Parameters_bsn])
base.db.create_tables([decision_table.D_table_dtl, decision_table.D_table_dtl_cond, decision_table.D_table_dtl_cond_alt, decision_table.D_table_dtl_act, decision_table.D_table_dtl_act_out])
base.db.create_tables([ops.Graze_ops, ops.Harv_ops, ops.Fire_ops, ops.Irr_ops, ops.Sweep_ops, ops.Chem_app_ops])
base.db.create_tables([structural.Bmpuser_str, structural.Filterstrip_str, structural.Grassedww_str, structural.Septic_str, structural.Tiledrain_str])
base.db.create_tables([lum.Cntable_lum, lum.Ovn_table_lum, lum.Cons_prac_lum, lum.Management_sch, lum.Management_sch_auto, lum.Management_sch_op, lum.Landuse_lum])
base.db.create_tables([init.Plant_ini, init.Plant_ini_item])
base.db.create_tables([change.Cal_parms_cal])
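# Check that the datasets database version is the editor version or one of the listed compatible versions.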
@staticmethod
def check_version(datasets_db, editor_version, compatibility_versions=['1.1.0', '1.1.1', '1.1.2', '1.2.0']):
conn = db_lib.open_db(datasets_db)
if db_lib.exists_table(conn, 'version'):
SetupDatasetsDatabase.init(datasets_db)
m = definitions.Version.get()
if not (m.value in compatibility_versions or m.value == editor_version):
return 'Please update your swatplus_datasets.sqlite to the most recent version: {new_version}. Your version is {current_version}.'.format(new_version=editor_version, current_version=m.value)
else:
return 'Please update your swatplus_datasets.sqlite to the most recent version, {new_version}, before creating your project.'.format(new_version=editor_version)
return None
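# Load the default datasets from the ../data/source-data text files; each table is only
# populated if it is still empty.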
@staticmethod
def initialize_data(version: str = None):
codes = [
{'table': 'connect', 'variable': 'obj_typ', 'code': 'hru', 'description': 'hru'},
{'table': 'connect', 'variable': 'obj_typ', 'code': 'hlt', 'description': 'hru_lte'},
{'table': 'connect', 'variable': 'obj_typ', 'code': 'ru', 'description': 'routing unit'},
{'table': 'connect', 'variable': 'obj_typ', 'code': 'mfl', 'description': 'modflow'},
{'table': 'connect', 'variable': 'obj_typ', 'code': 'aqu', 'description': 'aquifer'},
{'table': 'connect', 'variable': 'obj_typ', 'code': 'cha', 'description': 'channel'},
{'table': 'connect', 'variable': 'obj_typ', 'code': 'res', 'description': 'reservoir'},
{'table': 'connect', 'variable': 'obj_typ', 'code': 'rec', 'description': 'recall'},
{'table': 'connect', 'variable': 'obj_typ', 'code': 'exc', 'description': 'export coefficients'},
{'table': 'connect', 'variable': 'obj_typ', 'code': 'dr', 'description': 'delivery ratio'},
{'table': 'connect', 'variable': 'obj_typ', 'code': 'out', 'description': 'outlet'},
{'table': 'connect', 'variable': 'obj_typ', 'code': 'sdc', 'description': 'swat-deg channel'},
{'table': 'connect', 'variable': 'hyd_typ', 'code': 'tot', 'description': 'total flow'},
{'table': 'connect', 'variable': 'hyd_typ', 'code': 'rhg', 'description': 'recharge'},
{'table': 'connect', 'variable': 'hyd_typ', 'code': 'sur', 'description': 'surface'},
{'table': 'connect', 'variable': 'hyd_typ', 'code': 'lat', 'description': 'lateral'},
{'table': 'connect', 'variable': 'hyd_typ', 'code': 'til', 'description': 'tile'},
{'table': 'management_sch', 'variable': 'op_typ', 'code': 'plnt', 'description': 'plant'},
{'table': 'management_sch', 'variable': 'op_typ', 'code': 'harv', 'description': 'harvest only'},
{'table': 'management_sch', 'variable': 'op_typ', 'code': 'kill', 'description': 'kill'},
{'table': 'management_sch', 'variable': 'op_typ', 'code': 'hvkl', 'description': 'harvest and kill'},
{'table': 'management_sch', 'variable': 'op_typ', 'code': 'till', 'description': 'tillage'},
{'table': 'management_sch', 'variable': 'op_typ', 'code': 'irrm', 'description': 'irrigation'},
{'table': 'management_sch', 'variable': 'op_typ', 'code': 'fert', 'description': 'fertilizer'},
{'table': 'management_sch', 'variable': 'op_typ', 'code': 'pest', 'description': 'pesticide application'},
{'table': 'management_sch', 'variable': 'op_typ', 'code': 'graz', 'description': 'grazing'},
{'table': 'management_sch', 'variable': 'op_typ', 'code': 'burn', 'description': 'burn'},
{'table': 'management_sch', 'variable': 'op_typ', 'code': 'swep', 'description': 'street sweep'},
{'table': 'management_sch', 'variable': 'op_typ', 'code': 'prtp', 'description': 'print plant vars'},
{'table': 'management_sch', 'variable': 'op_typ', 'code': 'skip', 'description': 'skip to end of the year'}
]
with base.db.atomic():
if version is not None:
definitions.Version.create(value=version)
definitions.Tropical_bounds.insert(north=18, south=-18).execute()
"""if definitions.Var_code.select().count() < 1:
definitions.Var_code.insert_many(codes).execute()"""
if basin.Codes_bsn.select().count() < 1:
files_basin.Codes_bsn(source_data_path + 'codes.bsn').read('datasets')
if basin.Parameters_bsn.select().count() < 1:
files_basin.Parameters_bsn(source_data_path + 'parameters.bsn').read('datasets')
if change.Cal_parms_cal.select().count() < 1:
files_change.Cal_parms_cal(source_data_path + 'cal_parms.cal').read('datasets')
if hru_parm_db.Plants_plt.select().count() < 1:
files_parmdb.Plants_plt(source_data_path + 'plants.plt').read('datasets')
if hru_parm_db.Fertilizer_frt.select().count() < 1:
files_parmdb.Fertilizer_frt(source_data_path + 'fertilizer.frt').read('datasets')
if hru_parm_db.Tillage_til.select().count() < 1:
files_parmdb.Tillage_til(source_data_path + 'tillage.til').read('datasets')
if hru_parm_db.Pesticide_pst.select().count() < 1:
files_parmdb.Pesticide_pst(source_data_path + 'pesticide.pst').read('datasets')
if hru_parm_db.Urban_urb.select().count() < 1:
files_parmdb.Urban_urb(source_data_path + 'urban.urb').read('datasets')
if hru_parm_db.Septic_sep.select().count() < 1:
files_parmdb.Septic_sep(source_data_path + 'septic.sep').read('datasets')
if hru_parm_db.Snow_sno.select().count() < 1:
files_parmdb.Snow_sno(source_data_path + 'snow.sno').read('datasets')
if lum.Cntable_lum.select().count() < 1:
files_lum.Cntable_lum(source_data_path + 'cntable.lum').read('datasets')
if lum.Ovn_table_lum.select().count() < 1:
files_lum.Ovn_table_lum(source_data_path + 'ovn_table.lum').read('datasets')
if lum.Cons_prac_lum.select().count() < 1:
files_lum.Cons_prac_lum(source_data_path + 'cons_practice.lum').read('datasets')
if ops.Graze_ops.select().count() < 1:
files_ops.Graze_ops(source_data_path + 'graze.ops').read('datasets')
if ops.Harv_ops.select().count() < 1:
files_ops.Harv_ops(source_data_path + 'harv.ops').read('datasets')
if ops.Fire_ops.select().count() < 1:
files_ops.Fire_ops(source_data_path + 'fire.ops').read('datasets')
if ops.Irr_ops.select().count() < 1:
files_ops.Irr_ops(source_data_path + 'irr.ops').read('datasets')
if ops.Sweep_ops.select().count() < 1:
files_ops.Sweep_ops(source_data_path + 'sweep.ops').read('datasets')
if ops.Chem_app_ops.select().count() < 1:
files_ops.Chem_app_ops(source_data_path + 'chem_app.ops').read('datasets')
if structural.Bmpuser_str.select().count() < 1:
files_str.Bmpuser_str(source_data_path + 'bmpuser.str').read('datasets')
if structural.Filterstrip_str.select().count() < 1:
files_str.Filterstrip_str(source_data_path + 'filterstrip.str').read('datasets')
if structural.Grassedww_str.select().count() < 1:
files_str.Grassedww_str(source_data_path + 'grassedww.str').read('datasets')
if structural.Septic_str.select().count() < 1:
files_str.Septic_str(source_data_path + 'septic.str').read('datasets')
if structural.Tiledrain_str.select().count() < 1:
files_str.Tiledrain_str(source_data_path + 'tiledrain.str').read('datasets')
if decision_table.D_table_dtl.select().count() < 1:
files_dtable.D_table_dtl(source_data_path + 'lum.dtl').read('datasets')
files_dtable.D_table_dtl(source_data_path + 'res_rel.dtl').read('datasets')
files_dtable.D_table_dtl(source_data_path + 'scen_lu.dtl').read('datasets')
files_dtable.D_table_dtl(source_data_path + 'flo_con.dtl').read('datasets')
if soils.Soils_lte_sol.select().count() < 1:
files_soils.Soils_lte_sol(source_data_path + 'soils_lte.sol').read('datasets')
"""if lum.Management_sch.select().count() < 1:
files_lum.Management_sch(source_data_path + 'management.sch').read('datasets')"""
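# Classification groups and default file entries used to populate the file.cio definition tables.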
classifications = [
{'id': 1, 'name': 'simulation'},
{'id': 2, 'name': 'basin'},
{'id': 3, 'name': 'climate'},
{'id': 4, 'name': 'connect'},
{'id': 5, 'name': 'channel'},
{'id': 6, 'name': 'reservoir'},
{'id': 7, 'name': 'routing_unit'},
{'id': 8, 'name': 'hru'},
{'id': 9, 'name': 'exco'},
{'id': 10, 'name': 'recall'},
{'id': 11, 'name': 'dr'},
{'id': 12, 'name': 'aquifer'},
{'id': 13, 'name': 'herd'},
{'id': 14, 'name': 'water_rights'},
{'id': 15, 'name': 'link'},
{'id': 16, 'name': 'hydrology'},
{'id': 17, 'name': 'structural'},
{'id': 18, 'name': 'hru_parm_db'},
{'id': 19, 'name': 'ops'},
{'id': 20, 'name': 'lum'},
{'id': 21, 'name': 'chg'},
{'id': 22, 'name': 'init'},
{'id': 23, 'name': 'soils'},
{'id': 24, 'name': 'decision_table'},
{'id': 25, 'name': 'regions'},
{'id': 26, 'name': 'pcp_path'},
{'id': 27, 'name': 'tmp_path'},
{'id': 28, 'name': 'slr_path'},
{'id': 29, 'name': 'hmd_path'},
{'id': 30, 'name': 'wnd_path'}
]
file_cio = [
{'classification': 1, 'order_in_class': 1, 'default_file_name': 'time.sim', 'database_table': 'time_sim', 'is_core_file': True},
{'classification': 1, 'order_in_class': 2, 'default_file_name': 'print.prt', 'database_table': 'print_prt', 'is_core_file': True},
{'classification': 1, 'order_in_class': 3, 'default_file_name': 'object.prt', 'database_table': 'object_prt', 'is_core_file': False},
{'classification': 1, 'order_in_class': 4, 'default_file_name': 'object.cnt', 'database_table': 'object_cnt', 'is_core_file': True},
{'classification': 1, 'order_in_class': 5, 'default_file_name': 'constituents.cs', 'database_table': 'constituents_cs', 'is_core_file': False},
{'classification': 2, 'order_in_class': 1, 'default_file_name': 'codes.bsn', 'database_table': 'codes_bsn', 'is_core_file': True},
{'classification': 2, 'order_in_class': 2, 'default_file_name': 'parameters.bsn', 'database_table': 'parameters_bsn', 'is_core_file': True},
{'classification': 3, 'order_in_class': 1, 'default_file_name': 'weather-sta.cli', 'database_table': 'weather_sta_cli', 'is_core_file': True},
{'classification': 3, 'order_in_class': 2, 'default_file_name': 'weather-wgn.cli', 'database_table': 'weather_wgn_cli', 'is_core_file': True},
{'classification': 3, 'order_in_class': 3, 'default_file_name': 'wind-dir.cli', 'database_table': 'wind_dir_cli', 'is_core_file': False},
{'classification': 3, 'order_in_class': 4, 'default_file_name': 'pcp.cli', 'database_table': 'weather_file', 'is_core_file': True},
{'classification': 3, 'order_in_class': 5, 'default_file_name': 'tmp.cli', 'database_table': 'weather_file', 'is_core_file': True},
{'classification': 3, 'order_in_class': 6, 'default_file_name': 'slr.cli', 'database_table': 'weather_file', 'is_core_file': True},
{'classification': 3, 'order_in_class': 7, 'default_file_name': 'hmd.cli', 'database_table': 'weather_file', 'is_core_file': True},
{'classification': 3, 'order_in_class': 8, 'default_file_name': 'wnd.cli', 'database_table': 'weather_file', 'is_core_file': True},
{'classification': 3, 'order_in_class': 9, 'default_file_name': 'atmodep.cli', 'database_table': 'atmodep_cli', 'is_core_file': False},
{'classification': 4, 'order_in_class': 1, 'default_file_name': 'hru.con', 'database_table': 'hru_con', 'is_core_file': True},
{'classification': 4, 'order_in_class': 2, 'default_file_name': 'hru-lte.con', 'database_table': 'hru_lte_con', 'is_core_file': False},
{'classification': 4, 'order_in_class': 3, 'default_file_name': 'rout_unit.con', 'database_table': 'rout_unit_con', 'is_core_file': True},
{'classification': 4, 'order_in_class': 4, 'default_file_name': 'modflow.con', 'database_table': 'modflow_con', 'is_core_file': False},
{'classification': 4, 'order_in_class': 5, 'default_file_name': 'aquifer.con', 'database_table': 'aquifer_con', 'is_core_file': True},
{'classification': 4, 'order_in_class': 6, 'default_file_name': 'aquifer2d.con', 'database_table': 'aquifer2d_con', 'is_core_file': False},
{'classification': 4, 'order_in_class': 7, 'default_file_name': 'channel.con', 'database_table': 'channel_con', 'is_core_file': True},
{'classification': 4, 'order_in_class': 8, 'default_file_name': 'reservoir.con', 'database_table': 'reservoir_con', 'is_core_file': True},
{'classification': 4, 'order_in_class': 9, 'default_file_name': 'recall.con', 'database_table': 'recall_con', 'is_core_file': True},
{'classification': 4, 'order_in_class': 10, 'default_file_name': 'exco.con', 'database_table': 'exco_con', 'is_core_file': False},
{'classification': 4, 'order_in_class': 11, 'default_file_name': 'delratio.con', 'database_table': 'delratio_con', 'is_core_file': False},
{'classification': 4, 'order_in_class': 12, 'default_file_name': 'outlet.con', 'database_table': 'outlet_con', 'is_core_file': True},
{'classification': 4, 'order_in_class': 13, 'default_file_name': 'chandeg.con', 'database_table': 'chandeg_con', 'is_core_file': False},
{'classification': 5, 'order_in_class': 1, 'default_file_name': 'initial.cha', 'database_table': 'initial_cha', 'is_core_file': True},
{'classification': 5, 'order_in_class': 2, 'default_file_name': 'channel.cha', 'database_table': 'channel_cha', 'is_core_file': True},
{'classification': 5, 'order_in_class': 3, 'default_file_name': 'hydrology.cha', 'database_table': 'hydrology_cha', 'is_core_file': True},
{'classification': 5, 'order_in_class': 4, 'default_file_name': 'sediment.cha', 'database_table': 'sediment_cha', 'is_core_file': True},
{'classification': 5, 'order_in_class': 5, 'default_file_name': 'nutrients.cha', 'database_table': 'nutrients_cha', 'is_core_file': True},
{'classification': 5, 'order_in_class': 6, 'default_file_name': 'channel-lte.cha', 'database_table': 'channel_lte_cha', 'is_core_file': False},
{'classification': 5, 'order_in_class': 7, 'default_file_name': 'hyd-sed-lte.cha', 'database_table': 'hyd_sed_lte_cha', 'is_core_file': False},
{'classification': 5, 'order_in_class': 8, 'default_file_name': 'temperature.cha', 'database_table': 'temperature_cha', 'is_core_file': False},
{'classification': 6, 'order_in_class': 1, 'default_file_name': 'initial.res', 'database_table': 'initial_res', 'is_core_file': True},
{'classification': 6, 'order_in_class': 2, 'default_file_name': 'reservoir.res', 'database_table': 'reservoir_res', 'is_core_file': True},
{'classification': 6, 'order_in_class': 3, 'default_file_name': 'hydrology.res', 'database_table': 'hydrology_res', 'is_core_file': True},
{'classification': 6, 'order_in_class': 4, 'default_file_name': 'sediment.res', 'database_table': 'sediment_res', 'is_core_file': True},
{'classification': 6, 'order_in_class': 5, 'default_file_name': 'nutrients.res', 'database_table': 'nutrients_res', 'is_core_file': True},
{'classification': 6, 'order_in_class': 6, 'default_file_name': 'weir.res', 'database_table': 'weir_res', 'is_core_file': False},
{'classification': 6, 'order_in_class': 7, 'default_file_name': 'wetland.wet', 'database_table': 'wetland_wet', 'is_core_file': False},
{'classification': 6, 'order_in_class': 8, 'default_file_name': 'hydrology.wet', 'database_table': 'hydrology_wet', 'is_core_file': False},
{'classification': 7, 'order_in_class': 1, 'default_file_name': 'rout_unit.def', 'database_table': '', 'is_core_file': True},
{'classification': 7, 'order_in_class': 2, 'default_file_name': 'rout_unit.ele', 'database_table': 'rout_unit_ele', 'is_core_file': True},
{'classification': 7, 'order_in_class': 3, 'default_file_name': 'rout_unit.rtu', 'database_table': 'rout_unit_rtu', 'is_core_file': True},
{'classification': 7, 'order_in_class': 4, 'default_file_name': 'rout_unit.dr', 'database_table': 'rout_unit_dr', 'is_core_file': False},
{'classification': 8, 'order_in_class': 1, 'default_file_name': 'hru-data.hru', 'database_table': 'hru_data_hru', 'is_core_file': True},
{'classification': 8, 'order_in_class': 2, 'default_file_name': 'hru-lte.hru', 'database_table': 'hru_lte_hru', 'is_core_file': False},
{'classification': 9, 'order_in_class': 1, 'default_file_name': 'exco.exc', 'database_table': 'exco_exc', 'is_core_file': False},
{'classification': 9, 'order_in_class': 2, 'default_file_name': 'exco_om.exc', 'database_table': 'exco_om_exc', 'is_core_file': False},
{'classification': 9, 'order_in_class': 3, 'default_file_name': 'exco_pest.exc', 'database_table': 'exco_pest_exc', 'is_core_file': False},
{'classification': 9, 'order_in_class': 4, 'default_file_name': 'exco_path.exc', 'database_table': 'exco_path_exc', 'is_core_file': False},
{'classification': 9, 'order_in_class': 5, 'default_file_name': 'exco_hmet.exc', 'database_table': 'exco_hmet_exc', 'is_core_file': False},
{'classification': 9, 'order_in_class': 6, 'default_file_name': 'exco_salt.exc', 'database_table': 'exco_salt_exc', 'is_core_file': False},
{'classification': 10, 'order_in_class': 1, 'default_file_name': 'recall.rec', 'database_table': 'recall_rec', 'is_core_file': True},
{'classification': 11, 'order_in_class': 1, 'default_file_name': 'delratio.del', 'database_table': 'delratio_del', 'is_core_file': False},
{'classification': 11, 'order_in_class': 2, 'default_file_name': 'dr_om.del', 'database_table': 'dr_om_exc', 'is_core_file': False},
{'classification': 11, 'order_in_class': 3, 'default_file_name': 'dr_pest.del', 'database_table': 'dr_pest_del', 'is_core_file': False},
{'classification': 11, 'order_in_class': 4, 'default_file_name': 'dr_path.del', 'database_table': 'dr_path_del', 'is_core_file': False},
{'classification': 11, 'order_in_class': 5, 'default_file_name': 'dr_hmet.del', 'database_table': 'dr_hmet_del', 'is_core_file': False},
{'classification': 11, 'order_in_class': 6, 'default_file_name': 'dr_salt.del', 'database_table': 'dr_salt_del', 'is_core_file': False},
{'classification': 12, 'order_in_class': 1, 'default_file_name': 'initial.aqu', 'database_table': 'initial_aqu', 'is_core_file': True},
{'classification': 12, 'order_in_class': 2, 'default_file_name': 'aquifer.aqu', 'database_table': 'aquifer_aqu', 'is_core_file': True},
{'classification': 13, 'order_in_class': 1, 'default_file_name': 'animal.hrd', 'database_table': 'animal_hrd', 'is_core_file': False},
{'classification': 13, 'order_in_class': 2, 'default_file_name': 'herd.hrd', 'database_table': 'herd_hrd', 'is_core_file': False},
{'classification': 13, 'order_in_class': 3, 'default_file_name': 'ranch.hrd', 'database_table': 'ranch_hrd', 'is_core_file': False},
{'classification': 14, 'order_in_class': 1, 'default_file_name': 'define.wro', 'database_table': 'define_wro', 'is_core_file': False},
{'classification': 14, 'order_in_class': 2, 'default_file_name': 'element.wro', 'database_table': 'element_wro', 'is_core_file': False},
{'classification': 14, 'order_in_class': 3, 'default_file_name': 'water_rights.wro', 'database_table': 'water_rights_wro', 'is_core_file': False},
{'classification': 15, 'order_in_class': 1, 'default_file_name': 'chan-surf.lin', 'database_table': 'chan_surf_lin', 'is_core_file': False},
{'classification': 15, 'order_in_class': 2, 'default_file_name': 'chan-aqu.lin', 'database_table': 'chan_aqu_lin', 'is_core_file': False},
{'classification': 16, 'order_in_class': 1, 'default_file_name': 'hydrology.hyd', 'database_table': 'hydrology_hyd', 'is_core_file': True},
{'classification': 16, 'order_in_class': 2, 'default_file_name': 'topography.hyd', 'database_table': 'topography_hyd', 'is_core_file': True},
{'classification': 16, 'order_in_class': 3, 'default_file_name': 'field.fld', 'database_table': 'field_fld', 'is_core_file': True},
{'classification': 17, 'order_in_class': 1, 'default_file_name': 'tiledrain.str', 'database_table': 'tiledrain_str', 'is_core_file': True},
{'classification': 17, 'order_in_class': 2, 'default_file_name': 'septic.str', 'database_table': 'septic_str', 'is_core_file': False},
{'classification': 17, 'order_in_class': 3, 'default_file_name': 'filterstrip.str', 'database_table': 'filterstrip_str', 'is_core_file': True},
{'classification': 17, 'order_in_class': 4, 'default_file_name': 'grassedww.str', 'database_table': 'grassedww_str', 'is_core_file': True},
{'classification': 17, 'order_in_class': 5, 'default_file_name': 'bmpuser.str', 'database_table': 'bmpuser_str', 'is_core_file': False},
{'classification': 18, 'order_in_class': 1, 'default_file_name': 'plants.plt', 'database_table': 'plants_plt', 'is_core_file': True},
{'classification': 18, 'order_in_class': 2, 'default_file_name': 'fertilizer.frt', 'database_table': 'fertilizer_frt', 'is_core_file': True},
{'classification': 18, 'order_in_class': 3, 'default_file_name': 'tillage.til', 'database_table': 'tillage_til', 'is_core_file': True},
{'classification': 18, 'order_in_class': 4, 'default_file_name': 'pesticide.pst', 'database_table': 'pesticide_pst', 'is_core_file': False},
{'classification': 18, 'order_in_class': 5, 'default_file_name': 'pathogens.pth', 'database_table': 'pathogens_pth', 'is_core_file': False},
{'classification': 18, 'order_in_class': 6, 'default_file_name': 'metals.mtl', 'database_table': 'metals_mtl', 'is_core_file': False},
{'classification': 18, 'order_in_class': 7, 'default_file_name': 'salts.slt', 'database_table': 'salts_slt', 'is_core_file': False},
{'classification': 18, 'order_in_class': 8, 'default_file_name': 'urban.urb', 'database_table': 'urban_urb', 'is_core_file': True},
{'classification': 18, 'order_in_class': 9, 'default_file_name': 'septic.sep', 'database_table': 'septic_sep', 'is_core_file': False},
{'classification': 18, 'order_in_class': 10, 'default_file_name': 'snow.sno', 'database_table': 'snow_sno', 'is_core_file': True},
{'classification': 19, 'order_in_class': 1, 'default_file_name': 'harv.ops', 'database_table': 'harv_ops', 'is_core_file': True},
{'classification': 19, 'order_in_class': 2, 'default_file_name': 'graze.ops', 'database_table': 'graze_ops', 'is_core_file': True},
{'classification': 19, 'order_in_class': 3, 'default_file_name': 'irr.ops', 'database_table': 'irr_ops', 'is_core_file': True},
{'classification': 19, 'order_in_class': 4, 'default_file_name': 'chem_app.ops', 'database_table': 'chem_app_ops', 'is_core_file': False},
{'classification': 19, 'order_in_class': 5, 'default_file_name': 'fire.ops', 'database_table': 'fire_ops', 'is_core_file': True},
{'classification': 19, 'order_in_class': 6, 'default_file_name': 'sweep.ops', 'database_table': 'sweep_ops', 'is_core_file': False},
{'classification': 20, 'order_in_class': 1, 'default_file_name': 'landuse.lum', 'database_table': 'landuse_lum', 'is_core_file': True},
{'classification': 20, 'order_in_class': 2, 'default_file_name': 'management.sch', 'database_table': 'management_sch', 'is_core_file': True},
{'classification': 20, 'order_in_class': 3, 'default_file_name': 'cntable.lum', 'database_table': 'cntable_lum', 'is_core_file': True},
{'classification': 20, 'order_in_class': 4, 'default_file_name': 'cons_practice.lum', 'database_table': 'cons_practice_lum', 'is_core_file': True},
{'classification': 20, 'order_in_class': 5, 'default_file_name': 'ovn_table.lum', 'database_table': 'ovn_table_lum', 'is_core_file': True},
{'classification': 21, 'order_in_class': 1, 'default_file_name': 'cal_parms.cal', 'database_table': 'cal_parms_cal', 'is_core_file': False},
{'classification': 21, 'order_in_class': 2, 'default_file_name': 'calibration.cal', 'database_table': 'calibration_cal', 'is_core_file': False},
{'classification': 21, 'order_in_class': 3, 'default_file_name': 'codes.sft', 'database_table': 'codes_sft', 'is_core_file': False},
{'classification': 21, 'order_in_class': 4, 'default_file_name': 'wb_parms.sft', 'database_table': 'wb_parms_sft', 'is_core_file': False},
{'classification': 21, 'order_in_class': 5, 'default_file_name': 'water_balance.sft', 'database_table': 'water_balance_sft', 'is_core_file': False},
{'classification': 21, 'order_in_class': 6, 'default_file_name': 'ch_sed_budget.sft', 'database_table': 'ch_sed_budget_sft', 'is_core_file': False},
{'classification': 21, 'order_in_class': 7, 'default_file_name': 'ch_sed_parms.sft', 'database_table': 'ch_sed_parms_sft', 'is_core_file': False},
{'classification': 21, 'order_in_class': 8, 'default_file_name': 'plant_parms.sft', 'database_table': 'plant_parms_sft', 'is_core_file': False},
{'classification': 21, 'order_in_class': 9, 'default_file_name': 'plant_gro.sft', 'database_table': 'plant_gro_sft', 'is_core_file': False},
{'classification': 22, 'order_in_class': 1, 'default_file_name': 'plant.ini', 'database_table': 'plant_ini', 'is_core_file': False},
{'classification': 22, 'order_in_class': 2, 'default_file_name': 'soil_plant.ini', 'database_table': 'soil_plant_ini', 'is_core_file': True},
{'classification': 22, 'order_in_class': 3, 'default_file_name': 'om_water.ini', 'database_table': 'om_water_ini', 'is_core_file': True},
{'classification': 22, 'order_in_class': 4, 'default_file_name': 'pest_hru.ini', 'database_table': 'pest_hru_ini', 'is_core_file': True},
{'classification': 22, 'order_in_class': 5, 'default_file_name': 'pest_water.ini', 'database_table': 'pest_water_ini', 'is_core_file': True},
{'classification': 22, 'order_in_class': 6, 'default_file_name': 'path_hru.ini', 'database_table': 'path_hru_ini', 'is_core_file': True},
{'classification': 22, 'order_in_class': 7, 'default_file_name': 'path_water.ini', 'database_table': 'path_water_ini', 'is_core_file': True},
{'classification': 22, 'order_in_class': 8, 'default_file_name': 'hmet_hru.ini', 'database_table': 'hmet_hru_ini', 'is_core_file': True},
{'classification': 22, 'order_in_class': 9, 'default_file_name': 'hmet_water.ini', 'database_table': 'hmet_water_ini', 'is_core_file': True},
{'classification': 22, 'order_in_class': 10, 'default_file_name': 'salt_hru.ini', 'database_table': 'salt_hru_ini', 'is_core_file': True},
{'classification': 22, 'order_in_class': 11, 'default_file_name': 'salt_water.ini', 'database_table': 'salt_water_ini', 'is_core_file': True},
{'classification': 23, 'order_in_class': 1, 'default_file_name': 'soils.sol', 'database_table': 'soils_sol', 'is_core_file': True},
{'classification': 23, 'order_in_class': 2, 'default_file_name': 'nutrients.sol', 'database_table': 'nutrients_sol', 'is_core_file': True},
{'classification': 23, 'order_in_class': 3, 'default_file_name': 'soils_lte.sol', 'database_table': 'soils_lte_sol', 'is_core_file': True},
{'classification': 24, 'order_in_class': 1, 'default_file_name': 'lum.dtl', 'database_table': 'lum_dtl', 'is_core_file': True},
{'classification': 24, 'order_in_class': 2, 'default_file_name': 'res_rel.dtl', 'database_table': 'res_rel_dtl', 'is_core_file': True},
{'classification': 24, 'order_in_class': 3, 'default_file_name': 'scen_lu.dtl', 'database_table': 'scen_lu_dtl', 'is_core_file': True},
{'classification': 24, 'order_in_class': 4, 'default_file_name': 'flo_con.dtl', 'database_table': 'flo_con_dtl', 'is_core_file': True},
{'classification': 25, 'order_in_class': 1, 'default_file_name': 'ls_unit.ele', 'database_table': 'ls_unit_ele', 'is_core_file': True},
{'classification': 25, 'order_in_class': 2, 'default_file_name': 'ls_unit.def', 'database_table': 'ls_unit_def', 'is_core_file': True},
{'classification': 25, 'order_in_class': 3, 'default_file_name': 'ls_reg.ele', 'database_table': 'ls_reg_ele', 'is_core_file': False},
{'classification': 25, 'order_in_class': 4, 'default_file_name': 'ls_reg.def', 'database_table': 'ls_reg_def', 'is_core_file': False},
{'classification': 25, 'order_in_class': 5, 'default_file_name': 'ls_cal.reg', 'database_table': 'ls_cal_reg', 'is_core_file': False},
{'classification': 25, 'order_in_class': 6, 'default_file_name': 'ch_catunit.ele', 'database_table': 'ch_catunit_ele', 'is_core_file': False},
{'classification': 25, 'order_in_class': 7, 'default_file_name': 'ch_catunit.def', 'database_table': 'ch_catunit_def', 'is_core_file': False},
{'classification': 25, 'order_in_class': 8, 'default_file_name': 'ch_reg.def', 'database_table': 'ch_reg_def', 'is_core_file': False},
{'classification': 25, 'order_in_class': 9, 'default_file_name': 'aqu_catunit.ele', 'database_table': 'aqu_catunit_ele', 'is_core_file': False},
{'classification': 25, 'order_in_class': 10, 'default_file_name': 'aqu_catunit.def', 'database_table': 'aqu_catunit_def', 'is_core_file': False},
{'classification': 25, 'order_in_class': 11, 'default_file_name': 'aqu_reg.def', 'database_table': 'aqu_reg_def', 'is_core_file': False},
{'classification': 25, 'order_in_class': 12, 'default_file_name': 'res_catunit.ele', 'database_table': 'res_catunit_ele', 'is_core_file': False},
{'classification': 25, 'order_in_class': 13, 'default_file_name': 'res_catunit.def', 'database_table': 'res_catunit_def', 'is_core_file': False},
{'classification': 25, 'order_in_class': 14, 'default_file_name': 'res_reg.def', 'database_table': 'res_reg_def', 'is_core_file': False},
{'classification': 25, 'order_in_class': 15, 'default_file_name': 'rec_catunit.ele', 'database_table': 'rec_catunit_ele', 'is_core_file': False},
{'classification': 25, 'order_in_class': 16, 'default_file_name': 'rec_catunit.def', 'database_table': 'rec_catunit_def', 'is_core_file': False},
{'classification': 25, 'order_in_class': 17, 'default_file_name': 'rec_reg.def', 'database_table': 'rec_reg_def', 'is_core_file': False}
]
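# Default print.prt output objects; yearly output is enabled for the basin, landscape unit,
# HRU, channel, aquifer, reservoir, and recall objects.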
print_prt_objects = [
{'name': 'basin_wb', 'daily': False, 'monthly': False, 'yearly': True, 'avann': False},
{'name': 'basin_nb', 'daily': False, 'monthly': False, 'yearly': True, 'avann': False},
{'name': 'basin_ls', 'daily': False, 'monthly': False, 'yearly': True, 'avann': False},
{'name': 'basin_pw', 'daily': False, 'monthly': False, 'yearly': True, 'avann': False},
{'name': 'basin_aqu', 'daily': False, 'monthly': False, 'yearly': True, 'avann': False},
{'name': 'basin_res', 'daily': False, 'monthly': False, 'yearly': True, 'avann': False},
{'name': 'basin_cha', 'daily': False, 'monthly': False, 'yearly': True, 'avann': False},
{'name': 'basin_sd_cha', 'daily': False, 'monthly': False, 'yearly': True, 'avann': False},
{'name': 'basin_psc', 'daily': False, 'monthly': False, 'yearly': True, 'avann': False},
{'name': 'region_wb', 'daily': False, 'monthly': False, 'yearly': False, 'avann': False},
{'name': 'region_nb', 'daily': False, 'monthly': False, 'yearly': False, 'avann': False},
{'name': 'region_ls', 'daily': False, 'monthly': False, 'yearly': False, 'avann': False},
{'name': 'region_pw', 'daily': False, 'monthly': False, 'yearly': False, 'avann': False},
{'name': 'region_aqu', 'daily': False, 'monthly': False, 'yearly': False, 'avann': False},
{'name': 'region_res', 'daily': False, 'monthly': False, 'yearly': False, 'avann': False},
{'name': 'region_cha', 'daily': False, 'monthly': False, 'yearly': False, 'avann': False},
{'name': 'region_sd_cha', 'daily': False, 'monthly': False, 'yearly': False, 'avann': False},
{'name': 'region_psc', 'daily': False, 'monthly': False, 'yearly': False, 'avann': False},
{'name': 'lsunit_wb', 'daily': False, 'monthly': False, 'yearly': True, 'avann': False},
{'name': 'lsunit_nb', 'daily': False, 'monthly': False, 'yearly': True, 'avann': False},
{'name': 'lsunit_ls', 'daily': False, 'monthly': False, 'yearly': True, 'avann': False},
{'name': 'lsunit_pw', 'daily': False, 'monthly': False, 'yearly': True, 'avann': False},
{'name': 'hru_wb', 'daily': False, 'monthly': False, 'yearly': True, 'avann': False},
{'name': 'hru_nb', 'daily': False, 'monthly': False, 'yearly': True, 'avann': False},
{'name': 'hru_ls', 'daily': False, 'monthly': False, 'yearly': True, 'avann': False},
{'name': 'hru_pw', 'daily': False, 'monthly': False, 'yearly': True, 'avann': False},
{'name': 'hru-lte_wb', 'daily': False, 'monthly': False, 'yearly': False, 'avann': False},
{'name': 'hru-lte_nb', 'daily': False, 'monthly': False, 'yearly': False, 'avann': False},
{'name': 'hru-lte_ls', 'daily': False, 'monthly': False, 'yearly': False, 'avann': False},
{'name': 'hru-lte_pw', 'daily': False, 'monthly': False, 'yearly': False, 'avann': False},
{'name': 'channel', 'daily': False, 'monthly': False, 'yearly': True, 'avann': False},
{'name': 'channel_sd', 'daily': False, 'monthly': False, 'yearly': True, 'avann': False},
{'name': 'aquifer', 'daily': False, 'monthly': False, 'yearly': True, 'avann': False},
{'name': 'reservoir', 'daily': False, 'monthly': False, 'yearly': True, 'avann': False},
{'name': 'recall', 'daily': False, 'monthly': False, 'yearly': True, 'avann': False},
{'name': 'hyd', 'daily': False, 'monthly': False, 'yearly': False, 'avann': False},
{'name': 'ru', 'daily': False, 'monthly': False, 'yearly': False, 'avann': False},
{'name': 'pest', 'daily': False, 'monthly': False, 'yearly': False, 'avann': False}
]
with base.db.atomic():
if definitions.File_cio_classification.select().count() < 1:
definitions.File_cio_classification.insert_many(classifications).execute()
if definitions.File_cio.select().count() < 1:
definitions.File_cio.insert_many(file_cio).execute()
if definitions.Print_prt.select().count() < 1:
definitions.Print_prt.create(
nyskip=1,
day_start=0,
day_end=0,
yrc_start=0,
yrc_end=0,
interval=1,
csvout=False,
dbout=False,
cdfout=False,
soilout=False,
mgtout=False,
hydcon=False,
fdcout=False
)
if definitions.Print_prt_object.select().count() < 1:
definitions.Print_prt_object.insert_many(print_prt_objects).execute()
if lum.Landuse_lum.select().count() < 1:
SetupDatasetsDatabase.insert_lum()
"""if definitions.Var_range.select().count() < 1:
SetupDatasetsDatabase.insert_var_range()
SetupDatasetsDatabase.insert_var_range_option()"""
@staticmethod
def insert_var_range():
file = open(source_data_path + 'var_range.csv', "r")
i = 0
items = []
for line in file:
if i > 0:
val = line.split(',')
items.append({
'id': i,
'table': val[0],
'variable': val[1],
'type': val[2],
'min_value': val[3],
'max_value': val[4],
'default_value': val[5],
'default_text': val[6],
'units': val[7],
'description': val[8]
})
i += 1
db_lib.bulk_insert(base.db, definitions.Var_range, items)
@staticmethod
def insert_var_range_option():
file = open(source_data_path + 'var_range_option.csv', "r")
i = 0
items = []
for line in file:
if i > 0:
val = line.split(',')
vr = definitions.Var_range.get_or_none((definitions.Var_range.table == val[0]) & (definitions.Var_range.variable == val[1]))
if vr is not None:
items.append({
'id': i,
'var_range_id': vr.id,
'value': val[2],
'text': val[3],
'text_only': True if val[4].strip() == '1' else False,
'text_value': None if val[5].strip() == '' else val[5].strip()
})
i += 1
db_lib.bulk_insert(base.db, definitions.Var_range_option, items)
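# Build default plant communities (plant.ini) and land use records (landuse.lum) from
# plants_landuse_rules.csv, plus one land use entry per urban type.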
@staticmethod
def insert_lum():
file = open(source_data_path + 'plants_landuse_rules.csv', "r")
i = 1
rules = {}
for line in file:
if i > 1:
val = line.split(',')
n = val[0].lower().strip()
lc = int(val[6])
rules[n] = {
'mgt': None,
'cn2': val[3],
'cons_prac': val[4],
'ov_mann': val[5],
'lc_status': True if lc == 1 else False,
'lai_init': float(val[7]),
'bm_init': float(val[8]),
'phu_init': float(val[9]),
'plnt_pop': float(val[10]),
'yrs_init': float(val[11]),
'rsd_init': float(val[12])
}
i += 1
plants = hru_parm_db.Plants_plt.select()
plant_coms = []
plant_com_items = []
plant_com_id = 1
for plt in plants:
rule = rules[plt.name]
plant_com = {
'id': plant_com_id,
'name': '{name}_comm'.format(name=plt.name),
'rot_yr_ini': 1
}
plant_coms.append(plant_com)
plant_com_item = {
'plant_ini': plant_com_id,
'plnt_name': plt.id,
'lc_status': rule['lc_status'],
'lai_init': rule['lai_init'],
'bm_init': rule['bm_init'],
'phu_init': rule['phu_init'],
'plnt_pop': rule['plnt_pop'],
'yrs_init': rule['yrs_init'],
'rsd_init': rule['rsd_init']
}
plant_com_items.append(plant_com_item)
plant_com_id += 1
db_lib.bulk_insert(base.db, init.Plant_ini, plant_coms)
db_lib.bulk_insert(base.db, init.Plant_ini_item, plant_com_items)
lum_default_cal_group = None
lum_default_mgt = None #lum.Management_sch.get(lum.Management_sch.name == 'no_mgt').id
lum_default_cn2 = 5
lum_default_cons_prac = 1
lum_default_ov_mann = 2
lums = []
lum_dict = {}
lum_id = 1
for pcom in init.Plant_ini.select().order_by(init.Plant_ini.id):
plant_name = pcom.name.strip().split('_comm')[0]
rule = rules[plant_name]
"""mgt_id = lum_default_mgt
if val_exists(rule['mgt']):
mgt = lum.Management_sch.get(lum.Management_sch.name == rule['mgt'])
mgt_id = mgt.id"""
cn2_id = lum_default_cn2
if val_exists(rule['cn2']):
cn2 = lum.Cntable_lum.get(lum.Cntable_lum.name == rule['cn2'])
cn2_id = cn2.id
cons_prac_id = lum_default_cons_prac
if val_exists(rule['cons_prac']):
cons_prac = lum.Cons_prac_lum.get(lum.Cons_prac_lum.name == rule['cons_prac'])
cons_prac_id = cons_prac.id
ov_mann_id = lum_default_ov_mann
if val_exists(rule['ov_mann']):
ov_mann = lum.Ovn_table_lum.get(lum.Ovn_table_lum.name == rule['ov_mann'])
ov_mann_id = ov_mann.id
l = {
'id': lum_id,
'name': '{name}_lum'.format(name=plant_name),
'plnt_com': pcom.id,
'mgt': None, #mgt_id,
'cn2': cn2_id,
'cons_prac': cons_prac_id,
'ov_mann': ov_mann_id,
'cal_group': lum_default_cal_group
}
lums.append(l)
lum_dict[plant_name] = lum_id
lum_id += 1
db_lib.bulk_insert(base.db, lum.Landuse_lum, lums)
urbans = hru_parm_db.Urban_urb.select()
urb_lums = []
for urb in urbans:
l = {
'id': lum_id,
'name': '{name}_lum'.format(name=urb.name),
'urban': urb.id,
'urb_ro': 'buildup_washoff',
'mgt': lum_default_mgt,
'cn2': 49,
'cons_prac': lum_default_cons_prac,
'ov_mann': 18,
'cal_group': lum_default_cal_group
}
urb_lums.append(l)
lum_dict[urb.name] = lum_id
lum_id += 1
db_lib.bulk_insert(base.db, lum.Landuse_lum, urb_lums)
```
#### File: database/project/setup.py
```python
from peewee import *
from . import base, config, simulation, climate, link, channel, reservoir, dr, exco, recall, hydrology, routing_unit, aquifer, \
basin, hru_parm_db, structural, ops, decision_table, init, lum, soils, \
change, regions, hru, connect, gis
from database import lib
from database.datasets import base as datasets_base, definitions as dataset_defs, decision_table as dataset_dts
import os, os.path
from shutil import copyfile, copy
import time
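# Sets up the project SQLite database tables and provides rollback support.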
class SetupProjectDatabase():
@staticmethod
def init(project_db:str, datasets_db:str = None):
base.db.init(project_db, pragmas={'journal_mode': 'off'})
if datasets_db:
datasets_base.db.init(datasets_db, pragmas={'journal_mode': 'off'})
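# Back up the failed project database to a DatabaseErrors folder, then restore it from the rollback copy.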
@staticmethod
def rollback(project_db:str, rollback_db:str):
base_path = os.path.dirname(project_db)
rel_project_db = os.path.relpath(project_db, base_path)
filename, file_extension = os.path.splitext(rel_project_db)
err_filename = filename + '_error_' + time.strftime('%Y%m%d-%H%M%S') + file_extension
err_dir = os.path.join(base_path, 'DatabaseErrors')
if not os.path.exists(err_dir):
os.makedirs(err_dir)
copyfile(project_db, os.path.join(err_dir, err_filename))
copy(rollback_db, project_db)
SetupProjectDatabase.init(project_db)
@staticmethod
def create_tables():
base.db.create_tables([config.Project_config, config.File_cio_classification, config.File_cio])
base.db.create_tables([basin.Codes_bsn, basin.Parameters_bsn])
base.db.create_tables([simulation.Time_sim, simulation.Print_prt, simulation.Print_prt_aa_int, simulation.Print_prt_object, simulation.Object_prt, simulation.Object_cnt, simulation.Constituents_cs])
base.db.create_tables([climate.Weather_wgn_cli, climate.Weather_wgn_cli_mon, climate.Weather_sta_cli, climate.Weather_file, climate.Wind_dir_cli, climate.Atmo_cli, climate.Atmo_cli_sta, climate.Atmo_cli_sta_value])
base.db.create_tables([link.Chan_aqu_lin, link.Chan_aqu_lin_ob, link.Chan_surf_lin, link.Chan_surf_lin_ob])
base.db.create_tables([channel.Initial_cha, channel.Hydrology_cha, channel.Sediment_cha, channel.Nutrients_cha, channel.Channel_cha, channel.Hyd_sed_lte_cha, channel.Channel_lte_cha])
base.db.create_tables([reservoir.Initial_res, reservoir.Hydrology_res, reservoir.Sediment_res, reservoir.Nutrients_res, reservoir.Weir_res, reservoir.Reservoir_res, reservoir.Hydrology_wet, reservoir.Wetland_wet])
base.db.create_tables([dr.Dr_om_del, dr.Dr_pest_del, dr.Dr_pest_col, dr.Dr_pest_val, dr.Dr_path_del, dr.Dr_path_col, dr.Dr_path_val, dr.Dr_hmet_del, dr.Dr_hmet_col, dr.Dr_hmet_val, dr.Dr_salt_del, dr.Dr_salt_col, dr.Dr_salt_val, dr.Delratio_del])
base.db.create_tables([exco.Exco_om_exc, exco.Exco_pest_exc, exco.Exco_path_exc, exco.Exco_hmet_exc, exco.Exco_salt_exc, exco.Exco_exc])
base.db.create_tables([exco.Exco_pest_col, exco.Exco_pest_val, exco.Exco_path_col, exco.Exco_path_val, exco.Exco_hmet_col, exco.Exco_hmet_val, exco.Exco_salt_col, exco.Exco_salt_val])
base.db.create_tables([recall.Recall_rec, recall.Recall_dat])
base.db.create_tables([hydrology.Topography_hyd, hydrology.Hydrology_hyd, hydrology.Field_fld])
base.db.create_tables([routing_unit.Rout_unit_dr, routing_unit.Rout_unit_rtu])
base.db.create_tables([aquifer.Initial_aqu, aquifer.Aquifer_aqu])
base.db.create_tables([hru_parm_db.Plants_plt, hru_parm_db.Fertilizer_frt, hru_parm_db.Tillage_til, hru_parm_db.Pesticide_pst, hru_parm_db.Pathogens_pth, hru_parm_db.Metals_mtl, hru_parm_db.Salts_slt, hru_parm_db.Urban_urb, hru_parm_db.Septic_sep, hru_parm_db.Snow_sno])
base.db.create_tables([structural.Septic_str, structural.Bmpuser_str, structural.Filterstrip_str, structural.Grassedww_str, structural.Tiledrain_str])
base.db.create_tables([ops.Graze_ops, ops.Harv_ops, ops.Irr_ops, ops.Chem_app_ops, ops.Fire_ops, ops.Sweep_ops])
base.db.create_tables([decision_table.D_table_dtl, decision_table.D_table_dtl_cond, decision_table.D_table_dtl_cond_alt, decision_table.D_table_dtl_act, decision_table.D_table_dtl_act_out])
base.db.create_tables([init.Plant_ini, init.Plant_ini_item, init.Om_water_ini, init.Pest_hru_ini, init.Pest_hru_ini_item, init.Pest_water_ini, init.Path_hru_ini, init.Path_water_ini, init.Hmet_hru_ini, init.Hmet_water_ini, init.Salt_hru_ini, init.Salt_water_ini, init.Soil_plant_ini])
base.db.create_tables([lum.Management_sch, lum.Management_sch_auto, lum.Management_sch_op, lum.Cntable_lum, lum.Cons_prac_lum, lum.Ovn_table_lum, lum.Landuse_lum])
base.db.create_tables([soils.Soils_sol, soils.Soils_sol_layer, soils.Nutrients_sol, soils.Soils_lte_sol])
base.db.create_tables([change.Cal_parms_cal, change.Calibration_cal, change.Calibration_cal_cond, change.Calibration_cal_elem, change.Codes_sft, change.Wb_parms_sft, change.Water_balance_sft, change.Water_balance_sft_item, change.Plant_parms_sft, change.Plant_gro_sft, change.Plant_gro_sft_item, change.Ch_sed_parms_sft, change.Ch_sed_budget_sft, change.Ch_sed_budget_sft_item])
base.db.create_tables([regions.Ls_unit_def, regions.Ls_unit_ele,
regions.Ls_reg_def, regions.Ls_reg_ele,
regions.Ch_catunit_ele, regions.Ch_catunit_def, regions.Ch_catunit_def_elem, regions.Ch_reg_def, regions.Ch_reg_def_elem,
regions.Aqu_catunit_ele, regions.Aqu_catunit_def, regions.Aqu_catunit_def_elem, regions.Aqu_reg_def, regions.Aqu_reg_def_elem,
regions.Res_catunit_ele, regions.Res_catunit_def, regions.Res_catunit_def_elem, regions.Res_reg_def, regions.Res_reg_def_elem,
regions.Rec_catunit_ele, regions.Rec_catunit_def, regions.Rec_catunit_def_elem, regions.Rec_reg_def, regions.Rec_reg_def_elem])
base.db.create_tables([hru.Hru_lte_hru, hru.Hru_data_hru])
base.db.create_tables([connect.Hru_con, connect.Hru_con_out,
connect.Hru_lte_con, connect.Hru_lte_con_out,
connect.Rout_unit_con, connect.Rout_unit_con_out,
connect.Modflow_con, connect.Modflow_con_out,
connect.Aquifer_con, connect.Aquifer_con_out,
connect.Aquifer2d_con, connect.Aquifer2d_con_out,
connect.Channel_con, connect.Channel_con_out,
connect.Reservoir_con, connect.Reservoir_con_out,
connect.Recall_con, connect.Recall_con_out,
connect.Exco_con, connect.Exco_con_out,
connect.Delratio_con, connect.Delratio_con_out,
connect.Outlet_con, connect.Outlet_con_out,
connect.Chandeg_con, connect.Chandeg_con_out])
base.db.create_tables([connect.Rout_unit_ele])
base.db.create_tables([gis.Gis_channels, gis.Gis_subbasins, gis.Gis_hrus, gis.Gis_lsus, gis.Gis_water, gis.Gis_points, gis.Gis_routing])
@staticmethod
def initialize_data(project_name, is_lte=False, overwrite_plants=False):
# Set up default simulation data
simulation.Object_cnt.get_or_create_default(project_name=project_name)
simulation.Time_sim.get_or_create_default()
datasets_db_name = datasets_base.db.database
project_db_name = base.db.database
if basin.Codes_bsn.select().count() < 1:
lib.copy_table('codes_bsn', datasets_db_name, project_db_name)
if basin.Parameters_bsn.select().count() < 1:
lib.copy_table('parameters_bsn', datasets_db_name, project_db_name)
if overwrite_plants:
base.db.drop_tables([hru_parm_db.Plants_plt])
base.db.create_tables([hru_parm_db.Plants_plt])
if hru_parm_db.Plants_plt.select().count() < 1:
lib.copy_table('plants_plt', datasets_db_name, project_db_name)
if hru_parm_db.Urban_urb.select().count() < 1:
lib.copy_table('urban_urb', datasets_db_name, project_db_name)
if not is_lte:
if hru_parm_db.Fertilizer_frt.select().count() < 1:
lib.copy_table('fertilizer_frt', datasets_db_name, project_db_name)
if hru_parm_db.Septic_sep.select().count() < 1:
lib.copy_table('septic_sep', datasets_db_name, project_db_name)
if hru_parm_db.Snow_sno.select().count() < 1:
lib.copy_table('snow_sno', datasets_db_name, project_db_name)
if hru_parm_db.Tillage_til.select().count() < 1:
lib.copy_table('tillage_til', datasets_db_name, project_db_name)
if hru_parm_db.Pesticide_pst.select().count() < 1:
lib.copy_table('pesticide_pst', datasets_db_name, project_db_name)
if lum.Cntable_lum.select().count() < 1:
lib.copy_table('cntable_lum', datasets_db_name, project_db_name)
if lum.Ovn_table_lum.select().count() < 1:
lib.copy_table('ovn_table_lum', datasets_db_name, project_db_name)
if lum.Cons_prac_lum.select().count() < 1:
lib.copy_table('cons_prac_lum', datasets_db_name, project_db_name)
if ops.Graze_ops.select().count() < 1:
lib.copy_table('graze_ops', datasets_db_name, project_db_name)
if ops.Harv_ops.select().count() < 1:
lib.copy_table('harv_ops', datasets_db_name, project_db_name)
if ops.Fire_ops.select().count() < 1:
lib.copy_table('fire_ops', datasets_db_name, project_db_name)
if ops.Irr_ops.select().count() < 1:
lib.copy_table('irr_ops', datasets_db_name, project_db_name)
if ops.Sweep_ops.select().count() < 1:
lib.copy_table('sweep_ops', datasets_db_name, project_db_name)
if ops.Chem_app_ops.select().count() < 1:
lib.copy_table('chem_app_ops', datasets_db_name, project_db_name)
if structural.Bmpuser_str.select().count() < 1:
lib.copy_table('bmpuser_str', datasets_db_name, project_db_name)
if structural.Filterstrip_str.select().count() < 1:
lib.copy_table('filterstrip_str', datasets_db_name, project_db_name)
if structural.Grassedww_str.select().count() < 1:
lib.copy_table('grassedww_str', datasets_db_name, project_db_name)
if structural.Septic_str.select().count() < 1:
lib.copy_table('septic_str', datasets_db_name, project_db_name)
if structural.Tiledrain_str.select().count() < 1:
lib.copy_table('tiledrain_str', datasets_db_name, project_db_name)
if change.Cal_parms_cal.select().count() < 1:
lib.copy_table('cal_parms_cal', datasets_db_name, project_db_name)
if is_lte and soils.Soils_lte_sol.select().count() < 1:
lib.copy_table('soils_lte_sol', datasets_db_name, project_db_name)
if decision_table.D_table_dtl.select().count() < 1:
if not is_lte:
lib.copy_table('d_table_dtl', datasets_db_name, project_db_name, include_id=True)
lib.copy_table('d_table_dtl_cond', datasets_db_name, project_db_name, include_id=True)
lib.copy_table('d_table_dtl_cond_alt', datasets_db_name, project_db_name, include_id=True)
lib.copy_table('d_table_dtl_act', datasets_db_name, project_db_name, include_id=True)
lib.copy_table('d_table_dtl_act_out', datasets_db_name, project_db_name, include_id=True)
else:
dt_names = ['pl_grow_sum', 'pl_end_sum', 'pl_grow_win', 'pl_end_win']
for dt in dataset_dts.D_table_dtl.select().where(dataset_dts.D_table_dtl.name << dt_names):
d_id = decision_table.D_table_dtl.insert(name=dt.name, file_name=dt.file_name).execute()
for c in dt.conditions:
c_id = decision_table.D_table_dtl_cond.insert(
d_table=d_id,
var=c.var,
obj=c.obj,
obj_num=c.obj_num,
lim_var=c.lim_var,
lim_op=c.lim_op,
lim_const=c.lim_const
).execute()
for ca in c.alts:
decision_table.D_table_dtl_cond_alt.insert(cond=c_id, alt=ca.alt).execute()
for a in dt.actions:
a_id = decision_table.D_table_dtl_act.insert(
d_table=d_id,
act_typ=a.act_typ,
obj=a.obj,
obj_num=a.obj_num,
name=a.name,
option=a.option,
const=a.const,
const2=a.const2,
fp=a.fp
).execute()
for ao in a.outcomes:
decision_table.D_table_dtl_act_out.insert(act=a_id, outcome=ao.outcome).execute()
if not is_lte and lum.Management_sch.select().count() < 1:
lib.copy_table('management_sch', datasets_db_name, project_db_name, include_id=True)
lib.copy_table('management_sch_auto', datasets_db_name, project_db_name, include_id=True)
lib.copy_table('management_sch_op', datasets_db_name, project_db_name, include_id=True)
if config.File_cio_classification.select().count() < 1:
#lib.copy_table('file_cio_classification', datasets_db_name, project_db_name)
class_query = dataset_defs.File_cio_classification.select()
if class_query.count() > 0:
classes = []
for f in class_query:
cl = {
'name': f.name
}
classes.append(cl)
lib.bulk_insert(base.db, config.File_cio_classification, classes)
file_cio_query = dataset_defs.File_cio.select()
if file_cio_query.count() > 0:
file_cios = []
for f in file_cio_query:
file_cio = {
'classification': f.classification.id,
'order_in_class': f.order_in_class,
'file_name': f.default_file_name
}
file_cios.append(file_cio)
lib.bulk_insert(base.db, config.File_cio, file_cios)
if simulation.Print_prt.select().count() < 1:
lib.copy_table('print_prt', datasets_db_name, project_db_name)
print_prt_id = simulation.Print_prt.select().first().id
print_obj_query = dataset_defs.Print_prt_object.select().order_by(dataset_defs.Print_prt_object.id)
if print_obj_query.count() > 0:
print_objs = []
for p in print_obj_query:
try:
existing = simulation.Print_prt_object.get(simulation.Print_prt_object.name == p.name)
except simulation.Print_prt_object.DoesNotExist:
print_obj = {
'print_prt': print_prt_id,
'name': p.name,
'daily': p.daily,
'monthly': p.monthly,
'yearly': p.yearly,
'avann': p.avann
}
print_objs.append(print_obj)
lib.bulk_insert(base.db, simulation.Print_prt_object, print_objs)
```
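A minimal sketch of how this setup class is driven end to end, using the methods defined above; the database paths and project name are placeholders.
```python
# Usage sketch only: bind the project and datasets databases, create the
# project schema, then seed it from the datasets database.
from database.project.setup import SetupProjectDatabase

project_db = 'demo_project.sqlite'        # placeholder path
datasets_db = 'swatplus_datasets.sqlite'  # placeholder path

SetupProjectDatabase.init(project_db, datasets_db)
SetupProjectDatabase.create_tables()
SetupProjectDatabase.initialize_data('demo_project', is_lte=False)
```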
#### File: editor_api/fileio/hru_parm_db.py
```python
from .base import BaseFileModel
from database.project import base as project_base
from database.project import hru_parm_db as project_parmdb
from database.datasets import base as datasets_base
from database.datasets import hru_parm_db as datasets_parmdb
from database import lib as db_lib
from helpers import utils
import database.project.hru_parm_db as db
class Plants_plt(BaseFileModel):
def __init__(self, file_name, version=None):
self.file_name = file_name
self.version = version
def read(self, database='project', csv=False):
if database == 'project':
self.read_default_table(project_parmdb.Plants_plt, project_base.db, 53, ignore_id_col=True, csv=csv)
else:
self.read_default_table(datasets_parmdb.Plants_plt, datasets_base.db, 53, ignore_id_col=True, csv=csv)
def write(self, database='project', csv=False):
if database == 'project':
table = project_parmdb.Plants_plt
else:
table = datasets_parmdb.Plants_plt
if csv:
self.write_default_csv(table, True)
else:
self.write_default_table(table, True)
class Fertilizer_frt(BaseFileModel):
def __init__(self, file_name, version=None):
self.file_name = file_name
self.version = version
def read(self, database ='project'):
if database == 'project':
self.read_default_table(project_parmdb.Fertilizer_frt, project_base.db, 0, ignore_id_col=True)
else:
self.read_default_table(datasets_parmdb.Fertilizer_frt, datasets_base.db, 0, ignore_id_col=True)
def write(self):
self.write_default_table(db.Fertilizer_frt, True)
class Tillage_til(BaseFileModel):
def __init__(self, file_name, version=None):
self.file_name = file_name
self.version = version
def read(self, database ='project'):
if database == 'project':
self.read_default_table(project_parmdb.Tillage_til, project_base.db, 0, ignore_id_col=True)
else:
self.read_default_table(datasets_parmdb.Tillage_til, datasets_base.db, 0, ignore_id_col=True)
def write(self):
self.write_default_table(db.Tillage_til, True)
class Pesticide_pst(BaseFileModel):
def __init__(self, file_name, version=None):
self.file_name = file_name
self.version = version
def read(self, database ='project'):
if database == 'project':
self.read_default_table(project_parmdb.Pesticide_pst, project_base.db, 0, ignore_id_col=True)
else:
self.read_default_table(datasets_parmdb.Pesticide_pst, datasets_base.db, 0, ignore_id_col=True)
def write(self):
self.write_default_table(db.Pesticide_pst, True)
class Urban_urb(BaseFileModel):
def __init__(self, file_name, version=None):
self.file_name = file_name
self.version = version
def read(self, database ='project'):
if database == 'project':
self.read_default_table(project_parmdb.Urban_urb, project_base.db, 0, ignore_id_col=True)
else:
self.read_default_table(datasets_parmdb.Urban_urb, datasets_base.db, 0, ignore_id_col=True)
def write(self):
self.write_default_table(db.Urban_urb, True)
class Septic_sep(BaseFileModel):
def __init__(self, file_name, version=None):
self.file_name = file_name
self.version = version
def read(self, database ='project'):
"""
Read a septic.sep text file into the database.
NOTE: CURRENTLY THERE IS AN EXTRA NUMERIC COLUMN BEFORE THE DESCRIPTION.
:param database: project or datasets
:return:
"""
file = open(self.file_name, "r")
i = 1
septics = []
for line in file:
if i > 2:
val = line.split()
self.check_cols(val, 13, 'septic')
sep = {
'name': val[0].lower(),
'q_rate': val[1],
'bod': val[2],
'tss': val[3],
'nh4_n': val[4],
'no3_n': val[5],
'no2_n': val[6],
'org_n': val[7],
'min_p': val[8],
'org_p': val[9],
'fcoli': val[10],
'description': val[12] if val[12] != 'null' else None # 12 index because extra column
}
septics.append(sep)
i += 1
if database == 'project':
db_lib.bulk_insert(project_base.db, project_parmdb.Septic_sep, septics)
else:
db_lib.bulk_insert(datasets_base.db, datasets_parmdb.Septic_sep, septics)
def write(self):
self.write_default_table(db.Septic_sep, True)
class Snow_sno(BaseFileModel):
def __init__(self, file_name, version=None):
self.file_name = file_name
self.version = version
def read(self, database='project'):
if database == 'project':
self.read_default_table(project_parmdb.Snow_sno, project_base.db, 0, ignore_id_col=True)
else:
self.read_default_table(datasets_parmdb.Snow_sno, datasets_base.db, 0, ignore_id_col=True)
def write(self):
self.write_default_table(db.Snow_sno, True)
class Pathogens_pth(BaseFileModel):
def __init__(self, file_name, version=None):
self.file_name = file_name
self.version = version
def read(self, database ='project'):
if database == 'project':
self.read_default_table(project_parmdb.Pathogens_pth, project_base.db, 0, ignore_id_col=True)
else:
self.read_default_table(datasets_parmdb.Pathogens_pth, datasets_base.db, 0, ignore_id_col=True)
def write(self):
self.write_default_table(db.Pathogens_pth, True)
```
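These file models share one pattern: bind the project database, then call `read()` or `write()` on an instance constructed with a file path. A hedged usage sketch, assuming the `editor_api` package root is on the import path and the file paths are placeholders:
```python
# Usage sketch for the parameter-database file models above.
from database.project.setup import SetupProjectDatabase
from fileio.hru_parm_db import Plants_plt, Septic_sep

SetupProjectDatabase.init('demo_project.sqlite')  # placeholder path

# Write plants.plt from the rows stored in the project database.
Plants_plt('plants.plt').write()

# Read a septic.sep text file into the project database.
Septic_sep('septic.sep').read(database='project')
```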
#### File: editor_api/fileio/recall.py
```python
from .base import BaseFileModel, FileColumn as col
import database.project.recall as db
from helpers import utils
import os.path
from database.project import base as project_base
from database import lib as db_lib
import csv
class Recall_rec(BaseFileModel):
def __init__(self, file_name, version=None):
self.file_name = file_name
self.version = version
def read(self):
raise NotImplementedError('Reading not implemented yet.')
def read_data(self, recall_rec_id, delete_existing):
with open(self.file_name, mode='r') as csv_file:
dialect = csv.Sniffer().sniff(csv_file.readline())
csv_file.seek(0)
replace_commas = dialect is not None and dialect.delimiter != ','
csv_reader = csv.DictReader(csv_file)
rows = []
for row in csv_reader:
if replace_commas:
for key in row:
row[key] = row[key].replace(',', '.', 1)
row['recall_rec_id'] = recall_rec_id
rows.append(row)
if delete_existing:
db.Recall_dat.delete().where(db.Recall_dat.recall_rec_id == recall_rec_id).execute()
db_lib.bulk_insert(project_base.db, db.Recall_dat, rows)
def read_const_data(self):
with open(self.file_name, mode='r') as csv_file:
dialect = csv.Sniffer().sniff(csv_file.readline())
csv_file.seek(0)
replace_commas = dialect is not None and dialect.delimiter != ','
csv_reader = csv.DictReader(csv_file, dialect=dialect)
rows = []
for row in csv_reader:
rec = db.Recall_rec.get_or_none(db.Recall_rec.name == row['name'])
if rec is not None:
db.Recall_rec.update(rec_typ=4).where(db.Recall_rec.id == rec.id).execute()
db.Recall_dat.delete().where(db.Recall_dat.recall_rec_id == rec.id).execute()
if replace_commas:
for key in row:
row[key] = row[key].replace(',', '.', 1)
row['recall_rec_id'] = rec.id
row['yr'] = 0
row['t_step'] = 0
row.pop('name', None)
rows.append(row)
db_lib.bulk_insert(project_base.db, db.Recall_dat, rows)
def write(self):
table = db.Recall_rec
order_by = db.Recall_rec.id
if table.select().count() > 0:
with open(self.file_name, 'w') as file:
file.write(self.get_meta_line())
cols = [col(table.id),
col(table.name, direction="left"),
col(table.rec_typ),
col("file", not_in_db=True, padding_override=utils.DEFAULT_STR_PAD, direction="left")]
self.write_headers(file, cols)
file.write("\n")
for row in table.select().order_by(order_by):
file_name = '{name}.rec'.format(name=row.name) if row.rec_typ != 4 else row.name
file.write(utils.int_pad(row.id))
file.write(utils.string_pad(row.name, direction="left"))
file.write(utils.int_pad(row.rec_typ))
file.write(utils.string_pad(file_name, direction="left"))
file.write("\n")
dir = os.path.dirname(self.file_name)
if row.rec_typ != 4:
self.write_data(row.data, os.path.join(dir, file_name))
def write_data(self, data, file_name):
table = db.Recall_dat
with open(file_name, 'w') as file:
file.write(self.get_meta_line())
file.write(str(len(data)))
file.write("\n")
cols = [
col(table.yr),
col(table.t_step),
col(table.flo),
col(table.sed),
col(table.ptl_n),
col(table.ptl_p),
col(table.no3_n),
col(table.sol_p),
col(table.chla),
col(table.nh3_n),
col(table.no2_n),
col(table.cbn_bod),
col(table.oxy),
col(table.sand),
col(table.silt),
col(table.clay),
col(table.sm_agg),
col(table.lg_agg),
col(table.gravel),
col(table.tmp)
]
self.write_headers(file, cols)
file.write("\n")
for row in data.order_by(db.Recall_dat.yr, db.Recall_dat.t_step):
file.write(utils.int_pad(row.yr))
file.write(utils.int_pad(row.t_step))
file.write(utils.num_pad(row.flo))
file.write(utils.num_pad(row.sed))
file.write(utils.num_pad(row.ptl_n))
file.write(utils.num_pad(row.ptl_p))
file.write(utils.num_pad(row.no3_n))
file.write(utils.num_pad(row.sol_p))
file.write(utils.num_pad(row.chla))
file.write(utils.num_pad(row.nh3_n))
file.write(utils.num_pad(row.no2_n))
file.write(utils.num_pad(row.cbn_bod))
file.write(utils.num_pad(row.oxy))
file.write(utils.num_pad(row.sand))
file.write(utils.num_pad(row.silt))
file.write(utils.num_pad(row.clay))
file.write(utils.num_pad(row.sm_agg))
file.write(utils.num_pad(row.lg_agg))
file.write(utils.num_pad(row.gravel))
file.write(utils.num_pad(row.tmp))
file.write("\n")
```
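A short usage sketch for the recall models above; the file names and record id are placeholders.
```python
# Usage sketch: import a CSV of point-source data, then write the SWAT+ files.
from database.project.setup import SetupProjectDatabase
from fileio.recall import Recall_rec

SetupProjectDatabase.init('demo_project.sqlite')  # placeholder path

# Load time-series rows into recall_dat for recall record 1, replacing any
# rows already stored for that record.
Recall_rec('point_source.csv').read_data(recall_rec_id=1, delete_existing=True)

# Write recall.rec plus one <name>.rec data file per non-constant record.
Recall_rec('recall.rec').write()
```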
#### File: editor_api/fileio/reservoir.py
```python
from .base import BaseFileModel, FileColumn as col
from peewee import *
from helpers import utils
from database.project import init
import database.project.reservoir as db
class Reservoir_res(BaseFileModel):
def __init__(self, file_name, version=None):
self.file_name = file_name
self.version = version
def read(self):
raise NotImplementedError('Reading not implemented yet.')
def write(self):
table = db.Reservoir_res
order_by = db.Reservoir_res.id
if table.select().count() > 0:
with open(self.file_name, 'w') as file:
file.write(self.get_meta_line())
file.write(utils.int_pad("id"))
file.write(utils.string_pad("name", direction="left"))
file.write(utils.string_pad("init"))
file.write(utils.string_pad("hyd"))
file.write(utils.string_pad("rel"))
file.write(utils.string_pad("sed"))
file.write(utils.string_pad("nut"))
file.write("\n")
for row in table.select().order_by(order_by):
file.write(utils.int_pad(row.id))
file.write(utils.string_pad(row.name, direction="left"))
file.write(utils.key_name_pad(row.init, default_pad=utils.DEFAULT_STR_PAD))
file.write(utils.key_name_pad(row.hyd, default_pad=utils.DEFAULT_STR_PAD))
file.write(utils.key_name_pad(row.rel, default_pad=utils.DEFAULT_STR_PAD))
file.write(utils.key_name_pad(row.sed, default_pad=utils.DEFAULT_STR_PAD))
file.write(utils.key_name_pad(row.nut, default_pad=utils.DEFAULT_STR_PAD))
file.write("\n")
class Hydrology_res(BaseFileModel):
def __init__(self, file_name, version=None):
self.file_name = file_name
self.version = version
def read(self, database='project'):
raise NotImplementedError('Reading not implemented yet.')
def write(self):
self.write_default_table(db.Hydrology_res, True)
class Initial_res(BaseFileModel):
def __init__(self, file_name, version=None):
self.file_name = file_name
self.version = version
def read(self, database='project'):
raise NotImplementedError('Reading not implemented yet.')
def write(self):
table = db.Initial_res
query = (table.select(table.name,
init.Om_water_ini.name.alias("org_min"),
init.Pest_water_ini.name.alias("pest"),
init.Path_water_ini.name.alias("path"),
init.Hmet_water_ini.name.alias("hmet"),
init.Salt_water_ini.name.alias("salt"),
table.description)
.join(init.Om_water_ini, JOIN.LEFT_OUTER)
.switch(table)
.join(init.Pest_water_ini, JOIN.LEFT_OUTER)
.switch(table)
.join(init.Path_water_ini, JOIN.LEFT_OUTER)
.switch(table)
.join(init.Hmet_water_ini, JOIN.LEFT_OUTER)
.switch(table)
.join(init.Salt_water_ini, JOIN.LEFT_OUTER)
.order_by(table.id))
cols = [col(table.name, direction="left"),
col(table.org_min, query_alias="org_min"),
col(table.pest, query_alias="pest"),
col(table.path, query_alias="path"),
col(table.hmet, query_alias="hmet"),
col(table.salt, query_alias="salt"),
col(table.description, direction="left")]
self.write_query(query, cols)
class Sediment_res(BaseFileModel):
def __init__(self, file_name, version=None):
self.file_name = file_name
self.version = version
def read(self, database='project'):
raise NotImplementedError('Reading not implemented yet.')
def write(self):
self.write_default_table(db.Sediment_res, True)
class Nutrients_res(BaseFileModel):
def __init__(self, file_name, version=None):
self.file_name = file_name
self.version = version
def read(self, database='project'):
raise NotImplementedError('Reading not implemented yet.')
def write(self):
self.write_default_table(db.Nutrients_res, True)
class Weir_res(BaseFileModel):
def __init__(self, file_name, version=None):
self.file_name = file_name
self.version = version
def read(self, database='project'):
raise NotImplementedError('Reading not implemented yet.')
def write(self):
self.write_default_table(db.Weir_res, True)
class Wetland_wet(BaseFileModel):
def __init__(self, file_name, version=None):
self.file_name = file_name
self.version = version
def read(self):
raise NotImplementedError('Reading not implemented yet.')
def write(self):
table = db.Wetland_wet
order_by = db.Wetland_wet.id
if table.select().count() > 0:
with open(self.file_name, 'w') as file:
file.write(self.get_meta_line())
file.write(utils.int_pad("id"))
file.write(utils.string_pad("name", direction="left"))
file.write(utils.string_pad("init"))
file.write(utils.string_pad("hyd"))
file.write(utils.string_pad("rel"))
file.write(utils.string_pad("sed"))
file.write(utils.string_pad("nut"))
file.write("\n")
for row in table.select().order_by(order_by):
file.write(utils.int_pad(row.id))
file.write(utils.string_pad(row.name, direction="left"))
file.write(utils.key_name_pad(row.init, default_pad=utils.DEFAULT_STR_PAD))
file.write(utils.key_name_pad(row.hyd, default_pad=utils.DEFAULT_STR_PAD))
file.write(utils.key_name_pad(row.rel, default_pad=utils.DEFAULT_STR_PAD))
file.write(utils.key_name_pad(row.sed, default_pad=utils.DEFAULT_STR_PAD))
file.write(utils.key_name_pad(row.nut, default_pad=utils.DEFAULT_STR_PAD))
file.write("\n")
class Hydrology_wet(BaseFileModel):
def __init__(self, file_name, version=None):
self.file_name = file_name
self.version = version
def read(self, database='project'):
raise NotImplementedError('Reading not implemented yet.')
def write(self):
self.write_default_table(db.Hydrology_wet, True)
```
#### File: editor_api/helpers/executable_api.py
```python
import json, sys
class Unbuffered(object):
def __init__(self, stream):
self.stream = stream
def write(self, data):
self.stream.write(data)
self.stream.flush()
def writelines(self, datas):
self.stream.writelines(datas)
self.stream.flush()
def __getattr__(self, attr):
return getattr(self.stream, attr)
class ExecutableApi:
def emit_progress(self, percent, message):
sys.stdout.write('\r\t-> percent: {0} - {1} '.format(str(percent).rjust(3), message))
sys.stdout.flush()
```
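A sketch of how these helpers are typically used together: wrap stdout so each progress line is flushed immediately, then report progress from a long-running task. The subclass is hypothetical and exists only for illustration.
```python
import sys
from helpers.executable_api import ExecutableApi, Unbuffered

# Flush every write so the caller sees progress lines as they happen.
sys.stdout = Unbuffered(sys.stdout)

class DemoTask(ExecutableApi):  # hypothetical subclass for illustration
    def run(self):
        for pct in range(0, 101, 25):
            self.emit_progress(pct, 'working...')

DemoTask().run()
```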
#### File: editor_api/helpers/utils.py
```python
import re
from datetime import datetime
import json
import urllib.parse
import os.path
from decimal import Decimal
DEFAULT_STR_PAD = 16
DEFAULT_KEY_PAD = 16
DEFAULT_CODE_PAD = 12
DEFAULT_NUM_PAD = 12
DEFAULT_INT_PAD = 8
DEFAULT_DECIMALS = 5
DEFAULT_DIRECTION = "right"
DEFAULT_SPACES_AFTER = 2
NULL_STR = "null"
NULL_NUM = "0"
NON_ZERO_MIN = 0.00001
def get_valid_filename(s):
s = s.strip().replace(' ', '_')
return re.sub(r'(?u)[^-\w.]', '', s)
def remove_space(s, c='_'):
if s is None or s == '':
return s
return s.strip().replace(' ', c)
def string_pad(val, default_pad=DEFAULT_STR_PAD, direction=DEFAULT_DIRECTION, text_if_null=NULL_STR, spaces_after=DEFAULT_SPACES_AFTER):
val_text = text_if_null if val is None or val == '' else remove_space(val)
space = ""
for x in range(0, spaces_after):
space += " "
if direction == "right":
return str(val_text).rjust(default_pad) + space
else:
return str(val_text).ljust(default_pad) + space
def code_pad(val, default_pad=DEFAULT_CODE_PAD, direction=DEFAULT_DIRECTION, text_if_null=NULL_STR):
return string_pad(val, default_pad, direction, text_if_null)
def key_name_pad(prop, default_pad=DEFAULT_KEY_PAD, direction=DEFAULT_DIRECTION, text_if_null=NULL_STR):
val = None if prop is None else prop.name
return string_pad(val, default_pad, direction, text_if_null)
def num_pad(val, decimals=DEFAULT_DECIMALS, default_pad=DEFAULT_NUM_PAD, direction=DEFAULT_DIRECTION, text_if_null=NULL_NUM, use_non_zero_min=False):
val_text = val
if is_number(val):
if use_non_zero_min and val < NON_ZERO_MIN:
val = NON_ZERO_MIN
val_text = "{:.{prec}f}".format(float(val), prec=decimals)
return string_pad(val_text, default_pad, direction, text_if_null)
def exp_pad(val, decimals=4, default_pad=DEFAULT_NUM_PAD, direction=DEFAULT_DIRECTION, text_if_null=NULL_NUM, use_non_zero_min=False):
val_text = val
if is_number(val):
if use_non_zero_min and val == 0:
val = 0.00000001
val_text = "{:.{prec}E}".format(Decimal(val), prec=decimals)
return string_pad(val_text, default_pad, direction, text_if_null)
def int_pad(val, default_pad=DEFAULT_INT_PAD, direction=DEFAULT_DIRECTION):
return num_pad(val, 0, default_pad, direction, NULL_NUM)
def write_string(file, val, default_pad=DEFAULT_STR_PAD, direction=DEFAULT_DIRECTION, text_if_null=NULL_STR, spaces_after=DEFAULT_SPACES_AFTER):
file.write(string_pad(val, default_pad, direction, text_if_null, spaces_after))
def write_code(file, val, default_pad=DEFAULT_CODE_PAD, direction=DEFAULT_DIRECTION, text_if_null=NULL_STR):
file.write(code_pad(val, default_pad, direction, text_if_null))
def write_key_name(file, prop, default_pad=DEFAULT_KEY_PAD, direction=DEFAULT_DIRECTION, text_if_null=NULL_STR):
file.write(key_name_pad(prop, default_pad, direction, text_if_null))
def write_num(file, val, decimals=DEFAULT_DECIMALS, default_pad=DEFAULT_NUM_PAD, direction=DEFAULT_DIRECTION, text_if_null=NULL_NUM, use_non_zero_min=False):
file.write(num_pad(val, decimals, default_pad, direction, text_if_null, use_non_zero_min))
def write_int(file, val, default_pad=DEFAULT_INT_PAD, direction=DEFAULT_DIRECTION):
file.write(int_pad(val, default_pad, direction))
def write_bool_yn(file, val, default_pad=DEFAULT_CODE_PAD, direction=DEFAULT_DIRECTION, text_if_null=NULL_STR):
yn = "y" if val else "n"
file.write(code_pad(yn, default_pad, direction, text_if_null))
def write_desc_string(file, val):
if val is not None:
file.write(val)
def is_number(s):
try:
float(s)
return True
except ValueError:
return False
except TypeError:
return False
def json_encode_datetime(o):
if isinstance(o, datetime):
return o.isoformat()
return o
def sanitize(q):
return urllib.parse.unquote(q)
def rel_path(compare_path, curr_path):
if curr_path is None:
return None
if curr_path[0].lower() != compare_path[0].lower():
return curr_path
base_path = os.path.dirname(compare_path)
return os.path.relpath(curr_path, base_path)
def full_path(compare_path, curr_path):
if curr_path is None:
return None
p = curr_path
if not os.path.isabs(curr_path):
p = os.path.normpath(os.path.join(os.path.dirname(compare_path), curr_path))
return p
def are_paths_equal(p1, p2):
p1n = os.path.normcase(os.path.realpath(p1))
p2n = os.path.normcase(os.path.realpath(p2))
return p1n == p2n
```
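A quick sketch of how the padding helpers compose into one fixed-width output line, using the defaults defined above.
```python
from helpers import utils

# One fixed-width row: 8-wide int, 16-wide left-aligned name,
# 12-wide number with 5 decimals, 12-wide scientific-notation value.
line = (
    utils.int_pad(1) +
    utils.string_pad('res01', direction='left') +
    utils.num_pad(12.3456789) +
    utils.exp_pad(0.000012)
)
print(line)
```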
#### File: editor_api/rest/auto_complete.py
```python
from flask_restful import Resource, abort
from database.project.setup import SetupProjectDatabase
from database.project import connect, climate, channel, aquifer, reservoir, hydrology, hru, hru_parm_db, lum, soils, routing_unit, dr, init, decision_table, exco, dr, structural
from helpers import table_mapper # Note: string to table name dictionary moved here
MAX_ROWS = 10000
class AutoCompleteApi(Resource):
def get(self, project_db, type, partial_name):
SetupProjectDatabase.init(project_db)
table = table_mapper.types.get(type, None)
if table is None:
return abort(404, message='Unable to find table type for auto-complete.')
# If table is a decision table, filter based on file_name
if '.dtl' in type:
m = table.select(table.name).where((table.name.contains(partial_name)) & (table.file_name == type)).limit(MAX_ROWS)
nm = table.select(table.name).where((~(table.name.contains(partial_name))) & (table.file_name == type)).limit(MAX_ROWS)
else:
m = table.select(table.name).where(table.name.contains(partial_name)).limit(MAX_ROWS)
nm = table.select(table.name).where(~(table.name.contains(partial_name))).limit(MAX_ROWS)
matches = [v.name for v in m]
non_matches = [nv.name for nv in nm]
if len(matches) > 0:
if len(non_matches) > 0:
return matches + non_matches
return matches
return non_matches
class AutoCompleteNoParmApi(Resource):
def get(self, project_db, type):
SetupProjectDatabase.init(project_db)
table = table_mapper.types.get(type, None)
if table is None:
return abort(404, message='Unable to find table type for auto-complete.')
# If table is a decision table, filter based on file_name
if '.dtl' in type:
m = table.select(table.name).where(table.file_name == type).order_by(table.name).limit(MAX_ROWS)
else:
m = table.select(table.name).order_by(table.name).limit(MAX_ROWS)
return [v.name for v in m]
class AutoCompleteIdApi(Resource):
def get(self, project_db, type, name):
SetupProjectDatabase.init(project_db)
table = table_mapper.types.get(type, None)
if table is None:
return abort(404, message='Unable to find table type for auto-complete.')
try:
m = table.get(table.name == name)
return {'id': m.id}
except table.DoesNotExist:
abort(404, message='{name} does not exist in the database.'.format(name=name))
class SelectListApi(Resource):
def get(self, project_db, type):
SetupProjectDatabase.init(project_db)
table = table_mapper.types.get(type, None)
if table is None:
return abort(404, message='Unable to find table type for auto-complete.')
items = table.select().order_by(table.name)
return [{'value': m.id, 'text': m.name} for m in items]
```
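For context, these resources are registered with Flask-RESTful elsewhere in `editor_api`; the sketch below shows the general wiring, but the URL patterns are assumptions, not the project's actual routes.
```python
from flask import Flask
from flask_restful import Api
from rest.auto_complete import AutoCompleteApi, AutoCompleteIdApi, SelectListApi

app = Flask(__name__)
api = Api(app)

# Illustrative routes only; argument names must match the resources' get() signatures.
api.add_resource(AutoCompleteApi, '/autocomplete/<project_db>/<type>/<partial_name>')
api.add_resource(AutoCompleteIdApi, '/autocomplete-id/<project_db>/<type>/<name>')
api.add_resource(SelectListApi, '/select-list/<project_db>/<type>')
```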
#### File: editor_api/rest/base.py
```python
from abc import ABCMeta, abstractmethod
from flask_restful import Resource, reqparse, abort
from playhouse.shortcuts import model_to_dict
from peewee import *
from helpers import utils, table_mapper
from database.project import base as project_base, climate
from database.project.setup import SetupProjectDatabase
from database.project.config import Project_config
from database.datasets.setup import SetupDatasetsDatabase
from database.vardefs import Var_range
import os.path
import ast
from pprint import pprint
class BaseRestModel(Resource):
__metaclass__ = ABCMeta
__invalid_name_msg = 'Invalid name {name}. Please ensure the value exists in your database.'
def base_get(self, project_db, id, table, description, back_refs=False, max_depth=1):
SetupProjectDatabase.init(project_db)
try:
m = table.get(table.id == id)
if back_refs:
d = model_to_dict(m, backrefs=True, max_depth=max_depth)
self.get_obj_name(d)
return d
else:
return model_to_dict(m, recurse=False)
except table.DoesNotExist:
abort(404, message='{description} {id} does not exist'.format(description=description, id=id))
def get_obj_name(self, d):
if 'con_outs' in d:
for o in d['con_outs']:
c_table = table_mapper.obj_typs.get(o['obj_typ'], None)
o['obj_name'] = c_table.get(c_table.id == o['obj_id']).name
if 'elements' in d:
for o in d['elements']:
c_table = table_mapper.obj_typs.get(o['obj_typ'], None)
key = 'obj_id' if 'obj_id' in o else 'obj_typ_no'
o['obj_name'] = c_table.get(c_table.id == o[key]).name
if 'obj_typ' in d and ('obj_id' in d or 'obj_typ_no' in d):
c_table = table_mapper.obj_typs.get(d['obj_typ'], None)
key = 'obj_id' if 'obj_id' in d else 'obj_typ_no'
d['obj_name'] = c_table.get(c_table.id == d[key]).name
def base_get_datasets_name(self, datasets_db, name, table, description, back_refs=False):
SetupDatasetsDatabase.init(datasets_db)
try:
m = table.get(table.name == name)
if back_refs:
return model_to_dict(m, backrefs=True, max_depth=1)
else:
return model_to_dict(m, recurse=False)
except table.DoesNotExist:
abort(404, message='{description} {name} does not exist'.format(description=description, name=name))
def base_delete(self, project_db, id, table, description):
SetupProjectDatabase.init(project_db)
try:
project_base.db.execute_sql("PRAGMA foreign_keys = ON")
m = table.get(table.id == id)
result = m.delete_instance()
if result > 0:
return 204
abort(400, message='Unable to delete {description} {id}.'.format(description=description, id=id))
except table.DoesNotExist:
abort(404, message='{description} {id} does not exist'.format(description=description, id=id))
def base_paged_list(self, project_db, sort, reverse, page, items_per_page, table, list_name, back_refs=False):
SetupProjectDatabase.init(project_db)
total = table.select().count()
sort_val = SQL('[{}]'.format(sort))
if reverse == 'true':
sort_val = SQL('[{}]'.format(sort)).desc()
m = table.select().order_by(sort_val).paginate(int(page), int(items_per_page))
if back_refs:
ml = [model_to_dict(v, backrefs=True, max_depth=1) for v in m]
for d in ml:
self.get_obj_name(d)
else:
ml = [model_to_dict(v, recurse=False) for v in m]
return {'total': total, list_name: ml}
def base_list(self, project_db, table):
SetupProjectDatabase.init(project_db)
m = table.select()
return [model_to_dict(v, recurse=False) for v in m]
def base_put(self, project_db, id, table, item_description):
try:
SetupProjectDatabase.init(project_db)
args = self.get_args_reflect(table, project_db)
result = self.save_args(table, args, id=id)
if result > 0:
return 201
abort(400, message='Unable to update {item} {id}.'.format(item=item_description.lower(), id=id))
except IntegrityError as e:
abort(400, message='{item} name must be unique. '.format(item=item_description) + str(e))
except table.DoesNotExist:
abort(404, message='{item} {id} does not exist'.format(item=item_description, id=id))
except Exception as ex:
abort(400, message="Unexpected error {ex}".format(ex=ex))
def base_post(self, project_db, table, item_description, extra_args=[]):
try:
SetupProjectDatabase.init(project_db)
args = self.get_args_reflect(table, project_db, extra_args=extra_args)
result = self.save_args(table, args, is_new=True, extra_args=extra_args)
if result > 0:
return {'id': result }, 201
abort(400, message='Unable to create {item}.'.format(item=item_description.lower()))
except IntegrityError as e:
abort(400, message='{item} name must be unique. {ex}'.format(item=item_description, ex=str(e)))
except Exception as ex:
abort(400, message="Unexpected error {ex}".format(ex=ex))
def base_put_many(self, project_db, table, item_description):
try:
table_name = table._meta.name.lower()
SetupProjectDatabase.init(project_db)
args = self.get_args(table_name, project_db, True)
param_dict = {}
for key in args.keys():
if args[key] is not None and key != 'selected_ids':
param_dict[key] = args[key]
query = table.update(param_dict).where(table.id.in_(args['selected_ids']))
result = query.execute()
if result > 0:
return 200
abort(400, message='Unable to update {item}.'.format(item=item_description.lower()))
except Exception as ex:
abort(400, message="Unexpected error {ex}".format(ex=ex))
def get_id_from_name(self, table, value):
if value is None or value == '':
return None
i = table.get(table.name == value)
return i.id
def base_name_id_list(self, project_db, table):
SetupProjectDatabase.init(project_db)
m = table.select(table.id, table.name).order_by(table.name)
return [{'id': v.id, 'name': v.name} for v in m]
def get_args(self, table_name, project_db, get_selected_ids=False, extra_args=[]):
parser = reqparse.RequestParser()
if get_selected_ids:
parser.add_argument('selected_ids', type=int, action='append', required=False, location='json')
else:
parser.add_argument('id', type=int, required=False, location='json')
parser.add_argument('name', type=str, required=False, location='json')
parser.add_argument('description', type=str, required=False, location='json')
for extra in extra_args:
parser.add_argument(extra['name'], type=extra['type'], required=False, location='json')
try:
c = Project_config.get()
datasets_db = c.reference_db
if not os.path.exists(c.reference_db):
datasets_db = os.path.normpath(os.path.join(os.path.dirname(project_db), c.reference_db))
SetupDatasetsDatabase.init(datasets_db)
m = Var_range.select().where(Var_range.table == table_name)
types = {
'float': float,
'int': int,
'text': str,
'string': str,
'select': str,
'lookup': str
}
for v in m:
parser.add_argument(v.variable, type=types.get(v.type, str), required=False, location='json')
except Project_config.DoesNotExist:
abort(404, message="Could not retrieve project configuration data.")
args = parser.parse_args(strict=False)
return args
def get_args_reflect(self, table, project_db, get_selected_ids=False, extra_args=[]):
parser = reqparse.RequestParser()
if get_selected_ids:
parser.add_argument('selected_ids', type=int, action='append', required=False, location='json')
for extra in extra_args:
parser.add_argument(extra['name'], type=extra['type'], required=False, location='json')
type_map = {
DoubleField: float,
IntegerField: int,
AutoField: int,
TextField: str,
CharField: str,
ForeignKeyField: int,
BooleanField: bool
}
for field in table._meta.sorted_fields:
parser.add_argument(field.column_name, type=type_map.get(type(field), str), required=False, location='json')
args = parser.parse_args(strict=False)
return args
def save_args(self, table, args, id=0, is_new=False, lookup_fields=[], extra_args=[]):
params = {}
for field in table._meta.sorted_fields:
if field.column_name in args or field.name in args:
if field.name in lookup_fields:
d = ast.literal_eval(args[field.name])
params[field.name] = int(d['id'])
elif field.column_name in lookup_fields:
d = ast.literal_eval(args[field.column_name])
params[field.column_name] = int(d['id'])
else:
params[field.column_name] = args[field.column_name]
for extra in extra_args:
params[extra['name']] = args[extra['name']]
if is_new:
query = table.insert(params)
else:
query = table.update(params).where(table.id == id)
return query.execute()
def get_con_args(self, prop_name):
parser = reqparse.RequestParser()
parser.add_argument('id', type=int, required=False, location='json')
parser.add_argument('name', type=str, required=True, location='json')
parser.add_argument('gis_id', type=int, required=False, location='json')
parser.add_argument('area', type=float, required=True, location='json')
parser.add_argument('lat', type=float, required=True, location='json')
parser.add_argument('lon', type=float, required=True, location='json')
parser.add_argument('elev', type=float, required=False, location='json')
parser.add_argument('wst', type=int, required=False, location='json')
parser.add_argument('wst_name', type=str, required=False, location='json')
parser.add_argument('cst', type=int, required=False, location='json')
parser.add_argument('ovfl', type=int, required=False, location='json')
parser.add_argument('rule', type=int, required=False, location='json')
parser.add_argument(prop_name, type=int, required=False, location='json')
parser.add_argument('{prop}_name'.format(prop=prop_name), type=str, required=False, location='json')
args = parser.parse_args(strict=True)
return args
def get_con_out_args(self, con_table):
parser = reqparse.RequestParser()
parser.add_argument('id', type=int, required=False, location='json')
parser.add_argument('order', type=int, required=True, location='json')
parser.add_argument('obj_typ', type=str, required=True, location='json')
parser.add_argument('obj_id', type=int, required=True, location='json')
parser.add_argument('hyd_typ', type=str, required=True, location='json')
parser.add_argument('frac', type=float, required=True, location='json')
parser.add_argument('{con}_id'.format(con=con_table), type=int, required=False, location='json')
args = parser.parse_args(strict=True)
return args
def get_con_map(self, project_db, table):
SetupProjectDatabase.init(project_db)
t = table
limit = 500
bounds = t.select(fn.Max(t.lat).alias("max_lat"),
fn.Min(t.lat).alias("min_lat"),
fn.Max(t.lon).alias("max_lon"),
fn.Min(t.lon).alias("min_lon")).get()
m = t.select().limit(limit)
features = []
for v in m:
feature = {
"geometry": {
"type": "Point",
"coordinates": [v.lon, v.lat]
},
"type": "Feature",
"properties": {
"name": v.name,
"area": v.area,
"lat": v.lat,
"lon": v.lon,
"elevation": v.elev
},
"id": v.id
}
features.append(feature)
return {
"bounds": {
"max_lat": bounds.max_lat,
"max_lon": bounds.max_lon,
"min_lat": bounds.min_lat,
"min_lon": bounds.min_lon
},
"geojson": {
"type": "FeatureCollection",
"features": features
},
"display": {
"limit": limit,
"total": t.select().count()
}
}
def put_con(self, project_db, id, prop_name, con_table, prop_table):
args = self.get_con_args(prop_name)
try:
SetupProjectDatabase.init(project_db)
params = {
'name': args['name'],
'area': args['area'],
'lat': args['lat'],
'lon': args['lon'],
'elev': args['elev'],
}
params['{}_id'.format(prop_name)] = self.get_id_from_name(prop_table, args['{}_name'.format(prop_name)])
if args['wst_name'] is not None:
params['wst_id'] = self.get_id_from_name(climate.Weather_sta_cli, args['wst_name'])
result = con_table.update(params).where(con_table.id == id).execute()
if result > 0:
return 200
abort(400, message='Unable to update Hru {id}.'.format(id=id))
except IntegrityError:
abort(400, message='Name must be unique.')
except con_table.DoesNotExist:
abort(404, message='Object {id} does not exist'.format(id=id))
except prop_table.DoesNotExist:
abort(400, message=self.__invalid_name_msg.format(name=args['{}_name'.format(prop_name)]))
except climate.Weather_sta_cli.DoesNotExist:
abort(400, message=self.__invalid_name_msg.format(name=args['wst_name']))
except Exception as ex:
abort(400, message="Unexpected error {ex}".format(ex=ex))
def post_con(self, project_db, prop_name, con_table, prop_table):
args = self.get_con_args(prop_name)
SetupProjectDatabase.init(project_db)
try:
e = con_table.get(con_table.name == args['name'])
abort(400, message='Name must be unique. Object with this name already exists.')
except con_table.DoesNotExist:
try:
params = {
'name': args['name'],
'area': args['area'],
'lat': args['lat'],
'lon': args['lon'],
'elev': args['elev'],
'ovfl': 0,
'rule': 0
}
params['{}_id'.format(prop_name)] = self.get_id_from_name(prop_table, args['{}_name'.format(prop_name)])
if args['wst_name'] is not None:
params['wst_id'] = self.get_id_from_name(climate.Weather_sta_cli, args['wst_name'])
result = con_table.insert(params).execute()
if result > 0:
return 201
abort(400, message='Unable to create object.')
except IntegrityError:
abort(400, message='Name must be unique.')
except prop_table.DoesNotExist:
abort(400, message=self.__invalid_name_msg.format(name=args['{}_name'.format(prop_name)]))
except climate.Weather_sta_cli.DoesNotExist:
abort(400, message=self.__invalid_name_msg.format(name=args['wst_name']))
except Exception as ex:
abort(400, message="Unexpected error {ex}".format(ex=ex))
def put_con_out(self, project_db, id, prop_name, con_out_table):
try:
args = self.get_con_out_args(prop_name)
SetupProjectDatabase.init(project_db)
params = {
'order': args['order'],
'obj_typ': args['obj_typ'],
'obj_id': args['obj_id'],
'hyd_typ': args['hyd_typ'],
'frac': args['frac']
}
result = con_out_table.update(params).where(con_out_table.id == id).execute()
if result > 0:
return 200
abort(400, message='Unable to update outflow {id}.'.format(id=id))
except con_out_table.DoesNotExist:
abort(404, message='Outflow {id} does not exist'.format(id=id))
except Exception as ex:
abort(400, message="Unexpected error {ex}".format(ex=ex))
def post_con_out(self, project_db, prop_name, con_out_table):
args = self.get_con_out_args(prop_name)
SetupProjectDatabase.init(project_db)
try:
params = {
'order': args['order'],
'obj_typ': args['obj_typ'],
'obj_id': args['obj_id'],
'hyd_typ': args['hyd_typ'],
'frac': args['frac']
}
params['{}_id'.format(prop_name)] = args['{}_id'.format(prop_name)]
result = con_out_table.insert(params).execute()
if result > 0:
return 201
abort(400, message='Unable to create outflow.')
except Exception as ex:
abort(400, message="Unexpected error {ex}".format(ex=ex))
```
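To illustrate how the helpers above are meant to be consumed, here is a hypothetical resource built on `BaseRestModel`; the real API defines its own per-table resources, so this is only a sketch.
```python
from rest.base import BaseRestModel
from database.project import aquifer

class AquiferApi(BaseRestModel):  # hypothetical example resource
    def get(self, project_db, id):
        return self.base_get(project_db, id, aquifer.Aquifer_aqu, 'Aquifer')

    def delete(self, project_db, id):
        return self.base_delete(project_db, id, aquifer.Aquifer_aqu, 'Aquifer')

    def put(self, project_db, id):
        return self.base_put(project_db, id, aquifer.Aquifer_aqu, 'Aquifer')
```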
#### File: editor_api/rest/climate.py
```python
from flask_restful import Resource, reqparse, abort
from playhouse.shortcuts import model_to_dict
from peewee import *
from database.project import base as project_base
from database.project.setup import SetupProjectDatabase
from database.project.config import Project_config
from database.project.climate import Weather_sta_cli, Weather_file, Weather_wgn_cli, Weather_wgn_cli_mon
from database import lib as db_lib
from helpers import utils
import os.path
import sqlite3
import traceback
def get_station_args():
parser = reqparse.RequestParser()
parser.add_argument('id', type=int, required=False, location='json')
parser.add_argument('name', type=str, required=True, location='json')
parser.add_argument('wgn_id', type=int, required=False, location='json')
parser.add_argument('wgn', type=int, required=False, location='json')
parser.add_argument('wgn_name', type=str, required=False, location='json')
parser.add_argument('pcp', type=str, required=True, location='json')
parser.add_argument('tmp', type=str, required=True, location='json')
parser.add_argument('slr', type=str, required=True, location='json')
parser.add_argument('hmd', type=str, required=True, location='json')
parser.add_argument('wnd', type=str, required=True, location='json')
parser.add_argument('wnd_dir', type=str, required=True, location='json')
parser.add_argument('atmo_dep', type=str, required=True, location='json')
parser.add_argument('lat', type=float, required=False, location='json')
parser.add_argument('lon', type=float, required=False, location='json')
args = parser.parse_args(strict=True)
return args
def get_wgn_args():
parser = reqparse.RequestParser()
parser.add_argument('id', type=int, required=False, location='json')
parser.add_argument('name', type=str, required=True, location='json')
parser.add_argument('lat', type=float, required=True, location='json')
parser.add_argument('lon', type=float, required=True, location='json')
parser.add_argument('elev', type=float, required=True, location='json')
parser.add_argument('rain_yrs', type=int, required=True, location='json')
args = parser.parse_args(strict=True)
return args
def get_wgn_mon_args():
parser = reqparse.RequestParser()
parser.add_argument('id', type=int, required=False, location='json')
parser.add_argument('weather_wgn_cli_id', type=int, required=False, location='json')
parser.add_argument('weather_wgn_cli', type=int, required=False, location='json')
parser.add_argument('month', type=int, required=True, location='json')
parser.add_argument('tmp_max_ave', type=float, required=True, location='json')
parser.add_argument('tmp_min_ave', type=float, required=True, location='json')
parser.add_argument('tmp_max_sd', type=float, required=True, location='json')
parser.add_argument('tmp_min_sd', type=float, required=True, location='json')
parser.add_argument('pcp_ave', type=float, required=True, location='json')
parser.add_argument('pcp_sd', type=float, required=True, location='json')
parser.add_argument('pcp_skew', type=float, required=True, location='json')
parser.add_argument('wet_dry', type=float, required=True, location='json')
parser.add_argument('wet_wet', type=float, required=True, location='json')
parser.add_argument('pcp_days', type=float, required=True, location='json')
parser.add_argument('pcp_hhr', type=float, required=True, location='json')
parser.add_argument('slr_ave', type=float, required=True, location='json')
parser.add_argument('dew_ave', type=float, required=True, location='json')
parser.add_argument('wnd_ave', type=float, required=True, location='json')
args = parser.parse_args(strict=True)
return args
class WeatherStationApi(Resource):
def get(self, project_db, id):
SetupProjectDatabase.init(project_db)
try:
m = Weather_sta_cli.get(Weather_sta_cli.id == id)
d = model_to_dict(m, recurse=False)
if m.wgn is not None:
d["wgn_name"] = m.wgn.name
return d
except Weather_sta_cli.DoesNotExist:
abort(404, message='Weather station {id} does not exist'.format(id=id))
def delete(self, project_db, id):
SetupProjectDatabase.init(project_db)
try:
project_base.db.execute_sql("PRAGMA foreign_keys = ON")
m = Weather_sta_cli.get(Weather_sta_cli.id == id)
result = m.delete_instance()
if result > 0:
return 204
abort(400, message='Unable to delete weather station {id}.'.format(id=id))
except Weather_sta_cli.DoesNotExist:
abort(404, message='Weather station {id} does not exist'.format(id=id))
def put(self, project_db, id):
args = get_station_args()
SetupProjectDatabase.init(project_db)
try:
m = Weather_sta_cli.get(Weather_sta_cli.id == id)
m.name = args['name']
if args['wgn_name'] is not None:
try:
w = Weather_wgn_cli.get(Weather_wgn_cli.name == args['wgn_name'])
m.wgn_id = w.id
except Weather_wgn_cli.DoesNotExist:
abort(400, message='Invalid weather generator name {name}. Please ensure the value exists in your database.'.format(name=args['wgn_name']))
else:
m.wgn_id = args['wgn'] if args['wgn_id'] is None else args['wgn_id']
m.pcp = args['pcp']
m.tmp = args['tmp']
m.slr = args['slr']
m.hmd = args['hmd']
m.wnd = args['wnd']
m.wnd_dir = args['wnd_dir']
m.atmo_dep = args['atmo_dep']
m.lat = args['lat']
m.lon = args['lon']
result = m.save()
if result > 0:
return 200
abort(400, message='Unable to update weather station {id}.'.format(id=id))
except Weather_sta_cli.DoesNotExist:
abort(404, message='Weather station {id} does not exist'.format(id=id))
class WeatherStationListApi(Resource):
def get(self, project_db):
SetupProjectDatabase.init(project_db)
m = Weather_sta_cli.select()
return [model_to_dict(v, recurse=False) for v in m]
def post(self, project_db):
args = get_station_args()
SetupProjectDatabase.init(project_db)
try:
e = Weather_sta_cli.get(Weather_sta_cli.name == args['name'])
abort(400, message='Weather station name must be unique. A station with this name already exists.')
except Weather_sta_cli.DoesNotExist:
m = Weather_sta_cli()
m.name = args['name']
if args['wgn_name'] is not None:
try:
w = Weather_wgn_cli.get(Weather_wgn_cli.name == args['wgn_name'])
m.wgn_id = w.id
except Weather_wgn_cli.DoesNotExist:
abort(404, message='Invalid weather generator name {name}. Please ensure the value exists in your database.'.format(name=args['wgn_name']))
else:
m.wgn_id = args['wgn_id']
m.pcp = args['pcp']
m.tmp = args['tmp']
m.slr = args['slr']
m.hmd = args['hmd']
m.wnd = args['wnd']
m.wnd_dir = args['wnd_dir']
m.atmo_dep = args['atmo_dep']
m.lat = args['lat']
m.lon = args['lon']
result = m.save()
if result > 0:
return model_to_dict(m), 201
abort(400, message='Unable to create weather station.')
class WeatherStationPageApi(Resource):
def get(self, project_db, sort, reverse, page, items_per_page):
SetupProjectDatabase.init(project_db)
try:
config = Project_config.get()
total = Weather_sta_cli.select().count()
sort_val = SQL(sort)
if reverse == 'true':
sort_val = SQL(sort).desc()
m = Weather_sta_cli.select().order_by(sort_val).paginate(int(page), int(items_per_page))
stations = []
for v in m:
s = model_to_dict(v, recurse=False)
if v.wgn is not None:
s['wgn_name'] = v.wgn.name
stations.append(s)
return {
'total': total,
'weather_data_dir': utils.full_path(project_db, config.weather_data_dir),
'stations': stations
}
except Project_config.DoesNotExist:
abort(400, message='Could not retrieve project configuration data.')
class WeatherStationSaveDirApi(Resource):
def put(self, project_db):
parser = reqparse.RequestParser()
parser.add_argument('weather_data_dir', type=str, required=True, location='json')
args = parser.parse_args(strict=True)
SetupProjectDatabase.init(project_db)
try:
m = Project_config.get()
m.weather_data_dir = utils.rel_path(project_db, args['weather_data_dir'])
result = m.save()
if result > 0:
return 200
abort(400, message='Unable to update project configuration.')
except Project_config.DoesNotExist:
abort(404, message='Could not retrieve project configuration data.')
class WeatherFileAutoCompleteApi(Resource):
def get(self, project_db, type, partial_name):
SetupProjectDatabase.init(project_db)
m = Weather_file.select().where((Weather_file.type == type) & (Weather_file.filename.startswith(partial_name)))
return [v.filename for v in m]
class WgnAutoCompleteApi(Resource):
def get(self, project_db, partial_name):
SetupProjectDatabase.init(project_db)
m = Weather_wgn_cli.select().where(Weather_wgn_cli.name.startswith(partial_name))
return [v.name for v in m]
class WgnApi(Resource):
def get(self, project_db, id):
SetupProjectDatabase.init(project_db)
try:
m = Weather_wgn_cli.get(Weather_wgn_cli.id == id)
return model_to_dict(m, backrefs=True, max_depth=1)
except Weather_wgn_cli.DoesNotExist:
abort(404, message='Weather generator {id} does not exist'.format(id=id))
def delete(self, project_db, id):
SetupProjectDatabase.init(project_db)
try:
project_base.db.execute_sql("PRAGMA foreign_keys = ON")
m = Weather_wgn_cli.get(Weather_wgn_cli.id == id)
result = m.delete_instance()
if result > 0:
return 204
abort(400, message='Unable to delete weather generator {id}.'.format(id=id))
except Weather_wgn_cli.DoesNotExist:
abort(404, message='Weather generator {id} does not exist'.format(id=id))
def put(self, project_db, id):
args = get_wgn_args()
SetupProjectDatabase.init(project_db)
try:
m = Weather_wgn_cli.get(Weather_wgn_cli.id == id)
m.name = args['name']
m.lat = args['lat']
m.lon = args['lon']
m.elev = args['elev']
m.rain_yrs = args['rain_yrs']
result = m.save()
if result > 0:
return 200
abort(400, message='Unable to update weather generator {id}.'.format(id=id))
except Weather_wgn_cli.DoesNotExist:
abort(404, message='Weather generator {id} does not exist'.format(id=id))
class WgnListApi(Resource):
def get(self, project_db):
SetupProjectDatabase.init(project_db)
m = Weather_wgn_cli.select()
return [model_to_dict(v, recurse=False) for v in m]
def post(self, project_db):
args = get_wgn_args()
SetupProjectDatabase.init(project_db)
try:
e = Weather_wgn_cli.get(Weather_wgn_cli.name == args['name'])
abort(400, message='Weather generator name must be unique. A generator with this name already exists.')
except Weather_wgn_cli.DoesNotExist:
m = Weather_wgn_cli()
m.name = args['name']
m.lat = args['lat']
m.lon = args['lon']
m.elev = args['elev']
m.rain_yrs = args['rain_yrs']
result = m.save()
if result > 0:
return model_to_dict(m), 201
abort(400, message='Unable to create weather generator.')
class WgnPageApi(Resource):
def get(self, project_db, sort, reverse, page, items_per_page):
SetupProjectDatabase.init(project_db)
try:
config = Project_config.get()
total = Weather_wgn_cli.select().count()
sort_val = SQL(sort)
if reverse == 'true':
sort_val = SQL(sort).desc()
m = Weather_wgn_cli.select().order_by(sort_val).paginate(int(page), int(items_per_page))
return {
'total': total,
'wgn_db': utils.full_path(project_db, config.wgn_db),
'wgn_table_name': config.wgn_table_name,
'wgns': [model_to_dict(v, backrefs=True, max_depth=1) for v in m]
}
except Project_config.DoesNotExist:
abort(400, message='Could not retrieve project configuration data.')
class WgnTablesAutoCompleteApi(Resource):
def get(self, wgn_db, partial_name):
conn = sqlite3.connect(wgn_db)
conn.row_factory = sqlite3.Row
m = db_lib.get_matching_table_names_wgn(conn, partial_name)
matches = [v[0] for v in m]
nm = db_lib.get_table_names(conn)
non_matches = [v[0] for v in nm if v[0] not in matches and '_mon' not in v[0]]
return matches + non_matches
class WgnSaveImportDbApi(Resource):
def put(self, project_db):
parser = reqparse.RequestParser()
parser.add_argument('wgn_db', type=str, required=False, location='json')
parser.add_argument('wgn_table_name', type=str, required=False, location='json')
args = parser.parse_args(strict=False)
if 'wgn_db' not in args or args['wgn_db'] is None or args['wgn_db'] == '':
return 200
SetupProjectDatabase.init(project_db)
try:
conn = sqlite3.connect(args['wgn_db'])
conn.row_factory = sqlite3.Row
monthly_table = "{}_mon".format(args['wgn_table_name'])
if not db_lib.exists_table(conn, args['wgn_table_name']):
abort(400, message="Table {table} does not exist in {file}.".format(table=args['wgn_table_name'], file=args['wgn_db']))
if not db_lib.exists_table(conn, monthly_table):
abort(400, message="Table {table} does not exist in {file}.".format(table=monthly_table, file=args['wgn_db']))
default_wgn_db = 'C:/SWAT/SWATPlus/Databases/swatplus_wgn.sqlite'
wgn_db_path = default_wgn_db
if not utils.are_paths_equal(default_wgn_db, args['wgn_db']):
wgn_db_path = utils.rel_path(project_db, args['wgn_db'])
m = Project_config.get()
m.wgn_db = wgn_db_path
m.wgn_table_name = args['wgn_table_name']
result = m.save()
if result > 0:
return 200
abort(400, message='Unable to update project configuration.')
except Project_config.DoesNotExist:
abort(404, message='Could not retrieve project configuration data.')
class WgnMonthApi(Resource):
def get(self, project_db, id):
SetupProjectDatabase.init(project_db)
try:
m = Weather_wgn_cli_mon.get(Weather_wgn_cli_mon.id == id)
return model_to_dict(m, recurse=False)
except Weather_wgn_cli_mon.DoesNotExist:
abort(404, message='Weather generator monthly value {id} does not exist'.format(id=id))
def delete(self, project_db, id):
SetupProjectDatabase.init(project_db)
try:
m = Weather_wgn_cli_mon.get(Weather_wgn_cli_mon.id == id)
result = m.delete_instance()
if result > 0:
return 204
abort(400, message='Unable to delete weather generator monthly value {id}.'.format(id=id))
except Weather_wgn_cli_mon.DoesNotExist:
abort(404, message='Weather generator monthly value {id} does not exist'.format(id=id))
def put(self, project_db, id):
args = get_wgn_mon_args()
SetupProjectDatabase.init(project_db)
try:
m = Weather_wgn_cli_mon.get(Weather_wgn_cli_mon.id == id)
m.month = args['month']
m.tmp_max_ave = args['tmp_max_ave']
m.tmp_min_ave = args['tmp_min_ave']
m.tmp_max_sd = args['tmp_max_sd']
m.tmp_min_sd = args['tmp_min_sd']
m.pcp_ave = args['pcp_ave']
m.pcp_sd = args['pcp_sd']
m.pcp_skew = args['pcp_skew']
m.wet_dry = args['wet_dry']
m.wet_wet = args['wet_wet']
m.pcp_days = args['pcp_days']
m.pcp_hhr = args['pcp_hhr']
m.slr_ave = args['slr_ave']
m.dew_ave = args['dew_ave']
m.wnd_ave = args['wnd_ave']
result = m.save()
if result > 0:
return 200
abort(400, message='Unable to update weather generator monthly value {id}.'.format(id=id))
except Weather_wgn_cli_mon.DoesNotExist:
abort(404, message='Weather generator monthly value {id} does not exist'.format(id=id))
class WgnMonthListApi(Resource):
def get(self, project_db, wgn_id):
SetupProjectDatabase.init(project_db)
m = Weather_wgn_cli_mon.select().where(Weather_wgn_cli_mon.weather_wgn_cli_id == wgn_id).order_by(Weather_wgn_cli_mon.month)
return [model_to_dict(v, recurse=False) for v in m]
def post(self, project_db, wgn_id):
args = get_wgn_mon_args()
SetupProjectDatabase.init(project_db)
try:
e = Weather_wgn_cli_mon.get((Weather_wgn_cli_mon.weather_wgn_cli_id == wgn_id) & (Weather_wgn_cli_mon.month == args['month']))
abort(400, message='Weather generator already has data for month {month}.'.format(month=args['month']))
except Weather_wgn_cli_mon.DoesNotExist:
m = Weather_wgn_cli_mon()
m.weather_wgn_cli = wgn_id
m.month = args['month']
m.tmp_max_ave = args['tmp_max_ave']
m.tmp_min_ave = args['tmp_min_ave']
m.tmp_max_sd = args['tmp_max_sd']
m.tmp_min_sd = args['tmp_min_sd']
m.pcp_ave = args['pcp_ave']
m.pcp_sd = args['pcp_sd']
m.pcp_skew = args['pcp_skew']
m.wet_dry = args['wet_dry']
m.wet_wet = args['wet_wet']
m.pcp_days = args['pcp_days']
m.pcp_hhr = args['pcp_hhr']
m.slr_ave = args['slr_ave']
m.dew_ave = args['dew_ave']
m.wnd_ave = args['wnd_ave']
result = m.save()
if result > 0:
return model_to_dict(m, recurse=False), 201
abort(400, message='Unable to create weather generator monthly value.')
```
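The weather generator resources above are exposed through routes registered in `swatplus_rest_api.py` further below (`/climate/wgn/...`). A minimal client sketch follows; the local port and project database path are assumptions, not part of the source, and the monthly field names simply mirror the model assignments in `WgnMonthListApi.post` above:
```python
import requests

BASE = "http://localhost:5000"               # assumed local port
PROJECT_DB = "C:/demo/demo_project.sqlite"   # hypothetical project database path

# Page through weather generators: sort by name, ascending, page 1, 25 per page.
wgns = requests.get(f"{BASE}/climate/wgn/list/name/false/1/25/{PROJECT_DB}").json()
print(wgns["total"], len(wgns["wgns"]))

# Add a monthly record to weather generator 1.
month = {
    "month": 1,
    "tmp_max_ave": 5.2, "tmp_min_ave": -3.1, "tmp_max_sd": 2.0, "tmp_min_sd": 2.5,
    "pcp_ave": 45.0, "pcp_sd": 6.0, "pcp_skew": 1.2,
    "wet_dry": 0.3, "wet_wet": 0.5, "pcp_days": 10.0, "pcp_hhr": 12.0,
    "slr_ave": 8.0, "dew_ave": -1.0, "wnd_ave": 3.2,
}
created = requests.post(f"{BASE}/climate/wgn/months/1/{PROJECT_DB}", json=month)
print(created.status_code)  # expect 201 on success
```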
#### File: editor_api/rest/regions.py
```python
from flask_restful import Resource, reqparse, abort
from playhouse.shortcuts import model_to_dict
from peewee import *
from .base import BaseRestModel
from database.project import base as project_base
from database.project.setup import SetupProjectDatabase
from database.project.config import Project_config
from database.project.regions import Ls_unit_def, Ls_unit_ele
class LsUnitDefListApi(BaseRestModel):
def get(self, project_db, sort, reverse, page, items_per_page):
table = Ls_unit_def
list_name = 'ls_units'
SetupProjectDatabase.init(project_db)
total = table.select().count()
sort_val = SQL(sort)
if reverse == 'true':
sort_val = SQL(sort).desc()
m = table.select().order_by(sort_val).paginate(int(page), int(items_per_page))
ml = [{'id': v.id, 'name': v.name, 'area': v.area, 'num_elements': len(v.elements)} for v in m]
return {'total': total, list_name: ml}
class LsUnitDefApi(BaseRestModel):
def get(self, project_db, id):
return self.base_get(project_db, id, Ls_unit_def, 'Landscape unit', back_refs=True)
def delete(self, project_db, id):
return self.base_delete(project_db, id, Ls_unit_def, 'Landscape unit')
def put(self, project_db, id):
return self.base_put(project_db, id, Ls_unit_def, 'Landscape unit')
class LsUnitDefPostApi(BaseRestModel):
def post(self, project_db):
return self.base_post(project_db, Ls_unit_def, 'Landscape unit')
class LsUnitEleListApi(BaseRestModel):
def get(self, project_db, sort, reverse, page, items_per_page):
table = Ls_unit_ele
list_name = 'ls_unit_eles'
return self.base_paged_list(project_db, sort, reverse, page, items_per_page, table, list_name, back_refs=True)
class LsUnitEleApi(BaseRestModel):
def get(self, project_db, id):
return self.base_get(project_db, id, Ls_unit_ele, 'Landscape unit element', back_refs=True)
def delete(self, project_db, id):
return self.base_delete(project_db, id, Ls_unit_ele, 'Landscape unit element')
def put(self, project_db, id):
return self.base_put(project_db, id, Ls_unit_ele, 'Landscape unit element')
class LsUnitElePostApi(BaseRestModel):
def post(self, project_db):
return self.base_post(project_db, Ls_unit_ele, 'Landscape unit element', extra_args=[{'name': 'ls_unit_def_id', 'type': int}])
```
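Most of `regions.py` delegates to `BaseRestModel`; only the landscape-unit list is written out by hand because it adds a `num_elements` count per unit. A small client-side sketch of that list endpoint (local URL and project path are assumptions):
```python
import requests

BASE = "http://localhost:5000"               # assumed local port
PROJECT_DB = "C:/demo/demo_project.sqlite"   # hypothetical project database path

page = requests.get(f"{BASE}/ls_units/list/name/false/1/50/{PROJECT_DB}").json()
for unit in page["ls_units"]:
    # Each entry carries the id, name, area and element count built in LsUnitDefListApi.
    print(unit["id"], unit["name"], unit["area"], unit["num_elements"])
```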
#### File: editor_api/rest/routing_unit.py
```python
from flask_restful import Resource, reqparse, abort
from playhouse.shortcuts import model_to_dict
from peewee import *
from .base import BaseRestModel
from database.project.setup import SetupProjectDatabase
from database.project.climate import Weather_sta_cli
from database.project.routing_unit import Rout_unit_rtu
from database.project.connect import Rout_unit_con, Rout_unit_con_out, Rout_unit_ele, Chandeg_con
from database.project.dr import Delratio_del
from database.project.hydrology import Topography_hyd, Field_fld
invalid_name_msg = 'Invalid name {name}. Please ensure the value exists in your database.'
class RoutUnitBoundariesApi(Resource):
def get(self, project_db):
SetupProjectDatabase.init(project_db)
if Rout_unit_con.select().count() > 0:
m = Rout_unit_con.select(
fn.Max(Rout_unit_con.lat).alias('n'),
fn.Min(Rout_unit_con.lat).alias('s'),
fn.Max(Rout_unit_con.lon).alias('e'),
fn.Min(Rout_unit_con.lon).alias('w')
).scalar(as_tuple=True)
return {
'n': m[0],
's': m[1],
'e': m[2],
'w': m[3]
}
elif Chandeg_con.select().count() > 0: # Quick fix for lte
m = Chandeg_con.select(
fn.Max(Chandeg_con.lat).alias('n'),
fn.Min(Chandeg_con.lat).alias('s'),
fn.Max(Chandeg_con.lon).alias('e'),
fn.Min(Chandeg_con.lon).alias('w')
).scalar(as_tuple=True)
return {
'n': m[0],
's': m[1],
'e': m[2],
'w': m[3]
}
else:
abort(404, message='No routing unit connections in database.')
def get_con_args():
parser = reqparse.RequestParser()
parser.add_argument('id', type=int, required=False, location='json')
parser.add_argument('name', type=str, required=True, location='json')
parser.add_argument('gis_id', type=int, required=False, location='json')
parser.add_argument('area', type=float, required=True, location='json')
parser.add_argument('lat', type=float, required=True, location='json')
parser.add_argument('lon', type=float, required=True, location='json')
parser.add_argument('elev', type=float, required=False, location='json')
parser.add_argument('wst', type=int, required=False, location='json')
parser.add_argument('wst_name', type=str, required=False, location='json')
parser.add_argument('cst', type=int, required=False, location='json')
parser.add_argument('ovfl', type=int, required=False, location='json')
parser.add_argument('rule', type=int, required=False, location='json')
parser.add_argument('rtu', type=int, required=False, location='json')
parser.add_argument('rtu_name', type=str, required=False, location='json')
args = parser.parse_args(strict=True)
return args
class RoutingUnitConApi(BaseRestModel):
def get(self, project_db, id):
return self.base_get(project_db, id, Rout_unit_con, 'Rout_unit', True)
def delete(self, project_db, id):
return self.base_delete(project_db, id, Rout_unit_con, 'Rout_unit')
def put(self, project_db, id):
args = get_con_args()
try:
SetupProjectDatabase.init(project_db)
m = Rout_unit_con.get(Rout_unit_con.id == id)
m.name = args['name']
m.area = args['area']
m.lat = args['lat']
m.lon = args['lon']
m.elev = args['elev']
m.rtu_id = self.get_id_from_name(Rout_unit_rtu, args['rtu_name'])
if args['wst_name'] is not None:
m.wst_id = self.get_id_from_name(Weather_sta_cli, args['wst_name'])
result = m.save()
if result > 0:
return 200
abort(400, message='Unable to update Routing Unit {id}.'.format(id=id))
except IntegrityError:
abort(400, message='Routing unit name must be unique.')
except Rout_unit_con.DoesNotExist:
abort(404, message='Rout_unit {id} does not exist'.format(id=id))
except Rout_unit_rtu.DoesNotExist:
abort(400, message=invalid_name_msg.format(name=args['rtu_name']))
except Weather_sta_cli.DoesNotExist:
abort(400, message=invalid_name_msg.format(name=args['wst_name']))
except Exception as ex:
abort(400, message="Unexpected error {ex}".format(ex=ex))
class RoutingUnitConPostApi(BaseRestModel):
def post(self, project_db):
args = get_con_args()
SetupProjectDatabase.init(project_db)
try:
e = Rout_unit_con.get(Rout_unit_con.name == args['name'])
abort(400, message='Routing unit name must be unique. Routing unit with this name already exists.')
except Rout_unit_con.DoesNotExist:
try:
m = Rout_unit_con()
m.name = args['name']
m.area = args['area']
m.lat = args['lat']
m.lon = args['lon']
m.elev = args['elev']
m.ovfl = 0
m.rule = 0
m.rtu_id = self.get_id_from_name(Rout_unit_rtu, args['rtu_name'])
if args['wst_name'] is not None:
m.wst_id = self.get_id_from_name(Weather_sta_cli, args['wst_name'])
result = m.save()
if result > 0:
return model_to_dict(m), 201
abort(400, message='Unable to create routing unit.')
except IntegrityError:
abort(400, message='Routing unit name must be unique.')
except Rout_unit_rtu.DoesNotExist:
abort(400, message=invalid_name_msg.format(name=args['rtu_name']))
except Weather_sta_cli.DoesNotExist:
abort(400, message=invalid_name_msg.format(name=args['wst_name']))
except Exception as ex:
abort(400, message="Unexpected error {ex}".format(ex=ex))
class RoutingUnitConListApi(BaseRestModel):
def get(self, project_db, sort, reverse, page, items_per_page):
table = Rout_unit_con
list_name = 'routing_unit'
return self.base_paged_list(project_db, sort, reverse, page, items_per_page, table, list_name, True)
class RoutingUnitConMapApi(BaseRestModel):
def get(self, project_db):
return self.get_con_map(project_db, Rout_unit_con)
def get_con_out_args():
parser = reqparse.RequestParser()
parser.add_argument('id', type=int, required=False, location='json')
parser.add_argument('order', type=int, required=True, location='json')
parser.add_argument('obj_typ', type=str, required=True, location='json')
parser.add_argument('obj_id', type=int, required=True, location='json')
parser.add_argument('hyd_typ', type=str, required=True, location='json')
parser.add_argument('frac', type=float, required=True, location='json')
parser.add_argument('rtu_con_id', type=int, required=False, location='json')
args = parser.parse_args(strict=True)
return args
class RoutingUnitConOutApi(BaseRestModel):
def get(self, project_db, id):
return self.base_get(project_db, id, Rout_unit_con_out, 'Outflow', True)
def delete(self, project_db, id):
return self.base_delete(project_db, id, Rout_unit_con_out, 'Outflow')
def put(self, project_db, id):
try:
args = get_con_out_args()
SetupProjectDatabase.init(project_db)
m = Rout_unit_con_out.get(Rout_unit_con_out.id == id)
m.order = args['order']
m.obj_typ = args['obj_typ']
m.obj_id = args['obj_id']
m.hyd_typ = args['hyd_typ']
m.frac = args['frac']
result = m.save()
if result > 0:
return 200
abort(400, message='Unable to update Routing unit outflow {id}.'.format(id=id))
except Rout_unit_con_out.DoesNotExist:
abort(404, message='Routing Unit outflow {id} does not exist'.format(id=id))
except Exception as ex:
abort(400, message="Unexpected error {ex}".format(ex=ex))
class RoutingUnitConOutPostApi(BaseRestModel):
def post(self, project_db):
args = get_con_out_args()
SetupProjectDatabase.init(project_db)
try:
m = Rout_unit_con_out()
m.order = args['order']
m.obj_typ = args['obj_typ']
m.obj_id = args['obj_id']
m.hyd_typ = args['hyd_typ']
m.frac = args['frac']
m.rtu_con_id = args['rtu_con_id']
result = m.save()
if result > 0:
return model_to_dict(m), 201
abort(400, message='Unable to create routing unit outflow.')
except Exception as ex:
abort(400, message="Unexpected error {ex}".format(ex=ex))
def get_routingunit_args(get_selected_ids=False):
parser = reqparse.RequestParser()
if get_selected_ids:
parser.add_argument('selected_ids', type=int, action='append', required=False, location='json')
else:
parser.add_argument('id', type=int, required=False, location='json')
parser.add_argument('name', type=str, required=True, location='json')
parser.add_argument('description', type=str, required=False, location='json')
parser.add_argument('dlr_name', type=str, required=False, location='json')
parser.add_argument('topo_name', type=str, required=False, location='json')
parser.add_argument('field_name', type=str, required=False, location='json')
args = parser.parse_args(strict=True)
return args
class RoutingUnitRtuListApi(BaseRestModel):
def get(self, project_db, sort, reverse, page, items_per_page):
table = Rout_unit_rtu
list_name = 'routing_unit'
return self.base_paged_list(project_db, sort, reverse, page, items_per_page, table, list_name, True)
class RoutingUnitRtuApi(BaseRestModel):
def get(self, project_db, id):
return self.base_get(project_db, id, Rout_unit_rtu, 'Rout_unit', True)
def delete(self, project_db, id):
return self.base_delete(project_db, id, Rout_unit_rtu, 'Rout_unit')
def put(self, project_db, id):
args = get_routingunit_args()
try:
SetupProjectDatabase.init(project_db)
m = Rout_unit_rtu.get(Rout_unit_rtu.id == id)
m.name = args['name']
m.description = args['description']
if args['dlr_name']:
m.dlr_id = self.get_id_from_name(Delratio_del, args['dlr_name'])
if args['topo_name']:
m.topo_id = self.get_id_from_name(Topography_hyd, args['topo_name'])
if args['field_name']:
m.field_id = self.get_id_from_name(Field_fld, args['field_name'])
result = m.save()
if result > 0:
return 200
abort(400, message='Unable to update routing unit properties {id}.'.format(id=id))
except IntegrityError as e:
abort(400, message='Routing unit properties name must be unique.')
except Rout_unit_rtu.DoesNotExist:
abort(404, message='Routing unit properties {id} does not exist'.format(id=id))
except Topography_hyd.DoesNotExist:
abort(400, message=invalid_name_msg.format(name=args['topo_name']))
except Delratio_del.DoesNotExist:
abort(400, message=invalid_name_msg.format(name=args['dlr_name']))
except Field_fld.DoesNotExist:
abort(400, message=invalid_name_msg.format(name=args['field_name']))
except Exception as ex:
abort(400, message="Unexpected error {ex}".format(ex=ex))
class RoutingUnitRtuUpdateManyApi(BaseRestModel):
def get(self, project_db):
return self.base_name_id_list(project_db, Rout_unit_rtu)
def put(self, project_db):
SetupProjectDatabase.init(project_db)
args = get_routingunit_args(True)
try:
param_dict = {}
if args['dlr_name'] is not None:
param_dict['dlr_id'] = self.get_id_from_name(Delratio_del, args['dlr_name'])
if args['topo_name'] is not None:
param_dict['topo_id'] = self.get_id_from_name(Topography_hyd, args['topo_name'])
if args['field_name'] is not None:
param_dict['field_id'] = self.get_id_from_name(Field_fld, args['field_name'])
query = Rout_unit_rtu.update(param_dict).where(Rout_unit_rtu.id.in_(args['selected_ids']))
result = query.execute()
if result > 0:
return 200
abort(400, message='Unable to update routing unit properties.')
except Delratio_del.DoesNotExist:
abort(400, message=invalid_name_msg.format(name=args['dlr_name']))
except Topography_hyd.DoesNotExist:
abort(400, message=invalid_name_msg.format(name=args['topo_name']))
except Field_fld.DoesNotExist:
abort(400, message=invalid_name_msg.format(name=args['field_name']))
except Exception as ex:
abort(400, message="Unexpected error {ex}".format(ex=ex))
class RoutingUnitRtuPostApi(BaseRestModel):
def post(self, project_db):
args = get_routingunit_args()
try:
SetupProjectDatabase.init(project_db)
m = Rout_unit_rtu()
m.name = args['name']
m.description = args['description']
if args['dlr_name']:
m.dlr_id = self.get_id_from_name(Delratio_del, args['dlr_name'])
m.topo_id = self.get_id_from_name(Topography_hyd, args['topo_name'])
m.field_id = self.get_id_from_name(Field_fld, args['field_name'])
result = m.save()
if result > 0:
return 200
abort(400, message='Unable to create routing unit properties.')
except IntegrityError as e:
abort(400, message='Routing unit properties name must be unique.')
except Delratio_del.DoesNotExist:
abort(400, message=invalid_name_msg.format(name=args['dlr_name']))
except Topography_hyd.DoesNotExist:
abort(400, message=invalid_name_msg.format(name=args['topo_name']))
except Field_fld.DoesNotExist:
abort(400, message=invalid_name_msg.format(name=args['field_name']))
except Exception as ex:
abort(400, message="Unexpected error {ex}".format(ex=ex))
class RoutingUnitEleListApi(BaseRestModel):
def get(self, project_db, sort, reverse, page, items_per_page):
table = Rout_unit_ele
list_name = 'rout_unit_eles'
return self.base_paged_list(project_db, sort, reverse, page, items_per_page, table, list_name, back_refs=True)
class RoutingUnitEleApi(BaseRestModel):
def get(self, project_db, id):
return self.base_get(project_db, id, Rout_unit_ele, 'Routing unit element', back_refs=True)
def delete(self, project_db, id):
return self.base_delete(project_db, id, Rout_unit_ele, 'Routing unit element')
def put(self, project_db, id):
return self.base_put(project_db, id, Rout_unit_ele, 'Routing unit element')
class RoutingUnitElePostApi(BaseRestModel):
def post(self, project_db):
return self.base_post(project_db, Rout_unit_ele, 'Routing unit element')
```
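`get_con_args()` defines the JSON body accepted when creating or updating a routing-unit connection, and `RoutingUnitConPostApi` resolves `rtu_name`/`wst_name` to foreign keys by name. A hedged example request is sketched below; the names, path and port are placeholders, and the referenced routing-unit properties and weather station records must already exist in the project database:
```python
import requests

BASE = "http://localhost:5000"               # assumed local port
PROJECT_DB = "C:/demo/demo_project.sqlite"   # hypothetical project database path

con = {
    "name": "rtu_con_01",
    "area": 12.5,
    "lat": 47.65, "lon": -117.4, "elev": 520.0,
    "rtu_name": "rtu01",    # must match an existing Rout_unit_rtu name
    "wst_name": "sta_001",  # optional; must match an existing weather station name
}
# reqparse is called with strict=True, so only keys declared in get_con_args() are allowed.
r = requests.post(f"{BASE}/routing_unit/post/{PROJECT_DB}", json=con)
print(r.status_code)  # 201 with the created record, or 400 with an error message
```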
#### File: editor_api/rest/simulation.py
```python
from flask_restful import Resource, reqparse, abort
from playhouse.shortcuts import model_to_dict
from database.project.setup import SetupProjectDatabase
from database.project.simulation import Time_sim, Print_prt, Print_prt_object
class TimeSimApi(Resource):
def get(self, project_db):
SetupProjectDatabase.init(project_db)
m = Time_sim.get_or_create_default()
return model_to_dict(m)
def put(self, project_db):
parser = reqparse.RequestParser()
parser.add_argument('id', type=int, required=False, location='json')
parser.add_argument('day_start', type=int, required=True, location='json')
parser.add_argument('yrc_start', type=int, required=True, location='json')
parser.add_argument('day_end', type=int, required=True, location='json')
parser.add_argument('yrc_end', type=int, required=True, location='json')
parser.add_argument('step', type=int, required=False, location='json')
args = parser.parse_args(strict=True)
SetupProjectDatabase.init(project_db)
result = Time_sim.update_and_exec(args['day_start'], args['yrc_start'], args['day_end'], args['yrc_end'], args['step'])
if result == 1:
return 200
abort(400, message='Unable to update time_sim table.')
class PrintPrtApi(Resource):
def get(self, project_db):
SetupProjectDatabase.init(project_db)
try:
m = Print_prt.get()
prt = model_to_dict(m, recurse=False)
o = Print_prt_object.select()
objects = [model_to_dict(v, recurse=False) for v in o]
return {"prt": prt, "objects": objects}
except Print_prt.DoesNotExist:
abort(404, message="Could not retrieve print_prt data.")
def put(self, project_db):
parser = reqparse.RequestParser()
parser.add_argument('id', type=int, required=False, location='json')
parser.add_argument('nyskip', type=int, required=True, location='json')
parser.add_argument('day_start', type=int, required=True, location='json')
parser.add_argument('day_end', type=int, required=True, location='json')
parser.add_argument('yrc_start', type=int, required=True, location='json')
parser.add_argument('yrc_end', type=int, required=True, location='json')
parser.add_argument('interval', type=int, required=True, location='json')
parser.add_argument('csvout', type=bool, required=True, location='json')
parser.add_argument('dbout', type=bool, required=True, location='json')
parser.add_argument('cdfout', type=bool, required=True, location='json')
parser.add_argument('soilout', type=bool, required=True, location='json')
parser.add_argument('mgtout', type=bool, required=True, location='json')
parser.add_argument('hydcon', type=bool, required=True, location='json')
parser.add_argument('fdcout', type=bool, required=True, location='json')
parser.add_argument('objects', type=list, required=False, location='json')
args = parser.parse_args(strict=False)
SetupProjectDatabase.init(project_db)
q = Print_prt.update(
nyskip=args['nyskip'],
day_start=args['day_start'],
day_end=args['day_end'],
yrc_start=args['yrc_start'],
yrc_end=args['yrc_end'],
interval=args['interval'],
csvout=args['csvout'],
dbout=args['dbout'],
cdfout=args['cdfout'],
soilout=args['soilout'],
mgtout=args['mgtout'],
hydcon=args['hydcon'],
fdcout=args['fdcout']
)
result = q.execute()
if args['objects'] is not None:
for o in args['objects']:
Print_prt_object.update(
daily=o['daily'],
monthly=o['monthly'],
yearly=o['yearly'],
avann=o['avann']
).where(Print_prt_object.id == o['id']).execute()
return 200
class PrintPrtObjectApi(Resource):
def put(self, project_db, id):
parser = reqparse.RequestParser()
parser.add_argument('id', type=int, required=False, location='json')
parser.add_argument('print_prt', type=int, required=True, location='json')
parser.add_argument('name', type=str, required=True, location='json')
parser.add_argument('daily', type=bool, required=True, location='json')
parser.add_argument('monthly', type=bool, required=True, location='json')
parser.add_argument('yearly', type=bool, required=True, location='json')
parser.add_argument('avann', type=bool, required=True, location='json')
args = parser.parse_args(strict=True)
SetupProjectDatabase.init(project_db)
q = Print_prt_object.update(
daily=args['daily'],
monthly=args['monthly'],
yearly=args['yearly'],
avann=args['avann']
).where(Print_prt_object.id == id)
result = q.execute()
if result == 1:
return 200
abort(400, message='Unable to update print_prt_object {}.'.format(args['name']))
```
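`TimeSimApi.put` replaces the simulation period in one call via `Time_sim.update_and_exec`. A minimal sketch of that call (URL and project path are assumptions):
```python
import requests

BASE = "http://localhost:5000"               # assumed local port
PROJECT_DB = "C:/demo/demo_project.sqlite"   # hypothetical project database path

sim_time = {
    "day_start": 1, "yrc_start": 2000,
    "day_end": 365, "yrc_end": 2010,
    "step": 0,  # optional in the parser; forwarded to Time_sim.update_and_exec
}
r = requests.put(f"{BASE}/sim/time/{PROJECT_DB}", json=sim_time)
print(r.status_code)  # 200 when the time_sim table was updated
```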
#### File: swatplus-automatic-workflow/editor_api/swatplus_rest_api.py
```python
from flask import Flask, request
from flask_restful import Resource, Api
from flask_cors import CORS
from rest import setup, simulation, auto_complete, climate, routing_unit, hru_parm_db, channel, definitions, aquifer, reservoir, hydrology, hru, exco, dr, lum, init, ops, basin, soils, regions, change, recall, decision_table, structural
from helpers.executable_api import Unbuffered
import sys
import argparse
from werkzeug.routing import PathConverter
class EverythingConverter(PathConverter):
regex = '.*?'
app = Flask(__name__)
api = Api(app)
CORS(app)
app.url_map.converters['everything'] = EverythingConverter
def shutdown_server():
func = request.environ.get('werkzeug.server.shutdown')
if func is None:
raise RuntimeError('Not running with the Werkzeug Server')
func()
class SwatPlusApi(Resource):
def get(self):
return {'SWATPlusEditor': 'API call working'}
class SwatPlusShutdownApi(Resource):
def get(self):
shutdown_server()
return {'SWATPlusEditor': 'Server shutting down...'}
list_params = 'list/<sort>/<reverse>/<page>/<items_per_page>/<everything:project_db>'
post_params = 'post/<everything:project_db>'
get_params = '<id>/<everything:project_db>'
many_params = 'many/<everything:project_db>'
datasets_get_name_params = 'datasets/<name>/<everything:datasets_db>'
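# The shared URL fragments above expand per module, e.g. for HRUs:
#   '/hrus/' + list_params -> /hrus/list/<sort>/<reverse>/<page>/<items_per_page>/<project_db>
#   '/hrus/' + post_params -> /hrus/post/<project_db>
#   '/hrus/' + get_params  -> /hrus/<id>/<project_db>
#   '/hrus/' + many_params -> /hrus/many/<project_db>
# <everything:project_db> uses the EverythingConverter, so full file paths (with slashes) are accepted.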
api.add_resource(SwatPlusApi, '/')
api.add_resource(SwatPlusShutdownApi, '/shutdown')
api.add_resource(setup.SetupApi, '/setup')
api.add_resource(setup.ConfigApi, '/setup/config/<everything:project_db>')
api.add_resource(setup.CheckImportConfigApi, '/setup/check-config/<everything:project_db>')
api.add_resource(setup.InputFilesSettingsApi, '/setup/inputfiles/<everything:project_db>')
api.add_resource(setup.SwatRunSettingsApi, '/setup/swatrun/<everything:project_db>')
api.add_resource(setup.SaveOutputReadSettingsApi, '/setup/outputread/<everything:project_db>')
api.add_resource(setup.InfoApi, '/setup/info/<everything:project_db>')
api.add_resource(auto_complete.AutoCompleteApi, '/autocomplete/<type>/<partial_name>/<everything:project_db>')
api.add_resource(auto_complete.AutoCompleteNoParmApi, '/autocomplete-np/<type>/<everything:project_db>')
api.add_resource(auto_complete.AutoCompleteIdApi, '/autocomplete/id/<type>/<name>/<everything:project_db>')
api.add_resource(auto_complete.SelectListApi, '/selectlist/<type>/<everything:project_db>')
api.add_resource(definitions.VarRangeApi, '/vars/<table>/<everything:db>')
api.add_resource(definitions.VarCodeApi, '/codes/<table>/<variable>/<everything:db>')
api.add_resource(simulation.TimeSimApi, '/sim/time/<everything:project_db>')
api.add_resource(simulation.PrintPrtApi, '/sim/print/<everything:project_db>')
api.add_resource(simulation.PrintPrtObjectApi, '/sim/print/objects/' + get_params)
api.add_resource(basin.ParametersBsnApi, '/basin/parameters/<everything:project_db>')
api.add_resource(basin.CodesBsnApi, '/basin/codes/<everything:project_db>')
api.add_resource(climate.WeatherStationListApi, '/climate/stations/' + post_params)
api.add_resource(climate.WeatherStationApi, '/climate/stations/' + get_params)
api.add_resource(climate.WeatherStationPageApi, '/climate/stations/' + list_params)
api.add_resource(climate.WeatherStationSaveDirApi, '/climate/stations/directory/<everything:project_db>')
api.add_resource(climate.WeatherFileAutoCompleteApi, '/climate/stations/files/<type>/<partial_name>/<everything:project_db>')
api.add_resource(climate.WgnListApi, '/climate/wgn/' + post_params)
api.add_resource(climate.WgnApi, '/climate/wgn/' + get_params)
api.add_resource(climate.WgnPageApi, '/climate/wgn/' + list_params)
api.add_resource(climate.WgnSaveImportDbApi, '/climate/wgn/db/<everything:project_db>')
api.add_resource(climate.WgnTablesAutoCompleteApi, '/climate/wgn/db/tables/autocomplete/<partial_name>/<everything:wgn_db>')
api.add_resource(climate.WgnAutoCompleteApi, '/climate/wgn/autocomplete/<partial_name>/<everything:project_db>')
api.add_resource(climate.WgnMonthListApi, '/climate/wgn/months/<wgn_id>/<everything:project_db>')
api.add_resource(climate.WgnMonthApi, '/climate/wgn/month/' + get_params)
api.add_resource(routing_unit.RoutUnitBoundariesApi, '/routing_unit/boundaries/<everything:project_db>')
""" Channels Modules """
api.add_resource(channel.ChannelTypeApi, '/channels/get_type/<everything:project_db>')
api.add_resource(channel.ChannelConListApi, '/channels/' + list_params)
api.add_resource(channel.ChannelConPostApi, '/channels/' + post_params)
api.add_resource(channel.ChannelConApi, '/channels/' + get_params)
api.add_resource(channel.ChannelConMapApi, '/channels/map/<everything:project_db>')
api.add_resource(channel.ChannelConOutPostApi, '/channels/out/' + post_params)
api.add_resource(channel.ChannelConOutApi, '/channels/out/' + get_params)
api.add_resource(channel.ChandegConListApi, '/channels-lte/' + list_params)
api.add_resource(channel.ChandegConPostApi, '/channels-lte/' + post_params)
api.add_resource(channel.ChandegConApi, '/channels-lte/' + get_params)
api.add_resource(channel.ChandegConMapApi, '/channels-lte/map/<everything:project_db>')
api.add_resource(channel.ChandegConOutPostApi, '/channels-lte/out/' + post_params)
api.add_resource(channel.ChandegConOutApi, '/channels-lte/out/' + get_params)
api.add_resource(channel.ChannelChaListApi, '/channels/properties/' + list_params)
api.add_resource(channel.ChannelChaPostApi, '/channels/properties/' + post_params)
api.add_resource(channel.ChannelChaUpdateManyApi, '/channels/properties/' + many_params)
api.add_resource(channel.ChannelChaApi, '/channels/properties/' + get_params)
api.add_resource(channel.InitialChaListApi, '/channels/initial/' + list_params)
api.add_resource(channel.InitialChaPostApi, '/channels/initial/' + post_params)
api.add_resource(channel.InitialChaUpdateManyApi, '/channels/initial/' + many_params)
api.add_resource(channel.InitialChaApi, '/channels/initial/' + get_params)
api.add_resource(channel.HydrologyChaListApi, '/channels/hydrology/' + list_params)
api.add_resource(channel.HydrologyChaPostApi, '/channels/hydrology/' + post_params)
api.add_resource(channel.HydrologyChaUpdateManyApi, '/channels/hydrology/' + many_params)
api.add_resource(channel.HydrologyChaApi, '/channels/hydrology/' + get_params)
api.add_resource(channel.SedimentChaListApi, '/channels/sediment/' + list_params)
api.add_resource(channel.SedimentChaPostApi, '/channels/sediment/' + post_params)
api.add_resource(channel.SedimentChaUpdateManyApi, '/channels/sediment/' + many_params)
api.add_resource(channel.SedimentChaApi, '/channels/sediment/' + get_params)
api.add_resource(channel.NutrientsChaListApi, '/channels/nutrients/' + list_params)
api.add_resource(channel.NutrientsChaPostApi, '/channels/nutrients/' + post_params)
api.add_resource(channel.NutrientsChaUpdateManyApi, '/channels/nutrients/' + many_params)
api.add_resource(channel.NutrientsChaApi, '/channels/nutrients/' + get_params)
api.add_resource(channel.ChannelLteChaListApi, '/channels-lte/properties/' + list_params)
api.add_resource(channel.ChannelLteChaPostApi, '/channels-lte/properties/' + post_params)
api.add_resource(channel.ChannelLteChaUpdateManyApi, '/channels-lte/properties/' + many_params)
api.add_resource(channel.ChannelLteChaApi, '/channels-lte/properties/' + get_params)
api.add_resource(channel.HydSedLteChaListApi, '/channels-lte/hydsed/' + list_params)
api.add_resource(channel.HydSedLteChaPostApi, '/channels-lte/hydsed/' + post_params)
api.add_resource(channel.HydSedLteChaUpdateManyApi, '/channels-lte/hydsed/' + many_params)
api.add_resource(channel.HydSedLteChaApi, '/channels-lte/hydsed/' + get_params)
""" Channels Modules """
""" HRUs Modules """
api.add_resource(hru.HruConListApi, '/hrus/' + list_params)
api.add_resource(hru.HruConPostApi, '/hrus/' + post_params)
api.add_resource(hru.HruConApi, '/hrus/' + get_params)
api.add_resource(hru.HruConMapApi, '/hrus/map/<everything:project_db>')
api.add_resource(hru.HruConOutPostApi, '/hrus/out/' + post_params)
api.add_resource(hru.HruConOutApi, '/hrus/out/' + get_params)
api.add_resource(hru.HruDataHruListApi, '/hrus/properties/' + list_params)
api.add_resource(hru.HruDataHruPostApi, '/hrus/properties/' + post_params)
api.add_resource(hru.HruDataHruUpdateManyApi, '/hrus/properties/' + many_params)
api.add_resource(hru.HruDataHruApi, '/hrus/properties/' + get_params)
api.add_resource(hru.HruLteConListApi, '/hrus-lte/' + list_params)
api.add_resource(hru.HruLteConPostApi, '/hrus-lte/' + post_params)
api.add_resource(hru.HruLteConApi, '/hrus-lte/' + get_params)
api.add_resource(hru.HruLteConMapApi, '/hrus-lte/map/<everything:project_db>')
api.add_resource(hru.HruLteConOutPostApi, '/hrus-lte/out/' + post_params)
api.add_resource(hru.HruLteConOutApi, '/hrus-lte/out/' + get_params)
api.add_resource(hru.HruLteListApi, '/hrus-lte/properties/' + list_params)
api.add_resource(hru.HruLtePostApi, '/hrus-lte/properties/' + post_params)
api.add_resource(hru.HruLteUpdateManyApi, '/hrus-lte/properties/' + many_params)
api.add_resource(hru.HruLteApi, '/hrus-lte/properties/' + get_params)
""" HRUs Modules """
""" RoutingUnit Modules """
api.add_resource(routing_unit.RoutingUnitConListApi, '/routing_unit/' + list_params)
api.add_resource(routing_unit.RoutingUnitConPostApi, '/routing_unit/' + post_params)
api.add_resource(routing_unit.RoutingUnitConApi, '/routing_unit/' + get_params)
api.add_resource(routing_unit.RoutingUnitConMapApi, '/routing_unit/map/<everything:project_db>')
api.add_resource(routing_unit.RoutingUnitConOutPostApi, '/routing_unit/out/' + post_params)
api.add_resource(routing_unit.RoutingUnitConOutApi, '/routing_unit/out/' + get_params)
api.add_resource(routing_unit.RoutingUnitRtuListApi, '/routing_unit/properties/' + list_params)
api.add_resource(routing_unit.RoutingUnitRtuPostApi, '/routing_unit/properties/' + post_params)
api.add_resource(routing_unit.RoutingUnitRtuUpdateManyApi, '/routing_unit/properties/' + many_params)
api.add_resource(routing_unit.RoutingUnitRtuApi, '/routing_unit/properties/' + get_params)
api.add_resource(routing_unit.RoutingUnitEleListApi, '/routing_unit/elements/' + list_params)
api.add_resource(routing_unit.RoutingUnitElePostApi, '/routing_unit/elements/' + post_params)
api.add_resource(routing_unit.RoutingUnitEleApi, '/routing_unit/elements/' + get_params)
""" RoutingUnit Modules """
""" Aquifers Modules """
api.add_resource(aquifer.AquiferConListApi, '/aquifers/' + list_params)
api.add_resource(aquifer.AquiferConPostApi, '/aquifers/' + post_params)
api.add_resource(aquifer.AquiferConApi, '/aquifers/' + get_params)
api.add_resource(aquifer.AquiferConMapApi, '/aquifers/map/<everything:project_db>')
api.add_resource(aquifer.AquiferConOutPostApi, '/aquifers/out/' + post_params)
api.add_resource(aquifer.AquiferConOutApi, '/aquifers/out/' + get_params)
api.add_resource(aquifer.AquiferAquListApi, '/aquifers/properties/' + list_params)
api.add_resource(aquifer.AquiferAquPostApi, '/aquifers/properties/' + post_params)
api.add_resource(aquifer.AquiferAquUpdateManyApi, '/aquifers/properties/' + many_params)
api.add_resource(aquifer.AquiferAquApi, '/aquifers/properties/' + get_params)
api.add_resource(aquifer.InitialAquListApi, '/aquifers/initial/' + list_params)
api.add_resource(aquifer.InitialAquPostApi, '/aquifers/initial/' + post_params)
api.add_resource(aquifer.InitialAquUpdateManyApi, '/aquifers/initial/' + many_params)
api.add_resource(aquifer.InitialAquApi, '/aquifers/initial/' + get_params)
""" Aquifers Modules """
""" Reservoirs Modules """
api.add_resource(reservoir.ReservoirConListApi, '/reservoirs/' + list_params)
api.add_resource(reservoir.ReservoirConPostApi, '/reservoirs/' + post_params)
api.add_resource(reservoir.ReservoirConApi, '/reservoirs/' + get_params)
api.add_resource(reservoir.ReservoirConMapApi, '/reservoirs/map/<everything:project_db>')
api.add_resource(reservoir.ReservoirConOutPostApi, '/reservoirs/out/' + post_params)
api.add_resource(reservoir.ReservoirConOutApi, '/reservoirs/out/' + get_params)
api.add_resource(reservoir.ReservoirResListApi, '/reservoirs/properties/' + list_params)
api.add_resource(reservoir.ReservoirResPostApi, '/reservoirs/properties/' + post_params)
api.add_resource(reservoir.ReservoirResUpdateManyApi, '/reservoirs/properties/' + many_params)
api.add_resource(reservoir.ReservoirResApi, '/reservoirs/properties/' + get_params)
api.add_resource(reservoir.InitialResListApi, '/reservoirs/initial/' + list_params)
api.add_resource(reservoir.InitialResPostApi, '/reservoirs/initial/' + post_params)
api.add_resource(reservoir.InitialResUpdateManyApi, '/reservoirs/initial/' + many_params)
api.add_resource(reservoir.InitialResApi, '/reservoirs/initial/' + get_params)
api.add_resource(reservoir.HydrologyResListApi, '/reservoirs/hydrology/' + list_params)
api.add_resource(reservoir.HydrologyResPostApi, '/reservoirs/hydrology/' + post_params)
api.add_resource(reservoir.HydrologyResUpdateManyApi, '/reservoirs/hydrology/' + many_params)
api.add_resource(reservoir.HydrologyResApi, '/reservoirs/hydrology/' + get_params)
api.add_resource(reservoir.SedimentResListApi, '/reservoirs/sediment/' + list_params)
api.add_resource(reservoir.SedimentResPostApi, '/reservoirs/sediment/' + post_params)
api.add_resource(reservoir.SedimentResUpdateManyApi, '/reservoirs/sediment/' + many_params)
api.add_resource(reservoir.SedimentResApi, '/reservoirs/sediment/' + get_params)
api.add_resource(reservoir.NutrientsResListApi, '/reservoirs/nutrients/' + list_params)
api.add_resource(reservoir.NutrientsResPostApi, '/reservoirs/nutrients/' + post_params)
api.add_resource(reservoir.NutrientsResUpdateManyApi, '/reservoirs/nutrients/' + many_params)
api.add_resource(reservoir.NutrientsResApi, '/reservoirs/nutrients/' + get_params)
api.add_resource(reservoir.WetlandsWetListApi, '/reservoirs/wetlands/' + list_params)
api.add_resource(reservoir.WetlandsWetPostApi, '/reservoirs/wetlands/' + post_params)
api.add_resource(reservoir.WetlandsWetUpdateManyApi, '/reservoirs/wetlands/' + many_params)
api.add_resource(reservoir.WetlandsWetApi, '/reservoirs/wetlands/' + get_params)
api.add_resource(reservoir.HydrologyWetListApi, '/reservoirs/wetlands_hydrology/' + list_params)
api.add_resource(reservoir.HydrologyWetPostApi, '/reservoirs/wetlands_hydrology/' + post_params)
api.add_resource(reservoir.HydrologyWetUpdateManyApi, '/reservoirs/wetlands_hydrology/' + many_params)
api.add_resource(reservoir.HydrologyWetApi, '/reservoirs/wetlands_hydrology/' + get_params)
""" Reservoirs Modules """
""" Exco Modules """
api.add_resource(exco.ExcoConListApi, '/exco/' + list_params)
api.add_resource(exco.ExcoConPostApi, '/exco/' + post_params)
api.add_resource(exco.ExcoConApi, '/exco/' + get_params)
api.add_resource(exco.ExcoConMapApi, '/exco/map/<everything:project_db>')
api.add_resource(exco.ExcoConOutPostApi, '/exco/out/' + post_params)
api.add_resource(exco.ExcoConOutApi, '/exco/out/' + get_params)
api.add_resource(exco.ExcoExcListApi, '/exco/properties/' + list_params)
api.add_resource(exco.ExcoExcPostApi, '/exco/properties/' + post_params)
api.add_resource(exco.ExcoExcUpdateManyApi, '/exco/properties/' + many_params)
api.add_resource(exco.ExcoExcApi, '/exco/properties/' + get_params)
api.add_resource(exco.ExcoOMListApi, '/exco/om/' + list_params)
api.add_resource(exco.ExcoOMPostApi, '/exco/om/' + post_params)
api.add_resource(exco.ExcoOMUpdateManyApi, '/exco/om/' + many_params)
api.add_resource(exco.ExcoOMApi, '/exco/om/' + get_params)
""" Exco Modules """
""" Delratio Modules """
api.add_resource(dr.DelratioConListApi, '/dr/' + list_params)
api.add_resource(dr.DelratioConPostApi, '/dr/' + post_params)
api.add_resource(dr.DelratioConApi, '/dr/' + get_params)
api.add_resource(dr.DelratioConMapApi, '/dr/map/<everything:project_db>')
api.add_resource(dr.DelratioConOutPostApi, '/dr/out/' + post_params)
api.add_resource(dr.DelratioConOutApi, '/dr/out/' + get_params)
api.add_resource(dr.DelratioDelListApi, '/dr/properties/' + list_params)
api.add_resource(dr.DelratioDelPostApi, '/dr/properties/' + post_params)
api.add_resource(dr.DelratioDelUpdateManyApi, '/dr/properties/' + many_params)
api.add_resource(dr.DelratioDelApi, '/dr/properties/' + get_params)
api.add_resource(dr.DelratioOMListApi, '/dr/om/' + list_params)
api.add_resource(dr.DelratioOMPostApi, '/dr/om/' + post_params)
api.add_resource(dr.DelratioOMUpdateManyApi, '/dr/om/' + many_params)
api.add_resource(dr.DelratioOMApi, '/dr/om/' + get_params)
""" Delratio Modules """
""" Recall Modules """
api.add_resource(recall.RecallConListApi, '/recall/' + list_params)
api.add_resource(recall.RecallConPostApi, '/recall/' + post_params)
api.add_resource(recall.RecallConApi, '/recall/' + get_params)
api.add_resource(recall.RecallConMapApi, '/recall/map/<everything:project_db>')
api.add_resource(recall.RecallConOutPostApi, '/recall/out/' + post_params)
api.add_resource(recall.RecallConOutApi, '/recall/out/' + get_params)
api.add_resource(recall.RecallRecListApi, '/recall/data/' + list_params)
api.add_resource(recall.RecallRecPostApi, '/recall/data/' + post_params)
api.add_resource(recall.RecallRecApi, '/recall/data/' + get_params)
api.add_resource(recall.RecallDatPostApi, '/recall/data/item/' + post_params)
api.add_resource(recall.RecallDatApi, '/recall/data/item/' + get_params)
""" Recall Modules """
""" Landuse Modules """
api.add_resource(lum.LanduseLumListApi, '/landuse/' + list_params)
api.add_resource(lum.LanduseLumPostApi, '/landuse/' + post_params)
api.add_resource(lum.LanduseLumUpdateManyApi, '/landuse/' + many_params)
api.add_resource(lum.LanduseLumApi, '/landuse/' + get_params)
api.add_resource(lum.CntableLumListApi, '/cntable/' + list_params)
api.add_resource(lum.CntableLumPostApi, '/cntable/' + post_params)
api.add_resource(lum.CntableLumUpdateManyApi, '/cntable/' + many_params)
api.add_resource(lum.CntableLumApi, '/cntable/' + get_params)
api.add_resource(lum.CntableLumDatasetsApi, '/cntable/' + datasets_get_name_params)
api.add_resource(lum.OvntableLumListApi, '/ovntable/' + list_params)
api.add_resource(lum.OvntableLumPostApi, '/ovntable/' + post_params)
api.add_resource(lum.OvntableLumUpdateManyApi, '/ovntable/' + many_params)
api.add_resource(lum.OvntableLumApi, '/ovntable/' + get_params)
api.add_resource(lum.OvntableLumDatasetsApi, '/ovntable/' + datasets_get_name_params)
api.add_resource(lum.ConsPracLumListApi, '/cons_prac/' + list_params)
api.add_resource(lum.ConsPracLumPostApi, '/cons_prac/' + post_params)
api.add_resource(lum.ConsPracLumUpdateManyApi, '/cons_prac/' + many_params)
api.add_resource(lum.ConsPracLumApi, '/cons_prac/' + get_params)
api.add_resource(lum.ConsPracLumDatasetsApi, '/cons_prac/' + datasets_get_name_params)
api.add_resource(lum.ManagementSchListApi, '/mgt_sch/' + list_params)
api.add_resource(lum.ManagementSchPostApi, '/mgt_sch/' + post_params)
api.add_resource(lum.ManagementSchApi, '/mgt_sch/' + get_params)
""" Landuse Modules """
""" Operations Modules """
api.add_resource(ops.GrazeOpsListApi, '/ops/graze/' + list_params)
api.add_resource(ops.GrazeOpsPostApi, '/ops/graze/' + post_params)
api.add_resource(ops.GrazeOpsUpdateManyApi, '/ops/graze/' + many_params)
api.add_resource(ops.GrazeOpsApi, '/ops/graze/' + get_params)
api.add_resource(ops.GrazeOpsDatasetsApi, '/ops/graze/' + datasets_get_name_params)
api.add_resource(ops.HarvOpsListApi, '/ops/harvest/' + list_params)
api.add_resource(ops.HarvOpsPostApi, '/ops/harvest/' + post_params)
api.add_resource(ops.HarvOpsUpdateManyApi, '/ops/harvest/' + many_params)
api.add_resource(ops.HarvOpsApi, '/ops/harvest/' + get_params)
api.add_resource(ops.HarvOpsDatasetsApi, '/ops/harvest/' + datasets_get_name_params)
api.add_resource(ops.ChemAppOpsListApi, '/ops/chemapp/' + list_params)
api.add_resource(ops.ChemAppOpsPostApi, '/ops/chemapp/' + post_params)
api.add_resource(ops.ChemAppOpsUpdateManyApi, '/ops/chemapp/' + many_params)
api.add_resource(ops.ChemAppOpsApi, '/ops/chemapp/' + get_params)
api.add_resource(ops.ChemAppOpsDatasetsApi, '/ops/chemapp/' + datasets_get_name_params)
api.add_resource(ops.IrrOpsListApi, '/ops/irrigation/' + list_params)
api.add_resource(ops.IrrOpsPostApi, '/ops/irrigation/' + post_params)
api.add_resource(ops.IrrOpsUpdateManyApi, '/ops/irrigation/' + many_params)
api.add_resource(ops.IrrOpsApi, '/ops/irrigation/' + get_params)
api.add_resource(ops.IrrOpsDatasetsApi, '/ops/irrigation/' + datasets_get_name_params)
api.add_resource(ops.FireOpsListApi, '/ops/fire/' + list_params)
api.add_resource(ops.FireOpsPostApi, '/ops/fire/' + post_params)
api.add_resource(ops.FireOpsUpdateManyApi, '/ops/fire/' + many_params)
api.add_resource(ops.FireOpsApi, '/ops/fire/' + get_params)
api.add_resource(ops.FireOpsDatasetsApi, '/ops/fire/' + datasets_get_name_params)
api.add_resource(ops.SweepOpsListApi, '/ops/sweep/' + list_params)
api.add_resource(ops.SweepOpsPostApi, '/ops/sweep/' + post_params)
api.add_resource(ops.SweepOpsUpdateManyApi, '/ops/sweep/' + many_params)
api.add_resource(ops.SweepOpsApi, '/ops/sweep/' + get_params)
api.add_resource(ops.SweepOpsDatasetsApi, '/ops/sweep/' + datasets_get_name_params)
""" Operations Modules """
""" Hydrology Modules """
api.add_resource(hydrology.HydrologyHydListApi, '/hydrology/' + list_params)
api.add_resource(hydrology.HydrologyHydPostApi, '/hydrology/' + post_params)
api.add_resource(hydrology.HydrologyHydUpdateManyApi, '/hydrology/' + many_params)
api.add_resource(hydrology.HydrologyHydApi, '/hydrology/' + get_params)
api.add_resource(hydrology.TopographyHydListApi, '/topography/' + list_params)
api.add_resource(hydrology.TopographyHydPostApi, '/topography/' + post_params)
api.add_resource(hydrology.TopographyHydUpdateManyApi, '/topography/' + many_params)
api.add_resource(hydrology.TopographyHydApi, '/topography/' + get_params)
api.add_resource(hydrology.FieldFldListApi, '/fields/' + list_params)
api.add_resource(hydrology.FieldFldPostApi, '/fields/' + post_params)
api.add_resource(hydrology.FieldFldUpdateManyApi, '/fields/' + many_params)
api.add_resource(hydrology.FieldFldApi, '/fields/' + get_params)
""" Hydrology Modules """
""" Initialization Data Modules """
api.add_resource(init.SoilPlantListApi, '/soil_plant/' + list_params)
api.add_resource(init.SoilPlantPostApi, '/soil_plant/' + post_params)
api.add_resource(init.SoilPlantUpdateManyApi, '/soil_plant/' + many_params)
api.add_resource(init.SoilPlantApi, '/soil_plant/' + get_params)
api.add_resource(init.OMWaterListApi, '/om_water/' + list_params)
api.add_resource(init.OMWaterPostApi, '/om_water/' + post_params)
api.add_resource(init.OMWaterUpdateManyApi, '/om_water/' + many_params)
api.add_resource(init.OMWaterApi, '/om_water/' + get_params)
api.add_resource(init.PlantIniListApi, '/plant_ini/' + list_params)
api.add_resource(init.PlantIniPostApi, '/plant_ini/' + post_params)
api.add_resource(init.PlantIniApi, '/plant_ini/' + get_params)
api.add_resource(init.PlantIniItemPostApi, '/plant_ini/item/' + post_params)
api.add_resource(init.PlantIniItemApi, '/plant_ini/item/' + get_params)
""" Initialization Data Modules """
""" Databases - Modules """
api.add_resource(hru_parm_db.PlantsPltListApi, '/db/plants/' + list_params)
api.add_resource(hru_parm_db.PlantsPltPostApi, '/db/plants/' + post_params)
api.add_resource(hru_parm_db.PlantsPltUpdateManyApi, '/db/plants/' + many_params)
api.add_resource(hru_parm_db.PlantsPltApi, '/db/plants/' + get_params)
api.add_resource(hru_parm_db.PlantsPltDatasetsApi, '/db/plants/' + datasets_get_name_params)
api.add_resource(hru_parm_db.FertilizerFrtListApi, '/db/fertilizer/' + list_params)
api.add_resource(hru_parm_db.FertilizerFrtPostApi, '/db/fertilizer/' + post_params)
api.add_resource(hru_parm_db.FertilizerFrtUpdateManyApi, '/db/fertilizer/' + many_params)
api.add_resource(hru_parm_db.FertilizerFrtApi, '/db/fertilizer/' + get_params)
api.add_resource(hru_parm_db.FertilizerFrtDatasetsApi, '/db/fertilizer/' + datasets_get_name_params)
api.add_resource(hru_parm_db.TillageTilListApi, '/db/tillage/' + list_params)
api.add_resource(hru_parm_db.TillageTilPostApi, '/db/tillage/' + post_params)
api.add_resource(hru_parm_db.TillageTilUpdateManyApi, '/db/tillage/' + many_params)
api.add_resource(hru_parm_db.TillageTilApi, '/db/tillage/' + get_params)
api.add_resource(hru_parm_db.TillageTilDatasetsApi, '/db/tillage/' + datasets_get_name_params)
api.add_resource(hru_parm_db.PesticidePstListApi, '/db/pesticides/' + list_params)
api.add_resource(hru_parm_db.PesticidePstPostApi, '/db/pesticides/' + post_params)
api.add_resource(hru_parm_db.PesticidePstUpdateManyApi, '/db/pesticides/' + many_params)
api.add_resource(hru_parm_db.PesticidePstApi, '/db/pesticides/' + get_params)
api.add_resource(hru_parm_db.PesticidePstDatasetsApi, '/db/pesticides/' + datasets_get_name_params)
api.add_resource(hru_parm_db.UrbanUrbListApi, '/db/urban/' + list_params)
api.add_resource(hru_parm_db.UrbanUrbPostApi, '/db/urban/' + post_params)
api.add_resource(hru_parm_db.UrbanUrbUpdateManyApi, '/db/urban/' + many_params)
api.add_resource(hru_parm_db.UrbanUrbApi, '/db/urban/' + get_params)
api.add_resource(hru_parm_db.UrbanUrbDatasetsApi, '/db/urban/' + datasets_get_name_params)
api.add_resource(hru_parm_db.SepticSepListApi, '/db/septic/' + list_params)
api.add_resource(hru_parm_db.SepticSepPostApi, '/db/septic/' + post_params)
api.add_resource(hru_parm_db.SepticSepUpdateManyApi, '/db/septic/' + many_params)
api.add_resource(hru_parm_db.SepticSepApi, '/db/septic/' + get_params)
api.add_resource(hru_parm_db.SepticSepDatasetsApi, '/db/septic/' + datasets_get_name_params)
api.add_resource(hru_parm_db.SnowSnoListApi, '/db/snow/' + list_params)
api.add_resource(hru_parm_db.SnowSnoPostApi, '/db/snow/' + post_params)
api.add_resource(hru_parm_db.SnowSnoUpdateManyApi, '/db/snow/' + many_params)
api.add_resource(hru_parm_db.SnowSnoApi, '/db/snow/' + get_params)
api.add_resource(hru_parm_db.SnowSnoDatasetsApi, '/db/snow/' + datasets_get_name_params)
""" Databases - Modules """
""" Soils Modules """
api.add_resource(soils.SoilsSolListApi, '/soils/' + list_params)
api.add_resource(soils.SoilsSolPostApi, '/soils/' + post_params)
api.add_resource(soils.SoilsSolApi, '/soils/' + get_params)
api.add_resource(soils.SoilsSolLayerPostApi, '/soils/layer/' + post_params)
api.add_resource(soils.SoilsSolLayerApi, '/soils/layer/' + get_params)
api.add_resource(soils.NutrientsSolListApi, '/soil-nutrients/' + list_params)
api.add_resource(soils.NutrientsSolPostApi, '/soil-nutrients/' + post_params)
api.add_resource(soils.NutrientsSolUpdateManyApi, '/soil-nutrients/' + many_params)
api.add_resource(soils.NutrientsSolApi, '/soil-nutrients/' + get_params)
api.add_resource(soils.SoilsLteSolListApi, '/soils-lte/' + list_params)
api.add_resource(soils.SoilsLteSolPostApi, '/soils-lte/' + post_params)
api.add_resource(soils.SoilsLteSolUpdateManyApi, '/soils-lte/' + many_params)
api.add_resource(soils.SoilsLteSolApi, '/soils-lte/' + get_params)
""" Soils Modules """
""" Landscape Units Modules """
api.add_resource(regions.LsUnitDefListApi, '/ls_units/' + list_params)
api.add_resource(regions.LsUnitDefPostApi, '/ls_units/' + post_params)
api.add_resource(regions.LsUnitDefApi, '/ls_units/' + get_params)
api.add_resource(regions.LsUnitEleListApi, '/ls_units/elements/' + list_params)
api.add_resource(regions.LsUnitElePostApi, '/ls_units/elements/' + post_params)
api.add_resource(regions.LsUnitEleApi, '/ls_units/elements/' + get_params)
""" Landscape Units Modules """
""" Change Modules """
api.add_resource(change.CodesSftApi, '/change/codes/<everything:project_db>')
api.add_resource(change.CalParmsCalListApi, '/change/cal_parms/' + list_params)
api.add_resource(change.CalParmsCalApi, '/change/cal_parms/' + get_params)
api.add_resource(change.CalParmsTypesApi, '/change/cal_parms/types/<everything:project_db>')
api.add_resource(change.CalibrationCalListApi, '/change/calibration/' + list_params)
api.add_resource(change.CalibrationCalPostApi, '/change/calibration/' + post_params)
api.add_resource(change.CalibrationCalApi, '/change/calibration/' + get_params)
api.add_resource(change.WbParmsSftListApi, '/change/soft/parms/wb/' + list_params)
api.add_resource(change.WbParmsSftPostApi, '/change/soft/parms/wb/' + post_params)
api.add_resource(change.WbParmsSftApi, '/change/soft/parms/wb/' + get_params)
api.add_resource(change.ChsedParmsSftListApi, '/change/soft/parms/chsed/' + list_params)
api.add_resource(change.ChsedParmsSftPostApi, '/change/soft/parms/chsed/' + post_params)
api.add_resource(change.ChsedParmsSftApi, '/change/soft/parms/chsed/' + get_params)
api.add_resource(change.PlantParmsSftListApi, '/change/soft/parms/plant/' + list_params)
api.add_resource(change.PlantParmsSftPostApi, '/change/soft/parms/plant/' + post_params)
api.add_resource(change.PlantParmsSftApi, '/change/soft/parms/plant/' + get_params)
api.add_resource(change.WaterBalanceSftListApi, '/change/soft/regions/wb/' + list_params)
api.add_resource(change.WaterBalanceSftPostApi, '/change/soft/regions/wb/' + post_params)
api.add_resource(change.WaterBalanceSftApi, '/change/soft/regions/wb/' + get_params)
api.add_resource(change.ChsedBudgetSftListApi, '/change/soft/regions/chsed/' + list_params)
api.add_resource(change.ChsedBudgetSftPostApi, '/change/soft/regions/chsed/' + post_params)
api.add_resource(change.ChsedBudgetSftApi, '/change/soft/regions/chsed/' + get_params)
api.add_resource(change.PlantGroSftListApi, '/change/soft/regions/plant/' + list_params)
api.add_resource(change.PlantGroSftPostApi, '/change/soft/regions/plant/' + post_params)
api.add_resource(change.PlantGroSftApi, '/change/soft/regions/plant/' + get_params)
""" Change Modules """
""" Decision Table Modules """
api.add_resource(decision_table.DTableDtlListApi, '/decision_table/<table_type>/' + list_params)
api.add_resource(decision_table.DTableDtlApi, '/decision_table/' + get_params)
""" Decision Table """
""" Structural Modules """
api.add_resource(structural.BmpuserStrListApi, '/structural/bmpuser/' + list_params)
api.add_resource(structural.BmpuserStrPostApi, '/structural/bmpuser/' + post_params)
api.add_resource(structural.BmpuserStrUpdateManyApi, '/structural/bmpuser/' + many_params)
api.add_resource(structural.BmpuserStrApi, '/structural/bmpuser/' + get_params)
api.add_resource(structural.BmpuserStrDatasetsApi, '/structural/bmpuser/' + datasets_get_name_params)
api.add_resource(structural.TiledrainStrListApi, '/structural/tiledrain/' + list_params)
api.add_resource(structural.TiledrainStrPostApi, '/structural/tiledrain/' + post_params)
api.add_resource(structural.TiledrainStrUpdateManyApi, '/structural/tiledrain/' + many_params)
api.add_resource(structural.TiledrainStrApi, '/structural/tiledrain/' + get_params)
api.add_resource(structural.TiledrainStrDatasetsApi, '/structural/tiledrain/' + datasets_get_name_params)
api.add_resource(structural.SepticStrListApi, '/structural/septic/' + list_params)
api.add_resource(structural.SepticStrPostApi, '/structural/septic/' + post_params)
api.add_resource(structural.SepticStrUpdateManyApi, '/structural/septic/' + many_params)
api.add_resource(structural.SepticStrApi, '/structural/septic/' + get_params)
api.add_resource(structural.SepticStrDatasetsApi, '/structural/septic/' + datasets_get_name_params)
api.add_resource(structural.FilterstripStrListApi, '/structural/filterstrip/' + list_params)
api.add_resource(structural.FilterstripStrPostApi, '/structural/filterstrip/' + post_params)
api.add_resource(structural.FilterstripStrUpdateManyApi, '/structural/filterstrip/' + many_params)
api.add_resource(structural.FilterstripStrApi, '/structural/filterstrip/' + get_params)
api.add_resource(structural.FilterstripStrDatasetsApi, '/structural/filterstrip/' + datasets_get_name_params)
api.add_resource(structural.GrassedwwStrListApi, '/structural/grassedww/' + list_params)
api.add_resource(structural.GrassedwwStrPostApi, '/structural/grassedww/' + post_params)
api.add_resource(structural.GrassedwwStrUpdateManyApi, '/structural/grassedww/' + many_params)
api.add_resource(structural.GrassedwwStrApi, '/structural/grassedww/' + get_params)
api.add_resource(structural.GrassedwwStrDatasetsApi, '/structural/grassedww/' + datasets_get_name_params)
""" Structural Modules """
if __name__ == '__main__':
sys.stdout = Unbuffered(sys.stdout)
parser = argparse.ArgumentParser(description="SWAT+ Editor REST API")
parser.add_argument("port", type=str, help="port number to run API", default=5000, nargs="?")
args = parser.parse_args()
app.run(port=int(args.port))
```
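The registrations above follow the Flask-RESTful pattern of concatenating a module prefix with shared URL parameter templates (`list_params`, `post_params`, `get_params`, etc., which are defined earlier in the file and not shown here). A minimal, self-contained sketch of that pattern, using hypothetical stand-in templates and resource classes rather than the real ones:

```python
# Minimal sketch of the add_resource pattern used above.
# The real list_params/post_params/get_params templates live earlier in the
# API module; the strings and resource classes below are hypothetical stand-ins.
from flask import Flask
from flask_restful import Api, Resource

app = Flask(__name__)
api = Api(app)

# hypothetical shared URL templates
list_params = '<project_db>/<sort>/<reverse>/<page>/<items_per_page>'
get_params = '<project_db>/<id>'

class ExampleListApi(Resource):
    def get(self, project_db, sort, reverse, page, items_per_page):
        # a real module would page through the project database here
        return {'total': 0, 'items': []}

class ExampleApi(Resource):
    def get(self, project_db, id):
        return {'id': id}

# same prefix + template concatenation style as the SWAT+ editor API
api.add_resource(ExampleListApi, '/example/' + list_params)
api.add_resource(ExampleApi, '/example/' + get_params)

if __name__ == '__main__':
    app.run(port=5000)
```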
#### File: swatplus-automatic-workflow/main_stages/prepare_project.py
```python
import os
import pickle
import sys
import zipfile
from glob import glob
from shutil import copyfile, rmtree
from osgeo import gdal
from osgeo import osr
sys.path.insert(0, os.path.join(os.environ["swatplus_wf_dir"], "packages"))
sys.path.append(os.path.join(os.environ["swatplus_wf_dir"]))
sys.path.insert(0, sys.argv[1])
from helper_functions import (read_from, write_to, raster_statistics,
list_files, copy_file, copytree, file_name, python_variable, get_extents)
from projection_lookup import epsg_lookup_dictionary
from qgs_project_template import template
from sqlite_tools import sqlite_connection
from logger import log
log = log("{base}/swatplus_aw_log.txt".format(base = sys.argv[1]))
keep_log = True
# check if there is need to prepare project
if os.path.isfile("{base}/config.py".format(base=sys.argv[1])):
import config
if config.Keep_Log == True:
keep_log = True
log.initialise()
else:
keep_log = False
if config.Model_2_config == True:
log.info("config creation is required; 'Model_2_config' is set to 'True'", keep_log)
sys.exit(0)
else:
log.initialise()
log.info("config was not found in the current directory", keep_log)
print("\t! config.py not found in current directory")
sys.exit(0)
class dem_data:
def __init__(self):
self.raster_data = None
self.suffix_list = []
# location of wgn
# https://bitbucket.org/swatplus/swatplus.editor/downloads/swatplus_wgn.sqlite
# announce action
print("\n >> preparing project")
log.info("preparing the qgis project", keep_log)
# set important variables
project_name = config.Project_Name
outlet_name = config.Outlets.split(".")[0]
soil_lookup = config.Soil_Lookup.split(".")[0]
land_lookup = config.Landuse_Lookup.split(".")[0]
usersoil = config.Usersoil.split(".")[0]
thresholdCh = config.Channel_Threshold
thresholdSt = config.Stream_Threshold
burn_name = config.Burn_In_Shape
dem_name = config.Topography
landuse_name = config.Land_Use
soil_name = config.Soils
dem_file_name_ = config.Topography if ".tif" in config.Topography.lower()\
else "{dem}.tif".format(dem=config.Topography)
landuse_file_name_ = config.Land_Use if ".tif" in config.Land_Use.lower()\
else "{land}/hdr.adf".format(land=config.Land_Use)
soil_file_name_ = config.Soils if ".tif" in config.Soils.lower()\
else "{soil}/hdr.adf".format(soil=config.Soils)
extension_suffix = ".tif"
# prepare rasters
log.info("preparing raster files", keep_log)
dem_fn = "{base_dir}/data/rasters/{dem_file}".format(
base_dir=sys.argv[1], dem_file=config.Topography)
log.info(" - dem file: {0}".format(config.Topography), keep_log)
soil_fn = "{base_dir}/data/rasters/{dem_file}".format(
base_dir=sys.argv[1], dem_file=config.Soils)
log.info(" - soils file: {0}".format(config.Soils), keep_log)
landuse_fn = "{base_dir}/data/rasters/{dem_file}".format(
base_dir=sys.argv[1], dem_file=config.Land_Use)
log.info(" - soils file: {0}".format(config.Land_Use), keep_log)
dem_dirname = '{base}/{project_name}/Watershed/Rasters/DEM/'.format(
base=sys.argv[1], project_name=project_name)
soil_dirname = '{base}/{project_name}/Watershed/Rasters/Soil/'.format(
    base=sys.argv[1], project_name=project_name)
landuse_dirname = '{base}/{project_name}/Watershed/Rasters/Landuse/'.format(
    base=sys.argv[1], project_name=project_name)
log.info("creating raster directories in project", keep_log)
if not os.path.isdir(dem_dirname):
os.makedirs(dem_dirname)
if not os.path.isdir(soil_dirname):
os.makedirs(soil_dirname)
if not os.path.isdir(landuse_dirname):
os.makedirs(landuse_dirname)
# prjcrs_tmp = "{base}/data/rasters/prjcrs.crs".format(base=sys.argv[1])
# prjcrs_formated_tmp = "{base}/data/rasters/prjcrs_formated.crs".format(base=sys.argv[1])
#
# if os.path.isfile(prjcrs_tmp):
# os.remove(prjcrs_tmp)
# copy and convert rasters
log.info("extracting DEM to {0}".format(
'{base}/{project_name}/Watershed/Rasters/DEM/{dem_name}'.format(
base=sys.argv[1], project_name=project_name, dem_name=dem_name)
), keep_log)
if config.Topography.lower().endswith(".tif"):
copy_file(dem_fn, '{base}/{project_name}/Watershed/Rasters/DEM/{dem_name}'.format(
base=sys.argv[1], project_name=project_name, dem_name=dem_file_name_))
copy_file(dem_fn, '{base}/{project_name}/Watershed/Rasters/DEM/{dem_name}slp_bands.tif'.format(
base=sys.argv[1], project_name=project_name, dem_name=dem_file_name_[:-4]))
else:
# copytree(dem_fn, '{base}/{project_name}/Watershed/Rasters/DEM/{dem_name}'.format(
# base=sys.argv[1], project_name=project_name, dem_name=dem_name))
src_ds = gdal.Open(dem_fn)
ds = gdal.Translate('{base}/{project_name}/Watershed/Rasters/DEM/{dem_name}'.format(
base=sys.argv[1], project_name=project_name, dem_name=dem_file_name_), src_ds, format='GTiff')
ds = gdal.Translate('{base}/{project_name}/Watershed/Rasters/DEM/{dem_name}slp_bands.tif'.format(
base=sys.argv[1], project_name=project_name, dem_name=dem_file_name_[:-4]), src_ds, format='GTiff')
log.info("extracting landuse to {0}".format(
'{base}/{project_name}/Watershed/Rasters/Landuse/{landuse_name}'.format(
base=sys.argv[1], project_name=project_name, landuse_name=landuse_name)
), keep_log)
if config.Land_Use.lower().endswith(".tif"):
copy_file(landuse_fn, '{base}/{project_name}/Watershed/Rasters/Landuse/{landuse_name}'.format(
base=sys.argv[1], project_name=project_name, landuse_name=landuse_name))
else:
copytree(landuse_fn, '{base}/{project_name}/Watershed/Rasters/Landuse/{landuse_name}'.format(
base=sys.argv[1], project_name=project_name, landuse_name=landuse_name))
# src_ds = gdal.Open(landuse_fn)
# ds = gdal.Translate('{base}/{project_name}/Watershed/Rasters/Landuse/{landuse_name}'.format(
# base=sys.argv[1], project_name=project_name, landuse_name=landuse_name), src_ds, format='GTiff')
log.info("extracting soil to {0}".format(
'{base}/{project_name}/Watershed/Rasters/Soil/{soil_name}'.format(
base=sys.argv[1], project_name=project_name, soil_name=soil_name)
), keep_log)
if config.Soils.lower().endswith(".tif"):
copy_file(soil_fn, '{base}/{project_name}/Watershed/Rasters/Soil/{soil_name}'.format(
base=sys.argv[1], project_name=project_name, soil_name=soil_name))
else:
copytree(soil_fn, '{base}/{project_name}/Watershed/Rasters/Soil/{soil_name}'.format(
base=sys.argv[1], project_name=project_name, soil_name=soil_name))
# src_ds = gdal.Open(soil_fn)
# ds = gdal.Translate('{base}/{project_name}/Watershed/Rasters/Soil/{soil_name}'.format(
# base=sys.argv[1], project_name=project_name, soil_name=soil_name), src_ds, format='GTiff')
log.info("getting dem projection information", keep_log)
dataset = gdal.Open('{base}/{project_name}/Watershed/Rasters/DEM/{dem_name}'.format(
base=sys.argv[1], project_name=project_name, dem_name=dem_file_name_))
formated_projcs = gdal.Info(dataset).split("Data axis")[
0].split("System is:\n")[-1]
prjcrs = ""
for line in formated_projcs.split("\n"):
while line.startswith(" "):
line = line[1:]
prjcrs += line.strip("\n")
srs = osr.SpatialReference(wkt=prjcrs)
proj4 = srs.ExportToProj4()
geogcs = srs.GetAttrValue('geogcs')
log.info("geogcs: {0}".format(geogcs), keep_log)
log.info("proj4: {0}".format(proj4), keep_log)
if prjcrs.split('"')[1] in epsg_lookup_dictionary:
srid, projectionacronym, srsid, ellipsoidacronym = epsg_lookup_dictionary[prjcrs.split('"')[
1]]
geographicflag = "false"
else:
srid, projectionacronym, srsid, ellipsoidacronym = "", "", "", ""
geographicflag = "true"
log.error("DEM is not projected", keep_log)
print("Error! DEM is not projected!")
sys.exit(1)
srs_description = prjcrs.split('"')[1]
# hru settings
log.info("setting hru filter method", keep_log)
hru_land_thres, hru_soil_thres, hru_slope_thres = "", "", ""
area_val = 0 # value for area if option 3 for HRU Filter Method is selected
target_val = 0 # value for the target if option 4 for HRU Filter Method is selected
is_area = 0
use_area = 1 if config.HRU_Thresholds_Type == 1 else 0
is_dominant_hru = 0
is_multiple = 0
is_target = 0
if config.HRU_Filter_Method == 1:
log.info("> filter method is dominant landuse, soil, slope", keep_log)
is_dominant_hru = 1
if config.HRU_Filter_Method == 2:
log.info("> filter method is dominant hrus", keep_log)
is_dominant_hru = 1
if config.HRU_Filter_Method == 3:
log.info("> filter method is target area", keep_log)
area_val = config.Target_Area
log.info(" - target area = {0}".format(area_val), keep_log)
is_multiple = 1
is_area = 1
if config.HRU_Filter_Method == 4:
log.info("> filter method is target area", keep_log)
target_val = config.Target_Value
log.info(" - target value = {0}".format(target_val), keep_log)
is_multiple = 1
is_target = 1
if config.HRU_Filter_Method == 5:
log.info("> filter method is filter by landuse, soil, slope", keep_log)
log.info(" - thresholds = {0}".format(config.Land_Soil_Slope_Thres), keep_log)
if len(config.Land_Soil_Slope_Thres.split(",")) != 3:
print('\t! Provide thresholds in the config with the correct format\n\t - e.g. Land_Soil_Slope_Thres = "12, 10, 7"')
sys.exit(1)
else:
hru_land_thres, hru_soil_thres, hru_slope_thres = config.Land_Soil_Slope_Thres.replace(
" ", "").split(",")
is_multiple = 1
log.info("writing raster projection information", keep_log)
write_to('{base}/{project_name}/Watershed/Rasters/DEM/{dem_name}.prj.txt'.format(
base=sys.argv[1], project_name=project_name, dem_name=dem_name), formated_projcs)
write_to('{base}/{project_name}/Watershed/Rasters/DEM/{dem_name}.prj'.format(
base=sys.argv[1], project_name=project_name, dem_name=dem_name), prjcrs)
write_to('{base}/{project_name}/Watershed/Rasters/DEM/{dem_name}hillshade.prj'.format(
base=sys.argv[1], project_name=project_name, dem_name=dem_name), prjcrs)
log.info("getting gis data extents", keep_log)
extent_xmin, extent_ymin, extent_xmax, extent_ymax = get_extents(dem_fn)
raster_stats = raster_statistics(dem_fn)
third_delta = round((raster_stats.maximum - raster_stats.minimum)/3, 0)
lower_third = raster_stats.minimum + third_delta
upper_third = raster_stats.maximum - third_delta
project_string = template.format(
# dem_raster=dem_name,
prjcrs=prjcrs,
shape_wkt=prjcrs,
proj4=proj4,
geogcs=geogcs,
project_name=project_name,
snap_threshold=config.Out_Snap_Threshold,
channel_threshold=config.Channel_Threshold,
stream_threshold=config.Stream_Threshold,
hru_land_thres=hru_land_thres,
hru_slope_thres=hru_slope_thres,
hru_soil_thres=hru_soil_thres,
area_val=area_val,
target_val=target_val,
is_area=is_area,
use_area=use_area,
is_dominant_hru=is_dominant_hru,
is_multiple=is_multiple,
is_target=is_target,
outlet_name=outlet_name,
dem_min=raster_stats.minimum,
dem_max=raster_stats.maximum,
lower_third=lower_third,
upper_third=upper_third,
mid_thirds=round((upper_third + lower_third)/2, 0),
dem_name=dem_file_name_[:-4],
landuse_name=landuse_name[:-4],
extension_suffix=extension_suffix,
soil_name=soil_name[:-4],
dem_file_name=dem_file_name_,
landuse_file_name=landuse_file_name_,
soil_file_name=soil_file_name_,
soil_lookup=soil_lookup,
land_lookup=land_lookup,
extent_xmin=extent_xmin,
extent_ymin=extent_ymin,
extent_xmax=extent_xmax,
extent_ymax=extent_ymax,
thresholdCh=thresholdCh,
thresholdSt=thresholdSt,
usersoil=usersoil,
slope_classes=config.Slope_Classes,
srsid=srsid,
srid=srid,
srs_description=srs_description,
projectionacronym=projectionacronym,
ellipsoidacronym=ellipsoidacronym,
geographicflag=geographicflag,
)
project_string = project_string.replace("--close-curly--", "}")
project_string = project_string.replace("key--open-curly--", 'key="{')
project_string = project_string.replace("value--open-curly--", 'value="{')
log.info("writing qgis project file to {0}.qgs".format(
"{base}/{project_name}/{project_name}.qgs".format(
base=sys.argv[1], project_name=project_name)
), keep_log)
write_to("{base}/{project_name}/{project_name}.qgs".format(
base=sys.argv[1], project_name=project_name), project_string)
# dem_data_variable = python_variable(
# "{qswatplus_wf_dir}/dem_data.dat".format(qswatplus_wf_dir=os.environ["qswatplus_wf"]))
# for suff in dem_data_variable.suffix_list: # suff is the last_part of each raster that is appended to dem
# if suff == "fel.tif":
# continue
# with open('{base}/{project_name}/Watershed/Rasters/DEM/{dem_name}{suff}'.format(
# base=sys.argv[1], project_name=project_name, dem_name=dem_name, suff=suff), 'wb') as fl:
# fl.write(dem_data_variable.raster_data)
log.info("creating hillshade for DEM", keep_log)
hillshade_name = '{base}/{project_name}/Watershed/Rasters/DEM/{dem_name}hillshade.tif'.format(
base=sys.argv[1], project_name=project_name, dem_name=dem_file_name_[:-4])
src_ds = gdal.Open('{base}/{project_name}/Watershed/Rasters/DEM/{dem_name}'.format(
base=sys.argv[1], project_name=project_name, dem_name=dem_file_name_))
ds = gdal.DEMProcessing(hillshade_name, src_ds, 'hillshade', zFactor=30)
# copy shapefile
shapes_dir = '{base}/{project_name}/Watershed/Shapes/'.format(
base=sys.argv[1], project_name=project_name)
rmtree(shapes_dir, ignore_errors=True)
data_shapes = list_files("{base_dir}/data/shapefiles/".format(
base_dir=sys.argv[1]))
# place holding shapefiles
log.info("creating placeholder shapefiles", keep_log)
if not os.path.isdir(shapes_dir):
os.makedirs(shapes_dir)
with zipfile.ZipFile("{qswatplus_wf_dir}/packages/shapes.dat".format(
qswatplus_wf_dir=os.environ["swatplus_wf_dir"]), 'r') as zip_ref:
zip_ref.extractall(shapes_dir)
all_files_shapes = list_files(shapes_dir)
for shp_file in all_files_shapes:
os.rename(shp_file, shp_file.replace("[dem]", dem_name))
log.info("copying outlet and burn shapefiles", keep_log)
for shape_data in data_shapes:
if (outlet_name in shape_data) or ((burn_name in shape_data)
and (not burn_name == "")):
copy_file(shape_data, "{shapes_dir}/{file_n}".format(
shapes_dir=shapes_dir, file_n=file_name(shape_data, extension=True)))
# prepare databases
# - copy templates
log.info("getting swatplus_datasets.sqlite", keep_log)
copy_file(
"{qswatplus_wf_dir}/editor_api/swatplus_datasets.sqlite".format(
qswatplus_wf_dir=os.environ["swatplus_wf_dir"]),
'{base}/{project_name}/swatplus_datasets.sqlite'.format(
base=sys.argv[1], project_name=project_name)
)
log.info("creating {0}.sqlite".format(project_name), keep_log)
copy_file(
"{qswatplus_wf_dir}/editor_api/template.sqlite".format(
qswatplus_wf_dir=os.environ["swatplus_wf_dir"]),
'{base}/{project_name}/{project_name}.sqlite'.format(
base=sys.argv[1], project_name=project_name)
)
project_database = sqlite_connection(
'{base}/{project_name}/{project_name}.sqlite'.format(
base=sys.argv[1], project_name=project_name)
)
# - copy templates
log.info("importing usersoil into project database", keep_log)
project_database.connect()
if project_database.table_exists("{usersoil}".format(usersoil=usersoil)):
project_database.delete_table("{usersoil}".format(usersoil=usersoil))
# - get_data into database
# - - usersoil
usersoil_rows = read_from("{base_dir}/data/tables/{usersoil_file}".format(
base_dir=sys.argv[1], usersoil_file=config.Usersoil))
column_types_usersoil = {
"OBJECTID": 'INTEGER',
"TEXTURE": 'TEXT',
"HYDGRP": 'TEXT',
"CMPPCT": 'TEXT',
"S5ID": 'TEXT',
"SNAM": 'TEXT',
"SEQN": 'TEXT',
"MUID": 'TEXT',
}
project_database.create_table(
"{usersoil}".format(usersoil=usersoil),
usersoil_rows[0].replace('"', "").split(",")[0],
column_types_usersoil[usersoil_rows[0].replace('"', "").split(
",")[0].upper()] if usersoil_rows[0].replace('"', "").split(",")[
0].upper() in column_types_usersoil else 'REAL'
)
for usersoil_row in usersoil_rows:
if usersoil_row == usersoil_rows[0]:
for usersoil_column in usersoil_row.replace('"', "").split(",")[1:]:
project_database.insert_field(
"{usersoil}".format(usersoil=usersoil),
usersoil_column.upper(),
column_types_usersoil[usersoil_column.upper()] if usersoil_column.upper(
) in column_types_usersoil else 'REAL'
)
else:
row_insert = usersoil_row.replace('"', "").split(",")
row_insert = [row_item if not row_item ==
"" else None for row_item in row_insert]
project_database.insert_rows("{usersoil}".format(
usersoil=usersoil), [row_insert], messages=False)
print("")
# - - soil lookup
log.info("importing soil lookup into project database", keep_log)
soillookup_rows = read_from("{base_dir}/data/tables/{soil_lookup}".format(
base_dir=sys.argv[1], soil_lookup=config.Soil_Lookup))
if project_database.table_exists(soil_lookup):
project_database.delete_table(soil_lookup)
project_database.create_table(
"{soil_lookup}".format(soil_lookup=soil_lookup),
"SOIL_ID",
"INTEGER"
)
project_database.insert_field(
"{soil_lookup}".format(soil_lookup=soil_lookup),
"SNAM",
"TEXT"
)
print("")
for line in soillookup_rows[1:]:
project_database.insert_rows("{soil_lookup}".format(
soil_lookup=soil_lookup), [line.split(",")], messages=False)
# - - landuse lookup
log.info("importing landuse lookup into project database", keep_log)
landuselookup_rows = read_from("{base_dir}/data/tables/{landuse_file}".format(
base_dir=sys.argv[1], landuse_file=config.Landuse_Lookup))
project_database.create_table(
"{land_lookup}".format(land_lookup=land_lookup),
"LANDUSE_ID",
"INTEGER")
project_database.insert_field(
"{land_lookup}".format(land_lookup=land_lookup),
"SWAT_CODE",
"TEXT")
print("")
for line in landuselookup_rows[1:]:
line = line.strip(" ").strip("\n")
project_database.insert_rows("{land_lookup}".format(
land_lookup=land_lookup), [line.split(",")], messages=False)
project_database.close_connection()
log.info("project has been prepared\n", keep_log)
```
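prepare_project.py is written to be driven by the workflow: it takes the project base directory as its first command-line argument and expects the `swatplus_wf_dir` environment variable to point at the workflow installation. A hedged driver sketch inferred from that calling convention (the paths are placeholders, not values from the source):

```python
# Hypothetical driver for prepare_project.py, inferred from its use of
# sys.argv[1] and os.environ["swatplus_wf_dir"]; the paths are placeholders.
import os
import subprocess
import sys

workflow_dir = "/path/to/swatplus-automatic-workflow"   # assumed install location
project_base = "/path/to/my_project"                    # must contain config.py and data/

env = dict(os.environ)
env["swatplus_wf_dir"] = workflow_dir

subprocess.run(
    [sys.executable,
     os.path.join(workflow_dir, "main_stages", "prepare_project.py"),
     project_base],
    env=env,
    check=True,
)
```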
#### File: swatplus-automatic-workflow/main_stages/run_qswat.py
```python
import os.path
import shutil
import sys
import platform
import warnings
if platform.system() == "Linux":
import pyximport # importing cython needs this on linux
pyximport.install()
# skip deprecation warnings when importing PyQt5
with warnings.catch_warnings():
warnings.simplefilter("ignore")
from qgis.core import *
from qgis.utils import iface
from PyQt5.QtGui import *
from PyQt5.QtCore import *
# QgsApplication.setPrefixPath('C:/Program Files/QGIS 3.10/apps/qgis', True)
qgs = QgsApplication([], True)
qgs.initQgis()
# Prepare processing framework
if platform.system() == "Windows":
sys.path.append('{QGIS_Dir}/apps/qgis-ltr/python/plugins'.format(
QGIS_Dir = os.environ['QGIS_Dir'])) # Folder where Processing is located
else:
sys.path.append('/usr/share/qgis/python/plugins') # Folder where Processing is located
# skip syntax warnings on linux
with warnings.catch_warnings():
warnings.simplefilter("ignore")
from processing.core.Processing import Processing
Processing.initialize()
import processing
sys.path.append(os.path.join(os.environ["swatplus_wf_dir"], "packages"))
sys.path.append(os.path.join(os.environ["swatplus_wf_dir"], "qswatplus"))
sys.path.append(os.path.join(os.environ["swatplus_wf_dir"]))
sys.path.insert(0, sys.argv[1])
from helper_functions import list_files
import atexit
import qswatplus
import config
from logger import log
os.chdir(os.path.join(os.environ["swatplus_wf_dir"], "qswatplus"))
from QSWATPlus import QSWATPlus
from delineation import Delineation
from hrus import HRUs
from QSWATUtils import QSWATUtils
from parameters import Parameters
from glob import glob
atexit.register(QgsApplication.exitQgis)
class DummyInterface(object):
"""Dummy iface to give access to layers."""
def __getattr__(self, *args, **kwargs):
"""Dummy function."""
def dummy(*args, **kwargs):
return self
return dummy
def __iter__(self):
"""Dummy function."""
return self
def __next__(self):
"""Dummy function."""
raise StopIteration
def layers(self):
"""Simulate iface.legendInterface().layers()."""
return list(QgsProject.instance().mapLayers().values())
if config.Model_2_config:
sys.exit(0)
sp_api_mode = False
run_watershed = True
run_hrus = True
try:
if sys.argv[2] == "watershed":
sp_api_mode = True
run_hrus = False
print("\n >> delineating watershed from DEM")
if sys.argv[2] == "hrus":
sp_api_mode = True
run_watershed = False
print("\n >> creating model hrus")
except:
pass
if not sp_api_mode:
# announce
print(f"\n >> setting up model hrus")
keep_log = True if config.Keep_Log else False
log = log("{base}/swatplus_aw_log.txt".format(base = sys.argv[1]))
iface = DummyInterface()
plugin = QSWATPlus(iface)
dlg = plugin._odlg # useful shorthand for later
base_dir = sys.argv[1]
projDir = "{base}/{model_name}".format(base=base_dir,
model_name=config.Project_Name)
if not os.path.exists(projDir):
QSWATUtils.error('Project directory {0} not found'.format(projDir), True)
log.error('project directory {0} was not found'.format(projDir), keep_log)
sys.exit(1)
projName = os.path.split(projDir)[1]
projFile = "{dir}/{nm}.qgs".format(dir=projDir, nm=projName)
log.info("creating qgis project instance", keep_log)
proj = QgsProject.instance()
if (sp_api_mode and run_watershed) or (not sp_api_mode):
# clean up before new files
log.info("cleaning up files from 'Watershed\Shapes'", keep_log)
Watershed_shapes = list_files(QSWATUtils.join(
projDir, r'Watershed/Shapes'), "shp")
delete_shapes = []
for Watershed_shape in Watershed_shapes:
if os.path.basename(Watershed_shape).startswith("reservoirs"):
delete_shapes.append(Watershed_shape)
if os.path.basename(Watershed_shape).startswith("rivs"):
delete_shapes.append(Watershed_shape)
if os.path.basename(Watershed_shape).startswith("subs"):
delete_shapes.append(Watershed_shape)
if Watershed_shape.endswith("channel.shp"):
delete_shapes.append(Watershed_shape)
if Watershed_shape.endswith("stream.shp"):
delete_shapes.append(Watershed_shape)
if Watershed_shape.endswith("subbasins.shp"):
delete_shapes.append(Watershed_shape)
if os.path.basename(Watershed_shape).startswith("hrus"):
delete_shapes.append(Watershed_shape)
if Watershed_shape.endswith("wshed.shp"):
delete_shapes.append(Watershed_shape)
if os.path.basename(Watershed_shape).startswith("lsus"):
delete_shapes.append(Watershed_shape)
for delete_shape in delete_shapes:
QSWATUtils.removeFiles(delete_shape)
log.info("cleaning up files from 'Watershed\\Text'", keep_log)
shutil.rmtree(QSWATUtils.join(projDir, r'Watershed/Text'), ignore_errors=True)
shutil.rmtree(QSWATUtils.join(projDir, 'Scenarios'), ignore_errors=True)
log.info("reading qgis project", keep_log)
proj.read(projFile)
log.info("initialising qswatplus module for watershed delineation", keep_log)
plugin.setupProject(proj, True)
if not (os.path.exists(plugin._gv.textDir) and os.path.exists(plugin._gv.landuseDir)):
log.error("cannot initialise qswatplus module", keep_log)
QSWATUtils.error('Directories not created', True)
sys.exit(1)
if not dlg.delinButton.isEnabled():
log.error("cannot initialise qswatplus module", keep_log)
QSWATUtils.error('Delineate button not enabled', True)
sys.exit(1)
log.info("initialising delineation", keep_log)
delin = Delineation(plugin._gv, plugin._demIsProcessed)
delin.init()
QSWATUtils.information('\t - DEM: {0}'.format(os.path.split(plugin._gv.demFile)[1]), True)
delin.addHillshade(plugin._gv.demFile, None, None, None)
QSWATUtils.information('\t - Inlets/outlets file: {0}'.format(os.path.split(plugin._gv.outletFile)[1]), True)
log.info("running taudem", keep_log)
delin.runTauDEM2()
log.info("finishing delineation", keep_log)
delin.finishDelineation()
if run_watershed:
QSWATUtils.information('\t - finished delineation\n', True)
if not dlg.hrusButton.isEnabled():
log.error("could not initialise hru creation", keep_log)
QSWATUtils.error('\t ! HRUs button not enabled', True)
sys.exit(1)
if (sp_api_mode and run_hrus) or (not sp_api_mode):
# ensure that HRUs runs 'from files' and not from 'saved from previous run'
log.info("reading qgis project", keep_log)
proj.read(projFile)
log.info("initialising qswatplus module for HRU creation", keep_log)
plugin.setupProject(proj, True)
log.info("removing old gis data from 'BASINDATA' table in project database", keep_log)
plugin._gv.db.clearTable('BASINSDATA')
hrus = HRUs(plugin._gv, dlg.reportsBox)
hrus.init()
hrus.readFiles()
if not os.path.exists(QSWATUtils.join(plugin._gv.textDir, Parameters._TOPOREPORT)):
log.error("error reading HRU data", keep_log)
QSWATUtils.error('\t ! Elevation report not created \n\n\t Have you run Delineation?\n', True)
sys.exit(1)
if not os.path.exists(QSWATUtils.join(plugin._gv.textDir, Parameters._BASINREPORT)):
log.error("error reading HRU data", keep_log)
QSWATUtils.error('\t ! Landuse and soil report not created', True)
sys.exit(1)
hrus.calcHRUs()
if not os.path.exists(QSWATUtils.join(plugin._gv.textDir, Parameters._HRUSREPORT)):
log.error("error creating HRUs", keep_log)
QSWATUtils.error('\t ! HRUs report not created', True)
sys.exit(1)
if not os.path.exists(QSWATUtils.join(projDir, r'Watershed/Shapes/rivs1.shp')):
log.error("error creating HRUs", keep_log)
QSWATUtils.error('\t ! Streams shapefile not created', True)
sys.exit(1)
if not os.path.exists(QSWATUtils.join(projDir, r'Watershed/Shapes/subs1.shp')):
log.error("error creating HRUs", keep_log)
QSWATUtils.error('\t ! Subbasins shapefile not created', True)
sys.exit(1)
log.info("finished creating HRUs\n", keep_log)
if run_hrus:
QSWATUtils.information('\t - finished creating HRUs\n', True)
```
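run_qswat.py follows the same calling convention: the project base directory as the first argument, plus an optional second argument of `watershed` or `hrus` to run a single stage ("api mode" in the script). Because it bootstraps QGIS and the QSWAT+ plugin, it has to be launched with a Python that can import `qgis.core` (and, on Windows, with `QGIS_Dir` set). A hedged sketch of driving it; all paths below are placeholders:

```python
# Hypothetical driver for run_qswat.py, inferred from its argument handling.
# It must run under a Python that can import qgis.core; all paths below are
# placeholders, not values taken from the source.
import os
import subprocess

workflow_dir = "/path/to/swatplus-automatic-workflow"
project_base = "/path/to/my_project"
qgis_python = "/usr/bin/python3"   # a Python with QGIS bindings available

env = dict(os.environ)
env["swatplus_wf_dir"] = workflow_dir
# env["QGIS_Dir"] = r"C:\Program Files\QGIS 3.10"   # needed on Windows

script = os.path.join(workflow_dir, "main_stages", "run_qswat.py")

# full run: delineation followed by HRU creation
subprocess.run([qgis_python, script, project_base], env=env, check=True)

# or stage by stage ("api mode" in the script)
subprocess.run([qgis_python, script, project_base, "watershed"], env=env, check=True)
subprocess.run([qgis_python, script, project_base, "hrus"], env=env, check=True)
```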
#### File: swatplus-automatic-workflow/packages/helper_functions.py
```python
import os
import sys
import shutil
from glob import glob
from shutil import copyfile, copytree
# import gdal
import pickle
import xml.etree.ElementTree as ET
from osgeo import gdal
from osgeo import ogr
from osgeo import gdalconst
def copy_directory(src, parent_dst, core_count):
try:
shutil.copytree(
src, "{dst_parent}/{core_number}".format(
core_number=core_count,
dst_parent=parent_dst))
return True
except:
return False
def write_to(filename, text_to_write, report_=False):
try:
g = open(filename, 'w')
g.write(text_to_write)
        g.close()
if report_:
print('\n\t> file saved to ' + filename)
return True
except:
return False
def raster_statistics(tif_file):
ds = gdal.Open(tif_file)
minimum, maximum, mean, std_dev = ds.GetRasterBand(1).GetStatistics(0, 1)
class gdal_stats:
def __init__(self, mn, mx, mean, std_dev):
self.minimum = mn
self.maximum = mx
self.mean = mean
self.stdev = std_dev
def __repr__(self):
return 'min: {0}, max: {1}, mean: {2}, sdev: {3}'.format(
self.minimum, self.maximum, self.mean, self.stdev)
all_stats = gdal_stats(minimum, maximum, mean, std_dev)
return all_stats
def list_folders(directory):
"""
directory: string or pathlike object
"""
all_dirs = os.listdir(directory)
dirs = [dir_ for dir_ in all_dirs if os.path.isdir(
os.path.join(directory, dir_))]
return dirs
def xml_children_attributes(xml_file_name, x_path):
root = ET.parse(xml_file_name).getroot()
result = {}
for element in root.findall(x_path):
for child in element:
result[child.tag] = child.text
return result
def list_files(folder, extension="*"):
if folder.endswith("/"):
if extension == "*":
list_of_files = glob(folder + "*")
else:
list_of_files = glob(folder + "*." + extension)
else:
if extension == "*":
list_of_files = glob(folder + "/*")
else:
list_of_files = glob(folder + "/*." + extension)
return list_of_files
def copy_file(src, dst):
if not os.path.isdir(os.path.dirname(dst)):
os.makedirs(os.path.dirname(dst))
copyfile(src, dst)
def file_name(path_, extension=True):
if extension:
fn = os.path.basename(path_)
else:
fn = os.path.basename(path_).split(".")[0]
return(fn)
def rasterise(shapefile, column, raster_template, destination):
'''
adapted from https://gis.stackexchange.com/questions/212795/rasterizing-shapefiles-with-gdal-and-python#212812
'''
data = gdal.Open(raster_template, gdalconst.GA_ReadOnly)
prj_wkt = data.GetProjection()
geo_transform = data.GetGeoTransform()
#source_layer = data.GetLayer()
x_min = geo_transform[0]
y_max = geo_transform[3]
x_max = x_min + geo_transform[1] * data.RasterXSize
y_min = y_max + geo_transform[5] * data.RasterYSize
x_res = data.RasterXSize
y_res = data.RasterYSize
polygon_data = ogr.Open(shapefile)
layer_data = polygon_data.GetLayer()
pixel_width = geo_transform[1]
target_ds = gdal.GetDriverByName('GTiff').Create(destination, x_res, y_res, 1, gdal.GDT_Float32)
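    # note: the origin is y_min with a positive pixel height, so the GTiff is
    # written bottom-up (south-up) rather than the usual north-up layout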
target_ds.SetGeoTransform((x_min, pixel_width, 0, y_min, 0, pixel_width))
target_ds.SetProjection(prj_wkt)
band = target_ds.GetRasterBand(1)
NoData_value = -999
band.SetNoDataValue(NoData_value)
band.FlushCache()
gdal.RasterizeLayer(target_ds, [1], layer_data, options=["ATTRIBUTE={col}".format(col = column)])
target_ds = None
return True
def clear_directory(dir_path, fail_message = "cannot delete folder"):
try:
if os.path.isdir(dir_path):
shutil.rmtree(dir_path)
os.makedirs(dir_path)
except:
print("\t! {fail_message}".format(fail_message = fail_message))
def python_variable(filename):
with open(filename, "rb") as f:
variable = pickle.load(f)
return variable
def get_extents(raster):
src = gdal.Open(raster)
    upper_left_x, xres, xskew, upper_left_y, yskew, yres = src.GetGeoTransform()
    lower_right_x = upper_left_x + (src.RasterXSize * xres)
    lower_right_y = upper_left_y + (src.RasterYSize * yres)
    return upper_left_x, lower_right_y, lower_right_x, upper_left_y
def read_from(filename):
try:
g = open(filename, 'r')
except:
print(
"\t ! error reading {0}, make sure the file exists".format(filename))
return
file_text = g.readlines()
    g.close()
return file_text
def show_progress(count, end_val, string_before="percent complete", string_after="", bar_length=30):
percent = float(count) / end_val
hashes = "#" * int(round(percent * bar_length))
spaces = '_' * (bar_length - len(hashes))
sys.stdout.write("\r{str_b} [{bar}] {pct}% {str_after}\t\t".format(
str_b=string_before,
bar=hashes + spaces,
pct='{0:.2f}'.format(percent * 100),
str_after=string_after))
sys.stdout.flush()
```
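A short usage sketch of a few of these helpers (the raster and shapefile paths are placeholders, and it assumes `helper_functions` is importable, e.g. with the packages directory on the path):

```python
# Usage sketch for helper_functions; paths are placeholders.
from helper_functions import (get_extents, list_files, raster_statistics,
                              read_from, write_to)

dem = "/path/to/project/data/rasters/dem.tif"

stats = raster_statistics(dem)               # wraps gdal band statistics
print(stats)                                 # "min: ..., max: ..., mean: ..., sdev: ..."

xmin, ymin, xmax, ymax = get_extents(dem)    # bounding box from the geotransform
print(xmin, ymin, xmax, ymax)

shapefiles = list_files("/path/to/project/data/shapefiles/", "shp")
print(shapefiles)

write_to("/tmp/example.txt", "hello\n", report_=True)
print(read_from("/tmp/example.txt"))         # list of lines, or None on failure
```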
#### File: packages/pystran/extrafunctions.py
```python
import numpy as np
def rescale(arr,vmin,vmax):
arrout=(vmax-vmin)*arr+vmin
return arrout
##############################################################################
## TESTFUNCTIONS TO WORK WITH
##############################################################################
def analgfunc(ai,Xi):
Y=1.0
for i in range(len(Xi)):
try:
g_Xi=(np.abs(4.*Xi[i]-2.)+ai[i,:])/(1.+ai[i,:])
except:
g_Xi=(np.abs(4.*Xi[i]-2.)+ai[i])/(1.+ai[i])
Y=Y*g_Xi
return Y
def analgstarfunc(ai,alphai,Xi,di):
Y=1.0
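    # note: the di argument is discarded and replaced with fresh random shifts (assumes 10 factors)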
di=np.random.random(10)
for i in range(len(Xi)):
g_Xi=((1.+alphai[i])*np.abs(2.*(Xi[i]+di[i]-int(Xi[i]+di[i]))-1.)**alphai[i] + ai[i])/(1.+ai[i])
Y=Y*g_Xi
return Y
def simplelinear(ai,Xi):
Y = 0.
for i in range(len(ai)):
Y=Y+(ai[i]*Xi[i])
return Y
def harder(ai,Xi):
Y = 0.
Xii = np.random.permutation(Xi)
for i in range(len(ai)):
Y=Y+(ai[i]*np.exp(ai[i]+Xii[i]))
return Y
```
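These are standard analytic test functions for sensitivity analysis (`analgfunc` is Sobol's g-function, where small `ai` values mark influential factors). A small usage sketch with illustrative values:

```python
# Usage sketch for the test functions above; the ai values are illustrative.
import numpy as np
from extrafunctions import analgfunc, rescale, simplelinear

rng = np.random.default_rng(0)

# five factors sampled on [0, 1]; small ai -> influential, large ai -> negligible
ai = np.array([0., 1., 4.5, 9., 99.])
Xi = rng.random(5)

print(analgfunc(ai, Xi))                # g-function response for one sample
print(simplelinear(ai, Xi))             # weighted linear response
print(rescale(Xi, vmin=10., vmax=20.))  # map the [0, 1] sample onto [10, 20]
```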
#### File: swatplus-automatic-workflow/qswatplus/QSWATPlus.py
```python
from PyQt5.QtCore import * # @UnusedWildImport
from PyQt5.QtGui import * # @UnusedWildImport
from PyQt5.QtWidgets import * # @UnusedWildImport
from qgis.core import * # @UnusedWildImport
import os
import subprocess
import time
import shutil
import sys
import traceback
# Initialize Qt resources from file resources_rc.py
try:
from resources_rc import * # @UnusedWildImport
except:
from resources_rc import * # for convertFromArc @UnresolvedImport
# Import the code for the dialog
# allow this to fail so no exception when loaded in wrong architecture (32 or 64 bit)
# QSWATUtils should have no further dependencies, especially in Cython modules
try:
from QSWATUtils import QSWATUtils, FileTypes # @UnresolvedImport
except:
# for convertFromArc
from QSWATUtils import QSWATUtils, FileTypes # @UnresolvedImport
try:
txt = 'QSwatDialog'
from qswatdialog import QSwatDialog
txt = 'HRUs'
from hrus import HRUs
txt = 'QSWATTopology'
from QSWATTopology import QSWATTopology
txt = 'GlobalVars'
from globals import GlobalVars
txt = 'Delineation'
from delineation import Delineation
txt = 'Parameters'
from parameters import Parameters
txt = 'Visualise'
from visualise import Visualise
txt = 'AboutQSWAT'
from about import AboutQSWAT
txt = 'ExportTable'
from exporttable import ExportTable
except Exception:
QSWATUtils.loginfo('QSWAT+ failed to import {0}: {1}'.format(txt, traceback.format_exc()))
class QSWATPlus(QObject):
"""QGIS plugin to prepare geographic data for SWAT+ Editor."""
__version__ = '1.2.2'
def __init__(self, iface):
"""Constructor."""
QObject.__init__(self)
        # this import is a dependency on a Cython produced .pyd or .so file which will fail if the architecture is wrong,
        # and so gives an immediate exit before the plugin is loaded
## flag to show if init ran successfully
self.loadFailed = False
try:
import dataInC # @UnusedImport
except Exception:
QSWATUtils.loginfo('Failed to load Cython module: wrong architecture?: {0}'.format(traceback.format_exc()))
self.loadFailed = True
return
# uncomment next line for debugging
# import pydevd; pydevd.settrace()
# Save reference to the QGIS interface
self._iface = iface
# initialize plugin directory
## plugin directory
self.plugin_dir = os.path.dirname(__file__)
# add to PYTHONPATH
sys.path.append(self.plugin_dir)
settings = QSettings()
# initialize locale
# in testing with a dummy iface object this settings value can be None
try:
locale = settings.value("locale/userLocale")[0:2]
except Exception:
locale = 'en'
localePath = os.path.join(self.plugin_dir, 'i18n', 'qswat_{}.qm'.format(locale))
# set default behaviour for loading files with no CRS to prompt - the safest option
settings.setValue('Projections/defaultBehaviour', 'prompt')
## translator
if os.path.exists(localePath):
self.translator = QTranslator()
self.translator.load(localePath)
if qVersion() > '4.3.3':
QCoreApplication.installTranslator(self.translator)
self._gv = None # set later
# font = QFont('MS Shell Dlg 2', 8)
try:
pointSize = int(settings.value('/QSWATPlus/FontSize'))
except Exception:
pointSize = 8
self.setUbuntuFont(pointSize)
# an experiment - probably not use
# self.setStyles()
# Create the dialog (after translation) and keep reference
self._odlg = QSwatDialog()
self._odlg.setWindowFlags(self._odlg.windowFlags() & ~Qt.WindowContextHelpButtonHint & Qt.WindowMinimizeButtonHint)
self._odlg.move(0, 0)
#=======================================================================
# font = self._odlg.font()
# fm = QFontMetrics(font)
# txt = 'The quick brown fox jumps over the lazy dog.'
# family = font.family()
# size = font.pointSize()
# QSWATUtils.information('Family: {2}. Point size: {3!s} (intended {4!s}).\nWidth of "{0}" is {1} pixels.'.format(txt, fm.width(txt), family, size, pointSize), False)
#=======================================================================
self._odlg.setWindowTitle('QSWAT+ {0}'.format(QSWATPlus.__version__))
# flag used in initialising delineation form
self._demIsProcessed = False
        ## delineation window
self.delin = None
## create hrus window
self.hrus = None
## visualise window
self.vis = None
# report QGIS version
QSWATUtils.loginfo('QGIS version: {0}; QSWAT+ version: {1}'.format(Qgis.QGIS_VERSION, QSWATPlus.__version__))
def initGui(self):
"""Create QSWAT button in the toolbar."""
if self.loadFailed:
return
## Action that will start plugin configuration
self.action = QAction(
QIcon(":/plugins/QSWATPlus/swatplus32.png"),
'{0}'.format(QSWATUtils._QSWATNAME), self._iface.mainWindow())
# connect the action to the run method
self.action.triggered.connect(self.run)
# Add toolbar button and menu item
self._iface.addToolBarIcon(self.action)
self._iface.addPluginToMenu('&{0}'.format(QSWATUtils._QSWATNAME), self.action)
def unload(self):
"""Remove the QSWAT menu item and icon."""
# allow for it not to have been loaded
try:
self._iface.removePluginMenu('&{0}'.format(QSWATUtils._QSWATNAME), self.action)
self._iface.removeToolBarIcon(self.action)
except Exception:
pass
def run(self):
"""Run QSWAT."""
self._odlg.reportsBox.setVisible(False)
self._odlg.reportsLabel.setVisible(False)
self._odlg.reportsBox.clear()
self._odlg.reportsBox.addItem(QSWATUtils.trans('Select report to view'))
self._odlg.finished.connect(self.finish)
# connect buttons
self._odlg.aboutButton.clicked.connect(self.about)
self._odlg.newButton.clicked.connect(self.newProject)
self._odlg.existingButton.clicked.connect(self.existingProject)
self._odlg.delinButton.clicked.connect(self.doDelineation)
self._odlg.hrusButton.clicked.connect(self.doCreateHRUs)
self._odlg.editButton.clicked.connect(self.startEditor)
self._odlg.visualiseButton.clicked.connect(self.visualise)
self._odlg.paramsButton.clicked.connect(self.runParams)
self._odlg.reportsBox.activated.connect(self.showReport)
self._odlg.exportButton.clicked.connect(self.exportTable)
self.initButtons()
self._odlg.projPath.setText('')
# make sure we clear data from previous runs
self.delin = None
self.hrus = None
self.vis = None
# show the dialog
self._odlg.show()
# initially only new/existing project buttons visible if project not set
proj = QgsProject.instance()
if proj.fileName() == '':
self._odlg.mainBox.setVisible(False)
self._odlg.exportButton.setVisible(False)
else:
self._iface.mainWindow().setCursor(Qt.WaitCursor)
self.setupProject(proj, False)
self._iface.mainWindow().setCursor(Qt.ArrowCursor)
# Run the dialog event loop
result = self._odlg.exec_()
# See if OK was pressed
if result == 1:
proj.write()
def initButtons(self):
"""Initial button settings."""
self._odlg.delinLabel.setText('Step 1')
self._odlg.hrusLabel.setText('Step 2')
self._odlg.hrusLabel.setEnabled(False)
self._odlg.hrusButton.setEnabled(False)
self._odlg.editLabel.setEnabled(False)
self._odlg.editButton.setEnabled(False)
self._odlg.visualiseLabel.setVisible(False)
self._odlg.visualiseButton.setVisible(False)
def about(self):
"""Show information about QSWAT."""
form = AboutQSWAT(self._gv)
form.run(QSWATPlus.__version__)
def setUbuntuFont(self, ptSize):
"""Set Ubuntu font."""
QFontDatabase.addApplicationFont(":/fonts/Ubuntu-B.ttf")
QFontDatabase.addApplicationFont(":/fonts/Ubuntu-BI.ttf")
QFontDatabase.addApplicationFont(":/fonts/Ubuntu-C.ttf")
QFontDatabase.addApplicationFont(":/fonts/Ubuntu-L.ttf")
QFontDatabase.addApplicationFont(":/fonts/Ubuntu-LI.ttf")
QFontDatabase.addApplicationFont(":/fonts/Ubuntu-M.ttf")
QFontDatabase.addApplicationFont(":/fonts/Ubuntu-MI.ttf")
QFontDatabase.addApplicationFont(":/fonts/Ubuntu-R.ttf")
QFontDatabase.addApplicationFont(":/fonts/Ubuntu-RI.ttf")
ufont = QFont("Ubuntu", ptSize, 1)
QApplication.setFont(ufont)
QSWATUtils.loginfo('Ubuntu {0} point font set'.format(ptSize))
# def setStyles(self):
# """Set stle sheet values"""
# QApplication.instance().setStyleSheet("""
# QDialog[QSWATDialog=true] {
# background-color: white
# }
# QPushButton {
# color: white
# }
# QPushButton[browseButton=true] {
# color: white;
# background-color: blue
# }
# QPushButton[okButton=true] {
# color: white;
# background-color: green
# }
# QPushButton[cancelButton=true] {
# background-color: grey
# }
# """)
def newProject(self):
"""Call QGIS actions to create and name a new project."""
settings = QSettings()
if settings.contains('/QSWATPlus/LastInputPath'):
path = settings.value('/QSWATPlus/LastInputPath')
else:
path = ''
title = 'Select parent directory'
parentDir = QFileDialog.getExistingDirectory(None, title, path)
if parentDir is not None and os.path.isdir(parentDir):
projName, ok = QInputDialog.getText(None, 'Project name', 'Please enter the project name, starting with a letter:')
if not ok:
return
if not projName[0].isalpha():
QSWATUtils.error('Project name must start with a letter', False)
return
projDir = QSWATUtils.join(parentDir, projName)
if os.path.exists(projDir):
response = QSWATUtils.question('Project directory {0} already exists. Do you wish to delete it?'.format(projDir), False, False)
if response != QMessageBox.Yes:
return
shutil.rmtree(projDir, True)
try:
os.mkdir(projDir)
except Exception:
QSWATUtils.exceptionError('Failed to create project directory {0}'.format(projDir), False)
return
self._iface.newProject()
projFile = QSWATUtils.join(projDir, projName + '.qgs')
proj = QgsProject.instance()
proj.setFileName(projFile)
QSWATUtils.loginfo('Project file is {0}'.format(projFile))
self._iface.actionSaveProject().trigger()
# allow time for project to be created
time.sleep(2)
self.initButtons()
settings.setValue('/QSWATPlus/LastInputPath', str(projDir))
self._odlg.raise_()
self.setupProject(proj, False)
self._gv.writeProjectConfig(0, 0)
def existingProject(self):
"""Open an existing QGIS project."""
self._iface.actionOpenProject().trigger()
# allow time for project to be opened
time.sleep(2)
proj = QgsProject.instance()
if proj.fileName() == '':
QSWATUtils.error('No project opened', False)
return
self._odlg.raise_()
self.setupProject(proj, False)
def setupProject(self, proj, isBatch):
"""Set up the project."""
self._odlg.mainBox.setVisible(True)
self._odlg.mainBox.setEnabled(False)
self._odlg.setCursor(Qt.WaitCursor)
self._odlg.projPath.setText('Restarting project ...')
proj.setTitle(QFileInfo(proj.fileName()).baseName())
# now have project so initiate global vars
# if we do this earlier we cannot for example find the project database
self._gv = GlobalVars(self._iface, QSWATPlus.__version__, self.plugin_dir, isBatch)
if self._gv.SWATPlusDir == '':
# failed to find SWATPlus directory
return
self._odlg.projPath.repaint()
self.checkReports()
self.setLegendGroups()
# enable edit button if converted from Arc with 'No GIS' option
title = proj.title()
choice, found = proj.readNumEntry(title, 'fromArc', -1)
if found:
self._gv.fromArcChoice = choice
if choice == 2: # NB value from convertFromArc.py
self._odlg.editLabel.setEnabled(True)
self._odlg.editButton.setEnabled(True)
if self.demProcessed():
self._demIsProcessed = True
self.allowCreateHRU()
self.hrus = HRUs(self._gv, self._odlg.reportsBox)
#result = hrus.tryRun()
#if result == 1:
if self.hrus.HRUsAreCreated():
QSWATUtils.progress('Done', self._odlg.hrusLabel)
self.showReports()
self._odlg.editLabel.setEnabled(True)
self._odlg.editButton.setEnabled(True)
if os.path.exists(QSWATUtils.join(self._gv.resultsDir, Parameters._OUTPUTDB)):
self._odlg.visualiseLabel.setVisible(True)
self._odlg.visualiseButton.setVisible(True)
self._odlg.projPath.setText(self._gv.projDir)
self._odlg.mainBox.setEnabled(True)
self._odlg.exportButton.setVisible(True)
self._odlg.setCursor(Qt.ArrowCursor)
def runParams(self):
"""Run parameters form."""
params = Parameters(self._gv)
params.run()
def showReport(self):
"""Display selected report."""
if not self._odlg.reportsBox.hasFocus():
return
item = self._odlg.reportsBox.currentText()
if item == Parameters._TOPOITEM:
report = Parameters._TOPOREPORT
elif item == Parameters._BASINITEM:
report = Parameters._BASINREPORT
elif item == Parameters._HRUSITEM:
report = Parameters._HRUSREPORT
else:
return
report = QSWATUtils.join(self._gv.textDir, report)
if not os.path.exists(report):
QSWATUtils.error('Cannot find report {0}'.format(report))
return
if Parameters._ISWIN : # Windows
os.startfile(report)
elif os.name == 'posix': # Linux
subprocess.call(('xdg-open', report))
self._odlg.reportsBox.setCurrentIndex(0)
def exportTable(self):
"""Run export table form."""
export = ExportTable(self._gv)
export.run()
def checkReports(self):
"""Add existing reports to reports box and if there are some make it visible."""
makeVisible = False
topoReport = QSWATUtils.join(self._gv.textDir, Parameters._TOPOREPORT)
if os.path.exists(topoReport) and self._odlg.reportsBox.findText(Parameters._TOPOITEM) < 0:
makeVisible = True
self._odlg.reportsBox.addItem(Parameters._TOPOITEM)
basinReport = QSWATUtils.join(self._gv.textDir, Parameters._BASINREPORT)
if os.path.exists(basinReport) and self._odlg.reportsBox.findText(Parameters._BASINITEM) < 0:
makeVisible = True
self._odlg.reportsBox.addItem(Parameters._BASINITEM)
hrusReport = QSWATUtils.join(self._gv.textDir, Parameters._HRUSREPORT)
if os.path.exists(hrusReport) and self._odlg.reportsBox.findText(Parameters._HRUSITEM) < 0:
makeVisible = True
self._odlg.reportsBox.addItem(Parameters._HRUSITEM)
if makeVisible:
self._odlg.reportsBox.setVisible(True)
self._odlg.reportsLabel.setVisible(True)
self._odlg.reportsBox.setCurrentIndex(0)
def doDelineation(self):
"""Run the delineation dialog."""
# avoid getting second window
if self.delin is not None and self.delin._dlg.isEnabled():
self.delin._dlg.close()
self.delin = Delineation(self._gv, self._demIsProcessed)
result = self.delin.run()
if result == 1 and self._gv.isDelinDone():
self._demIsProcessed = True
self.allowCreateHRU()
# remove old data so cannot be reused
self._gv.db.clearTable('BASINSDATA')
# make sure HRUs starts from scratch
if self.hrus and self.hrus._dlg is not None:
self.hrus._dlg.close()
self.hrus = None
elif result == 0:
self._demIsProcessed = False
self._odlg.delinLabel.setText('Step 1')
self._odlg.hrusLabel.setText('Step 2')
self._odlg.hrusLabel.setEnabled(False)
self._odlg.hrusButton.setEnabled(False)
self._odlg.editLabel.setEnabled(False)
self._odlg.editButton.setEnabled(False)
self._odlg.raise_()
def doCreateHRUs(self):
"""Run the HRU creation dialog."""
# avoid getting second window
if self.hrus is not None and self.hrus._dlg.isEnabled():
self.hrus._dlg.close()
self.hrus = HRUs(self._gv, self._odlg.reportsBox)
result = self.hrus.run()
if result == 1:
QSWATUtils.progress('Done', self._odlg.hrusLabel)
self._odlg.editLabel.setEnabled(True)
self._odlg.editButton.setEnabled(True)
self._odlg.raise_()
def demProcessed(self):
"""
Return true if we can proceed with HRU creation.
Return false if any required project setting is not found
in the project file
Return true if:
Using existing watershed and watershed grid exists and
is newer than dem
or
Not using existing watershed and filled dem exists and
is no older than dem, and
watershed shapefile exists and is no older than filled dem
"""
proj = QgsProject.instance()
if not proj:
QSWATUtils.loginfo('demProcessed failed: no project')
return False
title = proj.title()
root = proj.layerTreeRoot()
demFile, found = proj.readEntry(title, 'delin/DEM', '')
if not found or demFile == '':
QSWATUtils.loginfo('demProcessed failed: no DEM')
return False
demFile = proj.readPath(demFile)
demLayer, _ = QSWATUtils.getLayerByFilename(root.findLayers(), demFile, FileTypes._DEM,
self._gv, None, QSWATUtils._WATERSHED_GROUP_NAME)
if not demLayer:
QSWATUtils.loginfo('demProcessed failed: no DEM layer')
return False
self._gv.demFile = demFile
units = demLayer.crs().mapUnits()
factor = 1 if units == QgsUnitTypes.DistanceMeters else Parameters._FEETTOMETRES if units == QgsUnitTypes.DistanceFeet else 0
if factor == 0:
QSWATUtils.loginfo('demProcessed failed: units are {0!s}'.format(units))
return False
self._gv.cellArea = demLayer.rasterUnitsPerPixelX() * demLayer.rasterUnitsPerPixelY() * factor * factor
# hillshade
Delineation.addHillshade(demFile, root, demLayer, self._gv)
outletFile, found = proj.readEntry(title, 'delin/outlets', '')
if found and outletFile != '':
outletFile = proj.readPath(outletFile)
outletLayer, _ = \
QSWATUtils.getLayerByFilename(root.findLayers(), outletFile, FileTypes._OUTLETS,
self._gv, None, QSWATUtils._WATERSHED_GROUP_NAME)
if not outletLayer:
QSWATUtils.loginfo('demProcessed failed: no outlet layer')
return False
else:
outletLayer = None
self._gv.outletFile = outletFile
self._gv.existingWshed = proj.readBoolEntry(title, 'delin/existingWshed', False)[0]
self._gv.useGridModel = proj.readBoolEntry(title, 'delin/useGridModel', False)[0]
self._gv.useLandscapes = proj.readBoolEntry(title, 'lsu/useLandscapes', False)[0]
streamFile, found = proj.readEntry(title, 'delin/net', '')
if self._gv.useGridModel or not self._gv.existingWshed:
if not found or streamFile == '':
QSWATUtils.loginfo('demProcessed failed: no streams shapefile')
return False
streamFile = proj.readPath(streamFile)
ft = FileTypes._GRIDSTREAMS if self._gv.useGridModel else FileTypes._STREAMS
streamLayer, _ = \
QSWATUtils.getLayerByFilename(root.findLayers(), streamFile, ft,
self._gv, None, QSWATUtils._WATERSHED_GROUP_NAME)
if not streamLayer:
QSWATUtils.loginfo('demProcessed failed: no streams layer')
return False
self._gv.streamFile = streamFile
if self._gv.useGridModel:
self._gv.gridSize, found = proj.readNumEntry(title, 'delin/gridSize', 0)
if not found or self._gv.gridSize <= 0:
QSWATUtils.loginfo('demProcessed failed: grid size not set')
return False
else:
channelFile, found = proj.readEntry(title, 'delin/channels', '')
if not found or channelFile == '':
QSWATUtils.loginfo('demProcessed failed: no channels shapefile')
return False
channelFile = proj.readPath(channelFile)
channelLayer, _ = \
QSWATUtils.getLayerByFilename(root.findLayers(), channelFile, FileTypes._CHANNELS,
self._gv, None, QSWATUtils._WATERSHED_GROUP_NAME)
if not channelLayer:
QSWATUtils.loginfo('demProcessed failed: no channels layer')
return False
self._gv.channelFile = channelFile
subbasinsFile, found = proj.readEntry(title, 'delin/subbasins', '')
if not found or subbasinsFile == '':
QSWATUtils.loginfo('demProcessed failed: no subbasins shapefile')
return False
subbasinsFile = proj.readPath(subbasinsFile)
subbasinsInfo = QFileInfo(subbasinsFile)
subbasinsTime = subbasinsInfo.lastModified()
subbasinsLayer, _ = \
QSWATUtils.getLayerByFilename(root.findLayers(), subbasinsFile, FileTypes._SUBBASINS,
self._gv, None, QSWATUtils._WATERSHED_GROUP_NAME)
if not subbasinsLayer:
QSWATUtils.loginfo('demProcessed failed: no subbasins layer')
return False
self._gv.subbasinsFile = subbasinsFile
if not self._gv.useGridModel:
wshedFile, found = proj.readEntry(title, 'delin/wshed', '')
if not found or wshedFile == '':
QSWATUtils.loginfo('demProcessed failed: no wshed shapefile')
return False
wshedFile = proj.readPath(wshedFile)
if self._gv.existingWshed:
wshedLayer, _ = \
QSWATUtils.getLayerByFilename(root.findLayers(), wshedFile, FileTypes._EXISTINGWATERSHED,
self._gv, None, QSWATUtils._WATERSHED_GROUP_NAME)
if not wshedLayer:
QSWATUtils.loginfo('demProcessed failed: no wshed layer')
return False
self._gv.wshedFile = wshedFile
demInfo = QFileInfo(demFile)
if not demInfo.exists():
QSWATUtils.loginfo('demProcessed failed: no DEM info')
return False
base = QSWATUtils.join(demInfo.absolutePath(), demInfo.baseName())
if not self._gv.existingWshed:
burnFile, found = proj.readEntry(title, 'delin/burn', '')
if found and burnFile != '':
burnFile = proj.readPath(burnFile)
if not os.path.exists(burnFile):
QSWATUtils.loginfo('demProcessed failed: no burn file')
return False
self._gv.slopeFile = base + 'slope.tif'
else:
self._gv.slopeFile = base + 'slp.tif'
else:
self._gv.slopeFile = base + 'slp.tif'
if not os.path.exists(self._gv.slopeFile):
QSWATUtils.loginfo('demProcessed failed: no slope raster')
return False
self._gv.basinFile = base + 'wStream.tif'
if not self._gv.useGridModel:
self._gv.channelBasinFile = base + 'wChannel.tif'
self._gv.srcChannelFile = base + 'srcChannel.tif'
streamDrainage = proj.readBoolEntry(title, 'delin/streamDrainage', False)[0]
if self._gv.existingWshed:
if not self._gv.useGridModel:
if not os.path.exists(self._gv.basinFile):
QSWATUtils.loginfo('demProcessed failed: no subbasins raster')
return False
else:
self._gv.pFile = base + 'p.tif'
if not os.path.exists(self._gv.pFile):
QSWATUtils.loginfo('demProcessed failed: no p raster')
return False
self._gv.felFile = base + 'fel.tif'
felInfo = QFileInfo(self._gv.felFile)
if not (felInfo.exists() and subbasinsInfo.exists()):
QSWATUtils.loginfo('demProcessed failed: no filled raster')
return False
self._gv.ad8File = base + 'ad8.tif'
if not os.path.exists(self._gv.ad8File):
QSWATUtils.loginfo('demProcessed failed: no D8 accumulation raster')
return False
demTime = demInfo.lastModified()
felTime = felInfo.lastModified()
if not (demTime <= felTime <= subbasinsTime):
QSWATUtils.loginfo('demProcessed failed: not up to date')
return False
self._gv.distStFile = base + 'distst.tif'
if not os.path.exists(self._gv.distStFile):
QSWATUtils.loginfo('demProcessed failed: no distance to outlet raster')
return False
self._gv.distChFile = base + 'distch.tif'
if not self._gv.useGridModel:
if not os.path.exists(self._gv.distChFile):
QSWATUtils.loginfo('demProcessed failed: no distance to channel raster')
return False
valleyDepthsFile = base + 'depths.tif'
if os.path.exists(valleyDepthsFile):
self._gv.valleyDepthsFile = valleyDepthsFile
# no longer compulsory
# if not os.path.exists(self._gv.valleyDepthsFile):
# QSWATUtils.loginfo('demProcessed failed: no valley depths raster')
# return False
if not self._gv.useGridModel:
if not os.path.exists(self._gv.channelBasinFile):
QSWATUtils.loginfo('demProcessed failed: no channel basins raster')
return False
snapFile, found = proj.readEntry(title, 'delin/snapOutlets', '')
if found and snapFile != '':
snapFile = proj.readPath(snapFile)
if os.path.exists(snapFile):
self._gv.snapFile = snapFile
else:
snapFile = ''
else:
snapFile = ''
lakeLayer = None
lakeFile, found = proj.readEntry(title, 'delin/lakes', '')
if found and lakeFile != '':
lakeFile = proj.readPath(lakeFile)
if os.path.exists(lakeFile):
self._gv.lakeFile = lakeFile
lakeLayer = QgsVectorLayer(lakeFile, 'Lakes', 'ogr')
if self._gv.useGridModel:
gridLakesAdded = proj.readBoolEntry(title, 'delin/gridLakesAdded', False)[0]
if not gridLakesAdded:
QSWATUtils.loginfo('demProcessed failed: grid lakes not added')
return False
else:
chBasinNoLakeFile = base + 'wChannelNoLake.tif'
if os.path.exists(chBasinNoLakeFile):
self._gv.chBasinNoLakeFile = chBasinNoLakeFile
if not self._gv.existingWshed:
lakePointsAdded = proj.readBoolEntry(title, 'delin/lakePointsAdded', False)[0]
if not lakePointsAdded:
QSWATUtils.loginfo('demProcessed failed: lake points not added')
return False
else:
QSWATUtils.loginfo('demProcessed failed: no channel basins without lakes raster')
return False
snapLayer = outletLayer if snapFile == '' else QgsVectorLayer(self._gv.snapFile, 'Snapped outlets', 'ogr')
chanLayer = streamLayer if self._gv.useGridModel else channelLayer
if self._gv.existingWshed:
ad8Layer = None
else:
ad8Layer = QgsRasterLayer(self._gv.ad8File, 'Accumulation')
if not self._gv.topo.setUp0(demLayer, chanLayer, snapLayer, ad8Layer, self._gv.verticalFactor, self._gv.useGridModel):
return False
basinIndex = self._gv.topo.getIndex(subbasinsLayer, QSWATTopology._POLYGONID)
if basinIndex < 0:
return False
for feature in subbasinsLayer.getFeatures():
basin = feature.attributes()[basinIndex]
centroid = feature.geometry().centroid().asPoint()
self._gv.topo.basinCentroids[basin] = (centroid.x(), centroid.y())
if lakeLayer is not None:
if not self._gv.topo.readLakesData(self._gv.db):
QSWATUtils.loginfo('demProcessed failed: lakes data not read')
return False
# this can go wrong if eg the streams and watershed files exist but are inconsistent
try:
if not self._gv.topo.setUp(demLayer, chanLayer, subbasinsLayer, snapLayer, lakeLayer,
self._gv, self._gv.existingWshed, False, self._gv.useGridModel, streamDrainage, False):
QSWATUtils.loginfo('demProcessed failed: topo setup failed')
return False
if len(self._gv.topo.inlets) == 0:
# no inlets, so no need to expand subbasins layer legend
treeSubbasinsLayer = root.findLayer(subbasinsLayer.id())
treeSubbasinsLayer.setExpanded(False)
except Exception:
QSWATUtils.loginfo('demProcessed failed: topo setup raised exception: {0}'.format(traceback.format_exc()))
return False
return True
def allowCreateHRU(self):
"""Mark delineation as Done and make create HRUs option visible."""
QSWATUtils.progress('Done', self._odlg.delinLabel)
QSWATUtils.progress('Step 2', self._odlg.hrusLabel)
self._odlg.hrusLabel.setEnabled(True)
self._odlg.hrusButton.setEnabled(True)
self._odlg.editLabel.setEnabled(False)
self._odlg.editButton.setEnabled(False)
def showReports(self):
"""Show reports combo box and add items if necessary."""
self._odlg.reportsBox.setVisible(True)
if self._odlg.reportsBox.findText(Parameters._TOPOITEM) < 0:
self._odlg.reportsBox.addItem(Parameters._TOPOITEM)
if self._odlg.reportsBox.findText(Parameters._BASINITEM) < 0:
self._odlg.reportsBox.addItem(Parameters._BASINITEM)
if self._odlg.reportsBox.findText(Parameters._HRUSITEM) < 0:
self._odlg.reportsBox.addItem(Parameters._HRUSITEM)
def setLegendGroups(self):
"""Legend groups are used to keep legend in reasonable order.
Create them if necessary.
"""
root = QgsProject.instance().layerTreeRoot()
groups = [QSWATUtils._ANIMATION_GROUP_NAME,
QSWATUtils._RESULTS_GROUP_NAME,
QSWATUtils._WATERSHED_GROUP_NAME,
QSWATUtils._LANDUSE_GROUP_NAME,
QSWATUtils._SOIL_GROUP_NAME,
QSWATUtils._SLOPE_GROUP_NAME]
for i in range(len(groups)):
group = groups[i]
node = root.findGroup(group)
if node is None:
root.insertGroup(i, group)
def startEditor(self):
"""Start the SWAT Editor, first setting its initial parameters."""
# self._gv.setSWATEditorParams()
editor = self._gv.findSWATPlusEditor()
if editor is None:
return
QSWATUtils.loginfo('Starting SWAT+ editor with command: "{0}" "{1}"'.format(editor, self._gv.db.dbFile))
subprocess.call('"{0}" "{1}"'.format(editor, self._gv.db.dbFile), shell=True)
if os.path.exists(QSWATUtils.join(self._gv.resultsDir, Parameters._OUTPUTDB)):
self._odlg.visualiseLabel.setVisible(True)
self._odlg.visualiseButton.setVisible(True)
def visualise(self):
"""Run visualise form."""
# avoid getting second window
if self.vis is not None and self.vis._dlg.isEnabled():
self.vis._dlg.close()
self.vis = Visualise(self._gv)
self.vis.run()
def finish(self):
"""Close the database connections and subsidiary forms."""
if QSWATUtils is not None:
QSWATUtils.loginfo('Closing databases')
try:
self.delin = None
self.hrus = None
self.vis = None
if self._gv and self._gv.db:
if self._gv.db.conn:
self._gv.db.conn.close()
if self._gv.db.connRef:
self._gv.db.connRef.close()
if QSWATUtils is not None:
QSWATUtils.loginfo('Databases closed')
except Exception:
pass
```
#### File: swatplus-automatic-workflow/qswatplus/QSWATTopology.py
```python
from PyQt5.QtCore import * # @UnusedWildImport
from PyQt5.QtGui import * # @UnusedWildImport
from qgis.core import * # @UnusedWildImport
from osgeo import gdal
from numpy import * # @UnusedWildImport
import os.path
import time
import csv
import traceback
try:
from QSWATUtils import QSWATUtils, FileTypes, ListFuns
from DBUtils import DBUtils
from parameters import Parameters
from raster import Raster
from dataInC import ReachData, MergedChannelData, LakeData # @UnresolvedImport
except:
# used by convertFromArc
from QSWATUtils import QSWATUtils, FileTypes, ListFuns
from DBUtils import DBUtils
from parameters import Parameters
from raster import Raster
from dataInC import ReachData, MergedChannelData, LakeData # @UnresolvedImport
class QSWATTopology:
"""Module for creating and storing topological data
derived from watershed delineation.
Nomenclature: From TauDEM we have channels and also streams, each with link and wsno values.
We translate as follows:
channel link: channel
channel wsno: chBasin
stream link: stream
stream wsno: [sub]basin
    These all have numbers from zero. To avoid zero ids in the output, a SWATChannel etc has an id
of at least one.
Unlike QSWAT, we do not guarantee SWAT identifiers will form a continuous range 1..n.
"""
## Value used to indicate no basin since channel is zero length and has no points.
    ## Must be negative (since TauDEM basin (WSNO) numbers go from 0 up)
## and must not be -1 since this indicates a 'not found' in most gets, or a main outlet
_NOBASIN = -2
_RESTYPE = 1
_PONDTYPE = 2
_LINKNO = 'LINKNO'
_DSLINKNO = 'DSLINKNO'
_USLINKNO1 = 'USLINKNO1'
_USLINKNO2 = 'USLINKNO2'
_DSNODEID = 'DSNODEID'
_DRAINAREA = 'DS_Cont_Ar' if Parameters._ISWIN else 'DSContArea'
_DRAINAGE = 'Drainage'
_ORDER = 'Order' if Parameters._ISWIN else 'strmOrder'
_LENGTH = 'Length'
_MAGNITUDE = 'Magnitude'
_DROP = 'Drop' if Parameters._ISWIN else 'strmDrop'
_SLOPE = 'Slope'
_STRAIGHTL = 'Straight_L' if Parameters._ISWIN else 'StraightL'
_USCONTAR = 'US_Cont_Ar' if Parameters._ISWIN else 'USContArea'
_WSNO = 'WSNO'
_DOUTEND = 'DOUT_END' if Parameters._ISWIN else 'DOUTEND'
_DOUTSTART = 'DOUT_START' if Parameters._ISWIN else 'DOUTSTART'
_DOUTMID = 'DOUT_MID' if Parameters._ISWIN else 'DOUTMID'
_BASINNO = 'BasinNo'
_ID = 'ID'
_INLET = 'INLET'
_RES = 'RES'
_PTSOURCE = 'PTSOURCE'
_POLYGONID = 'PolygonId'
_DOWNID = 'DownId'
_STREAMLINK = 'StreamLink'
_STREAMLEN = 'StreamLen'
_DSNODEIDW = 'DSNodeID'
_DSWSID = 'DSWSID'
_US1WSID = 'US1WSID'
_US2WSID = 'US2WSID'
_SUBBASIN = 'Subbasin'
_CHANNEL = 'Channel'
_CHANNELR = 'ChannelR'
_LANDSCAPE = 'Landscape'
_AQUIFER = 'Aquifer'
_LSU = 'LSU'
_LSUID = 'LSUID'
_PENWIDTH = 'PenWidth'
_HRUS = 'HRUS'
_HRUGIS = 'HRUGIS'
_LAKEID = 'LakeId'
_RESERVOIR = 'Reservoir'
_POND = 'Pond'
_AREAC = 'AreaC'
_LEN2 = 'Len2'
_SLO2 = 'Slo2'
_WID2 = 'Wid2'
_DEP2 = 'Dep2'
_MINEL = 'MinEl'
_MAXEL = 'MaxEl'
_LAKEIN = 'LakeIn'
_LAKEOUT = 'LakeOut'
_LAKEWITHIN = 'LakeWithin'
_LAKEMAIN = 'LakeMain'
def __init__(self, isBatch):
"""Initialise class variables."""
## Link to project database
self.db = None
        ## True if outlet end of reach is its first point, i.e. index zero.
self.outletAtStart = True
## index to LINKNO in channel shapefile
self.channelIndex = -1
## index to DSLINKNO in channel shapefile
self.dsChannelIndex = -1
## relation between channel basins and subbasins
# not used with grid models (since would be 1-1)
self.chBasinToSubbasin = dict()
## WSNO does not obey SWAT rules for element numbers (> 0)
# so we invent and store SWATBasin numbers
# also SWAT basins may not be empty
# not complete: zero areas/stream lengths excluded
self.subbasinToSWATBasin = dict()
        ## inverse map to make it easy to output in numeric order of SWAT Basins
self.SWATBasinToSubbasin = dict()
## original channel links may have zero numbers and may have zero lengths
## so we generate SWATChannel ids
# not complete
self.channelToSWATChannel = dict()
## inverse map
self.SWATChannelToChannel = dict()
## subbasin to stream mapping (wsno to link fields in streams shapefile)
# complete
self.subbasinToStream = dict()
## stream to downstream (link to dslink in streams shapefile)
# complete
self.downStreams = dict()
## stream lengths (link to length in streams shapefile). Lengths are in metres
# complete
self.streamLengths = dict()
## LINKNO to DSLINKNO in channel shapefile
# complete = all channel links defined
self.downChannels = dict()
## zero length channels
self.zeroChannels = set()
## subbasin to downstream subbasin
# incomplete: no empty basins (zero length streams or missing wsno value in subbasins layer)
self.downSubbasins = dict()
## map from channel to chBasin
# incomplete - no zero length channels
self.chLinkToChBasin = dict()
## reverse of chLinkToChBasin
self.chBasinToChLink = dict()
## centroids of basins as (x, y) pairs in projected units
self.basinCentroids = dict()
## channel link to channel length in metres:
# complete
self.channelLengths = dict()
## channel slopes in m/m
# complete
self.channelSlopes = dict()
## numpy array of total area draining to downstream end of channel in square metres
self.drainAreas = None
## map of lake id to ids of points added to split channels entering lakes
self.lakeInlets = dict()
## map of lake id to ids of points added to split channels leaving lakes
self.lakeOutlets = dict()
## map of channel to ReachData: points and elevations at ends of channels, plus basin
# not complete: zero areas/channel lengths excluded
self.channelsData = dict()
## map of lake id to LakeData for lakes defined by shapefile
self.lakesData = dict()
## map of channel links to lake ids: channel flowing into lake
self.chLinkIntoLake = dict()
## map of channel links to lake ids: channel completely inside lake
self.chLinkInsideLake = dict()
## map of channel links to lake ids: channel flowing out of lake
self.chLinkFromLake = dict()
## map of subbasin to lake id for subbasins with their outlet inside a lake (non-grid models only)
self.outletsInLake = dict()
## channel basin to area in square metres. Not used with grid model.
self.chBasinAreas = dict()
## current point id (for outlets, inlets and point sources)
self.pointId = 0
## current water body id (for lakes, reservoirs and ponds)
self.waterBodyId = 0
## channel links to reservoir or pond point ids plus water type: reservoir or pond discharges into channel
self.chLinkToWater = dict()
## channel links with point sources flowing into them (defined by inlets/outlets file)
self.chLinkToPtSrc = dict()
## channel links to watershed inlets (for grid models only)
self.chLinkToInlet = dict()
## basins draining to inlets
self.upstreamFromInlets = set()
## width of DEM cell in metres
self.dx = 0
## depth of DEM cell in metres
self.dy = 0
## x direction threshold for points to be considered coincident
self.xThreshold = 0
## y direction threshold for points to be considered coincident
self.yThreshold = 0
## multiplier to turn DEM elevations to metres
self.verticalFactor = 1
## DEM nodata value
self.demNodata = 0
## DEM extent
self.demExtent = None
## map from subbasin to outlet pointId, point, and channel draining to it
self.outlets = dict()
## map from subbasin to inlet pointId and point (not used with grid models)
self.inlets = dict()
## map from channel links to point sources
self.chPointSources = dict()
## reservoirs found by converting water HRUs
self.foundReservoirs = dict()
## project projection (set from DEM)
self.crsProject = None
## lat-long coordinate reference system
self.crsLatLong = QgsCoordinateReferenceSystem()
if not self.crsLatLong.createFromId(4326, QgsCoordinateReferenceSystem.EpsgCrsId):
QSWATUtils.error('Failed to create lat-long coordinate reference system', isBatch)
        ## transform from project coordinates to lat-long
self.transformToLatLong = None
## Flag to show if batch run
self.isBatch = isBatch
## table for memorizing distances from basin to join in flowpath with other basin:
# basin -> otherBasin -> join distance in metres
self.distancesToJoins = dict()
## table for use in existing non-grid models of maximum channel flow lengths in metres to subbasin outlets
# complete
self.maxFlowLengths = dict()
## number of chunks to use for rasters and their arrays; increased when memory fails
self.chunkCount = 1
## dsNodeIds that cannot be retained when making grids as they would be in same grid cell as another point
self.lostDsNodeIds = set()
def setUp0(self, demLayer, channelLayer, outletLayer, ad8Layer, verticalFactor, useGridModel):
"""Set DEM size parameters and stream orientation, and store source and outlet points for stream reaches."""
# can fail if demLayer is None or not projected
try:
self.setCrs(demLayer)
units = self.crsProject.mapUnits()
except Exception:
QSWATUtils.loginfo('Failure to read DEM units: {0}'.format(traceback.format_exc()))
return False
if units == QgsUnitTypes.DistanceMeters:
factor = 1
elif units == QgsUnitTypes.DistanceFeet:
factor = Parameters._FEETTOMETRES
else:
# unknown or degrees - will be reported in delineation - just quietly fail here
QSWATUtils.loginfo('Failure to read DEM units: {0}'.format(str(units)))
return False
self.dx = demLayer.rasterUnitsPerPixelX() * factor
self.dy = demLayer.rasterUnitsPerPixelY() * factor
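        # points within NEARNESSTHRESHOLD of a DEM cell width/depth of each other will be treated as coincident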
self.xThreshold = self.dx * Parameters._NEARNESSTHRESHOLD
self.yThreshold = self.dy * Parameters._NEARNESSTHRESHOLD
QSWATUtils.loginfo('Factor is {0}, cell width is {1}, cell depth is {2}'.format(factor, self.dx, self.dy))
self.demExtent = demLayer.extent()
self.verticalFactor = verticalFactor
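        # reach geometries may be oriented either way, so record whether the outlet end of a reach
        # is its first point, so that upper and lower ends of reaches are interpreted consistently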
self.outletAtStart = self.hasOutletAtStart(channelLayer, ad8Layer)
QSWATUtils.loginfo('Outlet at start is {0!s}'.format(self.outletAtStart))
return self.saveOutletsAndSources(channelLayer, outletLayer, useGridModel)
def setCrs(self, demLayer):
"""Set crsProject and transformToLatLong if necessary."""
if self.crsProject is None:
self.crsProject = demLayer.crs()
self.transformToLatLong = QgsCoordinateTransform(self.crsProject, self.crsLatLong, QgsProject.instance())
QgsProject.instance().setCrs(self.crsProject)
settings = QSettings()
settings.setValue('Projections/defaultBehaviour', 'useProject')
def setUp1(self, streamLayer):
"""Establish subbasinToStream, downStreams and streamLengths dictionaries.
Used when calculating ridges by branch length method and setUp has not been run yet."""
self.subbasinToStream.clear()
self.downStreams.clear()
self.streamLengths.clear()
streamIndex = self.getIndex(streamLayer, QSWATTopology._LINKNO)
if streamIndex < 0:
QSWATUtils.loginfo('No LINKNO field in stream layer')
return False
dsStreamIndex = self.getIndex(streamLayer, QSWATTopology._DSLINKNO)
if dsStreamIndex < 0:
QSWATUtils.loginfo('No DSLINKNO field in stream layer')
return False
lengthIndex = self.getIndex(streamLayer, QSWATTopology._LENGTH, True)
wsnoIndex = self.getIndex(streamLayer, QSWATTopology._WSNO)
if wsnoIndex < 0:
QSWATUtils.loginfo('No WSNO field in stream layer')
return False
for reach in streamLayer.getFeatures():
link = reach[streamIndex]
dsLink = reach[dsStreamIndex]
basin = reach[wsnoIndex]
if lengthIndex < 0:
length = reach.geometry().length()
else:
length = reach[lengthIndex]
self.subbasinToStream[basin] = link
self.downStreams[link] = dsLink
self.streamLengths[link] = length
return True
def setUp(self, demLayer, channelLayer, subbasinsLayer, outletLayer, lakesLayer, gv, existing,
recalculate, useGridModel, streamDrainage, reportErrors):
"""Create topological data from layers."""
#QSWATUtils.loginfo('Channel layer {0}'.format(channelLayer.dataProvider().dataSourceUri()))
#QSWATUtils.loginfo('Subbasins layer {0}'.format(subbasinsLayer.dataProvider().dataSourceUri()))
self.db = gv.db
self.chLinkToChBasin.clear()
self.chBasinToChLink.clear()
self.subbasinToSWATBasin.clear()
self.SWATBasinToSubbasin.clear()
self.channelToSWATChannel.clear()
self.SWATChannelToChannel.clear()
self.downChannels.clear()
self.zeroChannels.clear()
# do not clear centroids unless existing and not using grid model:
if existing and not useGridModel:
self.basinCentroids.clear()
self.channelLengths.clear()
self.channelSlopes.clear()
self.channelsData.clear()
self.chLinkToWater.clear()
self.chLinkToPtSrc.clear()
self.chLinkToInlet.clear()
self.distancesToJoins.clear()
self.maxFlowLengths.clear()
dsNodeToLink = dict()
ignoreError = not reportErrors
ignoreWithExisting = existing or not reportErrors
ignoreWithGrid = useGridModel or not reportErrors
ignoreWithGridOrExisting = ignoreWithGrid or ignoreWithExisting
self.channelIndex = self.getIndex(channelLayer, QSWATTopology._LINKNO, ignoreMissing=ignoreError)
if self.channelIndex < 0:
QSWATUtils.loginfo('No LINKNO field in channels layer')
return False
self.dsChannelIndex = self.getIndex(channelLayer, QSWATTopology._DSLINKNO, ignoreMissing=ignoreError)
if self.dsChannelIndex < 0:
QSWATUtils.loginfo('No DSLINKNO field in channels layer')
return False
dsNodeIndex = self.getIndex(channelLayer, QSWATTopology._DSNODEID, ignoreMissing=ignoreWithExisting)
wsnoIndex = self.getIndex(channelLayer, QSWATTopology._WSNO, ignoreMissing=ignoreError)
if wsnoIndex < 0:
QSWATUtils.loginfo('No WSNO field in channels layer')
return False
drainAreaIndex = self.getIndex(channelLayer, QSWATTopology._DRAINAREA, ignoreMissing=ignoreWithGridOrExisting)
lengthIndex = self.getIndex(channelLayer, QSWATTopology._LENGTH, ignoreMissing=ignoreWithGridOrExisting)
dropIndex = self.getIndex(channelLayer, QSWATTopology._DROP, ignoreMissing=ignoreWithGridOrExisting)
polyIndex = self.getIndex(subbasinsLayer, QSWATTopology._POLYGONID, ignoreMissing=ignoreError)
if polyIndex < 0:
QSWATUtils.loginfo('No POLYGONID field in subbasins layer')
return False
subbasinIndex = self.getIndex(subbasinsLayer, QSWATTopology._SUBBASIN, ignoreMissing=ignoreWithGridOrExisting)
if outletLayer is not None:
if dsNodeIndex < 0:
QSWATUtils.information('Warning: streams layer has no {0} field, so points in inlets/outlets file will be ignored'
.format(QSWATTopology._DSNODEID), gv.isBatch)
idIndex = self.getIndex(outletLayer, QSWATTopology._ID, ignoreMissing=ignoreError)
if idIndex < 0:
QSWATUtils.loginfo('No ID field in outlets layer')
return False
inletIndex = self.getIndex(outletLayer, QSWATTopology._INLET, ignoreMissing=ignoreError)
if inletIndex < 0:
QSWATUtils.loginfo('No INLET field in outlets layer')
return False
ptSourceIndex = self.getIndex(outletLayer, QSWATTopology._PTSOURCE, ignoreMissing=ignoreError)
if ptSourceIndex < 0:
QSWATUtils.loginfo('No PTSOURCE field in outlets layer')
return False
resIndex = self.getIndex(outletLayer, QSWATTopology._RES, ignoreMissing=ignoreError)
if resIndex < 0:
QSWATUtils.loginfo('No RES field in outlets layer')
return False
self.demNodata = demLayer.dataProvider().sourceNoDataValue(1)
if not useGridModel:
# upstream array will get very big for grid
us = dict()
time1 = time.process_time()
maxChLink = 0
SWATChannel = 0
for channel in channelLayer.getFeatures():
chLink = channel[self.channelIndex]
dsChLink = channel[self.dsChannelIndex]
chBasin = channel[wsnoIndex]
geom = channel.geometry()
if lengthIndex < 0 or recalculate:
length = geom.length()
else:
length = channel[lengthIndex]
data = self.getReachData(geom, demLayer)
self.channelsData[chLink] = data
if data and (dropIndex < 0 or recalculate):
drop = data.upperZ - data.lowerZ
elif dropIndex >= 0:
drop = channel[dropIndex]
else:
drop = 0
slope = 0 if length <= 0 else float(drop) / length
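            # slope is in m/m since drop and length are both in metres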
dsNode = channel[dsNodeIndex] if dsNodeIndex >= 0 else -1
if useGridModel and chBasin < 0:
# it is the downstream channel link from an inlet, and has no basin
pass
else:
# exit channels in grid model can have zero length
if length > 0 or useGridModel:
self.chLinkToChBasin[chLink] = chBasin
self.chBasinToChLink[chBasin] = chLink
SWATChannel += 1
self.channelToSWATChannel[chLink] = SWATChannel
self.SWATChannelToChannel[SWATChannel] = chLink
else:
self.zeroChannels.add(chLink)
maxChLink = max(maxChLink, chLink)
self.downChannels[chLink] = dsChLink
self.channelLengths[chLink] = length
self.channelSlopes[chLink] = slope
if dsNode >= 0:
dsNodeToLink[dsNode] = chLink
#QSWATUtils.loginfo('DSNode {0} mapped to channel link {1}'.format(dsNode, chLink))
if dsChLink >= 0:
if not useGridModel:
ups = us.get(dsChLink, None)
if ups is None:
us[dsChLink] = [chLink]
else:
ups.append(chLink)
# check we haven't just made the us relation circular
if QSWATTopology.reachable(dsChLink, [chLink], us):
QSWATUtils.error('Circular drainage network from channel link {0}'.format(dsChLink), self.isBatch)
return False
time2 = time.process_time()
QSWATUtils.loginfo('Topology setup for channels took {0} seconds'.format(int(time2 - time1)))
if not useGridModel:
self.setChannelBasinAreas(gv)
if existing:
# need to set centroids
for polygon in subbasinsLayer.getFeatures():
basin = polygon[polyIndex]
centroid = polygon.geometry().centroid().asPoint()
self.basinCentroids[basin] = (centroid.x(), centroid.y())
# find maximum channel flow length for each subbasin
self.setMaxFlowLengths()
time3 = time.process_time()
QSWATUtils.loginfo('Topology setup of subbasin areas and centroids took {0} seconds'.format(int(time3 - time2)))
if outletLayer is not None:
features = outletLayer.getFeatures()
else:
features = []
if dsNodeIndex >= 0:
doneNodes = set()
for point in features:
dsNode = point[idIndex]
if dsNode in doneNodes:
if reportErrors:
QSWATUtils.error('ID value {0} is used more than once in inlets/outlets file {1}. Occurrences after the first are ignored'
.format(dsNode, QSWATUtils.layerFilename(outletLayer)), self.isBatch)
chLink = -1
elif dsNode in self.lostDsNodeIds:
chLink = -1
elif dsNode not in dsNodeToLink:
if reportErrors:
QSWATUtils.error('ID value {0} from inlets/outlets file {1} not found as DSNODEID in channels file {2}. Will be ignored.'
.format(dsNode, QSWATUtils.layerFilename(outletLayer),
QSWATUtils.layerFileInfo(channelLayer).filePath()), self.isBatch)
chLink = -1
else:
chLink = dsNodeToLink[dsNode]
doneNodes.add(dsNode)
if chLink >= 0:
isInlet = point[inletIndex] == 1
isPtSource = point[ptSourceIndex] == 1
isReservoir = point[resIndex] == 1
isPond = point[resIndex] == 2
if lakesLayer is not None:
# check if point is inside lake
inLake = False
for lake in lakesLayer.getFeatures():
lakeGeom = lake.geometry()
lakeRect = lakeGeom.boundingBox()
if QSWATTopology.polyContains(point.geometry().asPoint(), lakeGeom, lakeRect):
inLake = True
if isInlet:
typ = 'Inlet'
elif isPtSource:
typ = 'Point source'
elif isReservoir:
typ = 'Reservoir'
elif isPond:
typ = 'Pond'
else:
# main outlets allowed within lakes
break
lakeIdIndex = lakesLayer.fieldNameIndex(QSWATTopology._LAKEID)
QSWATUtils.information('{0} {1} is inside lake {2}. Will be ignored.'.format(typ, point.id(), lake[lakeIdIndex]), self.isBatch)
break
if inLake:
continue
if isInlet:
if isPtSource:
pt = point.geometry().asPoint()
self.chLinkToPtSrc[chLink] = (self.nonzeroPointId(dsNode), pt)
elif useGridModel: # inlets collected in setUp0 for non-grids
pt = point.geometry().asPoint()
self.chLinkToInlet[chLink] = (self.nonzeroPointId(dsNode), pt)
elif isReservoir:
pt = point.geometry().asPoint()
self.chLinkToWater[chLink] = (self.nonzeroPointId(dsNode), pt, QSWATTopology._RESTYPE)
elif isPond:
pt = point.geometry().asPoint()
self.chLinkToWater[chLink] = (self.nonzeroPointId(dsNode), pt, QSWATTopology._PONDTYPE)
# else an outlet: nothing to do
# check for user-defined outlets coincident with stream junctions
if chLink in self.zeroChannels and chLink not in self.chLinkIntoLake:
if isInlet: typ = 'Inlet'
elif isPtSource: typ = 'Point source'
elif isReservoir: typ = 'Reservoir'
elif isPond: typ = 'Pond'
else: typ = 'Outlet'
msg = '{0} with id {1} has a zero length channel leading to it: please remove or move downstream'.format(typ, dsNode)
if reportErrors:
QSWATUtils.error(msg, self.isBatch)
else:
QSWATUtils.loginfo(msg)
return False
time4 = time.process_time()
QSWATUtils.loginfo('Topology setup for inlets/outlets took {0} seconds'.format(int(time4 - time3)))
# add any extra reservoirs and point sources
# set drainage
        # drainAreas maps channel link number (used as index into the array) to channel basin or grid cell area in sq m
self.drainAreas = zeros((maxChLink + 1), dtype=float)
if useGridModel:
gridCellArea = self.dx * self.dy * gv.gridSize * gv.gridSize
# try to use Drainage field from grid channels shapefile
if streamDrainage:
ok = self.setGridDrainageFromChannels(channelLayer)
else:
ok = False
if not ok:
self.setGridDrainageAreas(maxChLink, gridCellArea)
else:
# can use drain areas from TauDEM if we have them
if drainAreaIndex >= 0:
self.setDrainageFromChannels(channelLayer, drainAreaIndex)
else:
self.setDrainageAreas(us)
time5 = time.process_time()
QSWATUtils.loginfo('Topology drainage took {0} seconds'.format(int(time5 - time4)))
#try existing subbasin numbers as SWAT basin numbers
ok = polyIndex >= 0 and subbasinIndex >= 0 and self.tryBasinAsSWATBasin(subbasinsLayer, polyIndex, subbasinIndex)
if not ok:
# failed attempt may have put data in these, so clear them
self.subbasinToSWATBasin.clear()
self.SWATBasinToSubbasin.clear()
if useGridModel:
# lower limit on drainage area for outlets to be included
# 1.5 multiplier guards against rounding errors:
# ensures that any cell with drainage area exceeding this cannot be a singleton
minDrainArea = gridCellArea * 1.5
# Create SWAT basin numbers for grid
# we ignore single cell outlets, by checking that outlets have a drainage area greater than a single cell
SWATBasin = 0
# for grid models, streams and channels are the same, so chBasin is the same as basin
# we ignore edge basins which are outlets with nothing upstream, ie they are single cell outlets,
# by counting only those which have a downstream link or have an upstream link
for chLink, chBasin in self.chLinkToChBasin.items():
dsChLink = self.downChannels[chLink] if useGridModel else self.getDownChannel(chLink)
if dsChLink >= 0 or self.drainAreas[chLink] > minDrainArea:
SWATBasin += 1
self.subbasinToSWATBasin[chBasin] = SWATBasin
self.SWATBasinToSubbasin[SWATBasin] = chBasin
else:
# create SWAT basin numbers
SWATBasin = 0
request = QgsFeatureRequest().setFlags(QgsFeatureRequest.NoGeometry).setSubsetOfAttributes([polyIndex])
for feature in subbasinsLayer.getFeatures(request):
subbasin = feature[polyIndex]
if subbasin not in self.upstreamFromInlets:
SWATBasin += 1
self.subbasinToSWATBasin[subbasin] = SWATBasin
self.SWATBasinToSubbasin[SWATBasin] = subbasin
# put SWAT Basin numbers in subbasin field of subbasins shapefile
subbasinsLayer.startEditing()
if subbasinIndex < 0:
# need to add subbasin field
subbasinsLayer.dataProvider().addAttributes([QgsField(QSWATTopology._SUBBASIN, QVariant.Int)])
subbasinsLayer.updateFields()
subbasinIndex = subbasinsLayer.fields().indexOf(QSWATTopology._SUBBASIN)
request = QgsFeatureRequest().setFlags(QgsFeatureRequest.NoGeometry).setSubsetOfAttributes([polyIndex])
for feature in subbasinsLayer.getFeatures(request):
subbasin = feature[polyIndex]
SWATBasin = self.subbasinToSWATBasin.get(subbasin, 0)
subbasinsLayer.changeAttributeValue(feature.id(), subbasinIndex, SWATBasin)
subbasinsLayer.commitChanges()
time6 = time.process_time()
QSWATUtils.loginfo('Topology setting SWATBasin numbers took {0} seconds'.format(int(time6 - time5)))
subbasinsLayer.setLabelsEnabled(True)
subbasinsLayer.triggerRepaint()
if not useGridModel:
# add SWAT channel numbers to watershed shapefile
# in case loaded
root = QgsProject.instance().layerTreeRoot()
wshedLayer, _ = QSWATUtils.getLayerByFilename(root.findLayers(), gv.wshedFile, FileTypes._WATERSHED,
None, None, None)
if wshedLayer is None:
wshedLayer = QgsVectorLayer(gv.wshedFile, FileTypes.legend(FileTypes._WATERSHED), 'ogr')
wshedPolyIndex = self.getIndex(wshedLayer, QSWATTopology._POLYGONID, ignoreMissing=ignoreError)
wshedChannelIndex = self.getIndex(wshedLayer, QSWATTopology._CHANNEL, ignoreMissing=ignoreWithGridOrExisting)
wshedLayer.startEditing()
if wshedChannelIndex < 0:
wshedLayer.dataProvider().addAttributes([QgsField(QSWATTopology._CHANNEL, QVariant.Int)])
wshedLayer.updateFields()
wshedChannelIndex = wshedLayer.fields().indexOf(QSWATTopology._CHANNEL)
request = QgsFeatureRequest().setFlags(QgsFeatureRequest.NoGeometry).setSubsetOfAttributes([wshedPolyIndex])
for feature in wshedLayer.getFeatures(request):
chBasin = feature.attributes()[wshedPolyIndex]
channel = self.chBasinToChLink.get(chBasin, -1)
SWATChannel = self.channelToSWATChannel.get(channel, 0)
wshedLayer.changeAttributeValue(feature.id(), wshedChannelIndex, SWATChannel)
wshedLayer.commitChanges()
drainageFile = QSWATUtils.join(gv.shapesDir, gv.projName + Parameters._DRAINAGECSV)
self.writeDrainageFile(drainageFile)
return useGridModel or lakesLayer is not None or self.checkAreas(subbasinsLayer, gv)
def addLakes(self, lakesLayer, subbasinsLayer, chBasinsLayer, streamsLayer, channelsLayer,
demLayer, snapThreshold, gv, reportErrors=True):
"""Add lakes from lakes shapefile layer.
Not used with grid models."""
lakesProvider = lakesLayer.dataProvider()
lakeIdIndex = lakesProvider.fieldNameIndex(QSWATTopology._LAKEID)
lakeResIndex = lakesProvider.fieldNameIndex(QSWATTopology._RES)
if lakeResIndex < 0:
QSWATUtils.information('No RES field in lakes shapefile {0}: assuming lakes are reservoirs'.
format(QSWATUtils.layerFilename(lakesLayer)), self.isBatch)
subsProvider = subbasinsLayer.dataProvider()
subsAreaIndex = subsProvider.fieldNameIndex(Parameters._AREA)
if subsAreaIndex < 0:
QSWATUtils.error('Cannot find {0} field in {1}'.format(Parameters._AREA, gv.subbasinsFile), self.isBatch, reportErrors=reportErrors)
return False
chBasinsProvider = chBasinsLayer.dataProvider()
chBasinsPolyIndex = chBasinsProvider.fieldNameIndex(QSWATTopology._POLYGONID)
chBasinsAreaIndex = chBasinsProvider.fieldNameIndex(Parameters._AREA)
channelsProvider = channelsLayer.dataProvider()
channelLinkIndex = channelsProvider.fieldNameIndex(QSWATTopology._LINKNO)
channelDsLinkIndex = channelsProvider.fieldNameIndex(QSWATTopology._DSLINKNO)
channelDsNodeIndex = channelsProvider.fieldNameIndex(QSWATTopology._DSNODEID)
channelDrainAreaIndex = channelsProvider.fieldNameIndex(QSWATTopology._DRAINAREA)
channelWSNOIndex = channelsProvider.fieldNameIndex(QSWATTopology._WSNO)
channelLakeInIndex = channelsProvider.fieldNameIndex(QSWATTopology._LAKEIN)
channelLakeOutIndex = channelsProvider.fieldNameIndex(QSWATTopology._LAKEOUT)
channelLakeWithinIndex = channelsProvider.fieldNameIndex(QSWATTopology._LAKEWITHIN)
channelLakeMainIndex = channelsProvider.fieldNameIndex(QSWATTopology._LAKEMAIN)
fields = []
if channelLakeInIndex < 0:
fields.append(QgsField(QSWATTopology._LAKEIN, QVariant.Int))
if channelLakeOutIndex < 0:
fields.append(QgsField(QSWATTopology._LAKEOUT, QVariant.Int))
if channelLakeWithinIndex < 0:
fields.append(QgsField(QSWATTopology._LAKEWITHIN, QVariant.Int))
if channelLakeMainIndex < 0:
fields.append(QgsField(QSWATTopology._LAKEMAIN, QVariant.Int))
if len(fields) > 0:
if not channelsProvider.addAttributes(fields):
QSWATUtils.error('Cannot add lake fields to channels shapefile', self.isBatch)
return False
channelsLayer.updateFields()
channelLakeInIndex = channelsProvider.fieldNameIndex(QSWATTopology._LAKEIN)
channelLakeOutIndex = channelsProvider.fieldNameIndex(QSWATTopology._LAKEOUT)
channelLakeWithinIndex = channelsProvider.fieldNameIndex(QSWATTopology._LAKEWITHIN)
channelLakeMainIndex = self.getIndex(channelsLayer, QSWATTopology._LAKEMAIN)
self.chLinkIntoLake = dict()
self.chLinkInsideLake = dict()
self.chLinkFromLake = dict()
self.outletsInLake = dict()
lakeAttMap = dict()
for lake in lakesProvider.getFeatures():
lakeGeom = lake.geometry()
lakeRect = lakeGeom.boundingBox()
lakeId = lake[lakeIdIndex]
if lakeResIndex < 0:
waterRole = QSWATTopology._RESTYPE
else:
waterRole = lake[lakeResIndex]
lakeData = LakeData(lakeGeom.area(), lakeGeom.centroid().asPoint(), waterRole)
totalElevation = 0
            # the area removed from channel basins that intersect with the lake
chBasinWaterArea = 0
attMap = dict()
geomMap = dict()
for sub in subsProvider.getFeatures():
subGeom = sub.geometry()
if QSWATTopology.intersectsPoly(subGeom, lakeGeom, lakeRect):
# TODO: sub inside lake
subId = sub.id()
area1 = subGeom.area()
newGeom = subGeom.difference(lakeGeom)
area2 = newGeom.area()
if area2 < area1:
QSWATUtils.loginfo('Lake {0} overlaps subbasin {1}: area reduced from {2} to {3}'.format(lakeId, subId, area1, area2))
geomMap[subId] = newGeom
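                        # the area field is stored in hectares, hence dividing the square-metre geometry area by 1E4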
attMap[subId] = {subsAreaIndex: newGeom.area() / 1E4}
if not subsProvider.changeAttributeValues(attMap):
QSWATUtils.error('Failed to update subbasins attributes in {0}'.format(gv.subbasinsFile), self.isBatch, reportErrors=reportErrors)
for err in subsProvider.errors():
QSWATUtils.loginfo(err)
return False
if not subsProvider.changeGeometryValues(geomMap):
QSWATUtils.error('Failed to update subbasin geometries in {0}'.format(gv.subbasinsFile), self.isBatch, reportErrors=reportErrors)
for err in subsProvider.errors():
QSWATUtils.loginfo(err)
return False
# for some reason doing both changes at once fails
# if not subsProvider.changeFeatures(attMap, geomMap):
# QSWATUtils.error(u'Failed to update {0}'.format(gv.subbasinsFile), self.isBatch)
# for err in subsProvider.errors():
# QSWATUtils.loginfo(err)
# return
attMap = dict()
geomMap = dict()
# map of polygon id to area that is part of the lake
channelAreaChange = dict()
for chBasin in chBasinsProvider.getFeatures():
chBasinGeom = chBasin.geometry()
polyId = chBasin[chBasinsPolyIndex]
# if area reduced to zero because inside another lake, geometry is None
if chBasinGeom is not None and not chBasinGeom.disjoint(lakeGeom):
chBasinId = chBasin.id()
area1 = chBasinGeom.area()
newGeom = chBasinGeom.difference(lakeGeom)
area2 = newGeom.area()
if area2 < area1:
QSWATUtils.loginfo('Lake {0} overlaps channel basin {1}: area reduced from {2} to {3}'.format(lakeId, polyId, area1, area2))
chBasinWaterArea += area1 - area2
geomMap[chBasinId] = newGeom
attMap[chBasinId] = {chBasinsAreaIndex: newGeom.area() / 1E4}
channelAreaChange[polyId] = area1 - area2
if not chBasinsProvider.changeAttributeValues(attMap):
QSWATUtils.error('Failed to update channel basin attributes in {0}'.format(gv.wshedFile), self.isBatch, reportErrors=reportErrors)
for err in chBasinsProvider.errors():
QSWATUtils.loginfo(err)
return False
if not chBasinsProvider.changeGeometryValues(geomMap):
QSWATUtils.error('Failed to update channel basin geometries in {0}'.format(gv.wshedFile), self.isBatch, reportErrors=reportErrors)
for err in chBasinsProvider.errors():
QSWATUtils.loginfo(err)
return False
attMap = dict()
currentDrainArea = 0
# first pass through channels: collect inflowing and outflowing channels from DsNodes in lakeInlets and lakeOutlets
for channel in channelsProvider.getFeatures():
link = channel[channelLinkIndex]
dsLink = channel[channelDsLinkIndex]
dsNode = channel[channelDsNodeIndex]
if dsNode > 0:
if dsNode in self.lakeInlets[lakeId]:
inflowData = self.getReachData(channel.geometry(), demLayer)
lakeData.inChLinks[link] = (dsNode, QgsPointXY(inflowData.lowerX, inflowData.lowerY), inflowData.lowerZ)
if dsLink >= 0:
lakeData.lakeChLinks.add(dsLink)
self.chLinkInsideLake[dsLink] = lakeId
self.chLinkIntoLake[link] = lakeId
totalElevation += inflowData.lowerZ
channelId = channel.id()
chBasin = channel[channelWSNOIndex]
areaChange = channelAreaChange.get(chBasin, 0)
drainArea = channel[channelDrainAreaIndex] - areaChange
attMap[channelId] = {channelDrainAreaIndex: drainArea}
elif dsNode in self.lakeOutlets[lakeId]:
outflowData = self.getReachData(channel.geometry(), demLayer)
outlet = QgsPointXY(outflowData.lowerX, outflowData.lowerY)
replace = True
if dsLink >= 0:
if lakeData.outPoint[2] is not None:
# choose point with larger drain area
newDrainArea = channel[channelDrainAreaIndex]
if newDrainArea > currentDrainArea:
currentDrainArea = newDrainArea
if lakeData.outChLink >= 0:
lakeData.otherOutChLinks.add(lakeData.outChLink)
else:
replace = False
if replace:
chBasin = channel[channelWSNOIndex]
subbasin = self.chBasinToSubbasin[chBasin]
lakeData.outPoint = (subbasin, dsNode, outlet, outflowData.lowerZ)
lakeData.outChLink = dsLink
else:
lakeData.otherOutChLinks.add(dsLink)
self.chLinkFromLake[dsLink] = lakeId
lakeData.lakeChLinks.add(link)
self.chLinkInsideLake[link] = lakeId
            # check to see if a watershed outlet was marked inside the lake
# and if so try to move it to the lake perimeter. Else leave it as an internal outlet.
# we don't need to exclude outlets created to split channels flowing into and out of lake
# because the outlets map is made from the streams before lake inlets and outlets are added to the snap file
# and the augmented snapfile is only used to make channels
for subbasin, (pointId, pt, ch) in self.outlets.items():
if QSWATTopology.polyContains(pt, lakeGeom, lakeRect) and \
QSWATTopology.isWatershedOutlet(pointId, channelsProvider, channelDsLinkIndex, channelDsNodeIndex):
if not os.path.exists(gv.pFile):
QSWATUtils.error('Cannot find D8 flow directions file {0}'.format(gv.pFile), self.isBatch, reportErrors=reportErrors)
break
# need to give different id to outPoint, since this is used to make the reservoir point
# which will then route to the subbasin outlet
# can use outlet point id if already created
if lakeData.outPoint[1] >= 0:
newPointId = lakeData.outPoint[1]
else:
self.pointId += 1
newPointId = self.pointId
elev = QSWATTopology.valueAtPoint(pt, demLayer)
lakeData.outPoint = (subbasin, newPointId, pt, elev)
# maximum number of steps approximates to the threshold for snapping points expressed as number of DEM cells
maxSteps = 5 if self.dx == 0 else int(snapThreshold / self.dx + 0.5)
lakeOutlet, found = QSWATTopology.movePointToPerimeter(pt, lakeGeom, gv.pFile, maxSteps)
if found:
if lakeData.outPoint[2] is not None:
QSWATUtils.information('User marked outlet {0} chosen as main outlet for lake {1}'.
format(pointId, lakeId), gv.isBatch)
if lakeData.outChLink >= 0:
lakeData.otherOutChLinks.add(lakeData.outChLink)
elev = QSWATTopology.valueAtPoint(lakeOutlet, demLayer)
lakeData.outPoint = (subbasin, newPointId, lakeOutlet, elev)
QSWATUtils.loginfo('Outlet of lake {0} set to ({1}, {2})'.
format(lakeId, int(lakeOutlet.x()), int(lakeOutlet.y())))
# update outlets map
self.outlets[subbasin] = (pointId, lakeOutlet, ch)
else:
QSWATUtils.loginfo('Outlet of lake {0} set to internal point ({1}, {2})'.
format(lakeId, int(lakeOutlet.x()), int(lakeOutlet.y())))
lakeData.outChLink = -1
break
# second pass through channels: collect channels within lake: i.e. both ends in lake
# and set LakeIn, LakeOut, LakeWithin fields
for channel in channelsProvider.getFeatures():
link = channel[channelLinkIndex]
channelId = channel.id()
channelData = None
channelGeom = None
lakeIn = self.chLinkIntoLake.get(link, 0)
lakeOut = self.chLinkFromLake.get(link, 0)
lakeWithin = self.chLinkInsideLake.get(link, 0)
if link not in self.chLinkIntoLake and link not in self.chLinkFromLake and link not in self.chLinkInsideLake:
channelGeom = channel.geometry()
channelData = self.getReachData(channelGeom, None)
pt1 = QgsPointXY(channelData.lowerX, channelData.lowerY)
pt2 = QgsPointXY(channelData.upperX, channelData.upperY)
if QSWATTopology.polyContains(pt1, lakeGeom, lakeRect) and QSWATTopology.polyContains(pt2, lakeGeom, lakeRect):
lakeData.lakeChLinks.add(link)
self.chLinkInsideLake[link] = lakeId
lakeWithin = lakeId
lakeAttMap[channelId] = {channelLakeInIndex: lakeIn, channelLakeOutIndex: lakeOut,
channelLakeWithinIndex: lakeWithin}
if link in lakeData.lakeChLinks:
# remove the channel's point source
del self.chPointSources[link]
# if the lake has an outlet channel with a drain area less than LAKEOUTLETCHANNELAREA percent of the lake area
# make its channel internal
outLinkId = None
outLink = lakeData.outChLink
outBasin = -1
dsOutLink = -1
if outLink >= 0:
request = QgsFeatureRequest().setFlags(QgsFeatureRequest.NoGeometry).setSubsetOfAttributes([channelLinkIndex,
channelWSNOIndex,
channelDsLinkIndex])
for channel in channelsProvider.getFeatures(request):
if channel[channelLinkIndex] == outLink:
outLinkId = channel.id()
outBasin = channel[channelWSNOIndex]
dsOutLink = channel[channelDsLinkIndex]
break
if outBasin >= 0:
                # threshold in ha: LAKEOUTLETCHANNELAREA percent of lake area
threshold = (lakeData.area / 1E6) * Parameters._LAKEOUTLETCHANNELAREA
request = QgsFeatureRequest().setFlags(QgsFeatureRequest.NoGeometry).setSubsetOfAttributes([chBasinsPolyIndex, chBasinsAreaIndex])
                for chBasin in chBasinsProvider.getFeatures(request):
if chBasin[chBasinsPolyIndex] == outBasin:
areaHa = chBasin[chBasinsAreaIndex]
if areaHa < threshold:
# move outlet channel inside lake
lakeData.lakeChLinks.add(outLink)
lakeData.outChLink = dsOutLink
del self.chLinkFromLake[outLink]
self.chLinkInsideLake[outLink] = lakeId
# mark it as within as well as being the outlet (already set)
lakeAttMap[outLinkId][channelLakeWithinIndex] = lakeId
# check if this point now inside the lake is a subbasin outlet
subbasin = self.chBasinToSubbasin[outBasin]
(_, _, outChannel) = self.outlets[subbasin]
if outChannel == outLink:
# subbasin outlet has moved inside the lake
self.outletsInLake[subbasin] = lakeId
QSWATUtils.loginfo('Channel link {0} channel basin {1} moved inside lake {2}'.
format(outLink, outBasin, lakeId))
# remove the channel's point source
del self.chPointSources[outLink]
if dsOutLink >= 0:
self.chLinkFromLake[dsOutLink] = lakeId
break
if lakeData.outPoint[2] is None:
QSWATUtils.error('Failed to find outlet for lake {0}'.format(lakeId), self.isBatch, reportErrors=reportErrors)
return False
if lakeData.outChLink >= 0:
chId = -1
# find the channel's id
request = QgsFeatureRequest().setFlags(QgsFeatureRequest.NoGeometry).setSubsetOfAttributes([channelLinkIndex])
for channel in channelsProvider.getFeatures(request):
if channel[channelLinkIndex] == lakeData.outChLink:
chId = channel.id()
break
if chId >= 0:
lakeAttMap[chId][channelLakeMainIndex] = lakeId
else:
QSWATUtils.error('Internal error: unable to find main outlet channel {0}'.
format(lakeData.outChLink), self.isBatch, reportErrors=reportErrors)
return False
numInflows = len(lakeData.inChLinks)
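            # lake elevation is estimated as the mean of its inflow elevations, falling back to the outlet elevation if there are no inflows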
meanElevation = totalElevation / numInflows if numInflows > 0 else lakeData.outPoint[3]
lakeData.elevation = meanElevation
QSWATUtils.loginfo('Lake {0} has outlet on channel {1}, other outlets on channels {2}, inlets on channels {3} and contains channels {4}'
.format(lakeId, lakeData.outChLink, lakeData.otherOutChLinks,
list(lakeData.inChLinks.keys()), lakeData.lakeChLinks))
OK = channelsProvider.changeAttributeValues(attMap)
OK = OK and channelsProvider.changeAttributeValues(lakeAttMap)
if not OK:
QSWATUtils.error('Failed to update channel attributes in {0}'.format(gv.channelFile), self.isBatch, reportErrors=reportErrors)
for err in channelsProvider.errors():
QSWATUtils.loginfo(err)
return False
self.lakesData[lakeId] = lakeData
lakeArea = lakeData.area
percentChBasinWater = chBasinWaterArea / lakeArea * 100
QSWATUtils.loginfo('Lake {0} has area {1} and channel basin water area {2}: {3}%'.format(lakeId, lakeArea, chBasinWaterArea, percentChBasinWater))
# intPercent = int(percentChBasinWater + 0.5)
# if percentChBasinWater < 99:
# QSWATUtils.information(u'WARNING: Only {0}% of the area of lake {1} is accounted for in your watershed. There may be other channels flowing into it'
# .format(intPercent, lakeId), self.isBatch)
if len(self.lakesData) == 0:
QSWATUtils.error('No lakes found in {0}'.format(QSWATUtils.layerFilename(lakesLayer)), self.isBatch, reportErrors=reportErrors)
return False
chBasinsLayer.triggerRepaint()
streamsLayer.triggerRepaint()
channelsLayer.triggerRepaint()
return True
@staticmethod
def isWatershedOutlet(pointId, channelsProvider, dsLinkIndex, dsNodeIndex):
"""Return true if there is a channel with dsNode equal to pointId and with dsLink -1."""
request = QgsFeatureRequest().setFlags(QgsFeatureRequest.NoGeometry).setSubsetOfAttributes([dsLinkIndex, dsNodeIndex])
for link in channelsProvider.getFeatures(request):
if link[dsNodeIndex] == pointId and link[dsLinkIndex] == -1:
return True
return False
def isOutlet(self, pointId, outletsLayer):
"""Return true if outletsLayer contains an outlet point with id pointId."""
idIndex = self.getIndex(outletsLayer, QSWATTopology._ID, ignoreMissing=True)
inletIndex = self.getIndex(outletsLayer, QSWATTopology._INLET, ignoreMissing=True)
resIndex = self.getIndex(outletsLayer, QSWATTopology._RES, ignoreMissing=True)
if idIndex < 0 or inletIndex < 0 or resIndex < 0:
return False
for point in outletsLayer.getFeatures():
if point[idIndex] == pointId and point[inletIndex] == 0 and point[resIndex] == 0:
return True
return False
def addGridLakes(self, gridLayer, channelsLayer, demLayer, gv, reportErrors=True):
"""Add lakes when using grid model. Return number of lakes (which may be zero) or -1 if error."""
gridProvider = gridLayer.dataProvider()
gridPolyIndex = gridProvider.fieldNameIndex(QSWATTopology._POLYGONID)
gridDownIndex = gridProvider.fieldNameIndex(QSWATTopology._DOWNID)
gridAreaIndex = gridProvider.fieldNameIndex(Parameters._AREA)
gridLakeIdIndex = gridProvider.fieldNameIndex(QSWATTopology._LAKEID)
if gridLakeIdIndex < 0:
# can be no lakes
return 0
gridResIndex = gridProvider.fieldNameIndex(QSWATTopology._RES)
channelsProvider = channelsLayer.dataProvider()
channelLinkIndex = channelsProvider.fieldNameIndex(QSWATTopology._LINKNO)
channelDsLinkIndex = channelsProvider.fieldNameIndex(QSWATTopology._DSLINKNO)
channelWSNOIndex = channelsProvider.fieldNameIndex(QSWATTopology._WSNO)
        # the drainage field may not exist if we are using grid or table drainage: deal with this later
streamDrainageIndex = channelsProvider.fieldNameIndex(QSWATTopology._DRAINAGE)
polysIntoLake = dict()
polysInsidelake = dict()
polysFromLake = dict()
self.chLinkIntoLake = dict()
self.chLinkInsideLake = dict()
self.chLinkFromLake = dict()
request = QgsFeatureRequest().setFlags(QgsFeatureRequest.NoGeometry).setSubsetOfAttributes([gridPolyIndex, gridLakeIdIndex])
# first make map poly -> lake id
polyToLake = dict()
for cell in gridProvider.getFeatures(request):
lakeId = cell[gridLakeIdIndex]
if lakeId != NULL:
polyToLake[cell[gridPolyIndex]] = lakeId
# make sure waterbody id is set to maximum lake id in case using existing grid
self.waterBodyId = max(self.waterBodyId, lakeId)
if len(polyToLake) == 0:
# no lakes
return 0
# data for calculating centroid
# map of lake id to (area, x moment of area, y moment)
lakeAreaData = dict()
for cell in gridProvider.getFeatures():
waterRole = cell[gridResIndex]
poly = cell[gridPolyIndex]
downPoly = cell[gridDownIndex]
sourceLake = cell[gridLakeIdIndex]
targetLake = polyToLake.get(downPoly, None)
if sourceLake != NULL:
if sourceLake not in lakeAreaData:
lakeAreaData[sourceLake] = (waterRole, 0, 0, 0)
area = cell[gridAreaIndex] * 1E4 # convert ha to m^2
centre, _, _ = QSWATUtils.centreGridCell(cell)
_, totalArea, xMoment, yMoment = lakeAreaData[sourceLake]
lakeAreaData[sourceLake] = (waterRole, totalArea + area, xMoment + area * centre.x(), yMoment + area * centre.y())
if targetLake == sourceLake:
# channel links two lake cells within lake
polysInsidelake[poly] = sourceLake
else:
# exit channel
polysFromLake[poly] = sourceLake
elif targetLake is not None:
polysIntoLake[poly] = targetLake
totalElevation = dict()
# map of lake id to possible exit channels
# will choose one with largest drainage
exitData = dict()
for lakeId, (waterRole, area, xMoment, yMoment) in lakeAreaData.items():
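            # the centroid is the area-weighted mean of the grid cell centres: first moments divided by total area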
centroid = QgsPointXY(float(xMoment) / area, float(yMoment) / area)
self.lakesData[lakeId] = LakeData(area, centroid, waterRole)
totalElevation[lakeId] = 0
exitData[lakeId] = dict()
# convert wsnos to links and complete LakesData
# get maximum chLink and create downChannels map in case drainage needs calculating
self.downChannels = dict()
maxChLink = 0
for channel in channelsProvider.getFeatures():
chLink = channel[channelLinkIndex]
maxChLink = max(maxChLink, chLink)
dsChLink = channel[channelDsLinkIndex]
self.downChannels[chLink] = dsChLink
wsno = channel[channelWSNOIndex]
lakeIdInto = polysIntoLake.get(wsno, 0)
if lakeIdInto > 0:
self.chLinkIntoLake[chLink] = lakeIdInto
# since this is a grid model the grid cells form different subbasins and there will be a suitable outlet
# point already stored in the outlets map
pointId, point, _ = self.outlets[wsno]
elev = QSWATTopology.valueAtPoint(point, demLayer)
self.lakesData[lakeIdInto].inChLinks[chLink] = (pointId, point, elev)
totalElevation[lakeIdInto] += elev
continue
lakeIdFrom = polysFromLake.get(wsno, 0)
if lakeIdFrom > 0:
# allow for no drainage field
drainage = -1 if streamDrainageIndex < 0 else channel[streamDrainageIndex]
data = self.getReachData(channel.geometry(), demLayer)
exitData[lakeIdFrom][chLink] = (wsno, drainage, QgsPointXY(data.upperX, data.upperY), data.upperZ)
continue
lakeIdInside = polysInsidelake.get(wsno, 0)
if lakeIdInside > 0:
self.chLinkInsideLake[chLink] = lakeIdInside
self.lakesData[lakeIdInside].lakeChLinks.add(chLink)
continue
# check if we need to calculate drainage: no drainage field and more than one exit for at least one lake
needDrainage = False
if streamDrainageIndex < 0:
for data in exitData.values():
if len(data) > 1:
needDrainage = True
break
if needDrainage:
self.drainAreas = zeros((maxChLink + 1), dtype=float)
gridCellArea = self.dx * self.dy * gv.gridSize * gv.gridSize
self.setGridDrainageAreas(maxChLink, gridCellArea)
# find outlet with largest drainage and mark as THE outlet
for lakeId, data in exitData.items():
            # set maxDrainage less than the -1 value used for missing drainage so that the first exit link always registers
            # (if there is only one exit for each lake, needDrainage will be false and drainage values may be -1)
maxDrainage = -2
exLink = -1
exWsno = -1
exPoint = None
exElev = 0
for chLink, (wsno, drainage, pt, elev) in data.items():
if needDrainage:
drainage = float(self.drainAreas[chLink]) # use float to convert from numpy float
if drainage > maxDrainage:
maxDrainage = drainage
exLink = chLink
exWsno = wsno
exPoint = pt
exElev = elev
if exLink < 0:
QSWATUtils.error('There seems to be no outflow stream for lake {0}'.format(lakeId), gv.isBatch, reportErrors=reportErrors)
return -1
else:
others = list(data.keys())
others.remove(exLink)
if others != []:
QSWATUtils.information(
"""Warning: Stream link {0} chosen as main outlet for all of lake {1}.
Other possible outlet stream links are {2}.
""".format(exLink, lakeId, str([int(link) for link in others])), gv.isBatch, reportErrors=reportErrors)
self.chLinkFromLake[exLink] = lakeId
self.lakesData[lakeId].outChLink = exLink
for chLink in others:
self.chLinkFromLake[chLink] = lakeId
self.lakesData[lakeId].otherOutChLinks.add(chLink)
self.pointId += 1
self.lakesData[lakeId].outPoint = (exWsno, self.pointId, exPoint, exElev)
for lakeId, totalElev in totalElevation.items():
numInLinks = len(self.lakesData[lakeId].inChLinks)
if numInLinks > 0:
self.lakesData[lakeId].elevation = float(totalElev) / numInLinks
else:
self.lakesData[lakeId].elevation = self.lakesData[lakeId].outPoint[3]
return len(self.lakesData)
def addExistingLakes(self, lakesLayer, channelsLayer, demLayer, gv, reportErrors=True):
"""Add lakes data to existing non-grid model.
We ignore DsNodeIds for inflowing and outflowing channels since these were
probably only added previously to the snapped inlets/outlets file
        and inlets/outlets are of little use in any case with existing watersheds.
lakeIdIndex = self.getIndex(lakesLayer, QSWATTopology._LAKEID)
lakeResIndex = self.getIndex(lakesLayer, QSWATTopology._RES)
channelLinkIndex = self.getIndex(channelsLayer, QSWATTopology._LINKNO)
channelDsLinkIndex = self.getIndex(channelsLayer, QSWATTopology._DSLINKNO)
channelBasinIndex = self.getIndex(channelsLayer, QSWATTopology._BASINNO)
channelLakeInIndex = self.getIndex(channelsLayer, QSWATTopology._LAKEIN)
channelLakeOutIndex = self.getIndex(channelsLayer, QSWATTopology._LAKEOUT)
channelLakeWithinIndex = self.getIndex(channelsLayer, QSWATTopology._LAKEWITHIN)
channelLakeMainIndex = self.getIndex(channelsLayer, QSWATTopology._LAKEMAIN)
if lakeIdIndex < 0 or channelLinkIndex < 0 or channelDsLinkIndex < 0 or channelBasinIndex < 0 or \
channelLakeInIndex < 0 or channelLakeOutIndex < 0 or channelLakeWithinIndex < 0 or channelLakeMainIndex < 0:
return False
self.lakesData = dict()
for lake in lakesLayer.getFeatures():
lakeId = lake[lakeIdIndex]
waterRole = lake[lakeResIndex]
if lakeId in self.lakesData:
QSWATUtils.error('Lake identifier {0} occurs twice in {1}. Lakes not added.'.format(lakeId, QSWATUtils.layerFilename(lakesLayer)),
gv.isBatch, reportErrors=reportErrors)
self.lakesData = dict()
return False
# to stop reuse of the same water body id
self.waterBodyId = max(self.waterBodyId, lakeId)
geom = lake.geometry()
area = geom.area()
centroid = geom.centroid().asPoint()
self.lakesData[lakeId] = LakeData(area, centroid, waterRole)
self.chLinkIntoLake = dict()
self.chLinkInsideLake = dict()
self.chLinkFromLake = dict()
self.outletsInLake = dict()
for channel in channelsLayer.getFeatures():
chLink = channel[channelLinkIndex]
dsLink = channel[channelDsLinkIndex]
lakeIn = channel[channelLakeInIndex]
lakeOut = channel[channelLakeOutIndex]
lakeWithin = channel[channelLakeWithinIndex]
lakeMain = channel[channelLakeMainIndex]
reachData = None
geom = None
if lakeIn != NULL and lakeIn > 0:
data = self.lakesData.get(lakeIn, None)
if data is None:
QSWATUtils.error('Channel with LINKNO {0} flows into lake {1} not defined in {2}. Lakes not added.'.
format(chLink, lakeIn, QSWATUtils.layerFilename(lakesLayer)),
gv.isBatch, reportErrors=reportErrors)
self.lakesData = dict()
return False
geom = channel.geometry()
reachData = self.getReachData(geom, demLayer)
point = QgsPointXY(reachData.lowerX, reachData.lowerY)
elev = reachData.lowerZ
data.elevation += elev
self.pointId += 1
data.inChLinks[chLink] = (self.pointId, point, elev)
self.chLinkIntoLake[chLink] = lakeIn
elif lakeWithin != NULL and lakeWithin > 0:
data = self.lakesData.get(lakeWithin, None)
if data is None:
QSWATUtils.error('Channel with LINKNO {0} inside lake {1} not defined in {2}. Lakes not added.'.
format(chLink, lakeWithin, QSWATUtils.layerFilename(lakesLayer)),
gv.isBatch, reportErrors=reportErrors)
self.lakesData = dict()
return False
data.lakeChLinks.add(chLink)
self.chLinkInsideLake[chLink] = lakeWithin
if dsLink < 0:
# watershed outlet
geom = channel.geometry()
reachData = self.getReachData(geom, demLayer)
subbasin = channel[channelBasinIndex]
data.outChLink = -1
point = QgsPointXY(reachData.lowerX, reachData.lowerY)
elev = reachData.lowerZ
self.pointId += 1
data.outPoint = (subbasin, self.pointId, point, elev)
self.outletsInLake[subbasin] = lakeWithin
if lakeOut != NULL and lakeOut > 0:
data = self.lakesData.get(lakeOut, None)
if data is None:
QSWATUtils.error('Channel with LINKNO {0} flows out of lake {1} not defined in {2}. Lakes not added.'.
format(chLink, lakeOut, QSWATUtils.layerFilename(lakesLayer)),
gv.isBatch, reportErrors=reportErrors)
self.lakesData = dict()
return False
if lakeMain != NULL and lakeMain == lakeOut:
# lake's main outlet
# channel leaves lake at upper end
geom = channel.geometry()
reachData = self.getReachData(geom, demLayer)
subbasin = channel[channelBasinIndex]
data.outChLink = chLink
point = QgsPointXY(reachData.upperX, reachData.upperY)
elev = reachData.upperZ
self.pointId += 1
data.outPoint = (subbasin, self.pointId, point, elev)
self.chLinkFromLake[chLink] = lakeOut
else:
# other outlet
data.otherOutChLinks.add(chLink)
# define lake elevation
for data in self.lakesData.values():
numInflows = len(data.inChLinks)
data.elevation = data.outPoint[3] if numInflows == 0 else float(data.elevation) / numInflows
return True
@staticmethod
def intersectsPoly(geom, polyGeom, polyRect):
"""Returns true if any part of geom intersects any part of polyGeom, which has associated rectangle polyRect."""
geoRect = geom.boundingBox()
if QSWATTopology.disjointBoxes(geoRect, polyRect):
return False
else:
return geom.intersects(polyGeom)
@staticmethod
def disjointBoxes(box1, box2):
"""Return True if the boxes are disjoint."""
return box1.xMinimum() > box2.xMaximum() or \
box1.xMaximum() < box2.xMinimum() or \
box1.yMinimum() > box2.yMaximum() or \
box1.yMaximum() < box2.yMinimum()
@staticmethod
def polyContains(point, polyGeom, polyRect):
"""Return true if point within polyGeom, which has associated rectangle polyRect."""
if polyRect.xMinimum() < point.x() < polyRect.xMaximum() and \
polyRect.yMinimum() < point.y() < polyRect.yMaximum():
return polyGeom.contains(point)
else:
return False
def saveLakesData(self, db):
"""Save lakes data in project database."""
with db.conn as conn:
if not conn:
return
curs = conn.cursor()
lakesTable = 'LAKESDATA'
clearSQL = 'DROP TABLE IF EXISTS ' + lakesTable
curs.execute(clearSQL)
curs.execute(db._CREATELAKESDATA)
linksTable = 'LAKELINKS'
clearSQL = 'DROP TABLE IF EXISTS ' + linksTable
curs.execute(clearSQL)
curs.execute(db._CREATELAKELINKS)
basinsTable = 'LAKEBASINS'
clearSQL = 'DROP TABLE IF EXISTS ' + basinsTable
curs.execute(clearSQL)
curs.execute(db._CREATELAKEBASINS)
for lakeId, lakeData in self.lakesData.items():
curs.execute(db._INSERTLAKESDATA, (lakeId, lakeData.outPoint[0], lakeData.waterRole, lakeData.area, lakeData.elevation, lakeData.outChLink,
lakeData.outPoint[1], lakeData.outPoint[2].x(), lakeData.outPoint[2].y(),
lakeData.outPoint[3], lakeData.centroid.x(), lakeData.centroid.y()))
# QSWATUtils.loginfo(str(lakeData.inChLinks.keys()))
# QSWATUtils.loginfo(str(lakeData.lakeChLinks))
for chLink, (pointId, pt, elev) in lakeData.inChLinks.items():
try:
curs.execute(db._INSERTLAKELINKS, (chLink, lakeId, True, False, pointId, pt.x(), pt.y(), elev))
except:
QSWATUtils.error('Failed to add in channel link {0}'.format(chLink), self.isBatch)
for chLink in lakeData.lakeChLinks:
try:
curs.execute(db._INSERTLAKELINKS, (chLink, lakeId, False, True, None, None, None, None))
except:
QSWATUtils.error('Failed to add inside channel link {0}'.format(chLink), self.isBatch)
for chLink in lakeData.otherOutChLinks:
try:
curs.execute(db._INSERTLAKELINKS, (chLink, lakeId, False, False, None, None, None, None))
except:
QSWATUtils.error('Failed to add other out channel link {0}'.format(chLink), self.isBatch)
for subbasin, lakeId in self.outletsInLake.items():
curs.execute(db._INSERTLAKEBASINS, (subbasin, lakeId))
db.hashDbTable(conn, lakesTable)
db.hashDbTable(conn, linksTable)
db.hashDbTable(conn, basinsTable)
def readLakesData(self, db):
"""Read lakes data from project database. Return true if data read OK, false if no data or error."""
with db.conn as conn:
if not conn:
return False
self.lakesData.clear()
self.chLinkIntoLake.clear()
self.chLinkInsideLake.clear()
self.chLinkFromLake.clear()
self.outletsInLake.clear()
curs = conn.cursor()
lakesTable = 'LAKESDATA'
linksTable = 'LAKELINKS'
basinsTable = 'LAKEBASINS'
lakeSql = db.sqlSelect(lakesTable, '*', '', '')
linksSql = db.sqlSelect(linksTable, '*', '', 'lakeid=?')
basinsSql = db.sqlSelect(basinsTable, '*', '', '')
try: # in case old database without these tables
# without fetchall this only reads first row. Strange
for lakeRow in curs.execute(lakeSql).fetchall():
lakeId = lakeRow['id']
self.waterBodyId = max(self.waterBodyId, lakeId)
                    self.lakesData[lakeId] = LakeData(lakeRow['area'], QgsPointXY(lakeRow['centroidx'], lakeRow['centroidy']), lakeRow['role'])
outChLink = lakeRow['outlink']
self.lakesData[lakeId].outChLink = outChLink
self.chLinkFromLake[outChLink] = lakeId
self.lakesData[lakeId].outPoint = (lakeRow['subbasin'], lakeRow['outletid'],
QgsPointXY(lakeRow['outletx'], lakeRow['outlety']), lakeRow['outletelev'])
self.lakesData[lakeId].centroid = QgsPointXY(lakeRow['centroidx'], lakeRow['centroidy'])
self.lakesData[lakeId].elevation = lakeRow['meanelev']
for linkRow in curs.execute(linksSql, (lakeId,)):
chLink = linkRow['linkno']
if linkRow['inside']:
self.lakesData[lakeId].lakeChLinks.add(chLink)
self.chLinkInsideLake[chLink] = lakeId
elif linkRow['inlet']:
self.lakesData[lakeId].inChLinks[chLink] = (linkRow['inletid'],
QgsPointXY(linkRow['inletx'], linkRow['inlety']), linkRow['inletelev'])
self.chLinkIntoLake[chLink] = lakeId
else:
self.lakesData[lakeId].otherOutChLinks.add(chLink)
self.chLinkFromLake[chLink] = lakeId
for basinRow in curs.execute(basinsSql).fetchall():
self.outletsInLake[basinRow['subbasin']] = basinRow['lakeid']
return len(self.lakesData) > 0
except:
QSWATUtils.loginfo('Reading lakes data failed: {0}'.format(traceback.format_exc()))
return False
def getDownChannel(self, channel):
"""Get downstream channel, skipping zero-length channels.
Returns -1 if the channel flows into a lake."""
if channel in self.chLinkIntoLake:
return -1
while True:
dsChannel = self.downChannels[channel]
if dsChannel in self.zeroChannels:
channel = dsChannel
else:
return dsChannel
def setChannelBasinAreas(self, gv):
"""
Define map chBasinAreas from channel basin to basin area in sq m.
Done by counting pixels in the wChannel file (as an alternative to creating a shapefile from it).
Not used with grid models.
"""
self.chBasinAreas.clear()
unitArea = self.dx * self.dy # area of one DEM pixel in sq m
completed = False
raster = Raster(gv.channelBasinFile, gv)
while not completed:
try:
# safer to mark complete immediately to avoid danger of endless loop
# only way to loop is then the memory error exception being raised
completed = True
if not raster.open(self.chunkCount):
QSWATUtils.error('Failed to open channel basins raster {0}'.format(gv.channelBasinFile), gv.isBatch)
return
for row in range(raster.numRows):
for col in range(raster.numCols):
val = int(raster.read(row, col))
if val == raster.noData:
continue
elif val in self.chBasinAreas:
self.chBasinAreas[val] += unitArea
else:
self.chBasinAreas[val] = unitArea
raster.close()
except MemoryError:
QSWATUtils.loginfo('Out of memory calculating channel basin areas with chunk count {0}'.format(self.chunkCount))
try:
raster.close()
except Exception:
pass
completed = False
self.chunkCount += 1
def checkAreas(self, subbasinsLayer, gv):
"""
Check that total channel basin areas in each subbasin tally with subbasin areas,
and that the total watershed areas for both tally, i.e. are the same within the area of one DEM pixel.
This is only done in testing ('test' in project name) and is mostly a check that
channels are correctly assigned to subbasins.
Not used with grid models (since channel-subbasin is one-one for grid models).
"""
# TODO: make work with lakes
if 'test' in gv.projName:
unitArea = self.dx * self.dy # area of one DEM pixel in sq m
polyIndex = self.getIndex(subbasinsLayer, QSWATTopology._POLYGONID)
if polyIndex < 0:
return False
areaIndex = self.getIndex(subbasinsLayer, Parameters._AREA, ignoreMissing=True)
totalBasinsArea = 0
totalChannelBasinsArea = 0
# one percent test: using 1 pixel test instead
# def compare(x, y): # return true if both zero or difference < 1% of x
# if x == 0:
# return y == 0
# else:
# return abs(x - y) < 0.01 * x
for poly in subbasinsLayer.getFeatures():
if areaIndex < 0:
basinArea = poly.geometry().area()
else:
basinArea = poly[areaIndex] * 1E4 # areas in subbasins shapefile are in hectares
# need to count areas of basins upstream from inlets because comparison for whole watershed
# by using all channels will not exclude them
totalBasinsArea += basinArea
basin = poly[polyIndex]
chBasins = set()
chLinks = set()
for chLink, chBasin in self.chLinkToChBasin.items():
if basin == self.chBasinToSubbasin.get(chBasin, -1):
chBasins.add(chBasin)
chLinks.add(chLink)
area = 0
for chBasin, chArea in self.chBasinAreas.items():
if chBasin in chBasins:
area += chArea
if abs(basinArea - area) >= unitArea: # not using compare(basinArea, area):
SWATChannels = {self.channelToSWATChannel[chLink] for chLink in chLinks}
SWATBasin = self.subbasinToSWATBasin[basin]
QSWATUtils.error('Basin {0} with area {1} has channels {2} with total area {3}'.
format(SWATBasin, basinArea, SWATChannels, area), True)
return False
# now compare areas for whole watershed
for _, chArea in self.chBasinAreas.items():
totalChannelBasinsArea += chArea
if abs(totalBasinsArea - totalChannelBasinsArea) >= unitArea: # not using compare(totalBasinsArea, totalChannelBasinsArea):
QSWATUtils.error('Watershed area is {0} by adding subbasin areas and {1} by adding channel basin areas'.
format(totalBasinsArea, totalChannelBasinsArea), True)
return False
QSWATUtils.loginfo('Total watershed area is {0}'.format(totalBasinsArea))
return True
@staticmethod
def reachable(chLink, chLinks, us):
"""Return true if chLink is in chLinks or reachable from an item in chLinks via the one-many relation us."""
if chLink in chLinks:
return True
for nxt in chLinks:
if QSWATTopology.reachable(chLink, us.get(nxt, []), us):
return True
return False
#===========================================================================
# def addUpstreamLinks(self, link, us):
# """Add to upstreamFromInlets the links upstream from link."""
# ups = us.get(link, None)
# if ups is not None:
# for up in ups:
# self.upstreamFromInlets.add(up)
# self.addUpstreamLinks(up, us)
#===========================================================================
def setDrainageFromChannels(self, channelLayer, drainAreaIndex):
"""Get drain areas from channelLayer file's DS_Cont_Ar attribute."""
inds = [self.channelIndex, drainAreaIndex]
request = QgsFeatureRequest().setFlags(QgsFeatureRequest.NoGeometry).setSubsetOfAttributes(inds)
for reach in channelLayer.getFeatures(request):
channelLink = reach[self.channelIndex]
self.drainAreas[channelLink] = reach[drainAreaIndex]
def setGridDrainageFromChannels(self, channelLayer):
"""Get drain areas from channelLayer file's Drainage attribute. Return True if successful."""
channelIndex = self.getIndex(channelLayer, QSWATTopology._LINKNO, ignoreMissing=True)
drainageIndex = self.getIndex(channelLayer, QSWATTopology._DRAINAGE, ignoreMissing=True)
if channelIndex < 0 or drainageIndex < 0:
return False
inds = [channelIndex, drainageIndex]
request = QgsFeatureRequest().setFlags(QgsFeatureRequest.NoGeometry).setSubsetOfAttributes(inds)
for reach in channelLayer.getFeatures(request):
channel = reach[channelIndex]
self.drainAreas[channel] = reach[drainageIndex] * 1E6 # drainage attribute is in sq km
return True
def setGridDrainageAreas(self, maxChLink, gridCellArea):
"""Calculate and save grid drain areas in sq km."""
self.drainAreas.fill(gridCellArea)
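# accumulate drainage downstream in topological order (Kahn-style): start from links with no
# incoming links and propagate each link's total to its downstream link once its own total is final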
# number of incoming links for each link
incount = zeros((maxChLink + 1), dtype=int)
for dsLink in self.downChannels.values():
if dsLink >= 0:
incount[dsLink] += 1
# queue contains all links whose drainage areas have been calculated
# i.e. will not increase and can be propagated
queue = [link for link in range(maxChLink + 1) if incount[link] == 0]
while queue:
link = queue.pop(0)
dsLink = self.downChannels.get(link, -1)
if dsLink >= 0:
self.drainAreas[dsLink] += self.drainAreas[link]
incount[dsLink] -= 1
if incount[dsLink] == 0:
queue.append(dsLink)
# incount values should now all be zero
remainder = [link for link in range(maxChLink + 1) if incount[link] > 0]
if remainder:
QSWATUtils.error('Drainage areas incomplete. There is a circularity in links {0!s}'.format(remainder), self.isBatch)
def setDrainageAreas(self, us):
"""
Calculate and save drainAreas.
Not used with grid models.
"""
for chLink, chBasin in self.chLinkToChBasin.items():
self.setLinkDrainageArea(chLink, chBasin, us)
def setLinkDrainageArea(self, chLink, chBasin, us):
"""
Calculate and save drainArea for chLink.
Not used with grid models.
"""
if self.drainAreas[chLink] > 0:
# already done in calculating one further downstream
return
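# recurse: this link's drainage area is its own channel basin area plus the
# drainage areas of all links immediately upstream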
ownArea = self.chBasinAreas.get(chBasin, 0)
upsArea = 0
ups = us.get(chLink, [])
for up in ups:
self.setLinkDrainageArea(up, self.chLinkToChBasin[up], us)
upsArea += self.drainAreas[up]
self.drainAreas[chLink] = ownArea + upsArea
def getDistanceToJoin(self, basin, otherBasin):
"""Get distance in metres to join with otherBasin from outlet of basin. Add to distancesToJoins if necessary."""
link = self.subbasinToStream[basin]
otherLink = self.subbasinToStream[otherBasin]
distances = self.distancesToJoins.get(link, dict())
distance = distances.get(otherLink, -1)
if distance < 0:
distance = self.distanceToJoin(link, otherLink)
distances[otherLink] = distance
self.distancesToJoins[link] = distances
return distance
def distanceToJoin(self, start, otherLink):
"""
Return distance in metres from outlet of link start to point of confluence with
flow from otherLink, or to Outlet if no confluence.
"""
return sum([self.streamLengths[link] for link in self.pathFromJoin(start, otherLink)])
def pathFromJoin(self, start, otherLink):
"""
Return list of downstream links starting with confluence with downstream path from otherLink,
and finishing with link immediately downstream from start.
If otherLink is immediately downstream from start, list will be [otherLink].
If start and otherLink both flow immediately into x, list will be empty.
If there is no confluence, list will be path from outlet to immediately downstream from start.
"""
startPath = self.pathFromOutlet(start)
otherPath = self.pathFromOutlet(otherLink)
return self.removeCommonPrefix(startPath, otherPath)
def pathFromOutlet(self, start):
"""List of links downstream of start, in upstream order."""
result = []
nxt = start
while True:
nxt = self.downStreams.get(nxt, -1)
if nxt == -1:
break
result = [nxt] + result
return result
def removeCommonPrefix(self, path1, path2):
"""Remove from the beginning of path1 the longest sequence that starts path2."""
i = 0
while i < len(path1) and i < len(path2):
if path1[i] == path2[i]:
i += 1
else:
break
return path1[i:]
def addBasinsToChannelFile(self, channelLayer, wStreamFile):
"""
Add basinno field (if necessary) to channels shapefile and populate with values from wStreamFile.
Not done with grid models.
"""
provider = channelLayer.dataProvider()
bsnIdx = self.getIndex(channelLayer, QSWATTopology._BASINNO, ignoreMissing=True)
if bsnIdx < 0:
field = QgsField(QSWATTopology._BASINNO, QVariant.Int)
OK = provider.addAttributes([field])
if not OK:
QSWATUtils.error('Cannot add {0} field to channels shapefile'.format(QSWATTopology._BASINNO), self.isBatch)
return
channelLayer.updateFields()
bsnIdx = self.getIndex(channelLayer, QSWATTopology._BASINNO)
wLayer = QgsRasterLayer(wStreamFile, 'Basins')
lenIdx = self.getIndex(channelLayer, QSWATTopology._LENGTH, ignoreMissing=True)
chsMap = dict()
for feature in provider.getFeatures():
# find a point well into the channel to ensure we are not just outside the basin
geometry = feature.geometry()
if lenIdx < 0:
length = geometry.length()
else:
length = feature[lenIdx]
if length <= 0:
basin = QSWATTopology._NOBASIN # value to indicate a zero-length channel
else:
if geometry.isMultipart():
lines = geometry.asMultiPolyline()
numLines = len(lines)
if numLines == 0:
QSWATUtils.error('Link in channel with id {0} consists of 0 lines'.format(feature.id()), self.isBatch)
return
line = lines[numLines // 2]
else:
line = geometry.asPolyline()
numPoints = len(line)
if numPoints < 2:
QSWATUtils.error('Link in channel with id {0} has fewer than two points'.format(feature.id()), self.isBatch)
return
point = line[numPoints // 2]
basin = QSWATTopology.valueAtPoint(point, wLayer)
fid = feature.id()
chsMap[fid] = dict()
chsMap[fid][bsnIdx] = basin
OK = provider.changeAttributeValues(chsMap)
if not OK:
QSWATUtils.error('Cannot add basin values to channels shapefile', self.isBatch)
return
def writeDrainageFile(self, drainageFile):
"""Write drainage csv file."""
if os.path.exists(drainageFile):
os.remove(drainageFile)
with open(drainageFile, 'w', newline='') as connFile:
writer = csv.writer(connFile)
writer.writerow(['Subbasin', 'DownSubbasin'])
for subbasin, downSubbasin in self.downSubbasins.items():
SWATBasin = self.subbasinToSWATBasin.get(subbasin, -1)
if SWATBasin > 0:
downSWATBasin = self.subbasinToSWATBasin.get(downSubbasin, -1)
writer.writerow([str(SWATBasin), str(downSWATBasin)])
def getReachData(self, geom, demLayer):
"""
Generate ReachData record for reach geometry. demLayer may be None, in which case elevations are set to zero.
"""
firstLine = QSWATTopology.reachFirstLine(geom, self.xThreshold, self.yThreshold)
if firstLine is None or len(firstLine) < 1:
QSWATUtils.error('It looks like your stream shapefile does not obey the single direction rule, that all reaches are either upstream or downstream.', self.isBatch)
return None
lastLine = QSWATTopology.reachLastLine(geom, self.xThreshold, self.yThreshold)
if lastLine is None or len(lastLine) < 1:
QSWATUtils.error('It looks like your stream shapefile does not obey the single direction rule, that all reaches are either upstream or downstream.', self.isBatch)
return None
pStart = firstLine[0]
pFinish = lastLine[-1]
if demLayer is None:
startVal = 0
finishVal = 0
else:
startVal = QSWATTopology.valueAtPoint(pStart, demLayer)
finishVal = QSWATTopology.valueAtPoint(pFinish, demLayer)
if startVal is None or startVal == self.demNodata:
if finishVal is None or finishVal == self.demNodata:
# QSWATUtils.loginfo(u'({0!s},{1!s}) elevation {4} to ({2!s},{3!s}) elevation {5}'
# .format(pStart.x(), pStart.y(), pFinish.x(), pFinish.y(), str(startVal), str(finishVal)))
return None
else:
startVal = finishVal
elif finishVal is None or finishVal == self.demNodata:
finishVal = startVal
if self.outletAtStart:
maxElev = finishVal * self.verticalFactor
minElev = startVal * self.verticalFactor
return ReachData(pFinish.x(), pFinish.y(), maxElev, pStart.x(), pStart.y(), minElev)
else:
minElev = finishVal * self.verticalFactor
maxElev = startVal * self.verticalFactor
return ReachData(pStart.x(), pStart.y(), maxElev, pFinish.x(), pFinish.y(), minElev)
@staticmethod
def gridReachLength(data):
"""Length of reach assuming it is a single straight line."""
dx = data.upperX - data.lowerX
dy = data.upperY - data.lowerY
return math.sqrt(dx * dx + dy * dy)
def tryBasinAsSWATBasin(self, subbasinsLayer, polyIndex, subbasinIndex):
"""Return true if the subbasin field values can be used as SWAT basin numbers.
The basin numbers, if any, can be used if they
are all positive and different.
Also populate subbasinToSWATBasin and SWATBasinToSubbasin if successful, else these are undetermined.
"""
assert polyIndex >= 0 and subbasinIndex >= 0 and len(self.subbasinToSWATBasin) == 0 and len(self.SWATBasinToSubbasin) == 0
SWATBasins = set()
request = QgsFeatureRequest().setFlags(QgsFeatureRequest.NoGeometry).setSubsetOfAttributes([polyIndex, subbasinIndex])
for polygon in subbasinsLayer.getFeatures(request):
subbasin = polygon[polyIndex]
if subbasin in self.upstreamFromInlets:
continue
SWATBasin = polygon[subbasinIndex]
if SWATBasin <= 0:
return False
if SWATBasin in SWATBasins:
return False
self.subbasinToSWATBasin[subbasin] = SWATBasin
self.SWATBasinToSubbasin[SWATBasin] = subbasin
SWATBasins.add(SWATBasin)
return True
@staticmethod
def snapPointToReach(streamLayer, point, threshold, transform, isBatch):
"""Return the nearest point on a stream segment to the input point."""
line, pointIndex = QSWATTopology.nearestVertex(streamLayer, point)
if pointIndex < 0:
QSWATUtils.error('Cannot snap point ({0:.0f}, {1:.0f}) to stream network'.format(point.x(), point.y()), isBatch)
return None
p1, p2 = QSWATTopology.intercepts(line, pointIndex, point)
p = QSWATTopology.nearer(p1, p2, point)
if p is None:
p = line[pointIndex]
# check p is sufficiently near point
if QSWATTopology.distanceMeasure(p, point) <= threshold * threshold:
# before returning p, move it along the stream a little if it is on or close to a '4 corners' position
# since TauDEM can fail to make a boundary or use its id as a DSNODEID if is so positioned
if p1 == p2:
# a point on the line was chosen, which is safe (points on the line are centres of DEM cells)
return p
else:
floatCol = float(p.x() - transform[0]) / transform[1]
intCol = int(floatCol + 0.5)
floatRow = float(p.y() - transform[3]) / transform[5]
intRow = int(floatRow + 0.5)
if abs(floatCol - intCol) < 0.1 and abs(floatRow - intRow) < 0.1:
# move the point towards line[pointIndex] by about half a cell
p3 = QSWATTopology.shiftedPoint(p, line[pointIndex], transform, 0.4)
QSWATUtils.loginfo('({0:.0f},{1:.0f}) shifted to ({2:.0f},{3:.0f})'.format(p.x(), p.y(), p3.x(), p3.y()))
return p3
else:
return p
else:
QSWATUtils.error('Cannot snap point ({0:.0f}, {1:.0f}) to stream network within threshold {2!s}'.format(point.x(), point.y(), threshold), isBatch)
return None
@staticmethod
def separatePoints(p1, p2, transform):
"""If p2 is in same cell as p1 return a point in the next cell in the direction of p1 to p2.
Else return p2."""
# p1 is the end of a channel, so will be in the centre of a cell. So enough
# to move one coordinate of p2 by one cell from p1, and the other proportionately but less
col1, row1 = QSWATTopology.projToCell(p1.x(), p1.y(), transform)
col2, row2 = QSWATTopology.projToCell(p2.x(), p2.y(), transform)
if col1 == col2 and row1 == row2:
return QSWATTopology.shiftedPoint(p1, p2, transform, 1.0)
else:
return p2
@staticmethod
def shiftedPoint(p1, p2, transform, frac):
"""Return point at least frac of a cell away from p1 in direction p1 to p2."""
x1 = p1.x()
y1 = p1.y()
x2 = p2.x()
y2 = p2.y()
dirx = 1 if x2 > x1 else -1
diry = 1 if y2 > y1 else -1
stepx = transform[1] * frac
stepy = abs(transform[5]) * frac
if x1 == x2: # vertical
shiftx = 0
shifty = stepy * diry
else:
slope = abs(float(y1 - y2) / (x1 - x2))
if slope < 1:
shiftx = stepx * dirx
shifty = stepy * diry * slope
else:
shiftx = stepx * dirx / slope
shifty = stepy * diry
return QgsPointXY(x1 + shiftx, y1 + shifty)
@staticmethod
def nearestVertex(streamLayer, point):
"""Find nearest vertex in streamLayer to point and
return the line (list of points) in the reach and
index of the vertex within the line.
"""
bestPointIndex = -1
bestLine = None
minMeasure = float('inf')
for reach in streamLayer.getFeatures():
geometry = reach.geometry()
if geometry.isMultipart():
parts = geometry.asMultiPolyline()
else:
parts = [geometry.asPolyline()]
for line in parts:
for j in range(len(line)):
measure = QSWATTopology.distanceMeasure(line[j], point)
if measure < minMeasure:
minMeasure = measure
bestPointIndex = j
bestLine = line
# distance = math.sqrt(minMeasure)
# QSWATUtils.information(u'Nearest point at ({0:.2F}, {1:.2F}), distance {2:.2F}'.format(bestReach[bestPointIndex].x(), bestReach[bestPointIndex].y(), distance), False)
return (bestLine, bestPointIndex)
@staticmethod
def intercepts(line, pointIndex, point):
"""Get points on segments on either side of pointIndex where
the perpendicular from point meets the segment.
"""
assert pointIndex in range(len(line))
# first try above pointIndex
if pointIndex == len(line) - 1:
# We are at the upper end - no upper segment.
# Return just this point to avoid a tiny subbasin.
return (line[pointIndex], line[pointIndex])
else:
upper = QSWATTopology.getIntercept(line[pointIndex], line[pointIndex+1], point)
if pointIndex == 0:
# We are at the lower end - no lower segment.
# Return just this point to avoid a tiny subbasin.
return (line[0], line[0])
else:
lower = QSWATTopology.getIntercept(line[pointIndex], line[pointIndex-1], point)
return (lower, upper)
@staticmethod
def getIntercept(p1, p2, p):
"""Return point on line from p1 to p2 where
the perpendicular from p meets it, or None if there is no intercept.
"""
x1 = p1.x()
x2 = p2.x()
xp = p.x()
y1 = p1.y()
y2 = p2.y()
yp = p.y()
X = x1 - x2
Y = y1 - y2
assert not (X == 0 and Y == 0)
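# prop is the normalised projection of p onto the segment from p1 towards p2:
# 0 means the foot of the perpendicular is at p1, 1 means it is at p2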
prop = (X * (x1 - xp) + Y * (y1 - yp)) / (X * X + Y * Y)
if prop < 0:
# intercept is off the line beyond p1
# technically we should check for prop > 1, which means
# intercept is off the line beyond p2, but we can assume p is nearer to p1
return None
else:
assert 0 <= prop < 1
return QgsPointXY(x1 - prop * X, y1 - prop * Y)
@staticmethod
def nearer(p1, p2, p):
"""Return the nearer of p1 and p2 to p."""
if p1 is None:
return p2
if p2 is None:
return p1
if QSWATTopology.distanceMeasure(p1, p) < QSWATTopology.distanceMeasure(p2, p):
return p1
else:
return p2
@staticmethod
def distanceMeasure(p1, p2):
"""Return square of distance between p1 and p2."""
dx = p1.x() - p2.x()
dy = p1.y() - p2.y()
return dx * dx + dy * dy
def setMaxFlowLengths(self):
"""
Build map of subbasin to maximum flow length along channels within the basin.
Used for maximum flow path for existing non-grid models, and only defined for these.
"""
channelFlowLengths = dict()
for chLink, length in self.channelLengths.items():
self.setChannelFlowLength(chLink, length, channelFlowLengths)
def setChannelFlowLength(self, chLink, length, channelFlowLengths):
"""Add eentry for chLink to channelFlowLengths map. Also update maxFlowLengths for chLink's subbasin.
post: chLink in channelFlowLengths
"""
if chLink in channelFlowLengths:
return # nothing to do: set on previous recursive call
if chLink in self.zeroChannels:
return
chBasin = self.chLinkToChBasin[chLink]
subbasin = self.chBasinToSubbasin[chBasin]
dsLink = self.downChannels[chLink]
dsChBasin = self.chLinkToChBasin.get(dsLink, -1)
dsBasin = self.chBasinToSubbasin.get(dsChBasin, -1)
if dsBasin == subbasin:
# still in same subbasin:
# add this channel's length to downstream flow length
dsFlowLength = channelFlowLengths.get(dsLink, -1)
if dsFlowLength < 0:
self.setChannelFlowLength(dsLink, self.channelLengths[dsLink], channelFlowLengths)
dsFlowLength = channelFlowLengths[dsLink]
flowLength = dsFlowLength + length
else:
# outlet channel for subbasin
flowLength = length
channelFlowLengths[chLink] = flowLength
maxFlowLength = self.maxFlowLengths.get(subbasin, 0)
if flowLength > maxFlowLength:
self.maxFlowLengths[subbasin] = flowLength
def writePointsTable(self, demLayer, mergees, useGridModel):
"""Write the gis_points table in the project database."""
with self.db.conn as conn:
if not conn:
return
curs = conn.cursor()
table = 'gis_points'
clearSQL = 'DROP TABLE IF EXISTS ' + table
curs.execute(clearSQL)
curs.execute(self.db._POINTSCREATESQL)
waterAdded = []
# Add outlets from streams
for subbasin, (pointId, pt, chLink) in self.outlets.items():
if subbasin in self.upstreamFromInlets or subbasin in self.outletsInLake or \
chLink in self.chLinkInsideLake:
continue # excluded
elev = QSWATTopology.valueAtPoint(pt, demLayer)
self.addPoint(curs, subbasin, pointId, pt, elev, 'O')
# Add inlets
if useGridModel:
for chLink, (pointId, pt) in self.chLinkToInlet.items():
if chLink in self.chLinkInsideLake or chLink in self.chLinkFromLake: # shouldn't happen
continue
subbasin = self.chLinkToChBasin[chLink]
elev = QSWATTopology.valueAtPoint(pt, demLayer)
self.addPoint(curs, subbasin, pointId, pt, elev, 'I')
else:
for subbasin, (pointId, pt) in self.inlets.items():
if subbasin in self.upstreamFromInlets:
# shouldn't happen, but users can be stupid
continue
elev = QSWATTopology.valueAtPoint(pt, demLayer)
self.addPoint(curs, subbasin, pointId, pt, elev, 'I')
# Add point sources at heads of channels
for chLink, (pointId, pt) in self.chLinkToPtSrc.items():
if chLink in self.chLinkInsideLake:
continue
if useGridModel:
if chLink in self.chLinkFromLake:
continue
subbasin = self.chLinkToChBasin[chLink]
else:
chBasin = self.chLinkToChBasin.get(chLink, -1)
subbasin = self.chBasinToSubbasin.get(chBasin, -1)
if subbasin < 0 or subbasin in self.upstreamFromInlets:
continue
elev = QSWATTopology.valueAtPoint(pt, demLayer)
self.addPoint(curs, subbasin, pointId, pt, elev, 'P')
for chLink, (pointId, pt) in self.chPointSources.items():
if chLink in self.chLinkToPtSrc or chLink in mergees or chLink in self.chLinkInsideLake:
continue # link has user-defined point source flowing into it or has been merged or is inside lake
if useGridModel:
if chLink in self.chLinkFromLake:
continue # channel is inside lake
subbasin = self.chLinkToChBasin[chLink]
else:
chBasin = self.chLinkToChBasin.get(chLink, -1)
subbasin = self.chBasinToSubbasin.get(chBasin, -1)
if subbasin < 0 or subbasin in self.upstreamFromInlets:
continue
elev = QSWATTopology.valueAtPoint(pt, demLayer)
self.addPoint(curs, subbasin, pointId, pt, elev, 'P')
# Add lakes
for lake in self.lakesData.values():
# outlet from lake
subbasin, pointId, pt, elev = lake.outPoint
chLink = lake.outChLink
if useGridModel:
# subbasin for outlet will be inside lake and addPoint will fail
# since there will be no SWAT basin. Use one downstream if there is one
downChLink = self.downChannels[chLink]
if downChLink >= 0:
subbasin = self.chLinkToChBasin[downChLink]
elif chLink == -1:
# main outlet was moved inside lake, but reservoir point will still be routed to it
# so add its definition
(outletId, outPt, _) = self.outlets[subbasin]
self.addPoint(curs, subbasin, outletId, outPt, elev, 'O')
self.addPoint(curs, subbasin, pointId, pt, elev, 'W')
waterAdded.append(pointId)
# inlets to lake. These are outlets from streams in grid models, so not necessary
if not useGridModel:
for chLink, (pointId, pt, elev) in lake.inChLinks.items():
chBasin = self.chLinkToChBasin[chLink]
subbasin = self.chBasinToSubbasin[chBasin]
self.addPoint(curs, subbasin, pointId, pt, elev, 'O')
for chLink, (pointId, pt, _) in self.chLinkToWater.items():
# reservoir points at lake outlets can appear here
# but already added from lakesdata
if pointId in waterAdded:
continue
if useGridModel:
subbasin = self.chLinkToChBasin[chLink]
else:
chBasin = self.chLinkToChBasin.get(chLink, -1)
subbasin = self.chBasinToSubbasin.get(chBasin, -1)
if subbasin in self.upstreamFromInlets:
continue
elev = QSWATTopology.valueAtPoint(pt, demLayer)
self.addPoint(curs, subbasin, pointId, pt, elev, 'W')
for channel, (_, pointId, pt) in self.foundReservoirs.items():
if useGridModel:
subbasin = self.chLinkToChBasin[channel]
else:
chBasin = self.chLinkToChBasin.get(channel, -1)
subbasin = self.chBasinToSubbasin.get(chBasin, -1)
if subbasin in self.upstreamFromInlets:
continue
elev = QSWATTopology.valueAtPoint(pt, demLayer)
self.addPoint(curs, subbasin, pointId, pt, elev, 'W')
# for subbasin, (pointId, pt) in self.extraReservoirs.iteritems():
# if subbasin in self.upstreamFromInlets:
# # shouldn't happen, but users can be stupid
# continue
# elev = QSWATTopology.valueAtPoint(pt, demLayer)
# self.addPoint(curs, subbasin, pointId, pt, elev, 'R')
conn.commit()
def addExtraPointsToPointsTable(self, extraPoints, useGridModel):
"""Add extra points needed to mark where channels drain into reservoirs."""
with self.db.conn as conn:
if not conn:
return
curs = conn.cursor()
for channel, pointId in extraPoints:
if useGridModel:
subbasin = self.chLinkToChBasin[channel]
else:
chBasin = self.chLinkToChBasin[channel]
subbasin = self.chBasinToSubbasin[chBasin]
data = self.channelsData[channel]
pt = QgsPointXY(data.lowerX, data.lowerY)
self.addPoint(curs, subbasin, pointId, pt, data.lowerZ, 'O')
conn.commit()
def addPoint(self, cursor, subbasin, pointId, pt, elev, typ):
"""Add point to gis_points table."""
table = 'gis_points'
SWATBasin = self.subbasinToSWATBasin.get(subbasin, 0)
if SWATBasin == 0:
return
ptll = self.pointToLatLong(pt)
sql = "INSERT INTO " + table + " VALUES(?,?,?,?,?,?,?,?)"
try:
cursor.execute(sql, (pointId, SWATBasin, typ,
pt.x(), pt.y(), ptll.y(), ptll.x(), elev))
except:
QSWATUtils.exceptionError('Internal error: unable to add point {0} type {1}'.format(pointId, typ), self.isBatch)
#===========================================================================
# def addPoint(self, cursor, link, data, pointId, typ):
# """Add a point to the points table."""
# table = 'gis_points'
# # inlets will be located at the upstream ends of their links
# # since they are attached to their downstream basins
# if not data:
# return
# SWATBasin = self.subbasinToSWATBasin.get(data.ru, 0)
# if SWATBasin == 0:
# return
# lsu = 0
# if typ == 'I': # inlet
# pt = QgsPointXY(data.upperX, data.upperY)
# elev = data.upperZ
# drainId = SWATBasin
# drainCat = 'R'
# else:
# pt = QgsPointXY(data.lowerX, data.lowerY)
# elev = data.lowerZ
# if typ == 'P': # point source
# resId = self.linkToReservoir.get(link, 0)
# if resId > 0:
# # point source drains to reservoir
# drainId = resId
# drainCat = 'P'
# else:
# # point source drains to link outlet
# drainId = self.linkToOutlet[link]
# drainCat = 'P'
# elif typ == 'R': # reservoir: drains to link outlet
# drainId = self.linkToOutlet[link]
# drainCat = 'P'
# else:
# assert typ == 'O', u'Unknown point type: ' + typ
# # outlet: drains to reach of downstream basin (if any)
# dsLink = self.downLinks[link]
# dsSWATBasin = 0
# while dsLink >= 0 and dsSWATBasin == 0:
# dsBasin = self.linkToBasin[dsLink]
# dsSWATBasin = self.subbasinToSWATBasin.get(dsBasin, 0)
# if dsSWATBasin == 0:
# dsLink = self.downLinks[dsLink]
# if dsSWATBasin > 0:
# drainId = dsSWATBasin
# drainCat = 'R'
# else:
# drainId = -1
# drainCat = 'X'
# ptll = self.pointToLatLong(pt)
# sql = "INSERT INTO " + table + " VALUES(?,?,?,?,?,?,?,?,?)"
# cursor.execute(sql, (pointId, SWATBasin, lsu, typ, \
# pt.x(), pt.y(), ptll.y(), ptll.x(), elev))
#===========================================================================
def writeChannelsTable(self, mergeChannels, basins, gv):
"""
Write the channels table in the project database, make rivs1.shp in shapes directory, and copy as results template to TablesOut directory.
Changes the channel layer, so if successful, returns the new one.
"""
root = QgsProject.instance().layerTreeRoot()
if gv.useGridModel:
# use streams as channels
channelFile = gv.streamFile
strng = 'streams'
else:
channelFile = gv.channelFile
strng = 'channel'
if not os.path.exists(channelFile):
QSWATUtils.error('Cannot find {0} file {1}'.format(strng, channelFile), gv.isBatch)
return
channelLayer = QSWATUtils.getLayerByFilename(root.findLayers(), channelFile, FileTypes._CHANNELS,
None, None, None)[0]
if channelLayer is None: # perhaps removed by user
channelLayer = QgsVectorLayer(channelFile, 'Channels', 'ogr')
QSWATUtils.copyShapefile(channelFile, Parameters._RIVS1, gv.shapesDir)
rivs1File = QSWATUtils.join(gv.shapesDir, Parameters._RIVS1 + '.shp')
QSWATUtils.removeLayer(rivs1File, root)
rivs1Layer = QgsVectorLayer(rivs1File, 'Channels ({0})'.format(Parameters._RIVS1), 'ogr')
provider1 = rivs1Layer.dataProvider()
# add Channel, ChannelR, and Subbasin fields unless already has them
chIdx = self.getIndex(rivs1Layer, QSWATTopology._CHANNEL, ignoreMissing=True)
chRIdx = self.getIndex(rivs1Layer, QSWATTopology._CHANNELR, ignoreMissing=True)
subIdx = self.getIndex(rivs1Layer, QSWATTopology._SUBBASIN, ignoreMissing=True)
if chIdx < 0:
OK = provider1.addAttributes([QgsField(QSWATTopology._CHANNEL, QVariant.Int)])
if not OK:
QSWATUtils.error('Cannot add {0} field to channels shapefile {1}'.format(QSWATTopology._CHANNEL, rivs1File), self.isBatch)
return None
if chRIdx < 0:
OK = provider1.addAttributes([QgsField(QSWATTopology._CHANNELR, QVariant.Int)])
if not OK:
QSWATUtils.error('Cannot add {0} field to channels shapefile {1}'.format(QSWATTopology._CHANNELR, rivs1File), self.isBatch)
return None
if subIdx < 0:
OK = provider1.addAttributes([QgsField(QSWATTopology._SUBBASIN, QVariant.Int)])
if not OK:
QSWATUtils.error('Cannot add {0} field to channels shapefile {1}'.format(QSWATTopology._SUBBASIN, rivs1File), self.isBatch)
return None
rivs1Layer.updateFields()
chIdx = self.getIndex(rivs1Layer, QSWATTopology._CHANNEL)
chRIdx = self.getIndex(rivs1Layer, QSWATTopology._CHANNELR)
subIdx = self.getIndex(rivs1Layer, QSWATTopology._SUBBASIN)
chLinkIdx = self.getIndex(rivs1Layer, QSWATTopology._LINKNO)
request = QgsFeatureRequest().setSubsetOfAttributes([chLinkIdx])
if not gv.useGridModel:
basinMerge = self.mergeChannelData(mergeChannels)
# make map channel -> feature it is merged with for merged channels
merges = dict()
targets = []
for reach in provider1.getFeatures(request):
for channel in mergeChannels.keys():
target = self.finalTarget(channel, mergeChannels)
if target not in targets:
targets.append(target)
if reach[chLinkIdx] == target:
merges[channel] = reach
#QSWATUtils.loginfo('Channel {0} merging to target {1} with length {2}'.format(channel, target, reach.geometry().length()))
# create geometries for merged reaches
merged = []
for reach in provider1.getFeatures(request):
rid = reach.id()
channel = reach[chLinkIdx]
if channel in targets and rid not in merged:
merged.append(rid)
mergeReach = merges.get(channel, None)
if mergeReach is not None:
# add its geometry to its merger target
#length1 = mergeReach.geometry().length()
#length2 = reach.geometry().length()
mergeReach.setGeometry(mergeReach.geometry().combine(reach.geometry()))
#length3 = mergeReach.geometry().length()
#QSWATUtils.loginfo('Channel {0} merged to target with length {1} ({2} + {3})' \
# .format(channel, length3, length1, length2))
if rid not in merged:
merged.append(rid)
# remove channels and targets involved in mergers
provider1.deleteFeatures(merged)
# add mergers
mergers = []
for channel, reach in merges.items():
if reach not in mergers:
mergers.append(reach)
provider1.addFeatures(mergers)
chsMap = dict()
zeroRids = []
for reach in provider1.getFeatures(request):
channel = reach[chLinkIdx]
if gv.useGridModel:
# subbasin and chBasin are the same
subbasin = self.chLinkToChBasin.get(channel, -1)
downChannel = self.downChannels[channel]
else:
chBasin = self.chLinkToChBasin.get(channel, -1)
subbasin = self.chBasinToSubbasin.get(chBasin, -1)
downChannel = self.finalDownstream(channel, mergeChannels)
SWATBasin = self.subbasinToSWATBasin.get(subbasin, 0)
SWATChannel = 0 if SWATBasin == 0 else self.channelToSWATChannel.get(channel, 0)
downSWATChannel = self.channelToSWATChannel.get(downChannel, 0)
rid = reach.id()
if SWATChannel == 0:
zeroRids.append(rid)
chsMap[rid] = dict()
chsMap[rid][chIdx] = SWATChannel
chsMap[rid][chRIdx] = downSWATChannel
chsMap[rid][subIdx] = SWATBasin
OK = provider1.changeAttributeValues(chsMap)
if not OK:
QSWATUtils.error('Cannot add channel and subbasin values to channels shapefile {0}'.format(rivs1File), self.isBatch)
return None
if len(zeroRids) > 0:
OK = provider1.deleteFeatures(zeroRids)
if not OK:
QSWATUtils.error('Cannot remove merged, zero length, or above inlet channels from channels shapefile {0}'.format(rivs1File), self.isBatch)
return None
# Add fields from channels table to rivs1File if there are no more than RIVS1SUBS1MAX features; otherwise it takes too long.
addToRiv1 = rivs1Layer.featureCount() <= Parameters._RIVS1SUBS1MAX
# remove fields apart from Channel, ChannelR and Subbasin
if addToRiv1:
self.removeFields(provider1, [QSWATTopology._LINKNO, QSWATTopology._CHANNEL, QSWATTopology._CHANNELR, QSWATTopology._SUBBASIN], rivs1File, self.isBatch)
if addToRiv1:
fields = []
fields.append(QgsField(QSWATTopology._AREAC, QVariant.Double, len=20, prec=0))
fields.append(QgsField(QSWATTopology._LEN2, QVariant.Double))
fields.append(QgsField(QSWATTopology._SLO2, QVariant.Double))
fields.append(QgsField(QSWATTopology._WID2, QVariant.Double))
fields.append(QgsField(QSWATTopology._DEP2, QVariant.Double))
fields.append(QgsField(QSWATTopology._MINEL, QVariant.Double))
fields.append(QgsField(QSWATTopology._MAXEL, QVariant.Double))
fields.append(QgsField(QSWATTopology._RESERVOIR, QVariant.Int))
fields.append(QgsField(QSWATTopology._POND, QVariant.Int))
fields.append(QgsField(QSWATTopology._LAKEIN, QVariant.Int))
fields.append(QgsField(QSWATTopology._LAKEOUT, QVariant.Int))
provider1.addAttributes(fields)
rivs1Layer.updateFields()
linkIdx = self.getIndex(rivs1Layer, QSWATTopology._LINKNO)
chIdx = self.getIndex(rivs1Layer, QSWATTopology._CHANNEL)
areaCIdx = self.getIndex(rivs1Layer, QSWATTopology._AREAC)
len2Idx = self.getIndex(rivs1Layer, QSWATTopology._LEN2)
slo2Idx = self.getIndex(rivs1Layer, QSWATTopology._SLO2)
wid2Idx = self.getIndex(rivs1Layer, QSWATTopology._WID2)
dep2Idx = self.getIndex(rivs1Layer, QSWATTopology._DEP2)
minElIdx = self.getIndex(rivs1Layer, QSWATTopology._MINEL)
maxElIdx = self.getIndex(rivs1Layer, QSWATTopology._MAXEL)
resIdx = self.getIndex(rivs1Layer, QSWATTopology._RESERVOIR)
pndIdx = self.getIndex(rivs1Layer, QSWATTopology._POND)
lakeInIdx = self.getIndex(rivs1Layer, QSWATTopology._LAKEIN)
lakeOutIdx = self.getIndex(rivs1Layer, QSWATTopology._LAKEOUT)
mmap = dict()
with self.db.conn as conn:
if not conn:
return None
curs = conn.cursor()
table = 'gis_channels'
clearSQL = 'DROP TABLE IF EXISTS ' + table
curs.execute(clearSQL)
curs.execute(self.db._CHANNELSCREATESQL)
time1 = time.process_time()
wid2Data = dict()
floodscape = QSWATUtils._FLOODPLAIN if gv.useLandscapes else QSWATUtils._NOLANDSCAPE
sql = "INSERT INTO " + table + " VALUES(?,?,?,?,?,?,?,?,?)"
if addToRiv1:
# iterate over channels in rivs1 shapefile
request = QgsFeatureRequest().setFlags(QgsFeatureRequest.NoGeometry).setSubsetOfAttributes([linkIdx, chIdx])
generator = self.generateChannelsFromShapefile(request, provider1, linkIdx, chIdx)
else:
generator = self.generateChannelsFromTable()
toDelete = []
for fid, channel, SWATChannel in generator:
if gv.useGridModel:
# basin and chBasin are the same
subbasin = self.chLinkToChBasin[channel]
else:
chBasin = self.chLinkToChBasin.get(channel, -1)
subbasin = self.chBasinToSubbasin.get(chBasin, -1)
SWATBasin = 0 if channel in self.chLinkInsideLake else self.subbasinToSWATBasin.get(subbasin, 0)
lakeOutId = self.chLinkFromLake.get(channel, 0)
if SWATBasin == 0 and (lakeOutId == 0 or self.downChannels.get(channel, -1) < 0):
toDelete.append(fid)
continue
if gv.useGridModel:
channelData = self.channelsData[channel]
# drain area is a numpy float, so need to coerce, or won't get written to attributes of rivs1
drainAreaHa = float(self.drainAreas[channel]) / 1E4
length = float(self.channelLengths[channel] * gv.mainLengthMultiplier)
slopePercent = float(self.channelSlopes[channel] * 100 * gv.reachSlopeMultiplier / gv.mainLengthMultiplier)
minEl = float(channelData.lowerZ)
maxEl = float(channelData.upperZ)
else:
mergeData = basinMerge.get(channel, None)
if mergeData is None:
continue
drainAreaHa = float(mergeData.areaC / 1E4)
length = float(mergeData.length * gv.mainLengthMultiplier)
slopePercent = float(mergeData.slope * 100 * gv.reachSlopeMultiplier) / gv.mainLengthMultiplier
minEl = float(mergeData.minEl)
maxEl = float(mergeData.maxEl)
# possible for channel to be so short it has no pixels draining to it
# also no LSU data when channel is outlet from lake in grid model
basinData = basins.get(subbasin, None)
lsuData = None if basinData is None else basinData.getLsus().get(channel, None)
drainAreaKm = float(drainAreaHa) / 100
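# channel width and depth are estimated as power functions of drainage area (in sq km),
# using the project's multiplier and exponent settings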
channelWidth = float(gv.channelWidthMultiplier * (drainAreaKm ** gv.channelWidthExponent))
wid2Data[SWATChannel] = channelWidth
channelDepth = float(gv.channelDepthMultiplier * (drainAreaKm ** gv.channelDepthExponent))
rid = 0 if lsuData is None else self.getReservoirId(lsuData, floodscape)
pid = 0 if lsuData is None else self.getPondId(lsuData, floodscape)
if rid == 0 and pid == 0:
# omit from gis_channels channels which have become reservoirs or ponds
curs.execute(sql, (SWATChannel, SWATBasin, drainAreaHa, length, slopePercent,
channelWidth, channelDepth, minEl, maxEl))
if addToRiv1:
lakeInId = self.chLinkIntoLake.get(channel, 0)
mmap[fid] = dict()
mmap[fid][areaCIdx] = drainAreaHa
mmap[fid][len2Idx] = length
mmap[fid][slo2Idx] = slopePercent
mmap[fid][wid2Idx] = channelWidth
mmap[fid][dep2Idx] = channelDepth
mmap[fid][minElIdx] = minEl
mmap[fid][maxElIdx] = maxEl
mmap[fid][resIdx] = rid
mmap[fid][pndIdx] = pid
mmap[fid][lakeInIdx] = lakeInId
mmap[fid][lakeOutIdx] = lakeOutId
time2 = time.process_time()
QSWATUtils.loginfo('Writing gis_channels table took {0} seconds'.format(int(time2 - time1)))
conn.commit()
self.db.hashDbTable(conn, table)
if addToRiv1:
if not provider1.changeAttributeValues(mmap):
QSWATUtils.error('Cannot edit values in channels shapefile {0}'.format(rivs1File), self.isBatch)
return None
if len(toDelete) > 0:
OK = provider1.deleteFeatures(toDelete)
if not OK:
QSWATUtils.error('Cannot remove channels in lakes from channels shapefile {0}'.format(rivs1File), self.isBatch)
return None
# make copy as template for stream results
QSWATUtils.copyShapefile(rivs1File, Parameters._RIVS, gv.resultsDir)
rivFile = QSWATUtils.join(gv.resultsDir, Parameters._RIVS + '.shp')
rivLayer = QgsVectorLayer(rivFile, 'Channels', 'ogr')
provider = rivLayer.dataProvider()
# leave only the Channel, ChannelR and Subbasin attributes
self.removeFields(provider, [QSWATTopology._CHANNEL, QSWATTopology._CHANNELR, QSWATTopology._SUBBASIN], rivFile, self.isBatch)
# add PenWidth field to stream results template
OK = provider.addAttributes([QgsField(QSWATTopology._PENWIDTH, QVariant.Double)])
if not OK:
QSWATUtils.error('Cannot add {0} field to streams results template {1}'.format(QSWATTopology._PENWIDTH, rivFile), self.isBatch)
return None
self.setPenWidth(wid2Data, provider)
if gv.useGridModel:
return channelLayer
else:
layers = root.findLayers()
subLayer = root.findLayer(channelLayer.id())
rivs1Layer = QSWATUtils.getLayerByFilename(layers, rivs1File, FileTypes._CHANNELREACHES,
gv, subLayer, QSWATUtils._WATERSHED_GROUP_NAME)[0]
# hide channel layer
if channelLayer is not None:
QSWATUtils.setLayerVisibility(channelLayer, False, root)
if len(self.upstreamFromInlets) > 0:
self.replaceStreamLayer(root, layers, gv)
return rivs1Layer
def generateChannelsFromShapefile(self, request, provider, linkIdx, chIdx):
"""Yield (feature id, channel, swatChammel) tupless from rivs1.shp."""
for feature in provider.getFeatures(request):
yield feature.id(), feature[linkIdx], feature[chIdx]
def generateChannelsFromTable(self):
"""Yield (feature id, channel, swatChammel) tuples from tables."""
for channel, SWATChannel in self.channelToSWATChannel.items():
yield 0, channel, SWATChannel
def replaceStreamLayer(self, root, layers, gv):
"""Copy stream layer, remove streams upstream from inlets, and replace stream layer."""
streamLayer = QSWATUtils.getLayerByFilename(layers, gv.streamFile, FileTypes._STREAMREACHES, gv, None, None)[0]
if streamLayer is not None:
base, _ = os.path.splitext(os.path.split(gv.streamFile)[1])
QSWATUtils.copyShapefile(gv.streamFile, base + 'act', gv.shapesDir)
actStreamFile = QSWATUtils.join(gv.shapesDir, base + 'act.shp')
actstreamLayer = QgsVectorLayer(actStreamFile, FileTypes.legend(FileTypes._STREAMREACHES), 'ogr')
basinIdx = self.getIndex(actstreamLayer, QSWATTopology._WSNO)
if basinIdx < 0:
return
provider = actstreamLayer.dataProvider()
request = QgsFeatureRequest().setFlags(QgsFeatureRequest.NoGeometry).setSubsetOfAttributes([basinIdx])
toDelete = []
for feature in provider.getFeatures(request):
if feature[basinIdx] in self.upstreamFromInlets:
toDelete.append(feature.id())
if provider.deleteFeatures(toDelete):
subLayer = root.findLayer(streamLayer.id())
actstreamLayer = QSWATUtils.getLayerByFilename(layers, actStreamFile, FileTypes._STREAMREACHES, gv, subLayer,
QSWATUtils._WATERSHED_GROUP_NAME, True)[0]
QSWATUtils.setLayerVisibility(streamLayer, False, root)
def getReservoirId(self, channelData, floodscape):
"""Return reservoir id, if any, else 0."""
lsuData = channelData.get(floodscape, None)
if lsuData is not None and lsuData.waterBody is not None and lsuData.waterBody.isReservoir():
return lsuData.waterBody.id
return 0
def getPondId(self, channelData, floodscape):
"""Return pond id, if any, else 0."""
lsuData = channelData.get(floodscape, None)
if lsuData is not None and lsuData.waterBody is not None and lsuData.waterBody.isPond():
return lsuData.waterBody.id
return 0
def mergeChannelData(self, mergeChannels):
"""Generate and return map of channel to MergedChannelData."""
# first pass: collect data for unmerged channels
mergedChannelData = dict()
for channel in self.channelToSWATChannel.keys():
if channel not in mergeChannels:
channelData = self.channelsData[channel]
mergedChannelData[channel] = MergedChannelData(self.drainAreas[channel],
self.channelLengths[channel],
self.channelSlopes[channel],
channelData.lowerZ,
channelData.upperZ)
# second pass: add data for merged channels
for source, target in mergeChannels.items():
channelData = self.channelsData[source]
final = self.finalTarget(target, mergeChannels)
mergedChannelData[final].add(self.drainAreas[source],
self.channelLengths[source],
self.channelSlopes[source],
channelData.lowerZ,
channelData.upperZ)
return mergedChannelData
def finalTarget(self, target, mergeChannels):
"""Find final target of merges."""
nxt = mergeChannels.get(target, -1)
if nxt < 0:
return target
else:
return self.finalTarget(nxt, mergeChannels)
def finalDownstream(self, start, mergeChannels):
"""Find downstream channel from start, skipping merged channels, and return it."""
chLink1 = self.finalTarget(start, mergeChannels)
return self.finalTarget(self.getDownChannel(chLink1), mergeChannels)
def routeChannelsOutletsAndBasins(self, basins, mergedChannels, mergees, extraPoints, gv):
"""Add channels, lakes, basins, point sources, reservoirs, inlets and outlets to main gis_routing table."""
chCat = 'CH'
subbasinCat = 'SUB'
ptCat = 'PT'
resCat = 'RES'
pondCat = 'PND'
xCat = 'X'
# first associate any inlets, point sources and reservoirs with appropriate channels
if gv.useGridModel:
# no merging
channelToInlet = self.chLinkToInlet
channelToPtSrc = self.chLinkToPtSrc
else:
channelToInlet = dict()
for subbasin, inlet in self.inlets.items():
# find an inlet channel for this subbasin
found = False
for channel, data in self.channelsData.items():
chBasin = self.chLinkToChBasin.get(channel, -1)
if subbasin == self.chBasinToSubbasin.get(chBasin, -1) and \
QSWATTopology.coincidentPoints(QgsPointXY(data.upperX, data.upperY),
inlet[1], self.xThreshold, self.yThreshold):
channelToInlet[self.finalTarget(channel, mergedChannels)] = inlet
found = True
break
if not found:
QSWATUtils.error('Failed to find channel for inlet to subbasin {0}'.format(subbasin), gv.isBatch)
# map point sources to the unmerged channels they drain into
channelToPtSrc = dict()
for channel, ptsrc in self.chLinkToPtSrc.items():
channelToPtSrc[channel] = ptsrc
#QSWATUtils.loginfo('Channel {0} merged to {1} has point source {2}'.format(channel, self.finalTarget(channel, mergedChannels), ptsrc[0]))
# add point sources at stream sources
for channel, ptsrc in self.chPointSources.items():
if channel not in channelToPtSrc and channel not in mergees and \
channel not in self.chLinkInsideLake and \
not (gv.useGridModel and channel in self.chLinkFromLake): # does not already have a point, not merged, not inside lake
channelToPtSrc[channel] = ptsrc
# map channels to water bodies that replace them as drainage targets
# and water bodies to channels they drain into
floodscape = QSWATUtils._FLOODPLAIN if gv.useLandscapes else QSWATUtils._NOLANDSCAPE
channelToWater = dict()
for basinData in basins.values():
for channel, channelData in basinData.getLsus().items():
lsuData = channelData.get(floodscape, None)
if lsuData is not None and lsuData.waterBody is not None and not lsuData.waterBody.isUnknown():
channelToWater[channel] = (lsuData.waterBody.id, lsuData.waterBody.waterRole)
try:
with self.db.conn as conn:
curs = conn.cursor()
routedPoints = []
routedWater = []
routedChannels = []
# routedSubbasins = []
for channel, SWATChannel in self.channelToSWATChannel.items():
if channel in mergedChannels:
# all that is needed is to map its point source to the merge target
ptsrc = channelToPtSrc.get(channel, None)
if ptsrc is not None:
ptsrcId = ptsrc[0]
if ptsrcId not in routedPoints:
finalChannel = self.finalTarget(channel, mergedChannels)
wid, role = channelToWater.get(finalChannel, (-1, -1))
if wid >= 0:
wCat = resCat if role == 1 else pondCat
curs.execute(DBUtils._ROUTINGINSERTSQL,
(ptsrcId, ptCat, wid, wCat, 100))
else:
finalSWATChannel = self.channelToSWATChannel[finalChannel]
curs.execute(DBUtils._ROUTINGINSERTSQL,
(ptsrcId, ptCat, finalSWATChannel, chCat, 100))
routedPoints.append(ptsrcId)
continue
# if channel is lake outflow
# if main outflow, route lake to outlet and outlet to channel
# else route 0% of lake to channel
outLakeId = self.chLinkFromLake.get(channel, -1)
if outLakeId >= 0:
lakeData = self.lakesData[outLakeId]
wCat = resCat if lakeData.waterRole == 1 else pondCat
if channel == lakeData.outChLink:
# main outlet
outletId = lakeData.outPoint[1]
curs.execute(DBUtils._ROUTINGINSERTSQL, (outLakeId, wCat, outletId, ptCat, 100))
if outletId not in routedPoints:
if gv.useGridModel and self.downChannels.get(channel, -1) < 0:
# we have an internal lake exit: route outlet id to watershed exit
curs.execute(DBUtils._ROUTINGINSERTSQL, (outletId, ptCat, 0, xCat, 100))
else:
curs.execute(DBUtils._ROUTINGINSERTSQL, (outletId, ptCat, SWATChannel, chCat, 100))
routedPoints.append(outletId)
else:
# other outlet
curs.execute(DBUtils._ROUTINGINSERTSQL, (outLakeId, wCat, SWATChannel, chCat, 0))
# check if channel routes into lake
inLakeId = self.chLinkIntoLake.get(channel, -1)
if inLakeId >= 0:
# route its point source to the channel
ptsrc = channelToPtSrc.get(channel, None)
if ptsrc is not None:
ptsrcId = ptsrc[0]
if ptsrcId not in routedPoints:
curs.execute(DBUtils._ROUTINGINSERTSQL,
(ptsrcId, ptCat, SWATChannel, chCat, 100))
routedPoints.append(ptsrcId)
# route the channel into its outlet, and the outlet into the lake
lakeData = self.lakesData[inLakeId]
outletId = lakeData.inChLinks[channel][0]
wCat = resCat if lakeData.waterRole == 1 else pondCat
if SWATChannel not in routedChannels:
curs.execute(DBUtils._ROUTINGINSERTSQL, (SWATChannel, chCat, outletId, ptCat, 100))
routedChannels.append(SWATChannel)
if outletId not in routedPoints:
curs.execute(DBUtils._ROUTINGINSERTSQL, (outletId, ptCat, inLakeId, wCat, 100))
routedPoints.append(outletId)
if not gv.useGridModel:
continue # since we know it is into the lake and so cannot have a downstream channel or be a subbasin outlet
if gv.useGridModel:
subbasin = self.chLinkToChBasin[channel]
else:
chBasin = self.chLinkToChBasin.get(channel, -1)
subbasin = self.chBasinToSubbasin.get(chBasin, -1)
SWATBasin = self.subbasinToSWATBasin.get(subbasin, 0)
if SWATBasin == 0:
continue
# if channel is inside lake ignore it unless a lake outflow
if channel in self.chLinkInsideLake and outLakeId < 0:
continue
dsChannel = self.finalDownstream(channel, mergedChannels)
dsSWATChannel = self.channelToSWATChannel.get(dsChannel, 0)
wid, role = channelToWater.get(channel, (-1, -1))
wCat = resCat if role == 1 else pondCat
inlet = channelToInlet.get(channel, None)
if inlet is not None:
# route inlet to channel or water
if wid >= 0:
curs.execute(DBUtils._ROUTINGINSERTSQL, (inlet[0], ptCat, wid, wCat, 100))
else:
curs.execute(DBUtils._ROUTINGINSERTSQL, (inlet[0], ptCat, SWATChannel, chCat, 100))
(pointId, _, outletChannel) = self.outlets[subbasin]
if channel == outletChannel or gv.useGridModel:
# subbasin outlet: channel routes to outlet point of subbasin; outlet routes to downstream channel
# but with some exceptions:
# - if the channel is replaced by a reservoir, this is routed to the outlet instead
# - if subbasin has an extra point source, this is added to its outlet channel or reservoir
if gv.useGridModel:
ptsrc = channelToPtSrc.get(channel, None)
else:
ptsrc = None
# if ptsrc is None:
# ptsrc = self.extraPtSrcs.get(subbasin, None)
if ptsrc is not None:
# route it to the outlet channel, unless already routed
ptsrcId = ptsrc[0]
if ptsrcId not in routedPoints:
if wid < 0:
curs.execute(DBUtils._ROUTINGINSERTSQL,
(ptsrcId, ptCat, SWATChannel, chCat, 100))
else:
curs.execute(DBUtils._ROUTINGINSERTSQL,
(ptsrcId, ptCat, wid, wCat, 100))
routedPoints.append(ptsrcId)
if wid >= 0:
# need to check if this is a reservoir that is continued in the downstream subbasin
# to make sure we only route it once, and at its final downstream end
widDown, _ = channelToWater.get(dsChannel, (-1, -1))
if wid not in routedWater and wid != widDown:
# route water to water point and water point to outlet
(waterId, _, _) = self.chLinkToWater.get(channel, (-1, None, -1))
if waterId < 0:
(waterId, ptId, _) = self.foundReservoirs.get(channel, (-1, -1, None))
else:
# it is safe to use same id for reservoir and reservoir outlet point when
# using DSNODEID from inlets/outlets file
ptId = waterId
if waterId < 0:
QSWATUtils.error('Cannot find water point for channel {0}'
.format(SWATChannel), gv.isBatch)
else:
curs.execute(DBUtils._ROUTINGINSERTSQL,
(wid, wCat, ptId, ptCat, 100))
if ptId not in routedPoints:
curs.execute(DBUtils._ROUTINGINSERTSQL,
(ptId, ptCat, pointId, ptCat, 100))
routedPoints.append(ptId)
routedWater.append(wid)
elif SWATChannel not in routedChannels:
curs.execute(DBUtils._ROUTINGINSERTSQL,
(SWATChannel, chCat, pointId, ptCat, 100))
routedChannels.append(SWATChannel)
if pointId not in routedPoints:
if dsSWATChannel > 0:
widDown, roleDown = channelToWater.get(dsChannel, (-1, -1))
if widDown >= 0:
wCat = resCat if roleDown == 1 else pondCat
curs.execute(DBUtils._ROUTINGINSERTSQL,
(pointId, ptCat, widDown, wCat, 100))
else:
curs.execute(DBUtils._ROUTINGINSERTSQL,
(pointId, ptCat, dsSWATChannel, chCat, 100))
else:
# watershed outlet: mark point as category X
curs.execute(DBUtils._ROUTINGINSERTSQL,
(pointId, ptCat, 0, xCat, 100))
routedPoints.append(pointId)
else:
# channel and downstream channel within a subbasin:
# channel routes to downstream channel unless it is replaced by water
# and if it has a point source this is added
assert dsSWATChannel > 0, 'Channel {0} has no downstream channel'.format(channel)
widDown, roleDown = channelToWater.get(dsChannel, (-1, -1))
if wid >= 0:
if wid not in routedWater:
if widDown >= 0:
# if wid == widDown wid is only a part water body
# and we will eventually route the one downstream
if wid != widDown:
# route water to water point and water point to widDown
(waterId, _, _) = self.chLinkToWater.get(channel, (-1, None, -1))
if waterId < 0:
(waterId, ptId, _) = self.foundReservoirs.get(channel, (-1, -1, None))
else:
# it is safe to use same id for reservoir and reservoir outlet point when
# using DSNODEID from inlets/outlets file
ptId = waterId
if waterId < 0:
QSWATUtils.error('Cannot find water point for channel {0}'
.format(SWATChannel), gv.isBatch)
else:
curs.execute(DBUtils._ROUTINGINSERTSQL,
(wid, wCat, ptId, ptCat, 100))
if ptId not in routedPoints:
wCat = resCat if roleDown == 1 else pondCat
curs.execute(DBUtils._ROUTINGINSERTSQL,
(ptId, ptCat, widDown, wCat, 100))
routedPoints.append(ptId)
routedWater.append(wid)
else:
# route water to water point and water point to downstream channel
(waterId, _, _) = self.chLinkToWater.get(channel, (-1, None, -1))
if waterId < 0:
(waterId, ptId, _) = self.foundReservoirs.get(channel, (-1, -1, None))
else:
# it is safe to use same id for water and water outlet point when
# using DSNODEID from inlets/outlets file
ptId = waterId
if waterId < 0:
QSWATUtils.error('Cannot find water point for channel {0}'
.format(SWATChannel), gv.isBatch)
else:
curs.execute(DBUtils._ROUTINGINSERTSQL,
(wid, wCat, ptId, ptCat, 100))
if ptId not in routedPoints:
curs.execute(DBUtils._ROUTINGINSERTSQL,
(ptId, ptCat, dsSWATChannel, chCat, 100))
routedPoints.append(ptId)
routedWater.append(wid)
elif SWATChannel not in routedChannels:
if widDown >= 0:
# insert an outlet point so that channel's contribution to reservoir
# is included in outputs
self.pointId += 1
extraPoints.append((channel, self.pointId))
curs.execute(DBUtils._ROUTINGINSERTSQL,
(SWATChannel, chCat, self.pointId, ptCat, 100))
wCat = resCat if roleDown == 1 else pondCat
curs.execute(DBUtils._ROUTINGINSERTSQL,
(self.pointId, ptCat, widDown, wCat, 100))
routedPoints.append(self.pointId)
else:
curs.execute(DBUtils._ROUTINGINSERTSQL,
(SWATChannel, chCat, dsSWATChannel, chCat, 100))
routedChannels.append(SWATChannel)
# also route point source, if any, to channel or water
ptsrc = channelToPtSrc.get(channel, None)
if ptsrc is not None:
ptsrcId = ptsrc[0]
if ptsrcId not in routedPoints:
if wid > 0:
curs.execute(DBUtils._ROUTINGINSERTSQL,
(ptsrcId, ptCat, wid, wCat, 100))
else:
curs.execute(DBUtils._ROUTINGINSERTSQL,
(ptsrcId, ptCat, SWATChannel, chCat, 100))
routedPoints.append(ptsrcId)
# route lakes without outlet channels to main outlet points
for lakeId, lakeData in self.lakesData.items():
if lakeData.outChLink == -1:
(subbasin, lakeOutletId, _, _) = lakeData.outPoint
(outletId, _, _) = self.outlets[subbasin]
wCat = resCat if lakeData.waterRole == 1 else pondCat
# route the lake to its lake outlet, the lake outlet to the main outlet, and mark main outlet as category X
curs.execute(DBUtils._ROUTINGINSERTSQL, (lakeId, wCat, lakeOutletId, ptCat, 100))
if lakeOutletId not in routedPoints:
curs.execute(DBUtils._ROUTINGINSERTSQL, (lakeOutletId, ptCat, outletId, ptCat, 100))
routedPoints.append(lakeOutletId)
if outletId not in routedPoints:
curs.execute(DBUtils._ROUTINGINSERTSQL, (outletId, ptCat, 0, xCat, 100))
routedPoints.append(outletId)
# route subbasin to outlet points
# or to lake if outlet in lake
for subbasin, (pointId, _, chLink) in self.outlets.items():
SWATBasin = self.subbasinToSWATBasin.get(subbasin, 0)
if SWATBasin == 0:
continue
if gv.useGridModel:
if chLink in self.chLinkInsideLake or chLink in self.chLinkFromLake:
continue
lakeId = self.outletsInLake.get(subbasin, None)
if lakeId is None:
curs.execute(DBUtils._ROUTINGINSERTSQL,
(SWATBasin, subbasinCat, pointId, ptCat, 100))
else:
lakeData = self.lakesData[lakeId]
wCat = resCat if lakeData.waterRole == 1 else pondCat
curs.execute(DBUtils._ROUTINGINSERTSQL,
(SWATBasin, subbasinCat, lakeId, wCat, 100))
return True
except Exception:
QSWATUtils.loginfo('Routing channels, outlets and subbasins failed: {0}'.format(traceback.format_exc()))
return False
@staticmethod
def removeFields(provider, keepFieldNames, fileName, isBatch):
"""Remove fields other than keepFieldNames from shapefile fileName with provider."""
toDelete = []
fields = provider.fields()
for idx in range(fields.count()):
name = fields.field(idx).name()
if not name in keepFieldNames:
toDelete.append(idx)
if len(toDelete) > 0:
OK = provider.deleteAttributes(toDelete)
if not OK:
QSWATUtils.error('Cannot remove fields from shapefile {0}'.format(fileName), isBatch)
def setPenWidth(self, data, provider):
"""Scale wid2 data to 1 .. 4 and write to layer."""
minW = float('inf')
maxW = 0
for val in data.values():
minW = min(minW, val)
maxW = max(maxW, val)
if maxW > minW: # guard against division by zero
rng = maxW - minW
fun = lambda x: (x - minW) * 3 / rng + 1.0
else:
fun = lambda _: 1.0
chIdx = provider.fieldNameIndex(QSWATTopology._CHANNEL)
if chIdx < 0:
QSWATUtils.error('Cannot find {0} field in channels results template'.format(QSWATTopology._CHANNEL))
return
penIdx = provider.fieldNameIndex(QSWATTopology._PENWIDTH)
if penIdx < 0:
QSWATUtils.error('Cannot find {0} field in channels results template'.format(QSWATTopology._PENWIDTH))
return
request = QgsFeatureRequest().setFlags(QgsFeatureRequest.NoGeometry).setSubsetOfAttributes([chIdx, penIdx])
mmap = dict()
for f in provider.getFeatures(request):
ch = f[chIdx]
width = data.get(ch, minW)
mmap[f.id()] = {penIdx: fun(width)}
OK = provider.changeAttributeValues(mmap)
if not OK:
QSWATUtils.error('Cannot edit channels results template', self.isBatch)
def makeOutletThresholds(self, gv, root):
"""
Make file like D8 contributing area but with heightened values at subbasin outlets.
Return -1 if cannot make the file.
"""
assert os.path.exists(gv.demFile)
demBase = os.path.splitext(gv.demFile)[0]
if not QSWATUtils.isUpToDate(gv.demFile, gv.ad8File):
# Probably using existing watershed but switched tabs in delineation form
# At any rate, cannot calculate flow paths
QSWATUtils.loginfo('ad8 file not found or out of date')
return -1
assert len(self.outlets) > 0
gv.hd8File = demBase + 'hd8.tif'
QSWATUtils.removeLayerAndFiles(gv.hd8File, root)
assert not os.path.exists(gv.hd8File)
ad8Layer = QgsRasterLayer(gv.ad8File, 'D8 contributing area')
# calculate maximum contributing area at an outlet point
maxContrib = 0
for (_, pt, _) in self.outlets.values():
contrib = QSWATTopology.valueAtPoint(pt, ad8Layer)
# assume ad8nodata is negative
if not (contrib is None or contrib < 0):
maxContrib = max(maxContrib, contrib)
threshold = int(2 * maxContrib)
ad8Layer = None
# copy ad8 to hd8 and then set outlet point values to threshold
ad8Ds = gdal.Open(gv.ad8File, gdal.GA_ReadOnly)
driver = gdal.GetDriverByName('GTiff')
hd8Ds = driver.CreateCopy(gv.hd8File, ad8Ds, 0)
if not hd8Ds:
QSWATUtils.error('Failed to create hd8 file {0}'.format(gv.hd8File), self.isBatch)
return -1
ad8Ds = None
QSWATUtils.copyPrj(gv.ad8File, gv.hd8File)
band = hd8Ds.GetRasterBand(1)
transform = hd8Ds.GetGeoTransform()
arr = array([[threshold]])
for (_, pt, _) in self.outlets.values():
x, y = QSWATTopology.projToCell(pt.x(), pt.y(), transform)
band.WriteArray(arr, x, y)
hd8Ds = None
return threshold
def runCalc1(self, file1, func, outFile, gv, isInt=False, fun1=None):
"""Use func as a function to calulate outFile from file1.
Valid input data values have fun1 applied if it is not None"""
if os.path.exists(outFile):
QSWATUtils.removeLayerAndFiles(outFile, gv.iface.legendInterface())
r1 = Raster(file1, gv)
rout = Raster(outFile, gv, canWrite=True, isInt=isInt)
completed = False
while not completed:
try:
# safer to mark complete immediately to avoid danger of endless loop
# the only way to loop again is for the MemoryError exception to be raised
completed = True
r1.open(self.chunkCount)
noData = -99999 if isInt else r1.noData
rout.open(self.chunkCount, numRows=r1.numRows, numCols=r1.numCols,
transform=r1.ds.GetGeoTransform(), projection=r1.ds.GetProjection(), noData=noData)
for row in range(r1.numRows):
for col in range(r1.numCols):
v1 = r1.read(row, col)
if fun1 is not None and v1 != r1.noData:
v1 = fun1(v1)
vout = func(v1, r1.noData, noData)
rout.write(row, col, vout)
r1.close()
rout.close()
except MemoryError:
QSWATUtils.loginfo('runCalc1 out of memory with chunk count {0}'.format(self.chunkCount))
try:
r1.close()
rout.close()
except Exception:
pass
self.chunkCount += 1
completed = False
if os.path.exists(outFile):
QSWATUtils.copyPrj(file1, outFile)
return True
else:
# QSWATUtils.error(u'Calculator failed', self._gv.isBatch)
return False
def runCalc2(self, file1, file2, func, outFile, gv, isInt=False, fun1=None, fun2=None):
"""Use func as a function to calulate outFile from file1 and file2.
Assumes file1 and file2 have same origina and pixel size.
If file1/2 values are not nodata and fun1/2 are not None, they are applied before func is applied."""
if os.path.exists(outFile):
QSWATUtils.removeLayerAndFiles(outFile, gv.iface.legendInterface())
r1 = Raster(file1, gv)
r2 = Raster(file2, gv)
rout = Raster(outFile, gv, canWrite=True, isInt=isInt)
completed = False
while not completed:
try:
# safer to mark complete immediately to avoid danger of endless loop
# the only way to loop again is for the MemoryError exception to be raised
completed = True
r1.open(self.chunkCount)
r2.open(self.chunkCount)
noData = -1 if isInt else r1.noData
rout.open(self.chunkCount, numRows=r1.numRows, numCols=r1.numCols,
transform=r1.ds.GetGeoTransform(), projection=r1.ds.GetProjection(), noData=noData)
for row in range(r1.numRows):
for col in range(r1.numCols):
v1 = r1.read(row, col)
if fun1 is not None and v1 != r1.noData:
v1 = fun1(v1)
v2 = r2.read(row, col)
if fun2 is not None and v2 != r2.noData:
v2 = fun2(v2)
vout = func(v1, r1.noData, v2, r2.noData, noData)
rout.write(row, col, vout)
r1.close()
r2.close()
rout.close()
except MemoryError:
QSWATUtils.loginfo('runCalc2 out of memory with chunk count {0}'.format(self.chunkCount))
try:
r1.close()
r2.close()
rout.close()
except Exception:
pass
self.chunkCount += 1
completed = False
if os.path.exists(outFile):
QSWATUtils.copyPrj(file1, outFile)
return True
else:
# QSWATUtils.error(u'Calculator failed', self._gv.isBatch)
return False
def runCalc2Trans(self, file1, file2, func, outFile, baseFile, gv, isInt=False, fun1=None, fun2=None):
"""Use func as a function to calulate outFile from file1 and file2, using rows, columns and extent of baseFile.
If file1/2 values are not nodata and fun1/2 are not None, they are applied before func is applied."""
if os.path.exists(outFile):
QSWATUtils.removeLayerAndFiles(outFile, gv.iface.legendInterface())
r1 = Raster(file1, gv)
r2 = Raster(file2, gv)
rout = Raster(outFile, gv, canWrite=True, isInt=isInt)
ds = gdal.Open(baseFile, gdal.GA_ReadOnly)
transform = ds.GetGeoTransform()
numRows = ds.RasterYSize
numCols = ds.RasterXSize
projection=ds.GetProjection()
ds = None
completed = False
while not completed:
try:
# safer to mark complete immediately to avoid danger of endless loop
# the only way to loop again is for the MemoryError exception to be raised
completed = True
r1.open(self.chunkCount)
r2.open(self.chunkCount)
transform1 = r1.ds.GetGeoTransform()
transform2 = r2.ds.GetGeoTransform()
rowFun1, colFun1 = QSWATTopology.translateCoords(transform, transform1, numRows, numCols)
rowFun2, colFun2 = QSWATTopology.translateCoords(transform, transform2, numRows, numCols)
noData = -1 if isInt else r1.noData
rout.open(self.chunkCount, numRows=numRows, numCols=numCols,
transform=transform, projection=projection, noData=noData)
for row in range(numRows):
y = QSWATTopology.rowToY(row, transform)
row1 = rowFun1(row, y)
row2 = rowFun2(row, y)
for col in range(numCols):
x = QSWATTopology.colToX(col, transform)
col1 = colFun1(col, x)
col2 = colFun2(col, x)
v1 = r1.read(row1, col1)
if fun1 is not None and v1 != r1.noData:
v1 = fun1(v1)
v2 = r2.read(row2, col2)
if fun2 is not None and v2 != r2.noData:
v2 = fun2(v2)
vout = func(v1, r1.noData, v2, r2.noData, noData)
rout.write(row, col, vout)
r1.close()
r2.close()
rout.close()
except MemoryError:
QSWATUtils.loginfo('runCalc2Trans out of memory with chunk count {0}'.format(self.chunkCount))
try:
r1.close()
r2.close()
rout.close()
except Exception:
pass
self.chunkCount += 1
completed = False
if os.path.exists(outFile):
QSWATUtils.copyPrj(baseFile, outFile)
return True
else:
# QSWATUtils.error(u'Calculator failed', self._gv.isBatch)
return False
@staticmethod
def burnStream(streamFile, demFile, burnFile, depth, verticalFactor, isBatch):
"""Create as burnFile a copy of demFile with points on lines streamFile reduced in height by depth metres."""
# use vertical factor to convert from metres to vertical units of DEM
demReduction = float(depth) / verticalFactor
assert not os.path.exists(burnFile)
demDs = gdal.Open(demFile, gdal.GA_ReadOnly)
driver = gdal.GetDriverByName('GTiff')
burnDs = driver.CreateCopy(burnFile, demDs, 0)
if burnDs is None:
QSWATUtils.error('Failed to create burned-in DEM {0}'.format(burnFile), isBatch)
return
demDs = None
QSWATUtils.copyPrj(demFile, burnFile)
band = burnDs.GetRasterBand(1)
nodata = band.GetNoDataValue()
burnTransform = burnDs.GetGeoTransform()
streamLayer = QgsVectorLayer(streamFile, 'Burn in streams', 'ogr')
start = time.process_time()
countHits = 0
countPoints = 0
countChanges = 0
changed = dict()
for reach in streamLayer.getFeatures():
geometry = reach.geometry()
if geometry.isMultipart():
lines = geometry.asMultiPolyline()
else:
lines = [geometry.asPolyline()]
for line in lines:
for i in range(len(line) - 1):
countPoints += 1
p0 = line[i]
px0 = p0.x()
py0 = p0.y()
x0, y0 = QSWATTopology.projToCell(px0, py0, burnTransform)
p1 = line[i+1]
px1 = p1.x()
py1 = p1.y()
x1, y1 = QSWATTopology.projToCell(px1, py1, burnTransform)
steep = abs(y1 - y0) > abs(x1 - x0)
if steep:
x0, y0 = y0, x0
x1, y1 = y1, x1
if x0 > x1:
x0, x1 = x1, x0
y0, y1 = y1, y0
deltax = x1 - x0
deltay = abs(y1 - y0)
err = 0
deltaerr = deltay
y = y0
ystep = 1 if y0 < y1 else -1
arr = array([[0.0]])
for x in range(x0, x1+1):
if steep:
if QSWATTopology.addPointToChanged(changed, y, x):
arr = band.ReadAsArray(y, x, 1, 1)
# arr may be None if stream map extends outside DEM extent
if arr is not None and arr[0,0] != nodata:
arr[0,0] = arr[0,0] - demReduction
band.WriteArray(arr, y, x)
countChanges += 1
else:
countHits += 1
else:
if QSWATTopology.addPointToChanged(changed, x, y):
arr = band.ReadAsArray(x, y, 1, 1)
# arr may be None if stream map extends outside DEM extent
if arr is not None and arr[0,0] != nodata:
arr[0,0] = arr[0,0] - demReduction
band.WriteArray(arr, x, y)
countChanges += 1
else:
countHits += 1
err += deltaerr
if 2 * err < deltax:
continue
y += ystep
err -= deltax
finish = time.process_time()
QSWATUtils.loginfo('Created burned-in DEM {0} in {1!s} milliseconds; {2!s} points; {3!s} hits; {4!s} changes'.format(burnFile, int((finish - start)*1000), countPoints, countHits, countChanges))
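# Illustrative trace (added for clarity, not part of the original code): with the
# error-accumulation loop above, walking from cell (x0, y0) = (0, 0) to
# (x1, y1) = (5, 2) gives deltax = 5, deltay = 2 and visits the cells
# (0,0), (1,0), (2,1), (3,1), (4,2), (5,2),
# i.e. y is stepped whenever twice the accumulated error reaches deltax,
# which is the standard Bresenham line traversal over the raster cells.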
@staticmethod
def addPointToChanged(changed, col, row):
"""Changed points held in dictionary column -> row-sortedlist, since it is like a sparse matrix.
Add a point unless ready there. Return true if added.
"""
rows = changed.get(col, [])
inserted = ListFuns.insertIntoSortedList(row, rows, True)
if inserted:
changed[col] = rows
return True
else:
return False
@staticmethod
def valueAtPoint(point, layer):
"""
Get the band 1 value at point in a grid layer.
"""
val, ok = layer.dataProvider().sample(point, 1)
if not ok:
return layer.dataProvider().sourceNoDataValue(1)
else:
return val
def isUpstreamSubbasin(self, subbasin):
"""Return true if a subbasin is upstream from an inlet."""
return subbasin in self.upstreamFromInlets
def pointToLatLong(self, point):
"""Convert a QgsPointXY to latlong coordinates and return it."""
geom = QgsGeometry.fromPointXY(point)
geom.transform(self.transformToLatLong)
return geom.asPoint()
def getIndex(self, layer, name, ignoreMissing=False):
"""Get the index of a shapefile layer attribute name,
reporting error if not found, unless ignoreMissing is true.
"""
# field names are truncated to 10 characters when created, so only search for up to 10 characters
# also allow any case, since using lookupField rather than indexOf
index = layer.fields().lookupField(name[:10])
if not ignoreMissing and index < 0:
QSWATUtils.error('Cannot find field {0} in {1}'.format(name, QSWATUtils.layerFileInfo(layer).filePath()), self.isBatch)
return index
def getProviderIndex(self, provider, name, ignoreMissing=False):
"""Get the index of a shapefile provider attribute name,
reporting error if not found, unless ignoreMissing is true.
"""
# field names are truncated to 10 characters when created, so only search for up to 10 characters
index = provider.fieldNameIndex(name[:10])
if not ignoreMissing and index < 0:
QSWATUtils.error('Cannot find field {0} in provider'.format(name), self.isBatch)
return index
def makePointInLine(self, reach, percent):
"""Return a point percent along line from outlet end to next point."""
if self.outletAtStart:
line = QSWATTopology.reachFirstLine(reach.geometry(), self.xThreshold, self.yThreshold)
pt1 = line[0]
pt2 = line[1]
else:
line = QSWATTopology.reachLastLine(reach.geometry(), self.xThreshold, self.yThreshold)
length = len(line)
pt1 = line[length-1]
pt2 = line[length-2]
x = (pt1.x() * (100 - percent) + pt2.x() * percent) / 100.0
y = (pt1.y() * (100 - percent) + pt2.y() * percent) / 100.0
return QgsPointXY(x, y)
def hasOutletAtStart(self, streamLayer, ad8Layer):
"""Returns true iff streamLayer lines have their outlet points at their start points.
If ad8Layer is not None, we are not in an existing watershed, and can rely on accumulations.
Accumulation will be higher at the outlet end.
Finds shapes with downstream connections, and
determines the orientation by seeing how such a shape is connected to the downstream shape.
If they don't seem to be connected (as may happen after merging subbasins)
tries other shapes with downstream connections, up to 10.
A line is connected to another if their ends are less than dx and dy apart horizontally and vertically.
Assumes the orientation found for this shape can be used generally for the layer.
"""
streamIndex = self.getIndex(streamLayer, QSWATTopology._LINKNO, ignoreMissing=False)
if streamIndex < 0:
QSWATUtils.error('No LINKNO field in stream layer', self.isBatch)
return True # default as true for TauDEM
dsStreamIndex = self.getIndex(streamLayer, QSWATTopology._DSLINKNO, ignoreMissing=False)
if dsStreamIndex < 0:
QSWATUtils.error('No DSLINKNO field in stream layer', self.isBatch)
return True # default as true for TauDEM
if ad8Layer is not None: # only set to non-None if not an existing watershed
# use accumulation difference at ends of reach (or line in reach) to decide
for reach in streamLayer.getFeatures():
geometry = reach.geometry()
if geometry.isMultipart():
lines = geometry.asMultiPolyline()
else:
lines = [geometry.asPolyline()]
for line in lines:
if len(line) > 1: # make sure we haven't picked on an empty line
p1 = line[0]
p2 = line[-1]
acc1 = QSWATTopology.valueAtPoint(p1, ad8Layer)
acc2 = QSWATTopology.valueAtPoint(p2, ad8Layer)
if acc1 != acc2: # degenerate single point line
return acc1 > acc2
# find candidates: links with a down connection
candidates = [] # reach, downReach pairs
for reach in streamLayer.getFeatures():
downLink = reach[dsStreamIndex]
if downLink >= 0:
# find the down reach
downReach = QSWATUtils.getFeatureByValue(streamLayer, streamIndex, downLink)
if downReach is not None:
candidates.append((reach, downReach))
if len(candidates) < 10:
continue
else:
break
else:
QSWATUtils.error('Cannot find link {0!s} in {1}'.format(downLink, QSWATUtils.layerFileInfo(streamLayer).filePath()), self.isBatch)
return True
if candidates == []:
QSWATUtils.error('Cannot find link with a downstream link in {0}. Do you only have one stream?'.format(QSWATUtils.layerFileInfo(streamLayer).filePath()), self.isBatch)
return True
for (upReach, downReach) in candidates:
downGeom = downReach.geometry()
downStart = QSWATTopology.reachFirstLine(downGeom, self.xThreshold, self.yThreshold)
if downStart is None:
continue
downFinish = QSWATTopology.reachLastLine(downGeom, self.xThreshold, self.yThreshold)
if downFinish is None:
continue
upGeom = upReach.geometry()
upStart = QSWATTopology.reachFirstLine(upGeom, self.xThreshold, self.yThreshold)
if upStart is None:
continue
upFinish = QSWATTopology.reachLastLine(upGeom, self.xThreshold, self.yThreshold)
if upFinish is None:
continue
if QSWATTopology.pointOnLine(upStart[0], downFinish, self.xThreshold, self.yThreshold):
return True
if QSWATTopology.pointOnLine(upFinish[-1], downStart, self.xThreshold, self.yThreshold):
return False
QSWATUtils.error('Cannot find physically connected reaches in streams shapefile {0}. Try increasing nearness threshold'.format(QSWATUtils.layerFileInfo(streamLayer).filePath()), self.isBatch)
return True
def saveOutletsAndSources(self, channelLayer, outletLayer, useGridModel):
"""Write outlets, downSubbasins, and (unless useGridModel)
inlets, upstreamFromInlets, and outletChannels tables."""
# in case called twice
self.pointId = 0
self.waterBodyId = 0
self.outlets.clear()
self.inlets.clear()
self.chPointSources.clear()
self.upstreamFromInlets.clear()
self.downSubbasins.clear()
self.chBasinToSubbasin.clear()
chLinkToSubbasin = dict()
downChannels = dict()
chInlets = dict()
chOutlets = dict()
chLinkIndex = self.getIndex(channelLayer, QSWATTopology._LINKNO)
dsChLinkIndex = self.getIndex(channelLayer, QSWATTopology._DSLINKNO)
wsnoIndex = self.getIndex(channelLayer, QSWATTopology._WSNO, ignoreMissing=not useGridModel)
if chLinkIndex < 0 or dsChLinkIndex < 0:
return False
# ignoreMissing for subbasinIndex necessary when useGridModel, since channelLayer is then a streams layer
subbasinIndex = self.getIndex(channelLayer, QSWATTopology._BASINNO, ignoreMissing=useGridModel)
if useGridModel:
if wsnoIndex < 0:
return False
else:
if subbasinIndex < 0:
return False
dsNodeIndex = self.getIndex(channelLayer, QSWATTopology._DSNODEID, ignoreMissing=True)
if outletLayer is not None:
idIndex = self.getIndex(outletLayer, QSWATTopology._ID, ignoreMissing=False)
inletIndex = self.getIndex(outletLayer, QSWATTopology._INLET, ignoreMissing=False)
srcIndex = self.getIndex(outletLayer, QSWATTopology._PTSOURCE, ignoreMissing=False)
resIndex = self.getIndex(outletLayer, QSWATTopology._RES, ignoreMissing=False)
# set pointId to max id value in outletLayer
# and waterBodyId to max reservoir or pond id
request = QgsFeatureRequest().setFlags(QgsFeatureRequest.NoGeometry)
for point in outletLayer.getFeatures(request):
self.pointId = max(self.pointId, point[idIndex])
if point[inletIndex] == 0 and point[resIndex] > 0:
self.waterBodyId = max(self.waterBodyId, point[idIndex])
else:
dsNodeIndex = -1
for reach in channelLayer.getFeatures():
chLink = reach[chLinkIndex]
dsChLink = reach[dsChLinkIndex]
chBasin = reach[wsnoIndex]
geom = reach.geometry()
# for grids, channel basins and subbasins are the same
subbasin = chBasin if useGridModel else reach[subbasinIndex]
chLinkToSubbasin[chLink] = subbasin
if not useGridModel:
self.chBasinToSubbasin[chBasin] = subbasin
downChannels[chLink] = dsChLink
dsNode = reach[dsNodeIndex] if dsNodeIndex >= 0 else -1
if dsNode >= 0 and idIndex >= 0 and inletIndex >= 0 and srcIndex >= 0 and resIndex >= 0:
outletPoint = None
inletPoint = None
for f in outletLayer.getFeatures():
if f[idIndex] == dsNode:
if f[inletIndex] == 0:
if f[resIndex] == 0:
outletPoint = f
break
elif f[srcIndex] == 0:
inletPoint = f
break
if outletPoint is not None:
pt = outletPoint.geometry().asPoint()
chOutlets[chLink] = (self.nonzeroPointId(dsNode), pt)
elif inletPoint is not None:
pt = inletPoint.geometry().asPoint()
chInlets[chLink] = (self.nonzeroPointId(dsNode), pt)
first = QSWATTopology.reachFirstLine(geom, self.xThreshold, self.yThreshold)
if first is None or len(first) < 2:
QSWATUtils.error('It looks like your channels shapefile does not obey the single direction rule, that all channels are either upstream or downstream.', self.isBatch)
return False
last = QSWATTopology.reachLastLine(geom, self.xThreshold, self.yThreshold)
if last is None or len(last) < 2:
QSWATUtils.error('It looks like your channels shapefile does not obey the single direction rule, that all channels are either upstream or downstream.', self.isBatch)
return False
outId, pt = chOutlets.get(chLink, (-1, None))
if pt is None:
self.pointId += 1
outId = self.pointId
self.pointId += 1
srcId = self.pointId
if self.outletAtStart:
if not useGridModel and pt is not None and not QSWATTopology.coincidentPoints(first[0], pt, self.xThreshold, self.yThreshold):
QSWATUtils.error('Outlet point {0} at ({1}, {2}) not coincident with start of channel link {3}'
.format(outId, pt.x(), pt.y(), chLink), self.isBatch)
chOutlets[chLink] = (outId, first[0])
self.chPointSources[chLink] = (srcId, last[-1])
else:
if not useGridModel and pt is not None and not QSWATTopology.coincidentPoints(last[-1], pt, self.xThreshold, self.yThreshold):
QSWATUtils.error('Outlet point {0} at ({1}, {2}) not coincident with end of channel link {3}'
.format(outId, pt.x(), pt.y(), chLink), self.isBatch)
chOutlets[chLink] = (outId, last[-1])
self.chPointSources[chLink] = (srcId, first[0])
# now find the channels which are on subbasin boundaries,
# i.e. their downstream channels are in different basins
hasInlet = False
for chLink, dsChLink in downChannels.items():
subbasin = chLinkToSubbasin[chLink]
if subbasin == QSWATTopology._NOBASIN: # from a zero-length channel
continue
dsSubbasin = chLinkToSubbasin[dsChLink] if dsChLink >= 0 else -1
while dsSubbasin == QSWATTopology._NOBASIN:
# skip past zero-length channels
dsChLink = downChannels.get(dsChLink, -1)
dsSubbasin = chLinkToSubbasin.get(dsChLink, -1)
if subbasin != dsSubbasin:
self.downSubbasins[subbasin] = dsSubbasin
# collect the basin's outlet location:
outletId, outletPt = chOutlets[chLink]
self.outlets[subbasin] = (outletId, outletPt, chLink)
if not useGridModel:
# self.extraResPoints[subbasin] = chResPoints[chLink]
# self.extraPtSrcPoints[subbasin] = chSources[chLink]
inletId, inletPt = chInlets.get(chLink, (-1, None))
if inletPt is not None and dsSubbasin >= 0:
# inlets are associated with downstream basin
self.inlets[dsSubbasin] = (inletId, inletPt)
hasInlet = True
# collect subbasins upstream from inlets
# this looks inefficient, repeatedly going through all basins, but probably few projects have inlets:
if not useGridModel and hasInlet:
for subbasin in self.inlets.keys():
self.addUpstreamSubbasins(subbasin)
return True
def nonzeroPointId(self, dsNode):
"""Return dsNode, or next pointId if dsNode is zero. Used to prevent a zero point id."""
if dsNode == 0:
self.pointId += 1
return self.pointId
return dsNode
def addUpstreamSubbasins(self, start):
"""Add basins upstream from start to upstreamFromInlets."""
for subbasin, downSubbasin in self.downSubbasins.items():
if downSubbasin == start:
self.upstreamFromInlets.add(subbasin)
self.addUpstreamSubbasins(subbasin)
def surroundingLake(self, SWATChannel, useGridModel):
"""Return id of lake containing channel, if any, else -1."""
chLink = self.SWATChannelToChannel[SWATChannel]
lake1 = self.chLinkInsideLake.get(chLink, -1)
if useGridModel and lake1 < 0:
return self.chLinkFromLake.get(chLink, -1)
else:
return lake1
@staticmethod
def maskFun(val, valNoData, mask, maskNoData, resNoData):
"""Result is val unless mask is nodata."""
if val == valNoData or mask == maskNoData:
return resNoData
else:
return val
@staticmethod
def reachFirstLine(geometry, xThreshold, yThreshold):
"""Returns the line of a single polyline,
or a line in a multipolyline whose first point is not adjacent to a point
of another line in the multipolyline.
"""
if not geometry.isMultipart():
return geometry.asPolyline()
mpl = geometry.asMultiPolyline()
numLines = len(mpl)
for i in range(numLines):
linei = mpl[i]
connected = False
if linei is None or len(linei) == 0:
continue
else:
start = linei[0]
for j in range(numLines):
if i != j:
linej = mpl[j]
if QSWATTopology.pointOnLine(start, linej, xThreshold, yThreshold):
connected = True
break
if not connected:
return linei
# should not get here
return None
@staticmethod
def reachLastLine(geometry, xThreshold, yThreshold):
"""Returns the line of a single polyline,
or a line in a multipolyline whose last point is not adjacent to a point
of another line in the multipolyline.
"""
if not geometry.isMultipart():
return geometry.asPolyline()
mpl = geometry.asMultiPolyline()
numLines = len(mpl)
for i in range(numLines):
linei = mpl[i]
connected = False
if linei is None or len(linei) == 0:
continue
else:
finish = linei[-1]
for j in range(numLines):
if i != j:
linej = mpl[j]
if QSWATTopology.pointOnLine(finish, linej, xThreshold, yThreshold):
connected = True
break
if not connected:
return linei
# should not get here
return None
@staticmethod
def pointOnLine(point, line, xThreshold, yThreshold):
"""Return true if point is coincident with a point on the line.
Note this only checks if the point is close to a vertex."""
if line is None or len(line) == 0:
return False
for pt in line:
if QSWATTopology.coincidentPoints(point, pt, xThreshold, yThreshold):
return True
return False
@staticmethod
def coincidentPoints(pt1, pt2, xThreshold, yThreshold):
"""Return true if points are within xThreshold and yThreshold
horizontally and vertically."""
return abs(pt1.x() - pt2.x()) < xThreshold and \
abs(pt1.y() - pt2.y()) < yThreshold
@staticmethod
def colToX(col, transform):
"""Convert column number to X-coordinate."""
return (col + 0.5) * transform[1] + transform[0]
@staticmethod
def rowToY(row, transform):
"""Convert row number to Y-coordinate."""
return (row + 0.5) * transform[5] + transform[3]
#=========currently not used==================================================================
# @staticmethod
# def xToCol(x, transform):
# """Convert X-coordinate to column number."""
# return int((x - transform[0]) / transform[1])
#===========================================================================
#=========currently not used==================================================================
# @staticmethod
# def yToRow(y, transform):
# """Convert Y-coordinate to row number."""
# return int((y - transform[3]) / transform[5])
#===========================================================================
@staticmethod
def cellToProj(col, row, transform):
"""Convert column and row numbers to (X,Y)-coordinates."""
x = (col + 0.5) * transform[1] + transform[0]
y = (row + 0.5) * transform[5] + transform[3]
return (x,y)
@staticmethod
def projToCell(x, y, transform):
"""Convert (X,Y)-coordinates to column and row numbers."""
col = int((x - transform[0]) / transform[1])
row = int((y - transform[3]) / transform[5])
return (col, row)
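# Worked example (added for clarity, not in the original source): with a GDAL
# geotransform of (500000, 30, 0, 4600000, 0, -30), i.e. a 30 m grid whose top-left
# corner is at (500000, 4600000), cellToProj(0, 0, transform) returns the cell
# centre (500015.0, 4599985.0), and projToCell(500015.0, 4599985.0, transform)
# maps back to (col, row) = (0, 0).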
#==========not currently used=================================================================
# @staticmethod
# def haveSameCoords(band1, transform1, transform2):
# """
# Return true if raster transform1 and transform2 are the same or sufficiently
# close for row/col coordinates of first to be used without reprojection
# as row/col coordinates of the second.
#
# Assumes second raster has sufficient extent.
# We could demand this, but in practice often read rasters well within their extents,
# because only looking at points within a watershed.
# """
# # may work, though may also fail - we are comparing float values
# if transform1 == transform2:
# return True
# # accept the origins as the same if they are within a tenth of the cell size
# # otherwise return false
# if (abs(transform1[0] - transform2[0]) > transform2[1] * 0.1 or \
# abs(transform1[3] - transform2[3]) > abs(transform2[5]) * 0.1):
# return False
# # then check if the vertical/horizontal difference in cell size times the number of rows/columns
# # in the first is less than half the depth/width of a cell in the second
# return abs(transform1[1] - transform2[1]) * band1.XSize < transform2[1] * 0.5 and \
# abs(transform1[5] - transform2[5]) * band1.YSize < abs(transform2[5]) * 0.5
#===========================================================================
@staticmethod
def translateCoords(transform1, transform2, numRows1, numCols1):
"""
Return a pair of functions:
row, latitude -> row and column, longitude -> column
for transforming positions in raster1 to row and column of raster2.
The functions are:
identities on the first argument if the rasters have (sufficiently)
the same origins and cell sizes;
a simple shift on the first argument if the rasters have
the same cell sizes but different origins;
otherwise a full transformation on the second argument.
It is assumed that the first and second arguments are consistent,
ie they identify the same cell in raster1.
"""
# may work, though we are comparing real values
if transform1 == transform2:
return (lambda row, _: row), (lambda col, _: col)
xOrigin1, xSize1, _, yOrigin1, _, ySize1 = transform1
xOrigin2, xSize2, _, yOrigin2, _, ySize2 = transform2
# accept the origins as the same if they are within a tenth of the cell size
sameXOrigin = abs(xOrigin1 - xOrigin2) < xSize2 * 0.1
sameYOrigin = abs(yOrigin1 - yOrigin2) < abs(ySize2) * 0.1
# accept cell sizes as equivalent if vertical/horizontal difference
# in cell size times the number of rows/columns
# in the first is less than half the depth/width of a cell in the second
sameXSize = abs(xSize1 - xSize2) * numCols1 < xSize2 * 0.5
sameYSize = abs(ySize1 - ySize2) * numRows1 < abs(ySize2) * 0.5
if sameXSize:
if sameXOrigin:
xFun = (lambda col, _: col)
else:
# just needs origin shift
# note that int truncates, i.e. rounds towards zero
if xOrigin1 > xOrigin2:
colShift = int((xOrigin1 - xOrigin2) / xSize1 + 0.5)
xFun = lambda col, _: col + colShift
else:
colShift = int((xOrigin2 - xOrigin1) / xSize1 + 0.5)
xFun = lambda col, _: col - colShift
else:
# full transformation
xFun = lambda _, x: int((x - xOrigin2) / xSize2)
if sameYSize:
if sameYOrigin:
yFun = (lambda row, _: row)
else:
# just needs origin shift
# note that int truncates, i.e. rounds towards zero, and y size will be negative
if yOrigin1 > yOrigin2:
rowShift = int((yOrigin2 - yOrigin1) / ySize1 + 0.5)
yFun = lambda row, _: row - rowShift
else:
rowShift = int((yOrigin1 - yOrigin2) / ySize1 + 0.5)
yFun = lambda row, _: row + rowShift
else:
# full transformation
yFun = lambda _, y: int((y - yOrigin2) / ySize2)
# note row, column order of return (same as order of reading rasters)
return yFun, xFun
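# Worked example (added, hedged): for transform1 = (0, 30, 0, 0, 0, -30) and
# transform2 = (-60, 30, 0, 60, 0, -30) with 100 rows and columns, the cell sizes
# match but raster2's origin is two cells left of and above raster1's, so the
# returned functions reduce to simple shifts: yFun(row, _) == row + 2 and
# xFun(col, _) == col + 2.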
@staticmethod
def sameTransform(transform1, transform2, numRows1, numCols1):
"""Return true if transforms are sufficiently close to be regarded as the same,
i.e. row and column numbers for the first can be used without transformation to read the second.
Avoids relying on equality between real numbers."""
# may work, though we are comparing real values
if transform1 == transform2:
return True
xOrigin1, xSize1, _, yOrigin1, _, ySize1 = transform1
xOrigin2, xSize2, _, yOrigin2, _, ySize2 = transform2
# accept the origins as the same if they are within a tenth of the cell size
sameXOrigin = abs(xOrigin1 - xOrigin2) < xSize2 * 0.1
if sameXOrigin:
sameYOrigin = abs(yOrigin1 - yOrigin2) < abs(ySize2) * 0.1
if sameYOrigin:
# accept cell sizes as equivalent if vertical/horizontal difference
# in cell size times the number of rows/columns
# in the first is less than half the depth/width of a cell in the second
sameXSize = abs(xSize1 - xSize2) * numCols1 < xSize2 * 0.5
if sameXSize:
sameYSize = abs(ySize1 - ySize2) * numRows1 < abs(ySize2) * 0.5
return sameYSize
return False
def splitReachByLake(self, lakeGeom, reachGeom, reachData):
"""lakeGeom is a polygon representing a lake. reach is known to intersect wil the lake..
Returns a pair of inflowing and outflowing reaches, either or both of which may be None."""
sourcePt = QgsPointXY(reachData.upperX, reachData.upperY)
sourceToLake = QSWATTopology.toIntersection(reachGeom, lakeGeom, sourcePt, not self.outletAtStart, self.xThreshold, self.yThreshold)
outletPt = QgsPointXY(reachData.lowerX, reachData.lowerY)
outletToLake = QSWATTopology.toIntersection(reachGeom, lakeGeom, outletPt, self.outletAtStart, self.xThreshold, self.yThreshold)
return sourceToLake, outletToLake
@staticmethod
def toIntersection(reachGeom, lakeGeom, start, isUp, xThreshold, yThreshold):
"""Return geometry for sequence of points from start to one before first one that intersects with lakeGeom,
or None if this is empty or a singleton, or if start is within the lake.
If isUp the search is from index 0 if the of the reach, else it is from the last index."""
if lakeGeom.contains(start):
return None
if reachGeom.isMultipart():
mpl = reachGeom.asMultiPolyline()
else:
mpl = [reachGeom.asPolyline()]
result = []
done = set()
while True:
progress = False
for i in range(len(mpl)):
if i not in done:
line = mpl[i]
if len(line) <= 1:
continue
if isUp:
if QSWATTopology.coincidentPoints(start, line[0], xThreshold, yThreshold):
for pt in line:
if lakeGeom.contains(pt):
length = len(result)
if length < 1:
return None
elif length == 1:
# create zero-length line at result[0]
return QgsGeometry.fromPolylineXY([result[0], result[0]])
return QgsGeometry.fromPolylineXY(result)
result.append(pt)
start = line[-1]
done.add(i)
progress = True
else:
if QSWATTopology.coincidentPoints(start, line[-1], xThreshold, yThreshold):
for pt in reversed(line):
if lakeGeom.contains(pt):
length = len(result)
if length < 1:
return None
elif length == 1:
# create zero-length line at result[0]
return QgsGeometry.fromPolylineXY([result[0], result[0]])
return QgsGeometry.fromPolylineXY(result)
result.insert(0, pt)
start = line[0]
done.add(i)
progress = True
if not progress:
raise Exception('Looping trying to calculate reach')
# @staticmethod
# def splitReach(resGeom, reachGeom, source, outlet, outletAtStart, xThreshold, yThreshold):
# """Split reachGeom into two parts, one from source to reservoir and one from reservoir to outlet.
#
# Assumes the reach has been split into at least two disjoint parts, one flowing from source, the other flowing to outlet.
# Algorithm checks each line in reach geometry, moving up from source or down from outlet until reservoir is reached
# in both cases."""
# sourcePart = []
# outletPart = []
# mpl = reachGeom.asMultiPolyline()
# done = set()
# outletToLakeDone = False
# sourceToLakeDone = False
# while True:
# reduced = False
# for i in xrange(len(mpl)):
# if i not in done:
# line = mpl[i]
# start = line[0]
# finish = line[-1]
# if outletAtStart:
# if not outletToLakeDone and QSWATTopology.coincidentPoints(outlet, start, xThreshold, yThreshold):
# newLine = []
# for pt in line:
# newLine.append(pt)
# if resGeom.intersects(QgsGeometry.fromPointXY(pt)):
# outletToLakeDone = True
# break
# outletPart.append(newLine)
# outlet = finish
# reduced = True
# done.add(i)
# elif not sourceToLakeDone and QSWATTopology.coincidentPoints(source, finish, xThreshold, yThreshold):
# newLine = []
# for pt in reversed(line):
# newLine.insert(0, pt)
# if resGeom.intersects(QgsGeometry.fromPointXY(pt)):
# sourceToLakeDone = True
# break
# sourcePart.append(newLine)
# source = start
# done.add(i)
# reduced = True
# else:
# if not outletToLakeDone and QSWATTopology.coincidentPoints(outlet, finish, xThreshold, yThreshold):
# newLine = []
# for pt in reversed(line):
# newLine.insert(0, pt)
# if resGeom.intersects(QgsGeometry.fromPointXY(pt)):
# outletToLakeDone = True
# break
# outletPart.append(line)
# outlet = start
# done.add(i)
# reduced = True
# elif QSWATTopology.coincidentPoints(source, start, xThreshold, yThreshold):
# newLine = []
# for pt in line:
# newLine.append(pt)
# if resGeom.intersects(QgsGeometry.fromPointXY(pt)):
# sourceToLakeDone = True
# break
# sourcePart.append(line)
# source = finish
# done.add(i)
# reduced = True
# if outletToLakeDone and sourceToLakeDone:
# break
# if not reduced:
# raise Exception('Looping trying to split reach')
# sourceGeom = QgsGeometry.fromPolyline(sourcePart[0]) if len(sourcePart) == 1 else QgsGeometry.fromMultiPolyline(sourcePart)
# outletGeom = QgsGeometry.fromPolyline(outletPart[0]) if len(outletPart) == 1 else QgsGeometry.fromMultiPolyline(outletPart)
# return sourceGeom, outletGeom
@staticmethod
def movePointToPerimeter(pt, lakeGeom, pFile, maxSteps):
"""Point pt is contained in lake. Move it downstream at most maxSteps
using D8 flow direction raster pFile until it is not inside the lake,
returning new point and true.
Return original point and false if failed to find perimeter."""
pLayer = QgsRasterLayer(pFile, 'FlowDir')
ds = gdal.Open(pFile, gdal.GA_ReadOnly)
pNodata = ds.GetRasterBand(1).GetNoDataValue()
transform = ds.GetGeoTransform()
stepCount = 0
pt1 = pt
while stepCount < maxSteps:
if not lakeGeom.contains(pt1):
return pt1, True
dir1 = QSWATTopology.valueAtPoint(pt1, pLayer)
if dir1 is None or dir1 == pNodata:
QSWATUtils.loginfo('Failed to reach lake perimeter: no flow direction available.')
return pt, False
# dir1 is read as a float. Also subtract 1 to get range 0..7
dir0 = int(dir1) - 1
col, row = QSWATTopology.projToCell(pt1.x(), pt1.y(), transform)
col1, row1 = col + QSWATUtils._dX[dir0], row + QSWATUtils._dY[dir0]
x1, y1 = QSWATTopology.cellToProj(col1, row1, transform)
pt1 = QgsPointXY(x1, y1)
stepCount += 1
QSWATUtils.loginfo('Failed to reach lake perimeter in {0} steps.'.format(maxSteps))
return pt, False
``` |
{
"source": "JPHutchins/gatter",
"score": 2
} |
#### File: gatterserver/routers/ble.py
```python
import logging
import sys
from fastapi import APIRouter, WebSocket
from fastapi.encoders import jsonable_encoder
from gatterserver import models
from gatterserver.ble.discovery import BLEDiscoveryManager
LOGGER = logging.getLogger(__name__)
router = APIRouter()
discovery_manager: BLEDiscoveryManager = None
def register(discovery_manager: BLEDiscoveryManager):
sys.modules[__name__].__dict__["discovery_manager"] = discovery_manager
@router.websocket("/api/ws/blediscovery")
async def websocket_endpoint(websocket: WebSocket):
await websocket.accept()
async for device in discovery_manager.receive():
discovery_message = models.BLEDiscoveryMessage(
address=device.address,
name=device.name,
rssi=device.rssi,
rssiAverage=discovery_manager.get_average_rssi(device.address),
services=device.metadata["uuids"],
manufacturerData=device.metadata["manufacturer_data"],
)
await websocket.send_json(jsonable_encoder(discovery_message))
@router.post("/api/ble/discovery")
async def ble_discovery_endpoint(command: models.DiscoveryCommand):
if command.discovery:
await discovery_manager.start_discovery()
else:
await discovery_manager.stop_discovery()
return command
```
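A minimal client sketch for these endpoints (added for illustration, not part of the repository): it enables discovery over the REST endpoint and then reads discovery messages from the websocket. The base URL `http://localhost:8000` is an assumption, as is running the sketch against a live gatter server.
```python
import asyncio

import aiohttp


async def watch_ble_discovery(base: str = "http://localhost:8000") -> None:
    """Enable BLE discovery, then print a few discovery messages."""
    async with aiohttp.ClientSession() as session:
        # Start discovery via the REST endpoint defined above.
        async with session.post(f"{base}/api/ble/discovery", json={"discovery": True}) as resp:
            assert resp.status == 200
        # Listen for discovery messages on the websocket endpoint.
        async with session.ws_connect(f"{base.replace('http', 'ws')}/api/ws/blediscovery") as ws:
            for _ in range(5):
                device = await ws.receive_json()
                print(device["address"], device.get("rssiAverage"))
        # Stop discovery again.
        await session.post(f"{base}/api/ble/discovery", json={"discovery": False})


if __name__ == "__main__":
    asyncio.run(watch_ble_discovery())
```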
#### File: gatterserver/streams/__init__.py
```python
import asyncio
import logging
import struct
from collections import deque
from typing import Awaitable, Callable
from pydantic import BaseModel
from gatterserver import models
LOGGER = logging.getLogger(__name__)
class Stream(BaseModel):
start: Callable[[Awaitable], asyncio.Task]
task_handle: asyncio.Task = None
send: Awaitable = None
class Config:
arbitrary_types_allowed = True
class StreamPacket:
"""A packet of bytes associated with a particular stream."""
def __init__(self, stream_id: models.StreamId, raw_data: bytes):
self._stream_id = stream_id
self._raw_data = raw_data
self._raw_data_length = len(raw_data)
self._byte_array = (
struct.pack(
"BBH", stream_id.deviceId, stream_id.channelId, self._raw_data_length
)
+ self._raw_data
)
@property
def byte_array(self) -> bytes:
"""The bytes | device_id u8 | channel_id u8 | length u16 | data[0] u8 | data[length-1] u8 |"""
return self._byte_array
@property
def stream_id(self) -> models.StreamId:
"""The stream ID."""
return self._stream_id
class StreamManager:
"""Manage streams of data."""
def __init__(self):
self._lock = asyncio.Lock()
self._pending_data = deque([])
self._semaphore = asyncio.Semaphore(0)
self._streams = {}
async def add_stream(self, stream_id: models.StreamId) -> Awaitable:
"""Register a stream and return an awaitable used to queue packets."""
async with self._lock:
if stream_id in self._streams:
raise Exception(f"{stream_id} already added!")
self._streams[stream_id] = stream_id
LOGGER.info(f"{stream_id} added.")
def _send_wrapper(stream_id: models.StreamId) -> Awaitable:
async def _send(data: bytes):
async with self._lock:
try:
self._pending_data.append(StreamPacket(stream_id, data))
self._semaphore.release()
except Exception as e:
LOGGER.critical(e, exc_info=True)
return _send
return _send_wrapper(stream_id)
async def remove_stream(self, stream_id: models.StreamId):
"""Remove a stream."""
async with self._lock:
if stream_id not in self._streams:
LOGGER.warning(
f"{stream_id} cannot be removed because it does not exist."
)
return
del self._streams[stream_id]
async def receive(self) -> StreamPacket:
while True:
await self._semaphore.acquire()
async with self._lock:
yield self._pending_data.popleft()
```
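A small usage sketch of this module (added for illustration, not part of the repository): register a stream, push a payload through the awaitable returned by `add_stream`, then decode the 4-byte header of the packet yielded by `receive()`. The device and channel ids are arbitrary example values.
```python
import asyncio
import struct

from gatterserver import models
from gatterserver.streams import StreamManager


async def main() -> None:
    manager = StreamManager()
    stream_id = models.StreamId(deviceId=1, channelId=2)

    # add_stream returns the awaitable used to queue packets for this stream.
    send = await manager.add_stream(stream_id)
    await send(b"\x2a\x00")  # queue a 2-byte little-endian payload (42)

    packet = await manager.receive().__anext__()
    device_id, channel_id, length = struct.unpack("BBH", packet.byte_array[:4])
    value = int.from_bytes(packet.byte_array[4:4 + length], "little")
    print(device_id, channel_id, length, value)  # -> 1 2 2 42


if __name__ == "__main__":
    asyncio.run(main())
```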
#### File: backend/tests/test_emitters.py
```python
import asyncio
import pytest
from gatterserver import models
from gatterserver.emitters.emitter import Emitter
from gatterserver.emitters.emittermanager import EmitterManager
from gatterserver.emitters.signalgen import Ramp
from gatterserver.streams import StreamPacket
def test_emitter_base_class():
e = Emitter(0)
assert e
assert e.device_id == 0
@pytest.mark.asyncio
async def test_emitter_manager_device_registration():
em = EmitterManager()
assert em
assert len(em._emitters) == 0
assert len(em._available_device_id_stack) == 255
d = await em.register_device(Emitter)
assert d == 0
assert await em.is_registered(d)
assert type(em._emitters[d]) == Emitter
assert em._emitters[d].streams == [] # base class has no streams
assert len(em._emitters) == 1
assert len(em._available_device_id_stack) == 254
for i in range(1, 20):
d = await em.register_device(Emitter)
assert d == i
assert await em.is_registered(i)
assert len(em._emitters) == i + 1
assert len(em._available_device_id_stack) == 254 - i
await em.unregister(15)
assert not await em.is_registered(15)
await em.unregister(3)
assert not await em.is_registered(3)
await em.unregister(12)
assert not await em.is_registered(12)
assert len(em._emitters) == 17
assert await em.register_device(Emitter) == 12
assert await em.register_device(Emitter) == 3
assert await em.register_device(Emitter) == 15
assert await em.register_device(Emitter) == 20
@pytest.mark.asyncio
async def test_emitter_manager_starts_stops_stream():
em = EmitterManager()
assert em
# Register and configure a ramp
d = await em.register_device(Ramp)
assert d == 0
assert type(em._emitters[d]) == Ramp
assert len(em._emitters[d].streams) == 1
r: Ramp = em._emitters[d]
s = em._emitters[d].get_stream(0)
assert s.task_handle == None
assert s.send == None
assert s.start != None
TOP_OF_RAMP = 10
r.configure(0, TOP_OF_RAMP, 1, 0.001)
assert r.device_id == 0
assert r._max == 10
assert r._step_interval_s == 0.001
assert r._step == 1
assert r._min == 0
# Start a stream via the EmitterManager
r_stream_id = models.StreamId(deviceId=0, channelId=0)
sm = em.stream_manager
await em.start_stream(r_stream_id)
assert type(s.task_handle) == asyncio.Task
assert s.send != None
async def test_ramp():
i = -1
a = 0
periods = 0
while periods < 2:
packet: StreamPacket = await sm.receive().__anext__()
i = int.from_bytes(packet.byte_array[4:], "little")
assert i == a
a += 1
if i == TOP_OF_RAMP:
periods += 1
a = 0
await test_ramp()
await em.stop_stream(r_stream_id)
assert s.task_handle == None
assert s.send != None # Since a stream was started once, the send cb is valid
assert s.start != None
# Test resuming stopped stream
r._val = r._min # ramp would otherwise resume where it left off
await em.start_stream(r_stream_id)
await test_ramp()
await em.stop_stream(r_stream_id)
assert s.task_handle == None
assert s.send != None
assert s.start != None
```
#### File: backend/tests/test_streams.py
```python
import asyncio
import pytest
from pydantic import ValidationError
from gatterserver import models
from gatterserver.streams import Stream, StreamManager, StreamPacket
def test_stream_type():
with pytest.raises(ValidationError):
s = Stream()
with pytest.raises(ValidationError):
s = Stream(start="hello fellow callback functions")
s = Stream(start=lambda x: x)
assert s
assert s.start != None
assert s.task_handle == None
assert s.send == None
def test_stream_id_type():
s = models.StreamId(deviceId=4, channelId=8)
assert s
assert s.deviceId == 4
assert s.channelId == 8
with pytest.raises(ValueError):
s = models.StreamId(deviceId=4)
with pytest.raises(ValueError):
s = models.StreamId(channelId=4)
with pytest.raises(ValueError):
s = models.StreamId(deviceId=-1)
with pytest.raises(ValueError):
s = models.StreamId(channelId=256)
with pytest.raises(ValueError):
s = models.StreamId()
s = models.StreamId(deviceId=5, channelId=12)
assert s.__hash__() == 1292
assert s.__hash__() == (5 << 8) | 12
def test_stream_packet_type():
s = models.StreamId(deviceId=4, channelId=8)
d = b"\x01\x02\x03\x04"
p = StreamPacket(s, d)
assert p
assert p.stream_id.deviceId == 4
assert p._raw_data == b"\x01\x02\x03\x04"
assert p._raw_data_length == 4
assert p._byte_array == b"\x04\x08\x04\x00\x01\x02\x03\x04"
assert p.byte_array == b"\x04\x08\x04\x00\x01\x02\x03\x04"
d = bytearray([i for i in range(0xFF)])
p = StreamPacket(s, d)
assert p
assert p._raw_data_length == 0xFF
assert int.from_bytes(p.byte_array[2:4], "little") == 0xFF
assert p.byte_array[4:] == bytearray([i for i in range(0xFF)])
def test_stream_manager_constructor():
sm = StreamManager()
assert sm
assert sm._lock.locked() == False
assert sm._semaphore.locked()
assert len(sm._pending_data) == 0
assert len(sm._streams) == 0
@pytest.mark.asyncio
async def test_stream_manager_adds_streams():
s0 = models.StreamId(deviceId=0, channelId=0)
s1 = models.StreamId(deviceId=1, channelId=0)
sm = StreamManager()
f0 = await sm.add_stream(s0)
f1 = await sm.add_stream(s1)
assert s0 in sm._streams
assert s1 in sm._streams
assert f0 != f1
@pytest.mark.asyncio
async def test_stream_manager_removes_streams():
s0 = models.StreamId(deviceId=0, channelId=0)
s1 = models.StreamId(deviceId=1, channelId=0)
sm = StreamManager()
f0 = await sm.add_stream(s0)
f1 = await sm.add_stream(s1)
assert s0 in sm._streams
assert s1 in sm._streams
assert f0 != f1
await sm.remove_stream(s0)
assert s0 not in sm._streams
assert s1 in sm._streams
await sm.remove_stream(s1)
assert s0 not in sm._streams
assert s1 not in sm._streams
# Shouldn't raise exception
await sm.remove_stream(s0)
await sm.remove_stream(s1)
@pytest.mark.asyncio
async def test_stream_manager_callbacks_set_flag_and_queue_packets():
s0 = models.StreamId(deviceId=0, channelId=0)
s1 = models.StreamId(deviceId=1, channelId=0)
sm = StreamManager()
f0 = await sm.add_stream(s0)
f1 = await sm.add_stream(s1)
assert sm._semaphore.locked()
await f0(b"\x00")
assert not sm._semaphore.locked()
assert len(sm._pending_data) == 1
assert sm._pending_data[0].byte_array == b"\x00\x00\x01\x00\x00"
await f1(b"\x01")
assert not sm._semaphore.locked()
assert len(sm._pending_data) == 2
assert sm._pending_data[0].byte_array == b"\x00\x00\x01\x00\x00"
assert sm._pending_data[1].byte_array == b"\x01\x00\x01\x00\x01"
@pytest.mark.asyncio
async def test_stream_manager_receive_method():
s0 = models.StreamId(deviceId=0, channelId=0)
sm = StreamManager()
f0 = await sm.add_stream(s0)
assert sm._semaphore.locked()
await f0(b"\x00")
assert not sm._semaphore.locked()
assert len(sm._pending_data) == 1
assert sm._pending_data[0].byte_array == b"\x00\x00\x01\x00\x00"
packet = await sm.receive().__anext__()
assert packet.byte_array == b"\x00\x00\x01\x00\x00"
assert len(sm._pending_data) == 0
assert sm._semaphore.locked()
@pytest.mark.asyncio
async def test_stream_manager_receive_method_many():
s0 = models.StreamId(deviceId=0, channelId=0)
s1 = models.StreamId(deviceId=1, channelId=0)
sm = StreamManager()
f0 = await sm.add_stream(s0)
f1 = await sm.add_stream(s1)
assert sm._semaphore.locked()
# Add 1000 events to the stream queue
await asyncio.gather(
*tuple(
[f0(int.to_bytes(even, 2, "little")) for even in range(0, 1000, 2)]
+ [f1(int.to_bytes(odd, 2, "little")) for odd in range(1, 1000, 2)]
)
)
assert not sm._semaphore.locked()
assert len(sm._pending_data) == 1000
s0_vals = []
s1_vals = []
for _ in range(len(sm._pending_data)):
packet: StreamPacket = await sm.receive().__anext__()
data = packet.byte_array
size = int.from_bytes(data[2:4], "little")
if data[0] == 0:
s0_vals.append(int.from_bytes(data[4 : 4 + size], "little"))
elif data[0] == 1:
s1_vals.append(int.from_bytes(data[4 : 4 + size], "little"))
else:
assert 0
assert s0_vals == [even for even in range(0, 1000, 2)]
assert s1_vals == [odd for odd in range(1, 1000, 2)]
assert sm._semaphore.locked()
assert len(sm._pending_data) == 0
```
#### File: backend/tests/test_test_endpoints.py
```python
from fastapi.testclient import TestClient
from gatterserver.api import app
client = TestClient(app)
def test_hello_world():
response = client.get("/tests/hello_world")
assert response.status_code == 200
assert response.json() == {"msg": "Hello world!"}
def test_reads_bytes():
response = client.get("/tests/reads_bytes")
assert response.status_code == 200
assert response.content == b"\x00\x01\x02\x03"
def test_websocket_json():
with client.websocket_connect("/tests/ws/hello") as websocket:
data = websocket.receive_json()
assert data == {"msg": "Hello WebSocket!"}
def test_websocket_bytes():
with client.websocket_connect("/tests/ws/bytes") as websocket:
data = websocket.receive_bytes()
assert data == b"\x13\x37"
``` |
{
"source": "JPHutchins/pyavreceiver",
"score": 3
} |
#### File: pyavreceiver/denon/response.py
```python
import logging
from pyavreceiver import const
from pyavreceiver.denon.error import DenonCannotParse
from pyavreceiver.denon.parse import parse
from pyavreceiver.response import Message
_LOGGER = logging.getLogger(__name__)
class DenonMessage(Message):
"""Define a Denon telnet message representation."""
def __init__(self, message: str = None, command_dict: dict = None):
"""Init a new Denon message."""
self._message = None # type: str
self._raw_val = None # type: str
self._cmd = None # type: str
self._prm = None # type: str
self._val = None
self._name = None # type: str
self._command_dict = command_dict or {}
self._new_command = None
self._state_update = self._parse(message) if message else {}
def __str__(self):
"""Get user readable message."""
return self._message
def __repr__(self):
"""Get readable message."""
return self._message
def _parse(self, message: str) -> dict:
"""Parse message, assign attributes, return a state update dict."""
self._message = message
self._cmd, self._prm, self._raw_val = self.separate(message)
try:
self._val = self.parse_value(self._cmd, self._prm, self._raw_val)
except DenonCannotParse:
return {}
return self._make_state_update()
def _make_state_update(self) -> dict:
"""Return the state update dict."""
entry = self._command_dict.get(self._cmd) or self._cmd
key = entry
val = self._val if self._val is not None else self._raw_val
try:
if const.COMMAND_NAMES in entry:
try:
_ = int(val)
val_type = "number"
except ValueError:
val_type = val
key = entry[const.COMMAND_NAMES].get(val_type)
if key is None:
key = entry[const.COMMAND_NAMES]["other"]
if const.COMMAND_NAME in entry and const.COMMAND_PARAMS in entry:
cmd_name = entry[const.COMMAND_NAME]
key = f"{cmd_name}_{self._prm.lower()}" if self._prm else cmd_name
entry = entry.get(self._prm)
try:
key = (
entry.get(const.COMMAND_NAME)
or key.get(const.COMMAND_NAME) # key may be string
or f"{self._cmd}{'_' + self._prm if self._prm else ''}"
)
except AttributeError:
pass
_ = entry.get(self._raw_val)
val = _ if _ is not None else self._val or val
entry = entry.get(self._prm)
val = entry.get(self._raw_val) or self._val or val
key = (
self._command_dict[self._cmd][self._prm].get(const.COMMAND_NAME) or key
)
except (KeyError, AttributeError):
pass
self._name = key
return {key: val}
def separate(self, message) -> tuple:
"""Separate command category, parameter, and value."""
return DenonMessage._search(self._command_dict, 0, message, (), self=self)
@staticmethod
def _search(lvl: dict, depth: int, rem: str, cur: tuple, self=None) -> tuple:
"""Search dict for best match."""
# pylint: disable=protected-access
if rem in lvl:
if depth == 1:
return (*cur, None, rem)
elif const.COMMAND_RANGE in lvl and rem.isnumeric():
return (*cur, None, rem)
elif const.COMMAND_PARAMS in lvl:
prm = rem
val = ""
# Check for match at each partition
for _ in range(len(prm)):
if prm in lvl:
return (*cur, prm.strip(), val.strip())
val = prm[-1:] + val
prm = prm[:-1]
# No match found: return new entry, assume val after last space
words = rem.split(" ")
if len(words) < 2:
_LOGGER.debug(
"Added new event with empty value: %s, %s, None", *cur, rem
)
if self:
self._new_command = {"cmd": cur[0], "prm": rem, "val": None}
return (*cur, words[0], None)
prm = " ".join(words[:-1]).strip()
val = words[-1].strip()
_LOGGER.debug("Added new event: %s, %s, %s", *cur, prm, val)
if self:
self._new_command = {"cmd": cur[0], "prm": prm, "val": val}
return (*cur, prm, val)
elif depth == 1:
self._new_command = {"cmd": cur[0], "prm": None, "val": rem.strip()}
return (*cur, None, rem.strip())
# Search for matches at every prefix/postfix
for i in range(-1, -len(rem), -1):
prefix = rem[:i]
if prefix not in lvl:
continue
return DenonMessage._search(
lvl[prefix], depth + 1, rem[i:], (*cur, prefix), self=self
)
# No match found: return new entry, assume val after last space
words = rem.split(" ")
if len(words) < 2:
_LOGGER.error("Unparsable event: %s", rem)
return (rem, None, None)
cmd = " ".join(words[:-1]).strip()
val = words[-1].strip()
_LOGGER.debug("Parsed new cmd event: %s, None, %s", cmd, val)
if self:
self._new_command = {"cmd": cmd, "prm": None, "val": val}
return (cmd, None, val)
def parse_value(self, cmd: str, prm: str, val: str):
"""Parse a value from val."""
if not isinstance(self._command_dict.get(cmd), dict):
return val
entry = self._command_dict[cmd].get(prm) or self._command_dict[cmd]
if const.COMMAND_FUNCTION in entry:
function_name = entry[const.COMMAND_FUNCTION]
elif const.COMMAND_FUNCTION in self._command_dict[cmd]:
function_name = self._command_dict[cmd][const.COMMAND_FUNCTION]
else:
return val
if function_name == const.FUNCTION_VOLUME:
parser = parse[const.FUNCTION_NUM_TO_DB]
return parser(
num=val,
zero=entry.get(const.COMMAND_ZERO),
valid_strings=entry.get(const.COMMAND_STRINGS),
)
raise Exception
@property
def parsed(self) -> tuple:
"""Return the message parsed into (command, param, value)."""
return (self._cmd, self._prm, self._val)
@property
def message(self) -> str:
"""Return the raw message string."""
return self._message
@property
def raw_value(self) -> str:
"""Return the unparsed value string."""
return self._raw_val
@property
def state_update(self) -> dict:
"""Return the state update dict produced by this message."""
return self._state_update
@property
def group(self) -> str:
"""Return the command group (command plus parameter)."""
return self._cmd + (self._prm or "")
@property
def new_command(self) -> dict:
"""Return the new command entry learned while parsing, if any."""
return self._new_command
@property
def name(self) -> str:
"""Return the resolved name of the command."""
return self._name
```
#### File: pyavreceiver/pyavreceiver/dispatch.py
```python
import asyncio
import functools
from collections import defaultdict
from typing import Any, Callable, Dict, List, Sequence
TargetType = Callable[..., Any]
DisconnectType = Callable[[], None]
ConnectType = Callable[[str, TargetType], DisconnectType]
SendType = Callable[..., Sequence[asyncio.Future]]
class Dispatcher:
"""Define the dispatch class."""
def __init__(
self,
*,
connect: ConnectType = None,
send: SendType = None,
signal_prefix: str = "",
loop=None
):
"""Init a new dispatch component."""
self._connect = connect or self._default_connect
self._send = send or self._default_send
self._signal_prefix = signal_prefix
self._loop = loop or asyncio.get_event_loop()
self._signals = defaultdict(list)
self._disconnects = []
def connect(self, signal: str, target: TargetType) -> DisconnectType:
"""Connect function to signal. Must be ran in the event loop."""
disconnect = self._connect(self._signal_prefix + signal, target)
self._disconnects.append(disconnect)
return disconnect
def send(self, signal: str, *args: Any) -> Sequence[asyncio.Future]:
"""Fire a signal. Must be ran in the event loop."""
return self._send(self._signal_prefix + signal, *args)
def disconnect_all(self):
"""Disconnect all connected."""
disconnects = self._disconnects.copy()
self._disconnects.clear()
for disconnect in disconnects:
disconnect()
def _default_connect(self, signal: str, target: TargetType) -> DisconnectType:
"""Connect function to signal. Must be ran in the event loop."""
self._signals[signal].append(target)
def remove_dispatcher() -> None:
"""Remove signal listener."""
try:
self._signals[signal].remove(target)
except ValueError:
# signal was already removed
pass
return remove_dispatcher
def _default_send(self, signal: str, *args: Any) -> Sequence[asyncio.Future]:
"""Fire a signal. Must be ran in the event loop."""
targets = self._signals[signal]
futures = []
for target in targets:
task = self._call_target(target, *args)
futures.append(task)
return futures
def _call_target(self, target, *args) -> asyncio.Future:
check_target = target
while isinstance(check_target, functools.partial):
check_target = check_target.func
if asyncio.iscoroutinefunction(check_target):
return self._loop.create_task(target(*args))
return self._loop.run_in_executor(None, target, *args)
@property
def signals(self) -> Dict[str, List[TargetType]]:
"""Get the dictionary of registered signals and callbacks."""
return self._signals
```
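A minimal usage sketch for the dispatcher above (not part of the library; the signal name and payload are arbitrary): register a plain callable, fire the signal from inside the event loop, then disconnect it again.
```python
import asyncio

from pyavreceiver.dispatch import Dispatcher


async def main():
    dispatcher = Dispatcher()

    def on_event(payload):
        # Plain callables are run via run_in_executor; coroutines become tasks.
        print("received:", payload)

    # connect() stores the target and returns the disconnect callable.
    disconnect = dispatcher.connect("demo_signal", on_event)

    # send() returns one future per connected target; await them all.
    await asyncio.gather(*dispatcher.send("demo_signal", "hello"))

    disconnect()


asyncio.run(main())
```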
#### File: pyavreceiver/pyavreceiver/error.py
```python
class AVReceiverError(Exception):
"""Base class for library errors."""
class AVReceiverInvalidArgumentError(AVReceiverError):
"""Invalid argument error."""
class AVReceiverIncompatibleDeviceError(AVReceiverError):
"""Invalid argument error."""
class QosTooHigh(AVReceiverError):
"""QoS too high error."""
def __init__(self):
self.message = "Highest QoS value is reserved for resent commands."
super().__init__(self.message)
```
#### File: pyavreceiver/pyavreceiver/__init__.py
```python
import asyncio
import logging
import aiohttp
from pyavreceiver import const
from pyavreceiver.denon.http_api import DenonAVRApi, DenonAVRX2016Api, DenonAVRXApi
from pyavreceiver.denon.receiver import DenonReceiver
from pyavreceiver.error import AVReceiverIncompatibleDeviceError
_LOGGER = logging.getLogger(__name__)
async def factory(host: str, log_level: int = logging.WARNING):
"""Return an instance of an AV Receiver."""
_LOGGER.setLevel(log_level)
names, tasks = [], []
async with aiohttp.ClientSession() as session:
for name, url in const.UPNP_ENDPOINTS.items():
names.append(name)
tasks.append(
asyncio.create_task(session.get(f"http://{host}{url}", timeout=5))
)
responses = await asyncio.gather(*tasks)
for name, response in zip(names, responses):
if response.status == 200:
if name == "denon-avr-x-2016":
http_api = DenonAVRX2016Api(host, await response.text())
return DenonReceiver(host, http_api=http_api)
if name == "denon-avr-x":
http_api = DenonAVRXApi(host, await response.text())
return DenonReceiver(host, http_api=http_api)
if name == "denon-avr":
http_api = DenonAVRApi(host, await response.text())
return DenonReceiver(host, http_api=http_api)
raise AVReceiverIncompatibleDeviceError
```
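A hedged sketch of the factory and the receiver lifecycle it returns; the host address is hypothetical and error handling is omitted.
```python
import asyncio

from pyavreceiver import factory


async def main():
    avr = await factory("192.168.1.50")   # hypothetical receiver address
    await avr.init(auto_reconnect=True)   # connect and build the zone objects
    print(avr.friendly_name, avr.power)
    avr.set_power(True)                   # queues a power-on command over telnet
    await avr.disconnect()


asyncio.run(main())
```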
#### File: pyavreceiver/pyavreceiver/receiver.py
```python
from collections import defaultdict
from typing import Dict, Optional
from pyavreceiver import const
from pyavreceiver.command import Command, CommandValues
from pyavreceiver.dispatch import Dispatcher
from pyavreceiver.http_api import HTTPApi
from pyavreceiver.telnet_connection import TelnetConnection
from pyavreceiver.zone import Zone
class AVReceiver:
"""Representation of an audio/video receiver."""
def __init__(
self,
host: str,
*,
dispatcher: Dispatcher = Dispatcher(),
heart_beat: Optional[float] = const.DEFAULT_HEART_BEAT,
http_api: HTTPApi = None,
telnet: bool = True,
timeout: float = const.DEFAULT_TIMEOUT,
zone_aux_class: Zone = None,
zone_main_class: Zone = None,
):
"""Init the device."""
self._host = host
self._dispatcher = dispatcher
self._heart_beat = heart_beat
self._http_api = http_api
self._telnet = telnet
self._timeout = timeout
self._zone_aux_class = zone_aux_class
self._zone_main_class = zone_main_class
self._connection = None # type: TelnetConnection
self._connections = []
self._device_info = {}
self._sources = None # type: dict
self._state = defaultdict()
self._main_zone = None # type: Zone
self._zone2, self._zone3, self._zone4 = None, None, None
async def init(
self,
*,
auto_reconnect=False,
reconnect_delay: float = const.DEFAULT_RECONNECT_DELAY,
):
"""Await the initialization of the device."""
disconnect = await self._connection.init(
auto_reconnect=auto_reconnect, reconnect_delay=reconnect_delay
)
self._connections.append(disconnect)
if self._sources:
self.commands[const.ATTR_SOURCE].init_values(CommandValues(self._sources))
if self._http_api:
await self.update_device_info()
if self.zones >= 1:
self._main_zone = self._zone_main_class(self)
if self.zones >= 2:
self._zone2 = self._zone_aux_class(self, zone="zone2")
if self.zones >= 3:
self._zone3 = self._zone_aux_class(self, zone="zone3")
if self.zones >= 4:
self._zone4 = self._zone_aux_class(self, zone="zone4")
async def connect(
self,
*,
auto_reconnect=False,
reconnect_delay: float = const.DEFAULT_RECONNECT_DELAY,
):
"""Connect to the audio/video receiver."""
if self._telnet:
await self._connection.connect_telnet(
auto_reconnect=auto_reconnect, reconnect_delay=reconnect_delay
)
self._connections.append(self._connection.disconnect_telnet)
async def disconnect(self):
"""Disconnect from the audio/video receiver."""
while self._connections:
disconnect = self._connections.pop()
await disconnect()
def update_state(self, state_update: dict) -> bool:
"""Handle a state update."""
update = False
for attr, val in state_update.items():
if attr not in self._state or self._state[attr] != val:
self._state[attr] = val
update = True
return update
async def update_device_info(self):
"""Update information about the A/V Receiver."""
self._device_info = await self._http_api.get_device_info()
self._sources = await self._http_api.get_source_names()
@property
def dispatcher(self) -> Dispatcher:
"""Get the dispatcher instance."""
return self._dispatcher
@property
def commands(self) -> Dict[str, Command]:
"""Get the dict of commands."""
return self._connection.commands
@property
def connection_state(self) -> str:
"""Get the state of the connection."""
return self._connection.state
@property
def host(self) -> str:
"""Get the host."""
return self._host
@property
def friendly_name(self) -> str:
"""Get the friendly name."""
return self._device_info.get(const.INFO_FRIENDLY_NAME)
@property
def mac(self) -> str:
"""Get the MAC address."""
return self._device_info.get(const.INFO_MAC)
@property
def manufacturer(self) -> str:
"""Get the manufacturer."""
return self._device_info.get(const.INFO_MANUFACTURER)
@property
def model(self) -> str:
"""Get the model."""
return self._device_info.get(const.INFO_MODEL)
@property
def main(self) -> Zone:
"""Get the main zone object."""
return self._main_zone
@property
def serial_number(self) -> str:
"""Get the serial number."""
return self._device_info.get(const.INFO_SERIAL)
@property
def sources(self) -> dict:
"""Get the input sources map."""
return self._sources if self._sources else {}
@property
def state(self) -> defaultdict:
"""Get the current state."""
return self._state
@property
def telnet_connection(self) -> TelnetConnection:
"""Get the telnet connection."""
return self._connection
@property
def power(self) -> str:
"""The state of power."""
return self.state.get(const.ATTR_POWER)
@property
def zone2(self) -> Zone:
"""Get the Zone 2 object."""
return self._zone2
@property
def zone3(self) -> Zone:
"""Get the Zone 3 object."""
return self._zone3
@property
def zone4(self) -> Zone:
"""Get the Zone 4 object."""
return self._zone4
@property
def zones(self) -> int:
"""Get the number of zones."""
return self._device_info.get(const.INFO_ZONES)
def set_power(self, val: bool) -> bool:
"""Request the receiver set power to val."""
# pylint: disable=protected-access
command = self._connection._command_lookup[const.ATTR_POWER].set_val(val)
self._connection.send_command(command)
return True
```
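`update_state` only reports `True` when an attribute actually changes, which is what gates the `SIGNAL_STATE_UPDATE` dispatch in the telnet connection. A small illustrative sketch (hypothetical host, no connection is made; it relies on the class's default dispatcher being constructible outside a running loop):
```python
from pyavreceiver.receiver import AVReceiver

avr = AVReceiver("10.0.0.5")                       # hypothetical host; nothing is connected
assert avr.update_state({"volume": -20}) is True   # new attribute -> change reported
assert avr.update_state({"volume": -20}) is False  # identical value -> no change
assert avr.state["volume"] == -20
```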
#### File: pyavreceiver/pyavreceiver/telnet_connection.py
```python
import asyncio
import logging
from abc import ABC, abstractmethod
from collections import OrderedDict, defaultdict
from datetime import datetime, timedelta
from typing import Coroutine, Dict, Optional, Tuple
import telnetlib3
from pyavreceiver import const
from pyavreceiver.command import TelnetCommand
from pyavreceiver.functions import none
from pyavreceiver.priority_queue import PriorityQueue
from pyavreceiver.response import Message
_LOGGER = logging.getLogger(__name__)
# Monkey patch misbehaving repr until fixed
telnetlib3.client_base.BaseClient.__repr__ = lambda x: "AV Receiver"
class TelnetConnection(ABC):
"""Define the telnet connection interface."""
def __init__(
self,
avr,
host: str,
*,
port: int = const.CLI_PORT,
timeout: float = const.DEFAULT_TIMEOUT,
heart_beat: Optional[float] = const.DEFAULT_HEART_BEAT,
):
"""Init the connection."""
self._avr = avr
self.host = host
self.port = port
self._command_dict = {}
self._command_lookup = {}
self._command_timeout = const.DEFAULT_TELNET_TIMEOUT
self._learned_commands = {}
self.timeout = timeout # type: int
self._reader = None # type: telnetlib3.TelnetReader
self._writer = None # type: telnetlib3.TelnetWriter
self._response_handler_task = None # type: asyncio.Task
self._command_queue = PriorityQueue()
self._command_queue_task = None # type: asyncio.Task
self._expected_responses = ExpectedResponseQueue()
self._sequence = 0 # type: int
self._state = const.STATE_DISCONNECTED # type: str
self._auto_reconnect = True # type: bool
self._reconnect_delay = const.DEFAULT_RECONNECT_DELAY # type: float
self._reconnect_task = None # type: asyncio.Task
self._last_activity = datetime(1970, 1, 1) # type: datetime
self._last_command_time = datetime(2020, 1, 1) # type: datetime
self._heart_beat_interval = heart_beat # type: Optional[float]
self._heart_beat_task = None # type: asyncio.Task
self._message_interval_limit = const.DEFAULT_MESSAGE_INTERVAL_LIMIT
@abstractmethod
def _load_command_dict(self, path=None):
"""Load the commands YAML."""
@abstractmethod
def _get_command_lookup(self, command_dict):
"""Create a command lookup dict."""
@abstractmethod
async def _response_handler(self):
"""Handle messages received from the device."""
def _heartbeat_command(self):
command = self._command_lookup[const.ATTR_POWER].set_query()
self.send_command(command, heartbeat=True)
async def init(self, *, auto_reconnect: bool = True, reconnect_delay: float = -1):
"""Await the async initialization."""
self._load_command_dict()
await self.connect(
auto_reconnect=auto_reconnect, reconnect_delay=reconnect_delay
)
self._command_lookup = self._get_command_lookup(self._command_dict)
return self.disconnect
async def connect(
self, *, auto_reconnect: bool = False, reconnect_delay: float = -1
):
"""Connect to the AV Receiver - called by init only."""
if self._state == const.STATE_CONNECTED:
return
if reconnect_delay < 0:
reconnect_delay = self._reconnect_delay
self._auto_reconnect = False
await self._connect()
self._auto_reconnect = auto_reconnect
async def _connect(self):
"""Make Telnet connection."""
try:
open_future = telnetlib3.open_connection(self.host, self.port)
self._reader, self._writer = await asyncio.wait_for(
open_future, self.timeout
)
except Exception as error:
raise error from error
self._response_handler_task = asyncio.create_task(self._response_handler())
self._state = const.STATE_CONNECTED
self._command_queue_task = asyncio.create_task(self._process_command_queue())
if self._heart_beat_interval is not None and self._heart_beat_interval > 0:
self._heart_beat_task = asyncio.create_task(self._heart_beat())
_LOGGER.debug("Connected to %s", self.host)
self._avr.dispatcher.send(const.SIGNAL_TELNET_EVENT, const.EVENT_CONNECTED)
async def disconnect(self):
"""Disconnect from the AV Receiver."""
if self._state == const.STATE_DISCONNECTED:
return
if self._reconnect_task:
self._reconnect_task.cancel()
await self._reconnect_task
self._reconnect_task = None
await self._disconnect()
self._state = const.STATE_DISCONNECTED
_LOGGER.debug("Disconnected from %s", self.host)
self._avr.dispatcher.send(const.SIGNAL_TELNET_EVENT, const.EVENT_DISCONNECTED)
async def _disconnect(self):
"""Cancel response handler and pending tasks."""
if self._heart_beat_task:
self._heart_beat_task.cancel()
try:
await self._heart_beat_task
except asyncio.CancelledError:
pass
self._heart_beat_task = None
if self._response_handler_task:
self._response_handler_task.cancel()
try:
await self._response_handler_task
except asyncio.CancelledError:
pass
self._response_handler_task = None
if self._command_queue_task:
self._command_queue_task.cancel()
try:
await self._command_queue_task
except asyncio.CancelledError:
pass
self._command_queue_task = None
if self._expected_responses:
self._expected_responses.cancel_tasks()
if self._writer:
self._writer.close()
self._writer = None
self._reader = None
self._sequence = 0
self._command_queue.clear()
async def _handle_connection_error(self, error: Exception = "heartbeat"):
"""Handle connection failures and schedule reconnect."""
if self._reconnect_task:
return
await self._disconnect()
if self._auto_reconnect:
self._state = const.STATE_RECONNECTING
self._reconnect_task = asyncio.create_task(self._reconnect())
else:
self._state = const.STATE_DISCONNECTED
_LOGGER.debug("Disconnected from %s: %s", self.host, error)
self._avr.dispatcher.send(const.SIGNAL_TELNET_EVENT, const.EVENT_DISCONNECTED)
async def _reconnect(self):
"""Perform core reconnection logic."""
# pylint: disable=broad-except
while self._state != const.STATE_CONNECTED:
try:
await self._connect()
self._reconnect_task = None
return
except Exception as err:
# Occurs when we could not reconnect
_LOGGER.debug("Failed to reconnect to %s: %s", self.host, err)
await self._disconnect()
await asyncio.sleep(self._reconnect_delay)
except asyncio.CancelledError:
# Occurs when reconnect is cancelled via disconnect
return
async def _heart_beat(self):
"""Check for activity and send a heartbeat to check for connection."""
while self._state == const.STATE_CONNECTED:
old_last_activity = self._last_activity
last_activity = datetime.utcnow() - self._last_activity
threshold = timedelta(seconds=self._heart_beat_interval)
if last_activity > threshold:
self._heartbeat_command()
await asyncio.sleep(5)
if self._last_activity <= old_last_activity:
await self._handle_connection_error()
await asyncio.sleep(self._heart_beat_interval / 2)
def send_command(self, command: TelnetCommand, heartbeat=False):
"""Execute a command."""
if not heartbeat and self._state != const.STATE_CONNECTED:
_LOGGER.debug(
"Command failed %s - Not connected to device %s",
command.message,
self.host,
)
return
_LOGGER.debug("Command queued: %s", command.message)
self._command_queue.push(command)
def async_send_command(self, command: TelnetCommand) -> Coroutine:
"""Execute an async command and return awaitable coroutine."""
_LOGGER.debug("queueing command: %s", command.message)
# Give command a unique sequence id and increment
command.set_sequence(self._sequence)
self._sequence += 1
# Push command onto queue
status, cancel = self._command_queue.push(command)
# Determine the type of awaitable response to return
if status == const.QUEUE_FAILED:
_LOGGER.debug("Command not queued: %s", command.message)
return cancel.wait()
if status == const.QUEUE_CANCEL:
try:
_LOGGER.debug("Command overwritten: %s", command.message)
self._expected_responses[cancel].overwrite_command(command)
return self._expected_responses[cancel].wait()
except KeyError:
# Can happen when a single query produces multiple responses
_LOGGER.debug("Command already resolved: %s", command.message)
return none()
if status == const.QUEUE_NO_CANCEL:
_LOGGER.debug("Command queued: %s", command.message)
self._expected_responses[command] = ExpectedResponse(command, self)
return self._expected_responses[command].wait()
def resend_command(self, expected_response: "ExpectedResponse") -> None:
"""Resend a command that was not responded to."""
status, cancel = self._command_queue.push(expected_response.command)
if status == const.QUEUE_FAILED:
# A resend at higher qos was already sent
# This shouldn't happen
self._expected_responses[
expected_response.command
] = self._expected_responses[cancel]
if status == const.QUEUE_CANCEL:
# The resend will overwrite a queued command, set that commands response to
# trigger on resolution of this command
self._expected_responses[cancel] = self
_LOGGER.debug(
"QoS requeueing command: %s", expected_response.command.message
)
if status == const.QUEUE_NO_CANCEL:
# The resend is treated as if it is the original command
_LOGGER.debug(
"QoS requeueing command: %s", expected_response.command.message
)
async def _process_command_queue(self):
while True:
wait_time = const.DEFAULT_QUEUE_INTERVAL
if self._command_queue.is_empty:
await asyncio.sleep(wait_time)
continue
try:
time_since_last_command = datetime.utcnow() - self._last_command_time
threshold = timedelta(seconds=self._message_interval_limit)
wait_time = self._message_interval_limit
if (time_since_last_command > threshold) and (
command := self._command_queue.popcommand()
):
_LOGGER.debug("Sending command: %s", command.message)
# Send command message
self._writer.write(command.message)
await self._writer.drain()
# Record time sent and update the expected response
self._last_command_time = datetime.utcnow()
try:
self._expected_responses[command].set_sent(
self._last_command_time
)
except KeyError:
# QoS 0 command
pass
wait_time = (
self._message_interval_limit + const.DEFAULT_QUEUE_INTERVAL
)
else:
wait_time = (
threshold.total_seconds()
- time_since_last_command.total_seconds()
+ const.DEFAULT_QUEUE_INTERVAL
)
await asyncio.sleep(wait_time)
# pylint: disable=broad-except, fixme
except Exception as err:
# TODO: error handling
_LOGGER.critical(Exception(err))
await asyncio.sleep(self._message_interval_limit)
def _handle_event(self, resp: Message):
"""Handle a response event."""
if resp.state_update == {}:
_LOGGER.debug("No state update in message: %s", resp.message)
if self._avr.update_state(resp.state_update):
self._avr.dispatcher.send(const.SIGNAL_STATE_UPDATE, resp.message)
_LOGGER.debug("Event received: %s", resp.state_update)
if expected_response_items := self._expected_responses.popmatch(resp.group):
_, expected_response = expected_response_items
expected_response.set(resp)
else:
_LOGGER.debug("No expected response matched: %s", resp.group)
@property
def commands(self) -> dict:
"""Get the dict of commands."""
return self._command_lookup
@property
def state(self) -> str:
"""Get the current state of the connection."""
return self._state
class ExpectedResponse:
"""Define an awaitable command event response."""
__slots__ = (
"_attempts",
"_command",
"_command_timeout",
"_connection",
"_event",
"_expire_task",
"_qos_task",
"_response",
"_time_sent",
)
def __init__(self, command: TelnetCommand, connection: TelnetConnection):
"""Init a new instance of the CommandEvent."""
self._attempts = 0
self._command = command
self._command_timeout = const.DEFAULT_TELNET_TIMEOUT
self._connection = connection
self._event = asyncio.Event()
self._expire_task = None # type: asyncio.Task
self._qos_task = None # type: asyncio.Task
self._response = None
self._time_sent = None
async def cancel_tasks(self) -> None:
"""Cancel the QoS and/or expire tasks."""
if self._qos_task:
self._qos_task.cancel()
try:
await self._qos_task
except asyncio.CancelledError:
pass
self._qos_task = None
if self._expire_task:
self._expire_task.cancel()
try:
await self._expire_task
except asyncio.CancelledError:
pass
self._expire_task = None
async def _expire(self):
"""Wait until timeout has expired and remove expected response."""
# pylint: disable=protected-access
await asyncio.sleep(const.DEFAULT_COMMAND_EXPIRATION)
self.set(None)
async def wait(self) -> str:
"""Wait until the event is set."""
# pylint: disable=protected-access
await self._event.wait()
await self.cancel_tasks() # cancel any remaining QoS or expire tasks
await self._connection._expected_responses.cancel_expected_response(
self._command
)
return self._response
def overwrite_command(self, command) -> None:
"""Overwrite the stale command with newer one."""
self._command = command
def set(self, message: Message) -> None:
"""Set the response."""
self._response = message
self._event.set()
def set_sent(self, time=None) -> None:
"""Set the time that the command was sent."""
time = time if time is not None else datetime.utcnow()  # evaluate at call time, not at definition
if not self._expire_task:
self._expire_task = asyncio.create_task(self._expire())
if self._attempts >= 1:
query = self._command.set_query(qos=0)
self._connection.send_command(query)
if self._attempts == 0:
self._command.raise_qos() # prioritize resends
self._attempts += 1
self._time_sent = time
self._qos_task = asyncio.create_task(self._resend_command())
async def _resend_command(self) -> None:
await asyncio.sleep(self._command_timeout)
if self._attempts <= self._command.retries:
self._connection.resend_command(self)
else:
_LOGGER.debug(
"Command %s failed after %s attempts",
self._command.message,
self._attempts,
)
self.set(None)
@property
def command(self) -> TelnetCommand:
"""Get the command that represents this event."""
return self._command
class ExpectedResponseQueue:
"""Define a queue of ExpectedResponse."""
def __init__(self):
"""Init the data structure."""
self._queue = defaultdict(
OrderedDict
) # type: Dict[OrderedDict[TelnetCommand, ExpectedResponse]]
def __getitem__(self, command: TelnetCommand) -> ExpectedResponse:
"""Get item shortcut through both dicts."""
return self._queue[command.group][command]
def __setitem__(self, command: TelnetCommand, expected_response: ExpectedResponse):
"""Set item shortcut through both dicts."""
self._queue[command.group][command] = expected_response
def get(self, group) -> Optional[OrderedDict]:
"""Get the (command, response) entries for group, if any."""
return self._queue.get(group)
def popmatch(self, group) -> Optional[Tuple[TelnetCommand, ExpectedResponse]]:
"""Pop the oldest matching expected response entry, if any."""
if match := self._queue.get(group):
try:
command, expected_response = match.popitem(last=False)
except KeyError:
return None
return (command, expected_response)
async def cancel_expected_response(self, command: TelnetCommand) -> None:
"""Cancel and delete the expected response for a specific command."""
try:
expected_response = self._queue[command.group][command]
expected_response.set(None)
await expected_response.cancel_tasks()
del self._queue[command.group][command]
try:
self._queue[command.group][command]
except KeyError:
return
_LOGGER.warning("Expected response: %s, was not deleted", expected_response)
raise AttributeError
except KeyError:
return
def cancel_tasks(self) -> None:
"""Cancel all tasks in the queue and clear dicts."""
for group in self._queue.values():
for expected_response in group.values():
expected_response.set(None)
self._queue = defaultdict(OrderedDict)
```
#### File: tests/denon/test_response.py
```python
from pyavreceiver.denon.response import DenonMessage
def test_separate(message_none):
"""Test separation of messages."""
assert message_none.separate("PWON") == ("PW", None, "ON")
assert message_none.separate("PWSTANDBY") == ("PW", None, "STANDBY")
assert message_none.separate("MVMAX 80") == ("MV", "MAX", "80")
assert message_none.separate("CVFL 60 ") == ("CV", "FL", "60")
assert message_none.separate("CVFL60") == ("CV", "FL", "60")
assert message_none.separate("CV FHL 44") == ("CV", "FHL", "44")
assert message_none.separate("CVNEW SPEC 55") == ("CV", "NEW SPEC", "55")
assert message_none.separate("CVUNKNOWNCOMMAND55") == (
"CV",
"UNKNOWNCOMMAND55",
None,
)
assert message_none.separate("MUON") == ("MU", None, "ON")
assert message_none.separate("SIPHONO") == ("SI", None, "PHONO")
assert message_none.separate("SI PHONO ") == ("SI", None, "PHONO")
assert message_none.separate("SIUSB DIRECT") == ("SI", None, "USB DIRECT")
assert message_none.separate("SINEW SOURCE VARIETY") == (
"SI",
None,
"NEW SOURCE VARIETY",
)
assert message_none.separate("SLPOFF") == ("SLP", None, "OFF")
assert message_none.separate("SLP OFF") == ("SLP", None, "OFF")
assert message_none.separate("MSDOLBY D+ +PL2X C") == (
"MS",
None,
"DOLBY D+ +PL2X C",
)
assert message_none.separate("MSYET ANOTHER POINTLESS DSP") == (
"MS",
None,
"YET ANOTHER POINTLESS DSP",
)
assert message_none.separate("PSDELAY 000") == ("PS", "DELAY", "000")
assert message_none.separate("PSTONE CTRL ON") == ("PS", "TONE CTRL", "ON")
assert message_none.separate("PSTONE CTRLOFF") == ("PS", "TONE CTRL", "OFF")
assert message_none.separate("PSSB MTRX ON") == ("PS", "SB", "MTRX ON")
assert message_none.separate("PSSB ON") == ("PS", "SB", "ON")
assert message_none.separate("PSMULTEQ BYP.LR") == ("PS", "MULTEQ", "BYP.LR")
assert message_none.separate("PSDCO OFF") == ("PS", "DCO", "OFF")
assert message_none.separate("PSLFE -8") == ("PS", "LFE", "-8")
assert message_none.separate("PSNEWPARAM OK") == ("PS", "NEWPARAM", "OK")
assert message_none.separate("PSUNKNOWNCOMMAND55") == (
"PS",
"UNKNOWNCOMMAND55",
None,
)
assert message_none.separate("MV60") == ("MV", None, "60")
assert message_none.separate("MV595") == ("MV", None, "595")
assert message_none.separate("Z2PSBAS 51") == ("Z2PS", "BAS", "51")
assert message_none.separate("Z260") == ("Z2", None, "60")
assert message_none.separate("Z2ON") == ("Z2", None, "ON")
assert message_none.separate("Z2PHONO") == ("Z2", None, "PHONO")
assert message_none.separate("Z3PSBAS 51") == ("Z3PS", "BAS", "51")
assert message_none.separate("Z360") == ("Z3", None, "60")
assert message_none.separate("Z3ON") == ("Z3", None, "ON")
assert message_none.separate("Z3PHONO") == ("Z3", None, "PHONO")
assert message_none.separate("NEWCMD 50") == ("NEWCMD", None, "50")
assert message_none.separate("NEWCMD WITH PARAMS 50") == (
"NEWCMD WITH PARAMS",
None,
"50",
)
assert message_none.separate("UNPARSABLE") == ("UNPARSABLE", None, None)
assert message_none.separate("FAKEFOR TESTS") == ("FAKEFO", None, "R TESTS")
assert message_none.separate("FAKENORTEST") == ("FAKEN", "OR", "TEST")
def test_format_db(message_none):
"""Test format to decibel."""
assert message_none.parse_value("MV", None, "60") == -20
assert message_none.parse_value("MV", None, "595") == -20.5
assert message_none.parse_value("MV", None, "80") == 0
assert message_none.parse_value("MV", None, "805") == 0.5
assert message_none.parse_value("MV", None, "00") == -80
assert message_none.parse_value("MV", "MAX", "80") == 0
assert message_none.parse_value("CV", "FL", "50") == 0
assert message_none.parse_value("CV", "SL", "39") == -11
assert message_none.parse_value("CV", "FHL", "545") == 4.5
assert message_none.parse_value("SSLEV", "FL", "50") == 0
assert message_none.parse_value("PS", "BAS", "50") == 0
assert message_none.parse_value("PS", "BAS", "39") == -11
assert message_none.parse_value("PS", "TRE", "545") == 4.5
assert message_none.parse_value("PS", "LFE", "-6") == -6
assert message_none.parse_value("Z2", None, "60") == -20
assert message_none.parse_value("Z2", None, "595") == -20.5
assert message_none.parse_value("Z2", None, "80") == 0
assert message_none.parse_value("Z2", None, "805") == 0.5
assert message_none.parse_value("Z2", None, "00") == -80
def test_attribute_assignment(command_dict):
"""Test assignment of attr."""
msg = DenonMessage("PWON", command_dict)
assert msg.parsed == ("PW", None, "ON")
assert str(msg) == "PWON"
assert repr(msg) == "PWON"
assert msg.group == "PW"
msg = DenonMessage("MV75", command_dict)
assert msg.parsed == ("MV", None, -5)
assert msg.message == "MV75"
assert msg.raw_value == "75"
msg = DenonMessage("MVMAX 80", command_dict)
assert msg.parsed == ("MV", "MAX", 0)
assert msg.message == "MVMAX 80"
assert msg.raw_value == "80"
msg = DenonMessage("CVFL 51", command_dict)
assert msg.parsed == ("CV", "FL", 1)
assert msg.message == "CVFL 51"
assert msg.raw_value == "51"
assert msg.group == "CVFL"
msg = DenonMessage("MSDOLBY D+ +PL2X C", command_dict)
assert msg.parsed == ("MS", None, "DOLBY D+ +PL2X C")
msg = DenonMessage("PSDYNVOL LOW", command_dict)
assert msg.parsed == ("PS", "DYNVOL", "LOW")
assert msg.message == "PSDYNVOL LOW"
assert msg.raw_value == "LOW"
assert msg.group == "PSDYNVOL"
msg = DenonMessage("PSDELAY 000", command_dict)
assert msg.parsed == ("PS", "DELAY", "000")
assert msg.message == "PSDELAY 000"
assert msg.raw_value == "000"
assert msg.group == "PSDELAY"
def test_state_update_dict(command_dict):
"""Test create the update dict."""
assert DenonMessage("PWON", command_dict).state_update == {"power": True}
assert DenonMessage("MVMAX 80", command_dict).state_update == {"max_volume": 0}
assert DenonMessage("PWSTANDBY", command_dict).state_update == {"power": False}
assert DenonMessage("MV75", command_dict).state_update == {"volume": -5}
assert DenonMessage("MV56", command_dict).state_update == {"volume": -24}
assert DenonMessage("CVFL 51", command_dict).state_update == {"channel_level_fl": 1}
assert DenonMessage("SSLEVFL 50", command_dict).state_update == {
"channel_level_fl": 0
}
assert DenonMessage("PSNEWPARAM LOW", command_dict).state_update == {
"PS_NEWPARAM": "LOW"
}
assert DenonMessage("MSDOLBY D+ +PL2X C", command_dict).state_update == {
"sound_mode": "DOLBY D+ +PL2X C"
}
assert DenonMessage("PSBAS 39", command_dict).state_update == {"bass": -11}
assert DenonMessage("MUON", command_dict).state_update == {"mute": True}
assert DenonMessage("SIPHONO", command_dict).state_update == {"source": "PHONO"}
assert DenonMessage("SIBD", command_dict).state_update == {"source": "BD"}
assert DenonMessage("SINEW SOURCE TYPE", command_dict).state_update == {
"source": "NEW SOURCE TYPE"
}
assert DenonMessage("DCAUTO", command_dict).state_update == {
"digital_signal_mode": "AUTO"
}
assert DenonMessage("PSTONE CTRL ON", command_dict).state_update == {
"tone_control": True
}
assert DenonMessage("PSSBMTRX ON", command_dict).state_update == {
"surround_back": "MTRX ON"
}
assert DenonMessage("PSDYNVOL MED", command_dict).state_update == {
"dsp_dynamic_range_control": "medium"
}
assert DenonMessage("NEWPARAM ANYVALUE", command_dict).state_update == {
"NEWPARAM": "ANYVALUE"
}
assert DenonMessage("PSNEWPARAM ANYVALUE", command_dict).state_update == {
"PS_NEWPARAM": "ANYVALUE"
}
assert DenonMessage("PSNEWPARAM", command_dict).state_update == {
"PS_NEWPARAM": None
}
def test_bad_value_handling(command_dict):
"""Test error handling for values that don't conform to spec."""
assert DenonMessage("MVSTRING", command_dict).state_update == {
"volume_string": None
}
assert DenonMessage("MV1000", command_dict).state_update == {}
def test_multiple_types(command_dict):
"""Test handling multiple types of value."""
assert DenonMessage("PSDIL OFF", command_dict).state_update == {
"dialog_level": False
}
assert DenonMessage("PSDIL ON", command_dict).state_update == {"dialog_level": True}
assert DenonMessage("PSDIL 55", command_dict).state_update == {"dialog_level": 5}
assert DenonMessage("PSDIL 45", command_dict).state_update == {"dialog_level": -5}
def test_unnamed_param(command_dict):
"""Test an unnamed parsed parameter."""
assert DenonMessage("PSDELAY 000", command_dict).state_update == {"PS_DELAY": "000"}
def test_zones(command_dict):
"""Test parsing zone commands."""
assert DenonMessage("ZMON", command_dict).state_update == {"zone1_power": True}
assert DenonMessage("ZMOFF", command_dict).state_update == {"zone1_power": False}
assert DenonMessage("Z2PSBAS 51", command_dict).state_update == {"zone2_bass": 1}
assert DenonMessage("Z3PSTRE 445", command_dict).state_update == {
"zone3_treble": -5.5
}
assert DenonMessage("Z260", command_dict).state_update == {"zone2_volume": -20}
assert DenonMessage("Z2ON", command_dict).state_update == {"zone2_power": True}
assert DenonMessage("Z2PHONO", command_dict).state_update == {
"zone2_source": "PHONO"
}
assert DenonMessage("Z2SOURCE", command_dict).state_update == {
"zone2_source": "SOURCE"
}
assert DenonMessage("Z360", command_dict).state_update == {"zone3_volume": -20}
assert DenonMessage("Z3OFF", command_dict).state_update == {"zone3_power": False}
assert DenonMessage("Z3SOURCE", command_dict).state_update == {
"zone3_source": "SOURCE"
}
def test_sequence(command_dict):
"""Test a long sequence."""
seq = [
"PW?",
"PWON",
"MV56",
"MVMAX 80",
"MUOFF",
"SITV",
"SVOFF",
"PSDYNVOL OFF",
"PWON",
"PWON",
"MV56",
"MVMAX 80",
]
for command in seq:
DenonMessage(command, command_dict)
invalid_seq = [
"90f9jf3^F*)UF(U(*#fjliuF(#)U(F@ujniljf(@#)&%T^GHkjbJBVKjY*(Y#*(@&5-00193ljl",
"",
" ",
" b b b ",
".:':>,",
"578934",
"None",
"\r",
"MV ",
" MV",
]
for command in invalid_seq:
DenonMessage(command, command_dict)
def test_learning_commands(command_dict):
"""Test saving learned commands."""
assert DenonMessage("PWON", command_dict).new_command is None
assert DenonMessage("PWSCREENSAVER", command_dict).new_command == {
"cmd": "PW",
"prm": None,
"val": "SCREENSAVER",
}
assert DenonMessage("PSNEW", command_dict).new_command == {
"cmd": "PS",
"prm": "NEW",
"val": None,
}
# The parser matches param to "EFF" and then sees "ECT" as value
# - this is not ideal behavior - the parser should know that "ECT"
# as an argument should be preceded by a space
assert DenonMessage("PSEFFECT", command_dict).parsed == ("PS", "EFF", "ECT")
assert DenonMessage("PSEFF ECT", command_dict).parsed == ("PS", "EFF", "ECT")
assert DenonMessage("CVATMOS RIGHT 52", command_dict).new_command == {
"cmd": "CV",
"prm": "ATMOS RIGHT",
"val": "52",
}
assert DenonMessage("NEWCMD MEDIUM", command_dict).new_command == {
"cmd": "NEWCMD",
"prm": None,
"val": "MEDIUM",
}
assert DenonMessage("UNPARSABLE", command_dict).new_command is None
``` |
{
"source": "JPHutchins/upnpclient",
"score": 3
} |
#### File: upnpclient/upnpclient/ssdp.py
```python
from .upnp import Device
from .util import _getLogger
import socket
import re
from datetime import datetime, timedelta
import select
import ifaddr
DISCOVER_TIMEOUT = 2
SSDP_TARGET = ("239.255.255.250", 1900)  # standard SSDP multicast group and port
SSDP_MX = DISCOVER_TIMEOUT
ST_ALL = "ssdp:all"
ST_ROOTDEVICE = "upnp:rootdevice"
class Entry(object):
def __init__(self, location):
self.location = location
def ssdp_request(ssdp_st, ssdp_mx=SSDP_MX):
"""Return request bytes for given st and mx."""
return "\r\n".join(
[
"M-SEARCH * HTTP/1.1",
"ST: {}".format(ssdp_st),
"MX: {:d}".format(ssdp_mx),
'MAN: "ssdp:discover"',
"HOST: {}:{}".format(*SSDP_TARGET),
"",
"",
]
).encode("utf-8")
def scan(timeout=5):
urls = []
sockets = []
ssdp_requests = [ssdp_request(ST_ALL), ssdp_request(ST_ROOTDEVICE)]
stop_wait = datetime.now() + timedelta(seconds=timeout)
for addr in get_addresses_ipv4():
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_TTL, SSDP_MX)
sock.bind((addr, 0))
sockets.append(sock)
except socket.error:
pass
for sock in [s for s in sockets]:
try:
for req in ssdp_requests:
sock.sendto(req, SSDP_TARGET)
sock.setblocking(False)
except socket.error:
sockets.remove(sock)
sock.close()
try:
while sockets:
time_diff = stop_wait - datetime.now()
seconds_left = time_diff.total_seconds()
if seconds_left <= 0:
break
ready = select.select(sockets, [], [], seconds_left)[0]
for sock in ready:
try:
data, address = sock.recvfrom(1024)
response = data.decode("utf-8")
except UnicodeDecodeError:
_getLogger(__name__).debug(
"Ignoring invalid unicode response from %s", address
)
continue
except socket.error:
_getLogger(__name__).exception(
"Socket error while discovering SSDP devices"
)
sockets.remove(sock)
sock.close()
continue
locations = re.findall(
r"LOCATION: *(?P<url>\S+)\s+", response, re.IGNORECASE
)
if locations and len(locations) > 0:
urls.append(Entry(locations[0]))
finally:
for s in sockets:
s.close()
return set(urls)
def get_addresses_ipv4():
# Get all adapters on current machine
adapters = ifaddr.get_adapters()
# Get the ip from the found adapters
# Ignore localhost and IPv6 addresses
return list(
set(
addr.ip
for iface in adapters
for addr in iface.ips
if addr.is_IPv4 and addr.ip != "127.0.0.1"
)
)
def discover(timeout=5):
"""
Convenience method to discover UPnP devices on the network. Returns a
list of `upnp.Device` instances. Any invalid servers are silently
ignored.
"""
devices = {}
for entry in scan(timeout):
if entry.location in devices:
continue
try:
devices[entry.location] = Device(entry.location)
except Exception as exc:
log = _getLogger("ssdp")
log.error("Error '%s' for %s", exc, entry)
return list(devices.values())
``` |
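A short usage example for the two entry points above (timeout chosen arbitrarily); this is illustrative only:
```python
from upnpclient import ssdp

# scan() returns a set of Entry objects carrying the LOCATION header of each reply.
for entry in ssdp.scan(timeout=3):
    print(entry.location)

# discover() additionally builds upnp.Device objects, skipping duplicates and invalid servers.
devices = ssdp.discover(timeout=3)
print("found %d device(s)" % len(devices))
```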
{
"source": "jPhy/Gomoku",
"score": 4
} |
#### File: Gomoku/lib/board_test.py
```python
"Unit test for the game-board class"
import unittest
from .board import *
def place_stone(board, color, x, y):
board[x,y] = color
class TestBoard(unittest.TestCase):
def test_creation(self):
width = 20
height = 40
board = Board(height, width)
self.assertEqual(board.shape, (height,width))
for i in range(height):
for j in range(width):
# empty refers to "no stone laid" and should be defined in the module ``board``
self.assertEqual(board[i,j], empty)
def test_reset(self):
width = 20
height = 40
board = Board(height, width)
place_stone(board, white, 5, 5)
place_stone(board, black, 4, 5)
place_stone(board, white, 4, 3)
self.assertEqual(board.in_turn, black)
self.assertFalse( (board.board == np.zeros([height, width]) ).all() )
board.reset()
self.assertEqual(board.in_turn, white)
self.assertEqual(board.shape, (height,width))
for i in range(height):
for j in range(width):
# empty refers to "no stone laid" and should be defined in the module ``board``
self.assertEqual(board[i,j], empty)
def test_lay_stone(self):
width = height= 20
board = Board(width, height)
# try "place a black stone at 5,5" --> white starts therefore expect error
self.assertRaisesRegexp(InvalidMoveError, 'White is in turn', place_stone, board, black, 5, 5)
# "place a white stone at 5,5" should be OK
place_stone(board, white, 5, 5)
# "place another white stone" is an invalid move
self.assertRaisesRegexp(InvalidMoveError, 'Black is in turn', place_stone, board, white, 5, 4)
# placing another stone at 5,5 is invalid since 5,5 is already occupied
self.assertRaisesRegexp(InvalidMoveError, r'Position \(5, 5\) is already taken', place_stone, board, white, 5, 5)
def test_log(self):
width = height= 20
board = Board(width, height)
self.assertEqual(board.log, [])
place_stone(board, white, 5, 5)
self.assertEqual(board.log, [(5, 5)])
place_stone(board, black, 1, 19)
self.assertEqual(board.log, [(5, 5), (1, 19)])
place_stone(board, white, 2, 8)
self.assertEqual(board.log, [(5, 5), (1, 19), (2, 8)])
board.reset()
self.assertEqual(board.log, [])
def test_full(self):
width = height= 4
board = Board(height, width)
in_turn = white
for i in range(width):
for j in range(height):
board[i,j] = in_turn
if in_turn == white:
in_turn = black
else:
in_turn = white
if not (i,j) == (width-1, height-1):
self.assertFalse(board.full())
else:
self.assertTrue(board.full())
self.assertTrue(board.full())
def test_winner(self):
width = height= 10
board = Board(width, height)
self.assertEqual(board.winner(), (None, []))
place_stone(board, white, 1,2)
self.assertEqual(board.winner(), (None, []))
place_stone(board, black, 0,2)
self.assertEqual(board.winner(), (None, []))
place_stone(board, white, 1,3)
self.assertEqual(board.winner(), (None, []))
place_stone(board, black, 0,3)
self.assertEqual(board.winner(), (None, []))
place_stone(board, white, 1,4)
self.assertEqual(board.winner(), (None, []))
place_stone(board, black, 0,4)
self.assertEqual(board.winner(), (None, []))
place_stone(board, white, 1,5)
self.assertEqual(board.winner(), (None, []))
place_stone(board, black, 0,5)
self.assertEqual(board.winner(), (None, []))
place_stone(board, white, 1,6)
self.assertEqual(board.winner()[0], white)
self.assertEqual(board.winner()[1], [(1,2), (1,3), (1,4), (1,5), (1,6)])
class TestGetLine(unittest.TestCase):
def setUp(self):
self.target_shape = (5,)
width = 7
height = 7
self.board = Board(width=width, height=height)
# make row
place_stone(self.board, white, 1,2)
place_stone(self.board, black, 1,3)
place_stone(self.board, white, 1,4)
place_stone(self.board, black, 1,5)
place_stone(self.board, white, 1,6)
# make column
place_stone(self.board, black, 2,6)
place_stone(self.board, white, 3,6)
place_stone(self.board, black, 4,6)
place_stone(self.board, white, 5,6)
# leave (6,6) empty
# make diagonal upleft to lowright
place_stone(self.board, black, 0,0)
place_stone(self.board, white, 1,1)
place_stone(self.board, black, 2,2)
place_stone(self.board, white, 3,3)
place_stone(self.board, black, 4,4)
# make diagonal lowleft to upright
place_stone(self.board, white, 5,0)
# leave (4,1) empty
place_stone(self.board, black, 3,2)
place_stone(self.board, white, 2,3)
# (1,4) is already white from "make column"
def test_get_column(self):
column, positions = self.board.get_column(2,6)
target_positions = [(2,6), (3,6), (4,6), (5,6), (6,6)]
self.assertEqual(column.shape, self.target_shape)
np.testing.assert_equal(column, np.array([black,white,black,white,empty]))
self.assertEqual(positions, target_positions)
def test_get_row(self):
row, positions = self.board.get_row(1,2)
target_positions = [(1,2), (1,3), (1,4), (1,5), (1,6)]
self.assertEqual(row.shape, self.target_shape)
np.testing.assert_equal(row, np.array([white,black,white,black,white]))
self.assertEqual(positions, target_positions)
def test_get_diagonal_upleft_to_lowright(self):
diagonal, positions = self.board.get_diagonal_upleft_to_lowright(0,0)
target_positions = [(0,0), (1,1), (2,2), (3,3), (4,4)]
self.assertEqual(diagonal.shape, self.target_shape)
np.testing.assert_equal(diagonal, np.array([black,white,black,white,black]))
self.assertEqual(positions, target_positions)
def test_diagonal_lowleft_to_upright(self):
diagonal, positions = self.board.get_diagonal_lowleft_to_upright(5,0)
target_positions = [(5,0), (4,1), (3,2), (2,3), (1,4)]
self.assertEqual(diagonal.shape, self.target_shape)
np.testing.assert_equal(diagonal, np.array([white,empty,black,white,white]))
self.assertEqual(positions, target_positions)
# no negative Y-index?
width = 7
height = 7
self.board = Board(width=width, height=height)
place_stone(self.board, white, 3,0)
place_stone(self.board, black, 2,1)
place_stone(self.board, white, 1,2)
place_stone(self.board, black, 0,3)
place_stone(self.board, white, -1,4)
self.assertRaises(IndexError, self.board.get_diagonal_lowleft_to_upright, 3,0)
# reach upmost row?
width = 7
height = 7
self.board = Board(width=width, height=height)
place_stone(self.board, white, 4,0)
place_stone(self.board, black, 3,1)
place_stone(self.board, white, 2,2)
place_stone(self.board, black, 1,3)
place_stone(self.board, white, 0,4)
line, positions = self.board.get_diagonal_lowleft_to_upright(4,0)
np.testing.assert_equal(line, [white, black, white, black, white])
np.testing.assert_equal(positions, [(4,0), (3,1), (2,2), (1,3), (0,4)])
```
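For reference, a small sketch of the `Board` API these tests exercise (assumes it is run from the repository root so that `lib` is importable):
```python
from lib.board import Board, white, black

board = Board(width=15, height=15)   # white moves first
board[7, 7] = white
board[7, 8] = black
winner, positions = board.winner()
print(winner, positions)             # (None, []) until someone lines up five stones
```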
#### File: Gomoku/lib/gui.py
```python
try:
# python 2
import Tkinter as tk
from tkMessageBox import Message
except ImportError:
# python 3
import tkinter as tk
from tkinter.messagebox import Message
from os import path
from .player import available_player_names, available_player_types, get_player_index
class Window(tk.Tk):
"Wrapper for the basic window"
def update(self, *args, **kwargs):
"""
Extend the base method ``update``.
Return ``True`` if the window is still open, return ``False``
if the window has been destroyed.
"""
try:
tk.Tk.update(self, *args, **kwargs)
return True
except tk.TclError as err:
if 'has been destroyed' in err.args[0]:
return False
else:
raise err
gui_invalid_move_message = Message(message='Invalid move!', icon='error', title='Gomoku')
import numpy as np
from . import player, board
from .board import black,white,empty , InvalidMoveError
class BoardGui(object):
"""
Gui of a Gomoku game board. Create a window with buttons
associated to a game Board (see "board.py")
:param board:
The game board for which to create a gui.
:param window:
The window to attach the gui to.
"""
# This is NOT a duplicate from class ``Window``
# Note that this function has an exit(0), not a return
def update(self, *args, **kwargs):
try:
tk.Canvas.update(self.window, *args, **kwargs)
except tk.TclError as err:
if 'has been destroyed' in err.args[0]:
exit(0)
else:
raise err
def __init__(self, board, window):
self.board = board
self.window = window
self.buttons = np.empty_like(board.board, dtype='object')
for i in range(self.board.height):
for j in range(self.board.width):
current_button = self.buttons[i,j] = tk.Button(window)
current_button.grid(row=i, column=j)
self.need_user_input = False # this variable is set True by the
# Human player ( ``Human.make_move()`` )
def game_running_buttons(self):
def button_command(i,j):
if self.need_user_input is True:
try:
self.board[j,i] = self.board.in_turn
except InvalidMoveError:
gui_invalid_move_message.show()
for i in range(self.board.width):
for j in range(self.board.height):
self.buttons[j,i].config( command=lambda x=i, y=j: button_command(x,y) )
# Note: the variable ``in_game`` is used to break loops that wait for user interactions
def game_running(self):
self.in_game = True
self.game_running_buttons()
def game_over(self):
self.in_game = False
self.game_over_buttons()
def game_message_buttons(self, message):
"""
Deactivate the game buttons; show error message ``message`` if pressed.
"""
def button_command():
Message(message=message, icon='error', title='Gomoku').show()
for i in range(self.board.width):
for j in range(self.board.height):
self.buttons[j,i].config(command=button_command)
def game_over_buttons(self):
self.game_message_buttons('The game is already over!\nStart a new game first.')
def game_paused_buttons(self):
self.game_message_buttons('Close the options dialog first.')
def renew_board(self):
"Draw the stone symbols onto the buttons"
for i in range(self.board.width):
for j in range(self.board.height):
if self.board[j,i] == black:
self.buttons[j,i].config(background='black', activebackground='black', highlightthickness=3, highlightbackground='lightgray')
elif self.board[j,i] == white:
self.buttons[j,i].config(background='white', activebackground='white', highlightthickness=3, highlightbackground='lightgray')
elif self.board[j,i] == empty:
self.buttons[j,i].config(background='darkgray', activebackground='darkgray', highlightthickness=3, highlightbackground='lightgray')
def highlight_winner(self, positions):
"""
Highlight the buttons with the coordinates ``(y,x)``
(see board.py) passed via ``positions``
:param positions:
iterable of tuples with the coordinates as specified in
'board.py'
"""
for y,x in positions:
self.buttons[y,x].config(highlightbackground='red')
def highlight_lastmove(self):
"""
Highlight the button with the coordinates of the last move.
"""
self.buttons[self.board.lastmove].config(highlightbackground='yellow')
class MainWindow(Window):
"""
Gui of Gomoku; the main window.
:param width, height:
width and height of the Gomoku game board
"""
def __init__(self, width, height):
self.width = width
self.height = height
Window.__init__(self)
self.title('Gomoku')
self.canvas_board = tk.Canvas(self)
self.canvas_board.pack()
self.board = board.Board(self.width,self.height)
self.gui = BoardGui(self.board, self.canvas_board)
self.canvas_controls = tk.Canvas(self)
self.canvas_controls.pack(side='bottom')
self.new_game_button = tk.Button(self.canvas_controls, text='New game')
self.new_game_button.grid(column=0, row=0)
self.options_button = tk.Button(self.canvas_controls, text='Options')
self.options_button.grid(column=1, row=0)
self.exit_button = tk.Button(self.canvas_controls, text='Exit')
self.exit_button.grid(column=2, row=0)
self.activate_buttons()
self.start_new_game = True
# set the players
# try reading from config file
# if that fails set to human
try:
from .config import white_player
except ImportError:
white_player = 'Human'
try:
from .config import black_player
except ImportError:
black_player = 'Human'
self.white_player_idx = get_player_index(white_player)
self.black_player_idx = get_player_index(black_player)
def mainloop(self):
# run until the user exits the program
while True:
# Start a new game only if desired by user.
# This bootstrap prevents the deletion of the old game board
# until the user presses the 'New game' button.
while not self.start_new_game:
if not self.update():
return
self.start_new_game = False
self.play_game()
def new_game(self): # button command
self.gui.game_over()
self.start_new_game = True
def play_game(self):
"Run a game of gomoku"
# enter "in_game" mode
self.gui.game_running()
# remove all stones from the board
self.board.reset()
self.gui.renew_board()
white_player = available_player_types[self.white_player_idx](white)
black_player = available_player_types[self.black_player_idx](black)
while True:
white_player.make_move(self.gui)
if not self.gui.in_game:
# game aborted
return
self.gui.update()
winner, positions = self.board.winner()
if (winner is not None) or (self.board.full()):
break
black_player.make_move(self.gui)
if not self.gui.in_game:
# game aborted
return
self.gui.update()
winner, positions = self.board.winner()
if (winner is not None) or (self.board.full()):
break
self.gui.renew_board()
self.gui.highlight_winner(positions)
if not self.gui.in_game:
# game aborted
return
elif winner == white:
Message(message='White wins!', icon='info', title='Gomoku').show()
elif winner == black:
Message(message='Black wins!', icon='info', title='Gomoku').show()
elif winner is None:
Message(message='Draw!', icon='info', title='Gomoku').show()
else:
raise RuntimeError('FATAL ERROR')
# end "in_game" mode
self.gui.game_over()
def buttons_option_mode(self):
def new_game_button_command():
Message(message='Close the options dialog first.', icon='error', title='Gomoku').show()
def options_button_command():
Message(message='The options dialog is already open!', icon='error', title='Gomoku').show()
self.gui.game_paused_buttons()
self.new_game_button.config(command=new_game_button_command)
self.options_button.config(command=options_button_command)
self.exit_button.config(command=self.destroy)
def activate_buttons(self):
self.gui.game_running_buttons()
self.new_game_button.config(command=self.new_game)
self.options_button.config(command=self.options)
self.exit_button.config(command=self.destroy)
def options(self): # button command
self.buttons_option_mode()
options_dialog = OptionsDialog(self.white_player_idx, self.black_player_idx)
while options_dialog.update():
try:
self.state()
except tk.TclError as err:
if 'has been destroyed' in err.args[0]:
options_dialog.destroy()
return
raise err
self.white_player_idx , self.black_player_idx = options_dialog.get_players()
options_dialog.set_defaults_if_desired()
self.activate_buttons()
if not self.gui.in_game:
self.gui.game_over()
class OptionsDialog(Window):
"""
Show a dialog to set the game options.
Return a dictionary of options.
"""
def __init__(self, current_white_player_index, current_black_player_index):
Window.__init__(self)
self.title('Gomoku - Options')
self.previous_white_player_index = current_white_player_index
self.previous_black_player_index = current_black_player_index
width = 250
height = 10
self.topspace = tk.Canvas(self, height=height, width=width)
self.topspace.pack()
player_width = 100
player_height = 20
self.cv_options = tk.Canvas(self)
self.cv_options.pack()
self.canvas_white_player = tk.Canvas(self.cv_options, width=player_width, height=player_height)
self.canvas_white_player.create_text(40,10, text='white player')
self.canvas_white_player.grid(column=0,row=0)
self.desired_white_player = tk.Variable(value=available_player_names[current_white_player_index])
self.dialog_white_player = tk.OptionMenu(self.cv_options, self.desired_white_player, *available_player_names)
self.dialog_white_player.grid(column=1,row=0)
self.canvas_black_player = tk.Canvas(self.cv_options, width=player_width, height=player_height)
self.canvas_black_player.create_text(40,10, text='black player')
self.canvas_black_player.grid(column=0,row=1)
self.desired_black_player = tk.Variable(value=available_player_names[current_black_player_index])
self.dialog_black_player = tk.OptionMenu(self.cv_options, self.desired_black_player, *available_player_names)
self.dialog_black_player.grid(column=1,row=1)
self.thinspace = tk.Canvas(self, height=height+1, width=width)
self.thinspace.pack()
self.checkbutton_new_defaults = tk.Checkbutton(self, text='save as default', command=self.set_if_new_defaults_desired)
self.new_defaults_desired = False
self.checkbutton_new_defaults.deselect()
self.checkbutton_new_defaults.pack()
self.middlespace = tk.Canvas(self, height=height+5, width=width)
self.middlespace.pack()
self.button_close = tk.Button(self, text='Done', command=self.destroy)
self.button_close.pack()
self.bottomspace = tk.Canvas(self, height=height, width=width)
self.bottomspace.pack()
def set_if_new_defaults_desired(self):
if self.new_defaults_desired:
self.new_defaults_desired = False
self.checkbutton_new_defaults.deselect()
else:
self.new_defaults_desired = True
self.checkbutton_new_defaults.select()
def set_defaults_if_desired(self):
if self.new_defaults_desired:
with open(path.join(path.split(__file__)[0], 'config.py'), 'w') as f:
f.write('white_player = "' + self.desired_white_player.get() + '"\n')
f.write('black_player = "' + self.desired_black_player.get() + '"\n')
def get_players(self):
"Return the indices of the desired white and black player."
white_player_idx = get_player_index(self.desired_white_player.get(), hint=self.previous_white_player_index)
black_player_idx = get_player_index(self.desired_black_player.get(), hint=self.previous_black_player_index)
return (white_player_idx,black_player_idx)
```
#### File: lib/player/__init__.py
```python
'Gomoku players'
from __future__ import print_function
from ..board import InvalidMoveError
from .lib import Playerlibrary
# base class
class Player(Playerlibrary):
"""
Description of a player to be used in the game.
To implement your own AI, override the function
``_make_move``.
.. important::
Note the leading underscore. Do *NOT* override ``make_move``.
The member string ``name`` appears in the options dialog.
:param color:
The color that the player plays as described in "board.py".
"""
def __init__(self, color):
self.color = color
def make_move(self, gui):
"""
Place a stone onto the `board`.
This is a common function that *should not be overridden*.
Override ``_make_move`` instead.
:param gui:
The game ``BoardGui`` as described in "gui.py"
"""
gui.renew_board()
if hasattr(gui.board, 'lastmove'):
gui.highlight_lastmove()
gui.color_in_turn = self.color
moves_left = gui.board.moves_left
self._make_move(gui)
if not gui.in_game:
return
if not moves_left - 1 == gui.board.moves_left:
raise InvalidMoveError('Player "%s" did not place a stone.' % self.name)
def _make_move(self, gui):
"Override this function for specific players"
raise NotImplementedError
# Human player
class Human(Player):
"""
A human player using a gui for input.
:param color:
The color that the player plays as described in "board.py".
"""
name = 'Human'
def _make_move(self, gui):
# wait for user input
gui.need_user_input = True
moves_left = gui.board.moves_left
while gui.board.moves_left == moves_left and gui.in_game:
gui.update()
gui.need_user_input = False
# search for player types in all files of this folder
available_player_types = [Human]
from os import listdir, path
player_directory = path.split(__file__)[0]
print('Searching for players in', player_directory)
filenames = listdir(player_directory)
filenames.sort() # search in alphabetical order
for filename in filenames:
if filename[-3:] != '.py' or 'test' in filename or \
filename == '__init__.py' or filename == 'lib.py':
continue
print('Processing', filename)
exec('from . import ' + filename[:-3] + ' as playerlib')
# search for classes derived from the base class ``Player``
for objname in dir(playerlib):
obj = playerlib.__dict__[objname]
if type(obj) is not type or obj in available_player_types:
continue
if issubclass(obj, Player) and obj is not Player:
print(' found', obj.name)
available_player_types.append(obj)
# player management
available_player_names = [player.name for player in available_player_types]
def get_player_index(name, hint=None):
"""
Convert the player name into an integer valued index.
:param name:
string; the name of the player as listed in ``available_player_names``
:param hint:
integer, optional; the first index to be checked.
"""
for i,n in enumerate(available_player_names):
if n == name:
return i
# the following is executed if the name is not found
raise ValueError('"%s" is not a registered player type' % name)
```
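The docstrings above spell out the contract for a custom AI: subclass `Player`, override `_make_move` (not `make_move`), and place exactly one stone per turn. A minimal, hypothetical example; a file like this dropped into `lib/player/` would be picked up by the discovery loop:
```python
# my_ai.py - hypothetical module inside the lib/player/ package
from . import InvalidMoveError, Player


class FirstFree(Player):
    "Plays the first empty position found in a top-left to bottom-right scan."
    name = 'First free'

    def _make_move(self, gui):
        for y in range(gui.board.height):
            for x in range(gui.board.width):
                try:
                    gui.board[y, x] = self.color  # raises InvalidMoveError if occupied
                    return
                except InvalidMoveError:
                    continue
```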
#### File: player/learning/learning.py
```python
from .. import Player, InvalidMoveError
from ..lib import black, white, empty
from os import path
import numpy as np
def dump_log(filename, old_log, new_log):
filepath = path.join(path.split(__file__)[0], filename)
new_log = [tuple(item) for item in new_log]
with open(filepath, 'w') as f:
f.write('log=')
f.write(repr([new_log] + old_log))
def load_oldlog(filename):
filepath = path.join(path.split(__file__)[0], filename)
try:
with open(filepath, 'r') as f:
exec(f.read())
return locals()['log']
except IOError:
print(' WARNING: "' + filepath + '" not found')
return []
def reduce_log(log):
"""
Cut the height and width of the board such that at least one stone is
placed on each boundary.
Return the reduced log.
"""
log = np.array(log)
x = log[:,0]
y = log[:,1]
x -= x.min()
y -= y.min()
return log
def make_board(log):
board = np.zeros((log[:,0].max() + 1 , log[:,1].max() + 1))
in_turn = white # white begins
for pos in log:
board[tuple(pos)] = in_turn
in_turn *= -1
return board
def match_log(oldlog, newlog):
"""
Return the index ``i`` such that oldlog[i - 1] matches newlog (up to
``reduce_log()``) and oldlog[i] is the move the opponent has taken
towards winning.
If no match is found, return None
"""
assert empty == 0
oldlog = np.array(oldlog)
newlog = np.array(reduce_log(newlog))
new_board = make_board(newlog)
for i in range(1, len(oldlog) + 1):
current_oldlog = reduce_log(oldlog[:i])
old_board = make_board(current_oldlog)
if old_board.shape == new_board.shape and (old_board == new_board).all():
return i
def remove_offset(oldlog, newlog):
"""
    If two logs match up to ``reduce_log()``, this function
undoes the cut in oldlog such that it matches newlog.
    Return oldlog[len(newlog) + 1] from the uncut oldlog; i.e. the move to be
    taken when preventing a previous mistake.
"""
newlog = np.array(newlog)
oldlog = np.array(oldlog[:len(newlog) + 2])
assert len(oldlog) == len(newlog) + 2, "Have len(oldlog) = " + str(len(oldlog)) + ", len(newlog) = " + str(len(newlog))
new_board = make_board(newlog)
reduced_board = make_board(oldlog[:-2])
x_offset = new_board.shape[0] - reduced_board.shape[0]
y_offset = new_board.shape[1] - reduced_board.shape[1]
return oldlog[-1][0] + x_offset , oldlog[-1][1] + y_offset
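# Illustrative walk-through of the two helpers above (values mirror the unit
# tests): with oldlog = [(9, 3), (3, 4), (1, 2), (111, 111), (10, 10)] and
# newlog = [(1, 1), (3, 3), (9, 2)], match_log(oldlog, newlog) returns 3
# (the first three old moves match after reduction), and
# remove_offset(oldlog, newlog) returns (10, 9), i.e. the recorded reply from
# the old game shifted back into the coordinates of the current one.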
class Learning(Player):
name = 'Adaptive'
def __init__(self, *args, **kwargs):
self.white_logfile = kwargs.pop('white_logfile', "white.log")
self.black_logfile = kwargs.pop('black_logfile', "black.log")
self.check_oldlogs = True
super(Learning, self).__init__(*args, **kwargs)
self.logfile = self.white_logfile if self.color == white else self.black_logfile
self.reload_oldlogs()
def reload_oldlogs(self):
self.oldlogs = load_oldlog(self.white_logfile) if self.color == white else load_oldlog(self.black_logfile)
def _make_move(self, gui):
def place_stone():
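            # Strategies are tried in decreasing order of urgency; only
            # ``win_if_possible`` can end the game here, so only that branch
            # reports True (meaning "this move wins") back to the caller.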
if self.win_if_possible(gui): return True
if self.block_open_four(gui): return False
if self.extend_three_to_doubly_open_four(gui): return False
if self.block_to_doubly_open_four(gui): return False
if self.block_doubly_open_three(gui): return False
if self.stop_old_mistake(gui): return False
if self.block_twice_to_three_or_more(gui): return False
if self.extend_three_to_four(gui): return False
if self.block_open_three(gui): return False
if self.extend_twice_two_to_three(gui): return False
if self.block_doubly_open_two(gui): return False
if self.block_open_two(gui): return False
if self.extend_two_to_three(gui): return False
try:
gui.board[gui.board.height // 2, gui.board.width // 2] = self.color; return False
except InvalidMoveError:
try:
gui.board[gui.board.height // 2 + 1, gui.board.width // 2] = self.color; return False
except InvalidMoveError:
if self.extend_one(gui): return False
self.random_move(gui)
winner_is_me = place_stone()
if self.check_oldlogs and not winner_is_me:
move_to_make_opponent_win = self.get_move_to_make_opponent_win(gui)
if move_to_make_opponent_win is not None:
dump_log(
filename = self.logfile,
new_log = reduce_log(gui.board.log + [move_to_make_opponent_win]),
old_log = self.oldlogs
)
self.check_oldlogs = False
self.reload_oldlogs()
def get_move_to_make_opponent_win(self, gui):
"""
Check if a player of opponent color can win the game in the next
move.
        Return the position where the opponent has to place a stone to win
or, if not possible, None.
"""
opponent_color = -self.color
opponent_dummy_player = Player(opponent_color)
return opponent_dummy_player.check_if_immediate_win_possible(gui)
def stop_old_mistake(self, gui):
if not self.check_oldlogs:
return False
current_log = gui.board.log
if not self.oldlogs or not current_log:
return False
for oldlog in self.oldlogs:
if match_log(oldlog, current_log):
# Place stone where opponent would place
try:
next_move = remove_offset(oldlog, current_log)
gui.board[next_move] = self.color
return True
except AssertionError:
continue
# raise NotImplementedError("The player 'Adaptive' is not ready to use yet. Choose a different player.")
return False
```
#### File: player/learning/test_learning.py
```python
"Unit tests for the Random player"
from ..lib import black, white, empty, PlayerTest
from .learning import *
class TestLearning(PlayerTest):
Player = Learning
def remove_logfile(self):
from os import remove, path
try:
remove( path.join(path.split(__file__)[0], self.logfile_name) )
except OSError:
# nothing to do if file does not exist
pass
def tearDown(self):
self.remove_logfile()
def setUp(self):
self.logfile_name = 'test_adaptive.log'
self.remove_logfile()
np.random.seed(42425243212)
def test_match_log(self):
self.assertEqual( match_log([(1,2),(3,4)] , [(1,2),(3,4)]) , 2)
self.assertEqual( match_log([(1,2),(3,4)] , [(0,0),(2,2)]) , 2)
self.assertEqual( match_log([(1,2),(3,4),(9,99)], [(0,0),(2,2)]) , 2)
self.assertEqual( match_log([(1,3),(3,4)] , [(0,0),(2,2)]) , None)
self.assertEqual( match_log([(1,2),(9,3),(3,4)], [(1,1),(3,3),(9,2)]) , None) # wrong colors !!
self.assertEqual( match_log([(9,3),(3,4),(1,2)], [(1,1),(3,3),(9,2)]) , 3)
def test_remove_offset(self):
self.assertEqual( remove_offset([(1,2),(3,4),(1,1),(9,99)], [(0,0),(2,2)]) , (8,97))
self.assertEqual( remove_offset([(9,3),(3,4),(1,2),(111,111),(10,10)], [(1,1),(3,3),(9,2)]) , (10,9))
self.assertEqual( remove_offset([(6,1),(0,2),(7,0),(111,111),(7,8)], [(10,1),(3,3),(9,2)]) , (10,9))
self.assertRaises(AssertionError, remove_offset, [(6,1),(0,2),(7,0),(7,8)], [(10,1),(3,3),(9,2)] )
def base_test(self):
class DummyPlayer(Player):
def __init__(self, *args, **kwargs):
Player.__init__(self, *args, **kwargs)
self.i = 0
def _make_move(self, gui):
gui.board[log[self.i]] = self.color
self.i += 2
log = [(5, 7), (6, 8), (4, 6), (3, 5), (5, 5), (3, 7), (6, 6), (5, 6), (7, 5), (4, 8),
(6, 4), (6, 5), (8, 4), (9, 3), (7, 3), (8, 2), (7, 4), (5, 4), (7, 6), (7, 2), (7, 7)]
# first game, white (DummyPlayer) should win
adaptive_player1 = Learning(color=black, white_logfile='', black_logfile=self.logfile_name)
dummy_player1 = DummyPlayer(white)
gui1 = self.build_gui(np.zeros((13,16)))
for i in range(len(log)):
if i % 2: # if ``i`` is odd
adaptive_player1.make_move(gui1)
else: # if ``i`` is even
dummy_player1.make_move(gui1)
self.assertEqual(gui1.board.lastmove, log[i])
if i != len(log) - 1:
self.assertTrue(gui1.board.winner()[0] is None)
else:
self.assertEqual(gui1.board.winner()[0], white)
# second game, the Learning player should place its first stone at log[2]
adaptive_player2 = Learning(color=black, white_logfile='', black_logfile=self.logfile_name)
dummy_player2 = DummyPlayer(white)
gui2 = self.build_gui(np.zeros((13,16)))
dummy_player2.make_move(gui2)
self.assertEqual(gui2.board.lastmove, log[0])
self.assertTrue(gui2.board.winner()[0] is None)
adaptive_player2.make_move(gui2)
self.assertNotEqual(gui2.board.lastmove, log[1]) # ``Learning``'s move differs from first game?
self.assertEqual(gui2.board.lastmove, log[2])
```
#### File: lib/player/lib.py
```python
"Define basic subroutines useful for all AI players"
from ..board import black, white, empty, Board, InvalidMoveError
import numpy as np
import unittest
class Playerlibrary(object):
"""
A library class that holds basic subroutines that are useful for all
kinds of artificial-intelligence-type (AI-type) players, e.g. the
function ``win_if_possible`` that checks if the game can be won in
the next move.
All the functions are written to take the same arguments as
``Player.make_move`` such that the call from within ``make_move``
looks like e.g. ``self.win_if_possible(gui)``.
"""
def line_getter_functions(self, gui, length=5):
return [lambda x,y: gui.board.get_column(x,y,length=length), lambda x,y: gui.board.get_row(x,y, length=length),
lambda x,y: gui.board.get_diagonal_upleft_to_lowright(x,y, length=length),
lambda x,y: gui.board.get_diagonal_lowleft_to_upright(x,y, length=length)]
def random_move(self, gui):
moves_left = gui.board.moves_left
while moves_left == gui.board.moves_left:
x = np.random.randint(gui.board.width)
y = np.random.randint(gui.board.height)
try:
gui.board[y,x] = self.color
except InvalidMoveError:
continue
def extend_one(self, gui):
"Place a stone next to another one but only if extendable to five."
for i in range(gui.board.height):
for j in range(gui.board.width):
for f in self.line_getter_functions(gui):
try:
line, positions = f(i,j)
except IndexError:
continue
# search pattern: one of own color and four empty
if len(np.where(line == empty)[0]) == 4 and len(np.where(line == self.color)[0]) == 1:
index_own_color = np.where(line == self.color)[0][0]
if index_own_color == 0:
gui.board[positions[1]] = self.color
return True
else:
gui.board[positions[index_own_color - 1]] = self.color
return True
return False
def block_open_four(self, gui):
"Block a line of four stones if at least one end open."
for i in range(gui.board.height):
for j in range(gui.board.width):
for f in self.line_getter_functions(gui):
try:
line, positions = f(i,j)
except IndexError:
continue
# selection: search four of opponent's color and one empty
if len(np.where(line == empty)[0]) == 1 and len(np.where(line == -self.color)[0]) == 4:
index_of_empty = np.where(line == empty)[0][0]
gui.board[positions[index_of_empty]] = self.color
return True
return False
def block_doubly_open_two(self, gui):
"Block a line of two if both sides are open."
for i in range(gui.board.height):
for j in range(gui.board.width):
for f in self.line_getter_functions(gui):
try:
line, positions = f(i,j)
except IndexError:
continue
# select pattern [<all empty>, <opponent's color>, <opponent's color>, <all empty>]
if ( line == (empty, -self.color, -self.color, empty, empty) ).all():
gui.board[positions[3]] = self.color
return True
elif ( line == (empty, empty, -self.color, -self.color, empty) ).all():
gui.board[positions[1]] = self.color
return True
return False
def block_twice_to_three_or_more(self, gui):
'Prevent opponent from closing two lines of three or more simultaneously.'
line_getter_functions = self.line_getter_functions(gui)
line_positions = []
getter_functions = []
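        # Remember every candidate line seen so far; when a later candidate
        # from a different direction crosses one of them on an empty square,
        # occupying that intersection blocks both threats at once.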
for i in range(gui.board.height):
for j in range(gui.board.width):
for f in line_getter_functions:
try:
line, positions = f(i,j)
except IndexError:
continue
# search two of opponent's color and three empty in two crossing lines at an empty position
opponent_stones_in_line = len(np.where(line == -self.color)[0])
if opponent_stones_in_line >= 2 and len(np.where(line == empty)[0]) == 5 - opponent_stones_in_line:
for oldpos, old_getter in zip(line_positions, getter_functions):
for pos in positions:
if f != old_getter and pos in oldpos and gui.board[pos] == empty:
gui.board[pos] = self.color
return True
line_positions.append(positions)
getter_functions.append(f)
return False
def block_open_three(self, gui):
"Block a line of three."
for i in range(gui.board.height):
for j in range(gui.board.width):
for f in self.line_getter_functions(gui):
try:
line, positions = f(i,j)
except IndexError:
continue
# selection: search three of opponent's color and two empty
if len(np.where(line == empty)[0]) == 2 and len(np.where(line == -self.color)[0]) == 3:
indices_opponent = np.where(line == -self.color)[0]
if not (indices_opponent[1] == indices_opponent[0] + 1 and \
indices_opponent[2] == indices_opponent[1] + 1):
continue
if 0 not in indices_opponent:
gui.board[positions[indices_opponent[0] - 1]] = self.color
return True
else:
gui.board[positions[3]] = self.color
return True
return False
def block_open_two(self, gui):
"Block a line of two."
for i in range(gui.board.height):
for j in range(gui.board.width):
for f in self.line_getter_functions(gui):
try:
line, positions = f(i,j)
except IndexError:
continue
                    # selection: search pattern [<all empty or boundary>, opponent, opponent, <all empty or boundary>]
if len(np.where(line == empty)[0]) == 3 and len(np.where(line == -self.color)[0]) == 2:
indices_opponent = np.where(line == -self.color)[0]
if indices_opponent[1] == indices_opponent[0] + 1:
if indices_opponent[0] == 0:
gui.board[positions[3]] = self.color
return True
else:
gui.board[positions[indices_opponent[0]-1]] = self.color
return True
return False
def block_doubly_open_three(self, gui):
"Block a line of three but only if both sides are open."
for i in range(gui.board.height):
for j in range(gui.board.width):
for f in self.line_getter_functions(gui):
try:
line, positions = f(i,j)
except IndexError:
continue
if ( line == (empty, -self.color, -self.color, -self.color, empty) ).all():
gui.board[positions[0]] = self.color
return True
return False
def extend_three_to_four(self, gui):
"""
Extend a line of three stones to a line of four stones but only
if there is enough space to be completed to five.
"""
for i in range(gui.board.height):
for j in range(gui.board.width):
for f in self.line_getter_functions(gui):
try:
line, positions = f(i,j)
except IndexError:
continue
# selection: search three of own color and two empty
if len(np.where(line == empty)[0]) == 2 and len(np.where(line == self.color)[0]) == 3:
indices_empty = np.where(line == empty)[0]
if 0 not in indices_empty:
gui.board[positions[indices_empty[0]]] = self.color
return True
else:
gui.board[positions[indices_empty[1]]] = self.color
return True
return False
def block_to_doubly_open_four(self, gui):
"""
Prevent the opponent from getting a line of four with both ends
open.
"""
for i in range(gui.board.height):
for j in range(gui.board.width):
for f in self.line_getter_functions(gui, length=6):
try:
line, positions = f(i,j)
except IndexError:
continue
# selection: search pattern [empty, <extendable to 4 times opponent>, empty]
if len(np.where(line == empty)[0]) == 3 and len(np.where(line == -self.color)[0]) == 3:
indices_empty = np.where(line == empty)[0]
if not (line[0] == empty and line[-1] == empty):
continue
else:
gui.board[positions[indices_empty[1]]] = self.color
return True
return False
def extend_three_to_doubly_open_four(self, gui):
"""
Extend a line of three stones to a line of four stones but only
if there is enough space to be completed to five ON BOTH SIDES.
"""
for i in range(gui.board.height):
for j in range(gui.board.width):
for f in self.line_getter_functions(gui, length=6):
try:
line, positions = f(i,j)
except IndexError:
continue
# selection: search pattern [empty, <extendable to 4 times own>, empty]
if len(np.where(line == empty)[0]) == 3 and len(np.where(line == self.color)[0]) == 3:
indices_empty = np.where(line == empty)[0]
if not (line[0] == empty and line[-1] == empty):
continue
else:
gui.board[positions[indices_empty[1]]] = self.color
return True
return False
def extend_two_to_three(self, gui):
"""
Extend a line of two stones to a line of three stones but only
if there is enough space to be completed to five.
"""
for i in range(gui.board.height):
for j in range(gui.board.width):
for f in self.line_getter_functions(gui):
try:
line, positions = f(i,j)
except IndexError:
continue
# selection: search two of own color and three empty
if len(np.where(line == empty)[0]) == 3 and len(np.where(line == self.color)[0]) == 2:
indices_empty = np.where(line == empty)[0]
gui.board[positions[indices_empty[np.random.randint(3)]]] = self.color
return True
return False
def extend_twice_two_to_three(self, gui):
"""
Extend two crossing lines of two stones to two lines of three
stones but only if there is enough space to be completed to five.
"""
line_positions = []
getter_functions = []
for f in self.line_getter_functions(gui):
for i in range(gui.board.height):
for j in range(gui.board.width):
try:
line, positions = f(i,j)
except IndexError:
continue
# search two of own color and three empty in two crossing lines at an empty position
if len(np.where(line == empty)[0]) == 3 and len(np.where(line == self.color)[0]) == 2:
for oldpos, old_getter in zip(line_positions, getter_functions):
for pos in positions:
if f != old_getter and pos in oldpos and gui.board[pos] == empty:
gui.board[pos] = self.color
return True
line_positions.append(positions)
getter_functions.append(f)
return False
def check_if_immediate_win_possible(self, gui):
"""
        Check if it is possible to place a stone such that the player wins
immediately.
Return the position to place the stone if possible, otherwise return None.
"""
for i in range(gui.board.height):
for j in range(gui.board.width):
for f in self.line_getter_functions(gui):
try:
line, positions = f(i,j)
except IndexError:
continue
# selection:
# - can only place stones where field is ``empty``
# - line must sum to "+" or "-" 4 (4 times black=+1 or white=-1 and once empty=0)
# place stone if that leads to winning the game
if empty in line and line.sum() == self.color * 4:
for pos in positions:
if gui.board[pos] == empty:
return pos
raise RuntimeError("Check the implementation of ``check_if_immediate_win_possible``.")
# control reaches this point only if no winning move is found => return None
def win_if_possible(self, gui):
"""
Place a stone where the player wins immediately if possible.
Return ``True`` if a stone has been placed, otherwise return False.
"""
pos = self.check_if_immediate_win_possible(gui)
if pos is None:
return False
else:
gui.board[pos] = self.color
return True
class PlayerTest(unittest.TestCase):
"""
Library class for testing AI players.
Usage:
Create a subclass and set the member variable ``Player`` to the
AI you want to test:
>>> class MyTest(PlayerTest):
... Player = <Your AI>
"""
Player = None
@classmethod
def build_board(self, board_array):
"""
Build up a valid ``GameBoard`` holding the desired ``board_array``.
.. note::
You probably rather need `.build_gui`
:param board_array:
2D-array; e.g. [[white, empty],
[black, black]]
"""
board_array = np.asarray(board_array, dtype=int)
assert len(board_array.shape) == 2
height = board_array.shape[0]
width = board_array.shape[1]
board = Board(width=width, height=height)
white_indices = []
black_indices = []
# find positions that are not empty
for i in range(height):
for j in range(width):
value = board_array[i,j]
if value == empty:
continue
elif value == white:
white_indices.append((i,j))
elif value == black:
black_indices.append((i,j))
else:
raise AssertionError("Invalid ``board_array``")
# in a valid board, there are equally many black and white stones or
        # one more white than black stone since white begins
assert len(white_indices) == len(black_indices) or len(white_indices) == len(black_indices) + 1
while black_indices:
board[white_indices.pop()] = white
board[black_indices.pop()] = black
assert board.winner()[0] is None
# if there is one more white stone
if white_indices:
board[white_indices.pop()] = white
return board
@classmethod
def build_gui(self, board_array):
"""
Build up a valid ``GameBoard`` packed in a ``BoardGui`` holding
the desired ``board_array``. The returned instance of ``BoardGui``
is ready to use in ``Player.make_move()``.
:param board_array:
2D-array; e.g. [[white, empty],
[black, black]]
"""
from ..gui import BoardGui, tk
board = self.build_board(board_array)
gui = BoardGui(board, tk.Tk())
gui.in_game = True
return gui
def base_test(self):
width = 20
height = 10
board = Board(height, width)
from ..gui import BoardGui, tk
board_gui = BoardGui(board, tk.Tk())
board_gui.in_game = True
if self.Player is not None:
white_player = self.Player(white)
black_player = self.Player(black)
while board_gui.board.winner()[0] is None and not board_gui.board.full():
white_player.make_move(board_gui)
black_player.make_move(board_gui)
```
#### File: lib/player/test_easy.py
```python
"Unit tests for the Random player"
from .lib import black, white, empty, PlayerTest
from .a_easy import *
class TestEasy(PlayerTest):
Player = Easy
def setUp(self):
np.random.seed(42425243212)
```
#### File: lib/player/test_hard.py
```python
"Unit tests for the Random player"
from .lib import black, white, empty, PlayerTest
from .c_hard import *
class TestHard(PlayerTest):
Player = Hard
def setUp(self):
np.random.seed(42425243212)
```
#### File: lib/player/test_medium.py
```python
"Unit tests for the Random player"
from .lib import black, white, empty, PlayerTest
from .b_medium import *
class TestMedium(PlayerTest):
Player = Medium
def setUp(self):
np.random.seed(42425243212)
``` |
{
"source": "j-piccinali/SPH-EXA_mini-app",
"score": 2
} |
#### File: scripts/reframe/make.py
```python
import reframe as rfm
import reframe.utility.sanity as sn
commits = ['f982fde']
testnames = ['sedov']
gpu_cap = 'sm_60'
# tc_ver = '20.08'
# {{{ build base
@rfm.simple_test
class Base_Build_Test(rfm.CompileOnlyRegressionTest):
# def __init__(self):
def __init__(self):
self.maintainers = ['JG']
self.prebuild_cmds += [
'git log --pretty=oneline -n1',
'module rm xalt', 'module list -t']
self.sourcesdir = 'src_gpu'
self.build_system = 'Make'
self.build_system.makefile = 'Makefile'
self.build_system.cxx = 'CC'
self.build_system.nvcc = 'nvcc'
self.build_system.max_concurrency = 2
self.prgenv_flags = {
# The makefile adds -DUSE_MPI
'PrgEnv-gnu': ['-I.', '-I./include', '-std=c++14', '-g', '-O3',
'-w', '-DUSE_MPI', '-DNDEBUG', '-fopenmp'],
'PrgEnv-intel': ['-I.', '-I./include', '-std=c++14', '-g', '-O3',
'-DUSE_MPI', '-DNDEBUG', '-qopenmp'],
'PrgEnv-cray': ['-I.', '-I./include', '-std=c++17', '-g', '-Ofast',
'-DUSE_MPI', '-DNDEBUG', '-fopenmp'],
# -fopenmp[=libcraymp] (Cray runtime - default)
# -fopenmp=libomp (Clang runtime)
'PrgEnv-pgi': ['-I.', '-I./include', '-std=c++14', '-g', '-O3',
'-DUSE_MPI', '-DNDEBUG', '-mp'],
}
# TODO: fullpath = f'{self.target_executable}.{self.testname}...
self.sanity_patterns = sn.assert_not_found(r'warning', self.stdout)
# {{{ hooks
@rfm.run_before('compile')
def setflags(self):
# self.build_system.cxxflags = \
# self.prgenv_flags[self.current_environ.name]
# self.modules += self.tool_modules[self.current_environ.name]
flags = (' '.join(map(str,
self.prgenv_flags[self.current_environ.name])))
self.build_system.options += [
self.target_executable, f'MPICXX={self.build_system.cxx}',
'SRCDIR=.', 'BUILDDIR=.', 'BINDIR=.', f'CXXFLAGS="{flags}"',
'CUDA_PATH=$CUDATOOLKIT_HOME',
f'TESTCASE={self.executable}',
]
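        # The sed expression below wraps the CUDA-only include in an
        # '#ifdef USE_CUDA' guard so the same sources also build for the
        # CPU-only targets.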
sed_ifdef = (r'"s-#include \"cuda/sph.cuh\"-#ifdef USE_CUDA\n'
r'#include \"cuda/sph.cuh\"\n#endif-"')
self.prebuild_cmds += [
f'sed -i {sed_ifdef} include/sph/findNeighbors.hpp',
f'sed -i {sed_ifdef} include/sph/density.hpp',
f'sed -i {sed_ifdef} include/sph/IAD.hpp',
f'sed -i {sed_ifdef} include/sph/momentumAndEnergyIAD.hpp',
]
# }}}
# }}}
# {{{ mpi+omp
@rfm.parameterized_test(*[[commit, testname]
for commit in commits
for testname in testnames])
class MPIOMP_Build_Test(Base_Build_Test):
def __init__(self, commit, testname):
super().__init__()
self.commit = commit
self.testname = testname
self.descr = f'Build {testname} test ({commit}) with MPI+OpenMP'
self.valid_systems = ['*']
self.valid_prog_environs = ['PrgEnv-gnu', 'PrgEnv-cray', 'PrgEnv-pgi',
'PrgEnv-intel']
self.tags = {'sph', 'cpu'}
self.prebuild_cmds += [
f'git checkout {commit}',
'git log --pretty=oneline -n1',
'module rm xalt', 'module list -t']
self.executable = testname
self.target_executable = 'mpi+omp'
fullpath = f'{self.target_executable}.{testname}.{commit}.$PE_ENV'
self.postbuild_cmds = [
f'cp {self.target_executable}.app $SCRATCH/{fullpath}',
]
# }}}
# {{{ Cuda
@rfm.parameterized_test(*[[commit, testname]
for commit in commits
for testname in testnames])
class CUDA_Build_Test(Base_Build_Test):
def __init__(self, commit, testname):
super().__init__()
self.descr = f'Build {testname} test ({commit}) with CUDA'
self.valid_systems = ['*']
self.valid_prog_environs = ['PrgEnv-gnu', 'PrgEnv-cray', 'PrgEnv-pgi',
'PrgEnv-intel']
self.tags = {'sph', 'gpu'}
self.prebuild_cmds += [
f'git checkout {commit}',
'git log --pretty=oneline -n1',
'module rm xalt', 'module list -t']
self.modules = ['craype-accel-nvidia60']
self.executable = testname
self.target_executable = 'mpi+omp+cuda'
fullpath = f'{self.target_executable}.{testname}.{commit}.$PE_ENV'
self.postbuild_cmds = [
f'cp {self.target_executable}.app $SCRATCH/{fullpath}',
]
# self.variables = {'CUDA_PATH': '$CUDATOOLKIT_HOME'}
self.build_system.options = [
f'NVCCFLAGS="-std=c++14 --expt-relaxed-constexpr -arch={gpu_cap}"',
# --ptxas-options=-v -g -G"',
f'NVCCLDFLAGS="-arch={gpu_cap}"',
]
# }}}
# {{{ OpenACC
@rfm.parameterized_test(*[[commit, testname]
for commit in commits
for testname in testnames])
class OPENACC_Build_Test(Base_Build_Test):
def __init__(self, commit, testname):
super().__init__()
self.descr = f'Build {testname} test ({commit}) with OpenACC'
self.valid_systems = ['*']
self.valid_prog_environs = ['PrgEnv-pgi']
self.tags = {'sph', 'gpu'}
self.prebuild_cmds += [
f'git checkout {commit}',
'git log --pretty=oneline -n1',
'module rm xalt', 'module list -t']
self.modules = ['craype-accel-nvidia60']
self.executable = testname
self.target_executable = 'mpi+omp+acc'
atomic_flag = ''
fullpath = f'{self.target_executable}.{testname}.{commit}.$PE_ENV'
self.postbuild_cmds = [
f'cp {self.target_executable}.app $SCRATCH/{fullpath}',
]
openacc_flag = '-acc -ta=tesla,cc60 -Minfo=accel ' # {atomic_flag}
self.build_system.options = [f'LIB="{openacc_flag}"']
# }}}
# {{{ OpenMP Offload
@rfm.parameterized_test(*[[commit, testname]
for commit in commits
for testname in testnames])
class OPENMPgpu_Build_Test(Base_Build_Test):
def __init__(self, commit, testname):
super().__init__()
self.descr = f'Build {testname} test ({commit}) with OpenMP Offloading'
self.valid_systems = ['*']
self.valid_prog_environs = ['PrgEnv-cray']
self.tags = {'sph', 'gpu'}
self.prebuild_cmds += [
f'git checkout {commit}',
'git log --pretty=oneline -n1',
'module rm xalt', 'module list -t']
self.modules = ['craype-accel-nvidia60']
self.executable = testname
self.target_executable = 'mpi+omp+target'
fullpath = f'{self.target_executable}.{testname}.{commit}.$PE_ENV'
self.postbuild_cmds = [
f'cp {self.target_executable}.app $SCRATCH/{fullpath}',
]
offload_flag = (r'-fopenmp-targets=nvptx64 -Xopenmp-target '
f'-march={gpu_cap}')
self.build_system.options = [f'LIB="{offload_flag}"']
# }}}
``` |
{
"source": "jpic/django-nested-admin",
"score": 2
} |
#### File: tests/gfk/tests.py
```python
import time
from nested_admin.tests.base import BaseNestedAdminTestCase
from .models import GFKRoot, GFKA, GFKB
class TestGenericInlineAdmin(BaseNestedAdminTestCase):
root_model = GFKRoot
def test_add_to_empty_one_deep(self):
root = self.root_model.objects.create(slug='test')
self.load_admin(root)
self.add_inline(slug="test")
self.save_form()
a_set = root.a_set.all()
self.assertEqual(len(a_set), 1)
self.assertEqual(a_set[0].slug, 'test')
self.assertEqual(a_set[0].position, 0)
def test_add_to_empty_two_deep(self):
root = self.root_model.objects.create(slug='test')
a = GFKA.objects.create(slug='test', content_object=root, position=0)
self.load_admin(root)
self.add_inline([0], name="Test")
self.save_form()
b_set = a.b_set.all()
self.assertEqual(len(b_set), 1)
self.assertEqual(b_set[0].name, "Test")
self.assertEqual(b_set[0].position, 0)
def test_drag_existing_objs(self):
root = self.root_model.objects.create(slug='root')
x = GFKA.objects.create(slug='x', content_object=root, position=0)
y = GFKA.objects.create(slug='y', content_object=root, position=1)
GFKB.objects.create(name='X 0', content_object=x, position=0)
GFKB.objects.create(name='X 1', content_object=x, position=1)
GFKB.objects.create(name='X 2', content_object=x, position=2)
GFKB.objects.create(name='Y 0', content_object=y, position=0)
GFKB.objects.create(name='Y 1', content_object=y, position=1)
GFKB.objects.create(name='Y 2', content_object=y, position=2)
self.load_admin(root)
self.drag_and_drop_item(from_indexes=[1, 2], to_indexes=[0, 1],
screenshot_hack=True)
self.save_form()
y_2 = GFKB.objects.get(name='Y 2')
self.assertEqual(y_2.content_object, x, "item was not moved to the correct parent")
self.assertEqual(y_2.position, 1, "item was not moved to the correct position")
self.assertEqual(["%s" % i for i in x.b_set.all()], [
'root/x[0]/X 0[0]',
'root/x[0]/Y 2[1]',
'root/x[0]/X 1[2]',
'root/x[0]/X 2[3]'])
self.assertEqual(["%s" % i for i in y.b_set.all()], [
'root/y[1]/Y 0[0]',
'root/y[1]/Y 1[1]'])
def test_drag_add_drag(self):
root = self.root_model.objects.create(slug='root')
x = GFKA.objects.create(slug='x', content_object=root, position=0)
y = GFKA.objects.create(slug='y', content_object=root, position=1)
GFKB.objects.create(name='X 0', content_object=x, position=0)
GFKB.objects.create(name='X 1', content_object=x, position=1)
GFKB.objects.create(name='X 2', content_object=x, position=2)
GFKB.objects.create(name='Y 0', content_object=y, position=0)
GFKB.objects.create(name='Y 1', content_object=y, position=1)
GFKB.objects.create(name='Y 2', content_object=y, position=2)
self.load_admin(root)
self.add_inline(indexes=[0], name='X 3')
self.drag_and_drop_item(from_indexes=[1, 1], to_indexes=[0, 1],
screenshot_hack=True)
self.save_form()
y_1 = GFKB.objects.get(name='Y 1')
self.assertEqual(y_1.content_object, x, "Y1 was not moved to the correct parent")
self.assertEqual(y_1.position, 1, "Y1 was not moved to the correct position")
self.assertEqual(["%s" % i for i in x.b_set.all()], [
'root/x[0]/X 0[0]',
'root/x[0]/Y 1[1]',
'root/x[0]/X 1[2]',
'root/x[0]/X 2[3]',
'root/x[0]/X 3[4]'])
self.assertEqual(["%s" % i for i in y.b_set.all()], [
'root/y[1]/Y 0[0]',
'root/y[1]/Y 2[1]'])
def test_drag_new_item(self):
root = self.root_model.objects.create(slug='root')
x = GFKA.objects.create(slug='x', content_object=root, position=0)
y = GFKA.objects.create(slug='y', content_object=root, position=1)
GFKB.objects.create(name='X 0', content_object=x, position=0)
GFKB.objects.create(name='X 1', content_object=x, position=1)
GFKB.objects.create(name='X 2', content_object=x, position=2)
GFKB.objects.create(name='Y 0', content_object=y, position=0)
GFKB.objects.create(name='Y 1', content_object=y, position=1)
self.load_admin(root)
self.add_inline(indexes=[1], name='Y 2')
time.sleep(0.01)
self.drag_and_drop_item(from_indexes=[1, 2], to_indexes=[0, 1],
screenshot_hack=True)
self.save_form()
y_2 = GFKB.objects.get(name='Y 2')
self.assertEqual(y_2.content_object, x, "Y2 was not moved to the correct parent")
self.assertEqual(y_2.position, 1, "Y2 was not moved to the correct position")
self.assertEqual(["%s" % i for i in x.b_set.all()], [
'root/x[0]/X 0[0]',
'root/x[0]/Y 2[1]',
'root/x[0]/X 1[2]',
'root/x[0]/X 2[3]'])
self.assertEqual(["%s" % i for i in y.b_set.all()], [
'root/y[1]/Y 0[0]',
'root/y[1]/Y 1[1]'])
def test_delete_two_deep(self):
root = self.root_model.objects.create(slug='root')
x = GFKA.objects.create(slug='x', content_object=root, position=0)
y = GFKA.objects.create(slug='y', content_object=root, position=1)
GFKB.objects.create(name='X 0', content_object=x, position=0)
GFKB.objects.create(name='X 1', content_object=x, position=1)
GFKB.objects.create(name='X 2', content_object=x, position=2)
GFKB.objects.create(name='Y 0', content_object=y, position=0)
GFKB.objects.create(name='Y 1', content_object=y, position=1)
GFKB.objects.create(name='Y 2', content_object=y, position=2)
self.load_admin(root)
self.delete_inline(indexes=[1, 1])
self.save_form()
self.assertEqual(["%s" % i for i in x.b_set.all()], [
'root/x[0]/X 0[0]',
'root/x[0]/X 1[1]',
'root/x[0]/X 2[2]'])
self.assertEqual(["%s" % i for i in y.b_set.all()], [
'root/y[1]/Y 0[0]',
'root/y[1]/Y 2[1]'])
def test_delete_one_deep(self):
root = self.root_model.objects.create(slug='root')
x = GFKA.objects.create(slug='x', content_object=root, position=0)
y = GFKA.objects.create(slug='y', content_object=root, position=1)
GFKB.objects.create(name='X 0', content_object=x, position=0)
GFKB.objects.create(name='X 1', content_object=x, position=1)
GFKB.objects.create(name='X 2', content_object=x, position=2)
GFKB.objects.create(name='Y 0', content_object=y, position=0)
GFKB.objects.create(name='Y 1', content_object=y, position=1)
GFKB.objects.create(name='Y 2', content_object=y, position=2)
self.load_admin(root)
self.delete_inline(indexes=[0])
self.save_form()
self.assertEqual(len(GFKA.objects.filter(slug='x')), 0,
"GFKA instance was not deleted")
y = GFKA.objects.get(slug='y')
self.assertEqual(["%s" % i for i in y.b_set.all()], [
'root/y[0]/Y 0[0]',
'root/y[0]/Y 1[1]',
'root/y[0]/Y 2[2]'])
def test_delete_two_deep_undelete_one_deep(self):
"""
        Test that if an item is deleted, then its parent is deleted and
        subsequently undeleted, the item stays deleted.
"""
root = self.root_model.objects.create(slug='root')
x = GFKA.objects.create(slug='x', content_object=root, position=0)
y = GFKA.objects.create(slug='y', content_object=root, position=1)
GFKB.objects.create(name='X 0', content_object=x, position=0)
GFKB.objects.create(name='X 1', content_object=x, position=1)
GFKB.objects.create(name='X 2', content_object=x, position=2)
GFKB.objects.create(name='Y 0', content_object=y, position=0)
GFKB.objects.create(name='Y 1', content_object=y, position=1)
GFKB.objects.create(name='Y 2', content_object=y, position=2)
self.load_admin(root)
self.delete_inline(indexes=[0, 1])
self.delete_inline(indexes=[0])
self.undelete_inline(indexes=[0])
self.save_form()
self.assertEqual(len(GFKA.objects.filter(slug='x')), 1,
"GFKA instance should not be deleted")
self.assertEqual(["%s" % i for i in x.b_set.all()], [
'root/x[0]/X 0[0]',
'root/x[0]/X 2[1]'])
self.assertEqual(["%s" % i for i in y.b_set.all()], [
'root/y[1]/Y 0[0]',
'root/y[1]/Y 1[1]',
'root/y[1]/Y 2[2]'])
def test_remove_two_deep(self):
root = self.root_model.objects.create(slug='root')
x = GFKA.objects.create(slug='x', content_object=root, position=0)
y = GFKA.objects.create(slug='y', content_object=root, position=1)
GFKB.objects.create(name='X 0', content_object=x, position=0)
GFKB.objects.create(name='X 1', content_object=x, position=1)
GFKB.objects.create(name='X 2', content_object=x, position=2)
GFKB.objects.create(name='Y 0', content_object=y, position=0)
GFKB.objects.create(name='Y 1', content_object=y, position=1)
self.load_admin(root)
self.add_inline(indexes=[1], name='Y 2')
self.remove_inline(indexes=[1, 2])
self.save_form()
self.assertEqual(["%s" % i for i in x.b_set.all()], [
'root/x[0]/X 0[0]',
'root/x[0]/X 1[1]',
'root/x[0]/X 2[2]'])
self.assertEqual(["%s" % i for i in y.b_set.all()], [
'root/y[1]/Y 0[0]',
'root/y[1]/Y 1[1]'])
def test_drag_item_to_empty_parent(self):
root = self.root_model.objects.create(slug='root')
x = GFKA.objects.create(slug='x', content_object=root, position=0)
y = GFKA.objects.create(slug='y', content_object=root, position=1)
GFKB.objects.create(name='Y 0', content_object=y, position=0)
GFKB.objects.create(name='Y 1', content_object=y, position=1)
GFKB.objects.create(name='Y 2', content_object=y, position=2)
self.load_admin(root)
self.drag_and_drop_item(from_indexes=[1, 2], to_indexes=[0, 0])
self.save_form()
y_2 = GFKB.objects.get(name='Y 2')
self.assertEqual(y_2.content_object, x, "Y2 was not moved to the correct parent")
self.assertEqual(y_2.position, 0, "Y2 was not moved to the correct position")
self.assertEqual(["%s" % i for i in x.b_set.all()],
['root/x[0]/Y 2[0]'])
self.assertEqual(["%s" % i for i in y.b_set.all()], [
'root/y[1]/Y 0[0]',
'root/y[1]/Y 1[1]'])
def test_drag_item_to_new_empty_parent(self):
root = self.root_model.objects.create(slug='root')
x = GFKA.objects.create(slug='x', content_object=root, position=0)
GFKB.objects.create(name='X 0', content_object=x, position=0)
GFKB.objects.create(name='X 1', content_object=x, position=1)
GFKB.objects.create(name='X 2', content_object=x, position=2)
self.load_admin(root)
self.add_inline(slug="y")
self.drag_and_drop_item(from_indexes=[0, 2], to_indexes=[1, 0])
self.save_form()
x_2 = GFKB.objects.get(name='X 2')
y = GFKA.objects.get(slug='y')
self.assertEqual(x_2.content_object, y, "X2 was not moved to the correct parent")
self.assertEqual(x_2.position, 0, "X2 was not moved to the correct position")
self.assertEqual(["%s" % i for i in x.b_set.all()],
['root/x[0]/X 0[0]', 'root/x[0]/X 1[1]'])
self.assertEqual(["%s" % i for i in y.b_set.all()],
['root/y[1]/X 2[0]'])
def test_drag_existing_gfkb_to_new_parent_and_back(self):
root = self.root_model.objects.create(slug='test')
x = GFKA.objects.create(slug='x', content_object=root, position=0)
GFKB.objects.create(name='X 0', content_object=x, position=0)
self.load_admin(root)
self.add_inline(slug="y")
self.drag_and_drop_item(from_indexes=[0, 0], to_indexes=[1, 0])
self.drag_and_drop_item(from_indexes=[1, 0], to_indexes=[0, 0])
self.save_form()
self.assertEqual(len(GFKA.objects.all()), 2, "Save failed")
x_0 = GFKB.objects.get(name='X 0')
self.assertEqual(x_0.content_object, x, "X0 is in the wrong parent")
self.assertEqual(x_0.position, 0, "X0 has the wrong position")
```
#### File: tests/nested_delete_validationerrors/models.py
```python
from __future__ import unicode_literals
from django.db import models
from django.db.models import ForeignKey, CASCADE
from nested_admin.tests.compat import python_2_unicode_compatible
@python_2_unicode_compatible
class Parent(models.Model):
name = models.CharField(max_length=128)
def __str__(self):
return self.name
@python_2_unicode_compatible
class Child(models.Model):
name = models.CharField(max_length=128)
parent = ForeignKey(Parent, on_delete=CASCADE, related_name='children')
position = models.PositiveIntegerField()
class Meta:
ordering = ['position']
def __str__(self):
return self.name
@python_2_unicode_compatible
class GrandChild(models.Model):
name = models.CharField(max_length=128)
parent = ForeignKey(Child, on_delete=CASCADE, related_name='children')
position = models.PositiveIntegerField()
class Meta:
ordering = ['position']
def __str__(self):
return self.name
``` |
{
"source": "jpic/django-threadlocals",
"score": 3
} |
#### File: threadlocals/tester/tests.py
```python
from django.test import RequestFactory, SimpleTestCase, Client
from threadlocals.threadlocals import set_thread_variable, get_thread_variable, get_current_request, get_current_session
class ThreadlocalsTest(SimpleTestCase):
def setUp(self):
set_thread_variable('request', None)
def tearDown(self):
set_thread_variable('request', None)
def test_get_thread_variable_default(self):
gotten = get_thread_variable('unset', 'default value')
self.assertEqual(gotten, 'default value')
def test_get_set_thread_variable(self):
set_thread_variable('test', { 'test': 'test'})
gotten = get_thread_variable('test')
self.assertEqual(gotten, { 'test': 'test'})
def test_get_current_request(self):
self.assertEqual(get_current_request(), None) # tests default (None)
request = RequestFactory().get(u'/')
set_thread_variable('request', request)
self.assertEqual(get_current_request(), request)
def test_get_current_session(self):
# c = Client()
# request = get_current_request()
# request.session = c.session
# self.assertEqual(get_current_session(), c.session)
        pass # not testing for now because it might require a database and the function we're testing is dead simple. Feel free to add if it's worth it to you.
def test_get_current_user(self):
pass
class ThreadLocalMiddlewareTest(SimpleTestCase):
def test_process_request(self):
"""
if ThreadLocalMiddleware is enabled in settings, then running the test client
should trigger the middleware and set the request in thread locals
"""
client = Client()
client.get(u'/')
self.assertEqual(get_current_request().path, u'/')
``` |
{
"source": "jpichon/atkinson",
"score": 3
} |
#### File: atkinson/dlrn/http_data.py
```python
import csv
import os.path
import requests
from toolchest import yaml
from atkinson.config.manager import ConfigManager
from atkinson.logging.logger import getLogger
def _raw_fetch(url, logger):
"""
Fetch remote data and return the text output.
:param url: The URL to fetch the data from
:param logger: A logger instance to use.
:return: Raw text data, None otherwise
"""
ret_data = None
try:
req = requests.get(url)
if req.status_code == requests.codes.ok:
ret_data = req.text
except requests.exceptions.ConnectionError as error:
logger.warning(error.request)
return ret_data
def _fetch_yaml(url, logger):
"""
Fetch remote data and process the text as yaml.
:param url: The URL to fetch the data from
:param logger: A logger instance to use.
:return: Parsed yaml data in the form of a dictionary
"""
ret_data = None
raw_data = _raw_fetch(url, logger)
if raw_data is not None:
ret_data = yaml.parse(raw_data)
return ret_data
def dlrn_http_factory(host, config_file=None, link_name=None,
logger=getLogger()):
"""
Create a DlrnData instance based on a host.
:param host: A host name string to build instances
:param config_file: A dlrn config file(s) to use in addition to
the default.
:param link_name: A dlrn symlink to use. This overrides the config files
link parameter.
:param logger: An atkinson logger to use. Default is the base logger.
:return: A DlrnData instance
"""
manager = None
files = ['dlrn.yml']
if config_file is not None:
if isinstance(config_file, list):
files.extend(config_file)
else:
files.append(config_file)
local_path = os.path.realpath(os.path.dirname(__file__))
manager = ConfigManager(filenames=files, paths=local_path)
if manager is None:
return None
config = manager.config
if host not in config:
return None
link = config[host]['link']
if link_name is not None:
link = link_name
return DlrnHttpData(config[host]['url'],
config[host]['release'],
link_name=link,
logger=logger)
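# Minimal usage sketch (hypothetical host key; the available hosts depend on
# the dlrn.yml files loaded by ConfigManager):
#     dlrn = dlrn_http_factory('my-dlrn-host')
#     if dlrn is not None:
#         print(dlrn.commit)    # {'name': ..., 'commit_hash': ..., ...}
#         print(dlrn.versions)  # {project: {'source': ..., 'nvr': ...}, ...}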
class DlrnHttpData():
"""A class used to interact with the dlrn API"""
def __init__(self, url, release, link_name='current', logger=getLogger()):
"""
Class constructor
:param url: The URL to the host to obtain data.
        :param release: The release name to use for lookup.
:param link_name: The name of the dlrn symlink to fetch data from.
:param logger: An atkinson logger to use. Default is the base logger.
"""
self.url = os.path.join(url, release)
self.release = release
self._logger = logger
self._link_name = link_name
self._commit_data = {}
self._fetch_commit()
def _fetch_commit(self):
"""
Fetch the commit data from dlrn
"""
full_url = os.path.join(self.url,
self._link_name,
'commit.yaml')
data = _fetch_yaml(full_url, self._logger)
if data is not None and 'commits' in data:
pkg = data['commits'][0]
if pkg['status'] == 'SUCCESS':
self._commit_data = {'name': pkg['project_name'],
'dist_hash': pkg['distro_hash'],
'commit_hash': pkg['commit_hash'],
'extended_hash': pkg.get('extended_hash')}
else:
msg = '{0} has a status of error'.format(str(pkg))
self._logger.warning(msg)
def _build_url(self):
"""
Generate a url given a commit hash and distgit hash to match the format
base/AB/CD/ABCD123_XYZ987 where ABCD123 is the commit hash and XYZ987
is a portion of the distgit hash.
:return: A string with the full URL.
"""
first = self._commit_data['commit_hash'][0:2]
second = self._commit_data['commit_hash'][2:4]
third = self._commit_data['commit_hash']
for key in ['dist_hash', 'extended_hash']:
if self._commit_data.get(key, 'None') != 'None':
third += '_' + self._commit_data[key][0:8]
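        # e.g. (hypothetical hashes, no extended hash) commit_hash='ab12cd34ef...'
        # and dist_hash='98765432...' yield <url>/ab/12/ab12cd34ef..._98765432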
return os.path.join(self.url,
first,
second,
third)
@property
def commit(self):
"""
Get the dlrn commit information
:return: A dictionary of name, dist-git hash, commit hash and
extended hash.
An empty dictionary is returned otherwise.
"""
return self._commit_data
@property
def versions(self):
"""
Get the version data for the versions.csv file and return the
data in a dictionary
:return: A dictionary of packages with commit and dist-git hashes
"""
ret_dict = {}
full_url = os.path.join(self._build_url(), 'versions.csv')
data = _raw_fetch(full_url, self._logger)
if data is not None:
data = data.replace(' ', '_')
split_data = data.split()
reader = csv.DictReader(split_data)
for row in reader:
ret_dict[row['Project']] = {'source': row['Source_Sha'],
'state': row['Status'],
'distgit': row['Dist_Sha'],
'nvr': row['Pkg_NVR']}
else:
msg = 'Could not fetch {0}'.format(full_url)
self._logger.error(msg)
return ret_dict
``` |
{
"source": "jpichon/git_wrapper",
"score": 2
} |
#### File: git_wrapper/integration_tests/test_branch.py
```python
import git
import pytest
from git_wrapper import exceptions
from git_wrapper.repo import GitRepo
def test_apply_diff(repo_root, datadir):
repo = GitRepo(repo_root)
test_branch = "test_apply_diff"
# Create a diff file
diff_path = (datadir / "test.diff")
# Create a branch from a commit the diff will apply cleanly to
repo.git.branch(test_branch, "90946e854499ee371c22f6a492fd0f889ae2394f")
assert repo.repo.active_branch.name == 'master'
# Apply it
repo.branch.apply_diff(test_branch, diff_path, "Test commit message", True)
# Check latest commit
assert repo.repo.active_branch.name == test_branch
message = repo.repo.head.object.message
assert "Test commit message" in message
assert "Signed-off-by" in message
# Check the working directory is clean and the new files also committed
assert repo.repo.is_dirty(untracked_files=True) is False
def test_apply_patch(repo_root, patch_cleanup, datadir):
repo = GitRepo(repo_root)
test_branch = "test_apply_patch"
# Create patch file (based on git format-patch)
patch_path = (datadir / "test.patch")
# Create a branch from a commit the patch will apply cleanly to
repo.git.branch(test_branch, "0.1.0")
assert repo.repo.active_branch.name == 'master'
# Apply & check
repo.branch.apply_patch(test_branch, patch_path)
assert repo.repo.active_branch.name == test_branch
assert "Test patch" in repo.repo.head.object.message
def test_abort(repo_root, patch_cleanup, datadir):
repo = GitRepo(repo_root)
test_branch = "test_abort"
# Create a bad patch file
patch_path = (datadir / "test-bad.patch")
# Create a branch from a commit
repo.git.branch(test_branch, "0.1.0")
assert repo.repo.active_branch.name == 'master'
# Apply & check for the failure
with pytest.raises(exceptions.ChangeNotAppliedException):
repo.branch.apply_patch(test_branch, patch_path)
assert repo.repo.active_branch.name == test_branch
# Revert
repo.branch.abort_patch_apply()
assert "Test patch" not in repo.repo.head.object.message
def test_reset(repo_root):
repo = GitRepo(repo_root)
branch_name = "test_reset"
# Exercise repo refresh
repo.remote.fetch("origin")
# Save the current reference to origin/master
reset_to_commit = git.repo.fun.name_to_object(repo.repo, "origin/master")
# Create a new branch based on an old commit
repo.git.branch(branch_name, "0.0.1")
# Ensure branch head is different from the one we saved
branch_commit = repo.repo.branches[branch_name].commit
assert branch_commit.hexsha != reset_to_commit.hexsha
# Reset the branch to origin/master
repo.branch.hard_reset(
refresh=False, # Avoid race condition if something new merged
branch=branch_name,
remote="origin",
remote_branch="master"
)
# Ensure the new head matches the origin/master we saved
branch_commit = repo.repo.branches[branch_name].commit
assert branch_commit.hexsha == reset_to_commit.hexsha
def test_create_branch(repo_root):
repo = GitRepo(repo_root)
branch_name = "test_create"
tag_0_0_1_hexsha = "631b3a35723a038c01669e1933571693a166db81"
tag_0_1_0_hexsha = "2e6c014bc296be90a7ed04d155ea7d9da2240bbc"
assert branch_name not in repo.repo.branches
# Create the new branch
repo.branch.create(branch_name, "0.0.1")
assert branch_name in repo.repo.branches
assert repo.repo.branches[branch_name].commit.hexsha == tag_0_0_1_hexsha
# Branch already exists - do nothing
repo.branch.create(branch_name, "0.1.0")
assert branch_name in repo.repo.branches
assert repo.repo.branches[branch_name].commit.hexsha == tag_0_0_1_hexsha
# Branch already exists - reset it
repo.branch.create(branch_name, "0.1.0", True)
assert branch_name in repo.repo.branches
assert repo.repo.branches[branch_name].commit.hexsha == tag_0_1_0_hexsha
def test_create_and_checkout_branch(repo_root):
repo = GitRepo(repo_root)
branch_name = "test_create"
assert repo.repo.active_branch.name == 'master'
# Create and check out the new branch
repo.branch.create(branch_name, "0.0.1", checkout=True)
assert repo.repo.active_branch.name == branch_name
repo.repo.heads.master.checkout()
assert repo.repo.active_branch.name == 'master'
# Branch already exists - reset it and don't check it out
repo.branch.create(branch_name, "0.1.0", True, checkout=False)
assert repo.repo.active_branch.name == 'master'
# Branch already exists - reset it and check it out
repo.branch.create(branch_name, "0.0.1", True, checkout=True)
assert repo.repo.active_branch.name == branch_name
def test_remote_contains(repo_root, patch_cleanup, datadir):
repo = GitRepo(repo_root)
remote_branch = "origin/master"
# 1. Check a known commit
assert repo.branch.remote_contains(
remote_branch, "fc88bcb3158187ba9566dad896e3c688d8bc5109"
) is True
# 2. Confirm new commit doesn't exist on the remote
test_branch = "test_contains"
patch_path = (datadir / "test.patch")
repo.git.branch(test_branch, "0.1.0") # For patch to apply cleanly
repo.branch.apply_patch(test_branch, patch_path)
assert repo.branch.remote_contains(
remote_branch, repo.repo.head.object.hexsha
) is False
```
#### File: git_wrapper/integration_tests/test_rebase.py
```python
import pytest
from git_wrapper import exceptions
from git_wrapper.repo import GitRepo
def test_rebase(repo_root, rebase_cleanup):
repo = GitRepo(repo_root)
branch_name = "mybranches/test_repo"
rebase_to = "2e6c014bc296be90a7ed04d155ea7d9da2240bbc" # Hash for 0.1.0 tag
assert repo.repo.active_branch.name == "master"
assert repo.repo.head.object.hexsha != rebase_to
# Create a branch based on an old tag
repo.repo.git.branch(branch_name, "0.0.1")
# Rebase that branch
repo.branch.rebase_to_hash(branch_name=branch_name, hash_=rebase_to)
assert repo.repo.active_branch.name == branch_name
assert repo.repo.head.object.hexsha == rebase_to
def test_abort_rebase(repo_root, rebase_cleanup):
repo = GitRepo(repo_root)
# Set the stage for a failed rebase
repo.repo.git.checkout("2d88955411f2bd2162f24455f8e948ce435152c5")
repo.repo.git.cherry_pick("5ded3c1362229c874dea3ac8d63b89b0b104c57a")
current_head = repo.repo.head.object.hexsha
# Fail that rebase
assert repo.repo.is_dirty() is False
with pytest.raises(exceptions.RebaseException):
repo.branch.rebase_to_hash(current_head, "31777bbb03da53424c2b0eeae2504a237a4f1720")
assert repo.repo.is_dirty() is True
# Perform abort and ensure resulting repo status is clean
repo.branch.abort_rebase()
assert repo.repo.is_dirty() is False
assert repo.repo.head.object.hexsha == current_head
def test_abort_rebase_failure(repo_root):
repo = GitRepo(repo_root)
with pytest.raises(exceptions.AbortException):
repo.branch.abort_rebase()
```
#### File: git_wrapper/tests/test_repo.py
```python
from mock import Mock, patch, ANY
import shutil
import git
import pytest
from git_wrapper import exceptions
from git_wrapper.branch import GitBranch
from git_wrapper.commit import GitCommit
from git_wrapper.remote import GitRemote
from git_wrapper.repo import GitRepo
from git_wrapper.tag import GitTag
def test_repo(mock_repo):
"""
GIVEN GitRepo initialized with a path and no repo object
WHEN the object is created
THEN a repo added
"""
with patch('git_wrapper.repo.git') as git_mock:
attrs = {'Repo.return_value': mock_repo}
git_mock.configure_mock(**attrs)
git_util = GitRepo('./')
assert mock_repo == git_util.repo
def test_not_path_no_repo():
"""
GIVEN GitRepo initialized with no path or repo object
WHEN the object is created
THEN an exception is raised
"""
with pytest.raises(Exception):
GitRepo('', None)
def test_git_command(mock_repo):
"""
GIVEN GitRepo initialized with a path and repo
WHEN the git property is called
THEN a git object is returned
"""
git_util = GitRepo('./', mock_repo)
assert mock_repo.git is git_util.git
def test_remote_setter(mock_repo):
"""
GIVEN GitRepo is initialized with a path and repo
WHEN the remote setter is called
THEN the remote is set as expected
"""
repo = GitRepo('./', mock_repo)
new_remote = GitRemote(git_repo=repo, logger=None)
repo.remote = new_remote
assert repo.remote == new_remote
def test_remote_setter_wrong_type(mock_repo):
"""
GIVEN GitRepo is initialized with a path and repo
WHEN the remote setter is called with the wrong type
THEN a TypeError is raised
"""
repo = GitRepo('./', mock_repo)
with pytest.raises(TypeError):
repo.remote = repo
def test_branch_setter(mock_repo):
"""
GIVEN GitRepo is initialized with a path and repo
WHEN the branch setter is called
THEN the branch is set as expected
"""
repo = GitRepo('./', mock_repo)
new_branch = GitBranch(git_repo=repo, logger=None)
repo.branch = new_branch
assert repo.branch == new_branch
def test_branch_setter_wrong_type(mock_repo):
"""
GIVEN GitRepo is initialized with a path and repo
WHEN the branch setter is called with the wrong type
THEN a TypeError is raised
"""
repo = GitRepo('./', mock_repo)
with pytest.raises(TypeError):
repo.branch = repo
def test_commit_setter(mock_repo):
"""
GIVEN GitRepo is initialized with a path and repo
WHEN the commit setter is called
THEN the commit is set as expected
"""
repo = GitRepo('./', mock_repo)
new_commit = GitCommit(git_repo=repo, logger=None)
repo.commit = new_commit
assert repo.commit == new_commit
def test_commit_setter_wrong_type(mock_repo):
"""
GIVEN GitRepo is initialized with a path and repo
WHEN the commit setter is called with the wrong type
THEN a TypeError is raised
"""
repo = GitRepo('./', mock_repo)
with pytest.raises(TypeError):
repo.commit = repo
def test_tag_setter(mock_repo):
"""
GIVEN GitRepo is initialized with a path and repo
WHEN the tag setter is called
THEN the tag is set as expected
"""
repo = GitRepo('./', mock_repo)
new_tag = GitTag(git_repo=repo, logger=None)
repo.tag = new_tag
assert repo.tag == new_tag
def test_tag_setter_wrong_type(mock_repo):
"""
GIVEN GitRepo is initialized with a path and repo
WHEN the tag setter is called with the wrong type
THEN a TypeError is raised
"""
repo = GitRepo('./', mock_repo)
with pytest.raises(TypeError):
repo.tag = repo
def test_clone():
"""
GIVEN GitRepo without a path or repo
WHEN clone is called with a valid clone_from URL and clone_to path
THEN Repo.clone_from is called
"""
with patch('git.repo.base.Repo.clone_from') as mock_clone:
clone = GitRepo.clone('./', './testclone')
assert mock_clone.called is True
assert isinstance(clone, GitRepo)
def test_bare_clone():
"""
GIVEN GitRepo without a path or repo
WHEN clone is called with valid parameters and bare set to True
THEN Repo.clone_from is called with bare=True
"""
with patch('git.repo.base.Repo.clone_from') as mock_clone:
GitRepo.clone('./', './testclone', True)
mock_clone.assert_called_with('./', ANY, bare=True)
def test_clone_failed():
"""
GIVEN GitRepo without a path or repo
WHEN clone is called with a valid clone_from URL and clone_to path
AND Repo.clone_from fails with an exception
THEN a RepoCreationException is raised
"""
with patch('git.repo.base.Repo.clone_from') as mock_clone:
mock_clone.side_effect = git.GitCommandError('clone', '')
with pytest.raises(exceptions.RepoCreationException):
GitRepo.clone('./', './testclone')
def test_destroy_and_reclone(mock_repo, monkeypatch):
"""
GIVEN GitRepo initialized with a path and repo
WHEN destroy_and_reclone is called
THEN Repo.clone_from is called
WITH the expected remote url and local working dir
"""
monkeypatch.setattr(shutil, 'rmtree', Mock())
clone = GitRepo(repo=mock_repo)
local_dir = '/tmp/8f697668fgitwrappertest'
clone.repo.working_dir = local_dir
with patch('git.repo.base.Repo.clone_from') as mock_clone:
clone.destroy_and_reclone()
assert mock_clone.called is True
mock_clone.assert_called_with('http://example.com',
local_dir,
bare=False)
def test_destroy_no_path_no_repo(monkeypatch):
"""
GIVEN GitRepo initialized with no path or repo object
WHEN destroy_and_reclone is called
THEN an exception is raised
"""
monkeypatch.setattr(shutil, 'rmtree', Mock())
with pytest.raises(Exception):
clone = GitRepo('', None)
clone.destroy_and_reclone()
def test_destroy_no_remotes(mock_repo, monkeypatch):
"""
GIVEN GitRepo initialized with a path and repo
WHEN destroy_and_reclone is called
AND the repo does not have any remotes configured
THEN an exception is raised
"""
monkeypatch.setattr(shutil, 'rmtree', Mock())
clone = GitRepo(repo=mock_repo)
with pytest.raises(exceptions.RepoCreationException):
clone.repo.remotes = {}
clone.destroy_and_reclone()
def test_destroy_no_remote_named_origin(mock_repo, monkeypatch):
"""
GIVEN GitRepo initialized with a path and repo
WHEN destroy_and_reclone is called
AND the repo does not have a remote named origin
THEN Repo.clone_from is called
WITH the remote url and local working dir from another remote
"""
monkeypatch.setattr(shutil, 'rmtree', Mock())
clone = GitRepo(repo=mock_repo)
local_dir = '/tmp/8f697667fgitwrappertest'
clone.repo.working_dir = local_dir
remote = Mock(spec=git.Remote)
remote.configure_mock(name="onlyremote", url="http://example.com/another")
clone.repo.remotes = [remote]
with patch('git.repo.base.Repo.clone_from') as mock_clone:
clone.destroy_and_reclone()
assert mock_clone.called is True
mock_clone.assert_called_with('http://example.com/another',
local_dir,
bare=False)
def test_destroy_and_multiple_remotes(mock_repo, monkeypatch):
"""
GIVEN GitRepo initialized with a path and repo
WHEN destroy_and_reclone is called
AND the repo has multiple remotes
THEN Repo.clone_from is called
AND create_remote is called
"""
monkeypatch.setattr(shutil, 'rmtree', Mock())
clone = GitRepo(repo=mock_repo)
local_dir = '/tmp/8f697668fgitwrappertest'
clone.repo.working_dir = local_dir
remote = Mock(spec=git.Remote)
remote.configure_mock(name="otherremote", url="http://example.com/another")
clone.repo.remotes.append(remote)
with patch('git.repo.base.Repo.clone_from') as mock_clone:
new_repo_mock = Mock()
mock_clone.return_value = new_repo_mock
clone.destroy_and_reclone()
assert mock_clone.called is True
mock_clone.assert_called_with('http://example.com',
local_dir,
bare=False)
new_repo_mock.create_remote.assert_called_with(
"otherremote",
"http://example.com/another"
)
def test_destroy_and_remote_creation_fails(mock_repo, monkeypatch):
"""
GIVEN GitRepo initialized with a path and repo
WHEN destroy_and_reclone is called
AND the repo has several remotes
AND create_remote fails
THEN a RemoteException is raised
"""
monkeypatch.setattr(shutil, 'rmtree', Mock())
clone = GitRepo(repo=mock_repo)
local_dir = '/tmp/8f697668fgitwrappertest'
clone.repo.working_dir = local_dir
remote = Mock(spec=git.Remote)
remote.configure_mock(name="otherremote", url="http://example.com/another")
clone.repo.remotes.append(remote)
with patch('git.repo.base.Repo.clone_from') as mock_clone:
new_repo_mock = Mock()
mock_clone.return_value = new_repo_mock
with pytest.raises(exceptions.RemoteException):
new_repo_mock.create_remote.side_effect = git.GitCommandError('remote', '')
clone.destroy_and_reclone()
assert mock_clone.called is True
``` |
{
"source": "jpic/lookupy",
"score": 4
} |
#### File: lookupy/lookupy/dunderkey.py
```python
def dunderkey(*args):
"""Produces a nested key from multiple args separated by double
underscore
    >>> dunderkey('a', 'b', 'c')
    'a__b__c'
:param *args : *String
:rtype : String
"""
return '__'.join(args)
def dunder_partition(key):
"""Splits a dunderkey into 2 parts
The first part is everything before the final double underscore
The second part is after the final double underscore
    >>> dunder_partition('a__b__c')
    ('a__b', 'c')
    :param key : String
    :rtype : 2 Tuple
"""
parts = key.rsplit('__', 1)
return tuple(parts) if len(parts) > 1 else (parts[0], None)
def dunder_init(key):
"""Returns the initial part of the dunder key
    >>> dunder_init('a__b__c')
    'a__b'
    :param key : String
    :rtype : String
"""
return dunder_partition(key)[0]
def dunder_last(key):
"""Returns the last part of the dunder key
    >>> dunder_last('a__b__c')
    'c'
    :param key : String
    :rtype : String
"""
return dunder_partition(key)[1]
def dunder_get(_dict, key):
"""Returns value for a specified dunderkey
A "dunderkey" is just a fieldname that may or may not contain
double underscores (dunderscores!) for referrencing nested keys in
a dict. eg::
>>> data = {'a': {'b': 1}}
>>> nesget(data, 'a__b')
1
key 'b' can be referrenced as 'a__b'
:param _dict : (dict)
:param key : (str) that represents a first level or nested key in the dict
:rtype : (mixed) value corresponding to the key
"""
parts = key.split('__', 1)
key = parts[0]
try:
result = _dict[key]
except KeyError:
return None
except TypeError:
try:
result = getattr(_dict, key)
except AttributeError:
return None
return result if len(parts) == 1 else dunder_get(result, parts[1])
def undunder_keys(_dict):
"""Returns dict with the dunder keys converted back to nested dicts
eg::
>>> undunder_keys({'a': 'hello', 'b__c': 'world'})
{'a': 'hello', 'b': {'c': 'world'}}
:param _dict : (dict) flat dict
:rtype : (dict) nested dict
"""
def f(key, value):
parts = key.split('__')
return {
parts[0]: value if len(parts) == 1 else f(parts[1], value)
}
result = {}
for r in [f(k, v) for k, v in _dict.items()]:
rk = list(r.keys())[0]
if rk not in result:
result.update(r)
else:
result[rk].update(r[rk])
return result
def dunder_truncate(_dict):
"""Returns dict with dunder keys truncated to only the last part
In other words, replaces the dunder keys with just last part of
it. In case many identical last parts are encountered, they are
not truncated further
eg::
>>> dunder_truncate({'a__p': 3, 'b__c': 'no'})
{'c': 'no', 'p': 3}
>>> dunder_truncate({'a__p': 'yay', 'b__p': 'no', 'c__z': 'dunno'})
{'a__p': 'yay', 'b__p': 'no', 'z': 'dunno'}
:param _dict : (dict) to flatten
:rtype : (dict) flattened result
"""
keylist = list(_dict.keys())
def decide_key(k, klist):
newkey = dunder_last(k)
return newkey if list(map(dunder_last, klist)).count(newkey) == 1 else k
original_keys = [decide_key(key, keylist) for key in keylist]
return dict(zip(original_keys, _dict.values()))
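# Illustrative usage sketch (not part of the original module); the expected
# values below come straight from the docstrings above.
if __name__ == '__main__':
    assert dunderkey('a', 'b', 'c') == 'a__b__c'
    assert dunder_partition('a__b__c') == ('a__b', 'c')
    assert dunder_get({'a': {'b': 1}}, 'a__b') == 1
    assert undunder_keys({'a': 'hello', 'b__c': 'world'}) == {'a': 'hello', 'b': {'c': 'world'}}
    assert dunder_truncate({'a__p': 3, 'b__c': 'no'}) == {'c': 'no', 'p': 3}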
``` |
{
"source": "jpic/pinax",
"score": 2
} |
#### File: blog/templatetags/blog_tags.py
```python
import re
from django import template
from django.conf import settings
from django.utils.encoding import force_unicode
from django.utils.safestring import mark_safe
register = template.Library()
@register.inclusion_tag("blog/blog_item.html")
def show_blog_post(blog_post):
return {"blog_post": blog_post}
```
#### File: apps/tasks/feeds.py
```python
from datetime import datetime
from atomformat import Feed
from django.core.urlresolvers import reverse
from django.conf import settings
from django.template.defaultfilters import linebreaks, escape
from django.contrib.sites.models import Site
from pinax.apps.tasks.models import TaskHistory
ITEMS_PER_FEED = getattr(settings, "PINAX_ITEMS_PER_FEED", 20)
class BaseTaskFeed(Feed):
def item_id(self, item):
return "http://%s%s" % (
Site.objects.get_current().domain,
item.task.get_absolute_url(),
)
def item_title(self, item):
return item.summary
def item_updated(self, item):
return item.modified
def item_published(self, item):
return item.created
def item_content(self, item):
output = item.detail
if item.status:
output = "%s\n\nStatus: %s" % (output, item.status)
if item.comment:
output = "%s\n\nComment:\n%s" % (output, item.comment)
return {"type" : "html", }, linebreaks(escape(output))
def item_links(self, item):
return [{"href" : self.item_id(item)}]
def item_authors(self, item):
return [{"name" : item.owner.username}]
def feed_id(self):
return "http://%s/tasks/feeds/all/" % Site.objects.get_current().domain
def feed_title(self):
return "Tasks Changes"
def feed_updated(self):
qs = self.get_qs()
# We return an arbitrary date if there are no results, because there
# must be a feed_updated field as per the Atom specifications, however
# there is no real data to go by, and an arbitrary date can be static.
if qs.count() == 0:
return datetime(year=2008, month=7, day=1)
return qs.latest("modified").modified
def feed_links(self):
complete_url = "http://%s%s" % (
Site.objects.get_current().domain,
reverse("task_list"),
)
return ({"href": complete_url},)
def items(self):
return self.get_qs()[:ITEMS_PER_FEED]
def get_qs(self):
return TaskHistory.objects.filter(object_id__isnull=True).order_by("-modified")
class AllTaskFeed(BaseTaskFeed):
pass
```
#### File: apps/tasks/widgets.py
```python
from django import forms
from django.utils.encoding import force_unicode
from django.utils.html import escape, conditional_escape
from django.utils.safestring import mark_safe
class ReadOnlyWidget(forms.TextInput):
input_type = "hidden"
def __init__(self, field, *args, **kwargs):
self.field = field
super(ReadOnlyWidget, self).__init__(*args, **kwargs)
def render(self, *args, **kwargs):
field_name, value = args
field_type = self.field.__class__.__name__
field_value = super(ReadOnlyWidget, self).render(*args, **kwargs)
output = value
try:
if self.field.choices:
for choice in self.field.choices:
if value == choice[0]:
output = conditional_escape(force_unicode(choice[1]))
else:
output = escape(value)
        except Exception as e:
output = e
return mark_safe("<span>%s</span>\n%s" % (output, field_value))
```
#### File: threadedcomments_extras/templatetags/comments_tag.py
```python
from django import template
register = template.Library()
@register.inclusion_tag("threadedcomments/comments.html", takes_context=True)
def comments(context, obj):
return {
"object": obj,
"request": context["request"],
"user": context["user"],
}
```
#### File: apps/tribes/models.py
```python
from django.core.urlresolvers import reverse
from django.db import models
from django.utils.translation import ugettext_lazy as _
from django.contrib.auth.models import User
from groups.base import Group
class Tribe(Group):
members = models.ManyToManyField(User,
related_name = "tribes",
verbose_name = _("members")
)
def get_absolute_url(self):
return reverse("tribe_detail", kwargs={"group_slug": self.slug})
```
#### File: tribes/tests/__init__.py
```python
from django.core.urlresolvers import reverse
from django.test import TestCase
from pinax.apps.tribes.models import Tribe
class TribesTest(TestCase):
fixtures = ["tribes_auth.json"]
urls = "pinax.apps.tribes.tests.tribes_urls"
def test_unauth_create_get(self):
"""
can an unauth'd user get to page?
"""
response = self.client.get(reverse("tribe_create"))
self.assertEqual(response.status_code, 302)
self.assertEqual(response["location"], "http://testserver/account/login/?next=%s" % reverse("tribe_create"))
def test_auth_create_get(self):
"""
can an auth'd user get to page?
"""
logged_in = self.client.login(username="tester", password="<PASSWORD>")
self.assertTrue(logged_in)
response = self.client.get(reverse("tribe_create"))
self.assertEqual(response.status_code, 200)
def test_unauth_create_post(self):
"""
can an unauth'd user post to create a new tribe?
"""
response = self.client.post(reverse("tribe_create"))
self.assertEqual(response.status_code, 302)
self.assertEqual(response["location"], "http://testserver/account/login/?next=%s" % reverse("tribe_create"))
def test_auth_create_post(self):
"""
can an auth'd user post to create a new tribe?
"""
logged_in = self.client.login(username="tester", password="<PASSWORD>")
self.assertTrue(logged_in)
response = self.client.post(reverse("tribe_create"), {
"slug": "test",
"name": "Test Tribe",
"description": "A test tribe.",
})
self.assertEqual(response.status_code, 302)
self.assertEqual(response["location"], "http://testserver/tribes/tribe/test/")
self.assertEqual(Tribe.objects.get(slug="test").creator.username, "tester")
self.assertEqual(Tribe.objects.get(slug="test").members.all()[0].username, "tester")
def test_auth_creator_membership(self):
"""
is membership for creator correct?
"""
logged_in = self.client.login(username="tester", password="<PASSWORD>")
self.assertTrue(logged_in)
response = self.client.post(reverse("tribe_create"), {
"slug": "test",
"name": "Test Tribe",
"description": "A test tribe.",
})
response = self.client.get(reverse("tribe_detail", args=["test"]))
self.assertEqual(Tribe.objects.get(slug="test").creator.username, "tester")
self.assertEqual(Tribe.objects.get(slug="test").members.all()[0].username, "tester")
self.assertEqual(response.context[0]["is_member"], True)
```
#### File: fixtures/generate/gen_notification.py
```python
import random
from django.contrib.auth.models import User
from notification.models import NoticeType, NoticeSetting, ObservedItem
def generate():
for user in User.objects.all():
for notice_type in NoticeType.objects.all():
en = random.random() <= 0.1
notice_setting = NoticeSetting.objects.create(
user=user,
notice_type=notice_type,
medium="1",
send=en
)
print "%sabled notices for %s on %s" % (en and 'En' or 'Dis',
user, notice_type)
if __name__ == '__main__':
generate()
```
#### File: pinax/middleware/security.py
```python
import re
from django.conf import settings
MASK_IN_EXCEPTION_EMAIL= ["password", "mail", "protected", "private"]
mask_re = re.compile("(" + "|".join(MASK_IN_EXCEPTION_EMAIL) + ")", re.I)
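# For example, POST fields named "password", "email" or "private_token" all match
# mask_re, so their values are replaced with u"xxHIDDENxx" before the exception
# report is generated.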
class HideSensistiveFieldsMiddleware(object):
"""
A middleware that masks sensitive fields when an exception occurs,
e.g. passwords in login attempts.
"""
def process_exception(self, request, exception):
if not request or not request.POST or settings.DEBUG:
return False
masked = False
mutable = True
if hasattr(request.POST, "_mutable"):
mutable = request.POST._mutable
request.POST._mutable = True
for name in request.POST:
if mask_re.search(name):
request.POST[name] = u"xxHIDDENxx"
masked = True
if hasattr(request.POST, "_mutable"):
request.POST._mutable = mutable
```
#### File: apps/friends_app/context_processors.py
```python
from friends.models import FriendshipInvitation
def invitations(request):
if request.user.is_authenticated():
return {
"invitations_count": FriendshipInvitation.objects.filter(
to_user = request.user,
status = "2"
).count()
}
else:
return {}
```
#### File: apps/friends_app/forms.py
```python
from django import forms
from django.contrib.auth.models import User
from friends.models import *
from friends.importer import import_vcards
# @@@ move to django-friends when ready
class ImportVCardForm(forms.Form):
vcard_file = forms.FileField(label="vCard File")
def save(self, user):
imported, total = import_vcards(self.cleaned_data["vcard_file"].content, user)
return imported, total
``` |
{
"source": "jpic/pip",
"score": 2
} |
#### File: pip/tests/test_index.py
```python
from pip.index import package_to_requirement
def test_package_name_should_be_converted_to_requirement():
"""
    Test that it translates a name like Foo-1.2 to Foo==1.2
"""
assert package_to_requirement('Foo-1.2') == 'Foo==1.2'
assert package_to_requirement('Foo-dev') == 'Foo==dev'
assert package_to_requirement('Foo') == 'Foo'
``` |
{
"source": "jpic/pytezos",
"score": 3
} |
#### File: examples/now/test_now.py
```python
from unittest import TestCase
from pytezos import ContractInterface, pytezos, format_timestamp
code = """
parameter unit;
storage timestamp;
code { DROP ;
NOW ;
NIL operation ;
PAIR }
"""
class TimeContractTest(TestCase):
@classmethod
def setUpClass(cls):
cls.ci = ContractInterface.create_from(code)
def test_now(self):
res = self.ci.call().result(storage=0)
now = format_timestamp(pytezos.now())
self.assertEqual(now, res.storage)
```
#### File: pytezos/michelson/interface.py
```python
from os.path import basename, dirname, join, exists, expanduser
from pprint import pformat
from pytezos.operation.result import OperationResult
from pytezos.michelson.contract import Contract
from pytezos.michelson.converter import convert
from pytezos.michelson.micheline import skip_nones
from pytezos.michelson.formatter import micheline_to_michelson
from pytezos.operation.group import OperationGroup
from pytezos.operation.content import format_mutez
from pytezos.interop import Interop
from pytezos.tools.docstring import get_class_docstring
class ContractCallResult(OperationResult):
@classmethod
def from_contract_call(cls, operation_group: dict, address, contract: Contract):
results = cls.from_operation_group(operation_group, kind='transaction', destination=address)
assert len(results) == 1, results
result = results[0]
return cls(
parameters=contract.parameter.decode(data=result.parameters),
storage=contract.storage.decode(result.storage),
big_map_diff=contract.storage.big_map_diff_decode(result.big_map_diff),
operations=result.operations
)
@classmethod
def from_code_run(cls, code_run: dict, parameters, contract: Contract):
return cls(
parameters=contract.parameter.decode(parameters),
storage=contract.storage.decode(code_run['storage']),
big_map_diff=contract.storage.big_map_diff_decode(code_run.get('big_map_diff', [])),
operations=code_run.get('operations', [])
)
class ContractCall(Interop):
def __init__(self, parameters,
address=None, contract: Contract = None, factory=Contract, amount=0, shell=None, key=None):
super(ContractCall, self).__init__(shell=shell, key=key)
self.parameters = parameters
self.address = address
self.amount = amount
if contract is None:
assert address is not None
contract = factory.from_micheline(self.shell.contracts[address].code())
self.contract = contract
def _spawn(self, **kwargs):
return ContractCall(
parameters=self.parameters,
address=self.address,
contract=self.contract,
amount=kwargs.get('amount', self.amount),
shell=kwargs.get('shell', self.shell),
key=kwargs.get('key', self.key)
)
def __repr__(self):
res = [
super(ContractCall, self).__repr__(),
f'.address # {self.address}',
f'.amount # {self.amount}',
'\nParameters',
pformat(self.parameters),
'\nHelpers',
get_class_docstring(self.__class__)
]
return '\n'.join(res)
def with_amount(self, amount):
"""
Send funds to the contract too.
:param amount: amount in microtez (int) or tez (Decimal)
:return: ContractCall
"""
return self._spawn(amount=amount)
@property
def operation_group(self) -> OperationGroup:
"""
Show generated operation group.
:return: OperationGroup
"""
return OperationGroup(shell=self.shell, key=self.key) \
.transaction(destination=self.address,
amount=self.amount,
parameters=self.parameters) \
.fill()
def inject(self):
"""
Autofill, sign and inject resulting operation group.
"""
return self.operation_group.autofill().sign().inject()
def cmdline(self):
"""
Generate command line for tezos client.
:return: str
"""
arg = micheline_to_michelson(self.parameters['value'], inline=True)
source = self.key.public_key_hash()
amount = format_mutez(self.amount)
entrypoint = self.parameters['entrypoint']
        return f'transfer {amount} from {source} to {self.address} ' \
               f'--entrypoint "{entrypoint}" --arg "{arg}"'
def result(self, storage=None, source=None, sender=None, gas_limit=None):
"""
Simulate operation and parse the result.
:param storage: Python object only. If storage is specified, `run_code` is called instead of `run_operation`.
:param source: Can be specified for unit testing purposes
:param sender: Can be specified for unit testing purposes,
see https://tezos.gitlab.io/whitedoc/michelson.html#operations-on-contracts for the difference
:param gas_limit: Specify gas limit (default is gas hard limit)
:return: ContractCallResult
"""
chain_id = self.shell.chains.main.chain_id()
if storage is not None:
query = skip_nones(
script=self.contract.code,
storage=self.contract.storage.encode(storage),
entrypoint=self.parameters['entrypoint'],
input=self.parameters['value'],
amount=format_mutez(self.amount),
chain_id=chain_id,
source=sender,
payer=source,
gas=gas_limit
)
code_run_res = self.shell.head.helpers.scripts.run_code.post(query)
return ContractCallResult.from_code_run(
code_run_res, parameters=self.parameters, contract=self.contract)
else:
opg_with_metadata = self.operation_group.fill().run()
return ContractCallResult.from_contract_call(
opg_with_metadata, address=self.address, contract=self.contract)
def view(self):
"""
Get return value of a view method.
:return: object
"""
opg_with_metadata = self.operation_group.fill().run()
view_operation = OperationResult.get_contents(opg_with_metadata, source=self.address)[0]
view_contract = Contract.from_micheline(self.shell.contracts[view_operation['destination']].code())
return view_contract.parameter.decode(view_operation['parameters'])
class ContractEntrypoint(Interop):
def __init__(self, name, address=None, contract: Contract = None, factory=Contract, shell=None, key=None):
super(ContractEntrypoint, self).__init__(shell=shell, key=key)
if contract is None:
assert address is not None
code = self.shell.contracts[address].code()
contract = factory.from_micheline(code)
self.contract = contract
self.name = name
self.address = address
def _spawn(self, **kwargs):
return ContractEntrypoint(
name=self.name,
contract=self.contract,
address=self.address,
shell=kwargs.get('shell', self.shell),
key=kwargs.get('key', self.key),
)
def __repr__(self):
res = [
super(ContractEntrypoint, self).__repr__(),
f'.address # {self.address}',
f'\n{self.__doc__}'
]
return '\n'.join(res)
def __call__(self, *args, **kwargs):
if args:
if len(args) == 1:
data = args[0]
else:
data = list(args)
elif kwargs:
data = kwargs
else:
data = []
if self.name:
data = {self.name: data} if data else self.name
parameters = self.contract.parameter.encode(data)
return ContractCall(
parameters=parameters,
address=self.address,
contract=self.contract,
shell=self.shell,
key=self.key,
)
class ContractInterface(Interop):
__default_entry__ = 'call'
def __init__(self, address=None, contract: Contract = None, factory=Contract, shell=None, key=None):
super(ContractInterface, self).__init__(shell=shell, key=key)
if contract is None:
assert address is not None
code = self.shell.contracts[address].code()
contract = factory.from_micheline(code)
self.contract = contract
self.address = address
for entry_name, docstring in contract.parameter.entries(default=self.__default_entry__):
entry_point = ContractEntrypoint(
name=entry_name if entry_name != self.__default_entry__ else None,
address=self.address,
contract=contract,
shell=self.shell,
key=self.key
)
entry_point.__doc__ = docstring
setattr(self, entry_name, entry_point)
def _spawn(self, **kwargs):
return ContractInterface(
address=self.address,
contract=self.contract,
shell=kwargs.get('shell', self.shell),
key=kwargs.get('key', self.key)
)
def __repr__(self):
entrypoints, _ = zip(*self.contract.parameter.entries(default=self.__default_entry__))
res = [
super(ContractInterface, self).__repr__(),
f'.address # {self.address}',
'\nEntrypoints',
*list(map(lambda x: f'.{x}()', entrypoints)),
'\nHelpers',
get_class_docstring(self.__class__,
attr_filter=lambda x: not x.startswith('_') and x not in entrypoints)
]
return '\n'.join(res)
@classmethod
def create_from(cls, source, shell=None, factory=Contract):
if isinstance(source, str) and exists(expanduser(source)):
contract = factory.from_file(source)
else:
contract = factory(convert(source, output='micheline'))
return ContractInterface(contract=contract, shell=shell)
def big_map_get(self, path, block_id='head'):
"""
Get BigMap entry as Python object by plain key and block height
:param path: Json path to the key (or just key to access default BigMap location)
:param block_id: Block height / hash / offset to use, default is `head`
:return: object
"""
key = basename(path)
big_map_path = dirname(path)
big_map_path = join('/', big_map_path) if big_map_path else None
query = self.contract.storage.big_map_query(key, big_map_path)
value = self.shell.blocks[block_id].context.contracts[self.address].big_map_get.post(query)
return self.contract.storage.big_map_decode(value, big_map_path)
def storage(self, block_id='head'):
"""
Get storage as Pythons object at specified block height.
:param block_id: Block height / hash / offset to use, default is `head`
:return: object
"""
storage = self.shell.blocks[block_id].context.contracts[self.address].storage()
return self.contract.storage.decode(storage)
def operation_result(self, operation_group: dict) -> ContractCallResult:
"""
Get operation parameters, storage and big_map_diff as Python objects.
Can locate operation inside operation groups with multiple contents and/or internal operations.
:param operation_group: {'branch', 'protocol', 'contents', 'signature'}
:return: ContractCallResult
"""
return ContractCallResult.from_contract_call(
operation_group, address=self.address, contract=self.contract)
def manager(self):
"""
Get contract manager address (tz)
:return: str
"""
return self.shell.block.context.contracts[self.address].manager()
```
#### File: pytezos/michelson/micheline.py
```python
from typing import Dict
from datetime import datetime
from os.path import join, dirname, basename
from decimal import Decimal
from collections import namedtuple, defaultdict
from functools import lru_cache
from pytezos.encoding import parse_address, parse_public_key, forge_public_key, forge_address
from pytezos.michelson.forge import prim_tags
from pytezos.michelson.formatter import micheline_to_michelson
from pytezos.michelson.grammar import MichelsonParser
Nested = namedtuple('Nested', ['prim', 'args'])
Schema = namedtuple('Schema', ['metadata', 'bin_types', 'bin_to_json', 'json_to_bin'])
meaningful_types = ['key', 'key_hash', 'signature', 'timestamp', 'address']
@lru_cache(maxsize=None)
def michelson_parser():
return MichelsonParser()
class TypedDict(dict):
__key_type__ = str
def __getitem__(self, item):
return super(TypedDict, self).__getitem__(self.__key_type__(item))
def __setitem__(self, key, value):
return super(TypedDict, self).__setitem__(self.__key_type__(key), value)
@staticmethod
def make(key_type):
return type(f'{key_type.__name__.capitalize()}Dict', (TypedDict,), {'__key_type__': key_type})
def skip_nones(**kwargs) -> dict:
return {k: v for k, v in kwargs.items() if v is not None}
def is_micheline(value):
if isinstance(value, list):
def get_prim(x):
return x.get('prim') if isinstance(x, dict) else None
return set(map(get_prim, value)) == {'parameter', 'storage', 'code'}
elif isinstance(value, dict):
primitives = list(prim_tags.keys())
return any(map(lambda x: x in value, ['prim', 'args', 'annots', *primitives]))
else:
return False
def decode_literal(node, prim):
core_type, value = next(iter(node.items()))
if prim in ['int', 'nat']:
return int(value)
if prim == 'timestamp':
if core_type == 'int':
return int(value)
else:
return value
if prim == 'mutez':
return Decimal(value) / 10 ** 6
if prim == 'bool':
return value == 'True'
if core_type == 'bytes':
if prim in ['address', 'key_hash', 'contract']:
return parse_address(bytes.fromhex(value))
if prim == 'key':
return parse_public_key(bytes.fromhex(value))
return value
def encode_literal(value, prim, binary=False):
core_type = 'string'
if prim in ['int', 'nat']:
core_type = 'int'
elif prim == 'timestamp':
if isinstance(value, int):
core_type = 'int'
elif isinstance(value, datetime):
value = value.strftime('%Y-%m-%dT%H:%M:%SZ')
elif prim == 'mutez':
core_type = 'int'
if isinstance(value, Decimal):
value = int(value * 10 ** 6)
elif prim == 'bool':
core_type = 'prim'
value = 'True' if value else 'False'
elif prim == 'bytes':
if isinstance(value, bytes):
value = value.hex()
core_type = 'bytes'
elif binary:
if prim == 'key':
value = forge_public_key(value).hex()
core_type = 'bytes'
elif prim in ['address', 'contract', 'key_hash']:
value = forge_address(value, tz_only=prim == 'key_hash').hex()
core_type = 'bytes'
return {core_type: str(value)}
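# Illustrative round-trip sketch (not part of the original module): for simple
# core types the two literal helpers above are inverses of each other, e.g.
#   encode_literal(42, 'nat')            -> {'int': '42'}
#   decode_literal({'int': '42'}, 'nat') -> 42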
def get_flat_nested(nested: Nested):
flat_args = list()
for arg in nested.args:
if isinstance(arg, Nested) and arg.prim == nested.prim:
flat_args.extend(get_flat_nested(arg))
else:
flat_args.append(arg)
return flat_args
def collapse_micheline(code) -> dict:
metadata = dict()
def get_annotation(x, prefix, default=None):
return next((a[1:] for a in x.get('annots', []) if a[0] == prefix), default)
def parse_node(node, path='0', parent_prim=None, entry=None):
if node['prim'] in ['storage', 'parameter']:
return parse_node(node['args'][0])
fieldname = get_annotation(node, '%')
typename = get_annotation(node, ':')
metadata[path] = skip_nones(
prim=node['prim'],
typename=typename,
fieldname=fieldname,
entry=entry
)
if node['prim'] == 'option':
return parse_node(
node=node['args'][0],
path=path + '0',
parent_prim=parent_prim,
entry=fieldname
)
elif node['prim'] in ['lambda', 'contract']:
metadata[path]['parameter'] = micheline_to_michelson(node['args'][0], inline=True)
return dict(path=path, args=[]) # stop there
args = [
parse_node(arg, path=path + str(i), parent_prim=node['prim'])
for i, arg in enumerate(node.get('args', []))
]
if node['prim'] in ['pair', 'or']:
res = Nested(node['prim'], args)
is_struct = node['prim'] == 'pair' and (typename or fieldname)
if is_struct or parent_prim != node['prim']:
args = get_flat_nested(res)
else:
return res
if args:
metadata[path]['args'] = list(map(lambda x: x['path'], args))
return dict(path=path, args=args)
parse_node(code)
return metadata
def build_maps(metadata: dict):
bin_types = {k: v['prim'] for k, v in metadata.items()}
bin_to_json, json_to_bin = {}, {}
def is_unit(bin_path):
node = metadata[bin_path]
return node.get('prim') == 'unit'
def get_entry(bin_path):
node = metadata[bin_path]
entry = node.get('entry', node.get('fieldname', node.get('typename')))
return entry.replace('_Liq_entry_', '') if entry else None
def get_lr_path(bin_path):
entry = ''
for i in range(len(bin_path) - 1, 0, -1):
lpath = bin_path[:i]
if bin_types[lpath] in ['or', 'enum', 'router']:
entry = {'0': 'l', '1': 'r'}[bin_path[i]] + entry
else:
return entry
assert entry, bin_path
return entry
def get_key(bin_path):
node = metadata[bin_path]
default = node['prim'] if node['prim'] in meaningful_types else None
return node.get('typename', node.get('fieldname', node.get('entry', default)))
def parse_node(bin_path='0', json_path='/'):
node = metadata[bin_path]
if node['prim'] in ['list', 'set', 'map', 'big_map']:
index = 0 if node['prim'] in ['list', 'set'] else 1
parse_node(node['args'][index], join(json_path, '{}'))
elif node['prim'] == 'or':
entries = list(map(get_entry, node['args']))
named = all(entries) and len(entries) == len(set(entries))
if all(map(is_unit, node['args'])):
bin_types[bin_path] = 'enum'
for i, arg in enumerate(node['args']):
bin_types[arg] = entries[i] if named else str(i)
parse_node(arg, join(json_path, bin_types[arg]))
else:
if not named:
entries = list(map(get_lr_path, node['args']))
bin_types[bin_path] = 'router'
for i, arg in enumerate(node['args']):
parse_node(arg, join(json_path, entries[i]))
elif node['prim'] == 'pair':
keys = list(map(get_key, node['args']))
named = all(keys) and len(keys) == len(set(keys))
bin_types[bin_path] = 'namedtuple' if named else 'tuple'
for i, arg in enumerate(node['args']):
parse_node(arg, join(json_path, keys[i] if named else str(i)))
bin_to_json[bin_path], json_to_bin[json_path] = json_path, bin_path
parse_node()
return bin_types, bin_to_json, json_to_bin
def parse_micheline(data, bin_to_json: dict, bin_types: dict, bin_root='0'):
json_values = dict()
wild_root = bin_to_json[bin_root]
def get_json_path(bin_path, params: list):
wild_path = bin_to_json.get(bin_path)
if wild_root != '/' and wild_path.startswith(wild_root):
wild_path = join('/', wild_path[len(wild_root):])
return wild_path.format(*params)
def set_value(bin_path, params: list, value):
json_path = get_json_path(bin_path, params)
json_values[json_path] = value
def parse_node(node, bin_path, params):
bin_type = bin_types[bin_path]
if bin_type in ['map', 'big_map', 'namedtuple', 'router']:
set_value(bin_path, params, dict)
elif bin_type in ['list', 'set', 'tuple']:
set_value(bin_path, params, list)
if isinstance(node, dict):
if node.get('prim') == 'Pair':
for i, arg in enumerate(node['args']):
parse_node(arg, bin_path + str(i), params)
elif node.get('prim') == 'Left':
parse_node(node['args'][0], bin_path + '0', params)
elif node.get('prim') == 'Right':
parse_node(node['args'][0], bin_path + '1', params)
elif node.get('prim') == 'Elt':
assert False # should be already handled
elif node.get('prim') == 'Some':
parse_node(node['args'][0], bin_path + '0', params)
elif node.get('prim') == 'None':
set_value(bin_path + '0', params, None)
elif node.get('prim') == 'Unit':
if bin_type == 'unit':
set_value(bin_path, params, None)
else:
json_path = dirname(get_json_path(bin_path, params))
json_values[json_path] = bin_type
elif bin_type == 'big_map':
pass
else:
set_value(bin_path, params, decode_literal(node, bin_types[bin_path]))
elif isinstance(node, list):
if bin_type in ['map', 'big_map']:
key_type = str
for elt in node:
key = decode_literal(elt['args'][0], bin_types[bin_path + '0'])
parse_node(elt['args'][1], bin_path + '1', params + [key])
key_type = type(key)
set_value(bin_path, params, TypedDict.make(key_type))
elif bin_type in ['set', 'list']:
for i, arg in enumerate(node):
parse_node(arg, bin_path + '0', params + [i])
elif bin_type == 'lambda':
set_value(bin_path, params, micheline_to_michelson(node))
else:
assert False, (node, bin_path)
else:
assert False, (node, bin_path)
parse_node(data, bin_root, [])
return json_values
def make_json(json_values: dict):
root = json_values['/']
if isinstance(root, type):
tree = root()
else:
return root
def get_parent_node(path):
node = tree
keys = dirname(path).split('/')
for key in keys:
if not key:
continue
if isinstance(node, list):
node = node[int(key)]
else:
node = node[key]
return node
for json_path, value in json_values.items():
if json_path == '/':
continue
if isinstance(value, type):
value = value()
parent_node = get_parent_node(json_path)
key_path = basename(json_path)
if isinstance(parent_node, list):
parent_node.insert(int(key_path), value)
else:
parent_node[key_path] = value
return tree
def parse_json(data, json_to_bin: dict, bin_types: dict, json_root='/'):
bin_values = defaultdict(dict) # type: Dict[str, dict]
def parse_entry(bin_path, index):
for i in range(len(bin_path) - 1, 0, -1):
lpath = bin_path[:i]
if bin_types[lpath] in ['or', 'router', 'enum']:
bin_values[lpath][index] = bin_path[i]
elif bin_types[lpath] in ['list', 'set', 'map', 'big_map']:
return
def parse_node(node, json_path, index='0'):
bin_path = json_to_bin[json_path]
bin_type = bin_types[bin_path]
if isinstance(node, dict):
if bin_type in ['map', 'big_map']:
bin_values[bin_path][index] = len(node)
parse_entry(bin_path, index)
for i, (key, value) in enumerate(node.items()):
bin_values[bin_path + '0'][f'{index}:{i}'] = key
parse_node(value, join(json_path, '{}'), f'{index}:{i}')
elif bin_type in ['pair', 'or', 'namedtuple', 'router']:
for key, value in node.items():
parse_node(value, join(json_path, key), index)
else:
assert False, (node, json_path)
elif isinstance(node, list):
if bin_type in ['list', 'set']:
bin_values[bin_path][index] = len(node)
parse_entry(bin_path, index)
for i, value in enumerate(node):
parse_node(value, join(json_path, '{}'), f'{index}:{i}')
elif bin_type in ['pair', 'tuple']:
for i, value in enumerate(node):
parse_node(value, join(json_path, str(i)), index)
elif bin_type == 'lambda':
bin_values[bin_path][index] = node
elif bin_type == 'or':
assert False, (node, bin_path) # must be at least lr encoded
else:
if bin_type == 'enum':
parse_node(node, join(json_path, node), index)
else:
bin_values[bin_path][index] = node
parse_entry(bin_path, index)
parse_node(data, json_root)
return dict(bin_values)
def make_micheline(bin_values: dict, bin_types: dict, bin_root='0', binary=False):
def get_length(bin_path, index):
try:
length = bin_values[bin_path][index]
except KeyError:
length = 0 # TODO: make sure there is an option ahead
return length
def encode_node(bin_path, index='0'):
bin_type = bin_types[bin_path]
if bin_type in ['pair', 'tuple', 'namedtuple']:
return dict(
prim='Pair',
args=list(map(lambda x: encode_node(bin_path + x, index), '01'))
)
elif bin_type in ['map', 'big_map']:
length = get_length(bin_path, index)
return [
dict(
prim='Elt',
args=[encode_node(bin_path + '0', f'{index}:{i}'),
encode_node(bin_path + '1', f'{index}:{i}')]
)
for i in range(length)
]
elif bin_type in ['set', 'list']:
length = get_length(bin_path, index)
return [
encode_node(bin_path + '0', f'{index}:{i}')
for i in range(length)
]
elif bin_type in ['or', 'router', 'enum']:
entry = bin_values[bin_path][index]
return dict(
prim={'0': 'Left', '1': 'Right'}[entry],
args=[encode_node(bin_path + entry, index)]
)
elif bin_type == 'option':
try:
value = encode_node(bin_path + '0', index)
if value:
return dict(prim='Some', args=[value])
else:
return dict(prim='None')
except KeyError:
return dict(prim='None')
elif bin_type == 'lambda':
return michelson_to_micheline(bin_values[bin_path][index])
elif bin_type == 'unit':
return dict(prim='Unit')
else:
value = bin_values[bin_path][index]
if value == bin_type:
return dict(prim='Unit')
elif value is None:
return None
else:
return encode_literal(value, bin_type, binary)
return encode_node(bin_root)
def make_default(bin_types: dict, root='0'):
def encode_node(bin_path):
bin_type = bin_types[bin_path]
if bin_type == 'option':
return dict(prim='None')
elif bin_type in ['pair', 'tuple', 'namedtuple']:
return dict(
prim='Pair',
args=list(map(lambda x: encode_node(bin_path + x), '01'))
)
elif bin_type in ['map', 'big_map', 'set', 'list']:
return []
elif bin_type in ['int', 'nat', 'mutez', 'timestamp']:
return {'int': '0'}
elif bin_type in ['string', 'bytes']:
return {'string': ''}
elif bin_type == 'bool':
return {'prim': 'False'}
elif bin_type == 'unit':
return {'prim': 'Unit'}
else:
raise ValueError(f'Cannot create default value for `{bin_type}` at `{bin_path}`')
return encode_node(root)
def michelson_to_micheline(data):
"""
Converts michelson source text into Micheline expression
:param data: Michelson string
:return: Micheline expression
"""
return michelson_parser().parse(data)
```
#### File: contracts/KT19kJoPZrPor5yPE5T5rTfLAXRfZVsqzjwT/test_micheline_coding_KT19kJ.py
```python
from unittest import TestCase
from tests import get_data
from pytezos.michelson.converter import build_schema, decode_micheline, encode_micheline
class MichelineCodingTestKT19kJ(TestCase):
@classmethod
def setUpClass(cls):
cls.maxDiff = None
code = get_data(
path='contracts/KT19kJoPZrPor5yPE5T5rTfLAXRfZVsqzjwT/code_KT19kJ.json')
cls.schema = dict(
parameter=build_schema(code[0]),
storage=build_schema(code[1])
)
def test_micheline_inverse_storage_KT19kJ(self):
expected = get_data(
path='contracts/KT19kJoPZrPor5yPE5T5rTfLAXRfZVsqzjwT/storage_KT19kJ.json')
decoded = decode_micheline(expected, self.schema['storage'])
actual = encode_micheline(decoded, self.schema['storage'])
self.assertEqual(expected, actual)
```
#### File: contracts/KT1EUTxJch3jR9VuQ5wV4HeWbs5BnUfQp3N3/test_micheline_coding_KT1EUT.py
```python
from unittest import TestCase
from tests import get_data
from pytezos.michelson.converter import build_schema, decode_micheline, encode_micheline
class MichelineCodingTestKT1EUT(TestCase):
@classmethod
def setUpClass(cls):
cls.maxDiff = None
code = get_data(
path='contracts/KT1EUTxJch3jR9VuQ5wV4HeWbs5BnUfQp3N3/code_KT1EUT.json')
cls.schema = dict(
parameter=build_schema(code[0]),
storage=build_schema(code[1])
)
def test_micheline_inverse_storage_KT1EUT(self):
expected = get_data(
path='contracts/KT1EUTxJch3jR9VuQ5wV4HeWbs5BnUfQp3N3/storage_KT1EUT.json')
decoded = decode_micheline(expected, self.schema['storage'])
actual = encode_micheline(decoded, self.schema['storage'])
self.assertEqual(expected, actual)
def test_micheline_inverse_parameter_ooLpuA(self):
expected = get_data(
path='contracts/KT1EUTxJch3jR9VuQ5wV4HeWbs5BnUfQp3N3/parameter_ooLpuA.json')
decoded = decode_micheline(expected, self.schema['parameter'])
actual = encode_micheline(decoded, self.schema['parameter'])
self.assertEqual(expected, actual)
def test_micheline_inverse_parameter_ooCD9m(self):
expected = get_data(
path='contracts/KT1EUTxJch3jR9VuQ5wV4HeWbs5BnUfQp3N3/parameter_ooCD9m.json')
decoded = decode_micheline(expected, self.schema['parameter'])
actual = encode_micheline(decoded, self.schema['parameter'])
self.assertEqual(expected, actual)
def test_micheline_inverse_parameter_onpm7h(self):
expected = get_data(
path='contracts/KT1EUTxJch3jR9VuQ5wV4HeWbs5BnUfQp3N3/parameter_onpm7h.json')
decoded = decode_micheline(expected, self.schema['parameter'])
actual = encode_micheline(decoded, self.schema['parameter'])
self.assertEqual(expected, actual)
def test_micheline_inverse_parameter_ooy1mv(self):
expected = get_data(
path='contracts/KT1EUTxJch3jR9VuQ5wV4HeWbs5BnUfQp3N3/parameter_ooy1mv.json')
decoded = decode_micheline(expected, self.schema['parameter'])
actual = encode_micheline(decoded, self.schema['parameter'])
self.assertEqual(expected, actual)
def test_micheline_inverse_parameter_oosjDx(self):
expected = get_data(
path='contracts/KT1EUTxJch3jR9VuQ5wV4HeWbs5BnUfQp3N3/parameter_oosjDx.json')
decoded = decode_micheline(expected, self.schema['parameter'])
actual = encode_micheline(decoded, self.schema['parameter'])
self.assertEqual(expected, actual)
def test_micheline_inverse_parameter_oocS2Y(self):
expected = get_data(
path='contracts/KT1EUTxJch3jR9VuQ5wV4HeWbs5BnUfQp3N3/parameter_oocS2Y.json')
decoded = decode_micheline(expected, self.schema['parameter'])
actual = encode_micheline(decoded, self.schema['parameter'])
self.assertEqual(expected, actual)
def test_micheline_inverse_parameter_oocB4e(self):
expected = get_data(
path='contracts/KT1EUTxJch3jR9VuQ5wV4HeWbs5BnUfQp3N3/parameter_oocB4e.json')
decoded = decode_micheline(expected, self.schema['parameter'])
actual = encode_micheline(decoded, self.schema['parameter'])
self.assertEqual(expected, actual)
```
#### File: pytezos/tests/templates.py
```python
michelson_coding_test_case = """from unittest import TestCase
from tests import get_data
from pytezos.michelson.micheline import micheline_to_michelson, michelson_to_micheline
class MichelsonCodingTest{case}(TestCase):
def setUp(self):
self.maxDiff = None
"""
test_michelson_parse = """
def test_michelson_parse_{case}(self):
expected = get_data(
path='{json_path}')
actual = michelson_to_micheline(get_data(
path='{tz_path}'))
self.assertEqual(expected, actual)
"""
test_michelson_format = """
def test_michelson_format_{case}(self):
expected = get_data(
path='{tz_path}')
actual = micheline_to_michelson(get_data(
path='{json_path}'),
inline=True)
self.assertEqual(expected, actual)
"""
test_michelson_inverse = """
def test_michelson_inverse_{case}(self):
expected = get_data(
path='{json_path}')
actual = michelson_to_micheline(micheline_to_michelson(expected))
self.assertEqual(expected, actual)
"""
micheline_coding_test_case = """from unittest import TestCase
from tests import get_data
from pytezos.michelson.converter import build_schema, encode_micheline, decode_micheline
class MichelineCodingTest{case}(TestCase):
@classmethod
def setUpClass(cls):
cls.maxDiff = None
code = get_data(
path='{json_path}')
cls.schema = dict(
parameter=build_schema(code[0]),
storage=build_schema(code[1])
)
"""
test_micheline_inverse = """
def test_micheline_inverse_{case}(self):
expected = get_data(
path='{json_path}')
decoded = decode_micheline(expected, self.schema['{section}'])
actual = encode_micheline(decoded, self.schema['{section}'])
self.assertEqual(expected, actual)
"""
operation_forging_test_case = """from unittest import TestCase
from tests import get_data
from pytezos.operation.forge import forge_operation_group
class OperationForgingTest{case}(TestCase):
def setUp(self):
self.maxDiff = None
def test_forge_{case}(self):
expected = get_data(
path='{hex_path}')
actual = forge_operation_group(get_data(
path='{json_path}'))
self.assertEqual(expected, actual)
"""
big_map_test_case = """from unittest import TestCase
from tests import get_data
from pytezos.michelson.contract import ContractStorage
class BigMapCodingTest{case}(TestCase):
def setUp(self):
self.maxDiff = None
def test_big_map_{case}(self):
section = get_data(
path='{code_path}')
storage = ContractStorage(section)
big_map_diff = get_data(
path='{diff_path}')
expected = [
dict(key=item['key'], value=item.get('value'))
for item in big_map_diff
]
big_map = storage.big_map_diff_decode(expected)
actual = storage.big_map_diff_encode(big_map)
self.assertEqual(expected, actual)
"""
``` |
{
"source": "jpictor/dskit",
"score": 3
} |
#### File: dskit/app/pg2json.py
```python
import os
import sys
import psycopg2
import psycopg2.extras
import ujson
import json
import argparse
def pg_connect(**cargs):
pg_conn = psycopg2.connect(**cargs)
pg_conn.reset()
pg_conn.set_session(
isolation_level=psycopg2.extensions.ISOLATION_LEVEL_REPEATABLE_READ,
readonly=True,
deferrable=True,
autocommit=True)
cursor = pg_conn.cursor()
cursor.execute("set timezone='UTC'")
cursor.close()
return pg_conn
def server_side_cursor_fetchall(pg_conn, sql_query, sql_args=None, chunk_size=5000, using='default'):
    # NOTE: sql_args is accepted for API symmetry but is not interpolated here.
    sql = 'DECLARE ssc CURSOR FOR {}'.format(sql_query)
sql_fetch_chunk = 'FETCH {} FROM ssc'.format(chunk_size)
cursor = pg_conn.cursor(cursor_factory=psycopg2.extras.RealDictCursor)
try:
cursor.execute('BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED READ ONLY')
cursor.execute(sql)
try:
cursor.execute(sql_fetch_chunk)
while True:
rows = cursor.fetchall()
if not rows:
break
for row in rows:
yield row
cursor.execute(sql_fetch_chunk)
finally:
cursor.execute('CLOSE ssc')
finally:
cursor.close()
def get_pg_tables(pg_conn):
cursor = pg_conn.cursor()
cursor.execute("select relname from pg_class where relkind='r' and relname !~ '^(pg_|sql_)';")
tables = [x[0] for x in cursor.fetchall()]
return tables
def table2json(pg_conn, output_dir, table):
path = os.path.join(output_dir, '{}.txt'.format(table))
print('exporting table {} to json file {}'.format(table, path))
sql = "select * from \"{}\"".format(table)
f = open(path, 'w')
i = 0
for i, row in enumerate(server_side_cursor_fetchall(pg_conn, sql)):
try:
f.write(ujson.dumps(row) + '\n')
except Exception:
print('BAD ROW JSON')
# if i > 0 and i % 1000 == 0:
# print('wrote {} records'.format(i))
print('wrote {} records'.format(i))
f.close()
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--host', help='database hostname', type=str, default='localhost')
parser.add_argument('--port', help='database port', type=int, default=5432)
parser.add_argument('--password', help='database password', type=str, default='password')
parser.add_argument('--user', help='database user', type=str, default='postgres')
parser.add_argument('name', help='database name', type=str)
parser.add_argument('output_dir', help='output directory for JSON files', type=str)
args = parser.parse_args()
db_source = {
'database': args.name,
'host': args.host,
'password': <PASSWORD>.password,
'user': args.user
}
if args.port:
db_source['port'] = args.port
def connect():
_pg_conn = pg_connect(**db_source)
try:
psycopg2.extras.register_hstore(_pg_conn)
except:
pass
return _pg_conn
pg_conn = connect()
tables = get_pg_tables(pg_conn)
pg_conn.close()
for table in tables:
try:
pg_conn = connect()
table2json(pg_conn, args.output_dir, table)
except psycopg2.ProgrammingError as e:
print('EXCEPTION: {}'.format(e))
finally:
pg_conn.close()
if __name__ == '__main__':
main()
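# Example invocation (hypothetical connection details and output directory):
#   python pg2json.py --host localhost --port 5432 --user postgres --password secret mydb ./json_out
# Each table is exported to <output_dir>/<table>.txt as newline-delimited JSON rows.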
```
#### File: dskit/app/spark_sql_job.py
```python
import time
import sys
from collections import defaultdict
import os
import argparse
import glob
import pytz
import datetime
from itertools import tee, izip, combinations, chain
import isodate
import ujson as json
import numpy as np
import logging
from pyspark.mllib.linalg import Vectors
from pyspark.mllib.feature import IDF
from pyspark.sql import SQLContext, Row
from pyspark import SparkConf, SparkContext
from pyspark.sql.types import IntegerType, ArrayType, StringType, LongType, DoubleType, BooleanType, StructType, StructField
from pyspark.sql.types import DateType, TimestampType
from udf_functions import datetime_from_isodate, timestamp_from_isodate, datetime_from_timestamp
logger = logging.getLogger('')
class SparkSQLJob(object):
app_name = 'default name'
load_tables = None
def __init__(self):
self.app_label = self.app_name.lower().replace(' ', '_')
self.app_path = os.path.join(os.environ['SERVICE_ROOT'], 'app')
self.sc = None
self.conf = None
self.sql_context = None
self.args = None
self.data_dir = None
def add_args(self, parser):
"""
Implement in derived class to add arguments.
"""
def parse_args(self):
parser = argparse.ArgumentParser()
parser.add_argument('data_dir', help='directory of data dir with JSON files', type=str)
parser.add_argument('--output-type', type=str, dest='output_type', default='text')
parser.add_argument('--output-path', type=str, dest='output_path', default='./output.csv')
self.add_args(parser)
self.args = parser.parse_args()
self.data_dir = self.args.data_dir
def create_spark_conf(self):
## set up Spark SQL context
self.conf = SparkConf().setAppName(self.app_name)
def create_spark_context(self):
self.sc = SparkContext(conf=self.conf)
## pretty funny way of pulling the Spark logger
global logger
log4jLogger = self.sc._jvm.org.apache.log4j
logger = log4jLogger.LogManager.getLogger(__name__)
logger.info("*** pyspark script logger initialized")
## add extra python lib files
def add_py_file(filename):
self.sc.addPyFile(os.path.join(self.app_path, filename))
add_py_file('spark_sql_job.py')
add_py_file('csv_unicode.py')
add_py_file('udf_functions.py')
self.add_python_files()
def add_python_files(self):
"""
Implement in derived class to add more python files.
"""
def create_spark_sql_context(self):
self.sql_context = SQLContext(self.sc)
self.register_default_sql_proceedures()
self.register_sql_proceedures()
def register_default_sql_proceedures(self):
self.sql_context.registerFunction('timestamp_from_isodate', timestamp_from_isodate, IntegerType())
self.sql_context.registerFunction('datetime_from_isodate', datetime_from_isodate, TimestampType())
self.sql_context.registerFunction('datetime_from_timestamp', datetime_from_timestamp, TimestampType())
def register_sql_proceedures(self):
"""
        Implement in a derived class to register custom SQL procedures.
"""
def load_directory_as_db(self, dir_path, db_name):
"""
        Loads a directory of .txt files containing JSON rows into the Spark
        SQL context as tables named '<dir-name>_<file-name>'.
        Filenames with a hyphen are treated as partitioned files to be
combined into one table using the name before the hyphen.
For example: logs-201601.txt, logs-201602.txt would be combined
into a 'logs' table.
"""
load_dir = os.path.join(self.data_dir, dir_path)
data_files = glob.glob(os.path.join(load_dir, '*.txt'))
file_groups = defaultdict(list)
for path in data_files:
path_noext, _ = os.path.splitext(path)
filename_noext = os.path.basename(path_noext)
i = filename_noext.find('-')
if i == -1:
table_name = filename_noext
else:
table_name = filename_noext[:i]
file_groups[table_name].append(path)
for table_name in sorted(file_groups.keys()):
register_name = '{}_{}'.format(db_name, table_name)
data_files = file_groups[table_name]
logger.info('REGISTERING {}:{}'.format(register_name, data_files))
data_files = filter(lambda x: os.path.getsize(x) > 0, data_files)
if self.load_tables and register_name not in self.load_tables:
continue
jdb = self.sql_context.read.json(data_files)
jdb.printSchema()
jdb.registerTempTable(register_name)
def load_data_dir(self):
data_dirs = os.listdir(self.data_dir)
logger.info('*** DATA-DIR:{} DATA-DIRS:{}'.format(self.data_dir, data_dirs))
for dirname in data_dirs:
self.load_directory_as_db(dirname, dirname)
def write_local_output(self, row_iter):
"""
utility to write local output file
"""
import csv_unicode
if self.args.output_type == 'text':
for row in row_iter:
print row
else:
with open(self.args.output_path, 'wb') as csv_file:
writer = csv_unicode.UnicodeWriter(csv_file)
for row in row_iter:
writer.writerow(row)
def run(self):
self.parse_args()
start_dt = datetime.datetime.utcnow()
try:
self.create_spark_conf()
self.create_spark_context()
self.create_spark_sql_context()
self.register_default_sql_proceedures()
self.load_data_dir()
self.task()
end_dt = datetime.datetime.utcnow()
seconds = long((end_dt - start_dt).total_seconds())
except Exception as e:
end_dt = datetime.datetime.utcnow()
seconds = long((end_dt - start_dt).total_seconds())
raise
def task(self):
"""
Implement in derived class to do stuff.
"""
```
#### File: dskit/examples/user_example.py
```python
from spark_sql_job import SparkSQLJob
class Job(SparkSQLJob):
app_name = 'Example Query User Table'
load_tables = [
'pictorlabs_auth_user'
]
def task(self):
sql = """
select u.id as id,
u.username as username,
u.email as email,
u.last_login as last_login_time_stamp,
datetime_from_timestamp(u.last_login) as last_login
from pictorlabs_auth_user u
order by u.last_login desc
"""
users_rdd = self.sql_context.sql(sql)
self.write_local_output(users_rdd.collect())
if __name__ == '__main__':
Job().run()
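# Example submission (hypothetical paths; the flags are defined in SparkSQLJob.parse_args):
#   spark-submit user_example.py /data/json_dump --output-type csv --output-path ./users.csv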
``` |
{
"source": "jpictor/tlsmd",
"score": 2
} |
#### File: src/baseapp/context_processors.py
```python
from django.conf import settings
def service_settings(request):
return {'SERVICE_VERSION': settings.SERVICE_VERSION}
```
#### File: src/baseapp/db_ext.py
```python
from django.db import connections
def get_named_connection(using='default'):
"""
Returns the connection for the argument database. The unit tests, however, need
to return the same database connection for 'default' and 'read'. In this function,
just return the default database any any requested database to make the unit tests
happy.
@param using: database connection label
@return: database connection object
"""
from django.conf import settings
if settings.TESTING:
using = 'default'
return connections[using]
def sql_execute(sql_query, sql_args=None, using='default'):
"""
Executes and returns all the rows in a SQL query. This should only
be used when the number of results is expected to be small.
@param sql_query: raw SQL query for database
@param sql_args: iterable of database arguments
@param using: database connection label
@return: None
"""
cursor = get_named_connection(using).cursor()
try:
cursor.execute(sql_query, sql_args)
finally:
cursor.close()
def sql_execute_fetchall(sql_query, sql_args=None, using='default'):
"""
Executes and returns all the rows in a SQL query. This should only
be used when the number of results is expected to be small.
@param sql_query: raw SQL query for database
@param sql_args: iterable of database arguments
@param using: database connection label
@return: list of SQL query result rows
"""
cursor = get_named_connection(using).cursor()
try:
cursor.execute(sql_query, sql_args)
rows = cursor.fetchall()
return rows
finally:
cursor.close()
def server_side_cursor_fetchall(sql_query, sql_args=None, chunk_size=5000, using='default'):
"""
Generator that iterates through rows of a SQL query using a server-side cursor.
Fetches rows in chunks for performance. This is for Postgres only. A version
for MySQL is needed.
@param sql_query: The query to execute.
@param chunk_size: The number of rows to fetch in a single chunk.
"""
    # NOTE: sql_args is documented above but is not interpolated into the query here.
    sql = 'DECLARE ssc CURSOR FOR {}'.format(sql_query)
sql_fetch_chunk = 'FETCH {} FROM ssc'.format(chunk_size)
cursor = get_named_connection(using).cursor()
try:
cursor.execute('BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED READ ONLY')
cursor.execute(sql)
try:
cursor.execute(sql_fetch_chunk)
while True:
rows = cursor.fetchall()
if not rows:
break
for row in rows:
yield row
cursor.execute(sql_fetch_chunk)
finally:
cursor.execute('CLOSE ssc')
finally:
cursor.close()
def server_side_cursor_fetchall_column_descriptions(sql_query, sql_args=None, chunk_size=5000, using='default'):
"""
Generator that iterates through rows of a SQL query using a server-side cursor.
Fetches rows in chunks for performance. This is for Postgres only.
@param sql_query: The query to execute.
@param chunk_size: The number of rows to fetch in a single chunk.
@return: (column_description, row_iter)
"""
    # NOTE: sql_args is documented above but is not interpolated into the query here.
    sql = 'DECLARE ssc CURSOR FOR {}'.format(sql_query)
sql_fetch_chunk = 'FETCH {} FROM ssc'.format(chunk_size)
cursor = connections[using].cursor()
try:
cursor.execute('BEGIN TRANSACTION')
cursor.execute(sql)
cursor.execute(sql_fetch_chunk)
except Exception:
cursor.close()
raise
def row_iter():
try:
while True:
rows = cursor.fetchall()
if not rows:
break
for row in rows:
yield row
cursor.execute(sql_fetch_chunk)
finally:
cursor.execute('CLOSE ssc')
cursor.close()
return cursor.description, row_iter()
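# Illustrative usage sketch (hypothetical query): rows are pulled from the
# server-side cursor in chunks of `chunk_size`, so the full result set is never
# held in memory at once.
#   for row in server_side_cursor_fetchall('SELECT id, username FROM auth_user'):
#       handle(row)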
```
#### File: src/baseapp/tasks.py
```python
from .celery_ext import app
from .logging_ext import logger
import django.db
@app.task(name='baseapp.healthcheck_task', bind=True)
def healthcheck_task(self):
"""
Healthcheck task, test connections to all services.
"""
healthy = True
msgs = []
## check MYSQL database
try:
cursor = django.db.connection.cursor()
cursor.execute('SELECT 1')
cursor.fetchone()
msgs.append('sql_db=OK')
except Exception:
msgs.append('sql_db=FAILED')
healthy = False
## create health check log response
msg = 'healthcheck: %s' % ','.join(msgs)
if healthy:
logger.info(msg)
else:
logger.error(msg)
```
#### File: src/mmLib/AtomMath.py
```python
import math
import numpy
import Constants
##
## Linear Algebra
##
def length(u):
"""Calculates the length of u.
"""
return math.sqrt(numpy.dot(u, u))
def normalize(u):
"""Returns the normalized vector along u.
"""
return u/math.sqrt(numpy.dot(u, u))
def cross(u, v):
"""Cross product of u and v:
Cross[u,v] = {-u3 v2 + u2 v3, u3 v1 - u1 v3, -u2 v1 + u1 v2}
"""
return numpy.array([ u[1]*v[2] - u[2]*v[1],
u[2]*v[0] - u[0]*v[2],
u[0]*v[1] - u[1]*v[0] ], float)
##
## Internal Linear Algebra (without using numpy)
##
def internal_cross(u, v):
"""Returns the cross product of two vectors. Should be identical to the
output of numpy.cross(u, v).
"""
return(u[1]*v[2] - v[1]*u[2],
u[2]*v[0] - v[2]*u[0],
u[0]*v[1] - v[0]*u[1])
def internal_dot(u, v):
"""Returns the dot product of two vectors. Should be identical to the
output of numpy.dot(u, v).
"""
return u[0]*v[0] + u[1]*v[1] + u[2]*v[2]
def internal_inv3x3(u):
"""Returns the inverse of a 3x3 matrix. Should be identical to the
output of numpy.linalg.inv(u).
"""
inv = [[0,0,0],[0,0,0],[0,0,0]]
c = []
c.append(internal_cross(u[1], u[2])) ## c[0]
c.append(internal_cross(u[2], u[0])) ## c[1]
c.append(internal_cross(u[0], u[1])) ## c[2]
d = internal_dot(u[0], c[0])
if(abs(d) < 1e-30):
return 0.0, inv
for i in range(0,3):
for j in range(0,3):
inv[i][j] = float(c[j][i]) / float(d)
return d, inv
##
## Rotation/Displacement
##
def rmatrix(alpha, beta, gamma):
"""Return a rotation matrix based on the Euler angles alpha,
beta, and gamma in radians.
"""
cosA = math.cos(alpha)
cosB = math.cos(beta)
cosG = math.cos(gamma)
sinA = math.sin(alpha)
sinB = math.sin(beta)
sinG = math.sin(gamma)
R = numpy.array(
[[cosB*cosG, cosG*sinA*sinB-cosA*sinG, cosA*cosG*sinB+sinA*sinG],
[cosB*sinG, cosA*cosG+sinA*sinB*sinG, cosA*sinB*sinG-cosG*sinA ],
[-sinB, cosB*sinA, cosA*cosB ]], float)
assert numpy.allclose(numpy.linalg.det(R), 1.0)
return R
def rmatrixu(u, theta):
"""Return a rotation matrix caused by a right hand rotation of theta
radians around vector u.
"""
if numpy.allclose(theta, 0.0) or numpy.allclose(numpy.dot(u,u), 0.0):
return numpy.identity(3, float)
x, y, z = normalize(u)
sa = math.sin(theta)
ca = math.cos(theta)
R = numpy.array(
[[1.0+(1.0-ca)*(x*x-1.0), -z*sa+(1.0-ca)*x*y, y*sa+(1.0-ca)*x*z],
[z*sa+(1.0-ca)*x*y, 1.0+(1.0-ca)*(y*y-1.0), -x*sa+(1.0-ca)*y*z],
[-y*sa+(1.0-ca)*x*z, x*sa+(1.0-ca)*y*z, 1.0+(1.0-ca)*(z*z-1.0)]], float)
try:
assert numpy.allclose(numpy.linalg.det(R), 1.0)
except AssertionError:
print "rmatrixu(%s, %f) determinant(R)=%f" % (
u, theta, numpy.linalg.det(R))
raise
return R
def dmatrix(alpha, beta, gamma):
"""Returns the displacement matrix based on rotation about Euler
angles alpha, beta, and gamma.
"""
return rmatrix(alpha, beta, gamma) - numpy.identity(3, float)
def dmatrixu(u, theta):
"""Return a displacement matrix caused by a right hand rotation of theta
radians around vector u.
"""
return rmatrixu(u, theta) - numpy.identity(3, float)
def rmatrixz(vec):
"""Return a rotation matrix which transforms the coordinate system
such that the vector vec is aligned along the z axis.
"""
u, v, w = normalize(vec)
d = math.sqrt(u*u + v*v)
if d != 0.0:
Rxz = numpy.array([ [ u/d, v/d, 0.0 ],
[ -v/d, u/d, 0.0 ],
[ 0.0, 0.0, 1.0 ] ], float)
else:
Rxz = numpy.identity(3, float)
Rxz2z = numpy.array([ [ w, 0.0, -d],
[ 0.0, 1.0, 0.0],
[ d, 0.0, w] ], float)
R = numpy.dot(Rxz2z, Rxz)
try:
assert numpy.allclose(numpy.linalg.det(R), 1.0)
except AssertionError:
print "rmatrixz(%s) determinant(R)=%f" % (vec, numpy.linalg.det(R))
raise
return R
##
## Quaternions
##
def rquaternionu(u, theta):
"""Returns a quaternion representing the right handed rotation of theta
radians about vector u. Quaternions are typed as Numeric Python
numpy.arrays of length 4.
"""
u = normalize(u)
half_sin_theta = math.sin(theta / 2.0)
x = u[0] * half_sin_theta
y = u[1] * half_sin_theta
z = u[2] * half_sin_theta
w = math.cos(theta / 2.0)
## create quaternion
q = numpy.array((x, y, z, w), float)
assert numpy.allclose(math.sqrt(numpy.dot(q,q)), 1.0)
return q
def addquaternion(q1, q2):
"""Adds quaternions q1 and q2. Quaternions are typed as Numeric
Python numpy.arrays of length 4.
"""
assert numpy.allclose(math.sqrt(numpy.dot(q1,q1)), 1.0)
assert numpy.allclose(math.sqrt(numpy.dot(q2,q2)), 1.0)
x1, y1, z1, w1 = q1
x2, y2, z2, w2 = q2
x = w1*x2 + x1*w2 + y1*z2 - z1*y2
y = w1*y2 + y1*w2 + z1*x2 - x1*z2
z = w1*z2 + z1*w2 + x1*y2 - y1*x2
w = w1*w2 - x1*x2 - y1*y2 - z1*z2
q = numpy.array((x, y, z, w), float)
## normalize quaternion
q = q / math.sqrt(numpy.dot(q,q))
assert numpy.allclose(math.sqrt(numpy.dot(q,q)), 1.0)
return q
def rmatrixquaternion(q):
"""Create a rotation matrix from q quaternion rotation.
Quaternions are typed as Numeric Python numpy.arrays of length 4.
"""
assert numpy.allclose(math.sqrt(numpy.dot(q,q)), 1.0)
x, y, z, w = q
xx = x*x
xy = x*y
xz = x*z
xw = x*w
yy = y*y
yz = y*z
yw = y*w
zz = z*z
zw = z*w
r00 = 1.0 - 2.0 * (yy + zz)
r01 = 2.0 * (xy - zw)
r02 = 2.0 * (xz + yw)
r10 = 2.0 * (xy + zw)
r11 = 1.0 - 2.0 * (xx + zz)
r12 = 2.0 * (yz - xw)
r20 = 2.0 * (xz - yw)
r21 = 2.0 * (yz + xw)
r22 = 1.0 - 2.0 * (xx + yy)
R = numpy.array([[r00, r01, r02],
[r10, r11, r12],
[r20, r21, r22]], float)
assert numpy.allclose(numpy.linalg.det(R), 1.0)
return R
def quaternionrmatrix(R):
"""Return a quaternion calculated from the argument rotation matrix R.
"""
assert numpy.allclose(numpy.linalg.det(R), 1.0)
t = numpy.trace(R) + 1.0
if t>1e-5:
w = math.sqrt(1.0 + numpy.trace(R)) / 2.0
w4 = 4.0 * w
x = (R[2,1] - R[1,2]) / w4
y = (R[0,2] - R[2,0]) / w4
z = (R[1,0] - R[0,1]) / w4
else:
if R[0,0]>R[1,1] and R[0,0]>R[2,2]:
S = math.sqrt(1.0 + R[0,0] - R[1,1] - R[2,2]) * 2.0
x = 0.25 * S
y = (R[0,1] + R[1,0]) / S
z = (R[0,2] + R[2,0]) / S
w = (R[1,2] - R[2,1]) / S
elif R[1,1]>R[2,2]:
S = math.sqrt(1.0 + R[1,1] - R[0,0] - R[2,2]) * 2.0
x = (R[0,1] + R[1,0]) / S
y = 0.25 * S
z = (R[1,2] + R[2,1]) / S
w = (R[0,2] - R[2,0]) / S
else:
S = math.sqrt(1.0 + R[2,2] - R[0,0] - R[1,1]) * 2
x = (R[0,2] + R[2,0]) / S
y = (R[1,2] + R[2,1]) / S
z = 0.25 * S
w = (R[0,1] - R[1,0] ) / S
q = numpy.array((x, y, z, w), float)
assert numpy.allclose(math.sqrt(numpy.dot(q,q)), 1.0)
return q
##
## Bond Angles
##
def calc_distance(a1, a2):
"""Returns the distance between two argument atoms.
"""
if a1 == None or a2 == None:
return None
return length(a1.position - a2.position)
def calc_angle(a1, a2, a3):
"""Return the angle between the three argument atoms.
"""
if a1 == None or a2 == None or a3 == None:
return None
a21 = a1.position - a2.position
a21 = a21 / (length(a21))
a23 = a3.position - a2.position
a23 = a23 / (length(a23))
return math.acos(numpy.dot(a21, a23))
def calc_torsion_angle_old(a1, a2, a3, a4):
"""Calculates the torsion angle between the four argument atoms.
Note: This "old" subroutine doesn't appear to do what it claims. Please
see the 'new' calc_torsion_angle() function below.
"""
if a1 == None or a2 == None or a3 == None or a4 == None:
return None
a12 = a2.position - a1.position
a23 = a3.position - a2.position
a34 = a4.position - a3.position
n12 = cross(a12, a23)
n34 = cross(a23, a34)
n12 = n12 / length(n12)
n34 = n34 / length(n34)
cross_n12_n34 = cross(n12, n34)
direction = cross_n12_n34 * a23
scalar_product = numpy.dot(n12, n34)
if scalar_product > 1.0:
scalar_product = 1.0
if scalar_product < -1.0:
scalar_product = -1.0
angle = math.acos(scalar_product)
## E.g, direction = [0.70710678, 0.0, 0.0]
if direction.all() < 0.0:
## True if _all_ elements of 'direction' are true (or if 'direction'
## is empty)
angle = -angle
return angle
def calc_torsion_angle(a1, a2, a3, a4, sqrt=math.sqrt, acos=math.acos):
"""Calculates the torsion angle between the four argument atoms.
"""
if a1 == None or a2 == None or a3 == None or a4 == None:
return None
v12x = a1.position[0] - a2.position[0]
v12y = a1.position[1] - a2.position[1]
v12z = a1.position[2] - a2.position[2]
v32x = a3.position[0] - a2.position[0]
v32y = a3.position[1] - a2.position[1]
v32z = a3.position[2] - a2.position[2]
v43x = a4.position[0] - a3.position[0]
v43y = a4.position[1] - a3.position[1]
v43z = a4.position[2] - a3.position[2]
vn13x = v12y*v32z - v12z*v32y
vn13y = v12z*v32x - v12x*v32z
vn13z = v12x*v32y - v12y*v32x
vn24x = v32z*v43y - v32y*v43z
vn24y = v32x*v43z - v32z*v43x
vn24z = v32y*v43x - v32x*v43y
v12 = vn13x*vn24x + vn13y*vn24y + vn13z*vn24z
v11 = vn13x**2 + vn13y**2 + vn13z**2
v22 = vn24x**2 + vn24y**2 + vn24z**2
angle = v12/sqrt(v11*v22)
if angle >= 1.0:
return 0.0
elif angle <= -1.0:
return -180.0
else:
angle = acos(angle) * Constants.RAD2DEG
vtmp = vn13x * (vn24y*v32z - vn24z*v32y) + \
vn13y * (vn24z*v32x - vn24x*v32z) + \
vn13z * (vn24x*v32y - vn24y*v32x) < 0.0
if vtmp:
return -angle
else:
return angle
##
## Atomic ADPs
##
def calc_CCuij(U, V):
"""Calculate the correlation coefficient for anisotropic ADP tensors U
and V.
"""
## FIXME: Check for non-positive Uij's, 2009-08-19
invU = numpy.linalg.inv(U)
invV = numpy.linalg.inv(V)
#invU = internal_inv3x3(U)
#invV = internal_inv3x3(V)
det_invU = numpy.linalg.det(invU)
det_invV = numpy.linalg.det(invV)
return ( math.sqrt(math.sqrt(det_invU * det_invV)) /
math.sqrt((1.0/8.0) * numpy.linalg.det(invU + invV)) )
def calc_Suij(U, V):
"""Calculate the similarity of anisotropic ADP tensors U and V.
"""
## FIXME: Check for non-positive Uij's, 2009-08-19
eqU = numpy.trace(U) / 3.0
eqV = numpy.trace(V) / 3.0
isoU = eqU * numpy.identity(3, float)
isoV = eqV * numpy.identity(3, float)
return ( calc_CCuij(U, (eqU/eqV)*V) /
(calc_CCuij(U, isoU) * calc_CCuij(V, isoV)) )
def calc_DP2uij(U, V):
"""Calculate the square of the volumetric difference in the probability
density function of anisotropic ADP tensors U and V.
"""
invU = numpy.linalg.inv(U)
invV = numpy.linalg.inv(V)
det_invU = numpy.linalg.det(invU)
det_invV = numpy.linalg.det(invV)
Pu2 = math.sqrt( det_invU / (64.0 * Constants.PI3) )
Pv2 = math.sqrt( det_invV / (64.0 * Constants.PI3) )
Puv = math.sqrt(
(det_invU * det_invV) / (8.0*Constants.PI3 * numpy.linalg.det(invU + invV)))
dP2 = Pu2 + Pv2 - (2.0 * Puv)
return dP2
def calc_anisotropy(U):
"""Calculates the anisotropy of a atomic ADP tensor U. Anisotropy is
defined as the smallest eigenvalue of U divided by the largest eigenvalue
of U.
"""
evals = linalg.eigenvalues(U)
return min(evals) / max(evals)
def diff_trace_UV(U, V):
"""Calculates the trace difference of anisotropic ADP tensors U and V.
"""
return abs((numpy.trace(U) - numpy.trace(V))/ 3.0)
def sum_square_diff(U, V):
"""Calculates the sum of the differences of anisotropic ADP tensors
U and V squared.
"""
return abs(numpy.sum(numpy.subtract(U,V)**2))
def calc_rosenfeld(a, b, d, U, V):
n = numpy.array([(a[0] - b[0])/d, (a[1] - b[1])/d, (a[2] - b[2])/d])
#Un = numpy.dot(numpy.dot(n, U), numpy.transpose(n))
#Vn = numpy.dot(numpy.dot(n, V), numpy.transpose(n))
Un = internal_dot(internal_dot(n, U), numpy.transpose(n))
Vn = internal_dot(internal_dot(n, V), numpy.transpose(n))
return abs(Un - Vn)
##
## Calculations on groups of atoms
##
def calc_atom_centroid(atom_iter):
"""Calculates the centroid of all contained Atom instances and
returns a Vector to the centroid.
"""
num = 0
centroid = numpy.zeros(3, float)
for atm in atom_iter:
if atm.position != None:
centroid += atm.position
num += 1
return centroid / num
def calc_atom_mean_temp_factor(atom_iter):
"""Calculates the average temperature factor of all contained
Atom instances and returns the average temperature factor.
"""
num_tf = 0
adv_tf = 0.0
for atm in atom_iter:
if atm.temp_factor != None:
adv_tf += atm.temp_factor
num_tf += 1
return adv_tf / num_tf
def calc_inertia_tensor(atom_iter, origin):
"""Calculate a moment-of-inertia tensor at the given origin assuming all
atoms have the same mass.
"""
I = numpy.zeros((3,3), float)
for atm in atom_iter:
x = atm.position - origin
I[0,0] += x[1]**2 + x[2]**2
I[1,1] += x[0]**2 + x[2]**2
I[2,2] += x[0]**2 + x[1]**2
I[0,1] += - x[0]*x[1]
I[1,0] += - x[0]*x[1]
I[0,2] += - x[0]*x[2]
I[2,0] += - x[0]*x[2]
I[1,2] += - x[1]*x[2]
I[2,1] += - x[1]*x[2]
evals, evecs = numpy.linalg.eig(I)
evecs = numpy.transpose(evecs) ## rows of evecs are the eigenvectors, matching the old Numeric convention used below
## order the tensor such that the largest
## principal component is along the z-axis, and
## the second largest is along the y-axis
if evals[0] >= evals[1] and evals[0] >= evals[2]:
if evals[1] >= evals[2]:
R = numpy.array((evecs[2], evecs[1], evecs[0]), float)
else:
R = numpy.array((evecs[1], evecs[2], evecs[0]), float)
elif evals[1] >= evals[0] and evals[1] >= evals[2]:
if evals[0] >= evals[2]:
R = numpy.array((evecs[2], evecs[0], evecs[1]), float)
else:
R = numpy.array((evecs[0], evecs[2], evecs[1]), float)
elif evals[2] >= evals[0] and evals[2] >= evals[1]:
if evals[0] >= evals[1]:
R = numpy.array((evecs[1], evecs[0], evecs[2]), float)
else:
R = numpy.array((evecs[0], evecs[1], evecs[2]), float)
## make sure the tensor is right-handed
if numpy.allclose(numpy.linalg.det(R), -1.0):
I = numpy.identity(3, float)
I[0,0] = -1.0
R = numpy.dot(I, R)
assert numpy.allclose(numpy.linalg.det(R), 1.0)
return R
```
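A small consistency-check sketch for the rotation helpers above: building a rotation directly with rmatrixu() should agree with going through a quaternion via rquaternionu() and rmatrixquaternion(). The import path assumes the mmLib package layout shown in this document.
```python
# Hedged sketch: check that the axis/angle, quaternion and matrix helpers agree.
import math
import numpy
from mmLib.AtomMath import (rmatrixu, rquaternionu,
                            rmatrixquaternion, quaternionrmatrix)

axis = numpy.array([0.0, 0.0, 1.0])
theta = math.pi / 3.0

R_direct = rmatrixu(axis, theta)                         # axis/angle -> matrix
R_via_q = rmatrixquaternion(rquaternionu(axis, theta))   # axis/angle -> quaternion -> matrix
assert numpy.allclose(R_direct, R_via_q)

# Converting the matrix back should recover the same (positive-w) quaternion.
assert numpy.allclose(quaternionrmatrix(R_direct), rquaternionu(axis, theta))
```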
#### File: src/mmLib/Library.py
```python
import os
import sys
import types
import ConsoleOutput
import mmCIF
###############################################################################
## Library Data Locations
##
(MMLIB_PATH, JUNK) = os.path.split(__file__)
DATA_PATH = os.path.join(MMLIB_PATH, "Data")
ELEMENT_DATA_PATH = os.path.join(MMLIB_PATH, "Data", "elements.cif")
MMLIB_MONOMER_DATA_PATH = os.path.join(MMLIB_PATH, "Data", "monomers.cif")
RCSB_MONOMER_DATA_FILE = os.path.join(MMLIB_PATH, "Data", "Monomers.zip")
RCSB_MONOMER_DATA_PATH = os.path.join(MMLIB_PATH, "Data", "Monomers")
###############################################################################
## Caches
##
ELEMENT_CACHE = {}
MONOMER_RES_NAME_CACHE = {}
ELEMENT_CIF_FILE = mmCIF.mmCIFFile()
ELEMENT_CIF_FILE.load_file(open(ELEMENT_DATA_PATH, "r"))
MMLIB_MONOMERS_CIF = mmCIF.mmCIFFile()
MMLIB_MONOMERS_CIF.load_file(open(MMLIB_MONOMER_DATA_PATH, "r"))
RCSB_USE_ZIP = None
RCSB_ZIP = None
###############################################################################
## Constants
##
ELEMENT_SYMBOL_DICT = {
"H" : True, "h" : True,
"He": True, "he": True, "HE": True,
"Li": True, "li": True, "LI": True,
"Be": True, "be": True, "BE": True,
"B" : True, "b" : True,
"C" : True, "c" : True,
"N" : True, "n" : True,
"O" : True, "o" : True,
"F" : True, "f" : True,
"Ne": True, "ne": True, "NE": True,
"Na": True, "na": True, "NA": True,
"Mg": True, "mg": True, "MG": True,
"Al": True, "al": True, "AL": True,
"Si": True, "si": True, "SI": True,
"P" : True, "p" : True,
"S" : True, "s" : True,
"Cl": True, "cl": True, "CL": True,
"Ar": True, "ar": True, "AR": True,
"K" : True, "k" : True,
"Ca": True, "ca": True, "CA": True,
"Sc": True, "sc": True, "SC": True,
"Ti": True, "ti": True, "TI": True,
"V" : True, "v" : True,
"Cr": True, "cr": True, "CR": True,
"Mn": True, "mn": True, "MN": True,
"Fe": True, "fe": True, "FE": True,
"Co": True, "co": True, "CO": True,
"Ni": True, "ni": True, "NI": True,
"Cu": True, "cu": True, "CU": True,
"Zn": True, "zn": True, "ZN": True,
"Ga": True, "ga": True, "GA": True,
"Ge": True, "ge": True, "GE": True,
"As": True, "as": True, "AS": True,
"Se": True, "se": True, "SE": True,
"Br": True, "br": True, "BR": True,
"Kr": True, "kr": True, "KR": True,
"Rb": True, "rb": True, "RB": True,
"Sr": True, "sr": True, "SR": True,
"Y" : True, "y" : True,
"Zr": True, "zr": True, "ZR": True,
"Nb": True, "nb": True, "NB": True,
"Mo": True, "mo": True, "MO": True,
"Tc": True, "tc": True, "TC": True,
"Ru": True, "ru": True, "RU": True,
"Rh": True, "rh": True, "RH": True,
"Pd": True, "pd": True, "PD": True,
"Ag": True, "ag": True, "AG": True,
"Cd": True, "cd": True, "CD": True,
"In": True, "in": True, "IN": True,
"Sn": True, "sn": True, "SN": True,
"Sb": True, "sb": True, "SB": True,
"Te": True, "te": True, "TE": True,
"I" : True, "i" : True,
"Xe": True, "xe": True, "XE": True,
"Cs": True, "cs": True, "CS": True,
"Ba": True, "ba": True, "BA": True,
"La": True, "la": True, "LA": True,
"Ce": True, "ce": True, "CE": True,
"Pr": True, "pr": True, "PR": True,
"Nd": True, "nd": True, "ND": True,
"Pm": True, "pm": True, "PM": True,
"Sm": True, "sm": True, "SM": True,
"Eu": True, "eu": True, "EU": True,
"Gd": True, "gd": True, "GD": True,
"Tb": True, "tb": True, "TB": True,
"Dy": True, "dy": True, "DY": True,
"Ho": True, "ho": True, "HO": True,
"Er": True, "er": True, "ER": True,
"Tm": True, "tm": True, "TM": True,
"Yb": True, "yb": True, "YB": True,
"Lu": True, "lu": True, "LU": True,
"Hf": True, "hf": True, "HF": True,
"Ta": True, "ta": True, "TA": True,
"W" : True, "w" : True,
"Re": True, "re": True, "RE": True,
"Os": True, "os": True, "OS": True,
"Ir": True, "ir": True, "IR": True,
"Pt": True, "pt": True, "PT": True,
"Au": True, "au": True, "AU": True,
"Hg": True, "hg": True, "HG": True,
"Tl": True, "tl": True, "TL": True,
"Pb": True, "pb": True, "PB": True,
"Bi": True, "bi": True, "BI": True,
"Po": True, "po": True, "PO": True,
"At": True, "at": True, "AT": True,
"Rn": True, "rn": True, "RN": True,
"Fr": True, "fr": True, "FR": True,
"Ra": True, "ra": True, "RA": True,
"Ac": True, "ac": True, "AC": True,
"Th": True, "th": True, "TH": True,
"Pa": True, "pa": True, "PA": True,
"U" : True, "u" : True }
AMINO_ACID3_LIST = [
"GLY", "ALA", "VAL", "LEU", "ILE", "PRO", "PHE", "TYR", "TRP",
"MET", "CYS", "SER", "THR", "ASP", "GLU", "HIS", "LYS", "ARG",
"ASN", "GLN"
]
AMINO_ACID31_DICT = {
"GLY":"G", "ALA":"A", "VAL":"V", "LEU":"L", "ILE":"I", "PRO":"P",
"PHE":"F", "TYR":"Y", "TRP":"W", "MET":"M", "CYS":"C", "SER":"S",
"THR":"T", "ASP":"D", "GLU":"E", "HIS":"H", "LYS":"K", "ARG":"R",
"ASN":"N", "GLN":"Q"
}
AMINO_ACID13_DICT = {
'A': 'ALA', 'C': 'CYS', 'E': 'GLU', 'D': 'ASP', 'G': 'GLY',
'F': 'PHE', 'I': 'ILE', 'H': 'HIS', 'K': 'LYS', 'M': 'MET',
'L': 'LEU', 'N': 'ASN', 'Q': 'GLN', 'P': 'PRO', 'S': 'SER',
'R': 'ARG', 'T': 'THR', 'W': 'TRP', 'V': 'VAL', 'Y': 'TYR'}
NUCLEIC_ACID_LIST = ["A", "G", "C", "T", "U"]
NUCLEIC_ACID_RES_NAME_DICT = {
"C": "C", "C+": "C", "Cr": "C", "+C": "C",
"G": "G", "G+": "G", "Gr": "G", "+G": "G",
"A": "A", "A+": "A", "Ar": "A", "+A": "A",
"T": "T", "T+": "T", "Tr": "T", "+T": "T",
"U": "U", "U+": "U", "Ur": "U", "+U": "U",
}
## Add alternate residue monomer names here:
ALT_RES_NAME_DICT = {
"C+": "C", "Cr": "C", "+C": "C",
"G+": "G", "Gr": "G", "+G": "G",
"A+": "A", "Ar": "A", "+A": "A",
"T+": "T", "Tr": "T", "+T": "T",
"U+": "U", "Ur": "U", "+U": "U",
"Ad": "A", "Td": "T", "Gd": "G", "Cd": "C",
}
###############################################################################
## Library Description Objects
##
class ElementDesc(object):
"""Element description class returned by library_get_element_desc().
"""
def __init__(self):
self.cif_data = None
self.name = None
self.symbol = None
self.group = None
self.period = None
self.atomic_number = None
self.atomic_weight = None
self.atomic_radius = None
self.covalent_radius = None
self.van_der_waals_radius = None
self.electronegativity = None
self.color_rgbf = None
class MonomerDesc(object):
"""Monomer description class returned by library_get_monomer_desc().
"""
def __init__(self):
self.res_name = None
self.full_name = None
self.one_letter_code = None
self.type = None
self.pdbx_type = None
self.formula = None
self.rcsb_class_1 = None
self.chem_type = None
self.atom_list = []
self.atom_dict = {}
self.alt_atom_dict = {}
self.bond_list = []
self.torsion_angle_dict = {}
self.amino_acid = False
self.nucleic_acid = False
self.water = False
def is_amino_acid(self):
"""Returns True if the Monomer is an amino acid, otherwise returns
False.
"""
return self.amino_acid
def is_nucleic_acid(self):
"""Returns True if the Monomer is a nucleic acid, otherwise returns
False.
"""
return self.nucleic_acid
def is_standard_residue(self):
"""
"""
return self.amino_acid or self.nucleic_acid
def is_non_standard_residue(self):
"""
"""
return not self.amino_acid and not self.nucleic_acid
def is_water(self):
"""Returns True if the Monomer is a water molecule,
otherwise returns False.
"""
return self.water
###############################################################################
## Library API
##
def library_construct_element_desc(symbol):
"""Constructs the ElementDesc object for the given element symbol.
"""
cif_data = ELEMENT_CIF_FILE.get_data(symbol)
if cif_data is None:
ConsoleOutput.warning("element description not found for %s" % (symbol))
return None
## create element description
element_desc = ElementDesc()
element_desc.cif_data = cif_data
element = cif_data.get_table("element")
element_desc.name = element["name"]
element_desc.symbol = element["symbol"]
element_desc.number = int(element["number"])
element_desc.atomic_weight = float(element["atomic_weight"])
element_desc.vdw_radius = float(element["van_der_walls_radius"])
element_desc.covalent_radius = float(element.get("covalent_radius", 0.0))
rgb8 = element["color_rgb"]
element_desc.color_rgbf = (int(rgb8[1:3], 16) / 255.0,
int(rgb8[3:5], 16) / 255.0,
int(rgb8[5:7], 16) / 255.0)
return element_desc
def library_get_element_desc(symbol):
"""Loads/caches/returns an instance of the ElementDesc class for the given
element symbol. The source of the element data is the
mmLib/Data/elements.cif file.
"""
assert isinstance(symbol, str)
try:
return ELEMENT_CACHE[symbol]
except KeyError:
pass
element_desc = library_construct_element_desc(symbol)
if element_desc is None:
ConsoleOutput.warning("element description not found for %s" % (symbol))
return None
ELEMENT_CACHE[symbol] = element_desc
return element_desc
def library_use_monomer_zipfile():
"""Returns True if the zipfile version of the monomer library should be used,
or False if the uncompressed directory hierarchy should be used. If the
"""
## check if monomers are available in a zip file
global RCSB_USE_ZIP
global RCSB_ZIP
## this should only run once
if RCSB_USE_ZIP is None:
import zipfile
try:
RCSB_ZIP = zipfile.ZipFile(RCSB_MONOMER_DATA_FILE)
except IOError:
RCSB_USE_ZIP = False
else:
RCSB_USE_ZIP = True
return RCSB_USE_ZIP
def library_open_monomer_lib_zipfile(monomer_name):
"""Returns the open file object for the mmCIF monomer library file if it
is found in the monomer library zipfile.
"""
if library_use_monomer_zipfile():
## read data from zip file
try:
blob = RCSB_ZIP.read(monomer_name.upper())
except KeyError:
ConsoleOutput.warning("monomer description not found in zipfile for '%s'" % (monomer_name))
else:
from cStringIO import StringIO
return StringIO(blob)
return None
def library_open_monomer_lib_directory(monomer_name):
"""Returns the open file object for the mmCIF monomer library file if it
is found as an uncompressed mmCIF file at the path:
mmLib/Data/Monomers/NAME[0]/NAME.cif
"""
assert len(monomer_name) > 0
fil_name = "%s.cif" % (monomer_name.upper())
path = os.path.join(RCSB_MONOMER_DATA_PATH, fil_name[0], fil_name)
if os.path.isfile(path):
return open(path, "r")
return None
def library_open_monomer_lib_file(monomer_name):
"""Returns the open file object for the mmCIF monomer library file if it
is found from library_open_monomer_lib_directory() or
library_open_monomer_lib_zipfile(). library_open_monomer_lib_directory()
is checked first because loading the file from the directory structure
is much faster than loading it from a zipfile.
"""
libfil = library_open_monomer_lib_directory(monomer_name)
if libfil is not None:
return libfil
libfil = library_open_monomer_lib_zipfile(monomer_name)
return libfil
def library_construct_monomer_desc(res_name):
"""Constructs the MonomerDesc object for the given residue name.
"""
## return None when the res_name is an empty string
if len(res_name) < 1:
return None
if ALT_RES_NAME_DICT.has_key(res_name):
lookup_name = ALT_RES_NAME_DICT[res_name]
else:
lookup_name = res_name.upper()
libfil = library_open_monomer_lib_file(lookup_name)
if libfil is None:
ConsoleOutput.warning("monomer description not found for '%s'" % (res_name))
return None
## generate monomer description
mon_desc = MonomerDesc()
## data from RCSB library
rcsb_cif_file = mmCIF.mmCIFFile()
rcsb_cif_file.load_file(libfil)
rcsb_cif_data = rcsb_cif_file[0]
libfil.close()
chem_comp = rcsb_cif_data.get_table("chem_comp")[0]
mon_desc.res_name = chem_comp.get_lower("res_name")
mon_desc.full_name = chem_comp.get_lower("name")
mon_desc.type = chem_comp.get_lower("type")
mon_desc.pdbx_type = chem_comp.get_lower("pdbx_type")
mon_desc.formula = chem_comp.get_lower("formula")
mon_desc.rcsb_class_1 = chem_comp.get_lower("rcsb_class_1")
chem_comp_atom = rcsb_cif_data.get_table("chem_comp_atom")
if chem_comp_atom is not None:
for cif_row in chem_comp_atom:
name = cif_row.getitem_lower("atom_id")
try:
symbol = cif_row.getitem_lower("type_symbol")
except KeyError:
## this should occur when an atom name does not match the ones
## found in a monomer file
symbol = name
msg = "unrecognized atom name: '%s' in residue '%s'" % (
symbol, res_name)
ConsoleOutput.warning(msg)
mon_desc.atom_list.append({"name": name, "symbol": symbol})
mon_desc.atom_dict[name] = symbol
try:
alt_name = cif_row.getitem_lower("alt_atom_id")
except KeyError:
pass
else:
mon_desc.alt_atom_dict[name] = alt_name
chem_comp_bond = rcsb_cif_data.get_table("chem_comp_bond")
if chem_comp_bond is not None:
for cif_row in chem_comp_bond:
atom1 = cif_row.getitem_lower("atom_id_1")
atom2 = cif_row.getitem_lower("atom_id_2")
mon_desc.bond_list.append({"atom1": atom1, "atom2": atom2})
## data from mmLib supplemental library in mmLib/Data/monomers.cif
mmlib_cif_data = MMLIB_MONOMERS_CIF.get_data(res_name)
if mmlib_cif_data is not None:
## get additional chemical information on amino acids
chem_comp = mmlib_cif_data.get_table("chem_comp")
if chem_comp is not None:
mon_desc.one_letter_code = chem_comp["one_letter_code"]
mon_desc.chem_type = chem_comp["chem_type"]
## get torsion angle definitions
torsion_angles = mmlib_cif_data.get_table("torsion_angles")
if torsion_angles is not None:
for cif_row in torsion_angles:
mon_desc.torsion_angle_dict[cif_row["name"]] = (
cif_row["atom1"], cif_row["atom2"],
cif_row["atom3"], cif_row["atom4"])
## set some derived flags on the monomer description
mon_type = mon_desc.type.upper()
if mon_type == "L-PEPTIDE LINKING":
mon_desc.amino_acid = True
elif mon_type == "DNA LINKING" or mon_type == "RNA LINKING":
mon_desc.nucleic_acid = True
elif mon_type == "HOH" or mon_type == "WAT":
mon_desc.water = True
return mon_desc
def library_get_monomer_desc(res_name):
"""Loads/caches/returns the monomer description objec MonomerDesc
for the given monomer residue name.
"""
assert isinstance(res_name, str)
try:
return MONOMER_RES_NAME_CACHE[res_name]
except KeyError:
pass
mon_desc = library_construct_monomer_desc(res_name)
if mon_desc is None:
return None
MONOMER_RES_NAME_CACHE[res_name] = mon_desc
return mon_desc
def library_is_amino_acid(res_name):
"""Returns True if the res_name is an amino acid.
"""
assert isinstance(res_name, str)
mdesc = library_get_monomer_desc(res_name)
if mdesc is None:
return False
return mdesc.is_amino_acid()
def library_is_nucleic_acid(res_name):
"""Returns True if the res_name is a nucleic acid.
"""
assert isinstance(res_name, str)
mdesc = library_get_monomer_desc(res_name)
if mdesc is None:
return False
return mdesc.is_nucleic_acid()
def library_is_standard_residue(res_name):
"""Returns True if the res_name is a standard amino or nucleic acid.
"""
assert isinstance(res_name, str)
mdesc = library_get_monomer_desc(res_name)
if mdesc is None:
return False
return mdesc.is_standard_residue()
def library_is_water(res_name):
"""Return True if the res_name is water.
"""
assert isinstance(res_name, str)
if res_name == "HOH" or res_name == "WAT":
return True
return False
def library_guess_element_from_name(name0, res_name):
"""Try everything we can possibly think of to extract the element
symbol from the atom name. If available, use the monomer dictionary to
help narrow down the search.
"""
## strip any space from the name, and return now if there
## is nothing left to work with
name = name0.strip()
if name == "":
return None
if name0 != res_name:
## try the easy way out -- look up the atom in the monomer dictionary
mdesc = library_get_monomer_desc(res_name)
if mdesc is not None:
if mdesc.atom_dict.has_key(name):
symbol = mdesc.atom_dict[name]
if symbol is not None:
return symbol
if mdesc.is_amino_acid() and name == "OXT":
return "O"
if mdesc.is_amino_acid():
msg = "invalid amino acid atom name '%s' in residue '%s'" % (
name, res_name)
ConsoleOutput.warning(msg)
## okay, that didn't work...
## set the space_flag to true if the name starts with a space, which can
## indicate the name of the atom is only 1 character long.
if name0.startswith(" "):
space_flag = True
else:
space_flag = False
## remove all non-alpha chars from the name
alpha_name = ""
for c in name:
if c.isalpha() == True:
alpha_name += c
## look up two possible element symbols in the library:
## e1 is the possible one-character symbol
## e2 is the possible two-character symbol
if len(alpha_name) == 0:
return None
e1_symbol = alpha_name[0]
e1_valid = ELEMENT_SYMBOL_DICT.has_key(e1_symbol)
if len(alpha_name) > 1:
e2_symbol = alpha_name[:2]
e2_valid = ELEMENT_SYMBOL_DICT.has_key(e2_symbol)
else:
e2_symbol = None
e2_valid = False
## e1 or e2 must return something for us to proceed, otherwise,
## there's just no possible element symbol contained in the atom
## name
if e1_valid == False and e2_valid == False:
return None
elif e1_valid == True and e2_valid == False:
return e1_symbol
elif e1_valid == False and e2_valid == True:
return e2_symbol
## if we get here, then e1 and e2 are both valid elements
## we're out of choices, go by the space_flag: if there is a space
## before the atom name, then use the 1-char element symbol;
## if there is no space, then use the 2-char element symbol
if space_flag == True:
return e1_symbol
return e2_symbol
## <TESTING>
def test_module():
h = library_get_element_desc("H")
for cif_data in ELEMENT_CIF_FILE:
if len(cif_data.name) == 1:
print ' "%s" : True, "%s" : True,' % (
cif_data.name, cif_data.name.lower())
else:
print ' "%s": True, "%s": True, "%s": True,' % (
cif_data.name, cif_data.name.lower(), cif_data.name.upper())
if __name__ == "__main__":
test_module()
## </TESTING>
```
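A brief usage sketch for the library API above. It assumes the mmLib data files (elements.cif, monomers.cif and the Monomers directory or zipfile) are installed next to the package, as the module expects.
```python
# Hedged usage sketch for the element/monomer library API.
from mmLib.Library import (library_get_element_desc,
                           library_get_monomer_desc,
                           library_guess_element_from_name)

carbon = library_get_element_desc("C")
print("C atomic weight: %s" % carbon.atomic_weight)

ala = library_get_monomer_desc("ALA")
print("ALA is an amino acid: %s" % ala.is_amino_acid())

# Guess an element from a PDB-style atom name. For ALA the monomer dictionary
# resolves " CA " to carbon (provided the ALA monomer file is found); for a
# bare calcium residue "CA" the two-character element symbol wins.
print(library_guess_element_from_name(" CA ", "ALA"))
print(library_guess_element_from_name("CA", "CA"))
```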
#### File: src/mmLib/mmCIF.py
```python
from __future__ import generators
import re
import copy
import itertools
##
## DATA STRUCTURES FOR HOLDING CIF INFORMATION
##
## mmCIF files are parsed into:
## mmCIFFile -> [mmCIFData] -> [mmCIFTable] -> [mmCIFRow]
##
## mmCIF dictionaries are parsed into:
## mmCIFDictionary -> [mmCIFData] -> [mmCIFTable] -> [mmCIFRow]
##
## mmCIF Maximum Line Length
MAX_LINE = 2048
class mmCIFError(Exception):
"""Base class of errors raised by Structure objects.
"""
pass
class mmCIFSyntaxError(Exception):
"""Base class of errors raised by Structure objects.
"""
def __init__(self, line_num, text):
Exception.__init__(self)
self.line_num = line_num
self.text = text
def __str__(self):
return "[line: %d] %s" % (self.line_num, self.text)
class mmCIFRow(dict):
"""Contains one row of data. In a mmCIF file, this is one complete
set of data found under a section. The data can be accessed by using
the column names as class attributes.
"""
__slots__ = ["table"]
def __eq__(self, other):
return id(self) == id(other)
def __deepcopy__(self, memo):
cif_row = mmCIFRow()
for key, val in self.iteritems():
cif_row[key] = val
return cif_row
def __contains__(self, column):
return dict.__contains__(self, column.lower())
def __setitem__(self, column, value):
assert value is not None
dict.__setitem__(self, column.lower(), value)
def __getattr__(self, name):
try:
return self[name]
except KeyError:
raise AttributeError(name)
def __getitem__(self, column):
return dict.__getitem__(self, column.lower())
def getitem_lower(self, clower):
return dict.__getitem__(self, clower)
def __delitem__(self, column):
dict.__delitem__(self, column.lower())
def get(self, column, default = None):
return dict.get(self, column.lower(), default)
def get_lower(self, clower, default = None):
return dict.get(self, clower, default)
def has_key(self, column):
return dict.has_key(self, column.lower())
def has_key_lower(self, clower):
return dict.has_key(self, clower)
class mmCIFTable(list):
"""Contains columns and rows of data for a mmCIF section. Rows of data
are stored as mmCIFRow classes.
"""
__slots__ = ["name", "columns", "columns_lower", "data"]
def __init__(self, name, columns = None):
assert name is not None
list.__init__(self)
self.name = name
if columns is None:
self.columns = list()
self.columns_lower = dict()
else:
self.set_columns(columns)
def __deepcopy__(self, memo):
table = mmCIFTable(self.name, self.columns[:])
for row in self:
table.append(copy.deepcopy(row, memo))
return table
def __eq__(self, other):
return id(self) == id(other)
def is_single(self):
"""Return true if the table is not a _loop table with multiple
rows of data.
"""
return len(self) <= 1
def __getattr__(self, name):
try:
return self[name]
except KeyError:
raise AttributeError(name)
def __getitem__(self, x):
"""Retrieves mmCIFRow at index x from the table if the argument is
an integer. If the argument is a string, then the data from the
first row is returned.
"""
if isinstance(x, int):
return list.__getitem__(self, x)
elif isinstance(x, str):
try:
return self[0][x]
except (IndexError, KeyError):
raise KeyError
raise TypeError, x
def __setitem__(self, x, value):
assert value is not None
if isinstance(x, int) and isinstance(value, mmCIFRow):
value.table = self
list.__setitem__(self, x, value)
elif isinstance(x, str):
try:
self[0][x] = value
except IndexError:
row = mmCIFRow()
row[x] = value
self.append(row)
def __delitem__(self, i):
self.remove(self[i])
def get(self, x, default = None):
try:
return self[x]
except KeyError:
return default
def append(self, row):
assert isinstance(row, mmCIFRow)
row.table = self
list.append(self, row)
def insert(self, i, row):
assert isinstance(row, mmCIFRow)
row.table = self
list.insert(self, i, row)
def remove(self, row):
assert isinstance(row, mmCIFRow)
del row.table
list.remove(self, row)
def set_columns(self, columns):
"""Sets the list of column(subsection) names to the list of names in
columns.
"""
self.columns = list()
self.columns_lower = dict()
for column in columns:
self.append_column(column)
def append_column(self, column):
"""Appends a column(subsection) name to the table.
"""
clower = column.lower()
if clower in self.columns_lower:
i = self.columns.index(self.columns_lower[clower])
self.columns[i] = column
self.columns_lower[clower] = column
else:
self.columns.append(column)
self.columns_lower[clower] = column
def has_column(self, column):
"""Tests if the table contains the column name.
"""
return column.lower() in self.columns_lower
def remove_column(self, column):
"""Removes the column name from the table.
"""
clower = column.lower()
if clower not in self.columns_lower:
return
self.columns.remove(self.columns_lower[clower])
del self.columns_lower[clower]
def autoset_columns(self):
"""Automatically sets the mmCIFTable column names by inspecting all
mmCIFRow objects it contains.
"""
clower_used = {}
for cif_row in self:
for clower in cif_row.iterkeys():
clower_used[clower] = True
if clower not in self.columns_lower:
self.append_column(clower)
for clower in self.columns_lower.keys():
if not clower_used.has_key(clower):
self.remove_column(clower)
def get_row1(self, clower, value):
"""Return the first row which which has column data matching value.
"""
fpred = lambda r: r.get_lower(clower) == value
itertools.ifilter(fpred, self)
for row in itertools.ifilter(fpred, self):
return row
return None
def get_row(self, *args):
"""Preforms a SQL-like 'AND' select aginst all the rows in the table,
and returns the first matching row found. The arguments are a
variable list of tuples of the form:
(<lower-case-column-name>, <column-value>)
For example:
get_row(('atom_id','CA'),('entity_id', '1'))
returns the first matching row with atom_id==1 and entity_id==1.
"""
if len(args) == 1:
clower, value = args[0]
for row in self:
if row.get_lower(clower) == value:
return row
else:
for row in self:
match_row = True
for clower, value in args:
if row.get_lower(clower) != value:
match_row = False
break
if match_row:
return row
return None
def new_row(self):
"""Creates a new mmCIF rows, addes it to the table, and returns it.
"""
cif_row = mmCIFRow()
self.append(cif_row)
return cif_row
def iter_rows(self, *args):
"""This is the same as get_row, but it iterates over all matching
rows in the table.
"""
for cif_row in self:
match_row = True
for clower, value in args:
if cif_row.get_lower(clower) != value:
match_row = False
break
if match_row:
yield cif_row
def row_index_dict(self, clower):
"""Return a dictionary mapping the value of the row's value in
column 'key' to the row itself. If there are multiple rows with
the same key value, they will be overwritten with the last found
row.
"""
dictx = dict()
for row in self:
try:
dictx[row.getitem_lower(clower)] = row
except KeyError:
pass
return dictx
class mmCIFData(list):
"""Contains all information found under a data_ block in a mmCIF file.
mmCIF files are represented differently here than their file format
would suggest. Since a mmCIF file is more-or-less a SQL database dump,
the files are represented here with their sections as "Tables" and
their subsections as "Columns". The data is stored in "Rows".
"""
__slots__ = ["name", "file"]
def __init__(self, name):
assert name is not None
list.__init__(self)
self.name = name
def __str__(self):
return "mmCIFData(name = %s)" % (self.name)
def __deepcopy__(self, memo):
data = mmCIFData(self.name)
for table in self:
data.append(copy.deepcopy(table, memo))
return data
def __eq__(self, other):
return id(self) == id(other)
def __getattr__(self, name):
try:
return self[name]
except KeyError:
raise AttributeError(name)
def __getitem__(self, x):
if isinstance(x, int):
return list.__getitem__(self, x)
elif isinstance(x, str):
name = x.lower()
for ctable in self:
if ctable.name.lower() == name:
return ctable
raise KeyError, x
raise TypeError, x
def __setitem__(self, x, table):
"""
"""
assert isinstance(table, mmCIFTable)
try:
old_table = self[x]
except (KeyError, IndexError):
pass
else:
self.remove(old_table)
if isinstance(x, int):
table.data = self
list.__setitem__(self, x, table)
elif isinstance(x, str):
self.append(table)
def __delitem__(self, x):
"""Remove a mmCIFTable by index or table name.
"""
self.remove(self[x])
def append(self, table):
"""Append a mmCIFTable. This will trigger the removal of any table
with the same name.
"""
assert isinstance(table, mmCIFTable)
try:
del self[table.name]
except KeyError:
pass
table.data = self
list.append(self, table)
def insert(self, i, table):
assert isinstance(table, mmCIFTable)
try:
del self[table.name]
except KeyError:
pass
table.data = self
list.insert(self, i, table)
def remove(self, table):
assert isinstance(table, mmCIFTable)
del table.data
list.remove(self, table)
def has_key(self, x):
try:
self[x]
except KeyError:
return False
else:
return True
def get(self, x, default = None):
try:
return self[x]
except KeyError:
return default
def has_table(self, x):
try:
self[x]
except KeyError:
return False
else:
return True
def get_table(self, name):
"""Looks up and returns a stored mmCIFTable class by its name. This
name is the section key in the mmCIF file.
"""
try:
return self[name]
except KeyError:
return None
except IndexError:
return None
def new_table(self, name, columns=None):
"""Creates and returns a mmCIFTable object with the given name.
The object is added to this object before it is returned.
"""
cif_table = mmCIFTable(name, columns)
self.append(cif_table)
return cif_table
def split_tag(self, tag):
cif_table_name, cif_column_name = tag[1:].split(".")
return cif_table_name.lower(), cif_column_name.lower()
def join_tag(self, cif_table_name, cif_column_name):
return "_%s.%s" % (cif_table_name, cif_column_name)
def get_tag(self, tag):
"""Get.
"""
table_name, column = self.split_tag(tag)
try:
return self[table_name][column]
except KeyError:
return None
def set_tag(self, tag, value):
"""Set.x
"""
table_name, column = self.split_tag(tag)
self[table_name][column] = value
class mmCIFSave(mmCIFData):
"""Class to store data from mmCIF dictionary save_ blocks. We treat
them as non-nested sections along with data_ sections.
This may not be correct!
"""
pass
class mmCIFFile(list):
"""Class representing a mmCIF files.
"""
def __deepcopy__(self, memo):
cif_file = mmCIFFile()
for data in self:
cif_file.append(copy.deepcopy(data, memo))
return cif_file
def __str__(self):
l = [str(cdata) for cdata in self]
return "mmCIFFile([%s])" % (", ".join(l))
def __eq__(self, other):
return id(self) == id(other)
def __getattr__(self, name):
try:
return self[name]
except KeyError:
raise AttributeError(name)
def __getitem__(self, x):
"""Retrieve a mmCIFData object by index or name.
"""
if isinstance(x, int):
return list.__getitem__(self, x)
elif isinstance(x, str):
name = x.lower()
for cdata in self:
if cdata.name.lower() == name:
return cdata
raise KeyError, x
raise TypeError, x
def __delitem__(self, x):
"""Remove a mmCIFData by index or data name. Raises IndexError
or KeyError if the mmCIFData object is not found, the error raised
depends on the argument type.
"""
self.remove(self[x])
def append(self, cdata):
"""Append a mmCIFData object. This will trigger the removal of any
mmCIFData object in the file with the same name.
"""
assert isinstance(cdata, mmCIFData)
try:
del self[cdata.name]
except KeyError:
pass
cdata.file = self
list.append(self, cdata)
def insert(self, i, cdata):
assert isinstance(cdata, mmCIFData)
try:
del self[cdata.name]
except KeyError:
pass
cdata.file = self
list.insert(self, i, cdata)
def has_key(self, x):
for cdata in self:
if cdata.name == x:
return True
return False
def get(self, x, default = None):
try:
return self[x]
except KeyError:
return default
def load_file(self, fil):
"""Load and append the mmCIF data from file object fil into self.
The fil argument must be a file object or implement its iterface.
"""
if isinstance(fil, str):
fileobj = open(fil, "r")
else:
fileobj = fil
mmCIFFileParser().parse_file(fileobj, self)
def save_file(self, fil):
if isinstance(fil, str):
fileobj = open(fil, "w")
else:
fileobj = fil
mmCIFFileWriter().write_file(fileobj, self)
def get_data(self, name):
"""Returns the mmCIFData object with the given name. Returns None
if no such object exists.
"""
try:
return self[name]
except KeyError:
return None
except IndexError:
return None
def new_data(self, name):
"""Creates a new mmCIFData object with the given name, adds it
to this mmCIFFile, and returns it.
"""
cif_data = mmCIFData(name)
self.append(cif_data)
return cif_data
class mmCIFDictionary(mmCIFFile):
"""Class representing a mmCIF dictionary. The constructor of this class
takes two arguments. The first is the string path for the file, or
alternatively a file object.
"""
pass
##
## FILE PARSERS/WRITERS
##
class mmCIFFileParser(object):
"""Stateful parser which uses the mmCIFElementFile tokenizer to read
a mmCIF file and convert it into the mmCIFData/mmCIFTable/mmCIFRow
data hierarchy.
"""
def parse_file(self, fileobj, cif_file):
self.line_number = 0
token_iter = self.gen_token_iter(fileobj)
try:
self.parse(token_iter, cif_file)
except StopIteration:
pass
else:
raise mmCIFError()
def syntax_error(self, err):
raise mmCIFSyntaxError(self.line_number, err)
def split_token(self, tokx):
"""Returns the mmCIF token split into a 2-tuple:
(reserved word, name) where directive is one of the mmCIF
reserved words: data_, loop_, global_, save_, stop_
"""
i = tokx.find("_")
if i == -1:
return None, None
rword = tokx[:i].lower()
if rword not in ("data", "loop", "global", "save", "stop"):
return None, None
name = tokx[i+1:]
return rword, name
def parse(self, token_iter, cif_file):
"""Stateful parser for mmCIF files.
XXX: loop_, data_, save_ tags are handled here in a case-sensitive
manner, although the mmCIF spec treats these tokens as case-insensitive.
"""
cif_table_cache = dict()
cif_data = None
cif_table = None
cif_row = None
state = ""
## ignore anything in the input file until a reserved word is
## found
while True:
tblx, colx, strx, tokx = token_iter.next()
if tokx is None:
continue
rword, name = self.split_token(tokx)
if rword is not None:
break
while True:
##
## PROCESS STATE CHANGES
##
if tblx is not None:
state = "RD_SINGLE"
elif tokx is not None:
rword, name = self.split_token(tokx)
if rword == "loop":
state = "RD_LOOP"
elif rword == "data":
state = "RD_DATA"
elif rword == "save":
state = "RD_SAVE"
elif rword == "stop":
return
elif rword == "global":
self.syntax_error("unable to handle global_ syntax")
else:
self.syntax_error("bad token #1: " + str(tokx))
else:
self.syntax_error("bad token #2")
return
##
## PROCESS DATA IN RD_SINGLE STATE
##
if state == "RD_SINGLE":
try:
cif_table = cif_table_cache[tblx]
except KeyError:
cif_table = cif_table_cache[tblx] = mmCIFTable(tblx)
try:
cif_data.append(cif_table)
except AttributeError:
self.syntax_error("section not contained in data_ block")
return
cif_row = mmCIFRow()
cif_table.append(cif_row)
else:
try:
cif_row = cif_table[0]
except IndexError:
self.syntax_error("bad token #3")
return
## check for duplicate entries
if colx in cif_table.columns:
self.syntax_error("redefined subsection (column)")
return
else:
cif_table.append_column(colx)
## get the next token from the file, it should be the data
## keyed by the previous token
tx, cx, strx, tokx = token_iter.next()
if tx is not None or (strx is None and tokx is None):
self.syntax_error("missing data for _%s.%s" % (tblx,colx))
if tokx is not None:
## check token for reserved words
rword, name = self.split_token(tokx)
if rword is not None:
if rword == "stop":
return
self.syntax_error("unexpected reserved word: %s" % (rword))
if tokx != ".":
cif_row[colx] = tokx
elif strx is not None:
cif_row[colx] = strx
else:
self.syntax_error("bad token #4")
tblx, colx, strx, tokx = token_iter.next()
continue
###
## PROCESS DATA IN RD_LOOP STATE
##
## This is entered upon the beginning of a loop, and
## the loop is read completely before exiting.
###
elif state == "RD_LOOP":
## the first section.subsection (tblx.colx) is read
## to create the section(table) name for the entire loop
tblx, colx, strx, tokx = token_iter.next()
if tblx is None or colx is None:
self.syntax_error("bad token #5")
return
if cif_table_cache.has_key(tblx):
self.syntax_error("_loop section duplication")
return
cif_table = mmCIFTable(tblx)
try:
cif_data.append(cif_table)
except AttributeError:
self.syntax_error("_loop section not contained in data_ block")
return
cif_table.append_column(colx)
## read the remaining subsection definitions for the loop_
while True:
tblx, colx, strx, tokx = token_iter.next()
if tblx is None:
break
if tblx != cif_table.name:
self.syntax_error("changed section names in loop_")
return
cif_table.append_column(colx)
## before starting to read data, check tokx for any control
## tokens
if tokx is not None:
rword, name = self.split_token(tokx)
if rword is not None:
if rword == "stop":
return
else:
self.syntax_error(
"unexpected reserved word: %s" % (rword))
## now read all the data
while True:
cif_row = mmCIFRow()
cif_table.append(cif_row)
for col in cif_table.columns:
if tokx is not None:
if tokx != ".":
cif_row[col] = tokx
elif strx is not None:
cif_row[col] = strx
tblx,colx,strx,tokx = token_iter.next()
## the loop ends when one of these conditions is met:
## condition #1: a new table is encountered
if tblx is not None:
break
## condition #2: a reserved word is encountered
if tokx is not None:
rword, name = self.split_token(tokx)
if rword is not None:
break
continue
elif state == "RD_DATA":
cif_data = mmCIFData(tokx[5:])
cif_file.append(cif_data)
cif_table_cache = dict()
cif_table = None
tblx,colx,strx,tokx = token_iter.next()
elif state == "RD_SAVE":
cif_data = mmCIFSave(tokx[5:])
cif_file.append(cif_data)
cif_table_cache = dict()
cif_table = None
tblx,colx,strx,tokx = token_iter.next()
def gen_token_iter(self, fileobj):
re_tok = re.compile(
r"(?:"
"(?:_(.+?)[.](\S+))" "|" # _section.subsection
"(?:['\"](.*?)(?:['\"]\s|['\"]$))" "|" # quoted strings
"(?:\s*#.*$)" "|" # comments
"(\S+)" # unquoted tokens
")")
file_iter = iter(fileobj)
## parse file, yielding tokens for self.parse()
while True:
ln = file_iter.next()
self.line_number += 1
## skip comments
if ln.startswith("#"):
continue
## semi-colon multi-line strings
if ln.startswith(";"):
lmerge = [ln[1:]]
while True:
ln = file_iter.next()
self.line_number += 1
if ln.startswith(";"):
break
lmerge.append(ln)
lmerge[-1] = lmerge[-1].rstrip()
yield (None, None, "".join(lmerge), None)
continue
## split line into tokens
tok_iter = re_tok.finditer(ln)
for tokm in tok_iter:
groups = tokm.groups()
if groups != (None, None, None, None):
yield groups
class mmCIFFileWriter(object):
"""Writes out a mmCIF file using the data in the mmCIFData list.
"""
def write_file(self, fil, cif_data_list):
self.fil = fil
## constant controls the spacing between columns
self.SPACING = 2
## iterate through the data sections and write them
## out to the file
for cif_data in cif_data_list:
self.cif_data = cif_data
self.write_cif_data()
def write(self, x):
self.fil.write(x)
def writeln(self, x = ""):
self.fil.write(x + "\n")
def write_mstring(self, mstring):
self.write(self.form_mstring(mstring))
def form_mstring(self, mstring):
l = [";"]
lw = MAX_LINE - 2
for x in mstring.split("\n"):
if x == "":
l.append("\n")
continue
while len(x) > 0:
l.append(x[:lw])
l.append("\n")
x = x[lw:]
l.append(";\n")
return "".join(l)
def data_type(self, x):
"""Analyze x and return its type: token, qstring, mstring
"""
assert x is not None
if not isinstance(x, str):
x = str(x)
return x, "token"
if x == "" or x == ".":
return ".", "token"
if x.find("\n") != -1:
return x, "mstring"
if x.count(" ") != 0 or x.count("\t") != 0 or x.count("#") != 0:
if len(x) > (MAX_LINE - 2):
return x, "mstring"
if x.count("' ") != 0 or x.count('" ') != 0:
return x, "mstring"
return x, "qstring"
if len(x) < MAX_LINE:
return x, "token"
else:
return x, "mstring"
def write_cif_data(self):
if isinstance(self.cif_data, mmCIFSave):
self.writeln("save_%s" % self.cif_data.name)
else:
self.writeln("data_%s" % self.cif_data.name)
self.writeln("#")
for cif_table in self.cif_data:
## ignore tables without data rows
if len(cif_table) == 0:
continue
## special handling for tables with one row of data
elif len(cif_table) == 1:
self.write_one_row_table(cif_table)
## _loop tables
elif len(cif_table) > 1 and len(cif_table.columns) > 0:
self.write_multi_row_table(cif_table)
else:
raise mmCIFError()
self.writeln("#")
def write_one_row_table(self, cif_table):
row = cif_table[0]
## determine max key length for formatting output
kmax = 0
table_len = len(cif_table.name) + 2
for col in cif_table.columns:
klen = table_len + len(col)
assert klen < MAX_LINE
kmax = max(kmax, klen)
## we need a space after the tag
kmax += self.SPACING
vmax = MAX_LINE - kmax - 1
## write out the keys and values
for col in cif_table.columns:
cif_key = "_%s.%s" % (cif_table.name, col)
l = [cif_key.ljust(kmax)]
try:
x0 = row[col]
except KeyError:
x = "?"
dtype = "token"
else:
x, dtype = self.data_type(x0)
if dtype == "token":
if len(x) > vmax:
l.append("\n")
l.append("%s\n" % (x))
self.write("".join(l))
elif dtype == "qstring":
if len(x) > vmax:
l.append("\n")
self.write("".join(l))
self.write_mstring(x)
else:
l.append("'%s'\n" % (x))
self.write("".join(l))
elif dtype == "mstring":
l.append("\n")
self.write("".join(l))
self.write_mstring(x)
def write_multi_row_table(self, cif_table):
## write the key description for the loop_
self.writeln("loop_")
for col in cif_table.columns:
key = "_%s.%s" % (cif_table.name, col)
assert len(key) < MAX_LINE
self.writeln(key)
col_len_map = {}
col_dtype_map = {}
for row in cif_table:
for col in cif_table.columns:
## get data and data type
try:
x0 = row[col]
except KeyError:
lenx = 1
dtype = "token"
else:
x, dtype = self.data_type(x0)
## determine write length of data
if dtype == "token":
lenx = len(x)
elif dtype == "qstring":
lenx = len(x) + 2
else:
lenx = 0
try:
col_dtype = col_dtype_map[col]
except KeyError:
col_dtype_map[col] = dtype
col_len_map[col] = lenx
continue
## update the column character width if necessary
if col_len_map[col] < lenx:
col_len_map[col] = lenx
## modify column data type if necessary
if col_dtype != dtype:
if dtype == "mstring":
col_dtype_map[col] = "mstring"
elif col_dtype == "token" and dtype == "qstring":
col_dtype_map[col] = "qstring"
## form a write list of the column names with values of None to
## indicate a newline
wlist = []
llen = 0
for col in cif_table.columns:
dtype = col_dtype_map[col]
if dtype == "mstring":
llen = 0
wlist.append((None, None, None))
wlist.append((col, dtype, None))
continue
lenx = col_len_map[col]
if llen == 0:
llen = lenx
else:
llen += self.SPACING + lenx
if llen > (MAX_LINE - 1):
wlist.append((None, None, None))
llen = lenx
wlist.append((col, dtype, lenx))
## write out the data
spacing = " " * self.SPACING
add_space = False
listx = []
for row in cif_table:
for (col, dtype, lenx) in wlist:
if col is None:
add_space = False
listx.append("\n")
continue
if add_space == True:
add_space = False
listx.append(spacing)
if dtype == "token":
x = str(row.get(col, "."))
if x == "":
x = "."
x = x.ljust(lenx)
listx.append(x)
add_space = True
elif dtype == "qstring":
x = row.get(col, ".")
if x == "":
x = "."
elif x != "." and x != "?":
x = "'%s'" % (x)
x = x.ljust(lenx)
listx.append(x)
add_space = True
elif dtype == "mstring":
try:
listx.append(self.form_mstring(row[col]))
except KeyError:
listx.append(".\n")
add_space = False
add_space = False
listx.append("\n")
## flush listx if it gets big to avoid using a lot of
## memory
if len(listx) > 1024:
self.write("".join(listx))
listx = []
## write out the _loop section
self.write("".join(listx))
### <testing>
def test_module():
import sys
try:
path = sys.argv[1]
except IndexError:
print "usage: mmCIF.py <mmCIF file path>"
raise SystemExit
cif = mmCIFDictionary()
cif.load_file(path)
cif.save_file(sys.stdout)
if __name__ == '__main__':
test_module()
### </testing>
```
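A minimal read-and-write sketch for the mmCIF classes above; the file names are hypothetical, and any mmCIF-format file should work.
```python
# Hedged usage sketch for mmCIFFile: load, inspect a table, save a copy.
from mmLib.mmCIF import mmCIFFile

cif_file = mmCIFFile()
cif_file.load_file("example.cif")          # hypothetical input file

for cif_data in cif_file:
    atom_site = cif_data.get_table("atom_site")
    if atom_site is None:
        continue
    print("%s: %d atom_site rows" % (cif_data.name, len(atom_site)))
    print("first atom id: %s" % atom_site[0].get("id"))

cif_file.save_file("example_copy.cif")
```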
#### File: mmLib/tests/test_AtomMath.py
```python
from django.test import TestCase
from mmLib.AtomMath import (
calc_torsion_angle,
calc_angle
)
from mmLib import Structure
class AtomMathTestCase(TestCase):
def test_torsion_angle(self):
"""
## Example taken from 1HMP.pdb
#ATOM 25 N VAL A 8 55.799 56.415 16.693 1.00 25.51 N
#ATOM 26 CA VAL A 8 55.049 57.431 15.929 1.00 20.42 C
#ATOM 27 C VAL A 8 55.655 57.849 14.605 1.00 21.66 C
#ATOM 28 O VAL A 8 56.846 58.112 14.504 1.00 31.38 O
#ATOM 29 CB VAL A 8 54.697 58.659 16.709 1.00 16.90 C
#ATOM 30 CG1 VAL A 8 54.131 59.664 15.699 1.00 19.06 C
#ATOM 31 CG2 VAL A 8 53.640 58.304 17.738 1.00 14.10 C
#ATOM 32 N ILE A 9 54.810 57.974 13.593 1.00 20.18 N
#ATOM 33 CA ILE A 9 55.221 58.358 12.242 1.00 16.49 C
#ATOM 34 C ILE A 9 54.461 59.575 11.722 1.00 28.07 C
#ATOM 35 O ILE A 9 53.439 59.455 11.009 1.00 31.82 O
#ATOM 36 CB ILE A 9 55.028 57.196 11.301 1.00 13.73 C
#ATOM 37 CG1 ILE A 9 55.941 56.045 11.712 1.00 20.33 C
#ATOM 38 CG2 ILE A 9 55.327 57.611 9.860 1.00 13.91 C
#ATOM 39 CD1 ILE A 9 55.871 54.892 10.733 1.00 21.80 C
#ATOM 40 N SER A 10 54.985 60.748 12.087 1.00 30.09 N
"""
a1 = Structure.Atom(x=0.0, y=-1.0, z=0.0)
a2 = Structure.Atom(x=0.0, y=0.0, z=0.0)
a3 = Structure.Atom(x=1.0, y=0.0, z=0.0)
#a4 = Structure.Atom(x=1.0, y=1.0, z=-1.0)
a4 = Structure.Atom(res_name='GLY', x=1.0, y=1.0, z=-1.0)
# PHI: C'-N-CA-C'
a1 = Structure.Atom(x=55.655, y=57.849, z=14.605, res_name='VAL')
a2 = Structure.Atom(x=54.810, y=57.974, z=13.593, res_name='ILE')
a3 = Structure.Atom(x=55.221, y=58.358, z=12.242, res_name='ILE')
a4 = Structure.Atom(x=54.461, y=59.575, z=11.722, res_name='ILE')
#print "PHI: %.3f" % calc_torsion_angle(a1, a2, a3, a4)
# PSI: N-CA-C'-N
a1 = Structure.Atom(x=54.810, y=57.974, z=13.593, res_name='ILE')
a2 = Structure.Atom(x=55.221, y=58.358, z=12.242, res_name='ILE')
a3 = Structure.Atom(x=54.461, y=59.575, z=11.722, res_name='ILE')
a4 = Structure.Atom(x=54.985, y=60.748, z=12.087, res_name='SER')
#print "PSI: %.3f" % calc_torsion_angle(a1, a2, a3, a4)
#print "="*40
#print "a1:", a1.position
#print "calc_angle:", calc_angle(a1, a2, a3)
#print "calc_torsion_angle:", calc_torsion_angle(a1, a2, a3, a4)
```
#### File: mmLib/tests/test_Library.py
```python
import os
from unittest import skip
from django.test import TestCase
from mmLib.Library import library_get_element_desc, ELEMENT_CIF_FILE
class LibraryTestCase(TestCase):
def test_library_get_element_desc(self):
h = library_get_element_desc("H")
def test_element_file(self):
for cif_data in ELEMENT_CIF_FILE:
if len(cif_data.name) == 1:
print ' "%s" : True, "%s" : True,' % (cif_data.name, cif_data.name.lower())
else:
print ' "%s": True, "%s": True, "%s": True,' % (cif_data.name, cif_data.name.lower(), cif_data.name.upper())
```
#### File: mmLib/tests/test_TLS.py
```python
import os
import sys
import numpy
from unittest import skip
from django.test import TestCase
from mmLib.TLS import TLSGroup
class TLSTestCase(TestCase):
def test_tls_group(self):
tls = TLSGroup()
tls.name = "All protein"
tls.set_origin(18.885, 49.302, 13.315)
tls.set_T(0.0263, 0.0561, 0.0048, -0.0128, 0.0065, -0.0157)
tls.set_L(0.9730, 5.1496, 0.8488, 0.2151,-0.1296, 0.0815)
tls.set_S(0.0007, 0.0281, 0.0336, -0.0446,-0.2288, -0.0551, 0.0487, 0.0163)
print tls
print "eigenvalues(T)"
print numpy.linalg.eig(tls.T)
print "eigenvalues(L)"
print numpy.linalg.eig(tls.L)
```
#### File: mmLib/tests/test_UnitCell.py
```python
import os
import sys
import numpy
from unittest import skip
from django.test import TestCase
from mmLib.UnitCell import UnitCell
class UnitCellTestCase(TestCase):
def test_triclinic_unit_cell(self):
"""
TEST CASE: Triclinic unit cell
"""
uc = UnitCell(7.877, 7.210, 7.891, 105.563, 116.245, 79.836)
e = numpy.array([[1.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 0.0, 1.0]], float)
print uc
print "volume = ", uc.calc_v()
print "cell volume = ", uc.calc_volume()
print "fractionalization matrix =\n", uc.calc_fractionalization_matrix()
print "orthogonalization matrix =\n", uc.calc_orthogonalization_matrix()
print "orth * e =\n", numpy.dot(uc.calc_orthogonalization_matrix(), e)
print "calc_frac_to_orth"
vlist = [
numpy.array([0.0, 0.0, 0.0]),
numpy.array([0.5, 0.5, 0.5]),
numpy.array([1.0, 1.0, 1.0]),
numpy.array([-0.13614, 0.15714, -0.07165])
]
for v in vlist:
ov = uc.calc_frac_to_orth(v)
v2 = uc.calc_orth_to_frac(ov)
print "----"
print " ",v
print " ",ov
print " ",v2
print "----"
def test_triclinic_reciprocal_unit_cell(self):
"""
TEST CASE: Reciprocal of above unit cell
"""
uc = UnitCell(7.877, 7.210, 7.891, 105.563, 116.245, 79.836)
ruc = uc.calc_reciprocal_unit_cell()
print ruc
print "volume = ", ruc.calc_v()
print "cell volume = ", ruc.calc_volume()
def test_orthogonal_space_symmetry_operations(self):
"""
TEST CASE: Orthogonal space symmetry operations
"""
unitx = UnitCell(
a = 64.950,
b = 64.950,
c = 68.670,
alpha = 90.00,
beta = 90.00,
gamma = 120.00,
space_group = "P 32 2 1"
)
print unitx
print
for symop in unitx.space_group.iter_symops():
print "Fractional Space SymOp:"
print symop
print "Orthogonal Space SymOp:"
print unitx.calc_orth_symop(symop)
print
```
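The fractional/orthogonal round trip above is only printed. Here is a hedged sketch of the same loop as an actual check, using `numpy.allclose`; the tolerance is an assumption.
```python
# Hedged sketch: round-trip check for calc_frac_to_orth / calc_orth_to_frac.
import numpy
from mmLib.UnitCell import UnitCell

def check_roundtrip():
    uc = UnitCell(7.877, 7.210, 7.891, 105.563, 116.245, 79.836)
    for v in (numpy.array([0.0, 0.0, 0.0]),
              numpy.array([0.5, 0.5, 0.5]),
              numpy.array([-0.13614, 0.15714, -0.07165])):
        ov = uc.calc_frac_to_orth(v)   # fractional -> orthogonal (Angstroms)
        v2 = uc.calc_orth_to_frac(ov)  # and back again
        assert numpy.allclose(v, v2, atol=1e-6), (v, v2)
```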
#### File: src/tlsmd/dumpdb.py
```python
import os
import sys
from tlsmdlib.datafile import *
def main():
datafile = TLSMDFile(sys.argv[1])
chain_id = sys.argv[2]
frag_id1 = sys.argv[3]
frag_id2 = sys.argv[4]
data = datafile.grh_get_tls_record(chain_id, frag_id1, frag_id2)
for key in data.keys():
print "%10s = %s" % (key, str(data[key]))
if __name__=="__main__":
main()
```
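For completeness, the same lookup can be done without the command line. A hedged sketch calling the datafile layer directly; `TLSMDFile` and `grh_get_tls_record` are taken from the script above, while the path and fragment IDs here are placeholders.
```python
# Hedged sketch: programmatic equivalent of dumpdb.py; arguments are placeholders.
from tlsmdlib.datafile import TLSMDFile

def dump_segment(path, chain_id, frag_id1, frag_id2):
    datafile = TLSMDFile(path)
    data = datafile.grh_get_tls_record(chain_id, frag_id1, frag_id2)
    for key in sorted(data.keys()):
        print("%10s = %s" % (key, data[key]))
```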
#### File: src/tlsmdlib/hcsssp.py
```python
import numpy
class HCSSSP(object):
"""Hop Constrained Single Source Shortest Path graph(V,E) minimization
based on the Bellman-Ford Algorithm but modified to work with a
2-dimensional cost(D) matrix, path(P) matrix, and travel(T) matrix.
"""
def HCSSSP_minimize(self, V, E, hops):
"""Hop-Constrained Single Source Shorted Path minimization,
loosely based on the Bellman-Ford SSSP algorithm using
Dynamic Programming. Returns the D, P, and T matrices.
"""
assert len(V)>0
assert len(E)>0
num_vertex = len(V)
## initialize D/P
infinity = 1e10
## a 2D cost matrix; the value at Dij describes the minimum
## cost to reach vertex j by traversing i edges
D = numpy.zeros((hops+1, num_vertex), float) + infinity
## like Bellman-Ford, initialize the source vertex distance to 0.0
for i in xrange(hops+1):
D[i,0] = 0.0
## a 2D previous vertex matrix; the value at Pij is the
## previous vertex of the path used to achieve cost Dij,
## except the previous vertex it describes is not the one
## in row i, but the one in row i-1 (the previous row)
P = numpy.zeros((hops+1, num_vertex), int) - 1
## a 2D "travel" matrix containing the edge used by the path
## through the previous matrix -- this is a Python 2D matrix and
## not a Numerical Python 2d array
T = []
for i in xrange(hops+1):
T.append([None for j in xrange(num_vertex)])
## now run the minimization
for h in xrange(1, hops+1):
for edge in E:
self.HCSSSP_minimize_relax(D, P, T, edge, h)
## now the matrix Dij and Pij are complete
return D, P, T
def HCSSSP_minimize_relax(self, D, P, T, edge, hop_constraint):
"""Relax vertices for the current number of hops using the cost array
from the costs calculated using the previous number of hops.
Current D for the given number of hops h is D[h], the D
array for the previous number of hops is D[h-1]
"""
vertex_i = edge[0]
vertex_j = edge[1]
weight = edge[2]
## get the cost vector for the current hop constraint (which we are
## in the process of calculating), and the cost vector for
## the previous hop constraint (which we assume has been calculated
## previously)
Dp = D[hop_constraint - 1]
Dc = D[hop_constraint]
## perform relaxation for the current number of hops against the
## cost vector for the previous number of hops; this results
## in the current cost vector being the minimum cost using at most
## one more hop(edge)
if Dc[vertex_j] > (Dp[vertex_i] + weight):
Dc[vertex_j] = Dp[vertex_i] + weight
P[hop_constraint,vertex_j] = vertex_i
T[hop_constraint][vertex_j]= edge
def HCSSSP_maximize(self, V, E, hops):
"""Hop-Constrained Single Source Shorted Path minimization,
loosely based on the Bellman-Ford SSSP algorithm using
Dynamic Programming. Returns the D, P, and T matrices.
"""
assert len(V)>0
assert len(E)>0
num_vertex = len(V)
## a 2D cost matrix; the value at Dij describes the maximum
## cost to reach vertex j by traversing i edges
D = numpy.zeros((hops+1, num_vertex), float)
## like Bellman-Ford, initialize the source vertex distance to 0.0
for i in xrange(hops+1):
D[i,0] = 0.0
## a 2D previous vertex matrix; the value at Pij is the
## previous vertex of the path used to achieve cost Dij,
## except the previous vertex it describes is not the one
## in row i, but the one in row i-1 (the previous row)
P = numpy.zeros((hops+1, num_vertex), int) - 1
## a 2D "travel" matrix containing the edge used by the path
## through the previous matrix -- this is a Python 2D matrix and
## not a Numerical Python 2d array
T = []
for i in xrange(hops+1):
T.append([None for j in xrange(num_vertex)])
## now run the minimization
for h in xrange(1, hops+1):
for edge in E:
self.HCSSSP_maximize_relax(D, P, T, edge, h)
## now the matrix Dij and Pij are complete
return D, P, T
def HCSSSP_maximize_relax(self, D, P, T, edge, hop_constraint):
"""Relax vertices for the current number of hops using the cost array
from the costs calculated using the previous number of hops.
Current D for the given number of hops h is D[h], the D
array for the previous number of hops is D[h-1]
"""
vertex_i = edge[0]
vertex_j = edge[1]
weight = edge[2]
## get the cost vector for the current hop constraint (which we are
## in the process of calculating), and the cost vector for
## the previous hop constraint (which we assume has been calculated
## previously)
Dp = D[hop_constraint - 1]
Dc = D[hop_constraint]
## perform relaxation for the current number of hops against the
## cost vector for the previous number of hops; this results
## in the current cost vector being the maximum cost using at most
## one more hop(edge)
if Dc[vertex_j] < (Dp[vertex_i] + weight):
Dc[vertex_j] = Dp[vertex_i] + weight
P[hop_constraint,vertex_j] = vertex_i
T[hop_constraint][vertex_j]= edge
def HCSSSP_path_iter(self, V, D, P, T, hop_constraint):
"""Iterate over the path from beginning to end yielding the tuple:
(hi, hj, edge) where hi is the row index (for D,P,T) of vertex
i in edge, and hj is the row index (should be hi+1) of vertex j
in edge.
"""
edge_list = []
num_vertex = len(D[0])
## start at the destination vertex
curr_v = num_vertex - 1
h = hop_constraint
while curr_v > 0:
prev_vertex = P[h,curr_v]
edge = T[h][curr_v]
curr_v = prev_vertex
h -= 1
edge_list.append((h, h+1, edge))
edge_list.reverse()
for edge in edge_list:
yield edge
```
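A short usage sketch for the class above on a toy graph. Vertex 0 is the source and the last vertex the destination (as `HCSSSP_path_iter` assumes), and edges follow the `(i, j, weight)` layout used by `HCSSSP_minimize_relax`; the import path is an assumption, and because the class uses `xrange`, a Python 2 interpreter is assumed as well.
```python
# Hedged sketch: hop-constrained shortest path on a 4-vertex toy graph.
from tlsmdlib.hcsssp import HCSSSP  # assumed import path

V = [0, 1, 2, 3]
E = [(0, 1, 1.0), (1, 2, 1.0), (2, 3, 1.0), (0, 2, 5.0), (0, 3, 10.0)]

h = HCSSSP()
D, P, T = h.HCSSSP_minimize(V, E, 3)
print(D[3, 3])  # 3.0  -- cheapest cost to vertex 3 using at most 3 edges (0->1->2->3)
print(D[1, 3])  # 10.0 -- with only 1 hop allowed, only the direct edge is usable
for hi, hj, edge in h.HCSSSP_path_iter(V, D, P, T, 3):
    print(hi, hj, edge)  # walks the chosen edges from source to destination
```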
#### File: src/tlsmdlib/models.py
```python
from django.db import models
from django.contrib.auth.models import User, Group
from django.contrib.postgres.fields import JSONField
from django.conf import settings
class TLSMDJob(models.Model):
## jobID == id
job_num = models.IntegerField(null=True, blank=True)
job_id = models.TextField(max_length=2048, db_index=True, null=True, blank=True)
STATE_CHOICES = (
('PROVIDER_HOME', 'PROVIDER_HOME'),
('VISIT', 'VISIT')
)
state = models.CharField(max_length=36, choices=STATE_CHOICES, db_index=True, null=True, blank=True)
structure_id = models.TextField(max_length=2048, db_index=True, null=True, blank=True)
header_id = models.TextField(max_length=2048, db_index=True, null=True, blank=True)
submit_time = models.DateTimeField(null=True, blank=True)
run_time_begin = models.DateTimeField(null=True, blank=True)
run_time_end = models.DateTimeField(null=True, blank=True)
chain_sizes = models.TextField(max_length=2048, db_index=True, null=True, blank=True)
submit_date = models.TextField(max_length=2048, db_index=True, null=True, blank=True)
ip_address = models.TextField(max_length=2048, db_index=True, null=True, blank=True)
email = models.TextField(max_length=2048, db_index=True, null=True, blank=True)
user_name = models.TextField(max_length=2048, db_index=True, null=True, blank=True)
user_comment = models.TextField(max_length=2048, db_index=True, null=True, blank=True)
private_job = models.BooleanField(default=False)
via_pdb = models.BooleanField(default=False)
pid = models.IntegerField(null=True, blank=True)
TLS_MODELS = (
('ISOT', 'Isotropic'),
('ANISO', 'Anisotropic')
)
tls_model = models.CharField(max_length=36, choices=TLS_MODELS, db_index=True, null=True, blank=True)
WEIGHTS = (
('NONE', 'NONE'),
('IUISO', 'IUISO')
)
weight = models.CharField(max_length=36, choices=WEIGHTS, db_index=True, null=True, blank=True)
INCLUDE_ATOMS = (
('ALL', 'ALL'),
('MAINCHAIN', 'MAINCHAIN')
)
include_atoms = models.CharField(max_length=36, choices=INCLUDE_ATOMS, db_index=True, null=True, blank=True)
PLOT_FORMATS = (
('PNG', 'PNG'),
('SVG', 'SVG')
)
plot_format = models.CharField(max_length=36, choices=PLOT_FORMATS, db_index=True, null=True, blank=True)
generate_jmol_view = models.BooleanField(default=False)
generate_jmol_animate = models.BooleanField(default=False)
generate_histogram = models.BooleanField(default=False)
cross_chain_analysis = models.BooleanField(default=False)
nparts = models.IntegerField(null=True, blank=True)
resolution = models.FloatField(null=True, blank=True)
data = JSONField(default=dict, blank=True)
created = models.DateTimeField(auto_now_add=True, null=False)
last_modified = models.DateTimeField(auto_now=True, null=False)
class Meta:
db_table = 'tlsmd_job'
ordering = ['id']
def __str__(self):
return '{}:{}'.format(self.job_id, self.state)
##== Create main 'status_page' table ===========================================
## CREATE TABLE status_page (jobID INT(5) NOT NULL auto_increment, job_num INT(5),
## job_id VARCHAR(19), state VARCHAR(10), structure_id VARCHAR(4),
## header_id VARCHAR(4), submit_time DECIMAL(13,2),
## run_time_begin DECIMAL(13,2), run_time_end DECIMAL(13,2),
## chain_sizes VARCHAR(255), submit_date VARCHAR(24),
## ip_address VARCHAR(15), email VARCHAR(320), user_name VARCHAR(100),
## user_comment VARCHAR(128), private_job BOOLEAN, via_pdb BOOLEAN,
## pid SMALLINT(5) UNSIGNED, tls_model ENUM('ISOT','ANISO'),
## weight ENUM('NONE','IUISO'), include_atoms ENUM('ALL','MAINCHAIN'),
## plot_format ENUM('PNG','SVG'), generate_jmol_view BOOLEAN,
## generate_jmol_animate BOOLEAN, generate_histogram BOOLEAN,
## cross_chain_analysis BOOLEAN, nparts INT(2), resolution DECIMAL(4,2),
## UNIQUE KEY `job_id` (`job_id`), PRIMARY KEY (jobID));
```
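A hedged sketch of how the model above might be driven from other Django code (for example a management command). Field names come from `TLSMDJob`; the import path and the example values are assumptions.
```python
# Hedged sketch: creating and querying TLSMDJob rows via the Django ORM.
from django.utils import timezone
from tlsmdlib.models import TLSMDJob  # assumed import path

def record_submission(job_id, structure_id, email):
    return TLSMDJob.objects.create(
        job_id=job_id,
        structure_id=structure_id,
        email=email,
        state='VISIT',          # one of the declared STATE_CHOICES
        tls_model='ISOT',
        include_atoms='ALL',
        plot_format='PNG',
        submit_time=timezone.now(),
    )

def public_jobs_for(structure_id):
    # newest public jobs first for a given structure
    return (TLSMDJob.objects
            .filter(structure_id=structure_id, private_job=False)
            .order_by('-submit_time'))
```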
#### File: src/tlsmdlib/webtlsmd.py
```python
import os
import sys
import time
import socket
import string
import random
import math
import numpy
import re
import xmlrpclib
import cgitb; cgitb.enable()
import cgi
import subprocess
## Pymmlib
from mmLib import Library ## checks if is_{amino,nucleic}_acid()
## TLSMD
import conf, const, misc
## GLOBALS
webtlsmdd = xmlrpclib.ServerProxy(conf.WEBTLSMDD)
import mysql_support
mysql = mysql_support.MySQLConnect()
def timestring(secs):
tm_struct = time.localtime(secs)
return time.strftime("%Y-%m-%d %H:%M %Z", tm_struct)
def secdiffstring(secs):
secs = int(secs)
hours = secs / 3600
secs = secs - (hours * 3600)
min = secs / 60
secs = secs - (min * 60)
x = "%1d:%2d.%2d" % (hours, min, secs)
return x.replace(" ", "0")
def timediffstring(begin, end):
secs = int(end - begin)
return secdiffstring(secs)
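## Illustrative examples for the time helpers above (not part of the original
## module; values assume the Python 2 integer division used in secdiffstring):
## secdiffstring(3725) -> "1:02.05" (1 hour, 2 minutes, 5 seconds)
## timediffstring(t0, t0 + 3725) -> "1:02.05"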
def left_justify_string(keyword, value):
"""Returns a string with dotted separation.
"""
return "%s: %s\n" % (keyword.ljust(40, "."), value)
def html_title(title):
"""Generates an HTML-formatted title.
"""
return '<center><h1>%s</h1></center>' % (title)
def html_nav_bar(page_name=None):
"""Site navigation bar.
"""
l = ['<div id="navcontainer">',
' <ul>',
' <li><a href="%s/index.html">Home</a></li>' % (
conf.TLSMD_BASE_URL),
' <li><a href="webtlsmd.cgi?page=submit1">Start a New Job</a></li>',
' <li><a href="webtlsmd.cgi">Job Status</a></li>',
' <li><a href="%s/examples/index.html">Examples</a></li>' % (
conf.TLSMD_BASE_URL),
' <li><a href="%s/documentation.html">Documentation</a></li>' % (
conf.TLSMD_BASE_URL),
' <li><a href="%s/references.html">References</a></li>' % (
conf.TLSMD_BASE_URL),
' </ul>',
'</div>'
]
return "\n".join(l)
def html_job_nav_bar(job_id):
"""Navigation bar to the TLSMD output files.
"""
if mysql.job_get_via_pdb(job_id) == 1 and \
mysql.job_get_state(job_id) != "running":
pdb_id = mysql.job_get_structure_id(job_id)
job_dir = os.path.join(conf.WEBTLSMDD_PDB_DIR, pdb_id)
job_url = os.path.join(conf.TLSMD_PUBLIC_URL, "pdb", pdb_id)
else:
job_dir = os.path.join(conf.TLSMD_WORK_DIR, job_id)
job_url = os.path.join(conf.TLSMD_PUBLIC_URL, "jobs", job_id)
analysis_dir = os.path.join(job_dir, "ANALYSIS")
analysis_index = os.path.join(analysis_dir, "index.html")
analysis_url = os.path.join(job_url, "ANALYSIS/index.html")
summary_index = os.path.join(job_dir, "ANALYSIS/index.html")
summary_url = os.path.join(job_url, "ANALYSIS/index.html")
logfile = os.path.join(job_dir, "log.txt")
log_url = os.path.join(job_url, "log.txt")
tarball = os.path.join(job_dir, "%s.tar.gz" % job_id)
tarball_url = os.path.join(job_url, "%s.tar.gz" % job_id)
## TODO: Should this only check for the logfile? 2009-05-27
if not os.path.isfile(analysis_index) and not os.path.isfile(logfile):
return ''
x = ''
x += '<center>'
## Summary page link
if (mysql.job_get_state(job_id) == 'running') and \
os.path.isfile(summary_index):
x += '<h3>View <a href="%s">Summary Analysis</a></h3>' % (summary_url)
if (mysql.job_get_state(job_id) != 'running') and \
os.path.isfile(analysis_index):
x += '<h3>View <a href="%s">Completed TLSMD Analysis</a></h3>' % (
analysis_url)
if os.path.isfile(logfile):
x += '<h3>View <a href="%s">TLSMD Logfile</a></h3>' % (log_url)
## tarball link
if os.path.isfile(tarball):
x += '<h3>Download <a href="%s">Local Copy of TLSMD Analysis output (tarball)</a></h3>' % (
tarball_url)
x += '</center>'
x += '<br>'
return x
def html_job_edit_form(fdict, pdb=False):
x = ''
x += '<center>'
x += '<form enctype="multipart/form-data" action="webtlsmd.cgi" method="post">'
x += '<input type="hidden" name="page" value="%s">' % (
fdict.get("page", "index"))
x += '<input type="hidden" name="edit_form" value="TRUE">'
x += '<input type="hidden" name="job_id" value="%s">' % (
fdict["job_id"])
x += '<table border="1" width="100%">'
## user/email/passcode/structure name
x += '<tr>'
x += '<th colspan="2">User Information</th>'
x += '<th>Session Information</th>'
x += '</tr>'
x += '<tr><td colspan="2">'
x += '<table>'
## keep job private
if not pdb:
x += '<tr><td></td>'
x += '<td>'
x += '<label>'
x += '<input type="checkbox" name="private_job" value="TRUE">'
x += 'Keep Job Private'
x += '</label>'
x += '</td>'
x += '</tr>'
## email address
x += '<tr>'
x += '<td class="r"><label>EMail Address:</td><td>'
x += '<input type="text" name="email" value="%s" size="25" maxlength="40">' % (
fdict.get("email", ""))
x += '</label></td>'
x += '</tr>'
## structure code
if not pdb:
x += '<tr>'
x += '<td class="r"><label>Structure Code:</td><td>'
x += '<input disabled type="text" name="structure_id" value="%s" size="4" maxlength="4">' % (
fdict.get("structure_id", ""))
x += '</label></td>'
x += '</tr>'
x += '</td>'
x += '</table>'
## session info
x += '<td valign="top"><table>'
x += '<tr><td class="r">TLSMD Job ID:</td>'
x += '<td><b>%s</b></td></tr>' % (fdict["job_id"])
x += '<tr><td class="r">Job State:</td>'
try:
x += '<td><b>%s</b></td></tr>' % (fdict["state"])
except:
x += '<td><b>None</b></td></tr>'
x += '<tr><td class="r">Submission IP Address: </td>'
x += '<td><b>%s</b></td></tr>' % (fdict["ip_address"])
x += '<tr><td class="r">Submission Date: </td>'
if fdict.has_key("submit_time"):
date = timestring(fdict["submit_time"])
else:
date = "No Time"
x += '<td><b>%s</b></td></tr>' % (date)
x += '</table></td>'
x += '</tr>'
## Select Chains for Analysis
chains = mysql.job_get_chain_sizes(fdict["job_id"]).rstrip(";")
if not pdb:
x += '<tr><th colspan="3">Select Chains for Analysis</th></tr>'
x += '<tr><td colspan="3">'
x += '<table>'
for c in chains.split(';'):
x += '<tr><td>'
x += '<label>'
chid, length, selected, type = misc.parse_chains(c)
name = "CHAIN%s" % chid
if type == "aa":
desc = "Chain %s (%s Amino Acid Residues)" % (chid, length)
elif type == "na":
desc = "Chain %s (%s Nucleic Acid Residues)" % (chid, length)
elif type == "ot":
desc = "Chain %s (%s Other Residues)" % (chid, length)
if selected == "1":
x += '<input type="checkbox" id="%s" name="%s" value="TRUE" checked="checked">' % (
name, name)
else:
x += '<input type="checkbox" id="%s" name="%s" value="FALSE">' % (
name, name)
x += '%s' % desc
x += '</label>'
x += '</td></tr>'
x += '</table></td></tr>'
else:
## select all the chains by default
for c in chains.split(';'):
chid, length, selected, type = misc.parse_chains(c)
name = "CHAIN%s" % chid
x += '<input type="hidden" name="%s" value="TRUE">' % name
x += '</table>'
## end form
x += '<tr><td colspan="3">'
x += '<table width="100%">'
x += '<tr>'
x += '<td class="l">'
if fdict.has_key("removebutton"):
x += '<input type="submit" name="submit" value="Remove Job">'
if fdict.has_key("signalbutton"):
x += '<input type="submit" name="submit" value="Signal Job">'
if fdict.has_key("killbutton"):
x += '<input type="submit" name="submit" value="Kill Job">'
if fdict.has_key("requeuebutton"):
x += '<input type="submit" name="submit" value="Requeue Job">'
x += '</td>'
x += '<td class="r">'
x += '<input type="submit" name="submit" value="Next">'
x += '</tr>'
x += '</table>'
x += '</td></tr>'
x += '</table>'
x += '</form>'
return x
def html_session_info_table(fdict):
"""Show user environment/session information.
"""
if fdict.has_key("submit_time"):
date = timestring(fdict["submit_time"])
else:
date = ""
l = ['<table class="inner_table">',
'<tr class="inner_title"><th>',
'<a id="cid2" href="javascript:',
"ToggleDivVisibility('cid2','id2',\
'Show Session Information',\
'Hide Session Information')",
'">Show Session Information</a>',
'</th></tr>',
'<tr><td class="c">',
'<div id="id2" style="display:none">',
'<table class="ninner_table">',
'<tr><td class="r">TLSMD Job ID:</td>',
'<td><b>%s</b></td></tr>' % (fdict["job_id"]),
'<tr><td class="r">Job State:</td>',
'<td><b>%s</b></td></tr>' % (fdict["state"]),
'<tr><td class="r">Submission IP Address: </td>',
'<td><b>%s</b></td></tr>' % (fdict.get("ip_addr", "")),
'<tr><td class="r">Submission Date: </td>',
'<td><b>%s</b></td></tr>' % (date),
'</table></div>',
'</table>']
return "".join(l)
def html_user_info_table(fdict):
"""Returns a small table of user data.
"""
l = ['<table class="inner_table">',
'<tr class="inner_title"><th colspan="2">User Information</th></tr>',
'<tr><td class="c">',
'<center>',
'<table class="ninner_table">',
## User name
'<tr>',
'<td class="r"><label for="user_name">Your Name</label></td>',
'<td class="l"><input type="text" id="user_name" name="user_name" ',
'value="%s" size="25" maxlength="40"></td>' % (
fdict.get("user_name","")),
'</tr>',
## User email address
'<tr>',
'<td class="r"><label for="email">EMail Address</label></td>',
'<td class="l"><input type="text" id="email" name="email" ',
'value="%s" size="25" maxlength="40"></td>' % (
fdict.get("email", "")),
'</tr>',
## User associated notes
'<tr>',
'<td class="r"><label for="user_comment">Associated Notes</label></td>',
'<td class="l"><input type="text" id="user_comment" name="user_comment" ',
'value="%s" size="40" maxlength="128"></td>' % (
fdict.get("user_comment","")),
'</tr>',
'</table>',
'</center>',
'</td></tr></table>']
return "".join(l)
def html_program_settings_table(fdict, run_mainchain_only = None):
"""Used in 'Step 2: Fill out Submission Form'. Also allows the user to
select advanced options before completing submission.
"""
## "TLSMD Program Options" table
l = ['<table class="inner_table">',
'<tr class="inner_title"><th>TLSMD Program Options</th></tr>',
'<tr><td class="c">',
'<table width="100%">']
## Center table: "Choose TLS Model"
which_model = mysql.job_get_tls_model(fdict["job_id"])
model_note = ''
if which_model == "ANISO":
model_note += 'Note: Your structure contains ANISOU records.<br>'
model_note += 'Note: TLSMD may find a more accurate result if it uses '
model_note += 'this information, but anisotropic analysis takes much '
model_note += 'longer to run!'
if which_model == "ISOT":
model_note += 'Note: Your structure does not contain any ANISOU '
model_note += 'records. You should choose to run your structure '
model_note += 'through TLSMD using the isotropic analysis model.'
l += ['<tr><td class="c" valign="top" colspan="2">',
'<table class="ninner_table">',
'<tr><td class="l">',
'<fieldset><legend>Choose TLS Model:</legend>',
'<div style="font-size:xx-small">%s</div>' % model_note,
'<p><label>',
'<input name="tls_model" type="radio" value="ISOT" '
'checked="checked">',
'Isotropic analysis</label></p>',
'<p><label>',
'<input name="tls_model" type="radio" value="ANISO"']
if which_model == "ISOT":
l += [' disabled']
l += ['>',
'Anisotropic analysis</label></p>',
'</fieldset>',
'</td>',
'</table>'
'</td>']
## Left table: "Keep Job Private"
l += ['<tr><td class="c" valign="top">',
'<table class="ninner_table">',
'<tr><td class="l">']
if conf.PRIVATE_JOBS:
l += ['<input type="checkbox" id="private_job" name="private_job" ',
'value="TRUE" checked="checked">']
else:
l += ['<input type="checkbox" id="private_job" name="private_job" ',
'value="TRUE">']
l += ['<label for="private_job">Keep Job Private</label>',
'</td></tr>']
## Left table: "4-Letter Structure ID"
l += ['<tr><td class="l">',
'<label for="structure_id">4-Letter Structure ID </label>',
'<input type="text" id="structure_id" name="structure_id" ',
'value="%s" size="4" maxlength="4">' % (
fdict.get("structure_id", "")),
'</td></tr>',
'</table></td>']
## Right table: "Select Chains for Analysis"
l += ['<td class="c" valign="top">',
'<table class="ninner_table">',
'<tr style="line-height:2em">',
'<th>Select Chains for Analysis</th></tr>']
chains = mysql.job_get_chain_sizes(fdict["job_id"]).rstrip(";")
for c in chains.split(';'):
chid, length, selected, type = misc.parse_chains(c)
name = "CHAIN%s" % chid
if type == "aa":
desc = "Chain %s (%s Amino Acid Residues)" % (chid, length)
elif type == "na":
desc = "Chain %s (%s Nucleic Acid Residues)" % (chid, length)
elif type == "ot":
desc = "Chain %s (%s Other Residues)" % (chid, length)
if selected == "1":
x = '<input type="checkbox" id="%s" name="%s" value="TRUE" checked="checked">' % (
name, name)
else:
x = '<input type="checkbox" id="%s" name="%s" value="FALSE">' % (
name, name)
l +=['<tr><td class="l">', x, desc, '</td></tr>']
l += ['</table></td>',
## End of "TLSMD Program Options" table
'</tr></table>']
if run_mainchain_only:
sanity_png = "%s/%s/sanity.png" % (conf.TLSMD_WORK_URL, fdict["job_id"])
l += ['<tr><td class="note">The variation in the B factors of ',
'adjacent atoms in some regions of your structure is not ',
'reasonable (you can see an analysis <a href="%s">here</a>). ' % (
sanity_png),
'However, if only main chain atoms are considered, the ',
'variation in B is more reasonable. Please select "Mainchain ',
'Atoms" from the "Atom Class Selection" section in the ',
'Advanced Program Options below.</td></tr>']
## "Advanced Program Options" table
l += ['<tr class="inner_title"><th>',
'<a id="cid1" href="javascript:',
"ToggleDivVisibility('cid1','id1',\
'Show Advanced Program Options',\
'Hide Advanced Program Options')",
'">Show Advanced Program Options</a>',
'</th></tr>',
'<tr><td class="c">']
if run_mainchain_only:
l.append('<div id="id1" style="display:inline">')
else:
l.append('<div id="id1" style="display:none">')
l += ['<table class="ninner_table">',
'<tr>',
'<td valign="top" class="l">',
'<fieldset><legend>Plot Output Format</legend>',
'<div style="font-size:xx-small">'
'Select the output format for plots.<br>',
'SVG works with the Adobe plugin and Firefox 1.5+.',
'</div>',
'<p><label>',
'<input name="plot_format" type="radio" value="PNG" tabindex="35" ',
'checked="checked">',
'PNG Images</label></p>',
'<p><label>',
'<input name="plot_format" type="radio" value="SVG" tabindex="35">',
'SVG</label></p>',
'</fieldset>',
'</td>',
'<td valign="top" class="l">',
'<fieldset><legend>Atom Class Selection</legend>',
'<div style="font-size:xx-small">',
'Analyze all protein atoms, or just the main chain atoms.<br>',
'</div>',
'<p><label>']
if run_mainchain_only:
l += ['<input name="include_atoms" type="radio" value="ALL">',
'All Atoms</label></p>',
'<p><label>',
'<input name="include_atoms" type="radio" value="MAINCHAIN" ',
'checked="checked">',
'Mainchain Atoms ({N,CA,C,O,CB} or {P,O5*,C5*,C4*,C3*,O3*})']
else:
l += ['<input name="include_atoms" type="radio" value="ALL" ',
'checked="checked">',
'All Atoms</label></p>',
'<p><label>',
'<input name="include_atoms" type="radio" value="MAINCHAIN">',
'Mainchain Atoms ({N,CA,C,O,CB} or {P,O5*,C5*,C4*,C3*,O3*})']
l += ['</label></p>',
'</fieldset>',
'</td>',
'</tr><tr>'
## Jmol toggle switches (default=True/"yes")
'<td valign="top" class="l">',
'<fieldset><legend>Jmol toggle switches</legend>',
'<p>',
'<label>Generate Jmol-viewer pages: </label>',
'<input name="generate_jmol_view" type="radio" value="True" ',
'checked="checked">yes',
'<input name="generate_jmol_view" type="radio" value="False">no',
'</p>',
'<p>',
'<label>Generate Jmol-animation pages: </label>',
'<input name="generate_jmol_animate" type="radio" value="True" ',
'checked="checked">yes',
'<input name="generate_jmol_animate" type="radio" value="False">no',
'</p>',
'</fieldset>',
'</td>',
## Histogram toggle switches (default=False/"no")
'<td valign="top" class="l">',
'<fieldset><legend>Histogram toggle switches</legend>',
'<p>',
'<label>Generate histogram plots: </label>',
'<input name="generate_histogram" type="radio" value="True">yes',
'<input name="generate_histogram" type="radio" value="False" ',
'checked="checked">no',
'</p>',
'<p></p>', ## formatting
'</fieldset>',
'</td>',
'</tr><tr>'
## select number of partitions per chain
'<td valign="top" class="l">',
'<fieldset><legend>Set number of partitions/chain</legend>',
'<div style="font-size:xx-small">default/max = %s</div><br>' % (
conf.NPARTS),
'<p>',
'<label>Maximum number of segments: </label>',
'<input name="nparts" type="text" size="2" maxlength="2" ',
'value="%s">' % (conf.NPARTS),
'</p>',
'</fieldset>',
'</td>',
## turn cross-chain analysis on/off
'<td valign="top" class="l">',
'<fieldset><legend>Cross-Chain analysis</legend>',
'<div style="font-size:xx-small">',
'Turn Cross-Chain analysis on/off.</div><br>',
'<p>',
'<label>Generate Cross-Chain analysis: </label>',
'<input name="cross_chain_analysis" type="radio" value="True">yes',
'<input name="cross_chain_analysis" type="radio" value="False" ',
'checked="checked">no',
'</p>',
'</fieldset>',
'</td>',
'</tr>',
'</table>',
'</div>',
'</td></tr>',
## End of "Advanced Program Options" table
'</table>']
return "".join(l)
def html_job_edit_form2(fdict, title="", run_mainchain_only = None):
if fdict.has_key("removebutton"):
remove_button = '<input type="submit" name="submit" value="Remove Job">'
else:
remove_button = ''
l = ['<script language=javascript type="text/javascript">',
'function ToggleDivVisibility(control_id, target_id, show_val, hide_val) {',
' var ctrl_element = document.getElementById(control_id);',
' var target_element = document.getElementById(target_id);',
' if (target_element.style.display != "none") {',
' target_element.style.display = "none";',
' ctrl_element.firstChild.nodeValue = show_val;',
' } else {',
' target_element.style.display = "inline";',
' ctrl_element.firstChild.nodeValue = hide_val;',
' }',
'}',
'</script>',
'<center>',
'<form enctype="multipart/form-data" action="webtlsmd.cgi" method="post">',
'<input type="hidden" name="page" value="%s">' % (
fdict.get("page", "index")),
'<input type="hidden" name="edit_form" value="TRUE">',
'<input type="hidden" name="job_id" value="%s">' % (
fdict["job_id"]),
'<table width="100%" class="submit_table">',
'<tr><th class="step_title">%s</th></tr>' % (title),
'<tr><td class="c">', html_user_info_table(fdict), '</td></tr>',
#'<tr><td class="c">', html_program_settings_table(fdict, run_mainchain_only), '</td></tr>',
'<tr><td class="c">']
l.append(html_program_settings_table(fdict, run_mainchain_only = run_mainchain_only))
l += ['</td></tr>',
'<tr><td class="c">', html_session_info_table(fdict), '</td></tr>',
'<tr><td class="c"><input type="submit" name="submit" value="Submit Job"></td></tr>',
'</table>',
'</form>',
'</center>']
return "".join(l)
def html_job_info_table(fdict):
"""Returns a table of information on a given job with data taken from the
MySQL database.
"""
x = ''
x += '<center>'
x += '<table class="explore_table">'
## user/email/passcode/structure name
x += '<tr class="explore_table_head">'
x += '<th colspan="2">User Information</th>'
x += '<th>Session Information</th>'
x += '</tr>'
x += '<tr><td colspan="2">'
x += '<table>'
## email address
x += '<tr class="explore_table_row">'
x += '<td class="r"><label>EMail Address:</td>'
x += '<td class="l"><b>%s</b>' % (fdict.get("email", ""))
x += '</label></td>'
x += '</tr>'
## structure code
x += '<tr>'
x += '<td class="r"><label>Structure Code:</td>'
x += '<td class="l"><b>%s</b>' % (fdict.get("structure_id", ""))
x += '</label></td>'
x += '</tr>'
## user comments
x += '<tr>'
x += '<td class="r"><label>Associated Notes:</td>'
x += '<td class="l"><b>%s</b>' % (fdict.get("user_comment", ""))
x += '</label></td>'
x += '</tr>'
x += '</table>'
x += '</td>'
##==========================================================================
## session info
x += '<td valign="top"><table>'
x += '<tr><td class="r">TLSMD Job ID:</td>'
x += '<td><b>%s</b></td></tr>' % (fdict["job_id"])
x += '<tr><td class="r">Job State:</td>'
if fdict.has_key("state"):
jobstate = (fdict["state"])
else:
jobstate = "unknown"
if jobstate == "died":
x += '<td class="perror"><b>%s</b></td></tr>' % (jobstate)
else:
x += '<td><b>%s</b></td></tr>' % (jobstate)
x += '<tr><td class="r">Submission IP Address: </td>'
x += '<td><b>%s</b></td></tr>' % (fdict.get("ip_address", ""))
x += '<tr><td class="r">Submission Date: </td>'
if fdict.has_key("submit_time"):
date = timestring(fdict["submit_time"])
else:
date = "---"
x += '<td><b>%s</b></td></tr>' % (date)
x += '<tr><td class="r">Processing Start Date: </td>'
if fdict.has_key("run_time_begin"):
date = timestring(fdict["run_time_begin"])
else:
date = "---"
x += '<td><b>%s</b></td></tr>' % (date)
x += '<tr><td class="r">Processing End Date: </td>'
if fdict.has_key("run_time_end"):
date = timestring(fdict["run_time_end"])
else:
date = "---"
x += '<td><b>%s</b></td></tr>' % (date)
x += '<tr><td class="r">Processing Time(HH:MM): </td>'
if fdict.has_key("run_time_end") and fdict.has_key("run_time_begin"):
if (fdict["run_time_begin"] == None) or \
(fdict["run_time_end"] == None):
hours = "----"
else:
hours = timediffstring(fdict["run_time_begin"],
fdict["run_time_end"])
else:
hours = "---"
x += '<td><b>%s</b></td></tr>' % (hours)
x += '</table></td>'
x += '</tr>'
##==========================================================================
## Selected Chains for Analysis
x += '<tr class="explore_table_head">'
x += '<th colspan="3">Selected Chains</th></tr>'
x += '<tr><td colspan="3">'
x += '<table cellpadding="5" style="text-align:center;">'
## Thumbnail image of user's structure
if conf.THUMBNAIL:
x += '<tr><th colspan="3">'
if fdict["via_pdb"] == 1 and \
fdict["state"] not in ["running", "queued", "died"]:
x += '<img src="%s"/>' % (conf.WEBTLSMDD_PDB_URL + "/" + \
fdict["structure_id"] + "/struct.png")
else:
x += '<img src="%s"/>' % (conf.TLSMD_WORK_URL + "/" + \
fdict["job_id"] + "/struct.png")
x += '</th></tr>'
## Selected chains information
x += '<tr><th><font size="-5">Chain</font></th>'
x += '<th><font size="-5">Processing Time (HH:MM.SS)</font></th>'
chains = mysql.job_get_chain_sizes(fdict["job_id"]).rstrip(";")
for c in chains.split(';'):
chid, length, selected, type = misc.parse_chains(c)
name = "CHAIN%s" % chid
if selected == "1":
if type == "aa":
desc = "Chain: %s (%s Amino Acid Residues)" % (chid, length)
elif type == "na":
desc = "Chain: %s (%s Nucleic Acid Residues)" % (chid, length)
x += '<tr>'
x += '<td>%s</td>' % desc
## TODO: Record running time for each chain, 2009-05-29
processing_time = False
#if cdict.has_key("processing_time"):
if processing_time:
#hours = secdiffstring(cdict["processing_time"])
hours = "0000"
else:
hours = "---"
x += '<td>%s</td>' % (hours)
x += '</tr>'
x += '</table></td></tr>'
##==========================================================================
## Detailed advanced settings list
x += '<tr class="explore_table_head">'
x += '<th colspan="3">Advanced Settings</th></tr>'
x += '<tr><td class="l"><pre>'
## TLS Model
if fdict.get("tls_model") is None or fdict.get("tls_model") == "ISOT":
x += left_justify_string('TLS Model', 'Isotropic')
elif fdict.get("tls_model") == "ANISO":
x += left_justify_string('TLS Model', 'Anisotropic')
## Least Squares Weighting (not reported)
if fdict.get("weight") is None or fdict.get("weight") == "IUISO":
x += left_justify_string('Least Squares Weighting', 'Inverse Atomic B_iso')
elif fdict.get("weight") == "NONE":
x += left_justify_string('Least Squares Weighting', 'No Weighting')
## Include Atoms
if fdict.get("include_atoms") in [None, "ALL"]:
x += left_justify_string('Include Atoms', 'Include All Atoms')
elif fdict.get("include_atoms") == "MAINCHAIN":
x += left_justify_string('Include Atoms', 'Main Chain Atoms')
elif fdict.get("include_atoms") == "CA":
x += left_justify_string('Include Atoms', 'C-Alpha Atoms')
## Jmol-viewer settings. 2008-11-13
if fdict.get("generate_jmol_view") == True:
x += left_justify_string('Generate Jmol-viewer files', 'True')
elif fdict.get("generate_jmol_view") == False:
x += left_justify_string('Generate Jmol-viewer files', 'False')
else:
x += left_justify_string('Generate Jmol-viewer files', 'n/a')
## Jmol-animation settings. 2008-11-13
if fdict.get("generate_jmol_animate") == True:
x += left_justify_string('Generate Jmol-animation files', 'True')
elif fdict.get("generate_jmol_animate") == False:
x += left_justify_string('Generate Jmol-animation files', 'False')
else:
x += left_justify_string('Generate Jmol-animation files', 'n/a')
## Histogram settings. 2008-11-13
if fdict.get("generate_histogram") == True:
x += left_justify_string('Generate histogram files', 'True')
elif fdict.get("generate_histogram") == False:
x += left_justify_string('Generate histogram files', 'False')
else:
x += left_justify_string('Generate histogram files', 'n/a')
## Number of segments settings. 2008-11-13
if fdict.get("nparts") == "":
x += left_justify_string('Maximum number of segments', 'n/a')
else:
x += left_justify_string('Maximum number of segments', '%s' % (
fdict["nparts"]))
## Cross-Chain analysis settings. 2008-11-25
if fdict.get("cross_chain_analysis") == True:
x += left_justify_string('Cross-Chain analysis', 'True')
elif fdict.get("cross_chain_analysis") == False:
x += left_justify_string('Cross-Chain analysis', 'False')
else:
x += left_justify_string('Cross-Chain analysis', 'n/a')
x += '</pre></td>'
x += '</tr>'
##==========================================================================
## end form
if fdict.has_key("removebutton"):
x += '<form enctype="multipart/form-data" action="webtlsmd.cgi" method="post">'
## Job ID, user, passwd
x += '<input type="hidden" name="page" value="%s">' % (
fdict.get("page", "index"))
x += '<input type="hidden" name="edit_form" value="TRUE">'
x += '<input type="hidden" name="job_id" value="%s">' % (
fdict["job_id"])
#x += '<input type="hidden" name="user" value="%s">' % (fdict["user"])
#x += '<input type="hidden" name="passwd" value="%s">' % (fdict["passwd"])
x += '<tr>'
x += '<td colspan="3" class="l">'
x += '<input type="submit" name="submit" value="Remove Job">'
if fdict.has_key("signalbutton"):
x += '<input type="submit" name="submit" value="Signal Job">'
if fdict.has_key("killbutton"):
x += '<input type="submit" name="submit" value="Kill Job">'
## FIXME: This is redundant
if fdict.has_key("removebutton"):
x += '</td>'
x += '</form>'
x += '</tr>'
x += '</table>'
return x
def check_job_id(form):
"""Retrieves and confirms the job_id from a incoming form. Returns
None on error, or the job_id on success.
"""
if form.has_key("job_id"):
job_id = form["job_id"].value
if len(job_id) < conf.MAX_JOB_ID_LEN:
if job_id.startswith("TLSMD"):
if mysql.job_exists(job_id):
return job_id
return None
def vet_struct_id(data, max_len):
if isinstance(data, unicode):
return False
if len(data) > max_len:
return False
if not data.isalnum():
return False
return True
def cleanup_input(data):
"""Vet all user-input via forms. Allow only alphanumeric characters and
some punctuation: " ", "_", ",", ".", "(", ")", "-", ":"
"""
data = re.sub(r'[^0-9A-Za-z ()_,.:-]', '', data) ## hyphen placed last so it is a literal, not a range
return data
def vet_email(email_address):
"""Vet email addresses. The local part (the part before the '@') must not
exceed 64 characters and the domain part (after the '@') must not
exceed 255 characters. The entire email address length must not exceed
320 characters.
"""
## FIXME: Doesn't warn user!
if not re.match(r'^([^@\s]+)@((?:[-a-z0-9]+\.)+[a-z]{2,})$', email_address):
return False
local_part = re.sub(r'^([^@\s]+)@((?:[-a-z0-9]+\.)+[a-z]{2,})$', '\\1', email_address)
domain_part = re.sub(r'^([^@\s]+)@((?:[-a-z0-9]+\.)+[a-z]{2,})$', '\\2', email_address)
if len(local_part) > 64:
return False
if len(domain_part) > 255:
return False
return True
def vet_pdb_id(pdbid):
"""PDB ID must be exactly four characters long, alphanumeric, and
the first character must be an integer.
"""
if len(pdbid) < 4 or not \
pdbid.isalnum() or not \
re.match(r'^[0-9][A-Za-z0-9]{3}$', pdbid):
return False
return True
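## Hedged, illustrative examples for the three validators above (inputs are
## made up and not part of the original module):
## cleanup_input("1abc: chain A / test-run") -> "1abc: chain A  test-run"
## (the '/' is stripped; '-', ':', '(' and ')' survive)
## vet_email("user@example.org") -> True
## vet_email("user@EXAMPLE.ORG") -> False (the pattern only matches a
## lower-case domain, so callers may want to lower-case first)
## vet_pdb_id("1abc") -> True
## vet_pdb_id("abcd") -> False (first character must be a digit)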
def extract_job_edit_form(form):
"""Extract the input from the Job Edit Form and update the MySQL
database with the information.
"""
if not form.has_key("edit_form"):
return False
job_id = check_job_id(form)
if job_id is None:
return False
mysql.job_set_submit_time(job_id, time.time())
## TODO: Immediately create job dir + log.txt + ANALYSIS dir, 2009-05-26
if form.has_key("private_job"):
mysql.job_set_private_job(job_id, "1") ## 1 = True
#mysql.job_set_private_job(job_id, True) ## 1 = True
if form.has_key("user_name"):
user_name = form["user_name"].value.strip()
## store only the first 100 characters
user_name = cleanup_input(user_name[:100])
mysql.job_set_user_name(job_id, user_name)
if form.has_key("email"):
email_address = form["email"].value.strip()
if vet_email(email_address) or email_address == "":
mysql.job_set_email(job_id, email_address)
else:
raise SubmissionException('Not a valid email address')
if form.has_key("structure_id"):
structure_id = form["structure_id"].value.strip()
if vet_struct_id(structure_id, 4):
## remove non-alphanumeric characters
structure_id = re.sub(r'[^A-Za-z0-9]', '', structure_id)
mysql.job_set_structure_id(job_id, structure_id)
if form.has_key("user_comment"):
user_comment = form["user_comment"].value.strip()
## store only the first 128 characters
user_comment = cleanup_input(user_comment[:128])
mysql.job_set_user_comment(job_id, user_comment)
conf.globalconf.user_comment = user_comment ## FIXME: Doesn't seem to work, 2010-07-08
#raise SubmissionException('User comment: %s' % conf.globalconf.user_comment)
## Selected chains for analysis
num_chains_selected = 0
update_chains = ""
chains = mysql.job_get_chain_sizes(job_id).rstrip(";")
#raise SubmissionException('FORM DUMP: [%s])' % (form)) ## DEBUG
for c in chains.split(';'):
chid, length, selected, type = misc.parse_chains(c)
name = "CHAIN%s" % chid
chname = str(name)
if form.has_key(chname):
update_chains = update_chains + "%s:%s:%s:%s;" % (
chid, length, "1", type)
num_chains_selected += 1
else:
update_chains = update_chains + "%s:%s:%s:%s;" % (
chid, length, "0", type)
if num_chains_selected == 0:
msg = "You did not select any chains. "
msg += "Will not proceed any further."
raise SubmissionException(msg)
mysql.job_set_chain_sizes(job_id, update_chains)
if form.has_key("tls_model"):
tls_model = form["tls_model"].value.strip()
if tls_model in ["ISOT", "ANISO"]:
mysql.job_set_tls_model(job_id, tls_model)
if form.has_key("weight"):
weight = form["weight"].value.strip()
if weight in ["NONE", "IUISO"]:
mysql.job_set_weight_model(job_id, weight)
if form.has_key("include_atoms"):
include_atoms = form["include_atoms"].value.strip()
if include_atoms in ["ALL", "MAINCHAIN"]:
mysql.job_set_include_atoms(job_id, include_atoms)
if form.has_key("plot_format"):
plot_format = form["plot_format"].value.strip()
if plot_format in ["PNG", "SVG"]:
mysql.job_set_plot_format(job_id, plot_format)
## Generate Jmol-viewer feature (default=True)
if form.has_key("generate_jmol_view"):
generate_jmol_view = form["generate_jmol_view"].value.strip()
if generate_jmol_view == "True":
mysql.job_set_jmol_view(job_id, "1")
else:
mysql.job_set_jmol_view(job_id, "0")
## Generate Jmol-animation feature (default=True)
if form.has_key("generate_jmol_animate"):
generate_jmol_animate = form["generate_jmol_animate"].value.strip()
if generate_jmol_animate == "True":
mysql.job_set_jmol_animate(job_id, "1")
else:
mysql.job_set_jmol_animate(job_id, "0")
## Generate Histogram plots (default=False)
if form.has_key("generate_histogram"):
generate_histogram = form["generate_histogram"].value.strip()
if generate_histogram == "True":
mysql.job_set_histogram(job_id, "1")
else:
mysql.job_set_histogram(job_id, "0")
## Generate Cross-Chain analysis (default=False)
if form.has_key("cross_chain_analysis"):
cross_chain_analysis = form["cross_chain_analysis"].value.strip()
if cross_chain_analysis == "True":
mysql.job_set_cross_chain_analysis(job_id, "1")
else:
mysql.job_set_cross_chain_analysis(job_id, "0")
## Select number of partition/chain (default/max=20)
if form.has_key("nparts"):
nparts_value = form["nparts"].value.strip()
if nparts_value.isdigit() == False:
msg = "Integer value required for "
msg += "'Maximum number of segments: %s'" % nparts_value
raise SubmissionException(msg)
return False
if int(nparts_value) > conf.NPARTS or int(nparts_value) < 1:
## not a valid input; fall back to the default/maximum value, conf.NPARTS
nparts_value = int(conf.NPARTS)
mysql.job_set_nparts(job_id, int(nparts_value))
return True
class Page(object):
def __init__(self, form):
self.form = form
def html_head_nocgi(self, title, redirect=None):
x = ''
x += '<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" '
x += ' "http://www.w3.org/TR/html4/loose.dtd">\n'
x += '<html>\n'
x += '<head>\n'
x += ' <title>%s</title>\n' % (title)
x += ' <meta http-equiv="Content-type" content="text/html;charset=UTF-8">\n'
x += ' <link rel="stylesheet" href="../tlsmd.css" type="text/css" media="screen">\n'
x += ' <link rel="stylesheet" href="../tlsmd_print.css" type="text/css" media="print">\n'
if redirect != None:
x += '<meta http-equiv="REFRESH" content="10; URL=%s">' % (redirect)
x += '</head>\n'
x += '<body>\n<div id="page">\n'
return x
def html_head(self, title, redirect=None):
if redirect == None:
return 'Content-Type: text/html\n\n' + self.html_head_nocgi(title)
else:
return 'Content-Type: text/html\n\n' + self.html_head_nocgi(title, redirect)
def html_foot(self):
l = ['<center>\n',
'<p><small><b>Version %s</b> Last Modified %s' % (
const.VERSION, const.RELEASE_DATE),
'</small></p>',
'</center>',
'</div></body></html>']
return "".join(l)
class ErrorPage(Page):
def __init__(self, form, text=None):
Page.__init__(self, form)
self.text = text
def html_page(self):
title = 'TLSMD: Error'
l = [self.html_head(title, None),
html_title(title),
html_nav_bar(),
'<br>',
'<center><p class="perror">Error<br>']
if self.text is not None:
l.append(self.text)
l.append('</p></center>')
l.append(self.html_foot())
return "".join(l)
class QueuePage(Page):
def __init__(self, form):
Page.__init__(self, form)
if self.form.has_key("admin"):
self.admin = self.verify_admin(self.form["admin"].value)
else:
self.admin = False
def verify_admin(self, passcode):
## class QueuePage
try:
code = open(conf.ADMIN_PASSWORD_FILE, "r").read().strip()
except IOError:
return False
return code == passcode
def rcsb_href(self, jdict):
## class QueuePage
if jdict.get("private_job", False):
return "----"
struct_id = jdict.get("structure_id", "xxxx")
if struct_id == None:
return "----"
elif struct_id.lower() == "xxxx":
return struct_id
return struct_id
def html_head_nocgi(self, title):
## class QueuePage
l = ['<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" ',
' "http://www.w3.org/TR/html4/loose.dtd">\n',
'<html>\n',
'<head>\n',
' <title>%s</title>' % (title),
' <meta http-equiv="Content-type" content="text/html;charset=UTF-8">\n'
' <link rel="stylesheet" href="../tlsmd.css" type="text/css" media="screen">',
' <link rel="stylesheet" href="../tlsmd_print.css" type="text/css" media="print">',
'</head>\n',
'<body>\n<div id="page">']
return "".join(l)
def html_foot(self):
## class QueuePage
l = ['<center>',
'<p><small><b>Version %s</b> Last updated %s PDT</p>' % (
const.VERSION, misc.timestamp()),
'</center>',
'</div></body></html>']
return "".join(l)
def html_page(self):
## class QueuePage
title = 'TLSMD: Job Status'
job_list = self.get_job_list()
l = [self.html_head(title, None),
html_title(title),
html_nav_bar("queue"),
self.html_private_form(),
'<center><b>Or click on the Job ID you wish to view</b></center>',
'<br>',
self.html_running_job_table(job_list),
'<br>',
self.html_queued_job_table(job_list),
'<br>',
self.html_completed_job_table(job_list)]
limbo = self.html_limbo_job_table(job_list)
if limbo != None:
l.append('<br>')
l.append(limbo)
l.append(self.html_foot())
return "".join(l)
def html_private_form(self):
## class QueuePage
l = ['<form action="webtlsmd.cgi" method="post">',
'<input type="hidden" name="page" value="explore">',
'<center>',
'<b>To access a private job, enter its Job ID below</b>',
'</center>',
'<center>',
'<input type="text" name="job_id" size="20">',
'</center>',
'</form>']
return "".join(l)
def explore_href(self, job_id):
"""Returns the URL of the job_id.
"""
## class QueuePage()
jdict = mysql.job_get_dict(job_id)
if self.admin:
page = "admin"
else:
page = "explore"
if self.admin:
if jdict["tls_model"] == "ANISO":
l = ['<a href="webtlsmd.cgi?page=%s&job_id=%s">%s</a> (ANISO)' % (
page, job_id, job_id)]
else:
l = ['<a href="webtlsmd.cgi?page=%s&job_id=%s">%s</a>' % (
page, job_id, job_id)]
if jdict["user_name"] != "":
l.append('<br>%s' % (jdict["user_name"]))
if jdict["user_comment"] != "" or jdict["user_comment"] != None:
l.append('<br>%s' % (jdict["user_comment"]))
if jdict["email"] != "":
l.append('<br>%s' % (jdict["email"]))
return "".join(l)
if mysql.job_get_private_job(job_id):
## Return job number only (non-clickable)
job_number = re.match(r'[^_]*', job_id)
if job_number:
return job_number.group(0)
return 'private'
return '<a href="webtlsmd.cgi?page=%s&job_id=%s">%s</a>' % (
page, job_id, job_id)
def chain_size_string(self, jdict):
"""Returns a list of chain IDs and their sizes.
"""
## class QueuePage()
if jdict.has_key("chains") == False:
return "---"
listx = []
for cdict in jdict["chains"]:
if cdict["selected"]:
## Only show chains selected for analysis
listx.append("%s:%d" % (cdict["chain_id"], cdict["length"]))
strx = ''
while len(listx) > 0:
l3 = listx[:5]
listx = listx[5:]
strx += " ".join(l3)
if len(listx) > 0:
strx += '<br>'
return '%s' % (strx)
def get_job_list(self):
"""Get a list of all the jobs in the job queue file.
"""
job_list = []
if mysql.job_list() == None:
return job_list
return mysql.job_list()
def pid_exists(self, job_id):
"""Checks if a PID exists for a given job_id.
Returns True if PID exists; False otherwise.
"""
## class QueuePage()
pid = mysql.job_get_pid(job_id)
if pid == None:
## job PID somehow did not get stored in the database, so return
## False => state='syserror'; job may still be running!
return False
else:
pid = int(pid)
try:
#os.kill(pid, 0) ## This does not work, 2009-05-27
## NOTE: Three possible results:
## (1): os.kill(pid, 0) -> None: process exists, and you are process
## owner or root
## (2): os.kill(pid, 0) -> OSError, Operation not permitted:
## process exists, you are not owner or root
## (3): os.kill(pid, 0) -> OSError, No such process:
## process does not exist
if os.path.exists("/proc/%s" % pid):
return True ## process is still running
return False
except:
return False
def total_number_of_residues(self, jdict):
"""Calculate the total number of residues (with/without chains).
"""
chain_sizes = jdict["chain_sizes"]
total = 0
if chain_sizes == None:
return "NULL"
## Sum total number of residues from each chain (ignore type)
for c in chain_sizes.split(';'):
chid, length, selected, type = misc.parse_chains(c)
if selected == "1":
total += int(length)
return total
def html_running_job_table(self, job_list):
"""Constructs an HTML table of currently running TLSMD jobs.
"""
## class QueuePage()
## get an array of "running" jobs from the job dictionary
run_jdict = []
for jdict in job_list:
if jdict["state"] == "running":
if self.pid_exists(jdict["job_id"]) == False:
mysql.job_set_state(jdict["job_id"], "syserror")
run_jdict.append(jdict)
x = ['<center>',
'<b>%d Running Jobs</b>' % (len(run_jdict)),
'<table class="status_table">',
'<tr class="status_table_head">',
'<th>Job ID</th>',
'<th>Structure ID</th>',
'<th>Chain:Num Res</th>',
'<th>Submission Date</th>',
'<th colspan="2">Running Time (HH:MM.SS)</th>',
'</tr>']
## creates multiple rows, _if_ there are multiple "running" jobs
row1 = True
for jdict in run_jdict:
if row1:
x.append('<tr class="status_table_row1">')
else:
x.append('<tr class="status_table_row2">')
row1 = not row1
x += ['<td>%s</td>' % (self.explore_href(jdict["job_id"])),
'<td>%s</td>' % (self.rcsb_href(jdict)),
#'<td>%s</td>' % (self.total_number_of_residues(jdict["chain_sizes"])),
'<td>%s</td>' % (self.total_number_of_residues(jdict)),
'<td>%s</td>' % (timestring(jdict["submit_time"]))]
if jdict["run_time_begin"] == None:
hours = "----"
elif jdict.has_key("run_time_begin"):
hours = timediffstring(float(jdict["run_time_begin"]), time.time())
else:
hours = "---"
## progress bar
try:
job_dir = conf.TLSMD_WORK_DIR + "/" + jdict["job_id"]
prog_file = open(job_dir + "/progress", 'r')
progress = int(float(prog_file.read().strip())*100)
prog_file.close()
except:
progress = 0
x += '<td class="l"><div class="prog-border">'
x += '<div class="prog-bar" style="width: %s%%;"></div>' % (
progress)
x += '</div></td>'
x += '<td class="r">%s</td></tr>' % (hours)
## for zero running jobs
if len(run_jdict) == 0:
x += ['<tr>',
'<td colspan="6" class="c">',
'No Jobs Running',
'</td>',
'</tr>']
x.append('</table></center>')
return "".join(x)
def html_queued_job_table(self, job_list):
"""Constructs an HTML table of currently queued TLSMD jobs.
"""
## class QueuePage()
queued_list = []
for jdict in job_list:
if jdict.get("state") == "queued":
## Populate queued list for XHTML table below
queued_list.append(jdict)
l = ['<center>',
'<b>%d Queued Jobs</b>' % (len(queued_list)),
'<table class="status_table">',
'<tr class="status_table_head">',
'<th>Job ID</th>',
'<th>Struct ID</th>',
'<th>Chain:Num Res</th>',
'<th>Submission Date</th>',
'</tr>']
row1 = True
for jdict in queued_list:
if row1:
l.append('<tr class="status_table_row1">')
else:
l.append('<tr class="status_table_row2">')
row1 = not row1
l += ['<td>%s</td>' % (self.explore_href(jdict["job_id"])),
'<td>%s</td>' % (self.rcsb_href(jdict)),
'<td>%s</td>' % (self.chain_size_string(jdict)),
'<td>%s</td>' % (timestring(jdict["submit_time"])),
'</tr>' ]
if len(queued_list) == 0:
l += ['<tr>',
'<td colspan="4" class="c">',
'No Jobs Queued',
'</td>',
'</tr>']
l.append('</table></center>')
return "".join(l)
def html_completed_job_table(self, job_list):
"""Constructs an HTML table of completed TLSMD jobs.
"""
## class QueuePage()
completed_list = []
for jdict in job_list:
if jdict.get("state") in ["success",
"errors", # completed w/errors
"warnings", # completed w/warnings
"killed",
"died",
"syserror",
"defunct"]:
completed_list.append(jdict)
l = ['<center><b>%d Completed Jobs</b></center>' % (
len(completed_list)),
'<center>',
'<table class="status_table">',
'<tr class="status_table_head">',
'<th>Job ID</th>',
'<th>Struct ID</th>',
'<th>Status</th>',
'<th>Submission Date</th>',
'<th>Total Residues</th>',
'<th>Processing Time (HH:MM.SS)</th>',
'</tr>']
if len(completed_list) == 0:
l += ['<tr>',
'<td colspan="6" class="c">',
'No Jobs Completed',
'</td>',
'</tr>']
row1 = True
for jdict in completed_list:
if row1:
l.append('<tr class="status_table_row1">')
else:
l.append('<tr class="status_table_row2">')
row1 = not row1
## "Job ID"
l.append('<td>%s</td>' % (self.explore_href(jdict["job_id"])))
## "Struct ID"
#l.append('<td>%s</td>' % (self.rcsb_href(jdict)))
if ((jdict["structure_id"] == None) or \
(jdict["structure_id"].lower() == "xxxx")):
l.append('<td>----</td>')
else:
l.append('<td>%s</td>' % (jdict["structure_id"]))
## Direct link to logfile
if jdict["via_pdb"] == 1:
pdb_id = mysql.job_get_structure_id(jdict["job_id"])
job_dir = os.path.join(conf.WEBTLSMDD_PDB_DIR, pdb_id)
job_url = os.path.join(conf.TLSMD_PUBLIC_URL, "pdb", pdb_id)
logfile = os.path.join(job_dir, "log.txt")
log_url = job_url + "/log.txt"
else:
logfile = os.path.join(conf.TLSMD_WORK_DIR, jdict["job_id"], "log.txt")
log_url = conf.TLSMD_WORK_URL + "/" + jdict["job_id"] + "/log.txt"
if os.path.isfile(logfile) and jdict["private_job"] == 0:
l.append('<td><a href="%s">%s</a></td>' % (log_url, jdict["state"]))
else:
l.append('<td>%s</td>' % jdict["state"])
## "Submission Date"
if ((jdict["submit_time"] == None) or \
(float(jdict["submit_time"]) == 0.00)):
l.append('<td>n/a</td>')
else:
l.append('<td>%s</td>' % (timestring(jdict["submit_time"])))
## "Total Residues"
l.append('<td class="r">%s</td>' % (
self.total_number_of_residues(jdict)))
## "Processing Time (HH:MM.SS)"
if jdict.has_key("run_time_begin") and jdict.has_key("run_time_end"):
if ((jdict["run_time_begin"] == None) or \
(jdict["run_time_end"] == None)):
hours = "----"
elif ((float(jdict["run_time_begin"]) == 0.0) or \
(float(jdict["run_time_end"]) == 0.0)):
hours = "---"
else:
hours = timediffstring(jdict["run_time_begin"],
jdict["run_time_end"])
else:
hours = "---"
l.append('<td class="r">%s</td>' % (hours))
l.append('</tr>')
l.append('</table>')
l.append('</center>')
return "".join(l)
def html_limbo_job_table(self, job_list):
"""Constructs an HTML table of those TLSMD jobs currently in limbo.
"""
## class QueuePage()
limbo_list = []
for jdict in job_list:
if jdict.get("state") not in ["queued",
"running",
"success",
"errors", # completed w/errors
"warnings", # completed w/warnings
"killed",
"died"]:
limbo_list.append(jdict)
if len(limbo_list) == 0:
return None
x = ''
x += '<center>'
x += '<b>Partially Submitted Jobs</b>'
x += '<table class="status_table">'
x += '<tr class="status_table_head">'
x += '<th>Job ID</th>'
x += '<th>Struct ID</th>'
x += '<th>State</th>'
x += '<th>Submission Date</th>'
x += '</tr>'
for jdict in limbo_list:
x += '<tr>'
## Return job number only (non-clickable)
job_number = re.match(r'[^_]*', jdict["job_id"])
#x += '<td>%s</td>' % (self.explore_href(jdict))
x += '<td>%s</td>' % (job_number.group(0))
x += '<td>%s</td>' % (self.rcsb_href(jdict))
x += '<td>%s</td>' % (jdict.get("state"))
x += '<td>%s</td>' % (timestring(jdict.get("submit_time")))
x += '</tr>'
x += '</table>'
x += '</center>'
return x
class ExploreJobPage(Page):
def html_page(self):
job_id = check_job_id(self.form)
if job_id is None:
title = 'TLSMD: Explore Job'
x = self.html_head(title, None)
x += html_title(title)
x += '<center><p class="perror">ERROR: Invalid Job ID</p></center>'
x += self.html_foot()
return x
title = 'TLSMD: Explore Job ID %s' % (job_id)
x = ''
x += self.html_head(title, None)
x += html_title(title)
x += html_nav_bar()
try:
x += html_job_nav_bar(job_id)
jdict = mysql.job_get_dict(job_id)
x += html_job_info_table(jdict)
except:
## NOTE: This should only happen if the job was archived for being
## older than "DELETE_DAYS" in the webtlsmdcleanup.py script.
x += '<center><p class="perror">'
x += 'ERROR: Job summary page no longer exists</p></center>'
x += self.html_foot()
return x
class AdminJobPage(Page):
def html_page(self):
job_id = check_job_id(self.form)
if job_id is None:
title = 'TLSMD: View Job'
x = self.html_head(title, None)
x += html_title(title)
x += '<center><p class="perror">ERROR: Invalid Job ID</p></center>'
x += self.html_foot()
return x
## only fetch the job dictionary once the job_id is known to be valid
jdict = mysql.job_get_dict(job_id)
pdb = jdict.get('via_pdb', False)
title = 'TLSMD: Administrate Job %s' % (job_id)
x = ''
x += self.html_head(title, None)
x += html_title(title)
x += html_nav_bar()
if jdict.get("state") in ["killed", "died", "defunct"]:
x += html_job_nav_bar(job_id)
if self.form.has_key("submit") and self.form["submit"].value == "Remove Job":
x += self.remove(job_id)
elif self.form.has_key("submit") and self.form["submit"].value == "Signal Job":
x += self.kick(job_id)
elif self.form.has_key("submit") and self.form["submit"].value == "Kill Job":
x += self.kill(job_id)
elif self.form.has_key("submit") and self.form["submit"].value == "Requeue Job":
x += self.requeue(job_id)
else:
x += self.edit(job_id, pdb)
x += self.html_foot()
return x
def edit(self, job_id, pdb):
x = ''
## if the job is not in the "queued" state, then it is not safe to edit
state = mysql.job_get_state(job_id)
if state == "queued":
extract_job_edit_form(self.form)
## get the state dictionary for the entire job
fdict = mysql.job_get_dict(job_id)
fdict["page"] = "admin"
fdict["removebutton"] = True
if state == "queued" or state == "running":
fdict["signalbutton"] = True
fdict["killbutton"] = True
fdict["requeuebutton"] = True
if state in ["running", "success", "warnings", "errors", "died"]:
x += html_job_nav_bar(job_id)
x += html_job_info_table(fdict)
else:
x += html_job_edit_form(fdict, pdb)
return x
def remove(self, job_id):
webtlsmdd.remove_job(job_id)
x = ''
x += '<center>'
x += '<h3>Job %s has been removed.</h3>' % (job_id)
x += '</center>'
return x
def kick(self, job_id):
"""Kick PID of stuck job past current process and continue with
next step.
"""
if webtlsmdd.signal_job(job_id):
x = ''
x += '<center>'
x += '<h3>Job %s has been signaled ' % (job_id)
x += 'to kick it past the process it was stuck on.</h3>'
x += '</center>'
else:
x = ''
x += '<center>'
x += '<h3>Error: Can not signal job %s. ' % (job_id)
x += 'Might need to kill it.</h3>'
x += '</center>'
return x
def kill(self, job_id):
"""Kill PID of running job_id.
"""
if webtlsmdd.kill_job(job_id):
x = ''
x += '<center>'
x += '<h3>Job %s has died ' % (job_id)
x += 'or its associated pid has been manually killed.</h3>'
x += '</center>'
else:
x = ''
x += '<center>'
x += '<h3>Error: Can not remove job %s.</h3>' % (job_id)
x += '</center>'
return x
def requeue(self, job_id):
result = webtlsmdd.requeue_job(job_id)
x = ''
x += '<center>'
if result:
x += "<h3>Job %s has been pushed to the back.</h3>" % (job_id)
else:
x += "<h3>Job %s could not be requeued " % (job_id)
x += "because it is running.</h3>"
x += '</center>'
return x
class SubmissionException(Exception):
def __init__(self, err):
Exception.__init__(self)
self.err = err
def __str__(self):
return self.err
SUBMIT1_NOTE = """\
Analysis of large structures is computationally expensive, so you may have to
wait hours to days for the server to generate a complete analysis depending on
how heavily it is loaded.<br><br>
"""
class Submit1Page(Page):
def html_page(self):
title = 'TLSMD: Start a New Job'
l = [self.html_head(title, None),
html_title(title),
html_nav_bar(),
'<center>\n',
'<form enctype="multipart/form-data" action="webtlsmd.cgi" method="post">\n',
'<input type="hidden" name="page" value="submit2">\n',
'<table width="75%" class="submit_table">',
'<tr>\n',
'<th class="step_title" colspan=2>Step 1: Select a PDB file to analyze</th>',
'</tr>',
'<tr>\n',
'<td class="l">Upload local PDB File:</td>',
'<td><input name="pdbfile" size="40" type="file"></td>',
'</tr>',
'<tr>\n<td class="l">or</td></tr>\n',
'<tr>\n',
'<td class="l">Enter a PDB ID for custom analysis:</td>',
'<td><input name="pdbid" size="4" maxlength="4" type="text"></td>',
'</tr>',
'<tr>\n<td colspan="2" class="c">',
'<input value="Upload File and Proceed to Step 2, analysis options" type="submit">',
'</td></tr>',
'</table>',
'</form>\n',
## Submit from pdb.org ============================================
'<h4>OR</h4>\n',
'<form action="webtlsmd.cgi" method="post">\n',
'<input type="hidden" name="page" value="submit_pdb">\n',
'<table width="75%" class="submit_table">',
'<tr>\n<th colspan="2" class="step_title">Database of previously analyzed PDB entries</th></tr>',
'<tr>\n<td class="l">Enter a PDB ID:',
' <input name="pdbid" size="4" maxlength="4" type="text"></td><td></td></tr>',
'<tr>\n<td class="c" colspan="2"><input value="Submit PDB entry with default anaylsis"',
'type="submit"></td>',
'</tr>',
'<tr>\n<td colspan="2" class="small"><i>Uses default settings; very fast for PDB entries that have already been done, but no animated figures produced</i></td></tr>\n',
'</table>',
'</form>',
'</center>\n',
'<br><div class="warning">',
'TLSMD requires crystallographically refined B factors.',
'<br>Please do not submit NMR structures, theoretical models, ',
'<br>or any PDB file with unrefined Bs',
'</div>\n',
self.html_foot()]
return "".join(l)
class Submit2Page(Page):
def html_page(self):
title = 'TLSMD: Start a New Job'
l = [self.html_head(title, None),
html_title(title), html_nav_bar()]
run_mainchain_only = False
try:
job_id, run_mainchain_only = self.prepare_submission()
except SubmissionException, err:
l.append('<center><p class="perror">ERROR:<br>%s</p></center>' % (err))
else:
if run_mainchain_only:
l.append(self.job_edit_form(job_id, run_mainchain_only = True))
else:
l.append(self.job_edit_form(job_id, run_mainchain_only = False))
l.append(self.html_foot())
return "".join(l)
def job_edit_form(self, job_id, run_mainchain_only):
fdict = mysql.job_get_dict(job_id)
fdict["page"] = "submit3"
title = "Step 2: Fill out Submission Form, then Submit Job"
return html_job_edit_form2(fdict, title, run_mainchain_only)
def prepare_submission(self):
"""Prepares the uploaded structure by first running some sanity checks
on it.
"""
## class Submit2Page
if (self.form.has_key("pdbfile") == False or \
self.form["pdbfile"].file is None or \
self.form["pdbfile"].value <= ' '):
jobid = self.prepare_pdbid_entry()
return jobid, False
## allocate a new JobID
job_id = mysql.job_new()
## record user's IP address
ip_addr = os.environ.get("REMOTE_ADDR", "Unknown")
mysql.job_set_remote_addr(job_id, ip_addr)
## read in all of the lines in the structure file
infil = self.form["pdbfile"].file
line_list = []
while True:
ln = infil.readline()
if not ln:
break
line_list.append(ln)
## proceed no further if there were not sufficient lines in uploaded
## structure file
if len(line_list) < 10:
webtlsmdd.remove_job(job_id)
raise SubmissionException('Only received %d lines of upload' % (
len(line_list)))
## basic sanity checks (for non-via-pdb.org structures)
run_mainchain_only = False
r, tmpfile = check_upload(job_id, line_list, mainchain = False)
if r != '':
## "All atoms" failed the sanity check. Let's try just the
## mainchain atoms.
r, garbage = check_upload(job_id, line_list, mainchain = True)
if r != '':
## No good. The structure failed both sanity checks.
## Can not proceed with this structure.
raise SubmissionException(str(r))
else:
run_mainchain_only = True
## TODO: Figure out how to do this without webtlsmdd, 2009-05-29
## pass the PDB file to the application server
result = webtlsmdd.set_structure_file(job_id, xmlrpclib.Binary("".join(line_list)))
if result != "":
raise SubmissionException(result)
return job_id, run_mainchain_only
def prepare_pdbid_entry(self):
"""Prepares the entered pdb id by first running some sanity checks
on it.
"""
## class Submit2Page
pdbid = self.form["pdbid"].value.upper()
if vet_pdb_id(pdbid) == False:
if pdbid is None or pdbid == "":
raise SubmissionException("No PDB file uploaded and no PDB ID given. Please try again.")
else:
raise SubmissionException("Invalid PDB ID '"+pdbid+"'. Please try again.")
## allocate a new JobID
job_id = mysql.job_new()
## record user's IP address
ip_addr = os.environ.get("REMOTE_ADDR", "Unknown")
mysql.job_set_remote_addr(job_id, ip_addr)
## Fetch and upload PDB entry by PDB ID for custom analysis
if not vet_struct_id(pdbid, 4):
raise SubmissionException("Not a valid PDB structure ID")
pdbfile_bin = webtlsmdd.fetch_pdb(pdbid)
pdbentry = pdbfile_bin.data
if len(pdbentry) == 0:
raise SubmissionException("Could not download PDB entry "+pdbid+" from RCSB.")
## Custom analysis from PDB ID: simple sanity check
## basic sanity checks
## If check_upload returns anything but an empty string, the server will
## inform the user of the problem and not proceed any further.
ln = pdbentry.split("\n")
r, garbage = check_upload(job_id, ln, mainchain = False)
if r != '':
raise SubmissionException(str(r))
result = webtlsmdd.set_structure_file(job_id, xmlrpclib.Binary(pdbentry))
if result != "":
raise SubmissionException("Failed to submit structure for PDB ID "+pdbid+": " + str(result) + "<br>Please try again.")
return job_id
SUBMIT3_CAP1 = """\
You may monitor the progress of your TLSMD submission by its Job ID
on the Job Status page, available by clicking the link on the top
of this page. All queued, running and completed jobs are listed on
the Job Status page. Through this page you may explore the output
of your job, and look up your job by its Job ID if you have chosen to
keep your job private.
"""
class Submit3Page(Page):
def html_page(self):
try:
job_id = self.complete_submission()
except SubmissionException, err:
title = 'TLSMD: Job Submission Failed'
html = '<center><p class="perror">ERROR:<br>%s</p></center>' % (err)
else:
title = 'TLSMD: Job Submission Succeeded'
l = ['<center>',
'<table class="submit_table">',
'<tr><th class="step_title">',
'Step 3: Finished! Job successfully submitted.</th></tr>',
'<tr><td class="c">Your job ID is <B>%s</B></td></tr>' % (
job_id),
'<tr><td>%s</td></tr>' % (self.submission_summary_info(job_id)),
'<tr><td>',
'<p>Visit and bookmark your ',
'<a href="webtlsmd.cgi?page=explore&job_id=%s">Explore Job %s</a> ' % (
job_id, job_id),
'page; this page is the status page of your job, and it is ',
'updated as your job progresses through the queue. Once your ',
'job is complete, a link to the completed TLSMD analysis will appear ',
'on it.',
'</p>',
'<p>%s</p>' % (SUBMIT3_CAP1),
'</td></tr>',
'</table>',
'</center>']
html = "".join(l)
x = self.html_head(title, None)
x += html_title(title)
x += html_nav_bar()
x += html
x += self.html_foot()
return x
def complete_submission(self):
## check for submission key
if not self.form.has_key("submit"):
raise SubmissionException('Submission Error: Missing end of "submit"')
## get job_id; verify job exists
job_id = check_job_id(self.form)
if job_id is None:
raise SubmissionException('Submission Error: Could not assign job_id')
## make sure the job is in the right state to be submitted
state = mysql.job_get_state(job_id)
if state == "queued":
raise SubmissionException("Your job is already queued")
elif state == "running":
raise SubmissionException("Your job is already running")
## verify the submission IP address
## FIXME: This does not work yet, 2009-06-01
ip_addr = os.environ.get("REMOTE_ADDR", "Unknown")
#ip_addr_verify = mysql.job_get_remote_addr(job_id)
#if ip_addr != ip_addr_verify:
# raise SubmissionException('Submission IP Address Mismatch')
ip_addr_verify = ip_addr ## XXX: Temporary until above is fixed, 2009-06-05
## completely remove the job
if self.form["submit"].value == "Cancel Job Submission":
webtlsmdd.remove_job(job_id)
raise SubmissionException('You cancelled the job')
extract_job_edit_form(self.form)
## if everything with the form is okay, then change
## the job state to queued
mysql.job_set_state(job_id, "queued")
mysql.job_set_remote_addr(job_id, ip_addr_verify)
return job_id
def submission_summary_info(self, job_id):
"""Provides a summary table of the user-selected chains.
"""
## TODO: Post-sanity checks, 2009-01-08
#sanity = self.form["pdbfile"].value
chains = mysql.job_get_chain_sizes(job_id).rstrip(";")
## E.g.,
# name: CHAINA
# selected: True
# chain_id: A
# length: 39
# preview: MET ILE TYR ALA GLY
# desc: Chain A (39 Amino Acid Residues)
sum = '<table class="status_table">'
sum += '<tr class="status_table_head">'
sum += '<th>Chain</th><th>Analyze</th><th>Residues</th>'
#sum += '<th>Preview</th>
sum += '<th>Residue type</th>'
sum += '<th>Ignored residues/atoms</th>'
next_chain = ''
#for list in summary_data:
for c in chains.split(';'):
chid, length, selected, type = misc.parse_chains(c)
#if next_chain != list["chain_id"]:
if next_chain != chid:
sum += '</tr>'
row1 = True
next_chain = chid
if row1:
sum += '<tr class="status_table_row1">'
else:
sum += '<tr class="status_table_row2">'
row1 = not row1
## Chain id
sum += '<td class="c">%s</td>' % chid
## Analyze (i.e., chain selected by user)
if selected == "1":
sum += '<td class="c">True</td>'
else:
sum += '<td class="c">False</td>'
sum += '<td class="c">%s</td>' % length
## Preview
#sum += '<td>%s ...</td>' % list["preview"]
## Residue type
if type == "aa":
sum += '<td class="c">amino acid</td>'
elif type == "na":
sum += '<td class="c">nucleic acid</td>'
elif type == "ot":
sum += '<td class="c">other</td>'
## Ignored residues/atoms
sum += '<td class="c">none</td>'
sum += '</tr></table>'
return sum
class SubmitPDBPage(Page):
"""Handles requests submitted via a PDB ID
"""
def html_page(self):
if "pdbid" not in self.form:
raise SubmissionException("Please enter a PDB ID")
elif vet_pdb_id(self.form["pdbid"].value) == False:
raise SubmissionException("Invalid PDB ID. Please try again.")
pdbid = self.form["pdbid"].value.upper()
if not vet_struct_id(pdbid, 4):
raise SubmissionException("Not a valid PDB ID")
#if mysql.pdb_exists(pdbid) != None:
#raise SubmissionException("PDB: [%s]" % mysql.pdb_exists(pdbid)) ## DEBUG
if os.path.exists(conf.WEBTLSMDD_PDB_DIR + '/' + pdbid + '/ANALYSIS/index.html'):
return self.redirect_page(pdbid)
db_dir_head = conf.WEBTLSMDD_PDB_DIR
db_dir_tail = 'DATABASE/' + pdbid[1:3]
db_file = db_dir_head + '/' + db_dir_tail + '/'+ pdbid + '/ANALYSIS/index.html'
if os.path.exists(db_file):
return self.redirect_page_path(pdbid, db_dir_head, db_dir_tail)
pdbfile_bin = webtlsmdd.fetch_pdb(pdbid)
pdbfile = pdbfile_bin.data
if len(pdbfile) == 0:
raise SubmissionException("Could not download PDB File from RCSB.")
errors = 0
try:
mysql.set_pdb_db(pdbid)
except:
errors = 1
raise SubmissionException("Could not write to internal PDB DB")
l = []
if errors == 0:
job_id = self.prepare_submission(pdbfile)
mysql.job_set_via_pdb(job_id, "1")
mysql.job_set_jmol_view(job_id, "1")
mysql.job_set_jmol_animate(job_id, "1")
mysql.job_set_histogram(job_id, "1")
mysql.job_set_private_job(job_id, "0")
ip_addr = os.environ.get("REMOTE_ADDR", "Unknown")
mysql.job_set_remote_addr(job_id, ip_addr)
fdict = mysql.job_get_dict(job_id)
fdict["page"] = "submit3"
title = "Enter contact info:"
l = [self.html_head(title, None), html_title(title)]
l.append(html_job_edit_form(fdict, pdb=True))
l.append(self.html_foot())
return "".join(l)
def prepare_submission(self, pdbfile):
"""Run some sanity checks and if all is well, send the PDB as a
binary stream via XML-RPC to the webtlsmdd daemon.
"""
## class SubmitPDBPage
job_id = mysql.job_new()
## basic sanity checks
## If check_upload returns anything but an empty string, the server will
## inform the user of the problem and not proceed any further.
ln = pdbfile.split("\n")
r, garbage = check_upload(job_id, ln, mainchain = False)
if r != '':
raise SubmissionException(str(r))
result = webtlsmdd.set_structure_file(job_id, xmlrpclib.Binary(pdbfile))
if result != "":
raise SubmissionException("Failed to submit structure. Please try again.")
return job_id
def redirect_page(self, pdbid):
"""If a given PDB (from pdb.org) has already been analyzed, inform the
user and redirect them to the correct analysis page.
"""
## class SubmitPDBPage
## check to see if this job is still running
try:
os.chdir(conf.WEBTLSMDD_PDB_DIR + '/' + pdbid)
except OSError:
title = "This structure is currently being analyzed, please check back later."
page = [self.html_head(title),
html_title(title),
self.html_foot()]
return "".join(page)
title = "This structure has been analyzed previously"
analysis_url = "%s/pdb/%s/ANALYSIS" % (conf.TLSMD_PUBLIC_URL, pdbid)
analysis_title = "Analysis of %s" % (pdbid)
redirect = [self.html_head(title, redirect=analysis_url),
html_title(title),
'<center>',
'<br><h2>Click below to see the results:</h2>',
'<h3><a href="%s">%s</a>' % (analysis_url, analysis_title),
'<br><br>',
'<font size=-2>You will be redirected automatically in 10 seconds</font>'
'</center>'
]
redirect.append(self.html_foot())
return "".join(redirect)
def redirect_page_path(self, pdbid, path_head, path_tail):
"""If a given PDB (from pdb.org) has already been analyzed in the
TLSMD database, inform the user and redirect them to the correct
analysis page.
"""
## class SubmitPDBPage
## check to see if this job is still running
try:
os.chdir(os.path.join(path_head, path_tail, pdbid))
except OSError:
title = "This structure is currently being analyzed, please check back later."
page = [self.html_head(title),
html_title(title),
self.html_foot()]
return "".join(page)
title = "This structure has been analyzed previously"
analysis_url = "%s/pdb/%s/%s/ANALYSIS" % (conf.TLSMD_PUBLIC_URL, path_tail, pdbid)
analysis_title = "Analysis of %s" % (pdbid)
redirect = [self.html_head(title, redirect=analysis_url),
html_title(title),
'<center>',
'<br><h2>Click below to see the results:</h2>',
'<h3><a href="%s">%s</a>' % (analysis_url, analysis_title),
'<br><br>',
'<font size=-2>You will be redirected automatically in 10 seconds</font>'
'</center>'
]
redirect.append(self.html_foot())
return "".join(redirect)
def min_subsegment_stddev(atomnum, restype, resnum, chain, tfactor):
"""Calculates a running standard deviation for residue windows the same
size as whatever the global 'min_subsegment_size' in conf.py is set to.
"""
## TODO: Doesn't do anything yet, 2009-06-05
min_subsegment_size = conf.globalconf.min_subsegment_size
def running_stddev(tmpfile, atomnum, restype, resnum, chain, tfactor):
"""Calculates a running standard deviation for the average B-factors
of a given set of residues.
"""
n = atm = res_tfac = 0
avg_tfac = []
res_id = []
prevrestype = restype[0]
prevresnum = resnum[0]
prevchain = chain[0]
## Save B_{mean} per residue for each chain
fdat = open('%s/%s.dat' % (conf.WEBTMP_PATH, tmpfile),'w')
while n < len(tfactor):
if( (prevresnum == resnum[n]) and (prevrestype == restype[n]) ):
res_tfac = res_tfac + tfactor[n]
atm = atm + 1
else:
avg_tfac.append(res_tfac/atm) # store previous guy
res_id.append(resnum[n-1]) # store previous guy
## Now save values to *.dat file
fdat.write("%s\t%s\t%s\n" % (
resnum[n-1], res_tfac/atm, chain[n-1]))
res_tfac = tfactor[n]
atm = 1
prevrestype = restype[n]
prevresnum = resnum[n]
if(prevchain != chain[n]):
fdat.write("\n\n")
prevchain = chain[n]
n = n + 1
avg_tfac.append(res_tfac/atm) # store last guy
res_id.append(resnum[n-1]) # store last guy
## Save last value to *.dat file
fdat.write("%s\t%s\t%s\n" % (resnum[n-1], res_tfac/atm, chain[n-1]))
fdat.close()
## Save RMSD(B) +/-5 residues
### FIXME EAM
### Not correct, because it crosses chain boundaries
### and because the wrong value is calculated (std of mean,
### rather than the std of the atoms)
## TODO: Add chain_id to .std file, 2009-10-20
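## Illustrative sketch (not in the original code): a per-atom windowed
## RMSD(B), as the FIXME above suggests, would collect the raw atomic
## B-factors per residue window and restart the window at each chain
## boundary, e.g. numpy.std(window_of_atom_bfactors) for every +/-5
## residue window, rather than numpy.std() over the per-residue means
## stored in avg_tfac. Variable names here are illustrative only.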
nbad = 0
fstd = open('%s/%s.std' % (conf.WEBTMP_PATH, tmpfile),'w')
for s in range(5, len(avg_tfac)-5):
stddev11 = numpy.std(avg_tfac[s-5:s+5])
fstd.write("%s\t%s\n" % (res_id[s], stddev11))
if stddev11 < conf.MIN_STDDEV_BFACT or \
stddev11 > conf.MAX_STDDEV_BFACT:
nbad = nbad + 1
if (s < len(res_id)) and (res_id[s+1] < res_id[s]):
fstd.write("\n\n")
fstd.close()
return nbad, tmpfile
_STDDEV_FOR_BAD_TFACT_TEMPLATE = """\
set style fill solid 0.15 noborder
set style data linespoints
set output '<webtmp_path>/<tmpfile>.png'
set yrange [0:*]
set ytics nomirror tc rgb 'blue'
#set y2range [0:1]
set y2label 'Å^2' norotate tc rgb 'red'
set y2tics nomirror tc rgb 'red'
set format y2 '%.1f'
set xlabel 'residue number'
set grid
set title 'Distribution of B factors in submitted structure (Å^2)'
set term png font '<gnuplot_font>' enhanced truecolor
plot '<webtmp_path>/<tmpfile>.std' using 1:($2<0.01 || $2>60.0) ? 999 : 0 axes x1y2 w filledcurve lt -1 notitle, \\
'<webtmp_path>/<tmpfile>.dat' using 1:2:(1+column(-2)) axes x1y1 with lines lc var title 'B_{mean} per residue', \\
'<webtmp_path>/<tmpfile>.std' using 1:2 axes x1y2 lt 1 pt 1 title 'RMSD(B) +/-5 residues', \\
0.05 axes x1y2 with lines lc rgb 'red' notitle
"""
def check_upload(job_id, file, mainchain = None):
"""Runs sanity checks on uploaded structure file.
"""
## NOTE:
## - Requires uploaded structures to be X-ray EXPDTA
## - Checks if the PDB file contains valid aa/na residues
## - PDB file must have at least 30 ATOMs
## - PDB file can not have lowercase alt. res. numbers
## - Checks standard deviation of temp. factors
## - Checks that not all occupancies are 0.00
## - Checks for properly formatted ATOM lines
tmpfile = None ## this is the second part of the return
atom_num = []
res_type = []
res_num = []
chain = []
temp_factors = []
bad_std = -1
num_total = 0
num_good = 0
occupancy = 0.0
ignore = 0
line_num = 0
for line in file:
line_num += 1
if line.startswith('HEADER'):
header_id = re.sub(r"^HEADER.{56}(....)", '\\1', line).strip()
## FIXME: Calls to MySQL can not be made in this def, 2009-06-16
#mysql.job_set_header_id(job_id, str(header_id))
#if line.startswith('EXPDTA NMR') or \
# line.startswith('EXPDTA SOLUTION NMR'):
# ## TODO: Might need to add "SOLID-STATE NMR", 2009-11-10
# msg = "NMR structure! "
# msg += "Please do not submit NMR structures, theoretical models, "
# msg += "or any PDB file with unrefined Bs."
# return msg
elif line.startswith('EXPDTA') and line.find('X-RAY DIFFRACTION') == -1:
msg = "Not an X-ray diffraction structure. TLSMD currently only "
msg += "performs analysis on X-ray models. Will not proceed."
return msg, tmpfile
elif re.match(r'^REMARK 2 RESOLUTION\. ([0-9\.]{1,}) ANGSTROMS.*', line):
resolution = re.sub(r'^REMARK 2 RESOLUTION\. ([0-9\.]{1,}) ANGSTROMS.*', '\\1', line).strip()
## FIXME: Calls to MySQL can not be made in this def, 2009-06-16
#mysql.job_set_resolution(job_id, resolution)
elif re.match('^ATOM.....................[0-9][a-z]', line):
## E.g., Don't allow "100b". Force it to be "100B"
example = re.sub(r'^ATOM.....................([0-9][a-z]).*', '\\1', line).strip()
msg = "Please change lowercase to uppercase for alternate "
msg += "residue numbers. (E.g., change \" %s \" to \" %s \")" % (
example, example.upper())
return msg, tmpfile
elif mainchain == True and line.startswith('ATOM') and \
const.RE_MAINCHAIN_ATOMS.match(line) and \
Library.library_is_standard_residue(line[17:20].strip()):
## Only pass mainchain atoms to the running_stddev() function
tmpfile = misc.generate_security_code()
num_total += 1
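## The fixed-width slices below read the PDB ATOM fields as this script
## interprets them: [7:11] atom serial, [17:20] residue name, [21:22]
## chain id, [23:26] residue number, [56:60] occupancy, [60:66] B-factor.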
try:
int(line[7:11].strip())
int(line[23:26].strip())
float(line[56:60].strip())
float(line[60:66].strip())
except:
return "Not a proper ATOM line: <pre>%s</pre>" % line, tmpfile
if float(line[56:60].strip()) < 1.00:
## ignore occupancies < 1.00
ignore += 1
continue
else:
num_good += 1
atom_num.append(int(line[7:11].strip()))
res_type.append(line[17:20].strip())
res_num.append(int(line[23:26].strip()))
chain.append(line[21:22])
occupancy += float(line[56:60].strip())
temp_factors.append(float(line[60:66].strip()))
elif mainchain == False and line.startswith('ATOM') and (
Library.library_is_standard_residue(line[17:20].strip())):
tmpfile = job_id
num_total += 1
try:
int(line[7:11].strip())
int(line[23:26].strip())
float(line[56:60].strip())
float(line[60:66].strip())
except:
return "Not a proper ATOM line: <pre>%s</pre>" % line, tmpfile
if float(line[56:60].strip()) < 1.00:
## ignore occupancies < 1.00
ignore += 1
continue
else:
num_good += 1
atom_num.append(int(line[7:11].strip()))
res_type.append(line[17:20].strip())
res_num.append(int(line[23:26].strip()))
chain.append(line[21:22])
occupancy += float(line[56:60].strip())
temp_factors.append(float(line[60:66].strip()))
else:
continue
#return "Number of atoms: %s (%s) (%s)" % (num_total, len(temp_factors), num_good)
## TODO: Add check for ANISOU that are pure ISOT, 2010-03-23
## FIXME: This does not work yet.
#if(ignore == num_total):
# return "All occupancies are less than 1.0, so all atoms will be ignored. Nothing to do."
msg = "Not a PDB structure or has unrecognized residue names."
if mainchain and num_good < 5:
return msg, tmpfile
elif not mainchain and num_good < 30:
return msg, tmpfile
if(occupancy / num_good == 0.0):
return "All occupancies are 0.0. TLSMD won't run on this structure.", tmpfile
bad_std, tmpfile = running_stddev(tmpfile, atom_num, res_type, res_num,
chain, temp_factors)
if bad_std > 0:
## If there are a string of "bad" B-factors, return a plot showing the
## "bad" regions and do not proceed any further in the analysis.
f = open('%s/%s.gnu' % (conf.WEBTMP_PATH, tmpfile), 'w')
## modify script template
script = _STDDEV_FOR_BAD_TFACT_TEMPLATE
script = script.replace("<webtmp_path>", conf.WEBTMP_PATH)
script = script.replace("<tmpfile>", tmpfile)
script = script.replace("<gnuplot_font>", conf.GNUPLOT_FONT)
#script = script.replace("<min_stddev_bfact>", conf.MIN_STDDEV_BFACT)
#script = script.replace("<max_stddev_bfact>", conf.MAX_STDDEV_BFACT)
f.write(script)
f.close()
subprocess.Popen([r"%s" % conf.GNUPLOT, "%s/%s.gnu" % (
conf.WEBTMP_PATH, tmpfile)]).wait()
return_string = "Standard deviation of temperature factors is less "
return_string += "than %s or greater than %s for those residues in " % (
conf.MIN_STDDEV_BFACT, conf.MAX_STDDEV_BFACT)
return_string += "the shaded regions below:<br>"
return_string += "<center><img src='%s/%s.png'/></center>" % (
conf.WEBTMP_URL, tmpfile)
return_string += "<br><h3>NOTE: Your structure was run through a "
return_string += "sanity check twice: (1) using all atoms in your "
return_string += "structure; and (2) using only the mainchain atoms "
return_string += "({N,CA,C,O,CB} or {P,O5*,C5*,C4*,C3*,O3*}). "
return_string += "Both sanity checks failed.</h3>"
return return_string, tmpfile
return '', tmpfile
def main():
page = None
form = cgi.FieldStorage()
if form.has_key("page"):
if form["page"].value == "explore":
page = ExploreJobPage(form)
elif form["page"].value == "admin":
page = AdminJobPage(form)
elif form["page"].value == "submit1":
page = Submit1Page(form)
elif form["page"].value == "submit2":
page = Submit2Page(form)
elif form["page"].value == "submit3":
page = Submit3Page(form)
elif form["page"].value == "submit_pdb":
page = SubmitPDBPage(form)
if page is None:
page = QueuePage(form)
try:
print page.html_page()
except xmlrpclib.Fault, fault:
fault_html = "xmlrpclib.Fault:<br>"
fault_html += "fault code: %s<br>fault string: %s" % (
fault.faultCode, fault.faultString.replace("\n","<br>"))
page = ErrorPage(form, fault_html)
print page.html_page()
except socket.error, err:
page = ErrorPage(form, "socket.error: " + str(err))
print page.html_page()
except SubmissionException, err:
page = ErrorPage(form, str(err))
print page.html_page()
if __name__=="__main__":
main()
sys.exit(0)
```
#### File: src/tlsmd/run_tlsmd_bot.py
```python
import os
import sys
import time
import socket
import string
import random
import math
import numpy
import re
import xmlrpclib
import subprocess
## Pymmlib
from mmLib import Library ## checks if is_{amino,nucleic}_acid()
## TLSMD
from tlsmdlib import conf, console, const, misc, mysql_support
## GLOBALS
webtlsmdd = xmlrpclib.ServerProxy(conf.WEBTLSMDD)
mysql = mysql_support.MySQLConnect()
## JOB SELECT (example):
# mysql -B -N -e 'select id from pdb.remarks where tlsmd IS NULL order by rand() limit 1;'
# for i in `mysql -B -N -e 'select pdb_id from tlsmddb.via_pdb;'`; do mysql -e "UPDATE pdb.remarks SET tlsmd='1' WHERE id='$i';"; done
def timestring(secs):
tm_struct = time.localtime(secs)
return time.strftime("%Y-%m-%d %H:%M %Z", tm_struct)
def secdiffstring(secs):
secs = int(secs)
hours = secs / 3600
secs = secs - (hours * 3600)
min = secs / 60
secs = secs - (min * 60)
x = "%1d:%2d.%2d" % (hours, min, secs)
return x.replace(" ", "0")
def timediffstring(begin, end):
secs = int(end - begin)
return secdiffstring(secs)
def left_justify_string(keyword, value):
"""Returns a string with dotted separation.
"""
return '%s' % keyword.ljust(40, ".") + ": " + '%s\n' % value
def check_job_id(form):
"""Retrieves and confirms the job_id from a incomming form. Returns
None on error, or the job_id on success.
"""
if form.has_key("job_id"):
job_id = form["job_id"].value
if len(job_id) < conf.MAX_JOB_ID_LEN:
if job_id.startswith("TLSMD"):
if mysql.job_exists(job_id):
return job_id
return None
def vet_struct_id(data, max_len):
if isinstance(data, unicode):
return False
if len(data) > max_len:
return False
if not data.isalnum():
return False
return True
def start_job(pdbid):
pdbid = pdbid.upper()
if mysql.pdb_exists(pdbid) != None:
return "PDB: %s was already run" % pdbid
pdbfile_bin = webtlsmdd.fetch_pdb(pdbid)
pdbfile = pdbfile_bin.data
if len(pdbfile) == 0:
return "FAILED: Could not download PDB %s from RCSB." % pdbid
job_id = prepare_submission(pdbfile)
try:
mysql.set_pdb_db(pdbid)
except:
return "ERROR: Could not write to internal PDB DB"
mysql.job_set_via_pdb(job_id, "1")
mysql.job_set_jmol_view(job_id, "0")
mysql.job_set_jmol_animate(job_id, "0")
mysql.job_set_histogram(job_id, "0")
mysql.job_set_private_job(job_id, "0")
ip_addr = os.environ.get("REMOTE_ADDR", "Unknown")
mysql.job_set_remote_addr(job_id, ip_addr)
mysql.job_set_state(job_id, "queued")
return "NOTE: Starting PDB %s with job_id %s" % (pdbid, job_id)
def prepare_submission(pdbfile):
"""class SubmitPDBPage
"""
job_id = mysql.job_new()
## basic sanity checks
## If check_upload returns anything but an empty string, the server will
## inform the user of the problem and not proceed any further.
ln = pdbfile.split("\n")
r = check_upload(job_id, ln)
if r != '':
console.stdoutln("WARNING: %s" % str(r))
sys.exit(0)
result = webtlsmdd.set_structure_file(job_id, xmlrpclib.Binary(pdbfile))
if result != "":
console.stdoutln("ERROR: Failed to submit structure. %s. Please try again." % result)
sys.exit(0)
return job_id
def redirect_page(self, pdbid):
return ""
def running_stddev(atomnum, restype, resnum, chain, tfactor):
"""Calculates a running standard deviation for the average B-factors
of a given set of residues (controlled by the 'window' variable).
"""
tmpfile = misc.generate_security_code()
n = atm = res_tfac = 0
avg_tfac = []
res_id = []
prevrestype = restype[0]
prevresnum = resnum[0]
prevchain = chain[0]
## Save B_{mean} per residue for each chain
while n < len(tfactor):
if( (prevresnum == resnum[n]) and (prevrestype == restype[n]) ):
res_tfac = res_tfac + tfactor[n]
atm = atm + 1
else:
avg_tfac.append(res_tfac/atm) # store previous guy
res_id.append(resnum[n-1]) # store previous guy
res_tfac = tfactor[n]
atm = 1
prevrestype = restype[n]
prevresnum = resnum[n]
if(prevchain != chain[n]):
prevchain = chain[n]
n = n + 1
avg_tfac.append(res_tfac/atm) # store last guy
res_id.append(resnum[n-1]) # store last guy
## Save RMSD(B) +/-5 residues
## FIXME EAM
## Not correct, because it crosses chain boundaries and because the wrong
## value is calculated (std of mean, rather than the std of the atoms)
nbad = 0
for s in range(5, len(avg_tfac)-5):
stddev11 = numpy.std(avg_tfac[s-5:s+5])
if stddev11 < conf.MIN_STDDEV_BFACT or stddev11 > conf.MAX_STDDEV_BFACT:
nbad = nbad + 1
return nbad, tmpfile
def check_upload(job_id, file):
"""Runs sanity checks on uploaded file
"""
## Checks if PDB contains valid aa/na residues
## PDB must have at least 30 ATOMs
## PDB can not have lowercase alt. res. numbers
## Check Standard deviation of temp. factors
## Check that not all occupancies are 0.00
atom_num = []
res_type = []
res_num = []
chain = []
temp_factors = []
bad_std = -1
num_total = 0
num_good = 0
occupancy = 0.0
ignore = 0
line_num = 0
for line in file:
line_num += 1
if line.startswith('HEADER'):
header_id = re.sub(r"^HEADER.{56}(....)", '\\1', line).strip()
elif line.startswith('EXPDTA NMR'):
return "NMR structure! Skipping: %s [%s]" % (job_id, header_id)
elif re.match(r'^REMARK 2 RESOLUTION\. ([0-9\.]{1,}) ANGSTROMS.*', line):
resolution = re.sub(r'^REMARK 2 RESOLUTION\. ([0-9\.]{1,}) ANGSTROMS.*', '\\1', line).strip()
elif re.match('^ATOM.....................[0-9][a-z]', line):
## E.g., Don't allow "100b". Force it to be "100B"
return "Lowercase alternate residue names: %s [%s]" % (job_id, header_id)
elif line.startswith('ATOM') and (
Library.library_is_standard_residue(line[17:20].strip())):
num_total += 1
if float(line[56:60].strip()) < 1.00:
## ignore occupancies < 1.00
ignore += 1
continue
else:
num_good += 1
atom_num.append(int(line[7:11].strip()))
res_type.append(line[17:20].strip())
res_num.append(int(line[23:26].strip()))
chain.append(line[21:22])
occupancy += float(line[56:60].strip())
temp_factors.append(float(line[60:66].strip()))
else:
continue
if(len(atom_num) < 30):
return "Not a PDB structure or has unrecognized residue names: %s [%s]" % (
job_id, header_id)
if(occupancy / num_good == 0.0):
return "All occupancies are 0.0. TLSMD won't run on this structure: %s [%s]" % (
job_id, header_id)
bad_std, tmpfile = running_stddev(atom_num, res_type, res_num, chain, temp_factors)
if bad_std > 0:
## If there are a string of "bad" B-factors, return a plot showing the
## "bad" regions and do not proceed any further in the analysis.
return_string = "STDDEV %s > Bfact < %s for job_id: %s [%s]" % (
conf.MAX_STDDEV_BFACT, conf.MIN_STDDEV_BFACT, job_id, header_id)
return return_string
return ''
def main():
try:
pdbid = sys.argv[1]
except IndexError:
sys.exit(1)
r = start_job(pdbid.upper())
console.stdoutln("%s" % r)
if __name__=="__main__":
main()
sys.exit(0)
```
#### File: src/tlsmd/webtlsmdd.py
```python
import os
import sys
import shutil
import time
import string
import random
import traceback
import glob ## needed for glob.glob() in Refmac5RefinementPrep()
## NOTE: The order of these signals is important!
from signal import SIG_IGN ## Needed for daemon_main()
from signal import SIGUSR1 ## Needed for SignalJob()
from signal import SIGHUP ## Needed for KillJob()
import signal
import cPickle
#import bsddb
import socket
import xmlrpclib
import SocketServer
import SimpleXMLRPCServer
import urllib
import gzip ## for fetching PDBs from pdb.org
import StringIO
## pymmlib
from mmLib import FileIO
## TLSMD
from tlsmdlib import conf, const, tls_calcs, email, misc, mysql_support
mysql = mysql_support.MySQLConnect()
def fatal(text):
sys.stderr.write("[FATAL ERROR] %s\n" % (text))
raise SystemExit
def SetStructureFile(webtlsmdd, job_id, struct_bin):
"""Creates job directory, saves structure file to the job directory,
and sets all jdict defaults.
"""
if not mysql.job_exists(job_id):
return False
try:
os.chdir(conf.TLSMD_WORK_DIR)
except OSError:
return "Unable to change to conf.TLSMD_WORK_DIR = '%s'" % (
conf.TLSMD_WORK_DIR)
## NOTE: This is the first place the webserver creates the job directory
try:
os.mkdir(job_id)
except OSError:
return "Unable to make job directory %s" % (job_id)
job_dir = os.path.join(conf.TLSMD_WORK_DIR, job_id)
os.chdir(job_dir)
try:
os.mkdir("ANALYSIS")
except OSError:
return "Unable to make ANALYSIS sub-directory for job_id: %s" % (
job_id)
## Copy sanity.png from "All atoms" sanity check (in tlsmdlib/webtlsmd.py)
## to job_dir
try:
src_png_file = "%s/%s.png" % (conf.WEBTMP_PATH, job_id)
dst_png_file = "%s/%s/sanity.png" % (conf.TLSMD_WORK_DIR, job_id)
if os.path.exists(src_png_file):
shutil.copy(src_png_file, dst_png_file)
except OSError:
return "Unable to copy sanity.png for job_id: %s" % job_id
#mysql.job_set_id(job_id)
#mysql.job_set_header_id(job_id, "test") ## DEBUG
## save PDB file
pdb_filename = conf.PDB_FILENAME
filobj = open(pdb_filename, "w")
filobj.write(struct_bin.data)
filobj.close()
## Generate summary/thumb 'struct.png' image
if conf.THUMBNAIL:
misc.render_struct(job_dir)
## Generate 'struct.r3d' for Raster3D
if conf.GEN_RAW_GREY:
misc.generate_raw_grey_struct(job_dir)
## set basic properties of the job
job_url = "%s/%s" % (conf.TLSMD_WORK_URL, job_id)
log_url = "%s/log.txt" % (job_url)
log_file = "%s/log.txt" % (job_dir)
if not os.path.exists(log_file):
open(log_file, 'w').close() ## touch log.txt
#tarball_url = "%s/%s.tar.gz" % (job_url, job_id)
analysis_dir = "%s/ANALYSIS" % (job_dir)
analysis_base_url = "%s/ANALYSIS" % (job_url)
analysis_url = "%s/ANALYSIS/index.html" % (job_url)
## TODO: Add version to MySQL status_page table
#mysql.job_set_version(job_id, const.VERSION)
## submission time and initial state
submit_time = time.time()
mysql.job_set_state(job_id, "submit1")
mysql.job_set_submit_time(job_id, submit_time)
## This is for internal use only
tm_struct = time.localtime(submit_time)
submit_date = time.strftime("%Y-%m-%d %H:%M:%S", tm_struct)
mysql.job_set_submit_date(job_id, submit_date)
## now load the structure and build the submission form
try:
struct = FileIO.LoadStructure(fil = pdb_filename)
except:
return "The Python Macromolecular Library was unable to load your structure file."
if not struct.structure_id:
struct.structure_id = "XXXX"
mysql.job_set_structure_id(job_id, struct.structure_id)
## Select Chains for Analysis
num_atoms = 0
num_aniso_atoms = 0
largest_chain_seen = 0
chain_descriptions = ""
chains = []
for chain in struct.iter_chains():
naa = chain.count_amino_acids()
nna = chain.count_nucleic_acids()
ota = chain.count_fragments()
num_frags = 0
## minimum number of residues (amino/nucleic) per chain
## TODO: Does this work better? 2009-07-24
if naa > 0:
if naa < conf.MIN_AMINO_PER_CHAIN:
continue
num_frags = naa
largest_chain_seen = max(naa, largest_chain_seen)
elif nna > 0:
if nna < conf.MIN_NUCLEIC_PER_CHAIN:
continue
num_frags = nna
largest_chain_seen = max(nna, largest_chain_seen)
elif naa == 0 and nna == 0:
## The chain has neither amino nor nucleic acid atoms, so assign
## num_frags = ota -> "other atom" types
num_frags = ota
## this chain has nucleic acids in it, so generate r3d file for
## just the sugars
misc.generate_bases_r3d(job_dir, chain.chain_id)
misc.generate_sugars_r3d(job_dir, chain.chain_id)
## TODO: Allow for MIN_NUCLEIC_PER_CHAIN and MIN_AMINO_PER_CHAIN diffs, 2009-07-19
## TODO: Record ignored chains (because too small) in logfile, 2009-07-19
#if num_frags < conf.MIN_RESIDUES_PER_CHAIN:
#if naa < conf.MIN_AMINO_PER_CHAIN or nna < conf.MIN_NUCLEIC_PER_CHAIN:
#if (naa > 0 and naa < conf.MIN_AMINO_PER_CHAIN) or\
# (nna > 0 and nna < conf.MIN_NUCLEIC_PER_CHAIN):
# #log_file = open(log_file, 'w+')
# #log_file.write("Ignoring chain %s; too small" % chain.chain_id)
# #log_file.close()
# continue
## create chain description labels
## E.g., chains_descriptions = "A:10:0:aa;B:20:1:na;C:30:0:na;"
chain_descriptions = chain_descriptions + chain.chain_id + ":"
if naa > 0:
chain_descriptions = chain_descriptions + str(num_frags) + ":1:aa;"
elif nna > 0:
chain_descriptions = chain_descriptions + str(num_frags) + ":1:na;"
else:
chain_descriptions = chain_descriptions + str(num_frags) + ":0:ot;"
for atm in chain.iter_all_atoms():
num_atoms += 1
if atm.U is not None:
num_aniso_atoms += 1
if num_atoms < 1:
webtlsmdd.remove_job(job_id)
return 'Your submitted structure contained no atoms'
if largest_chain_seen > conf.LARGEST_CHAIN_ALLOWED:
webtlsmdd.remove_job(job_id)
return 'Your submitted structure contained a chain exceeding the %s residue limit' % (
conf.LARGEST_CHAIN_ALLOWED)
mysql.job_set_chain_sizes(job_id, chain_descriptions)
## set defaults
mysql.job_set_user_name(job_id, "")
mysql.job_set_email(job_id, "")
mysql.job_set_user_comment(job_id, conf.globalconf.user_comment)
mysql.job_set_plot_format(job_id, "PNG")
mysql.job_set_nparts(job_id, conf.globalconf.nparts)
mysql.job_set_via_pdb(job_id, "0")
mysql.job_set_private_job(job_id, "0")
mysql.job_set_jmol_view(job_id, "0")
mysql.job_set_jmol_animate(job_id, "0")
mysql.job_set_histogram(job_id, "0")
mysql.job_set_cross_chain_analysis(job_id, "0")
if conf.PRIVATE_JOBS:
mysql.job_set_private_job(job_id, "1")
if conf.globalconf.generate_jmol_view:
mysql.job_set_jmol_view(job_id, "1")
if conf.globalconf.generate_jmol_animate:
mysql.job_set_jmol_animate(job_id, "1")
if conf.globalconf.generate_histogram:
mysql.job_set_histogram(job_id, "1")
if conf.globalconf.cross_chain_analysis:
mysql.job_set_cross_chain_analysis(job_id, "1")
try:
aniso_ratio = float(num_aniso_atoms) / float(num_atoms)
except ZeroDivisionError:
return 'Your submitted structure contained no atoms'
if aniso_ratio > conf.ANISO_RATIO:
mysql.job_set_tls_model(job_id, "ANISO")
else:
mysql.job_set_tls_model(job_id, "ISOT")
mysql.job_set_weight_model(job_id, "NONE")
mysql.job_set_include_atoms(job_id, "ALL")
return ""
def RequeueJob(webtlsmdd, job_id):
"""Pushes job to the end of the list.
"""
## FIXME: This will no longer work! The BerkeleyDB code has been removed
## and now we must use MySQL, 2009-06-29
if mysql.job_get_state(job_id) == 'running':
return False
else:
return False ## temp. until fixed, 2009-07-01
#gdict = webtlsmdd.jobdb.retrieve_globals()
#job_num = gdict['next_job_num']
#gdict['next_job_num'] = job_num + 1
#webtlsmdd.jobdb.store_globals(gdict)
#webtlsmdd.jobdb.job_data_set(job_id, 'job_num', job_num)
#return True
def RemoveJob(webtlsmdd, job_id):
"""Removes the job from both the database and working directory.
If job is still running when this function is called, it will first call
KillJob(), then remove the associated data and files.
"""
if not mysql.job_exists(job_id):
return False
try:
job_dir = os.path.join(conf.TLSMD_WORK_DIR, job_id)
shutil.rmtree(job_dir)
except:
return False
## TODO: Also delete data in 'pdb_list' and 'via_pdb' tables, _if_ they
## were submitted via pdb.org. 2010-04-01
mysql.delete_jdict(job_id)
return True
def SignalJob(webtlsmdd, job_id):
"""Causes a job stuck on a certain task to skip that step and move on to
the next step. It will eventually have a state "warnings".
"""
## FIXME: Doesn't seem to work, 2009-06-12
if not mysql.job_exists(job_id):
return False
job_dir = os.path.join(conf.TLSMD_WORK_DIR, job_id)
if job_dir and os.path.isdir(job_dir):
try:
pid = int(mysql.job_get_pid(job_id))
except:
return False
try:
## Send signal SIGUSR1 to try to continue the stuck job process.
os.kill(pid, SIGUSR1)
os.waitpid(pid, os.WNOHANG)
except:
return False
return True
def KillJob(webtlsmdd, job_id):
"""Kills jobs in state "running" by pid and moves them to the
"Completed Jobs" section as "killed" state.
"""
## FIXME: We want to keep the job_id around in order to inform the user
## that their job has been "killed", 2009-05-29
if not mysql.job_exists(job_id):
return False
job_dir = os.path.join(conf.TLSMD_WORK_DIR, job_id)
if job_dir and os.path.isdir(job_dir):
try:
if mysql.job_get_pid(job_id) == None:
sys.stderr.write("[WARNING]: Could not find job pid in database for job_id: %s\n" % job_id)
return False
else:
pid = int(mysql.job_get_pid(job_id))
sys.stderr.write("[NOTE]: Found pid='%s' for job_id: %s\n" % (pid, job_id))
except:
sys.stderr.write("[ERROR]: Could not connect to database for job_id: %s\n" % job_id)
return False
try:
os.kill(pid, SIGHUP)
os.waitpid(pid, os.WNOHANG)
sys.stderr.write("[NOTE]: Killing pid for job_id: %s\n" % job_id)
except:
sys.stderr.write("[ERROR]: Could not kill pid for job_id: %s\n" % job_id)
return False
return True
def Refmac5RefinementPrep(job_id, struct_id, chain_ntls, wilson):
"""Called with a list of tuples (job_id, struct_id, [chain_id, ntls], wilson).
Generates PDB and TLSIN files for refinement with REFMAC5 + PHENIX.
Returns a single string if there is an error, otherwise a
dictionary of results is returned.
"""
try:
struct_id = mysql.job_get_structure_id(job_id)
except:
return "Could not find the directory related to job_id: %s" % job_id
if mysql.job_get_via_pdb(job_id) == 1:
## If a job was submitted via pdb.org, the results/analyses files are
## in a different directory/path and so does the URL.
pdb_id = struct_id
job_dir = os.path.join(conf.WEBTLSMDD_PDB_DIR, pdb_id)
job_url = os.path.join(conf.TLSMD_PUBLIC_URL, "pdb", pdb_id)
analysis_dir = os.path.join(job_dir, "ANALYSIS")
analysis_base_url = "%s/ANALYSIS" % (job_url)
else:
## User-submitted (non-pdb.org) results/analyses files are in the
## standard place (aka directory/path/url) and are deleted every
## DELETE_DAYS (see webtlsmdcleanup.py) days.
job_dir = os.path.join(conf.TLSMD_WORK_DIR, job_id)
job_url = os.path.join(conf.TLSMD_PUBLIC_URL, "jobs", job_id)
analysis_dir = os.path.join(job_dir, "ANALYSIS")
analysis_base_url = "%s/ANALYSIS" % (job_url)
if not os.path.isdir(analysis_dir):
return "Job analysis directory does not exist: %s" % analysis_dir
old_dir = os.getcwd()
os.chdir(analysis_dir)
## input structure
pdbin = "%s.pdb" % (struct_id)
if not os.path.isfile(pdbin):
pdbin = None
for pdbx in glob.glob("*.pdb"):
if len(pdbx) == 8:
struct_id = pdbx[:4]
pdbin = pdbx
break
if pdbin is None:
os.chdir(old_dir)
return "Input PDB File %s Not Found" % (pdbin)
## the per-chain TLSOUT files from TLSMD must be merged
tlsins = []
for chain_id, ntls in chain_ntls:
tlsin = "%s_CHAIN%s_NTLS%d.tlsout" % (struct_id, chain_id, ntls)
if not os.path.isfile(tlsin):
os.chdir(old_dir)
return "Input TLSIN File %s Not Found" % (tlsin)
tlsins.append(tlsin)
job_num = job_id.split("_")[0]
secure_dir = job_id.split("_")[1]
if not os.path.exists(secure_dir):
os.mkdir(secure_dir)
## form unique pdbout/tlsout filenames from job_id
pdbout1 = "%s/%s_TLS+Biso.pdb" % (secure_dir, job_num)
pdbout2 = "%s/%s_pureTLS.pdb" % (secure_dir, job_num)
## the tlsout from this program is going to be the tlsin
## for refinement, so it's important for the filename to have
## the tlsin extension so the user is not confused
tlsout1 = "%s/%s_TLS+Biso.tlsin" % (secure_dir, job_num)
tlsout2 = "%s/%s_pureTLS.tlsin" % (secure_dir, job_num)
phenix = "%s/%s.phenix" % (secure_dir, job_num)
## make urls for linking
pdbout_url1 = "%s/%s" % (analysis_base_url, pdbout1)
pdbout_url2 = "%s/%s" % (analysis_base_url, pdbout2)
tlsout_url1 = "%s/%s" % (analysis_base_url, tlsout1)
tlsout_url2 = "%s/%s" % (analysis_base_url, tlsout2)
phenix_url = "%s/%s" % (analysis_base_url, phenix)
## create the REFMAC/PHENIX files
tls_calcs.refmac5_prep(pdbin, tlsins, pdbout1, tlsout1)
tls_calcs.phenix_prep(pdbin, tlsins, phenix)
tls_calcs.refmac_pure_tls_prep(pdbin, tlsins, wilson, pdbout2, tlsout2)
os.chdir(old_dir)
return dict(pdbout1 = pdbout1,
pdbout_url1 = pdbout_url1,
pdbout2 = pdbout2,
pdbout_url2 = pdbout_url2,
tlsout1 = tlsout1,
tlsout_url1 = tlsout_url1,
tlsout2 = tlsout2,
tlsout_url2 = tlsout_url2,
phenix = phenix,
phenix_url = phenix_url)
class WebTLSMDDaemon():
def __init__(self):
self.jobdb = None
def set_structure_file(self, job_id, struct_bin):
"""Creates job directory, saves structure file to the job directory,
and sets all jdict defaults.
"""
return SetStructureFile(self, job_id, struct_bin)
def remove_job(self, job_id):
"""Removes the job from both the database and working directory.
If job is still running when this function is called, it will first call
KillJob(), then remove the associated data and files.
"""
try:
KillJob(self, job_id)
except:
pass
return RemoveJob(self, job_id)
def delete_job(self, job_id):
"""Removes/Deletes the job from both the database and working directory.
Note that this will only be called for jobs that are no longer running.
"""
return RemoveJob(self, job_id)
def signal_job(self, job_id):
"""Signals a job stuck on a certain task to skip that step and move on
to the next step. It will eventually have a state 'warnings'.
"""
return SignalJob(self, job_id)
def kill_job(self, job_id):
"""Kills jobs in state 'running' by pid and moves them to the
'Completed Jobs' section as 'killed' state.
"""
return KillJob(self, job_id)
def requeue_job(self, job_id):
"""Pushes the job to the back of the queue.
"""
return RequeueJob(self, job_id)
def refmac5_refinement_prep(self, job_id, struct_id, chain_ntls, wilson):
"""Called with a list of tuples (job_id, struct_id, [chain_id, ntls], wilson).
Generates PDB and TLSIN files for refinement with REFMAC5 + PHENIX.
Returns a single string if there is an error, otherwise a
dictionary of results is returned.
"""
return Refmac5RefinementPrep(job_id, struct_id, chain_ntls, wilson)
def fetch_pdb(self, pdbid):
"""Retrieves the PDB file from RCSB.
"""
try:
cdata = urllib.urlopen("%s/%s.pdb.gz" % (conf.GET_PDB_URL,pdbid)).read()
sys.stdout.write("FOUND PDB: %s" % pdbid)
data = gzip.GzipFile(fileobj = StringIO.StringIO(cdata)).read()
except IOError:
return xmlrpclib.Binary("")
return xmlrpclib.Binary(data)
class WebTLSMD_XMLRPCRequestHandler(SimpleXMLRPCServer.SimpleXMLRPCRequestHandler):
"""Override the standard XMLRPC request handler to open the database before
calling the method.
"""
## TODO: Can this be removed? 2009-06-01
def handle(self):
#self.server.webtlsmdd.jobdb = JobDatabase(self.server.webtlsmdd.db_file)
return SimpleXMLRPCServer.SimpleXMLRPCRequestHandler.handle(self)
class WebTLSMD_XMLRPCServer(
SocketServer.ForkingMixIn,
SimpleXMLRPCServer.SimpleXMLRPCServer):
"""Use customized XMLRPC server which forks for requests and uses the
customized request handler.
"""
def __init__(self, host_port):
SimpleXMLRPCServer.SimpleXMLRPCServer.__init__(
self,
host_port,
WebTLSMD_XMLRPCRequestHandler,
False)
def daemon_main():
rtype, baseurl, port = conf.WEBTLSMDD.split(":")
host_port = ("localhost", int(port))
sys.stdout.write("STARTING webtlsmdd.py DAEMON..................: %s\n" % misc.timestamp())
sys.stdout.write("webtlsmdd.py xmlrpc server version............: %s\n" % (const.VERSION))
sys.stdout.write("listening for incoming connections at URL.....: %s\n" % (conf.WEBTLSMDD))
sys.stdout.write("job (working) directory.......................: %s\n" % (conf.TLSMD_WORK_DIR))
os.chdir(conf.TLSMD_WORK_DIR)
## Switched from handle_SIGCHLD to SIG_IGN. <NAME>, 2008-03-10
signal.signal(signal.SIGCHLD, SIG_IGN)
webtlsmdd = WebTLSMDDaemon()
try:
xmlrpc_server = WebTLSMD_XMLRPCServer(host_port)
except socket.error:
sys.stderr.write("[ERROR] unable to bind to host,port: %s\n" % (str(host_port)))
raise SystemExit
xmlrpc_server.webtlsmdd = webtlsmdd
xmlrpc_server.register_instance(webtlsmdd)
xmlrpc_server.serve_forever()
def main():
try:
daemon_main()
except:
email.SendTracebackEmail("webtlsmdd.py exception")
raise
def inspect():
mysql = mysql_support.MySQLConnect()
if sys.argv[1] == "list":
for dbkey in mysql.job_list():
print dbkey["jobID"], dbkey["job_id"], dbkey["submit_date"]
## FIXME: This does not work yet. 2010-07-02
if sys.argv[1] == "remove":
print "This option does not work yet."
def usage():
print "webtlsmdd.py [list | remove] args..."
if __name__=="__main__":
if len(sys.argv) == 1:
try:
main()
except KeyboardInterrupt:
raise SystemExit
else:
inspect()
sys.exit(0)
``` |
{
"source": "jpic/virt-lightning",
"score": 2
} |
#### File: virt-lightning/virt_lightning/configuration.py
```python
import configparser
from abc import ABCMeta, abstractproperty
from pathlib import PosixPath
DEFAULT_CONFIGFILE = PosixPath("~/.config/virt-lightning/config.ini")
DEFAULT_CONFIGURATION = {
"main": {
"libvirt_uri": "qemu:///system",
"root_password": "<PASSWORD>",
"storage_pool": "virt-lightning",
"network_name": "virt-lightning",
"network_cidr": "192.168.123.0/24",
"network_auto_clean_up": True,
"ssh_key_file": "~/.ssh/id_rsa.pub",
"private_hub": "",
}
}
class AbstractConfiguration(metaclass=ABCMeta):
@abstractproperty
def libvirt_uri(self):
pass
@abstractproperty
def network_name(self):
pass
@abstractproperty
def network_cidr(self):
pass
@abstractproperty
def network_auto_clean_up(self):
pass
@abstractproperty
def root_password(self):
pass
@abstractproperty
def ssh_key_file(self):
pass
@abstractproperty
def storage_pool(self):
pass
def __repr__(self):
return "Configuration(libvirt_uri={uri}, username={username})".format(
uri=self.libvirt_uri, username=self.username
)
class Configuration(AbstractConfiguration):
def __init__(self):
self.data = configparser.ConfigParser()
self.data["main"] = DEFAULT_CONFIGURATION["main"]
if DEFAULT_CONFIGFILE.expanduser().exists():
self.load_file(DEFAULT_CONFIGFILE.expanduser())
def __get(self, key):
return self.data.get("main", key)
@property
def libvirt_uri(self):
return self.__get("libvirt_uri")
@property
def network_name(self):
return self.__get("network_name")
@property
def network_cidr(self):
return self.__get("network_cidr")
@property
def network_auto_clean_up(self):
return self.__get("network_auto_clean_up")
@property
def root_password(self):
return self.__get("root_password")
@property
def ssh_key_file(self):
return self.__get("ssh_key_file")
@property
def storage_pool(self):
return self.__get("storage_pool")
@property
def private_hub(self):
return [x for x in self.__get("private_hub").split(",") if x != ""]
def load_file(self, config_file):
self.data.read_string(config_file.read_text())
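## Illustrative usage sketch (not part of this module):
##   config = Configuration()   # loads ~/.config/virt-lightning/config.ini if present
##   config.libvirt_uri         # "qemu:///system" unless overridden
##   config.private_hub         # [] when the option is left empty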
``` |
{
"source": "jpic/wtforms-alchemy",
"score": 2
} |
#### File: wtforms-alchemy/tests/test_validators.py
```python
from datetime import datetime, time
import sqlalchemy as sa
from sqlalchemy_utils import EmailType
from wtforms.validators import (
DataRequired,
Email,
InputRequired,
Length,
NumberRange,
Optional
)
from wtforms_components import DateRange, TimeRange
from tests import ModelFormTestCase
from wtforms_alchemy import ClassMap, ModelForm, Unique
class TestAutoAssignedValidators(ModelFormTestCase):
def test_auto_assigns_length_validators(self):
self.init()
self.assert_max_length('test_column', 255)
def test_assigns_validators_from_info_field(self):
self.init(info={'validators': Email()})
self.assert_has_validator('test_column', Email)
def test_assigns_unique_validator_for_unique_fields(self):
self.init(unique=True)
self.assert_has_validator('test_column', Unique)
def test_assigns_non_nullable_fields_as_required(self):
self.init(nullable=False)
self.assert_has_validator('test_column', DataRequired)
self.assert_has_validator('test_column', InputRequired)
def test_type_level_not_nullable_validators(self):
class ModelTest(self.base):
__tablename__ = 'model_test'
id = sa.Column(sa.Integer, primary_key=True)
test_column = sa.Column(sa.Unicode(255), nullable=False)
validator = DataRequired()
class ModelTestForm(ModelForm):
class Meta:
model = ModelTest
not_null_validator_type_map = ClassMap()
not_null_validator = validator
form = ModelTestForm()
assert validator in form.test_column.validators
def test_not_nullable_validator_with_type_decorator(self):
class ModelTest(self.base):
__tablename__ = 'model_test'
id = sa.Column(sa.Integer, primary_key=True)
test_column = sa.Column(EmailType, nullable=False)
validator = DataRequired()
class ModelTestForm(ModelForm):
class Meta:
model = ModelTest
not_null_validator_type_map = ClassMap(
[(sa.String, validator)]
)
not_null_validator = []
form = ModelTestForm()
assert validator in form.test_column.validators
def test_not_null_validator_as_empty_list(self):
class ModelTest(self.base):
__tablename__ = 'model_test'
id = sa.Column(sa.Integer, primary_key=True)
test_column = sa.Column(sa.Boolean, nullable=False)
class ModelTestForm(ModelForm):
class Meta:
model = ModelTest
not_null_validator_type_map = ClassMap()
not_null_validator = []
form = ModelTestForm()
assert list(form.test_column.validators) == []
def test_not_null_validator_as_none(self):
class ModelTest(self.base):
__tablename__ = 'model_test'
id = sa.Column(sa.Integer, primary_key=True)
test_column = sa.Column(sa.Boolean, nullable=False)
class ModelTestForm(ModelForm):
class Meta:
model = ModelTest
not_null_validator_type_map = ClassMap()
not_null_validator = None
form = ModelTestForm()
assert len(form.test_column.validators) == 1
assert isinstance(form.test_column.validators[0], Optional)
def test_not_nullable_booleans_are_required(self):
self.init(sa.Boolean, nullable=False)
self.assert_has_validator('test_column', InputRequired)
def test_not_nullable_fields_with_defaults_are_not_required(self):
self.init(nullable=False, default=u'default')
self.assert_not_required('test_column')
def test_assigns_nullable_integers_as_optional(self):
self.init(sa.Integer, nullable=True)
self.assert_optional('test_column')
def test_override_email_validator(self):
class ModelTest(self.base):
__tablename__ = 'model_test'
id = sa.Column(sa.Integer, primary_key=True)
test_column = sa.Column(EmailType, nullable=True)
def validator():
return Email('Wrong email')
class ModelTestForm(ModelForm):
class Meta:
model = ModelTest
email_validator = validator
form = ModelTestForm()
assert form.test_column.validators[1].message == 'Wrong email'
def test_override_optional_validator(self):
class ModelTest(self.base):
__tablename__ = 'model_test'
id = sa.Column(sa.Integer, primary_key=True)
test_column = sa.Column(EmailType, nullable=True)
class MyOptionalValidator(object):
def __init__(self, *args, **kwargs):
pass
class ModelTestForm(ModelForm):
class Meta:
model = ModelTest
optional_validator = MyOptionalValidator
form = ModelTestForm()
assert isinstance(form.test_column.validators[0], MyOptionalValidator)
def test_override_number_range_validator(self):
class ModelTest(self.base):
__tablename__ = 'model_test'
id = sa.Column(sa.Integer, primary_key=True)
test_column = sa.Column(sa.Integer, info={'min': 3}, nullable=True)
def number_range(min=-1, max=-1):
return NumberRange(min=min, max=max, message='Wrong number range')
class ModelTestForm(ModelForm):
class Meta:
model = ModelTest
number_range_validator = number_range
form = ModelTestForm()
assert form.test_column.validators[1].message == 'Wrong number range'
def test_override_date_range_validator(self):
class ModelTest(self.base):
__tablename__ = 'model_test'
id = sa.Column(sa.Integer, primary_key=True)
test_column = sa.Column(
sa.DateTime,
info={'min': datetime(2000, 1, 1)},
nullable=True
)
def date_range(min=None, max=None):
return DateRange(min=min, max=max, message='Wrong date range')
class ModelTestForm(ModelForm):
class Meta:
model = ModelTest
date_range_validator = date_range
form = ModelTestForm()
assert form.test_column.validators[1].message == 'Wrong date range'
def test_override_time_range_validator(self):
class ModelTest(self.base):
__tablename__ = 'model_test'
id = sa.Column(sa.Integer, primary_key=True)
test_column = sa.Column(
sa.Time,
info={'min': time(14, 30)},
nullable=True
)
def time_range(min=None, max=None):
return TimeRange(min=min, max=max, message='Wrong time')
class ModelTestForm(ModelForm):
class Meta:
model = ModelTest
time_range_validator = time_range
form = ModelTestForm()
assert form.test_column.validators[1].message == 'Wrong time'
def test_override_length_validator(self):
class ModelTest(self.base):
__tablename__ = 'model_test'
id = sa.Column(sa.Integer, primary_key=True)
test_column = sa.Column(sa.Unicode(255), nullable=True)
def length(min=-1, max=-1):
return Length(min=min, max=max, message='Wrong length')
class ModelTestForm(ModelForm):
class Meta:
model = ModelTest
length_validator = length
form = ModelTestForm()
assert form.test_column.validators[1].message == 'Wrong length'
def test_override_optional_validator_as_none(self):
class ModelTest(self.base):
__tablename__ = 'model_test'
id = sa.Column(sa.Integer, primary_key=True)
test_column = sa.Column(sa.Boolean, nullable=True)
class ModelTestForm(ModelForm):
class Meta:
model = ModelTest
optional_validator = None
form = ModelTestForm()
assert list(form.test_column.validators) == []
def test_override_unique_validator(self):
class ModelTest(self.base):
__tablename__ = 'model_test'
id = sa.Column(sa.Integer, primary_key=True)
test_column = sa.Column(
sa.Unicode(255), unique=True, nullable=True
)
def unique(column, get_session):
return Unique(
column, get_session=get_session, message='Not unique'
)
class ModelTestForm(ModelForm):
class Meta:
model = ModelTest
unique_validator = unique
@staticmethod
def get_session():
return None
form = ModelTestForm()
assert form.test_column.validators[2].message == 'Not unique'
``` |
{
"source": "jpiechowka/flask-app-starter",
"score": 2
} |
#### File: flask-app-starter/controllers/dashboard.py
```python
from flask import Blueprint, render_template, abort
from jinja2 import TemplateNotFound
dashboard = Blueprint('controllers', __name__, template_folder='templates')
@dashboard.route('/', methods=['GET'])
def serve_dashboard():
try:
return render_template('index.html', title="Title")
except TemplateNotFound:
abort(404)
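# Usage sketch (assumption, not part of this file): the blueprint is meant to
# be registered on a Flask app elsewhere, e.g.
#   from flask import Flask
#   app = Flask(__name__)
#   app.register_blueprint(dashboard)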
``` |
{
"source": "jpienaar/iree",
"score": 2
} |
#### File: python/iree_tfl_tests/mobilenet_v1_test.py
```python
import absl.testing
import numpy
import iree.tflite.support.test_util as test_util
model_path = "https://storage.googleapis.com/iree-model-artifacts/tflite-integration-tests/mobilenet_v1.tflite"
class MobilenetV1Test(test_util.TFLiteModelTest):
def __init__(self, *args, **kwargs):
super(MobilenetV1Test, self).__init__(model_path, *args, **kwargs)
def compare_results(self, iree_results, tflite_results, details):
super(MobilenetV1Test, self).compare_results(iree_results, tflite_results,
details)
self.assertTrue(
numpy.isclose(iree_results[0], tflite_results[0], atol=1e-4).all())
def test_compile_tflite(self):
self.compile_and_execute()
if __name__ == '__main__':
absl.testing.absltest.main()
``` |
{
"source": "jpieper/legtool",
"score": 2
} |
#### File: jpieper/legtool/herkulex_tool.py
```python
import trollius as asyncio
from trollius import From, Return
import optparse
from legtool.servo import herkulex
def get_address(options):
if options.address is None:
return herkulex.HerkuleX.BROADCAST
return int(options.address)
@asyncio.coroutine
def do_enumerate(servo, options):
print(yield From(servo.enumerate()))
@asyncio.coroutine
def do_set_address(servo, options):
address = int(options.set_address)
if address == servo.BROADCAST:
raise RuntimeError('address cannot be set using broadcast address')
yield From(
servo.eep_write(get_address(options), servo.EEP_ID, [ address ]))
@asyncio.coroutine
def do_reboot(servo, options):
yield From(servo.reboot(get_address(options)))
@asyncio.coroutine
def do_status(servo, options):
print(yield From(servo.status(get_address(options))))
@asyncio.coroutine
def do_voltage(servo, options):
print(yield From(servo.voltage(get_address(options))))
@asyncio.coroutine
def do_temperature(servo, options):
print(yield From(servo.temperature_C(get_address(options))))
def main():
parser = optparse.OptionParser()
parser.add_option('-a', '--address', default=None,
help='servo to communicate with')
parser.add_option('-d', '--device', default='/dev/ttyUSB0',
help='serial port device')
parser.add_option('-e', '--enumerate', action='store_true', default=None,
help='enumerate servos on the bus')
parser.add_option('--set-address', dest='set_address', default=None,
help='set all servos on the bus to the given address')
parser.add_option('-r', '--reboot', action='store_true', default=None,
help='reboot servos')
parser.add_option('-s', '--status', action='store_true', default=None,
help='query status of servo')
parser.add_option('-v', '--voltage', action='store_true', default=None,
help='query voltage of servo')
parser.add_option('-t', '--temperature', action='store_true', default=None,
help='query temperature of servo')
(options, args) = parser.parse_args()
actions = {
'enumerate': do_enumerate,
'set_address': do_set_address,
'reboot': do_reboot,
'status': do_status,
'voltage': do_voltage,
'temperature': do_temperature,
}
action_func = None
for key in actions.keys():
if hasattr(options, key) and getattr(options, key) is not None:
if action_func is not None:
raise RuntimeError('more than one action set')
action_func = actions[key]
servo = herkulex.HerkuleX(options.device)
loop = asyncio.get_event_loop()
if action_func is not None:
loop.run_until_complete(action_func(servo, options))
return
else:
raise RuntimeError('no action specified')
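# Example invocations (sketch only; flags as defined by the optparse options
# above, device path and servo id are assumptions):
#   python herkulex_tool.py -d /dev/ttyUSB0 --enumerate
#   python herkulex_tool.py -d /dev/ttyUSB0 -a 5 --status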
if __name__ == '__main__':
main()
```
#### File: legtool/async/trollius_trace.py
```python
import sys
import traceback
import trollius
import trollius as asyncio
# The following is an ugly ugly hack to get useful stack traces from
# coroutines with trollius on python 2.
old_future_set = trollius.Future.set_exception
def Future_set_exception(self, exc):
tb = sys.exc_info()[2]
if not hasattr(exc, '__frames__'):
setattr(exc, '__frames__', [])
frames = getattr(exc, '__frames__')
if len(frames) == 0:
frames.append(str(exc))
frames[0:0] = traceback.format_tb(tb)
old_future_set(self, exc)
self._tb_logger.tb = frames
self = None
trollius.Future.set_exception = Future_set_exception
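# Usage sketch (assumption): the patch is applied as a side effect of import,
# so importing this module once near program startup is enough, e.g.
#   import legtool.async.trollius_trace  # noqa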
```
#### File: legtool/gait/ripple.py
```python
import bisect
import math
from .common import (STANCE, SWING, UNKNOWN)
from .common import (LegConfig, MechanicalConfig)
from .common import (LegResult, Command, GaitGraphLeg, GaitGraph, LegState)
from .common import (NotSupported, CommonState)
from ..tf import geometry
from ..tf import tf
class RippleConfig(object):
def __init__(self):
self.mechanical = MechanicalConfig()
self.max_cycle_time_s = 4.0
self.lift_height_mm = 80.0
self.lift_percent = 25.0
self.swing_percent = 80.0
self.position_margin_percent = 80.0
self.leg_order = []
self.body_z_offset_mm = 0.0
self.servo_speed_margin_percent = 70.0
self.statically_stable = False
self.static_center_factor = 3.0
self.static_stable_factor = 10.0
self.static_margin_mm = 20.0
def copy(self):
result = RippleConfig()
result.mechanical = self.mechanical.copy()
result.max_cycle_time_s = self.max_cycle_time_s
result.lift_height_mm = self.lift_height_mm
result.lift_percent = self.lift_percent
result.swing_percent = self.swing_percent
result.position_margin_percent = self.position_margin_percent
result.leg_order = self.leg_order[:]
result.body_z_offset_mm = self.body_z_offset_mm
result.servo_speed_margin_percent = self.servo_speed_margin_percent
result.statically_stable = self.statically_stable
result.static_center_factor = self.static_center_factor
result.static_stable_factor = self.static_stable_factor
result.static_margin_mm = self.static_margin_mm
return result
@staticmethod
def parse_leg_order(data):
'''A leg ordering is a comma separated list of leg numbers, or
of leg groups, where a leg group is a parenthesis grouped list
of leg numbers.
Return the programmatic representation of that ordering when
given a string version. On malformed input, make all attempts
to return something, even if only a subset of the input.
'''
result = []
if data == '':
return result
in_tuple = False
current_tuple = ()
current_item = ''
for x in data:
if x == '(':
if in_tuple:
return result
in_tuple = True
if x >= '0' and x <= '9':
current_item += x
else:
if len(current_item):
value = int(current_item)
current_item = ''
if in_tuple:
current_tuple += (value,)
else:
result.append(value)
if x == ')':
if not in_tuple:
return result
if len(current_tuple) == 1:
result.append(current_tuple[0])
elif len(current_tuple) > 1:
result.append(current_tuple)
current_tuple = ()
in_tuple = False
if len(current_item):
result.append(int(current_item))
return result
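    # Worked example (sketch, not from the original source):
    #   RippleConfig.parse_leg_order('0,(1,2),3') -> [0, (1, 2), 3]
    #   RippleConfig.str_leg_order([0, (1, 2), 3]) -> '0,(1,2),3'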
@staticmethod
def str_leg_order(data):
'''Given a leg ordering, return the canonical string
representation.'''
assert isinstance(data, list)
return str(data)[1:-1].replace(' ', '')
_FLOAT_ATTRIBUTES = [
'max_cycle_time_s',
'lift_height_mm',
'lift_percent',
'swing_percent',
'position_margin_percent',
'body_z_offset_mm',
'servo_speed_margin_percent',
'static_center_factor',
'static_stable_factor',
'static_margin_mm',
]
@staticmethod
def read_settings(config, group_name, leg_ik_map):
'''Populate a RippleConfig instance from the given
ConfigParser instance and group name.
:param config: Configuration to read
:param group_name: String containing the appropriate group
:param leg_ik_map: Mapping from leg number to IK instance'''
result = RippleConfig()
result.mechanical = MechanicalConfig.read_settings(
config, group_name + '.legs', leg_ik_map)
for x in RippleConfig._FLOAT_ATTRIBUTES:
if config.has_option(group_name, x):
setattr(result, x, config.getfloat(group_name, x))
if config.has_option(group_name, 'statically_stable'):
result.statically_stable = config.getboolean(
group_name, 'statically_stable')
if config.has_option(group_name, 'leg_order'):
result.leg_order = RippleConfig.parse_leg_order(
config.get(group_name, 'leg_order'))
return result
def write_settings(self, config, group_name):
'''Store this RippleConfig instance into the given
ConfigParser instance at the given group name.'''
config.add_section(group_name)
self.mechanical.write_settings(config, group_name + '.legs')
for x in self._FLOAT_ATTRIBUTES:
config.set(group_name, x, getattr(self, x))
config.set(group_name, 'statically_stable', self.statically_stable)
config.set(group_name, 'leg_order', self.str_leg_order(self.leg_order))
class RippleState(CommonState):
def __init__(self):
self.legs = {}
self.phase = 0.
self.action = 0
# robot_frame coordinates describing the start and end
# position of the current swing leg(s).
self.swing_start_pos = {}
self.swing_end_pos = {}
self.world_frame = tf.Frame()
self.robot_frame = tf.Frame(None, None, self.world_frame)
self.body_frame = tf.Frame(None, None, self.robot_frame)
self.cog_frame = tf.Frame(None, None, self.body_frame)
def copy(self):
result = RippleState()
super(RippleState, self).copy_into(result)
result.phase = self.phase
result.action = self.action
result.swing_start_pos = dict(
[(key, value.copy()) for key, value in
self.swing_start_pos.iteritems()])
result.swing_end_pos = dict(
[(key, value.copy()) for key, value in
self.swing_end_pos.iteritems()])
return result
def _sign(val):
return -1.0 if (val < 0.0) else 1.0
class Options(object):
cycle_time_s = 0.0
servo_speed_dps = 0.0
def _iterate_legs(leg_group):
"""Given a leg group (either a scalar leg number, or a tuple of
legs), iterate over all of them."""
if isinstance(leg_group, int):
yield leg_group
else:
for x in leg_group:
yield x
class RippleGait(object):
ACTION_START_SWING, ACTION_START_STANCE, ACTION_END = range(3)
def __init__(self, config):
assert config is not None
self.config = config
self.num_legs = len(config.leg_order)
self.cycle_time_s = None
self.state = self.get_idle_state()
self.idle_state = self.get_idle_state()
self.next_command = None
self.next_options = None
self._create_actions()
self._really_set_command(Command(), Options())
def _create_actions(self):
self.actions = []
if self.num_legs == 0:
return
# Create the action list.
swing_time = self._swing_phase_time()
for i in range(self.num_legs):
fraction = float(i) / self.num_legs
leg_group = self.config.leg_order[i]
self.actions.append(
(fraction, leg_group, self.ACTION_START_SWING))
self.actions.append(
(fraction + swing_time, leg_group, self.ACTION_START_STANCE))
self.actions.append((1.0, -1, self.ACTION_END))
def set_state(self, state, command):
'''Force the current leg state to the given configuration. If
a phase is present, it must be consistent, i.e. it should have
been read from this class along with the leg state.
This may raise NotSupported, if the command and state are
inconsistent with one another. In this case, neither the
state nor command are changed.
'''
old_state = self.state
self.state = state.copy()
# Make sure all the legs are in the correct frame.
assert state.phase == 0.0
for leg in self.state.legs.values():
if leg.mode == STANCE:
leg.point = self.state.world_frame.map_from_frame(
leg.frame, leg.point)
leg.frame = self.state.world_frame
elif leg.mode == SWING:
leg.point = self.state.robot_frame.map_from_frame(
leg.frame, leg.point)
leg.frame = self.state.robot_frame
try:
self.set_command(command)
except:
self.state = old_state
raise
return self.state
def _select_command_options(self, command):
if self.num_legs == 0:
return Options()
# First, iterate, solving IK for all legs in time until we
# find the point at which the first leg is unsolvable.
dt = 0.05
time_s = 0.0
my_state = self.idle_state.copy()
self._apply_body_command(my_state, command)
end_time_s = None
min_observed_speed = None
# Dictionary of (direction, leg_num) to old ik_result
old_ik_result = {}
fraction_in_stance = 1.0 - self._swing_phase_time()
margin = 0.01 * self.config.position_margin_percent * fraction_in_stance
while time_s < (0.5 * self.config.max_cycle_time_s / margin):
if end_time_s is not None:
break
time_s += dt
for direction in [-1, 1]:
frame = self._get_update_frame(
direction * time_s, command=command)
if end_time_s is not None:
break
for leg_num, leg in my_state.legs.iteritems():
# TODO: Need to do this for the lifted leg as well.
leg_robot_frame_point = frame.map_to_parent(leg.point)
leg_shoulder_point = leg.shoulder_frame.map_from_frame(
my_state.robot_frame, leg_robot_frame_point)
leg_config = self.config.mechanical.leg_config[leg_num]
result = leg_config.leg_ik.do_ik(leg_shoulder_point)
if result is None:
# Break, so that we can take action knowing
# how far we can go.
end_time_s = time_s
break
if (direction, leg_num) in old_ik_result:
this_old_result = old_ik_result[(direction, leg_num)]
largest_change_deg = \
leg_config.leg_ik.largest_change_deg(
result, this_old_result)
this_speed = largest_change_deg / dt
if (min_observed_speed is None or
this_speed < min_observed_speed):
min_observed_speed = this_speed
old_ik_result[(direction, leg_num)] = result
if min_observed_speed is None:
raise NotSupported()
result = Options()
if end_time_s is None:
# We can achieve this at the maximum time.
result.cycle_time_s = self.config.max_cycle_time_s
else:
result.cycle_time_s = (2.0 * end_time_s * margin)
# TODO jpieper: See if this cycle time is feasible. We will
# do this by checking to see if the swing leg has to move too
# fast.
min_swing_speed = (min_observed_speed *
(1.0 - self._swing_phase_time()) /
self._swing_phase_time())
result.servo_speed_dps = min_swing_speed
any_ik = self.config.mechanical.leg_config.values()[0].leg_ik
servo_speed_dps = any_ik.servo_speed_dps()
speed_margin = 0.01 * self.config.servo_speed_margin_percent
if min_swing_speed > speed_margin * servo_speed_dps:
# Slow the command down.
slow_down_factor = (min_swing_speed /
(speed_margin * servo_speed_dps))
command.translate_x_mm_s /= slow_down_factor
command.translate_y_mm_s /= slow_down_factor
command.rotate_deg_s /= slow_down_factor
result.cycle_time_s *= slow_down_factor
result.servo_speed_dps = speed_margin * servo_speed_dps
return result
def _do_commands_differ_body_only(self, command1, command2):
return (command1.translate_x_mm_s == command2.translate_x_mm_s and
command1.translate_y_mm_s == command2.translate_y_mm_s and
command1.rotate_deg_s == command2.rotate_deg_s)
def set_command(self, command):
'''Set the current command. This will raise a NotSupported
exception if the platform cannot achieve the desired command,
in this case, the desired command will not be changed.'''
command = command.copy()
# Determine if the command is valid or not, and select the
# options necessary for it.
#
# NOTE: This may modify command.
options = self._select_command_options(command)
self._really_set_command(command, options)
def is_command_pending(self):
return self.next_command is not None
def _really_set_command(self, command, options):
self.command = command
self.options = options
if self.num_legs == 0:
return
self.cycle_time_s = options.cycle_time_s
self._apply_body_command(self.state, command)
def _apply_body_command(self, state, command):
if not self.config.statically_stable:
state.body_frame.transform.translation.x = command.body_x_mm
state.body_frame.transform.translation.y = command.body_y_mm
state.body_frame.transform.translation.z = (
command.body_z_mm + self.config.body_z_offset_mm)
state.body_frame.transform.rotation = tf.Quaternion.from_euler(
math.radians(command.body_roll_deg),
math.radians(command.body_pitch_deg),
math.radians(command.body_yaw_deg))
def get_idle_state(self):
'''Return a state usable for the idle stance, regardless of
the current state of the gait generator.'''
result = RippleState()
for leg_data in self.config.mechanical.leg_config.iteritems():
leg_number, leg_config = leg_data
point = tf.Point3D(0., 0., 0.)
x_sign = _sign(leg_config.mount_x_mm)
point.x = leg_config.mount_x_mm + leg_config.idle_x_mm * x_sign
y_sign = _sign(leg_config.mount_y_mm)
point.y = leg_config.mount_y_mm + leg_config.idle_y_mm * y_sign
point.z = leg_config.idle_z_mm
leg_state = LegState()
leg_state.point = result.world_frame.map_from_frame(
result.body_frame, point)
leg_state.frame = result.world_frame
result.legs[leg_number] = leg_state
# For now, we are assuming that shoulders face away from
# the y axis.
rotation = (0.5 * math.pi
if leg_config.mount_x_mm > 0.0
else -0.5 * math.pi)
leg_state.shoulder_frame = tf.Frame(
tf.Point3D(leg_config.mount_x_mm,
leg_config.mount_y_mm,
leg_config.mount_z_mm),
tf.Quaternion.from_euler(0., 0., rotation),
result.body_frame)
leg_state.leg_ik = leg_config.leg_ik
result.body_frame.transform.translation.z = self.config.body_z_offset_mm
result.cog_frame.transform.translation.x = \
self.config.mechanical.body_cog_x_mm
result.cog_frame.transform.translation.y = \
self.config.mechanical.body_cog_y_mm
result.cog_frame.transform.translation.z = \
self.config.mechanical.body_cog_z_mm
return result
def get_gait_graph(self):
'''Return a GaitGraph instance for the current configuration
and command. This is independent of the current state, but is
dependent upon the configuration and command.'''
if self.num_legs == 0:
return GaitGraph()
# TODO jpieper: This could be generated from the actions list.
start_phase = 0.0
leg_cycle_time = 1.0 / self.num_legs
result = GaitGraph()
for leg_group in self.config.leg_order:
graph_leg = GaitGraphLeg()
graph_leg.sequence = [
(start_phase, SWING),
(start_phase + self._swing_phase_time(), STANCE)]
if start_phase != 0:
graph_leg.sequence = [(0.0, STANCE)] + graph_leg.sequence
for leg_number in _iterate_legs(leg_group):
result.leg[leg_number] = graph_leg
start_phase += leg_cycle_time
return result
def advance_time(self, delta_s):
'''Advance the phase and leg state by the given amount of
time.'''
return self.advance_phase(delta_s / self._phase_time())
def _do_action(self, action_index):
phase, leg_group, action = self.actions[action_index]
if action == self.ACTION_START_STANCE:
for leg_num in _iterate_legs(leg_group):
leg = self.state.legs[leg_num]
leg.mode = STANCE
leg.point = self.state.world_frame.map_from_frame(
leg.frame, leg.point)
leg.point.z = 0.0
leg.frame = self.state.world_frame
elif action == self.ACTION_START_SWING:
for leg_num in _iterate_legs(leg_group):
leg = self.state.legs[leg_num]
leg.mode = SWING
leg.point = self.state.robot_frame.map_from_frame(
leg.frame, leg.point)
leg.frame = self.state.robot_frame
self.state.swing_start_pos[leg_num] = leg.point.copy()
self.state.swing_end_pos[leg_num] = (
self._get_swing_end_pos(leg_num))
def _get_update_frame(self, dt, command=None):
if command is None:
command = self.command
update_frame = tf.Frame(parent=self.state.robot_frame)
vx = command.translate_x_mm_s
vy = command.translate_y_mm_s
if command.rotate_deg_s == 0:
dx = vx * dt
dy = vy * dt
else:
vyaw = math.radians(command.rotate_deg_s)
dx = (((math.cos(dt * vyaw) - 1) * vy +
math.sin(dt * vyaw) * vx) / vyaw)
dy = (((math.cos(dt * vyaw) - 1) * vx +
math.sin(dt * vyaw) * vy) / vyaw)
update_frame.transform.rotation = \
tf.Quaternion.from_euler(0, 0, dt * vyaw)
update_frame.transform.translation.x = dx
update_frame.transform.translation.y = dy
return update_frame
def _noaction_advance_phase(self, delta_phase, final_phase):
transform = self.state.robot_frame.transform
dt = delta_phase * self._phase_time()
update_frame = self._get_update_frame(dt)
new_transform = update_frame.transform_to_frame(self.state.world_frame)
self.state.robot_frame.transform = new_transform
# Update the legs which are in swing.
for leg_num, leg in self.state.legs.iteritems():
if leg.mode == SWING:
leg_phase = ((final_phase % (1.0 / self.num_legs)) /
self._swing_phase_time())
# Don't allow the phase to wrap-around on the final update.
if delta_phase > 0.0 and leg_phase == 0.0:
leg_phase = 1.0
assert leg.frame is self.state.robot_frame
delta = (self.state.swing_end_pos[leg_num] -
self.state.swing_start_pos[leg_num])
current = (self.state.swing_start_pos[leg_num] +
delta.scaled(leg_phase))
leg.point = current
lift_fraction = 0.01 * self.config.lift_percent
if leg_phase < lift_fraction:
leg.point.z = ((leg_phase / lift_fraction) *
self.config.lift_height_mm)
elif leg_phase < (1.0 - lift_fraction):
leg.point.z = self.config.lift_height_mm
else:
leg.point.z = (((1.0 - leg_phase) / lift_fraction) *
self.config.lift_height_mm)
# If we are running with a statically stable gait, update the
# body position dynamically.
if self.config.statically_stable:
desired_cog_vel = tf.Point3D()
cog_pos = self.state.cog_frame.map_to_frame(
self.state.robot_frame, tf.Point3D())
cog_pos.z = 0
# Do the part which pulls the COG back towards the center
# of the robot frame.
cog_dist = cog_pos.length()
scale = 1.0
if cog_dist < self.config.static_margin_mm:
scale = cog_dist / self.config.static_margin_mm
desired_cog_vel = tf.Point3D()
if cog_dist != 0.0:
static_center_velocity_mm_s = (
self.config.static_center_factor *
self.config.static_margin_mm /
self.options.cycle_time_s)
desired_cog_vel = desired_cog_vel + cog_pos.scaled(
-scale * static_center_velocity_mm_s /
cog_dist)
if (dt * desired_cog_vel.length()) > cog_dist:
desired_cog_vel = desired_cog_vel.scaled(
cog_dist / (dt * desired_cog_vel.length()))
# Now do the part which pulls the COG towards the support
# polygon.
# First, determine what support polygon we should be
# aiming for.
support_poly = self._get_current_support_poly_robot_frame()
support_centroid = geometry.poly_centroid(support_poly)
delta = cog_pos - support_centroid
if not geometry.point_in_poly(cog_pos, support_poly):
# We are not in the support polygon, thus move at the
# maximal velocity towards the centroid
scale = 1.0
else:
# We are in the support polygon. See by how much to
# determine what our scale should be.
dist = geometry.distance_to_poly(cog_pos, support_poly)
scale = max(0.0,
((self.config.static_margin_mm - dist) /
self.config.static_margin_mm))
static_stable_velocity_mm_s = (
self.config.static_stable_factor *
self.config.static_margin_mm /
self.options.cycle_time_s)
if delta.length() > 0.0:
poly_vel = delta.scaled(
-scale * static_stable_velocity_mm_s / delta.length())
else:
poly_vel = tf.Point3D()
if (dt * poly_vel.length()) > delta.length():
poly_vel = poly_vel.scaled(
delta.length() / (dt * poly_vel.length()))
cog_move = (desired_cog_vel + poly_vel).scaled(dt)
self.state.body_frame.transform.translation.x += cog_move.x
self.state.body_frame.transform.translation.y += cog_move.y
def _get_current_support_poly_robot_frame(self):
next_action = self.actions[self.state.action]
_, action_leg_group, action = next_action
if action == self.ACTION_END:
_, action_leg_group, action = self.actions[0]
legs = []
if action == self.ACTION_START_SWING:
# The next action is a lift, which means we should use all
# legs but this one.
legs = [x for x in self.state.legs.keys()
if x not in list(_iterate_legs(action_leg_group))]
elif action == self.ACTION_START_STANCE:
# We must have a leg in the air now, so use the current
# set of stance legs.
legs = [x for x in self.state.legs.keys()
if self.state.legs[x].mode == STANCE ]
poly = [self.state.robot_frame.map_from_frame(
self.state.legs[x].frame,
self.state.legs[x].point)
for x in legs]
return poly
def advance_phase(self, delta_phase):
'''Advance the phase and leg state by the given amount of
phase. Being independent of time, this is only really useful
for visualization or debugging.'''
if self.num_legs == 0:
self.state.phase = (self.state.phase + delta_phase) % 1.0
return self.state
old_phase = self.state.phase
next_phase = (self.state.phase + delta_phase)
cur_phase = old_phase
while True:
# Are there any actions between old_phase and new_phase?
if self.actions[self.state.action][0] > next_phase:
break
advance_phase = self.actions[self.state.action][0]
if advance_phase != cur_phase:
delta_phase = advance_phase - cur_phase
self._noaction_advance_phase(delta_phase, advance_phase)
cur_phase = advance_phase
self._do_action(self.state.action)
self.state.action += 1
if self.state.action >= len(self.actions):
self.state.action = 0
next_phase -= 1.0
cur_phase -= 1.0
if self.next_command:
next_command = self.next_command
self.next_command = None
next_options = self.next_options
self.next_options = None
self._really_set_command(next_command, next_options)
# Finally, advance the remainder of the phase and update the phase.
self._noaction_advance_phase(next_phase - cur_phase, next_phase)
self.state.phase = next_phase
return self.state
def _phase_time(self):
return self.cycle_time_s
def _swing_phase_time(self):
return (1.0 / self.num_legs) * 0.01 * self.config.swing_percent
def _get_swing_end_pos(self, leg_num):
# Target swing end positions such that during stance, the leg
# will spend half its travel time reaching the idle position,
# and half its travel time going beyond the idle position.
stance_phase_time = 1.0 - self._swing_phase_time()
dt = 0.5 * stance_phase_time * self._phase_time()
end_frame = self._get_update_frame(dt)
# TODO jpieper: This should map from whatever frame the idle
# state leg was actually in.
return end_frame.map_to_parent(self.idle_state.legs[leg_num].point)
```
#### File: legtool/tabs/servo_tab.py
```python
import functools
import traceback
import trollius as asyncio
from trollius import Task, From, Return
import PySide.QtCore as QtCore
import PySide.QtGui as QtGui
from ..servo import selector
from .common import BoolContext
from . import gazebo_config_dialog
def spawn(callback):
def start():
Task(callback())
return start
class ServoTab(object):
def __init__(self, ui, status):
self.ui = ui
self.status = status
self.servo_controls = []
self.monitor_thread = None
self.servo_model = ''
self.servo_name_map = {}
self.ui.statusText.setText('not connected')
self.ui.connectButton.clicked.connect(
spawn(self.handle_connect_clicked))
self.ui.typeCombo.currentIndexChanged.connect(self.handle_type_change)
self.handle_type_change()
self.ui.configureGazeboButton.clicked.connect(
self.handle_configure_gazebo)
servo_layout = QtGui.QVBoxLayout()
servo_layout.setSpacing(0)
servo_layout.setContentsMargins(0, 0, 0, 0)
self.ui.scrollContents.setLayout(servo_layout)
self.ui.servoCountSpin.valueChanged.connect(self.handle_servo_count)
self.handle_servo_count()
self.ui.powerCombo.currentIndexChanged.connect(
spawn(self.handle_power))
self.ui.captureCurrentButton.clicked.connect(
spawn(self.handle_capture_current))
self.update_connected(False)
self.ui.addPoseButton.clicked.connect(self.handle_add_pose)
self.ui.removePoseButton.clicked.connect(self.handle_remove_pose)
self.ui.moveToPoseButton.clicked.connect(
spawn(self.handle_move_to_pose))
self.ui.updatePoseButton.clicked.connect(self.handle_update_pose)
self.ui.poseList.currentItemChanged.connect(
self.handle_poselist_current_changed)
self.controller = None
self.servo_update = BoolContext()
def resizeEvent(self, event):
pass
def poses(self):
result = []
for i in range(self.ui.poseList.count()):
result.append(self.ui.poseList.item(i).text())
return result
def pose(self, name):
for i in range(self.ui.poseList.count()):
if self.ui.poseList.item(i).text() == name:
return self.ui.poseList.item(i).data(QtCore.Qt.UserRole)
return dict([(i, 0.0) for i in range(self.ui.servoCountSpin.value())])
@asyncio.coroutine
def handle_connect_clicked(self):
val = self.ui.typeCombo.currentText().lower()
try:
self.controller = yield From(
selector.select_servo(
val,
serial_port=self.ui.serialPortCombo.currentText(),
model_name=self.servo_model,
servo_name_map=self.servo_name_map))
self.ui.statusText.setText('connected')
self.update_connected(True)
except Exception as e:
self.ui.statusText.setText('error: %s' % str(e))
self.update_connected(False)
def handle_type_change(self):
val = self.ui.typeCombo.currentText().lower()
self.ui.serialPortCombo.setEnabled(val == 'herkulex')
self.ui.configureGazeboButton.setEnabled(val == 'gazebo')
def handle_configure_gazebo(self):
servo_name_map = self.servo_name_map.copy()
for x in range(self.ui.servoCountSpin.value()):
if not x in servo_name_map:
servo_name_map[x] = ''
dialog = gazebo_config_dialog.GazeboConfigDialog(
self.servo_model, servo_name_map)
dialog.setModal(True)
result = dialog.exec_()
if result == QtGui.QDialog.Rejected:
return
self.servo_model = dialog.model_name()
self.servo_name_map = dialog.servo_name_map()
def handle_servo_count(self):
count = self.ui.servoCountSpin.value()
while len(self.servo_controls) > count:
# Remove the last one
last = self.servo_controls[-1]
widget = last['widget']
self.ui.scrollContents.layout().removeWidget(widget)
widget.deleteLater()
self.servo_controls = self.servo_controls[:-1]
while len(self.servo_controls) < count:
# Add a new one.
servo_id = len(self.servo_controls)
label = QtGui.QLabel()
label.setText('ID %d:' % servo_id)
slider = QtGui.QSlider(QtCore.Qt.Horizontal)
slider.setRange(-180, 180)
doublespin = QtGui.QDoubleSpinBox()
doublespin.setRange(-180, 180)
doublespin.setDecimals(1)
save = QtGui.QPushButton()
save.setText("Save")
move = QtGui.QPushButton()
move.setText("Move")
current = QtGui.QLabel()
current.setText('N/A')
current.setMinimumWidth(60)
widget = QtGui.QWidget()
layout = QtGui.QHBoxLayout(widget)
layout.addWidget(label)
layout.addWidget(slider)
layout.addWidget(doublespin)
layout.addWidget(save)
layout.addWidget(move)
layout.addWidget(current)
slider.valueChanged.connect(
functools.partial(self.handle_servo_slider, servo_id))
doublespin.valueChanged.connect(
functools.partial(self.handle_servo_spin, servo_id))
save.clicked.connect(
functools.partial(self.handle_servo_save, servo_id))
move.clicked.connect(
functools.partial(self.handle_servo_move, servo_id))
self.ui.scrollContents.layout().addWidget(widget)
self.servo_controls.append({
'widget': widget,
'label': label,
'slider': slider,
'doublespin': doublespin,
'save': save,
'move': move,
'current': current})
@asyncio.coroutine
def handle_power(self):
text = self.ui.powerCombo.currentText().lower()
value = None
if text == 'free':
value = selector.POWER_FREE
elif text == 'brake':
value = selector.POWER_BRAKE
elif text == 'drive':
value = selector.POWER_ENABLE
else:
raise NotImplementedError()
yield From(self.controller.enable_power(value))
def update_connected(self, value):
self.ui.controlGroup.setEnabled(value)
self.ui.posesGroup.setEnabled(value)
if self.monitor_thread is not None:
self.monitor_thread.cancel()
self.monitor_thread = None
if value:
self.handle_power()
self.monitor_thread = Task(self.monitor_status())
@asyncio.coroutine
def monitor_status(self):
voltages = {}
temperatures = {}
ident = 0
while True:
if (self.controller is not None and
hasattr(self.controller, 'get_voltage')):
try:
ident = (ident + 1) % len(self.servo_controls)
this_voltage = yield From(
self.controller.get_voltage([ident]))
voltages.update(this_voltage)
# Get all temperatures.
this_temp = yield From(
self.controller.get_temperature([ident]))
temperatures.update(this_temp)
def non_None(value):
return [x for x in value if x is not None]
message = "Servo status: "
if len(non_None(voltages.values())):
message += "%.1f/%.1fV" % (
min(non_None(voltages.values())),
max(non_None(voltages.values())))
if len(non_None(temperatures.values())):
message += " %.1f/%.1fC" % (
min(non_None(temperatures.values())),
max(non_None(temperatures.values())))
self.status.showMessage(message, 10000)
except Exception as e:
traceback.print_exc()
print "Error reading servo:", type(e), e
yield From(asyncio.sleep(2.0))
@asyncio.coroutine
def set_single_pose(self, servo_id, value):
yield From(
self.controller.set_single_pose(servo_id, value, pose_time=0.2))
def handle_servo_slider(self, servo_id, event):
if self.servo_update.value:
return
with self.servo_update:
control = self.servo_controls[servo_id]
value = control['slider'].value()
control['doublespin'].setValue(value)
Task(self.set_single_pose(servo_id, value))
def handle_servo_spin(self, servo_id, event):
if self.servo_update.value:
return
with self.servo_update:
control = self.servo_controls[servo_id]
value = control['doublespin'].value()
control['slider'].setSliderPosition(int(value))
Task(self.set_single_pose(servo_id, value))
def handle_servo_save(self, servo_id):
if self.ui.poseList.currentRow() < 0:
return
current_data = self.ui.poseList.currentItem().data(
QtCore.Qt.UserRole)
current_data[servo_id] = (
self.servo_controls[servo_id]['doublespin'].value())
self.ui.poseList.currentItem().setData(
QtCore.Qt.UserRole, current_data)
self.handle_poselist_current_changed(None, None)
def handle_servo_move(self, servo_id):
if self.ui.poseList.currentRow() < 0:
return
data = self.ui.poseList.currentItem().data(QtCore.Qt.UserRole)
self.servo_controls[servo_id]['doublespin'].setValue(data[servo_id])
@asyncio.coroutine
def handle_capture_current(self):
with self.servo_update:
results = yield From(
self.controller.get_pose(range(len(self.servo_controls))))
for ident, angle in results.iteritems():
if angle is None:
continue
control = self.servo_controls[ident]
control['slider'].setSliderPosition(int(angle))
control['doublespin'].setValue(angle)
def add_list_pose(self, name):
self.ui.poseList.addItem(name)
item = self.ui.poseList.item(self.ui.poseList.count() - 1)
item.setFlags(QtCore.Qt.ItemIsEnabled |
QtCore.Qt.ItemIsSelectable |
QtCore.Qt.ItemIsEditable |
QtCore.Qt.ItemIsSelectable)
return item
def get_new_pose_name(self):
poses = set([self.ui.poseList.item(x).text()
for x in range(self.ui.poseList.count())])
count = 0
while True:
name = 'new_pose_%d' % count
if name not in poses:
return name
count += 1
def generate_pose_data(self):
return dict(
[ (i, control['doublespin'].value())
for i, control in enumerate(self.servo_controls) ])
def handle_add_pose(self):
pose_name = self.get_new_pose_name()
item = self.add_list_pose(pose_name)
item.setData(QtCore.Qt.UserRole, self.generate_pose_data())
self.ui.poseList.editItem(item)
def handle_remove_pose(self):
if self.ui.poseList.currentRow() < 0:
return
pose_name = self.ui.poseList.currentItem().text()
del self.poses[pose_name]
self.ui.poseList.takeItem(self.ui.poseList.currentRow())
@asyncio.coroutine
def handle_move_to_pose(self):
if self.ui.poseList.currentRow() < 0:
return
values = self.ui.poseList.currentItem().data(QtCore.Qt.UserRole)
yield From(self.controller.set_pose(values, pose_time=1.0))
with self.servo_update:
for ident, angle_deg in values.iteritems():
control = self.servo_controls[ident]
control['slider'].setSliderPosition(int(angle_deg))
control['doublespin'].setValue(angle_deg)
def handle_update_pose(self):
if self.ui.poseList.currentRow() < 0:
return
self.ui.poseList.currentItem().setData(
QtCore.Qt.UserRole, self.generate_pose_data())
self.handle_poselist_current_changed(None, None)
def handle_poselist_current_changed(self, current, previous):
if self.ui.poseList.currentRow() < 0:
return
data = self.ui.poseList.currentItem().data(QtCore.Qt.UserRole)
for i, control in enumerate(self.servo_controls):
control['current'].setText('%.1f' % data[i])
def read_settings(self, config):
if not config.has_section('servo'):
return
self.ui.typeCombo.setCurrentIndex(config.getint('servo', 'type'))
self.ui.serialPortCombo.setEditText(config.get('servo', 'port'))
self.ui.servoCountSpin.setValue(config.getint('servo', 'count'))
self.servo_model = config.get('servo', 'model')
if config.has_section('servo.names'):
self.servo_name_map = {}
for name, value in config.items('servo.names'):
self.servo_name_map[int(name)] = value
if config.has_section('servo.poses'):
for name, value in config.items('servo.poses'):
this_data = {}
for element in value.split(','):
ident, angle_deg = element.split('=')
this_data[int(ident)] = float(angle_deg)
item = self.add_list_pose(name)
item.setData(QtCore.Qt.UserRole, this_data)
def write_settings(self, config):
config.add_section('servo')
config.add_section('servo.poses')
config.add_section('servo.names')
config.set('servo', 'type', self.ui.typeCombo.currentIndex())
config.set('servo', 'port', self.ui.serialPortCombo.currentText())
config.set('servo', 'count', self.ui.servoCountSpin.value())
config.set('servo', 'model', self.servo_model)
for key, value in self.servo_name_map.iteritems():
config.set('servo.names', str(key), value)
for row in range(self.ui.poseList.count()):
item = self.ui.poseList.item(row)
pose_name = item.text()
values = item.data(QtCore.Qt.UserRole)
config.set(
'servo.poses', pose_name,
','.join(['%d=%.2f' % (ident, angle_deg)
for ident, angle_deg in values.iteritems()]))
```
#### File: legtool/tf/test_geometry.py
```python
from . import geometry
from . import tf
_SQUARE = [tf.Point3D(0., 0., 0.),
tf.Point3D(10., 0., 0.),
tf.Point3D(10., 10., 0.),
tf.Point3D(0., 10., 0.)]
_TRIANGLE = [tf.Point3D(-10., 0., 0.),
tf.Point3D(10., 0., 0.),
tf.Point3D(0., 10., 0.)]
def check_vectors_close(v1, v2):
assert abs(v1.x - v2.x) < 0.001
assert abs(v1.y - v2.y) < 0.001
assert abs(v1.z - v2.z) < 0.001
def test_area():
result = geometry.signed_poly_area(_SQUARE)
assert abs(result - 100.0) < 0.1
result = geometry.signed_poly_area(list(reversed(_SQUARE)))
assert abs(result + 100.0) < 0.1
result = geometry.signed_poly_area(_TRIANGLE)
assert abs(result - 100.0) < 0.1
def test_centroid():
result = geometry.poly_centroid(_SQUARE)
check_vectors_close(result, tf.Point3D(5., 5., 0.))
result = geometry.poly_centroid(list(reversed(_SQUARE)))
check_vectors_close(result, tf.Point3D(5., 5., 0.))
result = geometry.poly_centroid(_TRIANGLE)
check_vectors_close(result, tf.Point3D(0., 3.333, 0.))
def test_point_in_poly():
point_in_poly = geometry.point_in_poly
assert point_in_poly(tf.Point3D(5., 5., 0.), _SQUARE) == True
assert point_in_poly(tf.Point3D(15., 5., 0.), _SQUARE) == False
assert point_in_poly(tf.Point3D(-5., 5., 0.), _SQUARE) == False
assert point_in_poly(tf.Point3D(-5., 5., 0.), _SQUARE) == False
assert point_in_poly(tf.Point3D(0., 1., 0.), _TRIANGLE) == True
assert point_in_poly(tf.Point3D(0., -1., 0.), _TRIANGLE) == False
assert point_in_poly(tf.Point3D(0., 9., 0.), _TRIANGLE) == True
assert point_in_poly(tf.Point3D(0., 11., 0.), _TRIANGLE) == False
assert point_in_poly(tf.Point3D(3., 9., 0.), _TRIANGLE) == False
def test_distance_to_segment():
dut = geometry.distance_to_segment
def test(qx, qy, x1, y1, x2, y2, expected):
result = dut(tf.Point3D(qx, qy, 0.),
tf.Point3D(x1, y1, 0.),
tf.Point3D(x2, y2, 0.))
assert abs(result - expected) < 0.01
test(0., 0., 0., 0., 0., 0., 0.)
test(0., 0., 0., 0., 3., 0., 0.)
test(1., 0., 0., 0., 3., 0., 0.)
test(1., 1., 0., 0., 3., 0., 1.)
test(1., -1., 0., 0., 3., 0., 1.)
test(1., -1., 3., 0., 0., 0., 1.)
test(5., 0., 3., 0., 0., 0., 2.)
test(0., 0., 2., 4., 2., -3., 2.)
test(1., 0., 2., 4., 2., -3., 1.)
test(5., 0., 2., 4., 2., -3., 3.)
test(5., 8., 2., 4., 2., -3., 5.)
def test_distance_to_poly():
dut = geometry.distance_to_poly
def test(qx, qy, poly, expected):
result = dut(tf.Point3D(qx, qy, 0.), poly)
assert abs(result - expected) < 0.01
test(0., 0., _SQUARE, 0.)
test(-1., 0., _SQUARE, 1.)
test(4., 2., _SQUARE, 2.)
```
#### File: legtool/tf/test_quaternion.py
```python
from math import degrees
from math import radians
from numpy import array
import sys
import unittest
from .quaternion import Quaternion
class QuaternionTest(unittest.TestCase):
def check_vector_close(self, vector, x, y, z):
self.assertAlmostEqual(vector[0], x)
self.assertAlmostEqual(vector[1], y)
self.assertAlmostEqual(vector[2], z)
def test_basic(self):
v = array([10, 0, 0])
v = Quaternion.from_euler(0, 0, radians(90)).rotate(v)
self.check_vector_close(v, 0, -10.0, 0)
v = Quaternion.from_euler(0, 0, radians(-90)).rotate(v)
self.check_vector_close(v, 10, 0, 0)
v = Quaternion.from_euler(0, radians(90), 0).rotate(v)
self.check_vector_close(v, 10, 0, 0)
v = Quaternion.from_euler(radians(90), 0, 0).rotate(v)
self.check_vector_close(v, 0, 0, -10)
v = Quaternion.from_euler(0, 0, radians(90)).rotate(v)
self.check_vector_close(v, 0, 0, -10)
v = Quaternion.from_euler(0, radians(90), 0).rotate(v)
self.check_vector_close(v, 0, 10, 0)
v = Quaternion.from_euler(radians(90), 0, 0).rotate(v)
self.check_vector_close(v, 0, 10, 0)
v = Quaternion.from_euler(0, 0, radians(90)).rotate(v)
self.check_vector_close(v, 10, 0, 0)
def check_euler(self, euler, roll_rad, pitch_rad, yaw_rad):
self.assertAlmostEqual(degrees(euler.yaw),
degrees(yaw_rad))
self.assertAlmostEqual(degrees(euler.pitch),
degrees(pitch_rad))
self.assertAlmostEqual(degrees(euler.roll),
degrees(roll_rad))
def test_euler_and_back(self):
tests = [ (45, 0, 0),
(0, 45, 0),
(0, 0, 45),
(0, 90, 0),
(0, 90, 20),
(0, -90, 0),
(0, -90, -10),
(0, -90, 30),
(10, 20, 30),
(-30, 10, 20), ]
for test in tests:
try:
self.check_euler(
Quaternion.from_euler(radians(test[0]),
radians(test[1]),
radians(test[2])).euler(),
radians(test[0]),
radians(test[1]),
radians(test[2]))
except:
print >> sys.stderr, 'in test:', test
raise
def test_multiplication(self):
x90 = Quaternion.from_euler(0, radians(90), 0)
xn90 = Quaternion.from_euler(0, -radians(90), 0)
y90 = Quaternion.from_euler(radians(90), 0, 0)
result = xn90 * y90 * x90
vector = array([0, 1, 0])
vector = result.rotate(vector)
self.check_vector_close(vector, 1, 0, 0)
initial = Quaternion.from_euler(0, 0, radians(45))
initial = Quaternion.from_euler(0, 0, radians(45)) * initial
self.check_euler(initial.euler(), 0, 0, radians(90))
initial = Quaternion.from_euler(0, radians(10), 0) * initial
vector = initial.rotate(vector)
self.check_vector_close(vector, 0, -0.9848078, -0.17364818)
self.check_euler(initial.euler(), radians(10), 0, radians(90))
def test_multiplication2(self):
attitude = Quaternion.from_euler(radians(-5), 0, radians(90))
attitude = attitude * Quaternion.from_euler(0, 0, radians(90))
self.check_euler(attitude.euler(), 0, radians(5), radians(180))
def test_multiplication3(self):
attitude = Quaternion.from_euler(radians(-3), radians(3), 0)
attitude = attitude * Quaternion.integrate_rotation_rate(
0, 0, radians(-5), 1)
self.check_euler(attitude.euler(),
radians(-3.24974326),
radians(2.727438544),
radians(-4.99563857))
```
#### File: legtool/tf/test_tf.py
```python
import math
from . import tf
def check_vectors_close(v1, v2):
assert abs(v1.x - v2.x) < 0.001
assert abs(v1.y - v2.y) < 0.001
assert abs(v1.z - v2.z) < 0.001
def check_mapping(f1, f1p, f2p):
check_vectors_close(f1.map_to_parent(tf.Point3D(*f1p)), tf.Point3D(*f2p))
check_vectors_close(f1.map_from_parent(tf.Point3D(*f2p)), tf.Point3D(*f1p))
def check_frames(f1, f1p, f2, f2p):
check_vectors_close(f1.map_to_frame(f2, tf.Point3D(*f1p)), tf.Point3D(*f2p))
check_vectors_close(f2.map_from_frame(f1, tf.Point3D(*f1p)),
tf.Point3D(*f2p))
check_vectors_close(f2.map_to_frame(f1, tf.Point3D(*f2p)), tf.Point3D(*f1p))
check_vectors_close(f1.map_from_frame(f2, tf.Point3D(*f2p)),
tf.Point3D(*f1p))
def test_simple_transforms():
frame = tf.Frame(tf.Point3D(10., 0., 0.),
tf.Quaternion.from_euler(0, 0, 0))
check_mapping(frame, (0., 0., 0.), (10., 0., 0.))
check_mapping(frame, (1., 0., 0.), (11., 0., 0.))
check_mapping(frame, (1., 2., 0.), (11., 2., 0.))
check_mapping(frame, (1., 2., 3.), (11., 2., 3.))
frame = tf.Frame(tf.Point3D(0., 10., 0.),
tf.Quaternion.from_euler(0., 0., math.radians(90.)))
check_mapping(frame, (0., 0., 0.), (0., 10., 0.))
check_mapping(frame, (1., 0., 0.), (0., 9., 0.))
check_mapping(frame, (-1., 0., -2.), (0., 11., -2.))
frame = tf.Frame(tf.Point3D(0., 0., 3.),
tf.Quaternion.from_euler(math.radians(-90.), 0., 0.))
check_mapping(frame, (0., 0., 0.), (0., 0., 3.))
check_mapping(frame, (0., 0., 1.), (-1., 0., 3.))
def test_frame_chains():
root = tf.Frame(tf.Point3D(0., 0., 0.), tf.Quaternion())
child1 = tf.Frame(tf.Point3D(10., 2., 0.), tf.Quaternion(), root)
child2 = tf.Frame(tf.Point3D(-3., -5., 0.), tf.Quaternion(), root)
check_mapping(child1, (0., 0., 0.), (10., 2., 0.))
check_frames(child1, (0., 0., 0.), root, (10., 2., 0.))
check_frames(child2, (0., 0., 0.), root, (-3., -5., 0.))
check_frames(child1, (0., 0., 0.), child2, (13., 7., 0.))
subchild1 = tf.Frame(tf.Point3D(1., 2., 0.), tf.Quaternion(), child1)
check_mapping(subchild1, (0., 0., 0.), (1., 2., 0.))
check_frames(subchild1, (0., 0., 0.), child1, (1., 2., 0.))
check_frames(subchild1, (0., 0., 0.), root, (11., 4., 0.))
check_frames(subchild1, (0., 0., 0.), child2, (14., 9., 0.))
# TODO jpieper: Finally, test a frame that is unlinked, and thus
# will be referenced above the current "root" node.
``` |
{
"source": "jpieper-tri/bazel",
"score": 2
} |
#### File: build_defs/pkg/make_rpm.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
import fileinput
import os
import re
import shutil
import subprocess
import sys
from tempfile import mkdtemp
# pylint: disable=g-direct-third-party-import
from third_party.py import gflags
gflags.DEFINE_string('name', '', 'The name of the software being packaged.')
gflags.DEFINE_string('version', '',
'The version of the software being packaged.')
gflags.DEFINE_string('release', '',
'The release of the software being packaged.')
gflags.DEFINE_string('arch', '',
'The CPU architecture of the software being packaged.')
gflags.DEFINE_string('spec_file', '',
'The file containing the RPM specification.')
gflags.DEFINE_string('out_file', '',
'The destination to save the resulting RPM file to.')
# Setup to safely create a temporary directory and clean it up when done.
@contextlib.contextmanager
def Cd(newdir, cleanup=lambda: True):
"""Change the current working directory.
This will run the provided cleanup function when the context exits and the
previous working directory is restored.
Args:
newdir: The directory to change to. This must already exist.
cleanup: An optional cleanup function to be executed when the context exits.
Yields:
Nothing.
"""
prevdir = os.getcwd()
os.chdir(os.path.expanduser(newdir))
try:
yield
finally:
os.chdir(prevdir)
cleanup()
@contextlib.contextmanager
def Tempdir():
"""Create a new temporary directory and change to it.
The temporary directory will be removed when the context exits.
Yields:
The full path of the temporary directory.
"""
dirpath = mkdtemp()
def Cleanup():
shutil.rmtree(dirpath)
with Cd(dirpath, Cleanup):
yield dirpath
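# Usage sketch: the two context managers compose so that work happens inside a
# throwaway directory which is removed again on exit, e.g.
#   with Tempdir() as dirname:
#       ...  # cwd is now `dirname`; it is deleted when the block exits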
def GetFlagValue(flagvalue, strip=True):
if flagvalue:
if flagvalue[0] == '@':
with open(flagvalue[1:], 'r') as f:
flagvalue = f.read()
if strip:
return flagvalue.strip()
return flagvalue
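# Example (sketch, hypothetical file name): GetFlagValue('@version.txt')
# returns the stripped contents of version.txt, while GetFlagValue('1.2.3')
# returns '1.2.3' unchanged.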
WROTE_FILE_RE = re.compile(r'Wrote: (?P<rpm_path>.+)', re.MULTILINE)
def FindOutputFile(log):
"""Find the written file from the log information."""
m = WROTE_FILE_RE.search(log)
if m:
return m.group('rpm_path')
return None
def CopyAndRewrite(input_file, output_file, replacements=None):
"""Copies the given file and optionally rewrites with replacements.
Args:
input_file: The file to copy.
output_file: The file to write to.
replacements: A dictionary of replacements.
      Keys are prefixes to scan for, values are the replacements to write after
the prefix.
"""
with open(output_file, 'w') as output:
for line in fileinput.input(input_file):
if replacements:
for prefix, text in replacements.items():
if line.startswith(prefix):
line = prefix + ' ' + text + '\n'
break
output.write(line)
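# Example (sketch, hypothetical file names):
#   CopyAndRewrite('pkg.spec', 'out.spec', {'Version:': '1.2.3'})
# copies pkg.spec to out.spec, rewriting any line that starts with 'Version:'
# to 'Version: 1.2.3' and leaving all other lines untouched.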
def Which(program):
def IsExe(fpath):
return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
for path in os.environ['PATH'].split(os.pathsep):
filename = os.path.join(path, program)
if IsExe(filename):
return filename
return None
class NoRpmbuildFound(Exception):
pass
def FindRpmbuild():
path = Which('rpmbuild')
if path:
return path
else:
raise NoRpmbuildFound()
class RpmBuilder(object):
"""A helper class to manage building the RPM file."""
SOURCE_DIR = 'SOURCES'
BUILD_DIR = 'BUILD'
TEMP_DIR = 'TMP'
DIRS = [SOURCE_DIR, BUILD_DIR, TEMP_DIR]
def __init__(self, name, version, release, arch):
self.name = name
self.version = GetFlagValue(version)
self.release = GetFlagValue(release)
self.arch = arch
self.files = []
self.rpmbuild_path = FindRpmbuild()
self.rpm_path = None
def AddFiles(self, files):
"""Add a set of files to the current RPM."""
self.files += files
def SetupWorkdir(self, spec_file, original_dir):
"""Create the needed structure in the workdir."""
# Create directory structure.
for name in RpmBuilder.DIRS:
if not os.path.exists(name):
os.makedirs(name, 0o777)
# Copy the files.
for f in self.files:
dst_dir = os.path.join(RpmBuilder.BUILD_DIR, os.path.dirname(f))
if not os.path.exists(dst_dir):
os.makedirs(dst_dir, 0o777)
shutil.copy(os.path.join(original_dir, f), dst_dir)
# Copy the spec file, updating with the correct version.
spec_origin = os.path.join(original_dir, spec_file)
self.spec_file = os.path.basename(spec_file)
replacements = {}
if self.version:
replacements['Version:'] = self.version
if self.release:
replacements['Release:'] = self.release
CopyAndRewrite(spec_origin, self.spec_file, replacements)
def CallRpmBuild(self, dirname):
"""Call rpmbuild with the correct arguments."""
args = [
self.rpmbuild_path,
'--define',
'_topdir %s' % dirname,
'--define',
'_tmppath %s/TMP' % dirname,
'--bb',
self.spec_file,
]
p = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
output = p.communicate()[0]
if p.returncode == 0:
# Find the created file.
self.rpm_path = FindOutputFile(output)
if p.returncode != 0 or not self.rpm_path:
print('Error calling rpmbuild:')
print(output)
# Return the status.
return p.returncode
def SaveResult(self, out_file):
"""Save the result RPM out of the temporary working directory."""
if self.rpm_path:
shutil.copy(self.rpm_path, out_file)
print('Saved RPM file to %s' % out_file)
else:
print('No RPM file created.')
def Build(self, spec_file, out_file):
"""Build the RPM described by the spec_file."""
print('Building RPM for %s at %s' % (self.name, out_file))
original_dir = os.getcwd()
spec_file = os.path.join(original_dir, spec_file)
out_file = os.path.join(original_dir, out_file)
with Tempdir() as dirname:
self.SetupWorkdir(spec_file, original_dir)
status = self.CallRpmBuild(dirname)
self.SaveResult(out_file)
return status
def main(argv=()):
try:
builder = RpmBuilder(FLAGS.name, FLAGS.version, FLAGS.release, FLAGS.arch)
builder.AddFiles(argv[1:])
return builder.Build(FLAGS.spec_file, FLAGS.out_file)
except NoRpmbuildFound:
print('ERROR: rpmbuild is required but is not present in PATH')
return 1
if __name__ == '__main__':
FLAGS = gflags.FLAGS
main(FLAGS(sys.argv))
``` |
{
"source": "jpierel14/roman-sn-redshifts",
"score": 3
} |
#### File: roman-sn-redshifts/romanz/romanz.py
```python
import numpy as np
from astropy import units as u
from astropy import table
from scipy import interpolate as scinterp
class SNANAHostLib():
"""Class for parsing a SNANA HOSTLIB file.
The file may contain a weight map, and must contain host galaxy data,
following the standard SNANA HOSTLIB format.
"""
def __init__(self, filename):
"""Read in a SNANA HOSTLIB file"""
# find the 'VARNAMES' line, use it to define the start of the hostlib
# section (as opposed to the wgtmap section)
nwgtmapstart = -1
ngaldataheader = -1
ngaldatastart = -1
iline = 0
with open(filename, 'r') as read_obj:
for line in read_obj:
if len(line.strip().lstrip('#'))==0:
continue
if line.strip().startswith('NVAR_WGTMAP:'):
wgtmaphdrline = line.split()
varnames_wgtmap = wgtmaphdrline[3:]
if line.strip().startswith('WGT:') and nwgtmapstart<0:
nwgtmapstart = iline
if line.strip().startswith('GAL:') and ngaldatastart<0:
ngaldatastart = iline
if line.strip().startswith('VARNAMES:'):
ngaldataheader = iline
iline += 1
if ngaldataheader < 0:
            raise RuntimeError(f"{filename} is not an SNANA HOSTLIB file")
if nwgtmapstart >= 0:
self.wgtmaptable = table.Table.read(
filename, format='ascii.basic',
names=['label']+varnames_wgtmap+['wgt','snmagshift'],
data_start=nwgtmapstart-1,
data_end=ngaldataheader-2,
comment='#'
)
else:
self.wgtmaptable = None
galdatatable = table.Table.read(filename, format='ascii.basic',
header_start=ngaldataheader-1,
data_start=ngaldatastart-1, comment='#'
)
galdatatable.remove_columns(['VARNAMES:'])
self.galdatatable = galdatatable
return
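# Usage sketch (assumption; 'survey.HOSTLIB' is a hypothetical file name):
#   hostlib = SNANAHostLib('survey.HOSTLIB')
#   hostlib.galdatatable   # astropy Table built from the GAL: rows
#   hostlib.wgtmaptable    # astropy Table built from the WGT: map, or None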
class CatalogBasedRedshiftSim():
"""Class for projecting redshift completeness from an input
galaxy catalog.
"""
def __init__(self):
self.postsurvey = False
self.galaxies = None
def read_galaxy_catalog(self, filename):
"""Read in a catalog of galaxy properties
Parameters
----------
filename : str
full path to the file containing galaxy properties (e.g. Mass, SFR,
magnitudes, etc.). May be a SNANA HOSTLIB file, or any formtat that
can be auto-parsed by astropy.table.Table.read()
"""
# TODO: check if it is a hostlib without try/except
try :
self.galaxies = table.Table.read(filename)
except:
try:
hostlib = SNANAHostLib(filename)
self.galaxies = hostlib.galdatatable
except:
raise RuntimeError(
f"Can't read in {filename}. "
"It may not be a valid hostlib or astropy-readable table.")
return
def assign_snhost_prob(self, snr_model='AH18S',
logmasscolname='logmass',
logsfrcolname='logsfr',
verbose=True):
"""Add a column to the 'galaxies' catalog that gives the relative
probability for each galaxy hosting a SN in any given observer-frame
year. This is computed based on the predicted SN rate (number of SN
explosions per observer-frame year) of each galaxy, adopting the
specified SN rate model.
Parameters
----------
snr_model : str
'A+B' : SNR = A*M + B*SFR (Scannapieco & Bildsten 2005)
'AH18S' : the smooth logarithmic sSFR model (Andersen & Hjorth 2018)
'AH18PW' : the piecewise sSFR model (Andersen & Hjorth 2018)
logmasscolname : str
name of column in the galaxies Table containing the log10(Mass)
logsfrcolname : str
name of column in the galaxies Table containing the
log10(StarFormationRate)
verbose : bool
Set to True to print messages.
"""
if self.galaxies is None:
print("No 'galaxies' catalog loaded. Use 'read_galaxy_catalog()'")
if snr_model.lower()=='a+b':
# Note: adopting the A and B values from Andersen & Hjorth 2018
# but dividing by 1e-4 (so the SNR below actually counts the number
            # of SN exploding per 10000 yrs)
A = 4.66 * 1e-10
B = 4.88
snr = A * 10 ** self.galaxies[logmasscolname] + B * 10 ** self.galaxies[logsfrcolname]
            # normalise by the maximum SN rate to get relative probabilities
snr /= np.nanmax(snr)
snrcolname = 'snr_A+B'
snrcol = table.Column(data=snr, name='snr_A+B')
elif snr_model.lower() == 'ah18s':
logssfr = self.galaxies[logsfrcolname] - self.galaxies[logmasscolname]
ssnr = ssnr_ah18_smooth(logssfr)
snr = ssnr * 10 ** self.galaxies[logmasscolname]
snr /= np.nanmax(snr)
snrcolname = 'snr_AH18_smooth'
snrcol = table.Column(data=snr, name=snrcolname)
elif snr_model.lower() == 'ah18pw':
logssfr = self.galaxies[logsfrcolname] - self.galaxies[logmasscolname]
ssnr = ssnr_ah18_piecewise(logssfr)
snr = ssnr * 10 ** self.galaxies[logmasscolname]
snr /= np.nanmax(snr)
snrcolname = 'snr_AH18_piecewise'
else:
            raise RuntimeError(f"{snr_model} is not a known SN rate model.")
snrcol = table.Column(data=snr, name=snrcolname)
if snrcolname in self.galaxies.colnames:
self.galaxies[snrcolname] = snr
else:
self.galaxies.add_column(snrcol)
if verbose:
print(f"Added/updated relative SN rate column using {snr_model} model")
return
def pick_host_galaxies(self, nsn, snrcolname='snr_AH18_piecewise',
replace=False, verbose=True):
"""Do a random draw to assign 'nsn' supernovae to galaxies in the
galaxies catalog, based on the (pre-defined) relative SN rates.
TODO: (Alternatively, read in a SNANA output file (.dump file maybe?)
that has already run a survey simulation and picked host galaxies.)
Parameters
----------
nsn : int
number of SN to assign to host galaxies
snrcolname : str
name of the column in the galaxies catalog that gives the relative
SN rate (or 'weight') for each galaxy. This may be created by the
assign_snhost_prob() method.
replace : bool
Whether to sample with replacement. If True, a galaxy may host
more than one SN. If False, then assign no more than one SN to
each galaxy (requires nsn<len(galaxies))
"""
        if not replace and nsn > len(self.galaxies):
raise RuntimeError(
r'Picking hosts without replacement, but Nsn > len(galaxies)')
# Pick SN host galaxies
galindices = np.arange(len(self.galaxies))
psnhost = self.galaxies[snrcolname]/np.sum(self.galaxies[snrcolname])
snindices = np.random.choice(
galindices, nsn, replace=replace, p=psnhost)
# Add a boolean 'host' column to the galaxies catalog
ishost = np.zeros(len(self.galaxies), dtype=bool)
ishost[snindices] = True
hostcol = table.Column(name='host', data=ishost)
if 'host' in self.galaxies.colnames:
self.galaxies['host'] = hostcol
else:
self.galaxies.add_column(hostcol, index=1)
# TODO: Alternate approach: read in a SNANA output file (.dump file
# maybe?) that has already run a survey simulation and picked hosts.
if verbose:
print(f"Assigned {nsn} SNe to hosts using {snrcolname} probabilities.")
return
def apply_specz_completeness_map(self, filename,
defining_columns_galcat,
defining_columns_speczmap,
efficiency_columns_speczmap,
fill_value = np.nan
):
"""Read in a 'map' for spectroscopic redshift completeness, which
maps from one or more galaxy properties (mag, SFR, z...) onto a
probability of getting a spec-z.
Preferred format of the input file is a .ecsv file, but anything
that astropy.table can read is OK in principle.
Then apply the specz completeness map to the catalog
of host galaxy properties (already read in) to define exactly which
of the galaxies gets a redshift.
If the method 'pick_host_galaxies' has already been run
(so the flag postsurvey == True), then only galaxies defined as SN
hosts are assigned a redshift.
Parameters
----------
filename : str
path to astropy-readable file
defining_columns_galcat : listlike
list of strings specifying the column names in the galaxy catalog
(self.galaxies) for parameters that are used to define the specz
efficiency (e.g. if this is a SFR-based specz map then this may
be ['logSFR'])
defining_columns_speczmap : listlike, same length as above
list of strings specifying the corresponding column names in the
specz map file (given by 'filename'). Must be the same length as
defining_columns_galcat, giving corresponding column names in the
same order.
efficiency_columns_speczmap : listlike, same length as above
list of column names giving the specz
efficiency (or completeness fraction) for each row in the specz
map file.
"""
if (len(defining_columns_galcat)!=len(defining_columns_speczmap) or
len(defining_columns_galcat)!=len(efficiency_columns_speczmap)):
raise RuntimeError(
'You must specify the same number of columns from the '
'galaxy catalog and the specz efficiency catalog.')
# TODO : make a masked array to remove NaNs ? ?
speczmap = table.Table.read(filename)
# TODO : build a separate interpolating function for each of
# the possible input parameters ?
interpolatordict = {}
for i in range(len(defining_columns_galcat)):
colname_galcat = defining_columns_galcat[i]
xobs = self.galaxies[colname_galcat]
colname_param = defining_columns_speczmap[i]
x = speczmap[colname_param]
colname_efficiency = efficiency_columns_speczmap[i]
y = speczmap[colname_efficiency]
interpolator = scinterp.interp1d(
x, y, bounds_error=False, fill_value=fill_value)
interpolatordict[colname_galcat] = interpolator
return(interpolatordict)
def make_photoz_accuracy_map(self):
"""For every galaxy in the catalog of galaxy properties, apply a
photo-z function that defines the 'measured' photo-z value and
uncertainty (photoz pdf). Includes catastrophic outliers.
"""
pass
def report_redshift_completeness(self):
"""Produce a report of the overall redshift completeness, accuracy
and precision, based on multiple spectroscopic 'filters' and the
random assignment of photo-z values.
"""
pass
def ssnr_ah18_smooth(logssfr):
""" Returns the Type Ia specific SN rate per Tyr
(number of SN Ia exploding per 10^12 yr per solar mass)
for a galaxy, using the model of Andersen & Hjorth 2018, which is based
on the specific star formation rate, given as log10(SSFR).
"""
a = (1.5)*1e-13 # (1.12)*1e-13
b = 0.5 # 0.73
k = 0.4 # 0.49
ssfr0 = 1.7e-10# 1.665e-10
# logssfr0 = -9.778585762157661 # log10(ssfr0)
ssfr = np.power(10.,logssfr)
ssnr = (a + (a/k) * np.log10(ssfr/ssfr0 + b)) * 1e12
#ssnr = np.max(ssnr, 0.7)
return(ssnr)
def ssnr_ah18_piecewise(logssfr):
""" Returns the Type Ia specific SN rate per Tyr
(number of SN Ia exploding per 10^12 yr per solar mass)
    for a galaxy, using the piecewise linear model
of <NAME> 2018, which is based
on the specific star formation rate, given as log10(SSFR).
"""
# Note that the alpha scaling parameter
# has been multiplied by 1e12 to get units of Tyr-1
alpha = (1.12)* 1e5
beta = 0.586
ssfr2 = 1.01e-11
ssfr1 = 1.04e-9
S1 = np.power(ssfr1, beta)
S2 = np.power(ssfr2, beta)
if not np.iterable(logssfr):
logssfr = np.array([logssfr])
ssfr = np.power(10.,logssfr)
ssnr = alpha * np.where(ssfr<=ssfr2, S2,
np.where(ssfr>=ssfr1, S1,
np.power(ssfr, beta)))
if len(ssnr)==1:
ssnr = ssnr[0]
return(ssnr)
```
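A rough end-to-end sketch of how the class above is meant to be driven: read a catalog, assign relative SN rates, draw host galaxies, then map host properties onto spec-z efficiencies. The file names and LSST-style column names below are placeholders (borrowed from the package tests), not guaranteed inputs.
```python
from romanz import romanz
sim = romanz.CatalogBasedRedshiftSim()
sim.read_galaxy_catalog("galaxy_catalog.fits")
# relative SN rate per galaxy from the piecewise sSFR model of Andersen & Hjorth 2018
sim.assign_snhost_prob(snr_model='AH18PW', logmasscolname='logmass', logsfrcolname='logsfr')
# randomly assign 100 SNe to hosts, at most one per galaxy
sim.pick_host_galaxies(100, snrcolname='snr_AH18_piecewise', replace=False)
# build interpolators that map galaxy magnitudes onto spec-z efficiencies
interpolators = sim.apply_specz_completeness_map(
    "specz_completeness_map.ecsv",
    defining_columns_galcat=['g_LSST', 'r_LSST'],
    defining_columns_speczmap=['mag', 'mag'],
    efficiency_columns_speczmap=['efficiency_g', 'efficiency_r'])
```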
#### File: roman-sn-redshifts/romanz/specplot.py
```python
import os
import numpy as np
from matplotlib import pyplot as plt
from astropy.table import Table, Column
# from astropy.io import fits, ascii
#from astropy.coordinates import SkyCoord
#from astropy import units as u
from astropy.cosmology import FlatLambdaCDM
import sncosmo
#from scipy.interpolate import interp1d
#from scipy.integrate import trapz
# TODO: Adjust the `datadir` variable to use a local path from the install dir
datadir = os.path.abspath("./data")
#Data_overview_table_name = os.path.join(datadir, "gal_lib_short1.dat") #name of table which contains galaxy ID information and location for DEIMOS and vUDS spectra
hostlib_filename = os.path.join(datadir,'cosmos_example_hostlib.txt') #name of SNANA HOSTLIB file (includes observed and rest-frame-synthetic photometry)
eazy_templates_filename = os.path.join(datadir,"Akari_Hosts_subset_SNR_v7.HOSTLIB")
vUDS_spec_location = os.path.join(datadir,"vUDS_spec/")
DEIMOS_spec_location = os.path.join(datadir,'deimos_spec/')
sim_spec_location = os.path.join(datadir,'sim_spec/')
HST_table_name = os.path.join(datadir,'cosmos_match.dat') #name of table with HST ID information to locate and match spec files
HST_cosmos_folder_location = os.path.join(datadir,'COSMOS_3DHST_SPECTRA/') #location of all cosmos tile folders (i.e. directory which contains cosmos-02, cosmos-03, etc.)
medsmooth = lambda f,N : np.array(
[np.median( f[max(0,i-N):min(len(f),max(0,i-N)+2*N)])
for i in range(len(f))])
flcdm = FlatLambdaCDM(H0=73, Om0=0.27)
def load_eazypy_templates(eazytemplatefilename,
format='ascii.commented_header',
verbose=True,
**kwargs):
"""Read in the galaxy SED templates (basis functions for the
eazypy SED fitting / simulation) and store as the 'eazytemplatedata'
property.
We read in an astropy Table object with N rows and M+1 columns, where
N is the number of wavelength steps and M is the
number of templates (we expect 13).
The first column is the wavelength array, common to all templates.
We translate the Nx(M+1) Table data into a np structured array,
then reshape as a (M+1)xN numpy ndarray, with the first row giving
the wavelength array and each subsequent row giving a single
template flux array.
See the function simulate_eazy_sed_from_coeffs() to construct
a simulated galaxy SED with a linear combination from this matrix.
"""
eazytemplates = Table.read(eazytemplatefilename,
format=format, **kwargs)
tempdata = eazytemplates.as_array()
eazytemplatedata = tempdata.view(np.float64).reshape(
tempdata.shape + (-1,)).T
if verbose:
print("Loaded Eazypy template SEDs from {0}".format(
eazytemplatefilename))
return eazytemplatedata
def scale_to_match_imag(wave, flam, imag, medsmooth_window=20):
"""KLUDGE!! Using sncosmo to make this galaxy SED into a Source so
we can integrate into mags using the sncosmo bandmag, and rescale
to match a pre-defined mag
wave: wavelength in angstroms
flam: flambda in erg/s/cm2/A
imag: sdss i band magnitude to scale to
"""
# check that we cover the i band
if wave[0]>6600:
wave = np.append([6580], wave)
flam = np.append([1e-20], flam)
if wave[-1]<8380:
wave = np.append(wave, [8400])
flam = np.append(flam, [1e-20])
if medsmooth_window>1:
# If a smoothing window size is given, use only the smoothed flux
flam = medsmooth(flam, medsmooth_window)
# Make a dummy sncosmo Source and scale it to the given sdss i band mag
phase = np.array([-1, 0, 1, 2]) # need at least 4 phase positions for a source
flux = np.array([flam, flam, flam, flam])
galsource = sncosmo.TimeSeriesSource(phase, wave, flux)
galsource.set_peakmag(imag, 'sdssi', 'ab')
fout = galsource.flux(0,wave)
return(wave, fout)
def simulate_eazy_sed_from_coeffs(eazycoeffs, eazytemplatedata, z,
returnfluxunit='flambda', returnwaveunit='A',
limitwaverange=True, savetofile='', **outfile_kwargs):
"""
Generate a simulated SED from a given set of input eazy-py coefficients
and eazypy templates.
NB: Requires the eazy-py package to apply the IGM absorption!
(https://github.com/gbrammer/eazy-py)
Optional Args:
returnfluxunit: ['AB', 'flambda', 'fnu'] TODO: add Jy
'AB'= return log(flux) as monochromatic AB magnitudes
'AB25' = return AB mags, rescaled to a zeropoint of 25: m=-2.5*log10(fnu)+25
'flambda' = return flux density in erg/s/cm2/A
'fnu' = return flux density in erg/s/cm2/Hz
    returnwaveunit: ['A' or 'nm']
    limitwaverange: limit the output wavelengths to the range covered by PFS
    savetofile: filename for saving the output spectrum as a two-column ascii
        data file (suitable for use with the SubaruPFS ETC from <NAME>)
Returns
-------
wave : observed-frame wavelength, Angstroms or nm
flux : flux density of best-fit template, erg/s/cm2/A or AB mag
"""
# the input data units are Angstroms for wavelength
# and cgs for flux (flambda): erg s-1 cm-2 Ang-1
wave_em = eazytemplatedata[0] # rest-frame (emitted) wavelength
wave_obs = wave_em * (1 + z) # observer-frame wavelength
obsfluxmatrix = eazytemplatedata[1:]
flam = np.dot(eazycoeffs, obsfluxmatrix) # flux in erg/s/cm2/A
if limitwaverange:
# to simplify things, we only work with data over the Subaru PFS
# + WFIRST prism wavelength range, from 200 to 2500 nm
# (2000 to 25000 Angstroms)
iuvoir = np.where((wave_obs>2000) & (wave_obs<25000))[0]
wave_obs = wave_obs[iuvoir]
wave_em = wave_em[iuvoir]
flam = flam[iuvoir]
# convert flux units to fnu using : fnu=(lam^2/c)*flam ; c = 3.e18 A/s
fnu = (wave_em * wave_em / 3.e18) * flam # flux in erg/s/cm2/Hz
# Confusing previous setup from GB, used to convert to AB mags w/ zpt=25
#fnu_factor = 10 ** (-0.4 * (25 + 48.6))
# flam_spec = 1. / (1 + z) ** 2
# obsflux = sedsimflux * fnu_factor * flam_spec
try:
import eazy.igm
igmz = eazy.igm.Inoue14().full_IGM(z, wave_obs)
fnu *= igmz
except:
pass
if returnfluxunit=='AB':
# convert from flux density fnu into monochromatic AB mag:
returnflux = -2.5 * np.log10(fnu) - 48.6
elif returnfluxunit=='AB25':
# convert from flux density fnu into AB mags for zpt=25:
returnflux = -2.5 * np.log10(fnu) + 25
elif returnfluxunit=='fnu':
returnflux = fnu
elif returnfluxunit.startswith('flam'):
returnflux = flam
else:
print("I don't recognize flux unit {}".format(returnfluxunit))
return None,None
if returnwaveunit=='nm':
returnwave = wave_obs / 10.
elif returnwaveunit.startswith('A'):
returnwave = wave_obs
else:
print("I don't recognize wave unit {}".format(returnwaveunit))
return None,None
if savetofile:
out_table = Table()
outcol1 = Column(data=wave_obs, name='Angstroms')
outcol2 = Column(data=flam, name='flambda')
out_table.add_columns([outcol1, outcol2])
out_table.write(savetofile, **outfile_kwargs)
return returnwave, returnflux
def mAB_from_flambda(flambda, wave):
""" Convert from flux density f_lambda in erg/s/cm2/A
into AB mag
flambda: flux density f_lambda (erg/s/cm2/A)
wave : wavelength in angstroms
(see https://en.wikipedia.org/wiki/AB_magnitude)
"""
return(-2.5 * np.log10(3.34e4 * wave * wave * (flambda / 3631)))
def plot_spec_comparison(galid, showphot=True, showvuds=True, showdeimos=True,
showhst=True, showeazy=True,
medsmooth_deimos=20, medsmooth_vuds=20,
medsmooth_hst=20,
rescaledeimos=True, rescalevuds=False, ax=None):
"""Plot flux vs wavelength for the given galaxy ID, showing the observed
the Eazy-simulated spectrum.
TBD : also plot the simulated photometry from the Akari catalog.
"""
if ax is None:
fig = plt.figure(figsize=[12,4])
ax = fig.add_subplot(1,1,1)
# read in the eazy spectral templates data
# NOTE: could do this without loading the whole hostlib as a SnanaSimData object, would just need to grab
# the code from snhostspec
#sim1 = snhostspec.SnanaSimData()
#sim1.load_hostlib_catalog("DATA/cosmos_example_hostlib.txt")
#sim1.
eazytemplatedata = load_eazypy_templates(eazy_templates_filename)
# ---------------------------------
# Simulated and Observed photometry :
# --------------------------------
# plot the EAZY simulated spectrum
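    # NOTE: `mastercat`, `ithisgal_mastercat`, `z`, `magmin`, and `magmax` are
    # assumed to be defined elsewhere (e.g. the Akari master catalog row matched
    # to `galid`); they are not set up in this excerpt of the module.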
eazycoeffs = np.array([mastercat[col][ithisgal_mastercat]
for col in mastercat.colnames
if col.startswith('coeff_specbasis')])
outfilename = "DATA/cosmos_example_spectra/cosmos_example_host_simspec_" +\
"{:6d}.fits".format(galid)
wobs, mobs = simulate_eazy_sed_from_coeffs(
eazycoeffs, eazytemplatedata, z,
returnwaveunit='A', returnfluxunit='AB25',
savetofile=outfilename, overwrite=True)
if showeazy:
ax.plot(wobs, mobs, label='EAZY SED fit', color='0.5', zorder=10)
ax.set_xlim(3000,19000)
#ax.set_ylim(-0.25*1e-16,0.3*1e-16)
#ax.set_ylim(27, 20)
ax.text(0.95,0.95, galid, ha='right', va='top', transform=ax.transAxes)
ax.text(0.95,0.88, "z={0}".format(z), ha='right', va='top', transform=ax.transAxes)
ax = plt.gca()
ax.set_xlim(3000, 19000)
ax.set_ylim(magmin-2,magmax+1)
ax.legend(loc='upper left')
ax.invert_yaxis()
ax.grid()
ax.set_xlabel('Observed Wavelength (Angstroms)')
ax.set_ylabel("AB mag")
plt.tight_layout()
#plt.savefig("cosmos_example_spec_eazysims.pdf")
return
```
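The flux-unit conversions used above (flambda to fnu to AB magnitude) can be sanity-checked in isolation; the wavelength and flux values below are purely illustrative and are not taken from any of the data files referenced in this module.
```python
import numpy as np
wave = 5500.0       # Angstroms (illustrative)
flambda = 1.0e-17   # erg/s/cm2/A (illustrative)
# same relation as mAB_from_flambda(): fnu[Jy] = 3.34e4 * wave^2 * flambda
mag_from_jansky = -2.5 * np.log10(3.34e4 * wave * wave * (flambda / 3631))
# same chain as simulate_eazy_sed_from_coeffs(): flambda -> fnu (cgs) -> AB mag
fnu_cgs = wave * wave / 3.e18 * flambda
mag_from_cgs = -2.5 * np.log10(fnu_cgs) - 48.6
print(mag_from_jansky, mag_from_cgs)  # both ~21.4, agreeing to a few millimag
```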
#### File: roman-sn-redshifts/tests/test_romanz.py
```python
import unittest
from romanz import romanz
_TEST_HOSTLIB_ = "romanz/data/Akari_Hosts_subset_SNR_v7.HOSTLIB"
_TEST_GALCAT_ = "romanz/data/Akari_input_catalog_20210415.fits"
_TEST_SPECZCOMPLETENESSMAP_ = "romanz/data/specz_completeness_map_preromandata_cosmos.ecsv"
class TestCatalogBasedRedshiftSim(unittest.TestCase):
"""Test class for projecting redshift completeness from an input
galaxy catalog.
"""
#def setUpClass(cls) -> None:
# """ Called once, before all tests"""
def setUp(self) -> None:
"""Called before each and every test"""
self.romanz_sim = romanz.CatalogBasedRedshiftSim()
self.romanz_sim.read_galaxy_catalog(_TEST_GALCAT_)
return
def test_read_catalog(self):
"""Read in a catalog of galaxy properties"""
# TODO : convert to a unittest setup step?
self.assertTrue(self.romanz_sim.galaxies is not None)
self.assertTrue(len(self.romanz_sim.galaxies) > 0)
def test_pick_host_galaxies(self):
"""Use a SN rate function to define the relative probability of each
galaxy hosting a SN in any given year.
"""
# Assign probabilities based on spec. SFR
self.romanz_sim.assign_snhost_prob(
snr_model='AH18PW', logmasscolname='logmass',
logsfrcolname='logsfr')
# Random sampling to pick host galaxies
self.romanz_sim.pick_host_galaxies(
100, snrcolname='snr_AH18_piecewise', replace=False)
def test_apply_specz_completeness_map(self):
""" Read in a 'map' for spectroscopic redshift completeness, which
maps from one or more galaxy properties (mag, SFR, z...) onto a
probability of getting a spec-z.
Then apply this specz completeness map to a catalog
of SN host galaxy properties (already read in) to define exactly which
of those host galaxies gets a redshift.
"""
interpolatordict = self.romanz_sim.apply_specz_completeness_map(
_TEST_SPECZCOMPLETENESSMAP_,
defining_columns_galcat=['g_LSST', 'r_LSST', 'i_LSST', 'z_LSST'],
defining_columns_speczmap=['mag', 'mag', 'mag', 'mag'],
efficiency_columns_speczmap=['efficiency_g', 'efficiency_r',
'efficiency_i', 'efficiency_z'],
)
def test_make_photoz_accuracy_map(self):
"""For every galaxy in the catalog of galaxy properties, apply a
photo-z function that defines the 'measured' photo-z value and
uncertainty (photoz pdf). Includes catastrophic outliers.
"""
self.assertEqual(True, False)
def test_report_redshift_completeness(self):
"""Produce a report of the overall redshift completeness, accuracy
and precision, based on multiple spectroscopic 'filters' and the
random assignment of photo-z values.
"""
self.assertEqual(True, False)
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "Jpifer13/PythonReusableClasses",
"score": 3
} |
#### File: PythonReusableClasses/src/pandas_dataset_class.py
```python
import os
import sys
import logging
import pandas
from pandas import DataFrame as df
from pandas import read_csv
__version__ = "Version 1.0"
__application__ = "IrisDatasetClass"
class PandasDatasetClass:
"""
This is a dataset class object.
It is structured to be a general dataset class
"""
def __init__(self, column_names, data_url=None, data_path=None, logger=None):
"""
__init__ Constructs an instance of this class.
Arguments:
column_names {[List]} -- [List of the column names you want to use]
Keyword Arguments:
data_url {[string]} -- [Url to the data] (default: {None})
data_path {[string]} -- [Windows path to data] (default: {None})
"""
# Initiate dataset
self._dataset = None
self._data_url = None
self._data_path = None
# Initialize logger
        # fall back to a named module logger so the logging calls below never hit None
        self._logger = logger or logging.getLogger(__application__)
if self._logger:
self._logger.info(f"PandasDatasetClass accessed.")
self._logger.info("Loading dataset...")
self.load_dataset(column_names, data_url=data_url, data_path=data_path)
@property
def dataset(self):
"""
Get this database instance
Returns:
[DataFrame] -- [Returns instance of the dataset]
"""
return self._dataset
@property
def data_url(self):
"""
Get the url used to create this dataset
Returns:
[string] -- [The url where this dataset was retrieved]
"""
return self._data_url
@data_url.setter
def set_data_url(self, new_url, update_current_dataset=None, column_names=None):
"""
This method updates the data_url property and can update the entire dataset as well with the new url
Arguments:
new_url {[string]} -- [New url for this dataset]
Keyword Arguments:
update_current_dataset {[bool]} -- [If true updates the dataset with new url data] (default: {None})
column_names {[list]} -- [list of new column names for this dataset] (default: {None})
"""
self._data_url = new_url
# Update this dataset with new data if told to
if update_current_dataset and column_names:
self.load_dataset(column_names, data_url=self._data_url)
self._logger.info(f'Successfully updated dataset with data from: {self._data_url}')
elif update_current_dataset and not column_names:
self._logger.exception('Can not update! No column names were given for new dataset!')
else:
self._logger.info(f'Successfully updated dataset url: {self._data_url}')
@property
def data_path(self):
"""
Get the path used to create this dataset
Returns:
[string] -- [The path where this dataset was retrieved]
"""
        return self._data_path
@data_path.setter
def set_data_path(self, new_path, update_current_dataset=None, column_names=None):
"""
This method updates the data_path property and can update the entire dataset as well with the new path
Arguments:
data_path {[string]} -- [New path for this dataset]
Keyword Arguments:
update_current_dataset {[bool]} -- [If true updates the dataset with new url data] (default: {None})
column_names {[list]} -- [list of new column names for this dataset] (default: {None})
"""
self._data_path = new_path
# Update this dataset with new data if told to
if update_current_dataset and column_names:
self.load_dataset(column_names, data_path=self.data_path)
self._logger.info(f'Successfully updated dataset with data from: {self.data_path}')
elif update_current_dataset and not column_names:
self._logger.exception('Can not update! No column names were given for new dataset!')
else:
self._logger.info(f'Successfully updated dataset path: {self.data_path}')
def load_dataset(self, column_names, data_url=None, data_path=None):
"""
This method loads the dataset
Arguments:
column_names {[List]} -- [List of the column names you want to use]
Keyword Arguments:
data_url {[string]} -- [Url to the data] (default: {None})
data_path {[string]} -- [Windows path to data] (default: {None})
"""
if data_url:
# Load dataset
self._data_url = data_url
self._column_names = column_names
self._dataset = read_csv(self._data_url, names=self._column_names)
self._logger.info(f'Dataset at location: {self._data_url} successfully loaded!')
elif data_path:
# Load dataset
self._data_path = data_path
self._column_names = column_names
self._dataset = read_csv(self._data_path, names=self._column_names)
self._logger.info(f'Dataset at location: {self._data_path} successfully loaded!')
else:
# No data url or path
self._logger.exception(f'No path or url to data has been provided:\n data_path = {data_path}\n data_url = {data_url}')
def __str__(self):
"""
Prints out the object as a string
Returns:
[string] -- [type of object, (name)]
"""
        className = type(self).__name__
        # no _name attribute is defined on this class, so identify it by its data source
        return "{},({})".format(className, self._data_url or self._data_path)
``` |
{
"source": "jpigree/ez-repo",
"score": 2
} |
#### File: ez-repo/ez_repo/logger.py
```python
from logging import Logger, getLogger, StreamHandler, INFO, CRITICAL, disable
from colorlog import ColoredFormatter
formatter = ColoredFormatter(
"%(log_color)s%(levelname)-8s%(reset)s %(message)s",
datefmt=None,
reset=True,
log_colors={
'DEBUG': 'cyan',
'INFO': 'green',
'WARNING': 'yellow',
'ERROR': 'red',
'CRITICAL': 'red,bg_white',
},
secondary_log_colors={},
style='%'
)
def disable_logs():
disable(CRITICAL)
def init_logger(logger):
streamH = StreamHandler()
streamH.setFormatter(formatter)
logger.addHandler(streamH)
def set_global_level(level):
LOGGER.setLevel(level)
getLogger().setLevel(level)
LOGGER = Logger("artefact", INFO)
init_logger(LOGGER)
init_logger(getLogger())
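# a minimal usage sketch: running this module directly emits a few colored log lines
if __name__ == "__main__":
    set_global_level(INFO)
    LOGGER.info("rendered in green")
    LOGGER.warning("rendered in yellow")
    LOGGER.error("rendered in red")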
``` |
{
"source": "jpihl/pyclapper",
"score": 3
} |
#### File: jpihl/pyclapper/clapper.py
```python
import time
import array
import pyaudio
CHUNK_SIZE = 1024
MIN_VOLUME = 10000
def clapper(stopped, clap_callback):
stream = pyaudio.PyAudio().open(
format=pyaudio.paInt16,
channels=2,
rate=44100,
input=True,
        frames_per_buffer=CHUNK_SIZE)
last = 0
while True:
if stopped.wait(timeout=0):
break
chunk = array.array('h', stream.read(CHUNK_SIZE))
volume = max(chunk)
if volume >= MIN_VOLUME:
now = time.time()
diff = now - last
if diff > 0.2 and diff < 1.0:
clap_callback()
last = now
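# a minimal usage sketch (the callback and stop event here are hypothetical,
# not part of the original module): listen until interrupted with Ctrl-C
if __name__ == "__main__":
    import threading
    def on_clap():
        print("clap detected")
    stop_event = threading.Event()
    try:
        clapper(stop_event, on_clap)
    except KeyboardInterrupt:
        stop_event.set()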
``` |
{
"source": "jpike/PythonProgrammingForKids",
"score": 4
} |
#### File: BasicConcepts/Functions/CallingFunctionMultipleTimes.py
```python
def PrintGreeting():
print("Hello!")
print("Before PrintGreeting() function call (1st time).")
# Here is where we call our custom function for the first time.
PrintGreeting()
print("After PrintGreeting() function call (1st time).")
# Notice here how we can call our function as many times
# we want within our program.
PrintGreeting()
PrintGreeting()
```
#### File: BasicConcepts/Functions/FunctionWithLiteralReturnValue.py
```python
def GetNumber():
# Here is the return statement for our function.
# Notice the "return" keyword, a space, and then the value we want to return.
return 1
# Here we're calling our function. When functions return a value,
# we can store that value in a variable and use that variable later.
number = GetNumber()
print(number)
```
#### File: BasicConcepts/Functions/FunctionWithMultipleParameters.py
```python
def PrintGreeting(first_name, last_name):
print("Hello " + first_name + " " + last_name + "!")
# Since we have 2 parameters for our function,
# we must now pass two parameters to our function
# when we call it.
first_name = input("Enter first name: ")
last_name = input("Enter last name: ")
PrintGreeting(first_name, last_name)
```
#### File: BasicConcepts/Functions/FunctionWithSingleParameterFromUserInput.py
```python
def PrintGreeting(name):
print("Hello " + name + "!")
# We can get the values we pass for parameters from anywhere.
# In this case, we're getting the value for the name parameter
# from the user.
name = input("Enter a name: ")
PrintGreeting(name)
```
#### File: BasicConcepts/Functions/PrintBoxDynamicHeight.py
```python
def PrintBox(box_height_in_characters):
for row in range(box_height_in_characters):
print("xxxx")
box_height_in_characters = int(input("Enter box height: "))
PrintBox(box_height_in_characters)
```
#### File: BasicConcepts/Functions/ProgramFlowWhenCallingFunction.py
```python
def PrintGreeting():
print("Hello!")
# Here is a print statement to print out some text before we call
# our function to let us more easily see when our function executes.
print("Before PrintGreeting() function call.")
# Here is where we call our custom function for the first time.
PrintGreeting()
# Here is a print statement to print out some text after we call
# our function to let us more easily see when our function executes.
print("After PrintGreeting() function call.")
``` |
{
"source": "jpiland16/three.py-packaged",
"score": 3
} |
#### File: three/components/Sphere.py
```python
import numpy as np
from three.mathutils import Matrix
import math
from three.components import *
#Sphere component, can be used for a lot of things, including
#detecting collisions
class Sphere(Shape):
def __init__(self, radius = 1, center = (0,0,0)):
super().__init__()
self.radius = radius
self.center = np.asarray(center)
def setPosition(self, newPos):
self.center = np.asarray(newPos)
def align(self,matrix):
self.center = np.asarray(matrix.getPosition())
def intersectSphere(self,other):
distance = abs(np.linalg.norm(self.center - other.center))
addedRadius = self.radius + other.radius
return (addedRadius >= distance)
def intersectsPlane(self, plane):
return (abs(plane.distanceToPoint(self.center)) <= self.radius)
#returns the minimum translation vector needed to prevent an overlap
#will move other
#TODO: check other shapes?
def preventOverlap(self,other):
if not self.intersectSphere(other):
return None
distanceVec = other.center - self.center
distanceLen = math.sqrt(distanceVec[0]**2+distanceVec[1]**2+distanceVec[2]**2)
minTransLen = distanceLen - self.radius - other.radius
distanceNorm = (distanceVec[0]/distanceLen,distanceVec[1]/distanceLen,distanceVec[2]/distanceLen)
minimumTranslationVector = (-distanceNorm[0]*minTransLen,-distanceNorm[1]*minTransLen,-distanceNorm[2]*minTransLen)
return minimumTranslationVector
```
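A short sketch of collision handling with the Sphere component; it assumes the three.py package is installed so that three.components exposes Sphere as imported above.
```python
from three.components import Sphere
a = Sphere(radius=1, center=(0, 0, 0))
b = Sphere(radius=1, center=(1.5, 0, 0))
print(a.intersectSphere(b))   # True: centers 1.5 apart, radii sum to 2
mtv = a.preventOverlap(b)     # minimum translation vector to apply to b
if mtv is not None:
    b.setPosition(tuple(b.center[i] + mtv[i] for i in range(3)))
print(a.intersectSphere(b))   # True again, but only just touching now
```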
#### File: three/core/Mesh.py
```python
import numpy as np
from OpenGL.GL import *
from three.core import Object3D, Uniform, UniformList
class Mesh(Object3D):
def __init__(self, geometry, material):
super().__init__()
self.geometry = geometry
self.material = material
self.visible = True
self.uniformList = UniformList()
self.uniformList.addUniform( Uniform("mat4", "modelMatrix", self.transform) )
# casting shadow stored as a boolean
# because it affects if mesh is included during rendering pass where shadow map texture is generated
self.castShadow = False
# receiving shadow stored in a uniform
# because it affects appearance of this object when rendered
self.uniformList.addUniform( Uniform("bool", "receiveShadow", 0) )
def setCastShadow(self, state=True):
self.castShadow = state
def setReceiveShadow(self, state=True):
if state:
self.uniformList.setUniformValue("receiveShadow", 1)
else:
self.uniformList.setUniformValue("receiveShadow", 0)
# passing shaderProgramID as a parameter because
    # usually Mesh will render with its own Material's shader
# but when doing shadow passes, uses a different shader
def render(self, shaderProgramID=None):
if not self.visible:
return
# automatically activate vertex bindings stored in associated VAO
vao = self.geometry.getVAO(shaderProgramID)
glBindVertexArray(vao)
# update mesh uniform data here,
# otherwise this code is repeated for shadow pass and standard pass in renderer class
self.uniformList.setUniformValue( "modelMatrix", self.getWorldMatrix() )
self.uniformList.update( shaderProgramID )
# update material uniform data
# textureNumber starts at 1 because slot 0 reserved for shadow map (if any)
textureNumber = 1
for uniform in self.material.uniformList.values():
if uniform.type == "sampler2D":
# used to activate a particular texture slot
uniform.textureNumber = textureNumber
# increment textureNumber in case additional textures are in use
textureNumber += 1
uniform.update( shaderProgramID )
# update material render settings
self.material.updateRenderSettings()
# draw the mesh
glDrawArrays(self.material.drawStyle, 0, self.geometry.vertexCount)
```
#### File: three/core/Sprite.py
```python
from three.core import Mesh
from three.geometry import Geometry
class Sprite(Mesh):
def __init__(self, material):
geometry = Geometry()
# position and UV data are the same
vertexData = [[0,0], [1,0], [1,1], [0,0], [1,1], [0,1]]
geometry.setAttribute("vec2", "vertexData", vertexData)
geometry.vertexCount = 6
super().__init__(geometry, material)
```
#### File: three/core/Uniform.py
```python
from OpenGL.GL import *
# objects that have associated uniforms (that may affect rendering a scene):
# Material, Mesh, Camera, Light, Fog
class Uniform(object):
def __init__(self, type, name, value):
# type: float | vec2 | vec3 | vec4 | mat4 | bool | sampler2D
self.type = type
# name of corresponding variable in shader program
self.name = name
# value to be sent to shader.
# float/vecN/matN: numeric data
# bool: 0 for False, 1 for True
# sampler2D: buffer ID where texture was stored
self.value = value
# only used for uniform sampler2D variables;
# used to activate a particular texture slot
self.textureNumber = None
# for each shader program that uses this data,
# store the uniform variable's location (for increased performance)
self.locationTable = {}
def initializeLocation(self, shaderProgramID):
location = glGetUniformLocation(shaderProgramID, self.name)
self.locationTable[shaderProgramID] = location
# DEBUG
# print("Initializing shader program", shaderProgramID, "variable", self.name, "to location", location )
# transmit currently stored value to corresponding variable in currently active shader
def update(self, shaderProgramID):
if shaderProgramID not in self.locationTable:
self.initializeLocation(shaderProgramID)
location = self.locationTable[shaderProgramID]
# if the shader program does not reference the variable, then exit
if location == -1:
return
if self.type == "bool":
glUniform1i(location, self.value)
elif self.type == "float":
glUniform1f(location, self.value)
elif self.type == "vec2":
glUniform2f(location, self.value[0], self.value[1])
elif self.type == "vec3":
glUniform3f(location, self.value[0], self.value[1], self.value[2])
elif self.type == "vec4":
glUniform4f(location, self.value[0], self.value[1], self.value[2], self.value[3])
elif self.type == "mat4":
glUniformMatrix4fv(location, 1, GL_TRUE, self.value )
elif self.type == "sampler2D":
# the associations are:
# shader variable ID <-> texture slot <-> texture buffer ID/reference
# requires textureNumber to be set before update is called
# point uniform variable to get data from specific texture slot
glUniform1i(location, self.textureNumber)
# activate texture slot
glActiveTexture( GL_TEXTURE0 + self.textureNumber )
# associate texture buffer reference to currently active texture "slot"
glBindTexture( GL_TEXTURE_2D, self.value )
# textureNumber=0 will be reserved for shadow map
if self.textureNumber == 0:
# when rendering shadow map texture, any fragment out of bounds of the shadow camera frustum
# should fail the depth test (not be drawn in shadow), so set components to 1.0
glTexParameterfv(GL_TEXTURE_2D, GL_TEXTURE_BORDER_COLOR, [1.0, 1.0, 1.0, 1.0])
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_BORDER)
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_BORDER)
else:
# textures (other than shadow map) default to repeat
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT)
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT)
# ---------------------------------------------------------------------
# helper class for managing collections (dictionaries) of uniforms
# used by all classes that contain data that may affect rendering:
# Material, Mesh, Camera, Light, Fog
class UniformList(object):
def __init__(self):
self.data = {}
# add uniform to collection.
# store under uniform's variable name by default,
# but can override by setting indexName (useful for Light objects)
def addUniform(self, uniform, indexName=None):
if indexName is None:
indexName = uniform.name
self.data[indexName] = uniform
def getUniformValue(self, indexName):
return self.data[indexName].value
def setUniformValue(self, indexName, value):
self.data[indexName].value = value
# call update method on all uniforms in collection
def update(self, shaderProgramID):
for uniform in self.data.values():
uniform.update( shaderProgramID )
```
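A minimal sketch of how uniform data is grouped before rendering; building a UniformList and updating stored values does not require an OpenGL context (only the update() call does).
```python
from three.core import Uniform, UniformList
uniforms = UniformList()
uniforms.addUniform(Uniform("vec3", "color", [1.0, 0.5, 0.0]))
uniforms.addUniform(Uniform("float", "alpha", 1.0))
uniforms.setUniformValue("alpha", 0.5)
print(uniforms.getUniformValue("alpha"))  # 0.5
# uniforms.update(shaderProgramID) would then push every value to the active shader
```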
#### File: three/geometry/BoxGeometry.py
```python
from three.geometry import Geometry, QuadGeometry
from three.mathutils import MatrixFactory
import numpy as np
from math import pi
class BoxGeometry(Geometry):
def __init__(self, width=2, height=2, depth=2, widthResolution=1, heightResolution=1, depthResolution=1):
super().__init__()
# create from 6 quads.
vertexPositionData = []
vertexUVData = []
vertexNormalData = []
frontQuad = QuadGeometry(width, height, widthResolution, heightResolution)
frontMatrix = MatrixFactory.makeTranslation(0,0,depth/2)
vertexPositionData += self.applyMat4ToVec3List( frontMatrix, frontQuad.attributeData["vertexPosition"]["value"] )
vertexUVData += frontQuad.attributeData["vertexUV"]["value"]
vertexNormalData += self.applyMat4ToVec3List( frontMatrix, frontQuad.attributeData["vertexNormal"]["value"] )
backQuad = QuadGeometry(width, height, widthResolution, heightResolution)
backMatrix = MatrixFactory.makeTranslation(0,0,-depth/2) @ MatrixFactory.makeRotationY(pi)
vertexPositionData += self.applyMat4ToVec3List( backMatrix, backQuad.attributeData["vertexPosition"]["value"] )
vertexUVData += backQuad.attributeData["vertexUV"]["value"]
vertexNormalData += self.applyMat4ToVec3List( backMatrix, backQuad.attributeData["vertexNormal"]["value"] )
leftQuad = QuadGeometry(depth, height, depthResolution, heightResolution)
leftMatrix = MatrixFactory.makeTranslation(-width/2,0,0) @ MatrixFactory.makeRotationY(-pi/2)
vertexPositionData += self.applyMat4ToVec3List( leftMatrix, leftQuad.attributeData["vertexPosition"]["value"] )
vertexUVData += leftQuad.attributeData["vertexUV"]["value"]
vertexNormalData += self.applyMat4ToVec3List( leftMatrix, leftQuad.attributeData["vertexNormal"]["value"] )
rightQuad = QuadGeometry(depth, height, depthResolution, heightResolution)
rightMatrix = MatrixFactory.makeTranslation(width/2,0,0) @ MatrixFactory.makeRotationY(pi/2)
vertexPositionData += self.applyMat4ToVec3List( rightMatrix, rightQuad.attributeData["vertexPosition"]["value"] )
vertexUVData += rightQuad.attributeData["vertexUV"]["value"]
vertexNormalData += self.applyMat4ToVec3List( rightMatrix, rightQuad.attributeData["vertexNormal"]["value"] )
topQuad = QuadGeometry(width, depth, widthResolution, depthResolution)
topMatrix = MatrixFactory.makeTranslation(0,height/2,0) @ MatrixFactory.makeRotationX(-pi/2)
vertexPositionData += self.applyMat4ToVec3List( topMatrix, topQuad.attributeData["vertexPosition"]["value"] )
vertexUVData += topQuad.attributeData["vertexUV"]["value"]
vertexNormalData += self.applyMat4ToVec3List( topMatrix, topQuad.attributeData["vertexNormal"]["value"] )
bottomQuad = QuadGeometry(width, depth, widthResolution, depthResolution)
bottomMatrix = MatrixFactory.makeTranslation(0,-height/2,0) @ MatrixFactory.makeRotationX(pi/2)
vertexPositionData += self.applyMat4ToVec3List( bottomMatrix, bottomQuad.attributeData["vertexPosition"]["value"] )
vertexUVData += bottomQuad.attributeData["vertexUV"]["value"]
vertexNormalData += self.applyMat4ToVec3List( bottomMatrix, bottomQuad.attributeData["vertexNormal"]["value"] )
self.setAttribute("vec3", "vertexPosition", vertexPositionData)
self.setAttribute("vec2", "vertexUV", vertexUVData)
self.setAttribute("vec3", "vertexNormal", vertexNormalData)
self.vertexCount = len(vertexPositionData)
def applyMat4ToVec3List(self, matrix, originalVectorList):
newVectorList = []
count = len(originalVectorList)
for index in range(count):
            # copy the source vector so the original list is not mutated,
            # then convert to homogeneous coordinates
            v = list(originalVectorList[index]) + [1]
v = list(matrix @ v)
v.pop(3) # convert back to vec3
newVectorList.append(v)
return newVectorList
```
#### File: three/geometry/OctahedronGeometry.py
```python
from three.geometry import SphereGeometry
class OctahedronGeometry(SphereGeometry):
def __init__(self, radius=1):
super().__init__(radius=radius, xResolution=4, yResolution=2)
```
#### File: three/material/PointBasicMaterial.py
```python
from three.core import *
from three.material import *
class PointBasicMaterial(Material):
def __init__(self, color=[1,1,1], alpha=1, texture=None, size=1,
usePerspective=True, useVertexColors=False, alphaTest=0.75):
# vertex shader code
vsCode = """
in vec3 vertexPosition;
in vec3 vertexColor;
out vec3 vColor;
// adjust projected size of sprites
uniform bool usePerspective;
uniform float size;
uniform mat4 projectionMatrix;
uniform mat4 viewMatrix;
uniform mat4 modelMatrix;
void main()
{
vColor = vertexColor;
vec4 eyePosition = viewMatrix * modelMatrix * vec4(vertexPosition, 1.0);
if ( usePerspective )
gl_PointSize = 500 * size / length(eyePosition);
else
gl_PointSize = size;
gl_Position = projectionMatrix * eyePosition;
}
"""
# fragment shader code
fsCode = """
uniform vec3 color;
uniform float alpha;
uniform bool useVertexColors;
in vec3 vColor;
uniform bool useTexture;
uniform sampler2D image;
uniform float alphaTest;
void main()
{
vec4 baseColor = vec4(color, alpha);
if ( useVertexColors )
baseColor *= vec4(vColor, 1.0);
if ( useTexture )
baseColor *= texture(image, gl_PointCoord);
gl_FragColor = baseColor;
if (gl_FragColor.a < alphaTest)
discard;
}
"""
# initialize shaders
super().__init__(vsCode, fsCode)
# set render values
self.drawStyle = GL_POINTS
# set default uniform values
self.setUniform( "vec3", "color", color )
self.setUniform( "float", "alpha", alpha )
self.setUniform( "float", "size", size )
self.setUniform( "float", "alphaTest", alphaTest )
if useVertexColors:
self.setUniform( "bool", "useVertexColors", 1 )
else:
self.setUniform( "bool", "useVertexColors", 0 )
if usePerspective:
self.setUniform( "bool", "usePerspective", 1 )
else:
self.setUniform( "bool", "usePerspective", 0 )
if texture is None:
self.setUniform( "bool", "useTexture", 0 )
self.setUniform( "sampler2D", "image", -1 )
else:
self.setUniform( "bool", "useTexture", 1 )
self.setUniform( "sampler2D", "image", texture )
```
#### File: three/material/SurfaceLightMaterial.py
```python
from three.core import *
from three.material import SurfaceBasicMaterial
class SurfaceLightMaterial(SurfaceBasicMaterial):
def __init__(self, color=[1,1,1], alpha=1, texture=None, wireframe=False, lineWidth=1, useVertexColors=False, alphaTest=0):
super().__init__(color=color, alpha=alpha, texture=texture, wireframe=wireframe, lineWidth=lineWidth, useVertexColors=useVertexColors, alphaTest=alphaTest)
self.setUniform( "bool", "useLight", 1 )
```
#### File: three.py-packaged/three/TestSurfaceMaterials.py
```python
from three.core import *
from three.cameras import *
from three.geometry import *
from three.material import *
from three.lights import *
from random import random
class TestSurfaceMaterials(Base):
def initialize(self):
self.setWindowTitle('Surface Materials')
self.setWindowSize(800,800)
self.renderer = Renderer()
self.renderer.setViewportSize(800,800)
self.renderer.setClearColor(0.25,0.25,0.25)
self.scene = Scene()
self.camera = PerspectiveCamera()
self.camera.transform.setPosition(0, 0, 8)
self.cameraControls = FirstPersonController(self.input, self.camera)
self.scene.add( AmbientLight( strength=0.2 ) )
self.scene.add( DirectionalLight( direction=[-1,-1,-2] ) )
self.sphereList = []
sphereGeom = SphereGeometry(radius=0.9)
gridTexture = OpenGLUtils.initializeTexture("images/color-grid.png")
gridMaterial = SurfaceLightMaterial(texture=gridTexture)
wireMaterial = SurfaceBasicMaterial(color=[0.8,0.8,0.8], wireframe=True, lineWidth=2)
lightMaterial = SurfaceLightMaterial(color=[0.5,0.5,1.0])
rainbowMaterial = SurfaceLightMaterial(useVertexColors=True)
vertexColorData = []
for i in range(sphereGeom.vertexCount):
color = [random(), random(), random()]
vertexColorData.append(color)
sphereGeom.setAttribute("vec3", "vertexColor", vertexColorData)
sphere1 = Mesh( sphereGeom, wireMaterial )
sphere1.transform.translate(-3, 0, 0, Matrix.LOCAL)
self.sphereList.append(sphere1)
sphere2 = Mesh( sphereGeom, lightMaterial )
sphere2.transform.translate(-1, 0, 0, Matrix.LOCAL)
self.sphereList.append(sphere2)
sphere3 = Mesh( sphereGeom, rainbowMaterial )
sphere3.transform.translate(1, 0, 0, Matrix.LOCAL)
self.sphereList.append(sphere3)
sphere4 = Mesh( sphereGeom, gridMaterial )
sphere4.transform.translate(3, 0, 0, Matrix.LOCAL)
self.sphereList.append(sphere4)
for sphere in self.sphereList:
self.scene.add(sphere)
def update(self):
self.cameraControls.update()
if self.input.resize():
size = self.input.getWindowSize()
self.camera.setAspectRatio( size["width"]/size["height"] )
self.renderer.setViewportSize(size["width"], size["height"])
for sphere in self.sphereList:
sphere.transform.rotateY(0.01, Matrix.LOCAL)
self.renderer.render(self.scene, self.camera)
# instantiate and run the program
TestSurfaceMaterials().run()
``` |
{
"source": "jpillora/mitsubishi_hass",
"score": 2
} |
#### File: custom_components/echonetlite/config_flow.py
```python
from __future__ import annotations
import logging
import asyncio
from typing import Any
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.core import HomeAssistant, callback
from homeassistant.data_entry_flow import FlowResult
from homeassistant.exceptions import HomeAssistantError
from homeassistant.data_entry_flow import AbortFlow
from homeassistant.util import Throttle
import homeassistant.helpers.config_validation as cv
from pychonet.HomeAirConditioner import ENL_FANSPEED, ENL_AIR_VERT, ENL_AIR_HORZ
import pychonet as echonet
from .const import DOMAIN, USER_OPTIONS
_LOGGER = logging.getLogger(__name__)
# TODO adjust the data schema to the data that you need
STEP_USER_DATA_SCHEMA = vol.Schema(
{
vol.Required("host"): str,
vol.Required("title"): str,
}
)
async def validate_input(hass: HomeAssistant, data: dict[str, Any]) -> dict[str, Any]:
"""Validate the user input allows us to connect."""
_LOGGER.warning(f"IP address is {data['host']}")
instances = await hass.async_add_executor_job(echonet.discover, data["host"])
if len(instances) == 0:
raise CannotConnect
return {"host": data["host"], "title": data["title"], "instances": instances}
async def discover_devices(hass: HomeAssistant, discovery_info: list):
# Then build default object and grab static such as UID and property maps...
for instance in discovery_info['instances']:
device = await hass.async_add_executor_job(echonet.EchonetInstance, instance['eojgc'], instance['eojcc'], instance['eojci'], instance['netaddr'])
device_data = await hass.async_add_executor_job(device.update, [0x83,0x9f,0x9e])
instance['getPropertyMap'] = device_data[0x9f]
instance['setPropertyMap'] = device_data[0x9e]
if device_data[0x83]:
instance['UID'] = await hass.async_add_executor_job(device.getIdentificationNumber)
else:
instance['UID'] = f'{instance["netaddr"]}-{instance["eojgc"]}{instance["eojcc"]}{instance["eojci"]}'
_LOGGER.debug(discovery_info)
# Return info that you want to store in the config entry.
return discovery_info
class ConfigFlow(config_entries.ConfigFlow, domain=DOMAIN):
"""Handle a config flow for echonetlite."""
discover_task = None
discovery_info = {}
instances = None
VERSION = 1
async def _async_do_task(self, task):
        self.discovery_info = await task  # A task that takes some time to complete.
self.hass.async_create_task(
self.hass.config_entries.flow.async_configure(flow_id=self.flow_id)
)
return self.discovery_info
async def async_step_user(self, user_input: dict[str, Any] | None = None) -> FlowResult:
        """Handle the initial step."""
        errors = {}
if user_input is None:
return self.async_show_form(
step_id="user", data_schema=STEP_USER_DATA_SCHEMA
)
try:
self.discovery_info = await validate_input(self.hass, user_input)
except CannotConnect:
errors["base"] = "cannot_connect"
except Exception: # pylint: disable=broad-except
_LOGGER.exception("Unexpected exception")
errors["base"] = "unknown"
else:
return await self.async_step_discover(user_input)
return self.async_show_form(
step_id="user", data_schema=STEP_USER_DATA_SCHEMA, errors=errors
)
async def async_step_discover(self, user_input=None):
errors = {}
if not self.discover_task:
_LOGGER.debug('Step 1')
try:
self.discover_task = self.hass.async_create_task(self._async_do_task(discover_devices(self.hass, self.discovery_info)))
except Exception: # pylint: disable=broad-except
_LOGGER.exception("Unexpected exception")
else:
return self.async_show_progress(step_id="discover", progress_action="user")
return self.async_show_progress_done(next_step_id="finish")
async def async_step_finish(self, user_input=None):
#_LOGGER.debug("Step 4")
return self.async_create_entry(title=self.discovery_info["title"], data=self.discovery_info)
@staticmethod
@callback
def async_get_options_flow(config_entry):
return OptionsFlowHandler(config_entry)
class CannotConnect(HomeAssistantError):
"""Error to indicate we cannot connect."""
class OptionsFlowHandler(config_entries.OptionsFlow):
def __init__(self, config):
self._config_entry = config
async def async_step_init(self, user_input=None):
"""Manage the options."""
data_schema_structure = {}
# Handle HVAC User configurable options
for instance in self._config_entry.data["instances"]:
if instance['eojgc'] == 0x01 and instance['eojcc'] == 0x30:
for option in list(USER_OPTIONS.keys()):
if option in instance['setPropertyMap']:
data_schema_structure.update({vol.Optional(
USER_OPTIONS[option]['option'],
default=self._config_entry.options.get(USER_OPTIONS[option]['option']) if self._config_entry.options.get(USER_OPTIONS[option]['option']) is not None else [] ,
):cv.multi_select(USER_OPTIONS[option]['option_list'])})
if user_input is not None:
return self.async_create_entry(title="", data=user_input)
return self.async_show_form(
step_id="init",
data_schema=vol.Schema(data_schema_structure),
)
``` |
{
"source": "jpindar/TC77_temperature_sensor",
"score": 3
} |
#### File: jpindar/TC77_temperature_sensor/tc77server.py
```python
from flask import Flask, request, render_template
import time
import datetime
import os
import tc77
app = Flask(__name__)
@app.route('/')
@app.route('/timeandtemp')
def show_both():
Celsius = tc77.getTemperature()
Fahrenheit = 9.0/5.0 * Celsius + 32.0
now = datetime.datetime.now()
templateData = {
'title': 'Time and Temperature',
'time': now.strftime("%Y-%m-%d %H:%M"),
'tempC': "{:.1f}".format(Celsius),
'tempF': "{:.1f}".format(Fahrenheit)
}
return render_template('temperature.html', **templateData)
@app.route('/temp')
def show_temp():
Celsius = tc77.getTemperature()
templateData = {
'title': 'Temperature',
'tempC': "{:.1f}".format(Celsius)
}
return render_template('temperatureonly.html', **templateData)
@app.route("/time")
def show_time():
now = datetime.datetime.now()
timeString = now.strftime("%Y-%m-%d %H:%M")
templateData = {
'title': 'Time',
'time': timeString
}
return render_template('time.html', **templateData)
# this fixes the CORS issue
@app.after_request
def after_request(response):
# this is all that is necessary to allow access via GET and via iframes
response.headers.add('Access-Control-Allow-Origin', '*') # asterisk is wildcard meaning any site can access this
return response
if __name__ == '__main__':
tc77.init()
# enable Flask's development server
# 0.0.0.0 means publicly available
# app.run(host='0.0.0.0', port=80, debug=False)
app.run(host='0.0.0.0', port=80, debug=True)
``` |
{
"source": "jpinedaf/bayesian-ngc1333",
"score": 2
} |
#### File: jpinedaf/bayesian-ngc1333/innocent_script.py
```python
import os
import sys
import warnings
import mpi4py
import numpy as np
from astropy import log
from astropy.io import fits
# Those import warnings are annoying
with warnings.catch_warnings():
warnings.simplefilter('ignore')
import pymultinest
from pyspeckit.spectrum.models.ammonia import cold_ammonia_model
from pyspecnest.ammonia import get_nh3_model
from pyspecnest.chaincrunch import pars_xy, lnZ_xy, get_zero_evidence
# All the I/O functions now reside here
import opencube
# Configuration for line modelling setup (gets passed to pyspeckit)
from config import line_names, npars
# Path settings
from config import name_id, proj_dir, file_Zs
from config import default_yx, default_npeaks
# Kwargs for digesting and saving spectral cube (making it on the
# fly every time we need a spectrum is too slow!)
from config import cube_save_kwargs
# Finally, MultiNest settings and priors
from config import n_live_points, sampling_efficiency, get_priors_xoff_wrapped
# Compatibility with Python 2.7
try:
FileNotFoundError
except NameError:
FileNotFoundError = IOError
def mpi_rank():
"""
Returns the rank of the calling process.
"""
comm = mpi4py.MPI.COMM_WORLD
rank = comm.Get_rank()
return rank
# pop culture references always deserve their own function
def i_am_root():
"""
Checks if the running subprocess is of rank 0
"""
try:
return True if mpi_rank() == 0 else False
except AttributeError:
# not running MPI
return True
try:
npeaks = int(sys.argv[1])
except IndexError:
npeaks = default_npeaks
log.info("npeaks not specified, setting to {}".format(npeaks))
try:
yx = int(sys.argv[2]), int(sys.argv[3])
except IndexError:
yx = default_yx
log.info("xy-pixel not specified, setting to {}".format(yx[::-1]))
try:
plotting = bool(int(sys.argv[4]))
except IndexError:
plotting = 0
if not plotting:
plot_fit, plot_corner, show_fit, show_corner = False, False, False, False
else: # defaults a for non-batch run
plot_fit = True
plot_corner = True
show_fit = True
show_corner = True
from chainconsumer import ChainConsumer # Optional if no plotting is done
import matplotlib.pyplot as plt
plt.rc('text', usetex=True)
y, x = yx
sp = opencube.get_spectrum(x, y, **cube_save_kwargs)
fittype_fmt = 'cold_ammonia_x{}'
fitmodel = cold_ammonia_model
sp.specfit.Registry.add_fitter(fittype_fmt.format(npeaks), npars=npars,
function=fitmodel(line_names=line_names))
# is this still needed?
opencube.update_model(sp, fittype_fmt.format(npeaks))
# needed because of this:
# https://github.com/pyspeckit/pyspeckit/issues/179
sp.specfit.fitter.npeaks = npeaks
# npeaks > 1 seems to break because fitter.parnames is not mirrored
if len(sp.specfit.fitter.parnames) == npars and npeaks > 1:
sp.specfit.fitter.parnames *= npeaks
# TODO: make a wrapper function for pyspecnest instead!
priors = get_priors_xoff_wrapped(npeaks)
nh3_model = get_nh3_model(sp, line_names, sp.error,
priors=priors, npeaks=npeaks)
# Safeguard - check some common causes of failure before scheduling
# a job that would just throw tens of thousands of errors at us
no_valid_chans = not np.any(np.isfinite(nh3_model.ydata))
sanity_check = np.isfinite(
nh3_model.log_likelihood([15, 5, 15, 0.2, 7, 0.5] * npeaks,
nh3_model.npars, nh3_model.dof))
if no_valid_chans or not sanity_check:
# This should fail if, e.g., the errors are not finite
log.error("no valid pixels at x={}; y={}. Aborting.".format(*yx[::-1]))
sys.exit()
# The first process gets to make the directory structure!
output_dir = os.path.join(proj_dir, 'nested-sampling/')
fig_dir = os.path.join(output_dir, 'figs/')
suffix = 'x{1}y{0}'.format(*yx)
chains_dir = '{}chains/{}_{}'.format(output_dir, name_id, suffix)
if not os.path.exists(chains_dir):
try: # hacks around a race condition
os.makedirs(chains_dir)
except OSError as e:
if e.errno != 17:
raise
chains_dir = '{}/{}-'.format(chains_dir, npeaks)
# Run MultiNest on the model+priors specified
pymultinest.run(nh3_model.xoff_symmetric_log_likelihood,
nh3_model.prior_uniform, nh3_model.npars,
outputfiles_basename=chains_dir,
verbose=True, n_live_points=n_live_points,
sampling_efficiency=sampling_efficiency)
# The remainder of the script is not essential for sampling, and can be safely
# moved out into a script of its own.
if i_am_root() and plot_fit:
# parse the results as sensible output
from pyspecnest.chaincrunch import analyzer_xy
a = analyzer_xy(x, y, npeaks, output_dir=output_dir,
name_id=name_id, npars=npars)
a_lnZ = a.get_stats()['global evidence']
log.info('ln(Z) for model with {} line(s) = {:.1f}'.format(npeaks, a_lnZ))
try:
lnZ0 = fits.getdata(file_Zs)[0]
except (FileNotFoundError, OSError) as e:
cubes = opencube.make_cube_shh()
lnZ0 = get_zero_evidence(data=cubes.cube, rms=cubes.errorcube,
normalize=False)
Zs = lnZ_xy(list(np.arange(npeaks+1)), x=x, y=y, output_dir=output_dir,
name_id=name_id, silent=True, lnZ0=(lnZ0[y, x], 0))
log.info('ln(Z{}/Z{}) = {:.2f}'.format(npeaks, npeaks-1,
Zs[npeaks] - Zs[npeaks-1]))
if npeaks > 1:
log.info('ln(Z{}/Z{}) = {:.2f}'.format(npeaks, 0, Zs[npeaks] - Zs[0]))
if plot_fit and i_am_root():
sp.plotter(errstyle='fill')
mle_pars = pars_xy(x=x, y=y, npars=npars, npeaks=npeaks,
output_dir=output_dir, name_id=name_id)
mle_parinfo = sp.specfit.fitter._make_parinfo(mle_pars, npeaks=npeaks)[0]
try:
sp.specfit.plot_fit(xarr=sp.xarr, pars=mle_parinfo,
show_components=True)
except TypeError:
# eh? does it want pars or parinfo?
sp.specfit.plot_fit(xarr=sp.xarr, pars=mle_pars, show_components=True)
# annotate the Bayes factors
plt.annotate('ln(Z{}/Z{}) = {:.2f}'.format(npeaks, npeaks-1,
Zs[npeaks] - Zs[npeaks-1]), xy=(0.05, 0.90),
xycoords='axes fraction')
if npeaks > 1:
plt.annotate('ln(Z{}/Z{}) = {:.2f}'.format(npeaks, 0,
Zs[npeaks] - Zs[0]), xy=(0.05, 0.85),
xycoords='axes fraction')
if show_fit:
plt.show()
fig_name = "{}-fit-{}-x{}".format(name_id, suffix, npeaks)
plt.savefig(os.path.join(fig_dir, fig_name + ".pdf"))
if plot_corner and i_am_root():
mle_multinest = pars_xy(x=x, y=y, npars=npars, npeaks=npeaks,
output_dir=output_dir, name_id=name_id)
unfrozen_slice = nh3_model.get_nonfixed_slice(a.data.shape, axis=1)
c = ChainConsumer()
parameters = nh3_model.get_names(latex=True, no_fixed=True)
c.add_chain(a.data[:, 2:][unfrozen_slice], parameters=parameters)
c.configure(statistics="max", summary=True)
fig = c.plotter.plot(figsize="column")
fig.get_size_inches()
fig.set_size_inches(9, 7)
fig_name = "{}-corner-{}-x{}".format(name_id, suffix, npeaks)
plt.savefig(fig_dir + fig_name + ".pdf")
if show_corner:
plt.show()
``` |
{
"source": "jpinedaf/line_fit_cube_samples",
"score": 2
} |
#### File: line_fit_cube_samples/samples/pyspeckit_NH3.py
```python
import pyspeckit
import astropy.io.fits as fits
import numpy as np
from spectral_cube import SpectralCube
from radio_beam import Beam
import astropy.units as u
from skimage.morphology import remove_small_objects,closing,disk,opening
from pyspeckit.spectrum.models import ammonia
fit_dir = 'fit/'
data_dir = 'data/'
OneOneIntegrated = data_dir + 'NH3_11_TdV.fits'
OneOneMom1 = data_dir + 'NH3_11_mom1.fits'
OneOneMom2 = data_dir + 'NH3_11_mom2.fits'
OneOneFile = data_dir + 'NH3_11.fits'
OneOnePeak = data_dir + 'NH3_11_Tpeak.fits'
RMSFile_11 = data_dir + 'NH3_11_rms.fits'
TwoTwoFile = data_dir + 'NH3_22.fits'
RMSFile_22 = data_dir + 'NH3_22_rms.fits'
# Define frequencies used to determine the velocity
freq11 = 23.694506*u.GHz
freq22 = 23.722633335*u.GHz
def mom_map(do_plot=False):
"""
    Compute NH3(1,1) moment maps (integrated intensity, centroid velocity,
    velocity dispersion, and peak intensity) over the 3.44-5.0 km/s window,
    and write them, together with the (1,1) and (2,2) RMS maps, to FITS files.
"""
cube11sc = SpectralCube.read(OneOneFile)
cube11_v = cube11sc.with_spectral_unit(u.km/u.s, velocity_convention='radio')
chan = np.arange(cube11sc.header['NAXIS3'])
slab = cube11_v.spectral_slab(3.44*u.km/u.s, 5.0*u.km/u.s)
w11 = slab.moment(order=0, axis=0)
moment1 = slab.moment1(axis=0)
moment2 = slab.moment2(axis=0)
Tpeak = slab.max(axis=0)
w11.write(OneOneIntegrated, format='fits', overwrite=True)
moment1.write(OneOneMom1, format='fits', overwrite=True)
moment2.write(OneOneMom2, format='fits', overwrite=True)
Tpeak.write(OneOnePeak, format='fits', overwrite=True)
a=[ 5, 72, 251, 373, 490, 666]
b=[45, 220, 335, 450, 630, 683]
    index_rms = np.hstack([np.arange(start, stop+1) for start, stop in zip(a, b)])
mask_rms = np.zeros(cube11_v.shape, dtype=bool)
mask_rms[index_rms] = True
mask_rms = mask_rms & np.isfinite((cube11_v.unmasked_data[:,:,:]).value)
cube11_rms = cube11_v.with_mask(mask_rms)
rms11 = cube11_rms.std(axis=0)
rms11.write(RMSFile_11, overwrite=True)
if do_plot:
import matplotlib.pyplot as plt
# compare full_cube and the one only with noise (no signal)
ii = 153; jj = 144
plt.plot(chan, cube11sc[:, jj, ii].value, 'red', chan, cube11_rms[:, jj, ii], 'blue')
plt.show()
plt.imshow(Tpeak.value/rms11.value, origin='lower', vmin=0.3, vmax=10)
plt.show()
plt.close()
cube22sc = SpectralCube.read(TwoTwoFile)
spectral_axis_22 = cube22sc.with_spectral_unit(u.km/u.s, velocity_convention='radio').spectral_axis
good_channels_22 = (spectral_axis_22 < 3.5*u.km/u.s) | (spectral_axis_22 > 5.0*u.km/u.s)
masked_cube22 = cube22sc.with_mask(good_channels_22[:, np.newaxis, np.newaxis])
rms22 = masked_cube22.std(axis=0)
rms22.write(RMSFile_22, overwrite=True)
def hmm1_cubefit(vmin=3.4, vmax=5.0, tk_ave=10., do_plot=False, snr_min=5.0,
multicore=1, do_thin=False):
"""
Fit NH3(1,1) and (2,2) cubes for H-MM1.
It fits all pixels with SNR larger than requested.
Initial guess is based on moment maps and neighboring pixels.
The fitting can be done in parallel mode using several cores,
however, this is dangerous for large regions, where using a
good initial guess is important.
It stores the result in a FITS cube.
TODO:
-convert FITS cube into several FITS files
-Improve initial guess
Parameters
----------
vmin : numpy.float
Minimum centroid velocity to plot, in km/s.
vmax : numpy.float
Maximum centroid velocity to plot, in km/s.
tk_ave : numpy.float
Mean kinetic temperature of the region, in K.
do_plot : bool
        If True, a map of the region to be fitted is shown.
snr_min : numpy.float
Minimum signal to noise ratio of the spectrum to be fitted.
multicore : int
Numbers of cores to use for parallel processing.
"""
cube11sc = SpectralCube.read(OneOneFile)
cube22sc = SpectralCube.read(TwoTwoFile)
cube11_v = cube11sc.with_spectral_unit(u.km/u.s,velocity_convention='radio',
rest_value=freq11)
cube22_v = cube22sc.with_spectral_unit(u.km/u.s,velocity_convention='radio',
rest_value=freq22)
from pyspeckit.spectrum.units import SpectroscopicAxis
spec11 = SpectroscopicAxis(cube11_v.spectral_axis, refX=freq11,
velocity_convention='radio')
spec22 = SpectroscopicAxis(cube22_v.spectral_axis, refX=freq22,
velocity_convention='radio')
errmap11 = fits.getdata(RMSFile_11)
errmap22 = fits.getdata(RMSFile_22)
errmap_K = errmap11 #[errmap11, errmap22]
Tpeak11 = fits.getdata(OneOnePeak)
moment1 = fits.getdata(OneOneMom1)
moment2 = (fits.getdata(OneOneMom2))**0.5
snr = cube11sc.filled_data[:].value/errmap11
peaksnr = Tpeak11 / errmap11
planemask = (peaksnr > snr_min) # *(errmap11 < 0.15)
planemask = remove_small_objects(planemask, min_size=40)
planemask = opening(planemask, disk(1))
#planemask = (peaksnr>20) * (errmap11 < 0.2)
mask = (snr>3)*planemask
maskcube = cube11sc.with_mask(mask.astype(bool))
maskcube = maskcube.with_spectral_unit(u.km/u.s, velocity_convention='radio')
slab = maskcube.spectral_slab(vmax*u.km/u.s, vmin*u.km/u.s)
w11 = slab.moment(order=0, axis=0).value
peakloc = np.nanargmax(w11)
ymax, xmax = np.unravel_index(peakloc, w11.shape)
moment2[np.isnan(moment2)] = 0.2
moment2[moment2 < 0.2] = 0.2
## Load FITS files
cube11 = pyspeckit.Cube(OneOneFile, maskmap=planemask)
cube22 = pyspeckit.Cube(TwoTwoFile, maskmap=planemask)
# Stack files
cubes = pyspeckit.CubeStack([cube11, cube22], maskmap=planemask)
cubes.unit = "K"
# Define initial guess
guesses = np.zeros((6,) + cubes.cube.shape[1:])
moment1[moment1 < vmin] = vmin+0.2
moment1[moment1 > vmax] = vmax-0.2
guesses[0, :, :] = tk_ave # Kinetic temperature
guesses[1, :, :] = 7 # Excitation Temp
guesses[2, :, :] = 14.5 # log(column)
guesses[3, :, :] = moment2 # Line width / 5 (the NH3 moment overestimates linewidth)
guesses[4, :, :] = moment1 # Line centroid
guesses[5, :, :] = 0.5 # F(ortho) - ortho NH3 fraction (fixed)
if do_plot:
import matplotlib.pyplot as plt
plt.imshow(w11*planemask, origin='lower')
plt.show()
print('start fit')
cubes.specfit.Registry.add_fitter('cold_ammonia',
ammonia.cold_ammonia_model(), 6)
if do_thin:
file_out = "{0}H-MM1_cold_parameter_maps_snr{1}_thin_v1.fits".format(fit_dir, snr_min)
else:
file_out = "{0}H-MM1_cold_parameter_maps_snr{1}_thick_v1.fits".format(fit_dir, snr_min)
cubes.fiteach(fittype='cold_ammonia', guesses=guesses,
integral=False, verbose_level=3,
fixed=[do_thin, False, False, False, False, True],
signal_cut=2,
limitedmax=[True, False, False, False, True, True],
maxpars=[20, 15, 20, 0.4, vmax, 1],
limitedmin=[True, True, True, True, True, True],
minpars=[5, 2.8, 12.0, 0.05, vmin, 0],
start_from_point=(xmax, ymax),
use_neighbor_as_guess=True,
position_order=1/peaksnr,
errmap=errmap_K, multicore=multicore)
# Store fits into FITS cube
fitcubefile = fits.PrimaryHDU(
data=np.concatenate([cubes.parcube, cubes.errcube]),
header=cubes.header)
fitcubefile.header.set('PLANE1', 'TKIN')
fitcubefile.header.set('PLANE2', 'TEX')
fitcubefile.header.set('PLANE3', 'COLUMN')
fitcubefile.header.set('PLANE4', 'SIGMA')
fitcubefile.header.set('PLANE5', 'VELOCITY')
fitcubefile.header.set('PLANE6', 'FORTHO')
fitcubefile.header.set('PLANE7', 'eTKIN')
fitcubefile.header.set('PLANE8', 'eTEX')
fitcubefile.header.set('PLANE9', 'eCOLUMN')
fitcubefile.header.set('PLANE10', 'eSIGMA')
fitcubefile.header.set('PLANE11', 'eVELOCITY')
fitcubefile.header.set('PLANE12', 'eFORTHO')
fitcubefile.header.set('CDELT3', 1)
fitcubefile.header.set('CTYPE3', 'FITPAR')
fitcubefile.header.set('CRVAL3', 0)
fitcubefile.header.set('CRPIX3', 1)
fitcubefile.writeto(file_out, overwrite=True)
``` |
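A minimal driver sketch, not part of the original repository: it assumes the file above is importable as pyspeckit_NH3, that the FITS files under data/ exist, and that fit/ is writable.
```python
from multiprocessing import cpu_count

# Hypothetical module path; adjust to wherever pyspeckit_NH3.py lives.
from pyspeckit_NH3 import mom_map, hmm1_cubefit

if __name__ == '__main__':
    mom_map(do_plot=False)          # write the moment, peak and rms maps first
    hmm1_cubefit(vmin=3.4, vmax=5.0, tk_ave=10.0, snr_min=5.0,
                 multicore=max(1, cpu_count() - 1))
```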
{
"source": "jpinedaf/multicube",
"score": 2
} |
#### File: multicube/examples/example-gauss-2d.py
```python
import numpy as np
import matplotlib.pylab as plt
from multicube.subcube import SubCube
from multicube.astro_toolbox import make_test_cube
from multiprocessing import cpu_count
# generating a dummy FITS file
make_test_cube((300, 10, 10), outfile='foo.fits', sigma=(10, 5))
sc = SubCube('foo.fits')
# TODO: move this to astro_toolbox.py
# as a general synthetic cube generator routine
# let's tinker with the cube a bit!
# this will introduce a radial velocity gradient:
def tinker_ppv(arr):
scale_roll = 15
rel_shift = 30
rel_str = 5
shifted_component = np.roll(arr, rel_shift) / rel_str
for y, x in np.ndindex(arr.shape[1:]):
roll = np.sqrt((x - 5)**2 + (y - 5)**2) * scale_roll
arr[:, y, x] = np.roll(arr[:, y, x], int(roll))
return arr + shifted_component
sc.cube = tinker_ppv(sc.cube)
sc.update_model('gaussian')
npeaks = 2
sc.specfit.npeaks = npeaks
sc.specfit.fitter.npeaks = npeaks
sc.specfit.fitter.make_parinfo(npeaks=npeaks)
main_comp = [[0.1, sc.xarr.min().value, 0.1], # minpars
[2.0, sc.xarr.max().value, 2.0], # maxpars
[10, 10, 10], # finesse
[False] * 3, # fixed
[True] * 3, # limitedmin
[True] * 3] # limitedmax
sidekick = [[0.1, -10, 0.1], # minpars
[0.5, -10, 0.5], # maxpars
[3, 1, 3], # finesse
[False] * 3, # fixed
[True] * 3, # limitedmin
[True] * 3] # limitedmax
total_zero = [[0.0, 5, 1.0], # minpars
[0.0, 5, 1.0], # maxpars
[1, 1, 1], # finesse
[True] * 3, # fixed
[True] * 3, # limitedmin
[True] * 3] # limitedmax
unpack = lambda a, b: [i + j for i, j in zip(a, b)]
# Estimating SNR
sc.get_snr_map()
# Making a guess grid based on parameter permutations
sc.make_guess_grid(*unpack(main_comp, total_zero))
sc.expand_guess_grid(*unpack(main_comp, sidekick))
# Generating spectral models for all guesses
sc.generate_model(multicore=cpu_count() - 1)
# Calculating the best guess on the grid
sc.best_guess()
sc.fiteach(fittype = sc.fittype,
guesses = sc.best_guesses,
multicore = cpu_count() - 1,
errmap = sc._rms_map,
**sc.best_fitargs)
# computing chi^2 statistics to judge the goodness of fit:
sc.get_chi_squared(sigma=sc.header['RMSLVL'], refresh=True)
sc.chi_squared_stats()
sc.show_fit_param(1, cmap='coolwarm')
clb = sc.mapplot.FITSFigure.colorbar
clb.set_axis_label_text(sc.xarr.unit.to_string('latex_inline'))
# and overlay the pixels that didn't converge properly:
sc.mark_bad_fits(cut = 1e-40) # voila! all the pixels are fit!
plt.show()
```
#### File: multicube/multicube/multicube.py
```python
import numpy as np
import pyspeckit
from subcube import SubCube
class MultiCube:
def __init__(self, *args):
"""
A collection of Specfit objects mapped to SubCubes
by the mapper method. Includes* methods to fit multiple
guess grids for different models, and the means to
decide between the results of those fits.
*In a distant[citation needed] future.
Input parameters: see ~pyspeckit.Cube
"""
# parent cube, used for attribute propagation
        self.SuperCube = pyspeckit.Cube(*args)
# making a bunch of references to make our life easier
self.cube = self.SuperCube.cube
self.xarr = self.SuperCube.xarr
self.header = self.SuperCube.header
# FIXME: rewrite mapplot to include the mapper/judge methods!
# doesn't work in its current implementation, will need
# to rewire different params and errors, for variable
# number of parameters across different models
self.multiplot = self.SuperCube.mapplot
# MultiCube's own instances:
self.multigrid = {}
self.tesseract = {}
def __repr__(self):
return ('Parent: MultiCube with TODO models\n'
'Child: %s' % self.SuperCube.__repr__())
def spawn(self, model, guesses=None):
"""
Add a new model and a SubCube for it through Cube()
The idea is to pass a reference to large data instances
of SuperCube to avoid excessive memory usage.
Not implemented yet.
"""
self.tesseract[model]=SubCube()
raise NotImplementedError
    def mapper(self, model):
"""
Returns a list of SubCubes for a given model?
"""
raise NotImplementedError
    def judge_multimodel(self, subcubes, model, method):
"""
Decide which model to use.
First milestone: have one component added only
when residual has SNR>3
Actual goal: proper model selection via DIC.
"""
raise NotImplementedError
def multifit(self, multigrid=None):
"""
Fit the optimized guesses. This should be delegated
to SubCubes maybe? MultiCube should only call the
judge function.
Not really, this approach would allow to juggle all
the SubCubes defined! In this case, multifit is a
wrapper for SubCube.fiteach() method. This will do.
"""
raise NotImplementedError
``` |
{
"source": "jpinsonault/creature_sim",
"score": 3
} |
#### File: jpinsonault/creature_sim/Components.py
```python
from namedlist import namedlist
PolygonShape = namedlist('PolygonShape', ["points"])
Position = namedlist('Position', ["x", "y"])
Orientation = namedlist('Orientation', ["angle"])
Color = namedlist('Color', ["r", "g", "b"])
MoveSpeed = namedlist('MoveSpeed', ["speed"])
TurnSpeed = namedlist('TurnSpeed', ["speed"])
Health = namedlist('Health', ["amount"])
FoodSeen = namedlist('FoodSeen', ["number"])
# Collider = namedlist('Collider', ["entity", "bounding_square", "shape"])
class Collider(object):
def __init__(self, entity, bounding_square, shape):
self.entity = entity
self.bounding_square = bounding_square
self.shape = shape
def __repr__(self):
repr_string = "{}(entity={}, bounding_square={}, shape={})"
name = self.__class__.__name__
return repr_string.format(name, self.entity, self.bounding_square, self.shape)
```
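A hedged usage sketch, not found in the repository: the namedlist-based components are mutable records, so systems can update their fields in place. The import path is an assumption.
```python
from Components import Position, MoveSpeed, PolygonShape, Collider  # path assumed

pos = Position(x=10.0, y=20.0)
speed = MoveSpeed(speed=2.5)
pos.x += speed.speed                         # fields are writable, unlike namedtuple
shape = PolygonShape(points=[(0, 0), (1, 0), (0, 1)])
collider = Collider(entity=None, bounding_square=(-1, -1, 2, 2), shape=shape)
print(pos, collider)
```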
#### File: jpinsonault/creature_sim/ShapeUtils.py
```python
from math import sqrt
def get_bounding_square(points):
max_radius = get_bounding_circle(points)
return (-max_radius, -max_radius, max_radius * 2, max_radius * 2)
def get_bounding_circle(points):
greatest_distance = 0
for point in points:
distance = point[0]**2 + point[1]**2
greatest_distance = max(distance, greatest_distance)
# Return the radius of the bounding circle
return sqrt(greatest_distance)
``` |
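A quick hedged check of the geometry helpers (import path assumed): the bounding circle radius is the distance from the origin to the farthest point, and the bounding square is the axis-aligned square enclosing that circle.
```python
from ShapeUtils import get_bounding_circle, get_bounding_square  # path assumed

points = [(3, 4), (-1, 2)]
print(get_bounding_circle(points))   # 5.0 -- (3, 4) lies 5 units from the origin
print(get_bounding_square(points))   # (-5, -5, 10, 10) as (x, y, width, height)
```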
{
"source": "jpinsonault/diskusage",
"score": 2
} |
#### File: diskusage/activities/FolderScanActivity.py
```python
import curses
import subprocess
import Keys
from Activity import Activity
from CentralDispatch import CentralDispatch
from EventTypes import KeyStroke, ButtonEvent
from FolderScanApp import ScanComplete, ScanStarted
from activities.HelpActivity import HelpActivity
from foldercore import breadth_first, make_folder_tree
from printers import make_top_bar, make_bottom_bar, make_spacer
from ContextUtils import move_menu_left, move_menu_right, is_hidden
class FolderScanActivity(Activity):
def __init__(self):
super().__init__()
def on_start(self):
self.application.subscribe(event_type=KeyStroke, activity=self, callback=self.on_key_stroke)
self.application.subscribe(event_type=ScanComplete, activity=self, callback=self.on_scan_complete)
self.application.subscribe(event_type=ScanStarted, activity=self, callback=self.on_scan_started)
self.application.subscribe(event_type=ButtonEvent, activity=self, callback=self.on_button_event)
self.display_state = {"top_bar": {"items": {"title": "Beagle's Folder Analyzer",
"help": "Press 'h' for help"},
"fixed_size": 2,
"line_generator": make_top_bar},
"folder_tree": {"to_depth": 4,
"folder_data": [],
"selected_folder": None,
"context_menu": {"label": "Menu",
"items": ["open in explorer", "delete"],
"hidden": True},
"text_input": {"label": "Send us your reckons",
"size": 30},
"line_generator": make_folder_tree,
"focus": True},
"spacer": {"line_generator": make_spacer},
"bottom_bar": {"fixed_size": 2,
"items": {"status": "Folder scan in progress"},
"line_generator": make_bottom_bar}}
self._refresh_timer(shutdown_signal=self.application.shutdown_signal)
# self.event_queue.put(KeyStroke(curses.KEY_F1))
def on_button_event(self, event: ButtonEvent):
if event.identifier == "open in explorer":
folder = self.display_state["folder_tree"]["selected_folder"]
subprocess.Popen(r'explorer /select,"{}"'.format(folder.path))
def on_key_stroke(self, event: KeyStroke):
self._handle_folder_tree_input(fold_tree_context=self.display_state["folder_tree"], event=event)
if chr(event.key) == "h":
self.application.segue_to(HelpActivity())
elif chr(event.key) == "e":
raise Exception("This is just a test")
else:
self.display_state["top_bar"]["items"]["last_key"] = f"Last key: {event.key}"
self.refresh_screen()
def on_scan_complete(self, event: ScanComplete):
self.update_bottom_bar("status", "Scan complete")
def on_scan_started(self, event: ScanStarted):
self.update_bottom_bar("status", "Folder scan in progress")
def _handle_folder_tree_input(self, fold_tree_context, event):
if chr(event.key) == " " or chr(event.key) == Keys.ENTER:
self.toggle_context_menu()
elif event.key == curses.KEY_UP:
self.move_selected_folder_up()
elif event.key == curses.KEY_DOWN:
self.move_selected_folder_down()
elif event.key == Keys.LEFT_BRACKET:
self.move_display_depth_up(fold_tree_context)
elif event.key == Keys.RIGHT_BRACKET:
self.move_display_depth_down(fold_tree_context)
context_menu = fold_tree_context["context_menu"]
if not is_hidden(context_menu):
if event.key == curses.KEY_LEFT:
move_menu_left(context=context_menu)
elif event.key == curses.KEY_RIGHT:
move_menu_right(context=context_menu)
elif event.key == Keys.ENTER:
selected_item = context_menu["items"][context_menu["selected_index"]]
button_event = ButtonEvent(identifier=selected_item)
self.application.event_queue.put(button_event)
self.toggle_context_menu()
def _refresh_timer(self, shutdown_signal):
timer = CentralDispatch.timer(1.0, self._refresh_timer, shutdown_signal)
if not shutdown_signal.done() and self.application is not None:
timer.start()
self.refresh_tree_state()
if not shutdown_signal.done():
self.application.main_thread.submit_async(self.refresh_screen)
def refresh_tree_state(self):
if self.application.folder_scan_tree is not None:
context = self.display_state["folder_tree"]
context["folder_data"] = breadth_first(self.application.folder_scan_tree, to_depth=context["to_depth"])
if context["selected_folder"] is None:
context["selected_folder"] = context["folder_data"][0][0]
self.update_scroll_percent()
def _index_of_selected_folder(self):
context = self.display_state["folder_tree"]
folders = [folder for folder, _ in context["folder_data"]]
while context["selected_folder"] is not None and context["selected_folder"] not in folders:
context["selected_folder"] = context["selected_folder"].parent
index = folders.index(context["selected_folder"])
return index, folders
def move_display_depth_up(self,context):
context["to_depth"] = max(1, context["to_depth"] - 1)
self.refresh_tree_state()
def move_display_depth_down(self, context):
context["to_depth"] = min(10, context["to_depth"] + 1)
self.refresh_tree_state()
def move_selected_folder_up(self):
index, folders = self._index_of_selected_folder()
new_selected = folders[max(0, index-1)]
self.display_state["folder_tree"]["selected_folder"] = new_selected
self.update_scroll_percent()
def move_selected_folder_down(self):
index, folders = self._index_of_selected_folder()
new_selected = folders[min(len(folders)-1, index+1)]
self.display_state["folder_tree"]["selected_folder"] = new_selected
self.update_scroll_percent()
def update_scroll_percent(self):
index, folders = self._index_of_selected_folder()
percent = int(index/len(folders)*100)
self.update_bottom_bar("scroll_percent", f"Scroll: {percent}%")
self.refresh_screen()
def update_bottom_bar(self, tag, value):
self.display_state["bottom_bar"]["items"][tag] = value
self.refresh_screen()
def toggle_context_menu(self):
context = self.display_state["folder_tree"]["context_menu"]
context["hidden"] = not is_hidden(context)
context["selected_index"] = 0
def toggle_text_input(self):
context = self.display_state["folder_tree"]["text_input"]
context["hidden"] = not is_hidden(context)
context["selected_index"] = 0
```
#### File: jpinsonault/diskusage/Application.py
```python
import curses
import traceback
from collections import defaultdict, namedtuple
from concurrent.futures import Future
from enum import Enum
from queue import Queue
from loguru import logger
from Activity import Activity
from CentralDispatch import CentralDispatch, SerialDispatchQueue
from EventTypes import StopApplication, ExceptionOccured, KeyStroke
from activities.LogViewerActivity import LogViewerActivity
from activities.ShowExceptionActivity import ShowExceptionActivity
from loguru import logger
class Segue(Enum):
PUSH = 0
REPLACE = 1
LabeledCallback = namedtuple("LabeledCallback", ["label", "callback"])
class Application:
def __init__(self, curses_screen):
self.log_filename = "application.log"
self.curses_screen = curses_screen
self.event_subscribers = defaultdict(set)
self.stack = []
self.event_queue = Queue()
self.shutdown_signal: Future = None
self.main_thread: SerialDispatchQueue = None
self.last_exception = None
def handle_shutdown(self, shutdown_event):
if shutdown_event.exception:
try:
raise shutdown_event.exception
except Exception as e:
logger.info("Shutdown because of error:")
logger.info(f"{e.__class__.__name__}: {e}")
logger.info(traceback.format_exc())
else:
logger.info("Exited Normally")
def subscribe(self, event_type, activity, callback):
self.event_subscribers[event_type].add(LabeledCallback(activity, callback))
def unsubscribe_all(self, from_activity):
for event_type, subscribers in self.event_subscribers.items():
for labeled_callback in subscribers.copy():
if labeled_callback.label == from_activity:
self.event_subscribers[event_type].remove(labeled_callback)
def setup_logger(self):
logger.add(self.log_filename, format="{time:HH:mm:ss} {module} {message}")
def start(self, activity: Activity):
self.setup_logger()
curses.curs_set(0)
CentralDispatch.default_exception_handler = self._shutdown_app_exception_handler
self.main_thread = CentralDispatch.create_serial_queue()
self.subscribe(event_type=ExceptionOccured, activity=self, callback=self.on_exception)
self.subscribe(event_type=KeyStroke, activity=self, callback=self.on_key_stroke)
self.shutdown_signal = CentralDispatch.future(self._event_monitor)
self.start_key_monitor()
self.on_start()
self.segue_to(activity)
shutdown_event = self.shutdown_signal.result()
self.handle_shutdown(shutdown_event)
def on_start(self): pass
def _stop_activity(self, activity):
activity._stop()
self.unsubscribe_all(activity)
def _start_activity(self, activity):
activity._start(application=self)
def _segue_to(self, activity: Activity, segue_type):
if len(self.stack) > 0:
if segue_type == Segue.REPLACE:
current_activity = self.stack.pop()
else:
current_activity = self.stack[-1]
current_activity._stop()
current_activity.on_stop()
self.unsubscribe_all(current_activity)
self.stack.append(activity)
activity._start(application=self)
def segue_to(self, activity: Activity, segue_type=Segue.PUSH):
self.main_thread.submit_async(self._segue_to, activity, segue_type=segue_type)
def _pop_activity(self):
current_activity = self.stack.pop()
if len(self.stack) > 0:
returning_activity = self.stack[-1]
self._stop_activity(current_activity)
self._start_activity(returning_activity)
else:
# We've popped the last activity
self.event_queue.put(StopApplication())
def pop_activity(self):
self.main_thread.submit_async(self._pop_activity)
def _dispatch_event(self, callback, event):
callback(event)
def dispatch_event(self, event):
for labeled_callback in self.event_subscribers[type(event)]:
self.main_thread.submit_async(self._dispatch_event, labeled_callback.callback, event)
def _event_monitor(self):
event = self.event_queue.get()
while not isinstance(event, StopApplication):
self.dispatch_event(event)
event = self.event_queue.get()
# Return the last event, because it might contain an exception
return event
def _key_monitor(self, screen):
while not self.shutdown_signal.done():
key = screen.getch()
# 3 = ctrl-c
if key == 3:
self.event_queue.put(StopApplication())
return
else:
self.event_queue.put(KeyStroke(key))
def start_key_monitor(self):
CentralDispatch.future(self._key_monitor, self.curses_screen)
def _debug_message(self, lines):
self.curses_screen.clear()
for index, line in enumerate(lines):
self.curses_screen.addstr(index, 0, line)
self.curses_screen.refresh()
def debug_message(self, message: str):
lines = message.split("\n")
self.main_thread.submit_async(self._debug_message, lines)
def on_key_stroke(self, event: KeyStroke):
if event.key == curses.KEY_F1:
self.segue_to(LogViewerActivity())
def on_exception(self, event: ExceptionOccured):
if self.last_exception is not None:
logger.info("While handling one exception, another occurred.\nOriginal exception: {}")
logger.info(f"{self.last_exception.__class__.__name__}: {self.last_exception}")
logger.info(traceback.format_exc())
self.event_queue.put(StopApplication(exception=event.exception))
else:
self.last_exception = event.exception
self.segue_to(ShowExceptionActivity(event.exception))
def _shutdown_app_exception_handler(self, function):
def inner_function(*args, **kwargs):
try:
return function(*args, **kwargs)
except Exception as e:
self.event_queue.put(ExceptionOccured(exception=e))
return inner_function
```
#### File: jpinsonault/diskusage/CentralDispatch.py
```python
import threading
import traceback
from concurrent.futures import Future
from concurrent.futures.thread import ThreadPoolExecutor
from queue import Queue, Empty
import functools
from loguru import logger
def perform_on(func, dispatch_queue, do_async=False):
@functools.wraps(func)
def inner_function(*args, **kwargs):
future = dispatch_queue.submit_async(func, *args, **kwargs)
if do_async:
return future
else:
return future.result()
return inner_function
def wait_on_futures(futures_queue: Queue):
try:
future = futures_queue.get_nowait()
while True:
_ = future.result()
future = futures_queue.get_nowait()
except Empty:
return
def wrap_with_try(func):
@functools.wraps(func)
def inner_function(*args, **kwargs):
try:
return func(*args, **kwargs)
except Exception as e:
logger.error(f"Error: {e}")
logger.error(traceback.format_exc())
raise e
return inner_function
class AppShutDownSignal: pass
class SerialDispatchQueue:
def __init__(self, exception_handler):
self.exception_handler = exception_handler
self.task_threadpool = ThreadPoolExecutor(1)
self.futures_queue = Queue()
def submit_async(self, block, *args, **kwargs) -> Future:
task = self.exception_handler(block)
future = self.task_threadpool.submit(task, *args, **kwargs)
self.futures_queue.put(future)
return future
def await_result(self, block, *args, **kwargs):
future = self.submit_async(block, *args, **kwargs)
return future.result()
def finish_work(self) -> Future:
return CentralDispatch.exhaust_futures(self.futures_queue)
class ConcurrentDispatchQueue:
def __init__(self, size, exception_handler):
self.exception_handler = exception_handler
self.task_threadpool = ThreadPoolExecutor(size)
self.futures_queue = Queue()
def submit_async(self, block, *args, **kwargs) -> Future:
task = self.exception_handler(block)
future = self.task_threadpool.submit(task, *args, **kwargs)
self.futures_queue.put(future)
return future
def await_result(self, block, *args, **kwargs):
future = self.submit_async(block, *args, **kwargs)
return future.result()
def finish_work(self) -> Future:
return CentralDispatch.exhaust_futures(self.futures_queue)
class CentralDispatch:
default_exception_handler = wrap_with_try
@staticmethod
def create_serial_queue() -> SerialDispatchQueue:
return SerialDispatchQueue(exception_handler=CentralDispatch.default_exception_handler)
@staticmethod
def create_concurrent_queue(size) -> ConcurrentDispatchQueue:
return ConcurrentDispatchQueue(size, exception_handler=CentralDispatch.default_exception_handler)
@staticmethod
def future(block, *args, **kwargs) -> Future:
dispatch_queue = SerialDispatchQueue(exception_handler=CentralDispatch.default_exception_handler)
return dispatch_queue.submit_async(block, *args, **kwargs)
@staticmethod
def timer(interval, callback, *args) -> threading.Timer:
task = CentralDispatch.default_exception_handler(callback)
timer = threading.Timer(interval, task, args)
timer.daemon = True
return timer
@staticmethod
def exhaust_futures(future_queue: Queue) -> Future:
return CentralDispatch.future(wait_on_futures, future_queue)
@classmethod
def concat(cls, *args):
def _concat(*args):
for future in args:
future.result()
return CentralDispatch.future(_concat, *args)
```
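A hedged usage sketch, not part of the module: a SerialDispatchQueue runs tasks one at a time on a single worker thread, finish_work() blocks until it drains, and CentralDispatch.future() is a one-off background task. The import path is an assumption.
```python
import time
from CentralDispatch import CentralDispatch  # path assumed

def slow_square(x):
    time.sleep(0.1)
    return x * x

queue = CentralDispatch.create_serial_queue()
futures = [queue.submit_async(slow_square, n) for n in range(5)]
queue.finish_work().result()                      # wait for the queue to drain
print([f.result() for f in futures])              # [0, 1, 4, 9, 16]
print(CentralDispatch.future(slow_square, 7).result())  # one-off task: 49
```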
#### File: jpinsonault/diskusage/ContextUtils.py
```python
from typing import Optional
def get_fixed_size(context):
return context.get("fixed_size", 0)
def get_text(context):
return context.get("text", "")
def move_menu_left(context):
selected_index = context.get("selected_index", 0)
context["selected_index"] = (selected_index - 1) % len(context["items"])
def move_menu_right(context):
selected_index = context.get("selected_index", 0)
context["selected_index"] = (selected_index + 1) % len(context["items"])
def get_cursor_index(context):
return context.get("cursor_index", 0)
def get_text_length(context):
if "text" in context:
return len(context["text"])
else:
return -1
def get_x(context):
return context.get("x", 0)
def scroll_up(context):
index = get_selected_index(context)
set_selected_index(context, new_index=index - 1)
def scroll_down(context):
index = get_selected_index(context)
set_selected_index(context, new_index=index + 1)
def scroll_to_top(context):
set_selected_index(context, new_index=0)
def scroll_to_bottom(context):
set_selected_index(context, new_index=len(context["items"]))
def is_hidden(context):
return context.get("hidden", False)
def set_selected_index(context, new_index) -> int:
selected_index = max(0, new_index)
selected_index = min(len(context["items"]) - 1, selected_index)
context["selected_index"] = selected_index
return selected_index
def get_selected_index(context) -> int:
selected_index = context.get("selected_index", 0)
selected_index = max(0, selected_index)
selected_index = min(len(context["items"]) - 1, selected_index)
return selected_index
def get_items_len(context) -> Optional[int]:
items = context.get("items", None)
if items is None:
return None
else:
return len(items)
```
#### File: jpinsonault/diskusage/FolderScanApp.py
```python
from collections import namedtuple
from pathlib import Path
from queue import Queue
from Application import Application
from CentralDispatch import CentralDispatch
from folder import Folder
from foldercore import folder_from_path, sub_paths
ScanResult = namedtuple("ScanResult", ["folder"])
ScanError = namedtuple("ScanError", ["error"])
class ScanComplete: pass
class ScanStarted: pass
class FolderScanApp(Application):
def __init__(self, args, curses_screen):
super().__init__(curses_screen)
self.args = args
self.folder_scan_future = None
self.collect_results_dispatch_queue = None
self.folder_work_dispatch_queue = None
self.folder_scan_tree = None
def on_start(self):
self.collect_results_dispatch_queue = CentralDispatch.create_serial_queue()
self.folder_work_dispatch_queue = CentralDispatch.create_concurrent_queue(size=5)
self.start_folder_scan(self.args.path)
def start_folder_scan(self, path):
CentralDispatch.future(self._scan_folder, Path(path))
def _scan_folder(self, root_path: Path):
self.folder_scan_tree = folder_from_path(root_path, None)
for sub_folder_path in sub_paths(root_path):
self.folder_work_dispatch_queue.submit_async(
self.analyze_folder_task, sub_folder_path, self.folder_scan_tree
)
self.folder_work_dispatch_queue.finish_work().result()
self.collect_results_dispatch_queue.finish_work().result()
self.event_queue.put(ScanComplete())
def collect_results(self, new_folder: Folder):
new_folder.parent.insert_folder(new_folder)
def analyze_folder_task(self, path: Path, parent: Folder):
folder = folder_from_path(path, parent)
for sub_folder_path in sub_paths(folder.path):
if not self.shutdown_signal.done():
self.folder_work_dispatch_queue.submit_async(self.analyze_folder_task, sub_folder_path, folder)
if not self.shutdown_signal.done():
self.collect_results_dispatch_queue.submit_async(self.collect_results, folder)
```
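A hedged launch sketch, not the repository's actual entry point: it wires FolderScanApp and FolderScanActivity together under curses.wrapper and fakes the parsed CLI arguments with an argparse.Namespace.
```python
import argparse
import curses

from FolderScanApp import FolderScanApp                      # paths assumed
from activities.FolderScanActivity import FolderScanActivity

def run(screen):
    args = argparse.Namespace(path=".")       # pretend the user asked to scan "."
    FolderScanApp(args, screen).start(FolderScanActivity())   # blocks until exit

if __name__ == '__main__':
    curses.wrapper(run)
```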
#### File: jpinsonault/diskusage/printers.py
```python
import curses
from functools import partial
from itertools import islice
from typing import Optional
from loguru import logger
from ContextUtils import get_text, get_cursor_index, get_x, get_selected_index
from PrintItem import PrintItem
def print_empty_line(screen, y):
pass
def print_line(x, text, screen, y):
screen.addstr(y, x, text, curses.A_NORMAL)
def print_highlighted_line(x, text, screen, y):
screen.addstr(y, x, text, curses.A_REVERSE)
def print_bold_line(x, text, screen, y):
screen.addstr(y, x, text, curses.A_BOLD)
def make_top_bar(context, remaining_height):
def print_top_bar(screen, y):
items = [text for key, text in context["items"].items()]
screen.addstr(y, 0, " | ".join(items), curses.A_BOLD)
return [print_top_bar, print_empty_line]
def make_bottom_bar(context, remaining_height):
    def print_bottom_bar(screen, y):
items = [text for key, text in context["items"].items()]
screen.addstr(y, 0, " | ".join(items), curses.A_BOLD)
return [print_empty_line, print_bottom_bar]
def make_spacer(context, remaining_height):
"""
Fills up the remaining space with empty lines
Useful for shoving ui bits to the bottom of the screen
"""
return [print_empty_line for _ in range(remaining_height)]
def start_stop(index, window_size, list_size):
if window_size % 2 == 0:
up = window_size//2
down = window_size//2 - 1
else:
up = window_size//2
down = window_size//2
# if topped out
if index - up < 0:
return 0, min(window_size, list_size)
# if bottomed out
elif index + down > list_size:
return max(0, list_size-window_size), list_size
else:
return index - up, index + down+1
def cut_items_to_window(selected_index, items, window_size):
start, stop = start_stop(selected_index, window_size, len(items))
return start, stop, islice(items, start, stop)
def default_item_printer(screen, y_index, item, mode):
screen.addstr(y_index, 0, str(item), mode)
def tuple_item_printer(screen, y_index, item, mode):
screen.addstr(y_index, 0, str(item[0]), mode)
def make_scroll_list(screen, context, remaining_height) -> []:
num_rows, num_cols = screen.getmaxyx()
selected_index = get_selected_index(context)
is_focused = get_focused(context)
start, stop, visible_items = cut_items_to_window(selected_index, context["items"], remaining_height)
print_items = []
for index, item in zip(range(start, stop), visible_items):
sanitized = str(item)[:num_cols]
if index == selected_index and is_focused:
print_items.append(partial(print_highlighted_line, 0, sanitized))
else:
print_items.append(partial(print_line, 0, sanitized))
return print_items
def print_context_menu(context, screen, y):
selected_index = context.get("selected_index", 0)
x = context["x"]
label = f"{context.get('label', '')}: "
screen.addstr(y, x, label, curses.A_BOLD)
x += len(label)
for index in range(len(context["items"])):
item = context["items"][index]
text = f"[{item}]"
if index == selected_index:
screen.addstr(y, x, text, curses.A_REVERSE)
else:
screen.addstr(y, x, text, curses.A_NORMAL)
x += len(text) + 1
def make_context_menu(context, remaining_height=0) -> []:
return [partial(print_context_menu, context),
print_empty_line]
def get_focused(context) -> bool:
return context.get("focused", False)
def print_text_input(x, context, screen, y):
cursor_index = get_cursor_index(context)
text = get_text(context)
is_focused = get_focused(context)
x_index = x
label = f"{context.get('label', '')}: "
screen.addstr(y, x_index, label, curses.A_BOLD)
x_index += len(label)
screen.addch(y, x_index, "[", curses.A_BOLD)
x_index += 1
for index in range(len(text)):
char = text[index]
if index == cursor_index and is_focused:
screen.addch(y, x_index, char, curses.A_REVERSE)
else:
screen.addch(y, x_index, char, curses.A_NORMAL)
x_index += 1
if cursor_index == len(text) and is_focused:
screen.addch(y, x_index, "]", curses.A_REVERSE)
x_index += 1
else:
screen.addch(y, x_index, "]", curses.A_BOLD)
x_index += 1
def print_text_line(screen, context, start_index, remaining_height):
screen.addstr(start_index, get_x(context), context["text"])
return context["fixed_size"]
def make_text_input(context, remaining_height):
return [partial(print_text_input, 0, context)]
def make_text_editor(screen, context, remaining_height) -> []:
num_rows, num_cols = screen.getmaxyx()
selected_index = get_selected_index(context)
is_focused = get_focused(context)
# text_lines = context["l"]
start, stop, visible_items = cut_items_to_window(selected_index, context["items"], remaining_height)
print_items = []
for index, item in zip(range(start, stop), visible_items):
sanitized = str(item)[:num_cols]
if index == selected_index and is_focused:
print_items.append(partial(print_highlighted_line, 0, sanitized))
else:
print_items.append(partial(print_line, 0, sanitized))
return print_items
``` |
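A hedged illustration, not in the module, of the scroll-window arithmetic in start_stop(): the selected index stays roughly centred until the window hits either end of the list.
```python
from printers import start_stop  # import path assumed

for idx in (0, 5, 25, 48):
    print(idx, start_stop(idx, window_size=10, list_size=50))
# 0  -> (0, 10)   clamped at the top
# 5  -> (0, 10)   still inside the first window
# 25 -> (20, 30)  centred on the selection
# 48 -> (40, 50)  clamped at the bottom
```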
{
"source": "jpinsonault/imdb_cast_matcher",
"score": 4
} |
#### File: jpinsonault/imdb_cast_matcher/match_cast.py
```python
import argparse
import sys
from imdb import IMDb
args = None
def parse_args():
global args
parser = argparse.ArgumentParser()
parser.add_argument('first_movie')
parser.add_argument('second_movie')
args = parser.parse_args()
def main():
imdb = IMDb()
# Get 2 movies
first_movie = confirm_movie(imdb, args.first_movie)
second_movie = confirm_movie(imdb, args.second_movie)
imdb.update(first_movie)
imdb.update(second_movie)
print("Comparing '{}' and '{}'".format(first_movie["title"], second_movie["title"]))
# Compare cast
in_both = []
for first_movie_person in first_movie["cast"]:
for second_movie_person in second_movie["cast"]:
if first_movie_person["name"] == second_movie_person["name"]:
in_both.append(first_movie_person)
for person in in_both:
print(person["name"])
def confirm_movie(imdb, movie_name):
return imdb.search_movie(movie_name)[0]
if __name__ == '__main__':
parse_args()
main()
``` |
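A hedged sketch of the same comparison without the CLI wrapper; it assumes the IMDbPY package is installed and that network access is available, and the two titles are only examples.
```python
from imdb import IMDb

ia = IMDb()
movie_a = ia.search_movie("Heat")[0]        # take the top search result, as above
movie_b = ia.search_movie("Collateral")[0]
ia.update(movie_a)
ia.update(movie_b)
shared = {p["name"] for p in movie_a["cast"]} & {p["name"] for p in movie_b["cast"]}
print(sorted(shared))
```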
{
"source": "jpinsonault/language_generator",
"score": 3
} |
#### File: jpinsonault/language_generator/generate_words.py
```python
from word_generator import random_chain_weights
from word_generator import generate_word
from word_generator import Options
from word_generator import uniform_random_weights
import phonemes
from pprint import pprint
def main():
vowel_counts = uniform_random_weights(1, 1)
init_counts = uniform_random_weights(1, 1)
syllables = uniform_random_weights(1, 4)
final_counts = uniform_random_weights(0, 0)
vowel_ps = random_chain_weights(phonemes.vowels)
final_c_ps = random_chain_weights(phonemes.consonants)
init_c_ps = random_chain_weights(phonemes.consonants)
options = Options(init_c_ps, vowel_ps, final_c_ps, syllables, init_counts, vowel_counts, final_counts)
words = [generate_word(options) for _ in range(10)]
pprint(words)
if __name__ == '__main__':
main()
``` |
{
"source": "jpinzonc/jupyterlab_templates",
"score": 2
} |
#### File: jupyterlab_templates/tests/test_init.py
```python
from mock import patch, MagicMock
from jupyterlab_templates import _jupyter_server_extension_paths
class TestInit:
def setup(self):
pass
# setup() before each test method
def teardown(self):
pass
# teardown() after each test method
@classmethod
def setup_class(cls):
pass
# setup_class() before any methods in this class
@classmethod
def teardown_class(cls):
pass
# teardown_class() after any methods in this class
def test__jupyter_server_extension_paths(self):
assert _jupyter_server_extension_paths() == [{"module": "jupyterlab_templates.extension"}]
``` |
{
"source": "jpinzonc/python",
"score": 3
} |
#### File: python/hungap-utils/dateutils.py
```python
import pytz
import datetime
import pandas as pd
# Encapsulates helper method (or variables in the future) for timezone related constructions and conversions
class MyTimeZone:
# Initialise with a valid time zone name - i.e. the local time zone
def __init__(self, tz_name):
self.tz = pytz.timezone(tz_name)
# Change timezone of a panda series to local time zone
def series_as_local_time(self, d):
return pd.to_datetime(d).dt.tz_localize(self.tz)
# Create a new datetime object in local time
def local_time(self, year, month, day, hour, minute):
return self.tz.localize(datetime.datetime(year, month, day, hour, minute))
``` |
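A hedged usage sketch, not part of the module (the import path is an assumption): localize a naive pandas series and build a single timezone-aware timestamp.
```python
import pandas as pd
from dateutils import MyTimeZone  # this project's dateutils.py, path assumed

tz = MyTimeZone("Europe/Budapest")
series = pd.Series(["2021-06-01 08:00", "2021-06-01 09:30"])
print(tz.series_as_local_time(series))   # dtype becomes datetime64[ns, Europe/Budapest]
print(tz.local_time(2021, 6, 1, 8, 0))   # 2021-06-01 08:00:00+02:00
```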
{
"source": "jpipas/python-skyapi",
"score": 3
} |
#### File: python-skyapi/skyapi/skyapiclient.py
```python
from __future__ import unicode_literals
import functools
import re
import requests
from requests.auth import HTTPBasicAuth
# Handle library reorganisation Python 2 > Python 3.
try:
from urllib.parse import urljoin
from urllib.parse import urlencode
except ImportError:
from urlparse import urljoin
from urllib import urlencode
import logging
_logger = logging.getLogger('skyapi.client')
SKY_API_ENDPOINT = 'https://api.sky.blackbaud.com/'
def _enabled_or_noop(fn):
@functools.wraps(fn)
def wrapper(self, *args, **kwargs):
if self.enabled:
return fn(self, *args, **kwargs)
return wrapper
class SkyAPIError(Exception):
pass
class SkyAPITokenError(Exception):
pass
class SkyAPIClient(object):
"""
Sky API class to communicate with the Blackbaud API
"""
def __init__(self, subscription_key=None, access_token=None, enabled=True, timeout=None, request_hooks=None, request_headers=None):
super(SkyAPIClient, self).__init__()
self.enabled = enabled
self.timeout = timeout
if access_token:
self.auth = SkyAPIOAuth(access_token, subscription_key)
self.base_url = SKY_API_ENDPOINT
else:
raise Exception("You must provide an OAuth access token")
self.request_headers = request_headers or requests.utils.default_headers()
self.request_hooks = request_hooks or requests.hooks.default_hooks()
def _make_request(self, **kwargs):
_logger.info(u'{method} Request: {url}'.format(**kwargs))
if kwargs.get('json'):
_logger.debug('PAYLOAD: {json}'.format(**kwargs))
response = requests.request(**kwargs)
_logger.debug(u'{method} Response: {status} {text}' \
.format(method=kwargs['method'], status=response.status_code, text=response.text))
return response
@_enabled_or_noop
def _post(self, url, data=None):
"""
Handle authenticated POST requests
:param url: The url for the endpoint including path parameters
:type url: :py:class:`str`
:param data: The request body parameters
:type data: :py:data:`none` or :py:class:`dict`
:returns: The JSON output from the API or an error message
"""
url = urljoin(self.base_url, url)
try:
r = self._make_request(**dict(
method='POST',
url=url,
json=data,
auth=self.auth,
timeout=self.timeout,
hooks=self.request_hooks,
headers=self.request_headers
))
except requests.exceptions.RequestException as e:
raise e
else:
if r.status_code == 401:
raise SkyAPITokenError(r.json())
            if r.status_code == 400:
                # the response body might not be valid JSON
                try:
                    error_data = r.json()
                except ValueError:
                    error_data = {"response": r}
                raise SkyAPIError(error_data)
            if r.status_code >= 403:
                # the response body might not be valid JSON
                try:
                    error_data = r.json()
                except ValueError:
                    error_data = {"response": r}
                raise SkyAPIError(error_data)
if r.status_code == 204:
return None
return r.json()
@_enabled_or_noop
def _get(self, url, **queryparams):
"""
Handle authenticated GET requests
:param url: The url for the endpoint including path parameters
:type url: :py:class:`str`
:param queryparams: The query string parameters
:returns: The JSON output from the API
"""
url = urljoin(self.base_url, url)
if len(queryparams):
url += '?' + urlencode(queryparams)
try:
r = self._make_request(**dict(
method='GET',
url=url,
auth=self.auth,
timeout=self.timeout,
hooks=self.request_hooks,
headers=self.request_headers
))
except requests.exceptions.RequestException as e:
raise e
else:
if r.status_code >= 400:
raise SkyAPIError(r.json())
return r.json()
@_enabled_or_noop
def _delete(self, url):
"""
Handle authenticated DELETE requests
:param url: The url for the endpoint including path parameters
:type url: :py:class:`str`
:returns: The JSON output from the API
"""
url = urljoin(self.base_url, url)
try:
r = self._make_request(**dict(
method='DELETE',
url=url,
auth=self.auth,
timeout=self.timeout,
hooks=self.request_hooks,
headers=self.request_headers
))
except requests.exceptions.RequestException as e:
raise e
else:
if r.status_code >= 400:
raise SkyAPIError(r.json())
if r.status_code == 204:
return
return r.json()
@_enabled_or_noop
def _patch(self, url, data=None):
"""
Handle authenticated PATCH requests
:param url: The url for the endpoint including path parameters
:type url: :py:class:`str`
:param data: The request body parameters
:type data: :py:data:`none` or :py:class:`dict`
:returns: The JSON output from the API
"""
url = urljoin(self.base_url, url)
try:
r = self._make_request(**dict(
method='PATCH',
url=url,
json=data,
auth=self.auth,
timeout=self.timeout,
hooks=self.request_hooks,
headers=self.request_headers
))
except requests.exceptions.RequestException as e:
raise e
else:
if r.status_code >= 400:
raise SkyAPIError(r.json())
return r.json()
@_enabled_or_noop
def _put(self, url, data=None):
"""
Handle authenticated PUT requests
:param url: The url for the endpoint including path parameters
:type url: :py:class:`str`
:param data: The request body parameters
:type data: :py:data:`none` or :py:class:`dict`
:returns: The JSON output from the API
"""
url = urljoin(self.base_url, url)
try:
r = self._make_request(**dict(
method='PUT',
url=url,
json=data,
auth=self.auth,
timeout=self.timeout,
hooks=self.request_hooks,
headers=self.request_headers
))
except requests.exceptions.RequestException as e:
raise e
else:
if r.status_code >= 400:
raise SkyAPIError(r.json())
return r.json()
class SkyAPIOAuth(requests.auth.AuthBase):
"""
    Authentication class for OAuth2. An OAuth2 access token for Sky API can be
    acquired by following the instructions in the documentation found at
https://developer.blackbaud.com/skyapi/docs/authorization/auth-code-flow
"""
def __init__(self, access_token, subscription_key):
"""
Initialize the OAuth and save the access token
:param access_token: The access token provided by OAuth authentication
:type access_token: :py:class:`str`
:param subscription_key: The Blackbaud API Subscription key for your application
:type subscription_key: :py:class:`str`
"""
self._access_token = access_token
self._subscription_key = subscription_key
def __call__(self, r):
"""
Authorize with the access token provided in __init__
"""
r.headers['Authorization'] = 'Bearer ' + self._access_token
r.headers['Bb-Api-Subscription-Key'] = self._subscription_key
r.headers['Content-Type'] = 'application/json'
return r
``` |
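A hedged usage sketch, not part of the library: the subscription key, access token and endpoint path below are placeholders, and the constituents path is shown only as an illustrative Sky API route.
```python
from skyapi.skyapiclient import SkyAPIClient

client = SkyAPIClient(
    subscription_key="YOUR-SUBSCRIPTION-KEY",   # placeholder
    access_token="YOUR-OAUTH-ACCESS-TOKEN",     # placeholder
    timeout=30,
)
# Illustrative GET; substitute whatever Sky API route your subscription exposes.
response = client._get("constituent/v1/constituents", limit=10)
print(response)
```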
{
"source": "jpirkl743/QR-MeNow",
"score": 2
} |
#### File: jpirkl743/QR-MeNow/userProfileHandler.py
```python
from userProfile import *
def makeUserProfile(firstName, lastName, phoneNumber, address, email):
user = profile(firstName, lastName, phoneNumber, address, email)
return user
``` |
{
"source": "jpirnay/gui-files-meerk40t",
"score": 2
} |
#### File: jpirnay/gui-files-meerk40t/meerk40t_settings.py
```python
import wx
# begin wxGlade: dependencies
# end wxGlade
# begin wxGlade: extracode
# end wxGlade
class Settings(wx.Frame):
def __init__(self, *args, **kwds):
# begin wxGlade: Settings.__init__
kwds["style"] = kwds.get("style", 0) | wx.DEFAULT_FRAME_STYLE | wx.FRAME_TOOL_WINDOW | wx.STAY_ON_TOP
wx.Frame.__init__(self, *args, **kwds)
self.SetSize((412, 183))
self.radio_units = wx.RadioBox(self, wx.ID_ANY, "Units", choices=["mm", "cm", "inch", "mils"], majorDimension=1, style=wx.RA_SPECIFY_ROWS)
self.combo_language = wx.ComboBox(self, wx.ID_ANY, choices=[], style=wx.CB_DROPDOWN)
self.check_list_box_1 = wx.CheckListBox(self, wx.ID_ANY, choices=["Invert Mouse Wheel Zoom", "Autoclose Shutdown"])
self.__set_properties()
self.__do_layout()
self.Bind(wx.EVT_RADIOBOX, self.on_radio_units, self.radio_units)
self.Bind(wx.EVT_COMBOBOX, self.on_combo_language, self.combo_language)
self.Bind(wx.EVT_CHECKLISTBOX, self.on_checklist_settings, self.check_list_box_1)
# end wxGlade
def __set_properties(self):
# begin wxGlade: Settings.__set_properties
self.SetTitle("Settings")
self.radio_units.SetBackgroundColour(wx.Colour(192, 192, 192))
self.radio_units.SetToolTip("Set default units for guides")
self.radio_units.SetSelection(0)
self.combo_language.SetToolTip("Select the desired language to use.")
# end wxGlade
def __do_layout(self):
# begin wxGlade: Settings.__do_layout
sizer_1 = wx.BoxSizer(wx.HORIZONTAL)
sizer_6 = wx.BoxSizer(wx.VERTICAL)
sizer_5 = wx.BoxSizer(wx.VERTICAL)
sizer_2 = wx.StaticBoxSizer(wx.StaticBox(self, wx.ID_ANY, "Language"), wx.HORIZONTAL)
sizer_5.Add(self.radio_units, 0, wx.EXPAND, 0)
sizer_2.Add(self.combo_language, 0, 0, 0)
sizer_5.Add(sizer_2, 0, wx.EXPAND, 0)
sizer_1.Add(sizer_5, 1, wx.EXPAND, 0)
sizer_6.Add(self.check_list_box_1, 1, wx.EXPAND, 0)
sizer_1.Add(sizer_6, 1, wx.EXPAND, 0)
self.SetSizer(sizer_1)
self.Layout()
# end wxGlade
def on_radio_units(self, event): # wxGlade: Settings.<event_handler>
print("Event handler 'on_radio_units' not implemented!")
event.Skip()
def on_combo_language(self, event): # wxGlade: Settings.<event_handler>
print("Event handler 'on_combo_language' not implemented!")
event.Skip()
def on_checklist_settings(self, event): # wxGlade: Settings.<event_handler>
print("Event handler 'on_checklist_settings' not implemented!")
event.Skip()
# end of class Settings
class MyApp(wx.App):
def OnInit(self):
self.Settings = Settings(None, wx.ID_ANY, "")
self.SetTopWindow(self.Settings)
self.Settings.Show()
return True
# end of class MyApp
if __name__ == "__main__":
app = MyApp(0)
app.MainLoop()
``` |
{
"source": "jpirnay/meerk40t",
"score": 2
} |
#### File: meerk40t/balor/sender.py
```python
import threading
import time
import usb.core
import usb.util
from meerk40t.balor.command_list import CommandList, CommandSource
class BalorException(Exception):
pass
class BalorConfigException(BalorException):
pass
class BalorMachineException(BalorException):
pass
class BalorCommunicationException(BalorException):
pass
class BalorDataValidityException(BalorException):
pass
# Marked with ? - currently not seen in the wild
DISABLE_LASER = 0x0002
RESET = 0x0003
ENABLE_LASER = 0x0004
EXECUTE_LIST = 0x0005
SET_PWM_PULSE_WIDTH = 0x0006 # ?
GET_REGISTER = 0x0007
GET_SERIAL_NUMBER = 0x0009 # In EzCAD mine is 32012LI43405B, Version 4.02, LMC V4 FIB
GET_LIST_STATUS = 0x000A
GET_XY_POSITION = 0x000C # Get current galvo position
SET_XY_POSITION = 0x000D # Travel the galvo xy to specified position
LASER_SIGNAL_OFF = 0x000E # ?
LASER_SIGNAL_ON = 0x000F # ?
WRITE_CORRECTION_LINE = 0x0010 # ?
RESET_LIST = 0x0012
RESTART_LIST = 0x0013
WRITE_CORRECTION_TABLE = 0x0015
SET_CONTROL_MODE = 0x0016
SET_DELAY_MODE = 0x0017
SET_MAX_POLY_DELAY = 0x0018
SET_END_OF_LIST = 0x0019
SET_FIRST_PULSE_KILLER = 0x001A
SET_LASER_MODE = 0x001B
SET_TIMING = 0x001C
SET_STANDBY = 0x001D
SET_PWM_HALF_PERIOD = 0x001E
STOP_EXECUTE = 0x001F # Since observed in the wild
STOP_LIST = 0x0020 # ?
WRITE_PORT = 0x0021
WRITE_ANALOG_PORT_1 = 0x0022 # At end of cut, seen writing 0x07FF
WRITE_ANALOG_PORT_2 = 0x0023 # ?
WRITE_ANALOG_PORT_X = 0x0024 # ?
READ_PORT = 0x0025
SET_AXIS_MOTION_PARAM = 0x0026
SET_AXIS_ORIGIN_PARAM = 0x0027
GO_TO_AXIS_ORIGIN = 0x0028
MOVE_AXIS_TO = 0x0029
GET_AXIS_POSITION = 0x002A
GET_FLY_WAIT_COUNT = 0x002B # ?
GET_MARK_COUNT = 0x002D # ?
SET_FPK_2E = 0x002E # First pulse killer related, SetFpkParam2
# My ezcad lists 40 microseconds as FirstPulseKiller
# EzCad sets it 0x0FFB, 1, 0x199, 0x64
FIBER_CONFIG_1 = 0x002F #
FIBER_CONFIG_2 = 0x0030 #
LOCK_INPUT_PORT = 0x0031 # ?
SET_FLY_RES = 0x0032 # Unknown fiber laser parameter being set
# EzCad sets it: 0x0000, 0x0063, 0x03E8, 0x0019
FIBER_OPEN_MO = 0x0033 # "IPG (i.e. fiber) Open MO" - MO is probably Master Oscillator
# (In BJJCZ documentation, the pin 18 on the IPG connector is
# called "main oscillator"; on the raycus docs it is "emission enable.")
# Seen at end of marking operation with all
# zero parameters. My Ezcad has an "open MO delay"
# of 8 ms
FIBER_GET_StMO_AP = 0x0034 # Unclear what this means; there is no
# corresponding list command. It might be to
# get a status register related to the source.
# It is called IPG_GETStMO_AP in the dll, and the abbreviations
# MO and AP are used for the master oscillator and power amplifier
# signal lines in BJJCZ documentation for the board; LASERST is
# the name given to the error code lines on the IPG connector.
GET_USER_DATA = 0x0036 # ?
GET_FLY_PULSE_COUNT = 0x0037 # ?
GET_FLY_SPEED = 0x0038 # ?
ENABLE_Z_2 = 0x0039 # ? AutoFocus on/off
ENABLE_Z = 0x003A # AutoFocus on/off
SET_Z_DATA = 0x003B # ?
SET_SPI_SIMMER_CURRENT = 0x003C # ?
IS_LITE_VERSION = 0x0040 # Tell laser to nerf itself for ezcad lite apparently
GET_MARK_TIME = (
0x0041 # Seen at end of cutting, only and always called with param 0x0003
)
SET_FPK_PARAM = 0x0062 # Probably "first pulse killer" = fpk
class Sender:
"""This is a simplified control class for the BJJCZ (Golden Orange,
Beijing JCZ) LMCV4-FIBER-M and compatible boards. All operations are blocking
so, it should probably run in its own thread for nontrivial applications.
It does have an .abort() method that it is expected will be called
asynchronously from another thread."""
sleep_time = 0.001
# We include this "blob" here (the contents of which are all well-understood) to
# avoid introducing a dependency on job generation from within the sender.
# It just consists of the new job command followed by a bunch of NOPs.
_abort_list_chunk = bytearray([0x51, 0x80] + [0x00] * 10) + bytearray( # New job
([0x02, 0x80] + [0x00] * 10) * 255
) # NOP
_packet_size = 256 * 12
def get_packet_size(self):
return (
self._packet_size
) # TODO maybe this should get it from the usb connection class,
# n.b. not instance which will not exist at the time it's needed necessarily
def __init__(self, service, debug=False):
self.service = service
self._lock = threading.Lock()
self._terminate_execution = False
self._footswitch_callback = None
self._usb_connection = None
self._write_port = 0x0000
self._debug = debug
def is_open(self):
return self._usb_connection is not None
def open(self):
mock = self.service.mock
machine_index = self.service.machine_index
if self._usb_connection is not None:
raise BalorCommunicationException("Attempting to open an open connection.")
if not mock:
connection = UsbConnection(machine_index, debug=self._debug)
else:
connection = MockConnection(machine_index, debug=self._debug)
connection.open()
self._usb_connection = connection
self._init_machine()
time.sleep(
0.05
) # We sacrifice this time at the altar of the unknown race condition
return True
def close(self):
self.abort()
if self._usb_connection is not None:
self._usb_connection.close()
self._usb_connection = None
def job(self, *args, **kwargs):
return CommandList(*args, **kwargs, sender=self)
def command(self, *args, **kwargs):
self._send_command(*args, **kwargs)
def _send_command(self, *args, **kwargs):
if self._usb_connection is None:
self.open()
return self._usb_connection.send_command(*args, **kwargs)
def _send_correction_entry(self, *args):
if self._usb_connection is None:
self.open()
self._usb_connection.send_correction_entry(*args)
def _send_list_chunk(self, *args):
if self._usb_connection is None:
self.open()
self._usb_connection.send_list_chunk(*args)
def _init_machine(self):
"""Initialize the machine."""
self.serial_number = self.raw_get_serial_no()
self.version = self.raw_get_version()
self.raw_get_st_mo_ap()
cor_file = self.service.corfile if self.service.corfile_enabled else None
first_pulse_killer = self.service.first_pulse_killer
pwm_pulse_width = self.service.pwm_pulse_width
pwm_half_period = self.service.pwm_half_period
standby_param_1 = self.service.standby_param_1
standby_param_2 = self.service.standby_param_2
timing_mode = self.service.timing_mode
delay_mode = self.service.delay_mode
laser_mode = self.service.laser_mode
control_mode = self.service.control_mode
fpk2_p1 = self.service.fpk2_p1
fpk2_p2 = self.service.fpk2_p2
fpk2_p3 = self.service.fpk2_p3
        fpk2_p4 = self.service.fpk2_p4
fly_res_p1 = self.service.fly_res_p1
fly_res_p2 = self.service.fly_res_p2
fly_res_p3 = self.service.fly_res_p3
fly_res_p4 = self.service.fly_res_p4
# Unknown function
self.raw_reset()
# Load in-machine correction table
cor_table = None
if cor_file is not None:
try:
cor_table = self._read_correction_file(cor_file)
except FileNotFoundError:
raise BalorConfigException(".cor file location did not exist")
self._send_correction_table(cor_table)
self.raw_enable_laser()
self.raw_set_control_mode(control_mode, 0)
self.raw_set_laser_mode(laser_mode, 0)
self.raw_set_delay_mode(delay_mode, 0)
self.raw_set_timing(timing_mode, 0)
self.raw_set_standby(standby_param_1, standby_param_2, 0, 0)
self.raw_set_first_pulse_killer(first_pulse_killer, 0)
self.raw_set_pwm_half_period(pwm_half_period, 0)
# unknown function
self.raw_set_pwm_pulse_width(pwm_pulse_width, 0)
# "IPG_OpenMO" (main oscillator?)
self.raw_fiber_open_mo(0, 0)
# Unclear if used for anything
self._send_command(GET_REGISTER, 0)
# 0x0FFB is probably a 12 bit rendering of int12 -5
# Apparently some parameters for the first pulse killer
self.raw_set_fpk_param_2(fpk2_p1, fpk2_p2, fpk2_p3, fpk2_p4)
# Unknown fiber laser related command
self.raw_set_fly_res(fly_res_p1, fly_res_p2, fly_res_p3, fly_res_p4)
# Is this appropriate for all laser engraver machines?
self.raw_write_port(self._write_port)
# Conjecture is that this puts the output port out of a
# high impedance state (based on the name in the DLL,
# ENABLEZ)
# Based on how it's used, it could also be about latching out
# of the data that has been set up.
self.raw_enable_z()
# We don't know what this does, since this laser's power is set
# digitally
self.raw_write_analog_port_1(0x07FF, 0)
self.raw_enable_z()
def _read_correction_file(self, filename):
table = []
with open(filename, "rb") as f:
f.seek(0x24)
for j in range(65):
for k in range(65):
dx = int.from_bytes(f.read(4), "little", signed=True)
dx = dx if dx >= 0 else -dx + 0x8000
dy = int.from_bytes(f.read(4), "little", signed=True)
dy = dy if dy >= 0 else -dy + 0x8000
table.append([dx & 0xFFFF, dy & 0xFFFF])
return table
def _send_correction_table(self, table=None):
"""Send the onboard correction table to the machine."""
self.raw_write_correction_table(True)
if table is None:
for n in range(65**2):
self.raw_write_correction_line(0, 0, 0 if n == 0 else 1)
else:
for n in range(65**2):
self.raw_write_correction_line(
table[n][0], table[n][1], 0 if n == 0 else 1
)
def is_ready(self):
"""Returns true if the laser is ready for more data, false otherwise."""
self._send_command(GET_REGISTER, 0x0001)
return bool(self._usb_connection.status & 0x20)
def is_busy(self):
"""Returns true if the machine is busy, false otherwise;
Note that running a lighting job counts as being busy."""
self._send_command(GET_REGISTER, 0x0001)
return bool(self._usb_connection.status & 0x04)
def is_ready_and_not_busy(self):
self._send_command(GET_REGISTER, 0x0001)
return bool(self._usb_connection.status & 0x20) and not bool(
self._usb_connection.status & 0x04
)
def wait_finished(self):
while not self.is_ready_and_not_busy():
time.sleep(self.sleep_time)
if self._terminate_execution:
return
def execute(
self, command_list: CommandSource, loop_count=1, callback_finished=None
):
"""Run a job. loop_count is the number of times to repeat the
job; if it is inf, it repeats until aborted. If there is a job
already running, it will be aborted and replaced. Optionally,
calls a callback function when the job is finished.
The loop job can either be regular data in multiples of 3072 bytes, or
it can be a callable that provides data as above on command."""
self._terminate_execution = False
with self._lock:
self.wait_finished()
self.raw_reset_list()
self.port_on(bit=0)
if command_list.movement:
self.raw_fiber_open_mo(1, 0)
loop_index = 0
execute_list = False
while loop_index < loop_count:
packet_count = 0
if command_list.tick is not None:
command_list.tick(command_list, loop_index)
for packet in command_list.packet_generator():
if self._terminate_execution:
return False
ready = False
while not ready:
# Wait until ready.
if self._terminate_execution:
return False
self._send_command(
GET_REGISTER, 0x0001 if not execute_list else 0x0000
) # 0x0007
ready = bool(self._usb_connection.status & 0x20)
self._usb_connection.send_list_chunk(packet)
self.raw_set_end_of_list(
0x0001 if not execute_list else 0x0000
) # 0x00019
if not execute_list and packet_count >= 1:
self.raw_execute_list()
execute_list = True
packet_count += 1
if not execute_list:
self.raw_execute_list()
execute_list = True
# when done, SET_END_OF_LIST(0), SET_CONTROL_MODE(1), 7(1)
self.raw_set_control_mode(1, 0)
busy = True
while busy:
# Wait until no longer busy.
if self._terminate_execution:
return False
self._send_command(GET_REGISTER, 0x0001) # 0x0007
busy = bool(self._usb_connection.status & 0x04)
loop_index += 1
self.port_off(bit=0)
# self.raw_set_standby(0x70D0, 0x0014)
if command_list.movement:
self.raw_fiber_open_mo(0, 0)
if callback_finished is not None:
callback_finished()
return True
loop_job = execute
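# Example (hedged, editor's addition): assuming `ctrl` is an instance of this
# controller class and `job` is a CommandSource prepared elsewhere, a single
# pass could be run with:
#
#   ctrl.execute(job, loop_count=1, callback_finished=lambda: print("job done"))
#
# Passing loop_count=float("inf") repeats the job until abort() is called from
# another thread, per the docstring above.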
def abort(self):
"""Aborts any job in progress and puts the machine back into an
idle ready condition."""
self._terminate_execution = True
with self._lock:
self.raw_stop_execute()
self.raw_fiber_open_mo(0, 0)
self.raw_reset_list()
self._send_list_chunk(self._abort_list_chunk)
self.raw_set_end_of_list()
self.raw_execute_list()
while self.is_busy():
time.sleep(self.sleep_time)
self.set_xy(0x8000, 0x8000)
def set_footswitch_callback(self, callback_footswitch):
"""Sets the callback function for the footswitch."""
self._footswitch_callback = callback_footswitch
def get_condition(self):
"""Returns the 16-bit condition register value (from whatever
command was run last.)"""
return self._usb_connection.status
def port_toggle(self, bit):
self._write_port ^= 1 << bit
self.raw_write_port(self._write_port)
def port_on(self, bit):
self._write_port |= 1 << bit
self.raw_write_port(self._write_port)
def port_off(self, bit):
self._write_port = ~((~self._write_port) | (1 << bit))
self.raw_write_port(self._write_port)
def get_port(self, bit=None):
if bit is None:
return self._write_port
return (self._write_port >> bit) & 1
def light_on(self):
self.port_on(bit=8) # 0x100
def light_off(self):
self.port_off(bit=8)
def read_port(self):
port = self.raw_read_port()
if port[0] & 0x8000 and self._footswitch_callback:
callback = self._footswitch_callback
self._footswitch_callback = None
callback(port)
return port
def set_xy(self, x, y):
"""Change the galvo position. If the machine is running a job,
this will abort the job."""
self.raw_set_xy_position(x, y)
def get_xy(self):
"""Returns the galvo position."""
return self.raw_get_xy_position()
#############################
# Raw LMC Interface Commands.
#############################
def raw_disable_laser(self):
"""
No parameters.
@return:
"""
return self._send_command(DISABLE_LASER)
def raw_reset(self):
self._send_command(RESET)
def raw_enable_laser(self):
"""
No parameters.
@return:
"""
return self._send_command(ENABLE_LASER)
def raw_execute_list(self):
"""
No parameters.
@return: value response
"""
return self._send_command(EXECUTE_LIST)
def raw_set_pwm_pulse_width(self, s1: int, value: int):
"""
2 Param: Stack, Value
@param s1:
@param value:
@return: value response
"""
return self._send_command(SET_PWM_PULSE_WIDTH, s1, value)
def raw_get_version(self):
"""
No set parameters but 1 is always sent.
@return: value response
"""
return self._send_command(GET_REGISTER, 1)
def raw_get_serial_no(self):
"""
No parameters
Reply is presumably a serial number.
@return: value response
"""
return self._send_command(GET_SERIAL_NUMBER)
def raw_get_list_status(self):
"""
No parameters
@return: value response
"""
return self._send_command(GET_LIST_STATUS)
def raw_get_xy_position(self):
"""
No parameters
The reply to this is the x, y coords and should be parsed.
@return: value response
"""
return self._send_command(GET_XY_POSITION)
def raw_set_xy_position(self, x, y):
"""
Move to X Y location
@param x:
@param y:
@return: value response
"""
return self._send_command(SET_XY_POSITION, int(x), int(y))
def raw_laser_signal_off(self):
"""
No parameters
@return: value response
"""
return self._send_command(LASER_SIGNAL_OFF)
def raw_laser_signal_on(self):
"""
No parameters
@return: value response
"""
return self._send_command(LASER_SIGNAL_ON)
def raw_write_correction_line(self, dx, dy, nonfirst):
"""
3 parameters
Writes a single line (one entry) of a correction table.
dx, dy, nonfirst.
Does not read reply.
@param dx:
@param dy:
@param nonfirst: either 0x0000 for first entry or 0x0100 for non-first.
@return:
"""
self._send_command(WRITE_CORRECTION_LINE, dx, dy, nonfirst, read=False)
def raw_reset_list(self):
"""
No parameters.
@return: value response
"""
return self._send_command(RESET_LIST)
def raw_restart_list(self):
"""
No parameters.
@return: value response
"""
return self._send_command(RESTART_LIST)
def raw_write_correction_table(self, has_table: bool):
"""
1 parameter
If the parameter is true, no table needs to be sent.
@param has_table:
@return: value response
"""
return self._send_command(WRITE_CORRECTION_TABLE, int(has_table))
def raw_set_control_mode(self, s1: int, value: int):
"""
2 parameters.
stack, value
@param s1:
@param value:
@return: value response
"""
return self._send_command(SET_CONTROL_MODE, int(s1), int(value))
def raw_set_delay_mode(self, s1: int, value: int):
"""
2 parameters.
stack, value
@param s1:
@param value:
@return: value response
"""
return self._send_command(SET_DELAY_MODE, int(s1), int(value))
def raw_set_max_poly_delay(self, s1: int, value: int):
"""
2 parameters.
stack, value
@param s1:
@param value:
@return: value response
"""
return self._send_command(SET_MAX_POLY_DELAY, int(s1), int(value))
def raw_set_end_of_list(self, a=0, b=0):
"""
2 parameters, both defaulting to 0 (parameters are visible in the pcap captures).
@return: value response
"""
return self._send_command(SET_END_OF_LIST, a, b)
def raw_set_first_pulse_killer(self, s1: int, value: int):
"""
2 parameters.
stack, value
@param s1:
@param value:
@return: value response
"""
return self._send_command(SET_FIRST_PULSE_KILLER, s1, value)
def raw_set_laser_mode(self, s1: int, value: int):
"""
2 parameters.
stack, value
@param s1:
@param value:
@return: value response
"""
return self._send_command(SET_LASER_MODE, s1, value)
def raw_set_timing(self, s1: int, value: int):
"""
2 parameters.
stack, value
@param s1:
@param value:
@return: value response
"""
return self._send_command(SET_TIMING, s1, value)
def raw_set_standby(self, v1: int, v2: int, v3: int, value: int):
"""
4 parameters
variable, variable, variable, value
@param v1:
@param v2:
@param v3:
@param value:
@return: value response
"""
return self._send_command(SET_STANDBY, v1, v2, v3, value)
def raw_set_pwm_half_period(self, s1: int, value: int):
"""
2 parameters
stack, value
@param s1:
@param value:
@return: value response
"""
return self._send_command(SET_PWM_HALF_PERIOD, s1, value)
def raw_stop_execute(self):
"""
No parameters.
@return: value response
"""
return self._send_command(STOP_EXECUTE)
def raw_stop_list(self):
"""
No parameters
@return: value response
"""
return self._send_command(STOP_LIST)
def raw_write_port(self, v1: int = 0, s1: int = 0, value: int = 0):
"""
3 parameters.
variable, stack, value
@param v1:
@param s1:
@param value:
@return: value response
"""
return self._send_command(WRITE_PORT, v1, s1, value)
def raw_write_analog_port_1(self, s1: int, value: int):
"""
2 parameters.
stack, value
@param s1:
@param value:
@return: value response
"""
return self._send_command(WRITE_ANALOG_PORT_1, s1, value)
def raw_write_analog_port_2(self, s1: int, value: int):
"""
3 parameters.
0, stack, value
@param s1:
@param value:
@return: value response
"""
return self._send_command(WRITE_ANALOG_PORT_2, 0, s1, value)
def raw_write_analog_port_x(self, v1: int, s1: int, value: int):
"""
3 parameters.
variable, stack, value
@param v1:
@param s1:
@param value:
@return: value response
"""
return self._send_command(WRITE_ANALOG_PORT_X, v1, s1, value)
def raw_read_port(self):
"""
No parameters
@return: Status Information
"""
return self._send_command(READ_PORT)
def raw_set_axis_motion_param(self, v1: int, s1: int, value: int):
"""
3 parameters.
variable, stack, value
@return: value response
"""
return self._send_command(SET_AXIS_MOTION_PARAM, v1, s1, value)
def raw_set_axis_origin_param(self, v1: int, s1: int, value: int):
"""
3 parameters.
variable, stack, value
@return: value response
"""
return self._send_command(SET_AXIS_ORIGIN_PARAM, v1, s1, value)
def raw_goto_axis_origin(self, v0: int):
"""
1 parameter
variable
@param v0:
@return: value response
"""
return self._send_command(GO_TO_AXIS_ORIGIN, v0)
def raw_move_axis_to(self, axis, coord):
"""
This typically accepted a single 32-bit int and used bits 1:8 and bits 16:24 as parameters.
@param axis: axis being moved
@param coord: coordinate being matched
@return: value response
"""
return self._send_command(MOVE_AXIS_TO, axis, coord)
def raw_get_axis_pos(self, s1: int, value: int):
"""
2 parameters
stack, value
@param s1:
@param value:
@return: axis position?
"""
return self._send_command(GET_AXIS_POSITION, s1, value)
def raw_get_fly_wait_count(self, b1: bool):
"""
1 parameter
bool
@param b1:
@return: flywaitcount?
"""
return self._send_command(GET_FLY_WAIT_COUNT, int(b1))
def raw_get_mark_count(self, p1: bool):
"""
1 parameter
bool
@param p1:
@return: markcount?
"""
return self._send_command(GET_MARK_COUNT, int(p1))
def raw_set_fpk_param_2(self, v1, v2, v3, s1):
"""
4 parameters
variable, variable, variable, stack
@param v1:
@param v2:
@param v3:
@param s1:
@return: value response
"""
return self._send_command(SET_FPK_2E, v1, v2, v3, s1)
def raw_set_fiber_config(self, p1, p2):
"""
Calls fiber_config_1 with setting parameters.
@param p1:
@param p2:
@return:
"""
self.raw_fiber_config_1(0, p1, p2)
def raw_get_fiber_config(self):
"""
Calls fiber_config_1 with getting parameters.
@return:
"""
self.raw_fiber_config_1(1, 0, 0)
def raw_fiber_config_1(self, p1, p2, p3):
"""
Seen to call both a get and set config value.
@param p1:
@param p2:
@param p3:
@return:
"""
return self._send_command(FIBER_CONFIG_1, p1, p2, p3)
def raw_fiber_config_2(self, v1, v2, v3, s1):
return self._send_command(FIBER_CONFIG_2, v1, v2, v3, s1)
def raw_lock_input_port(self, p1):
"""
p1 varies based on the call: 1 for get, 2 for enable, 4 for clear.
@param p1:
@return:
"""
self._send_command(LOCK_INPUT_PORT, p1)
def raw_clear_lock_input_port(self):
self.raw_lock_input_port(0x04)
def raw_enable_lock_input_port(self):
self.raw_lock_input_port(0x02)
def raw_get_lock_input_port(self):
self.raw_lock_input_port(0x01)
def raw_set_fly_res(self, p1, p2, p3, p4):
"""
On-the-fly settings.
@param p1:
@param p2:
@param p3:
@param p4:
@return:
"""
return self._send_command(SET_FLY_RES, p1, p2, p3, p4)
def raw_fiber_open_mo(self, s1: int, value: int):
"""
2 parameters
stack, value
@param s1:
@param value:
@return: value response
"""
return self._send_command(FIBER_OPEN_MO, s1, value)
def raw_get_st_mo_ap(self):
"""
No parameters
@return: value response
"""
return self._send_command(FIBER_GET_StMO_AP)
def raw_get_user_data(self):
"""
No parameters
@return: user_parameters
"""
return self._send_command(GET_USER_DATA)
def raw_get_fly_pulse_count(self):
"""
@return: fly pulse count
"""
return self._send_command(GET_FLY_PULSE_COUNT)
def raw_get_fly_speed(self, p1, p2, p3, p4):
"""
@param p1:
@param p2:
@param p3:
@param p4:
@return:
"""
self._send_command(GET_FLY_SPEED, p1, p2, p3, p4)
def raw_enable_z(self):
"""
No parameters. Autofocus on/off
@return: value response
"""
return self._send_command(ENABLE_Z)
def raw_enable_z_2(self):
"""
No parameters
Alternate command. Autofocus on/off
@return: value response
"""
return self._send_command(ENABLE_Z_2)
def raw_set_z_data(self, v1, s1, v2):
"""
3 parameters
variable, stack, variable
@param v1:
@param s1:
@param v2:
@return: value response
"""
return self._send_command(SET_Z_DATA, v1, s1, v2)
def raw_set_spi_simmer_current(self, v1, s1):
"""
2 parameters
variable, stack
@param v1:
@param s1:
@return: value response
"""
return self._send_command(SET_SPI_SIMMER_CURRENT, v1, s1)
def raw_is_lite_version(self):
"""
no parameters.
Only called for true.
@return: value response
"""
return self._send_command(IS_LITE_VERSION, 1)
def raw_get_mark_time(self):
"""
Parameter is always set to 3.
@return:
"""
self._send_command(GET_MARK_TIME, 3)
def raw_set_fpk_param(self, v1, v2, v3, s1):
"""
Probably "first pulse killer" = fpk
4 parameters
variable, variable, variable, stack
@param v1:
@param v2:
@param v3:
@param s1:
@return: value response
"""
return self._send_command(SET_FPK_PARAM, v1, v2, v3, s1)
class UsbConnection:
chunk_size = 12 * 256
ep_hodi = 0x01 # endpoint for the "dog," i.e. dongle.
ep_hido = 0x81 # fortunately it turns out that we can ignore it completely.
ep_homi = 0x02 # endpoint for host out, machine in. (query status, send ops)
ep_himo = 0x88 # endpoint for host in, machine out. (receive status reports)
def __init__(self, machine_index=0, debug=None):
self.machine_index = machine_index
self.device = None
self.status = None
self._debug = debug
def open(self):
devices = list(usb.core.find(find_all=True, idVendor=0x9588, idProduct=0x9899))
if len(devices) == 0:
raise BalorMachineException("No compatible engraver machine was found.")
try:
device = list(devices)[self.machine_index]
except IndexError:
# Can't find device
raise BalorMachineException("Invalid machine index %d" % self.machine_index)
# if the permissions are wrong, these will throw usb.core.USBError
device.set_configuration()
try:
device.reset()
except usb.core.USBError:
pass
self.device = device
if self._debug:
self._debug("Connected.")
def close(self):
self.status = None
if self._debug:
self._debug("Disconnected.")
def is_ready(self):
self.send_command(READ_PORT, 0)
return self.status & 0x20
def send_correction_entry(self, correction):
"""Send an individual correction table entry to the machine."""
# This is really a command and should just be issued without reading.
query = bytearray([0x10] + [0] * 11)
query[2 : 2 + 5] = correction
if self.device.write(self.ep_homi, query, 100) != 12:
raise BalorCommunicationException("Failed to write correction entry")
def send_command(self, code, *parameters, read=True):
"""Send a command to the machine and return the response.
Updates the host condition register as a side effect."""
query = bytearray([0] * 12)
query[0] = code & 0x00FF
query[1] = (code >> 8) & 0x00FF
for n, parameter in enumerate(parameters):
query[2 * n + 2] = parameter & 0x00FF
query[2 * n + 3] = (parameter >> 8) & 0x00FF
if self.device.write(self.ep_homi, query, 100) != 12:
raise BalorCommunicationException("Failed to write command")
if self._debug:
self._debug("---> " + str(query))
if read:
response = self.device.read(self.ep_himo, 8, 100)
if len(response) != 8:
raise BalorCommunicationException("Invalid response")
if self._debug:
self._debug("<--- " + str(response))
self.status = response[6] | (response[7] << 8)
return response[2] | (response[3] << 8), response[4] | (response[5] << 8)
else:
return 0, 0
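# Worked example (editor's addition, derived from the packing above): a command
# with opcode 0x0007 and a single parameter 0x0001 (the opcode value is purely
# illustrative) is sent as the 12-byte little-endian query
#   07 00 01 00 00 00 00 00 00 00 00 00
# and the 8-byte reply is unpacked as two 16-bit values from bytes 2-3 and 4-5,
# with the status/condition register taken from bytes 6-7.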
def send_list_chunk(self, data):
"""Send a command list chunk to the machine."""
if len(data) != self.chunk_size:
raise BalorDataValidityException("Invalid chunk size %d" % len(data))
sent = self.device.write(self.ep_homi, data, 100)
if sent != len(data):
raise BalorCommunicationException("Could not send list chunk")
if self._debug:
self._debug("---> " + str(data))
class MockConnection:
def __init__(self, machine_index=0, debug=None):
self.machine_index = machine_index
self._debug = debug
self.device = True
@property
def status(self):
import random
return random.randint(0, 255)
def open(self):
self.device = True
if self._debug:
self._debug("Connected.")
def close(self):
if self._debug:
self._debug("Disconnected.")
def send_correction_entry(self, correction):
"""Send an individual correction table entry to the machine."""
pass
def send_command(self, code, *parameters, read=True):
"""Send a command to the machine and return the response.
Updates the host condition register as a side effect."""
if self._debug:
self._debug("---> " + str(code) + " " + str(parameters))
time.sleep(0.005)
# This should be replaced with a robust connection to the simulation code
# so the fake laser can give sensible responses
if read:
import random
return random.randint(0, 255), random.randint(0, 255)
else:
return 0, 0
def send_list_chunk(self, data):
"""Send a command list chunk to the machine."""
if len(data) != 0xC00:
raise BalorDataValidityException("Invalid chunk size %d" % len(data))
if self._debug:
self._debug("---> " + str(data))
```
#### File: core/node/elem_image.py
```python
import threading
from copy import copy
from PIL.Image import DecompressionBombError
from meerk40t.core.node.node import Node
from meerk40t.core.units import UNITS_PER_INCH
from meerk40t.image.imagetools import RasterScripts
from meerk40t.svgelements import Matrix
class ImageNode(Node):
"""
ImageNode is the bootstrapped node type for the 'elem image' type.
ImageNode contains a main matrix and a main image, as well as a processed image and a processed matrix.
The processed matrix must be concatenated with the main matrix to be accurate.
"""
def __init__(
self,
image=None,
matrix=None,
overscan=None,
direction=None,
dpi=500,
operations=None,
**kwargs,
):
super(ImageNode, self).__init__(type="elem image", **kwargs)
self.image = image
self.matrix = matrix
self.processed_image = None
self.processed_matrix = None
self.process_image_failed = False
self.text = None
self._needs_update = False
self._update_thread = None
self._update_lock = threading.Lock()
self.settings = kwargs
self.overscan = overscan
self.direction = direction
self.dpi = dpi
self.step_x = None
self.step_y = None
self.lock = False
self.invert = False
self.red = 1.0
self.green = 1.0
self.blue = 1.0
self.lightness = 1.0
self.view_invert = False
self.dither = True
self.dither_type = "Floyd-Steinberg"
if operations is None:
operations = list()
self.operations = operations
def __copy__(self):
return ImageNode(
image=self.image,
matrix=copy(self.matrix),
overscan=self.overscan,
direction=self.direction,
dpi=self.dpi,
operations=self.operations,
**self.settings,
)
def __repr__(self):
return "%s('%s', %s, %s)" % (
self.__class__.__name__,
self.type,
str(self.image),
str(self._parent),
)
@property
def active_image(self):
if self.processed_image is not None:
return self.processed_image
else:
return self.image
@property
def active_matrix(self):
if self.processed_matrix is None:
return self.matrix
return self.processed_matrix * self.matrix
def preprocess(self, context, matrix, commands):
"""
Preprocess step during the cut planning stages.
We require a context to calculate the correct step values relative to the device
"""
self.step_x, self.step_y = context.device.dpi_to_steps(self.dpi)
self.matrix *= matrix
self._bounds_dirty = True
self.process_image()
@property
def bounds(self):
if self._bounds_dirty:
image_width, image_height = self.active_image.size
matrix = self.active_matrix
x0, y0 = matrix.point_in_matrix_space((0, 0))
x1, y1 = matrix.point_in_matrix_space((image_width, image_height))
x2, y2 = matrix.point_in_matrix_space((0, image_height))
x3, y3 = matrix.point_in_matrix_space((image_width, 0))
self._bounds_dirty = False
self._bounds = (
min(x0, x1, x2, x3),
min(y0, y1, y2, y3),
max(x0, x1, x2, x3),
max(y0, y1, y2, y3),
)
return self._bounds
def default_map(self, default_map=None):
default_map = super(ImageNode, self).default_map(default_map=default_map)
default_map.update(self.settings)
image = self.active_image
default_map["width"] = image.width
default_map["height"] = image.height
default_map["element_type"] = "Image"
default_map["matrix"] = self.matrix
default_map["dpi"] = self.dpi
default_map["overscan"] = self.overscan
default_map["direction"] = self.direction
return default_map
def drop(self, drag_node):
# Dragging element into element.
if drag_node.type.startswith("elem"):
self.insert_sibling(drag_node)
return True
return False
def revalidate_points(self):
bounds = self.bounds
if bounds is None:
return
if len(self._points) < 9:
self._points.extend([None] * (9 - len(self._points)))
self._points[0] = [bounds[0], bounds[1], "bounds top_left"]
self._points[1] = [bounds[2], bounds[1], "bounds top_right"]
self._points[2] = [bounds[0], bounds[3], "bounds bottom_left"]
self._points[3] = [bounds[2], bounds[3], "bounds bottom_right"]
cx = (bounds[0] + bounds[2]) / 2
cy = (bounds[1] + bounds[3]) / 2
self._points[4] = [cx, cy, "bounds center_center"]
self._points[5] = [cx, bounds[1], "bounds top_center"]
self._points[6] = [cx, bounds[3], "bounds bottom_center"]
self._points[7] = [bounds[0], cy, "bounds center_left"]
self._points[8] = [bounds[2], cy, "bounds center_right"]
def update_point(self, index, point):
return False
def add_point(self, point, index=None):
return False
def update(self, context):
self._needs_update = True
self.text = "Processing..."
context.signal("refresh_scene", "Scene")
if self._update_thread is None:
def clear(result):
if self.process_image_failed:
self.text = "Process image could not exist in memory."
else:
self.text = None
self._needs_update = False
self._update_thread = None
context.signal("refresh_scene", "Scene")
context.signal("image updated", self)
self.processed_image = None
self.processed_matrix = None
self._update_thread = context.threaded(
self.process_image_thread, result=clear, daemon=True
)
def process_image_thread(self):
while self._needs_update:
self._needs_update = False
self.process_image()
# Unset cache.
self.wx_bitmap_image = None
self.cache = None
def process_image(self):
if self.step_x is None:
step = UNITS_PER_INCH / self.dpi
self.step_x = step
self.step_y = step
from PIL import Image, ImageEnhance, ImageFilter, ImageOps
from meerk40t.image.actualize import actualize
from meerk40t.image.imagetools import dither
image = self.image
main_matrix = self.matrix
r = self.red * 0.299
g = self.green * 0.587
b = self.blue * 0.114
v = self.lightness
c = r + g + b
try:
c /= v
r = r / c
g = g / c
b = b / c
except ZeroDivisionError:
pass
if image.mode != "L":
image = image.convert("RGB")
image = image.convert("L", matrix=[r, g, b, 1.0])
if self.invert:
image = image.point(lambda e: 255 - e)
# Calculate device real step.
step_x, step_y = self.step_x, self.step_y
if (
main_matrix.a != step_x
or main_matrix.b != 0.0
or main_matrix.c != 0.0
or main_matrix.d != step_y
):
try:
image, actualized_matrix = actualize(
image,
main_matrix,
step_x=step_x,
step_y=step_y,
inverted=self.invert,
)
except (MemoryError, DecompressionBombError):
self.process_image_failed = True
return
else:
actualized_matrix = Matrix(main_matrix)
if self.invert:
empty_mask = image.convert("L").point(lambda e: 0 if e == 0 else 255)
else:
empty_mask = image.convert("L").point(lambda e: 0 if e == 255 else 255)
# Process operations.
for op in self.operations:
name = op["name"]
if name == "crop":
try:
if op["enable"] and op["bounds"] is not None:
crop = op["bounds"]
left = int(crop[0])
upper = int(crop[1])
right = int(crop[2])
lower = int(crop[3])
image = image.crop((left, upper, right, lower))
except KeyError:
pass
elif name == "edge_enhance":
try:
if op["enable"]:
if image.mode == "P":
image = image.convert("L")
image = image.filter(filter=ImageFilter.EDGE_ENHANCE)
except KeyError:
pass
elif name == "auto_contrast":
try:
if op["enable"]:
if image.mode not in ("RGB", "L"):
# Auto-contrast raises NotImplementedError if P
# Auto-contrast raises OSError if not RGB, L.
image = image.convert("L")
image = ImageOps.autocontrast(image, cutoff=op["cutoff"])
except KeyError:
pass
elif name == "tone":
try:
if op["enable"] and op["values"] is not None:
if image.mode == "L":
image = image.convert("P")
tone_values = op["values"]
if op["type"] == "spline":
spline = ImageNode.spline(tone_values)
else:
tone_values = [q for q in tone_values if q is not None]
spline = ImageNode.line(tone_values)
if len(spline) < 256:
spline.extend([255] * (256 - len(spline)))
if len(spline) > 256:
spline = spline[:256]
image = image.point(spline)
if image.mode != "L":
image = image.convert("L")
except KeyError:
pass
elif name == "contrast":
try:
if op["enable"]:
if op["contrast"] is not None and op["brightness"] is not None:
contrast = ImageEnhance.Contrast(image)
c = (op["contrast"] + 128.0) / 128.0
image = contrast.enhance(c)
brightness = ImageEnhance.Brightness(image)
b = (op["brightness"] + 128.0) / 128.0
image = brightness.enhance(b)
except KeyError:
pass
elif name == "gamma":
try:
if op["enable"] and op["factor"] is not None:
if image.mode == "L":
gamma_factor = float(op["factor"])
def crimp(px):
px = int(round(px))
if px < 0:
return 0
if px > 255:
return 255
return px
if gamma_factor == 0:
gamma_lut = [0] * 256
else:
gamma_lut = [
crimp(pow(i / 255, (1.0 / gamma_factor)) * 255)
for i in range(256)
]
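# e.g. (editor's note) factor=2.0 maps input 128 to crimp((128 / 255) ** 0.5 * 255) == 181, brightening midtones.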
image = image.point(gamma_lut)
if image.mode != "L":
image = image.convert("L")
except KeyError:
pass
elif name == "unsharp_mask":
try:
if (
op["enable"]
and op["percent"] is not None
and op["radius"] is not None
and op["threshold"] is not None
):
unsharp = ImageFilter.UnsharpMask(
radius=op["radius"],
percent=op["percent"],
threshold=op["threshold"],
)
image = image.filter(unsharp)
except (KeyError, ValueError): # Value error if wrong type of image.
pass
elif name == "halftone":
try:
if op["enable"]:
image = RasterScripts.halftone(
image,
sample=op["sample"],
angle=op["angle"],
oversample=op["oversample"],
black=op["black"],
)
except KeyError:
pass
if empty_mask is not None:
background = Image.new(image.mode, image.size, "white")
background.paste(image, mask=empty_mask)
image = background  # Mask exists; use it to remove any pixels that were pure reject.
if self.dither and self.dither_type is not None:
if self.dither_type != "Floyd-Steinberg":
image = dither(image, self.dither_type)
image = image.convert("1")
inverted_main_matrix = Matrix(main_matrix).inverse()
self.processed_matrix = actualized_matrix * inverted_main_matrix
self.processed_image = image
# self.matrix = actualized_matrix
self.altered()
self.process_image_failed = False
@staticmethod
def line(p):
N = len(p) - 1
try:
m = [(p[i + 1][1] - p[i][1]) / (p[i + 1][0] - p[i][0]) for i in range(0, N)]
except ZeroDivisionError:
m = [1] * N
# b = y - mx
b = [p[i][1] - (m[i] * p[i][0]) for i in range(0, N)]
r = list()
for i in range(0, p[0][0]):
r.append(0)
for i in range(len(p) - 1):
x0 = p[i][0]
x1 = p[i + 1][0]
range_list = [int(round((m[i] * x) + b[i])) for x in range(x0, x1)]
r.extend(range_list)
for i in range(p[-1][0], 256):
r.append(255)
r.append(round(int(p[-1][1])))
return r
@staticmethod
def spline(p):
"""
Spline interpolator.
Returns all integer locations between different spline interpolation values.
@param p: points to be cubic-spline interpolated.
@return: integer y values for given spline points.
"""
try:
N = len(p) - 1
w = [(p[i + 1][0] - p[i][0]) for i in range(0, N)]
h = [(p[i + 1][1] - p[i][1]) / w[i] for i in range(0, N)]
ftt = (
[0]
+ [3 * (h[i + 1] - h[i]) / (w[i + 1] + w[i]) for i in range(0, N - 1)]
+ [0]
)
A = [(ftt[i + 1] - ftt[i]) / (6 * w[i]) for i in range(0, N)]
B = [ftt[i] / 2 for i in range(0, N)]
C = [h[i] - w[i] * (ftt[i + 1] + 2 * ftt[i]) / 6 for i in range(0, N)]
D = [p[i][1] for i in range(0, N)]
except ZeroDivisionError:
return list(range(256))
r = list()
for i in range(0, p[0][0]):
r.append(0)
for i in range(len(p) - 1):
a = p[i][0]
b = p[i + 1][0]
r.extend(
int(
round(
A[i] * (x - a) ** 3
+ B[i] * (x - a) ** 2
+ C[i] * (x - a)
+ D[i]
)
)
for x in range(a, b)
)
for i in range(p[-1][0], 256):
r.append(255)
r.append(round(int(p[-1][1])))
return r
```
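A minimal usage sketch (editor's addition, hedged): the `line`/`spline` static helpers above build a tone lookup table that `process_image()` pads or trims to 256 entries and applies via PIL's `Image.point`. The tone points and the `input.png` path below are placeholders; the import path follows the file header above.
```python
from PIL import Image

from meerk40t.core.node.elem_image import ImageNode

# Illustrative tone-curve control points (input, output) in the 0..255 range.
tone_points = [(0, 0), (64, 32), (192, 224), (255, 255)]
lut = ImageNode.spline(tone_points)  # ImageNode.line(tone_points) is the linear variant
# Pad or trim to exactly 256 entries, mirroring process_image().
if len(lut) < 256:
    lut.extend([255] * (256 - len(lut)))
lut = lut[:256]

img = Image.open("input.png").convert("L")  # placeholder path
img = img.point(lut)                        # apply the tone curve
img.save("toned.png")
```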
#### File: gui/propertypanels/waitproperty.py
```python
import wx
from meerk40t.gui.choicepropertypanel import ChoicePropertyPanel
_ = wx.GetTranslation
class WaitPropertyPanel(wx.Panel):
name = "Wait"
def __init__(self, *args, context=None, node=None, **kwds):
kwds["style"] = kwds.get("style", 0)
wx.Panel.__init__(self, *args, **kwds)
self.context = context
self.operation = node
choices = [
{
"attr": "wait",
"object": self.operation,
"default": 1.0,
"type": float,
"label": _("Wait time for pause in execution (in seconds)"),
"tip": _("Set the wait time for pausing the laser execution."),
},
]
self.panel = ChoicePropertyPanel(
self, wx.ID_ANY, context=self.context, choices=choices
)
main_sizer = wx.BoxSizer(wx.VERTICAL)
main_sizer.Add(self.panel, 1, wx.EXPAND, 0)
self.SetSizer(main_sizer)
self.Layout()
def pane_hide(self):
self.panel.pane_hide()
def pane_show(self):
self.panel.pane_show()
def set_widgets(self, node):
self.operation = node
```
#### File: gui/scene/scene.py
```python
import platform
import threading
import time
import wx
from meerk40t.core.element_types import elem_nodes
from meerk40t.core.units import Length
from meerk40t.gui.laserrender import (
DRAW_MODE_ANIMATE,
DRAW_MODE_FLIPXY,
DRAW_MODE_INVERT,
DRAW_MODE_REFRESH,
)
from meerk40t.gui.scene.guicolors import GuiColors
from meerk40t.gui.scene.sceneconst import (
HITCHAIN_DELEGATE,
HITCHAIN_DELEGATE_AND_HIT,
HITCHAIN_HIT,
HITCHAIN_HIT_AND_DELEGATE,
ORIENTATION_RELATIVE,
RESPONSE_ABORT,
RESPONSE_CHAIN,
RESPONSE_CONSUME,
RESPONSE_DROP,
)
from meerk40t.gui.scene.scenespacewidget import SceneSpaceWidget
from meerk40t.kernel import Job, Module
from meerk40t.svgelements import Matrix, Point
# TODO: _buffer can be updated partially rather than fully rewritten, especially with some layering.
class Scene(Module, Job):
"""
The Scene Module holds all the needed references to widgets and catches the events from the ScenePanel, which is
the primary holder of this object.
Scene overloads both Module, being registered in "module/Scene", and Job, to handle the refresh.
The Scene is the infinite space of the scene as seen through the panel's viewpoint. It serves to zoom, pan, and
manipulate various elements. This is done through a matrix which translates scene space to window space. The
widgets are stored in a tree within the scene. The primary widget is the SceneSpaceWidget, which draws elements
in two different forms: it first draws the scene and all scenewidgets added to the scene, and then the interface
widget, which contains all the non-scene widget elements.
"""
def __init__(self, context, path, gui, **kwargs):
Module.__init__(self, context, path)
Job.__init__(
self,
job_name="Scene-%s" % path,
process=self.refresh_scene,
conditional=lambda: self.screen_refresh_is_requested,
run_main=True,
)
self.log = context.channel("scene")
self.log_events = context.channel("scene-events")
self.gui = gui
self.matrix = Matrix()
self.hittable_elements = list()
self.hit_chain = list()
self.widget_root = SceneSpaceWidget(self)
self.matrix_root = Matrix()
self.screen_refresh_lock = threading.Lock()
self.interval = 1.0 / 60.0 # 60fps
self.last_position = None
self._down_start_time = None
self._down_start_pos = None
self._cursor = None
self._reference = None # Reference Object
self.attraction_points = [] # Clear all
self.default_stroke = None
self.default_fill = None
self.compute = True
self.has_background = False
self.colors = GuiColors(self.context)
self.screen_refresh_is_requested = True
self.background_brush = wx.Brush(self.colors.color_background)
# Stuff for magnet-lines
self.magnet_x = []
self.magnet_y = []
self.magnet_attraction = 2
# 0 off, `1..x` increasing strength (quadratic behaviour)
self.magnet_attract_x = True # Shall the X-Axis be affected
self.magnet_attract_y = True # Shall the Y-Axis be affected
self.magnet_attract_c = True # Shall the center be affected
# Stuff related to grids and guides
self.tick_distance = 0
self.auto_tick = False # by definition do not auto_tick
self.reset_grids()
self.tool_active = False
self.grid_points = None # Points representing the grid - total of primary + secondary + circular
def reset_grids(self):
self.draw_grid_primary = True
self.tick_distance = 0
# Secondary grid, perpendicular, but with definable center and scaling
self.draw_grid_secondary = False
self.grid_secondary_cx = None
self.grid_secondary_cy = None
self.grid_secondary_scale_x = 1
self.grid_secondary_scale_y = 1
# Circular grid
self.draw_grid_circular = False
self.grid_circular_cx = None
self.grid_circular_cy = None
def clear_magnets(self):
self.magnet_x = []
self.magnet_y = []
self.context.signal("magnets", False)
def toggle_x_magnet(self, x_value):
prev = self.has_magnets()
if x_value in self.magnet_x:
self.magnet_x.remove(x_value)
# print("Remove x magnet for %.1f" % x_value)
now = self.has_magnets()
else:
self.magnet_x += [x_value]
# print("Add x magnet for %.1f" % x_value)
now = True
if prev != now:
self.context.signal("magnets", now)
def toggle_y_magnet(self, y_value):
prev = self.has_magnets()
if y_value in self.magnet_y:
self.magnet_y.remove(y_value)
# print("Remove y magnet for %.1f" % y_value)
now = self.has_magnets()
else:
self.magnet_y += [y_value]
now = True
# print("Add y magnet for %.1f" % y_value)
if prev != now:
self.context.signal("magnets", now)
def magnet_attracted_x(self, x_value, useit):
delta = float("inf")
x_val = None
if useit:
for mag_x in self.magnet_x:
if abs(x_value - mag_x) < delta:
delta = abs(x_value - mag_x)
x_val = mag_x
return delta, x_val
def magnet_attracted_y(self, y_value, useit):
delta = float("inf")
y_val = None
if useit:
for mag_y in self.magnet_y:
if abs(y_value - mag_y) < delta:
delta = abs(y_value - mag_y)
y_val = mag_y
return delta, y_val
def revised_magnet_bound(self, bounds=None):
dx = 0
dy = 0
if self.has_magnets() and self.magnet_attraction > 0:
if self.tick_distance > 0:
s = "{amount}{units}".format(
amount=self.tick_distance, units=self.context.units_name
)
len_tick = float(Length(s))
# Attraction length is 1/3, 4/3, 9/3 of a grid-unit
# fmt: off
attraction_len = 1 / 3 * self.magnet_attraction * self.magnet_attraction * len_tick
# print("Attraction len=%s, attract=%d, alen=%.1f, tlen=%.1f, factor=%.1f" % (s, self.magnet_attraction, attraction_len, len_tick, attraction_len / len_tick ))
# fmt: on
else:
attraction_len = float(Length("1mm"))
delta_x1, x1 = self.magnet_attracted_x(bounds[0], self.magnet_attract_x)
delta_x2, x2 = self.magnet_attracted_x(bounds[2], self.magnet_attract_x)
delta_x3, x3 = self.magnet_attracted_x(
(bounds[0] + bounds[2]) / 2, self.magnet_attract_c
)
delta_y1, y1 = self.magnet_attracted_y(bounds[1], self.magnet_attract_y)
delta_y2, y2 = self.magnet_attracted_y(bounds[3], self.magnet_attract_y)
delta_y3, y3 = self.magnet_attracted_y(
(bounds[1] + bounds[3]) / 2, self.magnet_attract_c
)
if delta_x3 < delta_x1 and delta_x3 < delta_x2:
if delta_x3 < attraction_len:
if x3 is not None:
dx = x3 - (bounds[0] + bounds[2]) / 2
# print("X Take center , x=%.1f, dx=%.1f" % ((bounds[0] + bounds[2]) / 2, dx)
elif delta_x1 < delta_x2 and delta_x1 < delta_x3:
if delta_x1 < attraction_len:
if x1 is not None:
dx = x1 - bounds[0]
# print("X Take left side, x=%.1f, dx=%.1f" % (bounds[0], dx))
elif delta_x2 < delta_x1 and delta_x2 < delta_x3:
if delta_x2 < attraction_len:
if x2 is not None:
dx = x2 - bounds[2]
# print("X Take right side, x=%.1f, dx=%.1f" % (bounds[2], dx))
if delta_y3 < delta_y1 and delta_y3 < delta_y2:
if delta_y3 < attraction_len:
if y3 is not None:
dy = y3 - (bounds[1] + bounds[3]) / 2
# print("Y Take center , x=%.1f, dx=%.1f" % ((bounds[1] + bounds[3]) / 2, dy))
elif delta_y1 < delta_y2 and delta_y1 < delta_y3:
if delta_y1 < attraction_len:
if y1 is not None:
dy = y1 - bounds[1]
# print("Y Take top side, y=%.1f, dy=%.1f" % (bounds[1], dy))
elif delta_y2 < delta_y1 and delta_y2 < delta_y3:
if delta_y2 < attraction_len:
if y2 is not None:
dy = y2 - bounds[3]
# print("Y Take bottom side, y=%.1f, dy=%.1f" % (bounds[3], dy))
return dx, dy
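# Worked example (editor's addition, hedged): with the default settings above
# (magnet_attraction=2, attract flags True, tick_distance=0 so the attraction
# length falls back to 1mm, assumed to exceed a few scene units) and
# self.magnet_x == [100.0], calling revised_magnet_bound((90, 0, 104, 20))
# compares the left edge (delta 10), right edge (delta 4) and center (delta 3);
# the center wins, so the result is dx == 3.0, dy == 0, i.e. shift the bounds so
# their x-center lands on the magnet line.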
def has_magnets(self):
return len(self.magnet_x) + len(self.magnet_y) > 0
def module_open(self, *args, **kwargs):
context = self.context
context.schedule(self)
context.setting(int, "draw_mode", 0)
context.setting(bool, "mouse_zoom_invert", False)
context.setting(bool, "mouse_pan_invert", False)
context.setting(bool, "mouse_wheel_pan", False)
context.setting(float, "zoom_factor", 0.1)
context.setting(float, "pan_factor", 25.0)
context.setting(int, "fps", 40)
if context.fps <= 0:
context.fps = 60
self.interval = 1.0 / float(context.fps)
self.commit()
def commit(self):
context = self.context
self._init_widget(self.widget_root, context)
def module_close(self, *args, **kwargs):
self._final_widget(self.widget_root, self.context)
self.screen_refresh_lock.acquire() # calling shutdown live locks here since it's already shutting down.
self.context.unschedule(self)
def _init_widget(self, widget, context):
try:
widget.init(context)
except AttributeError:
pass
for w in widget:
if w is None:
continue
self._init_widget(w, context)
def _final_widget(self, widget, context):
try:
widget.final(context)
except AttributeError:
pass
for w in widget:
if w is None:
continue
self._final_widget(w, context)
def set_fps(self, fps):
"""
Set the scene frames per second which sets the interval for the Job.
"""
if fps == 0:
fps = 1
self.context.fps = fps
self.interval = 1.0 / float(self.context.fps)
def request_refresh_for_animation(self):
"""Called on the various signals trying to animate the screen."""
try:
if self.context.draw_mode & DRAW_MODE_ANIMATE == 0:
self.request_refresh()
except AttributeError:
pass
def request_refresh(self, origin=None, *args):
"""Request an update to the scene."""
try:
if self.context.draw_mode & DRAW_MODE_REFRESH == 0:
self.screen_refresh_is_requested = True
except AttributeError:
pass
def refresh_scene(self, *args, **kwargs):
"""
Called by the Scheduler at the specified framerate.
Called in the UI thread.
"""
if self.screen_refresh_is_requested:
if self.screen_refresh_lock.acquire(timeout=0.2):
try:
self.update_buffer_ui_thread()
except RuntimeError:
return
self.gui.Refresh()
self.gui.Update()
self.screen_refresh_is_requested = False
self.screen_refresh_lock.release()
else:
self.screen_refresh_is_requested = False
def update_buffer_ui_thread(self):
"""Performs redrawing of the data in the UI thread."""
dm = self.context.draw_mode
buf = self.gui._Buffer
if buf is None or buf.GetSize() != self.gui.ClientSize or not buf.IsOk():
self.gui.set_buffer()
buf = self.gui._Buffer
dc = wx.MemoryDC()
dc.SelectObject(buf)
self.background_brush.SetColour(self.colors.color_background)
dc.SetBackground(self.background_brush)
dc.Clear()
w, h = dc.Size
if dm & DRAW_MODE_FLIPXY != 0:
dc.SetUserScale(-1, -1)
dc.SetLogicalOrigin(w, h)
gc = wx.GraphicsContext.Create(dc)
gc.Size = dc.Size
font = wx.Font(14, wx.SWISS, wx.NORMAL, wx.BOLD)
gc.SetFont(font, wx.BLACK)
self.draw(gc)
if dm & DRAW_MODE_INVERT != 0:
dc.Blit(0, 0, w, h, dc, 0, 0, wx.SRC_INVERT)
gc.Destroy()
dc.SelectObject(wx.NullBitmap)
del dc
def _signal_widget(self, widget, *args, **kwargs):
"""
Calls the signal widget with the given args. Calls signal for the entire widget node tree.
"""
try:
widget.signal(*args)
except AttributeError:
pass
for w in widget:
if w is None:
continue
self._signal_widget(w, *args, **kwargs)
def animate_tick(self):
pass
def notify_added_to_parent(self, parent):
"""
Called when node is added to parent. Notifying the scene as a whole.
"""
pass
def notify_added_child(self, child):
"""
Called when a child is added to the tree. Notifies scene as a whole.
"""
try:
child.init(self.context)
except AttributeError:
pass
def notify_removed_from_parent(self, parent):
"""
Called when a widget is removed from its parent. Notifies scene as a whole.
"""
pass
def notify_removed_child(self, child):
"""
Called when a widget's child is removed. Notifies scene as a whole.
"""
try:
child.final(self.context)
except AttributeError:
pass
def notify_moved_child(self, child):
"""
Called when a widget is moved from one widget parent to another.
"""
pass
def draw(self, canvas):
"""
Scene Draw routine to be called on paint when the _Buffer bitmap needs to be redrawn.
"""
if self.widget_root is not None:
self.widget_root.draw(canvas)
if self.log:
self.log("Redraw Canvas")
def convert_scene_to_window(self, position):
"""
Convert the scene space to the window space for a particular point.
The position given in the scene produces the position on the screen.
"""
point = self.widget_root.scene_widget.matrix.point_in_matrix_space(position)
return point[0], point[1]
def convert_window_to_scene(self, position):
"""
Convert the window space to the scene space for a particular point.
The position given in window pixels produces the position within the scene.
"""
point = self.widget_root.scene_widget.matrix.point_in_inverse_space(position)
return point[0], point[1]
def rebuild_hittable_chain(self):
"""
Iterates through the tree and adds all hittable elements to the hittable_elements list.
This is dynamically rebuilt on the mouse event.
"""
self.hittable_elements.clear()
self.rebuild_hit_chain(self.widget_root, self.matrix_root)
def rebuild_hit_chain(self, current_widget, current_matrix=None):
"""
Iterates through the widget tree to find elements whose hit() function reports that they are HITCHAIN_HIT
and registers them within the hittable_elements list if they are able to be hit at the current time, given the
dimensions of the widget and the current matrix within the widget tree.
HITCHAIN_HIT means that this is a hit value and should be the termination of this branch of the widget tree.
HITCHAIN_DELEGATE means that this is not a hittable widget and should not receive mouse events.
HITCHAIN_HIT_AND_DELEGATE means that this is a hittable widget, but other widgets within it might also matter.
HITCHAIN_DELEGATE_AND_HIT means that other widgets in the tree should be checked first, but after those this
widget should be checked.
The hitchain is the current matrix and current widget in the order of depth.
"""
# If there is a matrix for the widget concatenate it.
if current_widget.matrix is not None:
matrix_within_scene = Matrix(current_widget.matrix)
matrix_within_scene.post_cat(current_matrix)
else:
matrix_within_scene = Matrix(current_matrix)
# Add to list and recurse for children based on response.
response = current_widget.hit()
if response == HITCHAIN_HIT:
self.hittable_elements.append((current_widget, matrix_within_scene))
# elif response == HITCHAIN_HIT_WITH_PRIORITY:
# self.hittable_elements.insert(0, (current_widget, matrix_within_scene))
elif response == HITCHAIN_DELEGATE:
for w in current_widget:
self.rebuild_hit_chain(w, matrix_within_scene)
elif response == HITCHAIN_HIT_AND_DELEGATE:
self.hittable_elements.append((current_widget, matrix_within_scene))
for w in current_widget:
self.rebuild_hit_chain(w, matrix_within_scene)
elif response == HITCHAIN_DELEGATE_AND_HIT:
for w in current_widget:
self.rebuild_hit_chain(w, matrix_within_scene)
self.hittable_elements.append((current_widget, matrix_within_scene))
def find_hit_chain(self, position):
"""
Processes the hittable_elements list and find which elements are hit at a given position.
This gives the actual hits with regard to the position of the event.
"""
self.hit_chain.clear()
for current_widget, current_matrix in self.hittable_elements:
try:
hit_point = Point(current_matrix.point_in_inverse_space(position))
except ZeroDivisionError:
current_matrix.reset()
# Some object is zero matrixed, reset it.
return
if current_widget.contains(hit_point.x, hit_point.y):
self.hit_chain.append((current_widget, current_matrix))
def event(self, window_pos, event_type="", nearest_snap=None):
"""
Scene event code. Processes all the events for a particular mouse event bound in the ScenePanel.
Many mousedown events trigger the specific start of the hitchain matching, and processes the given hitchain.
Subsequent delegation of the events will be processed with regard to whether the matching event struck a
particular widget. This permits a hit widget to get all further events.
Responses to events are:
RESPONSE_ABORT: Aborts any future mouse events within the sequence.
RESPONSE_CONSUME: Consumes the event and prevents any widget further down the hitchain from getting the event.
RESPONSE_CHAIN: Permits the event to move to the next widget in the hitchain.
RESPONSE_DROP: Remove this item from the hitchain and continue to process the events. Future events will not
consider the dropped element within the hitchain.
"""
if self.log_events:
self.log_events(
"%s: %s %s" % (event_type, str(window_pos), str(nearest_snap))
)
if window_pos is None:
# Capture Lost
for i, hit in enumerate(self.hit_chain):
if hit is None:
continue # Element was dropped.
current_widget, current_matrix = hit
current_widget.event(None, None, event_type, None)
return
if self.last_position is None:
self.last_position = window_pos
dx = window_pos[0] - self.last_position[0]
dy = window_pos[1] - self.last_position[1]
window_pos = (
window_pos[0],
window_pos[1],
self.last_position[0],
self.last_position[1],
dx,
dy,
)
self.last_position = window_pos
try:
previous_top_element = self.hit_chain[0][0]
except (IndexError, TypeError):
previous_top_element = None
if event_type in (
"kb_shift_release",
"kb_shift_press",
"kb_ctrl_release",
"kb_ctrl_press",
"kb_alt_release",
"kb_alt_press",
):
# print("Keyboard-Event raised: %s" % event_type)
self.rebuild_hittable_chain()
for current_widget, current_matrix in self.hittable_elements:
space_pos = window_pos
if current_matrix is not None and not current_matrix.is_identity():
space_cur = current_matrix.point_in_inverse_space(window_pos[0:2])
space_last = current_matrix.point_in_inverse_space(window_pos[2:4])
sdx = space_cur[0] - space_last[0]
sdy = space_cur[1] - space_last[1]
space_pos = (
space_cur[0],
space_cur[1],
space_last[0],
space_last[1],
sdx,
sdy,
)
try:
# We ignore the 'consume' etc. for the time being...
response = current_widget.event(
window_pos, space_pos, event_type, None
)
except AttributeError:
pass
return
if event_type in (
"leftdown",
"middledown",
"rightdown",
"wheeldown",
"wheelup",
"hover",
):
self._down_start_time = time.time()
self._down_start_pos = window_pos
self.rebuild_hittable_chain()
self.find_hit_chain(window_pos)
for i, hit in enumerate(self.hit_chain):
if hit is None:
continue # Element was dropped.
current_widget, current_matrix = hit
if current_widget is None:
continue
space_pos = window_pos
if current_matrix is not None and not current_matrix.is_identity():
space_cur = current_matrix.point_in_inverse_space(window_pos[0:2])
space_last = current_matrix.point_in_inverse_space(window_pos[2:4])
sdx = space_cur[0] - space_last[0]
sdy = space_cur[1] - space_last[1]
space_pos = (
space_cur[0],
space_cur[1],
space_last[0],
space_last[1],
sdx,
sdy,
)
if (
i == 0
and event_type == "hover"
and previous_top_element is not current_widget
):
if previous_top_element is not None:
if self.log_events:
self.log_events(
"Converted %s: %s" % ("hover_end", str(window_pos))
)
previous_top_element.event(
window_pos, window_pos, "hover_end", None
)
current_widget.event(window_pos, space_pos, "hover_start", None)
if self.log_events:
self.log_events(
"Converted %s: %s" % ("hover_start", str(window_pos))
)
previous_top_element = current_widget
if (
event_type == "leftup"
and time.time() - self._down_start_time <= 0.30
and abs(complex(*window_pos[:2]) - complex(*self._down_start_pos[:2]))
< 50
): # Anything within 0.3 seconds will be converted to a leftclick
response = current_widget.event(
window_pos, space_pos, "leftclick", nearest_snap
)
if self.log_events:
self.log_events("Converted %s: %s" % ("leftclick", str(window_pos)))
elif event_type == "leftup":
if self.log_events:
self.log_events(
f"Did not convert to click, {time.time() - self._down_start_time}"
)
response = current_widget.event(
window_pos, space_pos, event_type, nearest_snap
)
# print ("Leftup called for widget #%d" % i )
# print (response)
else:
response = current_widget.event(
window_pos, space_pos, event_type, nearest_snap
)
if type(response) is tuple:
# We get two additional parameters which are the screen location of the nearest snap point
params = response[1:]
response = response[0]
if len(params) > 1:
new_x_space = params[0]
new_y_space = params[1]
new_x = window_pos[0]
new_y = window_pos[1]
snap_x = None
snap_y = None
sdx = new_x_space - space_pos[0]
if current_matrix is not None and not current_matrix.is_identity():
sdx *= current_matrix.value_scale_x()
snap_x = window_pos[0] + sdx
sdy = new_y_space - space_pos[1]
if current_matrix is not None and not current_matrix.is_identity():
sdy *= current_matrix.value_scale_y()
# print("Shift x by %.1f pixel (%.1f), Shift y by %.1f pixel (%.1f)" % (sdx, odx, sdy, ody))
snap_y = window_pos[1] + sdy
dx = new_x - self.last_position[0]
dy = new_y - self.last_position[1]
if snap_x is None:
nearest_snap = None
else:
# We are providing the space and screen coordinates
snap_space = current_matrix.point_in_inverse_space(
(snap_x, snap_y)
)
nearest_snap = (
snap_space[0],
snap_space[1],
snap_x,
snap_y,
)
# print ("Snap provided", nearest_snap)
else:
params = None
if response == RESPONSE_ABORT:
self.hit_chain.clear()
return
elif response == RESPONSE_CONSUME:
# if event_type in ("leftdown", "middledown", "middleup", "leftup", "move", "leftclick"):
# widgetname = type(current_widget).__name__
# print("Event %s was consumed by %s" % (event_type, widgetname))
return
elif response == RESPONSE_CHAIN:
continue
elif response == RESPONSE_DROP:
self.hit_chain[i] = None
continue
else:
break
def cursor(self, cursor, always=False):
"""
Routine to centralize and correct cursor info.
@param cursor: Changed cursor
@param always: Force cursor change
@return:
"""
if cursor == "sizing":
new_cursor = wx.CURSOR_SIZING
elif cursor in ("size_nw", "size_se"):
new_cursor = wx.CURSOR_SIZENWSE
elif cursor in ("size_sw", "size_ne"):
new_cursor = wx.CURSOR_SIZENESW
elif cursor in ("size_n", "size_s", "skew_y"):
new_cursor = wx.CURSOR_SIZENS
elif cursor in ("size_e", "size_w", "skew_x"):
new_cursor = wx.CURSOR_SIZEWE
elif cursor == "arrow":
new_cursor = wx.CURSOR_ARROW
elif cursor == "cross":
new_cursor = wx.CROSS_CURSOR
elif cursor == "rotate1":
new_cursor = wx.CURSOR_CROSS
elif cursor == "rotate2":
new_cursor = wx.CURSOR_CROSS
elif cursor == "rotmove":
new_cursor = wx.CURSOR_HAND
elif cursor == "reference":
new_cursor = wx.CURSOR_BULLSEYE
else:
new_cursor = wx.CURSOR_ARROW
self.log("Invalid cursor.")
if platform.system() == "Linux":
if cursor == "sizing":
new_cursor = wx.CURSOR_SIZENWSE
elif cursor in ("size_nw", "size_se"):
new_cursor = wx.CURSOR_SIZING
elif cursor in ("size_sw", "size_ne"):
new_cursor = wx.CURSOR_SIZING
if new_cursor != self._cursor or always:
self._cursor = new_cursor
self.gui.scene_panel.SetCursor(wx.Cursor(self._cursor))
self.log("Cursor changed to %s" % cursor)
def add_scenewidget(self, widget, properties=ORIENTATION_RELATIVE):
"""
Delegate to the SceneSpaceWidget scene.
"""
self.widget_root.scene_widget.add_widget(-1, widget, properties)
def add_interfacewidget(self, widget, properties=ORIENTATION_RELATIVE):
"""
Delegate to the SceneSpaceWidget interface.
"""
self.widget_root.interface_widget.add_widget(-1, widget, properties)
def validate_reference(self):
"""
Check whether the reference is still valid
"""
found = False
if self._reference:
for e in self.context.elements.flat(types=elem_nodes):
# Here we ignore the lock-status of an element
if e is self._reference:
found = True
break
if not found:
self._reference = None
@property
def reference_object(self):
return self._reference
@reference_object.setter
def reference_object(self, ref_object):
self._reference = ref_object
```
#### File: meerk40t/lihuiyu/lihuiyuemulator.py
```python
from meerk40t.core.cutcode import CutCode, RawCut
from meerk40t.core.parameters import Parameters
from meerk40t.core.units import UNITS_PER_MIL
from meerk40t.kernel import Module
from meerk40t.numpath import Numpath
from meerk40t.svgelements import Color
class LihuiyuEmulator(Module):
def __init__(self, context, path):
Module.__init__(self, context, path)
self.context.setting(bool, "fix_speeds", False)
self.parser = LihuiyuParser()
self.parser.fix_speeds = self.context.fix_speeds
self.parser.channel = self.context.channel("lhy")
def pos(p):
if p is None:
return
x0, y0, x1, y1 = p
self.context.signal("emulator;position", (x0, y0, x1, y1))
self.parser.position = pos
def __repr__(self):
return "LihuiyuEmulator(%s)" % self.name
def initialize(self, *args, **kwargs):
context = self.context
active = self.context.root.active
send = context.channel("%s/usb_send" % active)
send.watch(self.parser.write_packet)
def finalize(self, *args, **kwargs):
context = self.context
active = self.context.root.active
send = context.channel("%s/usb_send" % active)
send.unwatch(self.parser.write_packet)
class LihuiyuParser:
"""
LihuiyuParser parses LHYMicro-GL code with a state diagram. This should accurately reconstruct the values.
When the position is changed it calls a self.position() function if one exists.
"""
def __init__(self):
self.channel = None
self.position = None
self.board = "M2"
self.header_skipped = False
self.count_lines = 0
self.count_flag = 0
self.settings = Parameters({"speed": 20.0, "power": 1000.0})
self.speed_code = None
self.x = 0.0
self.y = 0.0
self.number_value = ""
self.distance_x = 0
self.distance_y = 0
self.filename = ""
self.laser = 0
self.left = False
self.top = False
self.x_on = False
self.y_on = False
self.small_jump = False
self.returning_compact = True
self.returning_finished = False
self.mode = None
self.raster_step = 0
self.paused_state = False
self.compact_state = False
self.finish_state = False
self.horizontal_major = False
self.fix_speeds = False
self.number_consumer = {}
def parse(self, data, elements):
self.path = Numpath()
def position(p):
if p is None:
return
from_x, from_y, to_x, to_y = p
if self.program_mode:
if self.laser:
self.path.line(complex(from_x, from_y), complex(to_x, to_y))
self.position = position
self.write(data)
self.path.uscale(UNITS_PER_MIL)
elements.elem_branch.add(
type="elem numpath",
path=self.path,
stroke=Color("black"),
**self.settings.settings,
)
elements.signal("refresh_scene", 0)
@property
def program_mode(self):
return self.compact_state
@property
def default_mode(self):
return not self.compact_state
@property
def raster_mode(self):
return self.settings.get("raster_step", 0) != 0
def new_file(self):
self.header_skipped = False
self.count_flag = 0
self.count_lines = 0
@staticmethod
def remove_header(data):
count_lines = 0
count_flag = 0
for i in range(len(data)):
b = data[i]
c = chr(b)
if c == "\n":
count_lines += 1
elif c == "%":
count_flag += 1
if count_lines >= 3 and count_flag >= 5:
return data[i:]
def header_write(self, data):
"""
Write data to the emulator including the header. This is intended for saved .egv files which include a default
header.
"""
if self.header_skipped:
self.write(data)
else:
data = LihuiyuParser.remove_header(data)
self.write(data)
def write_packet(self, packet):
self.write(packet[1:31])
def write(self, data):
for b in data:
self.process(b, chr(b))
def distance_consumer(self, c):
self.number_value += c
if len(self.number_value) >= 3:
self.append_distance(int(self.number_value))
self.number_value = ""
def speedcode_b1_consumer(self, c):
self.number_value += c
if len(self.number_value) >= 3:
if self.channel:
self.channel("Speedcode B1 = %s" % self.number_value)
self.number_value = ""
self.number_consumer = self.speedcode_b2_consumer
def speedcode_b2_consumer(self, c):
self.number_value += c
if len(self.number_value) >= 3:
if self.channel:
self.channel("Speedcode B2 = %s" % self.number_value)
self.number_value = ""
self.number_consumer = self.speedcode_accel_consumer
def speedcode_accel_consumer(self, c):
self.number_value += c
if len(self.number_value) >= 1:
if self.channel:
self.channel("Speedcode Accel = %s" % self.number_value)
self.number_value = ""
self.number_consumer = self.speedcode_mult_consumer
def speedcode_mult_consumer(self, c):
self.number_value += c
if len(self.number_value) >= 3:
if self.channel:
self.channel("Speedcode Accel = %s" % self.number_value)
self.number_value = ""
self.number_consumer = self.speedcode_dratio_b1_consumer
def speedcode_dratio_b1_consumer(self, c):
self.number_value += c
if len(self.number_value) >= 3:
if self.channel:
self.channel("Speedcode Dratio b1 = %s" % self.number_value)
self.number_value = ""
self.number_consumer = self.speedcode_dratio_b2_consumer
def speedcode_dratio_b2_consumer(self, c):
self.number_value += c
if len(self.number_value) >= 3:
if self.channel:
self.channel("Speedcode Dratio b2 = %s" % self.number_value)
self.number_value = ""
self.number_consumer = self.distance_consumer
def raster_step_consumer(self, c):
self.number_value += c
if len(self.number_value) >= 3:
if self.channel:
self.channel("Raster Step = %s" % self.number_value)
self.raster_step = int(self.number_value)
self.number_value = ""
self.number_consumer = self.distance_consumer
def mode_consumer(self, c):
self.number_value += c
if len(self.number_value) >= 1:
if self.channel:
self.channel("Set Mode = %s" % self.number_value)
self.mode = int(self.number_value)
self.number_value = ""
self.number_consumer = self.speedcode_mult_consumer
def append_distance(self, amount):
if self.x_on:
self.distance_x += amount
if self.y_on:
self.distance_y += amount
def execute_distance(self):
if self.distance_x != 0 or self.distance_y != 0:
dx = self.distance_x
dy = self.distance_y
if self.left:
dx = -dx
if self.top:
dy = -dy
self.distance_x = 0
self.distance_y = 0
ox = self.x
oy = self.y
self.x += dx
self.y += dy
if self.position:
self.position((ox, oy, self.x, self.y))
if self.channel:
self.channel("Moving (%d %d) now at %d %d" % (dx, dy, self.x, self.y))
def process(self, b, c):
if c == "I":
self.finish_state = False
self.compact_state = False
self.paused_state = False
self.distance_x = 0
self.distance_y = 0
if self.finish_state: # In finished all commands are black holed
return
if ord("0") <= b <= ord("9"):
self.number_consumer(c)
return
else:
self.number_consumer = self.distance_consumer
self.number_value = ""
if self.compact_state:
# Every command in compact state executes distances.
self.execute_distance()
if c == "|":
self.append_distance(25)
self.small_jump = True
elif ord("a") <= b <= ord("y"):
self.append_distance(b + 1 - ord("a"))
self.small_jump = False
elif c == "z":
self.append_distance(26 if self.small_jump else 255)
self.small_jump = False
elif c == "B": # Move to Right.
if self.left and self.horizontal_major:
# Was T switched to B with horizontal rastering.
if self.raster_step:
self.distance_y += self.raster_step
self.left = False
self.x_on = True
self.y_on = False
if self.channel:
self.channel("Right")
elif c == "T": # Move to Left
if not self.left and self.horizontal_major:
# Was B switched to T with horizontal rastering.
if self.raster_step:
self.distance_y += self.raster_step
self.left = True
self.x_on = True
self.y_on = False
if self.channel:
self.channel("Left")
elif c == "R": # Move to Bottom
if self.top and not self.horizontal_major:
# Was L switched to R with vertical rastering.
if self.raster_step:
self.distance_x += self.raster_step
self.top = False
self.x_on = False
self.y_on = True
if self.channel:
self.channel("Bottom")
elif c == "L": # Move to Top
if not self.top and not self.horizontal_major:
# Was R switched to L with vertical rastering.
if self.raster_step:
self.distance_x += self.raster_step
self.top = True
self.x_on = False
self.y_on = True
if self.channel:
self.channel("Top")
elif c == "U":
self.laser = 0
if self.channel:
self.channel("Laser Off")
elif c == "D":
self.laser = 1
if self.channel:
self.channel("Laser On")
elif c == "F":
if self.channel:
self.channel("Finish")
self.returning_compact = False
self.returning_finished = True
elif c == "@":
if self.channel:
self.channel("Reset")
self.returning_finished = False
self.returning_compact = False
elif c in "C":
if self.channel:
self.channel("Speedcode")
self.speed_code = ""
elif c in "V":
self.raster_step = None
if self.channel:
self.channel("Velocity")
self.number_consumer = self.speedcode_b1_consumer
elif c in "G":
if self.channel:
self.channel("Step Value")
self.number_consumer = self.raster_step_consumer
elif c == "S":
if self.channel:
self.channel("Mode Set")
self.laser = 0
self.execute_distance()
self.mode = None
self.number_consumer = self.mode_consumer
elif c == "E":
if self.channel:
self.channel("Execute State")
if self.mode is None:
if self.returning_compact:
self.compact_state = True
if self.returning_finished:
self.finish_state = True
if self.horizontal_major:
self.left = not self.left
self.x_on = True
self.y_on = False
if self.raster_step:
self.distance_y += self.raster_step
else:
# vertical major
self.top = not self.top
self.x_on = False
self.y_on = True
if self.raster_step:
self.distance_x += self.raster_step
elif self.mode == 0:
# Homes then moves position.
pass
elif self.mode == 1:
self.compact_state = True
self.horizontal_major = self.x_on
if self.channel:
self.channel("Setting Axis: h=" + str(self.x_on))
elif self.mode == 2:
# Rail unlocked.
self.compact_state = True
self.returning_finished = False
self.returning_compact = True
self.laser = 0
elif c == "P":
if self.channel:
self.channel("Pause")
self.laser = 0
if self.paused_state:
# Home sequence triggered by 2 P commands in the same packet.
# This should resume if not located within the same packet.
if self.position:
self.position((self.x, self.y, 0, 0))
self.x = 0
self.y = 0
self.distance_y = 0
self.distance_x = 0
self.finish_state = True
self.paused_state = False
else:
self.execute_distance() # distance is executed by a P command
self.paused_state = True
elif c == "N":
if self.channel:
self.channel("N")
self.execute_distance() # distance is executed by an N command.
self.laser = 0
self.compact_state = False
if self.position:
self.position(None)
elif c == "M":
self.x_on = True
self.y_on = True
if self.channel:
a = "Top" if self.top else "Bottom"
b = "Left" if self.left else "Right"
self.channel("Diagonal %s %s" % (a, b))
class EGVBlob:
def __init__(self, data: bytearray, name=None):
self.name = name
self.data = data
self.operation = "blob"
self._cutcode = None
self._cut = None
def __repr__(self):
return "EGV(%s, %d bytes)" % (self.name, len(self.data))
def as_cutobjects(self):
parser = LihuiyuParser()
self._cutcode = CutCode()
self._cut = RawCut()
def new_cut():
if self._cut is not None and len(self._cut):
self._cutcode.append(self._cut)
self._cut = RawCut()
self._cut.settings = dict(parser.settings)
def position(p):
if p is None or self._cut is None:
new_cut()
return
from_x, from_y, to_x, to_y = p
if parser.program_mode:
if len(self._cut.plot) == 0:
self._cut.plot_append(int(from_x), int(from_y), parser.laser)
self._cut.plot_append(int(to_x), int(to_y), parser.laser)
else:
new_cut()
parser.position = position
parser.header_write(self.data)
cutcode = self._cutcode
self._cut = None
self._cutcode = None
return cutcode
def generate(self):
yield "blob", "egv", LihuiyuParser.remove_header(self.data)
class EgvLoader:
@staticmethod
def remove_header(data):
count_lines = 0
count_flag = 0
for i in range(len(data)):
b = data[i]
c = chr(b)
if c == "\n":
count_lines += 1
elif c == "%":
count_flag += 1
if count_lines >= 3 and count_flag >= 5:
return data[i:]
@staticmethod
def load_types():
yield "Engrave Files", ("egv",), "application/x-egv"
@staticmethod
def load(kernel, elements_modifier, pathname, **kwargs):
import os
basename = os.path.basename(pathname)
with open(pathname, "rb") as f:
op_branch = elements_modifier.get(type="branch ops")
op_branch.add(
data=bytearray(EgvLoader.remove_header(f.read())),
data_type="egv",
type="blob",
name=basename,
)
return True
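# Illustrative note (assumption): remove_header() treats the EGV header as finished
# once at least three newlines and five '%' flags have been seen, and returns the
# remaining bytes from that point. A toy example:
#
#     EgvLoader.remove_header(b"header %0%0%0%0\n\n\n%rest") == b"%rest"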
```
#### File: meerk40t/moshi/device.py
```python
import threading
import time
from meerk40t.kernel import (
STATE_ACTIVE,
STATE_BUSY,
STATE_END,
STATE_IDLE,
STATE_INITIALIZE,
STATE_PAUSE,
STATE_TERMINATE,
STATE_UNKNOWN,
STATE_WAIT,
Service,
)
from ..core.parameters import Parameters
from ..core.plotplanner import PlotPlanner
from ..core.spoolers import Spooler
from ..core.units import UNITS_PER_MIL, ViewPort
from ..device.basedevice import (
DRIVER_STATE_FINISH,
DRIVER_STATE_MODECHANGE,
DRIVER_STATE_PROGRAM,
DRIVER_STATE_RAPID,
DRIVER_STATE_RASTER,
PLOT_FINISH,
PLOT_JOG,
PLOT_LEFT_UPPER,
PLOT_RAPID,
PLOT_SETTING,
PLOT_START,
)
from .moshiblob import (
MOSHI_EPILOGUE,
MOSHI_ESTOP,
MOSHI_FREEMOTOR,
MOSHI_LASER,
MOSHI_PROLOGUE,
MOSHI_READ,
MoshiBlob,
swizzle_table,
)
STATUS_OK = 205  # Seen before and after file send.
STATUS_PROCESSING = 207 # PROCESSING
STATUS_ERROR = 237 # ERROR
STATUS_RESET = 239 # Seen during reset
def plugin(kernel, lifecycle=None):
if lifecycle == "plugins":
from .gui import gui
return [gui.plugin]
if lifecycle == "register":
kernel.register("provider/device/moshi", MoshiDevice)
if lifecycle == "preboot":
suffix = "moshi"
for d in kernel.derivable(suffix):
kernel.root(
"service device start -p {path} {suffix}\n".format(
path=d, suffix=suffix
)
)
def get_code_string_from_moshicode(code):
"""
Translate Moshiboard CH341 status codes into readable status strings.
"""
if code == STATUS_OK:
return "OK"
elif code == STATUS_PROCESSING:
return "Processing"
elif code == STATUS_ERROR:
return "Error"
elif code == STATUS_RESET:
return "Resetting"
elif code == 0:
return "USB Failed"
else:
return "UNK %02x" % code
class MoshiDevice(Service, ViewPort):
"""
MoshiDevice is the driver service for Moshiboard boards.
"""
def __init__(self, kernel, path, *args, **kwargs):
Service.__init__(self, kernel, path)
self.name = "MoshiDevice"
self.setting(bool, "opt_rapid_between", True)
self.setting(int, "opt_jog_mode", 0)
self.setting(int, "opt_jog_minimum", 256)
self.setting(int, "usb_index", -1)
self.setting(int, "usb_bus", -1)
self.setting(int, "usb_address", -1)
self.setting(int, "usb_version", -1)
self.setting(bool, "mock", False)
self.setting(bool, "home_right", False)
self.setting(bool, "home_bottom", False)
self.setting(str, "home_x", "0mm")
self.setting(str, "home_y", "0mm")
self.setting(bool, "enable_raster", True)
self.setting(int, "packet_count", 0)
self.setting(int, "rejected_count", 0)
self.setting(str, "label", path)
self.setting(int, "rapid_speed", 40)
_ = self._
choices = [
{
"attr": "bedwidth",
"object": self,
"default": "330mm",
"type": str,
"label": _("Width"),
"tip": _("Width of the laser bed."),
},
{
"attr": "bedheight",
"object": self,
"default": "210mm",
"type": str,
"label": _("Height"),
"tip": _("Height of the laser bed."),
},
{
"attr": "scale_x",
"object": self,
"default": 1.000,
"type": float,
"label": _("X Scale Factor"),
"tip": _(
"Scale factor for the X-axis. Board units to actual physical units."
),
},
{
"attr": "scale_y",
"object": self,
"default": 1.000,
"type": float,
"label": _("Y Scale Factor"),
"tip": _(
"Scale factor for the Y-axis. Board units to actual physical units."
),
},
]
self.register_choices("bed_dim", choices)
ViewPort.__init__(
self,
self.bedwidth,
self.bedheight,
user_scale_x=self.scale_x,
user_scale_y=self.scale_y,
native_scale_x=UNITS_PER_MIL,
native_scale_y=UNITS_PER_MIL,
# flip_x=self.flip_x,
# flip_y=self.flip_y,
# swap_xy=self.swap_xy,
origin_x=1.0 if self.home_right else 0.0,
origin_y=1.0 if self.home_bottom else 0.0,
)
self.state = 0
self.driver = MoshiDriver(self)
self.add_service_delegate(self.driver)
self.controller = MoshiController(self)
self.add_service_delegate(self.controller)
self.spooler = Spooler(self, driver=self.driver)
self.add_service_delegate(self.spooler)
_ = self.kernel.translation
@self.console_command(
"spool",
help=_("spool <command>"),
regex=True,
input_type=(None, "plan", "device"),
output_type="spooler",
)
def spool(command, channel, _, data=None, remainder=None, **kwgs):
spooler = self.spooler
if data is not None:
# If plan data is in data, then we copy that and move on to next step.
spooler.jobs(data.plan)
channel(_("Spooled Plan."))
self.signal("plan", data.name, 6)
if remainder is None:
channel(_("----------"))
channel(_("Spoolers:"))
for d, d_name in enumerate(self.match("device", suffix=True)):
channel("%d: %s" % (d, d_name))
channel(_("----------"))
channel(_("Spooler on device %s:" % str(self.label)))
for s, op_name in enumerate(spooler.queue):
channel("%d: %s" % (s, op_name))
channel(_("----------"))
return "spooler", spooler
@self.console_command("usb_connect", help=_("Connect USB"))
def usb_connect(command, channel, _, **kwargs):
"""
Force USB Connection Event for Moshiboard
"""
try:
self.controller.open()
except ConnectionRefusedError:
channel("Connection Refused.")
@self.console_command("usb_disconnect", help=_("Disconnect USB"))
def usb_disconnect(command, channel, _, **kwargs):
"""
Force USB Disconnect Event for Moshiboard
"""
try:
self.controller.close()
except ConnectionError:
channel("Usb is not connected.")
@self.console_command("start", help=_("Start Pipe to Controller"))
def pipe_start(command, channel, _, data=None, **kwargs):
"""
Start output sending.
"""
self.controller.update_state(STATE_ACTIVE)
self.controller.start()
channel("Moshi Channel Started.")
@self.console_command("hold", input_type="moshi", help=_("Hold Controller"))
def pipe_pause(command, channel, _, **kwargs):
"""
Pause output sending.
"""
self.controller.update_state(STATE_PAUSE)
self.controller.pause()
channel(_("Moshi Channel Paused."))
@self.console_command("resume", input_type="moshi", help=_("Resume Controller"))
def pipe_resume(command, channel, _, **kwargs):
"""
Resume output sending.
"""
self.controller.update_state(STATE_ACTIVE)
self.controller.start()
channel(_("Moshi Channel Resumed."))
@self.console_command(("estop", "abort"), help=_("Abort Job"))
def pipe_abort(command, channel, _, **kwargs):
"""
Abort the output job. Due to the way Moshiboards work, this will usually do
nothing, as the job will already have been sent to the backend.
"""
self.controller.estop()
channel(_("Moshi Channel Aborted."))
@self.console_command(
"status",
input_type="moshi",
help=_("Update moshiboard controller status"),
)
def realtime_status(channel, _, **kwargs):
"""
Updates the CH341 Status information for the Moshiboard.
"""
try:
self.controller.update_status()
except ConnectionError:
channel(_("Could not check status, usb not connected."))
@self.console_command(
"continue",
help=_("abort waiting process on the controller."),
)
def realtime_pause(**kwargs):
"""
Abort the waiting process for the Moshiboard. This is usually a wait from the BUSY (207) state until the board
reports its status as READY (205).
"""
self.controller.abort_waiting = True
@property
def viewbuffer(self):
return self.controller.viewbuffer()
@property
def current(self):
"""
@return: the location in units for the current known position.
"""
return self.device_to_scene_position(self.driver.native_x, self.driver.native_y)
class MoshiDriver(Parameters):
"""
A driver takes spoolable commands and turns those commands into states and code in a language
agnostic fashion. The Moshiboard Driver overloads the Driver class to take spoolable values from
the spooler and converts them into Moshiboard specific actions.
"""
def __init__(self, service, channel=None, *args, **kwargs):
super().__init__()
self.service = service
self.name = str(self.service)
self.state = 0
self.native_x = 0
self.native_y = 0
self.plot_planner = PlotPlanner(self.settings)
self.plot_data = None
self.program = MoshiBlob()
self.is_paused = False
self.hold = False
self.paused = False
self.service._buffer_size = 0
self.preferred_offset_x = 0
self.preferred_offset_y = 0
name = self.service.label
self.pipe_channel = service.channel("%s/events" % name)
def __repr__(self):
return "MoshiDriver(%s)" % self.name
def hold_work(self):
"""
Required.
Spooler check to see if the work cycle should be held.
@return: hold?
"""
return self.hold or self.paused
def hold_idle(self):
"""
Required.
Spooler check. Should the idle job be processed or held.
@return:
"""
return False
def laser_off(self, *values):
"""
Turn laser off in place.
@param values:
@return:
"""
pass
def laser_on(self, *values):
"""
Turn laser on in place.
@param values:
@return:
"""
pass
def plot(self, plot):
"""
Gives the driver a bit of cutcode that should be plotted.
@param plot:
@return:
"""
self.plot_planner.push(plot)
def plot_start(self):
"""
Called at the end of plot commands to ensure the driver can deal with them all as a group.
@return:
"""
if self.plot_data is None:
self.plot_data = self.plot_planner.gen()
self.plotplanner_process()
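# Usage sketch (assumption): a spooled job typically exercises this driver roughly as
#
#     driver.move_abs("10mm", "10mm")   # rapid reposition
#     driver.plot(cut)                  # queue cut objects for the plot planner
#     driver.plot_start()               # flush the planner into the MoshiBlob program
#     driver.rapid_mode()               # close the program and push it to the controller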
def move_abs(self, x, y):
x, y = self.service.physical_to_device_position(x, y, 1)
self.rapid_mode()
self._move_absolute(int(x), int(y))
def move_rel(self, dx, dy):
dx, dy = self.service.physical_to_device_length(dx, dy, 1)
self.rapid_mode()
x = self.native_x + dx
y = self.native_y + dy
self._move_absolute(int(x), int(y))
self.rapid_mode()
def home(self, *values):
"""
Send a home command to the device. In the case of Moshiboards this is merely a move to
0,0 in absolute position.
"""
adjust_x = self.service.home_x
adjust_y = self.service.home_y
try:
adjust_x = values[0]
adjust_y = values[1]
except IndexError:
pass
adjust_x, adjust_y = self.service.physical_to_device_position(
adjust_x, adjust_y, 1
)
self.rapid_mode()
self.speed = 40
self.program_mode(adjust_x, adjust_y, adjust_x, adjust_y)
self.rapid_mode()
self.native_x = adjust_x
self.native_y = adjust_y
def unlock_rail(self):
"""
Unlock the Rail or send a "FreeMotor" command.
"""
self.rapid_mode()
try:
self.service.controller.unlock_rail()
except AttributeError:
pass
def rapid_mode(self, *values):
"""
Ensure the driver is currently in a default state. If we are not in a default state the driver
should end the current program.
"""
if self.state == DRIVER_STATE_RAPID:
return
if self.pipe_channel:
self.pipe_channel("Rapid Mode")
if self.state == DRIVER_STATE_FINISH:
pass
elif self.state in (
DRIVER_STATE_PROGRAM,
DRIVER_STATE_MODECHANGE,
DRIVER_STATE_RASTER,
):
self.program.termination()
self.push_program()
self.state = DRIVER_STATE_RAPID
self.service.signal("driver;mode", self.state)
def finished_mode(self, *values):
"""
Ensure the driver is currently in a finished state. If we are not in a finished state the driver
should end the current program and return to rapid mode.
Finished is required between rasters since it's an absolute home.
"""
if self.state == DRIVER_STATE_FINISH:
return
if self.pipe_channel:
self.pipe_channel("Finished Mode")
if self.state in (DRIVER_STATE_PROGRAM, DRIVER_STATE_MODECHANGE):
self.rapid_mode()
if self.state == DRIVER_STATE_RASTER:
self.pipe_channel("Final Raster Home")
self.home()
self.state = DRIVER_STATE_FINISH
def program_mode(self, *values):
"""
Ensure the laser is currently in a program state. If it is not currently in a program state, we begin
one. If the driver is already in a program state, nothing further is needed.
"""
if self.state == DRIVER_STATE_PROGRAM:
return
if self.pipe_channel:
self.pipe_channel("Program Mode")
if self.state == DRIVER_STATE_RASTER:
self.finished_mode()
self.rapid_mode()
try:
offset_x = int(values[0])
except (ValueError, IndexError):
offset_x = 0
try:
offset_y = int(values[1])
except (ValueError, IndexError):
offset_y = 0
try:
move_x = int(values[2])
except (ValueError, IndexError):
move_x = 0
try:
move_y = int(values[3])
except (ValueError, IndexError):
move_y = 0
self.start_program_mode(offset_x, offset_y, move_x, move_y)
def start_program_mode(
self,
offset_x,
offset_y,
move_x=None,
move_y=None,
speed=None,
normal_speed=None,
):
if move_x is None:
move_x = offset_x
if move_y is None:
move_y = offset_y
if speed is None and self.speed is not None:
speed = int(self.speed)
if speed is None:
speed = 20
if normal_speed is None:
normal_speed = speed
# Normal speed is rapid. Passing same speed so PPI isn't crazy.
self.program.vector_speed(speed, normal_speed)
self.program.set_offset(0, offset_x, offset_y)
self.state = DRIVER_STATE_PROGRAM
self.service.signal("driver;mode", self.state)
self.program.move_abs(move_x, move_y)
self.native_x = move_x
self.native_y = move_y
def raster_mode(self, *values):
"""
Ensure the driver is currently in a raster program state. If it is not in a raster program state,
we enter the raster program state.
"""
if self.state == DRIVER_STATE_RASTER:
return
if self.pipe_channel:
self.pipe_channel("Raster Mode")
if self.state == DRIVER_STATE_PROGRAM:
self.finished_mode()
self.rapid_mode()
try:
offset_x = int(values[0])
except (ValueError, IndexError):
offset_x = 0
try:
offset_y = int(values[1])
except (ValueError, IndexError):
offset_y = 0
try:
move_x = int(values[2])
except (ValueError, IndexError):
move_x = 0
try:
move_y = int(values[3])
except (ValueError, IndexError):
move_y = 0
self.start_raster_mode(offset_x, offset_y, move_x, move_y)
def start_raster_mode(
self, offset_x, offset_y, move_x=None, move_y=None, speed=None
):
if move_x is None:
move_x = offset_x
if move_y is None:
move_y = offset_y
if speed is None and self.speed is not None:
speed = int(self.speed)
if speed is None:
speed = 160
self.program.raster_speed(speed)
self.program.set_offset(0, offset_x, offset_y)
self.state = DRIVER_STATE_RASTER
self.service.signal("driver;mode", self.state)
self.program.move_abs(move_x, move_y)
self.native_x = move_x
self.native_y = move_y
def set(self, attribute, value):
"""
Sets a laser parameter. This could be speed, power, wobble, number_of_unicorns, or any other
parameter for drivers yet to be written.
@param attribute:
@param value:
@return:
"""
if attribute == "power":
self._set_power(value)
if attribute == "ppi":
self._set_power(value)
if attribute == "pwm":
self._set_power(value)
if attribute == "overscan":
self._set_overscan(value)
if attribute == "speed":
self._set_speed(value)
if attribute == "step":
self._set_step(value)
def _set_power(self, power=1000.0):
self.power = power
if self.power > 1000.0:
self.power = 1000.0
if self.power <= 0:
self.power = 0.0
def _set_overscan(self, overscan=None):
self.overscan = overscan
def _set_speed(self, speed=None):
"""
Set the speed for the driver.
"""
if self.speed != speed:
self.speed = speed
if self.state in (DRIVER_STATE_PROGRAM, DRIVER_STATE_RASTER):
self.state = DRIVER_STATE_MODECHANGE
def _set_step(self, step_x=None, step_y=None):
"""
Set the raster step for the driver.
"""
if self.raster_step_x != step_x or self.raster_step_y != step_y:
self.raster_step_x = step_x
self.raster_step_y = step_y
if self.state in (DRIVER_STATE_PROGRAM, DRIVER_STATE_RASTER):
self.state = DRIVER_STATE_MODECHANGE
def set_position(self, x, y):
"""
This should set an offset position.
* Note: This may need to be replaced with something that has better concepts behind it. Currently, this is only
used in step-repeat.
@param x:
@param y:
@return:
"""
self.native_x = x
self.native_y = y
def wait(self, t):
"""
Wait asks that the work be stalled or the current process held for the time t in seconds. If wait_finished is
called first, this should pause the machine without current work, acting as a dwell.
@param t:
@return:
"""
time.sleep(float(t))
def wait_finish(self, *values):
"""
Wait finish should hold the calling thread until the current work has completed, or otherwise prevent any data
from being sent, returning True until that criterion is met.
@param values:
@return:
"""
self.hold = True
# self.temp_holds.append(lambda: len(self.output) != 0)
def function(self, function):
"""
This command asks that this function be executed at the appropriate time within the spooled cycle.
@param function:
@return:
"""
function()
def beep(self):
self.service("beep\n")
def console(self, value):
self.service(value)
def signal(self, signal, *args):
"""
This asks that this signal be broadcast.
@param signal:
@param args:
@return:
"""
self.service.signal(signal, *args)
def pause(self, *args):
"""
Asks that the laser be paused.
@param args:
@return:
"""
self.paused = True
def resume(self, *args):
"""
Asks that the laser be resumed.
To work this command should usually be put into the realtime work queue for the laser.
@param args:
@return:
"""
self.paused = False
def reset(self, *args):
"""
This command asks that this device be emergency stopped and reset. Usually this means that queued data in the
spooler should be deleted.
@param args:
@return:
"""
self.service.spooler.clear_queue()
self.rapid_mode()
try:
self.service.controller.estop()
except AttributeError:
pass
def status(self):
"""
Asks that this device status be updated.
@return:
"""
parts = list()
parts.append(f"x={self.native_x}")
parts.append(f"y={self.native_y}")
parts.append(f"speed={self.speed}")
parts.append(f"power={self.power}")
status = ";".join(parts)
self.service.signal("driver;status", status)
def push_program(self):
self.pipe_channel("Pushed program to output...")
if len(self.program):
self.service.controller.push_program(self.program)
self.program = MoshiBlob()
self.program.channel = self.pipe_channel
def plotplanner_process(self):
"""
Processes any data in the plot planner, getting all relevant (x, y, on) plot values, performing the cardinal
movements, and updating the laser state based on the settings of the cutcode.
@return:
"""
if self.plot_data is None:
return False
if self.hold:
return True
for x, y, on in self.plot_data:
if self.hold_work():
time.sleep(0.05)
continue
on = int(on)
if on > 1:
# Special Command.
if on & (
PLOT_RAPID | PLOT_JOG
): # Plot planner requests position change.
# self.rapid_jog(x, y)
self.native_x = x
self.native_y = y
if self.state != DRIVER_STATE_RAPID:
self._move_absolute(x, y)
continue
elif on & PLOT_FINISH: # Plot planner is ending.
self.finished_mode()
break
elif on & PLOT_START:
self._ensure_program_or_raster_mode(
self.preferred_offset_x,
self.preferred_offset_y,
self.native_x,
self.native_y,
)
elif on & PLOT_LEFT_UPPER:
self.preferred_offset_x = x
self.preferred_offset_y = y
elif on & PLOT_SETTING: # Plot planner settings have changed.
p_set = self.plot_planner.settings
s_set = self.settings
if p_set.power != s_set.power:
self._set_power(p_set.power)
if (
p_set.speed != s_set.speed
or p_set.raster_step_x != s_set.raster_step_x
or p_set.raster_step_y != s_set.raster_step_y
):
self._set_speed(p_set.speed)
self._set_step(p_set.raster_step_x, p_set.raster_step_y)
self.rapid_mode()
self.settings.update(p_set.settings)
continue
self._goto_absolute(x, y, on & 1)
self.plot_data = None
return False
def _ensure_program_or_raster_mode(self, x, y, x1=None, y1=None):
"""
Ensure the driver is in either program or raster mode.
"""
if self.state in (DRIVER_STATE_RASTER, DRIVER_STATE_PROGRAM):
return
if x1 is None:
x1 = x
if y1 is None:
y1 = y
if self.raster_step_x == 0 and self.raster_step_y == 0:
self.program_mode(x, y, x1, y1)
else:
if self.service.enable_raster:
self.raster_mode(x, y, x1, y1)
else:
self.program_mode(x, y, x1, y1)
def _goto_absolute(self, x, y, cut):
"""
Goto absolute position. Cut flags whether this should be with or without the laser.
"""
self._ensure_program_or_raster_mode(x, y)
old_current = self.service.current
if self.state == DRIVER_STATE_PROGRAM:
if cut:
self.program.cut_abs(x, y)
else:
self.program.move_abs(x, y)
else:
# DRIVER_STATE_RASTER
if x == self.native_x and y == self.native_y:
return
if cut:
if x == self.native_x:
self.program.cut_vertical_abs(y=y)
if y == self.native_y:
self.program.cut_horizontal_abs(x=x)
else:
if x == self.native_x:
self.program.move_vertical_abs(y=y)
if y == self.native_y:
self.program.move_horizontal_abs(x=x)
self.native_x = x
self.native_y = y
new_current = self.service.current
self.service.signal(
"driver;position",
(old_current[0], old_current[1], new_current[0], new_current[1]),
)
def _move_absolute(self, x, y):
"""
Move to a position x, y. This is an absolute position.
"""
old_current = self.service.current
self._ensure_program_or_raster_mode(x, y)
self.program.move_abs(x, y)
self.native_x = x
self.native_y = y
new_current = self.service.current
self.service.signal(
"driver;position",
(old_current[0], old_current[1], new_current[0], new_current[1]),
)
def laser_disable(self, *values):
self.laser_enabled = False
def laser_enable(self, *values):
self.laser_enabled = True
class MoshiController:
"""
The Moshiboard Controller takes data programs built by the MoshiDriver and sends them to the Moshiboard
according to established moshi protocols.
The output device is concerned with sending the moshiblobs and control events to the CH341 chip on the
Moshiboard. We use the same ch341 driver as the Lhystudios boards, giving us access to both the libusb
and windll drivers.
The protocol for sending rasters is as follows:
Check processing-state of board, seeking 205
Send Preamble.
Check processing-state of board, seeking 205
Send bulk data of moshiblob. No checks between packets.
Send Epilogue.
While Check processing-state is 207:
wait 0.2 seconds
Send Preamble
Send 0,0 offset 0,0 move.
Send Epilogue
Checks done before the Epilogue will have 205 state.
"""
def __init__(self, context, channel=None, *args, **kwargs):
self.context = context
self.state = STATE_UNKNOWN
self.is_shutdown = False
self._thread = None
self._buffer = (
bytearray()
) # Threadsafe buffered commands to be sent to controller.
self._programs = [] # Programs to execute.
self.context._buffer_size = 0
self._main_lock = threading.Lock()
self._status = [0] * 6
self._usb_state = -1
self._connection = None
self.max_attempts = 5
self.refuse_counts = 0
self.connection_errors = 0
self.count = 0
self.abort_waiting = False
name = self.context.label
self.pipe_channel = context.channel("%s/events" % name)
self.usb_log = context.channel("%s/usb" % name, buffer_size=500)
self.usb_send_channel = context.channel("%s/usb_send" % name)
self.recv_channel = context.channel("%s/recv" % name)
self.ch341 = context.open("module/ch341", log=self.usb_log)
self.usb_log.watch(lambda e: context.signal("pipe;usb_status", e))
def viewbuffer(self):
"""
Viewbuffer is used by the BufferView class; if such a value exists it provides a view of the
buffered data. Without it, the BufferView displays nothing. This is optional for any output
device.
"""
buffer = "Current Working Buffer: %s\n" % str(self._buffer)
for p in self._programs:
buffer += "%s\n" % str(p.data)
return buffer
def added(self, *args, **kwargs):
self.start()
def shutdown(self, *args, **kwargs):
if self._thread is not None:
self.is_shutdown = True
def __repr__(self):
return "MoshiController()"
def __len__(self):
"""Provides the length of the buffer of this device."""
return len(self._buffer) + sum(map(len, self._programs))
def realtime_read(self):
"""
The `a7xx` values used before the AC01 commands. Read preamble.
Also seen randomly 3.2 seconds apart. Maybe keep-alive.
@return:
"""
self.pipe_channel("Realtime: Read...")
self.realtime_pipe(swizzle_table[MOSHI_READ][0])
def realtime_prologue(self):
"""
Before a jump / program / turned on:
@return:
"""
self.pipe_channel("Realtime: Prologue")
self.realtime_pipe(swizzle_table[MOSHI_PROLOGUE][0])
def realtime_epilogue(self):
"""
Status 205
After a jump / program
Status 207
Status 205 Done.
@return:
"""
self.pipe_channel("Realtime: Epilogue")
self.realtime_pipe(swizzle_table[MOSHI_EPILOGUE][0])
def realtime_freemotor(self):
"""
Freemotor command
@return:
"""
self.pipe_channel("Realtime: FreeMotor")
self.realtime_pipe(swizzle_table[MOSHI_FREEMOTOR][0])
def realtime_laser(self):
"""
Laser Command Toggle.
@return:
"""
self.pipe_channel("Realtime: Laser Active")
self.realtime_pipe(swizzle_table[MOSHI_LASER][0])
def realtime_stop(self):
"""
Stop command (likely same as freemotor):
@return:
"""
self.pipe_channel("Realtime: Stop")
self.realtime_pipe(swizzle_table[MOSHI_ESTOP][0])
def realtime_pipe(self, data):
if self._connection is not None:
try:
self._connection.write_addr(data)
except ConnectionError:
self.pipe_channel("Connection error")
else:
self.pipe_channel("Not connected")
def open(self):
self.pipe_channel("open()")
if self._connection is None:
connection = self.ch341.connect(
driver_index=self.context.usb_index,
chipv=self.context.usb_version,
bus=self.context.usb_bus,
address=self.context.usb_address,
mock=self.context.mock,
)
self._connection = connection
if self.context.mock:
self._connection.mock_status = 205
self._connection.mock_finish = 207
else:
self._connection.open()
if self._connection is None:
raise ConnectionRefusedError("ch341 connect did not return a connection.")
def close(self):
self.pipe_channel("close()")
if self._connection is not None:
self._connection.close()
self._connection = None
else:
raise ConnectionError
def push_program(self, program):
self.pipe_channel("Pushed: %s" % str(program.data))
self._programs.append(program)
self.start()
def unlock_rail(self):
self.pipe_channel("Control Request: Unlock")
if self._main_lock.locked():
return
else:
self.realtime_freemotor()
def start(self):
"""
Controller state change to `Started`.
@return:
"""
if self._thread is None or not self._thread.is_alive():
self._thread = self.context.threaded(
self._thread_data_send,
thread_name="MoshiPipe(%s)" % self.context.path,
result=self.stop,
)
self.update_state(STATE_INITIALIZE)
def pause(self):
"""
Pause simply holds the controller from sending any additional packets.
If this state change is done from INITIALIZE it will start the processing.
Otherwise, it must be done from ACTIVE or IDLE.
"""
if self.state == STATE_INITIALIZE:
self.start()
self.update_state(STATE_PAUSE)
if self.state == STATE_ACTIVE or self.state == STATE_IDLE:
self.update_state(STATE_PAUSE)
def resume(self):
"""
Resume can only be called from PAUSE.
"""
if self.state == STATE_PAUSE:
self.update_state(STATE_ACTIVE)
def estop(self):
"""
Abort the current buffer and data queue.
"""
self._buffer = bytearray()
self._programs.clear()
self.context.signal("pipe;buffer", 0)
self.realtime_stop()
self.update_state(STATE_TERMINATE)
self.pipe_channel("Control Request: Stop")
def stop(self, *args):
"""
Start the shutdown of the local send thread.
"""
if self._thread is not None:
try:
self._thread.join() # Wait until stop completes before continuing.
except RuntimeError:
pass # Thread is current thread.
self._thread = None
def update_state(self, state):
"""
Update the local state for the output device
"""
if state == self.state:
return
self.state = state
if self.context is not None:
self.context.signal("pipe;thread", self.state)
def update_buffer(self):
"""
Notify listening processes that the buffer size of this output has changed.
"""
if self.context is not None:
self.context._buffer_size = len(self._buffer)
self.context.signal("pipe;buffer", self.context._buffer_size)
def update_packet(self, packet):
"""
Notify listening processes that the last sent packet has changed.
"""
if self.context is not None:
self.context.signal("pipe;packet_text", packet)
self.usb_send_channel(packet)
def _send_buffer(self):
"""
Send the current Moshiboard buffer
"""
self.pipe_channel("Sending Buffer...")
while len(self._buffer) != 0:
queue_processed = self.process_buffer()
self.refuse_counts = 0
if queue_processed:
# Packet was sent.
if self.state not in (
STATE_PAUSE,
STATE_BUSY,
STATE_ACTIVE,
STATE_TERMINATE,
):
self.update_state(STATE_ACTIVE)
self.count = 0
else:
# No packet could be sent.
if self.state not in (
STATE_PAUSE,
STATE_BUSY,
STATE_TERMINATE,
):
self.update_state(STATE_IDLE)
if self.count > 50:
self.count = 50
time.sleep(0.02 * self.count)
# will tick up to 1 second waits if there's never a queue.
self.count += 1
def _thread_data_send(self):
"""
Main threaded function to send data. While the controller is working the thread
will be doing work in this function.
"""
self.pipe_channel("Send Thread Start... %d" % len(self._programs))
self._main_lock.acquire(True)
self.count = 0
self.is_shutdown = False
while True:
self.pipe_channel("While Loop")
try:
if self.state == STATE_INITIALIZE:
# If we are initialized. Change that to active since we're running.
self.update_state(STATE_ACTIVE)
if self.is_shutdown:
break
if len(self._buffer) == 0 and len(self._programs) == 0:
self.pipe_channel("Nothing to process")
break # There is nothing to run.
if self._connection is None:
self.open()
# Stage 0: New Program send.
if len(self._buffer) == 0:
self.context.signal("pipe;running", True)
self.pipe_channel("New Program")
self.wait_until_accepting_packets()
self.realtime_prologue()
self._buffer = self._programs.pop(0).data
assert len(self._buffer) != 0
# Stage 1: Send Program.
self.context.signal("pipe;running", True)
self.pipe_channel("Sending Data... %d bytes" % len(self._buffer))
self._send_buffer()
self.update_status()
self.realtime_epilogue()
if self.is_shutdown:
break
# Stage 2: Wait for Program to Finish.
self.pipe_channel("Waiting for finish processing.")
if len(self._buffer) == 0:
self.wait_finished()
self.context.signal("pipe;running", False)
except ConnectionRefusedError:
if self.is_shutdown:
break
# The attempt refused the connection.
self.refuse_counts += 1
if self.refuse_counts >= 5:
self.context.signal("pipe;state", "STATE_FAILED_RETRYING")
self.context.signal("pipe;failing", self.refuse_counts)
self.context.signal("pipe;running", False)
time.sleep(3) # 3-second sleep on failed connection attempt.
continue
except ConnectionError:
# There was an error with the connection, close it and try again.
if self.is_shutdown:
break
self.connection_errors += 1
time.sleep(0.5)
try:
self.close()
except ConnectionError:
pass
continue
self.context.signal("pipe;running", False)
self._thread = None
self.is_shutdown = False
self.update_state(STATE_END)
self._main_lock.release()
self.pipe_channel("Send Thread Finished...")
def process_buffer(self):
"""
Attempts to process and send the next packet of the program from the buffer.
@return: queue process success.
"""
if len(self._buffer) > 0:
buffer = self._buffer
else:
return False
length = min(32, len(buffer))
packet = buffer[:length]
# Packet is prepared and ready to send. Open Channel.
self.send_packet(packet)
self.context.packet_count += 1
# Packet was processed. Remove that data.
self._buffer = self._buffer[length:]
self.update_buffer()
return True # A packet was prepped and sent correctly.
def send_packet(self, packet):
"""
Send packet to the CH341 connection.
"""
if self._connection is None:
raise ConnectionError
self._connection.write(packet)
self.update_packet(packet)
def update_status(self):
"""
Request a status update from the CH341 connection.
"""
if self._connection is None:
raise ConnectionError
self._status = self._connection.get_status()
if self.context is not None:
try:
self.context.signal(
"pipe;status",
self._status,
get_code_string_from_moshicode(self._status[1]),
)
except IndexError:
pass
self.recv_channel(str(self._status))
def wait_until_accepting_packets(self):
"""
Wait until the device can accept packets.
"""
i = 0
while self.state != STATE_TERMINATE:
self.update_status()
status = self._status[1]
if status == 0:
raise ConnectionError
if status == STATUS_ERROR:
raise ConnectionRefusedError
if status == STATUS_OK:
return
time.sleep(0.05)
i += 1
if self.abort_waiting:
self.abort_waiting = False
return # Wait abort was requested.
def wait_finished(self):
"""
Wait until the device has finished the current sending buffer.
"""
self.pipe_channel("Wait Finished")
i = 0
original_state = self.state
if self.state != STATE_PAUSE:
self.pause()
while True:
if self.state != STATE_WAIT:
self.update_state(STATE_WAIT)
self.update_status()
status = self._status[1]
if status == 0:
self.close()
self.open()
continue
if status == STATUS_OK:
break
if self.context is not None:
self.context.signal("pipe;wait", status, i)
i += 1
if self.abort_waiting:
self.abort_waiting = False
return # Wait abort was requested.
if status == STATUS_PROCESSING:
time.sleep(0.5) # Half a second between requests.
self.update_state(original_state)
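# Usage sketch (assumption): the driver hands finished MoshiBlob programs to this
# controller, which streams them from its own thread using the protocol described
# in the class docstring (prologue, 32-byte packets, epilogue, wait for status 205).
#
#     controller.push_program(blob)     # queue the program and start the send thread
#     pending = len(controller)         # bytes still pending across buffer and programs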
```
#### File: meerk40t/test/test_core_cutcode.py
```python
import random
import unittest
from PIL import Image, ImageDraw
from meerk40t.core.cutcode import CutCode, LineCut, QuadCut, RasterCut
from meerk40t.core.node.elem_image import ImageNode
from meerk40t.core.node.elem_path import PathNode
from meerk40t.core.node.op_cut import CutOpNode
from meerk40t.core.node.op_engrave import EngraveOpNode
from meerk40t.core.node.op_image import ImageOpNode
from meerk40t.core.node.op_raster import RasterOpNode
from meerk40t.svgelements import Path, Point, SVGImage, Matrix
class TestCutcode(unittest.TestCase):
def test_cutcode(self):
"""
Test intro to Cutcode.
:return:
"""
cutcode = CutCode()
settings = dict()
cutcode.append(LineCut(Point(0, 0), Point(100, 100), settings=settings))
cutcode.append(LineCut(Point(100, 100), Point(0, 0), settings=settings))
cutcode.append(LineCut(Point(50, -50), Point(100, -100), settings=settings))
cutcode.append(
QuadCut(Point(0, 0), Point(100, 100), Point(200, 0), settings=settings)
)
path = Path(*list(cutcode.as_elements()))
self.assertEqual(
path, "M 0,0 L 100,100 L 0,0 M 50,-50 L 100,-100 M 0,0 Q 100,100 200,0"
)
def test_cutcode_cut(self):
"""
Convert a Cut operation into Cutcode and back.
:return:
"""
initial = "M 0,0 L 100,100 L 0,0 M 50,-50 L 100,-100 M 0,0 Q 100,100 200,0"
path = Path(initial)
laserop = CutOpNode()
laserop.add_node(PathNode(path))
cutcode = CutCode(laserop.as_cutobjects())
path = list(cutcode.as_elements())[0]
self.assertEqual(path, initial)
def test_cutcode_engrave(self):
"""
Convert an Engrave operation into Cutcode and back.
:return:
"""
initial = "M 0,0 L 100,100 L 0,0 M 50,-50 L 100,-100 M 0,0 Q 100,100 200,0"
path = Path(initial)
laserop = EngraveOpNode()
laserop.add_node(PathNode(path))
cutcode = CutCode(laserop.as_cutobjects())
path = list(cutcode.as_elements())[0]
self.assertEqual(path, initial)
# def test_cutcode_raster(self):
# """
# Convert CutCode from Raster operation
#
# :return:
# """
# laserop = RasterOpNode()
# laserop.operation = "Raster"
#
# # Add Path
# initial = "M 0,0 L 100,100 L 0,0 M 50,-50 L 100,-100 M 0,0 Q 100,100 200,0"
# path = Path(initial)
# laserop.add_node(PathNode(path))
#
# # Add SVG Image
# image = Image.new("RGBA", (256, 256), (255, 255, 255, 0))
# draw = ImageDraw.Draw(image)
# draw.ellipse((50, 50, 150, 150), "white")
# draw.ellipse((100, 100, 105, 105), "black")
# image = image.convert("L")
# inode = ImageNode(image=image, matrix=Matrix(), dpi=1000)
# inode.step_x = 1
# inode.step_y = 1
# inode.process_image()
# laserop.add_node(inode)
#
# # raster_step is default to 0 and not set.
# laserop.raster_step_x = 2
# laserop.raster_step_y = 2
# cutcode = CutCode(laserop.as_cutobjects())
# self.assertEqual(len(cutcode), 1)
# rastercut = cutcode[0]
# self.assertTrue(isinstance(rastercut, RasterCut))
# self.assertEqual(rastercut.offset_x, 100)
# self.assertEqual(rastercut.offset_y, 100)
# image = rastercut.image
# self.assertTrue(isinstance(image, Image.Image))
# self.assertIn(image.mode, ("L", "1"))
# self.assertEqual(image.size, (3, 3))
# self.assertEqual(rastercut.path, "M 100,100 L 100,106 L 106,106 L 106,100 Z")
# def test_cutcode_raster_crosshatch(self):
# """
# Convert CutCode from Raster operation, crosshatched
#
# :return:
# """
# # Initialize with Raster Defaults, +crosshatch
# rasterop = RasterOpNode(raster_direction=4, dpi=500)
#
# # Add Path
# initial = "M 0,0 L 100,100 L 0,0 M 50,-50 L 100,-100 M 0,0 Q 100,100 200,0"
# path = Path(initial)
# rasterop.add_node(PathNode(path))
#
# # Add SVG Image
# image = Image.new("RGBA", (256, 256), (255, 255, 255, 0))
# draw = ImageDraw.Draw(image)
# draw.ellipse((50, 50, 150, 150), "white")
# draw.ellipse((100, 100, 105, 105), "black")
# inode = ImageNode(image=image, matrix=Matrix(), dpi=1000.0/3.0)
# inode.step_x = 3
# inode.step_y = 3
#
# rasterop.add_node(inode)
#
# for i in range(2): # Check for knockon.
# cutcode = CutCode(rasterop.as_cutobjects())
# self.assertEqual(len(cutcode), 2)
#
# rastercut0 = cutcode[0]
# self.assertTrue(isinstance(rastercut0, RasterCut))
# self.assertEqual(rastercut0.offset_x, 100)
# self.assertEqual(rastercut0.offset_y, 100)
# image0 = rastercut0.image
# self.assertTrue(isinstance(image0, Image.Image))
# self.assertIn(image0.mode, ("L", "1"))
# self.assertEqual(image0.size, (3, 3)) # default step value 2, 6/2
# self.assertEqual(
# rastercut0.path, "M 100,100 L 100,106 L 106,106 L 106,100 Z"
# )
#
# rastercut1 = cutcode[1]
# self.assertTrue(isinstance(rastercut1, RasterCut))
# self.assertEqual(rastercut1.offset_x, 100)
# self.assertEqual(rastercut1.offset_y, 100)
# image1 = rastercut1.image
# self.assertTrue(isinstance(image1, Image.Image))
# self.assertIn(image1.mode, ("L", "1"))
# self.assertEqual(image1.size, (3, 3)) # default step value 2, 6/2
# self.assertEqual(
# rastercut1.path, "M 100,100 L 100,106 L 106,106 L 106,100 Z"
# )
#
# self.assertIs(image0, image1)
def test_cutcode_image(self):
"""
Convert CutCode from Image operation
Test image-based crosshatched setting
:return:
"""
laserop = ImageOpNode()
# Add Path
initial = "M 0,0 L 100,100 L 0,0 M 50,-50 L 100,-100 M 0,0 Q 100,100 200,0"
path = Path(initial)
laserop.add_node(PathNode(path))
# Add SVG Image1
image = Image.new("RGBA", (256, 256), (255, 255, 255, 0))
draw = ImageDraw.Draw(image)
draw.ellipse((50, 50, 150, 150), "white")
draw.ellipse((100, 100, 105, 105), "black")
inode1 = ImageNode(image=image, matrix=Matrix(), dpi=1000.0 / 3.0)
inode1.step_x = 3
inode1.step_y = 3
inode1.process_image()
laserop.add_node(inode1)
# Add SVG Image2
image2 = Image.new("RGBA", (256, 256), (255, 255, 255, 0))
draw = ImageDraw.Draw(image2)
draw.ellipse((50, 50, 150, 150), "white")
draw.ellipse((80, 80, 120, 120), "black")
inode2 = ImageNode(image=image2, matrix=Matrix(), dpi=500, direction=4)
inode2.step_x = 2
inode2.step_y = 2
inode2.process_image()
laserop.add_node(inode2) # crosshatch
for i in range(2): # Check for knockon
cutcode = CutCode(laserop.as_cutobjects())
self.assertEqual(len(cutcode), 3)
rastercut = cutcode[0]
self.assertTrue(isinstance(rastercut, RasterCut))
self.assertEqual(rastercut.offset_x, 100)
self.assertEqual(rastercut.offset_y, 100)
image = rastercut.image
self.assertTrue(isinstance(image, Image.Image))
self.assertIn(image.mode, ("L", "1"))
self.assertEqual(image.size, (2, 2)) # step value 3, 6/3
self.assertEqual(
rastercut.path, "M 100,100 L 100,106 L 106,106 L 106,100 Z"
)
rastercut1 = cutcode[1]
self.assertTrue(isinstance(rastercut1, RasterCut))
self.assertEqual(rastercut1.offset_x, 80)
self.assertEqual(rastercut1.offset_y, 80)
image1 = rastercut1.image
self.assertTrue(isinstance(image1, Image.Image))
self.assertIn(image1.mode, ("L", "1"))
self.assertEqual(image1.size, (21, 21)) # default step value 2, 40/2 + 1
self.assertEqual(rastercut1.path, "M 80,80 L 80,122 L 122,122 L 122,80 Z")
rastercut2 = cutcode[2]
self.assertTrue(isinstance(rastercut2, RasterCut))
self.assertEqual(rastercut2.offset_x, 80)
self.assertEqual(rastercut2.offset_y, 80)
image2 = rastercut2.image
self.assertTrue(isinstance(image2, Image.Image))
self.assertIn(image2.mode, ("L", "1"))
self.assertEqual(image2.size, (21, 21)) # default step value 2, 40/2 + 1
self.assertEqual(rastercut2.path, "M 80,80 L 80,122 L 122,122 L 122,80 Z")
def test_cutcode_image_crosshatch(self):
"""
Convert CutCode from Image Operation.
Test ImageOp Crosshatch Setting
:return:
"""
laserop = ImageOpNode(raster_direction=4)
# Add Path
initial = "M 0,0 L 100,100 L 0,0 M 50,-50 L 100,-100 M 0,0 Q 100,100 200,0"
path = Path(initial)
laserop.add_node(PathNode(path))
# Add SVG Image1
image1 = Image.new("RGBA", (256, 256), (255, 255, 255, 0))
draw = ImageDraw.Draw(image1)
draw.ellipse((50, 50, 150, 150), "white")
draw.ellipse((100, 100, 105, 105), "black")
inode = ImageNode(image=image1, matrix=Matrix(), dpi=1000.0 / 3.0)
inode.step_x = 3
inode.step_y = 3
inode.process_image()
laserop.add_node(inode)
# Add SVG Image2
image2 = Image.new("RGBA", (256, 256), (255, 255, 255, 0))
draw = ImageDraw.Draw(image2)
draw.ellipse((50, 50, 150, 150), "white")
draw.ellipse((80, 80, 120, 120), "black")
inode = ImageNode(image=image2, matrix=Matrix(), dpi=500.0)
inode.step_x = 2
inode.step_y = 2
inode.process_image()
laserop.add_node(inode)
# Add SVG Image3
inode = ImageNode(image=image2, matrix=Matrix(), dpi=1000.0/3.0)
inode.step_x = 3
inode.step_y = 3
inode.process_image()
laserop.add_node(inode)
cutcode = CutCode(laserop.as_cutobjects())
self.assertEqual(len(cutcode), 6)
rastercut1_0 = cutcode[0]
self.assertTrue(isinstance(rastercut1_0, RasterCut))
self.assertEqual(rastercut1_0.offset_x, 100)
self.assertEqual(rastercut1_0.offset_y, 100)
image = rastercut1_0.image
self.assertTrue(isinstance(image, Image.Image))
self.assertIn(image.mode, ("L", "1"))
self.assertEqual(image.size, (2, 2)) # step value 3, 6/3
self.assertEqual(rastercut1_0.path, "M 100,100 L 100,106 L 106,106 L 106,100 Z")
rastercut1_1 = cutcode[1]
self.assertTrue(isinstance(rastercut1_1, RasterCut))
self.assertEqual(rastercut1_1.offset_x, 100)
self.assertEqual(rastercut1_1.offset_y, 100)
image = rastercut1_1.image
self.assertTrue(isinstance(image, Image.Image))
self.assertIn(image.mode, ("L", "1"))
self.assertEqual(image.size, (2, 2)) # step value 3, 6/3
self.assertEqual(rastercut1_1.path, "M 100,100 L 100,106 L 106,106 L 106,100 Z")
rastercut2_0 = cutcode[2]
self.assertTrue(isinstance(rastercut2_0, RasterCut))
self.assertEqual(rastercut2_0.offset_x, 80)
self.assertEqual(rastercut2_0.offset_y, 80)
image1 = rastercut2_0.image
self.assertTrue(isinstance(image1, Image.Image))
self.assertIn(image1.mode, ("L", "1"))
self.assertEqual(image1.size, (21, 21)) # default step value 2, 40/2 + 1
self.assertEqual(rastercut2_0.path, "M 80,80 L 80,122 L 122,122 L 122,80 Z")
rastercut2_1 = cutcode[3]
self.assertTrue(isinstance(rastercut2_1, RasterCut))
self.assertEqual(rastercut2_1.offset_x, 80)
self.assertEqual(rastercut2_1.offset_y, 80)
image2 = rastercut2_1.image
self.assertTrue(isinstance(image2, Image.Image))
self.assertIn(image2.mode, ("L", "1"))
self.assertEqual(image2.size, (21, 21)) # default step value 2, 40/2 + 1
self.assertEqual(rastercut2_1.path, "M 80,80 L 80,122 L 122,122 L 122,80 Z")
rastercut3_0 = cutcode[4]
self.assertTrue(isinstance(rastercut3_0, RasterCut))
self.assertEqual(rastercut3_0.offset_x, 80)
self.assertEqual(rastercut3_0.offset_y, 80)
image3 = rastercut3_0.image
self.assertTrue(isinstance(image3, Image.Image))
self.assertIn(image3.mode, ("L", "1"))
self.assertEqual(image3.size, (14, 14)) # default step value 3, ceil(40/3) + 1
self.assertEqual(rastercut3_0.path, "M 80,80 L 80,122 L 122,122 L 122,80 Z")
rastercut3_1 = cutcode[5]
self.assertTrue(isinstance(rastercut3_1, RasterCut))
self.assertEqual(rastercut3_1.offset_x, 80)
self.assertEqual(rastercut3_1.offset_y, 80)
image4 = rastercut3_1.image
self.assertTrue(isinstance(image4, Image.Image))
self.assertIn(image4.mode, ("L", "1"))
self.assertEqual(image4.size, (14, 14)) # default step value 3, ceil(40/3) + 1
self.assertEqual(rastercut3_1.path, "M 80,80 L 80,122 L 122,122 L 122,80 Z")
# def test_cutcode_image_nostep(self):
# """
# Convert CutCode from Image Operation
# Test default value without step.
# Reuse Checks for Knockon-Effect
#
# :return:
# """
# laserop = ImageOpNode()
#
# # Add Path
# initial = "M 0,0 L 100,100 L 0,0 M 50,-50 L 100,-100 M 0,0 Q 100,100 200,0"
# path = Path(initial)
# laserop.add_node(PathNode(path))
#
# # Add SVG Image1
# image = Image.new("RGBA", (256, 256), (255, 255, 255, 0))
# draw = ImageDraw.Draw(image)
# draw.ellipse((50, 50, 150, 150), "white")
# draw.ellipse((100, 100, 105, 105), "black")
# inode = ImageNode(image=image, matrix=Matrix(), dpi=1000)
# inode.step_x = 1
# inode.step_y = 1
# inode.process_image()
# laserop.add_node(inode)
#
# for i in range(4):
# cutcode = CutCode(laserop.as_cutobjects())
# self.assertEqual(len(cutcode), 1)
#
# rastercut = cutcode[0]
# self.assertTrue(isinstance(rastercut, RasterCut))
# self.assertEqual(rastercut.offset_x, 100)
# self.assertEqual(rastercut.offset_y, 100)
# image = rastercut.image
# self.assertTrue(isinstance(image, Image.Image))
# self.assertIn(image.mode, ("L", "1"))
# self.assertEqual(image.size, (6, 6)) # step value 1, 6/2
# self.assertEqual(
# rastercut.path, "M 100,100 L 100,106 L 106,106 L 106,100 Z"
# )
#
# laserop.raster_step_x = i # Raster_Step should be ignored, set for next loop
# laserop.raster_step_y = i # Raster_Step should be ignored, set for next loop
def test_cutcode_direction_flags(self):
"""
Test the direction flags for different cutcode objects when flagged normal vs. reversed.
@return:
"""
path = Path("M0,0")
for i in range(1000):
v = random.randint(0, 5)
if v == 0:
path.line(path.current_point.x, random.randint(0, 5000))
if v == 1:
path.line(random.randint(0, 5000), path.current_point.y)
if v == 2:
path.line(path.current_point.x, path.current_point.y)
else:
path.line((random.randint(0, 5000), random.randint(0, 5000)))
laserop = CutOpNode()
laserop.add_node(PathNode(path=path))
cutcode = CutCode(laserop.as_cutobjects())
for cut in cutcode.flat():
major = cut.major_axis()
x_dir = cut.x_dir()
y_dir = cut.y_dir()
cut.reverse()
cut.reverse()
cut.reverse()
ry_dir = cut.y_dir()
rx_dir = cut.x_dir()
self.assertEqual(major, cut.major_axis())
if major == 1:
self.assertNotEqual(y_dir, ry_dir)
else:
self.assertNotEqual(x_dir, rx_dir)
``` |
{
"source": "jpiron/scaraplate",
"score": 2
} |
#### File: scaraplate/tests/test_cli.py
```python
from click.testing import CliRunner
from scaraplate.__main__ import main
def test_help():
runner = CliRunner()
result = runner.invoke(main, ["--help"])
assert result.exit_code == 0
def test_rollup_help():
runner = CliRunner()
result = runner.invoke(main, ["rollup", "--help"])
assert result.exit_code == 0
``` |
{
"source": "jpitts/pretix-eth-payment-plugin",
"score": 2
} |
#### File: pretix-eth-payment-plugin/pretix_eth/payment.py
```python
import decimal
import json
import logging
import time
from collections import OrderedDict
import requests
from django import forms
from django.db import transaction as db_transaction
from django.core.exceptions import ImproperlyConfigured
from django.http import HttpRequest
from django.template.loader import get_template
from django.utils.functional import cached_property
from django.utils.translation import ugettext_lazy as _
from eth_utils import (
import_string,
to_bytes,
)
from requests import Session
from requests.exceptions import ConnectionError
from pretix.base.models import OrderPayment, Quota
from pretix.base.payment import BasePaymentProvider, PaymentException
from eth_utils import to_wei, from_wei
from .providers import (
TransactionProviderAPI,
TokenProviderAPI,
)
from .models import (
Transaction,
)
logger = logging.getLogger(__name__)
ETH_CHOICE = ('ETH', _('ETH'))
DAI_CHOICE = ('DAI', _('DAI'))
DEFAULT_TRANSACTION_PROVIDER = 'pretix_eth.providers.BlockscoutTransactionProvider'
DEFAULT_TOKEN_PROVIDER = 'pretix_eth.providers.BlockscoutTokenProvider'
class Ethereum(BasePaymentProvider):
identifier = 'ethereum'
verbose_name = _('Ethereum')
public_name = _('Ethereum')
@cached_property
def transaction_provider(self) -> TransactionProviderAPI:
transaction_provider_class = import_string(self.settings.get(
'TRANSACTION_PROVIDER',
'pretix_eth.providers.BlockscoutTransactionProvider',
))
return transaction_provider_class()
@cached_property
def token_provider(self) -> TokenProviderAPI:
token_provider_class = import_string(self.settings.get(
'TOKEN_PROVIDER',
'pretix_eth.providers.BlockscoutTokenProvider',
))
return token_provider_class()
@property
def settings_form_fields(self):
form_fields = OrderedDict(
list(super().settings_form_fields.items())
+ [
('ETH', forms.CharField(
label=_('Ethereum wallet address'),
help_text=_('Leave empty if you do not want to accept ethereum.'),
required=False
)),
('DAI', forms.CharField(
label=_('DAI wallet address'),
help_text=_('Leave empty if you do not want to accept DAI.'),
required=False
)),
('TRANSACTION_PROVIDER', forms.CharField(
label=_('Transaction Provider'),
help_text=_(
f'This determines how the application looks up '
f'transfers of Ether. Leave empty to use the default '
f'provider: {DEFAULT_TRANSACTION_PROVIDER}'
),
required=False
)),
('TOKEN_PROVIDER', forms.CharField(
label=_('Token Provider'),
help_text=_(
f'This determines how the application looks up token '
f'transfers. Leave empty to use the default provider: '
f'{DEFAULT_TOKEN_PROVIDER}'
),
required=False
)),
]
)
form_fields.move_to_end('ETH', last=True)
form_fields.move_to_end('DAI', last=True)
form_fields.move_to_end('TRANSACTION_PROVIDER', last=True)
form_fields.move_to_end('TOKEN_PROVIDER', last=True)
return form_fields
def is_allowed(self, request, **kwargs):
return bool(
(self.settings.DAI or self.settings.ETH) and super().is_allowed(request)
)
@property
def payment_form_fields(self):
if self.settings.ETH and self.settings.DAI:
currency_type_choices = (DAI_CHOICE, ETH_CHOICE)
elif self.settings.DAI:
currency_type_choices = (DAI_CHOICE,)
elif self.settings.ETH:
currency_type_choices = (ETH_CHOICE,)
else:
raise ImproperlyConfigured("Must have one of `ETH` or `DAI` enabled for payments")
form_fields = OrderedDict(
list(super().payment_form_fields.items())
+ [
('currency_type', forms.ChoiceField(
label=_('Payment currency'),
help_text=_('Select the currency you used for payment.'),
widget=forms.Select,
choices=currency_type_choices,
initial='ETH'
)),
('txn_hash', forms.CharField(
label=_('Transaction hash'),
help_text=_('Enter the hash of the transaction in which you paid with the selected currency.'), # noqa: E501
required=True,
)),
]
)
return form_fields
def checkout_confirm_render(self, request):
template = get_template('pretix_eth/checkout_payment_confirm.html')
ctx = {
'request': request,
'event': self.event,
'settings': self.settings,
'provider': self,
'txn_hash': request.session['payment_ethereum_txn_hash'],
'currency_type': request.session['payment_ethereum_currency_type'],
}
return template.render(ctx)
def checkout_prepare(self, request, total):
form = self.payment_form(request)
if form.is_valid():
request.session['payment_ethereum_txn_hash'] = form.cleaned_data['txn_hash']
request.session['payment_ethereum_currency_type'] = form.cleaned_data['currency_type'] # noqa: E501
self._get_rates_checkout(request, total['total'])
return True
return False
def payment_prepare(self, request: HttpRequest, payment: OrderPayment):
form = self.payment_form(request)
if form.is_valid():
request.session['payment_ethereum_txn_hash'] = form.cleaned_data['txn_hash']
request.session['payment_ethereum_currency_type'] = form.cleaned_data['currency_type'] # noqa: E501
self._get_rates(request, payment)
return True
return False
def payment_is_valid_session(self, request):
return all((
'payment_ethereum_txn_hash' in request.session,
'payment_ethereum_currency_type' in request.session,
'payment_ethereum_time' in request.session,
'payment_ethereum_amount' in request.session,
))
def execute_payment(self, request: HttpRequest, payment: OrderPayment):
txn_hash = request.session['payment_ethereum_txn_hash']
txn_hash_bytes = to_bytes(hexstr=txn_hash)
currency_type = request.session['payment_ethereum_currency_type']
payment_timestamp = request.session['payment_ethereum_time']
payment_amount = request.session['payment_ethereum_amount']
if Transaction.objects.filter(txn_hash=txn_hash_bytes).exists():
raise PaymentException(
f'Transaction with hash {txn_hash} already used for payment'
)
payment.info_data = {
'txn_hash': txn_hash,
'currency_type': currency_type,
'time': payment_timestamp,
'amount': payment_amount,
}
payment.save(update_fields=['info'])
if currency_type == 'ETH':
transaction = self.transaction_provider.get_transaction(txn_hash)
is_valid_payment = all((
transaction.success,
transaction.to == self.settings.ETH,
transaction.value >= payment_amount,
transaction.timestamp >= payment_timestamp,
))
elif currency_type == 'DAI':
transfer = self.token_provider.get_ERC20_transfer(txn_hash)
is_valid_payment = all((
transfer.success,
transfer.to == self.settings.DAI,
transfer.value >= payment_amount,
transfer.timestamp >= payment_timestamp,
))
else:
# unknown currency
raise ImproperlyConfigured(f"Unknown currency: {currency_type}")
if is_valid_payment:
with db_transaction.atomic():
try:
payment.confirm()
except Quota.QuotaExceededException as e:
raise PaymentException(str(e))
else:
Transaction.objects.create(txn_hash=txn_hash_bytes, order_payment=payment)
def _get_rates_from_api(self, total, currency):
try:
if currency == 'ETH':
rate = requests.get(f'https://api.bitfinex.com/v1/pubticker/eth{self.event.currency}') # noqa: E501
rate = rate.json()
final_price = to_wei((
total / decimal.Decimal(rate['last_price'])
).quantize(decimal.Decimal('1.00000')), 'ether')
elif currency == 'DAI':
url = 'https://pro-api.coinmarketcap.com/v1/cryptocurrency/quotes/latest'
parameters = {
'symbol': currency,
'convert': self.event.currency,
}
headers = {
'Accepts': 'application/json',
'X-CMC_PRO_API_KEY': '<KEY>',
}
session = Session()
session.headers.update(headers)
response = session.get(url, params=parameters)
data = json.loads(response.text)
final_price = (
total / decimal.Decimal(data['data'][currency]['quote'][self.event.currency]['price']) # noqa: E501
).quantize(decimal.Decimal('1.00'))
else:
raise ImproperlyConfigured("Unrecognized currency: {0}".format(self.event.currency))
return round(final_price, 2)
except ConnectionError:
logger.exception('Internal error occurred.')
raise PaymentException(
_('Please try again and get in touch with us if this problem persists.')
)
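# (Added note, illustrative only; the figures below are assumptions, not from
# the original file.) Example of the conversion performed by _get_rates_from_api:
# with a ticket total of 100.00 in the event currency and an ETH last_price of
# 2000.00, the quotient is Decimal('0.05000') and to_wei(0.05, 'ether') gives
# 50_000_000_000_000_000 wei, which is what later lands in the session as
# 'payment_ethereum_amount'. For DAI the price stays in token units, quantized
# to two decimal places.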
def _get_rates_checkout(self, request: HttpRequest, total):
final_price = self._get_rates_from_api(total, request.session['payment_ethereum_currency_type']) # noqa: E501
request.session['payment_ethereum_amount'] = final_price
request.session['payment_ethereum_time'] = int(time.time())
def _get_rates(self, request: HttpRequest, payment: OrderPayment):
final_price = self._get_rates_from_api(payment.amount, request.session['payment_ethereum_currency_type']) # noqa: E501
request.session['payment_ethereum_amount'] = final_price
request.session['payment_ethereum_time'] = int(time.time())
def payment_form_render(self, request: HttpRequest, total: decimal.Decimal):
# this ensures that the form will pre-populate the transaction hash into the form.
if 'txhash' in request.GET:
request.session['payment_ethereum_txn_hash'] = request.GET.get('txhash')
if 'currency' in request.GET:
request.session['payment_ethereum_currency_type'] = request.GET.get('currency')
form = self.payment_form(request)
template = get_template('pretix_eth/checkout_payment_form.html')
ctx = {
'request': request,
'form': form,
'ETH_per_ticket': from_wei(self._get_rates_from_api(total, 'ETH'), 'ether'),
'DAI_per_ticket': self._get_rates_from_api(total, 'DAI'),
'ETH_address': self.settings.get('ETH'),
'DAI_address': self.settings.get('DAI'),
}
return template.render(ctx)
def payment_pending_render(self, request: HttpRequest, payment: OrderPayment):
template = get_template('pretix_eth/pending.html')
if request.session['payment_ethereum_currency_type'] == 'ETH':
cur = self.settings.ETH
else:
cur = self.settings.DAI
ctx = {
'request': request,
'event': self.event,
'settings': self.settings,
'payment_info': cur,
'order': payment.order,
'provname': self.verbose_name,
'coin': payment.info_data['currency_type'],
'amount': payment.info_data['amount'],
}
return template.render(ctx)
def payment_control_render(self, request: HttpRequest, payment: OrderPayment):
template = get_template('pretix_eth/control.html')
ctx = {
'request': request,
'event': self.event,
'settings': self.settings,
'payment_info': payment.info_data,
'order': payment.order,
'provname': self.verbose_name,
}
return template.render(ctx)
abort_pending_allowed = True
def payment_refund_supported(self, payment: OrderPayment):
return False
def payment_partial_refund_supported(self, payment: OrderPayment):
return False
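# (Added configuration sketch; the dotted path below is hypothetical.) The
# TRANSACTION_PROVIDER / TOKEN_PROVIDER settings above are resolved with
# import_string(), so a deployment can swap in its own lookup backend, e.g.
#
#     TRANSACTION_PROVIDER = 'my_plugin.providers.EtherscanTransactionProvider'
#
# Any class whose get_transaction(txn_hash) / get_ERC20_transfer(txn_hash)
# returns an object with .success, .to, .value and .timestamp attributes will
# satisfy execute_payment() above.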
```
#### File: tests/core/test_provider_payment_execution.py
```python
import decimal
import time
import pytest
from django.test import RequestFactory
from django.utils import timezone
from django.contrib.sessions.backends.db import SessionStore
from eth_utils import to_hex
from pretix.base.models import Order, OrderPayment
from pretix.base.payment import PaymentException
from pretix_eth.providers import (
TokenProviderAPI,
Transfer,
TransactionProviderAPI,
Transaction,
)
ZERO_HASH = b'\x00' * 32
ZERO_ADDRESS = '0x0000000000000000000000000000000000000000'
ETH_ADDRESS = '0xeee0123400000000000000000000000000000000'
DAI_ADDRESS = '0xda10123400000000000000000000000000000000'
class FixtureTransactionProvider(TransactionProviderAPI):
def __init__(self, transaction):
self._transaction = transaction
def get_transaction(self, from_address):
return self._transaction
class FixtureTokenProvider(TokenProviderAPI):
def __init__(self, transfer):
self._transfer = transfer
def get_ERC20_transfer(self, from_address):
return self._transfer
@pytest.fixture
def order_and_payment(transactional_db, event):
order = Order.objects.create(
event=event,
email='<EMAIL>',
locale='en_US',
datetime=timezone.now(),
total=decimal.Decimal('100.00'),
status=Order.STATUS_PENDING,
)
payment = OrderPayment.objects.create(
order=order,
amount='100.00',
state=OrderPayment.PAYMENT_STATE_PENDING,
)
return order, payment
@pytest.mark.django_db
def test_provider_execute_successful_payment_in_ETH(provider, order_and_payment):
order, payment = order_and_payment
# setup a transaction provider which returns a fixed transaction
transaction = Transaction(
hash=ZERO_HASH,
sender=ZERO_ADDRESS,
success=True,
to=ETH_ADDRESS,
timestamp=int(time.time()),
value=100,
)
tx_provider = FixtureTransactionProvider(transaction)
provider.transaction_provider = tx_provider
assert provider.transaction_provider is tx_provider
assert order.status == order.STATUS_PENDING
assert payment.state == payment.PAYMENT_STATE_PENDING
provider.settings.set('ETH', ETH_ADDRESS)
factory = RequestFactory()
session = SessionStore()
session.create()
# setup all the necessary session data for the payment to be valid
session['payment_ethereum_txn_hash'] = to_hex(ZERO_HASH)
session['payment_ethereum_currency_type'] = 'ETH'
session['payment_ethereum_time'] = int(time.time()) - 10
session['payment_ethereum_amount'] = 100
request = factory.get('/checkout')
request.event = provider.event
request.session = session
provider.execute_payment(request, payment)
order.refresh_from_db()
payment.refresh_from_db()
assert order.status == order.STATUS_PAID
assert payment.state == payment.PAYMENT_STATE_CONFIRMED
@pytest.mark.django_db
def test_provider_execute_successful_payment_in_DAI(provider, order_and_payment):
order, payment = order_and_payment
# setup a transfer provider which returns a fixed transfer
transfer = Transfer(
hash=ZERO_HASH,
sender=ZERO_ADDRESS,
success=True,
to=DAI_ADDRESS,
timestamp=int(time.time()),
value=100,
)
token_provider = FixtureTokenProvider(transfer)
provider.token_provider = token_provider
assert provider.token_provider is token_provider
assert order.status == order.STATUS_PENDING
assert payment.state == payment.PAYMENT_STATE_PENDING
provider.settings.set('DAI', DAI_ADDRESS)
factory = RequestFactory()
session = SessionStore()
session.create()
# setup all the necessary session data for the payment to be valid
session['payment_ethereum_txn_hash'] = to_hex(ZERO_HASH)
session['payment_ethereum_currency_type'] = 'DAI'
session['payment_ethereum_time'] = int(time.time()) - 10
session['payment_ethereum_amount'] = 100
request = factory.get('/checkout')
request.event = provider.event
request.session = session
provider.execute_payment(request, payment)
order.refresh_from_db()
payment.refresh_from_db()
assert order.status == order.STATUS_PAID
assert payment.state == payment.PAYMENT_STATE_CONFIRMED
@pytest.mark.django_db
def test_cannot_replay_same_transaction(provider, order_and_payment):
order, payment = order_and_payment
# setup a transaction provider which returns a fixed transaction
transaction = Transaction(
hash=ZERO_HASH,
sender=ZERO_ADDRESS,
success=True,
to=ETH_ADDRESS,
timestamp=int(time.time()),
value=100,
)
tx_provider = FixtureTransactionProvider(transaction)
provider.transaction_provider = tx_provider
assert provider.transaction_provider is tx_provider
assert order.status == order.STATUS_PENDING
assert payment.state == payment.PAYMENT_STATE_PENDING
provider.settings.set('ETH', ETH_ADDRESS)
factory = RequestFactory()
session = SessionStore()
session.create()
# setup all the necessary session data for the payment to be valid
session['payment_ethereum_txn_hash'] = to_hex(ZERO_HASH)
session['payment_ethereum_currency_type'] = 'ETH'
session['payment_ethereum_time'] = int(time.time()) - 10
session['payment_ethereum_amount'] = 100
request = factory.get('/checkout')
request.event = provider.event
request.session = session
provider.execute_payment(request, payment)
order.refresh_from_db()
payment.refresh_from_db()
with pytest.raises(
PaymentException,
match=r'Transaction with hash .* already used for payment',
):
provider.execute_payment(request, payment)
```
#### File: tests/integration/test_token_provider.py
```python
from pretix_eth.providers import BlockscoutTokenProvider
from eth_utils import (
is_boolean,
is_checksum_address,
is_bytes,
is_integer,
)
MAINNET_DAI_TXN_HASH = '0x4122bca6b9304170d02178c616185594b05ca1562e8893afa434f4df8d600dfa'
def test_blockscout_transaction_provider():
provider = BlockscoutTokenProvider()
tx = provider.get_ERC20_transfer(MAINNET_DAI_TXN_HASH)
assert is_bytes(tx.hash) and len(tx.hash)
assert is_checksum_address(tx.sender)
assert is_checksum_address(tx.to)
assert is_integer(tx.value)
assert is_integer(tx.timestamp)
assert is_boolean(tx.success)
``` |
{
"source": "Jpiv1212/PersonalProjects",
"score": 3
} |
#### File: PersonalProjects/Mineshaft Explorer/generate3.py
```python
import random
def generate(columns, rows, often, spawnx, spawny):
level = [[0 for i in range(columns)] for i in range(rows)]
level[spawny][spawnx] = 1
elevator = [spawny, spawnx]
while elevator == [spawny, spawnx]:
elevator = [random.randint(0, rows-1), random.randint(0, columns-1)]
level[elevator[0]][elevator[1]] = 16
x = spawnx
y = spawny
way = random.randint(1, 4)
tried = []
one = []
while True:
try:
if level[y][x+1] == 16:
level[y][x+1] = 2
except: pass
if level[y][x-1] == 16 and x-1 >= 0:
level[y][x-1] = 2
try:
if level[y+1][x] == 16:
level[y+1][x] = 2
except: pass
if level[y-1][x] == 16 and y-1 >= 0:
level[y-1][x] = 2
change = False
if random.randint(1, often) == 1:
change = "eggs"
if way == 1:
try:
if level[y][x+1] == 0:
good = 0
try:
if level[y][x+2] in [0, 16]:
good += 1
except: good += 1
try:
if level[y+1][x+1] in [0, 16]:
good += 1
except: good += 1
try:
if y-1 >= 0:
if level[y-1][x+1] in [0, 16]:
good += 1
else: good += 1
except: good += 1
if good < 3:
change = True
else:
x += 1
level[y][x] = 1
tried = []
elif level[y][x+1] == 1:
one = [y, x+1]
change = True
else:
change = True
except: change = True
elif way == 2:
if level[y][x-1] == 0 and x-1 >= 0:
good = 0
if x-2 >= 0:
if level[y][x-2] in [0, 16]:
good += 1
else: good += 1
try:
if level[y+1][x-1] in [0, 16]:
good += 1
except: good += 1
if y-1 >= 0:
if level[y-1][x-1] in [0, 16]:
good += 1
else: good += 1
if good < 3:
change = True
else:
x -= 1
level[y][x] = 1
tried = []
elif level[y][x-1] == 1 and x-1 >= 0:
one = [y, x-1]
change = True
else:
change = True
elif way == 3:
try:
if level[y+1][x] == 0:
good = 0
try:
if level[y+2][x] in [0, 16]:
good += 1
except: good += 1
try:
if level[y+1][x+1] in [0, 16]:
good += 1
except: good += 1
try:
if x-1 >= 0:
if level[y+1][x-1] in [0, 16]:
good += 1
else:
good += 1
except: good += 1
if good < 3:
change = True
else:
y += 1
level[y][x] = 1
tried = []
elif level[y+1][x] == 1:
one = [y+1, x]
change = True
else:
change = True
except: change = True
elif way == 4:
if level[y-1][x] == 0 and y-1 >= 0:
good = 0
if y-2 >= 0:
if level[y-2][x] in [0, 16]:
good += 1
else: good += 1
try:
if level[y-1][x+1] in [0, 16]:
good += 1
except: good += 1
try:
if x-1 >= 0:
if level[y-1][x-1] in [0, 16]:
good += 1
else: good += 1
except: good += 1
if good < 3:
change = True
else:
y -= 1
level[y][x] = 1
tried = []
elif level[y-1][x] == 1 and y-1 >= 0:
one = [y-1, x]
change = True
else:
change = True
if change:
if change == True:
tried += [way]
if len(tried) == 4:
level[y][x] = 2
if one not in [[y, x+1], [y, x-1], [y+1, x], [y-1, x]]:
return changef(level, columns, rows, elevator)
y,x = one[0],one[1]
tried = []
else:
way = random.choice([x for x in [1, 2, 3, 4] if x not in tried])
change = False
def changef(level, columns, rows, elevator):
level[elevator[0]][elevator[1]] = 16
for i,row in enumerate(level):
for j,column in enumerate(level[i]):
if level[i][j] not in [0, 16]:
dirs = 0
if j+1 < columns:
if level[i][j+1]:
dirs += 1
if j-1 >= 0:
if level[i][j-1]:
dirs += 2
if i+1 < rows:
if level[i+1][j]:
dirs += 4
if i-1 >= 0:
if level[i-1][j]:
dirs += 8
level[i][j] = dirs
return level, elevator
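# (Added explanatory sketch; not part of the original module.) Roughly:
# generate() carves a random walk of corridors through a columns x rows grid
# starting from (spawnx, spawny), drops a single elevator tile (value 16)
# somewhere else, marks dead ends as 2 and backtracks to an earlier corridor
# cell when boxed in, until the walk can no longer grow. changef() then
# re-encodes each corridor cell as a 4-bit bitmask of open neighbours
# (right=1, left=2, down=4, up=8) so the renderer can pick the matching plank
# sprite. A hypothetical call with the game's 31x31 grid:
#
#     level, elevator = generate(31, 31, 1, 30, 0)
#     # level[y][x] is 0 (rock), 1-15 (corridor bitmask) or 16 (elevator)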
```
#### File: PersonalProjects/Mineshaft Explorer/Mineshaft Explorer.py
```python
from livewires import games, color
from generate4 import *
import pygame,math
screen_width = 620
screen_height = 620
tilesize = 20
games.init(screen_width = screen_width,screen_height = screen_height, fps=50)
shaft123 = games.load_image("hidden\\shaft.png", transparent=False)
tiles = [games.load_image("hidden\\plank1.png",transparent=False),
games.load_image("hidden\\plank2.png",transparent=False),
games.load_image("hidden\\plank3.png",transparent=False),
games.load_image("hidden\\plank4.png",transparent=False),
games.load_image("hidden\\plank5.png",transparent=False)]
tiles = [pygame.transform.rotate(tiles[2],180), #l end 1
tiles[2], #r end 2
tiles[0], #horizontal 3
pygame.transform.rotate(tiles[2],90), #t end 4
tiles[1], #tl corner 5
pygame.transform.rotate(tiles[1],270), #tr corner 6
pygame.transform.rotate(tiles[3],270), #3t 7
pygame.transform.rotate(tiles[2],270), #b end 8
pygame.transform.rotate(tiles[1],90), #bl corner 9
pygame.transform.rotate(tiles[1],180), #br corner 10
pygame.transform.rotate(tiles[3],90), #3b 11
pygame.transform.rotate(tiles[0],90), #vertical 12
tiles[3], #3l 13
pygame.transform.rotate(tiles[3],180), #3r 14
tiles[4], #4 way 15
games.load_image("hidden\\elevator.png",transparent=False)] #elevator 16
class Start(games.Sprite):
def __init__(self,x,y,score):
super(Start, self).__init__(x=-283746,
y=-239847,
image=tiles[7])
self.px = x
self.py = y
if score >= 0:
sctext = "Total Score: "+str(score)
else:
sctext = "Welcome to Mineshaft Explorer"
games.screen.background = pygame.Surface((screen_width,screen_height))
self.presstxt = games.Text(value="Press Enter to Start",x=screen_width/2,y=screen_height/2+20,size=40,color=color.red)
games.screen.add(self.presstxt)
self.score = games.Text(value=sctext,x=screen_width/2,y=screen_height/2-20,size=40,color=color.red)
games.screen.add(self.score)
def update(self):
if games.keyboard.is_pressed(games.K_RETURN):
self.presstxt.destroy()
self.score.destroy()
games.screen.add(Player(self.px,self.py))
self.destroy()
return False
class Player(games.Sprite):
def __init__(self,x,y):
self.start()
super(Player, self).__init__(left=x*tilesize,
top=y*tilesize,
image=games.load_image("hidden\\player.png")
)
self.elevc = [[0,0],[0,0],[0,0]]
level,self.elevc[0] = generate(screen_width//tilesize,screen_height//tilesize,1,x,y)
self.spritelist = []
self.xc = x
self.yc = y
self.default = self.image
self.movingdir = None
self.moveleft = 0
self.full = [(x,y) for x in range(screen_width//tilesize) for y in range(screen_height//tilesize)]
level2,self.elevc[1] = generate(screen_width//tilesize,screen_height//tilesize,2,self.elevc[0][1],self.elevc[0][0])
level3,self.elevc[2] = generate(screen_width//tilesize,screen_height//tilesize,3,self.elevc[1][1],self.elevc[1][0])
back = pygame.Surface((screen_width, screen_height))
self.val=4
shaft = pygame.transform.scale(games.load_image("hidden\\shaft.png", transparent=False), (screen_width, screen_height))
a=255*.6
shaft.fill((a,a,a), special_flags=pygame.BLEND_RGB_MULT)
back.blit(shaft, (0,0))
self.nomove = list(filter(lambda x: level[x[1]][x[0]]==0,self.full))
self.levels = [level, level2, level3]
self.levelsp = [1,.6,.6**2]
self.levelshow(level3, .6**2, back)
self.levelshow(level2, .6, back)
self.levelshow(level, 1, back)
games.screen.set_background(back)
self.advancing=0
self.currlevel = games.Text(value="Level: "+str(self.val-3),left=5,top=5,color=color.red,size=25,is_collideable=False)
games.screen.add(self.currlevel)
self.score=0
self.coinlist = []
self.tscore = games.Text(value="Score: "+str(self.score),left=5,top=30,color=color.red,size=25,is_collideable=False)
games.screen.add(self.tscore)
self.spritelist.append(self.tscore)
self.spritelist.append(self.currlevel)
self.make_coins()
self.make_enemies()
self.cooldown = 50
def levelshow(self, lvl, perc, scr):
size = tilesize*perc
startx = screen_width*(1-perc)/2
starty = screen_height*(1-perc)/2
a = 255*perc
tmptiles = []
for i in tiles:
tile = pygame.transform.scale(i, (math.ceil(size), math.ceil(size)))
tile.fill((a,a,a), special_flags=pygame.BLEND_RGB_MULT)
tmptiles.append(tile)
for j in range(len(lvl)):
for i in range(len(lvl[j])):
num = lvl[j][i]-1
if num >= 0: scr.blit(tmptiles[lvl[j][i]-1],(int(startx+size*i),int(starty+size*j)))
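# (Added note.) levelshow() draws one mineshaft layer onto the background: the
# tile images are scaled by `perc`, centred on screen, and darkened by
# multiplying their RGB channels by `perc` (BLEND_RGB_MULT), so deeper layers
# render smaller and dimmer, giving the looking-down-the-shaft effect used when
# advancing between levels.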
def make_coins(self):
image = games.load_image("hidden\\coin.png")
back = games.screen.background
for row in range(len(self.levels[0])):
for col in range(len(self.levels[0][row])):
if self.levels[0][row][col] not in [0, 16] and (col,row) != (self.xc,self.yc):
if random.randint(1, round(1+2000/(self.val+7))) == 1:
self.coinlist.append([col,row])
back.blit(image,(col*tilesize,row*tilesize))
games.screen.set_background(back)
def make_enemies(self):
for row in range(len(self.levels[0])):
for col in range(len(self.levels[0][row])):
if self.levels[0][row][col] not in [0, 16] and (col,row) != (self.xc,self.yc):
if random.randint(1, round(1+10000/(self.val+7))) == 1:
enemy = Enemy(col,row,self.levels[0],self)
self.spritelist.append(enemy)
games.screen.add(enemy)
def checktile(self):
if (self.xc,self.yc) in self.nomove or self.xc<0 or self.xc>=screen_width//tilesize or self.yc<0 or self.yc>=screen_height//tilesize:
return False
return True
def add_score(self, value):
self.score+=value
self.tscore.value = "Score: "+str(self.score)
self.tscore.left = 5
def advance(self):
for i in self.spritelist:
if type(i) in [Bullet, Enemy]:
self.spritelist.remove(i)
i.destroy()
self.advancing-=1
mult = (1/.6)**(1/80)
amult = 1/mult
back = pygame.Surface((screen_width, screen_height))
if self.advancing == 0:
sizex=screen_width
sizey=screen_height
startx=0
starty=0
a=255*.6
else:
a = 255*.6*(mult**(80-self.advancing))
sizex = screen_width*(mult**(80-self.advancing))
sizey = screen_height*(mult**(80-self.advancing))
startx = (screen_width-sizex)/2
starty = (screen_height-sizey)/2
#sizex=400*(1/.6)
#sizey=sizex
#shaft = games.load_image("hidden\\shaft.png", transparent=False)
shaft = pygame.transform.scale(shaft123, (math.floor(sizex), math.floor(sizey)))
shaft.fill((a,a,a), special_flags=pygame.BLEND_RGB_MULT)
back.blit(shaft,(int(startx),int(starty)))
if self.advancing == 79:
self.levelsp.pop(0)
self.levels.pop(0)
self.elevc.pop(0)
self.add_score(100*(self.val-3))
if self.advancing == 0:
self.levelsp = [1*amult,.6*amult,.6**2*amult]
l,elev = generate(screen_width//tilesize,screen_height//tilesize,self.val,self.elevc[-1][1],self.elevc[-1][0])
self.val+=1
self.currlevel.value = "Level: "+str(self.val-3)
self.currlevel.left = 5
self.levels.append(l)
self.elevc.append(elev)
for n,i in enumerate(self.levels[::-1]):
self.levelsp[-n-1] *= mult
self.levelshow(i, self.levelsp[-n-1], back)
games.screen.set_background(back)
self.nomove = list(filter(lambda x: self.levels[0][x[1]][x[0]]==0,self.full))
if self.advancing == 0:
self.make_coins()
self.make_enemies()
def update(self):
for sprite in self.overlapping_sprites:
if type(sprite) == Enemy:
self.lose()
return False
self.currlevel.elevate()
self.tscore.elevate()
oldc = [self.xc,self.yc]
if self.moveleft:
if self.movingdir=="up":
self.y-=2
self.moveleft-=2
elif self.movingdir=="right":
self.x+=2
self.moveleft-=2
elif self.movingdir=="down":
self.y+=2
self.moveleft-=2
elif self.movingdir=="left":
self.x-=2
self.moveleft-=2
if self.moveleft <= 0:
self.moveleft = False
elif self.advancing:
self.image = tiles[15]
self.advance()
elif oldc in self.coinlist:
self.coinlist.remove(oldc)
self.add_score(50)
image = tiles[self.levels[0][oldc[1]][oldc[0]]-1]
back = games.screen.background
back.blit(image,(oldc[0]*tilesize,oldc[1]*tilesize))
games.screen.set_background(back)
else:
if oldc == self.elevc[0][::-1] and games.keyboard.is_pressed(games.K_SPACE):
self.advancing=80
self.image=self.default
self.levelsp = [1,.6,.6**2]
if games.keyboard.is_pressed(games.K_w) and not self.moveleft:
self.movingdir = "up"
self.moveleft = tilesize
self.yc-=1
if not self.checktile():
self.moveleft = 0
self.xc,self.yc=oldc
if games.keyboard.is_pressed(games.K_d) and not self.moveleft:
self.movingdir = "right"
self.moveleft = tilesize
self.xc+=1
if not self.checktile():
self.moveleft = 0
self.xc,self.yc=oldc
if games.keyboard.is_pressed(games.K_s) and not self.moveleft:
self.movingdir = "down"
self.moveleft = tilesize
self.yc+=1
if not self.checktile():
self.moveleft = 0
self.xc,self.yc=oldc
if games.keyboard.is_pressed(games.K_a) and not self.moveleft:
self.movingdir = "left"
self.moveleft = tilesize
self.xc-=1
if not self.checktile():
self.moveleft = 0
self.xc,self.yc=oldc
if self.cooldown <= 0:
if games.keyboard.is_pressed(games.K_LEFT):
bullet = Bullet(self.xc,self.yc,"left",self)
self.spritelist.append(bullet)
games.screen.add(bullet)
self.cooldown=50
elif games.keyboard.is_pressed(games.K_UP):
bullet = Bullet(self.xc,self.yc,"up",self)
self.spritelist.append(bullet)
games.screen.add(bullet)
self.cooldown=50
elif games.keyboard.is_pressed(games.K_RIGHT):
bullet = Bullet(self.xc,self.yc,"right",self)
self.spritelist.append(bullet)
games.screen.add(bullet)
self.cooldown=50
elif games.keyboard.is_pressed(games.K_DOWN):
bullet = Bullet(self.xc,self.yc,"down",self)
self.spritelist.append(bullet)
games.screen.add(bullet)
self.cooldown=50
self.cooldown-=1
def lose(self):
for sprite in self.spritelist:
sprite.destroy()
games.screen.add(Start(screen_width//tilesize-1,0,self.score))
self.destroy()
class Bullet(games.Sprite):
default = games.load_image("hidden\\bullet.png")
def __init__(self,x,y,movedir,player):
self.movingdir = movedir
self.player=player
self.xc=x
self.yc=y
self.moveleft=0
angles = {"left":0,
"down":90,
"right":180,
"up":270}
super(Bullet, self).__init__(left=x*tilesize,
top=y*tilesize,
image=pygame.transform.rotate(Bullet.default,angles[movedir]))
def checktile(self):
if (self.xc,self.yc) in self.player.nomove or self.xc<0 or self.xc>=screen_width//tilesize or self.yc<0 or self.yc>=screen_height//tilesize:
return False
return True
def update(self):
if not self.checktile():
self.player.spritelist.remove(self)
self.destroy()
return False
for i in self.overlapping_sprites:
if type(i) == Enemy:
i.hp-=1
self.player.spritelist.remove(self)
self.destroy()
return False
if self.moveleft:
if self.movingdir=="up":
self.y-=4
self.moveleft-=4
elif self.movingdir=="right":
self.x+=4
self.moveleft-=4
elif self.movingdir=="down":
self.y+=4
self.moveleft-=4
elif self.movingdir=="left":
self.x-=4
self.moveleft-=4
if self.moveleft <= 0:
self.moveleft = False
else:
if self.movingdir=="up":
self.yc-=1
elif self.movingdir == "left":
self.xc-=1
elif self.movingdir == "down":
self.yc+=1
else:
self.xc+=1
self.moveleft=tilesize
class Enemy(games.Sprite):
default = games.load_image("hidden\\enemy.png")
def __init__(self, x, y, level, player):
super(Enemy, self).__init__(left=x*tilesize,
top=y*tilesize,
image=Enemy.default)
self.xc = x
self.yc = y
self.player = player
self.level = level
self.moveleft = 0
self.movingdir = None
self.hp=3
def checktile(self):
if (self.xc,self.yc) in self.player.nomove or self.xc<0 or self.xc>=screen_width//tilesize or self.yc<0 or self.yc>=screen_height//tilesize:
return False
return True
def update(self):
if self.hp<=0:
self.player.spritelist.remove(self)
self.player.add_score(100)
self.destroy()
return False
oldc = [self.xc,self.yc]
if self.moveleft:
if self.movingdir=="up":
self.y-=2
self.moveleft-=2
elif self.movingdir=="right":
self.x+=2
self.moveleft-=2
self.image = Enemy.default
elif self.movingdir=="down":
self.y+=2
self.moveleft-=2
elif self.movingdir=="left":
self.x-=2
self.moveleft-=2
self.image = pygame.transform.flip(Enemy.default,True,False)
else:
possible = ["left","right","up","down"]
if self.player.xc == self.xc:
try: ttiles = [self.level[n][self.xc] for n in range(self.player.yc,self.yc,int((self.yc-self.player.yc)/abs(self.yc-self.player.yc)))]
except: ttiles = [0]
if 0 not in ttiles:
if self.yc>self.player.yc:
self.movingdir="up"
self.yc-=1
else:
self.movingdir="down"
self.yc+=1
self.moveleft=tilesize
if not self.checktile():
self.moveleft = 0
self.xc,self.yc=oldc
else:
self.movingdir=random.choice(["left","right","up","down"])
if self.movingdir=="left":
self.xc-=1
elif self.movingdir=="right":
self.xc+=1
elif self.movingdir=="up":
self.yc-=1
else:
self.yc+=1
self.moveleft=tilesize
if not self.checktile():
self.moveleft = 0
self.xc,self.yc=oldc
elif self.player.yc == self.yc:
ttiles = [self.level[self.yc][n] for n in range(self.player.xc,self.xc,int((self.xc-self.player.xc)/abs(self.xc-self.player.xc)))]
if 0 not in ttiles:
if self.xc>self.player.xc:
self.movingdir="left"
self.xc-=1
else:
self.movingdir="right"
self.xc+=1
self.moveleft=tilesize
if not self.checktile():
self.moveleft = 0
self.xc,self.yc=oldc
else:
self.movingdir=random.choice(["left","right","up","down"])
if self.movingdir=="left":
self.xc-=1
elif self.movingdir=="right":
self.xc+=1
elif self.movingdir=="up":
self.yc-=1
else:
self.yc+=1
self.moveleft=tilesize
if not self.checktile():
self.moveleft = 0
self.xc,self.yc=oldc
else:
self.movingdir=random.choice(["left","right","up","down"])
if self.movingdir=="left":
self.xc-=1
elif self.movingdir=="right":
self.xc+=1
elif self.movingdir=="up":
self.yc-=1
else:
self.yc+=1
self.moveleft=tilesize
if not self.checktile():
self.moveleft = 0
self.xc,self.yc=oldc
def main():
games.screen.add(Start(screen_width//tilesize-1,0,-3))
games.screen.mainloop()
main()
``` |
{
"source": "JPIvan/optimisation",
"score": 2
} |
#### File: optimisation/tests/test_line_search.py
```python
import numpy as np
from pytest import approx, fixture, raises, mark
import context # noqa
from src.line_search import LineSearch
from src.least_squares import least_squares
from src.wrappers import ObjectiveFunctionWrapper
@fixture
def quadratic_objective_1d():
objective = ObjectiveFunctionWrapper(
func=lambda x: (x - 4)**2,
jac=lambda x: np.array(2*(x - 4), ndmin=2),
)
minimum = 4
start_points = [3, 5]
search_directions = [-objective.jac(x) for x in start_points]
return objective, minimum, start_points, search_directions
@fixture
def division_by_zero_objective_1d():
objective = ObjectiveFunctionWrapper(
func=lambda x: 1/x,
jac=lambda x: np.array(-1/(x**2), ndmin=2),
)
invalid_start = 0 # division by zero
token_search_direction = 1 # dummy value - no real choice at start point
return objective, invalid_start, token_search_direction
@fixture
def defined_only_on_part_of_domain_objective_1d():
objective = ObjectiveFunctionWrapper(
func=lambda x: x**2 if abs(x) < 1 else None,
jac=lambda x: 2*x if abs(x) < 1 else None
)
invalid_start = 2 # invalid start
token_search_direction = np.array(-4, ndmin=2) # okay search direction
return objective, invalid_start, token_search_direction
@fixture
def set_of_quadratic_objectives_nd():
objectives, start_points, search_directions, true_mins = [], [], [], []
rng = np.random.default_rng(seed=8008135)
for n in range(2, 11):
A = rng.uniform(low=-1, high=1, size=(n, n))
b = rng.uniform(low=-1, high=1, size=(n, 1))
LS = least_squares(A, b)
def _deriv(x, A=A, b=b): # analytical derivative
return 2*A.T@A@x - 2*A.T@b
objectives.append(ObjectiveFunctionWrapper(
func=least_squares(A, b),
jac=_deriv,
))
start_points.append(rng.uniform(low=-1, high=1, size=(n, 1)))
search_directions.append(-objectives[-1].jac(start_points[-1]))
# derivative at last start point
true_mins.append(LS.solve_minimum()['x*'])
return objectives, start_points, search_directions, true_mins
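# (Added note, for clarity.) The analytical Jacobian in the fixture above comes
# from expanding the least-squares objective:
#     f(x) = ||Ax - b||^2 = x^T A^T A x - 2 b^T A x + b^T b
#     grad f(x) = 2 A^T A x - 2 A^T b
# which is exactly what _deriv computes, so each search direction is the true
# steepest-descent direction at its start point.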
class TestLineSearch:
@mark.parametrize("method,assertion", [
("goldensection", "minimum_found"),
("backtracking", "function_decreased"),
])
def test_linesearch_correct_1d(
self, method, assertion, quadratic_objective_1d):
""" Check if one dimensional problems which are well specified are
solved correctly.
"""
objective, minimum, start, searchdir = quadratic_objective_1d
linesearch = LineSearch(objective) # minimum at x = 4
linesearch_method = getattr(linesearch, method)
for x0, dx in zip(start, searchdir):
solution = linesearch_method(
x=x0,
dx=dx,
)
if assertion == "minimum_found":
assert solution.x == approx(minimum)
elif assertion == "function_decreased":
assert np.linalg.norm(solution.x-minimum) < 1
else:
raise ValueError("Bad assertion.")
@mark.parametrize("method", ["goldensection", "backtracking"])
def test_linesearch_bad_search_direction(
self, method, quadratic_objective_1d
):
""" Check that search fail explicitly when a bad search direction is
given and a minimum cannot be bracketed.
"""
objective, minimum, start, searchdir = quadratic_objective_1d
linesearch = LineSearch(objective)
linesearch_method = getattr(linesearch, method)
for x0, dx in zip(start, searchdir):
with raises(ValueError):
linesearch_method(x=x0, dx=-dx)
# give bad search direction; negative of the direction
# defined in the fixture
@mark.parametrize("method", ["goldensection", "backtracking"])
def test_linesearch_division_by_zero_start(
self, method, division_by_zero_objective_1d,
):
""" Check behaviour when a start point that causes division by zero
is given.
Search should not fail silently, an explicit error is expected.
"""
objective, invalid_start, searchdir = division_by_zero_objective_1d
linesearch = LineSearch(objective)
linesearch_method = getattr(linesearch, method)
with raises(ZeroDivisionError):
linesearch_method(x=invalid_start, dx=searchdir)
@mark.parametrize("method", ["goldensection", "backtracking"])
def test_linesearch_undefined_domain_start(
self, method, defined_only_on_part_of_domain_objective_1d,
):
""" Check behaviour when an undefined start point is given.
Search should not fail silently, an explicit error is expected.
"""
objective, invalid_start, searchdir = \
defined_only_on_part_of_domain_objective_1d
linesearch = LineSearch(objective)
linesearch_method = getattr(linesearch, method)
with raises((ValueError, TypeError, AttributeError)):
linesearch_method(x=invalid_start, dx=searchdir)
@mark.parametrize("method", ["goldensection", "backtracking"])
def test_linesearch_correct_nd(
self, method, set_of_quadratic_objectives_nd,
):
""" Check if n-dimensional problems which are well specified are solved
correctly.
"""
objectives, start_points, search_directions, true_mins = \
set_of_quadratic_objectives_nd
for i, objective in enumerate(objectives):
linesearch = LineSearch(objectives[i])
linesearch_method = getattr(linesearch, method)
solution = linesearch_method(
x=start_points[i],
dx=search_directions[i],
)
norm_start = np.linalg.norm(start_points[i] - true_mins[i])
norm_after_linesearch = np.linalg.norm(solution.x - true_mins[i])
assert norm_start > norm_after_linesearch
``` |
{
"source": "jpivarski/awkward-1.0",
"score": 2
} |
#### File: _connect/_numba/arrayview.py
```python
import operator
import numba
import numba.core.typing
import numba.core.typing.ctypes_utils
import awkward as ak
np = ak.nplike.NumpyMetadata.instance()
########## for code that's built up from strings
def code_to_function(code, function_name, externals=None, debug=False):
if debug:
print("################### " + function_name) # noqa: T201
print(code) # noqa: T201
namespace = {} if externals is None else dict(externals)
exec(code, namespace)
return namespace[function_name]
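# (Added usage sketch; the names below are hypothetical, not from this file.)
# code_to_function exec's a source string in a fresh namespace (optionally
# seeded with `externals`) and returns the named function:
#
#     add_one = code_to_function(
#         "def add_one(x):\n    return x + offset\n",
#         "add_one",
#         externals={"offset": 1},
#     )
#     assert add_one(3) == 4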
########## Lookup
class Lookup:
def __init__(self, layout):
positions = []
sharedptrs = []
arrays = []
tolookup(layout, positions, sharedptrs, arrays)
assert len(positions) == len(sharedptrs)
def find(x):
for i, array in enumerate(arrays):
if x is array:
return i
assert isinstance(x, int)
return x
self.original_positions = positions
self.positions = [find(x) for x in positions]
self.sharedptrs_hold = sharedptrs
self.arrays = arrays
def arrayptr(x):
if isinstance(x, int):
return x
else:
return x.ctypes.data
def sharedptr(x):
if x is None:
return -1
elif x == 0:
return 0
else:
return x.ptr()
self.nplike = ak.nplike.of(layout)
self.arrayptrs = self.nplike.array(
[arrayptr(x) for x in positions], dtype=np.intp
)
self.sharedptrs = self.nplike.array(
[sharedptr(x) for x in sharedptrs], dtype=np.intp
)
def _view_as_array(self):
return self.nplike.vstack(
[self.nplike.arange(len(self.arrayptrs)), self.arrayptrs, self.sharedptrs]
).T
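# (Added note, illustrative.) A Lookup flattens a layout tree into parallel
# integer arrays (positions, raw buffer pointers, shared pointers) so that
# compiled Numba code can walk the structure without touching Python objects:
#
#     layout = ak.Array([[1.1, 2.2], [], [3.3]]).layout
#     lookup = Lookup(layout)
#     lookup.arrayptrs   # numpy intp array of buffer addresses / encoded ints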
def tolookup(layout, positions, sharedptrs, arrays):
if isinstance(layout, ak.layout.NumpyArray):
return ak._connect._numba.layout.NumpyArrayType.tolookup(
layout, positions, sharedptrs, arrays
)
elif isinstance(layout, ak.forms.NumpyForm):
return ak._connect._numba.layout.NumpyArrayType.form_tolookup(
layout, positions, sharedptrs, arrays
)
elif isinstance(layout, ak.layout.RegularArray):
return ak._connect._numba.layout.RegularArrayType.tolookup(
layout, positions, sharedptrs, arrays
)
elif isinstance(layout, ak.forms.RegularForm):
return ak._connect._numba.layout.RegularArrayType.form_tolookup(
layout, positions, sharedptrs, arrays
)
elif isinstance(
layout,
(
ak.layout.ListArray32,
ak.layout.ListArrayU32,
ak.layout.ListArray64,
ak.layout.ListOffsetArray32,
ak.layout.ListOffsetArrayU32,
ak.layout.ListOffsetArray64,
),
):
return ak._connect._numba.layout.ListArrayType.tolookup(
layout, positions, sharedptrs, arrays
)
elif isinstance(layout, (ak.forms.ListForm, ak.forms.ListOffsetForm)):
return ak._connect._numba.layout.ListArrayType.form_tolookup(
layout, positions, sharedptrs, arrays
)
elif isinstance(
layout,
(
ak.layout.IndexedArray32,
ak.layout.IndexedArrayU32,
ak.layout.IndexedArray64,
),
):
return ak._connect._numba.layout.IndexedArrayType.tolookup(
layout, positions, sharedptrs, arrays
)
elif isinstance(layout, ak.forms.IndexedForm):
return ak._connect._numba.layout.IndexedArrayType.form_tolookup(
layout, positions, sharedptrs, arrays
)
elif isinstance(
layout,
(ak.layout.IndexedOptionArray32, ak.layout.IndexedOptionArray64),
):
return ak._connect._numba.layout.IndexedOptionArrayType.tolookup(
layout, positions, sharedptrs, arrays
)
elif isinstance(layout, ak.forms.IndexedOptionForm):
return ak._connect._numba.layout.IndexedOptionArrayType.form_tolookup(
layout, positions, sharedptrs, arrays
)
elif isinstance(layout, ak.layout.ByteMaskedArray):
return ak._connect._numba.layout.ByteMaskedArrayType.tolookup(
layout, positions, sharedptrs, arrays
)
elif isinstance(layout, ak.forms.ByteMaskedForm):
return ak._connect._numba.layout.ByteMaskedArrayType.form_tolookup(
layout, positions, sharedptrs, arrays
)
elif isinstance(layout, ak.layout.BitMaskedArray):
return ak._connect._numba.layout.BitMaskedArrayType.tolookup(
layout, positions, sharedptrs, arrays
)
elif isinstance(layout, ak.forms.BitMaskedForm):
return ak._connect._numba.layout.BitMaskedArrayType.form_tolookup(
layout, positions, sharedptrs, arrays
)
elif isinstance(layout, ak.layout.UnmaskedArray):
return ak._connect._numba.layout.UnmaskedArrayType.tolookup(
layout, positions, sharedptrs, arrays
)
elif isinstance(layout, ak.forms.UnmaskedForm):
return ak._connect._numba.layout.UnmaskedArrayType.form_tolookup(
layout, positions, sharedptrs, arrays
)
elif isinstance(layout, ak.layout.RecordArray):
return ak._connect._numba.layout.RecordArrayType.tolookup(
layout, positions, sharedptrs, arrays
)
elif isinstance(layout, ak.forms.RecordForm):
return ak._connect._numba.layout.RecordArrayType.form_tolookup(
layout, positions, sharedptrs, arrays
)
elif isinstance(layout, ak.layout.Record):
return ak._connect._numba.layout.RecordType.tolookup(
layout, positions, sharedptrs, arrays
)
elif isinstance(
layout,
(
ak.layout.UnionArray8_32,
ak.layout.UnionArray8_U32,
ak.layout.UnionArray8_64,
),
):
return ak._connect._numba.layout.UnionArrayType.tolookup(
layout, positions, sharedptrs, arrays
)
elif isinstance(layout, ak.forms.UnionForm):
return ak._connect._numba.layout.UnionArrayType.form_tolookup(
layout, positions, sharedptrs, arrays
)
elif isinstance(layout, ak.layout.VirtualArray):
return ak._connect._numba.layout.VirtualArrayType.tolookup(
layout, positions, sharedptrs, arrays
)
elif isinstance(layout, ak.forms.VirtualForm):
return ak._connect._numba.layout.VirtualArrayType.form_tolookup(
layout, positions, sharedptrs, arrays
)
else:
raise AssertionError(
f"unrecognized Content or Form type: {type(layout)}"
+ ak._util.exception_suffix(__file__)
)
def tonumbatype(form):
if isinstance(form, ak.forms.NumpyForm):
return ak._connect._numba.layout.NumpyArrayType.from_form(form)
elif isinstance(form, ak.forms.RegularForm):
return ak._connect._numba.layout.RegularArrayType.from_form(form)
elif isinstance(form, (ak.forms.ListForm, ak.forms.ListOffsetForm)):
return ak._connect._numba.layout.ListArrayType.from_form(form)
elif isinstance(form, ak.forms.IndexedForm):
return ak._connect._numba.layout.IndexedArrayType.from_form(form)
elif isinstance(form, ak.forms.IndexedOptionForm):
return ak._connect._numba.layout.IndexedOptionArrayType.from_form(form)
elif isinstance(form, ak.forms.ByteMaskedForm):
return ak._connect._numba.layout.ByteMaskedArrayType.from_form(form)
elif isinstance(form, ak.forms.BitMaskedForm):
return ak._connect._numba.layout.BitMaskedArrayType.from_form(form)
elif isinstance(form, ak.forms.UnmaskedForm):
return ak._connect._numba.layout.UnmaskedArrayType.from_form(form)
elif isinstance(form, ak.forms.RecordForm):
return ak._connect._numba.layout.RecordArrayType.from_form(form)
elif isinstance(form, ak.forms.UnionForm):
return ak._connect._numba.layout.UnionArrayType.from_form(form)
elif isinstance(form, ak.forms.VirtualForm):
return ak._connect._numba.layout.VirtualArrayType.from_form(form)
else:
raise AssertionError(
f"unrecognized Form type: {type(form)}"
+ ak._util.exception_suffix(__file__)
)
@numba.extending.typeof_impl.register(Lookup)
def typeof_Lookup(obj, c):
return LookupType()
class LookupType(numba.types.Type):
arraytype = numba.types.Array(numba.intp, 1, "C")
def __init__(self):
super().__init__(name="ak.LookupType()")
@numba.extending.register_model(LookupType)
class LookupModel(numba.core.datamodel.models.StructModel):
def __init__(self, dmm, fe_type):
members = [("arrayptrs", fe_type.arraytype), ("sharedptrs", fe_type.arraytype)]
super().__init__(dmm, fe_type, members)
@numba.extending.unbox(LookupType)
def unbox_Lookup(lookuptype, lookupobj, c):
arrayptrs_obj = c.pyapi.object_getattr_string(lookupobj, "arrayptrs")
sharedptrs_obj = c.pyapi.object_getattr_string(lookupobj, "sharedptrs")
proxyout = c.context.make_helper(c.builder, lookuptype)
proxyout.arrayptrs = c.pyapi.to_native_value(
lookuptype.arraytype, arrayptrs_obj
).value
proxyout.sharedptrs = c.pyapi.to_native_value(
lookuptype.arraytype, sharedptrs_obj
).value
c.pyapi.decref(arrayptrs_obj)
c.pyapi.decref(sharedptrs_obj)
is_error = numba.core.cgutils.is_not_null(c.builder, c.pyapi.err_occurred())
return numba.extending.NativeValue(proxyout._getvalue(), is_error)
########## ArrayView
class ArrayView:
@classmethod
def fromarray(cls, array):
behavior = ak._util.behaviorof(array)
layout = ak.operations.convert.to_layout(
array,
allow_record=False,
allow_other=False,
numpytype=(np.number, bool, np.bool_),
)
while isinstance(layout, ak.layout.VirtualArray) and isinstance(
layout.generator, ak.layout.SliceGenerator
):
layout = layout.array
layout = ak.operations.convert.regularize_numpyarray(
layout, allow_empty=False, highlevel=False
)
if isinstance(layout, ak.partition.PartitionedArray):
numba_type = None
for part in layout.partitions:
if numba_type is None:
numba_type = ak._connect._numba.layout.typeof(part)
elif numba_type != ak._connect._numba.layout.typeof(part):
raise ValueError(
"partitioned arrays can only be used in Numba if all "
"partitions have the same numba_type"
+ ak._util.exception_suffix(__file__)
)
return PartitionedView(
ak._connect._numba.layout.typeof(part),
behavior,
[Lookup(x) for x in layout.partitions],
ak.nplike.of(layout).asarray(layout.stops, dtype=np.intp),
0,
len(layout),
(),
)
else:
return ArrayView(
ak._connect._numba.layout.typeof(layout),
behavior,
Lookup(layout),
0,
0,
len(layout),
(),
)
def __init__(self, type, behavior, lookup, pos, start, stop, fields):
self.type = type
self.behavior = behavior
self.lookup = lookup
self.pos = pos
self.start = start
self.stop = stop
self.fields = fields
def toarray(self):
layout = self.type.tolayout(self.lookup, self.pos, self.fields)
sliced = layout.getitem_range_nowrap(self.start, self.stop)
return ak._util.wrap(sliced, self.behavior)
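# (Added usage sketch; not part of the original file.) ArrayView is what an
# ak.Array is unboxed into when it crosses into an @numba.njit function; user
# code normally never constructs one directly:
#
#     import awkward as ak
#     import numba
#
#     @numba.njit
#     def sum_first(array):
#         total = 0.0
#         for sublist in array:
#             if len(sublist) > 0:
#                 total += sublist[0]
#         return total
#
#     sum_first(ak.Array([[1.0, 2.0], [], [3.0]]))   # -> 4.0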
@numba.extending.typeof_impl.register(ArrayView)
def typeof_ArrayView(obj, c):
return ArrayViewType(obj.type, obj.behavior, obj.fields)
def wrap(type, viewtype, fields):
if fields is None:
return ArrayViewType(type, viewtype.behavior, viewtype.fields)
else:
return ArrayViewType(type, viewtype.behavior, fields)
class ArrayViewType(numba.types.IterableType, numba.types.Sized):
def __init__(self, type, behavior, fields):
super().__init__(
name="ak.ArrayView({}, {}, {})".format(
type.name,
ak._connect._numba.repr_behavior(behavior),
repr(fields),
)
)
self.type = type
self.behavior = behavior
self.fields = fields
@property
def iterator_type(self):
return IteratorType(self)
@numba.extending.register_model(ArrayViewType)
class ArrayViewModel(numba.core.datamodel.models.StructModel):
def __init__(self, dmm, fe_type):
members = [
("pos", numba.intp),
("start", numba.intp),
("stop", numba.intp),
("arrayptrs", numba.types.CPointer(numba.intp)),
("sharedptrs", numba.types.CPointer(numba.intp)),
("pylookup", numba.types.pyobject),
]
super().__init__(dmm, fe_type, members)
@numba.core.imputils.lower_constant(ArrayViewType)
def lower_const_Array(context, builder, viewtype, array):
return lower_const_view(context, builder, viewtype, array._numbaview)
def lower_const_view(context, builder, viewtype, view):
lookup = view.lookup
arrayptrs = lookup.arrayptrs
sharedptrs = lookup.sharedptrs
pos = view.pos
start = view.start
stop = view.stop
arrayptrs_val = context.make_constant_array(
builder, numba.typeof(arrayptrs), arrayptrs
)
sharedptrs_val = context.make_constant_array(
builder, numba.typeof(sharedptrs), sharedptrs
)
proxyout = context.make_helper(builder, viewtype)
proxyout.pos = context.get_constant(numba.intp, pos)
proxyout.start = context.get_constant(numba.intp, start)
proxyout.stop = context.get_constant(numba.intp, stop)
proxyout.arrayptrs = context.make_helper(
builder, numba.typeof(arrayptrs), arrayptrs_val
).data
proxyout.sharedptrs = context.make_helper(
builder, numba.typeof(sharedptrs), sharedptrs_val
).data
proxyout.pylookup = context.add_dynamic_addr(
builder, id(lookup), info=str(type(lookup))
)
return proxyout._getvalue()
@numba.extending.unbox(ArrayViewType)
def unbox_Array(viewtype, arrayobj, c):
view_obj = c.pyapi.object_getattr_string(arrayobj, "_numbaview")
out = unbox_ArrayView(viewtype, view_obj, c)
c.pyapi.decref(view_obj)
return out
def unbox_ArrayView(viewtype, view_obj, c):
lookup_obj = c.pyapi.object_getattr_string(view_obj, "lookup")
pos_obj = c.pyapi.object_getattr_string(view_obj, "pos")
start_obj = c.pyapi.object_getattr_string(view_obj, "start")
stop_obj = c.pyapi.object_getattr_string(view_obj, "stop")
lookup_val = c.pyapi.to_native_value(LookupType(), lookup_obj).value
lookup_proxy = c.context.make_helper(c.builder, LookupType(), lookup_val)
proxyout = c.context.make_helper(c.builder, viewtype)
proxyout.pos = c.pyapi.number_as_ssize_t(pos_obj)
proxyout.start = c.pyapi.number_as_ssize_t(start_obj)
proxyout.stop = c.pyapi.number_as_ssize_t(stop_obj)
proxyout.arrayptrs = c.context.make_helper(
c.builder, LookupType.arraytype, lookup_proxy.arrayptrs
).data
proxyout.sharedptrs = c.context.make_helper(
c.builder, LookupType.arraytype, lookup_proxy.sharedptrs
).data
proxyout.pylookup = lookup_obj
c.pyapi.decref(lookup_obj)
c.pyapi.decref(pos_obj)
c.pyapi.decref(start_obj)
c.pyapi.decref(stop_obj)
if c.context.enable_nrt:
c.context.nrt.decref(c.builder, LookupType(), lookup_val)
is_error = numba.core.cgutils.is_not_null(c.builder, c.pyapi.err_occurred())
return numba.extending.NativeValue(proxyout._getvalue(), is_error)
@numba.extending.box(ArrayViewType)
def box_Array(viewtype, viewval, c):
arrayview_obj = box_ArrayView(viewtype, viewval, c)
out = c.pyapi.call_method(arrayview_obj, "toarray", ())
c.pyapi.decref(arrayview_obj)
return out
def dict2serializable(obj):
if obj is None:
return None
else:
return tuple(obj.items())
def serializable2dict(obj):
if obj is None:
return None
else:
return dict(obj)
def box_ArrayView(viewtype, viewval, c):
serializable2dict_obj = c.pyapi.unserialize(
c.pyapi.serialize_object(serializable2dict)
)
behavior2_obj = c.pyapi.unserialize(
c.pyapi.serialize_object(dict2serializable(viewtype.behavior))
)
behavior_obj = c.pyapi.call_function_objargs(
serializable2dict_obj, (behavior2_obj,)
)
ArrayView_obj = c.pyapi.unserialize(c.pyapi.serialize_object(ArrayView))
type_obj = c.pyapi.unserialize(c.pyapi.serialize_object(viewtype.type))
fields_obj = c.pyapi.unserialize(c.pyapi.serialize_object(viewtype.fields))
proxyin = c.context.make_helper(c.builder, viewtype, viewval)
pos_obj = c.pyapi.long_from_ssize_t(proxyin.pos)
start_obj = c.pyapi.long_from_ssize_t(proxyin.start)
stop_obj = c.pyapi.long_from_ssize_t(proxyin.stop)
lookup_obj = proxyin.pylookup
out = c.pyapi.call_function_objargs(
ArrayView_obj,
(type_obj, behavior_obj, lookup_obj, pos_obj, start_obj, stop_obj, fields_obj),
)
c.pyapi.decref(serializable2dict_obj)
c.pyapi.decref(behavior2_obj)
c.pyapi.decref(behavior_obj)
c.pyapi.decref(ArrayView_obj)
c.pyapi.decref(type_obj)
c.pyapi.decref(fields_obj)
c.pyapi.decref(pos_obj)
c.pyapi.decref(start_obj)
c.pyapi.decref(stop_obj)
return out
@numba.core.typing.templates.infer_global(len)
class type_len(numba.core.typing.templates.AbstractTemplate):
def generic(self, args, kwargs):
if len(args) == 1 and len(kwargs) == 0 and isinstance(args[0], ArrayViewType):
return numba.intp(args[0])
@numba.extending.lower_builtin(len, ArrayViewType)
def lower_len(context, builder, sig, args):
proxyin = context.make_helper(builder, sig.args[0], args[0])
return builder.sub(proxyin.stop, proxyin.start)
@numba.core.typing.templates.infer_global(operator.getitem)
class type_getitem(numba.core.typing.templates.AbstractTemplate):
def generic(self, args, kwargs):
if len(args) == 2 and len(kwargs) == 0 and isinstance(args[0], ArrayViewType):
viewtype, wheretype = args
if isinstance(wheretype, numba.types.Integer):
return viewtype.type.getitem_at_check(viewtype)(viewtype, wheretype)
elif (
isinstance(wheretype, numba.types.SliceType) and not wheretype.has_step
):
return viewtype.type.getitem_range(viewtype)(viewtype, wheretype)
elif isinstance(wheretype, numba.types.StringLiteral):
return viewtype.type.getitem_field(viewtype, wheretype.literal_value)(
viewtype, wheretype
)
else:
raise TypeError(
"only an integer, start:stop range, or a *constant* "
"field name string may be used as ak.Array "
"slices in compiled code" + ak._util.exception_suffix(__file__)
)
@numba.extending.lower_builtin(operator.getitem, ArrayViewType, numba.types.Integer)
def lower_getitem_at(context, builder, sig, args):
rettype, (viewtype, wheretype) = sig.return_type, sig.args
viewval, whereval = args
viewproxy = context.make_helper(builder, viewtype, viewval)
return viewtype.type.lower_getitem_at_check(
context,
builder,
rettype,
viewtype,
viewval,
viewproxy,
wheretype,
whereval,
True,
True,
)
@numba.extending.lower_builtin(operator.getitem, ArrayViewType, numba.types.slice2_type)
def lower_getitem_range(context, builder, sig, args):
rettype, (viewtype, wheretype) = sig.return_type, sig.args
viewval, whereval = args
viewproxy = context.make_helper(builder, viewtype, viewval)
whereproxy = context.make_helper(builder, wheretype, whereval)
return viewtype.type.lower_getitem_range(
context,
builder,
rettype,
viewtype,
viewval,
viewproxy,
whereproxy.start,
whereproxy.stop,
True,
)
@numba.extending.lower_builtin(
operator.getitem, ArrayViewType, numba.types.StringLiteral
)
def lower_getitem_field(context, builder, sig, args):
_, (viewtype, wheretype) = sig.return_type, sig.args
viewval, whereval = args
return viewtype.type.lower_getitem_field(
context, builder, viewtype, viewval, wheretype.literal_value
)
@numba.core.typing.templates.infer_getattr
class type_getattr(numba.core.typing.templates.AttributeTemplate):
key = ArrayViewType
def generic_resolve(self, viewtype, attr):
if attr == "ndim":
return numba.intp
else:
return viewtype.type.getitem_field(viewtype, attr)
@numba.extending.lower_getattr_generic(ArrayViewType)
def lower_getattr_generic(context, builder, viewtype, viewval, attr):
if attr == "ndim":
return context.get_constant(numba.intp, viewtype.type.ndim)
else:
return viewtype.type.lower_getitem_field(
context, builder, viewtype, viewval, attr
)
class IteratorType(numba.types.common.SimpleIteratorType):
def __init__(self, viewtype):
super().__init__(
f"ak.Iterator({viewtype.name})",
viewtype.type.getitem_at_check(viewtype),
)
self.viewtype = viewtype
@numba.core.typing.templates.infer
class type_getiter(numba.core.typing.templates.AbstractTemplate):
key = "getiter"
def generic(self, args, kwargs):
if len(args) == 1 and len(kwargs) == 0 and isinstance(args[0], ArrayViewType):
return IteratorType(args[0])(args[0])
@numba.core.datamodel.registry.register_default(IteratorType)
class IteratorModel(numba.core.datamodel.models.StructModel):
def __init__(self, dmm, fe_type):
members = [
("view", fe_type.viewtype),
("length", numba.intp),
("at", numba.types.EphemeralPointer(numba.intp)),
]
super().__init__(dmm, fe_type, members)
@numba.extending.lower_builtin("getiter", ArrayViewType)
def lower_getiter(context, builder, sig, args):
rettype, (viewtype,) = sig.return_type, sig.args
(viewval,) = args
viewproxy = context.make_helper(builder, viewtype, viewval)
proxyout = context.make_helper(builder, rettype)
proxyout.view = viewval
proxyout.length = builder.sub(viewproxy.stop, viewproxy.start)
proxyout.at = numba.core.cgutils.alloca_once_value(
builder, context.get_constant(numba.intp, 0)
)
if context.enable_nrt:
context.nrt.incref(builder, viewtype, viewval)
return numba.core.imputils.impl_ret_new_ref(
context, builder, rettype, proxyout._getvalue()
)
@numba.extending.lower_builtin("iternext", IteratorType)
@numba.core.imputils.iternext_impl(numba.core.imputils.RefType.BORROWED)
def lower_iternext(context, builder, sig, args, result):
(itertype,) = sig.args
(iterval,) = args
proxyin = context.make_helper(builder, itertype, iterval)
at = builder.load(proxyin.at)
is_valid = builder.icmp_signed("<", at, proxyin.length)
result.set_valid(is_valid)
with builder.if_then(is_valid, likely=True):
result.yield_(
lower_getitem_at(
context,
builder,
itertype.yield_type(itertype.viewtype, numba.intp),
(proxyin.view, at),
)
)
nextat = numba.core.cgutils.increment_index(builder, at)
builder.store(nextat, proxyin.at)
########## RecordView
class RecordView:
@classmethod
def fromrecord(cls, record):
behavior = ak._util.behaviorof(record)
layout = ak.operations.convert.to_layout(
record,
allow_record=True,
allow_other=False,
numpytype=(np.number, bool, np.bool_),
)
assert isinstance(layout, ak.layout.Record)
arraylayout = layout.array
return RecordView(
ArrayView(
ak._connect._numba.layout.typeof(arraylayout),
behavior,
Lookup(arraylayout),
0,
0,
len(arraylayout),
(),
),
layout.at,
)
def __init__(self, arrayview, at):
self.arrayview = arrayview
self.at = at
def torecord(self):
arraylayout = self.arrayview.toarray().layout
return ak._util.wrap(
ak.layout.Record(arraylayout, self.at), self.arrayview.behavior
)
@numba.extending.typeof_impl.register(RecordView)
def typeof_RecordView(obj, c):
return RecordViewType(numba.typeof(obj.arrayview))
class RecordViewType(numba.types.Type):
def __init__(self, arrayviewtype):
super().__init__(name=f"ak.RecordViewType({arrayviewtype.name})")
self.arrayviewtype = arrayviewtype
@property
def behavior(self):
return self.arrayviewtype.behavior
@property
def fields(self):
return self.arrayviewtype.fields
def typer_field(self, key):
return self.arrayviewtype.type.getitem_field_record(self, key)
def lower_field(self, context, builder, val, key):
return self.arrayviewtype.type.lower_getitem_field_record(
context, builder, self, val, key
)
@numba.extending.register_model(RecordViewType)
class RecordViewModel(numba.core.datamodel.models.StructModel):
def __init__(self, dmm, fe_type):
members = [("arrayview", fe_type.arrayviewtype), ("at", numba.intp)]
super().__init__(dmm, fe_type, members)
@numba.core.imputils.lower_constant(RecordViewType)
def lower_const_Record(context, builder, recordviewtype, record):
arrayview_val = lower_const_view(
context, builder, recordviewtype.arrayviewtype, record._numbaview.arrayview
)
proxyout = context.make_helper(builder, recordviewtype)
proxyout.arrayview = arrayview_val
proxyout.at = context.get_constant(numba.intp, record._layout.at)
return proxyout._getvalue()
@numba.extending.unbox(RecordViewType)
def unbox_RecordView(recordviewtype, recordobj, c):
recordview_obj = c.pyapi.object_getattr_string(recordobj, "_numbaview")
arrayview_obj = c.pyapi.object_getattr_string(recordview_obj, "arrayview")
at_obj = c.pyapi.object_getattr_string(recordview_obj, "at")
arrayview_val = unbox_ArrayView(
recordviewtype.arrayviewtype, arrayview_obj, c
).value
proxyout = c.context.make_helper(c.builder, recordviewtype)
proxyout.arrayview = arrayview_val
proxyout.at = c.pyapi.number_as_ssize_t(at_obj)
c.pyapi.decref(recordview_obj)
c.pyapi.decref(at_obj)
if c.context.enable_nrt:
c.context.nrt.decref(c.builder, recordviewtype.arrayviewtype, arrayview_val)
is_error = numba.core.cgutils.is_not_null(c.builder, c.pyapi.err_occurred())
return numba.extending.NativeValue(proxyout._getvalue(), is_error)
@numba.extending.box(RecordViewType)
def box_RecordView(recordviewtype, viewval, c):
RecordView_obj = c.pyapi.unserialize(c.pyapi.serialize_object(RecordView))
proxyin = c.context.make_helper(c.builder, recordviewtype, viewval)
arrayview_obj = box_ArrayView(recordviewtype.arrayviewtype, proxyin.arrayview, c)
at_obj = c.pyapi.long_from_ssize_t(proxyin.at)
recordview_obj = c.pyapi.call_function_objargs(
RecordView_obj, (arrayview_obj, at_obj)
)
out = c.pyapi.call_method(recordview_obj, "torecord", ())
c.pyapi.decref(RecordView_obj)
c.pyapi.decref(arrayview_obj)
c.pyapi.decref(at_obj)
c.pyapi.decref(recordview_obj)
return out
@numba.core.typing.templates.infer_global(operator.getitem)
class type_getitem_record(numba.core.typing.templates.AbstractTemplate):
def generic(self, args, kwargs):
if len(args) == 2 and len(kwargs) == 0 and isinstance(args[0], RecordViewType):
recordviewtype, wheretype = args
if isinstance(wheretype, numba.types.StringLiteral):
return recordviewtype.arrayviewtype.type.getitem_field_record(
recordviewtype, wheretype.literal_value
)(recordviewtype, wheretype)
else:
raise TypeError(
"only a *constant* field name string may be used as "
"ak.Record slices in compiled code"
+ ak._util.exception_suffix(__file__)
)
@numba.extending.lower_builtin(
operator.getitem, RecordViewType, numba.types.StringLiteral
)
def lower_getitem_field_record(context, builder, sig, args):
_, (recordviewtype, wheretype) = sig.return_type, sig.args
recordviewval, whereval = args
return recordviewtype.arrayviewtype.type.lower_getitem_field_record(
context, builder, recordviewtype, recordviewval, wheretype.literal_value
)
@numba.core.typing.templates.infer_getattr
class type_getattr_record(numba.core.typing.templates.AttributeTemplate):
key = RecordViewType
def generic_resolve(self, recordviewtype, attr):
for methodname, typer, lower in ak._util.numba_methods(
recordviewtype.arrayviewtype.type, recordviewtype.arrayviewtype.behavior
):
if attr == methodname:
class type_method(numba.core.typing.templates.AbstractTemplate):
key = methodname
def generic(self, args, kwargs):
if len(kwargs) == 0:
sig = typer(recordviewtype, args)
sig = numba.core.typing.templates.Signature(
sig.return_type, sig.args, recordviewtype
)
numba.extending.lower_builtin(
methodname,
recordviewtype,
*[
x.literal_type
if isinstance(x, numba.types.Literal)
else x
for x in args
],
)(lower)
return sig
return numba.types.BoundFunction(type_method, recordviewtype)
for attrname, typer, _ in ak._util.numba_attrs(
recordviewtype.arrayviewtype.type, recordviewtype.arrayviewtype.behavior
):
if attr == attrname:
return typer(recordviewtype)
return recordviewtype.typer_field(attr)
@numba.extending.lower_getattr_generic(RecordViewType)
def lower_getattr_generic_record(context, builder, recordviewtype, recordviewval, attr):
for attrname, typer, lower in ak._util.numba_attrs(
recordviewtype.arrayviewtype.type, recordviewtype.arrayviewtype.behavior
):
if attr == attrname:
return lower(
context,
builder,
typer(recordviewtype)(recordviewtype),
(recordviewval,),
)
return recordviewtype.lower_field(context, builder, recordviewval, attr)
def register_unary_operator(unaryop):
@numba.core.typing.templates.infer_global(unaryop)
    class type_unary_operator(numba.core.typing.templates.AbstractTemplate):
def generic(self, args, kwargs):
if len(args) == 1 and len(kwargs) == 0:
behavior = None
if isinstance(args[0], RecordViewType):
left = args[0].arrayviewtype.type
behavior = args[0].arrayviewtype.behavior
for typer, lower in ak._util.numba_unaryops(
unaryop, left, behavior
):
numba.extending.lower_builtin(unaryop, *args)(lower)
return typer(unaryop, args[0])
for unaryop in (
abs,
operator.inv,
operator.invert,
operator.neg,
operator.not_,
operator.pos,
operator.truth,
):
register_unary_operator(unaryop)
def register_binary_operator(binop):
@numba.core.typing.templates.infer_global(binop)
class type_binary_operator(numba.core.typing.templates.AbstractTemplate):
def generic(self, args, kwargs):
if len(args) == 2 and len(kwargs) == 0:
left, right, behavior = None, None, None
if isinstance(args[0], RecordViewType):
left = args[0].arrayviewtype.type
behavior = args[0].arrayviewtype.behavior
if isinstance(args[1], RecordViewType):
right = args[1].arrayviewtype.type
if behavior is None:
behavior = args[1].arrayviewtype.behavior
if left is not None or right is not None:
for typer, lower in ak._util.numba_binops(
binop, left, right, behavior
):
numba.extending.lower_builtin(binop, *args)(lower)
return typer(binop, args[0], args[1])
for binop in (
operator.add,
operator.and_,
operator.contains,
operator.eq,
operator.floordiv,
operator.ge,
operator.gt,
operator.le,
operator.lshift,
operator.lt,
operator.mod,
operator.mul,
operator.ne,
operator.or_,
operator.pow,
operator.rshift,
operator.sub,
operator.truediv,
operator.xor,
) + (() if not hasattr(operator, "matmul") else (operator.matmul,)):
register_binary_operator(binop)
########## __contains__
@numba.extending.overload(operator.contains)
def overload_contains(obj, element):
if isinstance(obj, (ArrayViewType, RecordViewType)) and (
(element == numba.types.none)
or (isinstance(element, (numba.types.Number, numba.types.Boolean)))
or (
isinstance(element, numba.types.Optional)
and isinstance(element.type, (numba.types.Number, numba.types.Boolean))
)
):
statements = []
def add_statement(indent, name, arraytype, is_array):
if is_array:
statements.append("for x in " + name + ":")
name = "x"
indent = indent + " "
if isinstance(arraytype, ak._connect._numba.layout.RecordArrayType):
if arraytype.is_tuple:
for fi, ft in enumerate(arraytype.contenttypes):
add_statement(indent, name + "[" + repr(fi) + "]", ft, False)
else:
for fn, ft in zip(arraytype.recordlookup, arraytype.contenttypes):
add_statement(indent, name + "[" + repr(fn) + "]", ft, False)
elif arraytype.ndim == 1 and not arraytype.is_recordtype:
if arraytype.is_optiontype:
statements.append(
indent + "if (element is None and {0} is None) or "
"({0} is not None and element == {0}): return True".format(name)
)
else:
statements.append(indent + f"if element == {name}: return True")
else:
if arraytype.is_optiontype:
statements.append(
indent + "if (element is None and {0} is None) or "
"({0} is not None and element in {0}): return True".format(name)
)
else:
statements.append(indent + f"if element in {name}: return True")
if isinstance(obj, ArrayViewType):
add_statement("", "obj", obj.type, True)
else:
add_statement("", "obj", obj.arrayviewtype.type, False)
return code_to_function(
"""
def contains_impl(obj, element):
{}
return False""".format(
"\n ".join(statements)
),
"contains_impl",
)
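# Hedged usage sketch: the overload above makes Python's `in` operator work on array
# and record views in compiled code. `has_value` is a hypothetical example function.
#
#     @numba.njit
#     def has_value(array, value):
#         return value in array   # dispatches to the generated contains_impl
#
#     has_value(ak.Array([[1, 2], [], [3]]), 3)  # expected True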
########## np.array and np.asarray
array_supported = (
numba.types.boolean,
numba.types.int8,
numba.types.int16,
numba.types.int32,
numba.types.int64,
numba.types.uint8,
numba.types.uint16,
numba.types.uint32,
numba.types.uint64,
numba.types.float32,
numba.types.float64,
)
@numba.extending.overload(ak.nplike.numpy.array)
def overload_np_array(array, dtype=None):
if isinstance(array, ArrayViewType):
ndim = array.type.ndim
inner_dtype = array.type.inner_dtype
if ndim is not None and inner_dtype in array_supported:
declare_shape = []
compute_shape = []
specify_shape = ["len(array)"]
ensure_shape = []
array_name = "array"
for i in range(ndim - 1):
declare_shape.append(f"shape{i} = -1")
compute_shape.append(
"{}for x{} in {}:".format(" " * i, i, array_name)
)
compute_shape.append("{} if shape{} == -1:".format(" " * i, i))
compute_shape.append(
"{0} shape{1} = len(x{1})".format(" " * i, i)
)
compute_shape.append(
"{0} elif shape{1} != len(x{1}):".format(" " * i, i)
)
compute_shape.append(
"{} raise ValueError('cannot convert to NumPy because "
"subarray lengths are not regular')".format(" " * i)
)
specify_shape.append(f"shape{i}")
ensure_shape.append("if shape{0} == -1: shape{0} = 0".format(i))
array_name = f"x{i}"
fill_array = []
index = []
array_name = "array"
for i in range(ndim):
fill_array.append(
"{0}for i{1}, x{1} in enumerate({2}):".format(
" " * i, i, array_name
)
)
index.append(f"i{i}")
array_name = f"x{i}"
fill_array.append(
"{}out[{}] = x{}".format(" " * ndim, "][".join(index), ndim - 1)
)
return code_to_function(
"""
def array_impl(array, dtype=None):
{}
{}
{}
out = numpy.zeros(({}), {})
{}
return out
""".format(
"\n ".join(declare_shape),
"\n ".join(compute_shape),
"\n ".join(ensure_shape),
", ".join(specify_shape),
f"numpy.{inner_dtype}" if dtype is None else "dtype",
"\n ".join(fill_array),
),
"array_impl",
{"numpy": ak.nplike.numpy},
)
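# Descriptive note (hedged): for a 2-dimensional numeric ArrayViewType the string
# assembled above produces an implementation that (1) scans the nested lists once to
# check that every sublist has the same length (raising ValueError otherwise),
# (2) allocates `numpy.zeros((len(array), shape0), ...)` with the inner dtype, and
# (3) fills it with nested enumerate loops. The exact generated text varies with
# ndim and dtype; this comment only illustrates its structure.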
@numba.extending.type_callable(ak.nplike.numpy.asarray)
def type_asarray(context):
def typer(arrayview):
if (
isinstance(arrayview, ArrayViewType)
and isinstance(arrayview.type, ak._connect._numba.layout.NumpyArrayType)
and arrayview.type.ndim == 1
and arrayview.type.inner_dtype in array_supported
):
return numba.types.Array(arrayview.type.inner_dtype, 1, "C")
return typer
@numba.extending.lower_builtin(ak.nplike.numpy.asarray, ArrayViewType)
def lower_asarray(context, builder, sig, args):
rettype, (viewtype,) = sig.return_type, sig.args
(viewval,) = args
viewproxy = context.make_helper(builder, viewtype, viewval)
assert isinstance(viewtype.type, ak._connect._numba.layout.NumpyArrayType)
whichpos = ak._connect._numba.layout.posat(
context, builder, viewproxy.pos, viewtype.type.ARRAY
)
arrayptr = ak._connect._numba.layout.getat(
context, builder, viewproxy.arrayptrs, whichpos
)
bitwidth = ak._connect._numba.layout.type_bitwidth(rettype.dtype)
itemsize = context.get_constant(numba.intp, bitwidth // 8)
data = numba.core.cgutils.pointer_add(
builder,
arrayptr,
builder.mul(viewproxy.start, itemsize),
context.get_value_type(numba.types.CPointer(rettype.dtype)),
)
shape = context.make_tuple(
builder,
numba.types.UniTuple(numba.types.intp, 1),
(builder.sub(viewproxy.stop, viewproxy.start),),
)
strides = context.make_tuple(
builder,
numba.types.UniTuple(numba.types.intp, 1),
(itemsize,),
)
out = numba.np.arrayobj.make_array(rettype)(context, builder)
numba.np.arrayobj.populate_array(
out,
data=data,
shape=shape,
strides=strides,
itemsize=itemsize,
meminfo=None,
parent=None,
)
return out._getvalue()
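# Hedged usage sketch: np.asarray on a 1-dimensional numeric ArrayView yields a
# zero-copy NumPy array over the underlying buffer (pointer offset by `start`,
# contiguous stride), so NumPy-style code works in compiled functions:
#
#     import numba
#     import numpy as np
#
#     @numba.njit
#     def as_numpy_sum(array):
#         return np.asarray(array).sum()
#
#     as_numpy_sum(ak.Array(np.array([1.0, 2.0, 3.0])))  # expected 6.0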
########## PartitionedView
class PartitionedView:
def __init__(self, type, behavior, lookups, stops, start, stop, fields):
self.type = type
self.behavior = behavior
self.lookups = lookups
self.stops = stops
self.start = start
self.stop = stop
self.fields = fields
def toarray(self):
output = []
partition_start = 0
for partitionid, lookup in enumerate(self.lookups):
partition_stop = self.stops[partitionid]
if partition_start <= self.start and self.stop <= partition_stop:
layout = self.type.tolayout(lookup, 0, self.fields)
output.append(
layout[self.start - partition_start : self.stop - partition_start]
)
break
elif partition_start <= self.start < partition_stop:
layout = self.type.tolayout(lookup, 0, self.fields)
output.append(
layout[
self.start - partition_start : partition_stop - partition_start
]
)
elif partition_start < self.stop <= partition_stop:
layout = self.type.tolayout(lookup, 0, self.fields)
output.append(layout[0 : self.stop - partition_start])
break
elif self.start < partition_start and partition_stop < self.stop:
layout = self.type.tolayout(lookup, 0, self.fields)
output.append(layout[0 : partition_stop - partition_start])
partition_start = partition_stop
return ak._util.wrap(
ak.partition.IrregularlyPartitionedArray(output), self.behavior
)
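# Worked example (hedged): with stops == [3, 7, 10] (cumulative partition ends),
# start == 2 and stop == 8, the loop above collects
#     partition 0: layout[2:3]   (tail of the first partition)
#     partition 1: layout[0:4]   (the whole middle partition)
#     partition 2: layout[0:1]   (head of the last partition)
# which is exactly the global range [2, 8), re-wrapped as an
# IrregularlyPartitionedArray.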
@numba.extending.typeof_impl.register(PartitionedView)
def typeof_PartitionedView(obj, c):
return PartitionedViewType(obj.type, obj.behavior, obj.fields)
class PartitionedViewType(numba.types.IterableType, numba.types.Sized):
stopstype = numba.types.Array(numba.intp, 1, "C")
def __init__(self, type, behavior, fields):
super().__init__(
name="ak.PartitionedView({}, {}, {})".format(
type.name,
ak._connect._numba.repr_behavior(behavior),
repr(fields),
)
)
self.type = type
self.behavior = behavior
self.fields = fields
@property
def iterator_type(self):
return PartitionedIteratorType(self)
def toArrayViewType(self):
return ArrayViewType(self.type, self.behavior, self.fields)
def getitem_field(self, key):
return PartitionedViewType(self.type, self.behavior, self.fields + (key,))
def lower_get_localstart(self, context, builder, stops, partitionid):
out = numba.core.cgutils.alloca_once_value(
builder, context.get_constant(numba.intp, 0)
)
with builder.if_then(
builder.icmp_signed("!=", partitionid, context.get_constant(numba.intp, 0))
):
stopsproxy = context.make_helper(builder, self.stopstype, stops)
newval = numba.np.arrayobj._getitem_array_single_int(
context,
builder,
numba.intp,
self.stopstype,
stopsproxy,
builder.sub(partitionid, context.get_constant(numba.intp, 1)),
)
builder.store(newval, out)
return builder.load(out)
def lower_get_localstop(self, context, builder, stops, partitionid):
stopsproxy = context.make_helper(builder, self.stopstype, stops)
return numba.np.arrayobj._getitem_array_single_int(
context, builder, numba.intp, self.stopstype, stopsproxy, partitionid
)
def lower_get_partitionid(
self, context, builder, pyapi, pylookups, partitionid, viewlength
):
lookup_obj = pyapi.list_getitem(pylookups, partitionid) # borrowed
lookup = pyapi.to_native_value(LookupType(), lookup_obj).value
lookupproxy = context.make_helper(builder, LookupType(), value=lookup)
viewproxy = context.make_helper(builder, self.toArrayViewType())
viewproxy.pos = context.get_constant(numba.intp, 0)
viewproxy.start = context.get_constant(numba.intp, 0)
viewproxy.stop = viewlength
viewproxy.arrayptrs = context.make_helper(
builder, LookupType.arraytype, lookupproxy.arrayptrs
).data
viewproxy.sharedptrs = context.make_helper(
builder, LookupType.arraytype, lookupproxy.sharedptrs
).data
viewproxy.pylookup = lookup_obj
return viewproxy._getvalue()
@numba.extending.register_model(PartitionedViewType)
class PartitionedViewModel(numba.core.datamodel.models.StructModel):
def __init__(self, dmm, fe_type):
members = [
("pylookups", numba.types.pyobject),
("partitionid", numba.types.CPointer(numba.intp)),
("stops", fe_type.stopstype),
("view", numba.types.CPointer(fe_type.toArrayViewType())),
("start", numba.intp),
("stop", numba.intp),
]
super().__init__(dmm, fe_type, members)
@numba.extending.unbox(PartitionedViewType)
def unbox_PartitionedArray(partviewtype, arrayobj, c):
partview_obj = c.pyapi.object_getattr_string(arrayobj, "_numbaview")
out = unbox_PartitionedView(partviewtype, partview_obj, c)
c.pyapi.decref(partview_obj)
return out
def unbox_PartitionedView(partviewtype, partview_obj, c):
lookups_obj = c.pyapi.object_getattr_string(partview_obj, "lookups")
stops_obj = c.pyapi.object_getattr_string(partview_obj, "stops")
start_obj = c.pyapi.object_getattr_string(partview_obj, "start")
stop_obj = c.pyapi.object_getattr_string(partview_obj, "stop")
proxyout = c.context.make_helper(c.builder, partviewtype)
proxyout.pylookups = lookups_obj
partitionid = c.context.get_constant(numba.intp, 0)
proxyout.partitionid = numba.core.cgutils.alloca_once_value(c.builder, partitionid)
proxyout.stops = c.pyapi.to_native_value(partviewtype.stopstype, stops_obj).value
viewlength = partviewtype.lower_get_localstop(
c.context, c.builder, proxyout.stops, partitionid
)
proxyout.view = numba.core.cgutils.alloca_once_value(
c.builder,
partviewtype.lower_get_partitionid(
c.context, c.builder, c.pyapi, proxyout.pylookups, partitionid, viewlength
),
)
proxyout.start = c.pyapi.number_as_ssize_t(start_obj)
proxyout.stop = c.pyapi.number_as_ssize_t(stop_obj)
c.pyapi.decref(lookups_obj)
c.pyapi.decref(stops_obj)
c.pyapi.decref(start_obj)
c.pyapi.decref(stop_obj)
is_error = numba.core.cgutils.is_not_null(c.builder, c.pyapi.err_occurred())
return numba.extending.NativeValue(proxyout._getvalue(), is_error)
@numba.extending.box(PartitionedViewType)
def box_PartitionedArray(partviewtype, partviewval, c):
arrayview_obj = box_PartitionedView(partviewtype, partviewval, c)
out = c.pyapi.call_method(arrayview_obj, "toarray", ())
c.pyapi.decref(arrayview_obj)
return out
def box_PartitionedView(partviewtype, partviewval, c):
serializable2dict_obj = c.pyapi.unserialize(
c.pyapi.serialize_object(serializable2dict)
)
behavior2_obj = c.pyapi.unserialize(
c.pyapi.serialize_object(dict2serializable(partviewtype.behavior))
)
behavior_obj = c.pyapi.call_function_objargs(
serializable2dict_obj, (behavior2_obj,)
)
PartitionedView_obj = c.pyapi.unserialize(c.pyapi.serialize_object(PartitionedView))
type_obj = c.pyapi.unserialize(c.pyapi.serialize_object(partviewtype.type))
fields_obj = c.pyapi.unserialize(c.pyapi.serialize_object(partviewtype.fields))
proxyin = c.context.make_helper(c.builder, partviewtype, partviewval)
lookups_obj = proxyin.pylookups
stops_obj = c.pyapi.from_native_value(
partviewtype.stopstype, proxyin.stops, c.env_manager
)
start_obj = c.pyapi.long_from_ssize_t(proxyin.start)
stop_obj = c.pyapi.long_from_ssize_t(proxyin.stop)
out = c.pyapi.call_function_objargs(
PartitionedView_obj,
(
type_obj,
behavior_obj,
lookups_obj,
stops_obj,
start_obj,
stop_obj,
fields_obj,
),
)
c.pyapi.decref(serializable2dict_obj)
c.pyapi.decref(behavior2_obj)
c.pyapi.decref(behavior_obj)
c.pyapi.decref(PartitionedView_obj)
c.pyapi.decref(type_obj)
c.pyapi.decref(fields_obj)
c.pyapi.decref(stops_obj)
c.pyapi.decref(start_obj)
c.pyapi.decref(stop_obj)
return out
@numba.core.typing.templates.infer_global(operator.getitem)
class type_getitem_partitioned(numba.core.typing.templates.AbstractTemplate):
def generic(self, args, kwargs):
if (
len(args) == 2
and len(kwargs) == 0
and isinstance(args[0], PartitionedViewType)
):
partviewtype, wheretype = args
if isinstance(wheretype, numba.types.Integer):
arrayviewtype = partviewtype.toArrayViewType()
rettype = partviewtype.type.getitem_at_check(arrayviewtype)
return rettype(partviewtype, wheretype)
elif (
isinstance(wheretype, numba.types.SliceType) and not wheretype.has_step
):
return partviewtype(partviewtype, wheretype)
elif isinstance(wheretype, numba.types.StringLiteral):
rettype = partviewtype.getitem_field(wheretype.literal_value)
return rettype(partviewtype, wheretype)
else:
raise TypeError(
"only an integer, start:stop range, or a *constant* "
"field name string may be used as ak.Array "
"slices in compiled code" + ak._util.exception_suffix(__file__)
)
@numba.extending.lower_builtin(
operator.getitem, PartitionedViewType, numba.types.Integer
)
def lower_getitem_at_partitioned(context, builder, sig, args):
rettype, (partviewtype, wheretype) = sig.return_type, sig.args
partviewval, whereval = args
partviewproxy = context.make_helper(builder, partviewtype, partviewval)
length = builder.sub(partviewproxy.stop, partviewproxy.start)
regular_atval = numba.core.cgutils.alloca_once_value(builder, whereval)
with builder.if_then(
builder.icmp_signed("<", whereval, context.get_constant(numba.intp, 0))
):
builder.store(builder.add(whereval, length), regular_atval)
atval = builder.load(regular_atval)
with builder.if_then(
builder.or_(
builder.icmp_signed("<", atval, context.get_constant(numba.intp, 0)),
builder.icmp_signed(">=", atval, length),
)
):
context.call_conv.return_user_exc(
builder, ValueError, ("slice index out of bounds",)
)
localstart = partviewtype.lower_get_localstart(
context, builder, partviewproxy.stops, builder.load(partviewproxy.partitionid)
)
localstop = partviewtype.lower_get_localstop(
context, builder, partviewproxy.stops, builder.load(partviewproxy.partitionid)
)
with builder.if_then(
builder.not_(
builder.and_(
builder.icmp_signed("<=", localstart, atval),
builder.icmp_signed(">", atval, localstop),
)
),
likely=False,
):
searchsorted_sig = numba.intp(partviewtype.stopstype, wheretype)
searchsorted_args = (partviewproxy.stops, atval)
def searchsorted_impl(stops, where):
return ak.nplike.numpy.searchsorted(stops, where, side="right")
partitionid_val = context.compile_internal(
builder, searchsorted_impl, searchsorted_sig, searchsorted_args
)
builder.store(partitionid_val, partviewproxy.partitionid)
pyapi = context.get_python_api(builder)
gil = pyapi.gil_ensure()
builder.store(
partviewtype.lower_get_partitionid(
context,
builder,
pyapi,
partviewproxy.pylookups,
builder.load(partviewproxy.partitionid),
builder.sub(localstop, localstart),
),
partviewproxy.view,
)
pyapi.gil_release(gil)
viewtype = partviewtype.toArrayViewType()
viewval = builder.load(partviewproxy.view)
viewproxy = context.make_helper(builder, viewtype, value=viewval)
reallocalstart = partviewtype.lower_get_localstart(
context, builder, partviewproxy.stops, builder.load(partviewproxy.partitionid)
)
subatval = builder.sub(atval, reallocalstart)
return viewtype.type.lower_getitem_at_check(
context,
builder,
rettype,
viewtype,
viewval,
viewproxy,
numba.intp,
subatval,
False,
False,
)
@numba.extending.lower_builtin(
operator.getitem, PartitionedViewType, numba.types.slice2_type
)
def lower_getitem_range_partitioned(context, builder, sig, args):
_, (partviewtype, wheretype) = sig.return_type, sig.args
partviewval, whereval = args
whereproxy = context.make_helper(builder, wheretype, whereval)
start = whereproxy.start
stop = whereproxy.stop
partviewproxy = context.make_helper(builder, partviewtype, partviewval)
length = builder.sub(partviewproxy.stop, partviewproxy.start)
regular_start = numba.core.cgutils.alloca_once_value(builder, start)
regular_stop = numba.core.cgutils.alloca_once_value(builder, stop)
with builder.if_then(
builder.icmp_signed("<", start, context.get_constant(numba.intp, 0))
):
builder.store(builder.add(start, length), regular_start)
with builder.if_then(
builder.icmp_signed("<", stop, context.get_constant(numba.intp, 0))
):
builder.store(builder.add(stop, length), regular_stop)
with builder.if_then(
builder.icmp_signed(
"<", builder.load(regular_start), context.get_constant(numba.intp, 0)
)
):
builder.store(context.get_constant(numba.intp, 0), regular_start)
with builder.if_then(builder.icmp_signed(">", builder.load(regular_start), length)):
builder.store(length, regular_start)
with builder.if_then(
builder.icmp_signed(
"<", builder.load(regular_stop), builder.load(regular_start)
)
):
builder.store(builder.load(regular_start), regular_stop)
with builder.if_then(builder.icmp_signed(">", builder.load(regular_stop), length)):
builder.store(length, regular_stop)
proxyout = context.make_helper(builder, partviewtype)
proxyout.pylookups = partviewproxy.pylookups
proxyout.partitionid = numba.core.cgutils.alloca_once_value(
builder, builder.load(partviewproxy.partitionid)
)
proxyout.stops = partviewproxy.stops
proxyout.view = numba.core.cgutils.alloca_once_value(
builder, builder.load(partviewproxy.view)
)
proxyout.start = builder.load(regular_start)
proxyout.stop = builder.load(regular_stop)
if context.enable_nrt:
context.nrt.incref(builder, partviewtype.stopstype, proxyout.stops)
return proxyout._getvalue()
@numba.extending.lower_builtin(
operator.getitem, PartitionedViewType, numba.types.StringLiteral
)
def lower_getitem_field_partitioned(context, builder, sig, args):
_, (partviewtype, _) = sig.return_type, sig.args
partviewval, whereval = args
if context.enable_nrt:
partviewproxy = context.make_helper(builder, partviewtype, partviewval)
context.nrt.incref(builder, partviewtype.stopstype, partviewproxy.stops)
return partviewval
@numba.core.typing.templates.infer_global(len)
class type_len_partitioned(numba.core.typing.templates.AbstractTemplate):
def generic(self, args, kwargs):
if (
len(args) == 1
and len(kwargs) == 0
and isinstance(args[0], PartitionedViewType)
):
return numba.intp(args[0])
@numba.extending.lower_builtin(len, PartitionedViewType)
def lower_len_partitioned(context, builder, sig, args):
proxyin = context.make_helper(builder, sig.args[0], args[0])
return builder.sub(proxyin.stop, proxyin.start)
@numba.core.typing.templates.infer_getattr
class type_getattr_partitioned(numba.core.typing.templates.AttributeTemplate):
key = PartitionedViewType
def generic_resolve(self, partviewtype, attr):
if attr == "ndim":
return numba.intp
else:
return partviewtype.getitem_field(attr)
@numba.extending.lower_getattr_generic(PartitionedViewType)
def lower_getattr_generic_partitioned(
context, builder, partviewtype, partviewval, attr
):
if attr == "ndim":
return context.get_constant(numba.intp, partviewtype.type.ndim)
elif context.enable_nrt:
partviewproxy = context.make_helper(builder, partviewtype, partviewval)
context.nrt.incref(builder, partviewtype.stopstype, partviewproxy.stops)
return partviewval
class PartitionedIteratorType(numba.types.common.SimpleIteratorType):
def __init__(self, partviewtype):
super().__init__(
f"ak.PartitionedIterator({partviewtype.name})",
partviewtype.type.getitem_at_check(partviewtype.toArrayViewType()),
)
self.partviewtype = partviewtype
@numba.core.typing.templates.infer
class type_getiter_partitioned(numba.core.typing.templates.AbstractTemplate):
key = "getiter"
def generic(self, args, kwargs):
if (
len(args) == 1
and len(kwargs) == 0
and isinstance(args[0], PartitionedViewType)
):
return PartitionedIteratorType(args[0])(args[0])
@numba.core.datamodel.registry.register_default(PartitionedIteratorType)
class PartitionedIteratorModel(numba.core.datamodel.models.StructModel):
def __init__(self, dmm, fe_type):
members = [
("partview", fe_type.partviewtype),
("length", numba.intp),
("at", numba.types.EphemeralPointer(numba.intp)),
]
super().__init__(dmm, fe_type, members)
@numba.extending.lower_builtin("getiter", PartitionedViewType)
def lower_getiter_partitioned(context, builder, sig, args):
rettype, (partviewtype,) = sig.return_type, sig.args
(partviewval,) = args
partviewproxy = context.make_helper(builder, partviewtype, partviewval)
partitionid = context.get_constant(numba.intp, 0)
viewlength = partviewtype.lower_get_localstop(
context, builder, partviewproxy.stops, partitionid
)
partoutproxy = context.make_helper(builder, partviewtype)
partoutproxy.pylookups = partviewproxy.pylookups
partoutproxy.partitionid = numba.core.cgutils.alloca_once_value(
builder, partitionid
)
partoutproxy.stops = partviewproxy.stops
pyapi = context.get_python_api(builder)
gil = pyapi.gil_ensure()
partoutproxy.view = numba.core.cgutils.alloca_once_value(
builder,
partviewtype.lower_get_partitionid(
context, builder, pyapi, partviewproxy.pylookups, partitionid, viewlength
),
)
pyapi.gil_release(gil)
partoutproxy.start = partviewproxy.start
partoutproxy.stop = partviewproxy.stop
proxyout = context.make_helper(builder, rettype)
proxyout.partview = partoutproxy._getvalue()
proxyout.length = builder.sub(partviewproxy.stop, partviewproxy.start)
proxyout.at = numba.core.cgutils.alloca_once_value(
builder, context.get_constant(numba.intp, 0)
)
if context.enable_nrt:
context.nrt.incref(builder, partviewtype.stopstype, partoutproxy.stops)
return numba.core.imputils.impl_ret_new_ref(
context, builder, rettype, proxyout._getvalue()
)
@numba.extending.lower_builtin("iternext", PartitionedIteratorType)
@numba.core.imputils.iternext_impl(numba.core.imputils.RefType.BORROWED)
def lower_iternext_partitioned(context, builder, sig, args, result):
(itertype,) = sig.args
(iterval,) = args
proxyin = context.make_helper(builder, itertype, iterval)
partviewproxy = context.make_helper(
builder, itertype.partviewtype, proxyin.partview
)
at = builder.load(proxyin.at)
is_valid = builder.icmp_signed("<", at, proxyin.length)
result.set_valid(is_valid)
with builder.if_then(is_valid, likely=True):
maybestop = itertype.partviewtype.lower_get_localstop(
context,
builder,
partviewproxy.stops,
builder.load(partviewproxy.partitionid),
)
with builder.if_then(builder.icmp_signed("==", at, maybestop)):
builder.store(
builder.add(
builder.load(partviewproxy.partitionid),
context.get_constant(numba.intp, 1),
),
partviewproxy.partitionid,
)
localstart = itertype.partviewtype.lower_get_localstart(
context,
builder,
partviewproxy.stops,
builder.load(partviewproxy.partitionid),
)
localstop = itertype.partviewtype.lower_get_localstop(
context,
builder,
partviewproxy.stops,
builder.load(partviewproxy.partitionid),
)
pyapi = context.get_python_api(builder)
gil = pyapi.gil_ensure()
builder.store(
itertype.partviewtype.lower_get_partitionid(
context,
builder,
pyapi,
partviewproxy.pylookups,
builder.load(partviewproxy.partitionid),
builder.sub(localstop, localstart),
),
partviewproxy.view,
)
pyapi.gil_release(gil)
realstart = itertype.partviewtype.lower_get_localstart(
context,
builder,
partviewproxy.stops,
builder.load(partviewproxy.partitionid),
)
outview = builder.load(partviewproxy.view)
outviewtype = itertype.partviewtype.toArrayViewType()
outviewproxy = context.make_helper(builder, outviewtype, outview)
result.yield_(
itertype.partviewtype.type.lower_getitem_at_check(
context,
builder,
itertype.partviewtype.type.getitem_at_check(outviewtype),
outviewtype,
outview,
outviewproxy,
numba.intp,
builder.sub(at, realstart),
False,
False,
)
)
nextat = numba.core.cgutils.increment_index(builder, at)
builder.store(nextat, proxyin.at)
```
#### File: _v2/_connect/avro.py
```python
import json
import numpy as np
import awkward as ak
import awkward.forth
class _ReachedEndofArrayError(Exception):
pass
class ReadAvroFT:
def __init__(self, file, limit_entries, debug_forth=False):
self.data = file
self.blocks = 0
self.marker = 0
self.is_primitive = False
numbytes = 1024
self.temp_header = bytearray()
self.metadata = {}
while True:
try:
self.temp_header += self.data.read(numbytes)
if not self.check_valid():
raise ak._v2._util.error(
TypeError("invalid Avro file: first 4 bytes are not b'Obj\x01'")
)
pos = 4
pos, self.pairs = self.decode_varint(4, self.temp_header)
self.pairs = self.decode_zigzag(self.pairs)
if self.pairs < 0:
pos, self.header_size = self.decode_varint(pos, self.temp_header)
                    self.header_size = self.decode_zigzag(self.header_size)
self.pairs = abs(self.pairs)
pos = self.cont_spec(pos)
break
except _ReachedEndofArrayError: # noqa: AK101
numbytes *= 2
ind = 2
self.update_pos(pos)
exec_code = []
init_code = [": init-out\n"]
header_code = "input stream \n"
(
self.form,
self.exec_code,
self.form_next_id,
declarations,
form_keys,
init_code,
container,
) = self.rec_exp_json_code(
self.metadata["avro.schema"], exec_code, ind, 0, [], [], init_code, {}
)
first_iter = True
init_code.append(";\n")
self.update_pos(17)
header_code = header_code + "".join(declarations)
init_code = "".join(init_code)
exec_code.insert(0, "0 do \n")
exec_code.append("\nloop")
exec_code = "".join(exec_code)
forth_code = f"""
{header_code}
{init_code}
{exec_code}"""
if debug_forth:
print(forth_code) # noqa: T201
machine = awkward.forth.ForthMachine64(forth_code)
break_flag = False
while True:
try:
pos, num_items, len_block = self.decode_block()
temp_data = self.data.read(len_block)
if len(temp_data) < len_block:
raise _ReachedEndofArrayError # noqa: AK101
self.update_pos(len_block)
except _ReachedEndofArrayError: # noqa: AK101
break
if limit_entries is not None and self.blocks > limit_entries:
temp_diff = int(self.blocks - limit_entries)
self.blocks -= temp_diff
num_items -= temp_diff
break_flag = True
else:
pass
if first_iter:
machine.begin({"stream": np.frombuffer(temp_data, dtype=np.uint8)})
machine.stack_push(num_items)
machine.call("init-out")
machine.resume()
first_iter = False
else:
machine.begin_again(
{"stream": np.frombuffer(temp_data, dtype=np.uint8)}, True
)
machine.stack_push(num_items)
machine.resume()
self.update_pos(16)
if break_flag:
break
for elem in form_keys:
if "offsets" in elem:
container[elem] = machine.output_Index64(elem)
else:
container[elem] = machine.output_NumpyArray(elem)
self.outcontents = (self.form, self.blocks, container)
def update_pos(self, pos):
self.marker += pos
self.data.seek(self.marker)
def decode_block(self):
temp_data = self.data.read(10)
pos, info = self.decode_varint(0, temp_data)
info1 = self.decode_zigzag(info)
self.update_pos(pos)
self.blocks += int(info1)
temp_data = self.data.read(10)
pos, info = self.decode_varint(0, temp_data)
info2 = self.decode_zigzag(info)
self.update_pos(pos)
return pos, info1, info2
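    # Note (hedged reading of the Avro container format): each data block begins with
    # two zigzag varints -- the number of items in the block (info1) and the byte
    # length of the serialized payload (info2). The caller then reads that many bytes
    # and skips the 16-byte sync marker via self.update_pos(16).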
def cont_spec(self, pos):
temp_count = 0
while temp_count < self.pairs:
pos, dat = self.decode_varint(pos, self.temp_header)
dat = self.decode_zigzag(dat)
key = self.temp_header[pos : pos + int(dat)]
pos = pos + int(dat)
if len(key) < int(dat):
raise _ReachedEndofArrayError # noqa: AK101
pos, dat = self.decode_varint(pos, self.temp_header)
dat = self.decode_zigzag(dat)
val = self.temp_header[pos : pos + int(dat)]
pos = pos + int(dat)
if len(val) < int(dat):
raise _ReachedEndofArrayError # noqa: AK101
if key == b"avro.schema":
self.metadata[key.decode()] = json.loads(val.decode())
else:
self.metadata[key.decode()] = val
temp_count += 1
return pos
def check_valid(self):
init = self.temp_header[0:4]
if len(init) < 4:
raise _ReachedEndofArrayError # noqa: AK101
return init == b"Obj\x01"
def decode_varint(self, pos, _data):
shift = 0
result = 0
while True:
if pos >= len(_data):
raise _ReachedEndofArrayError # noqa: AK101
i = _data[pos]
pos += 1
result |= (i & 0x7F) << shift
shift += 7
if not i & 0x80:
break
return pos, result
def decode_zigzag(self, n):
return (n >> 1) ^ (-(n & 1))
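    # Worked example (hedged): Avro varints are little-endian groups of 7 bits with
    # the high bit as a continuation flag, and signed values are zigzag encoded.
    # For the bytes b"\x96\x01":
    #     0x96 -> low 7 bits = 0x16 (22), continuation bit set
    #     0x01 -> contributes 1 << 7 = 128, so decode_varint returns (pos=2, 150)
    #     decode_zigzag(150) == (150 >> 1) ^ -(150 & 1) == 75
    # Neither helper reads self, so both behave as pure functions of their arguments.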
def dum_dat(self, dtype, count):
if dtype["type"] == "int":
return f"0 node{count}-data <- stack "
elif dtype["type"] == "long":
return f"0 node{count}-data <- stack "
elif dtype["type"] == "float":
return f"0 node{count}-data <- stack "
elif dtype["type"] == "double":
return f"0 node{count}-data <- stack "
elif dtype["type"] == "boolean":
return f"0 node{count}-data <- stack "
elif dtype["type"] == "bytes":
return f"1 node{count}-offsets +<- stack 97 node{count+1}-data <- stack "
elif dtype["type"] == "string":
return f"0 node{count}-offsets +<- stack "
elif dtype["type"] == "enum":
return f"0 node{count}-index <- stack "
else:
raise AssertionError # noqa: AK101
def rec_exp_json_code(
self,
file,
exec_code,
ind,
form_next_id,
declarations,
form_keys,
init_code,
container,
):
if isinstance(file, (str, list)):
file = {"type": file}
if file["type"] == "null":
aform = ak._v2.forms.IndexedOptionForm(
"i64",
ak._v2.forms.EmptyForm(form_key=f"node{form_next_id+1}"),
form_key=f"node{form_next_id}",
)
declarations.append(f"output node{form_next_id+1}-data uint8 \n")
declarations.append(f"output node{form_next_id}-index int64 \n")
form_keys.append(f"node{form_next_id+1}-data")
form_keys.append(f"node{form_next_id}-index")
exec_code.append(
"\n" + " " * ind + f"-1 node{form_next_id}-index <- stack"
)
exec_code.append(
"\n" + " " * ind + f"0 node{form_next_id+1}-data <- stack"
)
return (
aform,
exec_code,
form_next_id,
declarations,
form_keys,
init_code,
container,
)
elif file["type"] == "record":
temp = form_next_id
aformcont = []
aformfields = []
for elem in file["fields"]:
aformfields.append(elem["name"])
(
aform,
exec_code,
form_next_id,
declarations,
form_keys,
init_code,
container,
) = self.rec_exp_json_code(
elem,
exec_code,
ind,
form_next_id + 1,
declarations,
form_keys,
init_code,
container,
)
aformcont.append(aform)
aform = ak._v2.forms.RecordForm(
aformcont, aformfields, form_key=f"node{temp}"
)
return (
aform,
exec_code,
form_next_id,
declarations,
form_keys,
init_code,
container,
)
elif file["type"] == "string":
aform = ak._v2.forms.ListOffsetForm(
"i64",
ak._v2.forms.NumpyForm(
"uint8",
parameters={"__array__": "char"},
form_key=f"node{form_next_id+1}",
),
form_key=f"node{form_next_id}",
)
declarations.append(f"output node{form_next_id+1}-data uint8 \n")
declarations.append(f"output node{form_next_id}-offsets int64 \n")
form_keys.append(f"node{form_next_id+1}-data")
form_keys.append(f"node{form_next_id}-offsets")
init_code.append(f"0 node{form_next_id}-offsets <- stack\n")
if self.is_primitive:
exec_code.append("\n" + " " * ind + "0 do")
exec_code.append("\n" + " " * ind + "stream zigzag-> stack\n")
exec_code.append(
"\n" + " " * ind + f"dup node{form_next_id}-offsets +<- stack\n"
)
exec_code.append(
"\n" + " " * (ind + 1) + f"stream #B-> node{form_next_id+1}-data"
)
if self.is_primitive:
exec_code.append("\n" + " " * ind + "loop")
return (
aform,
exec_code,
form_next_id + 1,
declarations,
form_keys,
init_code,
container,
)
elif file["type"] == "int":
aform = ak._v2.forms.NumpyForm(
primitive="int32", form_key=f"node{form_next_id}"
)
declarations.append(f"output node{form_next_id}-data int32 \n")
form_keys.append(f"node{form_next_id}-data")
if self.is_primitive:
exec_code.append(
"\n" + " " * ind + f"stream #zigzag-> node{form_next_id}-data"
)
else:
exec_code.append(
"\n" + " " * ind + f"stream zigzag-> node{form_next_id}-data"
)
return (
aform,
exec_code,
form_next_id,
declarations,
form_keys,
init_code,
container,
)
elif file["type"] == "long":
aform = ak._v2.forms.NumpyForm("int64", form_key=f"node{form_next_id}")
form_keys.append(f"node{form_next_id}-data")
declarations.append(f"output node{form_next_id}-data int64 \n")
if self.is_primitive:
exec_code.append(
"\n" + " " * ind + f"stream #zigzag-> node{form_next_id}-data"
)
else:
exec_code.append(
"\n" + " " * ind + f"stream zigzag-> node{form_next_id}-data"
)
return (
aform,
exec_code,
form_next_id,
declarations,
form_keys,
init_code,
container,
)
elif file["type"] == "float":
aform = ak._v2.forms.NumpyForm("float32", form_key=f"node{form_next_id}")
declarations.append(f"output node{form_next_id}-data float32 \n")
form_keys.append(f"node{form_next_id}-data")
if self.is_primitive:
exec_code.append(
"\n" + " " * ind + f"stream #f-> node{form_next_id}-data"
)
else:
exec_code.append(
"\n" + " " * ind + f"stream f-> node{form_next_id}-data"
)
return (
aform,
exec_code,
form_next_id,
declarations,
form_keys,
init_code,
container,
)
elif file["type"] == "double":
aform = ak._v2.forms.NumpyForm("float64", form_key=f"node{form_next_id}")
declarations.append(f"output node{form_next_id}-data float64 \n")
form_keys.append(f"node{form_next_id}-data")
if self.is_primitive:
exec_code.append(
"\n" + " " * ind + f"stream #d-> node{form_next_id}-data"
)
else:
exec_code.append(
"\n" + " " * ind + f"stream d-> node{form_next_id}-data"
)
return (
aform,
exec_code,
form_next_id,
declarations,
form_keys,
init_code,
container,
)
elif file["type"] == "boolean":
aform = ak._v2.forms.NumpyForm("bool", form_key=f"node{form_next_id}")
declarations.append(f"output node{form_next_id}-data bool\n")
form_keys.append(f"node{form_next_id}-data")
if self.is_primitive:
exec_code.append(
"\n" + " " * ind + f"stream #?-> node{form_next_id}-data"
)
else:
exec_code.append(
"\n" + " " * ind + f"stream ?-> node{form_next_id}-data"
)
return (
aform,
exec_code,
form_next_id,
declarations,
form_keys,
init_code,
container,
)
elif file["type"] == "bytes":
declarations.append(f"output node{form_next_id+1}-data uint8\n")
declarations.append(f"output node{form_next_id}-offsets int64\n")
form_keys.append(f"node{form_next_id+1}-data")
form_keys.append(f"node{form_next_id}-offsets")
aform = ak._v2.forms.ListOffsetForm(
"i64",
ak._v2.forms.NumpyForm(
"uint8",
form_key=f"node{form_next_id+1}",
parameters={"__array__": "byte"},
),
parameters={"__array__": "bytestring"},
form_key=f"node{form_next_id}",
)
init_code.append(f"0 node{form_next_id}-offsets <- stack\n")
exec_code.append("\n" + " " * ind + "stream zigzag-> stack\n")
exec_code.append(
"\n" + " " * ind + f"dup node{form_next_id}-offsets +<- stack\n"
)
exec_code.append(
"\n" + " " * (ind + 1) + f"stream #B-> node{form_next_id+1}-data"
)
return (
aform,
exec_code,
form_next_id + 1,
declarations,
form_keys,
init_code,
container,
)
elif isinstance(file["type"], list):
flag = 0
type_idx = ""
temp = form_next_id
null_present = False
out = len(file["type"])
for elem in file["type"]:
if isinstance(elem, dict) and elem["type"] == "record":
flag = 1
else:
flag = 0
if "null" in file["type"] and flag == 0 and out == 2:
declarations.append(f"output node{form_next_id}-mask int8\n")
form_keys.append(f"node{form_next_id}-mask")
type_idx = "null_non_record"
elif "null" in file["type"] and flag == 1:
declarations.append(f"output node{form_next_id}-index int64\n")
form_keys.append(f"node{form_next_id}-index")
type_idx = "null_record"
else:
for elem in file["type"]:
if elem == "null":
declarations.append(f"output node{form_next_id}-mask int8\n")
form_keys.append(f"node{form_next_id}-mask")
flag = 1
mask_idx = form_next_id
form_next_id = form_next_id + 1
null_present = True
declarations.append(f"output node{form_next_id}-tags int8\n")
form_keys.append(f"node{form_next_id}-tags")
declarations.append(f"output node{form_next_id}-index int64\n")
form_keys.append(f"node{form_next_id}-index")
union_idx = form_next_id
type_idx = "no_null"
exec_code.append("\n" + " " * (ind) + "stream zigzag-> stack case")
if type_idx == "null_non_record":
temp = form_next_id
dum_idx = 0
idxx = file["type"].index("null")
if out == 2:
dum_idx = 1 - idxx
elif out > 2:
if idxx == 0:
dum_idx = 1
else:
dum_idx = idxx - 1
for i in range(out):
if file["type"][i] == "null":
if isinstance(file["type"][dum_idx], dict):
aa = (
"\n"
+ " " * (ind + 1)
+ self.dum_dat(file["type"][dum_idx], temp + 1)
)
else:
aa = (
"\n"
+ " " * (ind + 1)
+ self.dum_dat(
{"type": file["type"][dum_idx]}, temp + 1
)
)
exec_code.append(
"\n"
+ " " * (ind)
+ f"{i} of 0 node{temp}-mask <- stack {aa} endof"
)
else:
exec_code.append(
"\n" + " " * (ind) + f"{i} of 1 node{temp}-mask <- stack"
)
(
aform1,
exec_code,
form_next_id,
declarations,
form_keys,
init_code,
container,
) = self.rec_exp_json_code(
{"type": file["type"][i]},
exec_code,
ind + 1,
form_next_id + 1,
declarations,
form_keys,
init_code,
container,
)
exec_code.append(" endof")
aform = ak._v2.forms.ByteMaskedForm(
"i8", aform1, True, form_key=f"node{temp}"
)
if type_idx == "null_record":
temp = form_next_id
idxx = file["type"].index("null")
if out == 2:
dum_idx = 1 - idxx
elif out > 2:
if idxx == 0:
dum_idx = 1
else:
dum_idx = 0
idxx = file["type"].index("null")
for i in range(out):
if file["type"][i] == "null":
exec_code.append(
"\n"
+ " " * (ind)
+ f"{i} of -1 node{temp}-index <- stack endof"
)
else:
exec_code.append(
"\n"
+ " " * (ind)
+ f"{i} of countvar{form_next_id}{i} @ node{form_next_id}-index <- stack 1 countvar{form_next_id}{i} +! "
)
init_code.append(f"variable countvar{form_next_id}{i}\n")
(
aform1,
exec_code,
form_next_id,
declarations,
form_keys,
init_code,
container,
) = self.rec_exp_json_code(
{"type": file["type"][i]},
exec_code,
ind + 1,
form_next_id + 1,
declarations,
form_keys,
init_code,
container,
)
exec_code.append("\nendof")
aform = ak._v2.forms.IndexedOptionForm(
"i64", aform1, form_key=f"node{temp}"
)
if type_idx == "no_null":
if null_present:
idxx = file["type"].index("null")
if out == 2:
dum_idx = 1 - idxx
elif out > 2:
if idxx == 0:
dum_idx = 1
else:
dum_idx = idxx - 1
temp = form_next_id
temp_forms = []
for i in range(out):
if file["type"][i] == "null":
exec_code.append(
"\n"
+ " " * (ind)
+ f"{i} of 0 node{mask_idx}-mask <- stack 0 node{union_idx}-tags <- stack 0 node{union_idx}-index <- stack endof"
)
else:
if null_present:
exec_code.append(
"\n"
+ " " * (ind)
+ f"{i} of 1 node{mask_idx}-mask <- stack {i} node{union_idx}-tags <- stack"
)
else:
exec_code.append(
"\n"
+ " " * (ind)
+ f"{i} of {i} node{union_idx}-tags <- stack 1 countvar{form_next_id}{i} +!"
)
init_code.append(f"variable countvar{form_next_id}{i} \n")
exec_code.append(
"\n"
+ " " * (ind + 1)
+ f"countvar{form_next_id}{i} @ node{union_idx}-index <- stack"
)
exec_code.append(
"\n"
+ " " * (ind + 1)
+ f"1 countvar{form_next_id}{i} +!"
)
(
aform1,
exec_code,
form_next_id,
declarations,
form_keys,
init_code,
container,
) = self.rec_exp_json_code(
{"type": file["type"][i]},
exec_code,
ind + 1,
form_next_id + 1,
declarations,
form_keys,
init_code,
container,
)
temp_forms.append(aform1)
exec_code.append("\n endof")
if null_present:
aform = ak._v2.forms.ByteMaskedForm(
"i8",
ak._v2.forms.UnionForm(
"i8", "i64", temp_forms, form_key=f"node{union_idx}"
),
True,
form_key=f"node{mask_idx}",
)
else:
aform = ak._v2.forms.UnionForm(
"i8", "i64", aform1, form_key=f"node{mask_idx}"
)
exec_code.append("\n" + " " * (ind + 1) + "endcase")
return (
aform,
exec_code,
form_next_id,
declarations,
form_keys,
init_code,
container,
)
elif isinstance(file["type"], dict):
(
aform,
exec_code,
form_next_id,
declarations,
form_keys,
init_code,
container,
) = self.rec_exp_json_code(
file["type"],
exec_code,
ind,
form_next_id,
declarations,
form_keys,
init_code,
container,
)
return (
aform,
exec_code,
form_next_id,
declarations,
form_keys,
init_code,
container,
)
elif file["type"] == "fixed":
form_keys.append(f"node{form_next_id+1}-data")
declarations.append(f"output node{form_next_id+1}-data uint8 \n")
aform = ak._v2.forms.RegularForm(
ak._v2.forms.NumpyForm(
"uint8",
form_key=f"node{form_next_id+1}",
parameters={"__array__": "byte"},
),
parameters={"__array__": "bytestring"},
size=file["size"],
form_key=f"node{form_next_id}",
)
temp = file["size"]
exec_code.append(
"\n" + " " * ind + f"{temp} stream #B-> node{form_next_id+1}-data"
)
return (
aform,
exec_code,
form_next_id + 1,
declarations,
form_keys,
init_code,
container,
)
elif file["type"] == "enum":
aform = ak._v2.forms.IndexedForm(
"i64",
ak._v2.forms.ListOffsetForm(
"i64",
ak._v2.forms.NumpyForm(
"uint8",
parameters={"__array__": "char"},
form_key=f"node{form_next_id+2}",
),
form_key=f"node{form_next_id+1}",
),
parameters={"__array__": "categorical"},
form_key=f"node{form_next_id}",
)
form_keys.append(f"node{form_next_id}-index")
declarations.append(f"output node{form_next_id}-index int64 \n")
tempar = file["symbols"]
offset, dat = [0], []
prev = 0
for x in tempar:
offset.append(len(x) + prev)
prev = offset[-1]
for elem in x:
dat.append(np.uint8(ord(elem)))
container[f"node{form_next_id+1}-offsets"] = np.array(
offset, dtype=np.int64
)
container[f"node{form_next_id+2}-data"] = np.array(dat, dtype=np.uint8)
exec_code.append(
"\n" + " " * ind + f"stream zigzag-> node{form_next_id}-index"
)
return (
aform,
exec_code,
form_next_id + 2,
declarations,
form_keys,
init_code,
container,
)
elif file["type"] == "array":
temp = form_next_id
declarations.append(f"output node{form_next_id}-offsets int64\n")
init_code.append(f"0 node{form_next_id}-offsets <- stack\n")
exec_code.append("\n" + " " * ind + "stream zigzag-> stack")
exec_code.append("\n" + " " * ind + "dup 0 <")
exec_code.append(
"\n" + " " * ind + "if stream zigzag-> stack drop negate then"
)
exec_code.append(
"\n" + " " * ind + f"dup node{form_next_id}-offsets +<- stack"
)
if isinstance(file["items"], str):
self.is_primitive = True
else:
exec_code.append("\n" + " " * ind + "0 do")
form_keys.append(f"node{form_next_id}-offsets")
(
aformtemp,
exec_code,
form_next_id,
declarations,
form_keys,
init_code,
container,
) = self.rec_exp_json_code(
{"type": file["items"]},
exec_code,
ind + 1,
form_next_id + 1,
declarations,
form_keys,
init_code,
container,
)
if self.is_primitive:
self.is_primitive = False
else:
exec_code.append("\n" + " " * ind + "loop")
exec_code.append("\n" + " " * ind + "1 stream skip")
aform = ak._v2.forms.ListOffsetForm(
"i64", aformtemp, form_key=f"node{temp}"
)
return (
aform,
exec_code,
form_next_id,
declarations,
form_keys,
init_code,
container,
)
elif file["type"] == "map":
# print(file["name"])
# exec_code.append("\npos, inn = decode_varint(pos,fields)"
# exec_code.append("\nout = abs(decode_zigzag(inn))"
# exec_code.append("\nprint(\"length\",out)"
# exec_code.append("\nfor i in range(out):"
# exec_code.append("\n"+" "*(ind+1)+"print(\"{{\")"
# exec_code = exec_code+aa
# exec_code = exec_code+bb
# exec_code = exec_code+ccfa
# exec_code = exec_code+dd
# exec_code = exec_code+ee
# pos,exec_code,count = self.rec_exp_json_code({"type": file["values"]},fields,pos,exec_code,ind+1, aform,count+1)
# exec_code.append("\n "*(ind+1)+"print(\":\")"
# exec_code = exec_code+ff
# pos,exec_code,count = self.rec_exp_json_code({"type": file["values"]},fields,pos,exec_code,ind+1, aform,count+1)
# exec_code.append("\n"+" "*(ind+1)+"print(\"}}\")"
# exec_code.append("\n"+" "*ind+"pos, inn = decode_varint(pos,fields)"
# jj = "\n"+" "*ind+"out = decode_zigzag(inn)"
# kk = "\n"+" "*ind+'''if out != 0:
# raise
# '''
# exec_code = exec_code+gg
# exec_code = exec_code+hh
# exec_code = exec_code+jj
# exec_code = exec_code+kk
raise ak._v2._util.error(NotImplementedError)
```
#### File: _v2/operations/ak_copy.py
```python
import awkward as ak
np = ak.nplike.NumpyMetadata.instance()
# @ak._v2._connect.numpy.implements("copy")
def copy(array):
"""
Returns a deep copy of the array (no memory shared with original).
This is identical to `np.copy` and `copy.deepcopy`.
It's only useful to explicitly copy an array if you're going to change it
in-place. This doesn't come up often because Awkward Arrays are immutable.
That is to say, the Awkward Array library doesn't have any operations that
change an array in-place, but the data in the array might be owned by another
library that can change it in-place.
For example, if the array comes from NumPy:
>>> underlying_array = np.array([1.1, 2.2, 3.3, 4.4, 5.5])
>>> wrapper = ak.Array(underlying_array)
>>> duplicate = ak.copy(wrapper)
>>> underlying_array[2] = 123
>>> underlying_array
array([ 1.1, 2.2, 123. , 4.4, 5.5])
>>> wrapper
<Array [1.1, 2.2, 123, 4.4, 5.5] type='5 * float64'>
>>> duplicate
<Array [1.1, 2.2, 3.3, 4.4, 5.5] type='5 * float64'>
There is an exception to this rule: you can add fields to records in an
#ak.Array in-place. However, this changes the #ak.Array wrapper without
affecting the underlying layout data (it *replaces* its layout), so a
shallow copy will do:
>>> original = ak.Array([{"x": 1}, {"x": 2}, {"x": 3}])
>>> shallow_copy = copy.copy(original)
>>> shallow_copy["y"] = original.x**2
>>> shallow_copy
<Array [{x: 1, y: 1}, ... y: 4}, {x: 3, y: 9}] type='3 * {"x": int64, "y": int64}'>
>>> original
<Array [{x: 1}, {x: 2}, {x: 3}] type='3 * {"x": int64}'>
This is key to Awkward Array's efficiency (memory and speed): operations that
only change part of a structure re-use pieces from the original ("structural
sharing"). Changing data in-place would result in many surprising long-distance
changes, so we don't support it. However, an #ak.Array's data might come from
a mutable third-party library, so this function allows you to make a true copy.
"""
with ak._v2._util.OperationErrorContext(
"ak._v2.fill_none",
dict(array=array),
):
return _impl(array)
def _impl(array):
layout = ak._v2.operations.to_layout(
array,
allow_record=True,
allow_other=False,
)
return ak._v2._util.wrap(layout.deep_copy(), ak._v2._util.behavior_of(array))
```
#### File: _v2/operations/ak_fill_none.py
```python
import numbers
from collections.abc import Iterable
import awkward as ak
np = ak.nplike.NumpyMetadata.instance()
def fill_none(array, value, axis=-1, highlevel=True, behavior=None):
"""
Args:
array: Data in which to replace None with a given value.
value: Data with which to replace None.
axis (None or int): If None, replace all None values in the array
            with the given value; if an int, the dimension at which this
operation is applied. The outermost dimension is `0`, followed
by `1`, etc., and negative values count backward from the
innermost: `-1` is the innermost dimension, `-2` is the next
level up, etc.
highlevel (bool): If True, return an #ak.Array; otherwise, return
a low-level #ak.layout.Content subclass.
behavior (None or dict): Custom #ak.behavior for the output array, if
high-level.
Replaces missing values (None) with a given `value`.
For example, in the following `array`,
ak.Array([[1.1, None, 2.2], [], [None, 3.3, 4.4]])
The None values could be replaced with `0` by
>>> ak.fill_none(array, 0)
<Array [[1.1, 0, 2.2], [], [0, 3.3, 4.4]] type='3 * var * float64'>
The replacement value doesn't strictly need the same type as the
surrounding data. For example, the None values could also be replaced
by a string.
>>> ak.fill_none(array, "hi")
<Array [[1.1, 'hi', 2.2], ... ['hi', 3.3, 4.4]] type='3 * var * union[float64, s...'>
The list content now has a union type:
>>> ak.type(ak.fill_none(array, "hi"))
3 * var * union[float64, string]
The values could be floating-point numbers or strings.
"""
with ak._v2._util.OperationErrorContext(
"ak._v2.fill_none",
dict(
array=array, value=value, axis=axis, highlevel=highlevel, behavior=behavior
),
):
return _impl(array, value, axis, highlevel, behavior)
def _impl(array, value, axis, highlevel, behavior):
arraylayout = ak._v2.operations.to_layout(
array, allow_record=True, allow_other=False
)
nplike = ak.nplike.of(arraylayout)
# Convert value type to appropriate layout
if (
isinstance(value, np.ndarray)
and issubclass(value.dtype.type, (np.bool_, np.number))
and len(value.shape) != 0
):
valuelayout = ak._v2.operations.to_layout(
nplike.asarray(value)[np.newaxis], allow_record=False, allow_other=False
)
elif isinstance(value, (bool, numbers.Number, np.bool_, np.number)) or (
isinstance(value, np.ndarray)
and issubclass(value.dtype.type, (np.bool_, np.number))
):
valuelayout = ak._v2.operations.to_layout(
nplike.asarray(value), allow_record=False, allow_other=False
)
elif (
isinstance(value, Iterable)
and not (isinstance(value, (str, bytes)))
or isinstance(value, (ak._v2.highlevel.Record, ak._v2.record.Record))
):
valuelayout = ak._v2.operations.to_layout(
value, allow_record=True, allow_other=False
)
if isinstance(valuelayout, ak._v2.record.Record):
valuelayout = valuelayout.array[valuelayout.at : valuelayout.at + 1]
elif len(valuelayout) == 0:
offsets = ak._v2.index.Index64(
nplike.array([0, 0], dtype=np.int64), nplike=nplike
)
valuelayout = ak._v2.contents.ListOffsetArray(offsets, valuelayout)
else:
valuelayout = ak._v2.contents.RegularArray(valuelayout, len(valuelayout), 1)
else:
valuelayout = ak._v2.operations.to_layout(
[value], allow_record=False, allow_other=False
)
def maybe_fillna(layout):
if layout.is_OptionType:
return layout.fill_none(valuelayout)
else:
return layout
if axis is None:
def action(layout, depth, depth_context, **kwargs):
layout = maybe_fillna(layout)
else:
def action(layout, depth, depth_context, **kwargs):
posaxis = layout.axis_wrap_if_negative(depth_context["posaxis"])
depth_context["posaxis"] = posaxis
if posaxis + 1 < depth:
return layout
elif posaxis + 1 == depth:
return maybe_fillna(layout)
depth_context = {"posaxis": axis}
out = arraylayout.recursively_apply(action, depth_context=depth_context)
return ak._v2._util.wrap(
out, ak._v2._util.behavior_of(array, behavior=behavior), highlevel
)
```
#### File: _v2/operations/ak_from_avro_file.py
```python
import awkward as ak
import pathlib
np = ak.nplike.NumpyMetadata.instance()
def from_avro_file(
file, debug_forth=False, limit_entries=None, highlevel=True, behavior=None
):
"""
Args:
file (string or fileobject): Avro file to be read as Awkward Array.
debug_forth (bool): If True, prints the generated Forth code for debugging.
limit_entries (int): The number of rows of the Avro file to be read into the Awkward Array.
highlevel (bool): If True, return an #ak.Array; otherwise, return
a low-level #ak.layout.Content subclass.
behavior (None or dict): Custom #ak.behavior for the output array, if
high-level.
Reads Avro files as Awkward Arrays.
    Internally, this function uses the AwkwardForth DSL: it recursively parses the Avro schema, generates
    an Awkward form and Forth code specific to that Avro file, and then reads the data.
"""
import awkward._v2._connect.avro
with ak._v2._util.OperationErrorContext(
"ak._v2.from_avro_file",
dict(
file=file,
highlevel=highlevel,
behavior=behavior,
debug_forth=debug_forth,
limit_entries=limit_entries,
),
):
if isinstance(file, pathlib.Path):
file = str(file)
if isinstance(file, str):
try:
with open(file, "rb") as opened_file:
form, length, container = awkward._v2._connect.avro.ReadAvroFT(
opened_file, limit_entries, debug_forth
).outcontents
return _impl(form, length, container, highlevel, behavior)
            except FileNotFoundError:
                raise ak._v2._util.error(
                    FileNotFoundError(
                        "the filename is incorrect or the file does not exist"
                    )
                )
else:
if not hasattr(file, "read"):
raise ak._v2._util.error(
TypeError("the fileobject provided is not of the correct type.")
)
else:
form, length, container = awkward._v2._connect.avro.ReadAvroFT(
file, limit_entries, debug_forth
                ).outcontents
return _impl(form, length, container, highlevel, behavior)
def _impl(form, length, container, highlevel, behavior):
return ak._v2.from_buffers(
form=form,
length=length,
container=container,
highlevel=highlevel,
behavior=behavior,
)
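# Hedged usage sketch ("events.avro" is a hypothetical path, not part of this module):
#
#     import awkward as ak
#
#     array = ak._v2.from_avro_file("events.avro", limit_entries=1000)
#
#     # or with an already-open binary file object:
#     with open("events.avro", "rb") as f:
#         array = ak._v2.from_avro_file(f)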
```
#### File: _v2/operations/ak_to_arrow.py
```python
import awkward as ak
np = ak.nplike.NumpyMetadata.instance()
def to_arrow(
array,
list_to32=False,
string_to32=False,
bytestring_to32=False,
emptyarray_to=None,
categorical_as_dictionary=False,
extensionarray=True,
count_nulls=True,
):
"""
Args:
array: Array-like data (anything #ak.to_layout recognizes).
list_to32 (bool): If True, convert Awkward lists into 32-bit Arrow lists
if they're small enough, even if it means an extra conversion. Otherwise,
signed 32-bit #ak.types.ListType maps to Arrow `ListType`,
signed 64-bit #ak.types.ListType maps to Arrow `LargeListType`,
and unsigned 32-bit #ak.types.ListType picks whichever Arrow type its
values fit into.
string_to32 (bool): Same as the above for Arrow `string` and `large_string`.
bytestring_to32 (bool): Same as the above for Arrow `binary` and `large_binary`.
emptyarray_to (None or dtype): If None, #ak.types.UnknownType maps to Arrow's
null type; otherwise, it is converted to the given numeric dtype.
categorical_as_dictionary (bool): If True, #ak.layout.IndexedArray and
#ak.layout.IndexedOptionArray labeled with `__array__ = "categorical"`
are mapped to Arrow `DictionaryArray`; otherwise, the projection is
evaluated before conversion (always the case without
`__array__ = "categorical"`).
extensionarray (bool): If True, this function returns extended Arrow arrays
(at all levels of nesting), which preserve metadata so that Awkward →
Arrow → Awkward preserves the array's #ak.types.Type (though not
the #ak.forms.Form). If False, this function returns generic Arrow arrays
that might be needed for third-party tools that don't recognize Arrow's
extensions. Even with `extensionarray=False`, the values produced by
Arrow's `to_pylist` method are the same as the values produced by Awkward's
#ak.to_list.
count_nulls (bool): If True, count the number of missing values at each level
and include these in the resulting Arrow array, which makes some downstream
applications faster. If False, skip the up-front cost of counting them.
Converts an Awkward Array into an Apache Arrow array.
This produces arrays of type `pyarrow.Array`. You might need further
manipulation (using the pyarrow library) to build a `pyarrow.ChunkedArray`,
a `pyarrow.RecordBatch`, or a `pyarrow.Table`. For the latter, see #ak.to_arrow_table.
This function always preserves the values of a dataset; i.e. the Python objects
returned by #ak.to_list are identical to the Python objects returned by Arrow's
`to_pylist` method. With `extensionarray=True`, this function also preserves the
data type (high-level #ak.types.Type, though not the low-level #ak.forms.Form),
even through Parquet, making Parquet a good way to save Awkward Arrays for later
use. If any third-party tools don't recognize Arrow's extension arrays, set this
option to False for plain Arrow arrays.
See also #ak.from_arrow, #ak.to_arrow_table, #ak.to_parquet, #ak.from_arrow_schema.
"""
with ak._v2._util.OperationErrorContext(
"ak._v2.to_arrow",
dict(
array=array,
list_to32=list_to32,
string_to32=string_to32,
bytestring_to32=bytestring_to32,
emptyarray_to=emptyarray_to,
categorical_as_dictionary=categorical_as_dictionary,
extensionarray=extensionarray,
count_nulls=count_nulls,
),
):
return _impl(
array,
list_to32,
string_to32,
bytestring_to32,
emptyarray_to,
categorical_as_dictionary,
extensionarray,
count_nulls,
)
def _impl(
array,
list_to32,
string_to32,
bytestring_to32,
emptyarray_to,
categorical_as_dictionary,
extensionarray,
count_nulls,
):
layout = ak._v2.operations.to_layout(array, allow_record=True, allow_other=False)
if isinstance(layout, ak._v2.record.Record):
layout = layout.array[layout.at : layout.at + 1]
record_is_scalar = True
else:
record_is_scalar = False
return layout.to_arrow(
list_to32=list_to32,
string_to32=string_to32,
bytestring_to32=bytestring_to32,
emptyarray_to=emptyarray_to,
categorical_as_dictionary=categorical_as_dictionary,
extensionarray=extensionarray,
count_nulls=count_nulls,
record_is_scalar=record_is_scalar,
)
```
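A brief usage sketch (added for illustration; it assumes `pyarrow` is installed) contrasting the default extension-array output with plain Arrow arrays:
```python
import awkward as ak

ak_array = ak._v2.Array([[1.1, 2.2, 3.3], [], [4.4, 5.5]])

# Default: Arrow extension arrays that can round-trip the Awkward type.
arrow_ext = ak._v2.operations.to_arrow(ak_array)

# Plain Arrow arrays, e.g. for tools that don't understand Arrow's extensions.
arrow_plain = ak._v2.operations.to_arrow(ak_array, extensionarray=False)

# As the docstring notes, the values are identical either way.
assert arrow_ext.to_pylist() == arrow_plain.to_pylist()
```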
#### File: _v2/operations/ak_validity_error.py
```python
import awkward as ak
def validity_error(array, exception=False):
"""
Args:
array (#ak.Array, #ak.Record, #ak.layout.Content, #ak.layout.Record, #ak.ArrayBuilder, #ak.layout.ArrayBuilder):
Array or record to check.
exception (bool): If True, validity errors raise exceptions.
Returns an empty string if there are no errors and a str containing the error message
if there are.
Checks for errors in the structure of the array, such as indexes that run
beyond the length of a node's `content`, etc. Either an error is raised or
a string describing the error is returned.
See also #ak.is_valid.
"""
with ak._v2._util.OperationErrorContext(
"ak._v2.validity_error",
dict(array=array, exception=exception),
):
return _impl(array, exception)
def _impl(array, exception):
layout = ak._v2.operations.to_layout(array, allow_record=False, allow_other=False)
out = layout.validity_error(path="highlevel")
if out not in (None, "") and exception:
raise ak._v2._util.error(ValueError(out))
else:
return out
```
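A small usage sketch (illustrative only): an empty or missing message means the layout is sound, and `exception=True` turns any non-empty message into a raised `ValueError`:
```python
import awkward as ak

array = ak._v2.Array([[1.1, 2.2, 3.3], [], [None, 4.4]])

message = ak._v2.validity_error(array)
assert not message  # empty result: no structural problems were found

# Same check, but a problem would be raised instead of returned.
ak._v2.validity_error(array, exception=True)
```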
#### File: tests/v2/test_0046-start-indexedarray.py
```python
import pytest # noqa: F401
import numpy as np # noqa: F401
import awkward as ak # noqa: F401
to_list = ak._v2.operations.to_list
def test_basic():
content = ak._v2.contents.NumpyArray(np.array([0.0, 1.1, 2.2, 3.3, 4.4]))
ind = np.array([2, 2, 0, 3, 4], dtype=np.int32)
index = ak._v2.index.Index32(ind)
array = ak._v2.contents.IndexedArray(index, content)
assert to_list(array) == [2.2, 2.2, 0.0, 3.3, 4.4]
ind[3] = 1
assert to_list(array) == [2.2, 2.2, 0.0, 1.1, 4.4]
ind = np.array([2, 2, 0, 3, 4], dtype=np.uint32)
index = ak._v2.index.IndexU32(ind)
array = ak._v2.contents.IndexedArray(index, content)
assert to_list(array) == [2.2, 2.2, 0.0, 3.3, 4.4]
ind[3] = 1
assert to_list(array) == [2.2, 2.2, 0.0, 1.1, 4.4]
ind = np.array([2, 2, 0, 3, 4], dtype=np.int64)
index = ak._v2.index.Index64(ind)
array = ak._v2.contents.IndexedArray(index, content)
assert to_list(array) == [2.2, 2.2, 0.0, 3.3, 4.4]
ind[3] = 1
assert to_list(array) == [2.2, 2.2, 0.0, 1.1, 4.4]
ind = np.array([2, 2, 0, 3, 4], dtype=np.int32)
index = ak._v2.index.Index32(ind)
array = ak._v2.contents.IndexedOptionArray(index, content)
assert to_list(array) == [2.2, 2.2, 0.0, 3.3, 4.4]
ind[3] = 1
assert to_list(array) == [2.2, 2.2, 0.0, 1.1, 4.4]
ind = np.array([2, 2, 0, 3, 4], dtype=np.int64)
index = ak._v2.index.Index64(ind)
array = ak._v2.contents.IndexedOptionArray(index, content)
assert to_list(array) == [2.2, 2.2, 0.0, 3.3, 4.4]
ind[3] = 1
assert to_list(array) == [2.2, 2.2, 0.0, 1.1, 4.4]
def test_type():
content = ak._v2.contents.NumpyArray(np.array([0.0, 1.1, 2.2, 3.3, 4.4]))
index = ak._v2.index.Index32(np.array([2, 2, 0, 3, 4], dtype=np.int32))
array = ak._v2.contents.IndexedArray(index, content)
assert ak._v2.operations.type(array) == ak._v2.types.NumpyType("float64")
array = ak._v2.contents.IndexedOptionArray(index, content)
assert ak._v2.operations.type(array) == ak._v2.types.OptionType(
ak._v2.types.NumpyType("float64")
)
def test_null():
content = ak._v2.contents.NumpyArray(np.array([0.0, 1.1, 2.2, 3.3, 4.4]))
index = ak._v2.index.Index64(np.array([2, 2, 0, -1, 4], dtype=np.int64))
array = ak._v2.contents.IndexedOptionArray(index, content)
assert to_list(array) == [2.2, 2.2, 0.0, None, 4.4]
def test_carry():
content = ak._v2.contents.NumpyArray(np.array([0.0, 1.1, 2.2, 3.3, 4.4]))
index = ak._v2.index.Index64(np.array([2, 2, 0, 3, 4], dtype=np.int64))
indexedarray = ak._v2.contents.IndexedArray(index, content)
offsets = ak._v2.index.Index64(np.array([0, 3, 3, 5], dtype=np.int64))
listoffsetarray = ak._v2.contents.ListOffsetArray(offsets, indexedarray)
assert to_list(listoffsetarray) == [[2.2, 2.2, 0.0], [], [3.3, 4.4]]
assert to_list(listoffsetarray[::-1]) == [[3.3, 4.4], [], [2.2, 2.2, 0.0]]
assert listoffsetarray.typetracer[::-1].form == listoffsetarray[::-1].form
assert to_list(listoffsetarray[[2, 0]]) == [[3.3, 4.4], [2.2, 2.2, 0.0]]
assert listoffsetarray.typetracer[[2, 0]].form == listoffsetarray[[2, 0]].form
assert to_list(listoffsetarray[[2, 0], 1]) == [4.4, 2.2] # invokes carry
assert listoffsetarray.typetracer[[2, 0], 1].form == listoffsetarray[[2, 0], 1].form
assert to_list(listoffsetarray[2:, 1]) == [4.4] # invokes carry
assert listoffsetarray.typetracer[2:, 1].form == listoffsetarray[2:, 1].form
index = ak._v2.index.Index64(np.array([2, 2, 0, 3, -1], dtype=np.int64))
indexedarray = ak._v2.contents.IndexedOptionArray(index, content)
listoffsetarray = ak._v2.contents.ListOffsetArray(offsets, indexedarray)
assert to_list(listoffsetarray) == [[2.2, 2.2, 0.0], [], [3.3, None]]
assert to_list(listoffsetarray[::-1]) == [[3.3, None], [], [2.2, 2.2, 0.0]]
assert to_list(listoffsetarray[[2, 0]]) == [[3.3, None], [2.2, 2.2, 0.0]]
assert to_list(listoffsetarray[[2, 0], 1]) == [None, 2.2] # invokes carry
assert to_list(listoffsetarray[2:, 1]) == [None] # invokes carry
def test_others():
content = ak._v2.contents.NumpyArray(
np.array(
[[0.0, 0.0], [0.1, 1.0], [0.2, 2.0], [0.3, 3.0], [0.4, 4.0], [0.5, 5.0]]
)
)
index = ak._v2.index.Index64(np.array([4, 0, 3, 1, 3], dtype=np.int64))
indexedarray = ak._v2.contents.IndexedArray(index, content)
assert indexedarray[3, 0] == 0.1
assert indexedarray[3, 1] == 1.0
assert to_list(indexedarray[3, ::-1]) == [1.0, 0.1]
assert indexedarray.typetracer[3, ::-1].form == indexedarray[3, ::-1].form
assert to_list(indexedarray[3, [1, 1, 0]]) == [1.0, 1.0, 0.1]
assert indexedarray.typetracer[3, [1, 1, 0]].form == indexedarray[3, [1, 1, 0]].form
assert to_list(indexedarray[3:, 0]) == [0.1, 0.3]
assert indexedarray.typetracer[3:, 0].form == indexedarray[3:, 0].form
assert to_list(indexedarray[3:, 1]) == [1.0, 3.0]
assert indexedarray.typetracer[3:, 1].form == indexedarray[3:, 1].form
assert to_list(indexedarray[3:, ::-1]) == [[1.0, 0.1], [3.0, 0.3]]
assert indexedarray.typetracer[3:, ::-1].form == indexedarray[3:, ::-1].form
assert to_list(indexedarray[3:, [1, 1, 0]]) == [[1.0, 1.0, 0.1], [3.0, 3.0, 0.3]]
assert (
indexedarray.typetracer[3:, [1, 1, 0]].form == indexedarray[3:, [1, 1, 0]].form
)
def test_missing():
content = ak._v2.contents.NumpyArray(
np.array(
[[0.0, 0.0], [0.1, 1.0], [0.2, 2.0], [0.3, 3.0], [0.4, 4.0], [0.5, 5.0]]
)
)
index = ak._v2.index.Index64(np.array([4, 0, 3, -1, 3], dtype=np.int64))
indexedarray = ak._v2.contents.IndexedOptionArray(index, content)
assert to_list(indexedarray[3:, 0]) == [None, 0.3]
assert indexedarray.typetracer[3:, 0].form == indexedarray[3:, 0].form
assert to_list(indexedarray[3:, 1]) == [None, 3.0]
assert indexedarray.typetracer[3:, 1].form == indexedarray[3:, 1].form
assert to_list(indexedarray[3:, ::-1]) == [None, [3.0, 0.3]]
assert indexedarray.typetracer[3:, ::-1].form == indexedarray[3:, ::-1].form
assert to_list(indexedarray[3:, [1, 1, 0]]) == [None, [3.0, 3.0, 0.3]]
assert (
indexedarray.typetracer[3:, [1, 1, 0]].form == indexedarray[3:, [1, 1, 0]].form
)
def test_builder():
assert to_list(
ak._v2.highlevel.Array([1.1, 2.2, 3.3, None, 4.4], check_valid=True)
) == [
1.1,
2.2,
3.3,
None,
4.4,
]
assert to_list(
ak._v2.highlevel.Array([None, 2.2, 3.3, None, 4.4], check_valid=True)
) == [
None,
2.2,
3.3,
None,
4.4,
]
assert to_list(
ak._v2.highlevel.Array([[1.1, 2.2, 3.3], [], [None, 4.4]], check_valid=True)
) == [[1.1, 2.2, 3.3], [], [None, 4.4]]
assert to_list(
ak._v2.highlevel.Array(
[[1.1, 2.2, 3.3], [], None, [None, 4.4]], check_valid=True
)
) == [[1.1, 2.2, 3.3], [], None, [None, 4.4]]
assert to_list(
ak._v2.highlevel.Array(
[[1.1, 2.2, 3.3], None, [], [None, 4.4]], check_valid=True
)
) == [[1.1, 2.2, 3.3], None, [], [None, 4.4]]
assert to_list(
ak._v2.highlevel.Array(
[[1.1, 2.2, 3.3], None, [], [None, 4.4]], check_valid=True
)
) != [[1.1, 2.2, 3.3], [], None, [None, 4.4]]
assert to_list(
ak._v2.highlevel.Array(
[[None, 1.1, 2.2, 3.3], [], [None, 4.4]], check_valid=True
)
) == [[None, 1.1, 2.2, 3.3], [], [None, 4.4]]
assert to_list(
ak._v2.highlevel.Array(
[[None, 1.1, 2.2, 3.3], [], None, [None, 4.4]], check_valid=True
)
) == [[None, 1.1, 2.2, 3.3], [], None, [None, 4.4]]
assert to_list(
ak._v2.highlevel.Array(
[[None, 1.1, 2.2, 3.3], None, [], [None, 4.4]], check_valid=True
)
) == [[None, 1.1, 2.2, 3.3], None, [], [None, 4.4]]
assert to_list(
ak._v2.highlevel.Array(
[[None, 1.1, 2.2, 3.3], None, [], [None, 4.4]], check_valid=True
)
) != [[None, 1.1, 2.2, 3.3], [], None, [None, 4.4]]
assert to_list(
ak._v2.highlevel.Array(
[None, [1.1, 2.2, 3.3], [], [None, 4.4]], check_valid=True
)
) == [None, [1.1, 2.2, 3.3], [], [None, 4.4]]
assert to_list(
ak._v2.highlevel.Array(
[None, [1.1, 2.2, 3.3], [], None, [None, 4.4]], check_valid=True
)
) == [None, [1.1, 2.2, 3.3], [], None, [None, 4.4]]
assert to_list(
ak._v2.highlevel.Array(
[None, [1.1, 2.2, 3.3], None, [], [None, 4.4]], check_valid=True
)
) == [None, [1.1, 2.2, 3.3], None, [], [None, 4.4]]
assert to_list(
ak._v2.highlevel.Array(
[None, [1.1, 2.2, 3.3], None, [], [None, 4.4]], check_valid=True
)
) != [None, [1.1, 2.2, 3.3], [], None, [None, 4.4]]
assert to_list(
ak._v2.highlevel.Array([None, None, None, None, None], check_valid=True)
) == [
None,
None,
None,
None,
None,
]
assert to_list(
ak._v2.highlevel.Array([[None, None, None], [], [None, None]], check_valid=True)
) == [[None, None, None], [], [None, None]]
def test_json():
assert to_list(
ak._v2.highlevel.Array("[1.1, 2.2, 3.3, null, 4.4]", check_valid=True)
) == [
1.1,
2.2,
3.3,
None,
4.4,
]
assert to_list(
ak._v2.highlevel.Array("[null, 2.2, 3.3, null, 4.4]", check_valid=True)
) == [
None,
2.2,
3.3,
None,
4.4,
]
assert to_list(
ak._v2.highlevel.Array("[[1.1, 2.2, 3.3], [], [null, 4.4]]", check_valid=True)
) == [[1.1, 2.2, 3.3], [], [None, 4.4]]
assert to_list(
ak._v2.highlevel.Array(
"[[1.1, 2.2, 3.3], [], null, [null, 4.4]]", check_valid=True
)
) == [[1.1, 2.2, 3.3], [], None, [None, 4.4]]
assert to_list(
ak._v2.highlevel.Array(
"[[1.1, 2.2, 3.3], null, [], [null, 4.4]]", check_valid=True
)
) == [[1.1, 2.2, 3.3], None, [], [None, 4.4]]
assert to_list(
ak._v2.highlevel.Array(
"[[1.1, 2.2, 3.3], null, [], [null, 4.4]]", check_valid=True
)
) != [[1.1, 2.2, 3.3], [], None, [None, 4.4]]
assert to_list(
ak._v2.highlevel.Array(
"[[null, 1.1, 2.2, 3.3], [], [null, 4.4]]", check_valid=True
)
) == [[None, 1.1, 2.2, 3.3], [], [None, 4.4]]
assert to_list(
ak._v2.highlevel.Array(
"[[null, 1.1, 2.2, 3.3], [], null, [null, 4.4]]", check_valid=True
)
) == [[None, 1.1, 2.2, 3.3], [], None, [None, 4.4]]
assert to_list(
ak._v2.highlevel.Array(
"[[null, 1.1, 2.2, 3.3], null, [], [null, 4.4]]", check_valid=True
)
) == [[None, 1.1, 2.2, 3.3], None, [], [None, 4.4]]
assert to_list(
ak._v2.highlevel.Array(
"[[null, 1.1, 2.2, 3.3], null, [], [null, 4.4]]", check_valid=True
)
) != [[None, 1.1, 2.2, 3.3], [], None, [None, 4.4]]
assert to_list(
ak._v2.highlevel.Array(
"[null, [1.1, 2.2, 3.3], [], [null, 4.4]]", check_valid=True
)
) == [None, [1.1, 2.2, 3.3], [], [None, 4.4]]
assert to_list(
ak._v2.highlevel.Array(
"[null, [1.1, 2.2, 3.3], [], null, [null, 4.4]]", check_valid=True
)
) == [None, [1.1, 2.2, 3.3], [], None, [None, 4.4]]
assert to_list(
ak._v2.highlevel.Array(
"[null, [1.1, 2.2, 3.3], null, [], [null, 4.4]]", check_valid=True
)
) == [None, [1.1, 2.2, 3.3], None, [], [None, 4.4]]
assert to_list(
ak._v2.highlevel.Array(
"[null, [1.1, 2.2, 3.3], null, [], [null, 4.4]]", check_valid=True
)
) != [None, [1.1, 2.2, 3.3], [], None, [None, 4.4]]
assert to_list(
ak._v2.highlevel.Array("[null, null, null, null, null]", check_valid=True)
) == [
None,
None,
None,
None,
None,
]
assert to_list(
ak._v2.highlevel.Array(
"[[null, null, null], [], [null, null]]", check_valid=True
)
) == [[None, None, None], [], [None, None]]
```
#### File: tests/v2/test_0674-categorical-validation.py
```python
import pytest # noqa: F401
import numpy as np # noqa: F401
import awkward as ak # noqa: F401
pyarrow = pytest.importorskip("pyarrow")
def test_categorical_is_valid():
# validate a categorical array by its content
arr = ak._v2.Array([2019, 2020, 2021, 2020, 2019])
categorical = ak._v2.behaviors.categorical.to_categorical(arr)
assert ak._v2.operations.is_valid(categorical)
def test_optional_categorical_from_arrow():
# construct categorical array from option-typed DictionaryArray
indices = pyarrow.array([0, 1, 0, 1, 2, 0, 2])
nan_indices = pyarrow.array([0, 1, 0, 1, 2, None, 0, 2])
dictionary = pyarrow.array([2019, 2020, 2021])
dict_array = pyarrow.DictionaryArray.from_arrays(indices, dictionary)
categorical_array = ak._v2.operations.from_arrow(dict_array)
assert categorical_array.layout.parameter("__array__") == "categorical"
option_dict_array = pyarrow.DictionaryArray.from_arrays(nan_indices, dictionary)
option_categorical_array = ak._v2.operations.from_arrow(option_dict_array)
assert option_categorical_array.layout.parameter("__array__") == "categorical"
def test_categorical_from_arrow_ChunkedArray():
indices = [0, 1, 0, 1, 2, 0, 2]
indices_new_schema = [0, 1, 0, 1, 0]
dictionary = pyarrow.array([2019, 2020, 2021])
dictionary_new_schema = pyarrow.array([2019, 2020])
dict_array = pyarrow.DictionaryArray.from_arrays(pyarrow.array(indices), dictionary)
dict_array_new_schema = pyarrow.DictionaryArray.from_arrays(
pyarrow.array(indices_new_schema), dictionary_new_schema
)
batch = pyarrow.RecordBatch.from_arrays([dict_array], ["year"])
batch_new_schema = pyarrow.RecordBatch.from_arrays(
[dict_array_new_schema], ["year"]
)
batches = [batch] * 3
batches_mixed_schema = [batch] + [batch_new_schema]
table = pyarrow.Table.from_batches(batches)
table_mixed_schema = pyarrow.Table.from_batches(batches_mixed_schema)
array = ak._v2.operations.from_arrow(table)
array_mixed_schema = ak._v2.operations.from_arrow(table_mixed_schema)
assert np.asarray(array.layout.contents[0].index).tolist() == indices * 3
assert (
np.asarray(array_mixed_schema.layout.contents[0].index).tolist()
== indices + indices_new_schema
)
```
#### File: tests/v2/test_0835-datetime-type-pyarrow.py
```python
import datetime
import pytest # noqa: F401
import numpy as np # noqa: F401
import awkward as ak # noqa: F401
pyarrow = pytest.importorskip("pyarrow")
to_list = ak._v2.operations.to_list
def test_from_arrow():
import awkward._v2._connect.pyarrow
array = awkward._v2._connect.pyarrow.handle_arrow(
pyarrow.array(
[datetime.datetime(2002, 1, 23), datetime.datetime(2019, 2, 20)],
type=pyarrow.date64(),
)
)
assert to_list(array) == [
np.datetime64("2002-01-23T00:00:00.000"),
np.datetime64("2019-02-20T00:00:00.000"),
]
array = awkward._v2._connect.pyarrow.handle_arrow(
pyarrow.array(
[datetime.datetime(2002, 1, 23), datetime.datetime(2019, 2, 20)],
type=pyarrow.date32(),
)
)
assert to_list(array) == [
np.datetime64("2002-01-23"),
np.datetime64("2019-02-20"),
]
array = awkward._v2._connect.pyarrow.handle_arrow(
pyarrow.array(
[datetime.time(1, 0, 0), datetime.time(2, 30, 0)],
type=pyarrow.time64("us"),
)
)
assert to_list(array) == [
np.datetime64("1970-01-01T01:00:00.000"),
np.datetime64("1970-01-01T02:30:00.000"),
]
array = awkward._v2._connect.pyarrow.handle_arrow(
pyarrow.array(
[datetime.time(1, 0, 0), datetime.time(2, 30, 0)],
type=pyarrow.time64("ns"),
)
)
assert to_list(array) == [3600000000000, 9000000000000]
array = awkward._v2._connect.pyarrow.handle_arrow(
pyarrow.array(
[datetime.time(1, 0, 0), datetime.time(2, 30, 0)],
type=pyarrow.time32("s"),
)
)
assert to_list(array) == [
np.datetime64("1970-01-01T01:00:00.000"),
np.datetime64("1970-01-01T02:30:00.000"),
]
array = awkward._v2._connect.pyarrow.handle_arrow(
pyarrow.array(
[datetime.time(1, 0, 0), datetime.time(2, 30, 0)],
type=pyarrow.time32("ms"),
)
)
assert to_list(array) == [
np.datetime64("1970-01-01T01:00:00.000"),
np.datetime64("1970-01-01T02:30:00.000"),
]
array = awkward._v2._connect.pyarrow.handle_arrow(
pyarrow.array(
[datetime.datetime(2002, 1, 23), datetime.datetime(2019, 2, 20)],
type=pyarrow.timestamp("s"),
)
)
assert to_list(array) == [
np.datetime64("2002-01-23T00:00:00.000"),
np.datetime64("2019-02-20T00:00:00.000"),
]
array = awkward._v2._connect.pyarrow.handle_arrow(
pyarrow.array(
[datetime.datetime(2002, 1, 23), datetime.datetime(2019, 2, 20)],
type=pyarrow.timestamp("ms"),
)
)
assert to_list(array) == [
np.datetime64("2002-01-23T00:00:00.000"),
np.datetime64("2019-02-20T00:00:00.000"),
]
array = awkward._v2._connect.pyarrow.handle_arrow(
pyarrow.array(
[datetime.datetime(2002, 1, 23), datetime.datetime(2019, 2, 20)],
type=pyarrow.timestamp("us"),
)
)
assert to_list(array) == [
np.datetime64("2002-01-23T00:00:00.000"),
np.datetime64("2019-02-20T00:00:00.000"),
]
array = awkward._v2._connect.pyarrow.handle_arrow(
pyarrow.array(
[datetime.datetime(2002, 1, 23), datetime.datetime(2019, 2, 20)],
type=pyarrow.timestamp("ns"),
)
)
assert to_list(array) == [1011744000000000000, 1550620800000000000]
array = awkward._v2._connect.pyarrow.handle_arrow(
pyarrow.array(
[datetime.timedelta(5), datetime.timedelta(10)],
type=pyarrow.duration("s"),
)
)
assert to_list(array) == [
np.timedelta64(5, "D"),
np.timedelta64(10, "D"),
]
array = awkward._v2._connect.pyarrow.handle_arrow(
pyarrow.array(
[datetime.timedelta(5), datetime.timedelta(10)],
type=pyarrow.duration("ms"),
)
)
assert to_list(array) == [
np.timedelta64(5, "D"),
np.timedelta64(10, "D"),
]
array = awkward._v2._connect.pyarrow.handle_arrow(
pyarrow.array(
[datetime.timedelta(5), datetime.timedelta(10)],
type=pyarrow.duration("us"),
)
)
assert to_list(array) == [
np.timedelta64(5, "D"),
np.timedelta64(10, "D"),
]
array = awkward._v2._connect.pyarrow.handle_arrow(
pyarrow.array(
[datetime.timedelta(5), datetime.timedelta(10)],
type=pyarrow.duration("ns"),
)
)
assert to_list(array) == [432000000000000, 864000000000000]
```
#### File: tests/v2/test_1055-fill_none-numpy-dimension.py
```python
import pytest # noqa: F401
import numpy as np # noqa: F401
import awkward as ak # noqa: F401
def test():
a = ak._v2.operations.values_astype(ak.Array([1, None]), np.float32)
assert ak._v2.operations.fill_none(a, np.float32(0)).tolist() == [1, 0]
assert str(ak._v2.operations.fill_none(a, np.float32(0)).type) == "2 * float32"
assert ak._v2.operations.fill_none(a, np.array(0, np.float32)).tolist() == [1, 0]
assert (
str(ak._v2.operations.fill_none(a, np.array(0, np.float32)).type)
== "2 * float32"
)
assert ak._v2.operations.fill_none(a, np.array([0], np.float32)).tolist() == [
1,
[0],
]
assert (
str(ak._v2.operations.fill_none(a, np.array([0], np.float32)).type)
== "2 * union[float32, 1 * float32]"
)
assert ak._v2.operations.fill_none(a, np.array([[0]], np.float32)).tolist() == [
1,
[[0]],
]
assert (
str(ak._v2.operations.fill_none(a, np.array([[0]], np.float32)).type)
== "2 * union[float32, 1 * 1 * float32]"
)
assert ak._v2.operations.fill_none(a, 0).tolist() == [1, 0]
assert str(ak._v2.operations.fill_none(a, 0).type) == "2 * float64"
assert ak._v2.operations.fill_none(a, [0]).tolist() == [1, [0]]
assert (
str(ak._v2.operations.fill_none(a, [0]).type) == "2 * union[float32, 1 * int64]"
)
assert ak._v2.operations.fill_none(a, [[0]]).tolist() == [1, [[0]]]
assert (
str(ak._v2.operations.fill_none(a, [[0]]).type)
== "2 * union[float32, 1 * var * int64]"
)
```
#### File: tests/v2/test_1136-regulararray-zeros-in-shape.py
```python
import pytest # noqa: F401
import numpy as np # noqa: F401
import awkward as ak # noqa: F401
def test_toRegularArray():
assert (
str(
ak._v2.operations.type(
ak._v2.highlevel.Array(
ak._v2.operations.from_numpy(
np.empty((2, 3, 5, 7, 11), np.float64)
).layout.toRegularArray()
)
)
)
== "2 * 3 * 5 * 7 * 11 * float64"
)
assert (
str(
ak._v2.operations.type(
ak._v2.highlevel.Array(
ak._v2.operations.from_numpy(
np.empty((2, 3, 5, 7, 11, 0), np.float64)
).layout.toRegularArray()
)
)
)
== "2 * 3 * 5 * 7 * 11 * 0 * float64"
)
assert (
str(
ak._v2.operations.type(
ak._v2.highlevel.Array(
ak._v2.operations.from_numpy(
np.empty((2, 3, 5, 7, 0, 11), np.float64)
).layout.toRegularArray()
)
)
)
== "2 * 3 * 5 * 7 * 0 * 11 * float64"
)
assert (
str(
ak._v2.operations.type(
ak._v2.highlevel.Array(
ak._v2.operations.from_numpy(
np.empty((2, 3, 5, 0, 7, 11), np.float64)
).layout.toRegularArray()
)
)
)
== "2 * 3 * 5 * 0 * 7 * 11 * float64"
)
assert (
str(
ak._v2.operations.type(
ak._v2.highlevel.Array(
ak._v2.operations.from_numpy(
np.empty((2, 3, 0, 5, 7, 11), np.float64)
).layout.toRegularArray()
)
)
)
== "2 * 3 * 0 * 5 * 7 * 11 * float64"
)
assert (
str(
ak._v2.operations.type(
ak._v2.highlevel.Array(
ak._v2.operations.from_numpy(
np.empty((2, 0, 3, 5, 7, 11), np.float64)
).layout.toRegularArray()
)
)
)
== "2 * 0 * 3 * 5 * 7 * 11 * float64"
)
assert (
str(
ak._v2.operations.type(
ak._v2.highlevel.Array(
ak._v2.operations.from_numpy(
np.empty((0, 2, 3, 5, 7, 11), np.float64)
).layout.toRegularArray()
)
)
)
== "0 * 2 * 3 * 5 * 7 * 11 * float64"
)
assert (
str(
ak._v2.operations.type(
ak._v2.highlevel.Array(
ak._v2.operations.from_numpy(
np.empty((2, 3, 5, 7, 0, 11, 0), np.float64)
).layout.toRegularArray()
)
)
)
== "2 * 3 * 5 * 7 * 0 * 11 * 0 * float64"
)
assert (
str(
ak._v2.operations.type(
ak._v2.highlevel.Array(
ak._v2.operations.from_numpy(
np.empty((2, 3, 5, 0, 7, 11, 0), np.float64)
).layout.toRegularArray()
)
)
)
== "2 * 3 * 5 * 0 * 7 * 11 * 0 * float64"
)
assert (
str(
ak._v2.operations.type(
ak._v2.highlevel.Array(
ak._v2.operations.from_numpy(
np.empty((2, 3, 0, 5, 7, 0, 11), np.float64)
).layout.toRegularArray()
)
)
)
== "2 * 3 * 0 * 5 * 7 * 0 * 11 * float64"
)
assert (
str(
ak._v2.operations.type(
ak._v2.highlevel.Array(
ak._v2.operations.from_numpy(
np.empty((2, 0, 3, 5, 7, 0, 11), np.float64)
).layout.toRegularArray()
)
)
)
== "2 * 0 * 3 * 5 * 7 * 0 * 11 * float64"
)
assert (
str(
ak._v2.operations.type(
ak._v2.highlevel.Array(
ak._v2.operations.from_numpy(
np.empty((0, 2, 3, 5, 7, 0, 11), np.float64)
).layout.toRegularArray()
)
)
)
== "0 * 2 * 3 * 5 * 7 * 0 * 11 * float64"
)
def test_actual():
x = ak._v2.operations.from_numpy(
np.arange(2 * 3 * 4, dtype=np.int64).reshape(2, 3, 4)
)
s = x[..., :0]
result = ak._v2.operations.zip({"q": s, "t": s})
assert str(ak._v2.operations.type(result)) == "2 * 3 * 0 * {q: int64, t: int64}"
```
#### File: tests/v2/test_1149-datetime-sort.py
```python
import pytest # noqa: F401
import numpy as np # noqa: F401
import awkward as ak # noqa: F401
import datetime
to_list = ak._v2.operations.to_list
def test_date_time():
numpy_array = np.array(
["2020-07-27T10:41:11", "2019-01-01", "2020-01-01"], "datetime64[s]"
)
array = ak._v2.contents.NumpyArray(numpy_array)
assert str(array.form.type) == "datetime64[s]"
assert to_list(array) == [
np.datetime64("2020-07-27T10:41:11"),
np.datetime64("2019-01-01T00:00:00"),
np.datetime64("2020-01-01T00:00:00"),
]
for i in range(len(array)):
assert array[i] == numpy_array[i]
date_time = np.datetime64("2020-07-27T10:41:11.200000011", "us")
array1 = ak._v2.contents.NumpyArray(
np.array(["2020-07-27T10:41:11.200000011"], "datetime64[us]")
)
assert np.datetime64(array1[0], "us") == date_time
assert to_list(ak._v2.operations.from_iter(array1)) == [
np.datetime64("2020-07-27T10:41:11.200000")
]
def test_date_time_sort_argsort_unique():
numpy_array = np.array(
["2020-07-27T10:41:11", "2019-01-01", "2020-01-01"], "datetime64[s]"
)
array = ak._v2.contents.NumpyArray(numpy_array)
assert to_list(array.sort()) == [
datetime.datetime(2019, 1, 1, 0, 0),
datetime.datetime(2020, 1, 1, 0, 0),
datetime.datetime(2020, 7, 27, 10, 41, 11),
]
assert to_list(array.argsort()) == [1, 2, 0]
assert array.is_unique() is True
assert to_list(array.unique()) == [
datetime.datetime(2019, 1, 1, 0, 0),
datetime.datetime(2020, 1, 1, 0, 0),
datetime.datetime(2020, 7, 27, 10, 41, 11),
]
def test_time_delta_sort_argsort_unique():
numpy_array = np.array(["41", "1", "20"], "timedelta64[D]")
array = ak._v2.highlevel.Array(numpy_array).layout
assert str(array.form.type) == "timedelta64[D]"
assert to_list(array) == [
np.timedelta64("41", "D"),
np.timedelta64("1", "D"),
np.timedelta64("20", "D"),
]
assert to_list(array.sort()) == [
datetime.timedelta(days=1),
datetime.timedelta(days=20),
datetime.timedelta(days=41),
]
assert to_list(array.argsort()) == [1, 2, 0]
assert array.is_unique() is True
assert to_list(array.unique()) == [
datetime.timedelta(days=1),
datetime.timedelta(days=20),
datetime.timedelta(days=41),
]
```
#### File: tests/v2/test_1154-arrow-tables-should-preserve-parameters.py
```python
import pytest # noqa: F401
import numpy as np # noqa: F401
import awkward as ak # noqa: F401
pytest.importorskip("pyarrow")
def test():
a = ak._v2.highlevel.Array(
ak._v2.contents.RecordArray(
[
ak._v2.contents.NumpyArray(np.array([1.1, 2.2, 3.3])),
ak._v2.contents.NumpyArray(np.array([1, 2, 3])),
],
["x", "y"],
parameters={"__record__": "Hello"},
)
)
assert ak._v2.operations.from_arrow(
ak._v2.operations.to_arrow_table(a)
).type.content.parameters == {"__record__": "Hello"}
```
#### File: tests/v2/test_1189-fix-singletons-for-non-optional-data.py
```python
import pytest # noqa: F401
import numpy as np # noqa: F401
import awkward as ak # noqa: F401
def test():
ak_array = ak._v2.Array([1, 2, 3])
assert ak._v2.operations.singletons(ak_array).tolist() == [[1], [2], [3]]
# FIXME: action to be taken on deciding between [None] [] ; see Issue #983
def test2():
a = ak._v2.Array([[3, 1, 2], [4, 5], []])
assert ak._v2.operations.argmin(a, axis=1, keepdims=True).tolist() == [
[1],
[0],
[None],
]
assert ak._v2.operations.singletons(
ak._v2.operations.argmin(a, axis=1)
).tolist() == [[1], [0], []]
assert a[ak._v2.operations.argmin(a, axis=1, keepdims=True)].tolist() == [
[1],
[4],
[None],
]
assert a[
ak._v2.operations.singletons(ak._v2.operations.argmin(a, axis=1))
].tolist() == [[1], [4], []]
def test_numpyarray():
a = ak._v2.contents.NumpyArray(np.arange(12).reshape(4, 3))
assert ak._v2.operations.singletons(a).tolist() == [
[[0], [1], [2]],
[[3], [4], [5]],
[[6], [7], [8]],
[[9], [10], [11]],
]
def test_empyarray():
e = ak._v2.contents.EmptyArray()
assert ak._v2.operations.singletons(e).tolist() == []
``` |
{
"source": "jpivarski/cassius",
"score": 3
} |
#### File: cassius/applications/TreePlotter.py
```python
import numpy
from cassius import *
from cassius.color import lighten
def dataFileName(config):
def finddata(element):
if element.tag == "data" and "file" in element.attrib:
return element.attrib["file"]
else:
for child in element:
search = finddata(child)
if search is not None: return search
return None
return finddata(config.getroot())
def categoryLabel(pmml):
for child in pmml.find("TreeModel").find("MiningSchema"):
if child.tag == "MiningField" and child.attrib.get("usageType") == "predicted":
return child.attrib["name"]
class Rect:
def __init__(self, xmin, xmax, ymin, ymax):
self.xmin, self.xmax, self.ymin, self.ymax = xmin, xmax, ymin, ymax
def __repr__(self):
xmin, xmax, ymin, ymax = self.xmin, self.xmax, self.ymin, self.ymax
if xmin is None: xmin = "-oo"
else: xmin = "%g" % xmin
if xmax is None: xmax = "oo"
else: xmax = "%g" % xmax
if ymin is None: ymin = "-oo"
else: ymin = "%g" % ymin
if ymax is None: ymax = "oo"
else: ymax = "%g" % ymax
return "Rect(%s to %s, %s to %s)" % (xmin, xmax, ymin, ymax)
def intersect(self, other):
if self.xmin is None:
xmin = other.xmin
elif other.xmin is None:
xmin = self.xmin
else:
xmin = max(self.xmin, other.xmin)
if self.xmax is None:
xmax = other.xmax
elif other.xmax is None:
xmax = self.xmax
else:
xmax = min(self.xmax, other.xmax)
if self.ymin is None:
ymin = other.ymin
elif other.ymin is None:
ymin = self.ymin
else:
ymin = max(self.ymin, other.ymin)
if self.ymax is None:
ymax = other.ymax
elif other.ymax is None:
ymax = self.ymax
else:
ymax = min(self.ymax, other.ymax)
return Rect(xmin, xmax, ymin, ymax)
def path(self):
xmin, xmax, ymin, ymax = self.xmin, self.xmax, self.ymin, self.ymax
if xmin is None: xmin = -Infinity
if xmax is None: xmax = Infinity
if ymin is None: ymin = -Infinity
if ymax is None: ymax = Infinity
return [MoveTo(xmin, ymin), EdgeTo(xmax, ymin), EdgeTo(xmax, ymax), EdgeTo(xmin, ymax), ClosePolygon()]
def describeTree(pmml, featureX, featureY, feature_means, categories):
regions = {}
for category in categories:
regions[category] = []
def recurse(node, rect):
satisfies_others = True
xmin, xmax, ymin, ymax = None, None, None, None
predicate = node.find("SimplePredicate")
if predicate is not None:
if predicate.attrib["field"] == featureX:
if predicate.attrib["operator"] == "lessOrEqual":
xmax = float(predicate.attrib["value"])
elif predicate.attrib["operator"] == "greaterThan":
xmin = float(predicate.attrib["value"])
elif predicate.attrib["field"] == featureY:
if predicate.attrib["operator"] == "lessOrEqual":
ymax = float(predicate.attrib["value"])
elif predicate.attrib["operator"] == "greaterThan":
ymin = float(predicate.attrib["value"])
elif predicate.attrib["field"] in feature_means:
mean = feature_means[predicate.attrib["field"]]
cut = float(predicate.attrib["value"])
if predicate.attrib["operator"] == "lessThan": satisfies_others = satisfies_others and mean < cut
if predicate.attrib["operator"] == "lessOrEqual": satisfies_others = satisfies_others and mean <= cut
if predicate.attrib["operator"] == "greaterThan": satisfies_others = satisfies_others and mean > cut
if predicate.attrib["operator"] == "greaterOrEqual": satisfies_others = satisfies_others and mean >= cut
if satisfies_others:
rect = rect.intersect(Rect(xmin, xmax, ymin, ymax))
if node.find("Node") is None:
category = node.attrib["score"]
regions[category].append(rect)
else:
for child in node:
if child.tag == "Node":
recurse(child, rect)
recurse(pmml.find("TreeModel").find("Node"), Rect(None, None, None, None))
return regions
def findFeatures(pmml):
optype = {}
for child in pmml.find("DataDictionary"):
if child.tag == "DataField":
optype[child.attrib["name"]] = child.attrib["optype"]
continuous_features, categorical_features = [], []
for child in pmml.find("TreeModel").find("MiningSchema"):
if child.tag == "MiningField" and child.attrib.get("usageType") != "predicted":
if optype[child.attrib["name"]] == "continuous":
continuous_features.append(child.attrib["name"])
elif optype[child.attrib["name"]] == "categorical":
categorical_features.append(child.attrib["name"])
return continuous_features, categorical_features
def all_categories(data, category_label):
return numpy.unique(data.field(category_label))
def ranges_mask(data, ranges):
ranges_mask = numpy.ones(len(data), dtype=numpy.bool)
for feature, low, high in ranges:
onemask = numpy.logical_and(low < data.field(feature), data.field(feature) < high)
numpy.logical_and(ranges_mask, onemask, ranges_mask)
return ranges_mask
def means(data, continuous_features, ranges_mask):
output = {}
for feature in continuous_features:
selected_data = data.field(feature)[ranges_mask]
if len(selected_data) > 0:
output[feature] = numpy.sum(selected_data)/float(len(selected_data))
else:
output[feature] = None
return output
def scatterplots(data, featureX, featureY, ranges_mask, categories, category_colors, category_markers, category_label="CATEGORY", markeroutline="black"):
plots = []
for category in categories:
mask = numpy.logical_and(ranges_mask, data.field(category_label) == category)
scatter = Scatter(x=data.field(featureX)[mask], y=data.field(featureY)[mask], limit=100, marker=category_markers[category], markercolor=category_colors[category], markeroutline=markeroutline)
plots.append(scatter)
return Overlay(*plots, xlabel=featureX, ylabel=featureY)
def regionplots(pmml, featureX, featureY, feature_means, categories, category_colors, lightening=3.):
regions = describeTree(pmml, featureX, featureY, feature_means, categories)
plots = []
for category, rects in regions.items():
plot = Region(*sum([i.path() for i in rects], []), fillcolor=lighten(category_colors[category], lightening))
plots.append(plot)
return Overlay(*plots, xlabel=featureX, ylabel=featureY)
def regionlegend(categories, category_colors, category_markers, featureX, featureY, continuous_features, feature_means):
legend_data = []
for category in categories:
legend_data.append([category, Style(marker=category_markers[category],
markercolor=category_colors[category],
markeroutline="black",
fillcolor=lighten(category_colors[category], 3.))])
for feature in continuous_features:
if feature != featureX and feature != featureY:
legend_data.append([feature, str_sigfigs(feature_means[feature], 2)])
return Legend(legend_data, justify="lc", colwid=[0.8, 0.2])
#######################################################
# for testing
#######################################################
# import xml.etree.ElementTree
# from augustus.kernel.unitable import UniTable
# from cassius.backends.svg import view
# data = UniTable().load(dataFileName(xml.etree.ElementTree.ElementTree(file="tree_iris_config_3.xml")))
# pmml = xml.etree.ElementTree.ElementTree(file="iris_binarytree_3.pmml")
# continuous_features, categorical_features = findFeatures(pmml)
# category_label = categoryLabel(pmml)
# categories = all_categories(data, category_label)
# category_colors = dict(zip(categories, ["red", "blue", "green", "purple", "yellow", "gray", "fuchsia", "white"]))
# category_markers = dict(zip(categories, ["circle", "circle", "circle", "circle", "circle", "circle", "circle", "circle"]))
# ranges = ranges_mask(data, [])
# feature_means = means(data, continuous_features, ranges)
# scatterplot = scatterplots(data=data,
# featureX="SEPAL_LENGTH",
# featureY="SEPAL_WIDTH",
# ranges_mask=ranges,
# categories=categories,
# category_colors=category_colors,
# category_markers=category_markers,
# category_label=category_label,
# markeroutline="black",
# )
# regionplot = regionplots(pmml, "SEPAL_LENGTH", "SEPAL_WIDTH", feature_means, categories, category_colors)
# legend = regionlegend(categories, category_colors, category_markers, "SEPAL_LENGTH", "SEPAL_WIDTH", continuous_features, feature_means)
# view(Overlay(regionplot, scatterplot, legend, frame=-2))
```
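As a quick illustration of the `Rect` helper above (hypothetical snippet; it assumes the Python 2-era `cassius` package and this module are importable): `None` bounds mean "unbounded", and `intersect` clips two regions:
```python
from cassius.applications.TreePlotter import Rect

a = Rect(0.0, 5.0, None, 3.0)   # x in [0, 5], y unbounded below, capped at 3
b = Rect(2.0, None, 1.0, None)  # x >= 2, y >= 1, unbounded above and to the right
print(a.intersect(b))           # Rect(2 to 5, 1 to 3)
```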
#### File: cassius/trunk/setup.py
```python
from distutils.core import setup, Extension
from distutils.command.build_ext import build_ext
import os
from cassius import __version__
class my_build_ext(build_ext):
def build_extension(self, extension):
try:
build_ext.build_extension(self, extension)
except:
print "*******************************************************************************************"
print
print " Could not build _svgview; the \"view(object)\" function will not work."
print " Use \"draw(object, fileName='...')\" instead, or get the dependencies:"
print
print " sudo apt-get install python-dev libgtk2.0-dev libglib2.0-dev librsvg2-dev libcairo2-dev"
print
print "*******************************************************************************************"
def viewer_pkgconfig():
def drop_whitespace(word): return word != "\n" and word != ""
return filter(drop_whitespace, os.popen("pkg-config --cflags --libs gtk+-2.0 gthread-2.0 librsvg-2.0").read().split(" "))
viewer_extension = Extension(os.path.join("cassius", "_svgview"),
[os.path.join("cassius", "_svgview.c")], {},
libraries=["cairo", "rsvg-2"],
extra_compile_args=viewer_pkgconfig(),
extra_link_args=viewer_pkgconfig())
setup(name="cassius",
version=__version__,
description="Cassius the Plotter",
author="Open Data Group",
author_email="<EMAIL>",
url="http://code.google.com/p/cassius/",
packages=["cassius", "cassius.applications", "cassius.backends"],
# scripts=["applications/CassiusScorePlane"],
cmdclass={"build_ext": my_build_ext},
ext_modules=[viewer_extension],
)
``` |
{
"source": "jpivarski/doremi",
"score": 2
} |
#### File: src/doremi/abstract.py
```python
from fractions import Fraction
from dataclasses import dataclass, field
from typing import List, Tuple, Dict, Optional, Union, Generator
import lark
import doremi.parsing
def is_rest(word: str) -> bool:
return all(x == "_" for x in word)
@dataclass
class AbstractNote:
start: float
stop: float
word: "Word"
emphasis: int = field(default=0)
octave: int = field(default=0)
augmentations: Tuple["Augmentation"] = field(default=())
def copy(self) -> "AbstractNote":
return AbstractNote(
self.start,
self.stop,
self.word,
self.emphasis,
self.octave,
self.augmentations,
)
def inplace_shift(self, shift: float) -> None:
self.start += shift
self.stop += shift
def inplace_scale(self, scale: float) -> None:
self.start *= scale
self.stop *= scale
@dataclass
class Scope:
symbols: Dict[lark.lexer.Token, "NamedPassage"]
def has(self, symbol: lark.lexer.Token) -> bool:
return symbol in self.symbols
def get(self, symbol: lark.lexer.Token) -> Optional["NamedPassage"]:
return self.symbols.get(symbol)
def add(self, passage: "NamedPassage"):
self.symbols[passage.assignment.function.val] = passage
@dataclass
class SubScope(Scope):
parent: Scope
def has(self, symbol: lark.lexer.Token) -> bool:
if symbol in self.symbols:
return True
else:
return self.parent.has(symbol)
def get(self, symbol: lark.lexer.Token) -> Optional["NamedPassage"]:
out = self.symbols.get(symbol)
if out is not None:
return out
else:
return self.parent.get(symbol)
class AST:
pass
class Expression(AST):
pass
@dataclass
class Word(Expression):
val: lark.lexer.Token
@dataclass
class Call(Expression):
function: Word
args: List[Expression]
parsingtree: Optional[lark.tree.Tree] = field(
default=None, repr=False, compare=False, hash=False
)
class Augmentation(AST):
pass
@dataclass
class AugmentStep(Augmentation):
amount: int
parsingtree: Optional[lark.tree.Tree] = field(
default=None, repr=False, compare=False, hash=False
)
@dataclass
class AugmentDegree(Augmentation):
amount: int
parsingtree: Optional[lark.tree.Tree] = field(
default=None, repr=False, compare=False, hash=False
)
@dataclass
class AugmentRatio(Augmentation):
amount: Fraction
parsingtree: Optional[lark.tree.Tree] = field(
default=None, repr=False, compare=False, hash=False
)
@dataclass
class Duration(AST):
amount: Fraction
is_scaling: bool
parsingtree: Optional[lark.tree.Tree] = field(
default=None, repr=False, compare=False, hash=False
)
@dataclass
class Modified(AST):
expression: Union[Expression, List[Expression]]
emphasis: int
absolute: int
octave: int
augmentation: Augmentation
duration: Optional[Duration]
repetition: int
parsingtree: Optional[lark.tree.Tree] = field(
default=None, repr=False, compare=False, hash=False
)
@dataclass
class Line(AST):
modified: List[Modified]
parsingtree: Optional[lark.tree.Tree] = field(
default=None, repr=False, compare=False, hash=False
)
@dataclass
class Assignment(AST):
function: Word
args: List[Word]
parsingtree: Optional[lark.tree.Tree] = field(
default=None, repr=False, compare=False, hash=False
)
class Passage(AST):
pass
@dataclass
class NamedPassage(Passage):
assignment: Assignment
lines: List[Line]
parsingtree: Optional[lark.tree.Tree] = field(
default=None, repr=False, compare=False, hash=False
)
@dataclass
class UnnamedPassage(Passage):
lines: List[Line]
parsingtree: Optional[lark.tree.Tree] = field(
default=None, repr=False, compare=False, hash=False
)
def evaluate(
node: Union[list, Word, Call, Modified, Line, Passage],
scope: Scope,
emphasis: int,
octave: int,
augmentations: Tuple[Augmentation],
breadcrumbs: Tuple[str],
) -> Tuple[float, List[AbstractNote]]:
if isinstance(node, list):
last_stop = 0.0
all_notes = []
for subnode in node:
duration, notes = evaluate(
subnode, scope, emphasis, octave, augmentations, breadcrumbs
)
for note in notes:
note.inplace_shift(last_stop)
all_notes.extend(notes)
last_stop += duration
return last_stop, all_notes
elif isinstance(node, Word):
if scope.has(node.val):
return evaluate(
Call(node, []), scope, emphasis, octave, augmentations, breadcrumbs
)
elif is_rest(node.val):
return float(len(node.val)), []
else:
note = AbstractNote(
0.0,
1.0,
node,
emphasis,
octave,
augmentations,
)
return 1.0, [note]
elif isinstance(node, Call):
if node.function.val in breadcrumbs:
raise RecursiveFunction(node.function.val)
namedpassage = scope.get(node.function.val)
if namedpassage is None:
raise UndefinedSymbol(node.function.val)
parameters = namedpassage.assignment.args
arguments = node.args
if len(parameters) != len(arguments):
raise MismatchingArguments(node.function.val)
subscope = SubScope(
{
param.val: NamedPassage(Assignment(param, []), [arg])
for param, arg in zip(parameters, arguments)
},
scope,
)
breadcrumbs = breadcrumbs + (node.function.val,)
return evaluate(
namedpassage, subscope, emphasis, octave, augmentations, breadcrumbs
)
elif isinstance(node, Modified):
if node.absolute > 0:
augmentations = augmentations[: -node.absolute]
if node.augmentation is not None:
augmentations = augmentations + (node.augmentation,)
if isinstance(node.expression, Expression):
natural_duration, notes = evaluate(
node.expression,
scope,
emphasis + node.emphasis,
octave + node.octave,
augmentations,
breadcrumbs,
)
else:
natural_duration, notes = evaluate(
node.expression,
scope,
emphasis + node.emphasis,
octave + node.octave,
augmentations,
breadcrumbs,
)
if node.duration is not None:
if node.duration.is_scaling:
factor = float(node.duration.amount)
natural_duration = natural_duration * factor
else:
factor = float(node.duration.amount) / natural_duration
natural_duration = float(node.duration.amount)
for note in notes:
note.inplace_scale(factor)
if node.repetition == 1:
duration = natural_duration
else:
all_notes = list(notes)
for i in range(1, node.repetition):
new_notes = [x.copy() for x in notes]
for note in new_notes:
note.inplace_shift(i * natural_duration)
all_notes.extend(new_notes)
duration = node.repetition * natural_duration
notes = all_notes
return duration, notes
elif isinstance(node, Line):
return evaluate(
node.modified, scope, emphasis, octave, augmentations, breadcrumbs
)
elif isinstance(node, Passage):
max_duration = 0.0
all_notes = []
for line in node.lines:
duration, notes = evaluate(
line, scope, emphasis, octave, augmentations, breadcrumbs
)
all_notes.extend(notes)
if max_duration < duration:
max_duration = duration
return max_duration, all_notes
else:
raise AssertionError(repr(node))
@dataclass
class Collection(AST):
passages: List[Passage]
comments: Optional[List[lark.lexer.Token]] = field(
default=None, repr=False, compare=False, hash=False
)
parsingtree: Optional[lark.tree.Tree] = field(
default=None, repr=False, compare=False, hash=False
)
source: Optional[str] = field(default=None, repr=False, compare=False, hash=False)
def evaluate(
self, scope: Optional[Scope]
) -> Tuple[float, List[AbstractNote], Scope]:
if scope is None:
scope = Scope({})
unnamed_passages: List[UnnamedPassage] = []
for passage in self.passages:
if isinstance(passage, NamedPassage):
scope.add(passage)
else:
unnamed_passages.append(passage)
try:
duration, notes = evaluate(unnamed_passages, scope, 0, 0, (), ())
except DoremiError as err:
err.source = self.source
raise
return duration, notes, scope
def get_comments(
node: Union[lark.tree.Tree, lark.lexer.Token]
) -> Generator[str, None, None]:
if isinstance(node, lark.tree.Tree):
if node.data == "start":
for child in node.children:
yield from get_comments(child)
elif node.data == "assign_passage":
for child in node.children:
yield from get_comments(child)
elif node.data == "passage":
for child in node.children:
yield from get_comments(child)
elif node.data == "line":
pass
elif node.data == "assign":
pass
else:
raise AssertionError(repr(node))
else:
if node.type == "BLANK" or node.type == "BLANK_END":
yield node
else:
raise AssertionError(repr(node))
def to_ast(node: Union[lark.tree.Tree, lark.lexer.Token]) -> AST:
if isinstance(node, lark.tree.Tree):
if node.data == "assign_passage":
subnodes = [x for x in node.children if isinstance(x, lark.tree.Tree)]
passage = subnodes[-1]
assert isinstance(passage, lark.tree.Tree) and passage.data == "passage"
lines = [
to_ast(x)
for x in passage.children
if not isinstance(x, lark.lexer.Token)
]
if len(subnodes) == 2:
return NamedPassage(to_ast(subnodes[0]), lines, node)
else:
assert len(subnodes) == 1
return UnnamedPassage(lines, node)
elif node.data == "assign":
assert 1 <= len(node.children) <= 2
subnode1 = node.children[0]
assert isinstance(subnode1, lark.lexer.Token) and subnode1.type == "WORD"
if is_rest(subnode1):
raise SymbolAllUnderscores(subnode1)
function = Word(subnode1)
if len(node.children) == 2:
subnode2 = node.children[1]
assert (
isinstance(subnode2, lark.tree.Tree) and subnode2.data == "defargs"
)
assert all(
isinstance(x, lark.lexer.Token) and x.type == "WORD"
for x in subnode2.children
)
args = [Word(x) for x in subnode2.children]
else:
args = []
return Assignment(function, args, node)
elif node.data == "line":
return Line([to_ast(x) for x in node.children], node)
elif node.data == "modified":
assert all(isinstance(x, lark.tree.Tree) for x in node.children)
assert 1 <= len(node.children) <= 6
index = 0
if node.children[index].data == "emphasis":
emphasis = len(node.children[index].children)
index += 1
else:
emphasis = 0
if node.children[index].data == "absolute":
absolute = len(node.children[index].children)
index += 1
else:
absolute = 0
subnode = node.children[index]
assert subnode.data == "expression"
if isinstance(subnode.children[0], lark.lexer.Token):
if len(subnode.children) == 1:
expression = to_ast(subnode.children[0])
else:
function = to_ast(subnode.children[0])
subsubnode = subnode.children[1]
assert (
isinstance(subsubnode, lark.tree.Tree)
and subsubnode.data == "args"
)
args = [to_ast(x) for x in subsubnode.children]
expression = Call(function, args, subnode)
else:
expression = [to_ast(x) for x in subnode.children]
index = -1
if node.children[index].data == "repetition":
repetition = int(node.children[index].children[0])
index -= 1
else:
repetition = 1
if node.children[index].data == "duration":
subnode = node.children[index].children[0]
assert isinstance(subnode, lark.tree.Tree)
if subnode.data == "dot_duration":
duration = Duration(Fraction(len(subnode.children), 1), False)
elif (
subnode.data == "ratio_duration" or subnode.data == "scale_duration"
):
ints = subnode.children[0].children
assert all(
isinstance(x, lark.lexer.Token) and x.type == "POSITIVE_INT"
for x in ints
)
if len(ints) == 1:
ratio = Fraction(int(ints[0]), 1)
elif len(ints) == 2:
ratio = Fraction(int(ints[0]), int(ints[1]))
else:
raise AssertionError(subnode.children[0])
duration = Duration(
ratio, subnode.data == "scale_duration", subnode
)
else:
raise AssertionError(subnode)
index -= 1
else:
duration = None
if node.children[index].data == "augmentation":
subnode = node.children[index].children[0]
if subnode.data == "upward_step" or subnode.data == "downward_step":
subnodes = subnode.children
if len(subnodes) == 1:
assert isinstance(subnodes[0], lark.lexer.Token)
if subnodes[0].type == "STEP_UPS":
amount = len(subnodes[0])
elif subnodes[0].type == "STEP_DOWNS":
amount = -len(subnodes[0])
else:
raise AssertionError(repr(subnodes[0]))
elif len(subnodes) == 2:
assert isinstance(subnodes[0], lark.lexer.Token)
assert isinstance(subnodes[1], lark.lexer.Token)
assert subnodes[1].type == "INT"
if subnodes[0].type == "STEP_UP":
amount = int(subnodes[1])
elif subnodes[0].type == "STEP_DOWN":
amount = -int(subnodes[1])
else:
raise AssertionError(repr(subnodes[0]))
else:
raise AssertionError(len(subnodes))
if amount == 0:
augmentation = None
else:
augmentation = AugmentStep(amount, subnode)
elif (
subnode.data == "upward_degree" or subnode.data == "downward_degree"
):
subnodes = subnode.children
if len(subnodes) == 1:
assert isinstance(subnodes[0], lark.lexer.Token)
if subnodes[0].type == "DEGREE_UPS":
amount = len(subnodes[0])
elif subnodes[0].type == "DEGREE_DOWNS":
amount = -len(subnodes[0])
else:
raise AssertionError(repr(subnodes[0]))
elif len(subnodes) == 2:
assert isinstance(subnodes[0], lark.lexer.Token)
assert isinstance(subnodes[1], lark.lexer.Token)
assert subnodes[1].type == "INT"
if subnodes[0].type == "DEGREE_UP":
amount = int(subnodes[1])
elif subnodes[0].type == "DEGREE_DOWN":
amount = -int(subnodes[1])
else:
raise AssertionError(repr(subnodes[0]))
else:
raise AssertionError(len(subnodes))
if amount == 0:
augmentation = None
else:
augmentation = AugmentDegree(amount, subnode)
else:
ints = subnode.children[0].children
assert all(
isinstance(x, lark.lexer.Token) and x.type == "POSITIVE_INT"
for x in ints
)
if len(ints) == 1:
ratio = Fraction(int(ints[0]), 1)
elif len(ints) == 2:
ratio = Fraction(int(ints[0]), int(ints[1]))
else:
raise AssertionError(subnode.children[0])
if ratio == Fraction(1, 1):
augmentation = None
else:
augmentation = AugmentRatio(ratio, subnode)
index -= 1
else:
augmentation = None
if node.children[index].data == "octave":
subnodes = node.children[index].children[0].children
if len(subnodes) == 1:
assert isinstance(subnodes[0], lark.lexer.Token)
if subnodes[0].type == "OCTAVE_UPS":
octave = len(subnodes[0])
elif subnodes[0].type == "OCTAVE_DOWNS":
octave = -len(subnodes[0])
else:
raise AssertionError(repr(subnodes[0]))
elif len(subnodes) == 2:
assert isinstance(subnodes[0], lark.lexer.Token)
assert isinstance(subnodes[1], lark.lexer.Token)
assert subnodes[1].type == "INT"
if subnodes[0].type == "OCTAVE_UP":
octave = int(subnodes[1])
elif subnodes[0].type == "OCTAVE_DOWN":
octave = -int(subnodes[1])
else:
raise AssertionError(repr(subnodes[0]))
else:
raise AssertionError(len(subnodes))
index -= 1
else:
octave = 0
return Modified(
expression,
emphasis,
absolute,
octave,
augmentation,
duration,
repetition,
node,
)
raise AssertionError(repr(node))
else:
if node.type == "WORD":
return Word(node)
elif node.type == "CARDINAL":
return Word(node)
else:
raise AssertionError(repr(node))
def abstracttree(source: str) -> AST:
try:
parsingtree = doremi.parsing.parsingtree(source)
except lark.exceptions.LarkError as err:
raise ParsingError(err, source)
assert parsingtree.data == "start"
try:
comments = list(get_comments(parsingtree))
except DoremiError as err:
err.source = source
raise
for i, x in enumerate(comments):
assert i + 1 == x.line, [x.line for x in comments]
try:
passages = [
to_ast(x)
for x in parsingtree.children
if not isinstance(x, lark.lexer.Token)
]
except DoremiError as err:
err.source = source
raise
return Collection(passages, comments, parsingtree, source)
class DoremiError(Exception):
error_message: str
line: Optional[int]
column: Optional[int]
source: Optional[str]
def __str__(self) -> str:
if self.line is None or self.line <= 0:
return self.error_message
else:
out = f"{self.error_message} on line {self.line}"
if self.source is None:
return out
else:
line = self.source.splitlines()[self.line - 1]
return f"""{out}
{line}
{"-" * (self.column - 1) + "^"}"""
class ParsingError(DoremiError):
def __init__(self, error: lark.exceptions.LarkError, source: str):
self.error_message = "composition could not be parsed"
self.line = getattr(error, "line", None)
self.column = getattr(error, "column", None)
self.source = source
class SymbolAllUnderscores(DoremiError):
def __init__(self, node: lark.tree.Tree):
self.error_message = "symbols must not consist entirely of underscores (rest)"
self.line = getattr(node, "line", None)
self.column = getattr(node, "column", None)
self.source = None
class RecursiveFunction(DoremiError):
def __init__(self, node: lark.tree.Tree):
self.error_message = f"function (indirectly?) calls itself: {str(node)!r}"
self.line = getattr(node, "line", None)
self.column = getattr(node, "column", None)
self.source = None
class UndefinedSymbol(DoremiError):
def __init__(self, node: lark.lexer.Token):
self.error_message = (
f"symbol has not been defined (misspelling?): {str(node)!r}"
)
self.line = getattr(node, "line", None)
self.column = getattr(node, "column", None)
self.source = None
class MismatchingArguments(DoremiError):
def __init__(self, node: lark.tree.Tree):
self.error_message = "wrong number of arguments in function call"
self.line = getattr(node, "line", None)
self.column = getattr(node, "column", None)
self.source = None
class NoteNotInScale(DoremiError):
def __init__(self, node: lark.tree.Tree):
self.error_message = (
"cannot augment by a scale degree because this note is not in the scale"
)
self.line = getattr(node, "line", None)
self.column = getattr(node, "column", None)
self.source = None
```
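A small sketch of how these pieces fit together (illustrative; the three-word composition is made up). `abstracttree` parses source text into a `Collection`, and `Collection.evaluate` flattens it into timed `AbstractNote`s plus the resulting `Scope`:
```python
import doremi.abstract

collection = doremi.abstract.abstracttree("do re mi")
duration, notes, scope = collection.evaluate(None)

print(duration)  # 3.0 -- each bare word lasts one beat
for note in notes:
    # start/stop are in beats; note.word.val is the underlying lark token
    print(note.start, note.stop, str(note.word.val))
```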
#### File: src/doremi/__init__.py
```python
from ._version import version as __version__
from typing import Optional
import doremi.parsing
import doremi.abstract
import doremi.concrete
def compose(
source: str,
scale: doremi.concrete.AnyScale = "C major",
bpm: float = 120.0,
scope: Optional[doremi.abstract.Scope] = None,
) -> doremi.concrete.Composition:
scale = doremi.concrete.get_scale(scale)
abstract_collection = doremi.abstract.abstracttree(source)
num_beats, abstract_notes, scope = abstract_collection.evaluate(scope)
return doremi.concrete.Composition(
scale, bpm, num_beats, scope, abstract_collection, abstract_notes
)
__all__ = ("__version__", "compose")
```
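For completeness, a hedged end-to-end sketch of `compose` (the note names and tempo are arbitrary; what the returned `Composition` can do beyond holding the scale, tempo, beat count, scope, and notes is defined in `doremi.concrete`, which is not shown here):
```python
import doremi

# Parse and evaluate a tiny composition against the default "C major" scale.
composition = doremi.compose("do re mi", scale="C major", bpm=90)
```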
#### File: doremi/tests/test_abstract.py
```python
from fractions import Fraction
import pytest
from lark.tree import Tree
from lark.lexer import Token
from doremi.abstract import (
AbstractNote,
Scope,
Word,
Call,
AugmentStep,
AugmentDegree,
AugmentRatio,
Duration,
Modified,
Line,
Assignment,
NamedPassage,
UnnamedPassage,
evaluate,
Collection,
abstracttree,
SymbolAllUnderscores,
MismatchingArguments,
RecursiveFunction,
)
def test_decorations():
assert abstracttree("la") == Collection(
[UnnamedPassage([Line([Modified(Word("la"), 0, 0, 0, None, None, 1)])])]
)
assert abstracttree("1st") == Collection(
[UnnamedPassage([Line([Modified(Word("1st"), 0, 0, 0, None, None, 1)])])]
)
assert abstracttree("!la") == Collection(
[UnnamedPassage([Line([Modified(Word("la"), 1, 0, 0, None, None, 1)])])]
)
assert abstracttree("!!la") == Collection(
[UnnamedPassage([Line([Modified(Word("la"), 2, 0, 0, None, None, 1)])])]
)
assert abstracttree("@la") == Collection(
[UnnamedPassage([Line([Modified(Word("la"), 0, 1, 0, None, None, 1)])])]
)
assert abstracttree("@ @ la") == Collection(
[UnnamedPassage([Line([Modified(Word("la"), 0, 2, 0, None, None, 1)])])]
)
assert abstracttree("la'") == Collection(
[UnnamedPassage([Line([Modified(Word("la"), 0, 0, 1, None, None, 1)])])]
)
assert abstracttree("la''") == Collection(
[UnnamedPassage([Line([Modified(Word("la"), 0, 0, 2, None, None, 1)])])]
)
assert abstracttree("la '") == Collection(
[UnnamedPassage([Line([Modified(Word("la"), 0, 0, 1, None, None, 1)])])]
)
assert abstracttree(" la '") == Collection(
[UnnamedPassage([Line([Modified(Word("la"), 0, 0, 1, None, None, 1)])])]
)
assert abstracttree(" la ''") == Collection(
[UnnamedPassage([Line([Modified(Word("la"), 0, 0, 2, None, None, 1)])])]
)
assert abstracttree("la'3") == Collection(
[UnnamedPassage([Line([Modified(Word("la"), 0, 0, 3, None, None, 1)])])]
)
assert abstracttree("la '3") == Collection(
[UnnamedPassage([Line([Modified(Word("la"), 0, 0, 3, None, None, 1)])])]
)
assert abstracttree(" la' 3") == Collection(
[UnnamedPassage([Line([Modified(Word("la"), 0, 0, 3, None, None, 1)])])]
)
assert abstracttree(" la ' 3") == Collection(
[UnnamedPassage([Line([Modified(Word("la"), 0, 0, 3, None, None, 1)])])]
)
assert abstracttree("la'3 ") == Collection(
[UnnamedPassage([Line([Modified(Word("la"), 0, 0, 3, None, None, 1)])])]
)
assert abstracttree("la,") == Collection(
[UnnamedPassage([Line([Modified(Word("la"), 0, 0, -1, None, None, 1)])])]
)
assert abstracttree("la,,") == Collection(
[UnnamedPassage([Line([Modified(Word("la"), 0, 0, -2, None, None, 1)])])]
)
assert abstracttree("la ,") == Collection(
[UnnamedPassage([Line([Modified(Word("la"), 0, 0, -1, None, None, 1)])])]
)
assert abstracttree(" la ,") == Collection(
[UnnamedPassage([Line([Modified(Word("la"), 0, 0, -1, None, None, 1)])])]
)
assert abstracttree(" la ,,") == Collection(
[UnnamedPassage([Line([Modified(Word("la"), 0, 0, -2, None, None, 1)])])]
)
assert abstracttree("la,3") == Collection(
[UnnamedPassage([Line([Modified(Word("la"), 0, 0, -3, None, None, 1)])])]
)
assert abstracttree("la ,3") == Collection(
[UnnamedPassage([Line([Modified(Word("la"), 0, 0, -3, None, None, 1)])])]
)
assert abstracttree(" la, 3") == Collection(
[UnnamedPassage([Line([Modified(Word("la"), 0, 0, -3, None, None, 1)])])]
)
assert abstracttree(" la , 3") == Collection(
[UnnamedPassage([Line([Modified(Word("la"), 0, 0, -3, None, None, 1)])])]
)
assert abstracttree(" la ,3") == Collection(
[UnnamedPassage([Line([Modified(Word("la"), 0, 0, -3, None, None, 1)])])]
)
assert abstracttree("la+") == Collection(
[
UnnamedPassage(
[Line([Modified(Word("la"), 0, 0, 0, AugmentStep(1), None, 1)])]
)
]
)
assert abstracttree("la ++") == Collection(
[
UnnamedPassage(
[Line([Modified(Word("la"), 0, 0, 0, AugmentStep(2), None, 1)])]
)
]
)
assert abstracttree("la+2") == Collection(
[
UnnamedPassage(
[Line([Modified(Word("la"), 0, 0, 0, AugmentStep(2), None, 1)])]
)
]
)
assert abstracttree("la-2") == Collection(
[
UnnamedPassage(
[Line([Modified(Word("la"), 0, 0, 0, AugmentStep(-2), None, 1)])]
)
]
)
assert abstracttree("la- 3") == Collection(
[
UnnamedPassage(
[Line([Modified(Word("la"), 0, 0, 0, AugmentStep(-3), None, 1)])]
)
]
)
assert abstracttree("la>") == Collection(
[
UnnamedPassage(
[Line([Modified(Word("la"), 0, 0, 0, AugmentDegree(1), None, 1)])]
)
]
)
assert abstracttree("la >>") == Collection(
[
UnnamedPassage(
[Line([Modified(Word("la"), 0, 0, 0, AugmentDegree(2), None, 1)])]
)
]
)
assert abstracttree("la>2") == Collection(
[
UnnamedPassage(
[Line([Modified(Word("la"), 0, 0, 0, AugmentDegree(2), None, 1)])]
)
]
)
assert abstracttree("la<2") == Collection(
[
UnnamedPassage(
[Line([Modified(Word("la"), 0, 0, 0, AugmentDegree(-2), None, 1)])]
)
]
)
assert abstracttree("la< 3") == Collection(
[
UnnamedPassage(
[Line([Modified(Word("la"), 0, 0, 0, AugmentDegree(-3), None, 1)])]
)
]
)
assert abstracttree("la%2") == Collection(
[
UnnamedPassage(
[
Line(
[
Modified(
Word("la"),
0,
0,
0,
AugmentRatio(Fraction(2, 1)),
None,
1,
)
]
)
],
)
]
)
assert abstracttree("la%2/3") == Collection(
[
UnnamedPassage(
[
Line(
[
Modified(
Word("la"),
0,
0,
0,
AugmentRatio(Fraction(2, 3)),
None,
1,
)
]
)
],
)
]
)
assert abstracttree("la...") == Collection(
[
UnnamedPassage(
[
Line(
[
Modified(
Word("la"),
0,
0,
0,
None,
Duration(Fraction(3, 1), False),
1,
)
]
)
],
)
]
)
assert abstracttree("la:3") == Collection(
[
UnnamedPassage(
[
Line(
[
Modified(
Word("la"),
0,
0,
0,
None,
Duration(Fraction(3, 1), False),
1,
)
]
)
],
)
]
)
assert abstracttree("la:3/2") == Collection(
[
UnnamedPassage(
[
Line(
[
Modified(
Word("la"),
0,
0,
0,
None,
Duration(Fraction(3, 2), False),
1,
)
]
)
],
)
]
)
assert abstracttree("la:3 / 2") == Collection(
[
UnnamedPassage(
[
Line(
[
Modified(
Word("la"),
0,
0,
0,
None,
Duration(Fraction(3, 2), False),
1,
)
]
)
],
)
]
)
assert abstracttree("la * 4") == Collection(
[UnnamedPassage([Line([Modified(Word("la"), 0, 0, 0, None, None, 4)])])]
)
assert abstracttree("@ la'+... * 4") == Collection(
[
UnnamedPassage(
[
Line(
[
Modified(
Word("la"),
0,
1,
1,
AugmentStep(1),
Duration(Fraction(3, 1), False),
4,
)
]
)
],
)
]
)


def test_call():
aug1 = AugmentStep(1)
dur3 = Duration(Fraction(3, 1), False)
dur32 = Duration(Fraction(3, 2), False)
x = Modified(Word("x"), 0, 0, 0, None, None, 1)
y = Modified(Word("y"), 0, 0, 0, None, None, 1)
assert abstracttree("f") == Collection(
[UnnamedPassage([Line([Modified(Word("f"), 0, 0, 0, None, None, 1)])])]
)
assert abstracttree("f()") == Collection(
[UnnamedPassage([Line([Modified(Word("f"), 0, 0, 0, None, None, 1)])])]
)
assert abstracttree("f(x)") == Collection(
[
UnnamedPassage(
[Line([Modified(Call(Word("f"), [x]), 0, 0, 0, None, None, 1)])]
)
]
)
assert abstracttree("f(x y)") == Collection(
[
UnnamedPassage(
[Line([Modified(Call(Word("f"), [x, y]), 0, 0, 0, None, None, 1)])]
)
]
)
assert abstracttree("@f(x y)") == Collection(
[
UnnamedPassage(
[Line([Modified(Call(Word("f"), [x, y]), 0, 1, 0, None, None, 1)])]
)
]
)
assert abstracttree("f(x y)'") == Collection(
[
UnnamedPassage(
[Line([Modified(Call(Word("f"), [x, y]), 0, 0, 1, None, None, 1)])]
)
]
)
assert abstracttree("f(x y)+") == Collection(
[
UnnamedPassage(
[Line([Modified(Call(Word("f"), [x, y]), 0, 0, 0, aug1, None, 1)])]
)
]
)
assert abstracttree("f(x y)...") == Collection(
[
UnnamedPassage(
[Line([Modified(Call(Word("f"), [x, y]), 0, 0, 0, None, dur3, 1)])]
)
]
)
assert abstracttree("f(x y):3/2") == Collection(
[
UnnamedPassage(
[Line([Modified(Call(Word("f"), [x, y]), 0, 0, 0, None, dur32, 1)])]
)
]
)
assert abstracttree("f(x y) * 4") == Collection(
[
UnnamedPassage(
[Line([Modified(Call(Word("f"), [x, y]), 0, 0, 0, None, None, 4)])]
)
]
)
assert abstracttree("@f(x y)'+:3/2 * 4") == Collection(
[
UnnamedPassage(
[Line([Modified(Call(Word("f"), [x, y]), 0, 1, 1, aug1, dur32, 4)])]
)
]
)


def test_modified():
aug1 = AugmentStep(1)
dur3 = Duration(Fraction(3, 1), False)
dur32 = Duration(Fraction(3, 2), False)
dur32True = Duration(Fraction(3, 2), True)
la = Modified(Word("la"), 0, 0, 0, None, None, 1)
assert abstracttree("{la la la}") == Collection(
[UnnamedPassage([Line([Modified([la, la, la], 0, 0, 0, None, None, 1)])])]
)
assert abstracttree("@{la la la}") == Collection(
[UnnamedPassage([Line([Modified([la, la, la], 0, 1, 0, None, None, 1)])])]
)
assert abstracttree("{la la la}'") == Collection(
[UnnamedPassage([Line([Modified([la, la, la], 0, 0, 1, None, None, 1)])])]
)
assert abstracttree("{la la la}+") == Collection(
[UnnamedPassage([Line([Modified([la, la, la], 0, 0, 0, aug1, None, 1)])])]
)
assert abstracttree("{la la la}...") == Collection(
[UnnamedPassage([Line([Modified([la, la, la], 0, 0, 0, None, dur3, 1)])])]
)
assert abstracttree("{la la la}:3/2") == Collection(
[UnnamedPassage([Line([Modified([la, la, la], 0, 0, 0, None, dur32, 1)])])]
)
assert abstracttree("{la la la} : 3/2") == Collection(
[UnnamedPassage([Line([Modified([la, la, la], 0, 0, 0, None, dur32, 1)])])]
)
assert abstracttree("{la la la}:*3/2") == Collection(
[UnnamedPassage([Line([Modified([la, la, la], 0, 0, 0, None, dur32True, 1)])])]
)
assert abstracttree("{la la la} :* 3/2") == Collection(
[UnnamedPassage([Line([Modified([la, la, la], 0, 0, 0, None, dur32True, 1)])])]
)
assert abstracttree("{la la la} * 4") == Collection(
[UnnamedPassage([Line([Modified([la, la, la], 0, 0, 0, None, None, 4)])])]
)
assert abstracttree("@{la la la}'+:3/2 * 4") == Collection(
[UnnamedPassage([Line([Modified([la, la, la], 0, 1, 1, aug1, dur32, 4)])])]
)
assert abstracttree("@{la la la}'+:*3/2 * 4") == Collection(
[UnnamedPassage([Line([Modified([la, la, la], 0, 1, 1, aug1, dur32True, 4)])])]
)


def test_passage():
do = Modified(Word("do"), 0, 0, 0, None, None, 1)
la = Modified(Word("la"), 0, 0, 0, None, None, 1)
assert abstracttree("do") == Collection([UnnamedPassage([Line([do])])])
assert abstracttree("do\nla") == Collection(
[UnnamedPassage([Line([do]), Line([la])])]
)
assert abstracttree("do do do\nla") == Collection(
[UnnamedPassage([Line([do, do, do]), Line([la])])]
)
assert abstracttree("do do do\nla la la") == Collection(
[UnnamedPassage([Line([do, do, do]), Line([la, la, la])])]
)
assert abstracttree("do\nla\ndo\nla") == Collection(
[UnnamedPassage([Line([do]), Line([la]), Line([do]), Line([la])])]
)
assert abstracttree("do\n\nla") == Collection(
[UnnamedPassage([Line([do])]), UnnamedPassage([Line([la])])]
)
assert abstracttree("do\n\n\nla") == Collection(
[UnnamedPassage([Line([do])]), UnnamedPassage([Line([la])])]
)
assert abstracttree("do\n\nla\ndo") == Collection(
[UnnamedPassage([Line([do])]), UnnamedPassage([Line([la]), Line([do])])]
)
assert abstracttree("do\n\n\nla\ndo") == Collection(
[UnnamedPassage([Line([do])]), UnnamedPassage([Line([la]), Line([do])])]
)
assert abstracttree("do\n\nla\n\ndo") == Collection(
[
UnnamedPassage([Line([do])]),
UnnamedPassage([Line([la])]),
UnnamedPassage([Line([do])]),
]
)
assert abstracttree("do\n\n\nla\n\n\ndo") == Collection(
[
UnnamedPassage([Line([do])]),
UnnamedPassage([Line([la])]),
UnnamedPassage([Line([do])]),
]
)
assert abstracttree("f = do") == Collection(
[NamedPassage(Assignment(Word("f"), []), [Line([do])])]
)
assert abstracttree("f(x) = do") == Collection(
[NamedPassage(Assignment(Word("f"), [Word("x")]), [Line([do])])]
)
assert abstracttree("f(x y) = do") == Collection(
[NamedPassage(Assignment(Word("f"), [Word("x"), Word("y")]), [Line([do])])]
)
assert abstracttree("f(x y) = do la") == Collection(
[NamedPassage(Assignment(Word("f"), [Word("x"), Word("y")]), [Line([do, la])])]
)
assert abstracttree("f(x y) = do\nla") == Collection(
[
NamedPassage(
Assignment(Word("f"), [Word("x"), Word("y")]), [Line([do]), Line([la])]
)
]
)
assert abstracttree("f(x y) =\ndo\nla") == Collection(
[
NamedPassage(
Assignment(Word("f"), [Word("x"), Word("y")]), [Line([do]), Line([la])]
)
]
)
assert abstracttree("f(x y) =\ndo\n\nla") == Collection(
[
NamedPassage(Assignment(Word("f"), [Word("x"), Word("y")]), [Line([do])]),
UnnamedPassage([Line([la])]),
]
)
with pytest.raises(SymbolAllUnderscores):
abstracttree("_ = do")
with pytest.raises(SymbolAllUnderscores):
abstracttree("___ = do")


def test_comments():
do = Modified(Word("do"), 0, 0, 0, None, None, 1)
la = Modified(Word("la"), 0, 0, 0, None, None, 1)
assert abstracttree("""do""").comments == []
assert (
abstracttree(
"""do
"""
).comments
== ["\n"]
)
assert abstracttree("""do | one""").comments == ["| one"]
assert (
abstracttree(
"""do | one
"""
).comments
== ["| one\n"]
)
assert abstracttree("""do |one""").comments == ["|one"]
assert (
abstracttree(
"""do |one
"""
).comments
== ["|one\n"]
)
assert (
abstracttree(
"""do
la"""
).comments
== ["\n"]
)
assert (
abstracttree(
"""do
la
"""
).comments
== ["\n", "\n"]
)
assert (
abstracttree(
"""do
la"""
).comments
== ["\n"]
)
assert (
abstracttree(
"""do
la
"""
).comments
== ["\n", "\n"]
)
assert (
abstracttree(
"""do | one
la"""
).comments
== ["| one\n"]
)
assert (
abstracttree(
"""do | one
la
"""
).comments
== ["| one\n", "\n"]
)
assert (
abstracttree(
"""do | one
la | two"""
).comments
== ["| one\n", "| two"]
)
assert (
abstracttree(
"""do | one
la | two
"""
).comments
== ["| one\n", "| two\n"]
)
assert (
abstracttree(
"""do | one
la | two"""
)
== Collection([UnnamedPassage([Line([do]), Line([la])])])
)
assert (
abstracttree(
"""do | one
la | two
"""
)
== Collection([UnnamedPassage([Line([do]), Line([la])])])
)
assert (
abstracttree(
"""do
la | two"""
).comments
== ["\n", "| two"]
)
assert (
abstracttree(
"""do
la | two
"""
).comments
== ["\n", "| two\n"]
)
assert (
abstracttree(
"""do
la | two"""
).comments
== ["\n", "| two"]
)
assert (
abstracttree(
"""do
la | two
"""
).comments
== ["\n", "| two\n"]
)
assert (
abstracttree(
"""do
| two
la | three"""
).comments
== ["\n", "| two\n", "| three"]
)
assert (
abstracttree(
"""do
| two
la | three
"""
).comments
== ["\n", "| two\n", "| three\n"]
)
assert (
abstracttree(
"""do
| two
la | three"""
)
== Collection([UnnamedPassage([Line([do])]), UnnamedPassage([Line([la])])])
)
assert (
abstracttree(
"""do
| two
la | three
"""
)
== Collection([UnnamedPassage([Line([do])]), UnnamedPassage([Line([la])])])
)
assert abstracttree("""f = do | one""").comments == ["| one"]
assert (
abstracttree(
"""f = do | one
"""
).comments
== ["| one\n"]
)
assert (
abstracttree(
"""f =
do | two"""
).comments
== ["\n", "| two"]
)
assert (
abstracttree(
"""f =
do | two
"""
).comments
== ["\n", "| two\n"]
)
assert (
abstracttree(
"""f = | one
do | two"""
).comments
== ["| one\n", "| two"]
)
assert (
abstracttree(
"""f = | one
do | two
"""
).comments
== ["| one\n", "| two\n"]
)
assert (
abstracttree(
"""| one
f =
do | three"""
).comments
== ["| one\n", "\n", "| three"]
)
assert (
abstracttree(
"""| one
f =
do | three
"""
).comments
== ["| one\n", "\n", "| three\n"]
)
assert (
abstracttree(
"""| one
f = | two
do | three"""
).comments
== ["| one\n", "| two\n", "| three"]
)
assert (
abstracttree(
"""| one
f = | two
do | three
"""
).comments
== ["| one\n", "| two\n", "| three\n"]
)


def test_evaluate():
assert evaluate(abstracttree("do").passages[0], Scope({}), 0, 0, (), ()) == (
1.0,
[AbstractNote(0.0, 1.0, Word("do"))],
)
assert evaluate(abstracttree("do re mi").passages[0], Scope({}), 0, 0, (), ()) == (
3.0,
[
AbstractNote(0.0, 1.0, Word("do")),
AbstractNote(1.0, 2.0, Word("re")),
AbstractNote(2.0, 3.0, Word("mi")),
],
)
assert evaluate(abstracttree("do....").passages[0], Scope({}), 0, 0, (), ()) == (
4.0,
[AbstractNote(0.0, 4.0, Word("do"))],
)
assert evaluate(
abstracttree("do.. re.. mi..").passages[0], Scope({}), 0, 0, (), ()
) == (
6.0,
[
AbstractNote(0.0, 2.0, Word("do")),
AbstractNote(2.0, 4.0, Word("re")),
AbstractNote(4.0, 6.0, Word("mi")),
],
)
assert evaluate(abstracttree("___").passages[0], Scope({}), 0, 0, (), ()) == (
3.0,
[],
)
assert evaluate(abstracttree("do _ mi").passages[0], Scope({}), 0, 0, (), ()) == (
3.0,
[
AbstractNote(0.0, 1.0, Word("do")),
AbstractNote(2.0, 3.0, Word("mi")),
],
)
assert evaluate(abstracttree("do __ mi").passages[0], Scope({}), 0, 0, (), ()) == (
4.0,
[
AbstractNote(0.0, 1.0, Word("do")),
AbstractNote(3.0, 4.0, Word("mi")),
],
)
assert evaluate(
abstracttree("do __ mi _").passages[0], Scope({}), 0, 0, (), ()
) == (
5.0,
[
AbstractNote(0.0, 1.0, Word("do")),
AbstractNote(3.0, 4.0, Word("mi")),
],
)
assert evaluate(
abstracttree("do\nre\nmi").passages[0], Scope({}), 0, 0, (), ()
) == (
1.0,
[
AbstractNote(0.0, 1.0, Word("do")),
AbstractNote(0.0, 1.0, Word("re")),
AbstractNote(0.0, 1.0, Word("mi")),
],
)
assert evaluate(
abstracttree("do\n_\nre mi").passages[0], Scope({}), 0, 0, (), ()
) == (
2.0,
[
AbstractNote(0.0, 1.0, Word("do")),
AbstractNote(0.0, 1.0, Word("re")),
AbstractNote(1.0, 2.0, Word("mi")),
],
)
assert evaluate(abstracttree("do'").passages[0], Scope({}), 0, 0, (), ()) == (
1.0,
[AbstractNote(0.0, 1.0, Word("do"), octave=1)],
)
assert evaluate(abstracttree("do+1").passages[0], Scope({}), 0, 0, (), ()) == (
1.0,
[AbstractNote(0.0, 1.0, Word("do"), augmentations=(AugmentStep(1),))],
)
assert evaluate(
abstracttree("{do re mi}").passages[0], Scope({}), 0, 0, (), ()
) == (
3.0,
[
AbstractNote(0.0, 1.0, Word("do")),
AbstractNote(1.0, 2.0, Word("re")),
AbstractNote(2.0, 3.0, Word("mi")),
],
)
assert evaluate(
abstracttree("{do re mi}'").passages[0], Scope({}), 0, 0, (), ()
) == (
3.0,
[
AbstractNote(0.0, 1.0, Word("do"), octave=1),
AbstractNote(1.0, 2.0, Word("re"), octave=1),
AbstractNote(2.0, 3.0, Word("mi"), octave=1),
],
)
assert evaluate(
abstracttree("{do @re mi}'").passages[0], Scope({}), 0, 0, (), ()
) == (
3.0,
[
AbstractNote(0.0, 1.0, Word("do"), octave=1),
AbstractNote(1.0, 2.0, Word("re"), octave=1),
AbstractNote(2.0, 3.0, Word("mi"), octave=1),
],
)
assert evaluate(
abstracttree("{do re mi}+1").passages[0], Scope({}), 0, 0, (), ()
) == (
3.0,
[
AbstractNote(0.0, 1.0, Word("do"), augmentations=(AugmentStep(1),)),
AbstractNote(1.0, 2.0, Word("re"), augmentations=(AugmentStep(1),)),
AbstractNote(2.0, 3.0, Word("mi"), augmentations=(AugmentStep(1),)),
],
)
assert evaluate(
abstracttree("{do @re mi}+1").passages[0], Scope({}), 0, 0, (), ()
) == (
3.0,
[
AbstractNote(0.0, 1.0, Word("do"), augmentations=(AugmentStep(1),)),
AbstractNote(1.0, 2.0, Word("re")),
AbstractNote(2.0, 3.0, Word("mi"), augmentations=(AugmentStep(1),)),
],
)
assert evaluate(
abstracttree("{{do @re mi}+1}>2").passages[0], Scope({}), 0, 0, (), ()
) == (
3.0,
[
AbstractNote(
0.0, 1.0, Word("do"), augmentations=(AugmentDegree(2), AugmentStep(1))
),
AbstractNote(1.0, 2.0, Word("re"), augmentations=(AugmentDegree(2),)),
AbstractNote(
2.0, 3.0, Word("mi"), augmentations=(AugmentDegree(2), AugmentStep(1))
),
],
)
assert evaluate(
abstracttree("{do re mi}:6").passages[0], Scope({}), 0, 0, (), ()
) == (
6.0,
[
AbstractNote(0.0, 2.0, Word("do")),
AbstractNote(2.0, 4.0, Word("re")),
AbstractNote(4.0, 6.0, Word("mi")),
],
)
assert evaluate(
abstracttree("{do re mi}:*2").passages[0], Scope({}), 0, 0, (), ()
) == (
6.0,
[
AbstractNote(0.0, 2.0, Word("do")),
AbstractNote(2.0, 4.0, Word("re")),
AbstractNote(4.0, 6.0, Word("mi")),
],
)
assert evaluate(
abstracttree("{do re mi} fa").passages[0], Scope({}), 0, 0, (), ()
) == (
4.0,
[
AbstractNote(0.0, 1.0, Word("do")),
AbstractNote(1.0, 2.0, Word("re")),
AbstractNote(2.0, 3.0, Word("mi")),
AbstractNote(3.0, 4.0, Word("fa")),
],
)
assert evaluate(
abstracttree("{do re mi}:6 fa").passages[0], Scope({}), 0, 0, (), ()
) == (
7.0,
[
AbstractNote(0.0, 2.0, Word("do")),
AbstractNote(2.0, 4.0, Word("re")),
AbstractNote(4.0, 6.0, Word("mi")),
AbstractNote(6.0, 7.0, Word("fa")),
],
)
assert evaluate(abstracttree("do * 2").passages[0], Scope({}), 0, 0, (), ()) == (
2.0,
[AbstractNote(0.0, 1.0, Word("do")), AbstractNote(1.0, 2.0, Word("do"))],
)
assert evaluate(
abstracttree("do re mi * 2").passages[0], Scope({}), 0, 0, (), ()
) == (
4.0,
[
AbstractNote(0.0, 1.0, Word("do")),
AbstractNote(1.0, 2.0, Word("re")),
AbstractNote(2.0, 3.0, Word("mi")),
AbstractNote(3.0, 4.0, Word("mi")),
],
)
assert evaluate(
abstracttree("{do re mi} * 2").passages[0], Scope({}), 0, 0, (), ()
) == (
6.0,
[
AbstractNote(0.0, 1.0, Word("do")),
AbstractNote(1.0, 2.0, Word("re")),
AbstractNote(2.0, 3.0, Word("mi")),
AbstractNote(3.0, 4.0, Word("do")),
AbstractNote(4.0, 5.0, Word("re")),
AbstractNote(5.0, 6.0, Word("mi")),
],
)


def test_evaluate_assign():
definition = abstracttree("f(x y) = y x").passages[0]
assert evaluate(
abstracttree("do f(mi re) fa so").passages[0],
Scope({"f": definition}),
0,
0,
(),
(),
) == (
5.0,
[
AbstractNote(0.0, 1.0, Word("do")),
AbstractNote(1.0, 2.0, Word("re")),
AbstractNote(2.0, 3.0, Word("mi")),
AbstractNote(3.0, 4.0, Word("fa")),
AbstractNote(4.0, 5.0, Word("so")),
],
)
assert evaluate(
abstracttree("do f({mi mi} {re re}) fa so").passages[0],
Scope({"f": definition}),
0,
0,
(),
(),
) == (
7.0,
[
AbstractNote(0.0, 1.0, Word("do")),
AbstractNote(1.0, 2.0, Word("re")),
AbstractNote(2.0, 3.0, Word("re")),
AbstractNote(3.0, 4.0, Word("mi")),
AbstractNote(4.0, 5.0, Word("mi")),
AbstractNote(5.0, 6.0, Word("fa")),
AbstractNote(6.0, 7.0, Word("so")),
],
)
with pytest.raises(MismatchingArguments):
evaluate(
abstracttree("f(mi)").passages[0], Scope({"f": definition}), 0, 0, (), ()
)
with pytest.raises(MismatchingArguments):
evaluate(
abstracttree("f(la la la)").passages[0],
Scope({"f": definition}),
0,
0,
(),
(),
)
definition = abstracttree("f = do\nmi\nso").passages[0]
assert evaluate(
abstracttree("la f la").passages[0], Scope({"f": definition}), 0, 0, (), ()
) == (
3.0,
[
AbstractNote(0.0, 1.0, Word("la")),
AbstractNote(1.0, 2.0, Word("do")),
AbstractNote(1.0, 2.0, Word("mi")),
AbstractNote(1.0, 2.0, Word("so")),
AbstractNote(2.0, 3.0, Word("la")),
],
)
with pytest.raises(MismatchingArguments):
evaluate(
abstracttree("f(mi)").passages[0], Scope({"f": definition}), 0, 0, (), ()
)
definition1 = abstracttree("f = do do").passages[0]
definition2 = abstracttree("g(x) = f x").passages[0]
assert evaluate(
abstracttree("g(mi)").passages[0],
Scope({"f": definition1, "g": definition2}),
0,
0,
(),
(),
) == (
3.0,
[
AbstractNote(0.0, 1.0, Word("do")),
AbstractNote(1.0, 2.0, Word("do")),
AbstractNote(2.0, 3.0, Word("mi")),
],
)
definition1 = abstracttree("f = g(la)").passages[0]
definition2 = abstracttree("g(x) = f x").passages[0]
with pytest.raises(RecursiveFunction):
evaluate(
abstracttree("g(mi)").passages[0],
Scope({"f": definition1, "g": definition2}),
0,
0,
(),
(),
)
definition2 = abstracttree("g = do g mi").passages[0]
with pytest.raises(RecursiveFunction):
evaluate(
abstracttree("la g").passages[0],
Scope({"g": definition2}),
0,
0,
(),
(),
)


def test_evaluate_midlevel():
assert abstracttree(
"""
f(x y) = y x
do f({mi mi} {re re}) fa
"""
).evaluate(None) == (
6.0,
[
AbstractNote(0.0, 1.0, Word("do")),
AbstractNote(1.0, 2.0, Word("re")),
AbstractNote(2.0, 3.0, Word("re")),
AbstractNote(3.0, 4.0, Word("mi")),
AbstractNote(4.0, 5.0, Word("mi")),
AbstractNote(5.0, 6.0, Word("fa")),
],
Scope(
{
"f": NamedPassage(
Assignment(Word("f"), [Word("x"), Word("y")]),
[
Line(
[
Modified(Word(val="y"), 0, 0, 0, None, None, 1),
Modified(Word("x"), 0, 0, 0, None, None, 1),
]
)
],
)
}
),
)
```
{
"source": "jpivarski/jupyter-book",
"score": 2
} |
#### File: jupyter-book/jupyter_book/__init__.py
```python
from .toc import update_indexname, add_toctree
from .yaml import add_yaml_config

__version__ = "0.7.0b2"


# We connect this function to the step after the builder is initialized
def setup(app):
    app.connect("config-inited", update_indexname)
    app.connect("source-read", add_toctree)
    app.add_config_value("globaltoc_path", "toc.yml", "env")

    # configuration for YAML metadata
    app.add_config_value("yaml_config_path", "", "html")
    app.connect("config-inited", add_yaml_config)

    return {
        "version": __version__,
        "parallel_read_safe": True,
        "parallel_write_safe": True,
    }
``` |
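Because `setup()` follows the standard Sphinx extension protocol, a minimal `conf.py` sketch could activate it as shown below (the extension name assumes `jupyter_book` is importable on the Sphinx path, and both file names are illustrative placeholders):
```python
# conf.py -- minimal sketch, assuming jupyter_book is installed as a Sphinx extension.
extensions = ["jupyter_book"]

# Config values registered by setup() above; the file names are placeholders.
globaltoc_path = "toc.yml"
yaml_config_path = "_config.yml"
```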