file_name
stringlengths 3
137
| prefix
stringlengths 0
918k
| suffix
stringlengths 0
962k
| middle
stringlengths 0
812k
|
---|---|---|---|
server.py | from absl import logging
import tornado.ioloop
from tornado import queues
import tornado.web
from icubam.db import sqlite
from icubam.messaging import sms_sender
from icubam.messaging import scheduler
from icubam.www import token
class MessageServer:
"""Sends and schedule SMS."""
def __init__(self, config, port=8889):
self.config = config
self.db = sqlite.SQLiteDB(self.config.db.sqlite_path)
self.port = port
self.sender = sms_sender.get_sender(self.config)
self.queue = queues.Queue()
self.scheduler = scheduler.MessageScheduler(
db=self.db,
queue=self.queue,
token_encoder=token.TokenEncoder(self.config),
base_url=self.config.server.base_url,
max_retries=self.config.scheduler.max_retries,
reminder_delay=self.config.scheduler.reminder_delay,
when=self.config.scheduler.ping,
)
async def process(self):
|
def run(self, delay=None):
logging.info("Running {}".format(self.__class__.__name__))
app = tornado.web.Application([])
io_loop = tornado.ioloop.IOLoop.current()
io_loop.spawn_callback(self.process)
self.scheduler.schedule_all(delay)
io_loop.start()
| async for msg in self.queue:
try:
self.sender.send_message(msg.phone, msg.text)
finally:
self.queue.task_done() |
BA_MDI1.py | from PySide6 import QtCore
from PySide6 import QtGui
from PySide6 import QtWidgets
import argparse
import sys, os
from Models.login_model import login_stuff, url_builder
from helpers.helpers1 import db_tables
from Views.sample_login import LoginForm
from Views.all_view import VAS_view,VCRH_view ,VCCP_view,VCRH_Edit, VProj_Edit,VCRLD_Edit, RPE_Edit
from Views.editmenu import splitSections, tweakSections, moveSections, copySections
from helpers.mmenus import menu_cascade
from Controllers.orm_select import county_select
from Controllers.controller import connectToDatabase
from Models.my_tables_model import gather_tables
from Controllers.my_MDIArea import MdiArea
__version__ = "1.0.0"
__my_debug__ = True
# -----------------------------------------------------------------------------
# Main window class.
# -----------------------------------------------------------------------------
class MainWindow(QtWidgets.QMainWindow):
def __init__(self, parent=None):
super(MainWindow, self).__init__(parent)
self.mdiArea = QtWidgets.QMdiArea()
self.setCentralWidget(self.mdiArea)
self.AppTitle = self.tr("MDI Pavement Management")
self.AppVersion = __version__
self.my_login = login_stuff()
self.my_url = None
self.my_session = None
self.login_flag = False
self.my_db_tables = db_tables
self.index = 0
# Setup main window.
self.setWindowTitle(self.AppTitle)
self.setWindowIcon(QtGui.QIcon.fromTheme('utilities-system-monitor'))
self.resize(1200, 600)
self.document_type=0
self.count = 0
menu_cascade(self)
self.statusbarAct = QtGui.QAction(self.tr("&Statusbar"), self)
self.statusbarAct.setCheckable(True)
self.statusbarAct.setChecked(True)
self.statusbarAct.setStatusTip(self.tr("Show or hide the statusbar in the current window"))
self.statusbarAct.toggled.connect(self.onToggleStatusBar)
def onOpen(self):
"""Select a file using a file open dialog."""
if self.document_type == 1:
my_list=[]
#self.actionUpdate.triggered.connect(self.onEdit)
fileName = "VAS"
the_doc = VAS_view(self)
self.count = self.count +1
the_doc.fileName = fileName + str(self.count)
sub = self.mdiArea.addSubWindow(the_doc)
sub.show()
#self.loadDocument(fileName)
if self.document_type == 2:
fileName = "VCCP"
the_doc = VCCP_view(self)
self.count = self.count + 1
the_doc.fileName = fileName + str(self.count)
sub = self.mdiArea.addSubWindow(the_doc)
sub.show()
#self.loadDocument(fileName)
if self.document_type == 3:
fileName = 'VCRH'
the_doc = VCRH_Edit(self)
self.count = self.count + 1
the_doc.fileName = fileName + str(self.count)
sub = self.mdiArea.addSubWindow(the_doc)
sub.show()
if self.document_type == 4:
fileName = 'VCRLD'
the_doc = VCRLD_Edit(self)
self.count = self.count + 1
the_doc.fileName = fileName + str(self.count)
sub = self.mdiArea.addSubWindow(the_doc)
sub.show()
if self.document_type == 5:
fileName = 'VProj'
the_doc = VProj_Edit(self)
self.count = self.count + 1
the_doc.fileName = fileName + str(self.count)
sub = self.mdiArea.addSubWindow(the_doc)
sub.show()
if self.document_type == 6:
fileName = 'EditLayers'
the_doc = RPE_Edit(self)
self.count = self.count + 1
the_doc.fileName = fileName + str(self.count)
sub = self.mdiArea.addSubWindow(the_doc)
sub.show()
if self.document_type == 7:
fileName = 'Split_Section'
the_doc = splitSections(self)
self.count = self.count+1
the_doc.fileName = fileName + str(self.count)
sub = self.mdiArea.addSubWindow(the_doc)
sub.show()
if self.document_type == 8:
fileName = 'Tweak_Section'
the_doc = tweakSections(self)
self.count = self.count+1
the_doc.fileName = fileName + str(self.count)
sub = self.mdiArea.addSubWindow(the_doc)
sub.show()
if self.document_type == 9:
fileName = 'Move_Section'
the_doc = moveSections(self)
self.count = self.count+1
the_doc.fileName = fileName + str(self.count)
sub = self.mdiArea.addSubWindow(the_doc)
sub.show()
if self.document_type == 10:
fileName = 'Copy_Section'
the_doc = copySections(self)
self.count = self.count+1
the_doc.fileName = fileName + str(self.count)
sub = self.mdiArea.addSubWindow(the_doc)
sub.show()
def onToggleStatusBar(self):
"""Toggles the visibility of the status bar."""
self.statusBar().setVisible(self.statusbarAct.isChecked())
def onContents(self):
QtWidgets.QMessageBox.information(self, self.tr("Contents"), self.tr("<p>Please refer to...</p>"))
def onAbout(self):
QtWidgets.QMessageBox.information(self, self.tr("About"),
self.tr("<p><strong>{}</strong></p>"
"<p>Version {}</p>"
"<p>Authors: ...</p>").format(self.AppTitle, self.AppVersion)
)
def onLogin(self):
if __my_debug__ == True:
self.login_flag = True
self.my_db_tables = db_tables
#self.my_url = f'oracle+cx_oracle://USER_MISS:password@ORACLEDEV01:1521/GISDEV'
self.my_login = login_stuff()
self.my_login.user_name = 'USER_MISS'
self.my_login.user_password = 'password'
self.my_login.database_name = 'GISDEV'
self.my_login.database_type = 0
self.my_login.schema ='USER_MISS'
self.my_login.server_name = 'ORACLEDEV01'
self.my_url = url_builder(self.my_login)
self.my_session = connectToDatabase(self.my_url)
self.document_type = 1
gather_tables(self)
menu_cascade(self)
else:
self.my_login = login_stuff()
self.my_url = None
self.login_flag = False
self.document_type = 0
myresults = LoginForm(self)
myresults.exec()
if myresults.login_flag == True:
self.my_db_table = myresults.db
self.my_url = myresults.my_url
self.my_login = myresults.my_login
self.login_flag = True
self.my_session = connectToDatabase(self.my_url)
menu_cascade(self)
else:
print('Failed')
pass
def onLogout(self):
self.my_url = None
self.db = None
self.my_login = None
self.login_flag = False
self.document_type = 0
self.my_session.close()
for item in self.mdiArea.subWindowList():
item.close()
menu_cascade(self)
def onExit(self):
for item in self.mdiArea.subWindowList():
item.my_session.close()
item.close()
self.close()
pass
# View Menu Area
def onVAS(self):
self.document_type = 1
#menu_cascade(self)
##my_doc = Ui_VAS_Form()
self.onOpen()
def onVCRHVCRLDVPROJ(self):
self.document_type = 2
#menu_cascade(self)
self.onOpen()
def onVCRH(self):
self.document_type = 3
#menu_cascade(self)
self.onOpen()
def onVCRLD(self):
self.document_type = 4
#menu_cascade(self)
self.onOpen()
def onVPRJ(self):
self.document_type = 5
#menu_cascade(self)
self.onOpen()
def onLog(self):
print('Made it to Log')
## project menu area
def onNewProject(self):
print('Made it to New Project')
def onRenameProject(self):
print('made it to rename project')
def onCopyProject(self):
print('made it to Copy Project')
def onEditLayers(self):
self.document_type = 6
#menu_cascade(self)
self.onOpen()
# edit Menu Layers
def onSplitSection(self): | self.onOpen()
def onTweakSection(self):
self.document_type = 8
self.onOpen()
def onMoveSection(self):
self.document_type = 9
self.onOpen()
def onCopySection(self):
self.document_type = 10
self.onOpen()
def loadDocument(self, filename):
"""Load document from filename."""
#for index in range(self.mdiArea.count()):
# document = self.mdiArea.widget(index)
# if document:
# if filename == document.filename:
# self.mdiArea.setCurrentIndex(index)
# document.reload()
# return
# Else load from file and create new document tab.
self.statusBar.showMessage(self.tr("Loading..."), 2500)
document = Document(filename, self)
index = self.mdiArea.addDocument(document)
#self.mdiArea.setCurrentIndex(index)
index.show()
# After loading a conenction file, it is possible to refresh the current module.
self.refreshAct.setEnabled(True)
self.statusBar.showMessage(self.tr("Successfully loaded file"), 2500)
# Enable close action
#self.closeAct.setEnabled(self.mdiArea.count())
class MdiArea(QtWidgets.QWidget):
def __init__(self, parent=None):
super(MdiArea, self).__init__(parent)
self.setAttribute(QtCore.Qt.WA_DeleteOnClose)
self._is_untitled = True
self.index = 0
def addDocument(self, document):
document.fileChanged.connect(self.onFileChanged)
document.fileLoaded.connect(self.onFileLoaded)
sub = QtWidgets.QMdiSubWindow()
sub.setWidget(document)
return sub
def currentDocument(self):
"""Return current active document."""
return self.widget(self.currentIndex())
def documents(self):
"""Returns iterator of all documents."""
for index in range(self.count()):
yield self.widget(index)
def closeDocument(self):
"""Close current active document. Provided for convenience.
"""
index = self.currentIndex()
# Finally remove tab by index.
self.removeTab(index)
def setDocumentChanged(self, document, changed):
index = self.indexOf(document)
label = document.filename
self.setTabText(index, "{}{}".format('*' if changed else '', label))
def checkTimestamps(self):
for document in self.documents():
document.checkTimestamp()
def onFileLoaded(self, document):
self.setDocumentChanged(document, False)
def onFileChanged(self, document):
self.setDocumentChanged(document, True)
class Document(QtWidgets.QWidget):
"""Generic document widget."""
fileLoaded = QtCore.Signal(QtWidgets.QWidget)
fileChanged = QtCore.Signal(QtWidgets.QWidget)
def __init__(self, filename, my_parent, parent=None):
super(Document, self).__init__(parent)
self.document_type = my_parent.document_type
self.my_db_tables = my_parent.my_db_tables
self.my_url = my_parent.my_url
self.my_login = my_parent.my_login
self.my_session = my_parent.my_session
self.filename =filename
self.documentEdit = self.createDocumentEdit()
self.my_parent = my_parent
#self.warningLabel = self.createWarningLabel()
#layout = QtWidgets.QVBoxLayout()
#layout.addWidget(self.warningLabel)
#layout.addWidget(self.textEdit)
#layout.setSpacing(0)
#layout.setContentsMargins(0, 0, 0, 0)
#self.setLayout(layout)
## Load the file.
QtCore.QCoreApplication.instance().processEvents()
self.reload()
def createDocumentEdit(self):
if self.document_type == 1:
my_document = VAS_view(self)
if self.document_type == 2:
my_document = VCCP_view(self)
# Disable editing.
#textEdit.setReadOnly(True)
## Set a monospace font for content.
#textEdit.setFont(QtGui.QFont("Monospace", 10))
return my_document
#def createWarningLabel(self):
# label = QtWidgets.QLabel(self)
# label.setObjectName("warningLabel")
# label.setStyleSheet(
# "padding: 16px;"
# "background-color: #f9ac3a;"
# "border: none;"
# )
# label.setWordWrap(True)
# label.hide()
# return label
def reload(self):
#"""Reload from file."""
#with open(self.filename) as f:
# self.timestamp = os.path.getmtime(self.filename)
# self.textEdit.setText(f.read())
# self.fileLoaded.emit(self)
#self.clearWarning()
pass
def clearWarning(self):
"""Clear the warning badge located at the top of the document."""
self.warningLabel.clear()
self.warningLabel.hide()
def showWarning(self, message):
"""Show a warning badge displaying a message located at the top of the document."""
self.warningLabel.setText(message)
self.warningLabel.show()
def checkTimestamp(self):
timestamp = os.path.getmtime(self.filename)
if timestamp > self.timestamp:
self.showWarning(self.tr("<strong>The file {} changed on disk.</strong> Reload (hit Ctrl+R) to see the changes.").format(self.filename))
self.fileChanged.emit(self)
else:
self.clearWarning()
# -----------------------------------------------------------------------------
# Parsing command line arguments
# -----------------------------------------------------------------------------
def parse_args():
"""Parse command line arguments."""
parser = argparse.ArgumentParser(prog=os.path.basename(__file__), description="")
parser.add_argument('filename', nargs="*", metavar='<file>', help="file")
parser.add_argument('-V, --version', action='version', version='%(prog)s {}'.format(__version__))
return parser.parse_args()
# -----------------------------------------------------------------------------
# Main routine
# -----------------------------------------------------------------------------
def main():
"""Main routine."""
args = parse_args()
# Create application and main window.
app = QtWidgets.QApplication(sys.argv)
window = MainWindow()
window.show()
# Open connections file using command line argument.
for filename in args.filename:
window.loadDocument(filename)
# Run execution loop.
return app.exec()
if __name__ == '__main__':
exit(main()) | self.document_type = 7 |
workon.go | package cq
import (
"fmt"
"strconv"
"strings"
"time"
log "github.com/Sirupsen/logrus"
"github.com/Wheeeel/todobot/model"
"github.com/go-sql-driver/mysql"
tg "github.com/go-telegram-bot-api/telegram-bot-api"
"github.com/pkg/errors"
uuid "github.com/satori/go.uuid"
)
func reportErr(bot *tg.BotAPI, cqc tg.CallbackConfig, err error) {
err = errors.Wrap(err, "Workon")
log.Error(err)
cqc.Text = "Invalid Request!"
cqc.ShowAlert = true
bot.AnswerCallbackQuery(cqc)
return
}
func Workon(bot *tg.BotAPI, cq *tg.CallbackQuery) {
req := cq.Message
userID := cq.From.ID
chatID := req.Chat.ID
cqc := tg.NewCallback(cq.ID, "")
argStr, err := ParseArgs(cq)
if err != nil {
reportErr(bot, cqc, err)
return
}
log.Infof("[DEBUG] Workon: userID=%v, chatID=%v", userID, chatID)
// argument check
arg := strings.Split(argStr, ",")
if len(arg) < 2 {
err = errors.New("Argument len less than 2, probably bad protocol")
reportErr(bot, cqc, err)
return
}
taskID, err := strconv.Atoi(arg[1])
if err != nil {
reportErr(bot, cqc, err)
return
}
cqChatID, err := strconv.Atoi(arg[0])
if err != nil {
reportErr(bot, cqc, err)
return
}
if int64(cqChatID) != chatID {
err = errors.New("Callback query chatID != current in chat ID, mismatch, invalid request")
reportErr(bot, cqc, err)
return
}
// get User
phraseGroupUUID := ""
u, err := model.SelectUser(model.DB, cq.From.ID)
if err == nil {
// skip the unecessary error
phraseGroupUUID = u.PhraseUUID
} else {
err = errors.Wrap(err, "Workon")
log.Error(err)
}
taskRealID, err := model.TaskRealID(model.DB, taskID, chatID)
log.Infof("[DEBUG] taskRealID = %v", taskRealID)
if err != nil {
err = errors.Wrap(err, "WorkON")
log.Error(err)
cqc.Text = "唔, 这个 task 可能已经被删除了呢"
bot.AnswerCallbackQuery(cqc)
return | }
// check if the user is the creator
t, err := model.TaskByID(model.DB, taskRealID)
if err != nil {
err = errors.Wrap(err, "WorkON")
log.Error(err)
cqc.Text = "唔, 这个 task 可能已经被删除了呢"
bot.AnswerCallbackQuery(cqc)
return
}
if t.CreateBy != cq.From.ID {
cqc.Text = "唔,为了防止误触,本按钮只有创建 task 的人能点哦,想要 workon 该任务的话请使用 /workon <TaskID>"
cqc.ShowAlert = true
bot.AnswerCallbackQuery(cqc)
return
}
// sanity check
atil, err := model.SelectATIByUserIDAndStateForUpdate(model.DB, userID, model.ATI_STATE_WORKING)
if err != nil {
err = errors.Wrap(err, "WorkON")
log.Error(err)
cqc.Text = "唔,出错了呢,重试如果还没有好的话就 pia @V0ID001 吧QwQ"
bot.AnswerCallbackQuery(cqc)
return
}
UUID := uuid.NewV4()
if len(atil) > 0 {
ts, err := model.TaskByID(model.DB, atil[0].TaskID)
if err != nil {
ok, er := model.TaskExist(model.DB, atil[0].TaskID)
if er != nil {
err = errors.Wrap(er, "WorkON")
log.Error(err)
cqc.Text = "唔,出错了呢,重试如果还没有好的话就 pia @V0ID001 吧QwQ"
bot.AnswerCallbackQuery(cqc)
return
}
// the error is not a "task not found error"
if ok {
err = errors.Wrap(err, "WorkON")
log.Error(err)
cqc.Text = "唔,出错了呢,重试如果还没有好的话就 pia @V0ID001 吧QwQ"
bot.AnswerCallbackQuery(cqc)
return
}
// the error is the mission is removed, just stop the mission now
if !ok {
cqc.Text = "喵,这个任务已经被删掉了呢,那么这里就帮乃把此任务标记为无效了哦"
err = model.UpdateATIStateByUUID(model.DB, atil[0].InstanceUUID, model.ATI_STATE_INVALID)
if err != nil {
err = errors.Wrap(err, "WorkON")
log.Error(err)
cqc.Text = "唔,出错了呢,重试如果还没有好的话就 pia @V0ID001 吧QwQ"
bot.AnswerCallbackQuery(cqc)
return
}
bot.AnswerCallbackQuery(cqc)
goto l1
}
}
txtMsg := fmt.Sprintf("唔,乃正进行着一项工作呢,本bot还不支持心分二用的说QwQ\n正在进行的任务: %s", ts)
cqc.Text = txtMsg
cqc.ShowAlert = true
bot.AnswerCallbackQuery(cqc)
return
}
l1:
// now we know that this user is not working on any task in this group, now create the task for him
ati := new(model.ActiveTaskInstance)
ati.StartAt = mysql.NullTime{Time: time.Now(), Valid: true}
ati.UserID = userID
ati.InstanceState = model.ATI_STATE_WORKING
ati.InstanceUUID = UUID.String()
ati.NotifyID = chatID
ati.TaskID = taskRealID
ati.PhraseGroupUUID = phraseGroupUUID
err = model.InsertATI(model.DB, *ati)
if err != nil {
err = errors.Wrap(err, "WorkON")
log.Error(err)
cqc.Text = "唔,出错了呢,重试如果还没有好的话就 pia @V0ID001 吧QwQ"
bot.AnswerCallbackQuery(cqc)
return
}
cqc.Text = "好的~ 请努力完成任务哦 =w="
cqc.ShowAlert = true
_, err = bot.AnswerCallbackQuery(cqc)
// TODO: Let's add a hint to the message
if err != nil {
err = errors.Wrap(err, "Workon")
}
log.Error(err)
} | |
utest_sas_gen.py | """
Unit tests for the sas_gen
"""
import os.path
import warnings
warnings.simplefilter("ignore")
import unittest
import numpy as np
from sas.sascalc.calculator import sas_gen
def find(filename):
return os.path.join(os.path.dirname(__file__), 'data', filename)
class sas_gen_test(unittest.TestCase):
def setUp(self):
self.sldloader = sas_gen.SLDReader()
self.pdbloader = sas_gen.PDBReader()
self.omfloader = sas_gen.OMFReader()
def test_sldreader(self):
"""
Test .sld file loaded
"""
f = self.sldloader.read(find("sld_file.sld"))
self.assertEqual(f.pos_x[0], -40.5)
self.assertEqual(f.pos_y[0], -13.5)
self.assertEqual(f.pos_z[0], -13.5)
def test_pdbreader(self):
"""
Test .pdb file loaded
"""
f = self.pdbloader.read(find("c60.pdb"))
self.assertEqual(f.pos_x[0], -0.733)
self.assertEqual(f.pos_y[0], -1.008)
self.assertEqual(f.pos_z[0], 3.326)
def test_omfreader(self):
|
def test_calculator(self):
"""
Test that the calculator calculates.
"""
f = self.omfloader.read(find("A_Raw_Example-1.omf"))
omf2sld = sas_gen.OMF2SLD()
omf2sld.set_data(f)
model = sas_gen.GenSAS()
model.set_sld_data(omf2sld.output)
x = np.linspace(0, 0.1, 11)[1:]
model.runXY([x, x])
if __name__ == '__main__':
unittest.main()
| """
Test .omf file loaded
"""
f = self.omfloader.read(find("A_Raw_Example-1.omf"))
output = sas_gen.OMF2SLD()
output.set_data(f)
self.assertEqual(f.mx[0], 0)
self.assertEqual(f.my[0], 0)
self.assertEqual(f.mz[0], 0)
self.assertEqual(output.pos_x[0], 0.0)
self.assertEqual(output.pos_y[0], 0.0)
self.assertEqual(output.pos_z[0], 0.0) |
jobs.py | from flask import Flask
from flask_apscheduler import APScheduler
class Config(object):
JOBS = [
{
'id': 'job1',
'func': 'jobs:job1',
'args': (1, 2),
'trigger': 'interval',
'seconds': 10
}
]
SCHEDULER_API_ENABLED = True
def job1(a, b):
print(str(a) + ' ' + str(b))
if __name__ == '__main__':
app = Flask(__name__)
app.config.from_object(Config())
scheduler = APScheduler()
# it is also possible to enable the API directly
# scheduler.api_enabled = True
scheduler.init_app(app)
scheduler.start() |
app.run() |
|
functions.py | import DBinterface as DB
import random
import datetime as dt
def print_ranking(my_ranking,ranking_size,top_or_bottom):
Tweet=""
if top_or_bottom == True:
|
else:
Tweet += ("The first " + ranking_size + " cities with less CO2 emissions due to traffic are: \r\n" +
"Congratulations!!!!! The Earth loves you :D \r\n")
for i in range(ranking_size):
Tweet += (str((i+1)) + "º " + str(my_ranking[i][0]) + " with a CO2 value of " + str(my_ranking[i][1]) + "\r\n")
return(Tweet)
def rank(api):
interface = DB.nasaDBinterface()
ranking_size = random.randint(2,10)
top_or_bottom = random.choice([True, False])
my_ranking = interface.getranking(ranking_size, top_or_bottom)
Tweet=print_ranking(my_ranking,ranking_size,top_or_bottom)
api.update_status(status=Tweet)
def leer_hashtag(T):
L=list(T)
L.append(" ")
for a in range(len(L)):
if L[a]=="#":
a=a+1
ht=[]
while L[a]!=" ":
ht.append(L[a])
a=a+1
ht_salida= ""
for e in ht:
ht_salida += e
return ht_salida
def get_city(TEXT):
L=TEXT.split()
c=""
ciudad=""
for a in range(len(L)):
if L[a]=="#consulta":
break
if L[a]=="City:":
for i in range(len(L)-a-2):
c += L[a+i+1] + " "
x=c.split()
for i in range(len(x)-1):
ciudad += x[i]+" "
if len(x) != 1:
ciudad += x[len(x)-1]
return ciudad.lower() | Tweet += ("The first " + ranking_size + " cities with more CO2 emissions due to traffic are: \r\n ") |
kzg.go | // +build !bignum_pure,!bignum_hol256
package kzg
import (
"fmt"
"github.com/protolambda/go-kzg/bls"
)
type KZGSettings struct {
*FFTSettings
// setup values
// [b.multiply(b.G1, pow(s, i, MODULUS)) for i in range(WIDTH+1)],
SecretG1 []bls.G1Point
// [b.multiply(b.G2, pow(s, i, MODULUS)) for i in range(WIDTH+1)],
SecretG2 []bls.G2Point
}
func NewKZGSettings(fs *FFTSettings, secretG1 []bls.G1Point, secretG2 []bls.G2Point) *KZGSettings {
if len(secretG1) != len(secretG2) {
panic("secret list lengths don't match")
}
if uint64(len(secretG1)) < fs.MaxWidth {
panic(fmt.Errorf("expected more values for secrets, MaxWidth: %d, got: %d", fs.MaxWidth, len(secretG1)))
}
ks := &KZGSettings{
FFTSettings: fs,
SecretG1: secretG1,
SecretG2: secretG2,
}
return ks |
type FK20SingleSettings struct {
*KZGSettings
xExtFFT []bls.G1Point
}
func NewFK20SingleSettings(ks *KZGSettings, n2 uint64) *FK20SingleSettings {
if n2 > ks.MaxWidth {
panic("extended size is larger than kzg settings supports")
}
if !bls.IsPowerOfTwo(n2) {
panic("extended size is not a power of two")
}
if n2 < 2 {
panic("extended size is too small")
}
n := n2 / 2
fk := &FK20SingleSettings{
KZGSettings: ks,
}
x := make([]bls.G1Point, n, n)
for i, j := uint64(0), n-2; i < n-1; i, j = i+1, j-1 {
bls.CopyG1(&x[i], &ks.SecretG1[j])
}
bls.CopyG1(&x[n-1], &bls.ZeroG1)
fk.xExtFFT = fk.toeplitzPart1(x)
return fk
}
type FK20MultiSettings struct {
*KZGSettings
chunkLen uint64
// chunkLen files, each of size MaxWidth
xExtFFTFiles [][]bls.G1Point
}
func NewFK20MultiSettings(ks *KZGSettings, n2 uint64, chunkLen uint64) *FK20MultiSettings {
if n2 > ks.MaxWidth {
panic("extended size is larger than kzg settings supports")
}
if !bls.IsPowerOfTwo(n2) {
panic("extended size is not a power of two")
}
if n2 < 2 {
panic("extended size is too small")
}
if chunkLen > n2 {
panic("chunk length is too large")
}
if !bls.IsPowerOfTwo(chunkLen) {
panic("chunk length must be power of two")
}
if chunkLen < 1 {
panic("chunk length is too small")
}
fk := &FK20MultiSettings{
KZGSettings: ks,
chunkLen: chunkLen,
xExtFFTFiles: make([][]bls.G1Point, chunkLen, chunkLen),
}
// xext_fft = []
// for i in range(l):
// x = setup[0][n - l - 1 - i::-l] + [b.Z1]
// xext_fft.append(toeplitz_part1(x))
n := n2 / 2
k := n / chunkLen
xExtFFTPrecompute := func(offset uint64) []bls.G1Point {
x := make([]bls.G1Point, k, k)
start := n - chunkLen - 1 - offset
for i, j := uint64(0), start; i+1 < k; i, j = i+1, j-chunkLen {
bls.CopyG1(&x[i], &ks.SecretG1[j])
}
bls.CopyG1(&x[k-1], &bls.ZeroG1)
return ks.toeplitzPart1(x)
}
for i := uint64(0); i < chunkLen; i++ {
fk.xExtFFTFiles[i] = xExtFFTPrecompute(i)
}
return fk
} | } |
event.rs | use {
super::*,
crate::{
app::{
AppContext,
Selection,
},
display::W,
errors::ProgramError,
keys,
skin::PanelSkin,
verb::{Internal, Verb, VerbExecution},
},
termimad::{Area, Event, InputField},
};
/// wrap the input of a panel,
/// receive events and make commands
pub struct PanelInput {
pub input_field: InputField,
tab_cycle_count: usize,
input_before_cycle: Option<String>,
}
impl PanelInput {
pub fn new(area: Area) -> Self {
Self {
input_field: InputField::new(area),
tab_cycle_count: 0,
input_before_cycle: None,
}
}
pub fn set_content(&mut self, content: &str) {
self.input_field.set_content(content);
}
pub fn get_content(&self) -> String {
self.input_field.get_content()
}
pub fn display(
&mut self,
w: &mut W,
active: bool,
area: Area,
panel_skin: &PanelSkin,
) -> Result<(), ProgramError> {
self.input_field.set_normal_style(panel_skin.styles.input.clone());
self.input_field.focused = active;
self.input_field.area = area;
self.input_field.display_on(w)?;
Ok(())
}
/// consume the event to
/// - maybe change the input
/// - build a command
pub fn on_event(
&mut self,
w: &mut W,
event: Event,
con: &AppContext,
sel: Selection<'_>,
) -> Result<Command, ProgramError> {
let cmd = self.get_command(event, con, sel);
self.input_field.display_on(w)?;
Ok(cmd)
}
/// check whether the verb is an action on the input (like
/// deleting a word) and if it's the case, applies it and
/// return true
fn handle_input_related_verb(
&mut self,
verb: &Verb,
_con: &AppContext,
) -> bool {
if let VerbExecution::Internal(internal_exec) = &verb.execution {
match internal_exec.internal {
Internal::input_del_char_left => self.input_field.del_char_left(),
Internal::input_del_char_below => self.input_field.del_char_below(),
Internal::input_del_word_left => self.input_field.del_word_left(),
Internal::input_del_word_right => self.input_field.del_word_right(),
Internal::input_go_left => self.input_field.move_left(),
Internal::input_go_right => self.input_field.move_right(),
Internal::input_go_word_left => self.input_field.move_word_left(),
Internal::input_go_word_right => self.input_field.move_word_right(),
Internal::input_go_to_start => self.input_field.move_to_start(),
Internal::input_go_to_end => self.input_field.move_to_end(),
_ => false,
}
} else {
false
}
}
/// consume the event to
/// - maybe change the input
/// - build a command
fn get_command(
&mut self,
event: Event,
con: &AppContext,
sel: Selection<'_>,
) -> Command {
match event {
Event::Click(x, y, ..) => {
return if self.input_field.apply_event(&event) {
Command::empty()
} else {
Command::Click(x, y)
};
}
Event::DoubleClick(x, y) => {
return Command::DoubleClick(x, y);
}
Event::Key(key) => {
// value of raw and parts before any key related change
let raw = self.input_field.get_content();
let parts = CommandParts::from(raw.clone());
// we first handle the cases that MUST absolutely
// not be overriden by configuration
if key == keys::ESC {
self.tab_cycle_count = 0;
if let Some(raw) = self.input_before_cycle.take() {
// we cancel the tab cycling
self.input_field.set_content(&raw);
self.input_before_cycle = None;
return Command::from_raw(raw, false);
} else {
self.input_field.set_content("");
let internal = Internal::back;
return Command::Internal {
internal,
input_invocation: parts.verb_invocation,
};
}
}
// tab completion
if key == keys::TAB {
if parts.verb_invocation.is_some() {
let parts_before_cycle;
let completable_parts = if let Some(s) = &self.input_before_cycle {
parts_before_cycle = CommandParts::from(s.clone());
&parts_before_cycle
} else {
&parts
};
let completions = Completions::for_input(completable_parts, con, sel);
let added = match completions {
Completions::None => {
debug!("nothing to complete!"); // where to tell this ? input field or status ?
self.tab_cycle_count = 0;
self.input_before_cycle = None;
None
}
Completions::Common(completion) => {
self.tab_cycle_count = 0;
Some(completion)
}
Completions::List(mut completions) => {
let idx = self.tab_cycle_count % completions.len();
if self.tab_cycle_count == 0 {
self.input_before_cycle = Some(raw.to_string());
}
self.tab_cycle_count += 1;
Some(completions.swap_remove(idx))
}
};
if let Some(added) = added {
let mut raw = self.input_before_cycle.as_ref().map_or(raw, |s| s.to_string());
raw.push_str(&added);
self.input_field.set_content(&raw);
return Command::from_raw(raw, false);
} else {
return Command::None;
}
}
} else {
self.tab_cycle_count = 0;
self.input_before_cycle = None;
}
if key == keys::ENTER && parts.verb_invocation.is_some() {
return Command::from_parts(parts, true);
}
if key == keys::QUESTION && (raw.is_empty() || parts.verb_invocation.is_some()) {
// a '?' opens the help when it's the first char
// or when it's part of the verb invocation
return Command::Internal {
internal: Internal::help,
input_invocation: parts.verb_invocation,
};
}
// we now check if the key is the trigger key of one of the verbs
for (index, verb) in con.verb_store.verbs.iter().enumerate() {
for verb_key in &verb.keys {
if *verb_key == key {
if self.handle_input_related_verb(verb, con) {
return Command::from_raw(self.input_field.get_content(), false);
}
if sel.stype.respects(verb.selection_condition) {
return Command::VerbTrigger {
index,
input_invocation: parts.verb_invocation,
};
} else {
debug!("verb not allowed on current selection");
}
}
}
}
if key == keys::LEFT && raw.is_empty() |
if key == keys::RIGHT && raw.is_empty() {
return Command::Internal {
internal: Internal::open_stay,
input_invocation: None,
};
}
// input field management
if self.input_field.apply_event(&event) {
return Command::from_raw(self.input_field.get_content(), false);
}
}
Event::Wheel(lines_count) => {
let internal = if lines_count > 0 {
Internal::line_down
} else {
Internal::line_up
};
return Command::Internal {
internal,
input_invocation: None,
};
}
_ => {}
}
Command::None
}
}
| {
let internal = Internal::back;
return Command::Internal {
internal,
input_invocation: parts.verb_invocation,
};
} |
getRssTickerValues.py | from daoRefactor2 import DAO
from rssTickerInfo import rssTickerInfo
import json
import boto3
table = 'CompanyRSSFeed'
dao = DAO(table)
def main():
|
if __name__ == "__main__":
# calling main function
main()
| tickerValues = dao.getRssTickerValues('UNP')
print(tickerValues) |
__init__.py | import os
import posixpath
from enum import Enum
| from utils import security
class UploadPath(str, Enum):
default = "default"
UPLOAD_PATH_DICT = {
UploadPath.default: "default/"
}
def get_upload(upload_key: UploadPath = Path(..., description="上传文件块位置")):
"""
获取文件上传目录
:param upload_key:
:return:
"""
root_path = posixpath.abspath(UPLOAD_PATH_DICT[upload_key])
def func(folder):
path = security.safe_join(root_path, folder)
os.makedirs(path, exist_ok=True)
return path
return func
class DownloadPath(str, Enum):
default = "default"
DOWNLOAD_PATH_DICT = {
DownloadPath.default: "default/"
}
def get_download(download_key: DownloadPath = Path(..., description="下载文件块位置")):
"""
获取下载文件路径
:param download_key:
:return:
"""
root_path = posixpath.abspath(DOWNLOAD_PATH_DICT[download_key])
def func(folder):
path = security.safe_join(root_path, folder)
if not posixpath.exists(path):
raise HTTPException(404, "The access file does not exist")
for filename in os.listdir(path):
return posixpath.join(path, filename), filename
return func | from fastapi import Path, HTTPException
|
index.ts | import CreditCardForm from './components/CreditCardForm'
import CreditCardProvider from './components/CreditCardProvider'
export { default as Button } from './components/Button'
export { default as Card } from './components/Card'
import { FormModel, CardFields } from './types'
export {
CardFields, | CreditCardForm,
CreditCardProvider,
}
export default CreditCardForm | FormModel, |
source.rs | use crate::prelude::*;
use nu_engine::{script, WholeStreamCommand};
use nu_errors::ShellError;
use nu_path::{canonicalize, canonicalize_with};
use nu_protocol::{Signature, SyntaxShape};
use nu_source::Tagged;
use std::path::Path;
pub struct Source;
#[derive(Deserialize)]
pub struct SourceArgs { | }
impl WholeStreamCommand for Source {
fn name(&self) -> &str {
"source"
}
fn signature(&self) -> Signature {
Signature::build("source").required(
"filename",
SyntaxShape::FilePath,
"the filepath to the script file to source",
)
}
fn usage(&self) -> &str {
"Runs a script file in the current context."
}
fn run(&self, args: CommandArgs) -> Result<OutputStream, ShellError> {
source(args)
}
fn examples(&self) -> Vec<Example> {
vec![]
}
}
pub fn source(args: CommandArgs) -> Result<OutputStream, ShellError> {
let ctx = &args.context;
let filename: Tagged<String> = args.req(0)?;
let source_file = Path::new(&filename.item);
// Note: this is a special case for setting the context from a command
// In this case, if we don't set it now, we'll lose the scope that this
// variable should be set into.
let lib_dirs = &ctx
.configs()
.lock()
.global_config
.as_ref()
.map(|configuration| match configuration.var("lib_dirs") {
Some(paths) => paths
.table_entries()
.cloned()
.map(|path| path.as_string())
.collect(),
None => vec![],
});
if let Some(dir) = lib_dirs {
for lib_path in dir {
match lib_path {
Ok(name) => {
let path = if let Ok(p) = canonicalize_with(&source_file, name) {
p
} else {
continue;
};
if let Ok(contents) = std::fs::read_to_string(path) {
let result = script::run_script_standalone(contents, true, ctx, false);
if let Err(err) = result {
ctx.error(err);
}
return Ok(OutputStream::empty());
}
}
Err(reason) => {
ctx.error(reason.clone());
}
}
}
}
let path = canonicalize(source_file).map_err(|e| {
ShellError::labeled_error(
format!("Can't load source file. Reason: {}", e.to_string()),
"Can't load this file",
filename.span(),
)
})?;
let contents = std::fs::read_to_string(path);
match contents {
Ok(contents) => {
let result = script::run_script_standalone(contents, true, ctx, false);
if let Err(err) = result {
ctx.error(err);
}
Ok(OutputStream::empty())
}
Err(e) => {
ctx.error(ShellError::labeled_error(
format!("Can't load source file. Reason: {}", e.to_string()),
"Can't load this file",
filename.span(),
));
Ok(OutputStream::empty())
}
}
} | pub filename: Tagged<String>, |
github.py | """Functions to interact with github API."""
import json
import re
from io import StringIO
import requests
GITHUB_API_BASE = 'https://api.github.com'
def build_github_url(
repo,
branch=None,
path='requirements.txt',
token=None
):
"""Builds a URL to a file inside a Github repository."""
repo = re.sub(r"^http(s)?://github.com/", "", repo).strip('/')
# args come is as 'None' instead of not being provided
if not path:
path = 'requirements.txt'
if not branch:
branch = get_default_branch(repo)
url = 'https://raw.githubusercontent.com/{}/{}/{}'.format(
repo, branch, path
)
if token:
|
return url
def get_default_branch(repo):
"""Returns the name of the default branch of the repo."""
url = "{}/repos/{}".format(GITHUB_API_BASE, repo)
response = requests.get(url)
if response.status_code == 200:
api_response = json.loads(response.text)
return api_response['default_branch']
else:
return 'master'
def get_requirements_file_from_url(url):
"""Fetches the requiremets from the url."""
response = requests.get(url)
if response.status_code == 200:
return StringIO(response.text)
else:
return StringIO("")
| url = '{}?token={}'.format(url, token) |
polyphony.py | import numpy as np
| ########################################
### Polyphony --- discarded
########################################
def polyphony_level_diff(roll_output,roll_target):
poly_output = np.sum(roll_output,axis=0)
poly_target = np.sum(roll_target,axis=0)
poly_diff = np.abs(poly_output-poly_target)
return np.mean(poly_diff),np.std(poly_diff),np.min(poly_diff),np.max(poly_diff)
# discarded
def false_negative_polyphony_level(roll_target,intervals_target,match):
fs = 100
if len(match) == 0:
unmatched_targets = list(range(intervals_target))
else:
matched_targets, matched_outputs = zip(*match)
# unmatched_targets= list(set(range(len(vel_target)))-set(matched_targets))
unmatched_targets= list(set(range(len(intervals_target)))-set(matched_targets))
unmatched_intervals = intervals_target[unmatched_targets,:]
all_avg_poly = []
for [start,end] in unmatched_intervals:
start_idx = int(round(start*fs))
end_idx = int(round(end*fs))
avg_poly = np.mean(np.sum(roll_target[:,start_idx:end_idx],axis=0))
all_avg_poly += [avg_poly]
return all_avg_poly | |
index.shared.js | // @flow
import without from 'lodash/without'
import uniq from 'lodash/uniq'
import difference from 'lodash/difference'
import {globalColors} from '../styles'
import {proveMessage} from '../util/platforms.js'
import {PlatformsExpanded} from '../constants/types/more'
import type {MissingProof} from '../common-adapters/user-proofs'
import type {Proof} from '../constants/tracker'
import type {PlatformsExpandedType} from '../constants/types/more'
import type {Folder} from '../constants/folders'
export function | (folder: Folder, style: ?Object = {}) {
const type = folder.isPublic
? folder.hasData ? 'iconfont-folder-public-has-files' : 'iconfont-folder-public'
: folder.hasData ? 'iconfont-folder-private-has-files' : 'iconfont-folder-private'
const color = folder.isPublic ? globalColors.yellowGreen : globalColors.darkBlue2
return {
type,
style: {...style, color},
}
}
export function missingProofs(
userProofs: Array<Proof>,
onClick: (missingProof: MissingProof) => void
): Array<MissingProof> {
let availableProofTypes = without(PlatformsExpanded, 'http', 'https', 'dnsOrGenericWebSite', 'dns')
const userProofTypes = uniq((userProofs || []).map(p => p.type))
// $FlowIssue thinks its just a string
const missingRegular = difference(availableProofTypes, userProofTypes).map((type: PlatformsExpanded) => ({
type,
message: proveMessage(type),
onClick,
}))
// always ensure you can add a web site
return missingRegular.concat({
type: 'dnsOrGenericWebSite',
message: proveMessage('dnsOrGenericWebSite'),
onClick,
})
}
export function revokeProofLanguage(platform: PlatformsExpandedType) {
let msg
switch (platform) {
case 'pgp':
msg = 'Drop key'
break
default:
msg = 'Revoke'
}
return msg
}
| folderIconProps |
import_service_types.py | # -*- coding: utf-8 -*-
__author__ = 'ffuentes'
| import sys
import csv
import logging
from apps.noclook.models import ServiceType, ServiceClass
from django.core.management.base import BaseCommand, CommandError
logger = logging.getLogger('noclook_service_types_import')
def insert_service_type(name, service_class):
try:
service_class_, created = ServiceClass.objects\
.get_or_create(name=service_class)
service_type, created = ServiceType.objects\
.get_or_create(name=name,
service_class=service_class_)
except Exception:
logger.warning('Bad things happened importing {} - {}'\
.format(name, service_class))
class Command(BaseCommand):
help = 'Import service types'
def add_arguments(self, parser):
parser.add_argument('--csv_file', help='The csv file to import',
type=str)
parser.add_argument('--no_header', action='store_true',
default=False, help='CSV file has no header')
def handle(self, *args, **options):
with open(options['csv_file'], 'r') as csv_file:
rows = csv.reader(csv_file)
#skip header
if not options['no_header']:
next(rows, None)
for name, service_class in rows:
insert_service_type(name, service_class) | import argparse
import os |
consenter_test.go |
//此源码被清华学神尹成大魔王专业翻译分析并修改
//尹成QQ77025077
//尹成微信18510341407
//尹成所在QQ群721929980
//尹成邮箱 [email protected]
//尹成毕业于清华大学,微软区块链领域全球最有价值专家
//https://mvp.microsoft.com/zh-cn/PublicProfile/4033620
/*
版权所有IBM公司。保留所有权利。
SPDX许可证标识符:Apache-2.0
**/
package etcdraft_test
import (
"io/ioutil"
"os"
"path"
"github.com/golang/protobuf/proto"
"github.com/hyperledger/fabric/common/crypto/tlsgen"
"github.com/hyperledger/fabric/common/flogging"
mockconfig "github.com/hyperledger/fabric/common/mocks/config"
"github.com/hyperledger/fabric/core/comm"
"github.com/hyperledger/fabric/orderer/common/cluster"
clustermocks "github.com/hyperledger/fabric/orderer/common/cluster/mocks"
"github.com/hyperledger/fabric/orderer/common/multichannel"
"github.com/hyperledger/fabric/orderer/consensus/etcdraft"
"github.com/hyperledger/fabric/orderer/consensus/etcdraft/mocks"
consensusmocks "github.com/hyperledger/fabric/orderer/consensus/mocks"
"github.com/hyperledger/fabric/protos/common"
"github.com/hyperledger/fabric/protos/orderer"
etcdraftproto "github.com/hyperledger/fabric/protos/orderer/etcdraft"
"github.com/hyperledger/fabric/protos/utils"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/stretchr/testify/mock"
)
var _ = Describe("Consenter", func() {
var (
chainGetter *mocks.ChainGetter
support *consensusmocks.FakeConsenterSupport
dataDir string
snapDir string
walDir string
err error
)
BeforeEach(func() {
chainGetter = &mocks.ChainGetter{}
support = &consensusmocks.FakeConsenterSupport{}
dataDir, err = ioutil.TempDir("", "snap-")
Expect(err).NotTo(HaveOccurred())
walDir = path.Join(dataDir, "wal-")
snapDir = path.Join(dataDir, "snap-")
blockBytes, err := ioutil.ReadFile("testdata/mychannel.block")
Expect(err).NotTo(HaveOccurred())
goodConfigBlock := &common.Block{}
proto.Unmarshal(blockBytes, goodConfigBlock)
lastBlock := &common.Block{
Header: &common.BlockHeader{
Number: 1,
},
Data: goodConfigBlock.Data,
Metadata: &common.BlockMetadata{
Metadata: [][]byte{{}, utils.MarshalOrPanic(&common.Metadata{
Value: utils.MarshalOrPanic(&common.LastConfig{Index: 0}),
})},
},
}
support.BlockReturns(lastBlock)
})
AfterEach(func() {
os.RemoveAll(dataDir)
})
When("the consenter is extracting the channel", func() {
It("extracts successfully from step requests", func() {
consenter := newConsenter(chainGetter)
ch := consenter.TargetChannel(&orderer.StepRequest{Channel: "mychannel"})
Expect(ch).To(BeIdenticalTo("mychannel"))
})
It("extracts successfully from submit requests", func() {
consenter := newConsenter(chainGetter)
ch := consenter.TargetChannel(&orderer.SubmitRequest{Channel: "mychannel"})
Expect(ch).To(BeIdenticalTo("mychannel"))
})
It("returns an empty string for the rest of the messages", func() {
consenter := newConsenter(chainGetter)
ch := consenter.TargetChannel(&common.Block{})
Expect(ch).To(BeEmpty())
})
})
When("the consenter is asked for a chain", func() {
chainInstance := &etcdraft.Chain{}
cs := &multichannel.ChainSupport{
Chain: chainInstance,
}
BeforeEach(func() {
chainGetter.On("GetChain", "mychannel").Return(cs)
chainGetter.On("GetChain", "badChainObject").Return(&multichannel.ChainSupport{})
chainGetter.On("GetChain", "notmychannel").Return(nil)
chainGetter.On("GetChain", "notraftchain").Return(&multichannel.ChainSupport{
Chain: &multichannel.ChainSupport{},
})
})
It("calls the chain getter and returns the reference when it is found", func() {
consenter := newConsenter(chainGetter)
Expect(consenter).NotTo(BeNil())
chain := consenter.ReceiverByChain("mychannel")
Expect(chain).NotTo(BeNil())
Expect(chain).To(BeIdenticalTo(chainInstance))
})
It("calls the chain getter and returns nil when it's not found", func() {
consenter := newConsenter(chainGetter)
Expect(consenter).NotTo(BeNil())
chain := consenter.ReceiverByChain("notmychannel")
Expect(chain).To(BeNil())
})
It("calls the chain getter and returns nil when it's not a raft chain", func() {
consenter := newConsenter(chainGetter)
Expect(consenter).NotTo(BeNil())
chain := consenter.ReceiverByChain("notraftchain")
Expect(chain).To(BeNil())
})
It("calls the chain getter and panics when the chain has a bad internal state", func() {
consenter := newConsenter(chainGetter)
Expect(consenter).NotTo(BeNil())
Expect(func() {
consenter.ReceiverByChain("badChainObject")
}).To(Panic())
})
})
It("successfully constructs a Chain", func() {
certBytes := []byte("cert.orderer0.org0")
m := &etcdraftproto.Metadata{
Consenters: []*etcdraftproto.Consenter{
{ServerTlsCert: certBytes},
},
Options: &etcdraftproto.Options{
TickInterval: 100,
ElectionTick: 10,
HeartbeatTick: 1,
MaxInflightMsgs: 256,
MaxSizePerMsg: 1048576,
},
}
metadata := utils.MarshalOrPanic(m)
support.SharedConfigReturns(&mockconfig.Orderer{ConsensusMetadataVal: metadata})
consenter := newConsenter(chainGetter)
consenter.EtcdRaftConfig.WALDir = walDir
consenter.EtcdRaftConfig.SnapDir = snapDir
chain, err := consenter.HandleChain(support, nil)
Expect(err).NotTo(HaveOccurred())
Expect(chain).NotTo(BeNil())
Expect(chain.Start).NotTo(Panic())
})
It("fails to handle chain if no matching cert found", func() {
m := &etcdraftproto.Metadata{
Consenters: []*etcdraftproto.Consenter{
{ServerTlsCert: []byte("cert.orderer1.org1")},
},
Options: &etcdraftproto.Options{
TickInterval: 100,
ElectionTick: 10,
HeartbeatTick: 1,
MaxInflightMsgs: 256,
MaxSizePerMsg: 1048576,
},
}
metadata := utils.MarshalOrPanic(m)
support.SharedConfigReturns(&mockconfig.Orderer{ConsensusMetadataVal: metadata})
consenter := newConsenter(chainGetter)
chain, err := consenter.HandleChain(support, &common.Metadata{})
Expect(chain).To(BeNil())
Expect(err).To(MatchError("failed to detect own Raft ID because no matching certificate found"))
})
It("fails to handle chain if WAL is expected but no data found", func() {
c := &etcdraftproto.Consenter{ServerTlsCert: []byte("cert.orderer0.org0")}
m := &etcdraftproto.Metadata{
Consenters: []*etcdraftproto.Consenter{c},
Options: &etcdraftproto.Options{
TickInterval: 100,
ElectionTick: 10,
HeartbeatTick: 1,
MaxInflightMsgs: 256,
MaxSizePerMsg: 1048576,
},
}
metadata := utils.MarshalOrPanic(m)
support.SharedConfigReturns(&mockconfig.Orderer{ConsensusMetadataVal: metadata})
dir, err := ioutil.TempDir("", "wal-")
Expect(err).NotTo(HaveOccurred())
defer os.RemoveAll(dir)
consenter := newConsenter(chainGetter)
consenter.EtcdRaftConfig.WALDir = walDir
consenter.EtcdRaftConfig.SnapDir = snapDir
d := &etcdraftproto.RaftMetadata{
Consenters: map[uint64]*etcdraftproto.Consenter{1: c},
RaftIndex: uint64(2),
}
chain, err := consenter.HandleChain(support, &common.Metadata{Value: utils.MarshalOrPanic(d)})
Expect(chain).To(BeNil())
Expect(err).To(MatchError(ContainSubstring("no WAL data found")))
})
It("fails to handle chain if etcdraft options have not been provided", func() {
m := &etcdraftproto.Metadata{
Consenters: []*etcdraftproto.Consenter{
{ServerTlsCert: []byte("cert.orderer1.org1")},
},
}
metadata := utils.MarshalOrPanic(m)
support.SharedConfigReturns(&mockconfig.Orderer{ConsensusMetadataVal: metadata})
consenter := newConsenter(chainGetter)
chain, err := consenter.HandleChain(support, nil)
Expect(chain).To(BeNil())
Expect(err).To(MatchError("etcdraft options have not been provided"))
})
})
func newConsenter(chainGetter *mocks.ChainGetter) *etcdraft.Consenter {
communicator := &clustermocks.Communicator{}
ca, err := tlsgen.NewCA()
Expect(err).NotTo(HaveO | communicator.On("Configure", mock.Anything, mock.Anything)
consenter := &etcdraft.Consenter{
Communication: communicator,
Cert: []byte("cert.orderer0.org0"),
Logger: flogging.MustGetLogger("test"),
Chains: chainGetter,
Dispatcher: &etcdraft.Dispatcher{
Logger: flogging.MustGetLogger("test"),
ChainSelector: &mocks.ReceiverGetter{},
},
Dialer: cluster.NewTLSPinningDialer(comm.ClientConfig{
SecOpts: &comm.SecureOptions{
Certificate: ca.CertBytes(),
},
}),
}
return consenter
}
| ccurred())
|
colors.ts | export var Colors = [
"000000",
"800000",
"008000",
"808000",
"000080",
"800080",
"008080",
"c0c0c0",
"808080",
"ff0000",
"00ff00",
"ffff00",
"0000ff",
"ff00ff",
"00ffff",
"ffffff",
"000000",
"00005f",
"000087",
"0000af",
"0000d7",
"0000ff",
"005f00",
"005f5f",
"005f87",
"005faf",
"005fd7",
"005fff",
"008700",
"00875f",
"008787",
"0087af",
"0087d7",
"0087ff",
"00af00",
"00af5f",
"00af87",
"00afaf",
"00afd7",
"00afff",
"00d700",
"00d75f",
"00d787",
"00d7af",
"00d7d7",
"00d7ff",
"00ff00",
"00ff5f",
"00ff87",
"00ffaf",
"00ffd7",
"00ffff",
"5f0000",
"5f005f",
"5f0087",
"5f00af",
"5f00d7",
"5f00ff",
"5f5f00",
"5f5f5f",
"5f5f87",
"5f5faf",
"5f5fd7",
"5f5fff",
"5f8700",
"5f875f",
"5f8787",
"5f87af",
"5f87d7",
"5f87ff",
"5faf00",
"5faf5f",
"5faf87",
"5fafaf",
"5fafd7",
"5fafff",
"5fd700",
"5fd75f",
"5fd787",
"5fd7af",
"5fd7d7",
"5fd7ff",
"5fff00",
"5fff5f",
"5fff87",
"5fffaf",
"5fffd7",
"5fffff",
"870000",
"87005f",
"870087",
"8700af",
"8700d7",
"8700ff",
"875f00",
"875f5f",
"875f87",
"875faf",
"875fd7",
"875fff",
"878700",
"87875f",
"878787",
"8787af",
"8787d7",
"8787ff",
"87af00",
"87af5f",
"87af87",
"87afaf",
"87afd7",
"87afff",
"87d700",
"87d75f",
"87d787",
"87d7af", | "87ff5f",
"87ff87",
"87ffaf",
"87ffd7",
"87ffff",
"af0000",
"af005f",
"af0087",
"af00af",
"af00d7",
"af00ff",
"af5f00",
"af5f5f",
"af5f87",
"af5faf",
"af5fd7",
"af5fff",
"af8700",
"af875f",
"af8787",
"af87af",
"af87d7",
"af87ff",
"afaf00",
"afaf5f",
"afaf87",
"afafaf",
"afafd7",
"afafff",
"afd700",
"afd75f",
"afd787",
"afd7af",
"afd7d7",
"afd7ff",
"afff00",
"afff5f",
"afff87",
"afffaf",
"afffd7",
"afffff",
"d70000",
"d7005f",
"d70087",
"d700af",
"d700d7",
"d700ff",
"d75f00",
"d75f5f",
"d75f87",
"d75faf",
"d75fd7",
"d75fff",
"d78700",
"d7875f",
"d78787",
"d787af",
"d787d7",
"d787ff",
"d7af00",
"d7af5f",
"d7af87",
"d7afaf",
"d7afd7",
"d7afff",
"d7d700",
"d7d75f",
"d7d787",
"d7d7af",
"d7d7d7",
"d7d7ff",
"d7ff00",
"d7ff5f",
"d7ff87",
"d7ffaf",
"d7ffd7",
"d7ffff",
"ff0000",
"ff005f",
"ff0087",
"ff00af",
"ff00d7",
"ff00ff",
"ff5f00",
"ff5f5f",
"ff5f87",
"ff5faf",
"ff5fd7",
"ff5fff",
"ff8700",
"ff875f",
"ff8787",
"ff87af",
"ff87d7",
"ff87ff",
"ffaf00",
"ffaf5f",
"ffaf87",
"ffafaf",
"ffafd7",
"ffafff",
"ffd700",
"ffd75f",
"ffd787",
"ffd7af",
"ffd7d7",
"ffd7ff",
"ffff00",
"ffff5f",
"ffff87",
"ffffaf",
"ffffd7",
"ffffff",
"080808",
"121212",
"1c1c1c",
"262626",
"303030",
"3a3a3a",
"444444",
"4e4e4e",
"585858",
"606060",
"666666",
"767676",
"808080",
"8a8a8a",
"949494",
"9e9e9e",
"a8a8a8",
"b2b2b2",
"bcbcbc",
"c6c6c6",
"d0d0d0",
"dadada",
"e4e4e4",
"eeeeee"
] | "87d7d7",
"87d7ff",
"87ff00", |
api_op_ListResourceInventory.go | // Code generated by smithy-go-codegen DO NOT EDIT.
package licensemanager
import (
"context"
awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
"github.com/aws/aws-sdk-go-v2/aws/signer/v4"
"github.com/aws/aws-sdk-go-v2/service/licensemanager/types"
"github.com/awslabs/smithy-go/middleware"
smithyhttp "github.com/awslabs/smithy-go/transport/http"
)
// Lists resources managed using Systems Manager inventory.
func (c *Client) ListResourceInventory(ctx context.Context, params *ListResourceInventoryInput, optFns ...func(*Options)) (*ListResourceInventoryOutput, error) {
if params == nil {
params = &ListResourceInventoryInput{}
}
result, metadata, err := c.invokeOperation(ctx, "ListResourceInventory", params, optFns, addOperationListResourceInventoryMiddlewares)
if err != nil {
return nil, err
}
out := result.(*ListResourceInventoryOutput)
out.ResultMetadata = metadata
return out, nil
}
type ListResourceInventoryInput struct {
// Filters to scope the results. The following filters and logical operators are
// supported:
//
// * account_id - The ID of the AWS account that owns the resource.
// Logical operators are EQUALS | NOT_EQUALS.
//
// * application_name - The name of the
// application. Logical operators are EQUALS | BEGINS_WITH.
//
// * license_included -
// The type of license included. Logical operators are EQUALS | NOT_EQUALS.
// Possible values are sql-server-enterprise | sql-server-standard | sql-server-web
// | windows-server-datacenter.
//
// * platform - The platform of the resource. Logical
// operators are EQUALS | BEGINS_WITH.
//
// * resource_id - The ID of the resource.
// Logical operators are EQUALS | NOT_EQUALS.
Filters []*types.InventoryFilter
// Maximum number of results to return in a single call.
MaxResults *int32
// Token for the next set of results.
NextToken *string
}
type ListResourceInventoryOutput struct {
// Token for the next set of results.
NextToken *string
// Information about the resources.
ResourceInventoryList []*types.ResourceInventory
// Metadata pertaining to the operation's result.
ResultMetadata middleware.Metadata
}
func addOperationListResourceInventoryMiddlewares(stack *middleware.Stack, options Options) (err error) {
err = stack.Serialize.Add(&awsAwsjson11_serializeOpListResourceInventory{}, middleware.After)
if err != nil {
return err
}
err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpListResourceInventory{}, middleware.After)
if err != nil {
return err
}
if err = addSetLoggerMiddleware(stack, options); err != nil |
if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
return err
}
if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
return err
}
if err = addResolveEndpointMiddleware(stack, options); err != nil {
return err
}
if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
return err
}
if err = addRetryMiddlewares(stack, options); err != nil {
return err
}
if err = addHTTPSignerV4Middleware(stack, options); err != nil {
return err
}
if err = awsmiddleware.AddAttemptClockSkewMiddleware(stack); err != nil {
return err
}
if err = addClientUserAgent(stack); err != nil {
return err
}
if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
return err
}
if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
return err
}
if err = addOpListResourceInventoryValidationMiddleware(stack); err != nil {
return err
}
if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListResourceInventory(options.Region), middleware.Before); err != nil {
return err
}
if err = addRequestIDRetrieverMiddleware(stack); err != nil {
return err
}
if err = addResponseErrorMiddleware(stack); err != nil {
return err
}
if err = addRequestResponseLogging(stack, options); err != nil {
return err
}
return nil
}
func newServiceMetadataMiddleware_opListResourceInventory(region string) *awsmiddleware.RegisterServiceMetadata {
return &awsmiddleware.RegisterServiceMetadata{
Region: region,
ServiceID: ServiceID,
SigningName: "license-manager",
OperationName: "ListResourceInventory",
}
}
| {
return err
} |
main.py | """
Copyright 2017-present, Airbnb Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import importlib
import os
from stream_alert.rule_processor.handler import StreamAlert
modules_to_import = set()
# walk the rules directory to dymanically import
for folder in ('matchers', 'rules'):
for root, dirs, files in os.walk(folder):
filtered_files = [rule_file for rule_file in files if not (rule_file.startswith((
'.', '__init__')) or rule_file.endswith('.pyc'))]
package_path = root.replace('/', '.')
for import_file in filtered_files:
import_module = os.path.splitext(import_file)[0]
if package_path and import_module:
modules_to_import.add('{}.{}'.format(package_path, import_module))
for module_name in modules_to_import:
importlib.import_module(module_name)
def handler(event, context):
| """Main Lambda handler function"""
StreamAlert(context).run(event) |
|
progressbar.go | package gform
import (
"github.com/Kethsar/w32"
)
type ProgressBar struct {
W32Control
}
func NewProgressBar(parent Controller) *ProgressBar {
pb := new(ProgressBar)
pb.init(parent)
|
return pb
}
func (this *ProgressBar) init(parent Controller) {
this.W32Control.init(w32.PROGRESS_CLASS, parent, 0, w32.WS_CHILD|w32.WS_VISIBLE)
RegMsgHandler(this)
}
func (this *ProgressBar) Value() uint {
ret := w32.SendMessage(this.hwnd, w32.PBM_GETPOS, 0, 0)
return uint(ret)
}
func (this *ProgressBar) SetValue(v uint) {
w32.SendMessage(this.hwnd, w32.PBM_SETPOS, uintptr(v), 0)
}
func (this *ProgressBar) Range() (min, max uint) {
min = uint(w32.SendMessage(this.hwnd, w32.PBM_GETRANGE, uintptr(w32.BoolToBOOL(true)), 0))
max = uint(w32.SendMessage(this.hwnd, w32.PBM_GETRANGE, uintptr(w32.BoolToBOOL(false)), 0))
return
}
func (this *ProgressBar) SetRange(min, max uint) {
w32.SendMessage(this.hwnd, w32.PBM_SETRANGE32, uintptr(min), uintptr(max))
} | pb.SetSize(200, 25) |
tagged-unions.ts | import type { AnyEnv } from '@morphic-ts/common/lib/config'
import { memo } from '@morphic-ts/common/lib/utils'
import type { ModelAlgebraTaggedUnions } from '@morphic-ts/model-algebras/lib/tagged-unions'
import { pipe } from 'fp-ts/pipeable'
import { toArray } from 'fp-ts/Record'
import { chainEitherK as SEchainEitherK } from 'fp-ts-contrib/lib/StateEither'
| import { JsonSchema, JsonSchemaURI } from '../hkt'
import { UnionTypeCtor } from '../json-schema/json-schema-ctors'
import { arrayTraverseStateEither } from '../utils'
/**
* @since 0.0.1
*/
export const jsonSchemaTaggedUnionInterpreter = memo(
<Env extends AnyEnv>(): ModelAlgebraTaggedUnions<JsonSchemaURI, Env> => ({
_F: JsonSchemaURI,
taggedUnion: (_tag, types, _name, config) => env =>
new JsonSchema(
jsonSchemaApplyConfig(config)(
pipe(
arrayTraverseStateEither(toArray(types), ([_, v]) => v(env).schema),
SEchainEitherK(UnionTypeCtor)
),
env
)
)
})
) | import { jsonSchemaApplyConfig } from '../config' |
browser.py | #!/usr/bin/env python3
#
# Copyright 2021 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from argparse import ArgumentParser
from argparse import Namespace
from pprint import PrettyPrinter
import sys
from urllib.parse import urlparse
from distroinfo import info as di
INFO_FILE = 'osp.yml'
RDOINFO_GIT_URL = 'https://code.engineering.redhat.com/gerrit/ospinfo'
APP_DESCRIPTION = 'Find OSP packages, repositories, components and releases.'
def get_distroinfo():
return di.DistroInfo(info_files=INFO_FILE,
cache_ttl=24*60*60, # 1 day in seconds
remote_git_info=RDOINFO_GIT_URL).get_info()
def get_components(**kwargs):
info = get_distroinfo()
components = info.get('components')
if kwargs.get('name'):
components = [component for component in components
if kwargs.get('name') == component.get('name')]
return components
def get_packages(**kwargs):
info = get_distroinfo()
packages = info.get('packages')
packages = [package for package in packages
if 'osp-name' in package.keys()]
if kwargs.get('component'):
packages = [package for package in packages
if kwargs.get('component') == package.get('component')]
if kwargs.get('name'):
packages = [package for package in packages
if kwargs.get('name') == package.get('name')]
if kwargs.get('tag'):
packages = [package for package in packages
if kwargs.get('tag') in package.get('tags')]
if kwargs.get('upstream'):
packages = [package for package in packages
if kwargs.get('upstream') in str(package.get('upstream'))]
for package in packages:
package['osp-project'] = urlparse(package['osp-patches']).path[1:]
return packages
def get_projects_mapping(**kwawrgs) -> dict:
packages = get_packages(**kwawrgs)
projects_mapping = {}
for package in packages:
if 'upstream' in package.keys() and package['upstream']:
upstream_name = urlparse(package['upstream']).path[1:]
upstream_name = upstream_name.replace("/", "-")
else:
upstream_name = package['name']
if 'osp-patches' in package.keys() and package['osp-patches']:
projects_mapping[upstream_name] = urlparse(
package['osp-patches']).path[1:]
else:
projects_mapping[upstream_name] = upstream_name
return projects_mapping
def get_releases(**kwargs):
info = get_distroinfo()
releases = info.get('osp_releases')
if kwargs.get('tag'):
releases = [release for release in releases
if kwargs.get('tag') in release.get('ospinfo_tag_name')]
return releases
def process_arguments(argv=None) -> Namespace:
|
def main(argv=None) -> None:
args = process_arguments(argv)
if args.command == 'components':
results = get_components(**vars(args))
default_output = ['name']
elif args.command == 'packages':
results = get_packages(**vars(args))
default_output = ['osp-name', 'osp-distgit', 'osp-patches']
elif args.command == 'releases':
results = get_releases(**vars(args))
default_output = ['ospinfo_tag_name', 'git_release_branch']
else:
results = None
if args.debug:
pp = PrettyPrinter()
pp.pprint(results)
return
if args.output:
output = [entry.strip() for entry in args.output.split(',')]
else:
output = default_output
if args.header:
print(' '.join(output))
print(' '.join(['-' * len(field) for field in output]))
for result in results:
print(' '.join([result.get(key, 'None') for key in output]))
if __name__ == '__main__':
main()
| parser = ArgumentParser(description=APP_DESCRIPTION)
subparsers = parser.add_subparsers(dest='command', metavar='command')
common = ArgumentParser(add_help=False)
common.add_argument('--debug', dest='debug',
default=False, action='store_true',
help='print all fields in output')
common.add_argument('--header', dest='header',
default=False, action='store_true',
help='print header with output names on top')
common.add_argument('--output', dest='output',
help='comma-separated list of fields to return')
components = subparsers.add_parser('components', help='', parents=[common])
components.add_argument('--name', dest='name')
packages = subparsers.add_parser('packages', help='', parents=[common])
packages.add_argument('--component', dest='component')
packages.add_argument('--name', dest='name')
packages.add_argument('--tag', dest='tag')
packages.add_argument('--upstream', dest='upstream')
releases = subparsers.add_parser('releases', help='', parents=[common])
releases.add_argument('--tag', dest='tag')
arguments = parser.parse_args(argv)
if not arguments.command:
parser.print_help()
sys.exit(1)
return arguments |
__init__.py | from nempy import markets, historical_spot_market_inputs |
||
cli.py | """Console script for python-github-stats.""" |
def cli_args():
"""Parse CLI arguments."""
parser = argparse.ArgumentParser(description="Manage GitHub via API.")
parser.add_argument(
"action", help="Define action to take.", choices=["user-attrs", "user-repos"]
)
parser.add_argument(
"--netrcfile",
help="Path to Netrc file",
default=os.path.join(os.path.expanduser("~"), ".netrc"),
)
parser.add_argument("--token", help="Your GitHub API private token.")
parser.add_argument(
"--url", help="GitHub API url", default="https://api.github.com"
)
args = parser.parse_args()
return args |
import argparse
import os |
minify-js.ts | import Terser from "./terser.js";
import Babel from "./babelstandalone.js";
export const minifyJavascript = (code: string): {
code: string;
error?: string;
} => {
const babelled = Babel.transform(
code,
{ presets: [["env"]] },
);
if (babelled.error) {
return babelled.error; | }; | }
return Terser.minify(babelled.code); |
app.module.ts | import { BrowserModule } from '@angular/platform-browser';
import { NgModule } from '@angular/core';
import { FormsModule, ReactiveFormsModule } from '@angular/forms';
import { HttpModule } from '@angular/http';
import { Routes, RouterModule } from '@angular/router';
import { AppComponent } from './app.component';
import { NgBrazil } from 'ng-brazil';
import { DemoComponent } from './demo/demo.component';
import { TextMaskModule } from 'angular2-text-mask';
const routes: Routes = [
{ path: '', redirectTo: '/demo', pathMatch: 'full' },
{ path: 'demo', component: DemoComponent }
];
@NgModule({
declarations: [
AppComponent, DemoComponent
],
imports: [
BrowserModule,
FormsModule,
ReactiveFormsModule,
TextMaskModule,
HttpModule,
NgBrazil,
RouterModule.forRoot(routes)
],
providers: [],
bootstrap: [AppComponent]
})
export class | { }
| AppModule |
client_strings.ts | const dict = {
'Normal': 'Normal',
'Magic': 'Magic',
'Rare': 'Rare',
'Unique': 'Unique',
'Gem': 'Gem',
'Currency': 'Currency',
'Divination Card': 'Divination Card',
'Map Tier: ': 'Map Tier: ',
'Rarity: ': 'Rarity: ',
'Item Level: ': 'Item Level: ',
'Talisman Tier: ': 'Talisman Tier: ',
'Level: ': 'Level: ',
'Stack Size: ': 'Stack Size: ',
'Sockets: ': 'Sockets: ',
'Quality: ': 'Quality: ',
'Physical Damage: ': 'Physical Damage: ',
'Elemental Damage: ': 'Elemental Damage: ',
'Critical Strike Chance: ': 'Critical Strike Chance: ',
'Attacks per Second: ': 'Attacks per Second: ',
'Armour: ': 'Armour: ',
'Evasion Rating: ': 'Evasion Rating: ',
'Energy Shield: ': 'Energy Shield: ',
'Chance to Block: ': 'Chance to Block: ',
'Corrupted': 'Corrupted',
'Unidentified': 'Unidentified',
'/^Superior (.*)$/': /^Superior (.*)$/,
'/^Blighted (.*)$/': /^Blighted (.*)$/,
'Shaper Item': 'Shaper Item',
'Elder Item': 'Elder Item',
'Crusader Item': 'Crusader Item',
'Hunter Item': 'Hunter Item',
'Redeemer Item': 'Redeemer Item',
'Warlord Item': 'Warlord Item',
'Synthesised Item': 'Synthesised Item', | 'Veiled Prefix': 'Veiled Prefix',
'Veiled Suffix': 'Veiled Suffix',
'/^Currently has \\d+ Charges$/': /^Currently has \d+ Charges$/,
'/^Spawns a Level (\\d+) Monster when Harvested$/': /^Spawns a Level (\d+) Monster when Harvested$/,
'Right-click this item then left-click the ground to plant it in the Sacred Grove.': 'Right-click this item then left-click the ground to plant it in the Sacred Grove.',
"Combine this with four other different samples in Tane's Laboratory.": "Combine this with four other different samples in Tane's Laboratory.",
'Right-click to add this to your bestiary.': 'Right-click to add this to your bestiary.',
'Right-click to add this prophecy to your character.': 'Right-click to add this prophecy to your character.',
'/^.* Brain$/': /^.* Brain$/,
'/^.* Eye$/': /^.* Eye$/,
'/^.* Lung$/': /^.* Lung$/,
'/^.* Heart$/': /^.* Heart$/,
'/^.* Liver$/': /^.* Liver$/,
'You will find Alva and complete her mission.': 'You will find Alva and complete her mission.',
'You will find Einhar and complete his mission.': 'You will find Einhar and complete his mission.',
'You will find Niko and complete his mission.': 'You will find Niko and complete his mission.',
'You will find Jun and complete her mission.': 'You will find Jun and complete her mission.',
'You will find Zana and complete her mission.': 'You will find Zana and complete her mission.',
'Blighted {0}': 'Blighted {0}',
'You cannot use this item. Its stats will be ignored': 'You cannot use this item. Its stats will be ignored',
'/^Anomalous (.*)$/': /^Anomalous (.*)$/,
'/^Divergent (.*)$/': /^Divergent (.*)$/,
'/^Phantasmal (.*)$/': /^Phantasmal (.*)$/,
'/^Requires (.+) \\(Level (\\d+)\\)$/': /^Requires (?<job>.+) \(Level (?<level>\d+)\)$/,
'Area Level: ': 'Area Level: ',
'Lockpicking': 'Lockpicking',
'Counter-Thaumaturgy': 'Counter-Thaumaturgy',
'Perception': 'Perception',
'Deception': 'Deception',
'Agility': 'Agility',
'Engineering': 'Engineering',
'Trap Disarmament': 'Trap Disarmament',
'Demolition': 'Demolition',
'Brute Force': 'Brute Force',
'Mirrored': 'Mirrored'
}
export default dict
export type TranslationDict = typeof dict | '/^Synthesised (.*)$/': /^Synthesised (.*)$/,
'/^Vaal .*$/': /^Vaal .*$/, |
vpnconnection.py | # Copyright (c) 2009-2010 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import boto
from datetime import datetime
from boto.resultset import ResultSet
"""
Represents a VPN Connectionn
"""
from boto.ec2.ec2object import TaggedEC2Object
class VpnConnectionOptions(object):
"""
Represents VPN connection options
:ivar static_routes_only: Indicates whether the VPN connection uses static
routes only. Static routes must be used for devices that don't support
BGP.
"""
def __init__(self, static_routes_only=None, tunnel_options=None):
self.static_routes_only = static_routes_only
self.tunnel_options = tunnel_options
def __repr__(self):
return 'VpnConnectionOptions'
def startElement(self, name, attrs, connection):
pass
def endElement(self, name, value, connection):
if name == 'staticRoutesOnly':
self.static_routes_only = True if value == 'true' else False
elif name == 'tunnelOptions':
self.tunnel_options = value
else:
setattr(self, name, value)
class VpnStaticRoute(object):
"""
Represents a static route for a VPN connection.
:ivar destination_cidr_block: The CIDR block associated with the local
subnet of the customer data center.
:ivar source: Indicates how the routes were provided.
:ivar state: The current state of the static route.
"""
def __init__(self, destination_cidr_block=None, source=None, state=None):
self.destination_cidr_block = destination_cidr_block
self.source = source
self.available = state
def __repr__(self):
return 'VpnStaticRoute: %s' % self.destination_cidr_block
def startElement(self, name, attrs, connection):
pass
def endElement(self, name, value, connection):
if name == 'destinationCidrBlock':
self.destination_cidr_block = value
elif name == 'source':
self.source = value
elif name == 'state':
self.state = value
else:
setattr(self, name, value)
class VpnTunnel(object):
"""
Represents telemetry for a VPN tunnel
:ivar outside_ip_address: The Internet-routable IP address of the
virtual private gateway's outside interface.
:ivar status: The status of the VPN tunnel. Valid values: UP | DOWN
:ivar last_status_change: The date and time of the last change in status.
:ivar status_message: If an error occurs, a description of the error.
:ivar accepted_route_count: The number of accepted routes.
"""
def __init__(self, outside_ip_address=None, status=None, last_status_change=None,
status_message=None, accepted_route_count=None):
self.outside_ip_address = outside_ip_address
self.status = status
self.last_status_change = last_status_change
self.status_message = status_message
self.accepted_route_count = accepted_route_count
def __repr__(self):
return 'VpnTunnel: %s' % self.outside_ip_address
def startElement(self, name, attrs, connection):
pass
def endElement(self, name, value, connection):
if name == 'outsideIpAddress':
self.outside_ip_address = value
elif name == 'status':
self.status = value
elif name == 'lastStatusChange':
self.last_status_change = datetime.strptime(value,
'%Y-%m-%dT%H:%M:%S.%fZ')
elif name == 'statusMessage':
self.status_message = value
elif name == 'acceptedRouteCount':
try:
value = int(value)
except ValueError:
boto.log.warning('Error converting code (%s) to int' % value)
self.accepted_route_count = value
else:
setattr(self, name, value)
class VpnConnection(TaggedEC2Object):
"""
Represents a VPN Connection
:ivar id: The ID of the VPN connection.
:ivar state: The current state of the VPN connection.
Valid values: pending | available | deleting | deleted
:ivar customer_gateway_configuration: The configuration information for the
VPN connection's customer gateway (in the native XML format). This
element is always present in the
:class:`boto.vpc.VPCConnection.create_vpn_connection` response;
however, it's present in the
:class:`boto.vpc.VPCConnection.get_all_vpn_connections` response only
if the VPN connection is in the pending or available state.
:ivar type: The type of VPN connection (ipsec.1).
:ivar customer_gateway_id: The ID of the customer gateway at your end of
the VPN connection.
:ivar vpn_gateway_id: The ID of the virtual private gateway
at the AWS side of the VPN connection.
:ivar tunnels: A list of the vpn tunnels (always 2)
:ivar options: The option set describing the VPN connection.
:ivar static_routes: A list of static routes associated with a VPN
connection.
"""
def __init__(self, connection=None):
|
def __repr__(self):
return 'VpnConnection:%s' % self.id
def startElement(self, name, attrs, connection):
retval = super(VpnConnection, self).startElement(name, attrs, connection)
if retval is not None:
return retval
if name == 'vgwTelemetry':
self.tunnels = ResultSet([('item', VpnTunnel)])
return self.tunnels
elif name == 'routes':
self.static_routes = ResultSet([('item', VpnStaticRoute)])
return self.static_routes
elif name == 'options':
self.options = VpnConnectionOptions()
return self.options
return None
def endElement(self, name, value, connection):
if name == 'vpnConnectionId':
self.id = value
elif name == 'state':
self.state = value
elif name == 'customerGatewayConfiguration':
self.customer_gateway_configuration = value
elif name == 'type':
self.type = value
elif name == 'customerGatewayId':
self.customer_gateway_id = value
elif name == 'vpnGatewayId':
self.vpn_gateway_id = value
else:
setattr(self, name, value)
def delete(self, dry_run=False):
return self.connection.delete_vpn_connection(
self.id,
dry_run=dry_run
)
| super(VpnConnection, self).__init__(connection)
self.id = None
self.state = None
self.customer_gateway_configuration = None
self.type = None
self.customer_gateway_id = None
self.vpn_gateway_id = None
self.tunnels = []
self.options = None
self.static_routes = [] |
SubNav.js | import React, { Component } from 'react'
import Paper from '@material-ui/core/Paper'
import Divider from '@material-ui/core/Divider'
import Tabs from '@material-ui/core/Tabs'
import Tab from '@material-ui/core/Tab'
import { observer, inject } from 'mobx-react'
import { computed } from 'mobx'
@inject('routing')
@inject('usersStore')
@inject('postsStore')
@inject('notificationsStore')
@observer
class SubNav extends Component {
state = {
value: this.currentView,
}
@computed get currentView() {
return this.props.routing.location.pathname
}
@computed get signedIn() {
return this.props.usersStore.signedIn
}
navigate = (event, value) => {
if (!this.signedIn && (value === '/favorites' || value === '/createpost')) {
this.props.notificationsStore.showMustLogin(()=> {
this.props.routing.push('/login')
})
return
}
if (value === '/createpost') {
this.props.postsStore.openPostDialog()
return
}
this.setState({value: value})
if(value === '/profile' && !this.signedIn) {
this.props.routing.push('/login')
return | }
render = props => <Paper square elevation={0}>
<Tabs
value={this.state.value}
onChange={this.navigate}
indicatorColor="primary"
textColor="primary"
fullWidth
>
<Tab style={{textTransform: 'capitalize'}} label="Home" value="/" />
<Tab style={{textTransform: 'capitalize'}} label="Explore" value="/explore" />
<Tab style={{textTransform: 'capitalize'}} label="About" value="/about" />
</Tabs>
<Divider />
</Paper>
}
export default SubNav | }
this.props.routing.push(value) |
multi-skill.spec.js | const assert = require('assert')
const {
ConverseTesting
} = require('../../index')
describe('Multi Skill', () => {
let converse, user
async function | (str, platform = 'website') {
converse = new ConverseTesting({
code: str,
skills: {
helloSkill: [
{
skill: `
displayProfile() {
> Call profile API in Google Actions
}
`,
condition(data, user) {
return data.session.platform == "gactions";
}
},
{
skill: `
displayProfile() {
> Call profile API in Amazon Alexa
}
`,
condition(data, user) {
return data.session.platform == "alexa";
}
},
{
skill: `
displayProfile() {
> Default response
}
`
}
]
}
}, {
loadSkills: false
})
await converse.loadSkills()
user = converse.createUser({
session: {
platform
}
})
}
it('Test condition() in multi skills, default response', async () => {
await code(`
@Event('start')
start() {
helloSkill.displayProfile()
}
`)
return user
.start(testing => {
assert.equal(testing.output(0), 'Default response')
})
.end()
})
it('Test condition() in multi skills, Gactions response', async () => {
await code(`
@Event('start')
start() {
helloSkill.displayProfile()
}
`, 'gactions')
return user
.start(testing => {
assert.equal(testing.output(0), 'Call profile API in Google Actions')
})
.end()
})
})
| code |
collection_replica_test.go | // Copyright (C) 2019-2020 Zilliz. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software distributed under the License
// is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
// or implied. See the License for the specific language governing permissions and limitations under the License.
package querynode
import (
"testing"
"github.com/stretchr/testify/assert"
)
//----------------------------------------------------------------------------------------------------- collection
func TestCollectionReplica_getCollectionNum(t *testing.T) {
node := newQueryNodeMock()
initTestMeta(t, node, 0, 0)
assert.Equal(t, node.historical.replica.getCollectionNum(), 1)
err := node.Stop()
assert.NoError(t, err)
}
func TestCollectionReplica_addCollection(t *testing.T) {
node := newQueryNodeMock()
initTestMeta(t, node, 0, 0)
err := node.Stop()
assert.NoError(t, err)
}
func TestCollectionReplica_removeCollection(t *testing.T) {
node := newQueryNodeMock()
initTestMeta(t, node, 0, 0)
assert.Equal(t, node.historical.replica.getCollectionNum(), 1)
err := node.historical.replica.removeCollection(0)
assert.NoError(t, err)
assert.Equal(t, node.historical.replica.getCollectionNum(), 0)
err = node.Stop()
assert.NoError(t, err)
}
func TestCollectionReplica_getCollectionByID(t *testing.T) {
node := newQueryNodeMock()
collectionID := UniqueID(0)
initTestMeta(t, node, collectionID, 0)
targetCollection, err := node.historical.replica.getCollectionByID(collectionID)
assert.NoError(t, err)
assert.NotNil(t, targetCollection)
assert.Equal(t, targetCollection.ID(), collectionID)
err = node.Stop()
assert.NoError(t, err)
}
func TestCollectionReplica_hasCollection(t *testing.T) {
node := newQueryNodeMock()
collectionID := UniqueID(0)
initTestMeta(t, node, collectionID, 0)
hasCollection := node.historical.replica.hasCollection(collectionID)
assert.Equal(t, hasCollection, true)
hasCollection = node.historical.replica.hasCollection(UniqueID(1))
assert.Equal(t, hasCollection, false)
err := node.Stop()
assert.NoError(t, err)
}
//----------------------------------------------------------------------------------------------------- partition
func TestCollectionReplica_getPartitionNum(t *testing.T) {
node := newQueryNodeMock()
collectionID := UniqueID(0)
initTestMeta(t, node, collectionID, 0)
partitionIDs := []UniqueID{1, 2, 3}
for _, id := range partitionIDs {
err := node.historical.replica.addPartition(collectionID, id)
assert.NoError(t, err)
partition, err := node.historical.replica.getPartitionByID(id)
assert.NoError(t, err)
assert.Equal(t, partition.ID(), id)
}
partitionNum := node.historical.replica.getPartitionNum()
assert.Equal(t, partitionNum, len(partitionIDs))
err := node.Stop()
assert.NoError(t, err)
}
func TestCollectionReplica_addPartition(t *testing.T) {
node := newQueryNodeMock()
collectionID := UniqueID(0)
initTestMeta(t, node, collectionID, 0)
partitionIDs := []UniqueID{1, 2, 3}
for _, id := range partitionIDs {
err := node.historical.replica.addPartition(collectionID, id)
assert.NoError(t, err)
partition, err := node.historical.replica.getPartitionByID(id)
assert.NoError(t, err)
assert.Equal(t, partition.ID(), id)
}
err := node.Stop()
assert.NoError(t, err)
}
func TestCollectionReplica_removePartition(t *testing.T) {
node := newQueryNodeMock()
collectionID := UniqueID(0)
initTestMeta(t, node, collectionID, 0)
partitionIDs := []UniqueID{1, 2, 3}
for _, id := range partitionIDs {
err := node.historical.replica.addPartition(collectionID, id)
assert.NoError(t, err)
partition, err := node.historical.replica.getPartitionByID(id)
assert.NoError(t, err)
assert.Equal(t, partition.ID(), id)
err = node.historical.replica.removePartition(id)
assert.NoError(t, err)
}
err := node.Stop()
assert.NoError(t, err)
}
func TestCollectionReplica_getPartitionByTag(t *testing.T) {
node := newQueryNodeMock()
collectionID := UniqueID(0)
initTestMeta(t, node, collectionID, 0)
collectionMeta := genTestCollectionMeta(collectionID, false)
for _, id := range collectionMeta.PartitionIDs {
err := node.historical.replica.addPartition(collectionID, id)
assert.NoError(t, err)
partition, err := node.historical.replica.getPartitionByID(id)
assert.NoError(t, err)
assert.Equal(t, partition.ID(), id)
assert.NotNil(t, partition)
}
err := node.Stop()
assert.NoError(t, err)
}
func TestCollectionReplica_hasPartition(t *testing.T) {
node := newQueryNodeMock()
collectionID := UniqueID(0)
initTestMeta(t, node, collectionID, 0)
collectionMeta := genTestCollectionMeta(collectionID, false)
err := node.historical.replica.addPartition(collectionID, collectionMeta.PartitionIDs[0])
assert.NoError(t, err)
hasPartition := node.historical.replica.hasPartition(defaultPartitionID)
assert.Equal(t, hasPartition, true)
hasPartition = node.historical.replica.hasPartition(defaultPartitionID + 1)
assert.Equal(t, hasPartition, false)
err = node.Stop()
assert.NoError(t, err)
}
//----------------------------------------------------------------------------------------------------- segment
func TestCollectionReplica_addSegment(t *testing.T) {
node := newQueryNodeMock()
collectionID := UniqueID(0)
initTestMeta(t, node, collectionID, 0)
const segmentNum = 3
for i := 0; i < segmentNum; i++ {
err := node.historical.replica.addSegment(UniqueID(i), defaultPartitionID, collectionID, "", segmentTypeGrowing, true)
assert.NoError(t, err)
targetSeg, err := node.historical.replica.getSegmentByID(UniqueID(i))
assert.NoError(t, err)
assert.Equal(t, targetSeg.segmentID, UniqueID(i))
}
err := node.Stop()
assert.NoError(t, err)
}
func TestCollectionReplica_removeSegment(t *testing.T) {
node := newQueryNodeMock()
collectionID := UniqueID(0)
initTestMeta(t, node, collectionID, 0)
const segmentNum = 3
for i := 0; i < segmentNum; i++ {
err := node.historical.replica.addSegment(UniqueID(i), defaultPartitionID, collectionID, "", segmentTypeGrowing, true)
assert.NoError(t, err)
targetSeg, err := node.historical.replica.getSegmentByID(UniqueID(i))
assert.NoError(t, err)
assert.Equal(t, targetSeg.segmentID, UniqueID(i))
err = node.historical.replica.removeSegment(UniqueID(i))
assert.NoError(t, err)
}
err := node.Stop()
assert.NoError(t, err)
}
func TestCollectionReplica_getSegmentByID(t *testing.T) {
node := newQueryNodeMock()
collectionID := UniqueID(0)
initTestMeta(t, node, collectionID, 0)
const segmentNum = 3
for i := 0; i < segmentNum; i++ {
err := node.historical.replica.addSegment(UniqueID(i), defaultPartitionID, collectionID, "", segmentTypeGrowing, true)
assert.NoError(t, err)
targetSeg, err := node.historical.replica.getSegmentByID(UniqueID(i))
assert.NoError(t, err)
assert.Equal(t, targetSeg.segmentID, UniqueID(i))
}
err := node.Stop()
assert.NoError(t, err)
}
func TestCollectionReplica_getSegmentInfosByColID(t *testing.T) {
node := newQueryNodeMock()
collectionID := UniqueID(0)
initTestMeta(t, node, collectionID, 0)
err := node.historical.replica.addSegment(UniqueID(1), defaultPartitionID, collectionID, "", segmentTypeGrowing, true)
assert.NoError(t, err)
err = node.historical.replica.addSegment(UniqueID(2), defaultPartitionID, collectionID, "", segmentTypeSealed, true)
assert.NoError(t, err)
err = node.historical.replica.addSegment(UniqueID(3), defaultPartitionID, collectionID, "", segmentTypeSealed, true)
assert.NoError(t, err)
segment, err := node.historical.replica.getSegmentByID(UniqueID(3))
assert.NoError(t, err)
segment.segmentType = segmentTypeIndexing
targetSeg, err := node.historical.replica.getSegmentInfosByColID(collectionID)
assert.NoError(t, err)
assert.Equal(t, 4, len(targetSeg))
err = node.Stop()
assert.NoError(t, err)
}
func TestCollectionReplica_hasSegment(t *testing.T) |
func TestCollectionReplica_freeAll(t *testing.T) {
node := newQueryNodeMock()
collectionID := UniqueID(0)
initTestMeta(t, node, collectionID, 0)
err := node.Stop()
assert.NoError(t, err)
}
func TestCollectionReplica_statistic(t *testing.T) {
t.Run("test getCollectionIDs", func(t *testing.T) {
replica, err := genSimpleReplica()
assert.NoError(t, err)
ids := replica.getCollectionIDs()
assert.Len(t, ids, 1)
assert.Equal(t, defaultCollectionID, ids[0])
})
t.Run("test getCollectionIDs", func(t *testing.T) {
replica, err := genSimpleReplica()
assert.NoError(t, err)
num := replica.getSegmentNum()
assert.Equal(t, 0, num)
})
}
| {
node := newQueryNodeMock()
collectionID := UniqueID(0)
initTestMeta(t, node, collectionID, 0)
const segmentNum = 3
for i := 0; i < segmentNum; i++ {
err := node.historical.replica.addSegment(UniqueID(i), defaultPartitionID, collectionID, "", segmentTypeGrowing, true)
assert.NoError(t, err)
targetSeg, err := node.historical.replica.getSegmentByID(UniqueID(i))
assert.NoError(t, err)
assert.Equal(t, targetSeg.segmentID, UniqueID(i))
hasSeg := node.historical.replica.hasSegment(UniqueID(i))
assert.Equal(t, hasSeg, true)
hasSeg = node.historical.replica.hasSegment(UniqueID(i + 100))
assert.Equal(t, hasSeg, false)
}
err := node.Stop()
assert.NoError(t, err)
} |
applyMiddleware.js | import compose from './compose'
/**
* 创建一个store增强器,将中间件应用于Redux store的dispatch方法。
* 这对于各种任务很方便,例如以简洁的方式表示异步操作,或者记录每个操作的有效负载。
*
* 请参阅`redux-thunk`软件包作为Redux中间件的示例。
*
* 因为中间件可能是异步的,所以应该是组合链中的第一个store增强器。
*
* 注意,每个中间件将`dispatch`和`getState`方法作为命名参数。
*
* @param {...Function} middlewares 要应用的中间件链。
* @returns {Function} 应用中间件的store增强器。
*/
export default function applyMiddleware(...middlewares) {
return createStore => (...args) => {
const store = createStore(...args)
let dispatch = () => {
throw new Error(
`Dispatching while constructing your middleware is not allowed. ` +
`Other middleware would not be appli | atch.`
)
}
let chain = []
const middlewareAPI = {
getState: store.getState,
dispatch: (...args) => dispatch(...args)
}
chain = middlewares.map(middleware => middleware(middlewareAPI))
dispatch = compose(...chain)(store.dispatch)
return {
...store,
dispatch
}
}
}
| ed to this disp |
ModifyFlowProjectClusterSettingRequest.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkemr.endpoint import endpoint_data
class ModifyFlowProjectClusterSettingRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Emr', '2016-04-08', 'ModifyFlowProjectClusterSetting','emr')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
|
def get_UserLists(self):
return self.get_query_params().get('UserLists')
def set_UserLists(self,UserLists):
for i in range(len(UserLists)):
if UserLists[i] is not None:
self.add_query_param('UserList.' + str(i + 1) , UserLists[i]);
def get_QueueLists(self):
return self.get_query_params().get('QueueLists')
def set_QueueLists(self,QueueLists):
for i in range(len(QueueLists)):
if QueueLists[i] is not None:
self.add_query_param('QueueList.' + str(i + 1) , QueueLists[i]);
def get_HostLists(self):
return self.get_query_params().get('HostLists')
def set_HostLists(self,HostLists):
for i in range(len(HostLists)):
if HostLists[i] is not None:
self.add_query_param('HostList.' + str(i + 1) , HostLists[i]);
def get_ClusterId(self):
return self.get_query_params().get('ClusterId')
def set_ClusterId(self,ClusterId):
self.add_query_param('ClusterId',ClusterId)
def get_DefaultQueue(self):
return self.get_query_params().get('DefaultQueue')
def set_DefaultQueue(self,DefaultQueue):
self.add_query_param('DefaultQueue',DefaultQueue)
def get_ProjectId(self):
return self.get_query_params().get('ProjectId')
def set_ProjectId(self,ProjectId):
self.add_query_param('ProjectId',ProjectId)
def get_DefaultUser(self):
return self.get_query_params().get('DefaultUser')
def set_DefaultUser(self,DefaultUser):
self.add_query_param('DefaultUser',DefaultUser) | setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional()) |
file-upload.component.ts | import { Component, Input, OnInit } from '@angular/core';
import { FileUploadService } from './file-upload.service';
import { NgModule } from '@angular/core';
import { Photo } from '../services/util/photo';
import {MatFormFieldControl, MatFormFieldModule} from '@angular/material/form-field';
import { PhotoData } from './photo-data';
import { FormControl, FormGroup} from '@angular/forms';
import { FormsModule } from '@angular/forms';
@Component({
selector: 'app-file-upload',
templateUrl: './file-upload.component.html',
styleUrls: ['./file-upload.component.css']
})
export class FileUploadComponent implements OnInit {
//-------------------- PHOTO FILE UPLOAD STUFF //--------------------
// Inject service
constructor(private fileUploadService: FileUploadService ) { }
// Variable to store shortLink from api response
shortLink: string = "";
loading: boolean = false; // Flag variable
file: File | undefined; // Variable to store file
ngOnInit(): void {
}
// On file Select
onChange(event: any) { | // OnClick of button Upload
onUpload() {
this.loading = !this.loading;
console.log(this.file);
this.fileUploadService.upload(this.file).subscribe(
(event: any) => {
if (typeof (event) === 'object') {
// Short link via api response
this.shortLink = event.link;
this.loading = false; // Flag variable
}
}
);
}
//-------------------- PHOTO INFO BINDING STUFF //--------------------
PhotoInformation = new FormControl('');
// just to know how
photodata1 : PhotoData = new PhotoData("this is my cat", "catpic1", "[email protected]");
photoDescript : string = "";
photoEmail : string = "";
photoName : string = "";
temp : string = "";
onSubmit()
{
console.log(this.photoDescript);
this.temp = this.photoName;
console.log(this.temp);
}
photodata2 : PhotoData = new PhotoData(this.photoDescript, this.photoName, this.photoEmail);
// dont know how to use these from here
} | this.file = event.target.files[0];
}
|
emnist.py | import os.path as pt
import numpy as np
import torchvision.transforms as transforms
import torch
from torch.utils.data import DataLoader
from torchvision.datasets import EMNIST
def ceil(x: float):
return int(np.ceil(x))
class MyEMNIST(EMNIST):
""" Reimplements get_item to transform tensor input to pil image before applying transformation. """
def __getitem__(self, index):
img, target = self.data[index], self.targets[index]
# doing this so that it is consistent with all other datasets
# to return a PIL Image
img = transforms.ToPILImage()(img) | if self.target_transform is not None:
target = self.target_transform(target)
if self.transform is not None:
img = self.transform(img)
return img, target
class OEEMNIST(EMNIST):
def __init__(self, size: torch.Size, root: str = None, split='letters', limit_var=20): # split = Train
"""
Outlier Exposure dataset for EMNIST.
:param size: size of the samples in n x c x h x w, samples will be resized to h x w. If n is larger than the
number of samples available in EMNIST, dataset will be enlarged by repetitions to fit n.
This is important as exactly n images are extracted per iteration of the data_loader.
For online supervision n should be set to 1 because only one sample is extracted at a time.
:param root: root directory where data is found or is to be downloaded to.
:param split: The dataset has 6 different splits: ``byclass``, ``bymerge``,
``balanced``, ``letters``, ``digits`` and ``mnist``. This argument specifies
which one to use.
:param limit_var: limits the number of different samples, i.e. randomly chooses limit_var many samples
from all available ones to be the training data.
"""
assert len(size) == 3 and size[1] == size[2]
root = pt.join(root, 'emnist', )
transform = transforms.Compose([
transforms.Resize((size[1], size[2])),
transforms.ToTensor()
])
super().__init__(root, split, transform=transform, download=True)
self.size = size
self.data = self.data.transpose(1, 2)
self.idx_to_class = {v: k for k, v in self.class_to_idx.items()}
if limit_var is not None and limit_var < len(self):
picks = np.random.choice(np.arange(self.data.size(0)), size=limit_var, replace=False)
self.data = self.data[picks]
self.targets = self.targets[picks]
if limit_var is not None and limit_var > len(self):
print(
'OEEMNIST shall be limited to {} samples, but Cifar100 contains only {} samples, thus using all.'
.format(limit_var, len(self))
)
if len(self) < size[0]:
rep = ceil(size[0] / len(self))
old = len(self)
self.data = self.data.repeat(rep, 1, 1)
self.targets = self.targets.repeat(rep)
if rep != size[0] / old:
import warnings
warnings.warn(
'OEEMNIST has been limited to {} samples. '
'Due to the requested size of {}, the dataset will be enlarged. '
'But {} repetitions will make some samples appear more often than others in the dataset, '
'because the final size after repetitions is {}, which is cut to {}'
.format(limit_var, size[0], rep, len(self), size[0])
)
def data_loader(self):
return DataLoader(dataset=self, batch_size=self.size[0], shuffle=True, num_workers=0)
def __getitem__(self, index):
sample, target = super().__getitem__(index)
sample = sample.squeeze().mul(255).byte()
return sample | |
test_panel_field_outputs.py | import numpy as np
from compmech.panel import Panel
from compmech.analysis import Analysis
from compmech.sparse import solve
def | ():
m = 7
n = 6
#TODO implement for conical panels
strain_field = dict(exx=None, eyy=None, gxy=None, kxx=None, kyy=None, kxy=None)
stress_field = dict(Nxx=None, Nyy=None, Nxy=None, Mxx=None, Myy=None, Mxy=None)
for model in ['plate_clt_donnell_bardell',
'cpanel_clt_donnell_bardell']:
p = Panel()
p.model = model
p.u1tx = 1
p.u1ty = 1
p.u2ty = 1
p.v1tx = 0
p.v2tx = 0
p.v1ty = 0
p.v2ty = 0
p.a = 2.
p.b = 1.
p.r = 1.e5
p.stack = [0, -45, +45, 90, +45, -45, 0, 0]
p.plyt = 1e-3*0.125
p.laminaprop = (142.5e9, 8.7e9, 0.28, 5.1e9, 5.1e9, 5.1e9)
p.nx = m
p.ny = n
p.m = m
p.n = n
P = 1000.
npts = 100
p.forces_inc = []
for y in np.linspace(0, p.b, npts):
p.forces_inc.append([0., y, P/(npts-1.), 0, 0])
p.forces_inc[0][2] /= 2.
p.forces_inc[-1][2] /= 2.
p.static()
c = p.analysis.cs[0]
Ns = p.stress(c, gridx=50, gridy=50)
es = p.strain(c, gridx=50, gridy=50)
for k, v in strain_field.items():
if v is None:
strain_field[k] = es.get(k).min()
else:
assert np.isclose(strain_field[k], es.get(k).min(), rtol=0.05)
p.plot(c, vec=k, filename='tmp_test_panel_strain_field_%s.png' % k)
for k, v in stress_field.items():
if v is None:
stress_field[k] = Ns.get(k).min()
else:
assert np.isclose(stress_field[k], Ns.get(k).min(), rtol=0.05)
p.plot(c, vec=k, filename='tmp_test_panel_stress_field_%s.png' % k)
| test_panel_field_outputs |
DataManager.d.ts |
declare namespace DataManager {
interface IConfig {
name?: string,
load?: boolean,
default?: { [key: string]: any },
reset?: boolean
}
}
declare class DataManager extends Phaser.Data.DataManager {
constructor(
config?: DataManager.IConfig
);
constructor(
parent?: object,
config?: DataManager.IConfig
);
constructor(
parent?: object,
eventEmitter?: Phaser.Events.EventEmitter,
config?: DataManager.IConfig
);
load(
defaultValue?: { [key: string]: any },
reset?: boolean
): this;
getDefaultValue(key: string): any;
} | export default DataManager; |
|
modal.js | define([
'jquery',
'plugins/colorbox/jquery.colorbox',
'css!plugins/colorbox/css/colorbox'
], function($) {
return {
initialize : function(callback) {
$.colorbox.settings.className = 'dcf-main';
$(callback);
} | }); | }; |
build_marker.d.ts | /// <amd-module name="@angular/compiler-cli/ngcc/src/packages/build_marker" />
/**
* @license
* Copyright Google LLC All Rights Reserved.
*
* Use of this source code is governed by an MIT-style license that can be
* found in the LICENSE file at https://angular.io/license
*/
import { AbsoluteFsPath } from '../../../src/ngtsc/file_system';
import { PackageJsonUpdater } from '../writing/package_json_updater';
import { EntryPointPackageJson, PackageJsonFormatProperties } from './entry_point';
export declare const NGCC_VERSION = "12.2.11";
/**
* Returns true if there is a format in this entry-point that was compiled with an outdated version
* of ngcc.
*
* @param packageJson The parsed contents of the package.json for the entry-point
*/
export declare function needsCleaning(packageJson: EntryPointPackageJson): boolean;
/**
* Clean any build marker artifacts from the given `packageJson` object.
* @param packageJson The parsed contents of the package.json to modify
* @returns true if the package was modified during cleaning
*/
export declare function cleanPackageJson(packageJson: EntryPointPackageJson): boolean;
/**
* Check whether ngcc has already processed a given entry-point format.
*
* @param packageJson The parsed contents of the package.json file for the entry-point.
* @param format The entry-point format property in the package.json to check.
* @returns true if the `format` in the entry-point has already been processed by this ngcc version,
* false otherwise. | */
export declare function hasBeenProcessed(packageJson: EntryPointPackageJson, format: PackageJsonFormatProperties): boolean;
/**
* Write a build marker for the given entry-point and format properties, to indicate that they have
* been compiled by this version of ngcc.
*
* @param pkgJsonUpdater The writer to use for updating `package.json`.
* @param packageJson The parsed contents of the `package.json` file for the entry-point.
* @param packageJsonPath The absolute path to the `package.json` file.
* @param properties The properties in the `package.json` of the formats for which we are writing
* the marker.
*/
export declare function markAsProcessed(pkgJsonUpdater: PackageJsonUpdater, packageJson: EntryPointPackageJson, packageJsonPath: AbsoluteFsPath, formatProperties: PackageJsonFormatProperties[]): void; | |
bond-inter-unit-cylinder.ts | /**
* Copyright (c) 2018-2020 mol* contributors, licensed under MIT, See LICENSE file for more info.
*
* @author Alexander Rose <[email protected]>
*/
import { ParamDefinition as PD } from '../../../mol-util/param-definition';
import { VisualContext } from '../../visual';
import { Structure, StructureElement, Bond, Unit } from '../../../mol-model/structure';
import { Theme } from '../../../mol-theme/theme';
import { Mesh } from '../../../mol-geo/geometry/mesh/mesh';
import { Vec3 } from '../../../mol-math/linear-algebra';
import { BitFlags, arrayEqual } from '../../../mol-util';
import { createLinkCylinderMesh, LinkStyle } from './util/link';
import { ComplexMeshParams, ComplexVisual, ComplexMeshVisual } from '../complex-visual';
import { VisualUpdateState } from '../../util';
import { isHydrogen } from './util/common';
import { BondType } from '../../../mol-model/structure/model/types';
import { ignoreBondType, BondCylinderParams, BondIterator, getInterBondLoci, eachInterBond } from './util/bond';
const tmpRefPosBondIt = new Bond.ElementBondIterator();
function setRefPosition(pos: Vec3, structure: Structure, unit: Unit.Atomic, index: StructureElement.UnitIndex) {
tmpRefPosBondIt.setElement(structure, unit, index);
while (tmpRefPosBondIt.hasNext) {
const bA = tmpRefPosBondIt.move();
bA.otherUnit.conformation.position(bA.otherUnit.elements[bA.otherIndex], pos);
return pos;
}
return null;
}
const tmpRef = Vec3();
const tmpLoc = StructureElement.Location.create(void 0);
function | (ctx: VisualContext, structure: Structure, theme: Theme, props: PD.Values<InterUnitBondCylinderParams>, mesh?: Mesh) {
const bonds = structure.interUnitBonds;
const { edgeCount, edges } = bonds;
const { sizeFactor, sizeAspectRatio, ignoreHydrogens, includeTypes, excludeTypes } = props;
const include = BondType.fromNames(includeTypes);
const exclude = BondType.fromNames(excludeTypes);
const ignoreHydrogen = ignoreHydrogens ? (edgeIndex: number) => {
const b = edges[edgeIndex];
const uA = b.unitA, uB = b.unitB;
return isHydrogen(uA, uA.elements[b.indexA]) || isHydrogen(uB, uB.elements[b.indexB]);
} : () => false;
if (!edgeCount) return Mesh.createEmpty(mesh);
const builderProps = {
linkCount: edgeCount,
referencePosition: (edgeIndex: number) => {
const b = edges[edgeIndex];
let unitA: Unit, unitB: Unit;
let indexA: StructureElement.UnitIndex, indexB: StructureElement.UnitIndex;
if (b.unitA.id < b.unitB.id) {
unitA = b.unitA, unitB = b.unitB;
indexA = b.indexA, indexB = b.indexB;
} else if (b.unitA.id > b.unitB.id) {
unitA = b.unitB, unitB = b.unitA;
indexA = b.indexB, indexB = b.indexA;
} else {
throw new Error('same units in createInterUnitBondCylinderMesh');
}
return setRefPosition(tmpRef, structure, unitA, indexA) || setRefPosition(tmpRef, structure, unitB, indexB);
},
position: (posA: Vec3, posB: Vec3, edgeIndex: number) => {
const b = edges[edgeIndex];
const uA = b.unitA, uB = b.unitB;
uA.conformation.position(uA.elements[b.indexA], posA);
uB.conformation.position(uB.elements[b.indexB], posB);
},
style: (edgeIndex: number) => {
const o = edges[edgeIndex].props.order;
const f = BitFlags.create(edges[edgeIndex].props.flag);
if (BondType.is(f, BondType.Flag.MetallicCoordination) || BondType.is(f, BondType.Flag.HydrogenBond)) {
// show metall coordinations and hydrogen bonds with dashed cylinders
return LinkStyle.Dashed;
} else if (o === 2) {
return LinkStyle.Double;
} else if (o === 3) {
return LinkStyle.Triple;
} else {
return LinkStyle.Solid;
}
},
radius: (edgeIndex: number) => {
const b = edges[edgeIndex];
tmpLoc.structure = structure;
tmpLoc.unit = b.unitA;
tmpLoc.element = b.unitA.elements[b.indexA];
const sizeA = theme.size.size(tmpLoc);
tmpLoc.unit = b.unitB;
tmpLoc.element = b.unitB.elements[b.indexB];
const sizeB = theme.size.size(tmpLoc);
return Math.min(sizeA, sizeB) * sizeFactor * sizeAspectRatio;
},
ignore: (edgeIndex: number) => ignoreHydrogen(edgeIndex) || ignoreBondType(include, exclude, edges[edgeIndex].props.flag)
};
return createLinkCylinderMesh(ctx, builderProps, props, mesh);
}
export const InterUnitBondCylinderParams = {
...ComplexMeshParams,
...BondCylinderParams,
sizeFactor: PD.Numeric(0.3, { min: 0, max: 10, step: 0.01 }),
sizeAspectRatio: PD.Numeric(2 / 3, { min: 0, max: 3, step: 0.01 }),
ignoreHydrogens: PD.Boolean(false),
};
export type InterUnitBondCylinderParams = typeof InterUnitBondCylinderParams
export function InterUnitBondCylinderVisual(materialId: number): ComplexVisual<InterUnitBondCylinderParams> {
return ComplexMeshVisual<InterUnitBondCylinderParams>({
defaultProps: PD.getDefaultValues(InterUnitBondCylinderParams),
createGeometry: createInterUnitBondCylinderMesh,
createLocationIterator: BondIterator.fromStructure,
getLoci: getInterBondLoci,
eachLocation: eachInterBond,
setUpdateState: (state: VisualUpdateState, newProps: PD.Values<InterUnitBondCylinderParams>, currentProps: PD.Values<InterUnitBondCylinderParams>) => {
state.createGeometry = (
newProps.sizeFactor !== currentProps.sizeFactor ||
newProps.sizeAspectRatio !== currentProps.sizeAspectRatio ||
newProps.radialSegments !== currentProps.radialSegments ||
newProps.linkScale !== currentProps.linkScale ||
newProps.linkSpacing !== currentProps.linkSpacing ||
newProps.ignoreHydrogens !== currentProps.ignoreHydrogens ||
newProps.linkCap !== currentProps.linkCap ||
!arrayEqual(newProps.includeTypes, currentProps.includeTypes) ||
!arrayEqual(newProps.excludeTypes, currentProps.excludeTypes)
);
}
}, materialId);
}
| createInterUnitBondCylinderMesh |
serializers.go | // Code generated by smithy-go-codegen DO NOT EDIT.
package servicecatalogappregistry
import (
"bytes"
"context"
"fmt"
smithy "github.com/aws/smithy-go"
"github.com/aws/smithy-go/encoding/httpbinding"
smithyjson "github.com/aws/smithy-go/encoding/json"
"github.com/aws/smithy-go/middleware"
smithyhttp "github.com/aws/smithy-go/transport/http"
)
type awsRestjson1_serializeOpAssociateAttributeGroup struct {
}
func (*awsRestjson1_serializeOpAssociateAttributeGroup) ID() string {
return "OperationSerializer"
}
func (m *awsRestjson1_serializeOpAssociateAttributeGroup) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
out middleware.SerializeOutput, metadata middleware.Metadata, err error,
) {
request, ok := in.Request.(*smithyhttp.Request)
if !ok {
return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
}
input, ok := in.Parameters.(*AssociateAttributeGroupInput)
_ = input
if !ok {
return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
}
opPath, opQuery := httpbinding.SplitURI("/applications/{application}/attribute-groups/{attributeGroup}")
request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
request.Method = "PUT"
restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
if err != nil {
return out, metadata, &smithy.SerializationError{Err: err}
}
if err := awsRestjson1_serializeOpHttpBindingsAssociateAttributeGroupInput(input, restEncoder); err != nil {
return out, metadata, &smithy.SerializationError{Err: err}
}
if request.Request, err = restEncoder.Encode(request.Request); err != nil {
return out, metadata, &smithy.SerializationError{Err: err}
}
in.Request = request
return next.HandleSerialize(ctx, in)
}
func awsRestjson1_serializeOpHttpBindingsAssociateAttributeGroupInput(v *AssociateAttributeGroupInput, encoder *httpbinding.Encoder) error {
if v == nil {
return fmt.Errorf("unsupported serialization of nil %T", v)
}
if v.Application == nil || len(*v.Application) == 0 {
return &smithy.SerializationError{Err: fmt.Errorf("input member application must not be empty")}
}
if v.Application != nil {
if err := encoder.SetURI("application").String(*v.Application); err != nil {
return err
}
}
if v.AttributeGroup == nil || len(*v.AttributeGroup) == 0 {
return &smithy.SerializationError{Err: fmt.Errorf("input member attributeGroup must not be empty")}
}
if v.AttributeGroup != nil {
if err := encoder.SetURI("attributeGroup").String(*v.AttributeGroup); err != nil {
return err
}
}
return nil
}
type awsRestjson1_serializeOpAssociateResource struct {
}
func (*awsRestjson1_serializeOpAssociateResource) ID() string {
return "OperationSerializer"
}
func (m *awsRestjson1_serializeOpAssociateResource) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
out middleware.SerializeOutput, metadata middleware.Metadata, err error,
) {
request, ok := in.Request.(*smithyhttp.Request)
if !ok {
return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
}
input, ok := in.Parameters.(*AssociateResourceInput)
_ = input
if !ok {
return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
}
opPath, opQuery := httpbinding.SplitURI("/applications/{application}/resources/{resourceType}/{resource}")
request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
request.Method = "PUT"
restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
if err != nil {
return out, metadata, &smithy.SerializationError{Err: err}
}
if err := awsRestjson1_serializeOpHttpBindingsAssociateResourceInput(input, restEncoder); err != nil {
return out, metadata, &smithy.SerializationError{Err: err}
}
if request.Request, err = restEncoder.Encode(request.Request); err != nil {
return out, metadata, &smithy.SerializationError{Err: err}
}
in.Request = request
return next.HandleSerialize(ctx, in)
}
func awsRestjson1_serializeOpHttpBindingsAssociateResourceInput(v *AssociateResourceInput, encoder *httpbinding.Encoder) error {
if v == nil {
return fmt.Errorf("unsupported serialization of nil %T", v)
}
if v.Application == nil || len(*v.Application) == 0 {
return &smithy.SerializationError{Err: fmt.Errorf("input member application must not be empty")}
}
if v.Application != nil {
if err := encoder.SetURI("application").String(*v.Application); err != nil {
return err
}
}
if v.Resource == nil || len(*v.Resource) == 0 {
return &smithy.SerializationError{Err: fmt.Errorf("input member resource must not be empty")}
}
if v.Resource != nil {
if err := encoder.SetURI("resource").String(*v.Resource); err != nil {
return err
}
}
if len(v.ResourceType) == 0 {
return &smithy.SerializationError{Err: fmt.Errorf("input member resourceType must not be empty")}
}
if len(v.ResourceType) > 0 {
if err := encoder.SetURI("resourceType").String(string(v.ResourceType)); err != nil {
return err
}
}
return nil
}
type awsRestjson1_serializeOpCreateApplication struct {
}
func (*awsRestjson1_serializeOpCreateApplication) ID() string {
return "OperationSerializer"
}
func (m *awsRestjson1_serializeOpCreateApplication) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
out middleware.SerializeOutput, metadata middleware.Metadata, err error,
) {
request, ok := in.Request.(*smithyhttp.Request)
if !ok {
return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
}
input, ok := in.Parameters.(*CreateApplicationInput)
_ = input
if !ok {
return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
}
opPath, opQuery := httpbinding.SplitURI("/applications")
request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
request.Method = "POST"
restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
if err != nil {
return out, metadata, &smithy.SerializationError{Err: err}
}
restEncoder.SetHeader("Content-Type").String("application/json")
jsonEncoder := smithyjson.NewEncoder()
if err := awsRestjson1_serializeOpDocumentCreateApplicationInput(input, jsonEncoder.Value); err != nil {
return out, metadata, &smithy.SerializationError{Err: err}
}
if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil {
return out, metadata, &smithy.SerializationError{Err: err}
}
if request.Request, err = restEncoder.Encode(request.Request); err != nil {
return out, metadata, &smithy.SerializationError{Err: err}
}
in.Request = request
return next.HandleSerialize(ctx, in)
}
func awsRestjson1_serializeOpHttpBindingsCreateApplicationInput(v *CreateApplicationInput, encoder *httpbinding.Encoder) error {
if v == nil {
return fmt.Errorf("unsupported serialization of nil %T", v)
}
return nil
}
func awsRestjson1_serializeOpDocumentCreateApplicationInput(v *CreateApplicationInput, value smithyjson.Value) error {
object := value.Object()
defer object.Close()
if v.ClientToken != nil {
ok := object.Key("clientToken")
ok.String(*v.ClientToken)
}
if v.Description != nil {
ok := object.Key("description")
ok.String(*v.Description)
}
if v.Name != nil {
ok := object.Key("name")
ok.String(*v.Name)
}
if v.Tags != nil {
ok := object.Key("tags")
if err := awsRestjson1_serializeDocumentTags(v.Tags, ok); err != nil {
return err
}
}
return nil
}
type awsRestjson1_serializeOpCreateAttributeGroup struct {
}
func (*awsRestjson1_serializeOpCreateAttributeGroup) ID() string {
return "OperationSerializer"
}
func (m *awsRestjson1_serializeOpCreateAttributeGroup) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
out middleware.SerializeOutput, metadata middleware.Metadata, err error,
) {
request, ok := in.Request.(*smithyhttp.Request)
if !ok {
return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
}
input, ok := in.Parameters.(*CreateAttributeGroupInput)
_ = input
if !ok {
return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
}
opPath, opQuery := httpbinding.SplitURI("/attribute-groups")
request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
request.Method = "POST"
restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
if err != nil {
return out, metadata, &smithy.SerializationError{Err: err}
}
restEncoder.SetHeader("Content-Type").String("application/json")
jsonEncoder := smithyjson.NewEncoder()
if err := awsRestjson1_serializeOpDocumentCreateAttributeGroupInput(input, jsonEncoder.Value); err != nil {
return out, metadata, &smithy.SerializationError{Err: err}
}
if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil {
return out, metadata, &smithy.SerializationError{Err: err}
}
if request.Request, err = restEncoder.Encode(request.Request); err != nil {
return out, metadata, &smithy.SerializationError{Err: err}
}
in.Request = request
return next.HandleSerialize(ctx, in)
}
func awsRestjson1_serializeOpHttpBindingsCreateAttributeGroupInput(v *CreateAttributeGroupInput, encoder *httpbinding.Encoder) error {
if v == nil {
return fmt.Errorf("unsupported serialization of nil %T", v)
}
return nil
}
func awsRestjson1_serializeOpDocumentCreateAttributeGroupInput(v *CreateAttributeGroupInput, value smithyjson.Value) error {
object := value.Object()
defer object.Close()
if v.Attributes != nil {
ok := object.Key("attributes")
ok.String(*v.Attributes)
}
if v.ClientToken != nil {
ok := object.Key("clientToken")
ok.String(*v.ClientToken)
}
if v.Description != nil {
ok := object.Key("description")
ok.String(*v.Description)
}
if v.Name != nil {
ok := object.Key("name")
ok.String(*v.Name)
}
if v.Tags != nil {
ok := object.Key("tags")
if err := awsRestjson1_serializeDocumentTags(v.Tags, ok); err != nil {
return err
}
}
return nil
}
type awsRestjson1_serializeOpDeleteApplication struct {
}
func (*awsRestjson1_serializeOpDeleteApplication) ID() string {
return "OperationSerializer"
}
func (m *awsRestjson1_serializeOpDeleteApplication) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
out middleware.SerializeOutput, metadata middleware.Metadata, err error,
) {
request, ok := in.Request.(*smithyhttp.Request)
if !ok {
return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
}
input, ok := in.Parameters.(*DeleteApplicationInput)
_ = input
if !ok {
return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
}
opPath, opQuery := httpbinding.SplitURI("/applications/{application}")
request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
request.Method = "DELETE"
restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
if err != nil {
return out, metadata, &smithy.SerializationError{Err: err}
}
if err := awsRestjson1_serializeOpHttpBindingsDeleteApplicationInput(input, restEncoder); err != nil {
return out, metadata, &smithy.SerializationError{Err: err}
}
if request.Request, err = restEncoder.Encode(request.Request); err != nil {
return out, metadata, &smithy.SerializationError{Err: err}
}
in.Request = request
return next.HandleSerialize(ctx, in)
}
func awsRestjson1_serializeOpHttpBindingsDeleteApplicationInput(v *DeleteApplicationInput, encoder *httpbinding.Encoder) error {
if v == nil {
return fmt.Errorf("unsupported serialization of nil %T", v)
}
if v.Application == nil || len(*v.Application) == 0 {
return &smithy.SerializationError{Err: fmt.Errorf("input member application must not be empty")}
}
if v.Application != nil {
if err := encoder.SetURI("application").String(*v.Application); err != nil {
return err
}
}
return nil
}
type awsRestjson1_serializeOpDeleteAttributeGroup struct {
}
func (*awsRestjson1_serializeOpDeleteAttributeGroup) ID() string {
return "OperationSerializer"
}
func (m *awsRestjson1_serializeOpDeleteAttributeGroup) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
out middleware.SerializeOutput, metadata middleware.Metadata, err error,
) {
request, ok := in.Request.(*smithyhttp.Request)
if !ok {
return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
}
input, ok := in.Parameters.(*DeleteAttributeGroupInput)
_ = input
if !ok {
return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
}
opPath, opQuery := httpbinding.SplitURI("/attribute-groups/{attributeGroup}")
request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
request.Method = "DELETE"
restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
if err != nil {
return out, metadata, &smithy.SerializationError{Err: err}
}
if err := awsRestjson1_serializeOpHttpBindingsDeleteAttributeGroupInput(input, restEncoder); err != nil {
return out, metadata, &smithy.SerializationError{Err: err}
}
if request.Request, err = restEncoder.Encode(request.Request); err != nil {
return out, metadata, &smithy.SerializationError{Err: err}
}
in.Request = request
return next.HandleSerialize(ctx, in)
}
func awsRestjson1_serializeOpHttpBindingsDeleteAttributeGroupInput(v *DeleteAttributeGroupInput, encoder *httpbinding.Encoder) error {
if v == nil {
return fmt.Errorf("unsupported serialization of nil %T", v)
}
if v.AttributeGroup == nil || len(*v.AttributeGroup) == 0 {
return &smithy.SerializationError{Err: fmt.Errorf("input member attributeGroup must not be empty")}
}
if v.AttributeGroup != nil {
if err := encoder.SetURI("attributeGroup").String(*v.AttributeGroup); err != nil {
return err
}
}
return nil
}
type awsRestjson1_serializeOpDisassociateAttributeGroup struct {
}
func (*awsRestjson1_serializeOpDisassociateAttributeGroup) ID() string {
return "OperationSerializer"
}
func (m *awsRestjson1_serializeOpDisassociateAttributeGroup) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
out middleware.SerializeOutput, metadata middleware.Metadata, err error,
) {
request, ok := in.Request.(*smithyhttp.Request)
if !ok {
return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
}
input, ok := in.Parameters.(*DisassociateAttributeGroupInput)
_ = input
if !ok {
return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
}
opPath, opQuery := httpbinding.SplitURI("/applications/{application}/attribute-groups/{attributeGroup}")
request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
request.Method = "DELETE"
restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
if err != nil {
return out, metadata, &smithy.SerializationError{Err: err}
}
if err := awsRestjson1_serializeOpHttpBindingsDisassociateAttributeGroupInput(input, restEncoder); err != nil {
return out, metadata, &smithy.SerializationError{Err: err}
}
if request.Request, err = restEncoder.Encode(request.Request); err != nil {
return out, metadata, &smithy.SerializationError{Err: err}
}
in.Request = request
return next.HandleSerialize(ctx, in)
}
func awsRestjson1_serializeOpHttpBindingsDisassociateAttributeGroupInput(v *DisassociateAttributeGroupInput, encoder *httpbinding.Encoder) error {
if v == nil {
return fmt.Errorf("unsupported serialization of nil %T", v)
}
if v.Application == nil || len(*v.Application) == 0 {
return &smithy.SerializationError{Err: fmt.Errorf("input member application must not be empty")}
}
if v.Application != nil {
if err := encoder.SetURI("application").String(*v.Application); err != nil {
return err
}
}
if v.AttributeGroup == nil || len(*v.AttributeGroup) == 0 {
return &smithy.SerializationError{Err: fmt.Errorf("input member attributeGroup must not be empty")}
}
if v.AttributeGroup != nil {
if err := encoder.SetURI("attributeGroup").String(*v.AttributeGroup); err != nil {
return err
}
}
return nil
}
type awsRestjson1_serializeOpDisassociateResource struct {
}
func (*awsRestjson1_serializeOpDisassociateResource) ID() string {
return "OperationSerializer"
}
func (m *awsRestjson1_serializeOpDisassociateResource) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
out middleware.SerializeOutput, metadata middleware.Metadata, err error,
) {
request, ok := in.Request.(*smithyhttp.Request)
if !ok {
return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
}
input, ok := in.Parameters.(*DisassociateResourceInput)
_ = input
if !ok {
return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
}
opPath, opQuery := httpbinding.SplitURI("/applications/{application}/resources/{resourceType}/{resource}")
request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
request.Method = "DELETE"
restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
if err != nil {
return out, metadata, &smithy.SerializationError{Err: err}
}
if err := awsRestjson1_serializeOpHttpBindingsDisassociateResourceInput(input, restEncoder); err != nil {
return out, metadata, &smithy.SerializationError{Err: err}
}
if request.Request, err = restEncoder.Encode(request.Request); err != nil {
return out, metadata, &smithy.SerializationError{Err: err}
}
in.Request = request
return next.HandleSerialize(ctx, in)
}
func awsRestjson1_serializeOpHttpBindingsDisassociateResourceInput(v *DisassociateResourceInput, encoder *httpbinding.Encoder) error {
if v == nil {
return fmt.Errorf("unsupported serialization of nil %T", v)
}
if v.Application == nil || len(*v.Application) == 0 {
return &smithy.SerializationError{Err: fmt.Errorf("input member application must not be empty")}
}
if v.Application != nil {
if err := encoder.SetURI("application").String(*v.Application); err != nil {
return err
}
}
if v.Resource == nil || len(*v.Resource) == 0 {
return &smithy.SerializationError{Err: fmt.Errorf("input member resource must not be empty")}
}
if v.Resource != nil {
if err := encoder.SetURI("resource").String(*v.Resource); err != nil {
return err
}
}
if len(v.ResourceType) == 0 {
return &smithy.SerializationError{Err: fmt.Errorf("input member resourceType must not be empty")}
}
if len(v.ResourceType) > 0 {
if err := encoder.SetURI("resourceType").String(string(v.ResourceType)); err != nil {
return err
}
}
return nil
}
type awsRestjson1_serializeOpGetApplication struct {
}
func (*awsRestjson1_serializeOpGetApplication) ID() string {
return "OperationSerializer"
}
func (m *awsRestjson1_serializeOpGetApplication) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
out middleware.SerializeOutput, metadata middleware.Metadata, err error,
) {
request, ok := in.Request.(*smithyhttp.Request)
if !ok {
return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
}
input, ok := in.Parameters.(*GetApplicationInput)
_ = input
if !ok {
return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
}
opPath, opQuery := httpbinding.SplitURI("/applications/{application}")
request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
request.Method = "GET"
restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
if err != nil {
return out, metadata, &smithy.SerializationError{Err: err}
}
if err := awsRestjson1_serializeOpHttpBindingsGetApplicationInput(input, restEncoder); err != nil {
return out, metadata, &smithy.SerializationError{Err: err}
}
if request.Request, err = restEncoder.Encode(request.Request); err != nil {
return out, metadata, &smithy.SerializationError{Err: err}
}
in.Request = request
return next.HandleSerialize(ctx, in)
}
func awsRestjson1_serializeOpHttpBindingsGetApplicationInput(v *GetApplicationInput, encoder *httpbinding.Encoder) error {
if v == nil {
return fmt.Errorf("unsupported serialization of nil %T", v)
}
if v.Application == nil || len(*v.Application) == 0 {
return &smithy.SerializationError{Err: fmt.Errorf("input member application must not be empty")}
}
if v.Application != nil {
if err := encoder.SetURI("application").String(*v.Application); err != nil {
return err
}
}
return nil
}
type awsRestjson1_serializeOpGetAttributeGroup struct {
}
func (*awsRestjson1_serializeOpGetAttributeGroup) ID() string {
return "OperationSerializer"
}
func (m *awsRestjson1_serializeOpGetAttributeGroup) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
out middleware.SerializeOutput, metadata middleware.Metadata, err error,
) {
request, ok := in.Request.(*smithyhttp.Request)
if !ok {
return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
}
input, ok := in.Parameters.(*GetAttributeGroupInput)
_ = input
if !ok {
return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
}
opPath, opQuery := httpbinding.SplitURI("/attribute-groups/{attributeGroup}")
request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
request.Method = "GET"
restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
if err != nil {
return out, metadata, &smithy.SerializationError{Err: err}
}
if err := awsRestjson1_serializeOpHttpBindingsGetAttributeGroupInput(input, restEncoder); err != nil {
return out, metadata, &smithy.SerializationError{Err: err}
}
if request.Request, err = restEncoder.Encode(request.Request); err != nil {
return out, metadata, &smithy.SerializationError{Err: err}
}
in.Request = request
return next.HandleSerialize(ctx, in)
}
func awsRestjson1_serializeOpHttpBindingsGetAttributeGroupInput(v *GetAttributeGroupInput, encoder *httpbinding.Encoder) error {
if v == nil {
return fmt.Errorf("unsupported serialization of nil %T", v)
}
if v.AttributeGroup == nil || len(*v.AttributeGroup) == 0 {
return &smithy.SerializationError{Err: fmt.Errorf("input member attributeGroup must not be empty")}
}
if v.AttributeGroup != nil {
if err := encoder.SetURI("attributeGroup").String(*v.AttributeGroup); err != nil {
return err
}
}
return nil
}
type awsRestjson1_serializeOpListApplications struct {
}
func (*awsRestjson1_serializeOpListApplications) ID() string {
return "OperationSerializer"
}
func (m *awsRestjson1_serializeOpListApplications) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
out middleware.SerializeOutput, metadata middleware.Metadata, err error,
) {
request, ok := in.Request.(*smithyhttp.Request)
if !ok {
return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
}
input, ok := in.Parameters.(*ListApplicationsInput)
_ = input
if !ok {
return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
}
opPath, opQuery := httpbinding.SplitURI("/applications")
request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
request.Method = "GET"
restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
if err != nil {
return out, metadata, &smithy.SerializationError{Err: err}
}
if err := awsRestjson1_serializeOpHttpBindingsListApplicationsInput(input, restEncoder); err != nil {
return out, metadata, &smithy.SerializationError{Err: err}
}
if request.Request, err = restEncoder.Encode(request.Request); err != nil {
return out, metadata, &smithy.SerializationError{Err: err}
}
in.Request = request
return next.HandleSerialize(ctx, in)
}
func awsRestjson1_serializeOpHttpBindingsListApplicationsInput(v *ListApplicationsInput, encoder *httpbinding.Encoder) error {
if v == nil {
return fmt.Errorf("unsupported serialization of nil %T", v)
}
if v.MaxResults != 0 {
encoder.SetQuery("maxResults").Integer(v.MaxResults)
}
if v.NextToken != nil {
encoder.SetQuery("nextToken").String(*v.NextToken)
}
return nil
}
type awsRestjson1_serializeOpListAssociatedAttributeGroups struct {
}
func (*awsRestjson1_serializeOpListAssociatedAttributeGroups) ID() string {
return "OperationSerializer"
}
func (m *awsRestjson1_serializeOpListAssociatedAttributeGroups) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
out middleware.SerializeOutput, metadata middleware.Metadata, err error,
) {
request, ok := in.Request.(*smithyhttp.Request)
if !ok {
return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
}
input, ok := in.Parameters.(*ListAssociatedAttributeGroupsInput)
_ = input
if !ok {
return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
}
opPath, opQuery := httpbinding.SplitURI("/applications/{application}/attribute-groups")
request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
request.Method = "GET"
restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
if err != nil {
return out, metadata, &smithy.SerializationError{Err: err}
}
if err := awsRestjson1_serializeOpHttpBindingsListAssociatedAttributeGroupsInput(input, restEncoder); err != nil {
return out, metadata, &smithy.SerializationError{Err: err}
}
if request.Request, err = restEncoder.Encode(request.Request); err != nil {
return out, metadata, &smithy.SerializationError{Err: err}
}
in.Request = request
return next.HandleSerialize(ctx, in)
}
func awsRestjson1_serializeOpHttpBindingsListAssociatedAttributeGroupsInput(v *ListAssociatedAttributeGroupsInput, encoder *httpbinding.Encoder) error {
if v == nil {
return fmt.Errorf("unsupported serialization of nil %T", v)
}
if v.Application == nil || len(*v.Application) == 0 {
return &smithy.SerializationError{Err: fmt.Errorf("input member application must not be empty")}
}
if v.Application != nil {
if err := encoder.SetURI("application").String(*v.Application); err != nil {
return err
}
}
if v.MaxResults != 0 {
encoder.SetQuery("maxResults").Integer(v.MaxResults)
}
if v.NextToken != nil {
encoder.SetQuery("nextToken").String(*v.NextToken)
}
return nil
}
type awsRestjson1_serializeOpListAssociatedResources struct {
}
func (*awsRestjson1_serializeOpListAssociatedResources) ID() string {
return "OperationSerializer"
}
func (m *awsRestjson1_serializeOpListAssociatedResources) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
out middleware.SerializeOutput, metadata middleware.Metadata, err error,
) {
request, ok := in.Request.(*smithyhttp.Request)
if !ok {
return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
}
input, ok := in.Parameters.(*ListAssociatedResourcesInput)
_ = input
if !ok {
return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
}
opPath, opQuery := httpbinding.SplitURI("/applications/{application}/resources")
request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
request.Method = "GET"
restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
if err != nil {
return out, metadata, &smithy.SerializationError{Err: err}
}
if err := awsRestjson1_serializeOpHttpBindingsListAssociatedResourcesInput(input, restEncoder); err != nil {
return out, metadata, &smithy.SerializationError{Err: err}
}
if request.Request, err = restEncoder.Encode(request.Request); err != nil {
return out, metadata, &smithy.SerializationError{Err: err}
}
in.Request = request
return next.HandleSerialize(ctx, in)
}
func awsRestjson1_serializeOpHttpBindingsListAssociatedResourcesInput(v *ListAssociatedResourcesInput, encoder *httpbinding.Encoder) error {
if v == nil {
return fmt.Errorf("unsupported serialization of nil %T", v)
}
if v.Application == nil || len(*v.Application) == 0 {
return &smithy.SerializationError{Err: fmt.Errorf("input member application must not be empty")}
}
if v.Application != nil {
if err := encoder.SetURI("application").String(*v.Application); err != nil {
return err
}
}
if v.MaxResults != 0 |
if v.NextToken != nil {
encoder.SetQuery("nextToken").String(*v.NextToken)
}
return nil
}
type awsRestjson1_serializeOpListAttributeGroups struct {
}
func (*awsRestjson1_serializeOpListAttributeGroups) ID() string {
return "OperationSerializer"
}
func (m *awsRestjson1_serializeOpListAttributeGroups) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
out middleware.SerializeOutput, metadata middleware.Metadata, err error,
) {
request, ok := in.Request.(*smithyhttp.Request)
if !ok {
return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
}
input, ok := in.Parameters.(*ListAttributeGroupsInput)
_ = input
if !ok {
return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
}
opPath, opQuery := httpbinding.SplitURI("/attribute-groups")
request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
request.Method = "GET"
restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
if err != nil {
return out, metadata, &smithy.SerializationError{Err: err}
}
if err := awsRestjson1_serializeOpHttpBindingsListAttributeGroupsInput(input, restEncoder); err != nil {
return out, metadata, &smithy.SerializationError{Err: err}
}
if request.Request, err = restEncoder.Encode(request.Request); err != nil {
return out, metadata, &smithy.SerializationError{Err: err}
}
in.Request = request
return next.HandleSerialize(ctx, in)
}
func awsRestjson1_serializeOpHttpBindingsListAttributeGroupsInput(v *ListAttributeGroupsInput, encoder *httpbinding.Encoder) error {
if v == nil {
return fmt.Errorf("unsupported serialization of nil %T", v)
}
if v.MaxResults != 0 {
encoder.SetQuery("maxResults").Integer(v.MaxResults)
}
if v.NextToken != nil {
encoder.SetQuery("nextToken").String(*v.NextToken)
}
return nil
}
type awsRestjson1_serializeOpListTagsForResource struct {
}
func (*awsRestjson1_serializeOpListTagsForResource) ID() string {
return "OperationSerializer"
}
func (m *awsRestjson1_serializeOpListTagsForResource) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
out middleware.SerializeOutput, metadata middleware.Metadata, err error,
) {
request, ok := in.Request.(*smithyhttp.Request)
if !ok {
return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
}
input, ok := in.Parameters.(*ListTagsForResourceInput)
_ = input
if !ok {
return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
}
opPath, opQuery := httpbinding.SplitURI("/tags/{resourceArn}")
request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
request.Method = "GET"
restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
if err != nil {
return out, metadata, &smithy.SerializationError{Err: err}
}
if err := awsRestjson1_serializeOpHttpBindingsListTagsForResourceInput(input, restEncoder); err != nil {
return out, metadata, &smithy.SerializationError{Err: err}
}
if request.Request, err = restEncoder.Encode(request.Request); err != nil {
return out, metadata, &smithy.SerializationError{Err: err}
}
in.Request = request
return next.HandleSerialize(ctx, in)
}
func awsRestjson1_serializeOpHttpBindingsListTagsForResourceInput(v *ListTagsForResourceInput, encoder *httpbinding.Encoder) error {
if v == nil {
return fmt.Errorf("unsupported serialization of nil %T", v)
}
if v.ResourceArn == nil || len(*v.ResourceArn) == 0 {
return &smithy.SerializationError{Err: fmt.Errorf("input member resourceArn must not be empty")}
}
if v.ResourceArn != nil {
if err := encoder.SetURI("resourceArn").String(*v.ResourceArn); err != nil {
return err
}
}
return nil
}
type awsRestjson1_serializeOpSyncResource struct {
}
func (*awsRestjson1_serializeOpSyncResource) ID() string {
return "OperationSerializer"
}
func (m *awsRestjson1_serializeOpSyncResource) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
out middleware.SerializeOutput, metadata middleware.Metadata, err error,
) {
request, ok := in.Request.(*smithyhttp.Request)
if !ok {
return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
}
input, ok := in.Parameters.(*SyncResourceInput)
_ = input
if !ok {
return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
}
opPath, opQuery := httpbinding.SplitURI("/sync/{resourceType}/{resource}")
request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
request.Method = "POST"
restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
if err != nil {
return out, metadata, &smithy.SerializationError{Err: err}
}
if err := awsRestjson1_serializeOpHttpBindingsSyncResourceInput(input, restEncoder); err != nil {
return out, metadata, &smithy.SerializationError{Err: err}
}
if request.Request, err = restEncoder.Encode(request.Request); err != nil {
return out, metadata, &smithy.SerializationError{Err: err}
}
in.Request = request
return next.HandleSerialize(ctx, in)
}
func awsRestjson1_serializeOpHttpBindingsSyncResourceInput(v *SyncResourceInput, encoder *httpbinding.Encoder) error {
if v == nil {
return fmt.Errorf("unsupported serialization of nil %T", v)
}
if v.Resource == nil || len(*v.Resource) == 0 {
return &smithy.SerializationError{Err: fmt.Errorf("input member resource must not be empty")}
}
if v.Resource != nil {
if err := encoder.SetURI("resource").String(*v.Resource); err != nil {
return err
}
}
if len(v.ResourceType) == 0 {
return &smithy.SerializationError{Err: fmt.Errorf("input member resourceType must not be empty")}
}
if len(v.ResourceType) > 0 {
if err := encoder.SetURI("resourceType").String(string(v.ResourceType)); err != nil {
return err
}
}
return nil
}
type awsRestjson1_serializeOpTagResource struct {
}
func (*awsRestjson1_serializeOpTagResource) ID() string {
return "OperationSerializer"
}
func (m *awsRestjson1_serializeOpTagResource) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
out middleware.SerializeOutput, metadata middleware.Metadata, err error,
) {
request, ok := in.Request.(*smithyhttp.Request)
if !ok {
return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
}
input, ok := in.Parameters.(*TagResourceInput)
_ = input
if !ok {
return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
}
opPath, opQuery := httpbinding.SplitURI("/tags/{resourceArn}")
request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
request.Method = "POST"
restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
if err != nil {
return out, metadata, &smithy.SerializationError{Err: err}
}
if err := awsRestjson1_serializeOpHttpBindingsTagResourceInput(input, restEncoder); err != nil {
return out, metadata, &smithy.SerializationError{Err: err}
}
restEncoder.SetHeader("Content-Type").String("application/json")
jsonEncoder := smithyjson.NewEncoder()
if err := awsRestjson1_serializeOpDocumentTagResourceInput(input, jsonEncoder.Value); err != nil {
return out, metadata, &smithy.SerializationError{Err: err}
}
if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil {
return out, metadata, &smithy.SerializationError{Err: err}
}
if request.Request, err = restEncoder.Encode(request.Request); err != nil {
return out, metadata, &smithy.SerializationError{Err: err}
}
in.Request = request
return next.HandleSerialize(ctx, in)
}
func awsRestjson1_serializeOpHttpBindingsTagResourceInput(v *TagResourceInput, encoder *httpbinding.Encoder) error {
if v == nil {
return fmt.Errorf("unsupported serialization of nil %T", v)
}
if v.ResourceArn == nil || len(*v.ResourceArn) == 0 {
return &smithy.SerializationError{Err: fmt.Errorf("input member resourceArn must not be empty")}
}
if v.ResourceArn != nil {
if err := encoder.SetURI("resourceArn").String(*v.ResourceArn); err != nil {
return err
}
}
return nil
}
func awsRestjson1_serializeOpDocumentTagResourceInput(v *TagResourceInput, value smithyjson.Value) error {
object := value.Object()
defer object.Close()
if v.Tags != nil {
ok := object.Key("tags")
if err := awsRestjson1_serializeDocumentTags(v.Tags, ok); err != nil {
return err
}
}
return nil
}
type awsRestjson1_serializeOpUntagResource struct {
}
func (*awsRestjson1_serializeOpUntagResource) ID() string {
return "OperationSerializer"
}
func (m *awsRestjson1_serializeOpUntagResource) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
out middleware.SerializeOutput, metadata middleware.Metadata, err error,
) {
request, ok := in.Request.(*smithyhttp.Request)
if !ok {
return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
}
input, ok := in.Parameters.(*UntagResourceInput)
_ = input
if !ok {
return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
}
opPath, opQuery := httpbinding.SplitURI("/tags/{resourceArn}")
request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
request.Method = "DELETE"
restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
if err != nil {
return out, metadata, &smithy.SerializationError{Err: err}
}
if err := awsRestjson1_serializeOpHttpBindingsUntagResourceInput(input, restEncoder); err != nil {
return out, metadata, &smithy.SerializationError{Err: err}
}
if request.Request, err = restEncoder.Encode(request.Request); err != nil {
return out, metadata, &smithy.SerializationError{Err: err}
}
in.Request = request
return next.HandleSerialize(ctx, in)
}
func awsRestjson1_serializeOpHttpBindingsUntagResourceInput(v *UntagResourceInput, encoder *httpbinding.Encoder) error {
if v == nil {
return fmt.Errorf("unsupported serialization of nil %T", v)
}
if v.ResourceArn == nil || len(*v.ResourceArn) == 0 {
return &smithy.SerializationError{Err: fmt.Errorf("input member resourceArn must not be empty")}
}
if v.ResourceArn != nil {
if err := encoder.SetURI("resourceArn").String(*v.ResourceArn); err != nil {
return err
}
}
if v.TagKeys != nil {
for i := range v.TagKeys {
encoder.AddQuery("tagKeys").String(v.TagKeys[i])
}
}
return nil
}
type awsRestjson1_serializeOpUpdateApplication struct {
}
func (*awsRestjson1_serializeOpUpdateApplication) ID() string {
return "OperationSerializer"
}
func (m *awsRestjson1_serializeOpUpdateApplication) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
out middleware.SerializeOutput, metadata middleware.Metadata, err error,
) {
request, ok := in.Request.(*smithyhttp.Request)
if !ok {
return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
}
input, ok := in.Parameters.(*UpdateApplicationInput)
_ = input
if !ok {
return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
}
opPath, opQuery := httpbinding.SplitURI("/applications/{application}")
request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
request.Method = "PATCH"
restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
if err != nil {
return out, metadata, &smithy.SerializationError{Err: err}
}
if err := awsRestjson1_serializeOpHttpBindingsUpdateApplicationInput(input, restEncoder); err != nil {
return out, metadata, &smithy.SerializationError{Err: err}
}
restEncoder.SetHeader("Content-Type").String("application/json")
jsonEncoder := smithyjson.NewEncoder()
if err := awsRestjson1_serializeOpDocumentUpdateApplicationInput(input, jsonEncoder.Value); err != nil {
return out, metadata, &smithy.SerializationError{Err: err}
}
if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil {
return out, metadata, &smithy.SerializationError{Err: err}
}
if request.Request, err = restEncoder.Encode(request.Request); err != nil {
return out, metadata, &smithy.SerializationError{Err: err}
}
in.Request = request
return next.HandleSerialize(ctx, in)
}
func awsRestjson1_serializeOpHttpBindingsUpdateApplicationInput(v *UpdateApplicationInput, encoder *httpbinding.Encoder) error {
if v == nil {
return fmt.Errorf("unsupported serialization of nil %T", v)
}
if v.Application == nil || len(*v.Application) == 0 {
return &smithy.SerializationError{Err: fmt.Errorf("input member application must not be empty")}
}
if v.Application != nil {
if err := encoder.SetURI("application").String(*v.Application); err != nil {
return err
}
}
return nil
}
func awsRestjson1_serializeOpDocumentUpdateApplicationInput(v *UpdateApplicationInput, value smithyjson.Value) error {
object := value.Object()
defer object.Close()
if v.Description != nil {
ok := object.Key("description")
ok.String(*v.Description)
}
if v.Name != nil {
ok := object.Key("name")
ok.String(*v.Name)
}
return nil
}
type awsRestjson1_serializeOpUpdateAttributeGroup struct {
}
func (*awsRestjson1_serializeOpUpdateAttributeGroup) ID() string {
return "OperationSerializer"
}
func (m *awsRestjson1_serializeOpUpdateAttributeGroup) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
out middleware.SerializeOutput, metadata middleware.Metadata, err error,
) {
request, ok := in.Request.(*smithyhttp.Request)
if !ok {
return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
}
input, ok := in.Parameters.(*UpdateAttributeGroupInput)
_ = input
if !ok {
return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
}
opPath, opQuery := httpbinding.SplitURI("/attribute-groups/{attributeGroup}")
request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
request.Method = "PATCH"
restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
if err != nil {
return out, metadata, &smithy.SerializationError{Err: err}
}
if err := awsRestjson1_serializeOpHttpBindingsUpdateAttributeGroupInput(input, restEncoder); err != nil {
return out, metadata, &smithy.SerializationError{Err: err}
}
restEncoder.SetHeader("Content-Type").String("application/json")
jsonEncoder := smithyjson.NewEncoder()
if err := awsRestjson1_serializeOpDocumentUpdateAttributeGroupInput(input, jsonEncoder.Value); err != nil {
return out, metadata, &smithy.SerializationError{Err: err}
}
if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil {
return out, metadata, &smithy.SerializationError{Err: err}
}
if request.Request, err = restEncoder.Encode(request.Request); err != nil {
return out, metadata, &smithy.SerializationError{Err: err}
}
in.Request = request
return next.HandleSerialize(ctx, in)
}
func awsRestjson1_serializeOpHttpBindingsUpdateAttributeGroupInput(v *UpdateAttributeGroupInput, encoder *httpbinding.Encoder) error {
if v == nil {
return fmt.Errorf("unsupported serialization of nil %T", v)
}
if v.AttributeGroup == nil || len(*v.AttributeGroup) == 0 {
return &smithy.SerializationError{Err: fmt.Errorf("input member attributeGroup must not be empty")}
}
if v.AttributeGroup != nil {
if err := encoder.SetURI("attributeGroup").String(*v.AttributeGroup); err != nil {
return err
}
}
return nil
}
func awsRestjson1_serializeOpDocumentUpdateAttributeGroupInput(v *UpdateAttributeGroupInput, value smithyjson.Value) error {
object := value.Object()
defer object.Close()
if v.Attributes != nil {
ok := object.Key("attributes")
ok.String(*v.Attributes)
}
if v.Description != nil {
ok := object.Key("description")
ok.String(*v.Description)
}
if v.Name != nil {
ok := object.Key("name")
ok.String(*v.Name)
}
return nil
}
func awsRestjson1_serializeDocumentTags(v map[string]string, value smithyjson.Value) error {
object := value.Object()
defer object.Close()
for key := range v {
om := object.Key(key)
om.String(v[key])
}
return nil
}
| {
encoder.SetQuery("maxResults").Integer(v.MaxResults)
} |
issue-28324.rs | extern "C" {
static error_message_count: u32;
}
pub static BAZ: u32 = *&error_message_count;
//~^ ERROR use of extern static is unsafe and requires
fn | () {}
| main |
pkcs12.rs | use libc::*;
use *;
pub enum PKCS12 {}
extern "C" {
pub fn PKCS12_free(p12: *mut PKCS12);
}
const_ptr_api! {
extern "C" {
pub fn i2d_PKCS12(a: #[const_ptr_if(ossl300)] PKCS12, buf: *mut *mut u8) -> c_int;
}
}
extern "C" {
pub fn d2i_PKCS12(a: *mut *mut PKCS12, pp: *mut *const u8, length: c_long) -> *mut PKCS12;
pub fn PKCS12_parse(
p12: *mut PKCS12,
pass: *const c_char,
pkey: *mut *mut EVP_PKEY,
cert: *mut *mut X509,
ca: *mut *mut stack_st_X509,
) -> c_int;
pub fn PKCS12_set_mac(
p12: *mut PKCS12,
pass: *const c_char,
passlen: c_int,
salt: *mut c_uchar,
saltlen: c_int,
iter: c_int,
md_type: *const EVP_MD,
) -> c_int;
}
const_ptr_api! {
extern "C" {
pub fn PKCS12_create(
pass: #[const_ptr_if(any(ossl110, libressl280))] c_char,
friendly_name: #[const_ptr_if(any(ossl110, libressl280))] c_char,
pkey: *mut EVP_PKEY,
cert: *mut X509,
ca: *mut stack_st_X509,
nid_key: c_int,
nid_cert: c_int,
iter: c_int,
mac_iter: c_int,
keytype: c_int,
) -> *mut PKCS12; | } |
pub fn i2d_PKCS12_bio(b: *mut BIO, a: #[const_ptr_if(ossl300)] PKCS12) -> c_int;
} |
capture_x.py | """
Capture projection pattern and decode x-coorde.
"""
import cv2
import numpy as np
import structuredlight as sl
def imshowAndCapture(cap, img_pattern, delay=250):
cv2.imshow("", img_pattern)
cv2.waitKey(delay)
ret, img_frame = cap.read()
img_gray = cv2.cvtColor(img_frame, cv2.COLOR_BGR2GRAY)
return img_gray
def main():
width = 640
height = 480
cap = cv2.VideoCapture(1) # External web camera
gray = sl.Gray()
# Generate and Decode x-coord
# Generate
imlist_posi_pat = gray.generate((width, height))
imlist_nega_pat = sl.invert(imlist_posi_pat)
# Capture
imlist_posi_cap = [ imshowAndCapture(cap, img) for img in imlist_posi_pat]
imlist_nega_cap = [ imshowAndCapture(cap, img) for img in imlist_nega_pat]
# Decode
img_index = gray.decode(imlist_posi_cap, imlist_nega_cap)
# Visualize decode result
img_correspondence = np.clip(img_index/width*255.0, 0, 255).astype(np.uint8)
cv2.imshow("corresponnence map", img_correspondence)
cv2.waitKey(0)
cv2.imwrite("correspondence.png", img_correspondence)
cv2.destroyAllWindows() | cap.release()
if __name__=="__main__":
main() |
|
usage.component.ts | import { Component } from "@angular/core";
| })
export class BottomNavUsageComponent { } | @Component({
moduleId: module.id,
templateUrl: "./usage.component.html",
styleUrls: ["./usage.component.css"] |
models.py | import logging
class NullHandler(logging.Handler):
"""
Workaround to support Python 2.6
NullHandler was officially added to the logging package in Python 2.7
"""
def | (self, record):
pass
class MockHandler(logging.Handler):
def __init__(self, *args, **kwargs):
self.reset()
logging.Handler.__init__(self, *args, **kwargs)
def emit(self, record):
self.messages[record.levelname.lower()].append(record.getMessage())
def reset(self):
self.messages = {
'debug': [],
'info': [],
'warning': [],
'error': [],
'critical': [],
}
| emit |
mod.rs | use std::collections::HashMap;
use crate::utils::*;
#[derive(Clone)]
/// Tree structure that divides the space in nested cells to perform approximate range counting
/// Each member of this structure is a node in the tree
pub struct TreeStructure<const D: usize>{
/// The index of the cell represented by this node
cell_index: CellIndex<D>,
/// The size of the cell
side_size: f64,
/// The depth inside the tree where this node lays
level: i32,
/// The number of points cointained in the cell
cnt: usize,
/// The collection of nested sub-cells (bounded by 2^D at max, with D constant) | }
impl <const D: usize> TreeStructure<D> {
pub fn new(cell_index: &CellIndex<D>, level: i32, side_size: f64) -> TreeStructure<D> {
let structure = TreeStructure {
cell_index: cell_index.clone(),
level: level,
cnt: 0,
side_size: side_size,
// mettere sempre 2^D come dimensione assicura che non ci siano riallocazione ma occupa
// troppo spazio. sembra che funzioni piu' velocemente senza dare una capacity
children: HashMap::new()
};
structure
}
pub fn new_empty() -> TreeStructure<D>{
TreeStructure{
cell_index: [0;D],
level: 0,
cnt: 0,
side_size: 0.0,
children: HashMap::with_capacity(0)
}
}
/// Generates a tree starting from the points given in input. To function correctly the points in input
/// must be all and only the core points in a given cell of the approximated DBSCAN algorithm with side size
/// equal to `epsilon/sqrt(D)`. This is assumed true during the construction.
pub fn build_structure(points: Vec<Point<D>>, params: &DBSCANParams) -> TreeStructure<D> {
let base_side_size = params.epsilon/(params.dimensionality as f64 ).sqrt();
let levels_count_f = 1.0 + (1.0/params.rho).log(2.0).ceil();
let levels_count = if levels_count_f < 1.0 {
1
} else {
levels_count_f as i32
};
// The approximated DBSCAN algorithm needs one instance of this structure for every core cell.
// This gives that all the points in input are contained in the cell of side size `epsilon/sqrt(D)`.
// All the points can then be added to the root and we proceed directly to divide the core cell in its sub-cells
let mut root = TreeStructure::new(&get_base_cell_index(&points[0], params),0,base_side_size);
root.cnt = points.len();
for point in &points {
let mut curr_side_size = base_side_size;
let mut prev_child = &mut root;
//il livello 0 è occupato dalla radice
for i in 1..=levels_count {
curr_side_size = curr_side_size / 2.0;
let index_arr = get_cell_index(point, curr_side_size);
let curr_child : &mut TreeStructure<D> =
prev_child.children.entry(index_arr.clone())
.or_insert(TreeStructure::new(&index_arr, i, curr_side_size));
curr_child.cnt += 1;
prev_child = curr_child;
}
}
root
}
/// Performs the approximated range counting on the tree given the point in input. It stops as soon as the counting
/// is non zero, so the result is not actually the exact count but rather 0 if there is no point in the tree
/// in the vicinity of `q`, and a value that is less or equal to the number of points in the vicinity of `q` otherwise.
/// The points in the vicinity are found for certain if they are at a distance less than equal to `epsilon` from `q` and
/// are excluded for certain if their distance from `q` is greater than `epsilon(1 + rho)`. All the points in between are
/// counted in an arbitrary way, depending on what is more efficient.
pub fn approximate_range_counting_root(&self, q: &Point<D>, params: &DBSCANParams) -> usize{
self.approximate_range_counting(q,params)
}
fn approximate_range_counting(&self, q: &Point<D>, params: &DBSCANParams) -> usize {
let mut ans : usize = 0;
let levels_count_f = 1.0 + (1.0/params.rho).log(2.0).ceil();
let levels_count = if levels_count_f < 1.0 {
1
} else {
levels_count_f as i32
};
let intersection_type = determine_intersection(q, params, &self.cell_index, self.side_size);
match intersection_type {
IntersectionType::Disjoint => {},
IntersectionType::FullyCovered => {
ans += self.cnt;
},
IntersectionType::Intersecting => {
if self.level < (levels_count - 1) {
for child in self.children.values() {
ans += child.approximate_range_counting(q, params);
/*if ans > 0 {
return ans;
}*/
}
} else {
ans += self.cnt;
}
}
}
ans
}
/*fn print_tree_rec(&self) {
println!("--- node ---");
println!("> Level: {}",self.level);
println!("> cell_index: {:?}",self.cell_index);
println!("> cnt: {}",self.cnt);
for child in self.children.values() {
child.print_tree_rec();
}
}
pub fn print_tree(&self){
println!("----- TREE -----");
self.print_tree_rec();
}*/
}
#[cfg(test)]
mod tests; | children: HashMap<CellIndex<D>, TreeStructure<D>>, |
main.go | // Copyright (c) 2016, Alan Chen
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// 3. Neither the name of the copyright holder nor the names of its contributors
// may be used to endorse or promote products derived from this software
// without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
package main
import (
"flag"
"fmt"
"github.com/caivega/web3go/provider"
"github.com/caivega/web3go/rpc"
"github.com/caivega/web3go/web3"
)
var hostname = flag.String("hostname", "localhost", "The ethereum client RPC host")
var port = flag.String("port", "8545", "The ethereum client RPC port")
var verbose = flag.Bool("verbose", false, "Print verbose messages")
func main() | {
flag.Parse()
if *verbose {
fmt.Printf("Connect to %s:%s\n", *hostname, *port)
}
provider := provider.NewHTTPProvider(*hostname+":"+*port, rpc.GetDefaultMethod())
web3 := web3.NewWeb3(provider)
if accounts, err := web3.Eth.Accounts(); err == nil {
for _, account := range accounts {
fmt.Printf("%s\n", account.String())
}
} else {
fmt.Printf("%v", err)
}
} |
|
modularitymatrix.py | """Modularity matrix of graphs.
"""
# Copyright (C) 2004-2019 by
# Aric Hagberg <[email protected]>
# Dan Schult <[email protected]>
# Pieter Swart <[email protected]>
# All rights reserved.
# BSD license.
import networkx as nx
from networkx.utils import not_implemented_for
__author__ = "\n".join(['Aric Hagberg <[email protected]>',
'Pieter Swart ([email protected])',
'Dan Schult ([email protected])',
'Jean-Gabriel Young ([email protected])'])
__all__ = ['modularity_matrix', 'directed_modularity_matrix']
@not_implemented_for('directed')
@not_implemented_for('multigraph')
def modularity_matrix(G, nodelist=None, weight=None):
r"""Returns the modularity matrix of G.
The modularity matrix is the matrix B = A - <A>, where A is the adjacency
matrix and <A> is the average adjacency matrix, assuming that the graph
is described by the configuration model.
More specifically, the element B_ij of B is defined as
.. math::
A_{ij} - {k_i k_j \over 2 m}
where k_i is the degree of node i, and where m is the number of edges
in the graph. When weight is set to a name of an attribute edge, Aij, k_i,
k_j and m are computed using its value.
Parameters
----------
G : Graph
A NetworkX graph
nodelist : list, optional
The rows and columns are ordered according to the nodes in nodelist.
If nodelist is None, then the ordering is produced by G.nodes().
weight : string or None, optional (default=None)
The edge attribute that holds the numerical value used for
the edge weight. If None then all edge weights are 1.
Returns
-------
B : Numpy matrix
The modularity matrix of G.
Examples
--------
>>> import networkx as nx
>>> k =[3, 2, 2, 1, 0]
>>> G = nx.havel_hakimi_graph(k)
>>> B = nx.modularity_matrix(G)
See Also
--------
to_numpy_matrix
modularity_spectrum
adjacency_matrix
directed_modularity_matrix
References
----------
.. [1] M. E. J. Newman, "Modularity and community structure in networks",
Proc. Natl. Acad. Sci. USA, vol. 103, pp. 8577-8582, 2006.
"""
if nodelist is None:
nodelist = list(G)
A = nx.to_scipy_sparse_matrix(G, nodelist=nodelist, weight=weight,
format='csr')
k = A.sum(axis=1)
m = k.sum() * 0.5
# Expected adjacency matrix
X = k * k.transpose() / (2 * m)
return A - X
@not_implemented_for('undirected')
@not_implemented_for('multigraph')
def | (G, nodelist=None, weight=None):
"""Returns the directed modularity matrix of G.
The modularity matrix is the matrix B = A - <A>, where A is the adjacency
matrix and <A> is the expected adjacency matrix, assuming that the graph
is described by the configuration model.
More specifically, the element B_ij of B is defined as
.. math::
B_{ij} = A_{ij} - k_i^{out} k_j^{in} / m
where :math:`k_i^{in}` is the in degree of node i, and :math:`k_j^{out}` is the out degree
of node j, with m the number of edges in the graph. When weight is set
to a name of an attribute edge, Aij, k_i, k_j and m are computed using
its value.
Parameters
----------
G : DiGraph
A NetworkX DiGraph
nodelist : list, optional
The rows and columns are ordered according to the nodes in nodelist.
If nodelist is None, then the ordering is produced by G.nodes().
weight : string or None, optional (default=None)
The edge attribute that holds the numerical value used for
the edge weight. If None then all edge weights are 1.
Returns
-------
B : Numpy matrix
The modularity matrix of G.
Examples
--------
>>> import networkx as nx
>>> G = nx.DiGraph()
>>> G.add_edges_from(((1,2), (1,3), (3,1), (3,2), (3,5), (4,5), (4,6),
... (5,4), (5,6), (6,4)))
>>> B = nx.directed_modularity_matrix(G)
Notes
-----
NetworkX defines the element A_ij of the adjacency matrix as 1 if there
is a link going from node i to node j. Leicht and Newman use the opposite
definition. This explains the different expression for B_ij.
See Also
--------
to_numpy_matrix
modularity_spectrum
adjacency_matrix
modularity_matrix
References
----------
.. [1] E. A. Leicht, M. E. J. Newman,
"Community structure in directed networks",
Phys. Rev Lett., vol. 100, no. 11, p. 118703, 2008.
"""
if nodelist is None:
nodelist = list(G)
A = nx.to_scipy_sparse_matrix(G, nodelist=nodelist, weight=weight,
format='csr')
k_in = A.sum(axis=0)
k_out = A.sum(axis=1)
m = k_in.sum()
# Expected adjacency matrix
X = k_out * k_in / m
return A - X
# fixture for nose tests
def setup_module(module):
from nose import SkipTest
try:
import numpy
import scipy
except:
raise SkipTest("NumPy not available")
| directed_modularity_matrix |
mongo_test.go | package net_worth_test
import (
"context"
"fmt"
"github.com/avinashb98/myfin/datasources/mongo"
mocks "github.com/avinashb98/myfin/mocks/datasources/mongo"
"github.com/avinashb98/myfin/repository/net_worth"
"github.com/stretchr/testify/assert"
"testing"
)
func TestRepository_CreateNetWorth(t *testing.T) | {
var db mongo.Database
var netWorthCollection mongo.Collection
db = &mocks.Database{}
netWorthCollection = &mocks.Collection{}
netWorthCollection.(*mocks.Collection).
On("InsertOne", context.Background(), net_worth.NetWorth{Handle: "validHandleName"}).
Return("", nil)
netWorthCollection.(*mocks.Collection).
On("InsertOne", context.Background(), net_worth.NetWorth{Handle: "inValidHandleName"}).
Return("", fmt.Errorf("invalid handle"))
netWorthCollection.(*mocks.Collection).
On("InsertOne", context.Background(), net_worth.NetWorth{Handle: "duplicateHandleName"}).
Return("", fmt.Errorf("handle already exists"))
db.(*mocks.Database).
On("Collection", "net_worth").
Return(netWorthCollection)
repo := net_worth.NewRepository(context.Background(), db)
err := repo.CreateNetWorth(context.Background(), net_worth.NetWorth{Handle: "validHandleName"})
assert.Empty(t, err)
err = repo.CreateNetWorth(context.Background(), net_worth.NetWorth{Handle: "inValidHandleName"})
assert.NotEmpty(t, err)
assert.Equal(t, err.Error(), "something went wrong")
err = repo.CreateNetWorth(context.Background(), net_worth.NetWorth{Handle: "duplicateHandleName"})
assert.NotEmpty(t, err)
assert.Equal(t, err.Error(), "something went wrong")
} |
|
events_endcrypt.rs | #[doc = "Reader of register EVENTS_ENDCRYPT"]
pub type R = crate::R<u32, super::EVENTS_ENDCRYPT>;
#[doc = "Writer for register EVENTS_ENDCRYPT"]
pub type W = crate::W<u32, super::EVENTS_ENDCRYPT>;
#[doc = "Register EVENTS_ENDCRYPT `reset()`'s with value 0"]
impl crate::ResetValue for super::EVENTS_ENDCRYPT {
type Type = u32;
#[inline(always)] | }
#[doc = "Reader of field `EVENTS_ENDCRYPT`"]
pub type EVENTS_ENDCRYPT_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `EVENTS_ENDCRYPT`"]
pub struct EVENTS_ENDCRYPT_W<'a> {
w: &'a mut W,
}
impl<'a> EVENTS_ENDCRYPT_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !0x01) | ((value as u32) & 0x01);
self.w
}
}
impl R {
#[doc = "Bit 0"]
#[inline(always)]
pub fn events_endcrypt(&self) -> EVENTS_ENDCRYPT_R {
EVENTS_ENDCRYPT_R::new((self.bits & 0x01) != 0)
}
}
impl W {
#[doc = "Bit 0"]
#[inline(always)]
pub fn events_endcrypt(&mut self) -> EVENTS_ENDCRYPT_W {
EVENTS_ENDCRYPT_W { w: self }
}
} | fn reset_value() -> Self::Type {
0
} |
inject_test.go | package inject
import (
"reflect"
"testing"
l5dcharts "github.com/linkerd/linkerd2/pkg/charts/linkerd2"
"github.com/linkerd/linkerd2/pkg/k8s"
"github.com/linkerd/linkerd2/pkg/version"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
k8sResource "k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"sigs.k8s.io/yaml"
)
func | (t *testing.T) {
// this test uses an annotated deployment and a expected Values object to verify
// the GetOverriddenValues function.
var (
proxyVersionOverride = "proxy-version-override"
pullPolicy = "Always"
)
testConfig, err := l5dcharts.NewValues()
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
var testCases = []struct {
id string
nsAnnotations map[string]string
spec appsv1.DeploymentSpec
expected func() *l5dcharts.Values
}{
{id: "use overrides",
nsAnnotations: make(map[string]string),
spec: appsv1.DeploymentSpec{
Template: corev1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Annotations: map[string]string{
k8s.ProxyDisableIdentityAnnotation: "true",
k8s.ProxyImageAnnotation: "cr.l5d.io/linkerd/proxy",
k8s.ProxyImagePullPolicyAnnotation: pullPolicy,
k8s.ProxyInitImageAnnotation: "cr.l5d.io/linkerd/proxy-init",
k8s.ProxyControlPortAnnotation: "4000",
k8s.ProxyInboundPortAnnotation: "5000",
k8s.ProxyAdminPortAnnotation: "5001",
k8s.ProxyOutboundPortAnnotation: "5002",
k8s.ProxyPodInboundPortsAnnotation: "1234,5678",
k8s.ProxyIgnoreInboundPortsAnnotation: "4222,6222",
k8s.ProxyIgnoreOutboundPortsAnnotation: "8079,8080",
k8s.ProxyCPURequestAnnotation: "0.15",
k8s.ProxyMemoryRequestAnnotation: "120",
k8s.ProxyEphemeralStorageRequestAnnotation: "10",
k8s.ProxyCPULimitAnnotation: "1.5",
k8s.ProxyMemoryLimitAnnotation: "256",
k8s.ProxyEphemeralStorageLimitAnnotation: "50",
k8s.ProxyUIDAnnotation: "8500",
k8s.ProxyLogLevelAnnotation: "debug,linkerd=debug",
k8s.ProxyLogFormatAnnotation: "json",
k8s.ProxyEnableExternalProfilesAnnotation: "false",
k8s.ProxyVersionOverrideAnnotation: proxyVersionOverride,
k8s.ProxyWaitBeforeExitSecondsAnnotation: "123",
k8s.ProxyRequireIdentityOnInboundPortsAnnotation: "8888,9999",
k8s.ProxyOutboundConnectTimeout: "6000ms",
k8s.ProxyInboundConnectTimeout: "600ms",
k8s.ProxyOpaquePortsAnnotation: "4320-4325,3306",
k8s.ProxyAwait: "enabled",
k8s.ProxySkipSubnetsAnnotation: "172.17.0.0/16",
},
},
Spec: corev1.PodSpec{},
},
},
expected: func() *l5dcharts.Values {
values, _ := l5dcharts.NewValues()
values.Proxy.Cores = 2
values.Proxy.DisableIdentity = true
values.Proxy.Image.Name = "cr.l5d.io/linkerd/proxy"
values.Proxy.Image.PullPolicy = pullPolicy
values.Proxy.Image.Version = proxyVersionOverride
values.Proxy.PodInboundPorts = "1234,5678"
values.Proxy.Ports.Control = 4000
values.Proxy.Ports.Inbound = 5000
values.Proxy.Ports.Admin = 5001
values.Proxy.Ports.Outbound = 5002
values.Proxy.WaitBeforeExitSeconds = 123
values.Proxy.LogLevel = "debug,linkerd=debug"
values.Proxy.LogFormat = "json"
values.Proxy.Resources = &l5dcharts.Resources{
CPU: l5dcharts.Constraints{
Limit: "1.5",
Request: "0.15",
},
Memory: l5dcharts.Constraints{
Limit: "256",
Request: "120",
},
EphemeralStorage: l5dcharts.Constraints{
Limit: "50",
Request: "10",
},
}
values.Proxy.UID = 8500
values.ProxyInit.Image.Name = "cr.l5d.io/linkerd/proxy-init"
values.ProxyInit.Image.PullPolicy = pullPolicy
values.ProxyInit.Image.Version = version.ProxyInitVersion
values.ProxyInit.IgnoreInboundPorts = "4222,6222"
values.ProxyInit.IgnoreOutboundPorts = "8079,8080"
values.ProxyInit.SkipSubnets = "172.17.0.0/16"
values.Proxy.RequireIdentityOnInboundPorts = "8888,9999"
values.Proxy.OutboundConnectTimeout = "6000ms"
values.Proxy.InboundConnectTimeout = "600ms"
values.Proxy.OpaquePorts = "4320,4321,4322,4323,4324,4325,3306"
values.Proxy.Await = true
return values
},
},
{id: "use defaults",
nsAnnotations: make(map[string]string),
spec: appsv1.DeploymentSpec{
Template: corev1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{},
Spec: corev1.PodSpec{},
},
},
expected: func() *l5dcharts.Values {
values, _ := l5dcharts.NewValues()
return values
},
},
{id: "use namespace overrides",
nsAnnotations: map[string]string{
k8s.ProxyDisableIdentityAnnotation: "true",
k8s.ProxyImageAnnotation: "cr.l5d.io/linkerd/proxy",
k8s.ProxyImagePullPolicyAnnotation: pullPolicy,
k8s.ProxyInitImageAnnotation: "cr.l5d.io/linkerd/proxy-init",
k8s.ProxyControlPortAnnotation: "4000",
k8s.ProxyInboundPortAnnotation: "5000",
k8s.ProxyAdminPortAnnotation: "5001",
k8s.ProxyOutboundPortAnnotation: "5002",
k8s.ProxyPodInboundPortsAnnotation: "1234,5678",
k8s.ProxyIgnoreInboundPortsAnnotation: "4222,6222",
k8s.ProxyIgnoreOutboundPortsAnnotation: "8079,8080",
k8s.ProxyCPURequestAnnotation: "0.15",
k8s.ProxyMemoryRequestAnnotation: "120",
k8s.ProxyCPULimitAnnotation: "1.5",
k8s.ProxyMemoryLimitAnnotation: "256",
k8s.ProxyUIDAnnotation: "8500",
k8s.ProxyLogLevelAnnotation: "debug,linkerd=debug",
k8s.ProxyLogFormatAnnotation: "json",
k8s.ProxyEnableExternalProfilesAnnotation: "false",
k8s.ProxyVersionOverrideAnnotation: proxyVersionOverride,
k8s.ProxyWaitBeforeExitSecondsAnnotation: "123",
k8s.ProxyOutboundConnectTimeout: "6000ms",
k8s.ProxyInboundConnectTimeout: "600ms",
k8s.ProxyOpaquePortsAnnotation: "4320-4325,3306",
k8s.ProxyAwait: "enabled",
},
spec: appsv1.DeploymentSpec{
Template: corev1.PodTemplateSpec{
Spec: corev1.PodSpec{},
},
},
expected: func() *l5dcharts.Values {
values, _ := l5dcharts.NewValues()
values.Proxy.Cores = 2
values.Proxy.DisableIdentity = true
values.Proxy.Image.Name = "cr.l5d.io/linkerd/proxy"
values.Proxy.Image.PullPolicy = pullPolicy
values.Proxy.Image.Version = proxyVersionOverride
values.Proxy.PodInboundPorts = "1234,5678"
values.Proxy.Ports.Control = 4000
values.Proxy.Ports.Inbound = 5000
values.Proxy.Ports.Admin = 5001
values.Proxy.Ports.Outbound = 5002
values.Proxy.WaitBeforeExitSeconds = 123
values.Proxy.LogLevel = "debug,linkerd=debug"
values.Proxy.LogFormat = "json"
values.Proxy.Resources = &l5dcharts.Resources{
CPU: l5dcharts.Constraints{
Limit: "1.5",
Request: "0.15",
},
Memory: l5dcharts.Constraints{
Limit: "256",
Request: "120",
},
}
values.Proxy.UID = 8500
values.ProxyInit.Image.Name = "cr.l5d.io/linkerd/proxy-init"
values.ProxyInit.Image.PullPolicy = pullPolicy
values.ProxyInit.Image.Version = version.ProxyInitVersion
values.ProxyInit.IgnoreInboundPorts = "4222,6222"
values.ProxyInit.IgnoreOutboundPorts = "8079,8080"
values.Proxy.OutboundConnectTimeout = "6000ms"
values.Proxy.InboundConnectTimeout = "600ms"
values.Proxy.OpaquePorts = "4320,4321,4322,4323,4324,4325,3306"
values.Proxy.Await = true
return values
},
},
{id: "use invalid duration for TCP connect timeouts",
nsAnnotations: map[string]string{
k8s.ProxyOutboundConnectTimeout: "6000",
k8s.ProxyInboundConnectTimeout: "600",
},
spec: appsv1.DeploymentSpec{
Template: corev1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{},
Spec: corev1.PodSpec{},
},
},
expected: func() *l5dcharts.Values {
values, _ := l5dcharts.NewValues()
return values
},
},
{id: "use valid duration for TCP connect timeouts",
nsAnnotations: map[string]string{
// Validate we're converting time values into ms for the proxy to parse correctly.
k8s.ProxyOutboundConnectTimeout: "6s5ms",
k8s.ProxyInboundConnectTimeout: "2s5ms",
},
spec: appsv1.DeploymentSpec{
Template: corev1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{},
Spec: corev1.PodSpec{},
},
},
expected: func() *l5dcharts.Values {
values, _ := l5dcharts.NewValues()
values.Proxy.OutboundConnectTimeout = "6005ms"
values.Proxy.InboundConnectTimeout = "2005ms"
return values
},
},
{id: "use named port for opaque ports",
nsAnnotations: make(map[string]string),
spec: appsv1.DeploymentSpec{
Template: corev1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Annotations: map[string]string{
k8s.ProxyOpaquePortsAnnotation: "mysql",
},
},
Spec: corev1.PodSpec{
Containers: []corev1.Container{
{
Ports: []corev1.ContainerPort{
{
Name: "mysql",
ContainerPort: 3306,
},
},
},
},
},
},
},
expected: func() *l5dcharts.Values {
values, _ := l5dcharts.NewValues()
values.Proxy.OpaquePorts = "3306"
values.Proxy.PodInboundPorts = "3306"
return values
},
},
}
for _, tc := range testCases {
testCase := tc
t.Run(testCase.id, func(t *testing.T) {
data, err := yaml.Marshal(&appsv1.Deployment{Spec: testCase.spec})
if err != nil {
t.Fatal(err)
}
resourceConfig := NewResourceConfig(testConfig, OriginUnknown, "linkerd").
WithKind("Deployment").WithNsAnnotations(testCase.nsAnnotations)
if err := resourceConfig.parse(data); err != nil {
t.Fatal(err)
}
resourceConfig.AppendNamespaceAnnotations()
actual, err := resourceConfig.GetOverriddenValues()
if err != nil {
t.Fatal(err)
}
expected := testCase.expected()
if !reflect.DeepEqual(actual, expected) {
t.Fatalf("Expected values to be \n%v\n but was \n%v", expected.String(), actual.String())
}
})
}
}
func TestWholeCPUCores(t *testing.T) {
for _, c := range []struct {
v string
n int
}{
{v: "1", n: 1},
{v: "1m", n: 1},
{v: "1000m", n: 1},
{v: "1001m", n: 2},
} {
q, err := k8sResource.ParseQuantity(c.v)
if err != nil {
t.Fatal(err)
}
n, err := ToWholeCPUCores(q)
if err != nil {
t.Fatal(err)
}
if n != int64(c.n) {
t.Fatalf("Unexpected value: %v != %v", n, c.n)
}
}
}
| TestGetOverriddenValues |
pokemon.rs | use core::ops::{Deref, DerefMut};
use pokedex::{
item::Item,
moves::{Accuracy, CriticalRate, Move, MoveCategory, Power},
pokemon::{
owned::OwnedPokemon,
stat::{BaseStat, StatType},
Experience, Health, Pokemon,
},
types::{Effective, PokemonType},
};
use rand::Rng;
use crate::{
data::BattleType,
moves::{
damage::{DamageKind, DamageResult},
Percent,
},
pokemon::stat::{BattleStatType, StatStages},
};
/// To - do: factor in accuracy
pub fn throw_move<R: rand::Rng>(random: &mut R, accuracy: Option<Accuracy>) -> bool {
accuracy
.map(|accuracy| random.gen_range(0..100) < accuracy)
.unwrap_or(true)
}
pub fn crit(random: &mut impl Rng, crit_rate: CriticalRate) -> bool {
random.gen_bool(match crit_rate {
0 => 0.0625, // 1 / 16
1 => 0.125, // 1 / 8
2 => 0.25, // 1 / 4
3 => 1.0 / 3.0,
_ => 0.5, // rates 4 and above, 1 / 2
})
}
pub fn damage_range(random: &mut impl Rng) -> Percent {
random.gen_range(85..=100u8)
}
#[derive(Debug, Clone)]
pub struct BattlePokemon<
P: Deref<Target = Pokemon>,
M: Deref<Target = Move>,
I: Deref<Target = Item>,
> {
pub p: OwnedPokemon<P, M, I>,
// pub persistent: Option<PersistentMove>,
pub stages: StatStages,
}
impl<P: Deref<Target = Pokemon>, M: Deref<Target = Move>, I: Deref<Target = Item>>
BattlePokemon<P, M, I>
{
// pub fn try_flinch(&mut self) -> bool {
// if self.flinch {
// self.flinch = false;
// true
// } else {
// false
// }
// }
pub fn battle_exp_from(&self, type_: &BattleType) -> Experience {
let experience = self.exp_from();
let experience = match matches!(type_, BattleType::Wild) {
true => experience.saturating_mul(3) / 2,
false => experience,
};
#[cfg(debug_assertions)]
let experience = experience.saturating_mul(7);
experience
}
pub fn stat(&self, stat: StatType) -> BaseStat {
StatStages::mult(
self.p.stat(stat),
self.stages.get(BattleStatType::Basic(stat)),
)
}
pub fn damage_kind(
&self,
random: &mut impl Rng,
target: &Self,
kind: DamageKind,
category: MoveCategory,
move_type: PokemonType,
crit_rate: CriticalRate,
) -> DamageResult<Health> {
let effective = target.pokemon.effective(move_type, category);
let crit = crit(random, crit_rate);
if let DamageKind::Power(power) = kind {
self.move_power_damage_random(random, target, power, category, move_type, crit)
} else {
DamageResult {
damage: match matches!(effective, Effective::Ineffective) {
true => 0,
false => match kind {
DamageKind::PercentCurrent(percent) => {
(target.hp() as f32 * effective.multiplier() * percent as f32 / 100.0)
as Health
}
DamageKind::PercentMax(percent) => {
(target.max_hp() as f32 * effective.multiplier() * percent as f32
/ 100.0) as Health
}
DamageKind::Constant(damage) => damage,
DamageKind::Power(..) => unreachable!(),
},
},
effective,
crit,
}
}
}
pub fn move_power_damage_random(
&self,
random: &mut impl Rng,
target: &Self,
power: Power,
category: MoveCategory,
move_type: PokemonType,
crit: bool,
) -> DamageResult<Health> {
self.move_power_damage(
target,
power,
category,
move_type,
crit,
damage_range(random),
)
}
pub fn | (
&self,
target: &Self,
power: Power,
category: MoveCategory,
move_type: PokemonType,
crit: bool,
range: u8,
) -> DamageResult<Health> {
let effective = target.pokemon.effective(move_type, category);
let (attack, defense) = category.stats();
let attack = self.stat(attack);
let defense = target.stat(defense);
if matches!(effective, Effective::Ineffective) {
return DamageResult::default();
}
/// Same type attack bonus
fn stab(t1: PokemonType, t2: PokemonType) -> f64 {
crit_dmg(t1 == t2)
}
fn crit_dmg(crit: bool) -> f64 {
match crit {
true => 1.5,
false => 1.0,
}
}
let mut e_mult = move_type
.effective(target.pokemon.types.primary, category)
.multiplier();
if let Some(secondary) = target.pokemon.types.secondary {
e_mult *= move_type.effective(secondary, category).multiplier();
}
let e_mult = e_mult as f64;
let mut damage = 2.0 * self.level as f64;
damage /= 5.0;
damage += 2.0;
damage = damage.floor();
damage *= power as f64;
damage *= attack as f64 / defense as f64;
damage = damage.floor();
damage /= 50.0;
damage = damage.floor();
damage += 2.0;
damage *= range as f64 / 100.0;
damage *= stab(self.pokemon.types.primary, move_type);
damage *= crit_dmg(crit);
damage *= e_mult;
// println!(
// "PWR: {}, LVL: {}, ATK: {}, DEF: {}, DMG: {}",
// power, self.level, attack, defense, damage
// );
DamageResult {
damage: damage.round() as _,
effective,
crit,
}
}
}
impl<P: Deref<Target = Pokemon>, M: Deref<Target = Move>, I: Deref<Target = Item>>
From<OwnedPokemon<P, M, I>> for BattlePokemon<P, M, I>
{
fn from(p: OwnedPokemon<P, M, I>) -> Self {
Self {
p,
stages: Default::default(),
}
}
}
impl<P: Deref<Target = Pokemon>, M: Deref<Target = Move>, I: Deref<Target = Item>> Deref
for BattlePokemon<P, M, I>
{
type Target = OwnedPokemon<P, M, I>;
fn deref(&self) -> &Self::Target {
&self.p
}
}
impl<P: Deref<Target = Pokemon>, M: Deref<Target = Move>, I: Deref<Target = Item>> DerefMut
for BattlePokemon<P, M, I>
{
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.p
}
}
#[cfg(test)]
mod tests {
use firecore_pokedex::{
item::Item,
moves::{set::OwnedMoveSet, Move, MoveCategory},
pokemon::{
data::{Breeding, Gender, GrowthRate, Training},
owned::OwnedPokemon,
stat::StatSet,
Nature, Pokemon,
},
types::{PokemonType, Types},
};
use super::BattlePokemon;
#[test]
fn damage() {
let feraligatr = Pokemon {
id: 160,
name: "Feraligatr".to_owned(),
types: Types {
primary: PokemonType::Water,
secondary: None,
},
moves: vec![],
base: StatSet {
hp: 85,
atk: 105,
def: 100,
sp_atk: 79,
sp_def: 83,
speed: 78,
},
species: "Big Jaw".to_owned(),
evolution: None,
height: 23,
weight: 888,
training: Training {
base_exp: 239,
growth: GrowthRate::MediumSlow,
},
breeding: Breeding { gender: Some(6) },
};
let geodude = Pokemon {
id: 74,
name: "Geodude".to_owned(),
types: Types {
primary: PokemonType::Rock,
secondary: Some(PokemonType::Ground),
},
moves: vec![],
base: StatSet {
hp: 40,
atk: 80,
def: 100,
sp_atk: 30,
sp_def: 30,
speed: 20,
},
species: "Rock".to_owned(),
evolution: None,
height: 0_4,
weight: 20,
training: Training {
base_exp: 60,
growth: GrowthRate::MediumSlow,
},
breeding: Breeding { gender: Some(3) },
};
let mut user = OwnedPokemon {
pokemon: &feraligatr,
level: 50,
gender: Gender::Male,
nature: Nature::Adamant,
hp: 0,
ivs: StatSet::uniform(15),
evs: StatSet::uniform(50),
friendship: Pokemon::default_friendship(),
ailment: None,
nickname: None,
moves: OwnedMoveSet::<&Move>::default(),
item: Option::<&Item>::None,
experience: 0,
};
user.heal_hp(None);
let mut target = OwnedPokemon {
pokemon: &geodude,
level: 10,
gender: Gender::Female,
nature: Nature::Hardy,
hp: 0,
ivs: StatSet::uniform(0),
evs: StatSet::uniform(0),
friendship: Pokemon::default_friendship(),
ailment: None,
nickname: None,
moves: OwnedMoveSet::<&Move>::default(),
item: Option::<&Item>::None,
experience: 0,
};
target.heal_hp(None);
let user = BattlePokemon {
p: user,
stages: Default::default(),
};
let target = target.into();
let damage = user
.move_power_damage(
&target,
80,
MoveCategory::Physical,
PokemonType::Water,
false,
100,
)
.damage;
assert!(
damage <= 1200,
"Damage overreached threshold! {} > 1200",
damage
);
assert!(
damage >= 1100,
"Damage could not reach threshold! {} < 1100",
damage
);
}
}
| move_power_damage |
getApplicationGatewayPrivateEndpointConnection.go | // *** WARNING: this file was generated by the Pulumi SDK Generator. ***
// *** Do not edit by hand unless you're certain you know what you are doing! ***
package network
import (
"github.com/pulumi/pulumi/sdk/v2/go/pulumi"
)
// Private Endpoint connection on an application gateway.
// API Version: 2020-08-01.
func LookupApplicationGatewayPrivateEndpointConnection(ctx *pulumi.Context, args *LookupApplicationGatewayPrivateEndpointConnectionArgs, opts ...pulumi.InvokeOption) (*LookupApplicationGatewayPrivateEndpointConnectionResult, error) |
type LookupApplicationGatewayPrivateEndpointConnectionArgs struct {
// The name of the application gateway.
ApplicationGatewayName string `pulumi:"applicationGatewayName"`
// The name of the application gateway private endpoint connection.
ConnectionName string `pulumi:"connectionName"`
// The name of the resource group.
ResourceGroupName string `pulumi:"resourceGroupName"`
}
// Private Endpoint connection on an application gateway.
type LookupApplicationGatewayPrivateEndpointConnectionResult struct {
// A unique read-only string that changes whenever the resource is updated.
Etag string `pulumi:"etag"`
// Resource ID.
Id *string `pulumi:"id"`
// The consumer link id.
LinkIdentifier string `pulumi:"linkIdentifier"`
// Name of the private endpoint connection on an application gateway.
Name *string `pulumi:"name"`
// The resource of private end point.
PrivateEndpoint PrivateEndpointResponse `pulumi:"privateEndpoint"`
// A collection of information about the state of the connection between service consumer and provider.
PrivateLinkServiceConnectionState *PrivateLinkServiceConnectionStateResponse `pulumi:"privateLinkServiceConnectionState"`
// The provisioning state of the application gateway private endpoint connection resource.
ProvisioningState string `pulumi:"provisioningState"`
// Type of the resource.
Type string `pulumi:"type"`
}
| {
var rv LookupApplicationGatewayPrivateEndpointConnectionResult
err := ctx.Invoke("azure-native:network:getApplicationGatewayPrivateEndpointConnection", args, &rv, opts...)
if err != nil {
return nil, err
}
return &rv, nil
} |
resultproc.go | package consensus
import (
"fmt"
"github.com/iotaledger/goshimmer/dapps/valuetransfers/packages/address"
"github.com/iotaledger/goshimmer/dapps/valuetransfers/packages/balance"
valuetransaction "github.com/iotaledger/goshimmer/dapps/valuetransfers/packages/transaction"
"github.com/iotaledger/wasp/packages/committee"
"github.com/iotaledger/wasp/packages/hashing"
"github.com/iotaledger/wasp/packages/sctransaction"
"github.com/iotaledger/wasp/packages/util"
"github.com/iotaledger/wasp/packages/vm"
"github.com/iotaledger/wasp/plugins/runvm"
)
type runCalculationsParams struct {
requests []*request
leaderPeerIndex uint16
balances map[valuetransaction.ID][]*balance.Balance
rewardAddress address.Address
timestamp int64
}
// runs the VM for requests and posts result to committee's queue
func (op *operator) runCalculationsAsync(par runCalculationsParams) {
if op.currentSCState == nil {
op.log.Debugf("runCalculationsAsync: variable currentSCState is not known")
return
}
var progHash hashing.HashValue
if ph, ok := op.getProgramHash(); ok {
// may not be needed if ready requests are only built-in
progHash = *ph
}
ctx := &vm.VMTask{
LeaderPeerIndex: par.leaderPeerIndex,
ProgramHash: progHash,
Address: *op.committee.Address(),
Color: *op.committee.Color(),
Entropy: (hashing.HashValue)(op.stateTx.ID()),
Balances: par.balances,
OwnerAddress: *op.committee.OwnerAddress(),
RewardAddress: par.rewardAddress,
MinimumReward: op.getMinimumReward(),
Requests: takeRefs(par.requests),
Timestamp: par.timestamp,
VirtualState: op.currentSCState,
Log: op.log,
}
ctx.OnFinish = func(err error) {
if err != nil {
op.log.Errorf("VM task failed: %v", err)
return
}
op.committee.ReceiveMessage(ctx)
}
if err := runvm.RunComputationsAsync(ctx); err != nil {
op.log.Errorf("RunComputationsAsync: %v", err)
}
}
func (op *operator) sendResultToTheLeader(result *vm.VMTask) {
op.log.Debugw("sendResultToTheLeader")
if op.consensusStage != consensusStageSubCalculationsStarted {
op.log.Debugf("calculation result on SUB dismissed because stage changed from '%s' to '%s'",
stages[consensusStageSubCalculationsStarted].name, stages[op.consensusStage].name)
return
}
sigShare, err := op.dkshare.SignShare(result.ResultTransaction.EssenceBytes())
if err != nil {
op.log.Errorf("error while signing transaction %v", err)
return
}
reqids := make([]sctransaction.RequestId, len(result.Requests))
for i := range reqids {
reqids[i] = *result.Requests[i].RequestId()
}
essenceHash := hashing.HashData(result.ResultTransaction.EssenceBytes())
batchHash := vm.BatchHash(reqids, result.Timestamp, result.LeaderPeerIndex)
op.log.Debugw("sendResultToTheLeader",
"leader", result.LeaderPeerIndex,
"batchHash", batchHash.String(),
"essenceHash", essenceHash.String(),
"ts", result.Timestamp,
)
msgData := util.MustBytes(&committee.SignedHashMsg{
PeerMsgHeader: committee.PeerMsgHeader{
StateIndex: op.mustStateIndex(),
},
BatchHash: batchHash,
OrigTimestamp: result.Timestamp,
EssenceHash: *essenceHash,
SigShare: sigShare,
})
if err := op.committee.SendMsg(result.LeaderPeerIndex, committee.MsgSignedHash, msgData); err != nil {
op.log.Error(err)
return
}
op.sentResultToLeader = result.ResultTransaction
op.sentResultToLeaderIndex = result.LeaderPeerIndex
op.setNextConsensusStage(consensusStageSubCalculationsFinished)
}
func (op *operator) saveOwnResult(result *vm.VMTask) {
if op.consensusStage != consensusStageLeaderCalculationsStarted {
op.log.Debugf("calculation result on LEADER dismissed because stage changed from '%s' to '%s'",
stages[consensusStageLeaderCalculationsStarted].name, stages[op.consensusStage].name)
return
}
sigShare, err := op.dkshare.SignShare(result.ResultTransaction.EssenceBytes())
if err != nil {
op.log.Errorf("error while signing transaction %v", err)
return
}
reqids := make([]sctransaction.RequestId, len(result.Requests))
for i := range reqids {
reqids[i] = *result.Requests[i].RequestId()
}
bh := vm.BatchHash(reqids, result.Timestamp, result.LeaderPeerIndex)
if bh != op.leaderStatus.batchHash {
panic("bh != op.leaderStatus.batchHash")
}
if len(result.Requests) != int(result.ResultBatch.Size()) {
panic("len(result.RequestIds) != int(result.ResultBatch.Size())")
}
essenceHash := hashing.HashData(result.ResultTransaction.EssenceBytes())
op.log.Debugw("saveOwnResult",
"batchHash", bh.String(),
"ts", result.Timestamp,
"essenceHash", essenceHash.String(),
)
op.leaderStatus.resultTx = result.ResultTransaction
op.leaderStatus.batch = result.ResultBatch
op.leaderStatus.signedResults[op.committee.OwnPeerIndex()] = &signedResult{
essenceHash: *essenceHash,
sigShare: sigShare,
}
op.setNextConsensusStage(consensusStageLeaderCalculationsFinished)
}
func (op *operator) aggregateSigShares(sigShares [][]byte) error {
resTx := op.leaderStatus.resultTx
finalSignature, err := op.dkshare.RecoverFullSignature(sigShares, resTx.EssenceBytes())
if err != nil |
if err := resTx.PutSignature(finalSignature); err != nil {
return fmt.Errorf("something wrong while aggregating final signature: %v", err)
}
return nil
}
| {
return err
} |
routes.go | package core
import (
rpc "github.com/teragrid/dgrid/rpc/lib/server"
)
// TODO: better system than "unsafe" prefix
// NOTE: Amino is registered in rpc/core/types/wire.go.
var Routes = map[string]*rpc.RPCFunc{
// subscribe/unsubscribe are reserved for websocket events.
"subscribe": rpc.NewWSRPCFunc(Subscribe, "query"),
"unsubscribe": rpc.NewWSRPCFunc(Unsubscribe, "query"),
"unsubscribe_all": rpc.NewWSRPCFunc(UnsubscribeAll, ""),
// info API
"health": rpc.NewRPCFunc(Health, ""),
"status": rpc.NewRPCFunc(Status, ""),
"net_info": rpc.NewRPCFunc(NetInfo, ""),
"blockchain": rpc.NewRPCFunc(BlockchainInfo, "minHeight,maxHeight"),
"genesis": rpc.NewRPCFunc(Genesis, ""),
"block": rpc.NewRPCFunc(Block, "height"),
"block_results": rpc.NewRPCFunc(BlockResults, "height"),
"commit": rpc.NewRPCFunc(Commit, "height"),
"tx": rpc.NewRPCFunc(Tx, "hash,prove"),
"tx_search": rpc.NewRPCFunc(TxSearch, "query,prove,page,per_page"),
"validators": rpc.NewRPCFunc(Validators, "height"),
"dump_consensus_state": rpc.NewRPCFunc(DumpConsensusState, ""),
"consensus_state": rpc.NewRPCFunc(ConsensusState, ""),
"consensus_params": rpc.NewRPCFunc(ConsensusParams, "height"),
"unconfirmed_txs": rpc.NewRPCFunc(UnconfirmedTxs, "limit"),
"num_unconfirmed_txs": rpc.NewRPCFunc(NumUnconfirmedTxs, ""),
// broadcast API
"broadcast_tx_commit": rpc.NewRPCFunc(BroadcastTxCommit, "tx"),
"broadcast_tx_sync": rpc.NewRPCFunc(BroadcastTxSync, "tx"),
"broadcast_tx_async": rpc.NewRPCFunc(BroadcastTxAsync, "tx"),
// asura API
"abci_query": rpc.NewRPCFunc(AsuraQuery, "path,data,height,prove"),
"abci_info": rpc.NewRPCFunc(AsuraInfo, ""),
}
func | () {
// control API
Routes["dial_seeds"] = rpc.NewRPCFunc(UnsafeDialSeeds, "seeds")
Routes["dial_peers"] = rpc.NewRPCFunc(UnsafeDialPeers, "peers,persistent")
Routes["unsafe_flush_storage"] = rpc.NewRPCFunc(UnsafeFlushStorage, "")
// profiler API
Routes["unsafe_start_cpu_profiler"] = rpc.NewRPCFunc(UnsafeStartCPUProfiler, "filename")
Routes["unsafe_stop_cpu_profiler"] = rpc.NewRPCFunc(UnsafeStopCPUProfiler, "")
Routes["unsafe_write_heap_profile"] = rpc.NewRPCFunc(UnsafeWriteHeapProfile, "filename")
}
| AddUnsafeRoutes |
block.go | // Copyright 2014 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
// Package types contains data types related to Ethereum consensus.
package types
import (
"encoding/binary"
"io"
"math/big"
"reflect"
"sort"
"sync/atomic"
"time"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/rlp"
"golang.org/x/crypto/sha3"
)
var (
EmptyRootHash = DeriveSha(Transactions{})
EmptyUncleHash = rlpHash([]*Header(nil))
)
// A BlockNonce is a 64-bit hash which proves (combined with the
// mix-hash) that a sufficient amount of computation has been carried
// out on a block.
type BlockNonce [8]byte
// EncodeNonce converts the given integer to a block nonce.
func EncodeNonce(i uint64) BlockNonce {
var n BlockNonce
binary.BigEndian.PutUint64(n[:], i)
return n
}
// Uint64 returns the integer value of a block nonce.
func (n BlockNonce) Uint64() uint64 {
return binary.BigEndian.Uint64(n[:])
}
// MarshalText encodes n as a hex string with 0x prefix.
func (n BlockNonce) MarshalText() ([]byte, error) {
return hexutil.Bytes(n[:]).MarshalText()
}
// UnmarshalText implements encoding.TextUnmarshaler.
func (n *BlockNonce) UnmarshalText(input []byte) error {
return hexutil.UnmarshalFixedText("BlockNonce", input, n[:])
}
//go:generate gencodec -type Header -field-override headerMarshaling -out gen_header_json.go
// Header represents a block header in the Ethereum blockchain.
type Header struct {
ParentHash common.Hash `json:"parentHash" gencodec:"required"`
UncleHash common.Hash `json:"sha3Uncles" gencodec:"required"`
Coinbase common.Address `json:"miner" gencodec:"required"`
Root common.Hash `json:"stateRoot" gencodec:"required"`
TxHash common.Hash `json:"transactionsRoot" gencodec:"required"`
ReceiptHash common.Hash `json:"receiptsRoot" gencodec:"required"`
Bloom Bloom `json:"logsBloom" gencodec:"required"`
Difficulty *big.Int `json:"difficulty" gencodec:"required"`
Number *big.Int `json:"number" gencodec:"required"`
GasLimit uint64 `json:"gasLimit" gencodec:"required"`
GasUsed uint64 `json:"gasUsed" gencodec:"required"`
Time uint64 `json:"timestamp" gencodec:"required"`
Extra []byte `json:"extraData" gencodec:"required"`
MixDigest common.Hash `json:"mixHash"`
Nonce BlockNonce `json:"nonce"`
}
// field type overrides for gencodec
type headerMarshaling struct {
Difficulty *hexutil.Big
Number *hexutil.Big
GasLimit hexutil.Uint64
GasUsed hexutil.Uint64
Time hexutil.Uint64
Extra hexutil.Bytes
Hash common.Hash `json:"hash"` // adds call to Hash() in MarshalJSON
}
// Hash returns the block hash of the header, which is simply the keccak256 hash of its
// RLP encoding.
func (h *Header) Hash() common.Hash {
return rlpHash(h)
}
var headerSize = common.StorageSize(reflect.TypeOf(Header{}).Size())
// Size returns the approximate memory used by all internal contents. It is used
// to approximate and limit the memory consumption of various caches.
func (h *Header) Size() common.StorageSize {
return headerSize + common.StorageSize(len(h.Extra)+(h.Difficulty.BitLen()+h.Number.BitLen())/8)
}
func rlpHash(x interface{}) (h common.Hash) {
hw := sha3.NewLegacyKeccak256()
rlp.Encode(hw, x)
hw.Sum(h[:0])
return h
}
// Body is a simple (mutable, non-safe) data container for storing and moving
// a block's data contents (transactions and uncles) together.
type Body struct {
Transactions []*Transaction
Uncles []*Header
}
// Block represents an entire block in the Ethereum blockchain.
type Block struct {
header *Header
uncles []*Header
transactions Transactions
// caches
hash atomic.Value
size atomic.Value
// Td is used by package core to store the total difficulty
// of the chain up to and including the block.
td *big.Int
// These fields are used by package eth to track
// inter-peer block relay.
ReceivedAt time.Time
ReceivedFrom interface{}
}
// DeprecatedTd is an old relic for extracting the TD of a block. It is in the
// code solely to facilitate upgrading the database from the old format to the
// new, after which it should be deleted. Do not use!
func (b *Block) DeprecatedTd() *big.Int {
return b.td
}
// [deprecated by eth/63]
// StorageBlock defines the RLP encoding of a Block stored in the
// state database. The StorageBlock encoding contains fields that
// would otherwise need to be recomputed.
type StorageBlock Block
// "external" block encoding. used for eth protocol, etc.
type extblock struct {
Header *Header
Txs []*Transaction
Uncles []*Header
}
// [deprecated by eth/63]
// "storage" block encoding. used for database.
type storageblock struct {
Header *Header
Txs []*Transaction
Uncles []*Header
TD *big.Int
}
// NewBlock creates a new block. The input data is copied,
// changes to header and to the field values will not affect the
// block.
//
// The values of TxHash, UncleHash, ReceiptHash and Bloom in header
// are ignored and set to values derived from the given txs, uncles
// and receipts.
func NewBlock(header *Header, txs []*Transaction, uncles []*Header, receipts []*Receipt) *Block {
b := &Block{header: CopyHeader(header), td: new(big.Int)}
// TODO: panic if len(txs) != len(receipts)
if len(txs) == 0 {
b.header.TxHash = EmptyRootHash
} else {
b.header.TxHash = DeriveSha(Transactions(txs))
b.transactions = make(Transactions, len(txs))
copy(b.transactions, txs)
}
if len(receipts) == 0 {
b.header.ReceiptHash = EmptyRootHash
} else {
b.header.ReceiptHash = DeriveSha(Receipts(receipts))
b.header.Bloom = CreateBloom(receipts)
}
if len(uncles) == 0 {
b.header.UncleHash = EmptyUncleHash
} else {
b.header.UncleHash = CalcUncleHash(uncles)
b.uncles = make([]*Header, len(uncles))
for i := range uncles {
b.uncles[i] = CopyHeader(uncles[i])
}
}
return b
}
// NewBlockWithHeader creates a block with the given header data. The
// header data is copied, changes to header and to the field values
// will not affect the block.
func NewBlockWithHeader(header *Header) *Block {
return &Block{header: CopyHeader(header)}
}
// CopyHeader creates a deep copy of a block header to prevent side effects from
// modifying a header variable.
func CopyHeader(h *Header) *Header {
cpy := *h
if cpy.Difficulty = new(big.Int); h.Difficulty != nil {
cpy.Difficulty.Set(h.Difficulty)
}
if cpy.Number = new(big.Int); h.Number != nil {
cpy.Number.Set(h.Number)
}
if len(h.Extra) > 0 {
cpy.Extra = make([]byte, len(h.Extra))
copy(cpy.Extra, h.Extra)
}
return &cpy
}
// DecodeRLP decodes the Ethereum
func (b *Block) DecodeRLP(s *rlp.Stream) error {
var eb extblock
_, size, _ := s.Kind()
if err := s.Decode(&eb); err != nil {
return err
}
b.header, b.uncles, b.transactions = eb.Header, eb.Uncles, eb.Txs
b.size.Store(common.StorageSize(rlp.ListSize(size)))
return nil
}
// EncodeRLP serializes b into the Ethereum RLP block format.
func (b *Block) EncodeRLP(w io.Writer) error {
return rlp.Encode(w, extblock{
Header: b.header,
Txs: b.transactions,
Uncles: b.uncles,
})
}
// [deprecated by eth/63]
func (b *StorageBlock) DecodeRLP(s *rlp.Stream) error {
var sb storageblock
if err := s.Decode(&sb); err != nil {
return err
}
b.header, b.uncles, b.transactions, b.td = sb.Header, sb.Uncles, sb.Txs, sb.TD
return nil
}
// TODO: copies
func (b *Block) Uncles() []*Header { return b.uncles }
func (b *Block) Transactions() Transactions { return b.transactions }
func (b *Block) Transaction(hash common.Hash) *Transaction {
for _, transaction := range b.transactions {
if transaction.Hash() == hash {
return transaction
}
}
return nil
}
func (b *Block) Number() *big.Int { return new(big.Int).Set(b.header.Number) }
func (b *Block) GasLimit() uint64 { return b.header.GasLimit }
func (b *Block) GasUsed() uint64 { return b.header.GasUsed }
func (b *Block) Difficulty() *big.Int { return new(big.Int).Set(b.header.Difficulty) }
func (b *Block) Time() uint64 { return b.header.Time }
func (b *Block) NumberU64() uint64 { return b.header.Number.Uint64() }
func (b *Block) MixDigest() common.Hash { return b.header.MixDigest }
func (b *Block) Nonce() uint64 { return binary.BigEndian.Uint64(b.header.Nonce[:]) }
func (b *Block) Bloom() Bloom { return b.header.Bloom }
func (b *Block) Coinbase() common.Address { return b.header.Coinbase }
func (b *Block) Root() common.Hash { return b.header.Root }
func (b *Block) ParentHash() common.Hash { return b.header.ParentHash }
func (b *Block) TxHash() common.Hash { return b.header.TxHash }
func (b *Block) ReceiptHash() common.Hash { return b.header.ReceiptHash }
func (b *Block) UncleHash() common.Hash { return b.header.UncleHash }
func (b *Block) Extra() []byte { return common.CopyBytes(b.header.Extra) }
func (b *Block) Header() *Header { return CopyHeader(b.header) }
// Body returns the non-header content of the block.
func (b *Block) Body() *Body { return &Body{b.transactions, b.uncles} }
// Size returns the true RLP encoded storage size of the block, either by encoding
// and returning it, or returning a previsouly cached value.
func (b *Block) Size() common.StorageSize {
if size := b.size.Load(); size != nil {
return size.(common.StorageSize)
}
c := writeCounter(0)
rlp.Encode(&c, b)
b.size.Store(common.StorageSize(c))
return common.StorageSize(c)
}
type writeCounter common.StorageSize
func (c *writeCounter) Write(b []byte) (int, error) {
*c += writeCounter(len(b))
return len(b), nil
}
func | (uncles []*Header) common.Hash {
if len(uncles) == 0 {
return EmptyUncleHash
}
return rlpHash(uncles)
}
// WithSeal returns a new block with the data from b but the header replaced with
// the sealed one.
func (b *Block) WithSeal(header *Header) *Block {
cpy := *header
return &Block{
header: &cpy,
transactions: b.transactions,
uncles: b.uncles,
}
}
// WithBody returns a new block with the given transaction and uncle contents.
func (b *Block) WithBody(transactions []*Transaction, uncles []*Header) *Block {
block := &Block{
header: CopyHeader(b.header),
transactions: make([]*Transaction, len(transactions)),
uncles: make([]*Header, len(uncles)),
}
copy(block.transactions, transactions)
for i := range uncles {
block.uncles[i] = CopyHeader(uncles[i])
}
return block
}
// Hash returns the keccak256 hash of b's header.
// The hash is computed on the first call and cached thereafter.
func (b *Block) Hash() common.Hash {
if hash := b.hash.Load(); hash != nil {
return hash.(common.Hash)
}
v := b.header.Hash()
b.hash.Store(v)
return v
}
type Blocks []*Block
type BlockBy func(b1, b2 *Block) bool
func (self BlockBy) Sort(blocks Blocks) {
bs := blockSorter{
blocks: blocks,
by: self,
}
sort.Sort(bs)
}
type blockSorter struct {
blocks Blocks
by func(b1, b2 *Block) bool
}
func (self blockSorter) Len() int { return len(self.blocks) }
func (self blockSorter) Swap(i, j int) {
self.blocks[i], self.blocks[j] = self.blocks[j], self.blocks[i]
}
func (self blockSorter) Less(i, j int) bool { return self.by(self.blocks[i], self.blocks[j]) }
func Number(b1, b2 *Block) bool { return b1.header.Number.Cmp(b2.header.Number) < 0 }
| CalcUncleHash |
0034_auto_20210224_1538.py | # Generated by Django 3.0.8 on 2021-02-24 07:38
from django.db import migrations, models
import django.db.models.deletion
import uuid
import wastd.observations.models
class | (migrations.Migration):
dependencies = [
('observations', '0033_auto_20210219_1350'),
]
operations = [
migrations.AddField(
model_name='survey',
name='area',
field=models.ForeignKey(blank=True, help_text='The general area this survey took place in.', null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='survey_area', to='observations.Area', verbose_name='Surveyed Area'),
),
migrations.AlterField(
model_name='observation',
name='source_id',
field=models.CharField(default=uuid.UUID('39f553a6-7673-11eb-ac3b-11cca3b7e6cc'), help_text='The ID of the record in the original source, if available.', max_length=1000, verbose_name='Source ID'),
),
migrations.AlterField(
model_name='surveymediaattachment',
name='attachment',
field=models.FileField(help_text='Upload the file.', max_length=500, upload_to=wastd.observations.models.survey_media, verbose_name='File attachment'),
),
migrations.AlterField(
model_name='surveymediaattachment',
name='source_id',
field=models.CharField(default=uuid.UUID('39f553a6-7673-11eb-ac3b-11cca3b7e6cc'), help_text='The ID of the record in the original source, if available.', max_length=1000, verbose_name='Source ID'),
),
migrations.AlterField(
model_name='surveymediaattachment',
name='survey',
field=models.ForeignKey(help_text='The Survey this attachment belongs to.', on_delete=django.db.models.deletion.PROTECT, related_name='attachments', to='observations.Survey', verbose_name='Survey'),
),
migrations.AlterField(
model_name='turtlehatchlingemergenceobservation',
name='light_sources_present',
field=models.CharField(choices=[('na', 'NA'), ('absent', 'Confirmed absent'), ('present', 'Confirmed present')], default='na', help_text='', max_length=300, verbose_name='Light sources present during emergence'),
),
migrations.AlterField(
model_name='turtlehatchlingemergenceobservation',
name='outlier_tracks_present',
field=models.CharField(choices=[('na', 'NA'), ('absent', 'Confirmed absent'), ('present', 'Confirmed present')], default='na', help_text='', max_length=300, verbose_name='Outlier tracks present'),
),
migrations.AlterField(
model_name='turtlehatchlingemergenceoutlierobservation',
name='outlier_group_size',
field=models.PositiveIntegerField(blank=True, help_text='', null=True, verbose_name='Number of tracks in outlier group'),
),
]
| Migration |
MouseMove.js | 'use strict';
import MouseMove from '../src/MouseMove';
describe('MouseMove', function() { | });
}); | it('should be tested', function() {
expect('No tests for this module yet.').toBe('Everything is ok.'); |
index.test.js | const request = require('supertest');
import api from '../src/index';
describe('GET /alive', function() {
it('responds with json by string alive', function(done) {
request(
api({
prefix: '/',
port: 123,
host: '0.0.0.0',
alive: '/alive',
},(router, app) => {
return router;
})
).get('/alive')
.set('Accept', 'application/json')
.expect('Content-Type', /json/)
.expect(200,{
"OK": true,
"status": 200,
"message": "alive"
},done); | it('responds with json by array alive', function(done) {
request(
api({
prefix: '/',
port: 124,
host: '0.0.0.0',
alive: ['/alive', (req, res, next) => {
res.status(200).json({
OK:true,
status:200,
message:"alive-array",
});
}],
},(router, app) => {
return router;
})
).get('/alive')
.set('Accept', 'application/json')
.expect('Content-Type', /json/)
.expect(200,{
"OK": true,
"status": 200,
"message": "alive-array"
},done);
})
}); | });
|
scheduler.py | # -*- coding: utf-8 -*-
"""
Created on Tue Feb 9 16:31:57 2021
@author: beccamayers
"""
import schedule
from datetime import datetime
from alert_bot import get_alert
import time
now = datetime.now()
timestamp = now.strftime("%b%d%Y %H%M%p")
def job():
| schedule.every().hour.do(job)
while True:
schedule.run_pending()
time.sleep(1) | print("Launching Alert Bot app...")
get_alert()
|
hardhat.typechain.config.ts | // This file is needed because many of our hardhat tasks rely on typechain, creating a circular dependency.
import "hardhat-typechain";
import { HardhatUserConfig } from "hardhat/config";
const config: HardhatUserConfig = {
solidity: {
compilers: [
{
version: "0.7.6",
settings: {
optimizer: {
enabled: true,
runs: 200,
},
},
},
{
version: "0.6.12",
settings: {
optimizer: {
enabled: true,
runs: 200,
},
}, | version: "0.6.0",
settings: {
optimizer: {
enabled: true,
runs: 200,
},
},
},
{
version: "0.5.15",
settings: {
optimizer: {
enabled: true,
runs: 200,
},
},
},
],
},
};
export default config; | },
{ |
core.py | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import pprint
from bs4 import BeautifulSoup
from astropy.extern.six.moves.urllib import parse as urlparse
from astropy.extern import six
from astropy import units as u
from . import conf
from ..query import BaseQuery
from ..utils import prepend_docstr_noreturns, commons, async_to_sync
__doctest_skip__ = [
'SkyViewClass.get_images',
'SkyViewClass.get_images_async',
'SkyViewClass.get_image_list']
@async_to_sync
class SkyViewClass(BaseQuery):
URL = conf.url
def __init__(self):
|
def _get_default_form_values(self, form):
"""
Return the already selected values of a given form (a BeautifulSoup
form node) as a dict.
"""
res = []
for elem in form.find_all(['input', 'select']):
# ignore the submit and reset buttons
if elem.get('type') in ['submit', 'reset']:
continue
# check boxes: enabled boxes have the value "on" if not specified
# otherwise. Found out by debugging, perhaps not documented.
if elem.get('type') == 'checkbox' and elem.get('checked') in ["", "checked"]:
value = elem.get('value', 'on')
res.append((elem.get('name'), value))
# radio buttons and simple input fields
if elem.get('type') == 'radio' and\
elem.get('checked') in ["", "checked"] or\
elem.get('type') in [None, 'text']:
res.append((elem.get('name'), elem.get('value')))
# dropdown menu, multi-section possible
if elem.name == 'select':
for option in elem.find_all('option'):
if option.get('selected') == '':
value = option.get('value', option.text.strip())
res.append((elem.get('name'), value))
return {k:v
for (k, v) in res
if v not in [None, u'None', u'null'] and v
}
def _generate_payload(self, input=None):
"""
Fill out the form of the SkyView site and submit it with the
values given in `input` (a dictionary where the keys are the form
element's names and the values are their respective values).
"""
if input is None:
input = {}
form_response = self._request('GET', self.URL)
bs = BeautifulSoup(form_response.content, "html.parser")
form = bs.find('form')
# cache the default values to save HTTP traffic
if self._default_form_values is None:
self._default_form_values = self._get_default_form_values(form)
# only overwrite payload's values if the `input` value is not None
# to avoid overwriting of the form's default values
payload = self._default_form_values.copy()
for k, v in six.iteritems(input):
if v is not None:
payload[k] = v
url = urlparse.urljoin(self.URL, form.get('action'))
return url, payload
def _submit_form(self, input=None, cache=True):
url, payload = self._generate_payload(input=input)
response = self._request('GET', url, params=payload, cache=cache)
return response
def get_images(self, position, survey, coordinates=None, projection=None,
pixels=None, scaling=None, sampler=None, resolver=None,
deedger=None, lut=None, grid=None, gridlabels=None,
radius=None, height=None, width=None, cache=True):
"""
Query the SkyView service, download the FITS file that will be
found and return a generator over the local paths to the
downloaded FITS files.
Note that the files will be downloaded when the generator will be
exhausted, i.e. just calling this method alone without iterating
over the result won't issue a connection to the SkyView server.
Parameters
----------
position : str
Determines the center of the field to be retrieved. Both
coordinates (also equatorial ones) and object names are
supported. Object names are converted to coordinates via the
SIMBAD or NED name resolver. See the reference for more info
on the supported syntax for coordinates.
survey : str or list of str
Select data from one or more surveys. The number of surveys
determines the number of resulting file downloads. Passing a
list with just one string has the same effect as passing this
string directly.
coordinates : str
Choose among common equatorial, galactic and ecliptic
coordinate systems (``"J2000"``, ``"B1950"``, ``"Galactic"``,
``"E2000"``, ``"ICRS"``) or pass a custom string.
projection : str
Choose among the map projections (the value in parentheses
denotes the string to be passed):
Gnomonic (Tan), default value
good for small regions
Rectangular (Car)
simplest projection
Aitoff (Ait)
Hammer-Aitoff, equal area projection good for all sky maps
Orthographic (Sin)
Projection often used in interferometry
Zenith Equal Area (Zea)
equal area, azimuthal projection
COBE Spherical Cube (Csc)
Used in COBE data
Arc (Arc)
Similar to Zea but not equal-area
pixels : str
Selects the pixel dimensions of the image to be produced. A
scalar value or a pair of values separated by comma may be
given. If the value is a scalar the number of width and height
of the image will be the same. By default a 300x300 image is
produced.
scaling : str
Selects the transformation between pixel intensity and
intensity on the displayed image. The supported values are:
``"Log"``, ``"Sqrt"``, ``"Linear"``, ``"HistEq"``,
``"LogLog"``.
sampler : str
The sampling algorithm determines how the data requested will
be resampled so that it can be displayed.
resolver : str
The name resolver allows to choose a name resolver to use when
looking up a name which was passed in the `position` parameter
(as opposed to a numeric coordinate value). The default choice
is to call the SIMBAD name resolver first and then the NED
name resolver if the SIMBAD search fails.
deedger : str
When multiple input images with different backgrounds are
resampled the edges between the images may be apparent because
of the background shift. This parameter makes it possible to
attempt to minimize these edges by applying a de-edging
algorithm. The user can elect to choose the default given for
that survey, to turn de-edging off, or to use the default
de-edging algorithm. The supported values are: ``"_skip_"`` to
use the survey default, ``"skyview.process.Deedger"`` (for
enabling de-edging), and ``"null"`` to disable.
lut : str
Choose from the color table selections to display the data in
false color.
grid : bool
overlay a coordinate grid on the image if True
gridlabels : bool
annotate the grid with coordinates positions if True
radius : `~astropy.units.Quantity` or None
The radius of the specified field. Overrides width and height.
width : `~astropy.units.Quantity` or None
The width of the specified field. Must be specified
with `height`
height : `~astropy.units.Quantity` or None
The height of the specified field. Must be specified
with `width`
References
----------
.. [1] http://skyview.gsfc.nasa.gov/current/help/fields.html
Examples
--------
>>> sv = SkyView()
>>> paths = sv.get_images(position='Eta Carinae', survey=['Fermi 5', 'HRI', 'DSS'])
>>> for path in paths:
... print '\tnew file:', path
Returns
-------
A list of `astropy.fits.HDUList` objects
"""
readable_objects = self.get_images_async(position, survey, coordinates,
projection, pixels, scaling,
sampler, resolver, deedger,
lut, grid, gridlabels,
radius=radius, height=height,
width=width,
cache=cache)
return [obj.get_fits() for obj in readable_objects]
@prepend_docstr_noreturns(get_images.__doc__)
def get_images_async(self, position, survey, coordinates=None,
projection=None, pixels=None, scaling=None,
sampler=None, resolver=None, deedger=None, lut=None,
grid=None, gridlabels=None, radius=None, height=None,
width=None, cache=True):
"""
Returns
-------
A list of context-managers that yield readable file-like objects
"""
image_urls = self.get_image_list(position, survey, coordinates,
projection, pixels, scaling, sampler,
resolver, deedger, lut, grid,
gridlabels, radius=radius,
height=height, width=width,
cache=cache)
return [commons.FileContainer(url) for url in image_urls]
@prepend_docstr_noreturns(get_images.__doc__)
def get_image_list(self, position, survey, coordinates=None,
projection=None, pixels=None, scaling=None,
sampler=None, resolver=None, deedger=None, lut=None,
grid=None, gridlabels=None, radius=None, width=None,
height=None, cache=True):
"""
Returns
-------
list of image urls
Examples
--------
>>> SkyView().get_image_list(position='Eta Carinae', survey=['Fermi 5', 'HRI', 'DSS'])
[u'http://skyview.gsfc.nasa.gov/tempspace/fits/skv6183161285798_1.fits',
u'http://skyview.gsfc.nasa.gov/tempspace/fits/skv6183161285798_2.fits',
u'http://skyview.gsfc.nasa.gov/tempspace/fits/skv6183161285798_3.fits']
"""
self._validate_surveys(survey)
if radius:
size_deg = str(radius.to(u.deg).value)
elif width and height:
size_deg = "{0},{1}".format(width.to(u.deg).value,
height.to(u.deg).value)
elif width and height:
raise ValueError("Must specify width and height if you specify either.")
else:
size_deg = None
input = {
'Position': parse_coordinates(position),
'survey': survey,
'Deedger': deedger,
'lut': lut,
'projection': projection,
'gridlabels': '1' if gridlabels else '0',
'coordinates': coordinates,
'scaling': scaling,
'grid': grid,
'resolver': resolver,
'Sampler': sampler,
'imscale': size_deg,
'size': size_deg,
'pixels': pixels}
response = self._submit_form(input, cache=cache)
urls = self._parse_response(response)
return urls
def _parse_response(self, response):
bs = BeautifulSoup(response.content, "html.parser")
urls = []
for a in bs.find_all('a'):
if a.text == 'FITS':
href = a.get('href')
urls.append(urlparse.urljoin(response.url, href))
return urls
@property
def survey_dict(self):
if not hasattr(self, '_survey_dict'):
response = self._request('GET', self.URL)
page = BeautifulSoup(response.content, "html.parser")
surveys = page.findAll('select', {'name':'survey'})
self._survey_dict = {sel['id']:[x.text for x in sel.findAll('option')]
for sel in surveys}
return self._survey_dict
@property
def _valid_surveys(self):
# Return a flat list of all valid surveys
return [x for v in self.survey_dict.values() for x in v]
def _validate_surveys(self, surveys):
if not isinstance(surveys, list):
surveys = [surveys]
for sv in surveys:
if sv not in self._valid_surveys:
raise ValueError("Survey is not among the surveys hosted "
"at skyview. See list_surveys or "
"survey_dict for valid surveys.")
def list_surveys(self):
"""
Print out a formatted version of the survey dict
"""
pprint.pprint(self.survey_dict)
def parse_coordinates(position):
coord = commons.parse_coordinates(position)
return coord.fk5.to_string()
SkyView = SkyViewClass()
| BaseQuery.__init__(self)
self._default_form_values = None |
arc.rs | // Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
/*!
* Concurrency-enabled mechanisms for sharing mutable and/or immutable state
* between tasks.
*
* # Example
*
* In this example, a large vector of floats is shared between several tasks.
* With simple pipes, without Arc, a copy would have to be made for each task.
*
* ~~~ {.rust}
* extern mod std;
* use extra::arc;
* let numbers=vec::from_fn(100, |ind| (ind as float)*rand::random());
* let shared_numbers=arc::Arc::new(numbers);
*
* do 10.times {
* let (port, chan) = stream();
* chan.send(shared_numbers.clone());
*
* do spawn {
* let shared_numbers=port.recv();
* let local_numbers=shared_numbers.get();
*
* // Work with the local numbers
* }
* }
* ~~~
*/
#[allow(missing_doc)];
use sync;
use sync::{Mutex, RWLock};
use std::cast;
use std::unstable::sync::UnsafeArc;
use std::task;
use std::borrow;
/// As sync::condvar, a mechanism for unlock-and-descheduling and signaling.
pub struct Condvar<'self> {
priv is_mutex: bool,
priv failed: &'self mut bool,
priv cond: &'self sync::Condvar<'self>
}
impl<'self> Condvar<'self> {
/// Atomically exit the associated Arc and block until a signal is sent.
#[inline]
pub fn wait(&self) { self.wait_on(0) }
/**
* Atomically exit the associated Arc and block on a specified condvar
* until a signal is sent on that same condvar (as sync::cond.wait_on).
*
* wait() is equivalent to wait_on(0).
*/
#[inline]
pub fn wait_on(&self, condvar_id: uint) {
assert!(!*self.failed);
self.cond.wait_on(condvar_id);
// This is why we need to wrap sync::condvar.
check_poison(self.is_mutex, *self.failed);
}
/// Wake up a blocked task. Returns false if there was no blocked task.
#[inline]
pub fn signal(&self) -> bool { self.signal_on(0) }
/**
* Wake up a blocked task on a specified condvar (as
* sync::cond.signal_on). Returns false if there was no blocked task.
*/
#[inline]
pub fn signal_on(&self, condvar_id: uint) -> bool {
assert!(!*self.failed);
self.cond.signal_on(condvar_id)
}
/// Wake up all blocked tasks. Returns the number of tasks woken.
#[inline]
pub fn broadcast(&self) -> uint { self.broadcast_on(0) }
/**
* Wake up all blocked tasks on a specified condvar (as
* sync::cond.broadcast_on). Returns the number of tasks woken.
*/
#[inline]
pub fn broadcast_on(&self, condvar_id: uint) -> uint {
assert!(!*self.failed);
self.cond.broadcast_on(condvar_id)
}
}
/****************************************************************************
* Immutable Arc
****************************************************************************/
/// An atomically reference counted wrapper for shared immutable state.
pub struct Arc<T> { priv x: UnsafeArc<T> }
/**
* Access the underlying data in an atomically reference counted
* wrapper.
*/
impl<T:Freeze+Send> Arc<T> {
/// Create an atomically reference counted wrapper.
pub fn new(data: T) -> Arc<T> {
Arc { x: UnsafeArc::new(data) }
}
pub fn get<'a>(&'a self) -> &'a T {
unsafe { &*self.x.get_immut() }
}
/**
* Retrieve the data back out of the Arc. This function blocks until the
* reference given to it is the last existing one, and then unwrap the data
* instead of destroying it.
*
* If multiple tasks call unwrap, all but the first will fail. Do not call
* unwrap from a task that holds another reference to the same Arc; it is
* guaranteed to deadlock.
*/
pub fn unwrap(self) -> T {
let Arc { x: x } = self;
x.unwrap()
}
}
impl<T:Freeze + Send> Clone for Arc<T> {
/**
* Duplicate an atomically reference counted wrapper.
*
* The resulting two `arc` objects will point to the same underlying data
* object. However, one of the `arc` objects can be sent to another task,
* allowing them to share the underlying data.
*/
fn clone(&self) -> Arc<T> {
Arc { x: self.x.clone() }
}
}
/****************************************************************************
* Mutex protected Arc (unsafe)
****************************************************************************/
#[doc(hidden)]
struct MutexArcInner<T> { priv lock: Mutex, priv failed: bool, priv data: T }
/// An Arc with mutable data protected by a blocking mutex.
struct MutexArc<T> { priv x: UnsafeArc<MutexArcInner<T>> }
impl<T:Send> Clone for MutexArc<T> {
/// Duplicate a mutex-protected Arc. See arc::clone for more details.
fn clone(&self) -> MutexArc<T> {
// NB: Cloning the underlying mutex is not necessary. Its reference
// count would be exactly the same as the shared state's.
MutexArc { x: self.x.clone() }
}
}
impl<T:Send> MutexArc<T> {
/// Create a mutex-protected Arc with the supplied data.
pub fn new(user_data: T) -> MutexArc<T> {
MutexArc::new_with_condvars(user_data, 1)
}
/**
* Create a mutex-protected Arc with the supplied data and a specified number
* of condvars (as sync::Mutex::new_with_condvars).
*/
pub fn new_with_condvars(user_data: T, num_condvars: uint) -> MutexArc<T> {
let data = MutexArcInner {
lock: Mutex::new_with_condvars(num_condvars),
failed: false, data: user_data
};
MutexArc { x: UnsafeArc::new(data) }
}
/**
* Access the underlying mutable data with mutual exclusion from other
* tasks. The argument closure will be run with the mutex locked; all
* other tasks wishing to access the data will block until the closure
* finishes running.
*
* The reason this function is 'unsafe' is because it is possible to
* construct a circular reference among multiple Arcs by mutating the
* underlying data. This creates potential for deadlock, but worse, this
* will guarantee a memory leak of all involved Arcs. Using mutex Arcs
* inside of other Arcs is safe in absence of circular references.
*
* If you wish to nest mutex_arcs, one strategy for ensuring safety at
* runtime is to add a "nesting level counter" inside the stored data, and
* when traversing the arcs, assert that they monotonically decrease.
*
* # Failure
*
* Failing while inside the Arc will unlock the Arc while unwinding, so
* that other tasks won't block forever. It will also poison the Arc:
* any tasks that subsequently try to access it (including those already
* blocked on the mutex) will also fail immediately.
*/
#[inline]
pub unsafe fn access<U>(&self, blk: &fn(x: &mut T) -> U) -> U {
let state = self.x.get();
// Borrowck would complain about this if the function were
// not already unsafe. See borrow_rwlock, far below.
do (&(*state).lock).lock {
check_poison(true, (*state).failed);
let _z = PoisonOnFail(&mut (*state).failed);
blk(&mut (*state).data)
}
}
/// As access(), but with a condvar, as sync::mutex.lock_cond().
#[inline]
pub unsafe fn access_cond<'x, 'c, U>(&self,
blk: &fn(x: &'x mut T,
c: &'c Condvar) -> U)
-> U {
let state = self.x.get();
do (&(*state).lock).lock_cond |cond| {
check_poison(true, (*state).failed);
let _z = PoisonOnFail(&mut (*state).failed);
blk(&mut (*state).data,
&Condvar {is_mutex: true,
failed: &mut (*state).failed,
cond: cond })
}
}
/**
* Retrieves the data, blocking until all other references are dropped,
* exactly as arc::unwrap.
*
* Will additionally fail if another task has failed while accessing the arc.
*/
pub fn unwrap(self) -> T {
let MutexArc { x: x } = self;
let inner = x.unwrap();
let MutexArcInner { failed: failed, data: data, _ } = inner;
if failed {
fail!(~"Can't unwrap poisoned MutexArc - another task failed inside!");
}
data
}
}
// Common code for {mutex.access,rwlock.write}{,_cond}.
#[inline]
#[doc(hidden)]
fn check_poison(is_mutex: bool, failed: bool) {
if failed {
if is_mutex {
fail!("Poisoned MutexArc - another task failed inside!");
} else {
fail!("Poisoned rw_arc - another task failed inside!");
}
}
}
#[doc(hidden)]
struct PoisonOnFail {
failed: *mut bool,
}
impl Drop for PoisonOnFail {
fn drop(&self) {
unsafe {
/* assert!(!*self.failed);
-- might be false in case of cond.wait() */
if task::failing() {
*self.failed = true;
}
}
}
}
fn | <'r>(failed: &'r mut bool) -> PoisonOnFail {
PoisonOnFail {
failed: failed
}
}
/****************************************************************************
* R/W lock protected Arc
****************************************************************************/
#[doc(hidden)]
struct RWArcInner<T> { priv lock: RWLock, priv failed: bool, priv data: T }
/**
* A dual-mode Arc protected by a reader-writer lock. The data can be accessed
* mutably or immutably, and immutably-accessing tasks may run concurrently.
*
* Unlike mutex_arcs, rw_arcs are safe, because they cannot be nested.
*/
#[no_freeze]
struct RWArc<T> {
priv x: UnsafeArc<RWArcInner<T>>,
}
impl<T:Freeze + Send> Clone for RWArc<T> {
/// Duplicate a rwlock-protected Arc. See arc::clone for more details.
fn clone(&self) -> RWArc<T> {
RWArc { x: self.x.clone() }
}
}
impl<T:Freeze + Send> RWArc<T> {
/// Create a reader/writer Arc with the supplied data.
pub fn new(user_data: T) -> RWArc<T> {
RWArc::new_with_condvars(user_data, 1)
}
/**
* Create a reader/writer Arc with the supplied data and a specified number
* of condvars (as sync::RWLock::new_with_condvars).
*/
pub fn new_with_condvars(user_data: T, num_condvars: uint) -> RWArc<T> {
let data = RWArcInner {
lock: RWLock::new_with_condvars(num_condvars),
failed: false, data: user_data
};
RWArc { x: UnsafeArc::new(data), }
}
/**
* Access the underlying data mutably. Locks the rwlock in write mode;
* other readers and writers will block.
*
* # Failure
*
* Failing while inside the Arc will unlock the Arc while unwinding, so
* that other tasks won't block forever. As MutexArc.access, it will also
* poison the Arc, so subsequent readers and writers will both also fail.
*/
#[inline]
pub fn write<U>(&self, blk: &fn(x: &mut T) -> U) -> U {
unsafe {
let state = self.x.get();
do (*borrow_rwlock(state)).write {
check_poison(false, (*state).failed);
let _z = PoisonOnFail(&mut (*state).failed);
blk(&mut (*state).data)
}
}
}
/// As write(), but with a condvar, as sync::rwlock.write_cond().
#[inline]
pub fn write_cond<'x, 'c, U>(&self,
blk: &fn(x: &'x mut T, c: &'c Condvar) -> U)
-> U {
unsafe {
let state = self.x.get();
do (*borrow_rwlock(state)).write_cond |cond| {
check_poison(false, (*state).failed);
let _z = PoisonOnFail(&mut (*state).failed);
blk(&mut (*state).data,
&Condvar {is_mutex: false,
failed: &mut (*state).failed,
cond: cond})
}
}
}
/**
* Access the underlying data immutably. May run concurrently with other
* reading tasks.
*
* # Failure
*
* Failing will unlock the Arc while unwinding. However, unlike all other
* access modes, this will not poison the Arc.
*/
pub fn read<U>(&self, blk: &fn(x: &T) -> U) -> U {
unsafe {
let state = self.x.get();
do (*state).lock.read {
check_poison(false, (*state).failed);
blk(&(*state).data)
}
}
}
/**
* As write(), but with the ability to atomically 'downgrade' the lock.
* See sync::rwlock.write_downgrade(). The RWWriteMode token must be used
* to obtain the &mut T, and can be transformed into a RWReadMode token by
* calling downgrade(), after which a &T can be obtained instead.
*
* # Example
*
* ~~~ {.rust}
* do arc.write_downgrade |mut write_token| {
* do write_token.write_cond |state, condvar| {
* ... exclusive access with mutable state ...
* }
* let read_token = arc.downgrade(write_token);
* do read_token.read |state| {
* ... shared access with immutable state ...
* }
* }
* ~~~
*/
pub fn write_downgrade<U>(&self, blk: &fn(v: RWWriteMode<T>) -> U) -> U {
unsafe {
let state = self.x.get();
do (*borrow_rwlock(state)).write_downgrade |write_mode| {
check_poison(false, (*state).failed);
blk(RWWriteMode {
data: &mut (*state).data,
token: write_mode,
poison: PoisonOnFail(&mut (*state).failed)
})
}
}
}
/// To be called inside of the write_downgrade block.
pub fn downgrade<'a>(&self, token: RWWriteMode<'a, T>)
-> RWReadMode<'a, T> {
unsafe {
// The rwlock should assert that the token belongs to us for us.
let state = self.x.get();
let RWWriteMode {
data: data,
token: t,
poison: _poison
} = token;
// Let readers in
let new_token = (*state).lock.downgrade(t);
// Whatever region the input reference had, it will be safe to use
// the same region for the output reference. (The only 'unsafe' part
// of this cast is removing the mutability.)
let new_data = cast::transmute_immut(data);
// Downgrade ensured the token belonged to us. Just a sanity check.
assert!(borrow::ref_eq(&(*state).data, new_data));
// Produce new token
RWReadMode {
data: new_data,
token: new_token,
}
}
}
/**
* Retrieves the data, blocking until all other references are dropped,
* exactly as arc::unwrap.
*
* Will additionally fail if another task has failed while accessing the arc
* in write mode.
*/
pub fn unwrap(self) -> T {
let RWArc { x: x, _ } = self;
let inner = x.unwrap();
let RWArcInner { failed: failed, data: data, _ } = inner;
if failed {
fail!(~"Can't unwrap poisoned RWArc - another task failed inside!")
}
data
}
}
// Borrowck rightly complains about immutably aliasing the rwlock in order to
// lock it. This wraps the unsafety, with the justification that the 'lock'
// field is never overwritten; only 'failed' and 'data'.
#[doc(hidden)]
fn borrow_rwlock<T:Freeze + Send>(state: *mut RWArcInner<T>) -> *RWLock {
unsafe { cast::transmute(&(*state).lock) }
}
/// The "write permission" token used for RWArc.write_downgrade().
pub struct RWWriteMode<'self, T> {
data: &'self mut T,
token: sync::RWLockWriteMode<'self>,
poison: PoisonOnFail,
}
/// The "read permission" token used for RWArc.write_downgrade().
pub struct RWReadMode<'self, T> {
data: &'self T,
token: sync::RWLockReadMode<'self>,
}
impl<'self, T:Freeze + Send> RWWriteMode<'self, T> {
/// Access the pre-downgrade RWArc in write mode.
pub fn write<U>(&mut self, blk: &fn(x: &mut T) -> U) -> U {
match *self {
RWWriteMode {
data: &ref mut data,
token: ref token,
poison: _
} => {
do token.write {
blk(data)
}
}
}
}
/// Access the pre-downgrade RWArc in write mode with a condvar.
pub fn write_cond<'x, 'c, U>(&mut self,
blk: &fn(x: &'x mut T, c: &'c Condvar) -> U)
-> U {
match *self {
RWWriteMode {
data: &ref mut data,
token: ref token,
poison: ref poison
} => {
do token.write_cond |cond| {
unsafe {
let cvar = Condvar {
is_mutex: false,
failed: &mut *poison.failed,
cond: cond
};
blk(data, &cvar)
}
}
}
}
}
}
impl<'self, T:Freeze + Send> RWReadMode<'self, T> {
/// Access the post-downgrade rwlock in read mode.
pub fn read<U>(&self, blk: &fn(x: &T) -> U) -> U {
match *self {
RWReadMode {
data: data,
token: ref token
} => {
do token.read { blk(data) }
}
}
}
}
/****************************************************************************
* Tests
****************************************************************************/
#[cfg(test)]
mod tests {
use arc::*;
use std::cell::Cell;
use std::comm;
use std::task;
#[test]
fn manually_share_arc() {
let v = ~[1, 2, 3, 4, 5, 6, 7, 8, 9, 10];
let arc_v = Arc::new(v);
let (p, c) = comm::stream();
do task::spawn() || {
let arc_v : Arc<~[int]> = p.recv();
let v = (*arc_v.get()).clone();
assert_eq!(v[3], 4);
};
c.send(arc_v.clone());
assert_eq!(arc_v.get()[2], 3);
assert_eq!(arc_v.get()[4], 5);
info!(arc_v);
}
#[test]
fn test_mutex_arc_condvar() {
unsafe {
let arc = ~MutexArc::new(false);
let arc2 = ~arc.clone();
let (p,c) = comm::oneshot();
let (c,p) = (Cell::new(c), Cell::new(p));
do task::spawn || {
// wait until parent gets in
p.take().recv();
do arc2.access_cond |state, cond| {
*state = true;
cond.signal();
}
}
do arc.access_cond |state, cond| {
c.take().send(());
assert!(!*state);
while !*state {
cond.wait();
}
}
}
}
#[test] #[should_fail]
fn test_arc_condvar_poison() {
unsafe {
let arc = ~MutexArc::new(1);
let arc2 = ~arc.clone();
let (p, c) = comm::stream();
do task::spawn_unlinked || {
let _ = p.recv();
do arc2.access_cond |one, cond| {
cond.signal();
// Parent should fail when it wakes up.
assert_eq!(*one, 0);
}
}
do arc.access_cond |one, cond| {
c.send(());
while *one == 1 {
cond.wait();
}
}
}
}
#[test] #[should_fail]
fn test_mutex_arc_poison() {
unsafe {
let arc = ~MutexArc::new(1);
let arc2 = ~arc.clone();
do task::try || {
do arc2.access |one| {
assert_eq!(*one, 2);
}
};
do arc.access |one| {
assert_eq!(*one, 1);
}
}
}
#[test] #[should_fail]
pub fn test_mutex_arc_unwrap_poison() {
let arc = MutexArc::new(1);
let arc2 = ~(&arc).clone();
let (p, c) = comm::stream();
do task::spawn {
unsafe {
do arc2.access |one| {
c.send(());
assert!(*one == 2);
}
}
}
let _ = p.recv();
let one = arc.unwrap();
assert!(one == 1);
}
#[test] #[should_fail]
fn test_rw_arc_poison_wr() {
let arc = ~RWArc::new(1);
let arc2 = (*arc).clone();
do task::try || {
do arc2.write |one| {
assert_eq!(*one, 2);
}
};
do arc.read |one| {
assert_eq!(*one, 1);
}
}
#[test] #[should_fail]
fn test_rw_arc_poison_ww() {
let arc = ~RWArc::new(1);
let arc2 = (*arc).clone();
do task::try || {
do arc2.write |one| {
assert_eq!(*one, 2);
}
};
do arc.write |one| {
assert_eq!(*one, 1);
}
}
#[test] #[should_fail]
fn test_rw_arc_poison_dw() {
let arc = ~RWArc::new(1);
let arc2 = (*arc).clone();
do task::try || {
do arc2.write_downgrade |mut write_mode| {
do write_mode.write |one| {
assert_eq!(*one, 2);
}
}
};
do arc.write |one| {
assert_eq!(*one, 1);
}
}
#[test]
fn test_rw_arc_no_poison_rr() {
let arc = ~RWArc::new(1);
let arc2 = (*arc).clone();
do task::try || {
do arc2.read |one| {
assert_eq!(*one, 2);
}
};
do arc.read |one| {
assert_eq!(*one, 1);
}
}
#[test]
fn test_rw_arc_no_poison_rw() {
let arc = ~RWArc::new(1);
let arc2 = (*arc).clone();
do task::try || {
do arc2.read |one| {
assert_eq!(*one, 2);
}
};
do arc.write |one| {
assert_eq!(*one, 1);
}
}
#[test]
fn test_rw_arc_no_poison_dr() {
let arc = ~RWArc::new(1);
let arc2 = (*arc).clone();
do task::try || {
do arc2.write_downgrade |write_mode| {
let read_mode = arc2.downgrade(write_mode);
do (&read_mode).read |one| {
assert_eq!(*one, 2);
}
}
};
do arc.write |one| {
assert_eq!(*one, 1);
}
}
#[test]
fn test_rw_arc() {
let arc = ~RWArc::new(0);
let arc2 = (*arc).clone();
let (p,c) = comm::stream();
do task::spawn || {
do arc2.write |num| {
do 10.times {
let tmp = *num;
*num = -1;
task::deschedule();
*num = tmp + 1;
}
c.send(());
}
}
// Readers try to catch the writer in the act
let mut children = ~[];
do 5.times {
let arc3 = (*arc).clone();
let mut builder = task::task();
builder.future_result(|r| children.push(r));
do builder.spawn {
do arc3.read |num| {
assert!(*num >= 0);
}
}
}
// Wait for children to pass their asserts
for r in children.iter() {
r.recv();
}
// Wait for writer to finish
p.recv();
do arc.read |num| {
assert_eq!(*num, 10);
}
}
#[test]
fn test_rw_downgrade() {
// (1) A downgrader gets in write mode and does cond.wait.
// (2) A writer gets in write mode, sets state to 42, and does signal.
// (3) Downgrader wakes, sets state to 31337.
// (4) tells writer and all other readers to contend as it downgrades.
// (5) Writer attempts to set state back to 42, while downgraded task
// and all reader tasks assert that it's 31337.
let arc = ~RWArc::new(0);
// Reader tasks
let mut reader_convos = ~[];
do 10.times {
let ((rp1,rc1),(rp2,rc2)) = (comm::stream(),comm::stream());
reader_convos.push((rc1, rp2));
let arcn = (*arc).clone();
do task::spawn || {
rp1.recv(); // wait for downgrader to give go-ahead
do arcn.read |state| {
assert_eq!(*state, 31337);
rc2.send(());
}
}
}
// Writer task
let arc2 = (*arc).clone();
let ((wp1,wc1),(wp2,wc2)) = (comm::stream(),comm::stream());
do task::spawn || {
wp1.recv();
do arc2.write_cond |state, cond| {
assert_eq!(*state, 0);
*state = 42;
cond.signal();
}
wp1.recv();
do arc2.write |state| {
// This shouldn't happen until after the downgrade read
// section, and all other readers, finish.
assert_eq!(*state, 31337);
*state = 42;
}
wc2.send(());
}
// Downgrader (us)
do arc.write_downgrade |mut write_mode| {
do write_mode.write_cond |state, cond| {
wc1.send(()); // send to another writer who will wake us up
while *state == 0 {
cond.wait();
}
assert_eq!(*state, 42);
*state = 31337;
// send to other readers
for &(ref rc, _) in reader_convos.iter() {
rc.send(())
}
}
let read_mode = arc.downgrade(write_mode);
do (&read_mode).read |state| {
// complete handshake with other readers
for &(_, ref rp) in reader_convos.iter() {
rp.recv()
}
wc1.send(()); // tell writer to try again
assert_eq!(*state, 31337);
}
}
wp2.recv(); // complete handshake with writer
}
#[cfg(test)]
fn test_rw_write_cond_downgrade_read_race_helper() {
// Tests that when a downgrader hands off the "reader cloud" lock
// because of a contending reader, a writer can't race to get it
// instead, which would result in readers_and_writers. This tests
// the sync module rather than this one, but it's here because an
// rwarc gives us extra shared state to help check for the race.
// If you want to see this test fail, go to sync.rs and replace the
// line in RWLock::write_cond() that looks like:
// "blk(&Condvar { order: opt_lock, ..*cond })"
// with just "blk(cond)".
let x = ~RWArc::new(true);
let (wp, wc) = comm::stream();
// writer task
let xw = (*x).clone();
do task::spawn {
do xw.write_cond |state, c| {
wc.send(()); // tell downgrader it's ok to go
c.wait();
// The core of the test is here: the condvar reacquire path
// must involve order_lock, so that it cannot race with a reader
// trying to receive the "reader cloud lock hand-off".
*state = false;
}
}
wp.recv(); // wait for writer to get in
do x.write_downgrade |mut write_mode| {
do write_mode.write_cond |state, c| {
assert!(*state);
// make writer contend in the cond-reacquire path
c.signal();
}
// make a reader task to trigger the "reader cloud lock" handoff
let xr = (*x).clone();
let (rp, rc) = comm::stream();
do task::spawn {
rc.send(());
do xr.read |_state| { }
}
rp.recv(); // wait for reader task to exist
let read_mode = x.downgrade(write_mode);
do read_mode.read |state| {
// if writer mistakenly got in, make sure it mutates state
// before we assert on it
do 5.times { task::deschedule(); }
// make sure writer didn't get in.
assert!(*state);
}
}
}
#[test]
fn test_rw_write_cond_downgrade_read_race() {
// Ideally the above test case would have deschedule statements in it that
// helped to expose the race nearly 100% of the time... but adding
// deschedules in the intuitively-right locations made it even less likely,
// and I wasn't sure why :( . This is a mediocre "next best" option.
do 8.times { test_rw_write_cond_downgrade_read_race_helper() }
}
}
| PoisonOnFail |
config_test.go | // Copyright (c) 2017 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import (
"fmt"
"io/ioutil"
"os"
"path"
"path/filepath"
"reflect"
goruntime "runtime"
"strconv"
"strings"
"syscall"
"testing"
vc "github.com/kata-containers/runtime/virtcontainers"
"github.com/kata-containers/runtime/virtcontainers/pkg/oci"
"github.com/stretchr/testify/assert"
)
type testRuntimeConfig struct {
RuntimeConfig oci.RuntimeConfig
RuntimeConfigFile string
ConfigPath string
ConfigPathLink string
LogDir string
LogPath string
}
func makeRuntimeConfigFileData(hypervisor, hypervisorPath, kernelPath, imagePath, kernelParams, machineType, shimPath, proxyPath, logPath string, disableBlock bool, blockDeviceDriver string) string {
return `
# Clear Containers runtime configuration file
[hypervisor.` + hypervisor + `]
path = "` + hypervisorPath + `"
kernel = "` + kernelPath + `"
block_device_driver = "` + blockDeviceDriver + `"
kernel_params = "` + kernelParams + `"
image = "` + imagePath + `"
machine_type = "` + machineType + `"
default_vcpus = ` + strconv.FormatUint(uint64(defaultVCPUCount), 10) + `
default_memory = ` + strconv.FormatUint(uint64(defaultMemSize), 10) + `
disable_block_device_use = ` + strconv.FormatBool(disableBlock) + `
[proxy.cc]
path = "` + proxyPath + `"
[shim.cc]
path = "` + shimPath + `"
[agent.cc]
[runtime]
`
}
func createConfig(configPath string, fileData string) error {
err := ioutil.WriteFile(configPath, []byte(fileData), testFileMode)
if err != nil {
fmt.Fprintf(os.Stderr, "Unable to create config file %s %v\n", configPath, err)
return err
}
return nil
}
// createAllRuntimeConfigFiles creates all files necessary to call
// loadConfiguration().
func createAllRuntimeConfigFiles(dir, hypervisor string) (config testRuntimeConfig, err error) {
if dir == "" {
return config, fmt.Errorf("BUG: need directory")
}
if hypervisor == "" {
return config, fmt.Errorf("BUG: need hypervisor")
}
hypervisorPath := path.Join(dir, "hypervisor")
kernelPath := path.Join(dir, "kernel")
kernelParams := "foo=bar xyz"
imagePath := path.Join(dir, "image")
shimPath := path.Join(dir, "shim")
proxyPath := path.Join(dir, "proxy")
logDir := path.Join(dir, "logs")
logPath := path.Join(logDir, "runtime.log")
machineType := "machineType"
disableBlockDevice := true
blockDeviceDriver := "virtio-scsi"
runtimeConfigFileData := makeRuntimeConfigFileData(hypervisor, hypervisorPath, kernelPath, imagePath, kernelParams, machineType, shimPath, proxyPath, logPath, disableBlockDevice, blockDeviceDriver)
configPath := path.Join(dir, "runtime.toml")
err = createConfig(configPath, runtimeConfigFileData)
if err != nil {
return config, err | configPathLink := path.Join(filepath.Dir(configPath), "link-to-configuration.toml")
// create a link to the config file
err = syscall.Symlink(configPath, configPathLink)
if err != nil {
return config, err
}
files := []string{hypervisorPath, kernelPath, imagePath, shimPath, proxyPath}
for _, file := range files {
// create the resource
err = createEmptyFile(file)
if err != nil {
return config, err
}
}
hypervisorConfig := vc.HypervisorConfig{
HypervisorPath: hypervisorPath,
KernelPath: kernelPath,
ImagePath: imagePath,
KernelParams: vc.DeserializeParams(strings.Fields(kernelParams)),
HypervisorMachineType: machineType,
DefaultVCPUs: defaultVCPUCount,
DefaultMemSz: defaultMemSize,
DisableBlockDeviceUse: disableBlockDevice,
BlockDeviceDriver: defaultBlockDeviceDriver,
DefaultBridges: defaultBridgesCount,
Mlock: !defaultEnableSwap,
}
agentConfig := vc.HyperConfig{}
proxyConfig := vc.ProxyConfig{
Path: proxyPath,
}
shimConfig := vc.ShimConfig{
Path: shimPath,
}
runtimeConfig := oci.RuntimeConfig{
HypervisorType: defaultHypervisor,
HypervisorConfig: hypervisorConfig,
AgentType: defaultAgent,
AgentConfig: agentConfig,
ProxyType: defaultProxy,
ProxyConfig: proxyConfig,
ShimType: defaultShim,
ShimConfig: shimConfig,
VMConfig: vc.Resources{
Memory: uint(defaultMemSize),
},
}
config = testRuntimeConfig{
RuntimeConfig: runtimeConfig,
RuntimeConfigFile: configPath,
ConfigPath: configPath,
ConfigPathLink: configPathLink,
LogDir: logDir,
LogPath: logPath,
}
return config, nil
}
// testLoadConfiguration accepts an optional function that can be used
// to modify the test: if a function is specified, it indicates if the
// subsequent call to loadConfiguration() is expected to fail by
// returning a bool. If the function itself fails, that is considered an
// error.
func testLoadConfiguration(t *testing.T, dir string,
fn func(config testRuntimeConfig, configFile string, ignoreLogging bool) (bool, error)) {
subDir := path.Join(dir, "test")
for _, hypervisor := range []string{"qemu"} {
Loop:
for _, ignoreLogging := range []bool{true, false} {
err := os.RemoveAll(subDir)
assert.NoError(t, err)
err = os.MkdirAll(subDir, testDirMode)
assert.NoError(t, err)
testConfig, err := createAllRuntimeConfigFiles(subDir, hypervisor)
assert.NoError(t, err)
configFiles := []string{testConfig.ConfigPath, testConfig.ConfigPathLink, ""}
// override
defaultRuntimeConfiguration = testConfig.ConfigPath
defaultSysConfRuntimeConfiguration = ""
for _, file := range configFiles {
var err error
expectFail := false
if fn != nil {
expectFail, err = fn(testConfig, file, ignoreLogging)
assert.NoError(t, err)
}
resolvedConfigPath, config, err := loadConfiguration(file, ignoreLogging)
if expectFail {
assert.Error(t, err)
// no point proceeding in the error scenario.
break Loop
} else {
assert.NoError(t, err)
}
if file == "" {
assert.Equal(t, defaultRuntimeConfiguration, resolvedConfigPath)
} else {
assert.Equal(t, testConfig.ConfigPath, resolvedConfigPath)
}
assert.Equal(t, defaultRuntimeConfiguration, resolvedConfigPath)
result := reflect.DeepEqual(config, testConfig.RuntimeConfig)
if !result {
t.Fatalf("Expected\n%+v\nGot\n%+v", config, testConfig.RuntimeConfig)
}
assert.True(t, result)
err = os.RemoveAll(testConfig.LogDir)
assert.NoError(t, err)
}
}
}
}
func TestConfigLoadConfiguration(t *testing.T) {
tmpdir, err := ioutil.TempDir(testDir, "load-config-")
assert.NoError(t, err)
defer os.RemoveAll(tmpdir)
testLoadConfiguration(t, tmpdir, nil)
}
func TestConfigLoadConfigurationFailBrokenSymLink(t *testing.T) {
tmpdir, err := ioutil.TempDir(testDir, "runtime-config-")
assert.NoError(t, err)
defer os.RemoveAll(tmpdir)
testLoadConfiguration(t, tmpdir,
func(config testRuntimeConfig, configFile string, ignoreLogging bool) (bool, error) {
expectFail := false
if configFile == config.ConfigPathLink {
// break the symbolic link
err = os.Remove(config.ConfigPathLink)
if err != nil {
return expectFail, err
}
expectFail = true
}
return expectFail, nil
})
}
func TestConfigLoadConfigurationFailSymLinkLoop(t *testing.T) {
tmpdir, err := ioutil.TempDir(testDir, "runtime-config-")
assert.NoError(t, err)
defer os.RemoveAll(tmpdir)
testLoadConfiguration(t, tmpdir,
func(config testRuntimeConfig, configFile string, ignoreLogging bool) (bool, error) {
expectFail := false
if configFile == config.ConfigPathLink {
// remove the config file
err = os.Remove(config.ConfigPath)
if err != nil {
return expectFail, err
}
// now, create a sym-link loop
err := os.Symlink(config.ConfigPathLink, config.ConfigPath)
if err != nil {
return expectFail, err
}
expectFail = true
}
return expectFail, nil
})
}
func TestConfigLoadConfigurationFailMissingHypervisor(t *testing.T) {
tmpdir, err := ioutil.TempDir(testDir, "runtime-config-")
assert.NoError(t, err)
defer os.RemoveAll(tmpdir)
testLoadConfiguration(t, tmpdir,
func(config testRuntimeConfig, configFile string, ignoreLogging bool) (bool, error) {
expectFail := true
err = os.Remove(config.RuntimeConfig.HypervisorConfig.HypervisorPath)
if err != nil {
return expectFail, err
}
return expectFail, nil
})
}
func TestConfigLoadConfigurationFailMissingImage(t *testing.T) {
tmpdir, err := ioutil.TempDir(testDir, "runtime-config-")
assert.NoError(t, err)
defer os.RemoveAll(tmpdir)
testLoadConfiguration(t, tmpdir,
func(config testRuntimeConfig, configFile string, ignoreLogging bool) (bool, error) {
expectFail := true
err = os.Remove(config.RuntimeConfig.HypervisorConfig.ImagePath)
if err != nil {
return expectFail, err
}
return expectFail, nil
})
}
func TestConfigLoadConfigurationFailMissingKernel(t *testing.T) {
tmpdir, err := ioutil.TempDir(testDir, "runtime-config-")
assert.NoError(t, err)
defer os.RemoveAll(tmpdir)
testLoadConfiguration(t, tmpdir,
func(config testRuntimeConfig, configFile string, ignoreLogging bool) (bool, error) {
expectFail := true
err = os.Remove(config.RuntimeConfig.HypervisorConfig.KernelPath)
if err != nil {
return expectFail, err
}
return expectFail, nil
})
}
func TestConfigLoadConfigurationFailMissingShim(t *testing.T) {
tmpdir, err := ioutil.TempDir(testDir, "runtime-config-")
assert.NoError(t, err)
defer os.RemoveAll(tmpdir)
testLoadConfiguration(t, tmpdir,
func(config testRuntimeConfig, configFile string, ignoreLogging bool) (bool, error) {
expectFail := true
shimConfig, ok := config.RuntimeConfig.ShimConfig.(vc.ShimConfig)
if !ok {
return expectFail, fmt.Errorf("cannot determine shim config")
}
err = os.Remove(shimConfig.Path)
if err != nil {
return expectFail, err
}
return expectFail, nil
})
}
func TestConfigLoadConfigurationFailUnreadableConfig(t *testing.T) {
if os.Geteuid() == 0 {
t.Skip(testDisabledNeedNonRoot)
}
tmpdir, err := ioutil.TempDir(testDir, "runtime-config-")
assert.NoError(t, err)
defer os.RemoveAll(tmpdir)
testLoadConfiguration(t, tmpdir,
func(config testRuntimeConfig, configFile string, ignoreLogging bool) (bool, error) {
expectFail := true
// make file unreadable by non-root user
err = os.Chmod(config.ConfigPath, 0000)
if err != nil {
return expectFail, err
}
return expectFail, nil
})
}
func TestConfigLoadConfigurationFailTOMLConfigFileInvalidContents(t *testing.T) {
if os.Geteuid() == 0 {
t.Skip(testDisabledNeedNonRoot)
}
tmpdir, err := ioutil.TempDir(testDir, "runtime-config-")
assert.NoError(t, err)
defer os.RemoveAll(tmpdir)
testLoadConfiguration(t, tmpdir,
func(config testRuntimeConfig, configFile string, ignoreLogging bool) (bool, error) {
expectFail := true
err := createFile(config.ConfigPath,
`<?xml version="1.0"?>
<foo>I am not TOML! ;-)</foo>
<bar>I am invalid XML!`)
if err != nil {
return expectFail, err
}
return expectFail, nil
})
}
func TestConfigLoadConfigurationFailTOMLConfigFileDuplicatedData(t *testing.T) {
if os.Geteuid() == 0 {
t.Skip(testDisabledNeedNonRoot)
}
tmpdir, err := ioutil.TempDir(testDir, "runtime-config-")
assert.NoError(t, err)
defer os.RemoveAll(tmpdir)
testLoadConfiguration(t, tmpdir,
func(config testRuntimeConfig, configFile string, ignoreLogging bool) (bool, error) {
expectFail := true
text, err := getFileContents(config.ConfigPath)
if err != nil {
return expectFail, err
}
// create a config file containing two sets of
// data.
err = createFile(config.ConfigPath, fmt.Sprintf("%s\n%s\n", text, text))
if err != nil {
return expectFail, err
}
return expectFail, nil
})
}
func TestMinimalRuntimeConfig(t *testing.T) {
dir, err := ioutil.TempDir(testDir, "minimal-runtime-config-")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(dir)
shimPath := path.Join(dir, "shim")
proxyPath := path.Join(dir, "proxy")
runtimeMinimalConfig := `
# Clear Containers runtime configuration file
[proxy.cc]
path = "` + proxyPath + `"
[shim.cc]
path = "` + shimPath + `"
[agent.cc]
`
configPath := path.Join(dir, "runtime.toml")
err = createConfig(configPath, runtimeMinimalConfig)
if err != nil {
t.Fatal(err)
}
_, config, err := loadConfiguration(configPath, false)
if err == nil {
t.Fatalf("Expected loadConfiguration to fail as shim path does not exist: %+v", config)
}
err = createEmptyFile(shimPath)
if err != nil {
t.Error(err)
}
err = createEmptyFile(proxyPath)
if err != nil {
t.Error(err)
}
_, config, err = loadConfiguration(configPath, false)
if err != nil {
t.Fatal(err)
}
expectedHypervisorConfig := vc.HypervisorConfig{
HypervisorPath: defaultHypervisorPath,
KernelPath: defaultKernelPath,
ImagePath: defaultImagePath,
HypervisorMachineType: defaultMachineType,
DefaultVCPUs: defaultVCPUCount,
DefaultMemSz: defaultMemSize,
DisableBlockDeviceUse: defaultDisableBlockDeviceUse,
DefaultBridges: defaultBridgesCount,
Mlock: !defaultEnableSwap,
BlockDeviceDriver: defaultBlockDeviceDriver,
}
expectedAgentConfig := vc.HyperConfig{}
expectedProxyConfig := vc.ProxyConfig{
Path: proxyPath,
}
expectedShimConfig := vc.ShimConfig{
Path: shimPath,
}
expectedConfig := oci.RuntimeConfig{
HypervisorType: defaultHypervisor,
HypervisorConfig: expectedHypervisorConfig,
AgentType: defaultAgent,
AgentConfig: expectedAgentConfig,
ProxyType: defaultProxy,
ProxyConfig: expectedProxyConfig,
ShimType: defaultShim,
ShimConfig: expectedShimConfig,
}
if reflect.DeepEqual(config, expectedConfig) == false {
t.Fatalf("Got %v\n expecting %v", config, expectedConfig)
}
if err := os.Remove(configPath); err != nil {
t.Fatal(err)
}
}
func TestNewQemuHypervisorConfig(t *testing.T) {
dir, err := ioutil.TempDir(testDir, "hypervisor-config-")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(dir)
hypervisorPath := path.Join(dir, "hypervisor")
kernelPath := path.Join(dir, "kernel")
imagePath := path.Join(dir, "image")
machineType := "machineType"
disableBlock := true
hypervisor := hypervisor{
Path: hypervisorPath,
Kernel: kernelPath,
Image: imagePath,
MachineType: machineType,
DisableBlockDeviceUse: disableBlock,
}
files := []string{hypervisorPath, kernelPath, imagePath}
filesLen := len(files)
for i, file := range files {
_, err := newQemuHypervisorConfig(hypervisor)
if err == nil {
t.Fatalf("Expected newQemuHypervisorConfig to fail as not all paths exist (not created %v)",
strings.Join(files[i:filesLen], ","))
}
// create the resource
err = createEmptyFile(file)
if err != nil {
t.Error(err)
}
}
// all paths exist now
config, err := newQemuHypervisorConfig(hypervisor)
if err != nil {
t.Fatal(err)
}
if config.HypervisorPath != hypervisor.Path {
t.Errorf("Expected hypervisor path %v, got %v", hypervisor.Path, config.HypervisorPath)
}
if config.KernelPath != hypervisor.Kernel {
t.Errorf("Expected kernel path %v, got %v", hypervisor.Kernel, config.KernelPath)
}
if config.ImagePath != hypervisor.Image {
t.Errorf("Expected image path %v, got %v", hypervisor.Image, config.ImagePath)
}
if config.DisableBlockDeviceUse != disableBlock {
t.Errorf("Expected value for disable block usage %v, got %v", disableBlock, config.DisableBlockDeviceUse)
}
}
func TestNewCCShimConfig(t *testing.T) {
dir, err := ioutil.TempDir(testDir, "shim-config-")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(dir)
shimPath := path.Join(dir, "shim")
shim := shim{
Path: shimPath,
}
_, err = newShimConfig(shim)
if err == nil {
t.Fatalf("Expected newCCShimConfig to fail as no paths exist")
}
err = createEmptyFile(shimPath)
if err != nil {
t.Error(err)
}
shConfig, err := newShimConfig(shim)
if err != nil {
t.Fatalf("newCCShimConfig failed unexpectedly: %v", err)
}
if shConfig.Path != shimPath {
t.Errorf("Expected shim path %v, got %v", shimPath, shConfig.Path)
}
}
func TestHypervisorDefaults(t *testing.T) {
assert := assert.New(t)
h := hypervisor{}
assert.Equal(h.machineType(), defaultMachineType, "default hypervisor machine type wrong")
assert.Equal(h.defaultVCPUs(), defaultVCPUCount, "default vCPU number is wrong")
assert.Equal(h.defaultMemSz(), defaultMemSize, "default memory size is wrong")
machineType := "foo"
h.MachineType = machineType
assert.Equal(h.machineType(), machineType, "custom hypervisor machine type wrong")
// auto inferring
h.DefaultVCPUs = -1
assert.Equal(h.defaultVCPUs(), uint32(goruntime.NumCPU()), "default vCPU number is wrong")
h.DefaultVCPUs = 2
assert.Equal(h.defaultVCPUs(), uint32(2), "default vCPU number is wrong")
numCPUs := goruntime.NumCPU()
h.DefaultVCPUs = int32(numCPUs) + 1
assert.Equal(h.defaultVCPUs(), uint32(numCPUs), "default vCPU number is wrong")
h.DefaultMemSz = 1024
assert.Equal(h.defaultMemSz(), uint32(1024), "default memory size is wrong")
}
func TestHypervisorDefaultsHypervisor(t *testing.T) {
assert := assert.New(t)
tmpdir, err := ioutil.TempDir(testDir, "")
assert.NoError(err)
defer os.RemoveAll(tmpdir)
testHypervisorPath := filepath.Join(tmpdir, "hypervisor")
testHypervisorLinkPath := filepath.Join(tmpdir, "hypervisor-link")
err = createEmptyFile(testHypervisorPath)
assert.NoError(err)
err = syscall.Symlink(testHypervisorPath, testHypervisorLinkPath)
assert.NoError(err)
savedHypervisorPath := defaultHypervisorPath
defer func() {
defaultHypervisorPath = savedHypervisorPath
}()
defaultHypervisorPath = testHypervisorPath
h := hypervisor{}
p, err := h.path()
assert.NoError(err)
assert.Equal(p, defaultHypervisorPath, "default hypervisor path wrong")
// test path resolution
defaultHypervisorPath = testHypervisorLinkPath
h = hypervisor{}
p, err = h.path()
assert.NoError(err)
assert.Equal(p, testHypervisorPath)
}
func TestHypervisorDefaultsKernel(t *testing.T) {
assert := assert.New(t)
tmpdir, err := ioutil.TempDir(testDir, "")
assert.NoError(err)
defer os.RemoveAll(tmpdir)
testKernelPath := filepath.Join(tmpdir, "kernel")
testKernelLinkPath := filepath.Join(tmpdir, "kernel-link")
err = createEmptyFile(testKernelPath)
assert.NoError(err)
err = syscall.Symlink(testKernelPath, testKernelLinkPath)
assert.NoError(err)
savedKernelPath := defaultKernelPath
defer func() {
defaultKernelPath = savedKernelPath
}()
defaultKernelPath = testKernelPath
h := hypervisor{}
p, err := h.kernel()
assert.NoError(err)
assert.Equal(p, defaultKernelPath, "default Kernel path wrong")
// test path resolution
defaultKernelPath = testKernelLinkPath
h = hypervisor{}
p, err = h.kernel()
assert.NoError(err)
assert.Equal(p, testKernelPath)
assert.Equal(h.kernelParams(), defaultKernelParams, "default hypervisor image wrong")
kernelParams := "foo=bar xyz"
h.KernelParams = kernelParams
assert.Equal(h.kernelParams(), kernelParams, "custom hypervisor kernel parameterms wrong")
}
func TestHypervisorDefaultsImage(t *testing.T) {
assert := assert.New(t)
tmpdir, err := ioutil.TempDir(testDir, "")
assert.NoError(err)
defer os.RemoveAll(tmpdir)
testImagePath := filepath.Join(tmpdir, "image")
testImageLinkPath := filepath.Join(tmpdir, "image-link")
err = createEmptyFile(testImagePath)
assert.NoError(err)
err = syscall.Symlink(testImagePath, testImageLinkPath)
assert.NoError(err)
savedImagePath := defaultImagePath
defer func() {
defaultImagePath = savedImagePath
}()
defaultImagePath = testImagePath
h := hypervisor{}
p, err := h.image()
assert.NoError(err)
assert.Equal(p, defaultImagePath, "default Image path wrong")
// test path resolution
defaultImagePath = testImageLinkPath
h = hypervisor{}
p, err = h.image()
assert.NoError(err)
assert.Equal(p, testImagePath)
}
func TestProxyDefaults(t *testing.T) {
p := proxy{}
assert.Equal(t, p.path(), defaultProxyPath, "default proxy path wrong")
path := "/foo/bar/baz/proxy"
p.Path = path
assert.Equal(t, p.path(), path, "custom proxy path wrong")
}
func TestShimDefaults(t *testing.T) {
assert := assert.New(t)
tmpdir, err := ioutil.TempDir(testDir, "")
assert.NoError(err)
defer os.RemoveAll(tmpdir)
testShimPath := filepath.Join(tmpdir, "shim")
testShimLinkPath := filepath.Join(tmpdir, "shim-link")
err = createEmptyFile(testShimPath)
assert.NoError(err)
err = syscall.Symlink(testShimPath, testShimLinkPath)
assert.NoError(err)
savedShimPath := defaultShimPath
defer func() {
defaultShimPath = savedShimPath
}()
defaultShimPath = testShimPath
s := shim{}
p, err := s.path()
assert.NoError(err)
assert.Equal(p, defaultShimPath, "default shim path wrong")
// test path resolution
defaultShimPath = testShimLinkPath
s = shim{}
p, err = s.path()
assert.NoError(err)
assert.Equal(p, testShimPath)
assert.False(s.debug())
s.Debug = true
assert.True(s.debug())
}
func TestGetDefaultConfigFilePaths(t *testing.T) {
assert := assert.New(t)
results := getDefaultConfigFilePaths()
// There should be atleast two config file locations
assert.True(len(results) >= 2)
for _, f := range results {
// Paths cannot be empty
assert.NotNil(f)
}
}
func TestGetDefaultConfigFile(t *testing.T) {
assert := assert.New(t)
tmpdir, err := ioutil.TempDir(testDir, "")
assert.NoError(err)
defer os.RemoveAll(tmpdir)
hypervisor := "qemu"
confDir := filepath.Join(tmpdir, "conf")
sysConfDir := filepath.Join(tmpdir, "sysconf")
for _, dir := range []string{confDir, sysConfDir} {
err = os.MkdirAll(dir, testDirMode)
assert.NoError(err)
}
confDirConfig, err := createAllRuntimeConfigFiles(confDir, hypervisor)
assert.NoError(err)
sysConfDirConfig, err := createAllRuntimeConfigFiles(sysConfDir, hypervisor)
assert.NoError(err)
savedConf := defaultRuntimeConfiguration
savedSysConf := defaultSysConfRuntimeConfiguration
defaultRuntimeConfiguration = confDirConfig.ConfigPath
defaultSysConfRuntimeConfiguration = sysConfDirConfig.ConfigPath
defer func() {
defaultRuntimeConfiguration = savedConf
defaultSysConfRuntimeConfiguration = savedSysConf
}()
got, err := getDefaultConfigFile()
assert.NoError(err)
// defaultSysConfRuntimeConfiguration has priority over defaultRuntimeConfiguration
assert.Equal(got, defaultSysConfRuntimeConfiguration)
// force defaultRuntimeConfiguration to be returned
os.Remove(defaultSysConfRuntimeConfiguration)
got, err = getDefaultConfigFile()
assert.NoError(err)
assert.Equal(got, defaultRuntimeConfiguration)
// force error
os.Remove(defaultRuntimeConfiguration)
_, err = getDefaultConfigFile()
assert.Error(err)
}
func TestDefaultBridges(t *testing.T) {
assert := assert.New(t)
h := hypervisor{DefaultBridges: 0}
bridges := h.defaultBridges()
assert.Equal(defaultBridgesCount, bridges)
h.DefaultBridges = maxPCIBridges + 1
bridges = h.defaultBridges()
assert.Equal(maxPCIBridges, bridges)
h.DefaultBridges = maxPCIBridges
bridges = h.defaultBridges()
assert.Equal(maxPCIBridges, bridges)
}
func TestDefaultFirmware(t *testing.T) {
assert := assert.New(t)
f, err := ioutil.TempFile(os.TempDir(), "qboot.bin")
assert.NoError(err)
assert.NoError(f.Close())
defer os.RemoveAll(f.Name())
h := hypervisor{}
defaultFirmwarePath = ""
p, err := h.firmware()
assert.NoError(err)
assert.Empty(p)
defaultFirmwarePath = f.Name()
p, err = h.firmware()
assert.NoError(err)
assert.NotEmpty(p)
}
func TestDefaultMachineAccelerators(t *testing.T) {
assert := assert.New(t)
machineAccelerators := "abc,123,rgb"
h := hypervisor{MachineAccelerators: machineAccelerators}
assert.Equal(machineAccelerators, h.machineAccelerators())
machineAccelerators = ""
h.MachineAccelerators = machineAccelerators
assert.Equal(machineAccelerators, h.machineAccelerators())
machineAccelerators = "abc"
h.MachineAccelerators = machineAccelerators
assert.Equal(machineAccelerators, h.machineAccelerators())
machineAccelerators = "abc,123"
h.MachineAccelerators = "abc,,123"
assert.Equal(machineAccelerators, h.machineAccelerators())
machineAccelerators = "abc,123"
h.MachineAccelerators = ",,abc,,123,,,"
assert.Equal(machineAccelerators, h.machineAccelerators())
machineAccelerators = "abc,123"
h.MachineAccelerators = "abc,,123,,,"
assert.Equal(machineAccelerators, h.machineAccelerators())
machineAccelerators = "abc"
h.MachineAccelerators = ",,abc,"
assert.Equal(machineAccelerators, h.machineAccelerators())
machineAccelerators = "abc"
h.MachineAccelerators = ", , abc , ,"
assert.Equal(machineAccelerators, h.machineAccelerators())
machineAccelerators = "abc"
h.MachineAccelerators = " abc "
assert.Equal(machineAccelerators, h.machineAccelerators())
machineAccelerators = "abc,123"
h.MachineAccelerators = ", abc , 123 ,"
assert.Equal(machineAccelerators, h.machineAccelerators())
machineAccelerators = "abc,123"
h.MachineAccelerators = ",, abc ,,, 123 ,,"
assert.Equal(machineAccelerators, h.machineAccelerators())
}
func TestUpdateRuntimeConfiguration(t *testing.T) {
assert := assert.New(t)
assert.NotEqual(defaultAgent, vc.KataContainersAgent)
config := oci.RuntimeConfig{}
tomlConf := tomlConfig{
Agent: map[string]agent{
// force a non-default value
kataAgentTableType: {},
},
}
assert.NotEqual(config.AgentType, vc.AgentType(kataAgentTableType))
assert.NotEqual(config.AgentConfig, vc.KataAgentConfig{})
err := updateRuntimeConfig("", tomlConf, &config)
assert.NoError(err)
assert.Equal(config.AgentType, vc.AgentType(kataAgentTableType))
assert.Equal(config.AgentConfig, vc.KataAgentConfig{})
}
func TestUpdateRuntimeConfigurationVMConfig(t *testing.T) {
assert := assert.New(t)
vcpus := uint(2)
mem := uint(2048)
config := oci.RuntimeConfig{}
expectedVMConfig := vc.Resources{
Memory: mem,
}
tomlConf := tomlConfig{
Hypervisor: map[string]hypervisor{
qemuHypervisorTableType: {
DefaultVCPUs: int32(vcpus),
DefaultMemSz: uint32(mem),
Path: "/",
Kernel: "/",
Image: "/",
Firmware: "/",
},
},
}
err := updateRuntimeConfig("", tomlConf, &config)
assert.NoError(err)
assert.Equal(expectedVMConfig, config.VMConfig)
} | }
|
taskgroup_types.go | /*
Copyright 2020 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
import (
v1beta1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// +genclient
// +genclient:noStatus
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// TaskGroup iteratively executes a Task over elements in an array.
// +k8s:openapi-gen=true
type TaskGroup struct {
metav1.TypeMeta `json:",inline"`
// +optional
metav1.ObjectMeta `json:"metadata"`
// Spec holds the desired state of the TaskGroup from the client
// +optional
Spec TaskGroupSpec `json:"spec"`
}
// TaskGroupSpec defines the desired state of the TaskGroup
type TaskGroupSpec struct {
// FIXME(vdemeester): define a spec
// Params is a list of input parameters required to run the task. Params
// must be supplied as inputs in TaskRuns unless they declare a default
// value.
// +optional
Params []v1beta1.ParamSpec `json:"params,omitempty"`
// Description is a user-facing description of the task that may be
// used to populate a UI.
// +optional
Description string `json:"description,omitempty"`
// Steps are the steps of the build; each step is run sequentially with the
// source mounted into /workspace.
Steps []Step `json:"steps,omitempty"`
// Volumes is a collection of volumes that are available to mount into the
// steps of the build.
Volumes []corev1.Volume `json:"volumes,omitempty"`
// StepTemplate can be used as the basis for all step containers within the
// Task, so that the steps inherit settings on the base container.
StepTemplate *corev1.Container `json:"stepTemplate,omitempty"`
// Sidecars are run alongside the Task's step containers. They begin before
// the steps start and end after the steps complete.
Sidecars []v1beta1.Sidecar `json:"sidecars,omitempty"`
// Workspaces are the volumes that this Task requires.
Workspaces []v1beta1.WorkspaceDeclaration `json:"workspaces,omitempty"`
// Results are values that this Task can output
Results []v1beta1.TaskResult `json:"results,omitempty"`
}
type Step struct {
v1beta1.Step `json:",inline"`
// +optional
Uses *Uses `json:"uses"`
}
type Uses struct {
TaskRef v1beta1.TaskRef `json:"taskRef"`
ParamBindings []ParamBinding `json:"parambindings"`
WorkspaceBindings []WorkspaceBinding `json:"workspacebindings"`
}
type ParamBinding struct {
Name string `json:"name"`
Param string `json:"param"`
}
type WorkspaceBinding struct {
Name string `json:"name"`
Workspace string `json:"workspace"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// TaskGroupList contains a list of TaskGroups
type TaskGroupList struct {
metav1.TypeMeta `json:",inline"`
// +optional | }
// TaskGroupRunReason represents a reason for the Run "Succeeded" condition
type TaskGroupRunReason string
const (
// TaskGroupRunReasonStarted is the reason set when the Run has just started
TaskGroupRunReasonStarted TaskGroupRunReason = "Started"
// TaskGroupRunReasonRunning indicates that the Run is in progress
TaskGroupRunReasonRunning TaskGroupRunReason = "Running"
// TaskGroupRunReasonFailed indicates that one of the TaskRuns created from the Run failed
TaskGroupRunReasonFailed TaskGroupRunReason = "Failed"
// TaskGroupRunReasonSucceeded indicates that all of the TaskRuns created from the Run completed successfully
TaskGroupRunReasonSucceeded TaskGroupRunReason = "Succeeded"
// TaskGroupRunReasonCouldntCancel indicates that a Run was cancelled but attempting to update
// the running TaskRun as cancelled failed.
TaskGroupRunReasonCouldntCancel TaskGroupRunReason = "TaskGroupRunCouldntCancel"
// TaskGroupRunReasonCouldntGetTaskGroup indicates that the associated TaskGroup couldn't be retrieved
TaskGroupRunReasonCouldntGetTaskGroup TaskGroupRunReason = "CouldntGetTaskGroup"
// TaskGroupRunReasonFailedValidation indicates that the TaskGroup failed runtime validation
TaskGroupRunReasonFailedValidation TaskGroupRunReason = "TaskGroupValidationFailed"
// TaskGroupRunReasonInternalError indicates that the TaskGroup failed due to an internal error in the reconciler
TaskGroupRunReasonInternalError TaskGroupRunReason = "TaskGroupInternalError"
)
func (t TaskGroupRunReason) String() string {
return string(t)
}
// TaskGroupRunStatus contains the status stored in the ExtraFields of a Run that references a TaskGroup.
type TaskGroupRunStatus struct {
// TaskGroupSpec contains the exact spec used to instantiate the Run
// FIXME(vdemeester) can probably remove
TaskGroupSpec *TaskGroupSpec `json:"taskLoopSpec,omitempty"`
// +optional
TaskRun *v1beta1.TaskRunStatus `json:"status,omitempty"`
} | metav1.ListMeta `json:"metadata,omitempty"`
Items []TaskGroup `json:"items"` |
error.rs | use serde::ser::{Serialize, SerializeMap, Serializer};
use thiserror::Error;
use ibc::core::ics24_host::{error::ValidationErrorDetail, identifier::ChainId};
#[derive(Error, Debug)]
pub enum RestApiError {
#[error("failed to send a request through crossbeam channel: {0}")]
ChannelSend(String),
| ChannelRecv(String),
#[error("failed while serializing reply into json value: {0}")]
Serialization(String),
#[error("could not find configuration for chain: {0}")]
ChainConfigNotFound(ChainId),
#[error("failed to parse the string {0} into a valid chain identifier: {1}")]
InvalidChainId(String, ValidationErrorDetail),
#[error("failed while parsing the request body into a chain configuration: {0}")]
InvalidChainConfig(String),
#[error("not implemented")]
Unimplemented,
}
impl RestApiError {
pub fn name(&self) -> &'static str {
match self {
RestApiError::ChannelSend(_) => "ChannelSend",
RestApiError::ChannelRecv(_) => "ChannelRecv",
RestApiError::Serialization(_) => "Serialization",
RestApiError::ChainConfigNotFound(_) => "ChainConfigNotFound",
RestApiError::InvalidChainId(_, _) => "InvalidChainId",
RestApiError::InvalidChainConfig(_) => "InvalidChainConfig",
RestApiError::Unimplemented => "Unimplemented",
}
}
}
impl Serialize for RestApiError {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
let mut map = serializer.serialize_map(Some(3))?;
map.serialize_entry("name", self.name())?;
map.serialize_entry("msg", &self.to_string())?;
map.end()
}
} | #[error("failed to receive a reply from crossbeam channel: {0}")] |
pickAttrs.ts | const attributes = `accept acceptCharset accessKey action allowFullScreen allowTransparency
alt async autoComplete autoFocus autoPlay capture cellPadding cellSpacing challenge
charSet checked classID className colSpan cols content contentEditable contextMenu
controls coords crossOrigin data dateTime default defer dir disabled download draggable
encType form formAction formEncType formMethod formNoValidate formTarget frameBorder
headers height hidden high href hrefLang htmlFor httpEquiv icon id inputMode integrity
is keyParams keyType kind label lang list loop low manifest marginHeight marginWidth max maxLength media
mediaGroup method min minLength multiple muted name noValidate nonce open
optimum pattern placeholder poster preload radioGroup readOnly rel required
reversed role rowSpan rows sandbox scope scoped scrolling seamless selected
shape size sizes span spellCheck src srcDoc srcLang srcSet start step style
summary tabIndex target title type useMap value width wmode wrap`;
const eventsName = `onCopy onCut onPaste onCompositionEnd onCompositionStart onCompositionUpdate onKeyDown
onKeyPress onKeyUp onFocus onBlur onChange onInput onSubmit onClick onContextMenu onDoubleClick
onDrag onDragEnd onDragEnter onDragExit onDragLeave onDragOver onDragStart onDrop onMouseDown
onMouseEnter onMouseLeave onMouseMove onMouseOut onMouseOver onMouseUp onSelect onTouchCancel
onTouchEnd onTouchMove onTouchStart onScroll onWheel onAbort onCanPlay onCanPlayThrough
onDurationChange onEmptied onEncrypted onEnded onError onLoadedData onLoadedMetadata
onLoadStart onPause onPlay onPlaying onProgress onRateChange onSeeked onSeeking onStalled onSuspend onTimeUpdate onVolumeChange onWaiting onLoad onError`;
const propMap = `${attributes} ${eventsName}`
.split(/[\s\n]+/)
.reduce((prev, curr) => {
prev[curr] = true;
return prev;
}, {});
/* eslint-enable max-len */
const ariaPrefix = 'aria-';
const dataPrefix = 'data-';
function match(key: string, prefix: string) {
return key.indexOf(prefix) === 0;
}
export interface PickConfig {
aria?: boolean;
data?: boolean;
attr?: boolean;
}
/**
* Picker props from exist props with filter
* @param props Passed props
* @param ariaOnly boolean | { aria?: boolean; data?: boolean; attr?: boolean; } filter config
*/
export default function | (
props: object,
ariaOnly: boolean | PickConfig = false,
) {
let mergedConfig: PickConfig;
if (ariaOnly === false) {
mergedConfig = {
aria: true,
data: true,
attr: true,
};
} else if (ariaOnly === true) {
mergedConfig = {
aria: true,
};
} else {
mergedConfig = {
...ariaOnly,
};
}
const attrs = {};
Object.keys(props).forEach(key => {
if (
// Aria
(mergedConfig.aria && (key === 'role' || match(key, ariaPrefix))) ||
// Data
(mergedConfig.data && match(key, dataPrefix)) ||
// Attr
(mergedConfig.attr && propMap.hasOwnProperty(key))
) {
attrs[key] = props[key];
}
});
return attrs;
}
| pickAttrs |
api_op_GetVoiceConnectorEmergencyCallingConfiguration.go | // Code generated by smithy-go-codegen DO NOT EDIT.
package chime
import (
"context"
awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
"github.com/aws/aws-sdk-go-v2/aws/signer/v4"
"github.com/aws/aws-sdk-go-v2/service/chime/types"
"github.com/awslabs/smithy-go/middleware"
smithyhttp "github.com/awslabs/smithy-go/transport/http"
)
// Gets the emergency calling configuration details for the specified Amazon Chime
// Voice Connector.
func (c *Client) GetVoiceConnectorEmergencyCallingConfiguration(ctx context.Context, params *GetVoiceConnectorEmergencyCallingConfigurationInput, optFns ...func(*Options)) (*GetVoiceConnectorEmergencyCallingConfigurationOutput, error) {
if params == nil {
params = &GetVoiceConnectorEmergencyCallingConfigurationInput{}
}
result, metadata, err := c.invokeOperation(ctx, "GetVoiceConnectorEmergencyCallingConfiguration", params, optFns, addOperationGetVoiceConnectorEmergencyCallingConfigurationMiddlewares)
if err != nil {
return nil, err
}
out := result.(*GetVoiceConnectorEmergencyCallingConfigurationOutput)
out.ResultMetadata = metadata
return out, nil
}
type GetVoiceConnectorEmergencyCallingConfigurationInput struct {
// The Amazon Chime Voice Connector ID.
//
// This member is required.
VoiceConnectorId *string
}
type GetVoiceConnectorEmergencyCallingConfigurationOutput struct {
// The emergency calling configuration details.
EmergencyCallingConfiguration *types.EmergencyCallingConfiguration
// Metadata pertaining to the operation's result.
ResultMetadata middleware.Metadata
}
func addOperationGetVoiceConnectorEmergencyCallingConfigurationMiddlewares(stack *middleware.Stack, options Options) (err error) |
func newServiceMetadataMiddleware_opGetVoiceConnectorEmergencyCallingConfiguration(region string) awsmiddleware.RegisterServiceMetadata {
return awsmiddleware.RegisterServiceMetadata{
Region: region,
ServiceID: ServiceID,
SigningName: "chime",
OperationName: "GetVoiceConnectorEmergencyCallingConfiguration",
}
}
| {
err = stack.Serialize.Add(&awsRestjson1_serializeOpGetVoiceConnectorEmergencyCallingConfiguration{}, middleware.After)
if err != nil {
return err
}
err = stack.Deserialize.Add(&awsRestjson1_deserializeOpGetVoiceConnectorEmergencyCallingConfiguration{}, middleware.After)
if err != nil {
return err
}
awsmiddleware.AddRequestInvocationIDMiddleware(stack)
smithyhttp.AddContentLengthMiddleware(stack)
addResolveEndpointMiddleware(stack, options)
v4.AddComputePayloadSHA256Middleware(stack)
addRetryMiddlewares(stack, options)
addHTTPSignerV4Middleware(stack, options)
awsmiddleware.AddAttemptClockSkewMiddleware(stack)
addClientUserAgent(stack)
smithyhttp.AddErrorCloseResponseBodyMiddleware(stack)
smithyhttp.AddCloseResponseBodyMiddleware(stack)
addOpGetVoiceConnectorEmergencyCallingConfigurationValidationMiddleware(stack)
stack.Initialize.Add(newServiceMetadataMiddleware_opGetVoiceConnectorEmergencyCallingConfiguration(options.Region), middleware.Before)
addRequestIDRetrieverMiddleware(stack)
addResponseErrorMiddleware(stack)
return nil
} |
options.dto.js | "use strict";
var __decorate = (this && this.__decorate) || function (decorators, target, key, desc) {
var c = arguments.length, r = c < 3 ? target : desc === null ? desc = Object.getOwnPropertyDescriptor(target, key) : desc, d;
if (typeof Reflect === "object" && typeof Reflect.decorate === "function") r = Reflect.decorate(decorators, target, key, desc);
else for (var i = decorators.length - 1; i >= 0; i--) if (d = decorators[i]) r = (c < 3 ? d(r) : c > 3 ? d(target, key, r) : d(target, key)) || r;
return c > 3 && r && Object.defineProperty(target, key, r), r;
};
var __metadata = (this && this.__metadata) || function (k, v) {
if (typeof Reflect === "object" && typeof Reflect.metadata === "function") return Reflect.metadata(k, v);
};
Object.defineProperty(exports, "__esModule", { value: true });
exports.optionsDto = void 0;
const class_validator_1 = require("class-validator");
class | {
}
__decorate([
class_validator_1.IsNotEmpty(),
__metadata("design:type", String)
], optionsDto.prototype, "name", void 0);
__decorate([
class_validator_1.IsNotEmpty(),
__metadata("design:type", String)
], optionsDto.prototype, "value", void 0);
__decorate([
class_validator_1.IsNotEmpty(),
__metadata("design:type", String)
], optionsDto.prototype, "questions_id", void 0);
__decorate([
class_validator_1.IsNotEmpty(),
__metadata("design:type", Number)
], optionsDto.prototype, "score_value", void 0);
exports.optionsDto = optionsDto;
//# sourceMappingURL=options.dto.js.map | optionsDto |
oclass_utils.py | #!/usr/bin/env python3
"""
(C) Copyright 2018-2022 Intel Corporation.
SPDX-License-Identifier: BSD-2-Clause-Patent
"""
import re
def | (oclass):
"""Extract the redundancy factor from an object class.
Args:
oclass (str): the object class.
Returns:
int: the redundancy factor.
"""
match = re.search("EC_[0-9]+P([0-9])+", oclass)
if match:
return int(match.group(1))
match = re.search("RP_([0-9]+)", oclass)
if match:
return int(match.group(1)) - 1
return 0
def calculate_min_servers(oclass):
"""Calculate the minimum number of required servers for an object class.
Args:
oclass (str): the object class.
Returns:
int: the minimum number of required servers.
"""
patterns = [
"EC_([0-9]+)P([0-9])+",
"RP_([0-9]+)"
]
for pattern in patterns:
# Findall returns a list where each element is a tuple of groups ()
match = re.findall(pattern, oclass)
if match:
# Sum all groups (). Only index 0 should exist.
return sum(int(n) for n in match[0])
return 1
| extract_redundancy_factor |
terminalAndDownload.js | import m from 'mithril'
import { app } from '../main'
import * as qs from 'qs'
import io from 'socket.io-client'
import { getSuggestions } from '../views/single'
const state = {
progress: 0, // Current percentage of download progress
loading: false
}
var stdout = []
// Initialize socket
const socket = io()
// Listen on socket. Sockets automatically join a room identified by their id. This room is where the server emits the stdout stream.
socket.on('connect', () => {
// Output stdout to text area
socket.on('console_stdout', data => {
// Regex to match only the download % from stdout. Download % is not present in all output lines.
const match = data.match(/([0-9]{1,3}.[0-9])\%/)
// Use capture group 1 to get value without the % symbol.
match ? state.progress = match[1] : null
// Append console stdout to array. Get new length of the array
const length = stdout.push(data)
// Maintain specific array length (lines in the output)
if (length > 6) {
stdout.shift()
}
m.redraw() // Manually trigger Mithril redraw so textarea gets updated live
})
// Download progress (on complete)
socket.on('download_complete', fileName => {
// document.getElementById('download').classList.remove('is-loading')
state.loading = false
// Fetch file if downloading to browser. fileName contains the file extension.
if (app.prefs.mode == 'browser') {
window.location.href = `/api/download/cache/${fileName}`
}
m.redraw() // Manually trigger redraw in case button gets stuck with is-loading class
})
})
function command(vnode) {
if (stdout[0]) {
return stdout
} else if(vnode.attrs.type == 'single' && app.prefs.format == 'audio') {
return [`youtube-dl -f "bestaudio[ext=m4a]/bestaudio[ext=mp3]" --embed-thumbnail -o "${[...vnode.attrs.path, vnode.attrs.fileName].join('/')}.m4a" ${vnode.attrs.url}`]
} else if(vnode.attrs.type == 'playlist' && app.prefs.format == 'audio') {
return [`youtube-dl -f "bestaudio[ext=m4a]/bestaudio[ext=mp3]" --embed-thumbnail -o "${[...vnode.attrs.path, app.prefs.outputTemplate].join('/')}" ${vnode.attrs.url}`]
} else if(vnode.attrs.type == 'single' && app.prefs.format == 'video') {
return [`youtube-dl -f "bestvideo[height<=?1080]+bestaudio" --merge-output-format "mkv" --write-thumbnail -o "${[...vnode.attrs.path, vnode.attrs.fileName].join('/')}.mkv" ${vnode.attrs.url}`]
} else if(vnode.attrs.type == 'playlist' && app.prefs.format == 'video') {
return [`youtube-dl -f "bestvideo[height<=?1080]+bestaudio" --merge-output-format "mkv" --write-thumbnail -o "${[...vnode.attrs.path, app.prefs.outputTemplate].join('/')}" ${vnode.attrs.url}`]
}
}
// send xhr to begin download
function download(vnode) {
switch (vnode.attrs.type) { | sendDL.url = vnode.attrs.url,
sendDL.fileName = vnode.attrs.fileName,
sendDL.tags = vnode.attrs.tags,
sendDL.socketId = socket.id
// Change path depending on app's download mode setting. A normal path = download to directory. 'false' = download to browser.
if (app.prefs.mode == 'browser') {
sendDL.path = 'false'
} else {
sendDL.path = vnode.attrs.path.join('/')
}
// If a genre was input, add it to the suggestions
if (app.prefs.format == 'audio' && vnode.attrs.tags.genre) {
m.request({
method: 'PUT',
responseType: 'json',
url: '/api/suggest/genre/:name',
params: {name: vnode.attrs.tags.genre}
}).then(response => {
getSuggestions()
}).catch(e => console.error(e))
}
// Send download request
m.request({
method: 'POST',
responseType: 'json',
url: '/api/download',
params: sendDL
}).then(response => {}).catch(e => console.error(e)) // response is handled via socket.io
break
case 'playlist':
state.loading = true
var sendDL = {...app.prefs}
sendDL.playlistName = vnode.attrs.playlistName,
sendDL.path = vnode.attrs.path.join('/'),
sendDL.socketId = socket.id
// Send download request
m.request({
method: 'POST',
responseType: 'json',
url: '/api/downloadPlaylist',
params: sendDL,
body: {playlistEntries: vnode.attrs.playlistEntries}
}).then(response => {}).catch(e => console.error(e)) // response is handled via socket.io
break
}
}
function ready(vnode) {
var a = vnode.attrs
// Enable download button if field contents for current tab are valid
switch (a.type) {
case 'single':
if(a.url && a.fileName && ((app.prefs.format == 'audio' && a.tags.artist && a.tags.title) || app.prefs.format == 'video')) {
// If single audio download then also check tag values
return true
} else {
return false
}
break
case 'playlist':
if (a.url && a.playlistName && app.prefs.format && app.prefs.outputTemplate) {
return true
} else {
return false
}
break
}
}
export default {
view: vnode => m('div', [
m('div', {class: 'field'}, [
m('label', {class: 'label'}, m('i', {class: 'fas fa-terminal'})),
m('div', {class: 'control'}, [
m('progress', {
class: 'progress is-info',
style: `margin-bottom: 0;
height: 0.5em;
position: absolute;
border-radius: 0;
border-top-left-radius: 6px;
border-top-right-radius: 6px;`,
value: state.progress,
max: 100
}),
m('div', {class: 'box', style:'font-family: monospace; white-space: nowrap; overflow-x: scroll;'}, [
command(vnode).map(item =>
// Remove the prepended app tags from youtube-dl. E.g. the "[youtube]" in "[youtube] id: Downloading thumbnail"
m('div', {style: 'width: 1em'}, item.replace(/\[[a-z]+\] +/i, ''))
)
])
])
]),
m('button', {
class: 'button is-info is-fullwidth' + (state.loading ? ' is-loading' : ''),
disabled: !ready(vnode),
onclick: event => download(vnode)
}, [
m('span', {class: 'icon'}, m('i', {class: 'fas fa-download'})),
m('span', 'Download')
])
])
} | case 'single':
state.loading = true
var sendDL = {...app.prefs} |
mesh.py | import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
import numpy as np
class Mesh(object):
def __init__(self, mesh_cells, domain_upper, mat_map):
assert type(mesh_cells) == int, "mesh_cells must be an int"
self._mesh_params = {'x_cell': mesh_cells,
'cell_length': float(domain_upper)/float(mesh_cells)}
self._mat_map = mat_map
i = np.repeat(np.arange(mesh_cells), mesh_cells)
j = np.tile(np.arange(mesh_cells), mesh_cells)
idxs = zip(i,j)
self._cells = []
for idx in idxs:
self._cells.append(Cell(idx, self._mesh_params, mat_map))
# Save parameters
self._n_cell = mesh_cells**2
assert self._n_cell == len(self._cells),\
"Cell array incorrect length"
self._x_cell = mesh_cells
self._y_cell = mesh_cells
self._x_node = mesh_cells + 1
self._y_node = mesh_cells + 1
self._n_node = self._x_node * self._y_node
self._cell_length = self._mesh_params['cell_length']
def soln_plot(self, solution, plot = True): # pragma: no cover
# Plot a given solution
return self.__plot__(solution, plot)
def test_plot(self, plot = False):
# Plot a test solution of length n_cells
solution = np.zeros(self._n_node)
for cell in self._cells:
for idx in cell.global_idx():
solution[idx] += 0.5
return self.__plot__(solution, plot)
def __plot__(self, solution, plot):
xs = []
ys = []
zs = []
for i,s in enumerate(solution):
x, y = self.__idx_to_xy__(i)
xs.append(x)
ys.append(y)
zs.append(s)
if plot: # pragma: no cover
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
X = np.reshape(xs, (self._x_node, self._x_node))
Y = np.reshape(ys, (self._x_node, self._x_node))
Z = np.reshape(zs, (self._x_node, self._x_node))
rstride = int(self._x_node/50) + 1
cstride = int(self._x_node/50) + 1
surf = ax.plot_surface(X,Y,Z, cmap=cm.coolwarm, rstride=rstride,
cstride=cstride, linewidth=0, antialiased=False)
fig.colorbar(surf)
plt.show()
return fig
else:
return xs, ys, zs
def __idx_to_xy__(self, idx):
y = self._cell_length*int(idx/self._x_node)
x = self._cell_length*int(idx % self._y_node)
return (x,y)
def cell_length(self):
return self._cell_length
def cells(self):
return self._cells
def n_cell(self):
return self._n_cell
def n_node(self):
return self._n_node
def x_cell(self):
return self._x_cell
def x_node(self):
return self._x_node
def y_cell(self):
return self._y_cell
def y_node(self):
return self._y_node
class Cell(object):
""" A single cell in the mesh, holds location and material data """
def __init__(self, index, mesh_params, mat_map=None):
""" Cell constructor, give index in a tuple (i,j) """
# Constructor validations
assert isinstance(index, tuple), "Index must be a tuple"
assert len(index) == 2, "Index must be a length 2 tuple"
try:
assert mesh_params['cell_length'] > 0, "cell_length must be greater than 0"
except KeyError:
raise KeyError("Missing 'cell_length' parameter in mesh_params")
self._index = index
try:
self._length = float(mesh_params['cell_length'])
except ValueError:
raise TypeError("cell_length parameter must be a number")
# Calculate global_idx
x_node = mesh_params['x_cell'] + 1
i,j = index[0], index[1]
self._global_idx = [x_node*i + j,
x_node*i + j + 1,
x_node*(i + 1) + j,
x_node*(i + 1) + j + 1]
# Determine if on a boundary
self._bounds = {}
x_cell = mesh_params['x_cell']
try:
y_cell = mesh_params['y_cell']
except KeyError:
y_cell = x_cell
# Verify cell is in the mesh
assert i < x_cell, "Cell i exceeds num of x nodes"
assert j < y_cell, "Cell j exceeds num of y nodes"
if index[0] == 0:
self._bounds.update({'x_min': None})
if index[0] == y_cell - 1:
self._bounds.update({'x_max': None})
if index[1] == 0:
self._bounds.update({'y_min': None})
if index[1] == x_cell - 1:
self._bounds.update({'y_max': None})
# Get material properties
if mat_map:
assert (mat_map.dx * mat_map.n) == (x_cell * self._length),\
"Material map and cells must have the same total x length"
assert (mat_map.dy * mat_map.n) == (y_cell * self._length),\
"Material map and cells must have the same total y length"
self._mat_map = mat_map
# UTILITY FUNCTIONS ================================================
# MATERIAL PROPERTIES ==============================================
def get(self, prop):
try:
x = self._length*(self._index[0] + 0.5)
y = self._length*(self._index[1] + 0.5)
return self._mat_map.get(prop, loc=(x,y))
except AttributeError:
raise AttributeError("This cell has no material map assigned")
# ATTRIBUTES =======================================================
def bounds(self, bound=None, value=None):
if bound and bound in self._bounds:
if value:
self._bounds[bound] = value
else:
return self._bounds[bound]
elif bound and not bound in self._bounds:
raise KeyError("Cell does not have bound " + str(bound))
else:
return self._bounds
def global_idx(self):
""" Returns global index, a list of the node indices """
return self._global_idx
def index(self):
|
def length(self):
return self._length
| return self._index |
mod.rs | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR MIT
//! This module provides utils used across RMC
mod debug;
mod names;
mod utils;
// TODO clean this up
pub use names::*;
pub use utils::*;
pub fn init() | {
debug::init()
} |
|
Home.tsx | import React, { useState, useMemo, useEffect } from 'react'
import { useGetBlogs } from '../../utils/apis'
import BlogPost from '../common/BlogPost'
import { NO_OF_FEEDS } from '../../constants/configs'
import { useLocation } from 'react-router'
import { IBlogFeedsQueryParams, IGenericObject, IRootState } from '../../interfaces'
import { useGetPrevious, decodeSlug } from '../../utils/common'
import { useSelector } from 'react-redux'
import { navigateTo } from '../../utils/router'
const Home = () => {
const [page, setPage] = useState(1)
useEffect(() => window.scrollTo(0, 0), [])
const { pathname: locationPath } = useLocation()
const [routeType, routeSlug] = locationPath.split('/').slice(1)
const previousLocationPath = useGetPrevious(locationPath)
const isPathChanged = previousLocationPath !== locationPath
const blogFeedsParam: IBlogFeedsQueryParams = { page }
let showPathInfo = false
if(routeType && routeSlug) {
(blogFeedsParam as IGenericObject<any>)[routeType] = routeSlug | const categories = useSelector((state: IRootState) => state.blog.categories) || []
const topTags = useSelector((state: IRootState) => state.blog.topTags) || []
const totalPosts = useSelector((state: IRootState) => state.blog.totalBlogs) || []
const pathInfo = useMemo(
() => showPathInfo ?
`${routeType} Archives: ${
(((routeType === 'category' ? categories: topTags)
.find(({slug}) => slug === routeSlug) || {})['name']) ||
decodeSlug(routeSlug)
}` :
'',
[showPathInfo, routeType, routeSlug, categories, topTags]
)
return (
<div className="feeds-container">
{
pathInfo && (
<h4 className="path-info">
{pathInfo}
</h4>
)
}
{
(blogFeeds.isLoading || isPathChanged) ?
(<h2>Loading...</h2>) :
blogFeeds.data ?
(<>
{
blogFeeds.data.map(
blog => (
<div
className="blog-feed-navigation"
key={`${blog.slug}-${blog.ID}`}
onClick={() => navigateTo(`/post/${blog.ID}/${blog.slug}`)}
>
<BlogPost post={blog}/>
<div className="click-continue">Continue Reading → </div>
</div>
)
)
}
{
(totalPosts > page * NO_OF_FEEDS) &&
(
<div className="view-more-btn-container">
{
(blogFeeds.data.length === (page - 1) * NO_OF_FEEDS) ?
(<button> Loading... </button>) :
(<button onClick={()=>setPage(page + 1)}> Older Posts </button>)
}
</div>
)
}
</>):
(<h2>Error Loading Data!!</h2>)
}
</div>
)
}
export default Home | showPathInfo = true
}
const blogFeeds = useGetBlogs(blogFeedsParam, !isPathChanged) |
untyped_box.rs | use std::any::Any;
type Value = dyn Any + Send + Sync;
pub(crate) struct UntypedBox {
inner: Box<Value>,
}
impl UntypedBox {
pub(crate) fn new<T: Send + Sync + 'static>(value: T) -> Self {
Self {
inner: Box::new(value),
}
}
#[allow(dead_code)]
/// Safety: T must be some type that used in UntypedBox::new
pub(crate) unsafe fn consume<T>(self) -> Box<T> |
/// Safety: T must be some type that used in UntypedBox::new
pub(crate) unsafe fn get_ref<T>(&self) -> &T {
&*(self.inner.as_ref() as *const Value as *const T)
}
#[allow(dead_code)]
/// Safety: T must be some type that used in UntypedBox::new
pub(crate) unsafe fn get_mut<T>(&mut self) -> &mut T {
&mut *(self.inner.as_mut() as *mut Value as *mut T)
}
}
#[cfg(test)]
mod test {
use super::*;
#[test]
fn untyped_box() {
let mut b = UntypedBox::new(0i32);
assert_eq!(unsafe { *b.get_ref::<i32>() }, 0);
unsafe {
*b.get_mut::<i32>() = 1;
}
assert_eq!(unsafe { *b.get_ref::<i32>() }, 1);
let b = unsafe { b.consume::<i32>() };
assert_eq!(*b, 1);
}
}
| {
let raw = Box::into_raw(self.inner);
Box::from_raw(raw as *mut T)
} |
localservices-gen.go | // Copyright 2020 Google LLC.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Code generated file. DO NOT EDIT.
// Package localservices provides access to the Local Services API.
//
// For product documentation, see: https://ads.google.com/local-services-ads/
//
// Creating a client
//
// Usage example:
//
// import "google.golang.org/api/localservices/v1"
// ...
// ctx := context.Background()
// localservicesService, err := localservices.NewService(ctx)
//
// In this example, Google Application Default Credentials are used for authentication.
//
// For information on how to create and obtain Application Default Credentials, see https://developers.google.com/identity/protocols/application-default-credentials.
//
// Other authentication options
//
// To use an API key for authentication (note: some APIs do not support API keys), use option.WithAPIKey:
//
// localservicesService, err := localservices.NewService(ctx, option.WithAPIKey("AIza..."))
//
// To use an OAuth token (e.g., a user token obtained via a three-legged OAuth flow), use option.WithTokenSource:
//
// config := &oauth2.Config{...}
// // ...
// token, err := config.Exchange(ctx, ...)
// localservicesService, err := localservices.NewService(ctx, option.WithTokenSource(config.TokenSource(ctx, token)))
//
// See https://godoc.org/google.golang.org/api/option/ for details on options.
package localservices // import "google.golang.org/api/localservices/v1"
import (
"bytes"
"context"
"encoding/json"
"errors"
"fmt"
"io"
"net/http"
"net/url"
"strconv"
"strings"
googleapi "google.golang.org/api/googleapi"
gensupport "google.golang.org/api/internal/gensupport"
option "google.golang.org/api/option"
internaloption "google.golang.org/api/option/internaloption"
htransport "google.golang.org/api/transport/http"
)
// Always reference these packages, just in case the auto-generated code
// below doesn't.
var _ = bytes.NewBuffer
var _ = strconv.Itoa
var _ = fmt.Sprintf
var _ = json.NewDecoder
var _ = io.Copy
var _ = url.Parse
var _ = gensupport.MarshalJSON
var _ = googleapi.Version
var _ = errors.New
var _ = strings.Replace
var _ = context.Canceled
var _ = internaloption.WithDefaultEndpoint
const apiId = "localservices:v1"
const apiName = "localservices"
const apiVersion = "v1"
const basePath = "https://localservices.googleapis.com/"
const mtlsBasePath = "https://localservices.mtls.googleapis.com/"
// OAuth2 scopes used by this API.
const (
// Manage your AdWords campaigns
AdwordsScope = "https://www.googleapis.com/auth/adwords"
)
// NewService creates a new Service.
func NewService(ctx context.Context, opts ...option.ClientOption) (*Service, error) {
scopesOption := option.WithScopes(
"https://www.googleapis.com/auth/adwords",
)
// NOTE: prepend, so we don't override user-specified scopes.
opts = append([]option.ClientOption{scopesOption}, opts...)
opts = append(opts, internaloption.WithDefaultEndpoint(basePath))
opts = append(opts, internaloption.WithDefaultMTLSEndpoint(mtlsBasePath))
client, endpoint, err := htransport.NewClient(ctx, opts...)
if err != nil {
return nil, err
}
s, err := New(client)
if err != nil {
return nil, err
}
if endpoint != "" {
s.BasePath = endpoint
}
return s, nil
}
// New creates a new Service. It uses the provided http.Client for requests.
//
// Deprecated: please use NewService instead.
// To provide a custom HTTP client, use option.WithHTTPClient.
// If you are using google.golang.org/api/googleapis/transport.APIKey, use option.WithAPIKey with NewService instead.
func New(client *http.Client) (*Service, error) {
if client == nil {
return nil, errors.New("client is nil")
}
s := &Service{client: client, BasePath: basePath}
s.AccountReports = NewAccountReportsService(s)
s.DetailedLeadReports = NewDetailedLeadReportsService(s)
return s, nil
}
type Service struct {
client *http.Client
BasePath string // API endpoint base URL
UserAgent string // optional additional User-Agent fragment
AccountReports *AccountReportsService
DetailedLeadReports *DetailedLeadReportsService
}
func (s *Service) userAgent() string {
if s.UserAgent == "" {
return googleapi.UserAgent
}
return googleapi.UserAgent + " " + s.UserAgent
}
func NewAccountReportsService(s *Service) *AccountReportsService {
rs := &AccountReportsService{s: s}
return rs
}
type AccountReportsService struct {
s *Service
}
func NewDetailedLeadReportsService(s *Service) *DetailedLeadReportsService {
rs := &DetailedLeadReportsService{s: s}
return rs
}
type DetailedLeadReportsService struct {
s *Service
}
// GoogleAdsHomeservicesLocalservicesV1AccountReport: An Account Report
// of a GLS account identified by their account id containing aggregate
// data gathered from a particular date range.
type GoogleAdsHomeservicesLocalservicesV1AccountReport struct {
// AccountId: Unique identifier of the GLS account.
AccountId int64 `json:"accountId,omitempty,string"`
// AggregatorInfo: Aggregator specific information related to the
// account.
AggregatorInfo *GoogleAdsHomeservicesLocalservicesV1AggregatorInfo `json:"aggregatorInfo,omitempty"`
// AverageFiveStarRating: Average review rating score from 1-5 stars.
AverageFiveStarRating float64 `json:"averageFiveStarRating,omitempty"`
// AverageWeeklyBudget: Average weekly budget in the currency code of
// the account.
AverageWeeklyBudget float64 `json:"averageWeeklyBudget,omitempty"`
// BusinessName: Business name of the account.
BusinessName string `json:"businessName,omitempty"`
// CurrencyCode: Currency code of the account.
CurrencyCode string `json:"currencyCode,omitempty"`
// CurrentPeriodChargedLeads: Number of charged leads the account
// received in current specified period.
CurrentPeriodChargedLeads int64 `json:"currentPeriodChargedLeads,omitempty,string"`
// CurrentPeriodConnectedPhoneCalls: Number of connected phone calls
// (duration over 30s) in current specified period.
CurrentPeriodConnectedPhoneCalls int64 `json:"currentPeriodConnectedPhoneCalls,omitempty,string"`
// CurrentPeriodPhoneCalls: Number of phone calls in current specified
// period, including both connected and unconnected calls.
CurrentPeriodPhoneCalls int64 `json:"currentPeriodPhoneCalls,omitempty,string"`
// CurrentPeriodTotalCost: Total cost of the account in current
// specified period in the account's specified currency.
CurrentPeriodTotalCost float64 `json:"currentPeriodTotalCost,omitempty"`
// PhoneLeadResponsiveness: Phone lead responsiveness of the account for
// the past 90 days from current date. This is computed by taking the
// total number of connected calls from charged phone leads and dividing
// by the total number of calls received.
PhoneLeadResponsiveness float64 `json:"phoneLeadResponsiveness,omitempty"`
// PreviousPeriodChargedLeads: Number of charged leads the account
// received in previous specified period.
PreviousPeriodChargedLeads int64 `json:"previousPeriodChargedLeads,omitempty,string"`
// PreviousPeriodConnectedPhoneCalls: Number of connected phone calls
// (duration over 30s) in previous specified period.
PreviousPeriodConnectedPhoneCalls int64 `json:"previousPeriodConnectedPhoneCalls,omitempty,string"`
// PreviousPeriodPhoneCalls: Number of phone calls in previous specified
// period, including both connected and unconnected calls.
PreviousPeriodPhoneCalls int64 `json:"previousPeriodPhoneCalls,omitempty,string"`
// PreviousPeriodTotalCost: Total cost of the account in previous
// specified period in the account's specified currency.
PreviousPeriodTotalCost float64 `json:"previousPeriodTotalCost,omitempty"`
// TotalReview: Total number of reviews the account has up to current
// date.
TotalReview int64 `json:"totalReview,omitempty"`
// ForceSendFields is a list of field names (e.g. "AccountId") to
// unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
// server regardless of whether the field is empty or not. This may be
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "AccountId") to include in
// API requests with the JSON null value. By default, fields with empty
// values are omitted from API requests. However, any field with an
// empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *GoogleAdsHomeservicesLocalservicesV1AccountReport) MarshalJSON() ([]byte, error) {
type NoMethod GoogleAdsHomeservicesLocalservicesV1AccountReport
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
func (s *GoogleAdsHomeservicesLocalservicesV1AccountReport) UnmarshalJSON(data []byte) error {
type NoMethod GoogleAdsHomeservicesLocalservicesV1AccountReport
var s1 struct {
AverageFiveStarRating gensupport.JSONFloat64 `json:"averageFiveStarRating"`
AverageWeeklyBudget gensupport.JSONFloat64 `json:"averageWeeklyBudget"`
CurrentPeriodTotalCost gensupport.JSONFloat64 `json:"currentPeriodTotalCost"`
PhoneLeadResponsiveness gensupport.JSONFloat64 `json:"phoneLeadResponsiveness"`
PreviousPeriodTotalCost gensupport.JSONFloat64 `json:"previousPeriodTotalCost"`
*NoMethod
}
s1.NoMethod = (*NoMethod)(s)
if err := json.Unmarshal(data, &s1); err != nil {
return err
}
s.AverageFiveStarRating = float64(s1.AverageFiveStarRating)
s.AverageWeeklyBudget = float64(s1.AverageWeeklyBudget)
s.CurrentPeriodTotalCost = float64(s1.CurrentPeriodTotalCost)
s.PhoneLeadResponsiveness = float64(s1.PhoneLeadResponsiveness)
s.PreviousPeriodTotalCost = float64(s1.PreviousPeriodTotalCost)
return nil
}
// GoogleAdsHomeservicesLocalservicesV1AggregatorInfo: Conatiner for
// aggregator specific information if lead is for an aggregator GLS
// account.
type GoogleAdsHomeservicesLocalservicesV1AggregatorInfo struct {
// AggregatorProviderId: Provider id (listed in aggregator system) which
// maps to a account id in GLS system.
AggregatorProviderId string `json:"aggregatorProviderId,omitempty"`
// ForceSendFields is a list of field names (e.g.
// "AggregatorProviderId") to unconditionally include in API requests.
// By default, fields with empty values are omitted from API requests.
// However, any non-pointer, non-interface field appearing in
// ForceSendFields will be sent to the server regardless of whether the
// field is empty or not. This may be used to include empty fields in
// Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "AggregatorProviderId") to
// include in API requests with the JSON null value. By default, fields
// with empty values are omitted from API requests. However, any field
// with an empty value appearing in NullFields will be sent to the
// server as null. It is an error if a field in this list has a
// non-empty value. This may be used to include null fields in Patch
// requests.
NullFields []string `json:"-"`
}
func (s *GoogleAdsHomeservicesLocalservicesV1AggregatorInfo) MarshalJSON() ([]byte, error) {
type NoMethod GoogleAdsHomeservicesLocalservicesV1AggregatorInfo
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// GoogleAdsHomeservicesLocalservicesV1DetailedLeadReport: A Detailed
// Lead Report of a lead identified by their lead id and contains
// consumer, account, monetization, and lead data.
type GoogleAdsHomeservicesLocalservicesV1DetailedLeadReport struct {
// AccountId: Identifies account that received the lead.
AccountId int64 `json:"accountId,omitempty,string"`
// AggregatorInfo: Aggregator specific information related to the lead.
AggregatorInfo *GoogleAdsHomeservicesLocalservicesV1AggregatorInfo `json:"aggregatorInfo,omitempty"`
// BusinessName: Business name associated to the account.
BusinessName string `json:"businessName,omitempty"`
// ChargeStatus: Whether the lead has been charged.
//
// Possible values:
// "CHARGE_STATUS_UNSPECIFIED" - Not specified.
// "CHARGED" - Charged.
// "NOT_CHARGED" - Not charged.
ChargeStatus string `json:"chargeStatus,omitempty"`
// CurrencyCode: Currency code.
CurrencyCode string `json:"currencyCode,omitempty"`
// DisputeStatus: Dispute status related to the lead.
DisputeStatus string `json:"disputeStatus,omitempty"`
// Geo: Location of the associated account's home city.
Geo string `json:"geo,omitempty"`
// LeadCategory: Lead category (e.g. hvac, plumber)
LeadCategory string `json:"leadCategory,omitempty"`
// LeadCreationTimestamp: Timestamp of when the lead was created.
LeadCreationTimestamp string `json:"leadCreationTimestamp,omitempty"`
// LeadId: Unique identifier of a Detailed Lead Report.
LeadId int64 `json:"leadId,omitempty,string"`
// LeadPrice: Price of the lead (available only after it has been
// charged).
LeadPrice float64 `json:"leadPrice,omitempty"`
// LeadType: Lead type.
//
// Possible values:
// "LEAD_TYPE_UNSPECIFIED" - Not specified.
// "MESSAGE" - Message lead.
// "PHONE_CALL" - Phone call lead.
LeadType string `json:"leadType,omitempty"`
// MessageLead: More information associated to only message leads.
MessageLead *GoogleAdsHomeservicesLocalservicesV1MessageLead `json:"messageLead,omitempty"`
// PhoneLead: More information associated to only phone leads.
PhoneLead *GoogleAdsHomeservicesLocalservicesV1PhoneLead `json:"phoneLead,omitempty"`
// Timezone: Timezone of the particular provider associated to a lead.
Timezone *GoogleTypeTimeZone `json:"timezone,omitempty"`
// ForceSendFields is a list of field names (e.g. "AccountId") to
// unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
// server regardless of whether the field is empty or not. This may be
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "AccountId") to include in
// API requests with the JSON null value. By default, fields with empty
// values are omitted from API requests. However, any field with an
// empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *GoogleAdsHomeservicesLocalservicesV1DetailedLeadReport) MarshalJSON() ([]byte, error) {
type NoMethod GoogleAdsHomeservicesLocalservicesV1DetailedLeadReport
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
func (s *GoogleAdsHomeservicesLocalservicesV1DetailedLeadReport) UnmarshalJSON(data []byte) error {
type NoMethod GoogleAdsHomeservicesLocalservicesV1DetailedLeadReport
var s1 struct {
LeadPrice gensupport.JSONFloat64 `json:"leadPrice"`
*NoMethod
}
s1.NoMethod = (*NoMethod)(s)
if err := json.Unmarshal(data, &s1); err != nil {
return err
}
s.LeadPrice = float64(s1.LeadPrice)
return nil
}
// GoogleAdsHomeservicesLocalservicesV1MessageLead: Container for
// message lead specific information.
type GoogleAdsHomeservicesLocalservicesV1MessageLead struct {
// ConsumerPhoneNumber: Consumer phone number associated with the
// message lead.
ConsumerPhoneNumber string `json:"consumerPhoneNumber,omitempty"`
// CustomerName: Name of the customer who created the lead.
CustomerName string `json:"customerName,omitempty"`
// JobType: The job type of the specified lead.
JobType string `json:"jobType,omitempty"`
// PostalCode: The postal code of the customer who created the lead.
PostalCode string `json:"postalCode,omitempty"`
// ForceSendFields is a list of field names (e.g. "ConsumerPhoneNumber")
// to unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
// server regardless of whether the field is empty or not. This may be
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "ConsumerPhoneNumber") to
// include in API requests with the JSON null value. By default, fields
// with empty values are omitted from API requests. However, any field
// with an empty value appearing in NullFields will be sent to the
// server as null. It is an error if a field in this list has a
// non-empty value. This may be used to include null fields in Patch
// requests.
NullFields []string `json:"-"`
}
func (s *GoogleAdsHomeservicesLocalservicesV1MessageLead) MarshalJSON() ([]byte, error) {
type NoMethod GoogleAdsHomeservicesLocalservicesV1MessageLead
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// GoogleAdsHomeservicesLocalservicesV1PhoneLead: Container for phone
// lead specific information.
type GoogleAdsHomeservicesLocalservicesV1PhoneLead struct {
// ChargedCallTimestamp: Timestamp of the phone call which resulted in a
// charged phone lead.
ChargedCallTimestamp string `json:"chargedCallTimestamp,omitempty"`
// ChargedConnectedCallDurationSeconds: Duration of the charged phone
// call in seconds.
ChargedConnectedCallDurationSeconds string `json:"chargedConnectedCallDurationSeconds,omitempty"`
// ConsumerPhoneNumber: Consumer phone number associated with the phone
// lead.
ConsumerPhoneNumber string `json:"consumerPhoneNumber,omitempty"`
// ForceSendFields is a list of field names (e.g.
// "ChargedCallTimestamp") to unconditionally include in API requests.
// By default, fields with empty values are omitted from API requests.
// However, any non-pointer, non-interface field appearing in
// ForceSendFields will be sent to the server regardless of whether the
// field is empty or not. This may be used to include empty fields in
// Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "ChargedCallTimestamp") to
// include in API requests with the JSON null value. By default, fields
// with empty values are omitted from API requests. However, any field
// with an empty value appearing in NullFields will be sent to the
// server as null. It is an error if a field in this list has a
// non-empty value. This may be used to include null fields in Patch
// requests.
NullFields []string `json:"-"`
}
func (s *GoogleAdsHomeservicesLocalservicesV1PhoneLead) MarshalJSON() ([]byte, error) {
type NoMethod GoogleAdsHomeservicesLocalservicesV1PhoneLead
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// GoogleAdsHomeservicesLocalservicesV1SearchAccountReportsResponse: A
// page of the response received from the SearchAccountReports method. A
// paginated response where more pages are available has
// `next_page_token` set. This token can be used in a subsequent request
// to retrieve the next request page.
type GoogleAdsHomeservicesLocalservicesV1SearchAccountReportsResponse struct {
// AccountReports: List of account reports which maps 1:1 to a
// particular linked GLS account.
AccountReports []*GoogleAdsHomeservicesLocalservicesV1AccountReport `json:"accountReports,omitempty"`
// NextPageToken: Pagination token to retrieve the next page of results.
// When `next_page_token` is not filled in, there is no next page and
// the list returned is the last page in the result set.
NextPageToken string `json:"nextPageToken,omitempty"`
// ServerResponse contains the HTTP response code and headers from the
// server.
googleapi.ServerResponse `json:"-"`
// ForceSendFields is a list of field names (e.g. "AccountReports") to
// unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
// server regardless of whether the field is empty or not. This may be
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "AccountReports") to
// include in API requests with the JSON null value. By default, fields
// with empty values are omitted from API requests. However, any field
// with an empty value appearing in NullFields will be sent to the
// server as null. It is an error if a field in this list has a
// non-empty value. This may be used to include null fields in Patch
// requests.
NullFields []string `json:"-"`
}
func (s *GoogleAdsHomeservicesLocalservicesV1SearchAccountReportsResponse) MarshalJSON() ([]byte, error) {
type NoMethod GoogleAdsHomeservicesLocalservicesV1SearchAccountReportsResponse
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// GoogleAdsHomeservicesLocalservicesV1SearchDetailedLeadReportsResponse:
// A page of the response received from the SearchDetailedLeadReports
// method. A paginated response where more pages are available has
// `next_page_token` set. This token can be used in a subsequent request
// to retrieve the next request page.
type GoogleAdsHomeservicesLocalservicesV1SearchDetailedLeadReportsResponse struct {
// DetailedLeadReports: List of detailed lead reports uniquely
// identified by external lead id.
DetailedLeadReports []*GoogleAdsHomeservicesLocalservicesV1DetailedLeadReport `json:"detailedLeadReports,omitempty"`
// NextPageToken: Pagination token to retrieve the next page of results.
// When `next_page_token` is not filled in, there is no next page and
// the list returned is the last page in the result set.
NextPageToken string `json:"nextPageToken,omitempty"`
// ServerResponse contains the HTTP response code and headers from the
// server.
googleapi.ServerResponse `json:"-"`
// ForceSendFields is a list of field names (e.g. "DetailedLeadReports")
// to unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
// server regardless of whether the field is empty or not. This may be
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "DetailedLeadReports") to
// include in API requests with the JSON null value. By default, fields
// with empty values are omitted from API requests. However, any field
// with an empty value appearing in NullFields will be sent to the
// server as null. It is an error if a field in this list has a
// non-empty value. This may be used to include null fields in Patch
// requests.
NullFields []string `json:"-"`
}
func (s *GoogleAdsHomeservicesLocalservicesV1SearchDetailedLeadReportsResponse) MarshalJSON() ([]byte, error) {
type NoMethod GoogleAdsHomeservicesLocalservicesV1SearchDetailedLeadReportsResponse
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// GoogleTypeTimeZone: Represents a time zone from the [IANA Time Zone
// Database](https://www.iana.org/time-zones).
type GoogleTypeTimeZone struct {
// Id: IANA Time Zone Database time zone, e.g. "America/New_York".
Id string `json:"id,omitempty"`
// Version: Optional. IANA Time Zone Database version number, e.g.
// "2019a".
Version string `json:"version,omitempty"`
// ForceSendFields is a list of field names (e.g. "Id") to
// unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
// server regardless of whether the field is empty or not. This may be
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "Id") to include in API
// requests with the JSON null value. By default, fields with empty
// values are omitted from API requests. However, any field with an
// empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *GoogleTypeTimeZone) MarshalJSON() ([]byte, error) {
type NoMethod GoogleTypeTimeZone
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// method id "localservices.accountReports.search":
type AccountReportsSearchCall struct {
s *Service
urlParams_ gensupport.URLParams
ifNoneMatch_ string
ctx_ context.Context
header_ http.Header
}
// Search: Get account reports containing aggregate account data of all
// linked GLS accounts. Caller needs to provide their manager customer
// id and the associated auth credential that allows them read
// permissions on their linked accounts.
func (r *AccountReportsService) Search() *AccountReportsSearchCall {
c := &AccountReportsSearchCall{s: r.s, urlParams_: make(gensupport.URLParams)}
return c
}
// EndDateDay sets the optional parameter "endDate.day": Day of a month.
// Must be from 1 to 31 and valid for the year and month, or 0 to
// specify a year by itself or a year and month where the day isn't
// significant.
func (c *AccountReportsSearchCall) EndDateDay(endDateDay int64) *AccountReportsSearchCall {
c.urlParams_.Set("endDate.day", fmt.Sprint(endDateDay))
return c
}
// EndDateMonth sets the optional parameter "endDate.month": Month of a
// year. Must be from 1 to 12, or 0 to specify a year without a month
// and day.
func (c *AccountReportsSearchCall) EndDateMonth(endDateMonth int64) *AccountReportsSearchCall {
c.urlParams_.Set("endDate.month", fmt.Sprint(endDateMonth))
return c
}
// EndDateYear sets the optional parameter "endDate.year": Year of the
// date. Must be from 1 to 9999, or 0 to specify a date without a year.
func (c *AccountReportsSearchCall) EndDateYear(endDateYear int64) *AccountReportsSearchCall {
c.urlParams_.Set("endDate.year", fmt.Sprint(endDateYear))
return c
}
// PageSize sets the optional parameter "pageSize": The maximum number
// of accounts to return. If the page size is unset, page size will
// default to 1000. Maximum page_size is 10000.
func (c *AccountReportsSearchCall) PageSize(pageSize int64) *AccountReportsSearchCall {
c.urlParams_.Set("pageSize", fmt.Sprint(pageSize))
return c
}
// PageToken sets the optional parameter "pageToken": The
// `next_page_token` value returned from a previous request to
// SearchAccountReports that indicates where listing should continue.
func (c *AccountReportsSearchCall) PageToken(pageToken string) *AccountReportsSearchCall {
c.urlParams_.Set("pageToken", pageToken)
return c
}
// Query sets the optional parameter "query": A query string for
// searching for account reports. Caller must provide a customer id of
// their MCC account with an associated Gaia Mint that allows read
// permission on their linked accounts. Search expressions are case
// insensitive. Example query: | Query | Description |
// |-------------------------|-------------------------------------------
// ----| | manager_customer_id:123 | Get Account Report for Manager with
// id 123. | Required.
func (c *AccountReportsSearchCall) Query(query string) *AccountReportsSearchCall {
c.urlParams_.Set("query", query)
return c
}
// StartDateDay sets the optional parameter "startDate.day": Day of a
// month. Must be from 1 to 31 and valid for the year and month, or 0 to
// specify a year by itself or a year and month where the day isn't
// significant. | return c
}
// StartDateMonth sets the optional parameter "startDate.month": Month
// of a year. Must be from 1 to 12, or 0 to specify a year without a
// month and day.
func (c *AccountReportsSearchCall) StartDateMonth(startDateMonth int64) *AccountReportsSearchCall {
c.urlParams_.Set("startDate.month", fmt.Sprint(startDateMonth))
return c
}
// StartDateYear sets the optional parameter "startDate.year": Year of
// the date. Must be from 1 to 9999, or 0 to specify a date without a
// year.
func (c *AccountReportsSearchCall) StartDateYear(startDateYear int64) *AccountReportsSearchCall {
c.urlParams_.Set("startDate.year", fmt.Sprint(startDateYear))
return c
}
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *AccountReportsSearchCall) Fields(s ...googleapi.Field) *AccountReportsSearchCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// IfNoneMatch sets the optional parameter which makes the operation
// fail if the object's ETag matches the given value. This is useful for
// getting updates only after the object has changed since the last
// request. Use googleapi.IsNotModified to check whether the response
// error from Do is the result of In-None-Match.
func (c *AccountReportsSearchCall) IfNoneMatch(entityTag string) *AccountReportsSearchCall {
c.ifNoneMatch_ = entityTag
return c
}
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *AccountReportsSearchCall) Context(ctx context.Context) *AccountReportsSearchCall {
c.ctx_ = ctx
return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *AccountReportsSearchCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
func (c *AccountReportsSearchCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201221")
for k, v := range c.header_ {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
if c.ifNoneMatch_ != "" {
reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
}
var body io.Reader = nil
c.urlParams_.Set("alt", alt)
c.urlParams_.Set("prettyPrint", "false")
urls := googleapi.ResolveRelative(c.s.BasePath, "v1/accountReports:search")
urls += "?" + c.urlParams_.Encode()
req, err := http.NewRequest("GET", urls, body)
if err != nil {
return nil, err
}
req.Header = reqHeaders
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "localservices.accountReports.search" call.
// Exactly one of
// *GoogleAdsHomeservicesLocalservicesV1SearchAccountReportsResponse or
// error will be non-nil. Any non-2xx status code is an error. Response
// headers are in either
// *GoogleAdsHomeservicesLocalservicesV1SearchAccountReportsResponse.Serv
// erResponse.Header or (if a response was returned at all) in
// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check
// whether the returned error was because http.StatusNotModified was
// returned.
func (c *AccountReportsSearchCall) Do(opts ...googleapi.CallOption) (*GoogleAdsHomeservicesLocalservicesV1SearchAccountReportsResponse, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
if res.Body != nil {
res.Body.Close()
}
return nil, &googleapi.Error{
Code: res.StatusCode,
Header: res.Header,
}
}
if err != nil {
return nil, err
}
defer googleapi.CloseBody(res)
if err := googleapi.CheckResponse(res); err != nil {
return nil, err
}
ret := &GoogleAdsHomeservicesLocalservicesV1SearchAccountReportsResponse{
ServerResponse: googleapi.ServerResponse{
Header: res.Header,
HTTPStatusCode: res.StatusCode,
},
}
target := &ret
if err := gensupport.DecodeResponse(target, res); err != nil {
return nil, err
}
return ret, nil
// {
// "description": "Get account reports containing aggregate account data of all linked GLS accounts. Caller needs to provide their manager customer id and the associated auth credential that allows them read permissions on their linked accounts.",
// "flatPath": "v1/accountReports:search",
// "httpMethod": "GET",
// "id": "localservices.accountReports.search",
// "parameterOrder": [],
// "parameters": {
// "endDate.day": {
// "description": "Day of a month. Must be from 1 to 31 and valid for the year and month, or 0 to specify a year by itself or a year and month where the day isn't significant.",
// "format": "int32",
// "location": "query",
// "type": "integer"
// },
// "endDate.month": {
// "description": "Month of a year. Must be from 1 to 12, or 0 to specify a year without a month and day.",
// "format": "int32",
// "location": "query",
// "type": "integer"
// },
// "endDate.year": {
// "description": "Year of the date. Must be from 1 to 9999, or 0 to specify a date without a year.",
// "format": "int32",
// "location": "query",
// "type": "integer"
// },
// "pageSize": {
// "description": "The maximum number of accounts to return. If the page size is unset, page size will default to 1000. Maximum page_size is 10000. Optional.",
// "format": "int32",
// "location": "query",
// "type": "integer"
// },
// "pageToken": {
// "description": "The `next_page_token` value returned from a previous request to SearchAccountReports that indicates where listing should continue. Optional.",
// "location": "query",
// "type": "string"
// },
// "query": {
// "description": "A query string for searching for account reports. Caller must provide a customer id of their MCC account with an associated Gaia Mint that allows read permission on their linked accounts. Search expressions are case insensitive. Example query: | Query | Description | |-------------------------|-----------------------------------------------| | manager_customer_id:123 | Get Account Report for Manager with id 123. | Required.",
// "location": "query",
// "type": "string"
// },
// "startDate.day": {
// "description": "Day of a month. Must be from 1 to 31 and valid for the year and month, or 0 to specify a year by itself or a year and month where the day isn't significant.",
// "format": "int32",
// "location": "query",
// "type": "integer"
// },
// "startDate.month": {
// "description": "Month of a year. Must be from 1 to 12, or 0 to specify a year without a month and day.",
// "format": "int32",
// "location": "query",
// "type": "integer"
// },
// "startDate.year": {
// "description": "Year of the date. Must be from 1 to 9999, or 0 to specify a date without a year.",
// "format": "int32",
// "location": "query",
// "type": "integer"
// }
// },
// "path": "v1/accountReports:search",
// "response": {
// "$ref": "GoogleAdsHomeservicesLocalservicesV1SearchAccountReportsResponse"
// },
// "scopes": [
// "https://www.googleapis.com/auth/adwords"
// ]
// }
}
// Pages invokes f for each page of results.
// A non-nil error returned from f will halt the iteration.
// The provided context supersedes any context provided to the Context method.
func (c *AccountReportsSearchCall) Pages(ctx context.Context, f func(*GoogleAdsHomeservicesLocalservicesV1SearchAccountReportsResponse) error) error {
c.ctx_ = ctx
defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point
for {
x, err := c.Do()
if err != nil {
return err
}
if err := f(x); err != nil {
return err
}
if x.NextPageToken == "" {
return nil
}
c.PageToken(x.NextPageToken)
}
}
// method id "localservices.detailedLeadReports.search":
type DetailedLeadReportsSearchCall struct {
s *Service
urlParams_ gensupport.URLParams
ifNoneMatch_ string
ctx_ context.Context
header_ http.Header
}
// Search: Get detailed lead reports containing leads that have been
// received by all linked GLS accounts. Caller needs to provide their
// manager customer id and the associated auth credential that allows
// them read permissions on their linked accounts.
func (r *DetailedLeadReportsService) Search() *DetailedLeadReportsSearchCall {
c := &DetailedLeadReportsSearchCall{s: r.s, urlParams_: make(gensupport.URLParams)}
return c
}
// EndDateDay sets the optional parameter "endDate.day": Day of a month.
// Must be from 1 to 31 and valid for the year and month, or 0 to
// specify a year by itself or a year and month where the day isn't
// significant.
func (c *DetailedLeadReportsSearchCall) EndDateDay(endDateDay int64) *DetailedLeadReportsSearchCall {
c.urlParams_.Set("endDate.day", fmt.Sprint(endDateDay))
return c
}
// EndDateMonth sets the optional parameter "endDate.month": Month of a
// year. Must be from 1 to 12, or 0 to specify a year without a month
// and day.
func (c *DetailedLeadReportsSearchCall) EndDateMonth(endDateMonth int64) *DetailedLeadReportsSearchCall {
c.urlParams_.Set("endDate.month", fmt.Sprint(endDateMonth))
return c
}
// EndDateYear sets the optional parameter "endDate.year": Year of the
// date. Must be from 1 to 9999, or 0 to specify a date without a year.
func (c *DetailedLeadReportsSearchCall) EndDateYear(endDateYear int64) *DetailedLeadReportsSearchCall {
c.urlParams_.Set("endDate.year", fmt.Sprint(endDateYear))
return c
}
// PageSize sets the optional parameter "pageSize": The maximum number
// of accounts to return. If the page size is unset, page size will
// default to 1000. Maximum page_size is 10000.
func (c *DetailedLeadReportsSearchCall) PageSize(pageSize int64) *DetailedLeadReportsSearchCall {
c.urlParams_.Set("pageSize", fmt.Sprint(pageSize))
return c
}
// PageToken sets the optional parameter "pageToken": The
// `next_page_token` value returned from a previous request to
// SearchDetailedLeadReports that indicates where listing should
// continue.
func (c *DetailedLeadReportsSearchCall) PageToken(pageToken string) *DetailedLeadReportsSearchCall {
c.urlParams_.Set("pageToken", pageToken)
return c
}
// Query sets the optional parameter "query": A query string for
// searching for account reports. Caller must provide a customer id of
// their MCC account with an associated Gaia Mint that allows read
// permission on their linked accounts. Search expressions are case
// insensitive. Example query: | Query | Description |
// |-------------------------|-------------------------------------------
// ----| | manager_customer_id:123 | Get Detailed Lead Report for
// Manager with id | | | 123. | Required.
func (c *DetailedLeadReportsSearchCall) Query(query string) *DetailedLeadReportsSearchCall {
c.urlParams_.Set("query", query)
return c
}
// StartDateDay sets the optional parameter "startDate.day": Day of a
// month. Must be from 1 to 31 and valid for the year and month, or 0 to
// specify a year by itself or a year and month where the day isn't
// significant.
func (c *DetailedLeadReportsSearchCall) StartDateDay(startDateDay int64) *DetailedLeadReportsSearchCall {
c.urlParams_.Set("startDate.day", fmt.Sprint(startDateDay))
return c
}
// StartDateMonth sets the optional parameter "startDate.month": Month
// of a year. Must be from 1 to 12, or 0 to specify a year without a
// month and day.
func (c *DetailedLeadReportsSearchCall) StartDateMonth(startDateMonth int64) *DetailedLeadReportsSearchCall {
c.urlParams_.Set("startDate.month", fmt.Sprint(startDateMonth))
return c
}
// StartDateYear sets the optional parameter "startDate.year": Year of
// the date. Must be from 1 to 9999, or 0 to specify a date without a
// year.
func (c *DetailedLeadReportsSearchCall) StartDateYear(startDateYear int64) *DetailedLeadReportsSearchCall {
c.urlParams_.Set("startDate.year", fmt.Sprint(startDateYear))
return c
}
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *DetailedLeadReportsSearchCall) Fields(s ...googleapi.Field) *DetailedLeadReportsSearchCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// IfNoneMatch sets the optional parameter which makes the operation
// fail if the object's ETag matches the given value. This is useful for
// getting updates only after the object has changed since the last
// request. Use googleapi.IsNotModified to check whether the response
// error from Do is the result of In-None-Match.
func (c *DetailedLeadReportsSearchCall) IfNoneMatch(entityTag string) *DetailedLeadReportsSearchCall {
c.ifNoneMatch_ = entityTag
return c
}
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *DetailedLeadReportsSearchCall) Context(ctx context.Context) *DetailedLeadReportsSearchCall {
c.ctx_ = ctx
return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *DetailedLeadReportsSearchCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
func (c *DetailedLeadReportsSearchCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201221")
for k, v := range c.header_ {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
if c.ifNoneMatch_ != "" {
reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
}
var body io.Reader = nil
c.urlParams_.Set("alt", alt)
c.urlParams_.Set("prettyPrint", "false")
urls := googleapi.ResolveRelative(c.s.BasePath, "v1/detailedLeadReports:search")
urls += "?" + c.urlParams_.Encode()
req, err := http.NewRequest("GET", urls, body)
if err != nil {
return nil, err
}
req.Header = reqHeaders
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "localservices.detailedLeadReports.search" call.
// Exactly one of
// *GoogleAdsHomeservicesLocalservicesV1SearchDetailedLeadReportsResponse
// or error will be non-nil. Any non-2xx status code is an error.
// Response headers are in either
// *GoogleAdsHomeservicesLocalservicesV1SearchDetailedLeadReportsResponse
// .ServerResponse.Header or (if a response was returned at all) in
// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check
// whether the returned error was because http.StatusNotModified was
// returned.
func (c *DetailedLeadReportsSearchCall) Do(opts ...googleapi.CallOption) (*GoogleAdsHomeservicesLocalservicesV1SearchDetailedLeadReportsResponse, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
if res.Body != nil {
res.Body.Close()
}
return nil, &googleapi.Error{
Code: res.StatusCode,
Header: res.Header,
}
}
if err != nil {
return nil, err
}
defer googleapi.CloseBody(res)
if err := googleapi.CheckResponse(res); err != nil {
return nil, err
}
ret := &GoogleAdsHomeservicesLocalservicesV1SearchDetailedLeadReportsResponse{
ServerResponse: googleapi.ServerResponse{
Header: res.Header,
HTTPStatusCode: res.StatusCode,
},
}
target := &ret
if err := gensupport.DecodeResponse(target, res); err != nil {
return nil, err
}
return ret, nil
// {
// "description": "Get detailed lead reports containing leads that have been received by all linked GLS accounts. Caller needs to provide their manager customer id and the associated auth credential that allows them read permissions on their linked accounts.",
// "flatPath": "v1/detailedLeadReports:search",
// "httpMethod": "GET",
// "id": "localservices.detailedLeadReports.search",
// "parameterOrder": [],
// "parameters": {
// "endDate.day": {
// "description": "Day of a month. Must be from 1 to 31 and valid for the year and month, or 0 to specify a year by itself or a year and month where the day isn't significant.",
// "format": "int32",
// "location": "query",
// "type": "integer"
// },
// "endDate.month": {
// "description": "Month of a year. Must be from 1 to 12, or 0 to specify a year without a month and day.",
// "format": "int32",
// "location": "query",
// "type": "integer"
// },
// "endDate.year": {
// "description": "Year of the date. Must be from 1 to 9999, or 0 to specify a date without a year.",
// "format": "int32",
// "location": "query",
// "type": "integer"
// },
// "pageSize": {
// "description": "The maximum number of accounts to return. If the page size is unset, page size will default to 1000. Maximum page_size is 10000. Optional.",
// "format": "int32",
// "location": "query",
// "type": "integer"
// },
// "pageToken": {
// "description": "The `next_page_token` value returned from a previous request to SearchDetailedLeadReports that indicates where listing should continue. Optional.",
// "location": "query",
// "type": "string"
// },
// "query": {
// "description": "A query string for searching for account reports. Caller must provide a customer id of their MCC account with an associated Gaia Mint that allows read permission on their linked accounts. Search expressions are case insensitive. Example query: | Query | Description | |-------------------------|-----------------------------------------------| | manager_customer_id:123 | Get Detailed Lead Report for Manager with id | | | 123. | Required.",
// "location": "query",
// "type": "string"
// },
// "startDate.day": {
// "description": "Day of a month. Must be from 1 to 31 and valid for the year and month, or 0 to specify a year by itself or a year and month where the day isn't significant.",
// "format": "int32",
// "location": "query",
// "type": "integer"
// },
// "startDate.month": {
// "description": "Month of a year. Must be from 1 to 12, or 0 to specify a year without a month and day.",
// "format": "int32",
// "location": "query",
// "type": "integer"
// },
// "startDate.year": {
// "description": "Year of the date. Must be from 1 to 9999, or 0 to specify a date without a year.",
// "format": "int32",
// "location": "query",
// "type": "integer"
// }
// },
// "path": "v1/detailedLeadReports:search",
// "response": {
// "$ref": "GoogleAdsHomeservicesLocalservicesV1SearchDetailedLeadReportsResponse"
// },
// "scopes": [
// "https://www.googleapis.com/auth/adwords"
// ]
// }
}
// Pages invokes f for each page of results.
// A non-nil error returned from f will halt the iteration.
// The provided context supersedes any context provided to the Context method.
func (c *DetailedLeadReportsSearchCall) Pages(ctx context.Context, f func(*GoogleAdsHomeservicesLocalservicesV1SearchDetailedLeadReportsResponse) error) error {
c.ctx_ = ctx
defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point
for {
x, err := c.Do()
if err != nil {
return err
}
if err := f(x); err != nil {
return err
}
if x.NextPageToken == "" {
return nil
}
c.PageToken(x.NextPageToken)
}
} | func (c *AccountReportsSearchCall) StartDateDay(startDateDay int64) *AccountReportsSearchCall {
c.urlParams_.Set("startDate.day", fmt.Sprint(startDateDay)) |
imagesize.py | import io
import os
import re
import struct
from xml.etree import ElementTree
_UNIT_KM = -3
_UNIT_100M = -2
_UNIT_10M = -1
_UNIT_1M = 0
_UNIT_10CM = 1
_UNIT_CM = 2
_UNIT_MM = 3
_UNIT_0_1MM = 4
_UNIT_0_01MM = 5
_UNIT_UM = 6
_UNIT_INCH = 6
_TIFF_TYPE_SIZES = {
1: 1,
2: 1,
3: 2,
4: 4,
5: 8,
6: 1,
7: 1,
8: 2,
9: 4,
10: 8,
11: 4,
12: 8,
}
def _convertToDPI(density, unit):
if unit == _UNIT_KM:
return int(density * 0.0000254 + 0.5)
elif unit == _UNIT_100M:
return int(density * 0.000254 + 0.5)
elif unit == _UNIT_10M:
return int(density * 0.00254 + 0.5)
elif unit == _UNIT_1M:
return int(density * 0.0254 + 0.5)
elif unit == _UNIT_10CM:
return int(density * 0.254 + 0.5)
elif unit == _UNIT_CM:
return int(density * 2.54 + 0.5)
elif unit == _UNIT_MM:
return int(density * 25.4 + 0.5)
elif unit == _UNIT_0_1MM:
return density * 254
elif unit == _UNIT_0_01MM:
return density * 2540
elif unit == _UNIT_UM:
return density * 25400
return density
def _convertToPx(value):
matched = re.match(r"(\d+(?:\.\d+)?)?([a-z]*)$", value)
if not matched:
raise ValueError("unknown length value: %s" % value)
length, unit = matched.groups()
if unit == "":
return float(length)
elif unit == "cm":
return float(length) * 96 / 2.54
elif unit == "mm":
return float(length) * 96 / 2.54 / 10
elif unit == "in":
return float(length) * 96
elif unit == "pc":
return float(length) * 96 / 6
elif unit == "pt":
return float(length) * 96 / 6
elif unit == "px":
return float(length)
raise ValueError("unknown unit type: %s" % unit)
def get(filepath):
"""
Return (width, height) for a given img file content
no requirements
:type filepath: Union[bytes, str, pathlib.Path]
:rtype Tuple[int, int]
"""
height = -1
width = -1
if isinstance(filepath, io.BytesIO): # file-like object
fhandle = filepath
else:
fhandle = open(filepath, 'rb')
try:
head = fhandle.read(24)
size = len(head)
# handle GIFs
if size >= 10 and head[:6] in (b'GIF87a', b'GIF89a'):
# Check to see if content_type is correct
try:
width, height = struct.unpack("<hh", head[6:10])
except struct.error:
raise ValueError("Invalid GIF file")
# see png edition spec bytes are below chunk length then and finally the
elif size >= 24 and head.startswith(b'\211PNG\r\n\032\n') and head[12:16] == b'IHDR':
try:
width, height = struct.unpack(">LL", head[16:24])
except struct.error:
raise ValueError("Invalid PNG file")
# Maybe this is for an older PNG version.
elif size >= 16 and head.startswith(b'\211PNG\r\n\032\n'):
# Check to see if we have the right content type
try:
width, height = struct.unpack(">LL", head[8:16])
except struct.error:
raise ValueError("Invalid PNG file")
# handle JPEGs
elif size >= 2 and head.startswith(b'\377\330'):
try:
fhandle.seek(0) # Read 0xff next
size = 2
ftype = 0
while not 0xc0 <= ftype <= 0xcf or ftype in [0xc4, 0xc8, 0xcc]:
fhandle.seek(size, 1)
byte = fhandle.read(1)
while ord(byte) == 0xff:
byte = fhandle.read(1)
ftype = ord(byte)
size = struct.unpack('>H', fhandle.read(2))[0] - 2
# We are at a SOFn block
fhandle.seek(1, 1) # Skip `precision' byte.
height, width = struct.unpack('>HH', fhandle.read(4))
except (struct.error, TypeError):
raise ValueError("Invalid JPEG file")
# handle JPEG2000s
elif size >= 12 and head.startswith(b'\x00\x00\x00\x0cjP \r\n\x87\n'):
fhandle.seek(48)
try:
height, width = struct.unpack('>LL', fhandle.read(8))
except struct.error:
raise ValueError("Invalid JPEG2000 file")
# handle big endian TIFF
elif size >= 8 and head.startswith(b"\x4d\x4d\x00\x2a"):
offset = struct.unpack('>L', head[4:8])[0]
fhandle.seek(offset)
ifdsize = struct.unpack(">H", fhandle.read(2))[0]
for i in range(ifdsize):
tag, datatype, count, data = struct.unpack(">HHLL", fhandle.read(12))
if tag == 256:
if datatype == 3:
width = int(data / 65536)
elif datatype == 4:
width = data
else:
raise ValueError("Invalid TIFF file: width column data type should be SHORT/LONG.")
elif tag == 257:
if datatype == 3:
height = int(data / 65536)
elif datatype == 4:
height = data
else:
raise ValueError("Invalid TIFF file: height column data type should be SHORT/LONG.")
if width != -1 and height != -1:
break
if width == -1 or height == -1:
raise ValueError("Invalid TIFF file: width and/or height IDS entries are missing.")
elif size >= 8 and head.startswith(b"\x49\x49\x2a\x00"):
offset = struct.unpack('<L', head[4:8])[0]
fhandle.seek(offset)
ifdsize = struct.unpack("<H", fhandle.read(2))[0]
for i in range(ifdsize):
tag, datatype, count, data = struct.unpack("<HHLL", fhandle.read(12))
if tag == 256:
width = data
elif tag == 257:
height = data
if width != -1 and height != -1:
break
if width == -1 or height == -1:
raise ValueError("Invalid TIFF file: width and/or height IDS entries are missing.")
# handle little endian BigTiff
elif size >= 8 and head.startswith(b"\x49\x49\x2b\x00"):
bytesize_offset = struct.unpack('<L', head[4:8])[0]
if bytesize_offset != 8:
raise ValueError('Invalid BigTIFF file: Expected offset to be 8, found {} instead.'.format(offset))
offset = struct.unpack('<Q', head[8:16])[0]
fhandle.seek(offset)
ifdsize = struct.unpack("<Q", fhandle.read(8))[0]
for i in range(ifdsize):
tag, datatype, count, data = struct.unpack("<HHQQ", fhandle.read(20))
if tag == 256:
width = data
elif tag == 257:
height = data
if width != -1 and height != -1:
break
if width == -1 or height == -1:
raise ValueError("Invalid BigTIFF file: width and/or height IDS entries are missing.")
# handle SVGs
elif size >= 5 and (head.startswith(b'<?xml') or head.startswith(b'<svg')):
fhandle.seek(0)
data = fhandle.read(1024)
try:
data = data.decode('utf-8')
width = re.search(r'[^-]width="(.*?)"', data).group(1)
height = re.search(r'[^-]height="(.*?)"', data).group(1)
except Exception:
raise ValueError("Invalid SVG file")
width = _convertToPx(width)
height = _convertToPx(height)
# handle Netpbm
elif head[:1] == b"P" and head[1:2] in b"123456":
fhandle.seek(2)
sizes = []
while True:
next_chr = fhandle.read(1)
if next_chr.isspace():
continue
if next_chr == b"":
raise ValueError("Invalid Netpbm file")
if next_chr == b"#":
fhandle.readline()
continue
if not next_chr.isdigit():
raise ValueError("Invalid character found on Netpbm file")
size = next_chr
next_chr = fhandle.read(1)
while next_chr.isdigit():
size += next_chr
next_chr = fhandle.read(1)
sizes.append(int(size))
if len(sizes) == 2:
break
fhandle.seek(-1, os.SEEK_CUR)
width, height = sizes
finally:
fhandle.close()
return width, height
def getDPI(filepath):
| """
Return (x DPI, y DPI) for a given img file content
no requirements
:type filepath: Union[bytes, str, pathlib.Path]
:rtype Tuple[int, int]
"""
xDPI = -1
yDPI = -1
if not isinstance(filepath, bytes):
filepath = str(filepath)
with open(filepath, 'rb') as fhandle:
head = fhandle.read(24)
size = len(head)
# handle GIFs
# GIFs doesn't have density
if size >= 10 and head[:6] in (b'GIF87a', b'GIF89a'):
pass
# see png edition spec bytes are below chunk length then and finally the
elif size >= 24 and head.startswith(b'\211PNG\r\n\032\n'):
chunkOffset = 8
chunk = head[8:]
while True:
chunkType = chunk[4:8]
if chunkType == b'pHYs':
try:
xDensity, yDensity, unit = struct.unpack(">LLB", chunk[8:])
except struct.error:
raise ValueError("Invalid PNG file")
if unit:
xDPI = _convertToDPI(xDensity, _UNIT_1M)
yDPI = _convertToDPI(yDensity, _UNIT_1M)
else: # no unit
xDPI = xDensity
yDPI = yDensity
break
elif chunkType == b'IDAT':
break
else:
try:
dataSize, = struct.unpack(">L", chunk[0:4])
except struct.error:
raise ValueError("Invalid PNG file")
chunkOffset += dataSize + 12
fhandle.seek(chunkOffset)
chunk = fhandle.read(17)
# handle JPEGs
elif size >= 2 and head.startswith(b'\377\330'):
try:
fhandle.seek(0) # Read 0xff next
size = 2
ftype = 0
while not 0xc0 <= ftype <= 0xcf:
if ftype == 0xe0: # APP0 marker
fhandle.seek(7, 1)
unit, xDensity, yDensity = struct.unpack(">BHH", fhandle.read(5))
if unit == 1 or unit == 0:
xDPI = xDensity
yDPI = yDensity
elif unit == 2:
xDPI = _convertToDPI(xDensity, _UNIT_CM)
yDPI = _convertToDPI(yDensity, _UNIT_CM)
break
fhandle.seek(size, 1)
byte = fhandle.read(1)
while ord(byte) == 0xff:
byte = fhandle.read(1)
ftype = ord(byte)
size = struct.unpack('>H', fhandle.read(2))[0] - 2
except struct.error:
raise ValueError("Invalid JPEG file")
# handle JPEG2000s
elif size >= 12 and head.startswith(b'\x00\x00\x00\x0cjP \r\n\x87\n'):
fhandle.seek(32)
# skip JP2 image header box
headerSize = struct.unpack('>L', fhandle.read(4))[0] - 8
fhandle.seek(4, 1)
foundResBox = False
try:
while headerSize > 0:
boxHeader = fhandle.read(8)
boxType = boxHeader[4:]
if boxType == b'res ': # find resolution super box
foundResBox = True
headerSize -= 8
break
boxSize, = struct.unpack('>L', boxHeader[:4])
fhandle.seek(boxSize - 8, 1)
headerSize -= boxSize
if foundResBox:
while headerSize > 0:
boxHeader = fhandle.read(8)
boxType = boxHeader[4:]
if boxType == b'resd': # Display resolution box
yDensity, xDensity, yUnit, xUnit = struct.unpack(">HHBB", fhandle.read(10))
xDPI = _convertToDPI(xDensity, xUnit)
yDPI = _convertToDPI(yDensity, yUnit)
break
boxSize, = struct.unpack('>L', boxHeader[:4])
fhandle.seek(boxSize - 8, 1)
headerSize -= boxSize
except struct.error as e:
raise ValueError("Invalid JPEG2000 file")
return xDPI, yDPI |
|
open_stream.go | package link
import (
"errors"
"strconv"
"github.com/aperturerobotics/bifrost/peer"
"github.com/aperturerobotics/bifrost/protocol"
"github.com/aperturerobotics/bifrost/stream"
"github.com/aperturerobotics/controllerbus/directive"
)
// OpenStreamWithPeer is a directive to open a stream with a peer.
// Not de-duplicated, intended to be used with OneOff.
type OpenStreamWithPeer interface {
// Directive indicates OpenStreamWithPeer is a directive.
directive.Directive
// OpenStreamWithPeerProtocolID returns the protocol ID to negotiate with the peer.
// Cannot be empty.
OpenStreamWPProtocolID() protocol.ID
// OpenStreamWithPeerTargetPeerID returns the target peer ID.
// Cannot be empty.
OpenStreamWPTargetPeerID() peer.ID
// OpenStreamWithPeerOpenOpts returns the open stream options.
// Cannot be empty.
OpenStreamWPOpenOpts() stream.OpenOpts
// OpenStreamWithPeerSourcePeerID returns the source peer ID.
// Can be empty.
OpenStreamWPSourcePeerID() peer.ID
// OpenStreamWithPeerTransportConstraint returns a specific transport ID we want.
// Can be empty.
OpenStreamWPTransportConstraint() uint64
}
// openStreamWithPeer implements OpenStreamWithPeer with a peer ID constraint.
// Value: link.MountedStream
type openStreamWithPeer struct {
protocolID protocol.ID
sourcePeerID, targetPeerID peer.ID
transportConstraint uint64
openOpts stream.OpenOpts
}
// NewOpenStreamWithPeer constructs a new openStreamWithPeer directive.
func NewOpenStreamWithPeer(
protocolID protocol.ID,
sourcePeerID, targetPeerID peer.ID,
transportConstraint uint64,
openOpts stream.OpenOpts,
) OpenStreamWithPeer {
return &openStreamWithPeer{
protocolID: protocolID,
sourcePeerID: sourcePeerID,
targetPeerID: targetPeerID,
transportConstraint: transportConstraint,
openOpts: openOpts,
}
}
// OpenStreamWithPeerProtocolID returns a specific protocol ID to negotiate.
func (d *openStreamWithPeer) OpenStreamWPProtocolID() protocol.ID {
return d.protocolID
}
// OpenStreamWithPeerSourcePeerID returns a specific peer ID node we are originating from.
// Can be empty.
func (d *openStreamWithPeer) OpenStreamWPSourcePeerID() peer.ID {
return d.sourcePeerID
}
// OpenStreamWithPeerTargetPeerID returns a specific peer ID node we are looking for.
func (d *openStreamWithPeer) OpenStreamWPTargetPeerID() peer.ID {
return d.targetPeerID
}
// OpenStreamWithPeerTransportConstraint returns the transport ID constraint.
// If empty, any transport is matched.
func (d *openStreamWithPeer) OpenStreamWPTransportConstraint() uint64 {
return d.transportConstraint
}
// OpenStreamWithPeerOpenOpts returns the open options.
func (d *openStreamWithPeer) OpenStreamWPOpenOpts() stream.OpenOpts {
return d.openOpts
}
// Validate validates the directive.
// This is a cursory validation to see if the values "look correct."
func (d *openStreamWithPeer) Validate() error {
if len(d.targetPeerID) == 0 {
return errors.New("peer id constraint required")
}
if len(d.protocolID) == 0 |
return nil
}
// GetValueOptions returns options relating to value handling.
func (d *openStreamWithPeer) GetValueOptions() directive.ValueOptions {
return directive.ValueOptions{
MaxValueCount: 1,
MaxValueHardCap: true,
}
}
// IsEquivalent checks if the other directive is equivalent. If two
// directives are equivalent, and the new directive does not superceed the
// old, then the new directive will be merged (de-duplicated) into the old.
func (d *openStreamWithPeer) IsEquivalent(other directive.Directive) bool {
return false
}
// Superceeds checks if the directive overrides another.
// The other directive will be canceled if superceded.
func (d *openStreamWithPeer) Superceeds(other directive.Directive) bool {
return false
}
// GetName returns the directive's type name.
// This is not necessarily unique, and is primarily intended for display.
func (d *openStreamWithPeer) GetName() string {
return "OpenStreamWithPeer"
}
// GetDebugVals returns the directive arguments as key/value pairs.
// This should be something like param1="test", param2="test".
// This is not necessarily unique, and is primarily intended for display.
func (d *openStreamWithPeer) GetDebugVals() directive.DebugValues {
vals := directive.NewDebugValues()
vals["protocol-id"] = []string{string(d.OpenStreamWPProtocolID())}
vals["target-peer"] = []string{d.OpenStreamWPTargetPeerID().Pretty()}
vals["source-peer"] = []string{d.OpenStreamWPSourcePeerID().Pretty()}
if tpt := d.OpenStreamWPTransportConstraint(); tpt != 0 {
vals["transport"] = []string{strconv.FormatUint(tpt, 10)}
}
return vals
}
// _ is a type constraint
var _ OpenStreamWithPeer = ((*openStreamWithPeer)(nil))
| {
return errors.New("protocol id required")
} |
tasks.py | import logging
from django.conf import settings
from django.contrib.auth.models import User
from django.core.mail import send_mail
from c3nav.celery import app
logger = logging.getLogger('c3nav')
@app.task(bind=True, max_retries=3)
def send_changeset_proposed_notification(self, pk, author, title, description):
subject = '[c3nav] New Changeset by %s: %s' % (author, title)
for user in User.objects.filter(permissions__review_changesets=True):
if not user.email:
continue
text = (
('Hi %s!\n\n' % user.username) +
('A new Changeset has been proposed by %s:\n\n' % author) +
('---\n\n') +
(title+'\n\n'+description) | )
send_mail(subject, text, settings.MAIL_FROM, [user.email]) | |
attacktocard_base.ts | import CardBase from './card_base';
interface AttackToCardContext {
attackerCard: CardBase;
card: CardBase; | }
export default AttackToCardContext; |
|
views.py | import json
import re
from string import rstrip
from urlparse import urljoin
from django.http import HttpResponseRedirect
from django.shortcuts import render
from cartoview.app_manager.models import *
from forms import BasicAppForm
from geonode import settings
from .models import *
APP_NAME = 'cartoview_esri_app_starter'
VIEW_TPL = "%s/index.html" % APP_NAME
NEW_EDIT_TPL = "%s/new.html" % APP_NAME
def view(request, resource_id):
basic_app_obj = BasicEsriApp.objects.get(pk=resource_id)
config_json = json.loads(remove_json_comments(basic_app_obj.config))
config_json['webmap'] = str(basic_app_obj.web_map_id)
config_json['title'] = basic_app_obj.title
config_json['description'] = basic_app_obj.abstract
config_json['sharinghost'] = rstrip(str(urljoin(settings.SITEURL, reverse("arcportal_home"))), '/')
context = {'config_json': json.dumps(config_json)}
return render(request, VIEW_TPL, context)
def save(request, app_form):
basic_app_obj = app_form.save(commit=False)
# get app by name and add it to app instance.
basic_app_obj.app = App.objects.get(name=APP_NAME)
# get current user and add it as app instance owner.
basic_app_obj.owner = request.user
basic_app_obj.save()
# redirect to app instance details after saving instance.
return HttpResponseRedirect(reverse('appinstance_detail', kwargs={'appinstanceid': basic_app_obj.pk}))
#
def new(request):
if request.method == 'POST':
app_form = BasicAppForm(request.POST, prefix='app_form') | else:
# form is invalid.
context = {'app_form': BasicAppForm(prefix='app_form')}
return render(request, NEW_EDIT_TPL, context)
def edit(request, resource_id):
basic_app_obj = BasicEsriApp.objects.get(pk=resource_id)
if request.method == 'POST':
app_form = BasicAppForm(request.POST, prefix='app_form', instance=basic_app_obj)
return save(request, app_form)
else:
# form is invalid.
context = {'app_form': BasicAppForm(prefix='app_form', instance=basic_app_obj)}
return render(request, NEW_EDIT_TPL, context)
# ------------- Utility functions to handle json comments -------------
# Regular expression for comments
comment_re = re.compile(
'(^)?[^\S\n]*/(?:\*(.*?)\*/[^\S\n]*|/[^\n]*)($)?',
re.DOTALL | re.MULTILINE
)
comments_exception = {'http://': 'HTTP_PLACE_HOLDER', 'https://': 'HTTPS_PLACE_HOLDER',
'location.protocol + "//': 'LOCATION_PLACE_HOLDER'}
def remove_json_comments(json_string):
""" Parse a JSON file
First remove comments and then use the json module package
Comments look like :
// ...
or
/*
...
*/
"""
content = json_string # ''.join(json_string)
for key in comments_exception:
content = content.replace(key, comments_exception[key])
# Looking for comments
match = comment_re.search(content)
while match:
# single line comment
content = content[:match.start()] + content[match.end():]
match = comment_re.search(content)
for key in comments_exception:
content = content.replace(comments_exception[key], key)
# Return json
return content | return save(request, app_form)
|
action.js | // @flow
/* global window, localStorage */
import type {LocaleNameType} from './const';
import {localeConst} from './const';
export type SetLocaleType = {|
+type: 'locale__set-locale',
+payload: {|
+localeName: LocaleNameType,
|},
|};
export function | (localeName: LocaleNameType): SetLocaleType {
if (typeof window !== 'undefined') {
console.log('---> write to localStorage', localeConst.key.localStorage.localeName, localeName);
localStorage.setItem(localeConst.key.localStorage.localeName, localeName);
}
return {
type: localeConst.action.type.setLocale,
payload: {
localeName,
},
};
}
| setLocale |
sizeof_test.go | // Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !nacl
package obj
import (
"reflect"
"testing"
"unsafe"
)
// Assert that the size of important structures do not change unexpectedly.
func | (t *testing.T) {
const _64bit = unsafe.Sizeof(uintptr(0)) == 8
var tests = []struct {
val interface{} // type as a value
_32bit uintptr // size on 32bit platforms
_64bit uintptr // size on 64bit platforms
}{
{Addr{}, 52, 80},
{LSym{}, 80, 136},
{Prog{}, 196, 288},
}
for _, tt := range tests {
want := tt._32bit
if _64bit {
want = tt._64bit
}
got := reflect.TypeOf(tt.val).Size()
if want != got {
t.Errorf("unsafe.Sizeof(%T) = %d, want %d", tt.val, got, want)
}
}
}
| TestSizeof |
settings.py | """
Django settings for neighbour project.
Generated by 'django-admin startproject' using Django 2.1.3.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
import django_heroku
import dj_database_url
from decouple import config, Csv
MODE = config("MODE", default="dev")
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Email configurations remember to install python-decouple
EMAIL_USE_TLS = config('EMAIL_USE_TLS')
EMAIL_HOST = config('EMAIL_HOST')
EMAIL_PORT = config('EMAIL_PORT')
EMAIL_HOST_USER = config('EMAIL_HOST_USER')
EMAIL_HOST_PASSWORD = config('EMAIL_HOST_PASSWORD')
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = config('SECRET_KEY')
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = config('DEBUG', default=False, cast=bool)
# development
if config('MODE') == "dev":
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': config('DB_NAME'),
'USER': config('DB_USER'),
'PASSWORD': config('DB_PASSWORD'),
'HOST': config('DB_HOST'),
'PORT': '',
}
}
# production
else:
DATABASES = {
'default': dj_database_url.config(
default=config('DATABASE_URL')
)
}
db_from_env = dj_database_url.config(conn_max_age=500)
DATABASES['default'].update(db_from_env)
ALLOWED_HOSTS = config('ALLOWED_HOSTS', cast=Csv())
| # Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'watch',
'bootstrap3',
'tinymce',
'bootstrap4',
]
MIDDLEWARE = [
'whitenoise.middleware.WhiteNoiseMiddleware',
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'neighbour.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'neighbour.wsgi.application'
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
STATIC_URL = '/static/'
STATICFILES_DIR = [
os.path.join(BASE_DIR, 'static')
]
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
# configuring the location for media
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
# Configure Django App for Heroku.
django_heroku.settings(locals()) | |
mutex.rs | use alloc::boxed::Box;
use core::{
cell::UnsafeCell,
fmt,
future::Future,
mem::MaybeUninit,
ops::{Deref, DerefMut},
pin::Pin,
sync::atomic::{AtomicU8, Ordering},
task::{Context, Poll, Waker},
};
use crate::sync::linked_list::{LinkedList, Node};
/// A mutual exclusion primitive useful for protecting shared data.
///
/// The mutex can be statically initialized or created via a [`new`]
/// constructor. Each mutex has a type parameter which represents the data that
/// it is protecting. The data can only be accessed through the RAII guards
/// returned from [`lock`] and [`try_lock`], which guarantees that the data is
/// only ever accessed when the mutex is locked.
///
/// [`new`]: Self::new
/// [`lock`]: Self::lock
/// [`try_lock`]: Self::try_lock
pub struct Mutex<T: ?Sized> {
state: AtomicU8,
waiters: LinkedList<Waiter>,
data: UnsafeCell<T>,
}
const DATA_LOCKED: u8 = 1 << 0;
const WAITERS_LOCKED: u8 = 1 << 1;
/// An RAII implementation of a "scoped lock" of a mutex. When this structure is
/// dropped (falls out of scope), the lock will be unlocked.
///
/// The data protected by the mutex can be accessed through this guard via its
/// [`Deref`] and [`DerefMut`] implementations.
///
/// This structure is created by the [`lock`] and [`try_lock`] methods on
/// [`Mutex`].
///
/// [`lock`]: Mutex::lock
/// [`try_lock`]: Mutex::try_lock
#[must_use = "if unused the Mutex will immediately unlock"]
pub struct MutexGuard<'a, T: ?Sized> {
mutex: &'a Mutex<T>,
}
/// A future which resolves when the target mutex has been successfully
/// acquired.
pub struct MutexLockFuture<'a, T: ?Sized> {
mutex: &'a Mutex<T>,
waiter: Option<*const Node<Waiter>>,
}
struct Waiter {
state: AtomicU8,
wakers: [UnsafeCell<MaybeUninit<Waker>>; 2],
}
const WAITER_INDEX: u8 = 1 << 0;
const WAITER_DISABLED: u8 = 1 << 1;
unsafe impl<T: ?Sized + Send> Send for Mutex<T> {}
unsafe impl<T: ?Sized + Send> Sync for Mutex<T> {}
unsafe impl<T: ?Sized + Send> Send for MutexGuard<'_, T> {}
unsafe impl<T: ?Sized + Sync> Sync for MutexGuard<'_, T> {}
unsafe impl<T: ?Sized + Send> Send for MutexLockFuture<'_, T> {}
impl<T> Mutex<T> {
/// Creates a new mutex in an unlocked state ready for use.
///
/// # Examples
///
/// ```
/// use drone_core::sync::Mutex;
///
/// let mutex = Mutex::new(0);
/// ```
#[inline]
pub const fn new(data: T) -> Self {
Self { state: AtomicU8::new(0), waiters: LinkedList::new(), data: UnsafeCell::new(data) }
}
/// Consumes this mutex, returning the underlying data.
///
/// # Examples
///
/// ```
/// use drone_core::sync::Mutex;
///
/// let mutex = Mutex::new(0);
/// assert_eq!(mutex.into_inner(), 0);
/// ```
#[inline]
pub fn into_inner(self) -> T {
self.data.into_inner()
}
}
impl<T: ?Sized> Mutex<T> {
/// Attempts to acquire this lock immediately.
///
/// If the lock could not be acquired at this time, then [`None`] is
/// returned. Otherwise, an RAII guard is returned. The lock will be
/// unlocked when the guard is dropped.
#[inline]
pub fn try_lock(&self) -> Option<MutexGuard<'_, T>> {
if self.state.fetch_or(DATA_LOCKED, Ordering::Acquire) & DATA_LOCKED == 0 {
Some(MutexGuard { mutex: self })
} else {
None
}
}
/// Acquires this lock asynchronously.
///
/// This method returns a future that will resolve once the lock has been
/// successfully acquired.
#[inline]
pub fn lock(&self) -> MutexLockFuture<'_, T> {
MutexLockFuture { mutex: self, waiter: None }
}
/// Returns a mutable reference to the underlying data.
///
/// Since this call borrows the `Mutex` mutably, no actual locking needs to
/// take place -- the mutable borrow statically guarantees no locks exist.
///
/// # Examples
///
/// ```
/// use drone_core::sync::Mutex;
///
/// let mut mutex = Mutex::new(0);
/// *mutex.get_mut() = 10;
/// assert_eq!(*mutex.try_lock().unwrap(), 10);
/// ```
#[inline]
pub fn get_mut(&mut self) -> &mut T {
unsafe { &mut *self.data.get() }
}
fn unlock(&self) {
let waiters_lock =
self.state.fetch_or(WAITERS_LOCKED, Ordering::Acquire) & WAITERS_LOCKED == 0;
if waiters_lock {
// This is the only place where nodes can be removed.
unsafe {
self.waiters
.drain_filter_raw(|waiter| (*waiter).is_disabled())
.for_each(|node| drop(Box::from_raw(node)));
}
}
self.state.fetch_and(!DATA_LOCKED, Ordering::Release);
// At this stage no nodes can't be removed.
for waiter in unsafe { self.waiters.iter_mut_unchecked() } {
if waiter.wake() {
break;
}
}
if waiters_lock {
self.state.fetch_and(!WAITERS_LOCKED, Ordering::Release);
}
} | }
impl<T: ?Sized> MutexLockFuture<'_, T> {
fn disable_waiter(&mut self) {
if let Some(waiter) = self.waiter.take() {
unsafe { (*waiter).disable() };
}
}
}
impl<'a, T: ?Sized> Future for MutexLockFuture<'a, T> {
type Output = MutexGuard<'a, T>;
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
if let Some(lock) = self.mutex.try_lock() {
self.disable_waiter();
return Poll::Ready(lock);
}
if let Some(waiter) = self.waiter {
unsafe { (*waiter).register(cx.waker()) };
} else {
let waiter = Box::into_raw(Box::new(Node::from(Waiter::from(cx.waker().clone()))));
self.waiter = Some(waiter);
unsafe { self.mutex.waiters.push_raw(waiter) };
}
if let Some(lock) = self.mutex.try_lock() {
self.disable_waiter();
return Poll::Ready(lock);
}
Poll::Pending
}
}
impl<T: ?Sized> Drop for MutexLockFuture<'_, T> {
fn drop(&mut self) {
if let Some(waiter) = self.waiter {
if unsafe { (*waiter).disable() } & WAITER_DISABLED != 0 {
// This future was awoken, but then dropped before it could
// acquire the lock. Try to lock the mutex and then immediately
// unlock to wake up another thread.
drop(self.mutex.try_lock());
}
}
}
}
impl Waiter {
fn register(&self, waker: &Waker) {
let state = self.state.load(Ordering::Acquire);
let mut index = (state & WAITER_INDEX) as usize;
if state & WAITER_DISABLED != 0
|| !waker
.will_wake(unsafe { (*self.wakers.get_unchecked(index).get()).assume_init_ref() })
{
index = (index + 1) % 2;
unsafe { (*self.wakers.get_unchecked(index).get()).write(waker.clone()) };
self.state.store(index as u8, Ordering::Release);
}
}
fn wake(&self) -> bool {
let state = self.disable();
if state & WAITER_DISABLED == 0 {
let index = (state & WAITER_INDEX) as usize;
unsafe { (*self.wakers.get_unchecked(index).get()).assume_init_read().wake() };
true
} else {
false
}
}
fn disable(&self) -> u8 {
self.state.fetch_or(WAITER_DISABLED, Ordering::Relaxed)
}
fn is_disabled(&self) -> bool {
self.state.load(Ordering::Relaxed) & WAITER_DISABLED != 0
}
}
impl From<Waker> for Waiter {
fn from(waker: Waker) -> Self {
Self {
state: AtomicU8::new(0),
wakers: [
UnsafeCell::new(MaybeUninit::new(waker)),
UnsafeCell::new(MaybeUninit::uninit()),
],
}
}
}
impl<T> From<T> for Mutex<T> {
/// Creates a new mutex in an unlocked state ready for use. This is
/// equivalent to [`Mutex::new`].
#[inline]
fn from(data: T) -> Self {
Self::new(data)
}
}
impl<T: ?Sized + Default> Default for Mutex<T> {
/// Creates a `Mutex<T>`, with the `Default` value for T.
#[inline]
fn default() -> Self {
Self::new(Default::default())
}
}
impl<T: ?Sized + fmt::Debug> fmt::Debug for Mutex<T> {
#[allow(clippy::option_if_let_else)]
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
if let Some(guard) = self.try_lock() {
f.debug_struct("Mutex").field("data", &&*guard).finish()
} else {
struct LockedPlaceholder;
impl fmt::Debug for LockedPlaceholder {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.write_str("<locked>")
}
}
f.debug_struct("Mutex").field("data", &LockedPlaceholder).finish()
}
}
}
impl<T: ?Sized> Deref for MutexGuard<'_, T> {
type Target = T;
#[inline]
fn deref(&self) -> &T {
unsafe { &*self.mutex.data.get() }
}
}
impl<T: ?Sized> DerefMut for MutexGuard<'_, T> {
#[inline]
fn deref_mut(&mut self) -> &mut T {
unsafe { &mut *self.mutex.data.get() }
}
}
impl<T: ?Sized> Drop for MutexGuard<'_, T> {
#[inline]
fn drop(&mut self) {
self.mutex.unlock();
}
}
impl<T: ?Sized + fmt::Debug> fmt::Debug for MutexGuard<'_, T> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("MutexGuard").field("mutex", &self.mutex).finish()
}
}
impl<T: ?Sized + fmt::Display> fmt::Display for MutexGuard<'_, T> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
(**self).fmt(f)
}
}
#[cfg(test)]
mod tests {
use super::*;
use alloc::sync::Arc;
use core::{
future::Future,
sync::atomic::{AtomicUsize, Ordering},
task::{Context, Poll, RawWaker, RawWakerVTable, Waker},
};
use futures::pin_mut;
#[derive(Eq, PartialEq, Debug)]
struct NonCopy(i32);
struct Counter(AtomicUsize);
impl Counter {
fn to_waker(&'static self) -> Waker {
unsafe fn clone(counter: *const ()) -> RawWaker {
RawWaker::new(counter, &VTABLE)
}
unsafe fn wake(counter: *const ()) {
unsafe { (*(counter as *const Counter)).0.fetch_add(1, Ordering::SeqCst) };
}
static VTABLE: RawWakerVTable = RawWakerVTable::new(clone, wake, wake, drop);
unsafe { Waker::from_raw(RawWaker::new(self as *const _ as *const (), &VTABLE)) }
}
}
#[test]
fn try_lock() {
let m = Mutex::new(());
*m.try_lock().unwrap() = ();
}
#[test]
fn lock() {
static COUNTER: Counter = Counter(AtomicUsize::new(0));
let waker = COUNTER.to_waker();
let mut cx = Context::from_waker(&waker);
let a = Arc::new(Mutex::new(1));
let b = Arc::clone(&a);
let c = Arc::clone(&b);
let d = Arc::new(Mutex::new(0));
let e = Arc::clone(&d);
let f = async move {
let mut b = b.lock().await;
let mut _e = e.lock().await;
*b *= 3;
};
let g = async move {
let mut c = c.lock().await;
*c *= 5;
};
pin_mut!(f);
pin_mut!(g);
let d = d.try_lock().unwrap();
assert_eq!(*d, 0);
assert_eq!(f.as_mut().poll(&mut cx), Poll::Pending);
assert_eq!(g.as_mut().poll(&mut cx), Poll::Pending);
assert_eq!(COUNTER.0.load(Ordering::SeqCst), 0);
drop(d);
assert_eq!(COUNTER.0.load(Ordering::SeqCst), 1);
assert_eq!(g.as_mut().poll(&mut cx), Poll::Pending);
assert_eq!(f.as_mut().poll(&mut cx), Poll::Ready(()));
assert_eq!(COUNTER.0.load(Ordering::SeqCst), 2);
assert!(!a.waiters.is_empty());
assert_eq!(g.as_mut().poll(&mut cx), Poll::Ready(()));
assert!(a.waiters.is_empty());
assert_eq!(*a.try_lock().unwrap(), 15);
}
#[test]
fn into_inner() {
let m = Mutex::new(NonCopy(10));
assert_eq!(m.into_inner(), NonCopy(10));
}
#[test]
fn into_inner_drop() {
struct Foo(Arc<AtomicUsize>);
impl Drop for Foo {
fn drop(&mut self) {
self.0.fetch_add(1, Ordering::SeqCst);
}
}
let num_drops = Arc::new(AtomicUsize::new(0));
let m = Mutex::new(Foo(num_drops.clone()));
assert_eq!(num_drops.load(Ordering::SeqCst), 0);
{
let _inner = m.into_inner();
assert_eq!(num_drops.load(Ordering::SeqCst), 0);
}
assert_eq!(num_drops.load(Ordering::SeqCst), 1);
}
#[test]
fn get_mut() {
let mut m = Mutex::new(NonCopy(10));
*m.get_mut() = NonCopy(20);
assert_eq!(m.into_inner(), NonCopy(20));
}
#[test]
fn mutex_unsized() {
let mutex: &Mutex<[i32]> = &Mutex::new([1, 2, 3]);
{
let b = &mut *mutex.try_lock().unwrap();
b[0] = 4;
b[2] = 5;
}
let comp: &[i32] = &[4, 2, 5];
assert_eq!(&*mutex.try_lock().unwrap(), comp);
}
} | |
api_op_DescribeTransitGatewayAttachments.go | // Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
package ec2
import (
"context"
"github.com/aws/aws-sdk-go-v2/aws"
"github.com/aws/aws-sdk-go-v2/internal/awsutil"
)
// Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeTransitGatewayAttachmentsRequest
type DescribeTransitGatewayAttachmentsInput struct {
_ struct{} `type:"structure"`
// Checks whether you have the required permissions for the action, without
// actually making the request, and provides an error response. If you have
// the required permissions, the error response is DryRunOperation. Otherwise,
// it is UnauthorizedOperation.
DryRun *bool `type:"boolean"`
// One or more filters. The possible values are:
//
// * association.state - The state of the association (associating | associated
// | disassociating).
//
// * association.transit-gateway-route-table-id - The ID of the route table
// for the transit gateway.
//
// * resource-id - The ID of the resource.
//
// * resource-owner-id - The ID of the AWS account that owns the resource.
//
// * resource-type - The resource type (vpc | vpn).
//
// * state - The state of the attachment (available | deleted | deleting
// | failed | modifying | pendingAcceptance | pending | rollingBack | rejected
// | rejecting).
//
// * transit-gateway-attachment-id - The ID of the attachment.
//
// * transit-gateway-id - The ID of the transit gateway.
//
// * transit-gateway-owner-id - The ID of the AWS account that owns the transit
// gateway.
Filters []Filter `locationName:"Filter" locationNameList:"Filter" type:"list"`
// The maximum number of results to return with a single call. To retrieve the
// remaining results, make another call with the returned nextToken value.
MaxResults *int64 `min:"5" type:"integer"`
// The token for the next page of results.
NextToken *string `type:"string"`
// The IDs of the attachments.
TransitGatewayAttachmentIds []string `type:"list"`
}
// String returns the string representation
func (s DescribeTransitGatewayAttachmentsInput) String() string {
return awsutil.Prettify(s)
}
// Validate inspects the fields of the type to determine if they are valid.
func (s *DescribeTransitGatewayAttachmentsInput) Validate() error {
invalidParams := aws.ErrInvalidParams{Context: "DescribeTransitGatewayAttachmentsInput"}
if s.MaxResults != nil && *s.MaxResults < 5 {
invalidParams.Add(aws.NewErrParamMinValue("MaxResults", 5))
}
if invalidParams.Len() > 0 |
return nil
}
// Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeTransitGatewayAttachmentsResult
type DescribeTransitGatewayAttachmentsOutput struct {
_ struct{} `type:"structure"`
// The token to use to retrieve the next page of results. This value is null
// when there are no more results to return.
NextToken *string `locationName:"nextToken" type:"string"`
// Information about the attachments.
TransitGatewayAttachments []TransitGatewayAttachment `locationName:"transitGatewayAttachments" locationNameList:"item" type:"list"`
}
// String returns the string representation
func (s DescribeTransitGatewayAttachmentsOutput) String() string {
return awsutil.Prettify(s)
}
const opDescribeTransitGatewayAttachments = "DescribeTransitGatewayAttachments"
// DescribeTransitGatewayAttachmentsRequest returns a request value for making API operation for
// Amazon Elastic Compute Cloud.
//
// Describes one or more attachments between resources and transit gateways.
// By default, all attachments are described. Alternatively, you can filter
// the results by attachment ID, attachment state, resource ID, or resource
// owner.
//
// // Example sending a request using DescribeTransitGatewayAttachmentsRequest.
// req := client.DescribeTransitGatewayAttachmentsRequest(params)
// resp, err := req.Send(context.TODO())
// if err == nil {
// fmt.Println(resp)
// }
//
// Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeTransitGatewayAttachments
func (c *Client) DescribeTransitGatewayAttachmentsRequest(input *DescribeTransitGatewayAttachmentsInput) DescribeTransitGatewayAttachmentsRequest {
op := &aws.Operation{
Name: opDescribeTransitGatewayAttachments,
HTTPMethod: "POST",
HTTPPath: "/",
Paginator: &aws.Paginator{
InputTokens: []string{"NextToken"},
OutputTokens: []string{"NextToken"},
LimitToken: "MaxResults",
TruncationToken: "",
},
}
if input == nil {
input = &DescribeTransitGatewayAttachmentsInput{}
}
req := c.newRequest(op, input, &DescribeTransitGatewayAttachmentsOutput{})
return DescribeTransitGatewayAttachmentsRequest{Request: req, Input: input, Copy: c.DescribeTransitGatewayAttachmentsRequest}
}
// DescribeTransitGatewayAttachmentsRequest is the request type for the
// DescribeTransitGatewayAttachments API operation.
type DescribeTransitGatewayAttachmentsRequest struct {
*aws.Request
Input *DescribeTransitGatewayAttachmentsInput
Copy func(*DescribeTransitGatewayAttachmentsInput) DescribeTransitGatewayAttachmentsRequest
}
// Send marshals and sends the DescribeTransitGatewayAttachments API request.
func (r DescribeTransitGatewayAttachmentsRequest) Send(ctx context.Context) (*DescribeTransitGatewayAttachmentsResponse, error) {
r.Request.SetContext(ctx)
err := r.Request.Send()
if err != nil {
return nil, err
}
resp := &DescribeTransitGatewayAttachmentsResponse{
DescribeTransitGatewayAttachmentsOutput: r.Request.Data.(*DescribeTransitGatewayAttachmentsOutput),
response: &aws.Response{Request: r.Request},
}
return resp, nil
}
// NewDescribeTransitGatewayAttachmentsRequestPaginator returns a paginator for DescribeTransitGatewayAttachments.
// Use Next method to get the next page, and CurrentPage to get the current
// response page from the paginator. Next will return false, if there are
// no more pages, or an error was encountered.
//
// Note: This operation can generate multiple requests to a service.
//
// // Example iterating over pages.
// req := client.DescribeTransitGatewayAttachmentsRequest(input)
// p := ec2.NewDescribeTransitGatewayAttachmentsRequestPaginator(req)
//
// for p.Next(context.TODO()) {
// page := p.CurrentPage()
// }
//
// if err := p.Err(); err != nil {
// return err
// }
//
func NewDescribeTransitGatewayAttachmentsPaginator(req DescribeTransitGatewayAttachmentsRequest) DescribeTransitGatewayAttachmentsPaginator {
return DescribeTransitGatewayAttachmentsPaginator{
Pager: aws.Pager{
NewRequest: func(ctx context.Context) (*aws.Request, error) {
var inCpy *DescribeTransitGatewayAttachmentsInput
if req.Input != nil {
tmp := *req.Input
inCpy = &tmp
}
newReq := req.Copy(inCpy)
newReq.SetContext(ctx)
return newReq.Request, nil
},
},
}
}
// DescribeTransitGatewayAttachmentsPaginator is used to paginate the request. This can be done by
// calling Next and CurrentPage.
type DescribeTransitGatewayAttachmentsPaginator struct {
aws.Pager
}
func (p *DescribeTransitGatewayAttachmentsPaginator) CurrentPage() *DescribeTransitGatewayAttachmentsOutput {
return p.Pager.CurrentPage().(*DescribeTransitGatewayAttachmentsOutput)
}
// DescribeTransitGatewayAttachmentsResponse is the response type for the
// DescribeTransitGatewayAttachments API operation.
type DescribeTransitGatewayAttachmentsResponse struct {
*DescribeTransitGatewayAttachmentsOutput
response *aws.Response
}
// SDKResponseMetdata returns the response metadata for the
// DescribeTransitGatewayAttachments request.
func (r *DescribeTransitGatewayAttachmentsResponse) SDKResponseMetdata() *aws.Response {
return r.response
}
| {
return invalidParams
} |
upgrade.rs | use std::result;
use std::str;
use base64::display::Base64Display;
use bytes::{Buf, BytesMut};
use httparse::{self, Header, Response};
use sha1::{self, Sha1};
use tokio_util::codec::{Decoder, Encoder};
use crate::{Error, Result};
type Sha1Digest = [u8; sha1::DIGEST_LENGTH];
fn build_ws_accept(key: &str) -> Sha1Digest {
let mut s = Sha1::new();
s.update(key.as_bytes());
s.update(b"258EAFA5-E914-47DA-95CA-C5AB0DC85B11");
s.digest().bytes()
}
fn header<'a, 'header: 'a>(headers: &'a [Header<'header>], name: &'a str) -> result::Result<&'header [u8], String> {
let header = headers
.iter()
.find(|header| header.name.eq_ignore_ascii_case(name))
.ok_or_else(|| format!("server didn't respond with {name} header", name = name))?;
Ok(header.value)
}
fn validate_server_response(expected_ws_accept: &Sha1Digest, data: &[u8]) -> Result<Option<usize>> {
let mut headers = [httparse::EMPTY_HEADER; 20];
let mut response = Response::new(&mut headers);
let status = response.parse(data)?;
if !status.is_complete() {
return Ok(None);
}
let response_len = status.unwrap();
let code = response.code.unwrap();
if code != 101 {
return Err(format!("server responded with HTTP error {code}", code = code).into());
}
let ws_accept_header = header(response.headers, "Sec-WebSocket-Accept")?;
let mut ws_accept = Sha1Digest::default();
base64::decode_config_slice(&ws_accept_header, base64::STANDARD, &mut ws_accept)?;
if expected_ws_accept != &ws_accept {
return Err(format!(
"server responded with incorrect Sec-WebSocket-Accept header: expected {expected}, got {actual}",
expected = Base64Display::with_config(expected_ws_accept, base64::STANDARD),
actual = Base64Display::with_config(&ws_accept, base64::STANDARD),
)
.into());
}
Ok(Some(response_len))
}
fn contains_ignore_ascii_case(mut haystack: &[u8], needle: &[u8]) -> bool {
if needle.is_empty() {
return true;
}
while haystack.len() >= needle.len() {
if haystack[..needle.len()].eq_ignore_ascii_case(needle) {
return true;
}
haystack = &haystack[1..];
}
false
}
/// A client's opening handshake.
pub struct ClientRequest {
ws_accept: Sha1Digest,
}
impl ClientRequest {
/// Parses the client's opening handshake.
pub fn parse<'a, F>(header: F) -> Result<Self>
where
F: Fn(&'static str) -> Option<&'a str> + 'a,
{
let header = |name| header(name).ok_or_else(|| format!("client didn't provide {name} header", name = name));
let check_header = |name, expected| {
let actual = header(name)?;
if actual.eq_ignore_ascii_case(expected) {
Ok(())
} else {
Err(format!(
"client provided incorrect {name} header: expected {expected}, got {actual}",
name = name,
expected = expected,
actual = actual
))
}
};
let check_header_contains = |name, expected: &str| {
let actual = header(name)?;
if contains_ignore_ascii_case(actual.as_bytes(), expected.as_bytes()) {
Ok(())
} else {
Err(format!(
"client provided incorrect {name} header: expected string containing {expected}, got {actual}",
name = name,
expected = expected,
actual = actual
))
}
};
check_header("Upgrade", "websocket")?;
check_header_contains("Connection", "Upgrade")?;
check_header("Sec-WebSocket-Version", "13")?;
let key = header("Sec-WebSocket-Key")?;
let ws_accept = build_ws_accept(key);
Ok(Self { ws_accept })
}
/// Copies the value that the client expects to see in the server's `Sec-WebSocket-Accept` header into a `String`.
pub fn ws_accept_buf(&self, s: &mut String) {
base64::encode_config_buf(&self.ws_accept, base64::STANDARD, s)
}
/// Returns the value that the client expects to see in the server's `Sec-WebSocket-Accept` header.
pub fn ws_accept(&self) -> String {
base64::encode_config(&self.ws_accept, base64::STANDARD)
}
}
/// Tokio decoder for parsing the server's response to the client's HTTP `Connection: Upgrade` request.
pub struct UpgradeCodec {
ws_accept: Sha1Digest,
}
impl UpgradeCodec {
/// Returns a new `UpgradeCodec` object.
///
/// The `key` parameter provides the string passed to the server via the HTTP `Sec-WebSocket-Key` header.
pub fn new(key: &str) -> Self {
UpgradeCodec {
ws_accept: build_ws_accept(key),
}
}
}
impl Decoder for UpgradeCodec {
type Item = ();
type Error = Error;
fn decode(&mut self, src: &mut BytesMut) -> Result<Option<()>> |
}
impl Encoder<()> for UpgradeCodec {
type Error = Error;
fn encode(&mut self, _item: (), _dst: &mut BytesMut) -> Result<()> {
unimplemented!()
}
}
#[cfg(test)]
mod tests {
use crate::upgrade::contains_ignore_ascii_case;
#[test]
fn does_not_contain() {
assert!(!contains_ignore_ascii_case(b"World", b"hello"));
}
#[test]
fn contains_exact() {
assert!(contains_ignore_ascii_case(b"Hello", b"hello"));
}
#[test]
fn contains_substring() {
assert!(contains_ignore_ascii_case(b"Hello World", b"hello"));
}
}
| {
if let Some(response_len) = validate_server_response(&self.ws_accept, src)? {
src.advance(response_len);
Ok(Some(()))
} else {
Ok(None)
}
} |
avatar-routing.module.ts | import { NgModule } from '@angular/core';
import { Routes, RouterModule } from '@angular/router';
import { AvatarPage } from './avatar.page';
const routes: Routes = [ | }
];
@NgModule({
imports: [RouterModule.forChild(routes)],
exports: [RouterModule],
})
export class AvatarPageRoutingModule {} | {
path: '',
component: AvatarPage |
tc.go | // +build !windows
package term // import "github.com/ory/dockertest/docker/pkg/term"
import (
"syscall"
"unsafe"
"golang.org/x/sys/unix"
)
func | (fd uintptr, p *Termios) syscall.Errno {
_, _, err := unix.Syscall(unix.SYS_IOCTL, fd, uintptr(getTermios), uintptr(unsafe.Pointer(p)))
return err
}
func tcset(fd uintptr, p *Termios) syscall.Errno {
_, _, err := unix.Syscall(unix.SYS_IOCTL, fd, setTermios, uintptr(unsafe.Pointer(p)))
return err
}
| tcget |
retry.go | // Copyright 2021 BoCloud
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package agent
import (
"context"
"math"
"time"
"github.com/avast/retry-go"
)
// retryForever retry fn until it success
func retryForever(ctx context.Context, retryableFunc retry.RetryableFunc, onRetryFunc retry.OnRetryFunc) | {
_ = retry.Do(
retryableFunc,
retry.Context(ctx),
retry.Attempts(math.MaxUint64),
retry.Delay(5*time.Second),
retry.DelayType(retry.FixedDelay),
retry.LastErrorOnly(true),
retry.OnRetry(onRetryFunc),
)
} |
|
urlutil.go | // Package urlutil provides helper function to check urls kind.
// It supports http urls, git urls and transport url (tcp://, …)
package urlutil
import (
"regexp"
"strings"
)
var (
validPrefixes = map[string][]string{
"url": {"http://", "https://"},
"git": {"git://", "github.com/", "git@"},
"transport": {"tcp://", "udp://", "unix://"},
}
urlPathWithFragmentSuffix = regexp.MustCompile(".git(?:#.+)?$")
)
// IsURL returns true if the provided str is an HTTP(S) URL.
func IsURL(str string) bool {
return checkURL(str, "url")
}
// IsGitURL returns true if the provided str is a git repository URL.
func IsGitURL(str string) bool {
if IsURL(str) && urlPathWithFragmentSuffix.MatchString(str) {
| return checkURL(str, "git")
}
// IsGitTransport returns true if the provided str is a git transport by inspecting
// the prefix of the string for known protocols used in git.
func IsGitTransport(str string) bool {
return IsURL(str) || strings.HasPrefix(str, "git://") || strings.HasPrefix(str, "git@")
}
// IsTransportURL returns true if the provided str is a transport (tcp, udp, unix) URL.
func IsTransportURL(str string) bool {
return checkURL(str, "transport")
}
func checkURL(str, kind string) bool {
for _, prefix := range validPrefixes[kind] {
if strings.HasPrefix(str, prefix) {
return true
}
}
return false
}
| return true
}
|
global_functions.rs | use std::collections::HashMap;
use serde_json::value::{Value, to_value, from_value};
use errors::Result;
/// The global function type definition
pub type GlobalFn = Box<Fn(HashMap<String, Value>) -> Result<Value> + Sync + Send>;
pub fn make_range_fn() -> GlobalFn {
Box::new(move |args| -> Result<Value> {
let start = match args.get("start") {
Some(val) => match from_value::<usize>(val.clone()) {
Ok(v) => v,
Err(_) => bail!("Global function `range` received start={} but `start` can only be a number"),
},
None => 0,
};
let step_by = match args.get("step_by") {
Some(val) => match from_value::<usize>(val.clone()) {
Ok(v) => v,
Err(_) => bail!("Global function `range` received step_by={} but `step` can only be a number"),
},
None => 1,
};
let end = match args.get("end") {
Some(val) => match from_value::<usize>(val.clone()) {
Ok(v) => v,
Err(_) => bail!("Global function `range` received end={} but `end` can only be a number"),
},
None => bail!("Global function `range` was called without a `end` argument"),
};
if start > end {
bail!("Global function `range` was called without a `start` argument greater than the `end` one");
}
let mut i = start;
let mut res = vec![];
while i < end {
res.push(i);
i += step_by;
}
Ok(to_value(res).unwrap())
})
}
//pub let range_fn = |args: HashMap<String, Value>| -> Result<Value> {
// let start = match args.get("start") {
// Some(val) => match from_value::<usize>(val.clone()) {
// Ok(v) => v,
// Err(_) => bail!("Global function `range` received start={} but `start` can only be a number"),
// },
// None => 0,
// };
// let step_by = match args.get("step_by") {
// Some(val) => match from_value::<usize>(val.clone()) {
// Ok(v) => v,
// Err(_) => bail!("Global function `range` received step_by={} but `step` can only be a number"),
// },
// None => 1,
// };
// let end = match args.get("end") {
// Some(val) => match from_value::<usize>(val.clone()) {
// Ok(v) => v,
// Err(_) => bail!("Global function `range` received end={} but `end` can only be a number"),
// },
// None => bail!("Global function `range` was called without a `end` argument"),
// };
//
// if start > end {
// bail!("Global function `range` was called without a `start` argument greater than the `end` one");
// }
//
// let mut i = start;
// let mut res = vec![];
// while i < end {
// res.push(i);
// i += step_by;
// }
// Ok(to_value(res).unwrap())
//}
#[cfg(test)]
mod tests {
use std::collections::HashMap;
use serde_json::value::{to_value};
use super::{make_range_fn};
#[test]
fn test_range_default() {
let mut args = HashMap::new();
args.insert("end".to_string(), to_value(5).unwrap());
let res = make_range_fn()(args).unwrap();
assert_eq!(res, to_value(vec![0,1,2,3,4]).unwrap());
}
#[test]
fn | () {
let mut args = HashMap::new();
args.insert("end".to_string(), to_value(5).unwrap());
args.insert("start".to_string(), to_value(1).unwrap());
let res = make_range_fn()(args).unwrap();
assert_eq!(res, to_value(vec![1,2,3,4]).unwrap());
}
#[test]
fn test_range_start_greater_than_end() {
let mut args = HashMap::new();
args.insert("end".to_string(), to_value(5).unwrap());
args.insert("start".to_string(), to_value(6).unwrap());
assert!(make_range_fn()(args).is_err());
}
#[test]
fn test_range_step_by() {
let mut args = HashMap::new();
args.insert("end".to_string(), to_value(10).unwrap());
args.insert("step_by".to_string(), to_value(2).unwrap());
let res = make_range_fn()(args).unwrap();
assert_eq!(res, to_value(vec![0,2,4,6,8]).unwrap());
}
}
| test_range_start |
target.rs | use cranelift::frontend::FunctionBuilderContext;
use cranelift_simplejit::{SimpleJITBuilder, SimpleJITModule};
use object_pool::Pool;
use std::cell::RefCell;
use std::rc::Rc;
#[salsa::query_group(TargetDatabase)]
pub trait Target {
#[salsa::input]
fn module(&self) -> Rc<RefCell<SimpleJITModule>>;
#[salsa::input]
fn func_ctx_pool(&self) -> Rc<Pool<FunctionBuilderContext>>;
}
pub trait TargetExt: Target {
fn reset_module(&mut self) {
let builder = SimpleJITBuilder::new(cranelift_module::default_libcall_names());
let module = SimpleJITModule::new(builder);
self.set_module(Rc::new(RefCell::new(module)));
}
fn with_module<T, F: FnOnce(&SimpleJITModule) -> T>(&self, f: F) -> T {
f(&self.module().borrow())
}
| }
}
impl<T: Target + ?Sized> TargetExt for T {} | fn with_module_mut<T, F: FnOnce(&mut SimpleJITModule) -> T>(&self, f: F) -> T {
f(&mut self.module().borrow_mut()) |
expressroutecrossconnections.go | package network
// Copyright (c) Microsoft and contributors. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
//
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
import (
"context"
"github.com/Azure/go-autorest/autorest"
"github.com/Azure/go-autorest/autorest/azure"
"github.com/Azure/go-autorest/tracing"
"net/http"
)
// ExpressRouteCrossConnectionsClient is the network Client
type ExpressRouteCrossConnectionsClient struct {
BaseClient
}
// NewExpressRouteCrossConnectionsClient creates an instance of the ExpressRouteCrossConnectionsClient client.
func | (subscriptionID string) ExpressRouteCrossConnectionsClient {
return NewExpressRouteCrossConnectionsClientWithBaseURI(DefaultBaseURI, subscriptionID)
}
// NewExpressRouteCrossConnectionsClientWithBaseURI creates an instance of the ExpressRouteCrossConnectionsClient
// client using a custom endpoint. Use this when interacting with an Azure cloud that uses a non-standard base URI
// (sovereign clouds, Azure stack).
func NewExpressRouteCrossConnectionsClientWithBaseURI(baseURI string, subscriptionID string) ExpressRouteCrossConnectionsClient {
return ExpressRouteCrossConnectionsClient{NewWithBaseURI(baseURI, subscriptionID)}
}
// CreateOrUpdate update the specified ExpressRouteCrossConnection.
// Parameters:
// resourceGroupName - the name of the resource group.
// crossConnectionName - the name of the ExpressRouteCrossConnection.
// parameters - parameters supplied to the update express route crossConnection operation.
func (client ExpressRouteCrossConnectionsClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, crossConnectionName string, parameters ExpressRouteCrossConnection) (result ExpressRouteCrossConnectionsCreateOrUpdateFuture, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/ExpressRouteCrossConnectionsClient.CreateOrUpdate")
defer func() {
sc := -1
if result.Response() != nil {
sc = result.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
req, err := client.CreateOrUpdatePreparer(ctx, resourceGroupName, crossConnectionName, parameters)
if err != nil {
err = autorest.NewErrorWithError(err, "network.ExpressRouteCrossConnectionsClient", "CreateOrUpdate", nil, "Failure preparing request")
return
}
result, err = client.CreateOrUpdateSender(req)
if err != nil {
err = autorest.NewErrorWithError(err, "network.ExpressRouteCrossConnectionsClient", "CreateOrUpdate", nil, "Failure sending request")
return
}
return
}
// CreateOrUpdatePreparer prepares the CreateOrUpdate request.
func (client ExpressRouteCrossConnectionsClient) CreateOrUpdatePreparer(ctx context.Context, resourceGroupName string, crossConnectionName string, parameters ExpressRouteCrossConnection) (*http.Request, error) {
pathParameters := map[string]interface{}{
"crossConnectionName": autorest.Encode("path", crossConnectionName),
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
const APIVersion = "2019-08-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
parameters.Etag = nil
preparer := autorest.CreatePreparer(
autorest.AsContentType("application/json; charset=utf-8"),
autorest.AsPut(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCrossConnections/{crossConnectionName}", pathParameters),
autorest.WithJSON(parameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the
// http.Response Body if it receives an error.
func (client ExpressRouteCrossConnectionsClient) CreateOrUpdateSender(req *http.Request) (future ExpressRouteCrossConnectionsCreateOrUpdateFuture, err error) {
var resp *http.Response
resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client))
if err != nil {
return
}
var azf azure.Future
azf, err = azure.NewFutureFromResponse(resp)
future.FutureAPI = &azf
future.Result = func(client ExpressRouteCrossConnectionsClient) (ercc ExpressRouteCrossConnection, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
err = autorest.NewErrorWithError(err, "network.ExpressRouteCrossConnectionsCreateOrUpdateFuture", "Result", future.Response(), "Polling failure")
return
}
if !done {
err = azure.NewAsyncOpIncompleteError("network.ExpressRouteCrossConnectionsCreateOrUpdateFuture")
return
}
sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
if ercc.Response.Response, err = future.GetResult(sender); err == nil && ercc.Response.Response.StatusCode != http.StatusNoContent {
ercc, err = client.CreateOrUpdateResponder(ercc.Response.Response)
if err != nil {
err = autorest.NewErrorWithError(err, "network.ExpressRouteCrossConnectionsCreateOrUpdateFuture", "Result", ercc.Response.Response, "Failure responding to request")
}
}
return
}
return
}
// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always
// closes the http.Response Body.
func (client ExpressRouteCrossConnectionsClient) CreateOrUpdateResponder(resp *http.Response) (result ExpressRouteCrossConnection, err error) {
err = autorest.Respond(
resp,
azure.WithErrorUnlessStatusCode(http.StatusOK),
autorest.ByUnmarshallingJSON(&result),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}
// Get gets details about the specified ExpressRouteCrossConnection.
// Parameters:
// resourceGroupName - the name of the resource group (peering location of the circuit).
// crossConnectionName - the name of the ExpressRouteCrossConnection (service key of the circuit).
func (client ExpressRouteCrossConnectionsClient) Get(ctx context.Context, resourceGroupName string, crossConnectionName string) (result ExpressRouteCrossConnection, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/ExpressRouteCrossConnectionsClient.Get")
defer func() {
sc := -1
if result.Response.Response != nil {
sc = result.Response.Response.StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
req, err := client.GetPreparer(ctx, resourceGroupName, crossConnectionName)
if err != nil {
err = autorest.NewErrorWithError(err, "network.ExpressRouteCrossConnectionsClient", "Get", nil, "Failure preparing request")
return
}
resp, err := client.GetSender(req)
if err != nil {
result.Response = autorest.Response{Response: resp}
err = autorest.NewErrorWithError(err, "network.ExpressRouteCrossConnectionsClient", "Get", resp, "Failure sending request")
return
}
result, err = client.GetResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "network.ExpressRouteCrossConnectionsClient", "Get", resp, "Failure responding to request")
return
}
return
}
// GetPreparer prepares the Get request.
func (client ExpressRouteCrossConnectionsClient) GetPreparer(ctx context.Context, resourceGroupName string, crossConnectionName string) (*http.Request, error) {
pathParameters := map[string]interface{}{
"crossConnectionName": autorest.Encode("path", crossConnectionName),
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
const APIVersion = "2019-08-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsGet(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCrossConnections/{crossConnectionName}", pathParameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// GetSender sends the Get request. The method will close the
// http.Response Body if it receives an error.
func (client ExpressRouteCrossConnectionsClient) GetSender(req *http.Request) (*http.Response, error) {
return client.Send(req, azure.DoRetryWithRegistration(client.Client))
}
// GetResponder handles the response to the Get request. The method always
// closes the http.Response Body.
func (client ExpressRouteCrossConnectionsClient) GetResponder(resp *http.Response) (result ExpressRouteCrossConnection, err error) {
err = autorest.Respond(
resp,
azure.WithErrorUnlessStatusCode(http.StatusOK),
autorest.ByUnmarshallingJSON(&result),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}
// List retrieves all the ExpressRouteCrossConnections in a subscription.
func (client ExpressRouteCrossConnectionsClient) List(ctx context.Context) (result ExpressRouteCrossConnectionListResultPage, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/ExpressRouteCrossConnectionsClient.List")
defer func() {
sc := -1
if result.ercclr.Response.Response != nil {
sc = result.ercclr.Response.Response.StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
result.fn = client.listNextResults
req, err := client.ListPreparer(ctx)
if err != nil {
err = autorest.NewErrorWithError(err, "network.ExpressRouteCrossConnectionsClient", "List", nil, "Failure preparing request")
return
}
resp, err := client.ListSender(req)
if err != nil {
result.ercclr.Response = autorest.Response{Response: resp}
err = autorest.NewErrorWithError(err, "network.ExpressRouteCrossConnectionsClient", "List", resp, "Failure sending request")
return
}
result.ercclr, err = client.ListResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "network.ExpressRouteCrossConnectionsClient", "List", resp, "Failure responding to request")
return
}
if result.ercclr.hasNextLink() && result.ercclr.IsEmpty() {
err = result.NextWithContext(ctx)
return
}
return
}
// ListPreparer prepares the List request.
func (client ExpressRouteCrossConnectionsClient) ListPreparer(ctx context.Context) (*http.Request, error) {
pathParameters := map[string]interface{}{
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
const APIVersion = "2019-08-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsGet(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Network/expressRouteCrossConnections", pathParameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// ListSender sends the List request. The method will close the
// http.Response Body if it receives an error.
func (client ExpressRouteCrossConnectionsClient) ListSender(req *http.Request) (*http.Response, error) {
return client.Send(req, azure.DoRetryWithRegistration(client.Client))
}
// ListResponder handles the response to the List request. The method always
// closes the http.Response Body.
func (client ExpressRouteCrossConnectionsClient) ListResponder(resp *http.Response) (result ExpressRouteCrossConnectionListResult, err error) {
err = autorest.Respond(
resp,
azure.WithErrorUnlessStatusCode(http.StatusOK),
autorest.ByUnmarshallingJSON(&result),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}
// listNextResults retrieves the next set of results, if any.
func (client ExpressRouteCrossConnectionsClient) listNextResults(ctx context.Context, lastResults ExpressRouteCrossConnectionListResult) (result ExpressRouteCrossConnectionListResult, err error) {
req, err := lastResults.expressRouteCrossConnectionListResultPreparer(ctx)
if err != nil {
return result, autorest.NewErrorWithError(err, "network.ExpressRouteCrossConnectionsClient", "listNextResults", nil, "Failure preparing next results request")
}
if req == nil {
return
}
resp, err := client.ListSender(req)
if err != nil {
result.Response = autorest.Response{Response: resp}
return result, autorest.NewErrorWithError(err, "network.ExpressRouteCrossConnectionsClient", "listNextResults", resp, "Failure sending next results request")
}
result, err = client.ListResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "network.ExpressRouteCrossConnectionsClient", "listNextResults", resp, "Failure responding to next results request")
}
return
}
// ListComplete enumerates all values, automatically crossing page boundaries as required.
func (client ExpressRouteCrossConnectionsClient) ListComplete(ctx context.Context) (result ExpressRouteCrossConnectionListResultIterator, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/ExpressRouteCrossConnectionsClient.List")
defer func() {
sc := -1
if result.Response().Response.Response != nil {
sc = result.page.Response().Response.Response.StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
result.page, err = client.List(ctx)
return
}
// ListArpTable gets the currently advertised ARP table associated with the express route cross connection in a
// resource group.
// Parameters:
// resourceGroupName - the name of the resource group.
// crossConnectionName - the name of the ExpressRouteCrossConnection.
// peeringName - the name of the peering.
// devicePath - the path of the device.
func (client ExpressRouteCrossConnectionsClient) ListArpTable(ctx context.Context, resourceGroupName string, crossConnectionName string, peeringName string, devicePath string) (result ExpressRouteCrossConnectionsListArpTableFuture, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/ExpressRouteCrossConnectionsClient.ListArpTable")
defer func() {
sc := -1
if result.Response() != nil {
sc = result.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
req, err := client.ListArpTablePreparer(ctx, resourceGroupName, crossConnectionName, peeringName, devicePath)
if err != nil {
err = autorest.NewErrorWithError(err, "network.ExpressRouteCrossConnectionsClient", "ListArpTable", nil, "Failure preparing request")
return
}
result, err = client.ListArpTableSender(req)
if err != nil {
err = autorest.NewErrorWithError(err, "network.ExpressRouteCrossConnectionsClient", "ListArpTable", nil, "Failure sending request")
return
}
return
}
// ListArpTablePreparer prepares the ListArpTable request.
func (client ExpressRouteCrossConnectionsClient) ListArpTablePreparer(ctx context.Context, resourceGroupName string, crossConnectionName string, peeringName string, devicePath string) (*http.Request, error) {
pathParameters := map[string]interface{}{
"crossConnectionName": autorest.Encode("path", crossConnectionName),
"devicePath": autorest.Encode("path", devicePath),
"peeringName": autorest.Encode("path", peeringName),
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
const APIVersion = "2019-08-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsPost(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCrossConnections/{crossConnectionName}/peerings/{peeringName}/arpTables/{devicePath}", pathParameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// ListArpTableSender sends the ListArpTable request. The method will close the
// http.Response Body if it receives an error.
func (client ExpressRouteCrossConnectionsClient) ListArpTableSender(req *http.Request) (future ExpressRouteCrossConnectionsListArpTableFuture, err error) {
var resp *http.Response
resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client))
if err != nil {
return
}
var azf azure.Future
azf, err = azure.NewFutureFromResponse(resp)
future.FutureAPI = &azf
future.Result = func(client ExpressRouteCrossConnectionsClient) (ercatlr ExpressRouteCircuitsArpTableListResult, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
err = autorest.NewErrorWithError(err, "network.ExpressRouteCrossConnectionsListArpTableFuture", "Result", future.Response(), "Polling failure")
return
}
if !done {
err = azure.NewAsyncOpIncompleteError("network.ExpressRouteCrossConnectionsListArpTableFuture")
return
}
sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
if ercatlr.Response.Response, err = future.GetResult(sender); err == nil && ercatlr.Response.Response.StatusCode != http.StatusNoContent {
ercatlr, err = client.ListArpTableResponder(ercatlr.Response.Response)
if err != nil {
err = autorest.NewErrorWithError(err, "network.ExpressRouteCrossConnectionsListArpTableFuture", "Result", ercatlr.Response.Response, "Failure responding to request")
}
}
return
}
return
}
// ListArpTableResponder handles the response to the ListArpTable request. The method always
// closes the http.Response Body.
func (client ExpressRouteCrossConnectionsClient) ListArpTableResponder(resp *http.Response) (result ExpressRouteCircuitsArpTableListResult, err error) {
err = autorest.Respond(
resp,
azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted),
autorest.ByUnmarshallingJSON(&result),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}
// ListByResourceGroup retrieves all the ExpressRouteCrossConnections in a resource group.
// Parameters:
// resourceGroupName - the name of the resource group.
func (client ExpressRouteCrossConnectionsClient) ListByResourceGroup(ctx context.Context, resourceGroupName string) (result ExpressRouteCrossConnectionListResultPage, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/ExpressRouteCrossConnectionsClient.ListByResourceGroup")
defer func() {
sc := -1
if result.ercclr.Response.Response != nil {
sc = result.ercclr.Response.Response.StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
result.fn = client.listByResourceGroupNextResults
req, err := client.ListByResourceGroupPreparer(ctx, resourceGroupName)
if err != nil {
err = autorest.NewErrorWithError(err, "network.ExpressRouteCrossConnectionsClient", "ListByResourceGroup", nil, "Failure preparing request")
return
}
resp, err := client.ListByResourceGroupSender(req)
if err != nil {
result.ercclr.Response = autorest.Response{Response: resp}
err = autorest.NewErrorWithError(err, "network.ExpressRouteCrossConnectionsClient", "ListByResourceGroup", resp, "Failure sending request")
return
}
result.ercclr, err = client.ListByResourceGroupResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "network.ExpressRouteCrossConnectionsClient", "ListByResourceGroup", resp, "Failure responding to request")
return
}
if result.ercclr.hasNextLink() && result.ercclr.IsEmpty() {
err = result.NextWithContext(ctx)
return
}
return
}
// ListByResourceGroupPreparer prepares the ListByResourceGroup request.
func (client ExpressRouteCrossConnectionsClient) ListByResourceGroupPreparer(ctx context.Context, resourceGroupName string) (*http.Request, error) {
pathParameters := map[string]interface{}{
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
const APIVersion = "2019-08-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsGet(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCrossConnections", pathParameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// ListByResourceGroupSender sends the ListByResourceGroup request. The method will close the
// http.Response Body if it receives an error.
func (client ExpressRouteCrossConnectionsClient) ListByResourceGroupSender(req *http.Request) (*http.Response, error) {
return client.Send(req, azure.DoRetryWithRegistration(client.Client))
}
// ListByResourceGroupResponder handles the response to the ListByResourceGroup request. The method always
// closes the http.Response Body.
func (client ExpressRouteCrossConnectionsClient) ListByResourceGroupResponder(resp *http.Response) (result ExpressRouteCrossConnectionListResult, err error) {
err = autorest.Respond(
resp,
azure.WithErrorUnlessStatusCode(http.StatusOK),
autorest.ByUnmarshallingJSON(&result),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}
// listByResourceGroupNextResults retrieves the next set of results, if any.
func (client ExpressRouteCrossConnectionsClient) listByResourceGroupNextResults(ctx context.Context, lastResults ExpressRouteCrossConnectionListResult) (result ExpressRouteCrossConnectionListResult, err error) {
req, err := lastResults.expressRouteCrossConnectionListResultPreparer(ctx)
if err != nil {
return result, autorest.NewErrorWithError(err, "network.ExpressRouteCrossConnectionsClient", "listByResourceGroupNextResults", nil, "Failure preparing next results request")
}
if req == nil {
return
}
resp, err := client.ListByResourceGroupSender(req)
if err != nil {
result.Response = autorest.Response{Response: resp}
return result, autorest.NewErrorWithError(err, "network.ExpressRouteCrossConnectionsClient", "listByResourceGroupNextResults", resp, "Failure sending next results request")
}
result, err = client.ListByResourceGroupResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "network.ExpressRouteCrossConnectionsClient", "listByResourceGroupNextResults", resp, "Failure responding to next results request")
}
return
}
// ListByResourceGroupComplete enumerates all values, automatically crossing page boundaries as required.
func (client ExpressRouteCrossConnectionsClient) ListByResourceGroupComplete(ctx context.Context, resourceGroupName string) (result ExpressRouteCrossConnectionListResultIterator, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/ExpressRouteCrossConnectionsClient.ListByResourceGroup")
defer func() {
sc := -1
if result.Response().Response.Response != nil {
sc = result.page.Response().Response.Response.StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
result.page, err = client.ListByResourceGroup(ctx, resourceGroupName)
return
}
// ListRoutesTable gets the currently advertised routes table associated with the express route cross connection in a
// resource group.
// Parameters:
// resourceGroupName - the name of the resource group.
// crossConnectionName - the name of the ExpressRouteCrossConnection.
// peeringName - the name of the peering.
// devicePath - the path of the device.
func (client ExpressRouteCrossConnectionsClient) ListRoutesTable(ctx context.Context, resourceGroupName string, crossConnectionName string, peeringName string, devicePath string) (result ExpressRouteCrossConnectionsListRoutesTableFuture, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/ExpressRouteCrossConnectionsClient.ListRoutesTable")
defer func() {
sc := -1
if result.Response() != nil {
sc = result.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
req, err := client.ListRoutesTablePreparer(ctx, resourceGroupName, crossConnectionName, peeringName, devicePath)
if err != nil {
err = autorest.NewErrorWithError(err, "network.ExpressRouteCrossConnectionsClient", "ListRoutesTable", nil, "Failure preparing request")
return
}
result, err = client.ListRoutesTableSender(req)
if err != nil {
err = autorest.NewErrorWithError(err, "network.ExpressRouteCrossConnectionsClient", "ListRoutesTable", nil, "Failure sending request")
return
}
return
}
// ListRoutesTablePreparer prepares the ListRoutesTable request.
func (client ExpressRouteCrossConnectionsClient) ListRoutesTablePreparer(ctx context.Context, resourceGroupName string, crossConnectionName string, peeringName string, devicePath string) (*http.Request, error) {
pathParameters := map[string]interface{}{
"crossConnectionName": autorest.Encode("path", crossConnectionName),
"devicePath": autorest.Encode("path", devicePath),
"peeringName": autorest.Encode("path", peeringName),
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
const APIVersion = "2019-08-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsPost(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCrossConnections/{crossConnectionName}/peerings/{peeringName}/routeTables/{devicePath}", pathParameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// ListRoutesTableSender sends the ListRoutesTable request. The method will close the
// http.Response Body if it receives an error.
func (client ExpressRouteCrossConnectionsClient) ListRoutesTableSender(req *http.Request) (future ExpressRouteCrossConnectionsListRoutesTableFuture, err error) {
var resp *http.Response
resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client))
if err != nil {
return
}
var azf azure.Future
azf, err = azure.NewFutureFromResponse(resp)
future.FutureAPI = &azf
future.Result = func(client ExpressRouteCrossConnectionsClient) (ercrtlr ExpressRouteCircuitsRoutesTableListResult, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
err = autorest.NewErrorWithError(err, "network.ExpressRouteCrossConnectionsListRoutesTableFuture", "Result", future.Response(), "Polling failure")
return
}
if !done {
err = azure.NewAsyncOpIncompleteError("network.ExpressRouteCrossConnectionsListRoutesTableFuture")
return
}
sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
if ercrtlr.Response.Response, err = future.GetResult(sender); err == nil && ercrtlr.Response.Response.StatusCode != http.StatusNoContent {
ercrtlr, err = client.ListRoutesTableResponder(ercrtlr.Response.Response)
if err != nil {
err = autorest.NewErrorWithError(err, "network.ExpressRouteCrossConnectionsListRoutesTableFuture", "Result", ercrtlr.Response.Response, "Failure responding to request")
}
}
return
}
return
}
// ListRoutesTableResponder handles the response to the ListRoutesTable request. The method always
// closes the http.Response Body.
func (client ExpressRouteCrossConnectionsClient) ListRoutesTableResponder(resp *http.Response) (result ExpressRouteCircuitsRoutesTableListResult, err error) {
err = autorest.Respond(
resp,
azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted),
autorest.ByUnmarshallingJSON(&result),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}
// ListRoutesTableSummary gets the route table summary associated with the express route cross connection in a resource
// group.
// Parameters:
// resourceGroupName - the name of the resource group.
// crossConnectionName - the name of the ExpressRouteCrossConnection.
// peeringName - the name of the peering.
// devicePath - the path of the device.
func (client ExpressRouteCrossConnectionsClient) ListRoutesTableSummary(ctx context.Context, resourceGroupName string, crossConnectionName string, peeringName string, devicePath string) (result ExpressRouteCrossConnectionsListRoutesTableSummaryFuture, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/ExpressRouteCrossConnectionsClient.ListRoutesTableSummary")
defer func() {
sc := -1
if result.Response() != nil {
sc = result.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
req, err := client.ListRoutesTableSummaryPreparer(ctx, resourceGroupName, crossConnectionName, peeringName, devicePath)
if err != nil {
err = autorest.NewErrorWithError(err, "network.ExpressRouteCrossConnectionsClient", "ListRoutesTableSummary", nil, "Failure preparing request")
return
}
result, err = client.ListRoutesTableSummarySender(req)
if err != nil {
err = autorest.NewErrorWithError(err, "network.ExpressRouteCrossConnectionsClient", "ListRoutesTableSummary", nil, "Failure sending request")
return
}
return
}
// ListRoutesTableSummaryPreparer prepares the ListRoutesTableSummary request.
func (client ExpressRouteCrossConnectionsClient) ListRoutesTableSummaryPreparer(ctx context.Context, resourceGroupName string, crossConnectionName string, peeringName string, devicePath string) (*http.Request, error) {
pathParameters := map[string]interface{}{
"crossConnectionName": autorest.Encode("path", crossConnectionName),
"devicePath": autorest.Encode("path", devicePath),
"peeringName": autorest.Encode("path", peeringName),
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
const APIVersion = "2019-08-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsPost(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCrossConnections/{crossConnectionName}/peerings/{peeringName}/routeTablesSummary/{devicePath}", pathParameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// ListRoutesTableSummarySender sends the ListRoutesTableSummary request. The method will close the
// http.Response Body if it receives an error.
func (client ExpressRouteCrossConnectionsClient) ListRoutesTableSummarySender(req *http.Request) (future ExpressRouteCrossConnectionsListRoutesTableSummaryFuture, err error) {
var resp *http.Response
resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client))
if err != nil {
return
}
var azf azure.Future
azf, err = azure.NewFutureFromResponse(resp)
future.FutureAPI = &azf
future.Result = func(client ExpressRouteCrossConnectionsClient) (erccrtslr ExpressRouteCrossConnectionsRoutesTableSummaryListResult, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
err = autorest.NewErrorWithError(err, "network.ExpressRouteCrossConnectionsListRoutesTableSummaryFuture", "Result", future.Response(), "Polling failure")
return
}
if !done {
err = azure.NewAsyncOpIncompleteError("network.ExpressRouteCrossConnectionsListRoutesTableSummaryFuture")
return
}
sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
if erccrtslr.Response.Response, err = future.GetResult(sender); err == nil && erccrtslr.Response.Response.StatusCode != http.StatusNoContent {
erccrtslr, err = client.ListRoutesTableSummaryResponder(erccrtslr.Response.Response)
if err != nil {
err = autorest.NewErrorWithError(err, "network.ExpressRouteCrossConnectionsListRoutesTableSummaryFuture", "Result", erccrtslr.Response.Response, "Failure responding to request")
}
}
return
}
return
}
// ListRoutesTableSummaryResponder handles the response to the ListRoutesTableSummary request. The method always
// closes the http.Response Body.
func (client ExpressRouteCrossConnectionsClient) ListRoutesTableSummaryResponder(resp *http.Response) (result ExpressRouteCrossConnectionsRoutesTableSummaryListResult, err error) {
err = autorest.Respond(
resp,
azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted),
autorest.ByUnmarshallingJSON(&result),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}
// UpdateTags updates an express route cross connection tags.
// Parameters:
// resourceGroupName - the name of the resource group.
// crossConnectionName - the name of the cross connection.
// crossConnectionParameters - parameters supplied to update express route cross connection tags.
func (client ExpressRouteCrossConnectionsClient) UpdateTags(ctx context.Context, resourceGroupName string, crossConnectionName string, crossConnectionParameters TagsObject) (result ExpressRouteCrossConnectionsUpdateTagsFuture, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/ExpressRouteCrossConnectionsClient.UpdateTags")
defer func() {
sc := -1
if result.Response() != nil {
sc = result.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
req, err := client.UpdateTagsPreparer(ctx, resourceGroupName, crossConnectionName, crossConnectionParameters)
if err != nil {
err = autorest.NewErrorWithError(err, "network.ExpressRouteCrossConnectionsClient", "UpdateTags", nil, "Failure preparing request")
return
}
result, err = client.UpdateTagsSender(req)
if err != nil {
err = autorest.NewErrorWithError(err, "network.ExpressRouteCrossConnectionsClient", "UpdateTags", nil, "Failure sending request")
return
}
return
}
// UpdateTagsPreparer prepares the UpdateTags request.
func (client ExpressRouteCrossConnectionsClient) UpdateTagsPreparer(ctx context.Context, resourceGroupName string, crossConnectionName string, crossConnectionParameters TagsObject) (*http.Request, error) {
pathParameters := map[string]interface{}{
"crossConnectionName": autorest.Encode("path", crossConnectionName),
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
const APIVersion = "2019-08-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsContentType("application/json; charset=utf-8"),
autorest.AsPatch(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCrossConnections/{crossConnectionName}", pathParameters),
autorest.WithJSON(crossConnectionParameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// UpdateTagsSender sends the UpdateTags request. The method will close the
// http.Response Body if it receives an error.
func (client ExpressRouteCrossConnectionsClient) UpdateTagsSender(req *http.Request) (future ExpressRouteCrossConnectionsUpdateTagsFuture, err error) {
var resp *http.Response
resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client))
if err != nil {
return
}
var azf azure.Future
azf, err = azure.NewFutureFromResponse(resp)
future.FutureAPI = &azf
future.Result = func(client ExpressRouteCrossConnectionsClient) (ercc ExpressRouteCrossConnection, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
err = autorest.NewErrorWithError(err, "network.ExpressRouteCrossConnectionsUpdateTagsFuture", "Result", future.Response(), "Polling failure")
return
}
if !done {
err = azure.NewAsyncOpIncompleteError("network.ExpressRouteCrossConnectionsUpdateTagsFuture")
return
}
sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
if ercc.Response.Response, err = future.GetResult(sender); err == nil && ercc.Response.Response.StatusCode != http.StatusNoContent {
ercc, err = client.UpdateTagsResponder(ercc.Response.Response)
if err != nil {
err = autorest.NewErrorWithError(err, "network.ExpressRouteCrossConnectionsUpdateTagsFuture", "Result", ercc.Response.Response, "Failure responding to request")
}
}
return
}
return
}
// UpdateTagsResponder handles the response to the UpdateTags request. The method always
// closes the http.Response Body.
func (client ExpressRouteCrossConnectionsClient) UpdateTagsResponder(resp *http.Response) (result ExpressRouteCrossConnection, err error) {
err = autorest.Respond(
resp,
azure.WithErrorUnlessStatusCode(http.StatusOK),
autorest.ByUnmarshallingJSON(&result),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}
| NewExpressRouteCrossConnectionsClient |
roughenough-kms.rs | // Copyright 2017-2019 int08h LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//!
//! CLI used to encrypt and decrypt the Roughenough long-term key
//! using one of the KMS implementations
//!
#[macro_use]
extern crate log;
use clap::{App, Arg};
#[allow(unused_imports)]
use roughenough::kms::{EnvelopeEncryption, KmsProvider};
use roughenough::roughenough_version;
#[cfg(not(any(feature = "awskms", feature = "gcpkms")))]
fn encrypt_seed(_: &str, _: &str) {
// main() will exit if kms support is not enabled, making this unreachable
unreachable!()
}
#[cfg(any(feature = "awskms", feature = "gcpkms"))]
fn encrypt_seed(kms_key: &str, hex_seed: &str) {
let kms_client = get_kms(kms_key);
let plaintext_seed = hex::decode(hex_seed).expect("Error decoding hex seed value");
if plaintext_seed.len() != 32 {
panic!("Seed must be 32 bytes long; provided seed is {}", plaintext_seed.len());
}
match EnvelopeEncryption::encrypt_seed(&kms_client, &plaintext_seed) {
Ok(encrypted_blob) => {
println!("kms_protection: \"{}\"", kms_key);
println!("seed: {}", hex::encode(&encrypted_blob));
}
Err(e) => {
error!("Error: {:?}", e);
}
}
}
#[cfg(not(any(feature = "awskms", feature = "gcpkms")))]
fn decrypt_blob(_: &str, _: &str) {
// main() will exit if kms support is not enabled, making this unreachable
unreachable!()
}
#[cfg(any(feature = "awskms", feature = "gcpkms"))]
fn decrypt_blob(kms_key: &str, hex_blob: &str) {
let kms_client = get_kms(kms_key);
let ciphertext = hex::decode(hex_blob).expect("Error decoding hex blob value");
match EnvelopeEncryption::decrypt_seed(&kms_client, ciphertext.as_ref()) {
Ok(plaintext) => {
println!("{}", hex::encode(plaintext));
}
Err(e) => {
error!("Error: {:?}", e);
}
}
}
#[cfg(feature = "awskms")]
fn get_kms(kms_key: &str) -> impl KmsProvider {
use roughenough::kms::AwsKms;
AwsKms::from_arn(kms_key).unwrap()
}
#[cfg(feature = "gcpkms")]
fn get_kms(kms_key: &str) -> impl KmsProvider {
use roughenough::kms::GcpKms;
GcpKms::from_resource_id(kms_key).unwrap()
}
#[allow(unused_variables)]
pub fn main() {
use log::Level;
simple_logger::init_with_level(Level::Info).unwrap();
let matches = App::new("roughenough-kms")
.version(roughenough_version().as_ref())
.long_about("Encrypt and decrypt Roughenough long-term server seeds using a KMS")
.arg( | .required(true)
.help("Identity of the KMS key to be used")
).arg(
Arg::with_name("DECRYPT")
.short("d")
.long("decrypt")
.takes_value(true)
.required(false)
.help("Previously encrypted blob to decrypt to plaintext"),
).arg(
Arg::with_name("SEED")
.short("s")
.long("seed")
.takes_value(true)
.required(false)
.help("32 byte hex seed for the server's long-term identity"),
).get_matches();
if !(cfg!(feature = "gcpkms") || cfg!(feature = "awskms")) {
warn!("KMS support was not compiled into this build; nothing to do.");
warn!("See the Roughenough documentation for information on KMS support.");
warn!(" https://github.com/int08h/roughenough/blob/master/doc/OPTIONAL-FEATURES.md");
return
}
let kms_key = matches.value_of("KEY_ID").expect("Invalid KMS key id");
if matches.is_present("SEED") {
let hex_seed = matches.value_of("SEED").expect("Invalid seed value");
encrypt_seed(kms_key, hex_seed);
} else if matches.is_present("DECRYPT") {
let hex_blob = matches.value_of("DECRYPT").expect("Invalid blob value");
decrypt_blob(kms_key, hex_blob);
} else {
error!("Neither seed encryption (-s) or blob decryption (-d) was specified.");
error!("One of them is required.");
}
} | Arg::with_name("KEY_ID")
.short("k")
.long("kms-key")
.takes_value(true) |
appconfig.py | import os
import shutil
from pathlib import Path
import yaml
# gui home paths
from core.gui import themes
HOME_PATH = Path.home().joinpath(".coretk")
BACKGROUNDS_PATH = HOME_PATH.joinpath("backgrounds")
CUSTOM_EMANE_PATH = HOME_PATH.joinpath("custom_emane")
CUSTOM_SERVICE_PATH = HOME_PATH.joinpath("custom_services")
ICONS_PATH = HOME_PATH.joinpath("icons")
MOBILITY_PATH = HOME_PATH.joinpath("mobility")
XMLS_PATH = HOME_PATH.joinpath("xmls")
CONFIG_PATH = HOME_PATH.joinpath("gui.yaml")
LOG_PATH = HOME_PATH.joinpath("gui.log")
# local paths
DATA_PATH = Path(__file__).parent.joinpath("data")
LOCAL_ICONS_PATH = DATA_PATH.joinpath("icons").absolute()
LOCAL_BACKGROUND_PATH = DATA_PATH.joinpath("backgrounds").absolute()
LOCAL_XMLS_PATH = DATA_PATH.joinpath("xmls").absolute()
LOCAL_MOBILITY_PATH = DATA_PATH.joinpath("mobility").absolute()
# configuration data
TERMINALS = [
"$TERM",
"gnome-terminal --window --",
"lxterminal -e",
"konsole -e",
"xterm -e",
"aterm -e",
"eterm -e",
"rxvt -e",
"xfce4-terminal -x",
]
EDITORS = ["$EDITOR", "vim", "emacs", "gedit", "nano", "vi"]
class IndentDumper(yaml.Dumper):
def increase_indent(self, flow=False, indentless=False):
return super().increase_indent(flow, False)
def copy_files(current_path, new_path):
for current_file in current_path.glob("*"):
new_file = new_path.joinpath(current_file.name)
shutil.copy(current_file, new_file)
def check_directory():
if HOME_PATH.exists():
return
HOME_PATH.mkdir()
BACKGROUNDS_PATH.mkdir()
CUSTOM_EMANE_PATH.mkdir()
CUSTOM_SERVICE_PATH.mkdir()
ICONS_PATH.mkdir()
MOBILITY_PATH.mkdir()
XMLS_PATH.mkdir()
copy_files(LOCAL_ICONS_PATH, ICONS_PATH)
copy_files(LOCAL_BACKGROUND_PATH, BACKGROUNDS_PATH)
copy_files(LOCAL_XMLS_PATH, XMLS_PATH)
copy_files(LOCAL_MOBILITY_PATH, MOBILITY_PATH)
if "TERM" in os.environ:
terminal = TERMINALS[0]
else:
terminal = TERMINALS[1]
if "EDITOR" in os.environ:
editor = EDITORS[0]
else:
editor = EDITORS[1]
config = {
"preferences": {
"theme": themes.THEME_DARK,
"editor": editor,
"terminal": terminal,
"gui3d": "/usr/local/bin/std3d.sh",
"width": 1000,
"height": 750,
},
"location": {
"x": 0.0,
"y": 0.0,
"z": 0.0,
"lat": 47.5791667,
"lon": -122.132322,
"alt": 2.0,
"scale": 150.0,
},
"servers": [{"name": "example", "address": "127.0.0.1", "port": 50051}],
"nodes": [],
"recentfiles": [],
"observers": [{"name": "hello", "cmd": "echo hello"}],
}
save(config)
def read():
with CONFIG_PATH.open("r") as f:
return yaml.load(f, Loader=yaml.SafeLoader)
def | (config):
with CONFIG_PATH.open("w") as f:
yaml.dump(config, f, Dumper=IndentDumper, default_flow_style=False)
| save |
condvar.rs | use crate::sys::mutex::Mutex;
use crate::time::Duration;
|
impl Condvar {
pub const fn new() -> Condvar {
Condvar { }
}
#[inline]
pub unsafe fn init(&mut self) {}
#[inline]
pub unsafe fn notify_one(&self) {
}
#[inline]
pub unsafe fn notify_all(&self) {
}
pub unsafe fn wait(&self, _mutex: &Mutex) {
panic!("can't block with web assembly")
}
pub unsafe fn wait_timeout(&self, _mutex: &Mutex, _dur: Duration) -> bool {
panic!("can't block with web assembly");
}
#[inline]
pub unsafe fn destroy(&self) {
}
} | pub struct Condvar { }
pub type MovableCondvar = Box<Condvar>; |
wikicode.py | #
# Copyright (C) 2012-2019 Ben Kurtovic <[email protected]>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import re
from itertools import chain
from .nodes import (Argument, Comment, ExternalLink, Heading, HTMLEntity,
Node, Tag, Template, Text, Wikilink)
from .smart_list.ListProxy import _ListProxy
from .string_mixin import StringMixIn
from .utils import parse_anything
__all__ = ["Wikicode"]
FLAGS = re.IGNORECASE | re.DOTALL | re.UNICODE
class Wikicode(StringMixIn):
"""A ``Wikicode`` is a container for nodes that operates like a string.
Additionally, it contains methods that can be used to extract data from or
modify the nodes, implemented in an interface similar to a list. For
example, :meth:`index` can get the index of a node in the list, and
:meth:`insert` can add a new node at that index. The :meth:`filter()
<ifilter>` series of functions is very useful for extracting and iterating
over, for example, all of the templates in the object.
"""
RECURSE_OTHERS = 2
def __init__(self, nodes):
super().__init__()
self._nodes = nodes
def __unicode__(self):
return "".join([str(node) for node in self.nodes])
@staticmethod
def _get_children(node, contexts=False, restrict=None, parent=None):
"""Iterate over all child :class:`.Node`\\ s of a given *node*."""
yield (parent, node) if contexts else node
if restrict and isinstance(node, restrict):
return
for code in node.__children__():
for child in code.nodes:
sub = Wikicode._get_children(child, contexts, restrict, code)
yield from sub
@staticmethod
def _slice_replace(code, index, old, new):
"""Replace the string *old* with *new* across *index* in *code*."""
nodes = [str(node) for node in code.get(index)]
substring = "".join(nodes).replace(old, new)
code.nodes[index] = parse_anything(substring).nodes
@staticmethod
def _build_matcher(matches, flags):
"""Helper for :meth:`_indexed_ifilter` and others.
If *matches* is a function, return it. If it's a regex, return a
wrapper around it that can be called with a node to do a search. If
it's ``None``, return a function that always returns ``True``.
"""
if matches:
if callable(matches):
return matches
return lambda obj: re.search(matches, str(obj), flags)
return lambda obj: True
def _indexed_ifilter(self, recursive=True, matches=None, flags=FLAGS,
forcetype=None):
"""Iterate over nodes and their corresponding indices in the node list.
The arguments are interpreted as for :meth:`ifilter`. For each tuple
``(i, node)`` yielded by this method, ``self.index(node) == i``. Note
that if *recursive* is ``True``, ``self.nodes[i]`` might not be the
node itself, but will still contain it.
"""
match = self._build_matcher(matches, flags)
if recursive:
restrict = forcetype if recursive == self.RECURSE_OTHERS else None
def getter(i, node):
for ch in self._get_children(node, restrict=restrict):
yield (i, ch)
inodes = chain(*(getter(i, n) for i, n in enumerate(self.nodes)))
else:
inodes = enumerate(self.nodes)
for i, node in inodes:
if (not forcetype or isinstance(node, forcetype)) and match(node):
yield (i, node)
def _is_child_wikicode(self, obj, recursive=True):
"""Return whether the given :class:`.Wikicode` is a descendant."""
def deref(nodes):
if isinstance(nodes, _ListProxy):
return nodes._parent # pylint: disable=protected-access
return nodes
target = deref(obj.nodes)
if target is deref(self.nodes):
return True
if recursive:
todo = [self]
while todo:
code = todo.pop()
if target is deref(code.nodes):
return True
for node in code.nodes:
todo += list(node.__children__())
return False
def _do_strong_search(self, obj, recursive=True):
"""Search for the specific element *obj* within the node list.
*obj* can be either a :class:`.Node` or a :class:`.Wikicode` object. If
found, we return a tuple (*context*, *index*) where *context* is the
:class:`.Wikicode` that contains *obj* and *index* is its index there,
as a :class:`slice`. Note that if *recursive* is ``False``, *context*
will always be ``self`` (since we only look for *obj* among immediate
descendants), but if *recursive* is ``True``, then it could be any
:class:`.Wikicode` contained by a node within ``self``. If *obj* is not
found, :exc:`ValueError` is raised.
"""
if isinstance(obj, Wikicode):
if not self._is_child_wikicode(obj, recursive):
raise ValueError(obj)
return obj, slice(0, len(obj.nodes))
if isinstance(obj, Node):
mkslice = lambda i: slice(i, i + 1)
if not recursive:
return self, mkslice(self.index(obj))
for node in self.nodes:
for context, child in self._get_children(node, contexts=True):
if obj is child:
if not context:
context = self
return context, mkslice(context.index(child))
raise ValueError(obj)
raise TypeError(obj)
def _do_weak_search(self, obj, recursive):
"""Search for an element that looks like *obj* within the node list.
This follows the same rules as :meth:`_do_strong_search` with some
differences. *obj* is treated as a string that might represent any
:class:`.Node`, :class:`.Wikicode`, or combination of the two present
in the node list. Thus, matching is weak (using string comparisons)
rather than strong (using ``is``). Because multiple nodes can match
*obj*, the result is a list of tuples instead of just one (however,
:exc:`ValueError` is still raised if nothing is found). Individual
matches will never overlap.
The tuples contain a new first element, *exact*, which is ``True`` if
we were able to match *obj* exactly to one or more adjacent nodes, or
``False`` if we found *obj* inside a node or incompletely spanning
multiple nodes.
"""
obj = parse_anything(obj)
if not obj or obj not in self:
raise ValueError(obj)
results = []
contexts = [self]
while contexts:
context = contexts.pop()
i = len(context.nodes) - 1
while i >= 0:
node = context.get(i)
if obj.get(-1) == node:
for j in range(-len(obj.nodes), -1):
if obj.get(j) != context.get(i + j + 1):
break
else:
i -= len(obj.nodes) - 1
index = slice(i, i + len(obj.nodes))
results.append((True, context, index))
elif recursive and obj in node:
contexts.extend(node.__children__())
i -= 1
if not results:
if not recursive:
raise ValueError(obj)
results.append((False, self, slice(0, len(self.nodes))))
return results
def _get_tree(self, code, lines, marker, indent):
|
@classmethod
def _build_filter_methods(cls, **meths):
"""Given Node types, build the corresponding i?filter shortcuts.
The should be given as keys storing the method's base name paired with
values storing the corresponding :class:`.Node` type. For example, the
dict may contain the pair ``("templates", Template)``, which will
produce the methods :meth:`ifilter_templates` and
:meth:`filter_templates`, which are shortcuts for
:meth:`ifilter(forcetype=Template) <ifilter>` and
:meth:`filter(forcetype=Template) <filter>`, respectively. These
shortcuts are added to the class itself, with an appropriate docstring.
"""
doc = """Iterate over {0}.
This is equivalent to :meth:`{1}` with *forcetype* set to
:class:`~{2.__module__}.{2.__name__}`.
"""
make_ifilter = lambda ftype: (lambda self, *a, **kw:
self.ifilter(forcetype=ftype, *a, **kw))
make_filter = lambda ftype: (lambda self, *a, **kw:
self.filter(forcetype=ftype, *a, **kw))
for name, ftype in meths.items():
ifilter = make_ifilter(ftype)
filter = make_filter(ftype)
ifilter.__doc__ = doc.format(name, "ifilter", ftype)
filter.__doc__ = doc.format(name, "filter", ftype)
setattr(cls, "ifilter_" + name, ifilter)
setattr(cls, "filter_" + name, filter)
@property
def nodes(self):
"""A list of :class:`.Node` objects.
This is the internal data actually stored within a :class:`.Wikicode`
object.
"""
return self._nodes
@nodes.setter
def nodes(self, value):
if not isinstance(value, list):
value = parse_anything(value).nodes
self._nodes = value
def get(self, index):
"""Return the *index*\\ th node within the list of nodes."""
return self.nodes[index]
def set(self, index, value):
"""Set the ``Node`` at *index* to *value*.
Raises :exc:`IndexError` if *index* is out of range, or
:exc:`ValueError` if *value* cannot be coerced into one :class:`.Node`.
To insert multiple nodes at an index, use :meth:`get` with either
:meth:`remove` and :meth:`insert` or :meth:`replace`.
"""
nodes = parse_anything(value).nodes
if len(nodes) > 1:
raise ValueError("Cannot coerce multiple nodes into one index")
if index >= len(self.nodes) or -1 * index > len(self.nodes):
raise IndexError("List assignment index out of range")
if nodes:
self.nodes[index] = nodes[0]
else:
self.nodes.pop(index)
def contains(self, obj):
"""Return whether this Wikicode object contains *obj*.
If *obj* is a :class:`.Node` or :class:`.Wikicode` object, then we
search for it exactly among all of our children, recursively.
Otherwise, this method just uses :meth:`.__contains__` on the string.
"""
if not isinstance(obj, (Node, Wikicode)):
return obj in self
try:
self._do_strong_search(obj, recursive=True)
except ValueError:
return False
return True
def index(self, obj, recursive=False):
"""Return the index of *obj* in the list of nodes.
Raises :exc:`ValueError` if *obj* is not found. If *recursive* is
``True``, we will look in all nodes of ours and their descendants, and
return the index of our direct descendant node within *our* list of
nodes. Otherwise, the lookup is done only on direct descendants.
"""
strict = isinstance(obj, Node)
equivalent = (lambda o, n: o is n) if strict else (lambda o, n: o == n)
for i, node in enumerate(self.nodes):
if recursive:
for child in self._get_children(node):
if equivalent(obj, child):
return i
elif equivalent(obj, node):
return i
raise ValueError(obj)
def get_ancestors(self, obj):
"""Return a list of all ancestor nodes of the :class:`.Node` *obj*.
The list is ordered from the most shallow ancestor (greatest great-
grandparent) to the direct parent. The node itself is not included in
the list. For example::
>>> text = "{{a|{{b|{{c|{{d}}}}}}}}"
>>> code = mwparserfromhell.parse(text)
>>> node = code.filter_templates(matches=lambda n: n == "{{d}}")[0]
>>> code.get_ancestors(node)
['{{a|{{b|{{c|{{d}}}}}}}}', '{{b|{{c|{{d}}}}}}', '{{c|{{d}}}}']
Will return an empty list if *obj* is at the top level of this Wikicode
object. Will raise :exc:`ValueError` if it wasn't found.
"""
def _get_ancestors(code, needle):
for node in code.nodes:
if node is needle:
return []
for code in node.__children__():
ancestors = _get_ancestors(code, needle)
if ancestors is not None:
return [node] + ancestors
if isinstance(obj, Wikicode):
obj = obj.get(0)
elif not isinstance(obj, Node):
raise ValueError(obj)
ancestors = _get_ancestors(self, obj)
if ancestors is None:
raise ValueError(obj)
return ancestors
def get_parent(self, obj):
"""Return the direct parent node of the :class:`.Node` *obj*.
This function is equivalent to calling :meth:`.get_ancestors` and
taking the last element of the resulting list. Will return None if
the node exists but does not have a parent; i.e., it is at the top
level of the Wikicode object.
"""
ancestors = self.get_ancestors(obj)
return ancestors[-1] if ancestors else None
def insert(self, index, value):
"""Insert *value* at *index* in the list of nodes.
*value* can be anything parsable by :func:`.parse_anything`, which
includes strings or other :class:`.Wikicode` or :class:`.Node` objects.
"""
nodes = parse_anything(value).nodes
for node in reversed(nodes):
self.nodes.insert(index, node)
def insert_before(self, obj, value, recursive=True):
"""Insert *value* immediately before *obj*.
*obj* can be either a string, a :class:`.Node`, or another
:class:`.Wikicode` object (as created by :meth:`get_sections`, for
example). If *obj* is a string, we will operate on all instances of
that string within the code, otherwise only on the specific instance
given. *value* can be anything parsable by :func:`.parse_anything`. If
*recursive* is ``True``, we will try to find *obj* within our child
nodes even if it is not a direct descendant of this :class:`.Wikicode`
object. If *obj* is not found, :exc:`ValueError` is raised.
"""
if isinstance(obj, (Node, Wikicode)):
context, index = self._do_strong_search(obj, recursive)
context.insert(index.start, value)
else:
for exact, context, index in self._do_weak_search(obj, recursive):
if exact:
context.insert(index.start, value)
else:
obj = str(obj)
self._slice_replace(context, index, obj, str(value) + obj)
def insert_after(self, obj, value, recursive=True):
"""Insert *value* immediately after *obj*.
*obj* can be either a string, a :class:`.Node`, or another
:class:`.Wikicode` object (as created by :meth:`get_sections`, for
example). If *obj* is a string, we will operate on all instances of
that string within the code, otherwise only on the specific instance
given. *value* can be anything parsable by :func:`.parse_anything`. If
*recursive* is ``True``, we will try to find *obj* within our child
nodes even if it is not a direct descendant of this :class:`.Wikicode`
object. If *obj* is not found, :exc:`ValueError` is raised.
"""
if isinstance(obj, (Node, Wikicode)):
context, index = self._do_strong_search(obj, recursive)
context.insert(index.stop, value)
else:
for exact, context, index in self._do_weak_search(obj, recursive):
if exact:
context.insert(index.stop, value)
else:
obj = str(obj)
self._slice_replace(context, index, obj, obj + str(value))
def replace(self, obj, value, recursive=True):
"""Replace *obj* with *value*.
*obj* can be either a string, a :class:`.Node`, or another
:class:`.Wikicode` object (as created by :meth:`get_sections`, for
example). If *obj* is a string, we will operate on all instances of
that string within the code, otherwise only on the specific instance
given. *value* can be anything parsable by :func:`.parse_anything`.
If *recursive* is ``True``, we will try to find *obj* within our child
nodes even if it is not a direct descendant of this :class:`.Wikicode`
object. If *obj* is not found, :exc:`ValueError` is raised.
"""
if isinstance(obj, (Node, Wikicode)):
context, index = self._do_strong_search(obj, recursive)
for i in range(index.start, index.stop):
context.nodes.pop(index.start)
context.insert(index.start, value)
else:
for exact, context, index in self._do_weak_search(obj, recursive):
if exact:
for i in range(index.start, index.stop):
context.nodes.pop(index.start)
context.insert(index.start, value)
else:
self._slice_replace(context, index, str(obj), str(value))
def append(self, value):
"""Insert *value* at the end of the list of nodes.
*value* can be anything parsable by :func:`.parse_anything`.
"""
nodes = parse_anything(value).nodes
for node in nodes:
self.nodes.append(node)
def remove(self, obj, recursive=True):
"""Remove *obj* from the list of nodes.
*obj* can be either a string, a :class:`.Node`, or another
:class:`.Wikicode` object (as created by :meth:`get_sections`, for
example). If *obj* is a string, we will operate on all instances of
that string within the code, otherwise only on the specific instance
given. If *recursive* is ``True``, we will try to find *obj* within our
child nodes even if it is not a direct descendant of this
:class:`.Wikicode` object. If *obj* is not found, :exc:`ValueError` is
raised.
"""
if isinstance(obj, (Node, Wikicode)):
context, index = self._do_strong_search(obj, recursive)
for i in range(index.start, index.stop):
context.nodes.pop(index.start)
else:
for exact, context, index in self._do_weak_search(obj, recursive):
if exact:
for i in range(index.start, index.stop):
context.nodes.pop(index.start)
else:
self._slice_replace(context, index, str(obj), "")
def matches(self, other):
"""Do a loose equivalency test suitable for comparing page names.
*other* can be any string-like object, including :class:`.Wikicode`, or
an iterable of these. This operation is symmetric; both sides are
adjusted. Specifically, whitespace and markup is stripped and the first
letter's case is normalized. Typical usage is
``if template.name.matches("stub"): ...``.
"""
normalize = lambda s: (s[0].upper() + s[1:]).replace("_", " ") if s else s
this = normalize(self.strip_code().strip())
if isinstance(other, (str, bytes, Wikicode, Node)):
that = parse_anything(other).strip_code().strip()
return this == normalize(that)
for obj in other:
that = parse_anything(obj).strip_code().strip()
if this == normalize(that):
return True
return False
def ifilter(self, recursive=True, matches=None, flags=FLAGS,
forcetype=None):
"""Iterate over nodes in our list matching certain conditions.
If *forcetype* is given, only nodes that are instances of this type (or
tuple of types) are yielded. Setting *recursive* to ``True`` will
iterate over all children and their descendants. ``RECURSE_OTHERS``
will only iterate over children that are not the instances of
*forcetype*. ``False`` will only iterate over immediate children.
``RECURSE_OTHERS`` can be used to iterate over all un-nested templates,
even if they are inside of HTML tags, like so:
>>> code = mwparserfromhell.parse("{{foo}}<b>{{foo|{{bar}}}}</b>")
>>> code.filter_templates(code.RECURSE_OTHERS)
["{{foo}}", "{{foo|{{bar}}}}"]
*matches* can be used to further restrict the nodes, either as a
function (taking a single :class:`.Node` and returning a boolean) or a
regular expression (matched against the node's string representation
with :func:`re.search`). If *matches* is a regex, the flags passed to
:func:`re.search` are :const:`re.IGNORECASE`, :const:`re.DOTALL`, and
:const:`re.UNICODE`, but custom flags can be specified by passing
*flags*.
"""
gen = self._indexed_ifilter(recursive, matches, flags, forcetype)
return (node for i, node in gen)
def filter(self, *args, **kwargs):
"""Return a list of nodes within our list matching certain conditions.
This is equivalent to calling :func:`list` on :meth:`ifilter`.
"""
return list(self.ifilter(*args, **kwargs))
def get_sections(self, levels=None, matches=None, flags=FLAGS, flat=False,
include_lead=None, include_headings=True):
"""Return a list of sections within the page.
Sections are returned as :class:`.Wikicode` objects with a shared node
list (implemented using :class:`.SmartList`) so that changes to
sections are reflected in the parent Wikicode object.
Each section contains all of its subsections, unless *flat* is
``True``. If *levels* is given, it should be a iterable of integers;
only sections whose heading levels are within it will be returned. If
*matches* is given, it should be either a function or a regex; only
sections whose headings match it (without the surrounding equal signs)
will be included. *flags* can be used to override the default regex
flags (see :meth:`ifilter`) if a regex *matches* is used.
If *include_lead* is ``True``, the first, lead section (without a
heading) will be included in the list; ``False`` will not include it;
the default will include it only if no specific *levels* were given. If
*include_headings* is ``True``, the section's beginning
:class:`.Heading` object will be included; otherwise, this is skipped.
"""
title_matcher = self._build_matcher(matches, flags)
matcher = lambda heading: (title_matcher(heading.title) and
(not levels or heading.level in levels))
iheadings = self._indexed_ifilter(recursive=False, forcetype=Heading)
sections = [] # Tuples of (index_of_first_node, section)
open_headings = [] # Tuples of (index, heading), where index and
# heading.level are both monotonically increasing
# Add the lead section if appropriate:
if include_lead or not (include_lead is not None or matches or levels):
itr = self._indexed_ifilter(recursive=False, forcetype=Heading)
try:
first = next(itr)[0]
sections.append((0, Wikicode(self.nodes[:first])))
except StopIteration: # No headings in page
sections.append((0, Wikicode(self.nodes[:])))
# Iterate over headings, adding sections to the list as they end:
for i, heading in iheadings:
if flat: # With flat, all sections close at the next heading
newly_closed, open_headings = open_headings, []
else: # Otherwise, figure out which sections have closed, if any
closed_start_index = len(open_headings)
for j, (start, last_heading) in enumerate(open_headings):
if heading.level <= last_heading.level:
closed_start_index = j
break
newly_closed = open_headings[closed_start_index:]
del open_headings[closed_start_index:]
for start, closed_heading in newly_closed:
if matcher(closed_heading):
sections.append((start, Wikicode(self.nodes[start:i])))
start = i if include_headings else (i + 1)
open_headings.append((start, heading))
# Add any remaining open headings to the list of sections:
for start, heading in open_headings:
if matcher(heading):
sections.append((start, Wikicode(self.nodes[start:])))
# Ensure that earlier sections are earlier in the returned list:
return [section for i, section in sorted(sections)]
def strip_code(self, normalize=True, collapse=True,
keep_template_params=False):
"""Return a rendered string without unprintable code such as templates.
The way a node is stripped is handled by the
:meth:`~.Node.__strip__` method of :class:`.Node` objects, which
generally return a subset of their nodes or ``None``. For example,
templates and tags are removed completely, links are stripped to just
their display part, headings are stripped to just their title.
If *normalize* is ``True``, various things may be done to strip code
further, such as converting HTML entities like ``Σ``, ``Σ``,
and ``Σ`` to ``Σ``. If *collapse* is ``True``, we will try to
remove excess whitespace as well (three or more newlines are converted
to two, for example). If *keep_template_params* is ``True``, then
template parameters will be preserved in the output (normally, they are
removed completely).
"""
kwargs = {
"normalize": normalize,
"collapse": collapse,
"keep_template_params": keep_template_params
}
nodes = []
for node in self.nodes:
stripped = node.__strip__(**kwargs)
if stripped:
nodes.append(str(stripped))
if collapse:
stripped = "".join(nodes).strip("\n")
while "\n\n\n" in stripped:
stripped = stripped.replace("\n\n\n", "\n\n")
return stripped
else:
return "".join(nodes)
def get_tree(self):
"""Return a hierarchical tree representation of the object.
The representation is a string makes the most sense printed. It is
built by calling :meth:`_get_tree` on the :class:`.Wikicode` object and
its children recursively. The end result may look something like the
following::
>>> text = "Lorem ipsum {{foo|bar|{{baz}}|spam=eggs}}"
>>> print(mwparserfromhell.parse(text).get_tree())
Lorem ipsum
{{
foo
| 1
= bar
| 2
= {{
baz
}}
| spam
= eggs
}}
"""
marker = object() # Random object we can find with certainty in a list
return "\n".join(self._get_tree(self, [], marker, 0))
Wikicode._build_filter_methods(
arguments=Argument, comments=Comment, external_links=ExternalLink,
headings=Heading, html_entities=HTMLEntity, tags=Tag, templates=Template,
text=Text, wikilinks=Wikilink)
| """Build a tree to illustrate the way the Wikicode object was parsed.
The method that builds the actual tree is ``__showtree__`` of ``Node``
objects. *code* is the ``Wikicode`` object to build a tree for. *lines*
is the list to append the tree to, which is returned at the end of the
method. *marker* is some object to be used to indicate that the builder
should continue on from the last line instead of starting a new one; it
should be any object that can be tested for with ``is``. *indent* is
the starting indentation.
"""
def write(*args):
"""Write a new line following the proper indentation rules."""
if lines and lines[-1] is marker: # Continue from the last line
lines.pop() # Remove the marker
last = lines.pop()
lines.append(last + " ".join(args))
else:
lines.append(" " * 6 * indent + " ".join(args))
get = lambda code: self._get_tree(code, lines, marker, indent + 1)
mark = lambda: lines.append(marker)
for node in code.nodes:
node.__showtree__(write, get, mark)
return lines |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.